author    | Pasha <pasha@member.fsf.org> | 2024-02-20 18:49:50 +0000
committer | Pasha <pasha@member.fsf.org> | 2024-02-20 18:49:50 +0000
commit    | 5e0b8d508ed51004bd836384293be00950ee62c9 (patch)
tree      | e3f16b1aa8b7177032ce3ec429fbad2b1d92a876 /linux/dev
download  | gnumach-riscv-5e0b8d508ed51004bd836384293be00950ee62c9.tar.gz
          | gnumach-riscv-5e0b8d508ed51004bd836384293be00950ee62c9.tar.bz2
init gnumach copy
Diffstat (limited to 'linux/dev')
53 files changed, 23502 insertions, 0 deletions
diff --git a/linux/dev/README b/linux/dev/README new file mode 100644 index 0000000..c3ceca1 --- /dev/null +++ b/linux/dev/README @@ -0,0 +1,8 @@ +This hierarchy used to contain modified files, based on files from the +Linux kernel, as opposed to `../src/' containing only files that have not +been modified (or have only been modified marginally). This policy is +NOT adhered to any further, so please don't change (or even add) files +below here, but instead merge the files in here back into `../src/' +(which should really be called `../linux-2.0' or similar) or even better +--- when adding large chunks --- create a more suitable hierarchy like +we've done with `../pcmcia-cs/'. diff --git a/linux/dev/arch/i386/kernel/irq.c b/linux/dev/arch/i386/kernel/irq.c new file mode 100644 index 0000000..3b349cc --- /dev/null +++ b/linux/dev/arch/i386/kernel/irq.c @@ -0,0 +1,775 @@ +/* + * Linux IRQ management. + * Copyright (C) 1995 Shantanu Goel. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* + * linux/arch/i386/kernel/irq.c + * + * Copyright (C) 1992 Linus Torvalds + */ + +#include <sys/types.h> +#include <mach/mach_types.h> +#include <mach/vm_param.h> +#include <kern/assert.h> +#include <kern/cpu_number.h> + +#include <i386/spl.h> +#include <i386/irq.h> +#include <i386/pit.h> + +#define MACH_INCLUDE +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <linux/ptrace.h> +#include <linux/delay.h> +#include <linux/kernel_stat.h> +#include <linux/malloc.h> +#include <linux/ioport.h> + +#include <asm/system.h> +#include <asm/bitops.h> +#include <asm/irq.h> +#include <asm/io.h> +#include <asm/hardirq.h> + +#include <linux/dev/glue/glue.h> +#include <machine/machspl.h> + +#include <device/intr.h> + +#if 0 +/* XXX: This is the way it's done in linux 2.2. GNU Mach currently uses intr_count. It should be made using local_{bh/irq}_count instead (through hardirq_enter/exit) for SMP support. */ +unsigned int local_bh_count[NR_CPUS]; +unsigned int local_irq_count[NR_CPUS]; +#else +#define local_bh_count (&intr_count) +#define local_irq_count (&intr_count) +#endif + +/* + * XXX Move this into more suitable place... + * Set if the machine has an EISA bus. + */ +int EISA_bus = 0; + +/* + * Flag indicating an interrupt is being handled. + */ +unsigned int intr_count = 0; + +/* + * List of Linux interrupt handlers. + */ +struct linux_action +{ + void (*handler) (int, void *, struct pt_regs *); + void *dev_id; + struct linux_action *next; + unsigned long flags; + user_intr_t *user_intr; +}; + +static struct linux_action *irq_action[NINTR] = {0}; + +/* + * Generic interrupt handler for Linux devices. + * Set up a fake `struct pt_regs' then call the real handler. 
+ */ +static void +linux_intr (int irq) +{ + struct pt_regs regs; + struct linux_action *action = *(irq_action + irq); + struct linux_action **prev = &irq_action[irq]; + unsigned long flags; + + kstat.interrupts[irq]++; + intr_count++; + + save_flags (flags); + if (action && (action->flags & SA_INTERRUPT)) + cli (); + + while (action) + { + // TODO I might need to check whether the interrupt belongs to + // the current device. But I don't do it for now. + if (action->user_intr) + { + if (!deliver_user_intr(&irqtab, irq, action->user_intr)) + { + *prev = action->next; + linux_kfree(action); + action = *prev; + continue; + } + } + else if (action->handler) + action->handler (irq, action->dev_id, ®s); + prev = &action->next; + action = action->next; + } + + if (!irq_action[irq]) + { + /* No handler any more, disable interrupt */ + mask_irq (irq); + ivect[irq] = intnull; + iunit[irq] = irq; + } + + restore_flags (flags); + + intr_count--; +} + +/* IRQ mask according to Linux drivers */ +static unsigned linux_pic_mask; + +/* These only record that Linux requested to mask IRQs */ +void +disable_irq (unsigned int irq_nr) +{ + unsigned long flags; + unsigned mask = 1U << irq_nr; + + save_flags (flags); + cli (); + if (!(linux_pic_mask & mask)) + { + linux_pic_mask |= mask; + __disable_irq(irq_nr); + } + restore_flags (flags); +} + +void +enable_irq (unsigned int irq_nr) +{ + unsigned long flags; + unsigned mask = 1U << irq_nr; + + save_flags (flags); + cli (); + if (linux_pic_mask & mask) + { + linux_pic_mask &= ~mask; + __enable_irq(irq_nr); + } + restore_flags (flags); +} + +static int +setup_x86_irq (int irq, struct linux_action *new) +{ + int shared = 0; + struct linux_action *old, **p; + unsigned long flags; + + p = irq_action + irq; + if ((old = *p) != NULL) + { + /* Can't share interrupts unless both agree to */ + if (!(old->flags & new->flags & SA_SHIRQ)) + return (-EBUSY); + + /* Can't share interrupts unless both are same type */ + if ((old->flags ^ new->flags) & SA_INTERRUPT) + return (-EBUSY); + + /* add new interrupt at end of irq queue */ + do + { + p = &old->next; + old = *p; + } + while (old); + shared = 1; + } + + save_flags (flags); + cli (); + *p = new; + + if (!shared) + { + ivect[irq] = linux_intr; + iunit[irq] = irq; + unmask_irq (irq); + } + restore_flags (flags); + return 0; +} + +int +install_user_intr_handler (struct irqdev *dev, int id, unsigned long flags, + user_intr_t *user_intr) +{ + struct linux_action *action; + struct linux_action *old; + int retval; + + unsigned int irq = dev->irq[id]; + + assert (irq < NINTR); + + /* Test whether the irq handler has been set */ + // TODO I need to protect the array when iterating it. + old = irq_action[irq]; + while (old) + { + if (old->user_intr && old->user_intr->dst_port == user_intr->dst_port) + { + printk ("The interrupt handler has already been installed on line %d", irq); + return linux_to_mach_error (-EAGAIN); + } + old = old->next; + } + + /* + * Hmm... Should I use `kalloc()' ? + * By OKUJI Yoshinori. + */ + action = (struct linux_action *) + linux_kmalloc (sizeof (struct linux_action), GFP_KERNEL); + if (action == NULL) + return linux_to_mach_error (-ENOMEM); + + action->handler = NULL; + action->next = NULL; + action->dev_id = NULL; + action->flags = SA_SHIRQ; + action->user_intr = user_intr; + + retval = setup_x86_irq (irq, action); + if (retval) + linux_kfree (action); + + return linux_to_mach_error (retval); +} + +/* + * Attach a handler to an IRQ. 
+ */ +int +request_irq (unsigned int irq, void (*handler) (int, void *, struct pt_regs *), + unsigned long flags, const char *device, void *dev_id) +{ + struct linux_action *action; + int retval; + + assert (irq < NINTR); + + if (!handler) + return -EINVAL; + + /* + * Hmm... Should I use `kalloc()' ? + * By OKUJI Yoshinori. + */ + action = (struct linux_action *) + linux_kmalloc (sizeof (struct linux_action), GFP_KERNEL); + if (action == NULL) + return -ENOMEM; + + action->handler = handler; + action->next = NULL; + action->dev_id = dev_id; + action->flags = flags; + action->user_intr = NULL; + + retval = setup_x86_irq (irq, action); + if (retval) + linux_kfree (action); + + return retval; +} + +/* + * Deallocate an irq. + */ +void +free_irq (unsigned int irq, void *dev_id) +{ + struct linux_action *action, **p; + unsigned long flags; + + if (irq >= NINTR) + panic ("free_irq: bad irq number"); + + for (p = irq_action + irq; (action = *p) != NULL; p = &action->next) + { + if (action->dev_id != dev_id) + continue; + + save_flags (flags); + cli (); + *p = action->next; + if (!irq_action[irq]) + { + mask_irq (irq); + ivect[irq] = intnull; + iunit[irq] = irq; + } + restore_flags (flags); + linux_kfree (action); + return; + } + + panic ("free_irq: bad irq number"); +} + +/* + * Set for an irq probe. + */ +unsigned long +probe_irq_on (void) +{ + unsigned i, irqs = 0; + unsigned long delay; + + assert (curr_ipl[cpu_number()] == 0); + + /* + * Allocate all available IRQs. + */ + for (i = NINTR - 1; i > 0; i--) + { + if (!irq_action[i] && ivect[i] == intnull) + { + enable_irq (i); + irqs |= 1 << i; + } + } + + /* + * Wait for spurious interrupts to mask themselves out. + */ + for (delay = jiffies + HZ / 10; delay > jiffies;) + ; + + return (irqs & ~linux_pic_mask); +} + +/* + * Return the result of an irq probe. + */ +int +probe_irq_off (unsigned long irqs) +{ + unsigned int i; + + assert (curr_ipl[cpu_number()] == 0); + + irqs &= linux_pic_mask; + + /* + * Disable unnecessary IRQs. + */ + for (i = NINTR - 1; i > 0; i--) + { + if (!irq_action[i] && ivect[i] == intnull) + { + disable_irq (i); + } + } + + /* + * Return IRQ number. + */ + if (!irqs) + return 0; + i = ffz (~irqs); + if (irqs != (irqs & (1 << i))) + i = -i; + return i; +} + +/* + * Reserve IRQs used by Mach drivers. + * Must be called before Linux IRQ detection, after Mach IRQ detection. + */ + +static void reserved_mach_handler (int line, void *cookie, struct pt_regs *regs) +{ + /* These interrupts are actually handled in Mach. */ + assert (! "reached"); +} + +static const struct linux_action reserved_mach = + { + reserved_mach_handler, NULL, NULL, 0 + }; + +static void +reserve_mach_irqs (void) +{ + unsigned int i; + + for (i = 0; i < NINTR; i++) + { + if (ivect[i] != intnull) + /* This dummy action does not specify SA_SHIRQ, so + setup_x86_irq will not try to add a handler to this + slot. Therefore, the cast is safe. */ + irq_action[i] = (struct linux_action *) &reserved_mach; + } +} + +#ifdef __SMP__ +unsigned char global_irq_holder = NO_PROC_ID; +unsigned volatile int global_irq_lock; +atomic_t global_irq_count; + +atomic_t global_bh_count; +atomic_t global_bh_lock; + +/* + * "global_cli()" is a special case, in that it can hold the + * interrupts disabled for a longish time, and also because + * we may be doing TLB invalidates when holding the global + * IRQ lock for historical reasons. 
Thus we may need to check + * SMP invalidate events specially by hand here (but not in + * any normal spinlocks) + */ +#if 0 +/* XXX: check how Mach handles this */ +static inline void check_smp_invalidate(int cpu) +{ + if (test_bit(cpu, &smp_invalidate_needed)) { + clear_bit(cpu, &smp_invalidate_needed); + local_flush_tlb(); + } +} +#endif + +static void show(char * str) +{ + int i; + unsigned long *stack; + int cpu = smp_processor_id(); + + printk("\n%s, CPU %d:\n", str, cpu); + printk("irq: %d [%d %d]\n", + atomic_read(&global_irq_count), local_irq_count[0], local_irq_count[1]); + printk("bh: %d [%d %d]\n", + atomic_read(&global_bh_count), local_bh_count[0], local_bh_count[1]); + stack = (unsigned long *) &stack; + for (i = 40; i ; i--) { + unsigned long x = *++stack; + //if (x > (unsigned long) &get_options && x < (unsigned long) &vsprintf) { + printk("<[%08lx]> ", x); + //} + } +} + +#define MAXCOUNT 100000000 + +static inline void wait_on_bh(void) +{ + int count = MAXCOUNT; + do { + if (!--count) { + show("wait_on_bh"); + count = ~0; + } + /* nothing .. wait for the other bh's to go away */ + } while (atomic_read(&global_bh_count) != 0); +} + +/* + * I had a lockup scenario where a tight loop doing + * spin_unlock()/spin_lock() on CPU#1 was racing with + * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but + * apparently the spin_unlock() information did not make it + * through to CPU#0 ... nasty, is this by design, do we have to limit + * 'memory update oscillation frequency' artificially like here? + * + * Such 'high frequency update' races can be avoided by careful design, but + * some of our major constructs like spinlocks use similar techniques, + * it would be nice to clarify this issue. Set this define to 0 if you + * want to check whether your system freezes. I suspect the delay done + * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but + * i thought that such things are guaranteed by design, since we use + * the 'LOCK' prefix. + */ +#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 1 + +#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND +# define SYNC_OTHER_CORES(x) udelay(x+1) +#else +/* + * We have to allow irqs to arrive between __sti and __cli + */ +# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop") +#endif + +static inline void wait_on_irq(int cpu) +{ + int count = MAXCOUNT; + + for (;;) { + + /* + * Wait until all interrupts are gone. Wait + * for bottom half handlers unless we're + * already executing in one.. + */ + if (!atomic_read(&global_irq_count)) { + if (local_bh_count[cpu] || !atomic_read(&global_bh_count)) + break; + } + + /* Duh, we have to loop. Release the lock to avoid deadlocks */ + clear_bit(0,&global_irq_lock); + + for (;;) { + if (!--count) { + show("wait_on_irq"); + count = ~0; + } + __sti(); + SYNC_OTHER_CORES(cpu); + __cli(); + //check_smp_invalidate(cpu); + if (atomic_read(&global_irq_count)) + continue; + if (global_irq_lock) + continue; + if (!local_bh_count[cpu] && atomic_read(&global_bh_count)) + continue; + if (!test_and_set_bit(0,&global_irq_lock)) + break; + } + } +} + +/* + * This is called when we want to synchronize with + * bottom half handlers. We need to wait until + * no other CPU is executing any bottom half handler. + * + * Don't wait if we're already running in an interrupt + * context or are inside a bh handler. + */ +void synchronize_bh(void) +{ + if (atomic_read(&global_bh_count) && !in_interrupt()) + wait_on_bh(); +} + +/* + * This is called when we want to synchronize with + * interrupts. 
We may for example tell a device to + * stop sending interrupts: but to make sure there + * are no interrupts that are executing on another + * CPU we need to call this function. + */ +void synchronize_irq(void) +{ + if (atomic_read(&global_irq_count)) { + /* Stupid approach */ + cli(); + sti(); + } +} + +static inline void get_irqlock(int cpu) +{ + if (test_and_set_bit(0,&global_irq_lock)) { + /* do we already hold the lock? */ + if ((unsigned char) cpu == global_irq_holder) + return; + /* Uhhuh.. Somebody else got it. Wait.. */ + do { + do { + //check_smp_invalidate(cpu); + } while (test_bit(0,&global_irq_lock)); + } while (test_and_set_bit(0,&global_irq_lock)); + } + /* + * We also to make sure that nobody else is running + * in an interrupt context. + */ + wait_on_irq(cpu); + + /* + * Ok, finally.. + */ + global_irq_holder = cpu; +} + +#define EFLAGS_IF_SHIFT 9 + +/* + * A global "cli()" while in an interrupt context + * turns into just a local cli(). Interrupts + * should use spinlocks for the (very unlikely) + * case that they ever want to protect against + * each other. + * + * If we already have local interrupts disabled, + * this will not turn a local disable into a + * global one (problems with spinlocks: this makes + * save_flags+cli+sti usable inside a spinlock). + */ +void __global_cli(void) +{ + unsigned int flags; + + __save_flags(flags); + if (flags & (1 << EFLAGS_IF_SHIFT)) { + int cpu = smp_processor_id(); + __cli(); + if (!local_irq_count[cpu]) + get_irqlock(cpu); + } +} + +void __global_sti(void) +{ + int cpu = smp_processor_id(); + + if (!local_irq_count[cpu]) + release_irqlock(cpu); + __sti(); +} + +/* + * SMP flags value to restore to: + * 0 - global cli + * 1 - global sti + * 2 - local cli + * 3 - local sti + */ +unsigned long __global_save_flags(void) +{ + int retval; + int local_enabled; + unsigned long flags; + + __save_flags(flags); + local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1; + /* default to local */ + retval = 2 + local_enabled; + + /* check for global flags if we're not in an interrupt */ + if (!local_irq_count[smp_processor_id()]) { + if (local_enabled) + retval = 1; + if (global_irq_holder == (unsigned char) smp_processor_id()) + retval = 0; + } + return retval; +} + +void __global_restore_flags(unsigned long flags) +{ + switch (flags) { + case 0: + __global_cli(); + break; + case 1: + __global_sti(); + break; + case 2: + __cli(); + break; + case 3: + __sti(); + break; + default: + printk("global_restore_flags: %08lx (%08lx)\n", + flags, (&flags)[-1]); + } +} + +#endif + +static void (*old_clock_handler) (); + +void +init_IRQ (void) +{ + char *p; + int latch = (CLKNUM + hz / 2) / hz; + + /* + * Ensure interrupts are disabled. + */ + (void) splhigh (); + +#ifndef APIC + /* + * Program counter 0 of 8253 to interrupt hz times per second. + */ + outb_p (PIT_C0 | PIT_SQUAREMODE | PIT_READMODE, PITCTL_PORT); + outb_p (latch & 0xff, PITCTR0_PORT); + outb (latch >> 8, PITCTR0_PORT); + + /* + * Install our clock interrupt handler. + */ + old_clock_handler = ivect[0]; + ivect[0] = linux_timer_intr; +#endif + + reserve_mach_irqs (); + + /* + * Enable interrupts. + */ + (void) spl0 (); + + /* + * Check if the machine has an EISA bus. + */ + p = (char *) phystokv(0x0FFFD9); + if (*p++ == 'E' && *p++ == 'I' && *p++ == 'S' && *p == 'A') + EISA_bus = 1; + + /* + * Permanently allocate standard device ports. 
+ */ + request_region (0x00, 0x20, "dma1"); + request_region (0x20, 0x20, "pic1"); + request_region (0x40, 0x20, "timer"); + request_region (0x70, 0x10, "rtc"); + request_region (0x80, 0x20, "dma page reg"); + request_region (0xa0, 0x20, "pic2"); + request_region (0xc0, 0x20, "dma2"); + request_region (0xf0, 0x10, "npu"); +} + +void +restore_IRQ (void) +{ + /* + * Disable interrupts. + */ + (void) splhigh (); + +#ifndef APIC + /* + * Restore clock interrupt handler. + */ + ivect[0] = old_clock_handler; +#endif +} + diff --git a/linux/dev/arch/i386/kernel/setup.c b/linux/dev/arch/i386/kernel/setup.c new file mode 100644 index 0000000..92b782a --- /dev/null +++ b/linux/dev/arch/i386/kernel/setup.c @@ -0,0 +1,13 @@ +char x86 = +#if defined(CONFIG_M386) +3; +#elif defined(CONFIG_M486) +4; +#elif defined(CONFIG_M586) +5; +#elif defined(CONFIG_M686) +6; +#else +#error "CPU type is undefined!" +#endif + diff --git a/linux/dev/drivers/block/ahci.c b/linux/dev/drivers/block/ahci.c new file mode 100644 index 0000000..751c7ca --- /dev/null +++ b/linux/dev/drivers/block/ahci.c @@ -0,0 +1,1038 @@ +/* + * Copyright (C) 2013 Free Software Foundation + * + * This program is free software ; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation ; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY ; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the program ; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <ahci.h> +#include <kern/assert.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/fs.h> +#include <linux/bios32.h> +#include <linux/major.h> +#include <linux/hdreg.h> +#include <linux/genhd.h> +#include <asm/io.h> + +#define MAJOR_NR SCSI_DISK_MAJOR +#include <linux/blk.h> + +/* Standard AHCI BAR for mmio */ +#define AHCI_PCI_BAR 5 + +/* minor: 2 bits for device number, 6 bits for partition number. */ + +#define MAX_PORTS 8 +#define PARTN_BITS 5 +#define PARTN_MASK ((1<<PARTN_BITS)-1) + +/* We need to use one DMA scatter element per physical page. + * ll_rw_block creates at most 8 buffer heads */ +/* See MAX_BUF */ +#define PRDTL_SIZE 8 + +#define WAIT_MAX (1*HZ) /* Wait at most 1s for requests completion */ + +/* AHCI standard structures */ + +struct ahci_prdt { + u32 dba; /* Data base address */ + u32 dbau; /* upper 32bit */ + u32 rsv0; /* Reserved */ + + u32 dbc; /* Byte count bits 0-21, + * bit31 interrupt on completion. 
*/ +}; + +struct ahci_cmd_tbl { + u8 cfis[64]; + u8 acmd[16]; + u8 rsv[48]; + + struct ahci_prdt prdtl[PRDTL_SIZE]; +}; + +struct ahci_command { + u32 opts; /* Command options */ + + u32 prdbc; /* Physical Region Descriptor byte count */ + + u32 ctba; /* Command Table Descriptor Base Address */ + u32 ctbau; /* upper 32bit */ + + u32 rsv1[4]; /* Reserved */ +}; + +struct ahci_fis_dma { + u8 fis_type; + u8 flags; + u8 rsved[2]; + u64 id; + u32 rsvd; + u32 offset; + u32 count; + u32 resvd; +}; + +struct ahci_fis_pio { + u8 fis_type; + u8 flags; + u8 status; + u8 error; + + u8 lba0; + u8 lba1; + u8 lba2; + u8 device; + + u8 lba3; + u8 lba4; + u8 lba5; + u8 rsv2; + + u8 countl; + u8 counth; + u8 rsv3; + u8 e_status; + + u16 tc; /* Transfer Count */ + u8 rsv4[2]; +}; + +struct ahci_fis_d2h { + u8 fis_type; + u8 flags; + u8 status; + u8 error; + + u8 lba0; + u8 lba1; + u8 lba2; + u8 device; + + u8 lba3; + u8 lba4; + u8 lba5; + u8 rsv2; + + u8 countl; + u8 counth; + u8 rsv3[2]; + + u8 rsv4[4]; +}; + +struct ahci_fis_dev { + u8 rsvd[8]; +}; + +struct ahci_fis_h2d { + u8 fis_type; + u8 flags; + u8 command; + u8 featurel; + + u8 lba0; + u8 lba1; + u8 lba2; + u8 device; + + u8 lba3; + u8 lba4; + u8 lba5; + u8 featureh; + + u8 countl; + u8 counth; + u8 icc; + u8 control; + + u8 rsv1[4]; +}; + +struct ahci_fis_data { + u8 fis_type; + u8 flags; + u8 rsv1[2]; + u32 data1[]; +}; + +struct ahci_fis { + struct ahci_fis_dma dma_fis; + u8 pad0[4]; + + struct ahci_fis_pio pio_fis; + u8 pad1[12]; + + struct ahci_fis_d2h d2h_fis; + u8 pad2[4]; + + struct ahci_fis_dev dev_fis; + + u8 ufis[64]; + + u8 rsv[0x100 - 0xa0]; +}; + +struct ahci_port { + u32 clb; /* Command List Base address */ + u32 clbu; /* upper 32bit */ + u32 fb; /* FIS Base */ + u32 fbu; /* upper 32bit */ + u32 is; /* Interrupt Status */ + u32 ie; /* Interrupt Enable */ + u32 cmd; /* Command and Status */ + u32 rsv0; /* Reserved */ + u32 tfd; /* Task File Data */ + u32 sig; /* Signature */ + u32 ssts; /* SATA Status */ + u32 sctl; /* SATA Control */ + u32 serr; /* SATA Error */ + u32 sact; /* SATA Active */ + u32 ci; /* Command Issue */ + u32 sntf; /* SATA Notification */ + u32 fbs; /* FIS-based switch control */ + u8 rsv1[0x70 - 0x44]; /* Reserved */ + u8 vendor[0x80 - 0x70]; /* Vendor-specific */ +}; + +struct ahci_host { + u32 cap; /* Host capabilities */ + u32 ghc; /* Global Host Control */ + u32 is; /* Interrupt Status */ + u32 pi; /* Port Implemented */ + u32 v; /* Version */ + u32 ccc_ctl; /* Command Completion Coalescing control */ + u32 ccc_pts; /* Command Completion Coalescing ports */ + u32 em_loc; /* Enclosure Management location */ + u32 em_ctrl; /* Enclosure Management control */ + u32 cap2; /* Host capabilities extended */ + u32 bohc; /* BIOS/OS Handoff Control and status */ + u8 rsv[0xa0 - 0x2c]; /* Reserved */ + u8 vendor[0x100 - 0xa0]; /* Vendor-specific */ + struct ahci_port ports[]; /* Up to 32 ports */ +}; + +/* Our own data */ + +static struct port { + /* memory-mapped regions */ + const volatile struct ahci_host *ahci_host; + const volatile struct ahci_port *ahci_port; + + /* host-memory buffers */ + struct ahci_command *command; + struct ahci_fis *fis; + struct ahci_cmd_tbl *prdtl; + + struct hd_driveid id; + unsigned is_cd; + unsigned long long capacity; /* Nr of sectors */ + u32 status; /* interrupt status */ + unsigned cls; /* Command list maximum size. + We currently only use 1. 
*/ + struct wait_queue *q; /* IRQ wait queue */ + struct hd_struct *part; /* drive partition table */ + unsigned lba48; /* Whether LBA48 is supported */ + unsigned identify; /* Whether we are just identifying + at boot */ + struct gendisk *gd; +} ports[MAX_PORTS]; + + +/* do_request() gets called by the block layer to push a request to the disk. + We just push one, and when an interrupt tells it's over, we call do_request() + ourself again to push the next request, etc. */ + +/* Request completed, either successfully or with an error */ +static void ahci_end_request(int uptodate) +{ + struct request *rq = CURRENT; + struct buffer_head *bh; + + rq->errors = 0; + if (!uptodate) { + if (!rq->quiet) + printk("end_request: I/O error, dev %s, sector %lu\n", + kdevname(rq->rq_dev), rq->sector); + } + + for (bh = rq->bh; bh; ) + { + struct buffer_head *next = bh->b_reqnext; + bh->b_reqnext = NULL; + mark_buffer_uptodate (bh, uptodate); + unlock_buffer (bh); + bh = next; + } + + CURRENT = rq->next; + if (rq->sem != NULL) + up(rq->sem); + rq->rq_status = RQ_INACTIVE; + wake_up(&wait_for_request); +} + +/* Push the request to the controler port */ +static int ahci_do_port_request(struct port *port, unsigned long long sector, struct request *rq) +{ + struct ahci_command *command = port->command; + struct ahci_cmd_tbl *prdtl = port->prdtl; + struct ahci_fis_h2d *fis_h2d; + unsigned slot = 0; + struct buffer_head *bh; + unsigned i; + + rq->rq_status = RQ_SCSI_BUSY; + + /* Shouldn't ever happen: the block glue is limited at 8 blocks */ + assert(rq->nr_sectors < 0x10000); + + fis_h2d = (void*) &prdtl[slot].cfis; + fis_h2d->fis_type = FIS_TYPE_REG_H2D; + fis_h2d->flags = 128; + if (port->lba48) { + if (sector >= 1ULL << 48) { + printk("sector %llu beyond LBA48\n", sector); + return -EOVERFLOW; + } + if (rq->cmd == READ) + fis_h2d->command = WIN_READDMA_EXT; + else + fis_h2d->command = WIN_WRITEDMA_EXT; + } else { + if (sector >= 1ULL << 28) { + printk("sector %llu beyond LBA28\n", sector); + return -EOVERFLOW; + } + if (rq->cmd == READ) + fis_h2d->command = WIN_READDMA; + else + fis_h2d->command = WIN_WRITEDMA; + } + + fis_h2d->device = 1<<6; /* LBA */ + + fis_h2d->lba0 = sector; + fis_h2d->lba1 = sector >> 8; + fis_h2d->lba2 = sector >> 16; + + fis_h2d->lba3 = sector >> 24; + fis_h2d->lba4 = sector >> 32; + fis_h2d->lba5 = sector >> 40; + + fis_h2d->countl = rq->nr_sectors; + fis_h2d->counth = rq->nr_sectors >> 8; + + command[slot].opts = sizeof(*fis_h2d) / sizeof(u32); + + if (rq->cmd == WRITE) + command[slot].opts |= AHCI_CMD_WRITE; + + for (i = 0, bh = rq->bh; bh; i++, bh = bh->b_reqnext) + { + assert(i < PRDTL_SIZE); + assert((((unsigned long) bh->b_data) & ~PAGE_MASK) == + (((unsigned long) bh->b_data + bh->b_size - 1) & ~PAGE_MASK)); + prdtl[slot].prdtl[i].dbau = 0; + prdtl[slot].prdtl[i].dba = vmtophys(bh->b_data); + prdtl[slot].prdtl[i].dbc = bh->b_size - 1; + } + + command[slot].opts |= i << 16; + + /* Make sure main memory buffers are up to date */ + mb(); + + /* Issue command */ + writel(1 << slot, &port->ahci_port->ci); + + /* TODO: IRQ timeout handler */ + return 0; +} + +/* Called by block core to push a request */ +/* TODO: ideally, would have one request queue per port */ +/* TODO: ideally, would use tags to process several requests at a time */ +static void ahci_do_request() /* invoked with cli() */ +{ + struct request *rq; + unsigned minor, unit; + unsigned long long block, blockend; + struct port *port; + + rq = CURRENT; + if (!rq) + return; + + if (rq->rq_status != RQ_ACTIVE) + /* 
Current one is already ongoing, let the interrupt handler + * push the new one when the current one is finished. */ + return; + + if (MAJOR(rq->rq_dev) != MAJOR_NR) { + printk("bad ahci major %u\n", MAJOR(rq->rq_dev)); + goto kill_rq; + } + + minor = MINOR(rq->rq_dev); + unit = minor >> PARTN_BITS; + if (unit >= MAX_PORTS) { + printk("bad ahci unit %u\n", unit); + goto kill_rq; + } + + port = &ports[unit]; + + /* Compute start sector */ + block = rq->sector; + block += port->part[minor & PARTN_MASK].start_sect; + + /* And check end */ + blockend = block + rq->nr_sectors; + if (blockend < block) { + if (!rq->quiet) + printk("bad blockend %lu vs %lu\n", (unsigned long) blockend, (unsigned long) block); + goto kill_rq; + } + if (blockend > port->capacity) { + if (!rq->quiet) + { + printk("offset for %u was %lu\n", minor, port->part[minor & PARTN_MASK].start_sect); + printk("bad access: block %lu, count= %lu\n", (unsigned long) blockend, (unsigned long) port->capacity); + } + goto kill_rq; + } + + /* Push this to the port */ + if (ahci_do_port_request(port, block, rq)) + goto kill_rq; + return; + +kill_rq: + ahci_end_request(0); +} + +/* The given port got an interrupt, terminate the current request if any */ +static void ahci_port_interrupt(struct port *port, u32 status) +{ + unsigned slot = 0; + + if (readl(&port->ahci_port->ci) & (1 << slot)) { + /* Command still pending */ + return; + } + + if (port->identify) { + port->status = status; + wake_up(&port->q); + return; + } + + if (!CURRENT || CURRENT->rq_status != RQ_SCSI_BUSY) { + /* No request currently running */ + return; + } + + if (status & (PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_IF_NONFATAL)) { + printk("ahci error %x %x\n", status, readl(&port->ahci_port->tfd)); + ahci_end_request(0); + return; + } + + ahci_end_request(1); +} + +/* Start of IRQ handler. 
Iterate over all ports for this host */ +static void ahci_interrupt (int irq, void *host, struct pt_regs *regs) +{ + struct port *port; + struct ahci_host *ahci_host = host; + u32 irq_mask; + u32 status; + + irq_mask = readl(&ahci_host->is); + + if (!irq_mask) + return; + + for (port = &ports[0]; port < &ports[MAX_PORTS]; port++) { + if (port->ahci_host == ahci_host && (irq_mask & (1 << (port->ahci_port - ahci_host->ports)))) { + status = readl(&port->ahci_port->is); + /* Clear interrupt before possibly triggering others */ + writel(status, &port->ahci_port->is); + ahci_port_interrupt (port, status); + } + } + + if (CURRENT) + /* Still some requests, queue another one */ + ahci_do_request(); + + /* Clear host after clearing ports */ + writel(irq_mask, &ahci_host->is); + + /* unlock */ +} + +static int ahci_ioctl (struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + int major, unit; + + if (!inode || !inode->i_rdev) + return -EINVAL; + + major = MAJOR(inode->i_rdev); + if (major != MAJOR_NR) + return -ENOTTY; + + unit = DEVICE_NR(inode->i_rdev); + if (unit >= MAX_PORTS) + return -EINVAL; + + switch (cmd) { + case BLKRRPART: + if (!suser()) return -EACCES; + if (!ports[unit].gd) + return -EINVAL; + resetup_one_dev(ports[unit].gd, unit); + return 0; + default: + return -EPERM; + } +} + +static int ahci_open (struct inode *inode, struct file *file) +{ + int target; + + if (MAJOR(inode->i_rdev) != MAJOR_NR) + return -ENXIO; + + target = MINOR(inode->i_rdev) >> PARTN_BITS; + if (target >= MAX_PORTS) + return -ENXIO; + + if (!ports[target].ahci_port) + return -ENXIO; + + return 0; +} + +static void ahci_release (struct inode *inode, struct file *file) +{ +} + +static int ahci_fsync (struct inode *inode, struct file *file) +{ + printk("fsync\n"); + return -ENOSYS; +} + +static struct file_operations ahci_fops = { + .lseek = NULL, + .read = block_read, + .write = block_write, + .readdir = NULL, + .select = NULL, + .ioctl = ahci_ioctl, + .mmap = NULL, + .open = ahci_open, + .release = ahci_release, + .fsync = ahci_fsync, + .fasync = NULL, + .check_media_change = NULL, + .revalidate = NULL, +}; + +/* Disk timed out while processing identify, interrupt ahci_probe_port */ +static void identify_timeout(unsigned long data) +{ + struct port *port = (void*) data; + + wake_up(&port->q); +} + +static struct timer_list identify_timer = { .function = identify_timeout }; + +static int ahci_identify(const volatile struct ahci_host *ahci_host, const volatile struct ahci_port *ahci_port, struct port *port, unsigned cmd) +{ + struct hd_driveid id; + struct ahci_fis_h2d *fis_h2d; + struct ahci_command *command = port->command; + struct ahci_cmd_tbl *prdtl = port->prdtl; + unsigned long flags; + unsigned slot; + unsigned long first_part; + unsigned long long timeout; + int ret = 0; + + /* Identify device */ + /* TODO: make this a request */ + slot = 0; + + fis_h2d = (void*) &prdtl[slot].cfis; + fis_h2d->fis_type = FIS_TYPE_REG_H2D; + fis_h2d->flags = 128; + fis_h2d->command = cmd; + fis_h2d->device = 0; + + /* Fetch the 512 identify data */ + memset(&id, 0, sizeof(id)); + + command[slot].opts = sizeof(*fis_h2d) / sizeof(u32); + + first_part = PAGE_ALIGN((unsigned long) &id) - (unsigned long) &id; + + if (first_part && first_part < sizeof(id)) { + /* split over two pages */ + + command[slot].opts |= (2 << 16); + + prdtl[slot].prdtl[0].dbau = 0; + prdtl[slot].prdtl[0].dba = vmtophys((void*) &id); + prdtl[slot].prdtl[0].dbc = first_part - 1; + prdtl[slot].prdtl[1].dbau = 0; + 
prdtl[slot].prdtl[1].dba = vmtophys((void*) &id + first_part); + prdtl[slot].prdtl[1].dbc = sizeof(id) - first_part - 1; + } + else + { + command[slot].opts |= (1 << 16); + + prdtl[slot].prdtl[0].dbau = 0; + prdtl[slot].prdtl[0].dba = vmtophys((void*) &id); + prdtl[slot].prdtl[0].dbc = sizeof(id) - 1; + } + + timeout = jiffies + WAIT_MAX; + while (readl(&ahci_port->tfd) & (BUSY_STAT | DRQ_STAT)) + if (jiffies > timeout) { + printk("sd%u: timeout waiting for ready\n", port-ports); + port->ahci_host = NULL; + port->ahci_port = NULL; + return 3; + } + + save_flags(flags); + cli(); + + port->identify = 1; + port->status = 0; + + /* Issue command */ + mb(); + writel(1 << slot, &ahci_port->ci); + + timeout = jiffies + WAIT_MAX; + identify_timer.expires = timeout; + identify_timer.data = (unsigned long) port; + add_timer(&identify_timer); + while (!port->status) { + if (jiffies >= timeout) { + printk("sd%u: timeout waiting for identify\n", port-ports); + port->ahci_host = NULL; + port->ahci_port = NULL; + del_timer(&identify_timer); + restore_flags(flags); + return 3; + } + sleep_on(&port->q); + } + del_timer(&identify_timer); + restore_flags(flags); + + if ((port->status & PORT_IRQ_TF_ERR) || readl(&ahci_port->is) & PORT_IRQ_TF_ERR) + { + /* Identify error */ + port->capacity = 0; + port->lba48 = 0; + ret = 2; + } else { + memcpy(&port->id, &id, sizeof(id)); + port->is_cd = 0; + + ide_fixstring(id.model, sizeof(id.model), 1); + ide_fixstring(id.fw_rev, sizeof(id.fw_rev), 1); + ide_fixstring(id.serial_no, sizeof(id.serial_no), 1); + if (cmd == WIN_PIDENTIFY) + { + unsigned char type = (id.config >> 8) & 0x1f; + + printk("sd%u: %s, ATAPI ", port - ports, id.model); + if (type == 5) + { + printk("unsupported CDROM drive\n"); + port->is_cd = 1; + port->lba48 = 0; + port->capacity = 0; + } + else + { + printk("unsupported type %d\n", type); + port->lba48 = 0; + port->capacity = 0; + return 2; + } + return 0; + } + + if (id.command_set_2 & (1U<<10)) + { + port->lba48 = 1; + port->capacity = id.lba_capacity_2; + if (port->capacity >= (1ULL << 32)) + { + port->capacity = (1ULL << 32) - 1; + printk("Warning: truncating disk size to 2TiB\n"); + } + } + else + { + port->lba48 = 0; + port->capacity = id.lba_capacity; + if (port->capacity > (1ULL << 24)) + { + port->capacity = (1ULL << 24); + printk("Warning: truncating disk size to 128GiB\n"); + } + } + if (port->capacity/2048 >= 10240) + printk("sd%u: %s, %uGB w/%dkB Cache\n", (unsigned) (port - ports), id.model, (unsigned) (port->capacity/(2048*1024)), id.buf_size/2); + else + printk("sd%u: %s, %uMB w/%dkB Cache\n", (unsigned) (port - ports), id.model, (unsigned) (port->capacity/2048), id.buf_size/2); + } + port->identify = 0; + + return ret; +} + +/* Probe one AHCI port */ +static void ahci_probe_port(const volatile struct ahci_host *ahci_host, const volatile struct ahci_port *ahci_port) +{ + struct port *port; + void *mem; + unsigned cls = ((readl(&ahci_host->cap) >> 8) & 0x1f) + 1; + struct ahci_command *command; + struct ahci_fis *fis; + struct ahci_cmd_tbl *prdtl; + vm_size_t size = + cls * sizeof(*command) + + sizeof(*fis) + + cls * sizeof(*prdtl); + unsigned i; + unsigned long long timeout; + + for (i = 0; i < MAX_PORTS; i++) { + if (!ports[i].ahci_port) + break; + } + if (i == MAX_PORTS) + return; + port = &ports[i]; + + /* Has to be 1K-aligned */ + mem = vmalloc (size); + if (!mem) + return; + assert (!(((unsigned long) mem) & (1024-1))); + memset (mem, 0, size); + + port->ahci_host = ahci_host; + port->ahci_port = ahci_port; + port->cls = cls; + 
+ port->command = command = mem; + port->fis = fis = (void*) command + cls * sizeof(*command); + port->prdtl = prdtl = (void*) fis + sizeof(*fis); + + /* Stop commands */ + writel(readl(&ahci_port->cmd) & ~PORT_CMD_START, &ahci_port->cmd); + timeout = jiffies + WAIT_MAX; + while (readl(&ahci_port->cmd) & PORT_CMD_LIST_ON) + if (jiffies > timeout) { + printk("sd%u: timeout waiting for list completion\n", (unsigned) (port-ports)); + port->ahci_host = NULL; + port->ahci_port = NULL; + return; + } + + writel(readl(&ahci_port->cmd) & ~PORT_CMD_FIS_RX, &ahci_port->cmd); + timeout = jiffies + WAIT_MAX; + while (readl(&ahci_port->cmd) & PORT_CMD_FIS_ON) + if (jiffies > timeout) { + printk("sd%u: timeout waiting for FIS completion\n", (unsigned) (port-ports)); + port->ahci_host = NULL; + port->ahci_port = NULL; + return; + } + + /* We don't support 64bit */ + /* Point controller to our buffers */ + writel(0, &ahci_port->clbu); + writel(vmtophys((void*) command), &ahci_port->clb); + writel(0, &ahci_port->fbu); + writel(vmtophys((void*) fis), &ahci_port->fb); + + /* Clear any previous interrupts */ + writel(readl(&ahci_port->is), &ahci_port->is); + writel(1 << (ahci_port - ahci_host->ports), &ahci_host->is); + + /* And activate them */ + writel(DEF_PORT_IRQ, &ahci_port->ie); + writel(readl(&ahci_host->ghc) | HOST_IRQ_EN, &ahci_host->ghc); + + for (i = 0; i < cls; i++) + { + command[i].ctbau = 0; + command[i].ctba = vmtophys((void*) &prdtl[i]); + } + + /* Start commands */ + timeout = jiffies + WAIT_MAX; + while (readl(&ahci_port->cmd) & PORT_CMD_LIST_ON) + if (jiffies > timeout) { + printk("sd%u: timeout waiting for list completion\n", (unsigned) (port-ports)); + port->ahci_host = NULL; + port->ahci_port = NULL; + return; + } + + writel(readl(&ahci_port->cmd) | PORT_CMD_FIS_RX | PORT_CMD_START, &ahci_port->cmd); + + /* if PxCMD.ATAPI is set, try ATAPI identify; otherwise try AHCI, then ATAPI */ + if (readl(&ahci_port->cmd) & PORT_CMD_ATAPI || + ahci_identify(ahci_host, ahci_port, port, WIN_IDENTIFY) >= 2) + ahci_identify(ahci_host, ahci_port, port, WIN_PIDENTIFY); +} + +/* Probe one AHCI PCI device */ +static void ahci_probe_dev(unsigned char bus, unsigned char device) +{ + unsigned char hdrtype; + unsigned char dev, fun; + const volatile struct ahci_host *ahci_host; + const volatile struct ahci_port *ahci_port; + unsigned nports, n, i; + unsigned port_map; + unsigned bar; + unsigned char irq; + + dev = PCI_SLOT(device); + fun = PCI_FUNC(device); + + /* Get configuration */ + if (pcibios_read_config_byte(bus, device, PCI_HEADER_TYPE, &hdrtype) != PCIBIOS_SUCCESSFUL) { + printk("ahci: %02x:%02x.%x: Can not read configuration", bus, dev, fun); + return; + } + /* Ignore multifunction bit */ + hdrtype &= ~0x80; + + if (hdrtype != 0) { + printk("ahci: %02x:%02x.%x: Unknown hdrtype %d\n", bus, dev, fun, hdrtype); + return; + } + + if (pcibios_read_config_dword(bus, device, PCI_BASE_ADDRESS_5, &bar) != PCIBIOS_SUCCESSFUL) { + printk("ahci: %02x:%02x.%x: Can not read BAR 5", bus, dev, fun); + return; + } + if (bar & PCI_BASE_ADDRESS_SPACE_IO) { + printk("ahci: %02x:%02x.%x: BAR 5 is I/O?!", bus, dev, fun); + return; + } + bar &= PCI_BASE_ADDRESS_MEM_MASK; + + if (pcibios_read_config_byte(bus, device, PCI_INTERRUPT_LINE, &irq) != PCIBIOS_SUCCESSFUL) { + printk("ahci: %02x:%02x.%x: Can not read IRQ", bus, dev, fun); + return; + } + + printk("AHCI SATA %02x:%02x.%x BAR 0x%x IRQ %u\n", bus, dev, fun, bar, irq); + + /* Map mmio */ + ahci_host = vremap(bar, 0x2000); + + /* Request IRQ */ + if (request_irq(irq, 
&ahci_interrupt, SA_SHIRQ, "ahci", (void*) ahci_host)) { + printk("ahci: %02x:%02x.%x: Can not get irq %u\n", bus, dev, fun, irq); + return; + } + +#ifdef CONFIG_BLK_DEV_IDE + /* OK, we will handle it. Disable probing on legacy IDE ports it may have. */ + for (i = 0; i < 6; i++) + { + unsigned mybar; + if (pcibios_read_config_dword(bus, device, PCI_BASE_ADDRESS_0 + i*4, &mybar) == PCIBIOS_SUCCESSFUL) { + if (!(bar & PCI_BASE_ADDRESS_SPACE_IO)) + /* Memory, don't care */ + continue; + /* printk("ahci: %02x:%02x.%x: BAR %d is %x\n", bus, dev, fun, i, mybar); */ + ide_disable_base(bar & PCI_BASE_ADDRESS_IO_MASK); + } + } +#endif + + nports = (readl(&ahci_host->cap) & 0x1f) + 1; + port_map = readl(&ahci_host->pi); + + for (n = 0, i = 0; i < AHCI_MAX_PORTS; i++) + if (port_map & (1U << i)) + n++; + + if (nports != n) { + printk("ahci: %02x:%02x.%x: Odd number of ports %u, assuming %u is correct\n", bus, dev, fun, n, nports); + port_map = 0; + } + if (!port_map) { + port_map = (1U << nports) - 1; + } + + for (i = 0; i < AHCI_MAX_PORTS; i++) { + u32 ssts; + u8 det, ipm; + + if (!(port_map & (1U << i))) + continue; + + ahci_port = &ahci_host->ports[i]; + + ssts = readl(&ahci_port->ssts); + det = ssts & 0xf; + switch (det) + { + case 0x0: + /* Device not present */ + continue; + case 0x1: + printk("ahci: %02x:%02x.%x: Port %u communication not established. TODO: power on device\n", bus, dev, fun, i); + continue; + case 0x3: + /* Present and communication established */ + break; + case 0x4: + printk("ahci: %02x:%02x.%x: Port %u phy offline?!\n", bus, dev, fun, i); + continue; + default: + printk("ahci: %02x:%02x.%x: Unknown port %u DET %x\n", bus, dev, fun, i, det); + continue; + } + + ipm = (ssts >> 8) & 0xf; + switch (ipm) + { + case 0x0: + /* Device not present */ + continue; + case 0x1: + /* Active */ + break; + case 0x2: + printk("ahci: %02x:%02x.%x: Port %u in Partial power management. TODO: power on device\n", bus, dev, fun, i); + continue; + case 0x6: + printk("ahci: %02x:%02x.%x: Port %u in Slumber power management. TODO: power on device\n", bus, dev, fun, i); + continue; + default: + printk("ahci: %02x:%02x.%x: Unknown port %u IPM %x\n", bus, dev, fun, i, ipm); + continue; + } + + /* OK! Probe this port */ + ahci_probe_port(ahci_host, ahci_port); + } +} + +/* genhd callback to set size of disks */ +static void ahci_geninit(struct gendisk *gd) +{ + unsigned unit; + struct port *port; + + for (unit = 0; unit < gd->nr_real; unit++) { + port = &ports[unit]; + port->part[0].nr_sects = port->capacity; + if (!port->part[0].nr_sects) + port->part[0].nr_sects = -1; + } +} + +/* Probe all AHCI PCI devices */ +void ahci_probe_pci(void) +{ + unsigned char bus, device; + unsigned short index; + int ret; + unsigned nports, unit, nminors; + struct port *port; + struct gendisk *gd, **gdp; + int *bs; + + for (index = 0; + (ret = pcibios_find_class(PCI_CLASS_STORAGE_SATA_AHCI, index, &bus, &device)) == PCIBIOS_SUCCESSFUL; + index++) + { + /* Note: this prevents from also having a SCSI controler. + * It shouldn't harm too much until we have proper hardware + * enumeration. 
+ */ + if (register_blkdev(MAJOR_NR, "sd", &ahci_fops) < 0) + printk("could not register ahci\n"); + ahci_probe_dev(bus, device); + } + + for (nports = 0, port = &ports[0]; port < &ports[MAX_PORTS]; port++) + if (port->ahci_port) + nports++; + + nminors = nports * (1<<PARTN_BITS); + + gd = kmalloc(sizeof(*gd), GFP_KERNEL); + gd->sizes = kmalloc(nminors * sizeof(*gd->sizes), GFP_KERNEL); + gd->part = kmalloc(nminors * sizeof(*gd->part), GFP_KERNEL); + bs = kmalloc(nminors * sizeof(*bs), GFP_KERNEL); + + blksize_size[MAJOR_NR] = bs; + for (unit = 0; unit < nminors; unit++) + /* We prefer to transfer whole pages */ + *bs++ = PAGE_SIZE; + + memset(gd->part, 0, nminors * sizeof(*gd->part)); + + for (unit = 0; unit < nports; unit++) { + ports[unit].gd = gd; + ports[unit].part = &gd->part[unit << PARTN_BITS]; + } + + gd->major = MAJOR_NR; + gd->major_name = "sd"; + gd->minor_shift = PARTN_BITS; + gd->max_p = 1<<PARTN_BITS; + gd->max_nr = nports; + gd->nr_real = nports; + gd->init = ahci_geninit; + gd->next = NULL; + + for (gdp = &gendisk_head; *gdp; gdp = &((*gdp)->next)) + ; + *gdp = gd; + + blk_dev[MAJOR_NR].request_fn = ahci_do_request; +} diff --git a/linux/dev/drivers/block/floppy.c b/linux/dev/drivers/block/floppy.c new file mode 100644 index 0000000..83d66f0 --- /dev/null +++ b/linux/dev/drivers/block/floppy.c @@ -0,0 +1,4288 @@ +/* + * linux/kernel/floppy.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1993, 1994 Alain Knaff + */ +/* + * 02.12.91 - Changed to static variables to indicate need for reset + * and recalibrate. This makes some things easier (output_byte reset + * checking etc), and means less interrupt jumping in case of errors, + * so the code is hopefully easier to understand. + */ + +/* + * This file is certainly a mess. I've tried my best to get it working, + * but I don't like programming floppies, and I have only one anyway. + * Urgel. I should check for more errors, and do more graceful error + * recovery. Seems there are problems with several drives. I've tried to + * correct them. No promises. + */ + +/* + * As with hd.c, all routines within this file can (and will) be called + * by interrupts, so extreme caution is needed. A hardware interrupt + * handler may not sleep, or a kernel panic will happen. Thus I cannot + * call "floppy-on" directly, but have to set a special timer interrupt + * etc. + */ + +/* + * 28.02.92 - made track-buffering routines, based on the routines written + * by entropy@wintermute.wpi.edu (Lawrence Foard). Linus. + */ + +/* + * Automatic floppy-detection and formatting written by Werner Almesberger + * (almesber@nessie.cs.id.ethz.ch), who also corrected some problems with + * the floppy-change signal detection. + */ + +/* + * 1992/7/22 -- Hennus Bergman: Added better error reporting, fixed + * FDC data overrun bug, added some preliminary stuff for vertical + * recording support. + * + * 1992/9/17: Added DMA allocation & DMA functions. -- hhb. + * + * TODO: Errors are still not counted properly. + */ + +/* 1992/9/20 + * Modifications for ``Sector Shifting'' by Rob Hooft (hooft@chem.ruu.nl) + * modeled after the freeware MS-DOS program fdformat/88 V1.8 by + * Christoph H. Hochst\"atter. + * I have fixed the shift values to the ones I always use. Maybe a new + * ioctl() should be created to be able to modify them. + * There is a bug in the driver that makes it impossible to format a + * floppy as the first thing after bootup. 
+ */ + +/* + * 1993/4/29 -- Linus -- cleaned up the timer handling in the kernel, and + * this helped the floppy driver as well. Much cleaner, and still seems to + * work. + */ + +/* 1994/6/24 --bbroad-- added the floppy table entries and made + * minor modifications to allow 2.88 floppies to be run. + */ + +/* 1994/7/13 -- Paul Vojta -- modified the probing code to allow three or more + * disk types. + */ + +/* + * 1994/8/8 -- Alain Knaff -- Switched to fdpatch driver: Support for bigger + * format bug fixes, but unfortunately some new bugs too... + */ + +/* 1994/9/17 -- Koen Holtman -- added logging of physical floppy write + * errors to allow safe writing by specialized programs. + */ + +/* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks + * by defining bit 1 of the "stretch" parameter to mean put sectors on the + * opposite side of the disk, leaving the sector IDs alone (i.e. Commodore's + * drives are "upside-down"). + */ + +/* + * 1995/8/26 -- Andreas Busse -- added Mips support. + */ + +/* + * 1995/10/18 -- Ralf Baechle -- Portability cleanup; move machine dependent + * features to asm/floppy.h. + */ + + +#define FLOPPY_SANITY_CHECK +#undef FLOPPY_SILENT_DCL_CLEAR + +#define REALLY_SLOW_IO + +#define DEBUGT 2 +#define DCL_DEBUG /* debug disk change line */ + +/* do print messages for unexpected interrupts */ +static int print_unex=1; +#include <linux/utsname.h> +#include <linux/module.h> + +/* the following is the mask of allowed drives. By default units 2 and + * 3 of both floppy controllers are disabled, because switching on the + * motor of these drives causes system hangs on some PCI computers. drive + * 0 is the low bit (0x1), and drive 7 is the high bit (0x80). Bits are on if + * a drive is allowed. */ +static int FLOPPY_IRQ=6; +static int FLOPPY_DMA=2; +static int allowed_drive_mask = 0x33; + +static int irqdma_allocated = 0; + +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/timer.h> +#include <linux/tqueue.h> +#define FDPATCHES +#include <linux/fdreg.h> + + +#include <linux/fd.h> + + +#define OLDFDRAWCMD 0x020d /* send a raw command to the FDC */ + +struct old_floppy_raw_cmd { + void *data; + long length; + + unsigned char rate; + unsigned char flags; + unsigned char cmd_count; + unsigned char cmd[9]; + unsigned char reply_count; + unsigned char reply[7]; + int track; +}; + +#include <linux/errno.h> +#include <linux/malloc.h> +#include <linux/mm.h> +#include <linux/string.h> +#include <linux/fcntl.h> +#include <linux/delay.h> +#include <linux/mc146818rtc.h> /* CMOS defines */ +#include <linux/ioport.h> +#include <linux/interrupt.h> + +#include <asm/dma.h> +#include <asm/irq.h> +#include <asm/system.h> +#include <asm/io.h> +#include <asm/segment.h> + +static int use_virtual_dma=0; /* virtual DMA for Intel */ +static unsigned short virtual_dma_port=0x3f0; +void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs); +static int set_dor(int fdc, char mask, char data); +static inline int __get_order(unsigned long size); +#include <asm/floppy.h> + + +#define MAJOR_NR FLOPPY_MAJOR + +#include <linux/blk.h> +#include <linux/cdrom.h> /* for the compatibility eject ioctl */ + +#include <linux/dev/glue/glue.h> + + +#ifndef FLOPPY_MOTOR_MASK +#define FLOPPY_MOTOR_MASK 0xf0 +#endif + +#ifndef fd_get_dma_residue +#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA) +#endif + +/* Dma Memory related stuff */ + +/* Pure 2^n version of get_order */ +static inline int __get_order(unsigned long size) +{ + int 
order; + + size = (size-1) >> (PAGE_SHIFT-1); + order = -1; + do { + size >>= 1; + order++; + } while (size); + return order; +} + +#ifndef fd_dma_mem_free +#define fd_dma_mem_free(addr, size) free_pages(addr, __get_order(size)) +#endif + +#ifndef fd_dma_mem_alloc +#define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,__get_order(size)) +#endif + +/* End dma memory related stuff */ + +static unsigned int fake_change = 0; +static int initialising=1; + +static inline int TYPE(kdev_t x) { + return (MINOR(x)>>2) & 0x1f; +} +static inline int DRIVE(kdev_t x) { + return (MINOR(x)&0x03) | ((MINOR(x)&0x80) >> 5); +} +#define ITYPE(x) (((x)>>2) & 0x1f) +#define TOMINOR(x) ((x & 3) | ((x & 4) << 5)) +#define UNIT(x) ((x) & 0x03) /* drive on fdc */ +#define FDC(x) (((x) & 0x04) >> 2) /* fdc of drive */ +#define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2)) + /* reverse mapping from unit and fdc to drive */ +#define DP (&drive_params[current_drive]) +#define DRS (&drive_state[current_drive]) +#define DRWE (&write_errors[current_drive]) +#define FDCS (&fdc_state[fdc]) +#define CLEARF(x) (clear_bit(x##_BIT, &DRS->flags)) +#define SETF(x) (set_bit(x##_BIT, &DRS->flags)) +#define TESTF(x) (test_bit(x##_BIT, &DRS->flags)) + +#define UDP (&drive_params[drive]) +#define UDRS (&drive_state[drive]) +#define UDRWE (&write_errors[drive]) +#define UFDCS (&fdc_state[FDC(drive)]) +#define UCLEARF(x) (clear_bit(x##_BIT, &UDRS->flags)) +#define USETF(x) (set_bit(x##_BIT, &UDRS->flags)) +#define UTESTF(x) (test_bit(x##_BIT, &UDRS->flags)) + +#define DPRINT(format, args...) printk(DEVICE_NAME "%d: " format, current_drive , ## args) + +#define PH_HEAD(floppy,head) (((((floppy)->stretch & 2) >>1) ^ head) << 2) +#define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH) + +#define CLEARSTRUCT(x) memset((x), 0, sizeof(*(x))) + +#define INT_OFF save_flags(flags); cli() +#define INT_ON restore_flags(flags) + +/* read/write */ +#define COMMAND raw_cmd->cmd[0] +#define DR_SELECT raw_cmd->cmd[1] +#define TRACK raw_cmd->cmd[2] +#define HEAD raw_cmd->cmd[3] +#define SECTOR raw_cmd->cmd[4] +#define SIZECODE raw_cmd->cmd[5] +#define SECT_PER_TRACK raw_cmd->cmd[6] +#define GAP raw_cmd->cmd[7] +#define SIZECODE2 raw_cmd->cmd[8] +#define NR_RW 9 + +/* format */ +#define F_SIZECODE raw_cmd->cmd[2] +#define F_SECT_PER_TRACK raw_cmd->cmd[3] +#define F_GAP raw_cmd->cmd[4] +#define F_FILL raw_cmd->cmd[5] +#define NR_F 6 + +/* + * Maximum disk size (in kilobytes). This default is used whenever the + * current disk size is unknown. + * [Now it is rather a minimum] + */ +#define MAX_DISK_SIZE 4 /* 3984*/ + +#define K_64 0x10000 /* 64KB */ + +/* + * globals used by 'result()' + */ +#define MAX_REPLIES 16 +static unsigned char reply_buffer[MAX_REPLIES]; +static int inr; /* size of reply buffer, when called from interrupt */ +#define ST0 (reply_buffer[0]) +#define ST1 (reply_buffer[1]) +#define ST2 (reply_buffer[2]) +#define ST3 (reply_buffer[0]) /* result of GETSTATUS */ +#define R_TRACK (reply_buffer[3]) +#define R_HEAD (reply_buffer[4]) +#define R_SECTOR (reply_buffer[5]) +#define R_SIZECODE (reply_buffer[6]) + +#define SEL_DLY (2*HZ/100) + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +/* + * this struct defines the different floppy drive types. + */ +static struct { + struct floppy_drive_params params; + const char *name; /* name printed while booting */ +} default_drive_params[]= { +/* NOTE: the time values in jiffies should be in msec! 
+ CMOS drive type + | Maximum data rate supported by drive type + | | Head load time, msec + | | | Head unload time, msec (not used) + | | | | Step rate interval, usec + | | | | | Time needed for spinup time (jiffies) + | | | | | | Timeout for spinning down (jiffies) + | | | | | | | Spindown offset (where disk stops) + | | | | | | | | Select delay + | | | | | | | | | RPS + | | | | | | | | | | Max number of tracks + | | | | | | | | | | | Interrupt timeout + | | | | | | | | | | | | Max nonintlv. sectors + | | | | | | | | | | | | | -Max Errors- flags */ +{{0, 500, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 80, 3*HZ, 20, {3,1,2,0,2}, 0, + 0, { 7, 4, 8, 2, 1, 5, 3,10}, 3*HZ/2, 0 }, "unknown" }, + +{{1, 300, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 40, 3*HZ, 17, {3,1,2,0,2}, 0, + 0, { 1, 0, 0, 0, 0, 0, 0, 0}, 3*HZ/2, 1 }, "360K PC" }, /*5 1/4 360 KB PC*/ + +{{2, 500, 16, 16, 6000, 4*HZ/10, 3*HZ, 14, SEL_DLY, 6, 83, 3*HZ, 17, {3,1,2,0,2}, 0, + 0, { 2, 5, 6,23,10,20,12, 0}, 3*HZ/2, 2 }, "1.2M" }, /*5 1/4 HD AT*/ + +{{3, 250, 16, 16, 3000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0, + 0, { 4,22,21,30, 3, 0, 0, 0}, 3*HZ/2, 4 }, "720k" }, /*3 1/2 DD*/ + +{{4, 500, 16, 16, 4000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0, + 0, { 7, 4,25,22,31,21,29,11}, 3*HZ/2, 7 }, "1.44M" }, /*3 1/2 HD*/ + +{{5, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0, + 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M AMI BIOS" }, /*3 1/2 ED*/ + +{{6, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0, + 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M" } /*3 1/2 ED*/ +/* | --autodetected formats--- | | | + * read_track | | Name printed when booting + * | Native format + * Frequency of disk change checks */ +}; + +static struct floppy_drive_params drive_params[N_DRIVE]; +static struct floppy_drive_struct drive_state[N_DRIVE]; +static struct floppy_write_errors write_errors[N_DRIVE]; +static struct floppy_raw_cmd *raw_cmd, default_raw_cmd; + +/* + * This struct defines the different floppy types. + * + * Bit 0 of 'stretch' tells if the tracks need to be doubled for some + * types (e.g. 360kB diskette in 1.2MB drive, etc.). Bit 1 of 'stretch' + * tells if the disk is in Commodore 1581 format, which means side 0 sectors + * are located on side 1 of the disk but with a side 0 ID, and vice-versa. + * This is the same as the Sharp MZ-80 5.25" CP/M disk format, except that the + * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical + * side 0 is on physical side 0 (but with the misnamed sector IDs). + * 'stretch' should probably be renamed to something more general, like + * 'options'. Other parameters should be self-explanatory (see also + * setfdprm(8)). 
+ */ +static struct floppy_struct floppy_type[32] = { + { 0, 0,0, 0,0,0x00,0x00,0x00,0x00,NULL }, /* 0 no testing */ + { 720, 9,2,40,0,0x2A,0x02,0xDF,0x50,"d360" }, /* 1 360KB PC */ + { 2400,15,2,80,0,0x1B,0x00,0xDF,0x54,"h1200" }, /* 2 1.2MB AT */ + { 720, 9,1,80,0,0x2A,0x02,0xDF,0x50,"D360" }, /* 3 360KB SS 3.5" */ + { 1440, 9,2,80,0,0x2A,0x02,0xDF,0x50,"D720" }, /* 4 720KB 3.5" */ + { 720, 9,2,40,1,0x23,0x01,0xDF,0x50,"h360" }, /* 5 360KB AT */ + { 1440, 9,2,80,0,0x23,0x01,0xDF,0x50,"h720" }, /* 6 720KB AT */ + { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,"H1440" }, /* 7 1.44MB 3.5" */ + { 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"E2880" }, /* 8 2.88MB 3.5" */ + { 6240,39,2,80,0,0x1B,0x43,0xAF,0x28,"E3120"}, /* 9 3.12MB 3.5" */ + + { 2880,18,2,80,0,0x25,0x00,0xDF,0x02,"h1440" }, /* 10 1.44MB 5.25" */ + { 3360,21,2,80,0,0x1C,0x00,0xCF,0x0C,"H1680" }, /* 11 1.68MB 3.5" */ + { 820,10,2,41,1,0x25,0x01,0xDF,0x2E,"h410" }, /* 12 410KB 5.25" */ + { 1640,10,2,82,0,0x25,0x02,0xDF,0x2E,"H820" }, /* 13 820KB 3.5" */ + { 2952,18,2,82,0,0x25,0x00,0xDF,0x02,"h1476" }, /* 14 1.48MB 5.25" */ + { 3444,21,2,82,0,0x25,0x00,0xDF,0x0C,"H1722" }, /* 15 1.72MB 3.5" */ + { 840,10,2,42,1,0x25,0x01,0xDF,0x2E,"h420" }, /* 16 420KB 5.25" */ + { 1660,10,2,83,0,0x25,0x02,0xDF,0x2E,"H830" }, /* 17 830KB 3.5" */ + { 2988,18,2,83,0,0x25,0x00,0xDF,0x02,"h1494" }, /* 18 1.49MB 5.25" */ + { 3486,21,2,83,0,0x25,0x00,0xDF,0x0C,"H1743" }, /* 19 1.74 MB 3.5" */ + + { 1760,11,2,80,0,0x1C,0x09,0xCF,0x00,"h880" }, /* 20 880KB 5.25" */ + { 2080,13,2,80,0,0x1C,0x01,0xCF,0x00,"D1040" }, /* 21 1.04MB 3.5" */ + { 2240,14,2,80,0,0x1C,0x19,0xCF,0x00,"D1120" }, /* 22 1.12MB 3.5" */ + { 3200,20,2,80,0,0x1C,0x20,0xCF,0x2C,"h1600" }, /* 23 1.6MB 5.25" */ + { 3520,22,2,80,0,0x1C,0x08,0xCF,0x2e,"H1760" }, /* 24 1.76MB 3.5" */ + { 3840,24,2,80,0,0x1C,0x20,0xCF,0x00,"H1920" }, /* 25 1.92MB 3.5" */ + { 6400,40,2,80,0,0x25,0x5B,0xCF,0x00,"E3200" }, /* 26 3.20MB 3.5" */ + { 7040,44,2,80,0,0x25,0x5B,0xCF,0x00,"E3520" }, /* 27 3.52MB 3.5" */ + { 7680,48,2,80,0,0x25,0x63,0xCF,0x00,"E3840" }, /* 28 3.84MB 3.5" */ + + { 3680,23,2,80,0,0x1C,0x10,0xCF,0x00,"H1840" }, /* 29 1.84MB 3.5" */ + { 1600,10,2,80,0,0x25,0x02,0xDF,0x2E,"D800" }, /* 30 800KB 3.5" */ + { 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */ +}; + +#define NUMBER(x) (sizeof(x) / sizeof(*(x))) +#define SECTSIZE (_FD_SECTSIZE(*floppy)) + +/* Auto-detection: Disk type used until the next media change occurs. */ +static struct floppy_struct *current_type[N_DRIVE] = { + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL +}; + +/* + * User-provided type information. current_type points to + * the respective entry of this array. + */ +static struct floppy_struct user_params[N_DRIVE]; + +static int floppy_sizes[256]; +static int floppy_blocksizes[256] = { 0, }; + +/* + * The driver is trying to determine the correct media format + * while probing is set. rw_interrupt() clears it after a + * successful access. + */ +static int probing = 0; + +/* Synchronization of FDC access. */ +#define FD_COMMAND_NONE -1 +#define FD_COMMAND_ERROR 2 +#define FD_COMMAND_OKAY 3 + +static volatile int command_status = FD_COMMAND_NONE, fdc_busy = 0; +static struct wait_queue *fdc_wait = NULL, *command_done = NULL; +#ifdef MACH +#define NO_SIGNAL (! issig () || ! 
interruptible) +#else +#define NO_SIGNAL (!(current->signal & ~current->blocked) || !interruptible) +#endif +#define CALL(x) if ((x) == -EINTR) return -EINTR +#define ECALL(x) if ((ret = (x))) return ret; +#define _WAIT(x,i) CALL(ret=wait_til_done((x),i)) +#define WAIT(x) _WAIT((x),interruptible) +#define IWAIT(x) _WAIT((x),1) + +/* Errors during formatting are counted here. */ +static int format_errors; + +/* Format request descriptor. */ +static struct format_descr format_req; + +/* + * Rate is 0 for 500kb/s, 1 for 300kbps, 2 for 250kbps + * Spec1 is 0xSH, where S is stepping rate (F=1ms, E=2ms, D=3ms etc), + * H is head unload time (1=16ms, 2=32ms, etc) + */ + +/* + * Track buffer + * Because these are written to by the DMA controller, they must + * not contain a 64k byte boundary crossing, or data will be + * corrupted/lost. + */ +static char *floppy_track_buffer=0; +static int max_buffer_sectors=0; + +static int *errors; +typedef void (*done_f)(int); +static struct cont_t { + void (*interrupt)(void); /* this is called after the interrupt of the + * main command */ + void (*redo)(void); /* this is called to retry the operation */ + void (*error)(void); /* this is called to tally an error */ + done_f done; /* this is called to say if the operation has + * succeeded/failed */ +} *cont=NULL; + +static void floppy_ready(void); +static void floppy_start(void); +static void process_fd_request(void); +static void recalibrate_floppy(void); +static void floppy_shutdown(void); + +static int floppy_grab_irq_and_dma(void); +static void floppy_release_irq_and_dma(void); + +/* + * The "reset" variable should be tested whenever an interrupt is scheduled, + * after the commands have been sent. This is to ensure that the driver doesn't + * get wedged when the interrupt doesn't come because of a failed command. + * reset doesn't need to be tested before sending commands, because + * output_byte is automatically disabled when reset is set. + */ +#define CHECK_RESET { if (FDCS->reset){ reset_fdc(); return; } } +static void reset_fdc(void); + +/* + * These are global variables, as that's the easiest way to give + * information to interrupts. They are the data used for the current + * request. 
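+ *
+ * Roughly: buffer_drive/buffer_track identify whose cylinder is cached
+ * in floppy_track_buffer, and buffer_min/buffer_max bound the valid part
+ * of that cache, in 512-byte sectors counted from the start of the
+ * cylinder.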
+ */ +#define NO_TRACK -1 +#define NEED_1_RECAL -2 +#define NEED_2_RECAL -3 + +/* */ +static int usage_count = 0; + + +/* buffer related variables */ +static int buffer_track = -1; +static int buffer_drive = -1; +static int buffer_min = -1; +static int buffer_max = -1; + +/* fdc related variables, should end up in a struct */ +static struct floppy_fdc_state fdc_state[N_FDC]; +static int fdc; /* current fdc */ + +static struct floppy_struct *_floppy = floppy_type; +static unsigned char current_drive = 0; +static long current_count_sectors = 0; +static unsigned char sector_t; /* sector in track */ + +#ifndef fd_eject +#define fd_eject(x) -EINVAL +#endif + + +#ifdef DEBUGT +static long unsigned debugtimer; +#endif + +/* + * Debugging + * ========= + */ +static inline void set_debugt(void) +{ +#ifdef DEBUGT + debugtimer = jiffies; +#endif +} + +static inline void debugt(const char *message) +{ +#ifdef DEBUGT + if (DP->flags & DEBUGT) + printk("%s dtime=%lu\n", message, jiffies-debugtimer); +#endif +} + +typedef void (*timeout_fn)(unsigned long); +static struct timer_list fd_timeout ={ NULL, NULL, 0, 0, + (timeout_fn) floppy_shutdown }; + +static const char *timeout_message; + +#ifdef FLOPPY_SANITY_CHECK +static void is_alive(const char *message) +{ + /* this routine checks whether the floppy driver is "alive" */ + if (fdc_busy && command_status < 2 && !fd_timeout.prev){ + DPRINT("timeout handler died: %s\n",message); + } +} +#endif + +#ifdef FLOPPY_SANITY_CHECK + +#define OLOGSIZE 20 + +static void (*lasthandler)(void) = NULL; +static int interruptjiffies=0; +static int resultjiffies=0; +static int resultsize=0; +static int lastredo=0; + +static struct output_log { + unsigned char data; + unsigned char status; + unsigned long jiffies; +} output_log[OLOGSIZE]; + +static int output_log_pos=0; +#endif + +#define CURRENTD -1 +#define MAXTIMEOUT -2 + +static void reschedule_timeout(int drive, const char *message, int marg) +{ + if (drive == CURRENTD) + drive = current_drive; + del_timer(&fd_timeout); + if (drive < 0 || drive > N_DRIVE) { + fd_timeout.expires = jiffies + 20*HZ; + drive=0; + } else + fd_timeout.expires = jiffies + UDP->timeout; + add_timer(&fd_timeout); + if (UDP->flags & FD_DEBUG){ + DPRINT("reschedule timeout "); + printk(message, marg); + printk("\n"); + } + timeout_message = message; +} + +static int maximum(int a, int b) +{ + if(a > b) + return a; + else + return b; +} +#define INFBOUND(a,b) (a)=maximum((a),(b)); + +static int minimum(int a, int b) +{ + if(a < b) + return a; + else + return b; +} +#define SUPBOUND(a,b) (a)=minimum((a),(b)); + + +/* + * Bottom half floppy driver. + * ========================== + * + * This part of the file contains the code talking directly to the hardware, + * and also the main service loop (seek-configure-spinup-command) + */ + +/* + * disk change. + * This routine is responsible for maintaining the FD_DISK_CHANGE flag, + * and the last_checked date. + * + * last_checked is the date of the last check which showed 'no disk change' + * FD_DISK_CHANGE is set under two conditions: + * 1. The floppy has been changed after some i/o to that floppy already + * took place. + * 2. No floppy disk is in the drive. This is done in order to ensure that + * requests are quickly flushed in case there is no disk in the drive. It + * follows that FD_DISK_CHANGE can only be cleared if there is a disk in + * the drive. + * + * For 1., maxblock is observed. Maxblock is 0 if no i/o has taken place yet. + * For 2., FD_DISK_NEWCHANGE is watched. 
FD_DISK_NEWCHANGE is cleared on + * each seek. If a disk is present, the disk change line should also be + * cleared on each seek. Thus, if FD_DISK_NEWCHANGE is clear, but the disk + * change line is set, this means either that no disk is in the drive, or + * that it has been removed since the last seek. + * + * This means that we really have a third possibility too: + * The floppy has been changed after the last seek. + */ + +static int disk_change(int drive) +{ + int fdc=FDC(drive); +#ifdef FLOPPY_SANITY_CHECK + if (jiffies - UDRS->select_date < UDP->select_delay) + DPRINT("WARNING disk change called early\n"); + if (!(FDCS->dor & (0x10 << UNIT(drive))) || + (FDCS->dor & 3) != UNIT(drive) || + fdc != FDC(drive)){ + DPRINT("probing disk change on unselected drive\n"); + DPRINT("drive=%d fdc=%d dor=%x\n",drive, FDC(drive), + FDCS->dor); + } +#endif + +#ifdef DCL_DEBUG + if (UDP->flags & FD_DEBUG){ + DPRINT("checking disk change line for drive %d\n",drive); + DPRINT("jiffies=%ld\n", jiffies); + DPRINT("disk change line=%x\n",fd_inb(FD_DIR)&0x80); + DPRINT("flags=%x\n",UDRS->flags); + } +#endif + if (UDP->flags & FD_BROKEN_DCL) + return UTESTF(FD_DISK_CHANGED); + if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80){ + USETF(FD_VERIFY); /* verify write protection */ + if (UDRS->maxblock){ + /* mark it changed */ + USETF(FD_DISK_CHANGED); + } + + /* invalidate its geometry */ + if (UDRS->keep_data >= 0) { + if ((UDP->flags & FTD_MSG) && + current_type[drive] != NULL) + DPRINT("Disk type is undefined after " + "disk change\n"); + current_type[drive] = NULL; + floppy_sizes[TOMINOR(drive)] = MAX_DISK_SIZE; + } + + /*USETF(FD_DISK_NEWCHANGE);*/ + return 1; + } else { + UDRS->last_checked=jiffies; + UCLEARF(FD_DISK_NEWCHANGE); + } + return 0; +} + +static inline int is_selected(int dor, int unit) +{ + return ((dor & (0x10 << unit)) && (dor &3) == unit); +} + +static int set_dor(int fdc, char mask, char data) +{ + register unsigned char drive, unit, newdor,olddor; + + if (FDCS->address == -1) + return -1; + + olddor = FDCS->dor; + newdor = (olddor & mask) | data; + if (newdor != olddor){ + unit = olddor & 0x3; + if (is_selected(olddor, unit) && !is_selected(newdor,unit)){ + drive = REVDRIVE(fdc,unit); +#ifdef DCL_DEBUG + if (UDP->flags & FD_DEBUG){ + DPRINT("calling disk change from set_dor\n"); + } +#endif + disk_change(drive); + } + FDCS->dor = newdor; + fd_outb(newdor, FD_DOR); + + unit = newdor & 0x3; + if (!is_selected(olddor, unit) && is_selected(newdor,unit)){ + drive = REVDRIVE(fdc,unit); + UDRS->select_date = jiffies; + } + } + + /* FIXME: we should be more graceful here */ + + if (newdor & FLOPPY_MOTOR_MASK) + floppy_grab_irq_and_dma(); + if (olddor & FLOPPY_MOTOR_MASK) + floppy_release_irq_and_dma(); + return olddor; +} + +static void twaddle(void) +{ + if (DP->select_delay) + return; + fd_outb(FDCS->dor & ~(0x10<<UNIT(current_drive)),FD_DOR); + fd_outb(FDCS->dor, FD_DOR); + DRS->select_date = jiffies; +} + +/* reset all driver information about the current fdc. This is needed after + * a reset, and after a raw command. */ +static void reset_fdc_info(int mode) +{ + int drive; + + FDCS->spec1 = FDCS->spec2 = -1; + FDCS->need_configure = 1; + FDCS->perp_mode = 1; + FDCS->rawcmd = 0; + for (drive = 0; drive < N_DRIVE; drive++) + if (FDC(drive) == fdc && + (mode || UDRS->track != NEED_1_RECAL)) + UDRS->track = NEED_2_RECAL; +} + +/* selects the fdc and drive, and enables the fdc's input/dma. 
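+ * Assuming the usual FDC digital output register layout: bits 0-1 select
+ * the unit, bit 2 is the (active low) controller reset, bit 3 enables DMA
+ * and interrupts (the 8 passed to set_dor below), and bits 4-7 are the
+ * per-drive motor enables (0x10 << unit).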
*/ +static void set_fdc(int drive) +{ + if (drive >= 0 && drive < N_DRIVE){ + fdc = FDC(drive); + current_drive = drive; + } + if (fdc != 1 && fdc != 0) { + printk("bad fdc value\n"); + return; + } + set_dor(fdc,~0,8); +#if N_FDC > 1 + set_dor(1-fdc, ~8, 0); +#endif + if (FDCS->rawcmd == 2) + reset_fdc_info(1); + if (fd_inb(FD_STATUS) != STATUS_READY) + FDCS->reset = 1; +} + +/* locks the driver */ +static int lock_fdc(int drive, int interruptible) +{ + unsigned long flags; + + if (!usage_count){ + printk(KERN_ERR "trying to lock fdc while usage count=0\n"); + return -1; + } + if(floppy_grab_irq_and_dma()==-1) + return -EBUSY; + INT_OFF; + while (fdc_busy && NO_SIGNAL) + interruptible_sleep_on(&fdc_wait); + if (fdc_busy){ + INT_ON; + return -EINTR; + } + fdc_busy = 1; + INT_ON; + command_status = FD_COMMAND_NONE; + reschedule_timeout(drive, "lock fdc", 0); + set_fdc(drive); + return 0; +} + +#define LOCK_FDC(drive,interruptible) \ +if (lock_fdc(drive,interruptible)) return -EINTR; + + +/* unlocks the driver */ +static inline void unlock_fdc(void) +{ + raw_cmd = 0; + if (!fdc_busy) + DPRINT("FDC access conflict!\n"); + + if (DEVICE_INTR) + DPRINT("device interrupt still active at FDC release: %p!\n", + DEVICE_INTR); + command_status = FD_COMMAND_NONE; + del_timer(&fd_timeout); + cont = NULL; + fdc_busy = 0; + floppy_release_irq_and_dma(); + wake_up(&fdc_wait); +} + +/* switches the motor off after a given timeout */ +static void motor_off_callback(unsigned long nr) +{ + unsigned char mask = ~(0x10 << UNIT(nr)); + + set_dor(FDC(nr), mask, 0); +} + +static struct timer_list motor_off_timer[N_DRIVE] = { + { NULL, NULL, 0, 0, motor_off_callback }, + { NULL, NULL, 0, 1, motor_off_callback }, + { NULL, NULL, 0, 2, motor_off_callback }, + { NULL, NULL, 0, 3, motor_off_callback }, + { NULL, NULL, 0, 4, motor_off_callback }, + { NULL, NULL, 0, 5, motor_off_callback }, + { NULL, NULL, 0, 6, motor_off_callback }, + { NULL, NULL, 0, 7, motor_off_callback } +}; + +/* schedules motor off */ +static void floppy_off(unsigned int drive) +{ + unsigned long volatile delta; + register int fdc=FDC(drive); + + if (!(FDCS->dor & (0x10 << UNIT(drive)))) + return; + + del_timer(motor_off_timer+drive); + + /* make spindle stop in a position which minimizes spinup time + * next time */ + if (UDP->rps){ + delta = jiffies - UDRS->first_read_date + HZ - + UDP->spindown_offset; + delta = ((delta * UDP->rps) % HZ) / UDP->rps; + motor_off_timer[drive].expires = jiffies + UDP->spindown - delta; + } + add_timer(motor_off_timer+drive); +} + +/* + * cycle through all N_DRIVE floppy drives, for disk change testing. + * stopping at current drive. This is done before any long operation, to + * be sure to have up to date disk change information. 
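+ * It starts at the drive after current_drive, wraps around all N_DRIVE
+ * units, skips drives that are closed (or that have a select delay
+ * configured), and restores the original selection with
+ * set_fdc(saved_drive) afterwards.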
+ */ +static void scandrives(void) +{ + int i, drive, saved_drive; + + if (DP->select_delay) + return; + + saved_drive = current_drive; + for (i=0; i < N_DRIVE; i++){ + drive = (saved_drive + i + 1) % N_DRIVE; + if (UDRS->fd_ref == 0 || UDP->select_delay != 0) + continue; /* skip closed drives */ + set_fdc(drive); + if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) & + (0x10 << UNIT(drive)))) + /* switch the motor off again, if it was off to + * begin with */ + set_dor(fdc, ~(0x10 << UNIT(drive)), 0); + } + set_fdc(saved_drive); +} + +static void empty(void) +{ +} + +static struct tq_struct floppy_tq = +{ 0, 0, 0, 0 }; + +static struct timer_list fd_timer ={ NULL, NULL, 0, 0, 0 }; + +static void cancel_activity(void) +{ + CLEAR_INTR; + floppy_tq.routine = (void *)(void *) empty; + del_timer(&fd_timer); +} + +/* this function makes sure that the disk stays in the drive during the + * transfer */ +static void fd_watchdog(void) +{ +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG){ + DPRINT("calling disk change from watchdog\n"); + } +#endif + + if (disk_change(current_drive)){ + DPRINT("disk removed during i/o\n"); + cancel_activity(); + cont->done(0); + reset_fdc(); + } else { + del_timer(&fd_timer); + fd_timer.function = (timeout_fn) fd_watchdog; + fd_timer.expires = jiffies + HZ / 10; + add_timer(&fd_timer); + } +} + +static void main_command_interrupt(void) +{ + del_timer(&fd_timer); + cont->interrupt(); +} + +/* waits for a delay (spinup or select) to pass */ +static int wait_for_completion(int delay, timeout_fn function) +{ + if (FDCS->reset){ + reset_fdc(); /* do the reset during sleep to win time + * if we don't need to sleep, it's a good + * occasion anyways */ + return 1; + } + + if ((signed) (jiffies - delay) < 0){ + del_timer(&fd_timer); + fd_timer.function = function; + fd_timer.expires = delay; + add_timer(&fd_timer); + return 1; + } + return 0; +} + +static int hlt_disabled=0; +static void floppy_disable_hlt(void) +{ + unsigned long flags; + + INT_OFF; + if (!hlt_disabled){ + hlt_disabled=1; +#ifdef HAVE_DISABLE_HLT + disable_hlt(); +#endif + } + INT_ON; +} + +static void floppy_enable_hlt(void) +{ + unsigned long flags; + + INT_OFF; + if (hlt_disabled){ + hlt_disabled=0; +#ifdef HAVE_DISABLE_HLT + enable_hlt(); +#endif + } + INT_ON; +} + + +static void setup_DMA(void) +{ + unsigned long flags; + +#ifdef FLOPPY_SANITY_CHECK + if (raw_cmd->length == 0){ + int i; + + printk("zero dma transfer size:"); + for (i=0; i < raw_cmd->cmd_count; i++) + printk("%x,", raw_cmd->cmd[i]); + printk("\n"); + cont->done(0); + FDCS->reset = 1; + return; + } + if ((long) raw_cmd->kernel_data % 512){ + printk("non aligned address: %p\n", raw_cmd->kernel_data); + cont->done(0); + FDCS->reset=1; + return; + } + if (CROSS_64KB(raw_cmd->kernel_data, raw_cmd->length)) { + printk("DMA crossing 64-K boundary %p-%p\n", + raw_cmd->kernel_data, + raw_cmd->kernel_data + raw_cmd->length); + cont->done(0); + FDCS->reset=1; + return; + } +#endif + INT_OFF; + fd_disable_dma(); + fd_clear_dma_ff(); + fd_cacheflush(raw_cmd->kernel_data, raw_cmd->length); + fd_set_dma_mode((raw_cmd->flags & FD_RAW_READ)? 
+ DMA_MODE_READ : DMA_MODE_WRITE); + fd_set_dma_addr(virt_to_bus(raw_cmd->kernel_data)); + fd_set_dma_count(raw_cmd->length); + virtual_dma_port = FDCS->address; + fd_enable_dma(); + INT_ON; + floppy_disable_hlt(); +} + +void show_floppy(void); + +/* waits until the fdc becomes ready */ +static int wait_til_ready(void) +{ + int counter, status; + if(FDCS->reset) + return -1; + for (counter = 0; counter < 10000; counter++) { + status = fd_inb(FD_STATUS); + if (status & STATUS_READY) + return status; + } + if (!initialising) { + DPRINT("Getstatus times out (%x) on fdc %d\n", + status, fdc); + show_floppy(); + } + FDCS->reset = 1; + return -1; +} + +/* sends a command byte to the fdc */ +static int output_byte(char byte) +{ + int status; + + if ((status = wait_til_ready()) < 0) + return -1; + if ((status & (STATUS_READY|STATUS_DIR|STATUS_DMA)) == STATUS_READY){ + fd_outb(byte,FD_DATA); +#ifdef FLOPPY_SANITY_CHECK + output_log[output_log_pos].data = byte; + output_log[output_log_pos].status = status; + output_log[output_log_pos].jiffies = jiffies; + output_log_pos = (output_log_pos + 1) % OLOGSIZE; +#endif + return 0; + } + FDCS->reset = 1; + if (!initialising) { + DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n", + byte, fdc, status); + show_floppy(); + } + return -1; +} +#define LAST_OUT(x) if (output_byte(x)<0){ reset_fdc();return;} + +/* gets the response from the fdc */ +static int result(void) +{ + int i, status; + + for(i=0; i < MAX_REPLIES; i++) { + if ((status = wait_til_ready()) < 0) + break; + status &= STATUS_DIR|STATUS_READY|STATUS_BUSY|STATUS_DMA; + if ((status & ~STATUS_BUSY) == STATUS_READY){ +#ifdef FLOPPY_SANITY_CHECK + resultjiffies = jiffies; + resultsize = i; +#endif + return i; + } + if (status == (STATUS_DIR|STATUS_READY|STATUS_BUSY)) + reply_buffer[i] = fd_inb(FD_DATA); + else + break; + } + if(!initialising) { + DPRINT("get result error. Fdc=%d Last status=%x Read bytes=%d\n", + fdc, status, i); + show_floppy(); + } + FDCS->reset = 1; + return -1; +} + +#define MORE_OUTPUT -2 +/* does the fdc need more output? */ +static int need_more_output(void) +{ + int status; + if( (status = wait_til_ready()) < 0) + return -1; + if ((status & (STATUS_READY|STATUS_DIR|STATUS_DMA)) == STATUS_READY) + return MORE_OUTPUT; + return result(); +} + +/* Set perpendicular mode as required, based on data rate, if supported. + * 82077 Now tested. 1Mbps data rate only possible with 82077-1. + */ +static inline void perpendicular_mode(void) +{ + unsigned char perp_mode; + + if (raw_cmd->rate & 0x40){ + switch(raw_cmd->rate & 3){ + case 0: + perp_mode=2; + break; + case 3: + perp_mode=3; + break; + default: + DPRINT("Invalid data rate for perpendicular mode!\n"); + cont->done(0); + FDCS->reset = 1; /* convenient way to return to + * redo without to much hassle (deep + * stack et al. 
*/ + return; + } + } else + perp_mode = 0; + + if (FDCS->perp_mode == perp_mode) + return; + if (FDCS->version >= FDC_82077_ORIG) { + output_byte(FD_PERPENDICULAR); + output_byte(perp_mode); + FDCS->perp_mode = perp_mode; + } else if (perp_mode) { + DPRINT("perpendicular mode not supported by this FDC.\n"); + } +} /* perpendicular_mode */ + +static int fifo_depth = 0xa; +static int no_fifo = 0; + +static int fdc_configure(void) +{ + /* Turn on FIFO */ + output_byte(FD_CONFIGURE); + if(need_more_output() != MORE_OUTPUT) + return 0; + output_byte(0); + output_byte(0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf)); + output_byte(0); /* pre-compensation from track + 0 upwards */ + return 1; +} + +#define NOMINAL_DTR 500 + +/* Issue a "SPECIFY" command to set the step rate time, head unload time, + * head load time, and DMA disable flag to values needed by floppy. + * + * The value "dtr" is the data transfer rate in Kbps. It is needed + * to account for the data rate-based scaling done by the 82072 and 82077 + * FDC types. This parameter is ignored for other types of FDCs (i.e. + * 8272a). + * + * Note that changing the data transfer rate has a (probably deleterious) + * effect on the parameters subject to scaling for 82072/82077 FDCs, so + * fdc_specify is called again after each data transfer rate + * change. + * + * srt: 1000 to 16000 in microseconds + * hut: 16 to 240 milliseconds + * hlt: 2 to 254 milliseconds + * + * These values are rounded up to the next highest available delay time. + */ +static void fdc_specify(void) +{ + unsigned char spec1, spec2; + int srt, hlt, hut; + unsigned long dtr = NOMINAL_DTR; + unsigned long scale_dtr = NOMINAL_DTR; + int hlt_max_code = 0x7f; + int hut_max_code = 0xf; + + if (FDCS->need_configure && FDCS->version >= FDC_82072A) { + fdc_configure(); + FDCS->need_configure = 0; + /*DPRINT("FIFO enabled\n");*/ + } + + switch (raw_cmd->rate & 0x03) { + case 3: + dtr = 1000; + break; + case 1: + dtr = 300; + if (FDCS->version >= FDC_82078) { + /* chose the default rate table, not the one + * where 1 = 2 Mbps */ + output_byte(FD_DRIVESPEC); + if(need_more_output() == MORE_OUTPUT) { + output_byte(UNIT(current_drive)); + output_byte(0xc0); + } + } + break; + case 2: + dtr = 250; + break; + } + + if (FDCS->version >= FDC_82072) { + scale_dtr = dtr; + hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */ + hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */ + } + + /* Convert step rate from microseconds to milliseconds and 4 bits */ + srt = 16 - (DP->srt*scale_dtr/1000 + NOMINAL_DTR - 1)/NOMINAL_DTR; + SUPBOUND(srt, 0xf); + INFBOUND(srt, 0); + + hlt = (DP->hlt*scale_dtr/2 + NOMINAL_DTR - 1)/NOMINAL_DTR; + if (hlt < 0x01) + hlt = 0x01; + else if (hlt > 0x7f) + hlt = hlt_max_code; + + hut = (DP->hut*scale_dtr/16 + NOMINAL_DTR - 1)/NOMINAL_DTR; + if (hut < 0x1) + hut = 0x1; + else if (hut > 0xf) + hut = hut_max_code; + + spec1 = (srt << 4) | hut; + spec2 = (hlt << 1) | (use_virtual_dma & 1); + + /* If these parameters did not change, just return with success */ + if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) { + /* Go ahead and set spec1 and spec2 */ + output_byte(FD_SPECIFY); + output_byte(FDCS->spec1 = spec1); + output_byte(FDCS->spec2 = spec2); + } +} /* fdc_specify */ + +/* Set the FDC's data transfer rate on behalf of the specified drive. + * NOTE: with 82072/82077 FDCs, changing the data rate requires a reissue + * of the specify command (i.e. using the fdc_specify function). 
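+ *
+ * raw_cmd->rate & 3 is the standard FDC rate-select code written to the
+ * DCR below: 0 = 500 kbps, 1 = 300 kbps, 2 = 250 kbps, 3 = 1 Mbps. The
+ * reissue is needed because the SPECIFY codes computed in fdc_specify are
+ * scaled by the data rate; e.g. at 500 kbps the 4000 usec step rate of
+ * the "1.44M" drive entry encodes (roughly) as SRT code 16 - 4 = 12.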
+ */ +static int fdc_dtr(void) +{ + /* If data rate not already set to desired value, set it. */ + if ((raw_cmd->rate & 3) == FDCS->dtr) + return 0; + + /* Set dtr */ + fd_outb(raw_cmd->rate & 3, FD_DCR); + + /* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB) + * need a stabilization period of several milliseconds to be + * enforced after data rate changes before R/W operations. + * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies) + */ + FDCS->dtr = raw_cmd->rate & 3; + return(wait_for_completion(jiffies+2*HZ/100, + (timeout_fn) floppy_ready)); +} /* fdc_dtr */ + +static void tell_sector(void) +{ + printk(": track %d, head %d, sector %d, size %d", + R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE); +} /* tell_sector */ + + +/* + * OK, this error interpreting routine is called after a + * DMA read/write has succeeded + * or failed, so we check the results, and copy any buffers. + * hhb: Added better error reporting. + * ak: Made this into a separate routine. + */ +static int interpret_errors(void) +{ + char bad; + + if (inr!=7) { + DPRINT("-- FDC reply error"); + FDCS->reset = 1; + return 1; + } + + /* check IC to find cause of interrupt */ + switch (ST0 & ST0_INTR) { + case 0x40: /* error occurred during command execution */ + if (ST1 & ST1_EOC) + return 0; /* occurs with pseudo-DMA */ + bad = 1; + if (ST1 & ST1_WP) { + DPRINT("Drive is write protected\n"); + CLEARF(FD_DISK_WRITABLE); + cont->done(0); + bad = 2; + } else if (ST1 & ST1_ND) { + SETF(FD_NEED_TWADDLE); + } else if (ST1 & ST1_OR) { + if (DP->flags & FTD_MSG) + DPRINT("Over/Underrun - retrying\n"); + bad = 0; + }else if (*errors >= DP->max_errors.reporting){ + DPRINT(""); + if (ST0 & ST0_ECE) { + printk("Recalibrate failed!"); + } else if (ST2 & ST2_CRC) { + printk("data CRC error"); + tell_sector(); + } else if (ST1 & ST1_CRC) { + printk("CRC error"); + tell_sector(); + } else if ((ST1 & (ST1_MAM|ST1_ND)) || (ST2 & ST2_MAM)) { + if (!probing) { + printk("sector not found"); + tell_sector(); + } else + printk("probe failed..."); + } else if (ST2 & ST2_WC) { /* seek error */ + printk("wrong cylinder"); + } else if (ST2 & ST2_BC) { /* cylinder marked as bad */ + printk("bad cylinder"); + } else { + printk("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x", ST0, ST1, ST2); + tell_sector(); + } + printk("\n"); + + } + if (ST2 & ST2_WC || ST2 & ST2_BC) + /* wrong cylinder => recal */ + DRS->track = NEED_2_RECAL; + return bad; + case 0x80: /* invalid command given */ + DPRINT("Invalid FDC command given!\n"); + cont->done(0); + return 2; + case 0xc0: + DPRINT("Abnormal termination caused by polling\n"); + cont->error(); + return 2; + default: /* (0) Normal command termination */ + return 0; + } +} + +/* + * This routine is called when everything should be correctly set up + * for the transfer (i.e. floppy motor is on, the correct floppy is + * selected, and the head is sitting on the right track). + */ +static void setup_rw_floppy(void) +{ + int i, ready_date, r, flags; + timeout_fn function; + + flags = raw_cmd->flags; + if (flags & (FD_RAW_READ | FD_RAW_WRITE)) + flags |= FD_RAW_INTR; + + if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)){ + ready_date = DRS->spinup_date + DP->spinup; + /* If spinup will take a long time, rerun scandrives + * again just before spinup completion. Beware that + * after scandrives, we must again wait for selection. 
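+ * Concretely: if the remaining spinup time exceeds the select delay,
+ * schedule floppy_start at ready_date - select_delay so that selection
+ * is redone just before the disk is up to speed; otherwise just sleep
+ * until ready_date and re-enter setup_rw_floppy.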
+ */ + if ((signed) (ready_date - jiffies) > DP->select_delay){ + ready_date -= DP->select_delay; + function = (timeout_fn) floppy_start; + } else + function = (timeout_fn) setup_rw_floppy; + + /* wait until the floppy is spinning fast enough */ + if (wait_for_completion(ready_date,function)) + return; + } + + if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE)) + setup_DMA(); + + if (flags & FD_RAW_INTR) + SET_INTR(main_command_interrupt); + + r=0; + for (i=0; i< raw_cmd->cmd_count; i++) + r|=output_byte(raw_cmd->cmd[i]); + +#ifdef DEBUGT + debugt("rw_command: "); +#endif + if (r){ + cont->error(); + reset_fdc(); + return; + } + + if (!(flags & FD_RAW_INTR)){ + inr = result(); + cont->interrupt(); + } else if (flags & FD_RAW_NEED_DISK) + fd_watchdog(); +} + +static int blind_seek; + +/* + * This is the routine called after every seek (or recalibrate) interrupt + * from the floppy controller. + */ +static void seek_interrupt(void) +{ +#ifdef DEBUGT + debugt("seek interrupt:"); +#endif + if (inr != 2 || (ST0 & 0xF8) != 0x20) { + DPRINT("seek failed\n"); + DRS->track = NEED_2_RECAL; + cont->error(); + cont->redo(); + return; + } + if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek){ +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG){ + DPRINT("clearing NEWCHANGE flag because of effective seek\n"); + DPRINT("jiffies=%ld\n", jiffies); + } +#endif + CLEARF(FD_DISK_NEWCHANGE); /* effective seek */ + DRS->select_date = jiffies; + } + DRS->track = ST1; + floppy_ready(); +} + +static void check_wp(void) +{ + if (TESTF(FD_VERIFY)) { + /* check write protection */ + output_byte(FD_GETSTATUS); + output_byte(UNIT(current_drive)); + if (result() != 1){ + FDCS->reset = 1; + return; + } + CLEARF(FD_VERIFY); + CLEARF(FD_NEED_TWADDLE); +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG){ + DPRINT("checking whether disk is write protected\n"); + DPRINT("wp=%x\n",ST3 & 0x40); + } +#endif + if (!(ST3 & 0x40)) + SETF(FD_DISK_WRITABLE); + else + CLEARF(FD_DISK_WRITABLE); + } +} + +static void seek_floppy(void) +{ + int track; + + blind_seek=0; + +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG){ + DPRINT("calling disk change from seek\n"); + } +#endif + + if (!TESTF(FD_DISK_NEWCHANGE) && + disk_change(current_drive) && + (raw_cmd->flags & FD_RAW_NEED_DISK)){ + /* the media changed flag should be cleared after the seek. + * If it isn't, this means that there is really no disk in + * the drive. + */ + SETF(FD_DISK_CHANGED); + cont->done(0); + cont->redo(); + return; + } + if (DRS->track <= NEED_1_RECAL){ + recalibrate_floppy(); + return; + } else if (TESTF(FD_DISK_NEWCHANGE) && + (raw_cmd->flags & FD_RAW_NEED_DISK) && + (DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) { + /* we seek to clear the media-changed condition. Does anybody + * know a more elegant way, which works on all drives? 
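+ * The idea is to seek to a neighbouring cylinder first (raw_cmd->track-1,
+ * or cylinder 1 when the target is 0), so that the real seek which
+ * follows still produces step pulses -- and step pulses are what clear
+ * the drive's disk-change latch when a disk is present.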
*/ + if (raw_cmd->track) + track = raw_cmd->track - 1; + else { + if (DP->flags & FD_SILENT_DCL_CLEAR){ + set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0); + blind_seek = 1; + raw_cmd->flags |= FD_RAW_NEED_SEEK; + } + track = 1; + } + } else { + check_wp(); + if (raw_cmd->track != DRS->track && + (raw_cmd->flags & FD_RAW_NEED_SEEK)) + track = raw_cmd->track; + else { + setup_rw_floppy(); + return; + } + } + + SET_INTR(seek_interrupt); + output_byte(FD_SEEK); + output_byte(UNIT(current_drive)); + LAST_OUT(track); +#ifdef DEBUGT + debugt("seek command:"); +#endif +} + +static void recal_interrupt(void) +{ +#ifdef DEBUGT + debugt("recal interrupt:"); +#endif + if (inr !=2) + FDCS->reset = 1; + else if (ST0 & ST0_ECE) { + switch(DRS->track){ + case NEED_1_RECAL: +#ifdef DEBUGT + debugt("recal interrupt need 1 recal:"); +#endif + /* after a second recalibrate, we still haven't + * reached track 0. Probably no drive. Raise an + * error, as failing immediately might upset + * computers possessed by the Devil :-) */ + cont->error(); + cont->redo(); + return; + case NEED_2_RECAL: +#ifdef DEBUGT + debugt("recal interrupt need 2 recal:"); +#endif + /* If we already did a recalibrate, + * and we are not at track 0, this + * means we have moved. (The only way + * not to move at recalibration is to + * be already at track 0.) Clear the + * new change flag */ +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG){ + DPRINT("clearing NEWCHANGE flag because of second recalibrate\n"); + } +#endif + + CLEARF(FD_DISK_NEWCHANGE); + DRS->select_date = jiffies; + /* fall through */ + default: +#ifdef DEBUGT + debugt("recal interrupt default:"); +#endif + /* Recalibrate moves the head by at + * most 80 steps. If after one + * recalibrate we don't have reached + * track 0, this might mean that we + * started beyond track 80. Try + * again. */ + DRS->track = NEED_1_RECAL; + break; + } + } else + DRS->track = ST1; + floppy_ready(); +} + +static void print_result(char *message, int inr) +{ + int i; + + DPRINT("%s ", message); + if (inr >= 0) + for (i=0; i<inr; i++) + printk("repl[%d]=%x ", i, reply_buffer[i]); + printk("\n"); +} + +/* interrupt handler */ +void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs) +{ + void (*handler)(void) = DEVICE_INTR; + int do_print; + + lasthandler = handler; + interruptjiffies = jiffies; + + fd_disable_dma(); + floppy_enable_hlt(); + CLEAR_INTR; + if (fdc >= N_FDC || FDCS->address == -1){ + /* we don't even know which FDC is the culprit */ + printk("DOR0=%x\n", fdc_state[0].dor); + printk("floppy interrupt on bizarre fdc %d\n",fdc); + printk("handler=%p\n", handler); + is_alive("bizarre fdc"); + return; + } + + FDCS->reset = 0; + /* We have to clear the reset flag here, because apparently on boxes + * with level triggered interrupts (PS/2, Sparc, ...), it is needed to + * emit SENSEI's to clear the interrupt line. And FDCS->reset blocks the + * emission of the SENSEI's. + * It is OK to emit floppy commands because we are in an interrupt + * handler here, and thus we have to fear no interference of other + * activity. 
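+ *
+ * The unexpected-interrupt path below issues up to four FD_SENSEI
+ * commands: with drive polling, each of the four units may have pending
+ * interrupt status that has to be read back before the line is released.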
+ */ + + do_print = !handler && print_unex && !initialising; + + inr = result(); + if(do_print) + print_result("unexpected interrupt", inr); + if (inr == 0){ + int max_sensei = 4; + do { + output_byte(FD_SENSEI); + inr = result(); + if(do_print) + print_result("sensei", inr); + max_sensei--; + } while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2 && max_sensei); + } + if (handler) { + if(intr_count >= 2) { + /* expected interrupt */ + floppy_tq.routine = (void *)(void *) handler; + queue_task_irq(&floppy_tq, &tq_immediate); + mark_bh(IMMEDIATE_BH); + } else + handler(); + } else + FDCS->reset = 1; + is_alive("normal interrupt end"); +} + +static void recalibrate_floppy(void) +{ +#ifdef DEBUGT + debugt("recalibrate floppy:"); +#endif + SET_INTR(recal_interrupt); + output_byte(FD_RECALIBRATE); + LAST_OUT(UNIT(current_drive)); +} + +/* + * Must do 4 FD_SENSEIs after reset because of ``drive polling''. + */ +static void reset_interrupt(void) +{ +#ifdef DEBUGT + debugt("reset interrupt:"); +#endif + result(); /* get the status ready for set_fdc */ + if (FDCS->reset) { + printk("reset set in interrupt, calling %p\n", cont->error); + cont->error(); /* a reset just after a reset. BAD! */ + } + cont->redo(); +} + +/* + * reset is done by pulling bit 2 of DOR low for a while (old FDCs), + * or by setting the self clearing bit 7 of STATUS (newer FDCs) + */ +static void reset_fdc(void) +{ + SET_INTR(reset_interrupt); + FDCS->reset = 0; + reset_fdc_info(0); + + /* Pseudo-DMA may intercept 'reset finished' interrupt. */ + /* Irrelevant for systems with true DMA (i386). */ + fd_disable_dma(); + + if (FDCS->version >= FDC_82072A) + fd_outb(0x80 | (FDCS->dtr &3), FD_STATUS); + else { + fd_outb(FDCS->dor & ~0x04, FD_DOR); + udelay(FD_RESET_DELAY); + fd_outb(FDCS->dor, FD_DOR); + } +} + +void show_floppy(void) +{ + int i; + + printk("\n"); + printk("floppy driver state\n"); + printk("-------------------\n"); + printk("now=%ld last interrupt=%d last called handler=%p\n", + jiffies, interruptjiffies, lasthandler); + + +#ifdef FLOPPY_SANITY_CHECK + printk("timeout_message=%s\n", timeout_message); + printk("last output bytes:\n"); + for (i=0; i < OLOGSIZE; i++) + printk("%2x %2x %ld\n", + output_log[(i+output_log_pos) % OLOGSIZE].data, + output_log[(i+output_log_pos) % OLOGSIZE].status, + output_log[(i+output_log_pos) % OLOGSIZE].jiffies); + printk("last result at %d\n", resultjiffies); + printk("last redo_fd_request at %d\n", lastredo); + for (i=0; i<resultsize; i++){ + printk("%2x ", reply_buffer[i]); + } + printk("\n"); +#endif + + printk("status=%x\n", fd_inb(FD_STATUS)); + printk("fdc_busy=%d\n", fdc_busy); + if (DEVICE_INTR) + printk("DEVICE_INTR=%p\n", DEVICE_INTR); + if (floppy_tq.sync) + printk("floppy_tq.routine=%p\n", floppy_tq.routine); + if (fd_timer.prev) + printk("fd_timer.function=%p\n", fd_timer.function); + if (fd_timeout.prev){ + printk("timer_table=%p\n",fd_timeout.function); + printk("expires=%ld\n",fd_timeout.expires-jiffies); + printk("now=%ld\n",jiffies); + } + printk("cont=%p\n", cont); + printk("CURRENT=%p\n", CURRENT); + printk("command_status=%d\n", command_status); + printk("\n"); +} + +static void floppy_shutdown(void) +{ + if (!initialising) + show_floppy(); + cancel_activity(); + sti(); + + floppy_enable_hlt(); + fd_disable_dma(); + /* avoid dma going to a random drive after shutdown */ + + if (!initialising) + DPRINT("floppy timeout called\n"); + FDCS->reset = 1; + if (cont){ + cont->done(0); + cont->redo(); /* this will recall reset when needed */ + } else { + printk("no cont 
in shutdown!\n"); + process_fd_request(); + } + is_alive("floppy shutdown"); +} +/*typedef void (*timeout_fn)(unsigned long);*/ + +/* start motor, check media-changed condition and write protection */ +static int start_motor(void (*function)(void) ) +{ + int mask, data; + + mask = 0xfc; + data = UNIT(current_drive); + if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)){ + if (!(FDCS->dor & (0x10 << UNIT(current_drive)))){ + set_debugt(); + /* no read since this drive is running */ + DRS->first_read_date = 0; + /* note motor start time if motor is not yet running */ + DRS->spinup_date = jiffies; + data |= (0x10 << UNIT(current_drive)); + } + } else + if (FDCS->dor & (0x10 << UNIT(current_drive))) + mask &= ~(0x10 << UNIT(current_drive)); + + /* starts motor and selects floppy */ + del_timer(motor_off_timer + current_drive); + set_dor(fdc, mask, data); + + /* wait_for_completion also schedules reset if needed. */ + return(wait_for_completion(DRS->select_date+DP->select_delay, + (timeout_fn) function)); +} + +static void floppy_ready(void) +{ + CHECK_RESET; + if (start_motor(floppy_ready)) return; + if (fdc_dtr()) return; + +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG){ + DPRINT("calling disk change from floppy_ready\n"); + } +#endif + + if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) && + disk_change(current_drive) && + !DP->select_delay) + twaddle(); /* this clears the dcl on certain drive/controller + * combinations */ + + if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)){ + perpendicular_mode(); + fdc_specify(); /* must be done here because of hut, hlt ... */ + seek_floppy(); + } else + setup_rw_floppy(); +} + +static void floppy_start(void) +{ + reschedule_timeout(CURRENTD, "floppy start", 0); + + scandrives(); +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG){ + DPRINT("setting NEWCHANGE in floppy_start\n"); + } +#endif + SETF(FD_DISK_NEWCHANGE); + floppy_ready(); +} + +/* + * ======================================================================== + * here ends the bottom half. Exported routines are: + * floppy_start, floppy_off, floppy_ready, lock_fdc, unlock_fdc, set_fdc, + * start_motor, reset_fdc, reset_fdc_info, interpret_errors. + * Initialization also uses output_byte, result, set_dor, floppy_interrupt + * and set_dor. + * ======================================================================== + */ +/* + * General purpose continuations. 
+ * ============================== + */ + +static void do_wakeup(void) +{ + reschedule_timeout(MAXTIMEOUT, "do wakeup", 0); + cont = 0; + command_status += 2; + wake_up(&command_done); +} + +static struct cont_t wakeup_cont={ + empty, + do_wakeup, + empty, + (done_f)empty +}; + + +static struct cont_t intr_cont={ + empty, + process_fd_request, + empty, + (done_f) empty +}; + +static int wait_til_done(void (*handler)(void), int interruptible) +{ + int ret; + unsigned long flags; + + floppy_tq.routine = (void *)(void *) handler; + queue_task(&floppy_tq, &tq_immediate); + mark_bh(IMMEDIATE_BH); + INT_OFF; + while(command_status < 2 && NO_SIGNAL){ + is_alive("wait_til_done"); + if (interruptible) + interruptible_sleep_on(&command_done); + else + sleep_on(&command_done); + } + if (command_status < 2){ + cancel_activity(); + cont = &intr_cont; + reset_fdc(); + INT_ON; + return -EINTR; + } + INT_ON; + + if (FDCS->reset) + command_status = FD_COMMAND_ERROR; + if (command_status == FD_COMMAND_OKAY) + ret=0; + else + ret=-EIO; + command_status = FD_COMMAND_NONE; + return ret; +} + +static void generic_done(int result) +{ + command_status = result; + cont = &wakeup_cont; +} + +static void generic_success(void) +{ + cont->done(1); +} + +static void generic_failure(void) +{ + cont->done(0); +} + +static void success_and_wakeup(void) +{ + generic_success(); + cont->redo(); +} + + +/* + * formatting and rw support. + * ========================== + */ + +static int next_valid_format(void) +{ + int probed_format; + + probed_format = DRS->probed_format; + while(1){ + if (probed_format >= 8 || + !DP->autodetect[probed_format]){ + DRS->probed_format = 0; + return 1; + } + if (floppy_type[DP->autodetect[probed_format]].sect){ + DRS->probed_format = probed_format; + return 0; + } + probed_format++; + } +} + +static void bad_flp_intr(void) +{ + if (probing){ + DRS->probed_format++; + if (!next_valid_format()) + return; + } + (*errors)++; + INFBOUND(DRWE->badness, *errors); + if (*errors > DP->max_errors.abort) + cont->done(0); + if (*errors > DP->max_errors.reset) + FDCS->reset = 1; + else if (*errors > DP->max_errors.recal) + DRS->track = NEED_2_RECAL; +} + +static void set_floppy(kdev_t device) +{ + if (TYPE(device)) + _floppy = TYPE(device) + floppy_type; + else + _floppy = current_type[ DRIVE(device) ]; +} + +/* + * formatting support. 
+ * =================== + */ +static void format_interrupt(void) +{ + switch (interpret_errors()){ + case 1: + cont->error(); + case 2: + break; + case 0: + cont->done(1); + } + cont->redo(); +} + +#define CODE2SIZE (ssize = ((1 << SIZECODE) + 3) >> 2) +#define FM_MODE(x,y) ((y) & ~(((x)->rate & 0x80) >>1)) +#define CT(x) ((x) | 0x40) +static void setup_format_params(int track) +{ + struct fparm { + unsigned char track,head,sect,size; + } *here = (struct fparm *)floppy_track_buffer; + int il,n; + int count,head_shift,track_shift; + + raw_cmd = &default_raw_cmd; + raw_cmd->track = track; + + raw_cmd->flags = FD_RAW_WRITE | FD_RAW_INTR | FD_RAW_SPIN | + FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK; + raw_cmd->rate = _floppy->rate & 0x43; + raw_cmd->cmd_count = NR_F; + COMMAND = FM_MODE(_floppy,FD_FORMAT); + DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy,format_req.head); + F_SIZECODE = FD_SIZECODE(_floppy); + F_SECT_PER_TRACK = _floppy->sect << 2 >> F_SIZECODE; + F_GAP = _floppy->fmt_gap; + F_FILL = FD_FILL_BYTE; + + raw_cmd->kernel_data = floppy_track_buffer; + raw_cmd->length = 4 * F_SECT_PER_TRACK; + + /* allow for about 30ms for data transport per track */ + head_shift = (F_SECT_PER_TRACK + 5) / 6; + + /* a ``cylinder'' is two tracks plus a little stepping time */ + track_shift = 2 * head_shift + 3; + + /* position of logical sector 1 on this track */ + n = (track_shift * format_req.track + head_shift * format_req.head) + % F_SECT_PER_TRACK; + + /* determine interleave */ + il = 1; + if (_floppy->fmt_gap < 0x22) + il++; + + /* initialize field */ + for (count = 0; count < F_SECT_PER_TRACK; ++count) { + here[count].track = format_req.track; + here[count].head = format_req.head; + here[count].sect = 0; + here[count].size = F_SIZECODE; + } + /* place logical sectors */ + for (count = 1; count <= F_SECT_PER_TRACK; ++count) { + here[n].sect = count; + n = (n+il) % F_SECT_PER_TRACK; + if (here[n].sect) { /* sector busy, find next free sector */ + ++n; + if (n>= F_SECT_PER_TRACK) { + n-=F_SECT_PER_TRACK; + while (here[n].sect) ++n; + } + } + } +} + +static void redo_format(void) +{ + buffer_track = -1; + setup_format_params(format_req.track << STRETCH(_floppy)); + floppy_start(); +#ifdef DEBUGT + debugt("queue format request"); +#endif +} + +static struct cont_t format_cont={ + format_interrupt, + redo_format, + bad_flp_intr, + generic_done }; + +static int do_format(kdev_t device, struct format_descr *tmp_format_req) +{ + int ret; + int drive=DRIVE(device); + + LOCK_FDC(drive,1); + set_floppy(device); + if (!_floppy || + _floppy->track > DP->tracks || + tmp_format_req->track >= _floppy->track || + tmp_format_req->head >= _floppy->head || + (_floppy->sect << 2) % (1 << FD_SIZECODE(_floppy)) || + !_floppy->fmt_gap) { + process_fd_request(); + return -EINVAL; + } + format_req = *tmp_format_req; + format_errors = 0; + cont = &format_cont; + errors = &format_errors; + IWAIT(redo_format); + process_fd_request(); + return ret; +} + +/* + * Buffer read/write and support + * ============================= + */ + +/* new request_done. 
Can handle physical sectors which are smaller than a + * logical buffer */ +static void request_done(int uptodate) +{ + int block; + + probing = 0; + reschedule_timeout(MAXTIMEOUT, "request done %d", uptodate); + + if (!CURRENT){ + DPRINT("request list destroyed in floppy request done\n"); + return; + } + + if (uptodate){ + /* maintain values for invalidation on geometry + * change */ + block = current_count_sectors + CURRENT->sector; + INFBOUND(DRS->maxblock, block); + if (block > _floppy->sect) + DRS->maxtrack = 1; + + /* unlock chained buffers */ + while (current_count_sectors && CURRENT && + current_count_sectors >= CURRENT->current_nr_sectors){ + current_count_sectors -= CURRENT->current_nr_sectors; + CURRENT->nr_sectors -= CURRENT->current_nr_sectors; + CURRENT->sector += CURRENT->current_nr_sectors; + end_request(1); + } + if (current_count_sectors && CURRENT){ + /* "unlock" last subsector */ + CURRENT->buffer += current_count_sectors <<9; + CURRENT->current_nr_sectors -= current_count_sectors; + CURRENT->nr_sectors -= current_count_sectors; + CURRENT->sector += current_count_sectors; + return; + } + + if (current_count_sectors && !CURRENT) + DPRINT("request list destroyed in floppy request done\n"); + + } else { + if (CURRENT->cmd == WRITE) { + /* record write error information */ + DRWE->write_errors++; + if (DRWE->write_errors == 1) { + DRWE->first_error_sector = CURRENT->sector; + DRWE->first_error_generation = DRS->generation; + } + DRWE->last_error_sector = CURRENT->sector; + DRWE->last_error_generation = DRS->generation; + } + end_request(0); + } +} + +/* Interrupt handler evaluating the result of the r/w operation */ +static void rw_interrupt(void) +{ + int nr_sectors, ssize, eoc; + + if (!DRS->first_read_date) + DRS->first_read_date = jiffies; + + nr_sectors = 0; + CODE2SIZE; + + if(ST1 & ST1_EOC) + eoc = 1; + else + eoc = 0; + nr_sectors = ((R_TRACK-TRACK)*_floppy->head+R_HEAD-HEAD) * + _floppy->sect + ((R_SECTOR-SECTOR+eoc) << SIZECODE >> 2) - + (sector_t % _floppy->sect) % ssize; + +#ifdef FLOPPY_SANITY_CHECK + if (nr_sectors > current_count_sectors + ssize - + (current_count_sectors + sector_t) % ssize + + sector_t % ssize){ + DPRINT("long rw: %x instead of %lx\n", + nr_sectors, current_count_sectors); + printk("rs=%d s=%d\n", R_SECTOR, SECTOR); + printk("rh=%d h=%d\n", R_HEAD, HEAD); + printk("rt=%d t=%d\n", R_TRACK, TRACK); + printk("spt=%d st=%d ss=%d\n", SECT_PER_TRACK, + sector_t, ssize); + } +#endif + INFBOUND(nr_sectors,0); + SUPBOUND(current_count_sectors, nr_sectors); + + switch (interpret_errors()){ + case 2: + cont->redo(); + return; + case 1: + if (!current_count_sectors){ + cont->error(); + cont->redo(); + return; + } + break; + case 0: + if (!current_count_sectors){ + cont->redo(); + return; + } + current_type[current_drive] = _floppy; + floppy_sizes[TOMINOR(current_drive) ]= _floppy->size>>1; + break; + } + + if (probing) { + if (DP->flags & FTD_MSG) + DPRINT("Auto-detected floppy type %s in fd%d\n", + _floppy->name,current_drive); + current_type[current_drive] = _floppy; + floppy_sizes[TOMINOR(current_drive)] = _floppy->size >> 1; + probing = 0; + } + + if (CT(COMMAND) != FD_READ || + raw_cmd->kernel_data == CURRENT->buffer){ + /* transfer directly from buffer */ + cont->done(1); + } else if (CT(COMMAND) == FD_READ){ + buffer_track = raw_cmd->track; + buffer_drive = current_drive; + INFBOUND(buffer_max, nr_sectors + sector_t); + } + cont->redo(); +} + +/* Compute maximal contiguous buffer size. 
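+ * That is, walk CURRENT->bh via b_reqnext for as long as each buffer's
+ * b_data sits directly after the previous one, and return the total in
+ * 512-byte sectors.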
*/ +static int buffer_chain_size(void) +{ + struct buffer_head *bh; + int size; + char *base; + + base = CURRENT->buffer; + size = CURRENT->current_nr_sectors << 9; + bh = CURRENT->bh; + + if (bh){ + bh = bh->b_reqnext; + while (bh && bh->b_data == base + size){ + size += bh->b_size; + bh = bh->b_reqnext; + } + } + return size >> 9; +} + +/* Compute the maximal transfer size */ +static int transfer_size(int ssize, int max_sector, int max_size) +{ + SUPBOUND(max_sector, sector_t + max_size); + + /* alignment */ + max_sector -= (max_sector % _floppy->sect) % ssize; + + /* transfer size, beginning not aligned */ + current_count_sectors = max_sector - sector_t ; + + return max_sector; +} + +/* + * Move data from/to the track buffer to/from the buffer cache. + */ +static void copy_buffer(int ssize, int max_sector, int max_sector_2) +{ + int remaining; /* number of transferred 512-byte sectors */ + struct buffer_head *bh; + char *buffer, *dma_buffer; + int size; + + max_sector = transfer_size(ssize, + minimum(max_sector, max_sector_2), + CURRENT->nr_sectors); + + if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE && + buffer_max > sector_t + CURRENT->nr_sectors) + current_count_sectors = minimum(buffer_max - sector_t, + CURRENT->nr_sectors); + + remaining = current_count_sectors << 9; +#ifdef FLOPPY_SANITY_CHECK + if ((remaining >> 9) > CURRENT->nr_sectors && + CT(COMMAND) == FD_WRITE){ + DPRINT("in copy buffer\n"); + printk("current_count_sectors=%ld\n", current_count_sectors); + printk("remaining=%d\n", remaining >> 9); + printk("CURRENT->nr_sectors=%ld\n",CURRENT->nr_sectors); + printk("CURRENT->current_nr_sectors=%ld\n", + CURRENT->current_nr_sectors); + printk("max_sector=%d\n", max_sector); + printk("ssize=%d\n", ssize); + } +#endif + + buffer_max = maximum(max_sector, buffer_max); + + dma_buffer = floppy_track_buffer + ((sector_t - buffer_min) << 9); + + bh = CURRENT->bh; + size = CURRENT->current_nr_sectors << 9; + buffer = CURRENT->buffer; + + while (remaining > 0){ + SUPBOUND(size, remaining); +#ifdef FLOPPY_SANITY_CHECK + if (dma_buffer + size > + floppy_track_buffer + (max_buffer_sectors << 10) || + dma_buffer < floppy_track_buffer){ + DPRINT("buffer overrun in copy buffer %d\n", + (int) ((floppy_track_buffer - dma_buffer) >>9)); + printk("sector_t=%d buffer_min=%d\n", + sector_t, buffer_min); + printk("current_count_sectors=%ld\n", + current_count_sectors); + if (CT(COMMAND) == FD_READ) + printk("read\n"); + if (CT(COMMAND) == FD_READ) + printk("write\n"); + break; + } + if (((unsigned long)buffer) % 512) + DPRINT("%p buffer not aligned\n", buffer); +#endif + if (CT(COMMAND) == FD_READ) + memcpy(buffer, dma_buffer, size); + else + memcpy(dma_buffer, buffer, size); + remaining -= size; + if (!remaining) + break; + + dma_buffer += size; + bh = bh->b_reqnext; +#ifdef FLOPPY_SANITY_CHECK + if (!bh){ + DPRINT("bh=null in copy buffer after copy\n"); + break; + } +#endif + size = bh->b_size; + buffer = bh->b_data; + } +#ifdef FLOPPY_SANITY_CHECK + if (remaining){ + if (remaining > 0) + max_sector -= remaining >> 9; + DPRINT("weirdness: remaining %d\n", remaining>>9); + } +#endif +} + +/* + * Formulate a read/write request. + * this routine decides where to load the data (directly to buffer, or to + * tmp floppy area), how much data to load (the size of the buffer, the whole + * track, or a single sector) + * All floppy_track_buffer handling goes in here. If we ever add track buffer + * allocation on the fly, it should be done here. No other part should need + * modification. 
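+ *
+ * Return convention (as used by redo_fd_request): 0 means the request is
+ * invalid and must be failed, 1 means it could be completed without
+ * issuing a command (e.g. a read satisfied from the track buffer), and
+ * 2 means raw_cmd has been set up and a real transfer must be started.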
+ */ + +static int make_raw_rw_request(void) +{ + int aligned_sector_t; + int max_sector, max_size, tracksize, ssize; + + set_fdc(DRIVE(CURRENT->rq_dev)); + + raw_cmd = &default_raw_cmd; + raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_DISK | + FD_RAW_NEED_SEEK; + raw_cmd->cmd_count = NR_RW; + if (CURRENT->cmd == READ){ + raw_cmd->flags |= FD_RAW_READ; + COMMAND = FM_MODE(_floppy,FD_READ); + } else if (CURRENT->cmd == WRITE){ + raw_cmd->flags |= FD_RAW_WRITE; + COMMAND = FM_MODE(_floppy,FD_WRITE); + } else { + DPRINT("make_raw_rw_request: unknown command\n"); + return 0; + } + + max_sector = _floppy->sect * _floppy->head; + + TRACK = CURRENT->sector / max_sector; + sector_t = CURRENT->sector % max_sector; + if (_floppy->track && TRACK >= _floppy->track) + return 0; + HEAD = sector_t / _floppy->sect; + + if (((_floppy->stretch & FD_SWAPSIDES) || TESTF(FD_NEED_TWADDLE)) && + sector_t < _floppy->sect) + max_sector = _floppy->sect; + + /* 2M disks have phantom sectors on the first track */ + if ((_floppy->rate & FD_2M) && (!TRACK) && (!HEAD)){ + max_sector = 2 * _floppy->sect / 3; + if (sector_t >= max_sector){ + current_count_sectors = minimum(_floppy->sect - sector_t, + CURRENT->nr_sectors); + return 1; + } + SIZECODE = 2; + } else + SIZECODE = FD_SIZECODE(_floppy); + raw_cmd->rate = _floppy->rate & 0x43; + if ((_floppy->rate & FD_2M) && + (TRACK || HEAD) && + raw_cmd->rate == 2) + raw_cmd->rate = 1; + + if (SIZECODE) + SIZECODE2 = 0xff; + else + SIZECODE2 = 0x80; + raw_cmd->track = TRACK << STRETCH(_floppy); + DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy,HEAD); + GAP = _floppy->gap; + CODE2SIZE; + SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE; + SECTOR = ((sector_t % _floppy->sect) << 2 >> SIZECODE) + 1; + tracksize = _floppy->sect - _floppy->sect % ssize; + if (tracksize < _floppy->sect){ + SECT_PER_TRACK ++; + if (tracksize <= sector_t % _floppy->sect) + SECTOR--; + while (tracksize <= sector_t % _floppy->sect){ + while(tracksize + ssize > _floppy->sect){ + SIZECODE--; + ssize >>= 1; + } + SECTOR++; SECT_PER_TRACK ++; + tracksize += ssize; + } + max_sector = HEAD * _floppy->sect + tracksize; + } else if (!TRACK && !HEAD && !(_floppy->rate & FD_2M) && probing) + max_sector = _floppy->sect; + + aligned_sector_t = sector_t - (sector_t % _floppy->sect) % ssize; + max_size = CURRENT->nr_sectors; + if ((raw_cmd->track == buffer_track) && + (current_drive == buffer_drive) && + (sector_t >= buffer_min) && (sector_t < buffer_max)) { + /* data already in track buffer */ + if (CT(COMMAND) == FD_READ) { + copy_buffer(1, max_sector, buffer_max); + return 1; + } + } else if (aligned_sector_t != sector_t || CURRENT->nr_sectors < ssize){ + if (CT(COMMAND) == FD_WRITE){ + if (sector_t + CURRENT->nr_sectors > ssize && + sector_t + CURRENT->nr_sectors < ssize + ssize) + max_size = ssize + ssize; + else + max_size = ssize; + } + raw_cmd->flags &= ~FD_RAW_WRITE; + raw_cmd->flags |= FD_RAW_READ; + COMMAND = FM_MODE(_floppy,FD_READ); + } else if ((unsigned long)CURRENT->buffer < MAX_DMA_ADDRESS) { + unsigned long dma_limit; + int direct, indirect; + + indirect= transfer_size(ssize,max_sector,max_buffer_sectors*2) - + sector_t; + + /* + * Do NOT use minimum() here---MAX_DMA_ADDRESS is 64 bits wide + * on a 64 bit machine! 
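+ * (minimum() takes plain ints and would truncate that difference; hence
+ * the explicit unsigned long comparison against dma_limit below.)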
+ */ + max_size = buffer_chain_size(); + dma_limit = (MAX_DMA_ADDRESS - ((unsigned long) CURRENT->buffer)) >> 9; + if ((unsigned long) max_size > dma_limit) { + max_size = dma_limit; + } + /* 64 kb boundaries */ + if (CROSS_64KB(CURRENT->buffer, max_size << 9)) + max_size = (K_64 - ((long) CURRENT->buffer) % K_64)>>9; + direct = transfer_size(ssize,max_sector,max_size) - sector_t; + /* + * We try to read tracks, but if we get too many errors, we + * go back to reading just one sector at a time. + * + * This means we should be able to read a sector even if there + * are other bad sectors on this track. + */ + if (!direct || + (indirect * 2 > direct * 3 && + *errors < DP->max_errors.read_track && + /*!TESTF(FD_NEED_TWADDLE) &&*/ + ((!probing || (DP->read_track&(1<<DRS->probed_format)))))){ + max_size = CURRENT->nr_sectors; + } else { + raw_cmd->kernel_data = CURRENT->buffer; + raw_cmd->length = current_count_sectors << 9; + if (raw_cmd->length == 0){ + DPRINT("zero dma transfer attempted from make_raw_request\n"); + DPRINT("indirect=%d direct=%d sector_t=%d", + indirect, direct, sector_t); + return 0; + } + return 2; + } + } + + if (CT(COMMAND) == FD_READ) + max_size = max_sector; /* unbounded */ + + /* claim buffer track if needed */ + if (buffer_track != raw_cmd->track || /* bad track */ + buffer_drive !=current_drive || /* bad drive */ + sector_t > buffer_max || + sector_t < buffer_min || + ((CT(COMMAND) == FD_READ || + (aligned_sector_t == sector_t && CURRENT->nr_sectors >= ssize))&& + max_sector > 2 * max_buffer_sectors + buffer_min && + max_size + sector_t > 2 * max_buffer_sectors + buffer_min) + /* not enough space */){ + buffer_track = -1; + buffer_drive = current_drive; + buffer_max = buffer_min = aligned_sector_t; + } + raw_cmd->kernel_data = floppy_track_buffer + + ((aligned_sector_t-buffer_min)<<9); + + if (CT(COMMAND) == FD_WRITE){ + /* copy write buffer to track buffer. 
+ * if we get here, we know that the write + * is either aligned or the data already in the buffer + * (buffer will be overwritten) */ +#ifdef FLOPPY_SANITY_CHECK + if (sector_t != aligned_sector_t && buffer_track == -1) + DPRINT("internal error offset !=0 on write\n"); +#endif + buffer_track = raw_cmd->track; + buffer_drive = current_drive; + copy_buffer(ssize, max_sector, 2*max_buffer_sectors+buffer_min); + } else + transfer_size(ssize, max_sector, + 2*max_buffer_sectors+buffer_min-aligned_sector_t); + + /* round up current_count_sectors to get dma xfer size */ + raw_cmd->length = sector_t+current_count_sectors-aligned_sector_t; + raw_cmd->length = ((raw_cmd->length -1)|(ssize-1))+1; + raw_cmd->length <<= 9; +#ifdef FLOPPY_SANITY_CHECK + if ((raw_cmd->length < current_count_sectors << 9) || + (raw_cmd->kernel_data != CURRENT->buffer && + CT(COMMAND) == FD_WRITE && + (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max || + aligned_sector_t < buffer_min)) || + raw_cmd->length % (128 << SIZECODE) || + raw_cmd->length <= 0 || current_count_sectors <= 0){ + DPRINT("fractionary current count b=%lx s=%lx\n", + raw_cmd->length, current_count_sectors); + if (raw_cmd->kernel_data != CURRENT->buffer) + printk("addr=%d, length=%ld\n", + (int) ((raw_cmd->kernel_data - + floppy_track_buffer) >> 9), + current_count_sectors); + printk("st=%d ast=%d mse=%d msi=%d\n", + sector_t, aligned_sector_t, max_sector, max_size); + printk("ssize=%x SIZECODE=%d\n", ssize, SIZECODE); + printk("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n", + COMMAND, SECTOR, HEAD, TRACK); + printk("buffer drive=%d\n", buffer_drive); + printk("buffer track=%d\n", buffer_track); + printk("buffer_min=%d\n", buffer_min); + printk("buffer_max=%d\n", buffer_max); + return 0; + } + + if (raw_cmd->kernel_data != CURRENT->buffer){ + if (raw_cmd->kernel_data < floppy_track_buffer || + current_count_sectors < 0 || + raw_cmd->length < 0 || + raw_cmd->kernel_data + raw_cmd->length > + floppy_track_buffer + (max_buffer_sectors << 10)){ + DPRINT("buffer overrun in schedule dma\n"); + printk("sector_t=%d buffer_min=%d current_count=%ld\n", + sector_t, buffer_min, + raw_cmd->length >> 9); + printk("current_count_sectors=%ld\n", + current_count_sectors); + if (CT(COMMAND) == FD_READ) + printk("read\n"); + if (CT(COMMAND) == FD_READ) + printk("write\n"); + return 0; + } + } else if (raw_cmd->length > CURRENT->nr_sectors << 9 || + current_count_sectors > CURRENT->nr_sectors){ + DPRINT("buffer overrun in direct transfer\n"); + return 0; + } else if (raw_cmd->length < current_count_sectors << 9){ + DPRINT("more sectors than bytes\n"); + printk("bytes=%ld\n", raw_cmd->length >> 9); + printk("sectors=%ld\n", current_count_sectors); + } + if (raw_cmd->length == 0){ + DPRINT("zero dma transfer attempted from make_raw_request\n"); + return 0; + } +#endif + return 2; +} + +static void redo_fd_request(void) +{ +#define REPEAT {request_done(0); continue; } + kdev_t device; + int tmp; + + lastredo = jiffies; + if (current_drive < N_DRIVE) + floppy_off(current_drive); + + if (CURRENT && CURRENT->rq_status == RQ_INACTIVE){ + CLEAR_INTR; + unlock_fdc(); + return; + } + + while(1){ + if (!CURRENT) { + CLEAR_INTR; + unlock_fdc(); + return; + } + if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) + panic(DEVICE_NAME ": request list destroyed"); + if (CURRENT->bh && !buffer_locked(CURRENT->bh)) + panic(DEVICE_NAME ": block not locked"); + + device = CURRENT->rq_dev; + set_fdc(DRIVE(device)); + reschedule_timeout(CURRENTD, "redo fd request", 0); + + set_floppy(device); + raw_cmd = & 
default_raw_cmd; + raw_cmd->flags = 0; + if (start_motor(redo_fd_request)) return; + disk_change(current_drive); + if (test_bit(current_drive, &fake_change) || + TESTF(FD_DISK_CHANGED)){ + DPRINT("disk absent or changed during operation\n"); + REPEAT; + } + if (!_floppy) { /* Autodetection */ + if (!probing){ + DRS->probed_format = 0; + if (next_valid_format()){ + DPRINT("no autodetectable formats\n"); + _floppy = NULL; + REPEAT; + } + } + probing = 1; + _floppy = floppy_type+DP->autodetect[DRS->probed_format]; + } else + probing = 0; + errors = & (CURRENT->errors); + tmp = make_raw_rw_request(); + if (tmp < 2){ + request_done(tmp); + continue; + } + + if (TESTF(FD_NEED_TWADDLE)) + twaddle(); + floppy_tq.routine = (void *)(void *) floppy_start; + queue_task(&floppy_tq, &tq_immediate); + mark_bh(IMMEDIATE_BH); +#ifdef DEBUGT + debugt("queue fd request"); +#endif + return; + } +#undef REPEAT +} + +static struct cont_t rw_cont={ + rw_interrupt, + redo_fd_request, + bad_flp_intr, + request_done }; + +static struct tq_struct request_tq = +{ 0, 0, (void *) (void *) redo_fd_request, 0 }; + +static void process_fd_request(void) +{ + cont = &rw_cont; + queue_task(&request_tq, &tq_immediate); + mark_bh(IMMEDIATE_BH); +} + +static void do_fd_request(void) +{ + sti(); + if (fdc_busy){ + /* fdc busy, this new request will be treated when the + current one is done */ + is_alive("do fd request, old request running"); + return; + } + lock_fdc(MAXTIMEOUT,0); + process_fd_request(); + is_alive("do fd request"); +} + +static struct cont_t poll_cont={ + success_and_wakeup, + floppy_ready, + generic_failure, + generic_done }; + +static int poll_drive(int interruptible, int flag) +{ + int ret; + /* no auto-sense, just clear dcl */ + raw_cmd = &default_raw_cmd; + raw_cmd->flags= flag; + raw_cmd->track=0; + raw_cmd->cmd_count=0; + cont = &poll_cont; +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG){ + DPRINT("setting NEWCHANGE in poll_drive\n"); + } +#endif + SETF(FD_DISK_NEWCHANGE); + WAIT(floppy_ready); + return ret; +} + +/* + * User triggered reset + * ==================== + */ + +static void reset_intr(void) +{ + printk("weird, reset interrupt called\n"); +} + +static struct cont_t reset_cont={ + reset_intr, + success_and_wakeup, + generic_failure, + generic_done }; + +static int user_reset_fdc(int drive, int arg, int interruptible) +{ + int ret; + + ret=0; + LOCK_FDC(drive,interruptible); + if (arg == FD_RESET_ALWAYS) + FDCS->reset=1; + if (FDCS->reset){ + cont = &reset_cont; + WAIT(reset_fdc); + } + process_fd_request(); + return ret; +} + +/* + * Misc Ioctl's and support + * ======================== + */ +static int fd_copyout(void *param, const void *address, int size) +{ + int ret; + + ECALL(verify_area(VERIFY_WRITE,param,size)); + memcpy_tofs(param,(void *) address, size); + return 0; +} + +static int fd_copyin(void *param, void *address, int size) +{ + int ret; + + ECALL(verify_area(VERIFY_READ,param,size)); + memcpy_fromfs((void *) address, param, size); + return 0; +} + +#define COPYOUT(x) ECALL(fd_copyout((void *)param, &(x), sizeof(x))) +#define COPYIN(x) ECALL(fd_copyin((void *)param, &(x), sizeof(x))) + +static inline const char *drive_name(int type, int drive) +{ + struct floppy_struct *floppy; + + if (type) + floppy = floppy_type + type; + else { + if (UDP->native_format) + floppy = floppy_type + UDP->native_format; + else + return "(null)"; + } + if (floppy->name) + return floppy->name; + else + return "(null)"; +} + + +/* raw commands */ +static void raw_cmd_done(int flag) +{ + int i; + + if 
(!flag) { + raw_cmd->flags |= FD_RAW_FAILURE; + raw_cmd->flags |= FD_RAW_HARDFAILURE; + } else { + raw_cmd->reply_count = inr; + for (i=0; i< raw_cmd->reply_count; i++) + raw_cmd->reply[i] = reply_buffer[i]; + + if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE)) + raw_cmd->length = fd_get_dma_residue(); + + if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) && + (!raw_cmd->reply_count || (raw_cmd->reply[0] & 0xc0))) + raw_cmd->flags |= FD_RAW_FAILURE; + + if (disk_change(current_drive)) + raw_cmd->flags |= FD_RAW_DISK_CHANGE; + else + raw_cmd->flags &= ~FD_RAW_DISK_CHANGE; + if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER) + motor_off_callback(current_drive); + + if (raw_cmd->next && + (!(raw_cmd->flags & FD_RAW_FAILURE) || + !(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) && + ((raw_cmd->flags & FD_RAW_FAILURE) || + !(raw_cmd->flags &FD_RAW_STOP_IF_SUCCESS))) { + raw_cmd = raw_cmd->next; + return; + } + } + generic_done(flag); +} + + +static struct cont_t raw_cmd_cont={ + success_and_wakeup, + floppy_start, + generic_failure, + raw_cmd_done +}; + +static inline int raw_cmd_copyout(int cmd, char *param, + struct floppy_raw_cmd *ptr) +{ + struct old_floppy_raw_cmd old_raw_cmd; + int ret; + + while(ptr) { + if (cmd == OLDFDRAWCMD) { + old_raw_cmd.flags = ptr->flags; + old_raw_cmd.data = ptr->data; + old_raw_cmd.length = ptr->length; + old_raw_cmd.rate = ptr->rate; + old_raw_cmd.reply_count = ptr->reply_count; + memcpy(old_raw_cmd.reply, ptr->reply, 7); + COPYOUT(old_raw_cmd); + param += sizeof(old_raw_cmd); + } else { + COPYOUT(*ptr); + param += sizeof(struct floppy_raw_cmd); + } + + if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length){ + if (ptr->length>=0 && ptr->length<=ptr->buffer_length) + ECALL(fd_copyout(ptr->data, + ptr->kernel_data, + ptr->buffer_length - + ptr->length)); + } + ptr = ptr->next; + } + return 0; +} + + +static void raw_cmd_free(struct floppy_raw_cmd **ptr) +{ + struct floppy_raw_cmd *next,*this; + + this = *ptr; + *ptr = 0; + while(this) { + if (this->buffer_length) { + fd_dma_mem_free((unsigned long)this->kernel_data, + this->buffer_length); + this->buffer_length = 0; + } + next = this->next; + kfree(this); + this = next; + } +} + + +static inline int raw_cmd_copyin(int cmd, char *param, + struct floppy_raw_cmd **rcmd) +{ + struct floppy_raw_cmd *ptr; + struct old_floppy_raw_cmd old_raw_cmd; + int ret; + int i; + + *rcmd = 0; + while(1) { + ptr = (struct floppy_raw_cmd *) + kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER); + if (!ptr) + return -ENOMEM; + *rcmd = ptr; + if (cmd == OLDFDRAWCMD){ + COPYIN(old_raw_cmd); + ptr->flags = old_raw_cmd.flags; + ptr->data = old_raw_cmd.data; + ptr->length = old_raw_cmd.length; + ptr->rate = old_raw_cmd.rate; + ptr->cmd_count = old_raw_cmd.cmd_count; + ptr->track = old_raw_cmd.track; + ptr->phys_length = 0; + ptr->next = 0; + ptr->buffer_length = 0; + memcpy(ptr->cmd, old_raw_cmd.cmd, 9); + param += sizeof(struct old_floppy_raw_cmd); + if (ptr->cmd_count > 9) + return -EINVAL; + } else { + COPYIN(*ptr); + ptr->next = 0; + ptr->buffer_length = 0; + param += sizeof(struct floppy_raw_cmd); + if (ptr->cmd_count > 33) + /* the command may now also take up the space + * initially intended for the reply & the + * reply count. Needed for long 82078 commands + * such as RESTORE, which takes ... 17 command + * bytes. Murphy's law #137: When you reserve + * 16 bytes for a structure, you'll one day + * discover that you really need 17... 
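+ * (In this struct layout the 16-byte cmd[] array is immediately followed + * by the one-byte reply_count and the 16-byte reply[] array, so a long + * command may occupy up to 16 + 1 + 16 = 33 bytes, the limit checked above.)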
+ */ + return -EINVAL; + } + + for (i=0; i< 16; i++) + ptr->reply[i] = 0; + ptr->resultcode = 0; + ptr->kernel_data = 0; + + if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) { + if (ptr->length <= 0) + return -EINVAL; + ptr->kernel_data =(char*)fd_dma_mem_alloc(ptr->length); + if (!ptr->kernel_data) + return -ENOMEM; + ptr->buffer_length = ptr->length; + } + if ( ptr->flags & FD_RAW_READ ) + ECALL( verify_area( VERIFY_WRITE, ptr->data, + ptr->length )); + if (ptr->flags & FD_RAW_WRITE) + ECALL(fd_copyin(ptr->data, ptr->kernel_data, + ptr->length)); + rcmd = & (ptr->next); + if (!(ptr->flags & FD_RAW_MORE)) + return 0; + ptr->rate &= 0x43; + } +} + + +static int raw_cmd_ioctl(int cmd, void *param) +{ + int drive, ret, ret2; + struct floppy_raw_cmd *my_raw_cmd; + + if (FDCS->rawcmd <= 1) + FDCS->rawcmd = 1; + for (drive= 0; drive < N_DRIVE; drive++){ + if (FDC(drive) != fdc) + continue; + if (drive == current_drive){ + if (UDRS->fd_ref > 1){ + FDCS->rawcmd = 2; + break; + } + } else if (UDRS->fd_ref){ + FDCS->rawcmd = 2; + break; + } + } + + if (FDCS->reset) + return -EIO; + + ret = raw_cmd_copyin(cmd, param, &my_raw_cmd); + if (ret) { + raw_cmd_free(&my_raw_cmd); + return ret; + } + + raw_cmd = my_raw_cmd; + cont = &raw_cmd_cont; + ret=wait_til_done(floppy_start,1); +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG){ + DPRINT("calling disk change from raw_cmd ioctl\n"); + } +#endif + + if (ret != -EINTR && FDCS->reset) + ret = -EIO; + + DRS->track = NO_TRACK; + + ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd); + if (!ret) + ret = ret2; + raw_cmd_free(&my_raw_cmd); + return ret; +} + +static int invalidate_drive(kdev_t rdev) +{ + /* invalidate the buffer track to force a reread */ + set_bit(DRIVE(rdev), &fake_change); + process_fd_request(); + check_disk_change(rdev); + return 0; +} + + +static inline void clear_write_error(int drive) +{ + CLEARSTRUCT(UDRWE); +} + +static inline int set_geometry(unsigned int cmd, struct floppy_struct *g, + int drive, int type, kdev_t device) +{ + int cnt; + + /* sanity checking for parameters.*/ + if (g->sect <= 0 || + g->head <= 0 || + g->track <= 0 || + g->track > UDP->tracks>>STRETCH(g) || + /* check if reserved bits are set */ + (g->stretch&~(FD_STRETCH|FD_SWAPSIDES)) != 0) + return -EINVAL; + if (type){ + if (!suser()) + return -EPERM; + LOCK_FDC(drive,1); + for (cnt = 0; cnt < N_DRIVE; cnt++){ + if (ITYPE(drive_state[cnt].fd_device) == type && + drive_state[cnt].fd_ref) + set_bit(drive, &fake_change); + } + floppy_type[type] = *g; + floppy_type[type].name="user format"; + for (cnt = type << 2; cnt < (type << 2) + 4; cnt++) + floppy_sizes[cnt]= floppy_sizes[cnt+0x80]= + floppy_type[type].size>>1; + process_fd_request(); + for (cnt = 0; cnt < N_DRIVE; cnt++){ + if (ITYPE(drive_state[cnt].fd_device) == type && + drive_state[cnt].fd_ref) + check_disk_change( + MKDEV(FLOPPY_MAJOR, + drive_state[cnt].fd_device)); + } + } else { + LOCK_FDC(drive,1); + if (cmd != FDDEFPRM) + /* notice a disk change immediately, else + * we loose our settings immediately*/ + CALL(poll_drive(1, FD_RAW_NEED_DISK)); + user_params[drive] = *g; + if (buffer_drive == drive) + SUPBOUND(buffer_max, user_params[drive].sect); + current_type[drive] = &user_params[drive]; + floppy_sizes[drive] = user_params[drive].size >> 1; + if (cmd == FDDEFPRM) + DRS->keep_data = -1; + else + DRS->keep_data = 1; + /* invalidation. Invalidate only when needed, i.e. + * when there are already sectors in the buffer cache + * whose number will change. 
This is useful, because + * mtools often changes the geometry of the disk after + * looking at the boot block */ + if (DRS->maxblock > user_params[drive].sect || DRS->maxtrack) + invalidate_drive(device); + else + process_fd_request(); + } + return 0; +} + +/* handle obsolete ioctl's */ +static struct translation_entry { + int newcmd; + int oldcmd; + int oldsize; /* size of 0x00xx-style ioctl. Reflects old structures, thus + * use numeric values. NO SIZEOFS */ +} translation_table[]= { + {FDCLRPRM, 0, 0}, + {FDSETPRM, 1, 28}, + {FDDEFPRM, 2, 28}, + {FDGETPRM, 3, 28}, + {FDMSGON, 4, 0}, + {FDMSGOFF, 5, 0}, + {FDFMTBEG, 6, 0}, + {FDFMTTRK, 7, 12}, + {FDFMTEND, 8, 0}, + {FDSETEMSGTRESH, 10, 0}, + {FDFLUSH, 11, 0}, + {FDSETMAXERRS, 12, 20}, + {OLDFDRAWCMD, 30, 0}, + {FDGETMAXERRS, 14, 20}, + {FDGETDRVTYP, 16, 16}, + {FDSETDRVPRM, 20, 88}, + {FDGETDRVPRM, 21, 88}, + {FDGETDRVSTAT, 22, 52}, + {FDPOLLDRVSTAT, 23, 52}, + {FDRESET, 24, 0}, + {FDGETFDCSTAT, 25, 40}, + {FDWERRORCLR, 27, 0}, + {FDWERRORGET, 28, 24}, + {FDRAWCMD, 0, 0}, + {FDEJECT, 0, 0}, + {FDTWADDLE, 40, 0} }; + +static inline int normalize_0x02xx_ioctl(int *cmd, int *size) +{ + int i; + + for (i=0; i < ARRAY_SIZE(translation_table); i++) { + if ((*cmd & 0xffff) == (translation_table[i].newcmd & 0xffff)){ + *size = _IOC_SIZE(*cmd); + *cmd = translation_table[i].newcmd; + if (*size > _IOC_SIZE(*cmd)) { + printk("ioctl not yet supported\n"); + return -EFAULT; + } + return 0; + } + } + return -EINVAL; +} + +static inline int xlate_0x00xx_ioctl(int *cmd, int *size) +{ + int i; + /* old ioctls' for kernels <= 1.3.33 */ + /* When the next even release will come around, we'll start + * warning against these. + * When the next odd release will come around, we'll fail with + * -EINVAL */ + if(strcmp(system_utsname.version, "1.4.0") >= 0) + printk("obsolete floppy ioctl %x\n", *cmd); + if((system_utsname.version[0] == '1' && + strcmp(system_utsname.version, "1.5.0") >= 0) || + (system_utsname.version[0] >= '2' && + strcmp(system_utsname.version, "2.1.0") >= 0)) + return -EINVAL; + for (i=0; i < ARRAY_SIZE(translation_table); i++) { + if (*cmd == translation_table[i].oldcmd) { + *size = translation_table[i].oldsize; + *cmd = translation_table[i].newcmd; + return 0; + } + } + return -EINVAL; +} + +static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long param) +{ +#define IOCTL_MODE_BIT 8 +#define OPEN_WRITE_BIT 16 +#define IOCTL_ALLOWED (filp && (filp->f_mode & IOCTL_MODE_BIT)) +#define OUT(c,x) case c: outparam = (const char *) (x); break +#define IN(c,x,tag) case c: *(x) = inparam. tag ; return 0 + + int i,drive,type; + kdev_t device; + int ret; + int size; + union inparam { + struct floppy_struct g; /* geometry */ + struct format_descr f; + struct floppy_max_errors max_errors; + struct floppy_drive_params dp; + } inparam; /* parameters coming from user space */ + const char *outparam; /* parameters passed back to user space */ + + device = inode->i_rdev; + switch (cmd) { + RO_IOCTLS(device,param); + } + type = TYPE(device); + drive = DRIVE(device); + + /* convert compatibility eject ioctls into floppy eject ioctl. 
+ * We do this in order to provide a means to eject floppy disks before + * installing the new fdutils package */ + if(cmd == CDROMEJECT || /* CD-ROM eject */ + cmd == 0x6470 /* SunOS floppy eject */) { + DPRINT("obsolete eject ioctl\n"); + DPRINT("please use floppycontrol --eject\n"); + cmd = FDEJECT; + } + + /* convert the old style command into a new style command */ + if ((cmd & 0xff00) == 0x0200) { + ECALL(normalize_0x02xx_ioctl(&cmd, &size)); + } else if ((cmd & 0xff00) == 0x0000) { + ECALL(xlate_0x00xx_ioctl(&cmd, &size)); + } else + return -EINVAL; + + /* permission checks */ + if (((cmd & 0x80) && !suser()) || + ((cmd & 0x40) && !IOCTL_ALLOWED)) + return -EPERM; + + /* verify writability of result, and fail early */ + if (_IOC_DIR(cmd) & _IOC_READ) + ECALL(verify_area(VERIFY_WRITE,(void *) param, size)); + + /* copyin */ + CLEARSTRUCT(&inparam); + if (_IOC_DIR(cmd) & _IOC_WRITE) + ECALL(fd_copyin((void *)param, &inparam, size)) + + switch (cmd) { + case FDEJECT: + if(UDRS->fd_ref != 1) + /* somebody else has this drive open */ + return -EBUSY; + LOCK_FDC(drive,1); + + /* do the actual eject. Fails on + * non-Sparc architectures */ + ret=fd_eject(UNIT(drive)); + + USETF(FD_DISK_CHANGED); + USETF(FD_VERIFY); + process_fd_request(); + return ret; + case FDCLRPRM: + LOCK_FDC(drive,1); + current_type[drive] = NULL; + floppy_sizes[drive] = MAX_DISK_SIZE; + UDRS->keep_data = 0; + return invalidate_drive(device); + case FDSETPRM: + case FDDEFPRM: + return set_geometry(cmd, & inparam.g, + drive, type, device); + case FDGETPRM: + LOCK_FDC(drive,1); + CALL(poll_drive(1,0)); + process_fd_request(); + if (type) + outparam = (char *) &floppy_type[type]; + else + outparam = (char *) current_type[drive]; + if(!outparam) + return -ENODEV; + break; + + case FDMSGON: + UDP->flags |= FTD_MSG; + return 0; + case FDMSGOFF: + UDP->flags &= ~FTD_MSG; + return 0; + + case FDFMTBEG: + LOCK_FDC(drive,1); + CALL(poll_drive(1, FD_RAW_NEED_DISK)); + ret = UDRS->flags; + process_fd_request(); + if(ret & FD_VERIFY) + return -ENODEV; + if(!(ret & FD_DISK_WRITABLE)) + return -EROFS; + return 0; + case FDFMTTRK: + if (UDRS->fd_ref != 1) + return -EBUSY; + return do_format(device, &inparam.f); + case FDFMTEND: + case FDFLUSH: + LOCK_FDC(drive,1); + return invalidate_drive(device); + + case FDSETEMSGTRESH: + UDP->max_errors.reporting = + (unsigned short) (param & 0x0f); + return 0; + OUT(FDGETMAXERRS, &UDP->max_errors); + IN(FDSETMAXERRS, &UDP->max_errors, max_errors); + + case FDGETDRVTYP: + outparam = drive_name(type,drive); + SUPBOUND(size,strlen(outparam)+1); + break; + + IN(FDSETDRVPRM, UDP, dp); + OUT(FDGETDRVPRM, UDP); + + case FDPOLLDRVSTAT: + LOCK_FDC(drive,1); + CALL(poll_drive(1, FD_RAW_NEED_DISK)); + process_fd_request(); + /* fall through */ + OUT(FDGETDRVSTAT, UDRS); + + case FDRESET: + return user_reset_fdc(drive, (int)param, 1); + + OUT(FDGETFDCSTAT,UFDCS); + + case FDWERRORCLR: + CLEARSTRUCT(UDRWE); + return 0; + OUT(FDWERRORGET,UDRWE); + + case OLDFDRAWCMD: + case FDRAWCMD: + if (type) + return -EINVAL; + LOCK_FDC(drive,1); + set_floppy(device); + CALL(i = raw_cmd_ioctl(cmd,(void *) param)); + process_fd_request(); + return i; + + case FDTWADDLE: + LOCK_FDC(drive,1); + twaddle(); + process_fd_request(); + return 0; + + default: + return -EINVAL; + } + + if (_IOC_DIR(cmd) & _IOC_READ) + return fd_copyout((void *)param, outparam, size); + else + return 0; +#undef IOCTL_ALLOWED +#undef OUT +#undef IN +} + +static void config_types(void) +{ + int first=1; + int drive; + + /* read drive info out of 
physical CMOS */ + drive=0; + if (!UDP->cmos) + UDP->cmos= FLOPPY0_TYPE; + drive=1; + if (!UDP->cmos && FLOPPY1_TYPE) + UDP->cmos = FLOPPY1_TYPE; + + /* XXX */ + /* additional physical CMOS drive detection should go here */ + + for (drive=0; drive < N_DRIVE; drive++){ + if (UDP->cmos >= 16) + UDP->cmos = 0; + if (UDP->cmos >= 0 && UDP->cmos <= NUMBER(default_drive_params)) + memcpy((char *) UDP, + (char *) (&default_drive_params[(int)UDP->cmos].params), + sizeof(struct floppy_drive_params)); + if (UDP->cmos){ + if (first) + printk(KERN_INFO "Floppy drive(s): "); + else + printk(", "); + first=0; + if (UDP->cmos > 0){ + allowed_drive_mask |= 1 << drive; + printk("fd%d is %s", drive, + default_drive_params[(int)UDP->cmos].name); + } else + printk("fd%d is unknown type %d",drive, + UDP->cmos); + } + else + allowed_drive_mask &= ~(1 << drive); + } + if (!first) + printk("\n"); +} + +static int floppy_read(struct inode * inode, struct file * filp, + char * buf, int count) +{ + int drive = DRIVE(inode->i_rdev); + + check_disk_change(inode->i_rdev); + if (UTESTF(FD_DISK_CHANGED)) + return -ENXIO; + return block_read(inode, filp, buf, count); +} + +static int floppy_write(struct inode * inode, struct file * filp, + const char * buf, int count) +{ + int block; + int ret; + int drive = DRIVE(inode->i_rdev); + + if (!UDRS->maxblock) + UDRS->maxblock=1;/* make change detectable */ + check_disk_change(inode->i_rdev); + if (UTESTF(FD_DISK_CHANGED)) + return -ENXIO; + if (!UTESTF(FD_DISK_WRITABLE)) + return -EROFS; + block = (filp->f_pos + count) >> 9; + INFBOUND(UDRS->maxblock, block); + ret= block_write(inode, filp, buf, count); + return ret; +} + +static void floppy_release(struct inode * inode, struct file * filp) +{ + int drive; + + drive = DRIVE(inode->i_rdev); + + if (!filp || (filp->f_mode & (2 | OPEN_WRITE_BIT))) + /* if the file is mounted OR (writable now AND writable at + * open time) Linus: Does this cover all cases? */ + block_fsync(inode,filp); + + if (UDRS->fd_ref < 0) + UDRS->fd_ref=0; + else if (!UDRS->fd_ref--) { + DPRINT("floppy_release with fd_ref == 0"); + UDRS->fd_ref = 0; + } + floppy_release_irq_and_dma(); +} + +/* + * floppy_open check for aliasing (/dev/fd0 can be the same as + * /dev/PS0 etc), and disallows simultaneous access to the same + * drive with different device numbers. 
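+ * (UDRS->fd_device remembers the minor number the drive was last opened + * with; if the drive still has references and a different minor is used, + * the open fails with -EBUSY below.)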
+ */ +#define RETERR(x) do{floppy_release(inode,filp); return -(x);}while(0) + +static int floppy_open(struct inode * inode, struct file * filp) +{ + int drive; + int old_dev; + int try; + char *tmp; + + if (!filp) { + DPRINT("Weird, open called with filp=0\n"); + return -EIO; + } + + drive = DRIVE(inode->i_rdev); + if (drive >= N_DRIVE || + !(allowed_drive_mask & (1 << drive)) || + fdc_state[FDC(drive)].version == FDC_NONE) + return -ENXIO; + + if (TYPE(inode->i_rdev) >= NUMBER(floppy_type)) + return -ENXIO; + old_dev = UDRS->fd_device; + if (UDRS->fd_ref && old_dev != MINOR(inode->i_rdev)) + return -EBUSY; + + if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)){ + USETF(FD_DISK_CHANGED); + USETF(FD_VERIFY); + } + + if (UDRS->fd_ref == -1 || + (UDRS->fd_ref && (filp->f_flags & O_EXCL))) + return -EBUSY; + + if (floppy_grab_irq_and_dma()) + return -EBUSY; + + if (filp->f_flags & O_EXCL) + UDRS->fd_ref = -1; + else + UDRS->fd_ref++; + + if (!floppy_track_buffer){ + /* if opening an ED drive, reserve a big buffer, + * else reserve a small one */ + if ((UDP->cmos == 6) || (UDP->cmos == 5)) + try = 64; /* Only 48 actually useful */ + else + try = 32; /* Only 24 actually useful */ + + tmp=(char *)fd_dma_mem_alloc(1024 * try); + if (!tmp) { + try >>= 1; /* buffer only one side */ + INFBOUND(try, 16); + tmp= (char *)fd_dma_mem_alloc(1024*try); + } + if (!tmp) { + DPRINT("Unable to allocate DMA memory\n"); + RETERR(ENXIO); + } + if (floppy_track_buffer) + fd_dma_mem_free((unsigned long)tmp,try*1024); + else { + buffer_min = buffer_max = -1; + floppy_track_buffer = tmp; + max_buffer_sectors = try; + } + } + + UDRS->fd_device = MINOR(inode->i_rdev); + if (old_dev != -1 && old_dev != MINOR(inode->i_rdev)) { + if (buffer_drive == drive) + buffer_track = -1; + invalidate_buffers(MKDEV(FLOPPY_MAJOR,old_dev)); + } + + /* Allow ioctls if we have write-permissions even if read-only open */ + if ((filp->f_mode & 2) || (permission(inode,2) == 0)) + filp->f_mode |= IOCTL_MODE_BIT; + if (filp->f_mode & 2) + filp->f_mode |= OPEN_WRITE_BIT; + + if (UFDCS->rawcmd == 1) + UFDCS->rawcmd = 2; + + if (filp->f_flags & O_NDELAY) + return 0; + if (filp->f_mode & 3) { + UDRS->last_checked = 0; + check_disk_change(inode->i_rdev); + if (UTESTF(FD_DISK_CHANGED)) + RETERR(ENXIO); + } + if ((filp->f_mode & 2) && !(UTESTF(FD_DISK_WRITABLE))) + RETERR(EROFS); + return 0; +#undef RETERR +} + +/* + * Check if the disk has been changed or if a change has been faked. + */ +static int check_floppy_change(kdev_t dev) +{ + int drive = DRIVE(dev); + + if (MAJOR(dev) != MAJOR_NR) { + DPRINT("check_floppy_change: not a floppy\n"); + return 0; + } + + if (UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY)) + return 1; + + if (UDP->checkfreq < jiffies - UDRS->last_checked){ + lock_fdc(drive,0); + poll_drive(0,0); + process_fd_request(); + } + + if (UTESTF(FD_DISK_CHANGED) || + UTESTF(FD_VERIFY) || + test_bit(drive, &fake_change) || + (!TYPE(dev) && !current_type[drive])) + return 1; + return 0; +} + +/* revalidate the floppy disk, i.e. trigger format autodetection by reading + * the bootblock (block 0). "Autodetection" is also needed to check whether + * there is a disk in the drive at all... 
Thus we also do it for fixed + * geometry formats */ +static int floppy_revalidate(kdev_t dev) +{ +#define NO_GEOM (!current_type[drive] && !TYPE(dev)) + struct buffer_head * bh; + int drive=DRIVE(dev); + int cf; + + if (UTESTF(FD_DISK_CHANGED) || + UTESTF(FD_VERIFY) || + test_bit(drive, &fake_change) || + NO_GEOM){ + lock_fdc(drive,0); + cf = UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY); + if (!(cf || test_bit(drive, &fake_change) || NO_GEOM)){ + process_fd_request(); /*already done by another thread*/ + return 0; + } + UDRS->maxblock = 0; + UDRS->maxtrack = 0; + if (buffer_drive == drive) + buffer_track = -1; + clear_bit(drive, &fake_change); + UCLEARF(FD_DISK_CHANGED); + if (cf) + UDRS->generation++; + if (NO_GEOM){ + /* auto-sensing */ + int size = floppy_blocksizes[MINOR(dev)]; + if (!size) + size = 1024; + if (!(bh = getblk(dev,0,size))){ + process_fd_request(); + return 1; + } + if (bh && !buffer_uptodate(bh)) + ll_rw_block(READ, 1, &bh, 1); + process_fd_request(); + wait_on_buffer(bh); + brelse(bh); + return 0; + } + if (cf) + poll_drive(0, FD_RAW_NEED_DISK); + process_fd_request(); + } + return 0; +} + +static struct file_operations floppy_fops = { + NULL, /* lseek - default */ + floppy_read, /* read - general block-dev read */ + floppy_write, /* write - general block-dev write */ + NULL, /* readdir - bad */ + NULL, /* select */ + fd_ioctl, /* ioctl */ + NULL, /* mmap */ + floppy_open, /* open */ + floppy_release, /* release */ + block_fsync, /* fsync */ + NULL, /* fasync */ + check_floppy_change, /* media_change */ + floppy_revalidate, /* revalidate */ +}; + +/* + * Floppy Driver initialization + * ============================= + */ + +/* Determine the floppy disk controller type */ +/* This routine was written by David C. Niemi */ +static char get_fdc_version(void) +{ + int r; + + output_byte(FD_DUMPREGS); /* 82072 and better know DUMPREGS */ + if (FDCS->reset) + return FDC_NONE; + if ((r = result()) <= 0x00) + return FDC_NONE; /* No FDC present ??? */ + if ((r==1) && (reply_buffer[0] == 0x80)){ + printk(KERN_INFO "FDC %d is an 8272A\n",fdc); + return FDC_8272A; /* 8272a/765 don't know DUMPREGS */ + } + if (r != 10) { + printk("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n", + fdc, r); + return FDC_UNKNOWN; + } + + if(!fdc_configure()) { + printk(KERN_INFO "FDC %d is an 82072\n",fdc); + return FDC_82072; /* 82072 doesn't know CONFIGURE */ + } + + output_byte(FD_PERPENDICULAR); + if(need_more_output() == MORE_OUTPUT) { + output_byte(0); + } else { + printk(KERN_INFO "FDC %d is an 82072A\n", fdc); + return FDC_82072A; /* 82072A as found on Sparcs. 
*/ + } + + output_byte(FD_UNLOCK); + r = result(); + if ((r == 1) && (reply_buffer[0] == 0x80)){ + printk(KERN_INFO "FDC %d is a pre-1991 82077\n", fdc); + return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know + * LOCK/UNLOCK */ + } + if ((r != 1) || (reply_buffer[0] != 0x00)) { + printk("FDC %d init: UNLOCK: unexpected return of %d bytes.\n", + fdc, r); + return FDC_UNKNOWN; + } + output_byte(FD_PARTID); + r = result(); + if (r != 1) { + printk("FDC %d init: PARTID: unexpected return of %d bytes.\n", + fdc, r); + return FDC_UNKNOWN; + } + if (reply_buffer[0] == 0x80) { + printk(KERN_INFO "FDC %d is a post-1991 82077\n",fdc); + return FDC_82077; /* Revised 82077AA passes all the tests */ + } + switch (reply_buffer[0] >> 5) { + case 0x0: + /* Either a 82078-1 or a 82078SL running at 5Volt */ + printk(KERN_INFO "FDC %d is an 82078.\n",fdc); + return FDC_82078; + case 0x1: + printk(KERN_INFO "FDC %d is a 44pin 82078\n",fdc); + return FDC_82078; + case 0x2: + printk(KERN_INFO "FDC %d is a S82078B\n", fdc); + return FDC_S82078B; + case 0x3: + printk(KERN_INFO "FDC %d is a National Semiconductor PC87306\n", fdc); + return FDC_87306; + default: + printk(KERN_INFO "FDC %d init: 82078 variant with unknown PARTID=%d.\n", + fdc, reply_buffer[0] >> 5); + return FDC_82078_UNKN; + } +} /* get_fdc_version */ + +/* lilo configuration */ + +/* we make the invert_dcl function global. One day, somebody might + * want to centralize all thinkpad related options into one lilo option, + * there are just so many thinkpad related quirks! */ +void floppy_invert_dcl(int *ints,int param) +{ + int i; + + for (i=0; i < ARRAY_SIZE(default_drive_params); i++){ + if (param) + default_drive_params[i].params.flags |= 0x80; + else + default_drive_params[i].params.flags &= ~0x80; + } + DPRINT("Configuring drives for inverted dcl\n"); +} + +static void daring(int *ints,int param) +{ + int i; + + for (i=0; i < ARRAY_SIZE(default_drive_params); i++){ + if (param){ + default_drive_params[i].params.select_delay = 0; + default_drive_params[i].params.flags |= FD_SILENT_DCL_CLEAR; + } else { + default_drive_params[i].params.select_delay = 2*HZ/100; + default_drive_params[i].params.flags &= ~FD_SILENT_DCL_CLEAR; + } + } + DPRINT("Assuming %s floppy hardware\n", param ? 
"standard" : "broken"); +} + +static void set_cmos(int *ints, int dummy) +{ + int current_drive=0; + + if (ints[0] != 2){ + DPRINT("wrong number of parameter for cmos\n"); + return; + } + current_drive = ints[1]; + if (current_drive < 0 || current_drive >= 8){ + DPRINT("bad drive for set_cmos\n"); + return; + } + if (current_drive >= 4 && !FDC2) + FDC2 = 0x370; + if (ints[2] <= 0 || + (ints[2] >= NUMBER(default_drive_params) && ints[2] != 16)){ + DPRINT("bad cmos code %d\n", ints[2]); + return; + } + DP->cmos = ints[2]; + DPRINT("setting cmos code to %d\n", ints[2]); +} + +static struct param_table { + const char *name; + void (*fn)(int *ints, int param); + int *var; + int def_param; +} config_params[]={ + { "allowed_drive_mask", 0, &allowed_drive_mask, 0xff }, + { "all_drives", 0, &allowed_drive_mask, 0xff }, + { "asus_pci", 0, &allowed_drive_mask, 0x33 }, + + { "daring", daring, 0, 1}, + + { "two_fdc", 0, &FDC2, 0x370 }, + { "one_fdc", 0, &FDC2, 0 }, + + { "thinkpad", floppy_invert_dcl, 0, 1 }, + + { "nodma", 0, &use_virtual_dma, 1 }, + { "omnibook", 0, &use_virtual_dma, 1 }, + { "dma", 0, &use_virtual_dma, 0 }, + + { "fifo_depth", 0, &fifo_depth, 0xa }, + { "nofifo", 0, &no_fifo, 0x20 }, + { "usefifo", 0, &no_fifo, 0 }, + + { "cmos", set_cmos, 0, 0 }, + + { "unexpected_interrupts", 0, &print_unex, 1 }, + { "no_unexpected_interrupts", 0, &print_unex, 0 }, + { "L40SX", 0, &print_unex, 0 } }; + +#define FLOPPY_SETUP +void floppy_setup(char *str, int *ints) +{ + int i; + int param; + if (str) + for (i=0; i< ARRAY_SIZE(config_params); i++){ + if (strcmp(str,config_params[i].name) == 0){ + if (ints[0]) + param = ints[1]; + else + param = config_params[i].def_param; + if(config_params[i].fn) + config_params[i].fn(ints,param); + if(config_params[i].var) { + DPRINT("%s=%d\n", str, param); + *config_params[i].var = param; + } + return; + } + } + if (str) { + DPRINT("unknown floppy option [%s]\n", str); + + DPRINT("allowed options are:"); + for (i=0; i< ARRAY_SIZE(config_params); i++) + printk(" %s",config_params[i].name); + printk("\n"); + } else + DPRINT("botched floppy option\n"); + DPRINT("Read linux/drivers/block/README.fd\n"); +} + +int floppy_init(void) +{ + int i,unit,drive; + int have_no_fdc= -EIO; + + raw_cmd = 0; + + if (register_blkdev(MAJOR_NR,"fd",&floppy_fops)) { + printk("Unable to get major %d for floppy\n",MAJOR_NR); + return -EBUSY; + } + + for (i=0; i<256; i++) + if (ITYPE(i)) + floppy_sizes[i] = floppy_type[ITYPE(i)].size >> 1; + else + floppy_sizes[i] = MAX_DISK_SIZE; + + blk_size[MAJOR_NR] = floppy_sizes; + blksize_size[MAJOR_NR] = floppy_blocksizes; + blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; + reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT); + config_types(); + + for (i = 0; i < N_FDC; i++) { + fdc = i; + CLEARSTRUCT(FDCS); + FDCS->dtr = -1; + FDCS->dor = 0x4; +#ifdef __sparc__ + /*sparcs don't have a DOR reset which we can fall back on to*/ + FDCS->version = FDC_82072A; +#endif + } + + fdc_state[0].address = FDC1; +#if N_FDC > 1 + fdc_state[1].address = FDC2; +#endif + + if (floppy_grab_irq_and_dma()){ + del_timer(&fd_timeout); + blk_dev[MAJOR_NR].request_fn = NULL; + unregister_blkdev(MAJOR_NR,"fd"); + return -EBUSY; + } + + /* initialise drive state */ + for (drive = 0; drive < N_DRIVE; drive++) { + CLEARSTRUCT(UDRS); + CLEARSTRUCT(UDRWE); + UDRS->flags = FD_VERIFY | FD_DISK_NEWCHANGE | FD_DISK_CHANGED; + UDRS->fd_device = -1; + floppy_track_buffer = NULL; + max_buffer_sectors = 0; + } + + for (i = 0; i < N_FDC; i++) { + fdc = i; + FDCS->driver_version = 
FD_DRIVER_VERSION; + for (unit=0; unit<4; unit++) + FDCS->track[unit] = 0; + if (FDCS->address == -1) + continue; + FDCS->rawcmd = 2; + if (user_reset_fdc(-1,FD_RESET_ALWAYS,0)){ + FDCS->address = -1; + FDCS->version = FDC_NONE; + continue; + } + /* Try to determine the floppy controller type */ + FDCS->version = get_fdc_version(); + if (FDCS->version == FDC_NONE){ + FDCS->address = -1; + continue; + } + + request_region(FDCS->address, 6, "floppy"); + request_region(FDCS->address+7, 1, "floppy DIR"); + /* address + 6 is reserved, and may be taken by IDE. + * Unfortunately, Adaptec doesn't know this :-(, */ + + have_no_fdc = 0; + /* Not all FDCs seem to be able to handle the version command + * properly, so force a reset for the standard FDC clones, + * to avoid interrupt garbage. + */ + user_reset_fdc(-1,FD_RESET_ALWAYS,0); + } + fdc=0; + del_timer(&fd_timeout); + current_drive = 0; + floppy_release_irq_and_dma(); + initialising=0; + if (have_no_fdc) { + DPRINT("no floppy controllers found\n"); + request_tq.routine = (void *)(void *) empty; + /* + * When we return we may be unloaded. This little + * trick forces the immediate_bh handler to have run + * before we unload it, lest we cause bad things. + */ + mark_bh(IMMEDIATE_BH); + schedule(); + if (usage_count) + floppy_release_irq_and_dma(); + blk_dev[MAJOR_NR].request_fn = NULL; + unregister_blkdev(MAJOR_NR,"fd"); + } + return have_no_fdc; +} + +static int floppy_grab_irq_and_dma(void) +{ + int i; + unsigned long flags; + + INT_OFF; + if (usage_count++){ + INT_ON; + return 0; + } + INT_ON; + MOD_INC_USE_COUNT; + for (i=0; i< N_FDC; i++){ + if (fdc_state[i].address != -1){ + fdc = i; + reset_fdc_info(1); + fd_outb(FDCS->dor, FD_DOR); + } + } + fdc = 0; + set_dor(0, ~0, 8); /* avoid immediate interrupt */ + + if (fd_request_irq()) { + DPRINT("Unable to grab IRQ%d for the floppy driver\n", + FLOPPY_IRQ); + MOD_DEC_USE_COUNT; + usage_count--; + return -1; + } + if (fd_request_dma()) { + DPRINT("Unable to grab DMA%d for the floppy driver\n", + FLOPPY_DMA); + fd_free_irq(); + MOD_DEC_USE_COUNT; + usage_count--; + return -1; + } + for (fdc = 0; fdc < N_FDC; fdc++) + if (FDCS->address != -1) + fd_outb(FDCS->dor, FD_DOR); + fdc = 0; + fd_enable_irq(); + irqdma_allocated=1; + return 0; +} + +static void floppy_release_irq_and_dma(void) +{ +#ifdef FLOPPY_SANITY_CHECK + int drive; +#endif + long tmpsize; + unsigned long tmpaddr; + unsigned long flags; + + INT_OFF; + if (--usage_count){ + INT_ON; + return; + } + INT_ON; + if(irqdma_allocated) + { + fd_disable_dma(); + fd_free_dma(); + fd_disable_irq(); + fd_free_irq(); + irqdma_allocated=0; + } + + set_dor(0, ~0, 8); +#if N_FDC > 1 + set_dor(1, ~8, 0); +#endif + floppy_enable_hlt(); + + if (floppy_track_buffer && max_buffer_sectors) { + tmpsize = max_buffer_sectors*1024; + tmpaddr = (unsigned long)floppy_track_buffer; + floppy_track_buffer = 0; + max_buffer_sectors = 0; + buffer_min = buffer_max = -1; + fd_dma_mem_free(tmpaddr, tmpsize); + } + +#ifdef FLOPPY_SANITY_CHECK +#ifndef __sparc__ + for (drive=0; drive < N_FDC * 4; drive++) + if (motor_off_timer[drive].next) + printk("motor off timer %d still active\n", drive); +#endif + + if (fd_timeout.next) + printk("floppy timer still active:%s\n", timeout_message); + if (fd_timer.next) + printk("auxiliary floppy timer still active\n"); + if (floppy_tq.sync) + printk("task queue still active\n"); +#endif + MOD_DEC_USE_COUNT; +} + + +#ifdef MODULE + +char *floppy=NULL; + +static void parse_floppy_cfg_string(char *cfg) +{ + char *ptr; + int ints[11]; + + 
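/* The floppy= module option is one whitespace-separated string; each + * token is cut out in place and handed to floppy_setup(), which looks it + * up in config_params[] above (a string such as "daring two_fdc", for + * instance, would enable both of those options). */ +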
while(*cfg) { + for(ptr = cfg;*cfg && *cfg != ' ' && *cfg != '\t'; cfg++); + if(*cfg) { + *cfg = '\0'; + cfg++; + } + if(*ptr) + floppy_setup(get_options(ptr,ints),ints); + } +} + +static void mod_setup(char *pattern, void (*setup)(char *, int *)) +{ + unsigned long i; + char c; + int j; + int match; + char buffer[100]; + int ints[11]; + int length = strlen(pattern)+1; + + match=0; + j=1; + + for (i=current->mm->env_start; i< current->mm->env_end; i ++){ + c= get_fs_byte(i); + if (match){ + if (j==99) + c='\0'; + buffer[j] = c; + if (!c || c == ' ' || c == '\t'){ + if (j){ + buffer[j] = '\0'; + setup(get_options(buffer,ints),ints); + } + j=0; + } else + j++; + if (!c) + break; + continue; + } + if ((!j && !c) || (j && c == pattern[j-1])) + j++; + else + j=0; + if (j==length){ + match=1; + j=0; + } + } +} + + +#ifdef __cplusplus +extern "C" { +#endif +int init_module(void) +{ + printk(KERN_INFO "inserting floppy driver for %s\n", kernel_version); + + if(floppy) + parse_floppy_cfg_string(floppy); + else + mod_setup("floppy=", floppy_setup); + + return floppy_init(); +} + +void cleanup_module(void) +{ + int fdc, dummy; + + for (fdc=0; fdc<2; fdc++) + if (FDCS->address != -1){ + release_region(FDCS->address, 6); + release_region(FDCS->address+7, 1); + } + + unregister_blkdev(MAJOR_NR, "fd"); + + blk_dev[MAJOR_NR].request_fn = 0; + /* eject disk, if any */ + dummy = fd_eject(0); +} + +#ifdef __cplusplus +} +#endif + +#else +/* eject the boot floppy (if we need the drive for a different root floppy) */ +/* This should only be called at boot time when we're sure that there's no + * resource contention. */ +void floppy_eject(void) +{ + if(floppy_grab_irq_and_dma()==0) + { + lock_fdc(MAXTIMEOUT,0); + fd_eject(0); + process_fd_request(); + floppy_release_irq_and_dma(); + } +} +#endif diff --git a/linux/dev/drivers/block/genhd.c b/linux/dev/drivers/block/genhd.c new file mode 100644 index 0000000..903135c --- /dev/null +++ b/linux/dev/drivers/block/genhd.c @@ -0,0 +1,1080 @@ +/* + * Code extracted from + * linux/kernel/hd.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * + * + * Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug + * in the early extended-partition checks and added DM partitions + * + * Support for DiskManager v6.0x added by Mark Lord, + * with information provided by OnTrack. This now works for linux fdisk + * and LILO, as well as loadlin and bootln. Note that disks other than + * /dev/hda *must* have a "DOS" type 0x51 partition in the first slot (hda1). + * + * More flexible handling of extended partitions - aeb, 950831 + * + * Check partition table on IDE disks for common CHS translations + */ + +#include <linux/config.h> +#include <linux/fs.h> +#include <linux/genhd.h> +#include <linux/kernel.h> +#include <linux/major.h> +#include <linux/string.h> +#ifdef CONFIG_BLK_DEV_INITRD +#include <linux/blk.h> +#endif +#include <linux/hdreg.h> +#include <alloca.h> +#ifdef CONFIG_GPT_DISKLABEL +#include <linux/blkdev.h> +#include <kern/kalloc.h> +#include <stddef.h> +#endif + +#include <asm/system.h> + +/* + * Many architectures don't like unaligned accesses, which is + * frequently the case with the nr_sects and start_sect partition + * table entries. 
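+ * (A DOS partition entry is 16 bytes starting at offset 0x1be of the + * sector, which puts the 32-bit start_sect and nr_sects fields of the + * first entry at offsets 0x1c6 and 0x1ca, not 4-byte aligned; hence the + * get_unaligned() wrappers in the macros below.)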
+ */ +#include <asm/unaligned.h> + +#ifdef MACH +#include <machine/spl.h> +#include <linux/dev/glue/glue.h> +#endif + +#define SYS_IND(p) get_unaligned(&p->sys_ind) +#define NR_SECTS(p) get_unaligned(&p->nr_sects) +#define START_SECT(p) get_unaligned(&p->start_sect) + + +struct gendisk *gendisk_head = NULL; + +static int current_minor = 0; +extern int *blk_size[]; +extern void rd_load(void); +extern void initrd_load(void); + +extern int chr_dev_init(void); +extern int blk_dev_init(void); +extern int scsi_dev_init(void); +extern int net_dev_init(void); + +/* + * disk_name() is used by genhd.c and md.c. + * It formats the devicename of the indicated disk + * into the supplied buffer, and returns a pointer + * to that same buffer (for convenience). + */ +char *disk_name (struct gendisk *hd, int minor, char *buf) +{ + unsigned int part; + const char *maj = hd->major_name; +#ifdef MACH + char unit = (minor >> hd->minor_shift) + '0'; +#else + char unit = (minor >> hd->minor_shift) + 'a'; +#endif + +#ifdef CONFIG_BLK_DEV_IDE + /* + * IDE devices use multiple major numbers, but the drives + * are named as: {hda,hdb}, {hdc,hdd}, {hde,hdf}, {hdg,hdh}.. + * This requires special handling here. + */ + switch (hd->major) { + case IDE3_MAJOR: + unit += 2; + case IDE2_MAJOR: + unit += 2; + case IDE1_MAJOR: + unit += 2; + case IDE0_MAJOR: + maj = "hd"; + } +#endif + part = minor & ((1 << hd->minor_shift) - 1); + if (part) +#ifdef MACH + sprintf(buf, "%s%cs%d", maj, unit, part); +#else + sprintf(buf, "%s%c%d", maj, unit, part); +#endif + else + sprintf(buf, "%s%c", maj, unit); + return buf; +} + +static void add_partition (struct gendisk *hd, int minor, int start, int size) +{ + char buf[8]; + hd->part[minor].start_sect = start; + hd->part[minor].nr_sects = size; + printk(" %s", disk_name(hd, minor, buf)); +} + +#if defined (MACH) && defined (CONFIG_BSD_DISKLABEL) +static int mach_minor; +static void +add_bsd_partition (struct gendisk *hd, int minor, int slice, + int start, int size) +{ + char buf[16]; + hd->part[minor].start_sect = start; + hd->part[minor].nr_sects = size; + printk (" %s%c", disk_name (hd, mach_minor, buf), slice); +} +#endif + +static inline int is_extended_partition(struct partition *p) +{ + return (SYS_IND(p) == DOS_EXTENDED_PARTITION || + SYS_IND(p) == WIN98_EXTENDED_PARTITION || + SYS_IND(p) == LINUX_EXTENDED_PARTITION); +} + +#ifdef CONFIG_MSDOS_PARTITION +/* + * Create devices for each logical partition in an extended partition. + * The logical partitions form a linked list, with each entry being + * a partition table with two entries. The first entry + * is the real data partition (with a start relative to the partition + * table start). The second is a pointer to the next logical partition + * (with a start relative to the entire extended partition). + * We do not create a Linux partition for the partition tables, but + * only for the actual data partitions. + */ + +static void extended_partition(struct gendisk *hd, kdev_t dev) +{ + struct buffer_head *bh; + struct partition *p; + unsigned long first_sector, first_size, this_sector, this_size; + int mask = (1 << hd->minor_shift) - 1; + int i; + + first_sector = hd->part[MINOR(dev)].start_sect; + first_size = hd->part[MINOR(dev)].nr_sects; + this_sector = first_sector; + + while (1) { + if ((current_minor & mask) == 0) + return; + if (!(bh = bread(dev,0,1024))) + return; + /* + * This block is from a device that we're about to stomp on. + * So make sure nobody thinks this block is usable. 
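+ * (Zeroing b_state below clears the uptodate and dirty flags, so a later + * bread() of this block goes back to the disk rather than reusing a + * stale cached copy.)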
+ */ + bh->b_state = 0; + + if (*(unsigned short *) (bh->b_data+510) != 0xAA55) + goto done; + + p = (struct partition *) (0x1BE + bh->b_data); + + this_size = hd->part[MINOR(dev)].nr_sects; + + /* + * Usually, the first entry is the real data partition, + * the 2nd entry is the next extended partition, or empty, + * and the 3rd and 4th entries are unused. + * However, DRDOS sometimes has the extended partition as + * the first entry (when the data partition is empty), + * and OS/2 seems to use all four entries. + */ + + /* + * First process the data partition(s) + */ + for (i=0; i<4; i++, p++) { + if (!NR_SECTS(p) || is_extended_partition(p)) + continue; + + /* Check the 3rd and 4th entries - + these sometimes contain random garbage */ + if (i >= 2 + && START_SECT(p) + NR_SECTS(p) > this_size + && (this_sector + START_SECT(p) < first_sector || + this_sector + START_SECT(p) + NR_SECTS(p) > + first_sector + first_size)) + continue; + + add_partition(hd, current_minor, this_sector+START_SECT(p), NR_SECTS(p)); + current_minor++; + if ((current_minor & mask) == 0) + goto done; + } + /* + * Next, process the (first) extended partition, if present. + * (So far, there seems to be no reason to make + * extended_partition() recursive and allow a tree + * of extended partitions.) + * It should be a link to the next logical partition. + * Create a minor for this just long enough to get the next + * partition table. The minor will be reused for the next + * data partition. + */ + p -= 4; + for (i=0; i<4; i++, p++) + if(NR_SECTS(p) && is_extended_partition(p)) + break; + if (i == 4) + goto done; /* nothing left to do */ + + hd->part[current_minor].nr_sects = NR_SECTS(p); + hd->part[current_minor].start_sect = first_sector + START_SECT(p); + this_sector = first_sector + START_SECT(p); + dev = MKDEV(hd->major, current_minor); + brelse(bh); + } +done: + brelse(bh); +} + +#ifdef CONFIG_BSD_DISKLABEL +/* + * Create devices for BSD partitions listed in a disklabel, under a + * dos-like partition. See extended_partition() for more information. + */ +static void bsd_disklabel_partition(struct gendisk *hd, kdev_t dev) +{ + struct buffer_head *bh; + struct bsd_disklabel *l; + struct bsd_partition *p; + int mask = (1 << hd->minor_shift) - 1; + + if (!(bh = bread(dev,0,1024))) + return; + bh->b_state = 0; + l = (struct bsd_disklabel *) (bh->b_data+512); + if (l->d_magic != BSD_DISKMAGIC) { + brelse(bh); + return; + } + + p = &l->d_partitions[0]; + while (p - &l->d_partitions[0] <= BSD_MAXPARTITIONS) { + if ((current_minor & mask) >= (4 + hd->max_p)) + break; + + if (p->p_fstype != BSD_FS_UNUSED) { +#ifdef MACH + add_bsd_partition (hd, current_minor, + p - &l->d_partitions[0] + 'a', + p->p_offset, p->p_size); +#else + add_partition(hd, current_minor, p->p_offset, p->p_size); +#endif + current_minor++; + } + p++; + } + brelse(bh); + +} +#endif + +#ifdef CONFIG_GPT_DISKLABEL +/* + * Compute a CRC32 but treat some range as if it were zeros. + * + * Straight copy of ether_crc_le() from linux/pcmcia-cs/include/linux/crc32.h, except for the first if/else + */ +static inline unsigned ether_crc_le_hole(int length, unsigned char *data, unsigned int skip_offset, unsigned int skip_length) +{ + static unsigned const ethernet_polynomial_le = 0xedb88320U; + unsigned int crc = 0xffffffff; /* Initial value. 
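All bits set, as in standard CRC-32; the callers in gpt_verify_header() XOR the result with ~0 before comparing it with the CRC fields stored on disk.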
*/ + while(--length >= 0) { + unsigned char current_octet = *data++; + if(skip_offset == 0 && skip_length-- != 0) + current_octet = 0; + else + --skip_offset; + int bit; + for (bit = 8; --bit >= 0; current_octet >>= 1) { + if ((crc ^ current_octet) & 1) { + crc >>= 1; + crc ^= ethernet_polynomial_le; + } else + crc >>= 1; + } + } + return crc; +} + +/* + * Read in a full GPT array into a contiguous chunk, allocates *PP_S bytes into *PP. + * + * An attempt to do as few round-trips as possible is made by reading a PAGE_SIZE at a time, + * since that's the bread() maximum. + */ +static int gpt_read_part_table(void **pp, vm_size_t *pp_s, kdev_t dev, int bsize, __u64 first_sector, struct gpt_disklabel_header *h) +{ + __u64 lba = first_sector + h->h_part_table_lba; + __u32 bytes_left = *pp_s = h->h_part_table_len * h->h_part_table_entry_size; + struct buffer_head *bh; + void *cur = *pp = (void *)kalloc(*pp_s); + if (!cur) { + printk(" unable to allocate GPT partition table buffer"); + return -2; + } + + while (bytes_left) { + unsigned bytes_to_read = MIN(bytes_left, PAGE_SIZE); + if(!(bh = bread(dev, lba, bytes_to_read))) { + printk(" unable to read partition table array"); + return -3; + } + + memcpy(cur, bh->b_data, bytes_to_read); + cur += bytes_to_read; + bytes_left -= bytes_to_read; + lba += PAGE_SIZE / bsize; + + brelse(bh); + } + + return 0; +} + +/* + * Sequence from section 5.3.2 of spec 2.8A: + * signature, CRC, lba_current matches, partition table CRC, primary: check backup for validity + */ +static int gpt_verify_header(void **pp, vm_size_t *pp_s, kdev_t dev, int bsize, __u64 first_sector, __u64 lba, struct gpt_disklabel_header *h) +{ + int res; + __u32 crc; + + if (memcmp(h->h_signature, GPT_SIGNATURE, strlen(GPT_SIGNATURE)) != 0) { + printk(" bad GPT signature \"%c%c%c%c%c%c%c%c\";", + h->h_signature[0], h->h_signature[1], h->h_signature[2], h->h_signature[3], + h->h_signature[4], h->h_signature[5], h->h_signature[6], h->h_signature[7]); + return 1; + } + + crc = ether_crc_le_hole(h->h_header_size, (void *)h, + offsetof(struct gpt_disklabel_header, h_header_crc), sizeof(h->h_header_crc)) ^ ~0; + if (crc != h->h_header_crc) { + printk(" bad header CRC: %x != %x;", crc, h->h_header_crc); + return 2; + } + + if (h->h_lba_current != lba) { + printk(" current LBA mismatch: %lld != %lld;", h->h_lba_current, lba); + return 3; + } + + if (*pp) { + kfree((vm_offset_t)*pp, *pp_s); + *pp = NULL; + } + if ((res = gpt_read_part_table(pp, pp_s, dev, bsize, first_sector, h))) + return res; + + crc = ether_crc_le_hole(*pp_s, *pp, 0, 0) ^ ~0; + if (crc != h->h_part_table_crc) { + printk(" bad partition table CRC: %x != %x;", crc, h->h_part_table_crc); + return 4; + } + + for (int i = h->h_header_size; i < bsize; ++i) + res |= ((char*)h)[i]; + if (res) { + printk(" rest of GPT block dirty;"); + return 5; + } + + return 0; +} + +static void gpt_print_part_name(struct gpt_disklabel_part *p) +{ + for(int n = 0; n < sizeof(p->p_name) / sizeof(*p->p_name) && p->p_name[n]; ++n) + if(p->p_name[n] & ~0xFF) + printk("?"); /* Can't support all of Unicode, but don't print garbage at least... 
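GPT names are UTF-16LE; code points above 0xFF print as a question mark here, while the Latin-1 range goes straight to printk().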
*/ + else + printk("%c", p->p_name[n]); +} + +#ifdef DEBUG +static void gpt_print_guid(struct gpt_guid *guid) +{ + printk("%08X-%04X-%04X-%02X%02X-", guid->g_time_low, guid->g_time_mid, guid->g_time_high_version, guid->g_clock_sec_high, guid->g_clock_sec_low); + for (int i = 0; i < sizeof(guid->g_node_id); ++i) + printk("%02X", guid->g_node_id[i]); +} + +static void gpt_dump_header(struct gpt_disklabel_header *h) +{ + printk(" [h_signature: \"%c%c%c%c%c%c%c%c\"; ", + h->h_signature[0], h->h_signature[1], h->h_signature[2], h->h_signature[3], + h->h_signature[4], h->h_signature[5], h->h_signature[6], h->h_signature[7]); + printk("h_revision: %x; ", h->h_revision); + printk("h_header_size: %u; ", h->h_header_size); + printk("h_header_crc: %x; ", h->h_header_crc); + printk("h_reserved: %u; ", h->h_reserved); + printk("h_lba_current: %llu; ", h->h_lba_current); + printk("h_lba_backup: %llu; ", h->h_lba_backup); + printk("h_lba_usable_first: %llu; ", h->h_lba_usable_first); + printk("h_lba_usable_last: %llu; ", h->h_lba_usable_last); + printk("h_guid: "); gpt_print_guid(&h->h_guid); printk("; "); + printk("h_part_table_lba: %llu; ", h->h_part_table_lba); + printk("h_part_table_len: %u; ", h->h_part_table_len); + printk("h_part_table_crc: %x]", h->h_part_table_crc); +} + +static void gpt_dump_part(struct gpt_disklabel_part *p, int i) +{ + printk(" part#%d:[", i); + printk("p_type: "); gpt_print_guid(&p->p_type); + printk("; p_guid:"); gpt_print_guid(&p->p_guid); + printk("; p_lba_first: %llu", p->p_lba_first); + printk("; p_lba_last: %llu", p->p_lba_last); + printk("; p_attrs: %llx", p->p_attrs); + printk("; p_name: \""); gpt_print_part_name(p); printk("\"]"); +} +#else +static void gpt_dump_header(struct gpt_disklabel_header *h) {} +static void gpt_dump_part(struct gpt_disklabel_part *p, int i) {} +#endif + +static int gpt_partition(struct gendisk *hd, kdev_t dev, __u64 first_sector, int minor) +{ + struct buffer_head *bh; + struct gpt_disklabel_header *h; + void *pp = NULL; vm_size_t pp_s = 0; + int res, bsize = 512; + /* Note: this must be set by the driver; SCSI does -- + * only, in practice, it always sets this to 512, see sd_init() in sd.c */ + if (hardsect_size[MAJOR(dev)] && hardsect_size[MAJOR(dev)][MINOR(dev)]) + bsize = hardsect_size[MAJOR(dev)][MINOR(dev)]; + set_blocksize(dev,bsize); /* Must override read block size since GPT has pointers, stolen from amiga_partition(). 
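GPT stores its header and partition-array positions as absolute LBAs in units of the hardware sector size, so reads here have to use bsize; the default BLOCK_SIZE is restored at the done: label.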
*/ + if (!(bh = bread(dev, first_sector + 1, bsize))) { + printk("unable to read GPT"); + res = -1; + goto done; + } + + h = (struct gpt_disklabel_header *)bh->b_data; + gpt_dump_header(h); + + res = gpt_verify_header(&pp, &pp_s, dev, bsize, first_sector, 1, h); + if (res < 0) + goto done; + else if (res > 0) { + printk(" main GPT dirty, trying backup at %llu;", h->h_lba_backup); + __u64 lba = h->h_lba_backup; + brelse(bh); + + if (!(bh = bread(dev, first_sector + lba, bsize))) { + printk("unable to read backup GPT"); + res = -4; + goto done; + } + + h = (struct gpt_disklabel_header *)bh->b_data; + gpt_dump_header(h); + + res = gpt_verify_header(&pp, &pp_s, dev, bsize, first_sector, lba, h); + if (res < 0) + goto done; + else if (res > 0) { + printk(" backup GPT dirty as well; cowardly refusing to continue"); + res = -5; + goto done; + } + } + + /* At least one good GPT+array */ + + for(int i = 0; i < h->h_part_table_len; ++i, ++minor) { + struct gpt_disklabel_part *p = + (struct gpt_disklabel_part *) (pp + i * h->h_part_table_entry_size); + if(memcmp(&p->p_type, &GPT_GUID_TYPE_UNUSED, sizeof(struct gpt_guid)) == 0) + continue; + gpt_dump_part(p, i); + + if (minor > hd->max_nr * hd->max_p) { + printk(" [ignoring GPT partition %d \"", i); gpt_print_part_name(p); printk("\": too many partitions (max %d)]", hd->max_p); + } else { + add_partition(hd, minor, first_sector + p->p_lba_first, p->p_lba_last - p->p_lba_first + 1); + if(p->p_name[0]) { + printk(" ("); gpt_print_part_name(p); printk(")"); + } + } + } + +done: + brelse(bh); + set_blocksize(dev,BLOCK_SIZE); + kfree((vm_offset_t)pp, pp_s); + printk("\n"); + return !res; +} +#endif + +static int msdos_partition(struct gendisk *hd, kdev_t dev, unsigned long first_sector) +{ + int i, minor = current_minor; + struct buffer_head *bh; + struct partition *p; + unsigned char *data; + int mask = (1 << hd->minor_shift) - 1; +#ifdef CONFIG_BLK_DEV_IDE + int tested_for_xlate = 0; + +read_mbr: +#endif + if (!(bh = bread(dev,0,1024))) { + printk(" unable to read partition table\n"); + return -1; + } + data = (unsigned char *)bh->b_data; + /* In some cases we modify the geometry */ + /* of the drive (below), so ensure that */ + /* nobody else tries to re-use this data. */ + bh->b_state = 0; +#ifdef CONFIG_BLK_DEV_IDE +check_table: +#endif + if (*(unsigned short *) (0x1fe + data) != 0xAA55) { + brelse(bh); + return 0; + } + p = (struct partition *) (0x1be + data); + +#ifdef CONFIG_BLK_DEV_IDE + if (!tested_for_xlate++) { /* Do this only once per disk */ + /* + * Look for various forms of IDE disk geometry translation + */ + extern int ide_xlate_1024(kdev_t, int, const char *); + unsigned int sig = *(unsigned short *)(data + 2); + if (SYS_IND(p) == EZD_PARTITION) { + /* + * The remainder of the disk must be accessed using + * a translated geometry that reduces the number of + * apparent cylinders to less than 1024 if possible. + * + * ide_xlate_1024() will take care of the necessary + * adjustments to fool fdisk/LILO and partition check. + */ + if (ide_xlate_1024(dev, -1, " [EZD]")) { + data += 512; + goto check_table; + } + } else if (SYS_IND(p) == DM6_PARTITION) { + + /* + * Everything on the disk is offset by 63 sectors, + * including a "new" MBR with its own partition table, + * and the remainder of the disk must be accessed using + * a translated geometry that reduces the number of + * apparent cylinders to less than 1024 if possible. + * + * ide_xlate_1024() will take care of the necessary + * adjustments to fool fdisk/LILO and partition check. 
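+ * (If the translation is accepted, the code below jumps back to read_mbr + * so the scan restarts on the relocated partition table.)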
+ */ + if (ide_xlate_1024(dev, 1, " [DM6:DDO]")) { + brelse(bh); + goto read_mbr; /* start over with new MBR */ + } + } else if (sig <= 0x1ae && *(unsigned short *)(data + sig) == 0x55AA + && (1 & *(unsigned char *)(data + sig + 2)) ) + { + /* + * DM6 signature in MBR, courtesy of OnTrack + */ + (void) ide_xlate_1024 (dev, 0, " [DM6:MBR]"); + } else if (SYS_IND(p) == DM6_AUX1PARTITION || SYS_IND(p) == DM6_AUX3PARTITION) { + /* + * DM6 on other than the first (boot) drive + */ + (void) ide_xlate_1024(dev, 0, " [DM6:AUX]"); + } else { + /* + * Examine the partition table for common translations. + * This is necessary for drives for situations where + * the translated geometry is unavailable from the BIOS. + */ + for (i = 0; i < 4 ; i++) { + struct partition *q = &p[i]; + if (NR_SECTS(q) + && (q->sector & 63) == 1 + && (q->end_sector & 63) == 63) { + unsigned int heads = q->end_head + 1; + if (heads == 32 || heads == 64 || heads == 128 || heads == 255) { + + (void) ide_xlate_1024(dev, heads, " [PTBL]"); + break; + } + } + } + } + } +#endif /* CONFIG_BLK_DEV_IDE */ + + current_minor += 4; /* first "extra" minor (for extended partitions) */ + for (i=1 ; i<=4 ; minor++,i++,p++) { + if (!NR_SECTS(p)) + continue; +#ifdef CONFIG_GPT_DISKLABEL + if (SYS_IND(p) == GPT_PARTITION) { + brelse(bh); + return gpt_partition(hd, dev, first_sector, minor); + } else +#endif + add_partition(hd, minor, first_sector+START_SECT(p), NR_SECTS(p)); + if (is_extended_partition(p)) { + printk(" <"); + /* + * If we are rereading the partition table, we need + * to set the size of the partition so that we will + * be able to bread the block containing the extended + * partition info. + */ + hd->sizes[minor] = hd->part[minor].nr_sects + >> (BLOCK_SIZE_BITS - 9); + extended_partition(hd, MKDEV(hd->major, minor)); + printk(" >"); + /* prevent someone doing mkfs or mkswap on an + extended partition, but leave room for LILO */ + if (hd->part[minor].nr_sects > 2) + hd->part[minor].nr_sects = 2; + } +#ifdef CONFIG_BSD_DISKLABEL + if (SYS_IND(p) == BSD_PARTITION) { + printk(" <"); +#ifdef MACH + mach_minor = minor; +#endif + bsd_disklabel_partition(hd, MKDEV(hd->major, minor)); + printk(" >"); + } +#endif + } + /* + * Check for old-style Disk Manager partition table + */ + if (*(unsigned short *) (data+0xfc) == 0x55AA) { + p = (struct partition *) (0x1be + data); + for (i = 4 ; i < 16 ; i++, current_minor++) { + p--; + if ((current_minor & mask) == 0) + break; + if (!(START_SECT(p) && NR_SECTS(p))) + continue; + add_partition(hd, current_minor, START_SECT(p), NR_SECTS(p)); + } + } + printk("\n"); + brelse(bh); + return 1; +} + +#endif /* CONFIG_MSDOS_PARTITION */ + +#ifdef CONFIG_OSF_PARTITION + +static int osf_partition(struct gendisk *hd, unsigned int dev, unsigned long first_sector) +{ + int i; + int mask = (1 << hd->minor_shift) - 1; + struct buffer_head *bh; + struct disklabel { + u32 d_magic; + u16 d_type,d_subtype; + u8 d_typename[16]; + u8 d_packname[16]; + u32 d_secsize; + u32 d_nsectors; + u32 d_ntracks; + u32 d_ncylinders; + u32 d_secpercyl; + u32 d_secprtunit; + u16 d_sparespertrack; + u16 d_sparespercyl; + u32 d_acylinders; + u16 d_rpm, d_interleave, d_trackskew, d_cylskew; + u32 d_headswitch, d_trkseek, d_flags; + u32 d_drivedata[5]; + u32 d_spare[5]; + u32 d_magic2; + u16 d_checksum; + u16 d_npartitions; + u32 d_bbsize, d_sbsize; + struct d_partition { + u32 p_size; + u32 p_offset; + u32 p_fsize; + u8 p_fstype; + u8 p_frag; + u16 p_cpg; + } d_partitions[8]; + } * label; + struct d_partition * partition; +#define 
DISKLABELMAGIC (0x82564557UL) + + if (!(bh = bread(dev,0,1024))) { + printk("unable to read partition table\n"); + return -1; + } + label = (struct disklabel *) (bh->b_data+64); + partition = label->d_partitions; + if (label->d_magic != DISKLABELMAGIC) { + printk("magic: %08x\n", label->d_magic); + brelse(bh); + return 0; + } + if (label->d_magic2 != DISKLABELMAGIC) { + printk("magic2: %08x\n", label->d_magic2); + brelse(bh); + return 0; + } + for (i = 0 ; i < label->d_npartitions; i++, partition++) { + if ((current_minor & mask) == 0) + break; + if (partition->p_size) + add_partition(hd, current_minor, + first_sector+partition->p_offset, + partition->p_size); + current_minor++; + } + printk("\n"); + brelse(bh); + return 1; +} + +#endif /* CONFIG_OSF_PARTITION */ + +#ifdef CONFIG_SUN_PARTITION + +static int sun_partition(struct gendisk *hd, kdev_t dev, unsigned long first_sector) +{ + int i, csum; + unsigned short *ush; + struct buffer_head *bh; + struct sun_disklabel { + unsigned char info[128]; /* Informative text string */ + unsigned char spare[292]; /* Boot information etc. */ + unsigned short rspeed; /* Disk rotational speed */ + unsigned short pcylcount; /* Physical cylinder count */ + unsigned short sparecyl; /* extra sects per cylinder */ + unsigned char spare2[4]; /* More magic... */ + unsigned short ilfact; /* Interleave factor */ + unsigned short ncyl; /* Data cylinder count */ + unsigned short nacyl; /* Alt. cylinder count */ + unsigned short ntrks; /* Tracks per cylinder */ + unsigned short nsect; /* Sectors per track */ + unsigned char spare3[4]; /* Even more magic... */ + struct sun_partition { + __u32 start_cylinder; + __u32 num_sectors; + } partitions[8]; + unsigned short magic; /* Magic number */ + unsigned short csum; /* Label xor'd checksum */ + } * label; + struct sun_partition *p; + int other_endian; + unsigned long spc; +#define SUN_LABEL_MAGIC 0xDABE +#define SUN_LABEL_MAGIC_SWAPPED 0xBEDA +/* No need to optimize these macros since they are called only when reading + * the partition table. This occurs only at each disk change. */ +#define SWAP16(x) (other_endian ? (((__u16)(x) & 0xFF) << 8) \ + | (((__u16)(x) & 0xFF00) >> 8) \ + : (__u16)(x)) +#define SWAP32(x) (other_endian ? (((__u32)(x) & 0xFF) << 24) \ + | (((__u32)(x) & 0xFF00) << 8) \ + | (((__u32)(x) & 0xFF0000) >> 8) \ + | (((__u32)(x) & 0xFF000000) >> 24) \ + : (__u32)(x)) + + if(!(bh = bread(dev, 0, 1024))) { + printk("Dev %s: unable to read partition table\n", + kdevname(dev)); + return -1; + } + label = (struct sun_disklabel *) bh->b_data; + p = label->partitions; + if (label->magic != SUN_LABEL_MAGIC && label->magic != SUN_LABEL_MAGIC_SWAPPED) { + printk("Dev %s Sun disklabel: bad magic %04x\n", + kdevname(dev), label->magic); + brelse(bh); + return 0; + } + other_endian = (label->magic == SUN_LABEL_MAGIC_SWAPPED); + /* Look at the checksum */ + ush = ((unsigned short *) (label+1)) - 1; + for(csum = 0; ush >= ((unsigned short *) label);) + csum ^= *ush--; + if(csum) { + printk("Dev %s Sun disklabel: Csum bad, label corrupted\n", + kdevname(dev)); + brelse(bh); + return 0; + } + /* All Sun disks have 8 partition entries */ + spc = SWAP16(label->ntrks) * SWAP16(label->nsect); + for(i=0; i < 8; i++, p++) { + unsigned long st_sector; + + /* We register all partitions, even if zero size, so that + * the minor numbers end up ok as per SunOS interpretation. 
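+	 *
+	 * (Editorial note, not in the original comment: a Sun label stores
+	 * each partition's start as a cylinder number, so the absolute start
+	 * sector computed below is
+	 *	first_sector + start_cylinder * ntrks * nsect
+	 * using spc = ntrks * nsect from just above.)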
+ */ + st_sector = first_sector + SWAP32(p->start_cylinder) * spc; + add_partition(hd, current_minor, st_sector, SWAP32(p->num_sectors)); + current_minor++; + } + printk("\n"); + brelse(bh); + return 1; +#undef SWAP16 +#undef SWAP32 +} + +#endif /* CONFIG_SUN_PARTITION */ + +#ifdef CONFIG_AMIGA_PARTITION +#include <asm/byteorder.h> +#include <linux/affs_hardblocks.h> + +static __inline__ __u32 +checksum_block(__u32 *m, int size) +{ + __u32 sum = 0; + + while (size--) + sum += htonl(*m++); + return sum; +} + +static int +amiga_partition(struct gendisk *hd, unsigned int dev, unsigned long first_sector) +{ + struct buffer_head *bh; + struct RigidDiskBlock *rdb; + struct PartitionBlock *pb; + int start_sect; + int nr_sects; + int blk; + int part, res; + + set_blocksize(dev,512); + res = 0; + + for (blk = 0; blk < RDB_ALLOCATION_LIMIT; blk++) { + if(!(bh = bread(dev,blk,512))) { + printk("Dev %d: unable to read RDB block %d\n",dev,blk); + goto rdb_done; + } + if (*(__u32 *)bh->b_data == htonl(IDNAME_RIGIDDISK)) { + rdb = (struct RigidDiskBlock *)bh->b_data; + if (checksum_block((__u32 *)bh->b_data,htonl(rdb->rdb_SummedLongs) & 0x7F)) { + printk("Dev %d: RDB in block %d has bad checksum\n",dev,blk); + brelse(bh); + continue; + } + printk(" RDSK"); + blk = htonl(rdb->rdb_PartitionList); + brelse(bh); + for (part = 1; blk > 0 && part <= 16; part++) { + if (!(bh = bread(dev,blk,512))) { + printk("Dev %d: unable to read partition block %d\n", + dev,blk); + goto rdb_done; + } + pb = (struct PartitionBlock *)bh->b_data; + blk = htonl(pb->pb_Next); + if (pb->pb_ID == htonl(IDNAME_PARTITION) && checksum_block( + (__u32 *)pb,htonl(pb->pb_SummedLongs) & 0x7F) == 0 ) { + + /* Tell Kernel about it */ + + if (!(nr_sects = (htonl(pb->pb_Environment[10]) + 1 - + htonl(pb->pb_Environment[9])) * + htonl(pb->pb_Environment[3]) * + htonl(pb->pb_Environment[5]))) { + continue; + } + start_sect = htonl(pb->pb_Environment[9]) * + htonl(pb->pb_Environment[3]) * + htonl(pb->pb_Environment[5]); + add_partition(hd,current_minor,start_sect,nr_sects); + current_minor++; + res = 1; + } + brelse(bh); + } + printk("\n"); + break; + } + } + +rdb_done: + set_blocksize(dev,BLOCK_SIZE); + return res; +} +#endif /* CONFIG_AMIGA_PARTITION */ + +static void check_partition(struct gendisk *hd, kdev_t dev) +{ + static int first_time = 1; + unsigned long first_sector; + char buf[8]; + + if (first_time) + printk("Partition check (DOS partitions):\n"); + first_time = 0; + first_sector = hd->part[MINOR(dev)].start_sect; + + /* + * This is a kludge to allow the partition check to be + * skipped for specific drives (e.g. IDE cd-rom drives) + */ + if ((int)first_sector == -1) { + hd->part[MINOR(dev)].start_sect = 0; + return; + } + + printk(" %s:", disk_name(hd, MINOR(dev), buf)); +#ifdef CONFIG_MSDOS_PARTITION + if (msdos_partition(hd, dev, first_sector)) + return; +#endif +#ifdef CONFIG_OSF_PARTITION + if (osf_partition(hd, dev, first_sector)) + return; +#endif +#ifdef CONFIG_SUN_PARTITION + if(sun_partition(hd, dev, first_sector)) + return; +#endif +#ifdef CONFIG_AMIGA_PARTITION + if(amiga_partition(hd, dev, first_sector)) + return; +#endif + printk(" unknown partition table\n"); +} + +/* This function is used to re-read partition tables for removable disks. + Much of the cleanup from the old partition tables should have already been + done */ + +/* This function will re-read the partition tables for a given device, +and set things back up again. There are some important caveats, +however. 
You must ensure that no one is using the device, and no one +can start using the device while this function is being executed. */ + +void resetup_one_dev(struct gendisk *dev, int drive) +{ + int i; + int first_minor = drive << dev->minor_shift; + int end_minor = first_minor + dev->max_p; + + blk_size[dev->major] = NULL; + current_minor = 1 + first_minor; + check_partition(dev, MKDEV(dev->major, first_minor)); + + /* + * We need to set the sizes array before we will be able to access + * any of the partitions on this device. + */ + if (dev->sizes != NULL) { /* optional safeguard in ll_rw_blk.c */ + for (i = first_minor; i < end_minor; i++) + dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9); + blk_size[dev->major] = dev->sizes; + } +} + +static void setup_dev(struct gendisk *dev) +{ + int i, drive; + int end_minor = dev->max_nr * dev->max_p; + + blk_size[dev->major] = NULL; + for (i = 0 ; i < end_minor; i++) { + dev->part[i].start_sect = 0; + dev->part[i].nr_sects = 0; + } + dev->init(dev); + for (drive = 0 ; drive < dev->nr_real ; drive++) { + int first_minor = drive << dev->minor_shift; + current_minor = 1 + first_minor; + check_partition(dev, MKDEV(dev->major, first_minor)); + } + if (dev->sizes != NULL) { /* optional safeguard in ll_rw_blk.c */ + for (i = 0; i < end_minor; i++) + dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9); + blk_size[dev->major] = dev->sizes; + } +} + +void device_setup(void) +{ + extern void console_map_init(void); + struct gendisk *p; + int nr=0; + +#ifdef CONFIG_BLK_DEV_IDE + extern char *kernel_cmdline; + char *c, *param, *white; + + for (c = kernel_cmdline; c; ) + { + param = strstr(c, " ide"); + if (!param) + param = strstr(c, " hd"); + if (!param) + break; + if (param) { + param++; + white = strchr(param, ' '); + if (!white) { + ide_setup(param); + c = NULL; + } else { + char *word = alloca(white - param + 1); + strncpy(word, param, white - param); + word[white-param] = '\0'; + ide_setup(word); + c = white + 1; + } + } + } +#endif +#ifndef MACH + chr_dev_init(); +#endif + blk_dev_init(); + sti(); +#ifdef CONFIG_SCSI + scsi_dev_init(); +#endif +#ifdef CONFIG_INET + net_dev_init(); +#endif +#ifndef MACH + console_map_init(); +#endif + + for (p = gendisk_head ; p ; p=p->next) { + setup_dev(p); + nr += p->nr_real; + } +#ifdef CONFIG_BLK_DEV_RAM +#ifdef CONFIG_BLK_DEV_INITRD + if (initrd_start && mount_initrd) initrd_load(); + else +#endif + rd_load(); +#endif +} diff --git a/linux/dev/drivers/net/Space.c b/linux/dev/drivers/net/Space.c new file mode 100644 index 0000000..213fa9b --- /dev/null +++ b/linux/dev/drivers/net/Space.c @@ -0,0 +1,582 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Holds initial configuration information for devices. + * + * NOTE: This file is a nice idea, but its current format does not work + * well for drivers that support multiple units, like the SLIP + * driver. We should actually have only one pointer to a driver + * here, with the driver knowing how many units it supports. + * Currently, the SLIP driver abuses the "base_addr" integer + * field of the 'device' structure to store the unit number... + * -FvK + * + * Version: @(#)Space.c 1.0.8 07/31/96 + * + * Authors: Ross Biro, <bir7@leland.Stanford.Edu> + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Donald J. 
Becker, <becker@super.org> + * + * FIXME: + * Sort the device chain fastest first. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include <linux/config.h> +#include <linux/netdevice.h> +#include <linux/errno.h> + +#define NEXT_DEV NULL + + +/* A unified ethernet device probe. This is the easiest way to have every + ethernet adaptor have the name "eth[0123...]". + */ + +extern int tulip_probe(struct device *dev); +extern int hp100_probe(struct device *dev); +extern int ultra_probe(struct device *dev); +extern int ultra32_probe(struct device *dev); +extern int wd_probe(struct device *dev); +extern int el2_probe(struct device *dev); +extern int ne_probe(struct device *dev); +extern int ne2k_pci_probe(struct device *dev); +extern int hp_probe(struct device *dev); +extern int hp_plus_probe(struct device *dev); +extern int znet_probe(struct device *); +extern int express_probe(struct device *); +extern int eepro_probe(struct device *); +extern int el3_probe(struct device *); +extern int at1500_probe(struct device *); +extern int at1700_probe(struct device *); +extern int fmv18x_probe(struct device *); +extern int eth16i_probe(struct device *); +extern int depca_probe(struct device *); +extern int apricot_probe(struct device *); +extern int ewrk3_probe(struct device *); +extern int de4x5_probe(struct device *); +extern int el1_probe(struct device *); +extern int via_rhine_probe(struct device *); +extern int natsemi_probe(struct device *); +extern int ns820_probe(struct device *); +extern int winbond840_probe(struct device *); +extern int hamachi_probe(struct device *); +extern int sundance_probe(struct device *); +extern int starfire_probe(struct device *); +extern int myson803_probe(struct device *); +extern int igige_probe(struct device *); +#if defined(CONFIG_WAVELAN) +extern int wavelan_probe(struct device *); +#endif /* defined(CONFIG_WAVELAN) */ +extern int el16_probe(struct device *); +extern int elplus_probe(struct device *); +extern int ac3200_probe(struct device *); +extern int e2100_probe(struct device *); +extern int ni52_probe(struct device *); +extern int ni65_probe(struct device *); +extern int SK_init(struct device *); +extern int seeq8005_probe(struct device *); +extern int tc59x_probe(struct device *); +extern int dgrs_probe(struct device *); +extern int smc_init( struct device * ); +extern int sparc_lance_probe(struct device *); +extern int atarilance_probe(struct device *); +extern int a2065_probe(struct device *); +extern int ariadne_probe(struct device *); +extern int hydra_probe(struct device *); +extern int yellowfin_probe(struct device *); +extern int eepro100_probe(struct device *); +extern int epic100_probe(struct device *); +extern int rtl8139_probe(struct device *); +extern int sis900_probe(struct device *); +extern int tlan_probe(struct device *); +extern int isa515_probe(struct device *); +extern int pcnet32_probe(struct device *); +extern int lance_probe(struct device *); +/* Detachable devices ("pocket adaptors") */ +extern int atp_init(struct device *); +extern int de600_probe(struct device *); +extern int de620_probe(struct device *); +extern int tc515_probe(struct device *); + +static int +ethif_probe(struct device *dev) +{ + u_long base_addr = dev->base_addr; + + if ((base_addr == 0xffe0) || (base_addr == 1)) + return 1; /* ENXIO */ + + if (1 + 
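+	    /* (Editorial note, not in the original source: each probe routine
+	     * returns 0 once it finds and claims a card, so this long "&&"
+	     * chain short-circuits at the first successful driver; only when
+	     * every configured probe fails does ethif_probe() return 1.) */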
/* All PCI probes are safe, and thus should be first. */ +#ifdef CONFIG_DE4X5 /* DEC DE425, DE434, DE435 adapters */ + && de4x5_probe(dev) +#endif +#ifdef CONFIG_DGRS + && dgrs_probe(dev) +#endif +#ifdef CONFIG_EEXPRESS_PRO100B /* Intel EtherExpress Pro100B */ + && eepro100_probe(dev) +#endif +#ifdef CONFIG_EPIC + && epic100_probe(dev) +#endif +#if defined(CONFIG_HP100) + && hp100_probe(dev) +#endif +#if defined(CONFIG_NE2K_PCI) + && ne2k_pci_probe(dev) +#endif +#ifdef CONFIG_PCNET32 + && pcnet32_probe(dev) +#endif +#ifdef CONFIG_RTL8139 + && rtl8139_probe(dev) +#endif +#ifdef CONFIG_SIS900 + && sis900_probe(dev) +#endif +#ifdef CONFIG_VIA_RHINE + && via_rhine_probe(dev) +#endif +#ifdef CONFIG_NATSEMI + && natsemi_probe(dev) +#endif +#ifdef CONFIG_NS820 + && ns820_probe(dev) +#endif +#ifdef CONFIG_WINBOND840 + && winbond840_probe(dev) +#endif +#ifdef CONFIG_HAMACHI + && hamachi_probe(dev) +#endif +#ifdef CONFIG_SUNDANCE + && sundance_probe(dev) +#endif +#ifdef CONFIG_STARFIRE + && starfire_probe(dev) +#endif +#ifdef CONFIG_MYSON803 + && myson803_probe(dev) +#endif +#ifdef CONFIG_INTEL_GIGE + && igige_probe(dev) +#endif +#if defined(CONFIG_DEC_ELCP) + && tulip_probe(dev) +#endif +#ifdef CONFIG_YELLOWFIN + && yellowfin_probe(dev) +#endif + /* Next mostly-safe EISA-only drivers. */ +#ifdef CONFIG_AC3200 /* Ansel Communications EISA 3200. */ + && ac3200_probe(dev) +#endif +#if defined(CONFIG_ULTRA32) + && ultra32_probe(dev) +#endif + /* Third, sensitive ISA boards. */ +#ifdef CONFIG_AT1700 + && at1700_probe(dev) +#endif +#if defined(CONFIG_ULTRA) + && ultra_probe(dev) +#endif +#if defined(CONFIG_SMC9194) + && smc_init(dev) +#endif +#if defined(CONFIG_WD80x3) + && wd_probe(dev) +#endif +#if defined(CONFIG_EL2) /* 3c503 */ + && el2_probe(dev) +#endif +#if defined(CONFIG_HPLAN) + && hp_probe(dev) +#endif +#if defined(CONFIG_HPLAN_PLUS) + && hp_plus_probe(dev) +#endif +#if defined(CONFIG_SEEQ8005) + && seeq8005_probe(dev) +#endif +#ifdef CONFIG_E2100 /* Cabletron E21xx series. */ + && e2100_probe(dev) +#endif +#if defined(CONFIG_NE2000) + && ne_probe(dev) +#endif +#ifdef CONFIG_AT1500 + && at1500_probe(dev) +#endif +#ifdef CONFIG_FMV18X /* Fujitsu FMV-181/182 */ + && fmv18x_probe(dev) +#endif +#ifdef CONFIG_ETH16I + && eth16i_probe(dev) /* ICL EtherTeam 16i/32 */ +#endif +#ifdef CONFIG_EL3 /* 3c509 */ + && el3_probe(dev) +#endif +#if defined(CONFIG_VORTEX) + && tc59x_probe(dev) +#endif +#ifdef CONFIG_3C515 /* 3c515 */ + && tc515_probe(dev) +#endif +#ifdef CONFIG_ZNET /* Zenith Z-Note and some IBM Thinkpads. 
*/ + && znet_probe(dev) +#endif +#ifdef CONFIG_EEXPRESS /* Intel EtherExpress */ + && express_probe(dev) +#endif +#ifdef CONFIG_EEXPRESS_PRO /* Intel EtherExpress Pro/10 */ + && eepro_probe(dev) +#endif +#ifdef CONFIG_DEPCA /* DEC DEPCA */ + && depca_probe(dev) +#endif +#ifdef CONFIG_EWRK3 /* DEC EtherWORKS 3 */ + && ewrk3_probe(dev) +#endif +#ifdef CONFIG_APRICOT /* Apricot I82596 */ + && apricot_probe(dev) +#endif +#ifdef CONFIG_EL1 /* 3c501 */ + && el1_probe(dev) +#endif +#if defined(CONFIG_WAVELAN) /* WaveLAN */ + && wavelan_probe(dev) +#endif /* defined(CONFIG_WAVELAN) */ +#ifdef CONFIG_EL16 /* 3c507 */ + && el16_probe(dev) +#endif +#ifdef CONFIG_ELPLUS /* 3c505 */ + && elplus_probe(dev) +#endif +#ifdef CONFIG_DE600 /* D-Link DE-600 adapter */ + && de600_probe(dev) +#endif +#ifdef CONFIG_DE620 /* D-Link DE-620 adapter */ + && de620_probe(dev) +#endif +#if defined(CONFIG_SK_G16) + && SK_init(dev) +#endif +#ifdef CONFIG_NI52 + && ni52_probe(dev) +#endif +#ifdef CONFIG_NI65 + && ni65_probe(dev) +#endif +#ifdef CONFIG_LANCE /* ISA LANCE boards */ + && lance_probe(dev) +#endif +#ifdef CONFIG_ATARILANCE /* Lance-based Atari ethernet boards */ + && atarilance_probe(dev) +#endif +#ifdef CONFIG_A2065 /* Commodore/Ameristar A2065 Ethernet Board */ + && a2065_probe(dev) +#endif +#ifdef CONFIG_ARIADNE /* Village Tronic Ariadne Ethernet Board */ + && ariadne_probe(dev) +#endif +#ifdef CONFIG_HYDRA /* Hydra Systems Amiganet Ethernet board */ + && hydra_probe(dev) +#endif +#ifdef CONFIG_SUNLANCE + && sparc_lance_probe(dev) +#endif +#ifdef CONFIG_TLAN + && tlan_probe(dev) +#endif +#ifdef CONFIG_LANCE + && lance_probe(dev) +#endif + && 1 ) { + return 1; /* -ENODEV or -EAGAIN would be more accurate. */ + } + return 0; +} + +#ifdef CONFIG_SDLA + extern int sdla_init(struct device *); + static struct device sdla0_dev = { "sdla0", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, sdla_init, }; + +# undef NEXT_DEV +# define NEXT_DEV (&sdla0_dev) +#endif + +#ifdef CONFIG_NETROM + extern int nr_init(struct device *); + + static struct device nr3_dev = { "nr3", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, nr_init, }; + static struct device nr2_dev = { "nr2", 0, 0, 0, 0, 0, 0, 0, 0, 0, &nr3_dev, nr_init, }; + static struct device nr1_dev = { "nr1", 0, 0, 0, 0, 0, 0, 0, 0, 0, &nr2_dev, nr_init, }; + static struct device nr0_dev = { "nr0", 0, 0, 0, 0, 0, 0, 0, 0, 0, &nr1_dev, nr_init, }; + +# undef NEXT_DEV +# define NEXT_DEV (&nr0_dev) +#endif + +/* Run-time ATtachable (Pocket) devices have a different (not "eth#") name. */ +#ifdef CONFIG_ATP /* AT-LAN-TEC (RealTek) pocket adaptor. */ +static struct device atp_dev = { + "atp0", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, atp_init, /* ... */ }; +# undef NEXT_DEV +# define NEXT_DEV (&atp_dev) +#endif + +#ifdef CONFIG_ARCNET + extern int arcnet_probe(struct device *dev); + static struct device arcnet_dev = { + "arc0", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, arcnet_probe, }; +# undef NEXT_DEV +# define NEXT_DEV (&arcnet_dev) +#endif + +/* In Mach, by default allow at least 2 interfaces. */ +#ifdef MACH +#ifndef ETH1_ADDR +# define ETH1_ADDR 0 +#endif +#ifndef ETH1_IRQ +# define ETH1_IRQ 0 +#endif +#endif + +/* The first device defaults to I/O base '0', which means autoprobe. */ +#ifndef ETH0_ADDR +# define ETH0_ADDR 0 +#endif +#ifndef ETH0_IRQ +# define ETH0_IRQ 0 +#endif +/* "eth0" defaults to autoprobe (== 0), other use a base of 0xffe0 (== -0x20), + which means "don't probe". These entries exist to only to provide empty + slots which may be enabled at boot-time. 
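+
+   (Editorial note, not part of the original comment: ethif_probe() above
+   bails out immediately when base_addr is 0xffe0 or 1, so eth2..eth7 stay
+   inert until a boot-time option replaces that placeholder with a real I/O
+   base; eth0 -- and, under MACH, eth1 -- default to 0 and are autoprobed.)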
*/ + +static struct device eth7_dev = { + "eth7", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, NEXT_DEV, ethif_probe }; +static struct device eth6_dev = { + "eth6", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, ð7_dev, ethif_probe }; +static struct device eth5_dev = { + "eth5", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, ð6_dev, ethif_probe }; +static struct device eth4_dev = { + "eth4", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, ð5_dev, ethif_probe }; +static struct device eth3_dev = { + "eth3", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, ð4_dev, ethif_probe }; +static struct device eth2_dev = { + "eth2", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, ð3_dev, ethif_probe }; + +#ifdef MACH +static struct device eth1_dev = { + "eth1", 0, 0, 0, 0, ETH1_ADDR, ETH1_IRQ, 0, 0, 0, ð2_dev, ethif_probe }; +#else +static struct device eth1_dev = { + "eth1", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, ð2_dev, ethif_probe }; +#endif + +static struct device eth0_dev = { + "eth0", 0, 0, 0, 0, ETH0_ADDR, ETH0_IRQ, 0, 0, 0, ð1_dev, ethif_probe }; + +# undef NEXT_DEV +# define NEXT_DEV (ð0_dev) + +#if defined(PLIP) || defined(CONFIG_PLIP) + extern int plip_init(struct device *); + static struct device plip2_dev = { + "plip2", 0, 0, 0, 0, 0x278, 2, 0, 0, 0, NEXT_DEV, plip_init, }; + static struct device plip1_dev = { + "plip1", 0, 0, 0, 0, 0x378, 7, 0, 0, 0, &plip2_dev, plip_init, }; + static struct device plip0_dev = { + "plip0", 0, 0, 0, 0, 0x3BC, 5, 0, 0, 0, &plip1_dev, plip_init, }; +# undef NEXT_DEV +# define NEXT_DEV (&plip0_dev) +#endif /* PLIP */ + +#if defined(SLIP) || defined(CONFIG_SLIP) + /* To be exact, this node just hooks the initialization + routines to the device structures. */ +extern int slip_init_ctrl_dev(struct device *); +static struct device slip_bootstrap = { + "slip_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, slip_init_ctrl_dev, }; +#undef NEXT_DEV +#define NEXT_DEV (&slip_bootstrap) +#endif /* SLIP */ + +#if defined(CONFIG_STRIP) +extern int strip_init_ctrl_dev(struct device *); +static struct device strip_bootstrap = { + "strip_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, strip_init_ctrl_dev, }; +#undef NEXT_DEV +#define NEXT_DEV (&strip_bootstrap) +#endif /* STRIP */ + +#if defined(CONFIG_PPP) +extern int ppp_init(struct device *); +static struct device ppp_bootstrap = { + "ppp_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, ppp_init, }; +#undef NEXT_DEV +#define NEXT_DEV (&ppp_bootstrap) +#endif /* PPP */ + +#ifdef CONFIG_DUMMY + extern int dummy_init(struct device *dev); + static struct device dummy_dev = { + "dummy", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, dummy_init, }; +# undef NEXT_DEV +# define NEXT_DEV (&dummy_dev) +#endif + +#ifdef CONFIG_EQUALIZER +extern int eql_init(struct device *dev); +struct device eql_dev = { + "eql", /* Master device for IP traffic load + balancing */ + 0x0, 0x0, 0x0, 0x0, /* recv end/start; mem end/start */ + 0, /* base I/O address */ + 0, /* IRQ */ + 0, 0, 0, /* flags */ + NEXT_DEV, /* next device */ + eql_init /* set up the rest */ +}; +# undef NEXT_DEV +# define NEXT_DEV (&eql_dev) +#endif + +#ifdef CONFIG_IBMTR + + extern int tok_probe(struct device *dev); + static struct device ibmtr_dev1 = { + "tr1", /* IBM Token Ring (Non-DMA) Interface */ + 0x0, /* recv memory end */ + 0x0, /* recv memory start */ + 0x0, /* memory end */ + 0x0, /* memory start */ + 0xa24, /* base I/O address */ + 0, /* IRQ */ + 0, 0, 0, /* flags */ + NEXT_DEV, /* next device */ + tok_probe /* ??? 
Token_init should set up the rest */ + }; +# undef NEXT_DEV +# define NEXT_DEV (&ibmtr_dev1) + + + static struct device ibmtr_dev0 = { + "tr0", /* IBM Token Ring (Non-DMA) Interface */ + 0x0, /* recv memory end */ + 0x0, /* recv memory start */ + 0x0, /* memory end */ + 0x0, /* memory start */ + 0xa20, /* base I/O address */ + 0, /* IRQ */ + 0, 0, 0, /* flags */ + NEXT_DEV, /* next device */ + tok_probe /* ??? Token_init should set up the rest */ + }; +# undef NEXT_DEV +# define NEXT_DEV (&ibmtr_dev0) + +#endif + +#ifdef CONFIG_DEFXX + extern int dfx_probe(struct device *dev); + static struct device fddi7_dev = + {"fddi7", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, dfx_probe}; + static struct device fddi6_dev = + {"fddi6", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi7_dev, dfx_probe}; + static struct device fddi5_dev = + {"fddi5", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi6_dev, dfx_probe}; + static struct device fddi4_dev = + {"fddi4", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi5_dev, dfx_probe}; + static struct device fddi3_dev = + {"fddi3", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi4_dev, dfx_probe}; + static struct device fddi2_dev = + {"fddi2", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi3_dev, dfx_probe}; + static struct device fddi1_dev = + {"fddi1", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi2_dev, dfx_probe}; + static struct device fddi0_dev = + {"fddi0", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi1_dev, dfx_probe}; + +#undef NEXT_DEV +#define NEXT_DEV (&fddi0_dev) +#endif + +#ifdef CONFIG_NET_IPIP + extern int tunnel_init(struct device *); + + static struct device tunnel_dev1 = + { + "tunl1", /* IPIP tunnel */ + 0x0, /* recv memory end */ + 0x0, /* recv memory start */ + 0x0, /* memory end */ + 0x0, /* memory start */ + 0x0, /* base I/O address */ + 0, /* IRQ */ + 0, 0, 0, /* flags */ + NEXT_DEV, /* next device */ + tunnel_init /* Fill in the details */ + }; + + static struct device tunnel_dev0 = + { + "tunl0", /* IPIP tunnel */ + 0x0, /* recv memory end */ + 0x0, /* recv memory start */ + 0x0, /* memory end */ + 0x0, /* memory start */ + 0x0, /* base I/O address */ + 0, /* IRQ */ + 0, 0, 0, /* flags */ + &tunnel_dev1, /* next device */ + tunnel_init /* Fill in the details */ + }; +# undef NEXT_DEV +# define NEXT_DEV (&tunnel_dev0) + +#endif + +#ifdef CONFIG_APFDDI + extern int apfddi_init(struct device *dev); + static struct device fddi_dev = { + "fddi", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, apfddi_init }; +# undef NEXT_DEV +# define NEXT_DEV (&fddi_dev) +#endif + +#ifdef CONFIG_APBIF + extern int bif_init(struct device *dev); + static struct device bif_dev = { + "bif", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, bif_init }; +# undef NEXT_DEV +# define NEXT_DEV (&bif_dev) +#endif + +#ifdef MACH +struct device *dev_base = ð0_dev; +#else +extern int loopback_init(struct device *dev); +struct device loopback_dev = { + "lo", /* Software Loopback interface */ + 0x0, /* recv memory end */ + 0x0, /* recv memory start */ + 0x0, /* memory end */ + 0x0, /* memory start */ + 0, /* base I/O address */ + 0, /* IRQ */ + 0, 0, 0, /* flags */ + NEXT_DEV, /* next device */ + loopback_init /* loopback_init should set up the rest */ +}; + +struct device *dev_base = &loopback_dev; +#endif diff --git a/linux/dev/drivers/net/auto_irq.c b/linux/dev/drivers/net/auto_irq.c new file mode 100644 index 0000000..73cfe34 --- /dev/null +++ b/linux/dev/drivers/net/auto_irq.c @@ -0,0 +1,123 @@ +/* auto_irq.c: Auto-configure IRQ lines for linux. */ +/* + Written 1994 by Donald Becker. 
+ + The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O + Center of Excellence in Space Data and Information Sciences + Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771 + + This code is a general-purpose IRQ line detector for devices with + jumpered IRQ lines. If you can make the device raise an IRQ (and + that IRQ line isn't already being used), these routines will tell + you what IRQ line it's using -- perfect for those oh-so-cool boot-time + device probes! + + To use this, first call autoirq_setup(timeout). TIMEOUT is how many + 'jiffies' (1/100 sec.) to detect other devices that have active IRQ lines, + and can usually be zero at boot. 'autoirq_setup()' returns the bit + vector of nominally-available IRQ lines (lines may be physically in-use, + but not yet registered to a device). + Next, set up your device to trigger an interrupt. + Finally call autoirq_report(TIMEOUT) to find out which IRQ line was + most recently active. The TIMEOUT should usually be zero, but may + be set to the number of jiffies to wait for a slow device to raise an IRQ. + + The idea of using the setup timeout to filter out bogus IRQs came from + the serial driver. + */ + + +#ifdef version +static const char *version = +"auto_irq.c:v1.11 Donald Becker (becker@cesdis.gsfc.nasa.gov)"; +#endif + +#include <sys/types.h> +#include <mach/mach_types.h> +#include <mach/vm_param.h> + +#define MACH_INCLUDE +#include <linux/sched.h> +#include <linux/delay.h> +#include <asm/bitops.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <linux/netdevice.h> + +void *irq2dev_map[NR_IRQS] = {0, 0, /* ... zeroed */ }; + +unsigned long irqs_busy = 0x2147; /* The set of fixed IRQs (keyboard, timer, etc) */ +unsigned long irqs_used = 0x0001; /* The set of fixed IRQs sometimes enabled. */ +unsigned long irqs_reserved = 0x0000; /* An advisory "reserved" table. */ +unsigned long irqs_shared = 0x0000; /* IRQ lines "shared" among conforming cards. */ + +static volatile unsigned long irq_bitmap; /* The irqs we actually found. */ +static unsigned long irq_handled; /* The irq lines we have a handler on. */ +static volatile int irq_number; /* The latest irq number we actually found. */ + +static void +autoirq_probe (int irq, void *dev_id, struct pt_regs *regs) +{ + irq_number = irq; + set_bit (irq, (void *) &irq_bitmap); /* irq_bitmap |= 1 << irq; */ + /* This code used to disable the irq. However, the interrupt stub + * would then re-enable the interrupt with (potentially) disastrous + * consequences + */ + free_irq (irq, dev_id); + return; +} + +int +autoirq_setup (int waittime) +{ + int i; + unsigned long timeout = jiffies + waittime; + unsigned long boguscount = (waittime * loops_per_sec) / 100; + + irq_handled = 0; + irq_bitmap = 0; + + for (i = 0; i < 16; i++) + { + if (test_bit (i, &irqs_busy) == 0 + && request_irq (i, autoirq_probe, SA_INTERRUPT, "irq probe", NULL) == 0) + set_bit (i, (void *) &irq_handled); /* irq_handled |= 1 << i; */ + } + /* Update our USED lists. */ + irqs_used |= ~irq_handled; + + /* Hang out at least <waittime> jiffies waiting for bogus IRQ hits. */ + while (timeout > jiffies && --boguscount > 0) + ; + + irq_handled &= ~irq_bitmap; + + irq_number = 0; /* We are interested in new interrupts from now on */ + + return irq_handled; +} + +int +autoirq_report (int waittime) +{ + int i; + unsigned long timeout = jiffies + waittime; + unsigned long boguscount = (waittime * loops_per_sec) / 100; + + /* Hang out at least <waittime> jiffies waiting for the IRQ. 
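+	 * (Editorial illustration, not in the original: per the usage notes at
+	 * the top of this file, a typical probe sequence is
+	 *	autoirq_setup(0);
+	 *	...poke the card so it raises its jumpered IRQ...
+	 *	dev->irq = autoirq_report(0);
+	 * and the loop below is the wait for that interrupt to arrive.)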
*/ + + while (timeout > jiffies && --boguscount > 0) + if (irq_number) + break; + + irq_handled &= ~irq_bitmap; /* This eliminates the already reset handlers */ + + /* Retract the irq handlers that we installed. */ + for (i = 0; i < 16; i++) + { + if (test_bit (i, (void *) &irq_handled)) + free_irq (i, NULL); + } + return irq_number; +} diff --git a/linux/dev/drivers/net/net_init.c b/linux/dev/drivers/net/net_init.c new file mode 100644 index 0000000..46dbb17 --- /dev/null +++ b/linux/dev/drivers/net/net_init.c @@ -0,0 +1,446 @@ +/* netdrv_init.c: Initialization for network devices. */ +/* + Written 1993,1994,1995 by Donald Becker. + + The author may be reached as becker@cesdis.gsfc.nasa.gov or + C/O Center of Excellence in Space Data and Information Sciences + Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771 + + This file contains the initialization for the "pl14+" style ethernet + drivers. It should eventually replace most of drivers/net/Space.c. + It's primary advantage is that it's able to allocate low-memory buffers. + A secondary advantage is that the dangerous NE*000 netcards can reserve + their I/O port region before the SCSI probes start. + + Modifications/additions by Bjorn Ekwall <bj0rn@blox.se>: + ethdev_index[MAX_ETH_CARDS] + register_netdev() / unregister_netdev() + + Modifications by Wolfgang Walter + Use dev_close cleanly so we always shut things down tidily. + + Changed 29/10/95, Alan Cox to pass sockaddr's around for mac addresses. + + 14/06/96 - Paul Gortmaker: Add generic eth_change_mtu() function. + + August 12, 1996 - Lawrence V. Stefani: Added fddi_change_mtu() and + fddi_setup() functions. + Sept. 10, 1996 - Lawrence V. Stefani: Increased hard_header_len to + include 3 pad bytes. +*/ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/malloc.h> +#include <linux/if_ether.h> +#include <linux/string.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/fddidevice.h> +#include <linux/trdevice.h> +#include <linux/if_arp.h> +#ifdef CONFIG_NET_ALIAS +#include <linux/net_alias.h> +#endif + +/* The network devices currently exist only in the socket namespace, so these + entries are unused. The only ones that make sense are + open start the ethercard + close stop the ethercard + ioctl To get statistics, perhaps set the interface port (AUI, BNC, etc.) + One can also imagine getting raw packets using + read & write + but this is probably better handled by a raw packet socket. + + Given that almost all of these functions are handled in the current + socket-based scheme, putting ethercard devices in /dev/ seems pointless. + + [Removed all support for /dev network devices. When someone adds + streams then by magic we get them, but otherwise they are un-needed + and a space waste] +*/ + +/* The list of used and available "eth" slots (for "eth0", "eth1", etc.) */ +#define MAX_ETH_CARDS 16 /* same as the number if irq's in irq2dev[] */ +static struct device *ethdev_index[MAX_ETH_CARDS]; + + +/* Fill in the fields of the device structure with ethernet-generic values. + + If no device structure is passed, a new one is constructed, complete with + a SIZEOF_PRIVATE private data area. + + If an empty string area is passed as dev->name, or a new structure is made, + a new name string is constructed. The passed string area should be 8 bytes + long. 
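+
+   (Editorial illustration, not part of the original comment: a typical
+   probe routine would use this roughly as
+	dev = init_etherdev(dev, sizeof(struct my_priv));
+	memcpy(dev->dev_addr, station_addr, ETH_ALEN);
+	dev->open = &my_open;
+   where struct my_priv, station_addr and my_open stand in for the
+   driver's own, hypothetical names.)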
+ */ + +struct device * +init_etherdev(struct device *dev, int sizeof_priv) +{ + int new_device = 0; + int i; + + /* Use an existing correctly named device in Space.c:dev_base. */ + if (dev == NULL) { + int alloc_size = sizeof(struct device) + sizeof("eth%d ") + + sizeof_priv + 3; + struct device *cur_dev; + char pname[8]; /* Putative name for the device. */ + + for (i = 0; i < MAX_ETH_CARDS; ++i) + if (ethdev_index[i] == NULL) { + sprintf(pname, "eth%d", i); + for (cur_dev = dev_base; cur_dev; cur_dev = cur_dev->next) + if (strcmp(pname, cur_dev->name) == 0) { + dev = cur_dev; + dev->init = NULL; + sizeof_priv = (sizeof_priv + 3) & ~3; + dev->priv = sizeof_priv + ? kmalloc(sizeof_priv, GFP_KERNEL) + : NULL; + if (dev->priv) memset(dev->priv, 0, sizeof_priv); + goto found; + } + } + + alloc_size &= ~3; /* Round to dword boundary. */ + + dev = (struct device *)kmalloc(alloc_size, GFP_KERNEL); + memset(dev, 0, alloc_size); + if (sizeof_priv) + dev->priv = (void *) (dev + 1); + dev->name = sizeof_priv + (char *)(dev + 1); + new_device = 1; + } + + found: /* From the double loop above. */ + + if (dev->name && + ((dev->name[0] == '\0') || (dev->name[0] == ' '))) { + for (i = 0; i < MAX_ETH_CARDS; ++i) + if (ethdev_index[i] == NULL) { + sprintf(dev->name, "eth%d", i); + ethdev_index[i] = dev; + break; + } + } + + ether_setup(dev); /* Hmmm, should this be called here? */ + + if (new_device) { + /* Append the device to the device queue. */ + struct device **old_devp = &dev_base; + while ((*old_devp)->next) + old_devp = & (*old_devp)->next; + (*old_devp)->next = dev; + dev->next = 0; + } + return dev; +} + + +static int eth_mac_addr(struct device *dev, void *p) +{ + struct sockaddr *addr=p; + if(dev->start) + return -EBUSY; + memcpy(dev->dev_addr, addr->sa_data,dev->addr_len); + return 0; +} + +static int eth_change_mtu(struct device *dev, int new_mtu) +{ + if ((new_mtu < 68) || (new_mtu > 1500)) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + +#ifdef CONFIG_FDDI + +static int fddi_change_mtu(struct device *dev, int new_mtu) +{ + if ((new_mtu < FDDI_K_SNAP_HLEN) || (new_mtu > FDDI_K_SNAP_DLEN)) + return(-EINVAL); + dev->mtu = new_mtu; + return(0); +} + +#endif + +void ether_setup(struct device *dev) +{ + int i; + /* Fill in the fields of the device structure with ethernet-generic values. + This should be in a common file instead of per-driver. */ + for (i = 0; i < DEV_NUMBUFFS; i++) + skb_queue_head_init(&dev->buffs[i]); + + /* register boot-defined "eth" devices */ + if (dev->name && (strncmp(dev->name, "eth", 3) == 0)) { + i = simple_strtoul(dev->name + 3, NULL, 0); + if (ethdev_index[i] == NULL) { + ethdev_index[i] = dev; + } + else if (dev != ethdev_index[i]) { + /* Really shouldn't happen! */ +#ifdef MACH + panic("ether_setup: Ouch! Someone else took %s\n", + dev->name); +#else + printk("ether_setup: Ouch! Someone else took %s\n", + dev->name); +#endif + } + } + +#ifndef MACH + dev->change_mtu = eth_change_mtu; + dev->hard_header = eth_header; + dev->rebuild_header = eth_rebuild_header; + dev->set_mac_address = eth_mac_addr; + dev->header_cache_bind = eth_header_cache_bind; + dev->header_cache_update= eth_header_cache_update; +#endif + + dev->type = ARPHRD_ETHER; + dev->hard_header_len = ETH_HLEN; + dev->mtu = 1500; /* eth_mtu */ + dev->addr_len = ETH_ALEN; + dev->tx_queue_len = 100; /* Ethernet wants good queues */ + + memset(dev->broadcast,0xFF, ETH_ALEN); + + /* New-style flags. 
*/ + dev->flags = IFF_BROADCAST|IFF_MULTICAST; + dev->family = AF_INET; + dev->pa_addr = 0; + dev->pa_brdaddr = 0; + dev->pa_mask = 0; + dev->pa_alen = 4; +} + +#ifdef CONFIG_TR + +void tr_setup(struct device *dev) +{ + int i; + /* Fill in the fields of the device structure with ethernet-generic values. + This should be in a common file instead of per-driver. */ + for (i = 0; i < DEV_NUMBUFFS; i++) + skb_queue_head_init(&dev->buffs[i]); + + dev->hard_header = tr_header; + dev->rebuild_header = tr_rebuild_header; + + dev->type = ARPHRD_IEEE802; + dev->hard_header_len = TR_HLEN; + dev->mtu = 2000; /* bug in fragmenter...*/ + dev->addr_len = TR_ALEN; + dev->tx_queue_len = 100; /* Long queues on tr */ + + memset(dev->broadcast,0xFF, TR_ALEN); + + /* New-style flags. */ + dev->flags = IFF_BROADCAST; + dev->family = AF_INET; + dev->pa_addr = 0; + dev->pa_brdaddr = 0; + dev->pa_mask = 0; + dev->pa_alen = 4; +} + +#endif + +#ifdef CONFIG_FDDI + +void fddi_setup(struct device *dev) + { + int i; + + /* + * Fill in the fields of the device structure with FDDI-generic values. + * This should be in a common file instead of per-driver. + */ + for (i=0; i < DEV_NUMBUFFS; i++) + skb_queue_head_init(&dev->buffs[i]); + + dev->change_mtu = fddi_change_mtu; + dev->hard_header = fddi_header; + dev->rebuild_header = fddi_rebuild_header; + + dev->type = ARPHRD_FDDI; + dev->hard_header_len = FDDI_K_SNAP_HLEN+3; /* Assume 802.2 SNAP hdr len + 3 pad bytes */ + dev->mtu = FDDI_K_SNAP_DLEN; /* Assume max payload of 802.2 SNAP frame */ + dev->addr_len = FDDI_K_ALEN; + dev->tx_queue_len = 100; /* Long queues on FDDI */ + + memset(dev->broadcast, 0xFF, FDDI_K_ALEN); + + /* New-style flags */ + dev->flags = IFF_BROADCAST | IFF_MULTICAST; + dev->family = AF_INET; + dev->pa_addr = 0; + dev->pa_brdaddr = 0; + dev->pa_mask = 0; + dev->pa_alen = 4; + return; + } + +#endif + +int ether_config(struct device *dev, struct ifmap *map) +{ + if (map->mem_start != (u_long)(-1)) + dev->mem_start = map->mem_start; + if (map->mem_end != (u_long)(-1)) + dev->mem_end = map->mem_end; + if (map->base_addr != (u_short)(-1)) + dev->base_addr = map->base_addr; + if (map->irq != (u_char)(-1)) + dev->irq = map->irq; + if (map->dma != (u_char)(-1)) + dev->dma = map->dma; + if (map->port != (u_char)(-1)) + dev->if_port = map->port; + return 0; +} + +int register_netdev(struct device *dev) +{ + struct device *d = dev_base; + unsigned long flags; + int i=MAX_ETH_CARDS; + + save_flags(flags); + cli(); + + if (dev && dev->init) { + if (dev->name && + ((dev->name[0] == '\0') || (dev->name[0] == ' '))) { + for (i = 0; i < MAX_ETH_CARDS; ++i) + if (ethdev_index[i] == NULL) { + sprintf(dev->name, "eth%d", i); + printk("loading device '%s'...\n", dev->name); + ethdev_index[i] = dev; + break; + } + } + + sti(); /* device probes assume interrupts enabled */ + if (dev->init(dev) != 0) { + if (i < MAX_ETH_CARDS) ethdev_index[i] = NULL; + restore_flags(flags); + return -EIO; + } + cli(); + + /* Add device to end of chain */ + if (dev_base) { + while (d->next) + d = d->next; + d->next = dev; + } + else + dev_base = dev; + dev->next = NULL; + } + restore_flags(flags); + return 0; +} + +void unregister_netdev(struct device *dev) +{ + struct device *d = dev_base; + unsigned long flags; + int i; + + save_flags(flags); + cli(); + + if (dev == NULL) + { + printk("was NULL\n"); + restore_flags(flags); + return; + } + /* else */ + if (dev->start) + printk("ERROR '%s' busy and not MOD_IN_USE.\n", dev->name); + + /* + * must jump over main_device+aliases + * avoid alias 
devices unregistration so that only + * net_alias module manages them + */ +#ifdef CONFIG_NET_ALIAS + if (dev_base == dev) + dev_base = net_alias_nextdev(dev); + else + { + while(d && (net_alias_nextdev(d) != dev)) /* skip aliases */ + d = net_alias_nextdev(d); + + if (d && (net_alias_nextdev(d) == dev)) + { + /* + * Critical: Bypass by consider devices as blocks (maindev+aliases) + */ + net_alias_nextdev_set(d, net_alias_nextdev(dev)); + } +#else + if (dev_base == dev) + dev_base = dev->next; + else + { + while (d && (d->next != dev)) + d = d->next; + + if (d && (d->next == dev)) + { + d->next = dev->next; + } +#endif + else + { + printk("unregister_netdev: '%s' not found\n", dev->name); + restore_flags(flags); + return; + } + } + for (i = 0; i < MAX_ETH_CARDS; ++i) + { + if (ethdev_index[i] == dev) + { + ethdev_index[i] = NULL; + break; + } + } + + restore_flags(flags); + + /* + * You can i.e use a interfaces in a route though it is not up. + * We call close_dev (which is changed: it will down a device even if + * dev->flags==0 (but it will not call dev->stop if IFF_UP + * is not set). + * This will call notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev), + * dev_mc_discard(dev), .... + */ + + dev_close(dev); +} + + +/* + * Local variables: + * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c net_init.c" + * version-control: t + * kept-new-versions: 5 + * tab-width: 4 + * End: + */ diff --git a/linux/dev/drivers/net/wavelan.p.h b/linux/dev/drivers/net/wavelan.p.h new file mode 100644 index 0000000..0549844 --- /dev/null +++ b/linux/dev/drivers/net/wavelan.p.h @@ -0,0 +1,639 @@ +/* + * Wavelan ISA driver + * + * Jean II - HPLB '96 + * + * Reorganisation and extension of the driver. + * + * This file contain all definition and declarations necessary for the + * wavelan isa driver. This file is a private header, so it should + * be included only on wavelan.c !!! + */ + +#ifndef WAVELAN_P_H +#define WAVELAN_P_H + +/************************** DOCUMENTATION **************************/ +/* + * This driver provide a Linux interface to the Wavelan ISA hardware + * The Wavelan is a product of Lucent ("http://wavelan.netland.nl/"). + * This division was formerly part of NCR and then AT&T. + * Wavelan are also distributed by DEC (RoamAbout), Digital Ocean and + * Aironet (Arlan). If you have one of those product, you will need to + * make some changes below... + * + * This driver is still a beta software. A lot of bugs have been corrected, + * a lot of functionalities are implemented, the whole appear pretty stable, + * but there is still some area of improvement (encryption, performance...). + * + * To know how to use this driver, read the NET3 HOWTO. + * If you want to exploit the many other fonctionalities, look comments + * in the code... + * + * This driver is the result of the effort of many peoples (see below). + */ + +/* ------------------------ SPECIFIC NOTES ------------------------ */ +/* + * wavelan.o is darn too big + * ------------------------- + * That's true ! There is a very simple way to reduce the driver + * object by 33% (yes !). Comment out the following line : + * #include <linux/wireless.h> + * + * MAC address and hardware detection : + * ---------------------------------- + * The detection code of the wavelan chech that the first 3 + * octets of the MAC address fit the company code. 
This type of + * detection work well for AT&T cards (because the AT&T code is + * hardcoded in wavelan.h), but of course will fail for other + * manufacturer. + * + * If you are sure that your card is derived from the wavelan, + * here is the way to configure it : + * 1) Get your MAC address + * a) With your card utilities (wfreqsel, instconf, ...) + * b) With the driver : + * o compile the kernel with DEBUG_CONFIG_INFO enabled + * o Boot and look the card messages + * 2) Set your MAC code (3 octets) in MAC_ADDRESSES[][3] (wavelan.h) + * 3) Compile & verify + * 4) Send me the MAC code - I will include it in the next version... + * + * "CU Inactive" message at boot up : + * ----------------------------------- + * It seem that there is some weird timings problems with the + * Intel microcontroler. In fact, this message is triggered by a + * bad reading of the on board ram the first time we read the + * control block. If you ignore this message, all is ok (but in + * fact, currently, it reset the wavelan hardware). + * + * To get rid of that problem, there is two solution. The first + * is to add a dummy read of the scb at the end of + * wv_82586_config. The second is to add the timers + * wv_synchronous_cmd and wv_ack (the udelay just after the + * waiting loops - seem that the controler is not totally ready + * when it say it is !). + * + * In the current code, I use the second solution (to be + * consistent with the original solution of Bruce Janson). + */ + +/* --------------------- WIRELESS EXTENSIONS --------------------- */ +/* + * This driver is the first one to support "wireless extensions". + * This set of extensions provide you some way to control the wireless + * caracteristics of the hardware in a standard way and support for + * applications for taking advantage of it (like Mobile IP). + * + * You will need to enable the CONFIG_NET_RADIO define in the kernel + * configuration to enable the wireless extensions (this is the one + * giving access to the radio network device choice). + * + * It might also be a good idea as well to fetch the wireless tools to + * configure the device and play a bit. + */ + +/* ---------------------------- FILES ---------------------------- */ +/* + * wavelan.c : The actual code for the driver - C functions + * + * wavelan.p.h : Private header : local types / vars for the driver + * + * wavelan.h : Description of the hardware interface & structs + * + * i82586.h : Description if the Ethernet controler + */ + +/* --------------------------- HISTORY --------------------------- */ +/* + * (Made with information in drivers headers. It may not be accurate, + * and I garantee nothing except my best effort...) + * + * The history of the Wavelan drivers is as complicated as history of + * the Wavelan itself (NCR -> AT&T -> Lucent). + * + * All started with Anders Klemets <klemets@paul.rutgers.edu>, + * writting a Wavelan ISA driver for the MACH microkernel. Girish + * Welling <welling@paul.rutgers.edu> had also worked on it. + * Keith Moore modify this for the Pcmcia hardware. + * + * Robert Morris <rtm@das.harvard.edu> port these two drivers to BSDI + * and add specific Pcmcia support (there is currently no equivalent + * of the PCMCIA package under BSD...). + * + * Jim Binkley <jrb@cs.pdx.edu> port both BSDI drivers to freeBSD. + * + * Bruce Janson <bruce@cs.usyd.edu.au> port the BSDI ISA driver to Linux. + * + * Anthony D. Joseph <adj@lcs.mit.edu> started modify Bruce driver + * (with help of the BSDI PCMCIA driver) for PCMCIA. 
+ * Yunzhou Li <yunzhou@strat.iol.unh.edu> finished is work. + * Joe Finney <joe@comp.lancs.ac.uk> patched the driver to start + * correctly 2.00 cards (2.4 GHz with frequency selection). + * David Hinds <dhinds@hyper.stanford.edu> integrated the whole in his + * Pcmcia package (+ bug corrections). + * + * I (Jean Tourrilhes - jt@hplb.hpl.hp.com) then started to make some + * patchs to the Pcmcia driver. After, I added code in the ISA driver + * for Wireless Extensions and full support of frequency selection + * cards. Then, I've done the same to the Pcmcia driver + some + * reorganisation. Finally, I came back to the ISA driver to + * upgrade it at the same level as the Pcmcia one and reorganise + * the code + * Loeke Brederveld <lbrederv@wavelan.com> from Lucent has given me + * much needed informations on the Wavelan hardware. + */ + +/* The original copyrights and litteratures mention others names and + * credits. I don't know what there part in this development was... + */ + +/* By the way : for the copyright & legal stuff : + * Almost everybody wrote code under GNU or BSD license (or alike), + * and want that their original copyright remain somewhere in the + * code (for myself, I go with the GPL). + * Nobody want to take responsibility for anything, except the fame... + */ + +/* --------------------------- CREDITS --------------------------- */ +/* + * This software was developed as a component of the + * Linux operating system. + * It is based on other device drivers and information + * either written or supplied by: + * Ajay Bakre (bakre@paul.rutgers.edu), + * Donald Becker (becker@cesdis.gsfc.nasa.gov), + * Loeke Brederveld (Loeke.Brederveld@Utrecht.NCR.com), + * Brent Elphick <belphick@uwaterloo.ca>, + * Anders Klemets (klemets@it.kth.se), + * Vladimir V. Kolpakov (w@stier.koenig.ru), + * Marc Meertens (Marc.Meertens@Utrecht.NCR.com), + * Pauline Middelink (middelin@polyware.iaf.nl), + * Robert Morris (rtm@das.harvard.edu), + * Jean Tourrilhes (jt@hplb.hpl.hp.com), + * Girish Welling (welling@paul.rutgers.edu), + * Clark Woodworth <clark@hiway1.exit109.com> + * Yongguang Zhang <ygz@isl.hrl.hac.com>... + * + * Thanks go also to: + * James Ashton (jaa101@syseng.anu.edu.au), + * Alan Cox (iialan@iiit.swan.ac.uk), + * Allan Creighton (allanc@cs.usyd.edu.au), + * Matthew Geier (matthew@cs.usyd.edu.au), + * Remo di Giovanni (remo@cs.usyd.edu.au), + * Eckhard Grah (grah@wrcs1.urz.uni-wuppertal.de), + * Vipul Gupta (vgupta@cs.binghamton.edu), + * Mark Hagan (mhagan@wtcpost.daytonoh.NCR.COM), + * Tim Nicholson (tim@cs.usyd.edu.au), + * Ian Parkin (ian@cs.usyd.edu.au), + * John Rosenberg (johnr@cs.usyd.edu.au), + * George Rossi (george@phm.gov.au), + * Arthur Scott (arthur@cs.usyd.edu.au), + * Stanislav Sinyagin <stas@isf.ru> + * Peter Storey, + * for their assistance and advice. + * + * Additional Credits: + * + * My developpement has been done under Linux 2.0.x (Debian 1.1) with + * an HP Vectra XP/60. + * + */ + +/* ------------------------- IMPROVEMENTS ------------------------- */ +/* + * I proudly present : + * + * Changes mades in first pre-release : + * ---------------------------------- + * - Reorganisation of the code, function name change + * - Creation of private header (wavelan.p.h) + * - Reorganised debug messages + * - More comments, history, ... 
+ * - mmc_init : configure the PSA if not done + * - mmc_init : correct default value of level threshold for pcmcia + * - mmc_init : 2.00 detection better code for 2.00 init + * - better info at startup + * - irq setting (note : this setting is permanent...) + * - Watchdog : change strategy (+ solve module removal problems) + * - add wireless extensions (ioctl & get_wireless_stats) + * get/set nwid/frequency on fly, info for /proc/net/wireless + * - More wireless extension : SETSPY and GETSPY + * - Make wireless extensions optional + * - Private ioctl to set/get quality & level threshold, histogram + * - Remove /proc/net/wavelan + * - Supress useless stuff from lp (net_local) + * - kernel 2.1 support (copy_to/from_user instead of memcpy_to/fromfs) + * - Add message level (debug stuff in /var/adm/debug & errors not + * displayed at console and still in /var/adm/messages) + * - multi device support + * - Start fixing the probe (init code) + * - More inlines + * - man page + * - Lot of others minor details & cleanups + * + * Changes made in second pre-release : + * ---------------------------------- + * - Cleanup init code (probe & module init) + * - Better multi device support (module) + * - name assignement (module) + * + * Changes made in third pre-release : + * --------------------------------- + * - Be more conservative on timers + * - Preliminary support for multicast (I still lack some details...) + * + * Changes made in fourth pre-release : + * ---------------------------------- + * - multicast (revisited and finished) + * - Avoid reset in set_multicast_list (a really big hack) + * if somebody could apply this code for other i82586 based driver... + * - Share on board memory 75% RU / 25% CU (instead of 50/50) + * + * Changes made for release in 2.1.15 : + * ---------------------------------- + * - Change the detection code for multi manufacturer code support + * + * Changes made for release in 2.1.17 : + * ---------------------------------- + * - Update to wireless extensions changes + * - Silly bug in card initial configuration (psa_conf_status) + * + * Changes made for release in 2.1.27 & 2.0.30 : + * ------------------------------------------- + * - Small bug in debug code (probably not the last one...) + * - Remove extern kerword for wavelan_probe() + * - Level threshold is now a standard wireless extension (version 4 !) + * + * Changes made for release in 2.1.36 : + * ---------------------------------- + * - Encryption setting from Brent Elphick (thanks a lot !) 
+ * - 'ioaddr' to 'u_long' for the Alpha (thanks to Stanislav Sinyagin) + * + * Wishes & dreams : + * --------------- + * - Roaming + */ + +/***************************** INCLUDES *****************************/ + +#include <linux/module.h> + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/interrupt.h> +#include <linux/stat.h> +#include <linux/ptrace.h> +#include <linux/ioport.h> +#include <linux/in.h> +#include <linux/string.h> +#include <linux/delay.h> +#include <asm/system.h> +#include <asm/bitops.h> +#include <asm/io.h> +#include <asm/dma.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/malloc.h> +#include <linux/timer.h> + +#include <linux/wireless.h> /* Wireless extensions */ + +/* Wavelan declarations */ +#ifdef MACH +#include <linuxdev/drivers/net/i82586.h> +#else +#include "i82586.h" +#endif +#include "wavelan.h" + +/****************************** DEBUG ******************************/ + +#undef DEBUG_MODULE_TRACE /* Module insertion/removal */ +#undef DEBUG_CALLBACK_TRACE /* Calls made by Linux */ +#undef DEBUG_INTERRUPT_TRACE /* Calls to handler */ +#undef DEBUG_INTERRUPT_INFO /* type of interrupt & so on */ +#define DEBUG_INTERRUPT_ERROR /* problems */ +#undef DEBUG_CONFIG_TRACE /* Trace the config functions */ +#undef DEBUG_CONFIG_INFO /* What's going on... */ +#define DEBUG_CONFIG_ERRORS /* Errors on configuration */ +#undef DEBUG_TX_TRACE /* Transmission calls */ +#undef DEBUG_TX_INFO /* Header of the transmited packet */ +#define DEBUG_TX_ERROR /* unexpected conditions */ +#undef DEBUG_RX_TRACE /* Transmission calls */ +#undef DEBUG_RX_INFO /* Header of the transmited packet */ +#define DEBUG_RX_ERROR /* unexpected conditions */ +#undef DEBUG_PACKET_DUMP 16 /* Dump packet on the screen */ +#undef DEBUG_IOCTL_TRACE /* Misc call by Linux */ +#undef DEBUG_IOCTL_INFO /* Various debug info */ +#define DEBUG_IOCTL_ERROR /* What's going wrong */ +#define DEBUG_BASIC_SHOW /* Show basic startup info */ +#undef DEBUG_VERSION_SHOW /* Print version info */ +#undef DEBUG_PSA_SHOW /* Dump psa to screen */ +#undef DEBUG_MMC_SHOW /* Dump mmc to screen */ +#undef DEBUG_SHOW_UNUSED /* Show also unused fields */ +#undef DEBUG_I82586_SHOW /* Show i82586 status */ +#undef DEBUG_DEVICE_SHOW /* Show device parameters */ + +/* Options : */ +#define USE_PSA_CONFIG /* Use info from the PSA */ +#define IGNORE_NORMAL_XMIT_ERRS /* Don't bother with normal conditions */ +#undef STRUCT_CHECK /* Verify padding of structures */ +#undef PSA_CRC /* Check CRC in PSA */ +#undef OLDIES /* Old code (to redo) */ +#undef RECORD_SNR /* To redo */ +#undef EEPROM_IS_PROTECTED /* Doesn't seem to be necessary */ +#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical) */ + +#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */ +/* Warning : these stuff will slow down the driver... */ +#define WIRELESS_SPY /* Enable spying addresses */ +#undef HISTOGRAM /* Enable histogram of sig level... */ +#endif + +/************************ CONSTANTS & MACROS ************************/ + +#ifdef DEBUG_VERSION_SHOW +static const char *version = "wavelan.c : v16 (wireless extensions) 17/4/97\n"; +#endif + +/* Watchdog temporisation */ +#define WATCHDOG_JIFFIES 32 /* TODO: express in HZ. 
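+					 * (Editorial note, not in the original:
+					 * at HZ == 100 this is about 320 ms;
+					 * writing it as (32 * HZ) / 100 would
+					 * keep the same delay on other tick
+					 * rates.)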
*/ + +/* Macro to get the number of elements in an array */ +#define NELS(a) (sizeof(a) / sizeof(a[0])) + +/* ------------------------ PRIVATE IOCTL ------------------------ */ + +#define SIOCSIPQTHR SIOCDEVPRIVATE /* Set quality threshold */ +#define SIOCGIPQTHR SIOCDEVPRIVATE + 1 /* Get quality threshold */ +#define SIOCSIPLTHR SIOCDEVPRIVATE + 2 /* Set level threshold */ +#define SIOCGIPLTHR SIOCDEVPRIVATE + 3 /* Get level threshold */ + +#define SIOCSIPHISTO SIOCDEVPRIVATE + 6 /* Set histogram ranges */ +#define SIOCGIPHISTO SIOCDEVPRIVATE + 7 /* Get histogram values */ + +/* ----------------------- VERSION SUPPORT ----------------------- */ + +/* This ugly patch is needed to cope with old version of the kernel */ +#ifndef copy_from_user +#define copy_from_user memcpy_fromfs +#define copy_to_user memcpy_tofs +#endif + +/****************************** TYPES ******************************/ + +/* Shortcuts */ +typedef struct device device; +typedef struct enet_statistics en_stats; +typedef struct iw_statistics iw_stats; +typedef struct iw_quality iw_qual; +typedef struct iw_freq iw_freq; +typedef struct net_local net_local; +typedef struct timer_list timer_list; + +/* Basic types */ +typedef u_char mac_addr[WAVELAN_ADDR_SIZE]; /* Hardware address */ + +/* + * Static specific data for the interface. + * + * For each network interface, Linux keep data in two structure. "device" + * keep the generic data (same format for everybody) and "net_local" keep + * the additional specific data. + * Note that some of this specific data is in fact generic (en_stats, for + * example). + */ +struct net_local +{ + net_local * next; /* Linked list of the devices */ + device * dev; /* Reverse link... */ + en_stats stats; /* Ethernet interface statistics */ + int nresets; /* Number of hw resets */ + u_char reconfig_82586; /* Need to reconfigure the controler */ + u_char promiscuous; /* Promiscuous mode */ + int mc_count; /* Number of multicast addresses */ + timer_list watchdog; /* To avoid blocking state */ + u_short hacr; /* Current host interface state */ + + int tx_n_in_use; + u_short rx_head; + u_short rx_last; + u_short tx_first_free; + u_short tx_first_in_use; + +#ifdef WIRELESS_EXT + iw_stats wstats; /* Wireless specific stats */ +#endif + +#ifdef WIRELESS_SPY + int spy_number; /* Number of addresses to spy */ + mac_addr spy_address[IW_MAX_SPY]; /* The addresses to spy */ + iw_qual spy_stat[IW_MAX_SPY]; /* Statistics gathered */ +#endif /* WIRELESS_SPY */ +#ifdef HISTOGRAM + int his_number; /* Number of intervals */ + u_char his_range[16]; /* Boundaries of interval ]n-1; n] */ + u_long his_sum[16]; /* Sum in interval */ +#endif /* HISTOGRAM */ +}; + +/**************************** PROTOTYPES ****************************/ + +/* ----------------------- MISC SUBROUTINES ------------------------ */ +static inline unsigned long /* flags */ + wv_splhi(void); /* Disable interrupts */ +static inline void + wv_splx(unsigned long); /* ReEnable interrupts : flags */ +static u_char + wv_irq_to_psa(int); +static int + wv_psa_to_irq(u_char); +/* ------------------- HOST ADAPTER SUBROUTINES ------------------- */ +static inline u_short /* data */ + hasr_read(u_long); /* Read the host interface : base address */ +static inline void + hacr_write(u_long, /* Write to host interface : base address */ + u_short), /* data */ + hacr_write_slow(u_long, + u_short), + set_chan_attn(u_long, /* ioaddr */ + u_short), /* hacr */ + wv_hacr_reset(u_long), /* ioaddr */ + wv_16_off(u_long, /* ioaddr */ + u_short), /* hacr */ + 
wv_16_on(u_long, /* ioaddr */ + u_short), /* hacr */ + wv_ints_off(device *), + wv_ints_on(device *); +/* ----------------- MODEM MANAGEMENT SUBROUTINES ----------------- */ +static void + psa_read(u_long, /* Read the Parameter Storage Area */ + u_short, /* hacr */ + int, /* offset in PSA */ + u_char *, /* buffer to fill */ + int), /* size to read */ + psa_write(u_long, /* Write to the PSA */ + u_short, /* hacr */ + int, /* Offset in psa */ + u_char *, /* Buffer in memory */ + int); /* Length of buffer */ +static inline void + mmc_out(u_long, /* Write 1 byte to the Modem Manag Control */ + u_short, + u_char), + mmc_write(u_long, /* Write n bytes to the MMC */ + u_char, + u_char *, + int); +static inline u_char /* Read 1 byte from the MMC */ + mmc_in(u_long, + u_short); +static inline void + mmc_read(u_long, /* Read n bytes from the MMC */ + u_char, + u_char *, + int), + fee_wait(u_long, /* Wait for frequency EEprom : base address */ + int, /* Base delay to wait for */ + int); /* Number of time to wait */ +static void + fee_read(u_long, /* Read the frequency EEprom : base address */ + u_short, /* destination offset */ + u_short *, /* data buffer */ + int); /* number of registers */ +/* ---------------------- I82586 SUBROUTINES ----------------------- */ +static /*inline*/ void + obram_read(u_long, /* ioaddr */ + u_short, /* o */ + u_char *, /* b */ + int); /* n */ +static inline void + obram_write(u_long, /* ioaddr */ + u_short, /* o */ + u_char *, /* b */ + int); /* n */ +static void + wv_ack(device *); +static inline int + wv_synchronous_cmd(device *, + const char *), + wv_config_complete(device *, + u_long, + net_local *); +static int + wv_complete(device *, + u_long, + net_local *); +static inline void + wv_82586_reconfig(device *); +/* ------------------- DEBUG & INFO SUBROUTINES ------------------- */ +#ifdef DEBUG_I82586_SHOW +static void + wv_scb_show(unsigned short); +#endif +static inline void + wv_init_info(device *); /* display startup info */ +/* ------------------- IOCTL, STATS & RECONFIG ------------------- */ +static en_stats * + wavelan_get_stats(device *); /* Give stats /proc/net/dev */ +static void + wavelan_set_multicast_list(device *); +/* ----------------------- PACKET RECEPTION ----------------------- */ +static inline void + wv_packet_read(device *, /* Read a packet from a frame */ + u_short, + int), + wv_receive(device *); /* Read all packets waiting */ +/* --------------------- PACKET TRANSMISSION --------------------- */ +static inline void + wv_packet_write(device *, /* Write a packet to the Tx buffer */ + void *, + short); +static int + wavelan_packet_xmit(struct sk_buff *, /* Send a packet */ + device *); +/* -------------------- HARDWARE CONFIGURATION -------------------- */ +static inline int + wv_mmc_init(device *), /* Initialize the modem */ + wv_ru_start(device *), /* Start the i82586 receiver unit */ + wv_cu_start(device *), /* Start the i82586 command unit */ + wv_82586_start(device *); /* Start the i82586 */ +static void + wv_82586_config(device *); /* Configure the i82586 */ +static inline void + wv_82586_stop(device *); +static int + wv_hw_reset(device *), /* Reset the wavelan hardware */ + wv_check_ioaddr(u_long, /* ioaddr */ + u_char *); /* mac address (read) */ +/* ---------------------- INTERRUPT HANDLING ---------------------- */ +static void + wavelan_interrupt(int, /* Interrupt handler */ + void *, + struct pt_regs *); +static void + wavelan_watchdog(u_long); /* Transmission watchdog */ +/* ------------------- CONFIGURATION CALLBACKS 
------------------- */ +static int + wavelan_open(device *), /* Open the device */ + wavelan_close(device *), /* Close the device */ + wavelan_config(device *); /* Configure one device */ +extern int + wavelan_probe(device *); /* See Space.c */ + +/**************************** VARIABLES ****************************/ + +/* + * This is the root of the linked list of wavelan drivers + * It is use to verify that we don't reuse the same base address + * for two differents drivers and to make the cleanup when + * removing the module. + */ +static net_local * wavelan_list = (net_local *) NULL; + +/* + * This table is used to translate the psa value to irq number + * and vice versa... + */ +static u_char irqvals[] = +{ + 0, 0, 0, 0x01, + 0x02, 0x04, 0, 0x08, + 0, 0, 0x10, 0x20, + 0x40, 0, 0, 0x80, +}; + +/* + * Table of the available i/o address (base address) for wavelan + */ +static unsigned short iobase[] = +{ +#if 0 + /* Leave out 0x3C0 for now -- seems to clash with some video + * controllers. + * Leave out the others too -- we will always use 0x390 and leave + * 0x300 for the Ethernet device. + * Jean II : 0x3E0 is really fine as well... + */ + 0x300, 0x390, 0x3E0, 0x3C0 +#endif /* 0 */ + 0x390, 0x3E0 +}; + +#ifdef MODULE +/* Name of the devices (memory allocation) */ +static char devname[4][IFNAMSIZ] = { "", "", "", "" }; + +/* Parameters set by insmod */ +static int io[4] = { 0, 0, 0, 0 }; +static int irq[4] = { 0, 0, 0, 0 }; +static char * name[4] = { devname[0], devname[1], devname[2], devname[3] }; +#endif /* MODULE */ + +#endif /* WAVELAN_P_H */ diff --git a/linux/dev/drivers/scsi/eata_dma.c b/linux/dev/drivers/scsi/eata_dma.c new file mode 100644 index 0000000..e902ea1 --- /dev/null +++ b/linux/dev/drivers/scsi/eata_dma.c @@ -0,0 +1,1607 @@ +/************************************************************ + * * + * Linux EATA SCSI driver * + * * + * based on the CAM document CAM/89-004 rev. 2.0c, * + * DPT's driver kit, some internal documents and source, * + * and several other Linux scsi drivers and kernel docs. * + * * + * The driver currently: * + * -supports all ISA based EATA-DMA boards * + * like PM2011, PM2021, PM2041, PM3021 * + * -supports all EISA based EATA-DMA boards * + * like PM2012B, PM2022, PM2122, PM2322, PM2042, * + * PM3122, PM3222, PM3332 * + * -supports all PCI based EATA-DMA boards * + * like PM2024, PM2124, PM2044, PM2144, PM3224, * + * PM3334 * + * -supports the Wide, Ultra Wide and Differential * + * versions of the boards * + * -supports multiple HBAs with & without IRQ sharing * + * -supports all SCSI channels on multi channel boards * + * -supports ix86 and MIPS, untested on ALPHA * + * -needs identical IDs on all channels of a HBA * + * -can be loaded as module * + * -displays statistical and hardware information * + * in /proc/scsi/eata_dma * + * -provides rudimentary latency measurement * + * possibilities via /proc/scsi/eata_dma/<hostnum> * + * * + * (c)1993-96 Michael Neuffer * + * mike@i-Connect.Net * + * neuffer@mail.uni-mainz.de * + * * + * This program is free software; you can redistribute it * + * and/or modify it under the terms of the GNU General * + * Public License as published by the Free Software * + * Foundation; either version 2 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be * + * useful, but WITHOUT ANY WARRANTY; without even the * + * implied warranty of MERCHANTABILITY or FITNESS FOR A * + * PARTICULAR PURPOSE. 
See the GNU General Public License * + * for more details. * + * * + * You should have received a copy of the GNU General * + * Public License along with this kernel; if not, write to * + * the Free Software Foundation, Inc., 675 Mass Ave, * + * Cambridge, MA 02139, USA. * + * * + * I have to thank DPT for their excellent support. I took * + * me almost a year and a stopover at their HQ, on my first * + * trip to the USA, to get it, but since then they've been * + * very helpful and tried to give me all the infos and * + * support I need. * + * * + * Thanks also to Simon Shapiro, Greg Hosler and Mike * + * Jagdis who did a lot of testing and found quite a number * + * of bugs during the development. * + ************************************************************ + * last change: 96/10/21 OS: Linux 2.0.23 * + ************************************************************/ + +/* Look in eata_dma.h for configuration and revision information */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/ioport.h> +#include <linux/malloc.h> +#include <linux/in.h> +#include <linux/bios32.h> +#include <linux/pci.h> +#include <linux/proc_fs.h> +#include <linux/delay.h> +#include <asm/byteorder.h> +#include <asm/types.h> +#include <asm/io.h> +#include <asm/dma.h> +#ifdef MACH +#define flush_cache_all() +#else +#include <asm/pgtable.h> +#endif +#ifdef __mips__ +#include <asm/cachectl.h> +#endif +#include <linux/blk.h> +#include "scsi.h" +#include "sd.h" +#include "hosts.h" +#include "eata_dma.h" +#include "eata_dma_proc.h" + +#include <linux/stat.h> +#include <linux/config.h> /* for CONFIG_PCI */ + +struct proc_dir_entry proc_scsi_eata_dma = { + PROC_SCSI_EATA, 8, "eata_dma", + S_IFDIR | S_IRUGO | S_IXUGO, 2 +}; + +static u32 ISAbases[] = +{0x1F0, 0x170, 0x330, 0x230}; +static unchar EISAbases[] = +{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; +static uint registered_HBAs = 0; +static struct Scsi_Host *last_HBA = NULL; +static struct Scsi_Host *first_HBA = NULL; +static unchar reg_IRQ[] = +{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +static unchar reg_IRQL[] = +{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +static struct eata_sp *status = 0; /* Statuspacket array */ +static void *dma_scratch = 0; + +static struct eata_register *fake_int_base; +static int fake_int_result; +static int fake_int_happened; + +static ulong int_counter = 0; +static ulong queue_counter = 0; + +void eata_scsi_done (Scsi_Cmnd * scmd) +{ + scmd->request.rq_status = RQ_SCSI_DONE; + + if (scmd->request.sem != NULL) + up(scmd->request.sem); + + return; +} + +void eata_fake_int_handler(s32 irq, void *dev_id, struct pt_regs * regs) +{ + fake_int_result = inb((ulong)fake_int_base + HA_RSTATUS); + fake_int_happened = TRUE; + DBG(DBG_INTR3, printk("eata_fake_int_handler called irq%d base %p" + " res %#x\n", irq, fake_int_base, fake_int_result)); + return; +} + +#include "eata_dma_proc.c" + +#ifdef MODULE +int eata_release(struct Scsi_Host *sh) +{ + uint i; + if (sh->irq && reg_IRQ[sh->irq] == 1) free_irq(sh->irq, NULL); + else reg_IRQ[sh->irq]--; + + scsi_init_free((void *)status, 512); + scsi_init_free((void *)dma_scratch - 4, 1024); + for (i = 0; i < sh->can_queue; i++){ /* Free all SG arrays */ + if(SD(sh)->ccb[i].sg_list != NULL) + scsi_init_free((void *) SD(sh)->ccb[i].sg_list, + sh->sg_tablesize * sizeof(struct eata_sg_list)); + } + + if (SD(sh)->channel == 0) { + if (sh->dma_channel != BUSMASTER) free_dma(sh->dma_channel); + if (sh->io_port && 
sh->n_io_port) + release_region(sh->io_port, sh->n_io_port); + } + return(TRUE); +} +#endif + + +inline void eata_latency_in(struct eata_ccb *cp, hostdata *hd) +{ + uint time; + time = jiffies - cp->timestamp; + if(hd->all_lat[1] > time) + hd->all_lat[1] = time; + if(hd->all_lat[2] < time) + hd->all_lat[2] = time; + hd->all_lat[3] += time; + hd->all_lat[0]++; + if((cp->rw_latency) == WRITE) { /* was WRITE */ + if(hd->writes_lat[cp->sizeindex][1] > time) + hd->writes_lat[cp->sizeindex][1] = time; + if(hd->writes_lat[cp->sizeindex][2] < time) + hd->writes_lat[cp->sizeindex][2] = time; + hd->writes_lat[cp->sizeindex][3] += time; + hd->writes_lat[cp->sizeindex][0]++; + } else if((cp->rw_latency) == READ) { + if(hd->reads_lat[cp->sizeindex][1] > time) + hd->reads_lat[cp->sizeindex][1] = time; + if(hd->reads_lat[cp->sizeindex][2] < time) + hd->reads_lat[cp->sizeindex][2] = time; + hd->reads_lat[cp->sizeindex][3] += time; + hd->reads_lat[cp->sizeindex][0]++; + } +} + +inline void eata_latency_out(struct eata_ccb *cp, Scsi_Cmnd *cmd) +{ + int x, z; + short *sho; + long *lon; + x = 0; /* just to keep GCC quiet */ + cp->timestamp = jiffies; /* For latency measurements */ + switch(cmd->cmnd[0]) { + case WRITE_6: + x = cmd->cmnd[4]/2; + cp->rw_latency = WRITE; + break; + case READ_6: + x = cmd->cmnd[4]/2; + cp->rw_latency = READ; + break; + case WRITE_10: + sho = (short *) &cmd->cmnd[7]; + x = ntohs(*sho)/2; + cp->rw_latency = WRITE; + break; + case READ_10: + sho = (short *) &cmd->cmnd[7]; + x = ntohs(*sho)/2; + cp->rw_latency = READ; + break; + case WRITE_12: + lon = (long *) &cmd->cmnd[6]; + x = ntohl(*lon)/2; + cp->rw_latency = WRITE; + break; + case READ_12: + lon = (long *) &cmd->cmnd[6]; + x = ntohl(*lon)/2; + cp->rw_latency = READ; + break; + default: + cp->rw_latency = OTHER; + break; + } + if (cmd->cmnd[0] == WRITE_6 || cmd->cmnd[0] == WRITE_10 || + cmd->cmnd[0] == WRITE_12 || cmd->cmnd[0] == READ_6 || + cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == READ_12) { + for(z = 0; (x > (1 << z)) && (z <= 11); z++) + /* nothing */; + cp->sizeindex = z; + } +} + + +void eata_int_handler(int irq, void *dev_id, struct pt_regs * regs) +{ + uint i, result = 0; + uint hba_stat, scsi_stat, eata_stat; + Scsi_Cmnd *cmd; + struct eata_ccb *ccb; + struct eata_sp *sp; + uint base; + uint x; + struct Scsi_Host *sh; + + for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) { + if (sh->irq != irq) + continue; + + while(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) { + + int_counter++; + + sp = &SD(sh)->sp; +#ifdef __mips__ + sys_cacheflush(sp, sizeof(struct eata_sp), 2); +#endif + ccb = sp->ccb; + + if(ccb == NULL) { + eata_stat = inb((uint)sh->base + HA_RSTATUS); + printk("eata_dma: int_handler, Spurious IRQ %d " + "received. 
CCB pointer not set.\n", irq); + break; + } + + cmd = ccb->cmd; + base = (uint) cmd->host->base; + hba_stat = sp->hba_stat; + + scsi_stat = (sp->scsi_stat >> 1) & 0x1f; + + if (sp->EOC == FALSE) { + eata_stat = inb(base + HA_RSTATUS); + printk(KERN_WARNING "eata_dma: int_handler, board: %x cmd %lx " + "returned unfinished.\n" + "EATA: %x HBA: %x SCSI: %x spadr %lx spadrirq %lx, " + "irq%d\n", base, (long)ccb, eata_stat, hba_stat, + scsi_stat,(long)&status, (long)&status[irq], irq); + cmd->result = DID_ERROR << 16; + ccb->status = FREE; + cmd->scsi_done(cmd); + break; + } + + sp->EOC = FALSE; /* Clean out this flag */ + + if (ccb->status == LOCKED || ccb->status == RESET) { + printk("eata_dma: int_handler, reseted command pid %ld returned" + "\n", cmd->pid); + DBG(DBG_INTR && DBG_DELAY, DELAY(1)); + } + + eata_stat = inb(base + HA_RSTATUS); + DBG(DBG_INTR, printk("IRQ %d received, base %#.4x, pid %ld, " + "target: %x, lun: %x, ea_s: %#.2x, hba_s: " + "%#.2x \n", irq, base, cmd->pid, cmd->target, + cmd->lun, eata_stat, hba_stat)); + + switch (hba_stat) { + case HA_NO_ERROR: /* NO Error */ + if(HD(cmd)->do_latency == TRUE && ccb->timestamp) + eata_latency_in(ccb, HD(cmd)); + result = DID_OK << 16; + break; + case HA_ERR_SEL_TO: /* Selection Timeout */ + case HA_ERR_CMD_TO: /* Command Timeout */ + result = DID_TIME_OUT << 16; + break; + case HA_BUS_RESET: /* SCSI Bus Reset Received */ + result = DID_RESET << 16; + DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: BUS RESET " + "received on cmd %ld\n", + HD(cmd)->HBA_number, cmd->pid)); + break; + case HA_INIT_POWERUP: /* Initial Controller Power-up */ + if (cmd->device->type != TYPE_TAPE) + result = DID_BUS_BUSY << 16; + else + result = DID_ERROR << 16; + + for (i = 0; i < MAXTARGET; i++) + DBG(DBG_STATUS, printk(KERN_DEBUG "scsi%d: cmd pid %ld " + "returned with INIT_POWERUP\n", + HD(cmd)->HBA_number, cmd->pid)); + break; + case HA_CP_ABORT_NA: + case HA_CP_ABORTED: + result = DID_ABORT << 16; + DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: aborted cmd " + "returned\n", HD(cmd)->HBA_number)); + break; + case HA_CP_RESET_NA: + case HA_CP_RESET: + HD(cmd)->resetlevel[cmd->channel] = 0; + result = DID_RESET << 16; + DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: reseted cmd " + "pid %ldreturned\n", + HD(cmd)->HBA_number, cmd->pid)); + case HA_SCSI_HUNG: /* SCSI Hung */ + printk(KERN_ERR "scsi%d: SCSI hung\n", HD(cmd)->HBA_number); + result = DID_ERROR << 16; + break; + case HA_RSENSE_FAIL: /* Auto Request-Sense Failed */ + DBG(DBG_STATUS, printk(KERN_ERR "scsi%d: Auto Request Sense " + "Failed\n", HD(cmd)->HBA_number)); + result = DID_ERROR << 16; + break; + case HA_UNX_BUSPHASE: /* Unexpected Bus Phase */ + case HA_UNX_BUS_FREE: /* Unexpected Bus Free */ + case HA_BUS_PARITY: /* Bus Parity Error */ + case HA_UNX_MSGRJCT: /* Unexpected Message Reject */ + case HA_RESET_STUCK: /* SCSI Bus Reset Stuck */ + case HA_PARITY_ERR: /* Controller Ram Parity */ + default: + result = DID_ERROR << 16; + break; + } + cmd->result = result | (scsi_stat << 1); + +#if DBG_INTR2 + if (scsi_stat || result || hba_stat || eata_stat != 0x50 + || cmd->scsi_done == NULL || cmd->device->id == 7) + printk("HBA: %d, channel %d, id: %d, lun %d, pid %ld:\n" + "eata_stat %#x, hba_stat %#.2x, scsi_stat %#.2x, " + "sense_key: %#x, result: %#.8x\n", x, + cmd->device->channel, cmd->device->id, cmd->device->lun, + cmd->pid, eata_stat, hba_stat, scsi_stat, + cmd->sense_buffer[2] & 0xf, cmd->result); + DBG(DBG_INTR&&DBG_DELAY,DELAY(1)); +#endif + + ccb->status = FREE; /* now we can release 
the slot */ + cmd->scsi_done(cmd); + } + } + + return; +} + +inline int eata_send_command(u32 addr, u32 base, u8 command) +{ + long loop = R_LIMIT; + + while (inb(base + HA_RAUXSTAT) & HA_ABUSY) + if (--loop == 0) + return(FALSE); + + if(addr != (u32) NULL) + addr = virt_to_bus((void *)addr); + + /* + * This is overkill.....but the MIPSen seem to need this + * and it will be optimized away for i86 and ALPHA machines. + */ + flush_cache_all(); + + /* And now the address in nice little byte chunks */ +#ifdef __LITTLE_ENDIAN + outb(addr, base + HA_WDMAADDR); + outb(addr >> 8, base + HA_WDMAADDR + 1); + outb(addr >> 16, base + HA_WDMAADDR + 2); + outb(addr >> 24, base + HA_WDMAADDR + 3); +#else + outb(addr >> 24, base + HA_WDMAADDR); + outb(addr >> 16, base + HA_WDMAADDR + 1); + outb(addr >> 8, base + HA_WDMAADDR + 2); + outb(addr, base + HA_WDMAADDR + 3); +#endif + outb(command, base + HA_WCOMMAND); + return(TRUE); +} + +inline int eata_send_immediate(u32 base, u32 addr, u8 ifc, u8 code, u8 code2) +{ + if(addr != (u32) NULL) + addr = virt_to_bus((void *)addr); + + /* + * This is overkill.....but the MIPSen seem to need this + * and it will be optimized away for i86 and ALPHA machines. + */ + flush_cache_all(); + + outb(0x0, base + HA_WDMAADDR - 1); + if(addr){ +#ifdef __LITTLE_ENDIAN + outb(addr, base + HA_WDMAADDR); + outb(addr >> 8, base + HA_WDMAADDR + 1); + outb(addr >> 16, base + HA_WDMAADDR + 2); + outb(addr >> 24, base + HA_WDMAADDR + 3); +#else + outb(addr >> 24, base + HA_WDMAADDR); + outb(addr >> 16, base + HA_WDMAADDR + 1); + outb(addr >> 8, base + HA_WDMAADDR + 2); + outb(addr, base + HA_WDMAADDR + 3); +#endif + } else { + outb(0x0, base + HA_WDMAADDR); + outb(0x0, base + HA_WDMAADDR + 1); + outb(code2, base + HA_WCODE2); + outb(code, base + HA_WCODE); + } + + outb(ifc, base + HA_WIFC); + outb(EATA_CMD_IMMEDIATE, base + HA_WCOMMAND); + return(TRUE); +} + +int eata_queue(Scsi_Cmnd * cmd, void (* done) (Scsi_Cmnd *)) +{ + unsigned int i, x, y; + ulong flags; + hostdata *hd; + struct Scsi_Host *sh; + struct eata_ccb *ccb; + struct scatterlist *sl; + + + save_flags(flags); + cli(); + +#if 0 + for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) { + if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) { + printk("eata_dma: scsi%d interrupt pending in eata_queue.\n" + " Calling interrupt handler.\n", sh->host_no); + eata_int_handler(sh->irq, 0, 0); + } + } +#endif + + queue_counter++; + + hd = HD(cmd); + sh = cmd->host; + + if (cmd->cmnd[0] == REQUEST_SENSE && cmd->sense_buffer[0] != 0) { + DBG(DBG_REQSENSE, printk(KERN_DEBUG "Tried to REQUEST SENSE\n")); + cmd->result = DID_OK << 16; + done(cmd); + + return(0); + } + + /* check for free slot */ + for (y = hd->last_ccb + 1, x = 0; x < sh->can_queue; x++, y++) { + if (y >= sh->can_queue) + y = 0; + if (hd->ccb[y].status == FREE) + break; + } + + hd->last_ccb = y; + + if (x >= sh->can_queue) { + cmd->result = DID_BUS_BUSY << 16; + DBG(DBG_QUEUE && DBG_ABNORM, + printk(KERN_CRIT "eata_queue pid %ld, HBA QUEUE FULL..., " + "returning DID_BUS_BUSY\n", cmd->pid)); + done(cmd); + restore_flags(flags); + return(0); + } + ccb = &hd->ccb[y]; + + memset(ccb, 0, sizeof(struct eata_ccb) - sizeof(struct eata_sg_list *)); + + ccb->status = USED; /* claim free slot */ + + restore_flags(flags); + + DBG(DBG_QUEUE, printk("eata_queue pid %ld, target: %x, lun: %x, y %d\n", + cmd->pid, cmd->target, cmd->lun, y)); + DBG(DBG_QUEUE && DBG_DELAY, DELAY(1)); + + if(hd->do_latency == TRUE) + eata_latency_out(ccb, cmd); + + cmd->scsi_done = (void 
*)done; + + switch (cmd->cmnd[0]) { + case CHANGE_DEFINITION: case COMPARE: case COPY: + case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT: + case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER: + case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE: + case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW: + case WRITE_6: case WRITE_10: case WRITE_VERIFY: + case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME: + case SEARCH_HIGH_12: case SEARCH_EQUAL_12: case SEARCH_LOW_12: + case WRITE_12: case WRITE_VERIFY_12: case SET_WINDOW: + case MEDIUM_SCAN: case SEND_VOLUME_TAG: + case 0xea: /* alternate number for WRITE LONG */ + ccb->DataOut = TRUE; /* Output mode */ + break; + case TEST_UNIT_READY: + default: + ccb->DataIn = TRUE; /* Input mode */ + } + + /* FIXME: This will have to be changed once the midlevel driver + * allows different HBA IDs on every channel. + */ + if (cmd->target == sh->this_id) + ccb->Interpret = TRUE; /* Interpret command */ + + if (cmd->use_sg) { + ccb->scatter = TRUE; /* SG mode */ + if (ccb->sg_list == NULL) { + ccb->sg_list = kmalloc(sh->sg_tablesize * sizeof(struct eata_sg_list), + GFP_ATOMIC | GFP_DMA); + } + if (ccb->sg_list == NULL) + panic("eata_dma: Run out of DMA memory for SG lists !\n"); + ccb->cp_dataDMA = htonl(virt_to_bus(ccb->sg_list)); + + ccb->cp_datalen = htonl(cmd->use_sg * sizeof(struct eata_sg_list)); + sl=(struct scatterlist *)cmd->request_buffer; + for(i = 0; i < cmd->use_sg; i++, sl++){ + ccb->sg_list[i].data = htonl(virt_to_bus(sl->address)); + ccb->sg_list[i].len = htonl((u32) sl->length); + } + } else { + ccb->scatter = FALSE; + ccb->cp_datalen = htonl(cmd->request_bufflen); + ccb->cp_dataDMA = htonl(virt_to_bus(cmd->request_buffer)); + } + + ccb->Auto_Req_Sen = TRUE; + ccb->cp_reqDMA = htonl(virt_to_bus(cmd->sense_buffer)); + ccb->reqlen = sizeof(cmd->sense_buffer); + + ccb->cp_id = cmd->target; + ccb->cp_channel = cmd->channel; + ccb->cp_lun = cmd->lun; + ccb->cp_dispri = TRUE; + ccb->cp_identify = TRUE; + memcpy(ccb->cp_cdb, cmd->cmnd, cmd->cmd_len); + + ccb->cp_statDMA = htonl(virt_to_bus(&(hd->sp))); + + ccb->cp_viraddr = ccb; /* This will be passed thru, so we don't need to + * convert it */ + ccb->cmd = cmd; + cmd->host_scribble = (char *)&hd->ccb[y]; + + if(eata_send_command((u32) ccb, (u32) sh->base, EATA_CMD_DMA_SEND_CP) == FALSE) { + cmd->result = DID_BUS_BUSY << 16; + DBG(DBG_QUEUE && DBG_ABNORM, + printk("eata_queue target %d, pid %ld, HBA busy, " + "returning DID_BUS_BUSY\n",cmd->target, cmd->pid)); + ccb->status = FREE; + done(cmd); + return(0); + } + DBG(DBG_QUEUE, printk("Queued base %#.4x pid: %ld target: %x lun: %x " + "slot %d irq %d\n", (s32)sh->base, cmd->pid, + cmd->target, cmd->lun, y, sh->irq)); + DBG(DBG_QUEUE && DBG_DELAY, DELAY(1)); + + return(0); +} + + +int eata_abort(Scsi_Cmnd * cmd) +{ + ulong loop = HZ / 2; + ulong flags; + int x; + struct Scsi_Host *sh; + + save_flags(flags); + cli(); + + DBG(DBG_ABNORM, printk("eata_abort called pid: %ld target: %x lun: %x" + " reason %x\n", cmd->pid, cmd->target, cmd->lun, + cmd->abort_reason)); + DBG(DBG_ABNORM && DBG_DELAY, DELAY(1)); + + /* Some interrupt controllers seem to loose interrupts */ + for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) { + if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) { + printk("eata_dma: scsi%d interrupt pending in eata_abort.\n" + " Calling interrupt handler.\n", sh->host_no); + eata_int_handler(sh->irq, 0, 0); + } + } + + while (inb((u32)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY) { + if (--loop == 0) { 
+ printk("eata_dma: abort, timeout error.\n"); + DBG(DBG_ABNORM && DBG_DELAY, DELAY(1)); + restore_flags(flags); + return (SCSI_ABORT_ERROR); + } + } + if (CD(cmd)->status == RESET) { + printk("eata_dma: abort, command reset error.\n"); + DBG(DBG_ABNORM && DBG_DELAY, DELAY(1)); + restore_flags(flags); + return (SCSI_ABORT_ERROR); + } + if (CD(cmd)->status == LOCKED) { + DBG(DBG_ABNORM, printk("eata_dma: abort, queue slot locked.\n")); + DBG(DBG_ABNORM && DBG_DELAY, DELAY(1)); + restore_flags(flags); + return (SCSI_ABORT_NOT_RUNNING); + } + if (CD(cmd)->status == USED) { + DBG(DBG_ABNORM, printk("Returning: SCSI_ABORT_BUSY\n")); + restore_flags(flags); + return (SCSI_ABORT_BUSY); /* SNOOZE */ + } + if (CD(cmd)->status == FREE) { + DBG(DBG_ABNORM, printk("Returning: SCSI_ABORT_NOT_RUNNING\n")); + restore_flags(flags); + return (SCSI_ABORT_NOT_RUNNING); + } + restore_flags(flags); + panic("eata_dma: abort: invalid slot status\n"); +} + +int eata_reset(Scsi_Cmnd * cmd, unsigned int resetflags) +{ + uint x; + ulong loop = loops_per_sec / 3; + ulong flags; + unchar success = FALSE; + Scsi_Cmnd *sp; + struct Scsi_Host *sh; + + save_flags(flags); + cli(); + + DBG(DBG_ABNORM, printk("eata_reset called pid:%ld target: %x lun: %x" + " reason %x\n", cmd->pid, cmd->target, cmd->lun, + cmd->abort_reason)); + + for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) { + if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) { + printk("eata_dma: scsi%d interrupt pending in eata_reset.\n" + " Calling interrupt handler.\n", sh->host_no); + eata_int_handler(sh->irq, 0, 0); + } + } + + if (HD(cmd)->state == RESET) { + printk("eata_reset: exit, already in reset.\n"); + restore_flags(flags); + DBG(DBG_ABNORM && DBG_DELAY, DELAY(1)); + return (SCSI_RESET_ERROR); + } + + while (inb((u32)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY) + if (--loop == 0) { + printk("eata_reset: exit, timeout error.\n"); + restore_flags(flags); + DBG(DBG_ABNORM && DBG_DELAY, DELAY(1)); + return (SCSI_RESET_ERROR); + } + + for (x = 0; x < cmd->host->can_queue; x++) { + if (HD(cmd)->ccb[x].status == FREE) + continue; + + if (HD(cmd)->ccb[x].status == LOCKED) { + HD(cmd)->ccb[x].status = FREE; + printk("eata_reset: locked slot %d forced free.\n", x); + DBG(DBG_ABNORM && DBG_DELAY, DELAY(1)); + continue; + } + + + sp = HD(cmd)->ccb[x].cmd; + HD(cmd)->ccb[x].status = RESET; + + if (sp == NULL) + panic("eata_reset: slot %d, sp==NULL.\n", x); + + printk("eata_reset: slot %d in reset, pid %ld.\n", x, sp->pid); + + DBG(DBG_ABNORM && DBG_DELAY, DELAY(1)); + + if (sp == cmd) + success = TRUE; + } + + /* hard reset the HBA */ + inb((u32) (cmd->host->base) + HA_RSTATUS); /* This might cause trouble */ + eata_send_command(0, (u32) cmd->host->base, EATA_CMD_RESET); + + HD(cmd)->state = RESET; + + DBG(DBG_ABNORM, printk("eata_reset: board reset done, enabling " + "interrupts.\n")); + + DELAY(2); /* In theorie we should get interrupts and set free all + * used queueslots */ + + DBG(DBG_ABNORM, printk("eata_reset: interrupts disabled again.\n")); + DBG(DBG_ABNORM && DBG_DELAY, DELAY(1)); + + for (x = 0; x < cmd->host->can_queue; x++) { + + /* Skip slots already set free by interrupt and those that + * are still LOCKED from the last reset */ + if (HD(cmd)->ccb[x].status != RESET) + continue; + + sp = HD(cmd)->ccb[x].cmd; + sp->result = DID_RESET << 16; + + /* This mailbox is still waiting for its interrupt */ + HD(cmd)->ccb[x].status = LOCKED; + + printk("eata_reset: slot %d locked, DID_RESET, pid %ld done.\n", + x, sp->pid); + DBG(DBG_ABNORM 
&& DBG_DELAY, DELAY(1)); + + sp->scsi_done(sp); + } + + HD(cmd)->state = FALSE; + restore_flags(flags); + + if (success) { + DBG(DBG_ABNORM, printk("eata_reset: exit, pending.\n")); + DBG(DBG_ABNORM && DBG_DELAY, DELAY(1)); + return (SCSI_RESET_PENDING); + } else { + DBG(DBG_ABNORM, printk("eata_reset: exit, wakeup.\n")); + DBG(DBG_ABNORM && DBG_DELAY, DELAY(1)); + return (SCSI_RESET_PUNT); + } +} + +/* Here we try to determine the optimum queue depth for + * each attached device. + * + * At the moment the algorithm is rather simple + */ +static void eata_select_queue_depths(struct Scsi_Host *host, + Scsi_Device *devicelist) +{ + Scsi_Device *device; + int devcount = 0; + int factor = 0; + +#if CRIPPLE_QUEUE + for(device = devicelist; device != NULL; device = device->next) { + if(device->host == host) + device->queue_depth = 2; + } +#else + /* First we do a sample run go find out what we have */ + for(device = devicelist; device != NULL; device = device->next) { + if (device->host == host) { + devcount++; + switch(device->type) { + case TYPE_DISK: + case TYPE_MOD: + factor += TYPE_DISK_QUEUE; + break; + case TYPE_TAPE: + factor += TYPE_TAPE_QUEUE; + break; + case TYPE_WORM: + case TYPE_ROM: + factor += TYPE_ROM_QUEUE; + break; + case TYPE_PROCESSOR: + case TYPE_SCANNER: + default: + factor += TYPE_OTHER_QUEUE; + break; + } + } + } + + DBG(DBG_REGISTER, printk(KERN_DEBUG "scsi%d: needed queueslots %d\n", + host->host_no, factor)); + + if(factor == 0) /* We don't want to get a DIV BY ZERO error */ + factor = 1; + + factor = (SD(host)->queuesize * 10) / factor; + + DBG(DBG_REGISTER, printk(KERN_DEBUG "scsi%d: using factor %dE-1\n", + host->host_no, factor)); + + /* Now that have the factor we can set the individual queuesizes */ + for(device = devicelist; device != NULL; device = device->next) { + if(device->host == host) { + if(SD(device->host)->bustype != IS_ISA){ + switch(device->type) { + case TYPE_DISK: + case TYPE_MOD: + device->queue_depth = (TYPE_DISK_QUEUE * factor) / 10; + break; + case TYPE_TAPE: + device->queue_depth = (TYPE_TAPE_QUEUE * factor) / 10; + break; + case TYPE_WORM: + case TYPE_ROM: + device->queue_depth = (TYPE_ROM_QUEUE * factor) / 10; + break; + case TYPE_PROCESSOR: + case TYPE_SCANNER: + default: + device->queue_depth = (TYPE_OTHER_QUEUE * factor) / 10; + break; + } + } else /* ISA forces us to limit the queue depth because of the + * bounce buffer memory overhead. I know this is cruel */ + device->queue_depth = 2; + + /* + * It showed that we need to set an upper limit of commands + * we can allow to queue for a single device on the bus. + * If we get above that limit, the broken midlevel SCSI code + * will produce bogus timeouts and aborts en masse. :-( + */ + if(device->queue_depth > UPPER_DEVICE_QUEUE_LIMIT) + device->queue_depth = UPPER_DEVICE_QUEUE_LIMIT; + if(device->queue_depth == 0) + device->queue_depth = 1; + + printk(KERN_INFO "scsi%d: queue depth for target %d on channel %d " + "set to %d\n", host->host_no, device->id, device->channel, + device->queue_depth); + } + } +#endif +} + +#if CHECK_BLINK +int check_blink_state(long base) +{ + ushort loops = 10; + u32 blinkindicator; + u32 state = 0x12345678; + u32 oldstate = 0; + + blinkindicator = htonl(0x54504442); + while ((loops--) && (state != oldstate)) { + oldstate = state; + state = inl((uint) base + 1); + } + + DBG(DBG_BLINK, printk("Did Blink check. 
Status: %d\n", + (state == oldstate) && (state == blinkindicator))); + + if ((state == oldstate) && (state == blinkindicator)) + return(TRUE); + else + return (FALSE); +} +#endif + +char * get_board_data(u32 base, u32 irq, u32 id) +{ + struct eata_ccb *cp; + struct eata_sp *sp; + static char *buff; + ulong i; + + cp = (struct eata_ccb *) scsi_init_malloc(sizeof(struct eata_ccb), + GFP_ATOMIC | GFP_DMA); + sp = (struct eata_sp *) scsi_init_malloc(sizeof(struct eata_sp), + GFP_ATOMIC | GFP_DMA); + + buff = dma_scratch; + + memset(cp, 0, sizeof(struct eata_ccb)); + memset(sp, 0, sizeof(struct eata_sp)); + memset(buff, 0, 256); + + cp->DataIn = TRUE; + cp->Interpret = TRUE; /* Interpret command */ + cp->cp_dispri = TRUE; + cp->cp_identify = TRUE; + + cp->cp_datalen = htonl(56); + cp->cp_dataDMA = htonl(virt_to_bus(buff)); + cp->cp_statDMA = htonl(virt_to_bus(sp)); + cp->cp_viraddr = cp; + + cp->cp_id = id; + cp->cp_lun = 0; + + cp->cp_cdb[0] = INQUIRY; + cp->cp_cdb[1] = 0; + cp->cp_cdb[2] = 0; + cp->cp_cdb[3] = 0; + cp->cp_cdb[4] = 56; + cp->cp_cdb[5] = 0; + + fake_int_base = (struct eata_register *) base; + fake_int_result = FALSE; + fake_int_happened = FALSE; + + eata_send_command((u32) cp, (u32) base, EATA_CMD_DMA_SEND_CP); + + i = jiffies + (3 * HZ); + while (fake_int_happened == FALSE && jiffies <= i) + barrier(); + + DBG(DBG_INTR3, printk(KERN_DEBUG "fake_int_result: %#x hbastat %#x " + "scsistat %#x, buff %p sp %p\n", + fake_int_result, (u32) (sp->hba_stat /*& 0x7f*/), + (u32) sp->scsi_stat, buff, sp)); + + scsi_init_free((void *)cp, sizeof(struct eata_ccb)); + scsi_init_free((void *)sp, sizeof(struct eata_sp)); + + if ((fake_int_result & HA_SERROR) || jiffies > i){ + printk(KERN_WARNING "eata_dma: trying to reset HBA at %x to clear " + "possible blink state\n", base); + /* hard reset the HBA */ + inb((u32) (base) + HA_RSTATUS); + eata_send_command(0, base, EATA_CMD_RESET); + DELAY(1); + return (NULL); + } else + return (buff); +} + + +int get_conf_PIO(u32 base, struct get_conf *buf) +{ + ulong loop = R_LIMIT; + u16 *p; + + if(check_region(base, 9)) + return (FALSE); + + memset(buf, 0, sizeof(struct get_conf)); + + while (inb(base + HA_RSTATUS) & HA_SBUSY) + if (--loop == 0) + return (FALSE); + + fake_int_base = (struct eata_register *) base; + fake_int_result = FALSE; + fake_int_happened = FALSE; + + DBG(DBG_PIO && DBG_PROBE, + printk("Issuing PIO READ CONFIG to HBA at %#x\n", base)); + eata_send_command(0, base, EATA_CMD_PIO_READ_CONFIG); + + loop = R_LIMIT; + for (p = (u16 *) buf; + (long)p <= ((long)buf + (sizeof(struct get_conf) / 2)); p++) { + while (!(inb(base + HA_RSTATUS) & HA_SDRQ)) + if (--loop == 0) + return (FALSE); + + loop = R_LIMIT; + *p = inw(base + HA_RDATA); + } + + if (!(inb(base + HA_RSTATUS) & HA_SERROR)) { /* Error ? 
*/ + if (htonl(EATA_SIGNATURE) == buf->signature) { + DBG(DBG_PIO&&DBG_PROBE, printk("EATA Controller found at %x " + "EATA Level: %x\n", (uint) base, + (uint) (buf->version))); + + while (inb(base + HA_RSTATUS) & HA_SDRQ) + inw(base + HA_RDATA); + return (TRUE); + } + } else { + DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during transfer " + "for HBA at %lx\n", (long)base)); + } + return (FALSE); +} + + +void print_config(struct get_conf *gc) +{ + printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d DMAS:%d\n", + (u32) ntohl(gc->len), gc->version, + gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support, + gc->DMA_support); + printk("DMAV:%d HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n", + gc->DMA_valid, gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2], + gc->scsi_id[1], ntohs(gc->queuesiz), ntohs(gc->SGsiz), gc->SECOND); + printk("IRQ:%d IRQT:%d DMAC:%d FORCADR:%d SG_64K:%d SG_UAE:%d MID:%d " + "MCH:%d MLUN:%d\n", + gc->IRQ, gc->IRQ_TR, (8 - gc->DMA_channel) & 7, gc->FORCADR, + gc->SG_64K, gc->SG_UAE, gc->MAX_ID, gc->MAX_CHAN, gc->MAX_LUN); + printk("RIDQ:%d PCI:%d EISA:%d\n", + gc->ID_qest, gc->is_PCI, gc->is_EISA); + DBG(DPT_DEBUG, DELAY(14)); +} + +short register_HBA(u32 base, struct get_conf *gc, Scsi_Host_Template * tpnt, + u8 bustype) +{ + ulong size = 0; + unchar dma_channel = 0; + char *buff = 0; + unchar bugs = 0; + struct Scsi_Host *sh; + hostdata *hd; + int x; + + + DBG(DBG_REGISTER, print_config(gc)); + + if (gc->DMA_support == FALSE) { + printk("The EATA HBA at %#.4x does not support DMA.\n" + "Please use the EATA-PIO driver.\n", base); + return (FALSE); + } + if(gc->HAA_valid == FALSE || ntohl(gc->len) < 0x22) + gc->MAX_CHAN = 0; + + if (reg_IRQ[gc->IRQ] == FALSE) { /* Interrupt already registered ? */ + if (!request_irq(gc->IRQ, (void *) eata_fake_int_handler, SA_INTERRUPT, + "eata_dma", NULL)){ + reg_IRQ[gc->IRQ]++; + if (!gc->IRQ_TR) + reg_IRQL[gc->IRQ] = TRUE; /* IRQ is edge triggered */ + } else { + printk("Couldn't allocate IRQ %d, Sorry.", gc->IRQ); + return (FALSE); + } + } else { /* More than one HBA on this IRQ */ + if (reg_IRQL[gc->IRQ] == TRUE) { + printk("Can't support more than one HBA on this IRQ,\n" + " if the IRQ is edge triggered. Sorry.\n"); + return (FALSE); + } else + reg_IRQ[gc->IRQ]++; + } + + + /* If DMA is supported but DMA_valid isn't set to indicate that + * the channel number is given we must have pre 2.0 firmware (1.7?) + * which leaves us to guess since the "newer ones" also don't set the + * DMA_valid bit. + */ + if (gc->DMA_support && !gc->DMA_valid && gc->DMA_channel) { + printk(KERN_WARNING "eata_dma: If you are using a pre 2.0 firmware " + "please update it !\n" + " You can get new firmware releases from ftp.dpt.com\n"); + gc->DMA_channel = (base == 0x1f0 ? 
3 /* DMA=5 */ : 2 /* DMA=6 */); + gc->DMA_valid = TRUE; + } + + /* if gc->DMA_valid it must be an ISA HBA and we have to register it */ + dma_channel = BUSMASTER; + if (gc->DMA_valid) { + if (request_dma(dma_channel = (8 - gc->DMA_channel) & 7, "eata_dma")) { + printk(KERN_WARNING "Unable to allocate DMA channel %d for ISA HBA" + " at %#.4x.\n", dma_channel, base); + reg_IRQ[gc->IRQ]--; + if (reg_IRQ[gc->IRQ] == 0) + free_irq(gc->IRQ, NULL); + if (gc->IRQ_TR == FALSE) + reg_IRQL[gc->IRQ] = FALSE; + return (FALSE); + } + } + + if (dma_channel != BUSMASTER) { + disable_dma(dma_channel); + clear_dma_ff(dma_channel); + set_dma_mode(dma_channel, DMA_MODE_CASCADE); + enable_dma(dma_channel); + } + + if (bustype != IS_EISA && bustype != IS_ISA) + buff = get_board_data(base, gc->IRQ, gc->scsi_id[3]); + + if (buff == NULL) { + if (bustype == IS_EISA || bustype == IS_ISA) { + bugs = bugs || BROKEN_INQUIRY; + } else { + if (gc->DMA_support == FALSE) + printk(KERN_WARNING "HBA at %#.4x doesn't support DMA. " + "Sorry\n", base); + else + printk(KERN_WARNING "HBA at %#.4x does not react on INQUIRY. " + "Sorry.\n", base); + if (gc->DMA_valid) + free_dma(dma_channel); + reg_IRQ[gc->IRQ]--; + if (reg_IRQ[gc->IRQ] == 0) + free_irq(gc->IRQ, NULL); + if (gc->IRQ_TR == FALSE) + reg_IRQL[gc->IRQ] = FALSE; + return (FALSE); + } + } + + if (gc->DMA_support == FALSE && buff != NULL) + printk(KERN_WARNING "HBA %.12sat %#.4x doesn't set the DMA_support " + "flag correctly.\n", &buff[16], base); + + request_region(base, 9, "eata_dma"); /* We already checked the + * availability, so this + * should not fail. + */ + + if(ntohs(gc->queuesiz) == 0) { + gc->queuesiz = ntohs(64); + printk(KERN_WARNING "Warning: Queue size has to be corrected. Assuming" + " 64 queueslots\n" + " This might be a PM2012B with a defective Firmware\n" + " Contact DPT support@dpt.com for an upgrade\n"); + } + + size = sizeof(hostdata) + ((sizeof(struct eata_ccb) + sizeof(long)) + * ntohs(gc->queuesiz)); + + DBG(DBG_REGISTER, printk("scsi_register size: %ld\n", size)); + + sh = scsi_register(tpnt, size); + + if(sh != NULL) { + + hd = SD(sh); + + memset(hd->reads, 0, sizeof(u32) * 26); + + sh->select_queue_depths = eata_select_queue_depths; + + hd->bustype = bustype; + + /* + * If we are using a ISA board, we can't use extended SG, + * because we would need excessive amounts of memory for + * bounce buffers. 
+ */ + if (gc->SG_64K==TRUE && ntohs(gc->SGsiz)==64 && hd->bustype!=IS_ISA){ + sh->sg_tablesize = SG_SIZE_BIG; + } else { + sh->sg_tablesize = ntohs(gc->SGsiz); + if (sh->sg_tablesize > SG_SIZE || sh->sg_tablesize == 0) { + if (sh->sg_tablesize == 0) + printk(KERN_WARNING "Warning: SG size had to be fixed.\n" + "This might be a PM2012 with a defective Firmware" + "\nContact DPT support@dpt.com for an upgrade\n"); + sh->sg_tablesize = SG_SIZE; + } + } + hd->sgsize = sh->sg_tablesize; + } + + if(sh != NULL) { + sh->can_queue = hd->queuesize = ntohs(gc->queuesiz); + sh->cmd_per_lun = 0; + } + + if(sh == NULL) { + DBG(DBG_REGISTER, printk(KERN_NOTICE "eata_dma: couldn't register HBA" + " at%x \n", base)); + scsi_unregister(sh); + if (gc->DMA_valid) + free_dma(dma_channel); + + reg_IRQ[gc->IRQ]--; + if (reg_IRQ[gc->IRQ] == 0) + free_irq(gc->IRQ, NULL); + if (gc->IRQ_TR == FALSE) + reg_IRQL[gc->IRQ] = FALSE; + return (FALSE); + } + + + hd->broken_INQUIRY = (bugs & BROKEN_INQUIRY); + + if(hd->broken_INQUIRY == TRUE) { + strcpy(hd->vendor, "DPT"); + strcpy(hd->name, "??????????"); + strcpy(hd->revision, "???.?"); + hd->firmware_revision = 0; + } else { + strncpy(hd->vendor, &buff[8], 8); + hd->vendor[8] = 0; + strncpy(hd->name, &buff[16], 17); + hd->name[17] = 0; + hd->revision[0] = buff[32]; + hd->revision[1] = buff[33]; + hd->revision[2] = buff[34]; + hd->revision[3] = '.'; + hd->revision[4] = buff[35]; + hd->revision[5] = 0; + hd->firmware_revision = (buff[32] << 24) + (buff[33] << 16) + + (buff[34] << 8) + buff[35]; + } + + if (hd->firmware_revision >= (('0'<<24) + ('7'<<16) + ('G'<< 8) + '0')) + hd->immediate_support = 1; + else + hd->immediate_support = 0; + + switch (ntohl(gc->len)) { + case 0x1c: + hd->EATA_revision = 'a'; + break; + case 0x1e: + hd->EATA_revision = 'b'; + break; + case 0x22: + hd->EATA_revision = 'c'; + break; + case 0x24: + hd->EATA_revision = 'z'; + default: + hd->EATA_revision = '?'; + } + + + if(ntohl(gc->len) >= 0x22) { + sh->max_id = gc->MAX_ID + 1; + sh->max_lun = gc->MAX_LUN + 1; + } else { + sh->max_id = 8; + sh->max_lun = 8; + } + + hd->HBA_number = sh->host_no; + hd->channel = gc->MAX_CHAN; + sh->max_channel = gc->MAX_CHAN; + sh->unique_id = base; + sh->base = (char *) base; + sh->io_port = base; + sh->n_io_port = 9; + sh->irq = gc->IRQ; + sh->dma_channel = dma_channel; + + /* FIXME: + * SCSI midlevel code should support different HBA ids on every channel + */ + sh->this_id = gc->scsi_id[3]; + + if (gc->SECOND) + hd->primary = FALSE; + else + hd->primary = TRUE; + + sh->wish_block = FALSE; + + if (hd->bustype != IS_ISA) { + sh->unchecked_isa_dma = FALSE; + } else { + sh->unchecked_isa_dma = TRUE; /* We're doing ISA DMA */ + } + + for(x = 0; x <= 11; x++){ /* Initialize min. latency */ + hd->writes_lat[x][1] = 0xffffffff; + hd->reads_lat[x][1] = 0xffffffff; + } + hd->all_lat[1] = 0xffffffff; + + hd->next = NULL; /* build a linked list of all HBAs */ + hd->prev = last_HBA; + if(hd->prev != NULL) + SD(hd->prev)->next = sh; + last_HBA = sh; + if (first_HBA == NULL) + first_HBA = sh; + registered_HBAs++; + + return (TRUE); +} + + + +void find_EISA(struct get_conf *buf, Scsi_Host_Template * tpnt) +{ + u32 base; + int i; + +#if CHECKPAL + u8 pal1, pal2, pal3; +#endif + + for (i = 0; i < MAXEISA; i++) { + if (EISAbases[i] == TRUE) { /* Still a possibility ? 
*/ + + base = 0x1c88 + (i * 0x1000); +#if CHECKPAL + pal1 = inb((u16)base - 8); + pal2 = inb((u16)base - 7); + pal3 = inb((u16)base - 6); + + if (((pal1 == DPT_ID1) && (pal2 == DPT_ID2)) || + ((pal1 == NEC_ID1) && (pal2 == NEC_ID2) && (pal3 == NEC_ID3))|| + ((pal1 == ATT_ID1) && (pal2 == ATT_ID2) && (pal3 == ATT_ID3))){ + DBG(DBG_PROBE, printk("EISA EATA id tags found: %x %x %x \n", + (int)pal1, (int)pal2, (int)pal3)); +#endif + if (get_conf_PIO(base, buf) == TRUE) { + if (buf->IRQ) { + DBG(DBG_EISA, printk("Registering EISA HBA\n")); + register_HBA(base, buf, tpnt, IS_EISA); + } else + printk("eata_dma: No valid IRQ. HBA removed from list\n"); + } +#if CHECK_BLINK + else { + if (check_blink_state(base)) + printk("HBA is in BLINK state. Consult your HBAs " + "Manual to correct this.\n"); + } +#endif + /* Nothing found here so we take it from the list */ + EISAbases[i] = 0; +#if CHECKPAL + } +#endif + } + } + return; +} + +void find_ISA(struct get_conf *buf, Scsi_Host_Template * tpnt) +{ + int i; + + for (i = 0; i < MAXISA; i++) { + if (ISAbases[i]) { + if (get_conf_PIO(ISAbases[i],buf) == TRUE){ + DBG(DBG_ISA, printk("Registering ISA HBA\n")); + register_HBA(ISAbases[i], buf, tpnt, IS_ISA); + } +#if CHECK_BLINK + else { + if (check_blink_state(ISAbases[i])) + printk("HBA is in BLINK state. Consult your HBAs " + "Manual to correct this.\n"); + } +#endif + ISAbases[i] = 0; + } + } + return; +} + +void find_PCI(struct get_conf *buf, Scsi_Host_Template * tpnt) +{ + +#ifndef CONFIG_PCI + printk("eata_dma: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n"); +#else + + u8 pci_bus, pci_device_fn; + static s16 pci_index = 0; /* Device index to PCI BIOS calls */ + u32 base = 0; + u16 com_adr; + u16 rev_device; + u32 error, i, x; + u8 pal1, pal2, pal3; + + if (pcibios_present()) { + for (i = 0; i <= MAXPCI; ++i, ++pci_index) { + if (pcibios_find_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT, + pci_index, &pci_bus, &pci_device_fn)) + break; + DBG(DBG_PROBE && DBG_PCI, + printk("eata_dma: find_PCI, HBA at bus %d, device %d," + " function %d, index %d\n", (s32)pci_bus, + (s32)((pci_device_fn & 0xf8) >> 3), + (s32)(pci_device_fn & 7), pci_index)); + + if (!(error = pcibios_read_config_word(pci_bus, pci_device_fn, + PCI_CLASS_DEVICE, &rev_device))) { + if (rev_device == PCI_CLASS_STORAGE_SCSI) { + if (!(error = pcibios_read_config_word(pci_bus, + pci_device_fn, PCI_COMMAND, + (u16 *) & com_adr))) { + if (!((com_adr & PCI_COMMAND_IO) && + (com_adr & PCI_COMMAND_MASTER))) { + printk("eata_dma: find_PCI, HBA has IO or" + " BUSMASTER mode disabled\n"); + continue; + } + } else + printk("eata_dma: find_PCI, error %x while reading " + "PCI_COMMAND\n", error); + } else + printk("eata_dma: find_PCI, DEVICECLASSID %x didn't match\n", + rev_device); + } else { + printk("eata_dma: find_PCI, error %x while reading " + "PCI_CLASS_BASE\n", + error); + continue; + } + + if (!(error = pcibios_read_config_dword(pci_bus, pci_device_fn, + PCI_BASE_ADDRESS_0, (int *) &base))){ + + /* Check if the address is valid */ + if (base & 0x01) { + base &= 0xfffffffe; + /* EISA tag there ? 
*/ + pal1 = inb(base); + pal2 = inb(base + 1); + pal3 = inb(base + 2); + if (((pal1 == DPT_ID1) && (pal2 == DPT_ID2)) || + ((pal1 == NEC_ID1) && (pal2 == NEC_ID2) && + (pal3 == NEC_ID3)) || + ((pal1 == ATT_ID1) && (pal2 == ATT_ID2) && + (pal3 == ATT_ID3))) + base += 0x08; + else + base += 0x10; /* Now, THIS is the real address */ + + if (base != 0x1f8) { + /* We didn't find it in the primary search */ + if (get_conf_PIO(base, buf) == TRUE) { + + /* OK. We made it till here, so we can go now + * and register it. We only have to check and + * eventually remove it from the EISA and ISA list + */ + DBG(DBG_PCI, printk("Registering PCI HBA\n")); + register_HBA(base, buf, tpnt, IS_PCI); + + if (base < 0x1000) { + for (x = 0; x < MAXISA; ++x) { + if (ISAbases[x] == base) { + ISAbases[x] = 0; + break; + } + } + } else if ((base & 0x0fff) == 0x0c88) + EISAbases[(base >> 12) & 0x0f] = 0; + continue; /* break; */ + } +#if CHECK_BLINK + else if (check_blink_state(base) == TRUE) { + printk("eata_dma: HBA is in BLINK state.\n" + "Consult your HBAs manual to correct this.\n"); + } +#endif + } + } + } else { + printk("eata_dma: error %x while reading " + "PCI_BASE_ADDRESS_0\n", error); + } + } + } else { + printk("eata_dma: No BIOS32 extensions present. This driver release " + "still depends on it.\n" + " Skipping scan for PCI HBAs. \n"); + } +#endif /* #ifndef CONFIG_PCI */ + return; +} + +int eata_detect(Scsi_Host_Template * tpnt) +{ + struct Scsi_Host *HBA_ptr; + struct get_conf gc; + int i; + + DBG((DBG_PROBE && DBG_DELAY) || DPT_DEBUG, + printk("Using lots of delays to let you read the debugging output\n")); + + tpnt->proc_dir = &proc_scsi_eata_dma; + + status = scsi_init_malloc(512, GFP_ATOMIC | GFP_DMA); + dma_scratch = scsi_init_malloc(1024, GFP_ATOMIC | GFP_DMA); + + if(status == NULL || dma_scratch == NULL) { + printk("eata_dma: can't allocate enough memory to probe for hosts !\n"); + return(0); + } + + dma_scratch += 4; + + find_PCI(&gc, tpnt); + + find_EISA(&gc, tpnt); + + find_ISA(&gc, tpnt); + + for (i = 0; i < MAXIRQ; i++) { /* Now that we know what we have, we */ + if (reg_IRQ[i] >= 1){ /* exchange the interrupt handler which */ + free_irq(i, NULL); /* we used for probing with the real one */ + request_irq(i, (void *)(eata_int_handler), SA_INTERRUPT|SA_SHIRQ, + "eata_dma", NULL); + } + } + + HBA_ptr = first_HBA; + + if (registered_HBAs != 0) { + printk("EATA (Extended Attachment) driver version: %d.%d%s" + "\ndeveloped in co-operation with DPT\n" + "(c) 1993-96 Michael Neuffer, mike@i-Connect.Net\n", + VER_MAJOR, VER_MINOR, VER_SUB); + printk("Registered HBAs:"); + printk("\nHBA no. Boardtype Revis EATA Bus BaseIO IRQ" + " DMA Ch ID Pr QS S/G IS\n"); + for (i = 1; i <= registered_HBAs; i++) { + printk("scsi%-2d: %.12s v%s 2.0%c %s %#.4x %2d", + HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision, + SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P')? 
+ "PCI ":(SD(HBA_ptr)->bustype == 'E')?"EISA":"ISA ", + (u32) HBA_ptr->base, HBA_ptr->irq); + if(HBA_ptr->dma_channel != BUSMASTER) + printk(" %2x ", HBA_ptr->dma_channel); + else + printk(" %s", "BMST"); + printk(" %d %d %c %3d %3d %c\n", + SD(HBA_ptr)->channel+1, HBA_ptr->this_id, + (SD(HBA_ptr)->primary == TRUE)?'Y':'N', + HBA_ptr->can_queue, HBA_ptr->sg_tablesize, + (SD(HBA_ptr)->immediate_support == TRUE)?'Y':'N'); + HBA_ptr = SD(HBA_ptr)->next; + } + } else { + scsi_init_free((void *)status, 512); + } + + scsi_init_free((void *)dma_scratch - 4, 1024); + + DBG(DPT_DEBUG, DELAY(12)); + + return(registered_HBAs); +} + +#ifdef MODULE +/* Eventually this will go into an include file, but this will be later */ +Scsi_Host_Template driver_template = EATA_DMA; +#include "scsi_module.c" +#endif + +/* + * Overrides for Emacs so that we almost follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-indent-level: 4 + * c-brace-imaginary-offset: 0 + * c-brace-offset: -4 + * c-argdecl-indent: 4 + * c-label-offset: -4 + * c-continued-statement-offset: 4 + * c-continued-brace-offset: 0 + * tab-width: 8 + * End: + */ diff --git a/linux/dev/drivers/scsi/g_NCR5380.c b/linux/dev/drivers/scsi/g_NCR5380.c new file mode 100644 index 0000000..687dd36 --- /dev/null +++ b/linux/dev/drivers/scsi/g_NCR5380.c @@ -0,0 +1,735 @@ +/* + * Generic Generic NCR5380 driver + * + * Copyright 1993, Drew Eckhardt + * Visionary Computing + * (Unix and Linux consulting and custom programming) + * drew@colorado.edu + * +1 (303) 440-4894 + * + * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin + * K.Lentin@cs.monash.edu.au + * + * ALPHA RELEASE 1. + * + * For more information, please consult + * + * NCR 5380 Family + * SCSI Protocol Controller + * Databook + * + * NCR Microelectronics + * 1635 Aeroplaza Drive + * Colorado Springs, CO 80916 + * 1+ (719) 578-3400 + * 1+ (800) 334-5454 + */ + +/* + * TODO : flesh out DMA support, find some one actually using this (I have + * a memory mapped Trantor board that works fine) + */ + +/* + * Options : + * + * PARITY - enable parity checking. Not supported. + * + * SCSI2 - enable support for SCSI-II tagged queueing. Untested. + * + * USLEEP - enable support for devices that don't disconnect. Untested. + * + * The card is detected and initialized in one of several ways : + * 1. With command line overrides - NCR5380=port,irq may be + * used on the LILO command line to override the defaults. + * + * 2. With the GENERIC_NCR5380_OVERRIDE compile time define. This is + * specified as an array of address, irq, dma, board tuples. Ie, for + * one board at 0x350, IRQ5, no dma, I could say + * -DGENERIC_NCR5380_OVERRIDE={{0xcc000, 5, DMA_NONE, BOARD_NCR5380}} + * + * -1 should be specified for no or DMA interrupt, -2 to autoprobe for an + * IRQ line if overridden on the command line. + * + * 3. When included as a module, with arguments passed on the command line: + * ncr_irq=xx the interrupt + * ncr_addr=xx the port or base address (for port or memory + * mapped, resp.) + * ncr_dma=xx the DMA + * ncr_5380=1 to set up for a NCR5380 board + * ncr_53c400=1 to set up for a NCR53C400 board + * e.g. 
+ * modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1 + * for a port mapped NCR5380 board or + * modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1 + * for a memory mapped NCR53C400 board with interrupts disabled. + * + * 255 should be specified for no or DMA interrupt, 254 to autoprobe for an + * IRQ line if overridden on the command line. + * + */ + +#ifdef MACH +#define GENERIC_NCR5380_OVERRIDE {{(NCR5380_map_type)0x350,5,0,BOARD_NCR53C400}}; +#define CONFIG_SCSI_GENERIC_NCR53C400 +#define CONFIG_SCSI_G_NCR5380_MEM +#endif + +#define AUTOPROBE_IRQ +#define AUTOSENSE + +#include <linux/config.h> + +#ifdef CONFIG_SCSI_GENERIC_NCR53C400 +#define NCR53C400_PSEUDO_DMA 1 +#define PSEUDO_DMA +#define NCR53C400 +#define NCR5380_STATS +#undef NCR5380_STAT_LIMIT +#endif +#if defined(CONFIG_SCSI_G_NCR5380_PORT) && defined(CONFIG_SCSI_G_NCR5380_MEM) +#error You can not configure the Generic NCR 5380 SCSI Driver for memory mapped I/O and port mapped I/O at the same time (yet) +#endif +#if !defined(CONFIG_SCSI_G_NCR5380_PORT) && !defined(CONFIG_SCSI_G_NCR5380_MEM) +#error You must configure the Generic NCR 5380 SCSI Driver for one of memory mapped I/O and port mapped I/O. +#endif + +#include <asm/system.h> +#include <asm/io.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/blk.h> +#include "scsi.h" +#include "hosts.h" +#include "g_NCR5380.h" +#include "NCR5380.h" +#include "constants.h" +#include "sd.h" +#include<linux/stat.h> + +struct proc_dir_entry proc_scsi_g_ncr5380 = { + PROC_SCSI_GENERIC_NCR5380, 9, "g_NCR5380", + S_IFDIR | S_IRUGO | S_IXUGO, 2 +}; + +#define NCR_NOT_SET 0 +static int ncr_irq=NCR_NOT_SET; +static int ncr_dma=NCR_NOT_SET; +static int ncr_addr=NCR_NOT_SET; +static int ncr_5380=NCR_NOT_SET; +static int ncr_53c400=NCR_NOT_SET; + +static struct override { + NCR5380_implementation_fields; + int irq; + int dma; + int board; /* Use NCR53c400, Ricoh, etc. extensions ? */ +} overrides +#ifdef GENERIC_NCR5380_OVERRIDE + [] = GENERIC_NCR5380_OVERRIDE +#else + [1] = {{0,},}; +#endif + +#define NO_OVERRIDES (sizeof(overrides) / sizeof(struct override)) + +/* + * Function : static internal_setup(int board, char *str, int *ints) + * + * Purpose : LILO command line initialization of the overrides array, + * + * Inputs : board - either BOARD_NCR5380 for a normal NCR5380 board, + * or BOARD_NCR53C400 for a NCR53C400 board. str - unused, ints - + * array of integer parameters with ints[0] equal to the number of ints. 
+ * + */ + +static void internal_setup(int board, char *str, int *ints) { + static int commandline_current = 0; + switch (board) { + case BOARD_NCR5380: + if (ints[0] != 2 && ints[0] != 3) { + printk("generic_NCR5380_setup : usage ncr5380=" STRVAL(NCR5380_map_name) ",irq,dma\n"); + return; + } + case BOARD_NCR53C400: + if (ints[0] != 2) { + printk("generic_NCR53C400_setup : usage ncr53c400=" STRVAL(NCR5380_map_name) ",irq\n"); + return; + } + } + + if (commandline_current < NO_OVERRIDES) { + overrides[commandline_current].NCR5380_map_name = (NCR5380_map_type)ints[1]; + overrides[commandline_current].irq = ints[2]; + if (ints[0] == 3) + overrides[commandline_current].dma = ints[3]; + else + overrides[commandline_current].dma = DMA_NONE; + overrides[commandline_current].board = board; + ++commandline_current; + } +} + +/* + * Function : generic_NCR5380_setup (char *str, int *ints) + * + * Purpose : LILO command line initialization of the overrides array, + * + * Inputs : str - unused, ints - array of integer parameters with ints[0] + * equal to the number of ints. + */ + +void generic_NCR5380_setup (char *str, int *ints) { + internal_setup (BOARD_NCR5380, str, ints); +} + +/* + * Function : generic_NCR53C400_setup (char *str, int *ints) + * + * Purpose : LILO command line initialization of the overrides array, + * + * Inputs : str - unused, ints - array of integer parameters with ints[0] + * equal to the number of ints. + */ + +void generic_NCR53C400_setup (char *str, int *ints) { + internal_setup (BOARD_NCR53C400, str, ints); +} + +/* + * Function : int generic_NCR5380_detect(Scsi_Host_Template * tpnt) + * + * Purpose : initializes generic NCR5380 driver based on the + * command line / compile time port and irq definitions. + * + * Inputs : tpnt - template for this SCSI adapter. + * + * Returns : 1 if a host adapter was found, 0 if not. + * + */ + +int generic_NCR5380_detect(Scsi_Host_Template * tpnt) { + static int current_override = 0; + int count; + int flags = 0; + struct Scsi_Host *instance; + + if (ncr_irq != NCR_NOT_SET) + overrides[0].irq=ncr_irq; + if (ncr_dma != NCR_NOT_SET) + overrides[0].dma=ncr_dma; + if (ncr_addr != NCR_NOT_SET) + overrides[0].NCR5380_map_name=(NCR5380_map_type)ncr_addr; + if (ncr_5380 != NCR_NOT_SET) + overrides[0].board=BOARD_NCR5380; + else if (ncr_53c400 != NCR_NOT_SET) + overrides[0].board=BOARD_NCR53C400; + + tpnt->proc_dir = &proc_scsi_g_ncr5380; + + for (count = 0; current_override < NO_OVERRIDES; ++current_override) { + if (!(overrides[current_override].NCR5380_map_name)) + continue; + + switch (overrides[current_override].board) { + case BOARD_NCR5380: + flags = FLAG_NO_PSEUDO_DMA; + break; + case BOARD_NCR53C400: + flags = FLAG_NCR53C400; + break; + } + + instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); + instance->NCR5380_instance_name = overrides[current_override].NCR5380_map_name; + + NCR5380_init(instance, flags); + + if (overrides[current_override].irq != IRQ_AUTO) + instance->irq = overrides[current_override].irq; + else + instance->irq = NCR5380_probe_irq(instance, 0xffff); + + if (instance->irq != IRQ_NONE) + if (request_irq(instance->irq, generic_NCR5380_intr, SA_INTERRUPT, "NCR5380", NULL)) { + printk("scsi%d : IRQ%d not free, interrupts disabled\n", + instance->host_no, instance->irq); + instance->irq = IRQ_NONE; + } + + if (instance->irq == IRQ_NONE) { + printk("scsi%d : interrupts not enabled. 
for better interactive performance,\n", instance->host_no); + printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); + } + + printk("scsi%d : at " STRVAL(NCR5380_map_name) " 0x%x", instance->host_no, (unsigned int)instance->NCR5380_instance_name); + if (instance->irq == IRQ_NONE) + printk (" interrupts disabled"); + else + printk (" irq %d", instance->irq); + printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", + CAN_QUEUE, CMD_PER_LUN, GENERIC_NCR5380_PUBLIC_RELEASE); + NCR5380_print_options(instance); + printk("\n"); + + ++current_override; + ++count; + } + return count; +} + +const char * generic_NCR5380_info (struct Scsi_Host* host) { + static const char string[]="Generic NCR5380/53C400 Driver"; + return string; +} + +int generic_NCR5380_release_resources(struct Scsi_Host * instance) +{ + NCR5380_local_declare(); + + NCR5380_setup(instance); + + if (instance->irq != IRQ_NONE) + free_irq(instance->irq, NULL); + + return 0; +} + +#ifdef BIOSPARAM +/* + * Function : int generic_NCR5380_biosparam(Disk * disk, kdev_t dev, int *ip) + * + * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for + * the specified device / size. + * + * Inputs : size = size of device in sectors (512 bytes), dev = block device + * major / minor, ip[] = {heads, sectors, cylinders} + * + * Returns : always 0 (success), initializes ip + * + */ + +/* + * XXX Most SCSI boards use this mapping, I could be incorrect. Some one + * using hard disks on a trantor should verify that this mapping corresponds + * to that used by the BIOS / ASPI driver by running the linux fdisk program + * and matching the H_C_S coordinates to what DOS uses. + */ + +int generic_NCR5380_biosparam(Disk * disk, kdev_t dev, int *ip) +{ + int size = disk->capacity; + ip[0] = 64; + ip[1] = 32; + ip[2] = size >> 11; + return 0; +} +#endif + +#if NCR53C400_PSEUDO_DMA +static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, int len) +{ + int blocks = len / 128; + int start = 0; + int i; + int bl; + NCR5380_local_declare(); + + NCR5380_setup(instance); + +#if (NDEBUG & NDEBUG_C400_PREAD) + printk("53C400r: About to read %d blocks for %d bytes\n", blocks, len); +#endif + + NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE | CSR_TRANS_DIR); + NCR5380_write(C400_BLOCK_COUNTER_REG, blocks); + while (1) { + +#if (NDEBUG & NDEBUG_C400_PREAD) + printk("53C400r: %d blocks left\n", blocks); +#endif + + if ((bl=NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) { +#if (NDEBUG & NDEBUG_C400_PREAD) + if (blocks) + printk("53C400r: blocks still == %d\n", blocks); + else + printk("53C400r: Exiting loop\n"); +#endif + break; + } + +#if 1 + if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) { + printk("53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks); + return -1; + } +#endif + +#if (NDEBUG & NDEBUG_C400_PREAD) + printk("53C400r: Waiting for buffer, bl=%d\n", bl); +#endif + + while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY) + ; +#if (NDEBUG & NDEBUG_C400_PREAD) + printk("53C400r: Transferring 128 bytes\n"); +#endif + +#ifdef CONFIG_SCSI_G_NCR5380_PORT + for (i=0; i<128; i++) + dst[start+i] = NCR5380_read(C400_HOST_BUFFER); +#else + /* implies CONFIG_SCSI_G_NCR5380_MEM */ + memmove(dst+start,NCR53C400_host_buffer+NCR5380_map_name,128); +#endif + start+=128; + blocks--; + } + + if (blocks) { +#if (NDEBUG & NDEBUG_C400_PREAD) + printk("53C400r: EXTRA: Waiting for buffer\n"); +#endif + while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY) + ; + +#if (NDEBUG 
& NDEBUG_C400_PREAD) + printk("53C400r: Transferring EXTRA 128 bytes\n"); +#endif +#ifdef CONFIG_SCSI_G_NCR5380_PORT + for (i=0; i<128; i++) + dst[start+i] = NCR5380_read(C400_HOST_BUFFER); +#else + /* implies CONFIG_SCSI_G_NCR5380_MEM */ + memmove(dst+start,NCR53C400_host_buffer+NCR5380_map_name,128); +#endif + start+=128; + blocks--; + } +#if (NDEBUG & NDEBUG_C400_PREAD) + else + printk("53C400r: No EXTRA required\n"); +#endif + +#if (NDEBUG & NDEBUG_C400_PREAD) + printk("53C400r: Final values: blocks=%d start=%d\n", blocks, start); +#endif + + if (!(NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ)) + printk("53C400r: no 53C80 gated irq after transfer"); +#if (NDEBUG & NDEBUG_C400_PREAD) + else + printk("53C400r: Got 53C80 interrupt and tried to clear it\n"); +#endif + +/* DON'T DO THIS - THEY NEVER ARRIVE! + printk("53C400r: Waiting for 53C80 registers\n"); + while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG) + ; +*/ + + if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER)) + printk("53C400r: no end dma signal\n"); +#if (NDEBUG & NDEBUG_C400_PREAD) + else + printk("53C400r: end dma as expected\n"); +#endif + + NCR5380_write(MODE_REG, MR_BASE); + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + return 0; +} + +static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src, int len) +{ + int blocks = len / 128; + int start = 0; + int i; + int bl; + NCR5380_local_declare(); + + NCR5380_setup(instance); + +#if (NDEBUG & NDEBUG_C400_PWRITE) + printk("53C400w: About to write %d blocks for %d bytes\n", blocks, len); +#endif + + NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE); + NCR5380_write(C400_BLOCK_COUNTER_REG, blocks); + while (1) { + if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) { + printk("53C400w: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks); + return -1; + } + + if ((bl=NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) { +#if (NDEBUG & NDEBUG_C400_PWRITE) + if (blocks) + printk("53C400w: exiting loop, blocks still == %d\n", blocks); + else + printk("53C400w: exiting loop\n"); +#endif + break; + } + +#if (NDEBUG & NDEBUG_C400_PWRITE) + printk("53C400w: %d blocks left\n", blocks); + + printk("53C400w: waiting for buffer, bl=%d\n", bl); +#endif + while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY) + ; + +#if (NDEBUG & NDEBUG_C400_PWRITE) + printk("53C400w: transferring 128 bytes\n"); +#endif +#ifdef CONFIG_SCSI_G_NCR5380_PORT + for (i=0; i<128; i++) + NCR5380_write(C400_HOST_BUFFER, src[start+i]); +#else + /* implies CONFIG_SCSI_G_NCR5380_MEM */ + memmove(NCR53C400_host_buffer+NCR5380_map_name,src+start,128); +#endif + start+=128; + blocks--; + } + if (blocks) { +#if (NDEBUG & NDEBUG_C400_PWRITE) + printk("53C400w: EXTRA waiting for buffer\n"); +#endif + while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY) + ; + +#if (NDEBUG & NDEBUG_C400_PWRITE) + printk("53C400w: transferring EXTRA 128 bytes\n"); +#endif +#ifdef CONFIG_SCSI_G_NCR5380_PORT + for (i=0; i<128; i++) + NCR5380_write(C400_HOST_BUFFER, src[start+i]); +#else + /* implies CONFIG_SCSI_G_NCR5380_MEM */ + memmove(NCR53C400_host_buffer+NCR5380_map_name,src+start,128); +#endif + start+=128; + blocks--; + } +#if (NDEBUG & NDEBUG_C400_PWRITE) + else + printk("53C400w: No EXTRA required\n"); +#endif + +#if (NDEBUG & NDEBUG_C400_PWRITE) + printk("53C400w: Final values: blocks=%d start=%d\n", blocks, start); +#endif + +#if 0 + printk("53C400w: waiting for registers to be available\n"); + THEY NEVER DO! 
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG) + ; + printk("53C400w: Got em\n"); +#endif + + /* Let's wait for this instead - could be ugly */ + /* All documentation says to check for this. Maybe my hardware is too + * fast. Waiting for it seems to work fine! KLL + */ + while (!(i = NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ)) + ; + + /* + * I know. i is certainly != 0 here but the loop is new. See previous + * comment. + */ + if (i) { +#if (NDEBUG & NDEBUG_C400_PWRITE) + printk("53C400w: got 53C80 gated irq (last block)\n"); +#endif + if (!((i=NCR5380_read(BUS_AND_STATUS_REG)) & BASR_END_DMA_TRANSFER)) + printk("53C400w: No END OF DMA bit - WHOOPS! BASR=%0x\n",i); +#if (NDEBUG & NDEBUG_C400_PWRITE) + else + printk("53C400w: Got END OF DMA\n"); +#endif + } + else + printk("53C400w: no 53C80 gated irq after transfer (last block)\n"); + +#if 0 + if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER)) { + printk("53C400w: no end dma signal\n"); + } +#endif + +#if (NDEBUG & NDEBUG_C400_PWRITE) + printk("53C400w: waiting for last byte...\n"); +#endif + while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT)) + ; + +#if (NDEBUG & NDEBUG_C400_PWRITE) + printk("53C400w: got last byte.\n"); + printk("53C400w: pwrite exiting with status 0, whoopee!\n"); +#endif + return 0; +} +#endif /* PSEUDO_DMA */ + +#include "NCR5380.c" + +#define PRINTP(x) len += sprintf(buffer+len, x) +#define ANDP , + +static int sprint_opcode(char* buffer, int len, int opcode) { + int start = len; + PRINTP("0x%02x " ANDP opcode); + return len-start; +} + +static int sprint_command (char* buffer, int len, unsigned char *command) { + int i,s,start=len; + len += sprint_opcode(buffer, len, command[0]); + for ( i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) + PRINTP("%02x " ANDP command[i]); + PRINTP("\n"); + return len-start; +} + +static int sprint_Scsi_Cmnd (char* buffer, int len, Scsi_Cmnd *cmd) { + int start = len; + PRINTP("host number %d destination target %d, lun %d\n" ANDP + cmd->host->host_no ANDP + cmd->target ANDP + cmd->lun); + PRINTP(" command = "); + len += sprint_command (buffer, len, cmd->cmnd); + return len-start; +} + +int generic_NCR5380_proc_info(char* buffer, char** start, off_t offset, int length, int hostno, int inout) +{ + int len = 0; + NCR5380_local_declare(); + unsigned char status; + int i; + struct Scsi_Host *scsi_ptr; + Scsi_Cmnd *ptr; + Scsi_Device *dev; + struct NCR5380_hostdata *hostdata; + + cli(); + + for (scsi_ptr = first_instance; scsi_ptr; scsi_ptr=scsi_ptr->next) + if (scsi_ptr->host_no == hostno) + break; + NCR5380_setup(scsi_ptr); + hostdata = (struct NCR5380_hostdata *)scsi_ptr->hostdata; + + PRINTP("SCSI host number %d : %s\n" ANDP scsi_ptr->host_no ANDP scsi_ptr->hostt->name); + PRINTP("Generic NCR5380 driver version %d\n" ANDP GENERIC_NCR5380_PUBLIC_RELEASE); + PRINTP("NCR5380 core version %d\n" ANDP NCR5380_PUBLIC_RELEASE); +#ifdef NCR53C400 + PRINTP("NCR53C400 extension version %d\n" ANDP NCR53C400_PUBLIC_RELEASE); + PRINTP("NCR53C400 card%s detected\n" ANDP (((struct NCR5380_hostdata *)scsi_ptr->hostdata)->flags & FLAG_NCR53C400)?"":" not"); +# if NCR53C400_PSEUDO_DMA + PRINTP("NCR53C400 pseudo DMA used\n"); +# endif +#else + PRINTP("NO NCR53C400 driver extensions\n"); +#endif + PRINTP("Using %s mapping at %s 0x%x, " ANDP STRVAL(NCR5380_map_config) ANDP STRVAL(NCR5380_map_name) ANDP scsi_ptr->NCR5380_instance_name); + if (scsi_ptr->irq == IRQ_NONE) + PRINTP("no interrupt\n"); + else + PRINTP("on interrupt %d\n" ANDP 
scsi_ptr->irq); + +#ifdef NCR5380_STATS + if (hostdata->connected || hostdata->issue_queue || hostdata->disconnected_queue) + PRINTP("There are commands pending, transfer rates may be crud\n"); + if (hostdata->pendingr) + PRINTP(" %d pending reads" ANDP hostdata->pendingr); + if (hostdata->pendingw) + PRINTP(" %d pending writes" ANDP hostdata->pendingw); + if (hostdata->pendingr || hostdata->pendingw) + PRINTP("\n"); + for (dev = scsi_devices; dev; dev=dev->next) { + if (dev->host == scsi_ptr) { + unsigned long br = hostdata->bytes_read[dev->id]; + unsigned long bw = hostdata->bytes_write[dev->id]; + long tr = hostdata->time_read[dev->id] / HZ; + long tw = hostdata->time_write[dev->id] / HZ; + + PRINTP(" T:%d %s " ANDP dev->id ANDP (dev->type < MAX_SCSI_DEVICE_CODE) ? scsi_device_types[(int)dev->type] : "Unknown"); + for (i=0; i<8; i++) + if (dev->vendor[i] >= 0x20) + *(buffer+(len++)) = dev->vendor[i]; + *(buffer+(len++)) = ' '; + for (i=0; i<16; i++) + if (dev->model[i] >= 0x20) + *(buffer+(len++)) = dev->model[i]; + *(buffer+(len++)) = ' '; + for (i=0; i<4; i++) + if (dev->rev[i] >= 0x20) + *(buffer+(len++)) = dev->rev[i]; + *(buffer+(len++)) = ' '; + + PRINTP("\n%10ld kb read in %5ld secs" ANDP br/1024 ANDP tr); + if (tr) + PRINTP(" @ %5ld bps" ANDP br / tr); + + PRINTP("\n%10ld kb written in %5ld secs" ANDP bw/1024 ANDP tw); + if (tw) + PRINTP(" @ %5ld bps" ANDP bw / tw); + PRINTP("\n"); + } + } +#endif + + status = NCR5380_read(STATUS_REG); + if (!(status & SR_REQ)) + PRINTP("REQ not asserted, phase unknown.\n"); + else { + for (i = 0; (phases[i].value != PHASE_UNKNOWN) && + (phases[i].value != (status & PHASE_MASK)); ++i) + ; + PRINTP("Phase %s\n" ANDP phases[i].name); + } + + if (!hostdata->connected) { + PRINTP("No currently connected command\n"); + } else { + len += sprint_Scsi_Cmnd (buffer, len, (Scsi_Cmnd *) hostdata->connected); + } + + PRINTP("issue_queue\n"); + + for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr; + ptr = (Scsi_Cmnd *) ptr->host_scribble) + len += sprint_Scsi_Cmnd (buffer, len, ptr); + + PRINTP("disconnected_queue\n"); + + for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; + ptr = (Scsi_Cmnd *) ptr->host_scribble) + len += sprint_Scsi_Cmnd (buffer, len, ptr); + + *start = buffer + offset; + len -= offset; + if (len > length) + len = length; + sti(); + return len; +} + +#undef PRINTP +#undef ANDP + +#ifdef MODULE +/* Eventually this will go into an include file, but this will be later */ +Scsi_Host_Template driver_template = GENERIC_NCR5380; + +#include <linux/module.h> +#include "scsi_module.c" +#endif diff --git a/linux/dev/glue/block.c b/linux/dev/glue/block.c new file mode 100644 index 0000000..a830781 --- /dev/null +++ b/linux/dev/glue/block.c @@ -0,0 +1,1770 @@ +/* + * Linux block driver support. + * + * Copyright (C) 1996 The University of Utah and the Computer Systems + * Laboratory at the University of Utah (CSL) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Shantanu Goel, University of Utah CSL + */ + +/* + * linux/drivers/block/ll_rw_blk.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1994, Karl Keyte: Added support for disk statistics + */ + +/* + * linux/fs/block_dev.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +/* + * linux/fs/buffer.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +#include <sys/types.h> +#include <machine/spl.h> +#include <mach/mach_types.h> +#include <mach/kern_return.h> +#include <mach/mig_errors.h> +#include <mach/port.h> +#include <mach/vm_param.h> +#include <mach/notify.h> + +#include <kern/kalloc.h> +#include <kern/list.h> + +#include <ipc/ipc_port.h> +#include <ipc/ipc_space.h> + +#include <vm/vm_map.h> +#include <vm/vm_kern.h> +#include <vm/vm_page.h> + +#include <device/device_types.h> +#include <device/device_port.h> +#include <device/disk_status.h> +#include <device/device_reply.user.h> +#include <device/device_emul.h> +#include <device/ds_routines.h> + +/* TODO. This should be fixed to not be i386 specific. */ +#include <i386at/disk.h> + +#define MACH_INCLUDE +#include <linux/fs.h> +#include <linux/blk.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/fcntl.h> +#include <linux/major.h> +#include <linux/kdev_t.h> +#include <linux/delay.h> +#include <linux/malloc.h> +#include <linux/hdreg.h> +#include <asm/io.h> + +#include <linux/dev/glue/glue.h> + +#ifdef PAE +#define VM_PAGE_LINUX VM_PAGE_DMA32 +#else +#define VM_PAGE_LINUX VM_PAGE_HIGHMEM +#endif + +/* This task queue is not used in Mach: just for fixing undefined symbols. */ +DECLARE_TASK_QUEUE (tq_disk); + +/* Location of VTOC in units for sectors (512 bytes). */ +#define PDLOCATION 29 + +/* Linux kernel variables. */ + +/* Temporary data allocated on the stack. */ +struct temp_data +{ + struct inode inode; + struct file file; + struct request req; + struct list pages; +}; + +/* One of these exists for each + driver associated with a major number. */ +struct device_struct +{ + const char *name; /* device name */ + struct file_operations *fops; /* operations vector */ + int busy:1; /* driver is being opened/closed */ + int want:1; /* someone wants to open/close driver */ + struct gendisk *gd; /* DOS partition information */ + int default_slice; /* what slice to use when none is given */ + struct disklabel **labels; /* disklabels for each DOS partition */ +}; + +/* An entry in the Mach name to Linux major number conversion table. */ +struct name_map +{ + const char *name; /* Mach name for device */ + unsigned major; /* Linux major number */ + unsigned unit; /* Linux unit number */ + int read_only; /* 1 if device is read only */ +}; + +/* Driver operation table. */ +static struct device_struct blkdevs[MAX_BLKDEV]; + +/* Driver request function table. 
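+   In Linux 2.0 each entry pairs a driver's request function with the head
+   of its pending request queue (request_fn and current_request); both stay
+   NULL until the corresponding driver installs itself.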
*/ +struct blk_dev_struct blk_dev[MAX_BLKDEV] = +{ + { NULL, NULL }, /* 0 no_dev */ + { NULL, NULL }, /* 1 dev mem */ + { NULL, NULL }, /* 2 dev fd */ + { NULL, NULL }, /* 3 dev ide0 or hd */ + { NULL, NULL }, /* 4 dev ttyx */ + { NULL, NULL }, /* 5 dev tty */ + { NULL, NULL }, /* 6 dev lp */ + { NULL, NULL }, /* 7 dev pipes */ + { NULL, NULL }, /* 8 dev sd */ + { NULL, NULL }, /* 9 dev st */ + { NULL, NULL }, /* 10 */ + { NULL, NULL }, /* 11 */ + { NULL, NULL }, /* 12 */ + { NULL, NULL }, /* 13 */ + { NULL, NULL }, /* 14 */ + { NULL, NULL }, /* 15 */ + { NULL, NULL }, /* 16 */ + { NULL, NULL }, /* 17 */ + { NULL, NULL }, /* 18 */ + { NULL, NULL }, /* 19 */ + { NULL, NULL }, /* 20 */ + { NULL, NULL }, /* 21 */ + { NULL, NULL } /* 22 dev ide1 */ +}; + +/* + * blk_size contains the size of all block-devices in units of 1024 byte + * sectors: + * + * blk_size[MAJOR][MINOR] + * + * if (!blk_size[MAJOR]) then no minor size checking is done. + */ +int *blk_size[MAX_BLKDEV] = { NULL, NULL, }; + +/* + * blksize_size contains the size of all block-devices: + * + * blksize_size[MAJOR][MINOR] + * + * if (!blksize_size[MAJOR]) then 1024 bytes is assumed. + */ +int *blksize_size[MAX_BLKDEV] = { NULL, NULL, }; + +/* + * hardsect_size contains the size of the hardware sector of a device. + * + * hardsect_size[MAJOR][MINOR] + * + * if (!hardsect_size[MAJOR]) + * then 512 bytes is assumed. + * else + * sector_size is hardsect_size[MAJOR][MINOR] + * This is currently set by some scsi device and read by the msdos fs driver + * This might be a some uses later. + */ +int *hardsect_size[MAX_BLKDEV] = { NULL, NULL, }; + +/* This specifies how many sectors to read ahead on the disk. + This is unused in Mach. It is here to make drivers compile. */ +int read_ahead[MAX_BLKDEV] = {0, }; + +/* Use to wait on when there are no free requests. + This is unused in Mach. It is here to make drivers compile. */ +struct wait_queue *wait_for_request = NULL; + +/* Initialize block drivers. */ +int +blk_dev_init () +{ +#ifdef CONFIG_BLK_DEV_IDE + extern char *kernel_cmdline; + if (strncmp(kernel_cmdline, "noide", 5) && + !strstr(kernel_cmdline, " noide")) + ide_init (); +#endif +#ifdef CONFIG_BLK_DEV_FD + floppy_init (); +#else + outb_p (0xc, 0x3f2); +#endif + return 0; +} + +/* Return 1 if major number MAJOR corresponds to a disk device. */ +static inline int +disk_major (int major) +{ + return (major == IDE0_MAJOR + || major == IDE1_MAJOR + || major == IDE2_MAJOR + || major == IDE3_MAJOR + || major == SCSI_DISK_MAJOR); +} + +/* Linux kernel block support routines. */ + +/* Register a driver for major number MAJOR, + with name NAME, and operations vector FOPS. */ +int +register_blkdev (unsigned major, const char *name, + struct file_operations *fops) +{ + if (major == 0) + { + for (major = MAX_BLKDEV - 1; major > 0; major--) + if (blkdevs[major].fops == NULL) + goto out; + return -EBUSY; + } + if (major >= MAX_BLKDEV) + return -EINVAL; + if (blkdevs[major].fops && blkdevs[major].fops != fops) + return -EBUSY; + +out: + blkdevs[major].name = name; + blkdevs[major].fops = fops; + blkdevs[major].busy = 0; + blkdevs[major].want = 0; + blkdevs[major].gd = NULL; + blkdevs[major].default_slice = 0; + blkdevs[major].labels = NULL; + return 0; +} + +/* Unregister the driver associated with + major number MAJOR and having the name NAME. */ +int +unregister_blkdev (unsigned major, const char *name) +{ + if (major >= MAX_BLKDEV) + return -EINVAL; + if (! 
blkdevs[major].fops || strcmp (blkdevs[major].name, name)) + return -EINVAL; + blkdevs[major].fops = NULL; + if (blkdevs[major].labels) + { + assert (blkdevs[major].gd); + kfree ((vm_offset_t) blkdevs[major].labels, + (sizeof (struct disklabel *) + * blkdevs[major].gd->max_p * blkdevs[major].gd->max_nr)); + } + return 0; +} + +void +set_blocksize (kdev_t dev, int size) +{ + if (! blksize_size[MAJOR (dev)]) + return; + + switch (size) + { + case 512: + case 1024: + case 2048: + case 4096: + break; + default: + panic ("Invalid blocksize passed to set_blocksize"); + break; + } + blksize_size[MAJOR (dev)][MINOR (dev)] = size; +} + +/* Allocate a buffer SIZE bytes long. */ +static void * +alloc_buffer (int size) +{ + vm_page_t m; + struct temp_data *d; + + assert (size <= PAGE_SIZE); + + if (! linux_auto_config) + { + while ((m = vm_page_grab (VM_PAGE_DMA32)) == 0) + VM_PAGE_WAIT (0); + d = current_thread ()->pcb->data; + assert (d); + list_insert_tail (&d->pages, &m->node); + return (void *) phystokv(m->phys_addr); + } + return (void *) __get_free_pages (GFP_KERNEL, 0, ~0UL); +} + +/* Free buffer P which is SIZE bytes long. */ +static void +free_buffer (void *p, int size) +{ + struct temp_data *d; + vm_page_t m, tmp; + + assert (size <= PAGE_SIZE); + + if (! linux_auto_config) + { + d = current_thread ()->pcb->data; + assert (d); + list_for_each_entry_safe (&d->pages, m, tmp, node) + { + if (phystokv(m->phys_addr) == (vm_offset_t) p) + { + list_remove (&m->node); + VM_PAGE_FREE (m); + return; + } + } + panic ("free_buffer"); + } + free_pages ((unsigned long) p, 0); +} + +/* Allocate a buffer of SIZE bytes and + associate it with block number BLOCK of device DEV. */ +struct buffer_head * +getblk (kdev_t dev, int block, int size) +{ + struct buffer_head *bh; + + assert (size <= PAGE_SIZE); + + bh = (struct buffer_head *) kalloc (sizeof (struct buffer_head)); + if (bh) + { + memset (bh, 0, sizeof (struct buffer_head)); + bh->b_data = alloc_buffer (size); + if (! bh->b_data) + { + kfree ((vm_offset_t) bh, sizeof (struct buffer_head)); + return NULL; + } + bh->b_dev = dev; + bh->b_size = size; + bh->b_state = 1 << BH_Lock; + bh->b_blocknr = block; + } + return bh; +} + +/* Release buffer BH previously allocated by getblk. */ +void +__brelse (struct buffer_head *bh) +{ + free_buffer (bh->b_data, bh->b_size); + kfree ((vm_offset_t) bh, sizeof (*bh)); +} + +/* Allocate a buffer of SIZE bytes and fill it with data + from device DEV starting at block number BLOCK. */ +struct buffer_head * +bread (kdev_t dev, int block, int size) +{ + struct buffer_head *bh; + + bh = getblk (dev, block, size); + if (bh) + { + ll_rw_block (READ, 1, &bh, 0); + wait_on_buffer (bh); + if (! buffer_uptodate (bh)) + { + __brelse (bh); + return NULL; + } + } + return bh; +} + +/* Return the block size for device DEV in *BSIZE and + log2(block size) in *BSHIFT. */ +static void +get_block_size (kdev_t dev, int *bsize, int *bshift) +{ + int i; + + *bsize = BLOCK_SIZE; + if (blksize_size[MAJOR (dev)] + && blksize_size[MAJOR (dev)][MINOR (dev)]) + *bsize = blksize_size[MAJOR (dev)][MINOR (dev)]; + for (i = *bsize, *bshift = 0; i != 1; i >>= 1, (*bshift)++) + ; +} + +/* Enqueue request REQ on a driver's queue. */ +static inline void +enqueue_request (struct request *req) +{ + struct request *tmp; + struct blk_dev_struct *dev; + + dev = blk_dev + MAJOR (req->rq_dev); + cli (); + tmp = dev->current_request; + if (! 
tmp) + { + dev->current_request = req; + (*dev->request_fn) (); + sti (); + return; + } + while (tmp->next) + { + if ((IN_ORDER (tmp, req) || ! IN_ORDER (tmp, tmp->next)) + && IN_ORDER (req, tmp->next)) + break; + tmp = tmp->next; + } + req->next = tmp->next; + tmp->next = req; + if (scsi_blk_major (MAJOR (req->rq_dev))) + (*dev->request_fn) (); + sti (); +} + +int +check_rw_block (int nr, struct buffer_head **bh) +{ + int i, bshift, bsize; + get_block_size (bh[0]->b_dev, &bsize, &bshift); + loff_t sectorl = bh[0]->b_blocknr << (bshift - 9); + + for (i = 0; i < nr; i++) + { + sectorl += bh[i]->b_size >> 9; + unsigned long sector = sectorl; + if (sector != sectorl) + return -EOVERFLOW; + } + + return 0; +} + +/* Perform the I/O operation RW on the buffer list BH + containing NR buffers. */ +void +ll_rw_block (int rw, int nr, struct buffer_head **bh, int quiet) +{ + int i, bshift, bsize; + unsigned major; + struct request *r; + static struct request req; + + major = MAJOR (bh[0]->b_dev); + assert (major < MAX_BLKDEV); + + get_block_size (bh[0]->b_dev, &bsize, &bshift); + + if (! linux_auto_config) + { + assert (current_thread ()->pcb->data); + r = &((struct temp_data *) current_thread ()->pcb->data)->req; + } + else + r = &req; + + for (i = 0, r->nr_sectors = 0; i < nr - 1; i++) + { + r->nr_sectors += bh[i]->b_size >> 9; + bh[i]->b_reqnext = bh[i + 1]; + } + r->nr_sectors += bh[i]->b_size >> 9; + bh[i]->b_reqnext = NULL; + + r->rq_status = RQ_ACTIVE; + r->rq_dev = bh[0]->b_dev; + r->cmd = rw; + r->errors = 0; + r->quiet = quiet; + r->sector = bh[0]->b_blocknr << (bshift - 9); + r->current_nr_sectors = bh[0]->b_size >> 9; + r->buffer = bh[0]->b_data; + r->bh = bh[0]; + r->bhtail = bh[nr - 1]; + r->sem = NULL; + r->next = NULL; + + enqueue_request (r); +} + +#define BSIZE (1 << bshift) +#define BMASK (BSIZE - 1) + +/* Perform read/write operation RW on device DEV + starting at *off to/from buffer *BUF of size *RESID. + The device block size is given by BSHIFT. *OFF and + *RESID may be non-multiples of the block size. + *OFF, *BUF and *RESID are updated if the operation + completed successfully. */ +static int +rdwr_partial (int rw, kdev_t dev, loff_t *off, + char **buf, int *resid, int bshift) +{ + int c, err = 0, o; + long sect, nsect; + struct buffer_head bhead, *bh = &bhead; + struct gendisk *gd; + loff_t blkl; + + memset (bh, 0, sizeof (struct buffer_head)); + bh->b_state = 1 << BH_Lock; + bh->b_dev = dev; + blkl = *off >> bshift; + bh->b_blocknr = blkl; + if (bh->b_blocknr != blkl) + return -EOVERFLOW; + bh->b_size = BSIZE; + + /* Check if this device has non even number of blocks. */ + for (gd = gendisk_head, nsect = -1; gd; gd = gd->next) + if (gd->major == MAJOR (dev)) + { + nsect = gd->part[MINOR (dev)].nr_sects; + break; + } + if (nsect > 0) + { + loff_t sectl; + sectl = bh->b_blocknr << (bshift - 9); + sect = sectl; + assert ((nsect - sect) > 0); + if (nsect - sect < (BSIZE >> 9)) + bh->b_size = (nsect - sect) << 9; + } + bh->b_data = alloc_buffer (bh->b_size); + if (! bh->b_data) + return -ENOMEM; + err = check_rw_block (1, &bh); + if (err) + goto out; + ll_rw_block (READ, 1, &bh, 0); + wait_on_buffer (bh); + if (buffer_uptodate (bh)) + { + o = *off & BMASK; + c = bh->b_size - o; + if (c > *resid) + c = *resid; + if (rw == READ) + memcpy (*buf, bh->b_data + o, c); + else + { + memcpy (bh->b_data + o, *buf, c); + bh->b_state = (1 << BH_Dirty) | (1 << BH_Lock); + err = check_rw_block (1, &bh); + if (err) + goto out; + ll_rw_block (WRITE, 1, &bh, 0); + wait_on_buffer (bh); + if (! 
buffer_uptodate (bh)) + { + err = -EIO; + goto out; + } + } + *buf += c; + *resid -= c; + *off += c; + } + else + err = -EIO; +out: + free_buffer (bh->b_data, bh->b_size); + return err; +} + +#define BH_Bounce 16 +#define MAX_BUF 8 + +/* Perform read/write operation RW on device DEV + starting at *off to/from buffer *BUF of size *RESID. + The device block size is given by BSHIFT. *OFF and + *RESID must be multiples of the block size. + *OFF, *BUF and *RESID are updated if the operation + completed successfully. */ +static int +rdwr_full (int rw, kdev_t dev, loff_t *off, char **buf, int *resid, int bshift) +{ + int cc, err = 0, i, j, nb, nbuf; + loff_t blkl; + long blk, newblk; + struct buffer_head bhead[MAX_BUF], *bh, *bhp[MAX_BUF]; + phys_addr_t pa; + + assert ((*off & BMASK) == 0); + + nbuf = *resid >> bshift; + blkl = *off >> bshift; + blk = blkl; + if (blk != blkl) + return -EOVERFLOW; + for (i = nb = 0, bh = bhead; nb < nbuf; bh++) + { + memset (bh, 0, sizeof (*bh)); + bh->b_dev = dev; + bh->b_blocknr = blk; + set_bit (BH_Lock, &bh->b_state); + if (rw == WRITE) + set_bit (BH_Dirty, &bh->b_state); + cc = PAGE_SIZE - (((int) *buf + (nb << bshift)) & PAGE_MASK); + pa = pmap_extract (vm_map_pmap (device_io_map), + (((vm_offset_t) *buf) + (nb << bshift))); + if (cc >= BSIZE && (((int) *buf + (nb << bshift)) & 511) == 0 + && pa + cc <= VM_PAGE_DIRECTMAP_LIMIT) + cc &= ~BMASK; + else + { + cc = PAGE_SIZE; + set_bit (BH_Bounce, &bh->b_state); + } + if (cc > ((nbuf - nb) << bshift)) + cc = (nbuf - nb) << bshift; + if (! test_bit (BH_Bounce, &bh->b_state)) + bh->b_data = (char *) phystokv(pa); + else + { + bh->b_data = alloc_buffer (cc); + if (! bh->b_data) + { + err = -ENOMEM; + break; + } + if (rw == WRITE) + memcpy (bh->b_data, *buf + (nb << bshift), cc); + } + bh->b_size = cc; + bhp[i] = bh; + nb += cc >> bshift; + newblk = blk + (cc >> bshift); + if (newblk < blk) + { + err = -EOVERFLOW; + break; + } + blk = newblk; + if (++i == MAX_BUF) + break; + } + if (! err) + err = check_rw_block (i, bhp); + if (! err) + { + assert (i > 0); + ll_rw_block (rw, i, bhp, 0); + wait_on_buffer (bhp[i - 1]); + } + for (bh = bhead, cc = 0, j = 0; j < i; cc += bh->b_size, bh++, j++) + { + if (! err && buffer_uptodate (bh) + && rw == READ && test_bit (BH_Bounce, &bh->b_state)) + memcpy (*buf + cc, bh->b_data, bh->b_size); + else if (! err && ! buffer_uptodate (bh)) + err = -EIO; + if (test_bit (BH_Bounce, &bh->b_state)) + free_buffer (bh->b_data, bh->b_size); + } + if (! err) + { + *buf += cc; + *resid -= cc; + *off += cc; + } + return err; +} + +/* Perform read/write operation RW on device DEV + starting at *off to/from buffer BUF of size COUNT. + *OFF is updated if the operation completed successfully. */ +static int +do_rdwr (int rw, kdev_t dev, loff_t *off, char *buf, int count) +{ + int bsize, bshift, err = 0, resid = count; + + get_block_size (dev, &bsize, &bshift); + if (*off & BMASK) + err = rdwr_partial (rw, dev, off, &buf, &resid, bshift); + while (resid >= bsize && ! err) + err = rdwr_full (rw, dev, off, &buf, &resid, bshift); + if (! err && resid) + err = rdwr_partial (rw, dev, off, &buf, &resid, bshift); + return err ? 
err : count - resid; +} + +int +block_write (struct inode *inode, struct file *filp, + const char *buf, int count) +{ + return do_rdwr (WRITE, inode->i_rdev, &filp->f_pos, (char *) buf, count); +} + +int +block_read (struct inode *inode, struct file *filp, char *buf, int count) +{ + return do_rdwr (READ, inode->i_rdev, &filp->f_pos, buf, count); +} + +/* + * This routine checks whether a removable media has been changed, + * and invalidates all buffer-cache-entries in that case. This + * is a relatively slow routine, so we have to try to minimize using + * it. Thus it is called only upon a 'mount' or 'open'. This + * is the best way of combining speed and utility, I think. + * People changing diskettes in the middle of an operation deserve + * to loose :-) + */ +int +check_disk_change (kdev_t dev) +{ + unsigned i; + struct file_operations * fops; + + i = MAJOR(dev); + if (i >= MAX_BLKDEV || (fops = blkdevs[i].fops) == NULL) + return 0; + if (fops->check_media_change == NULL) + return 0; + if (! (*fops->check_media_change) (dev)) + return 0; + + /* printf ("Disk change detected on device %s\n", kdevname(dev));*/ + + if (fops->revalidate) + (*fops->revalidate) (dev); + + return 1; +} + +/* Mach device interface routines. */ + +/* Mach name to Linux major/minor number mapping table. */ +static struct name_map name_to_major[] = +{ + /* IDE disks */ + { "hd0", IDE0_MAJOR, 0, 0 }, + { "hd1", IDE0_MAJOR, 1, 0 }, + { "hd2", IDE1_MAJOR, 0, 0 }, + { "hd3", IDE1_MAJOR, 1, 0 }, + { "hd4", IDE2_MAJOR, 0, 0 }, + { "hd5", IDE2_MAJOR, 1, 0 }, + { "hd6", IDE3_MAJOR, 0, 0 }, + { "hd7", IDE3_MAJOR, 1, 0 }, + + /* IDE CDROMs */ + { "wcd0", IDE0_MAJOR, 0, 1 }, + { "wcd1", IDE0_MAJOR, 1, 1 }, + { "wcd2", IDE1_MAJOR, 0, 1 }, + { "wcd3", IDE1_MAJOR, 1, 1 }, + { "wcd4", IDE2_MAJOR, 0, 1 }, + { "wcd5", IDE2_MAJOR, 1, 1 }, + { "wcd6", IDE3_MAJOR, 0, 1 }, + { "wcd7", IDE3_MAJOR, 1, 1 }, + + /* SCSI disks */ + { "sd0", SCSI_DISK_MAJOR, 0, 0 }, + { "sd1", SCSI_DISK_MAJOR, 1, 0 }, + { "sd2", SCSI_DISK_MAJOR, 2, 0 }, + { "sd3", SCSI_DISK_MAJOR, 3, 0 }, + { "sd4", SCSI_DISK_MAJOR, 4, 0 }, + { "sd5", SCSI_DISK_MAJOR, 5, 0 }, + { "sd6", SCSI_DISK_MAJOR, 6, 0 }, + { "sd7", SCSI_DISK_MAJOR, 7, 0 }, + + /* SCSI CDROMs */ + { "cd0", SCSI_CDROM_MAJOR, 0, 1 }, + { "cd1", SCSI_CDROM_MAJOR, 1, 1 }, + + /* Floppy disks */ + { "fd0", FLOPPY_MAJOR, 0, 0 }, + { "fd1", FLOPPY_MAJOR, 1, 0 }, +}; + +#define NUM_NAMES (sizeof (name_to_major) / sizeof (name_to_major[0])) + +/* One of these is associated with each open instance of a device. */ +struct block_data +{ + const char *name; /* Mach name for device */ + int want:1; /* someone is waiting for I/O to complete */ + int open_count; /* number of opens */ + int iocount; /* number of pending I/O operations */ + int part; /* BSD partition number (-1 if none) */ + int flags; /* Linux file flags */ + int mode; /* Linux file mode */ + kdev_t dev; /* Linux device number */ + ipc_port_t port; /* port representing device */ + struct device_struct *ds; /* driver operation table entry */ + struct device device; /* generic device header */ + struct name_map *np; /* name to inode map */ + struct block_data *next; /* forward link */ +}; + +/* List of open devices. */ +static struct block_data *open_list; + +/* Forward declarations. */ + +extern struct device_emulation_ops linux_block_emulation_ops; + +static io_return_t device_close (void *); +static io_return_t device_close_forced (void *, int); + +/* Return a send right for block device BD. 
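+   A null BD simply yields IP_NULL; otherwise a fresh send right is made
+   for the port that was allocated when the device was opened.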
*/ +static ipc_port_t +dev_to_port (void *bd) +{ + return (bd + ? ipc_port_make_send (((struct block_data *) bd)->port) + : IP_NULL); +} + +/* Return 1 if C is a letter of the alphabet. */ +static inline int +isalpha (int c) +{ + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); +} + +/* Return 1 if C is a digit. */ +static inline int +isdigit (int c) +{ + return c >= '0' && c <= '9'; +} + +/* Find the name map entry for device NAME. + Set *SLICE to be the DOS partition and + *PART the BSD/Mach partition, if any. */ +static struct name_map * +find_name (char *name, int *slice, int *part) +{ + char *p, *q; + int i, len; + struct name_map *np; + + /* Parse name into name, unit, DOS partition (slice) and partition. */ + for (*slice = 0, *part = -1, p = name; isalpha (*p); p++) + ; + if (p == name || ! isdigit (*p)) + return NULL; + do + p++; + while (isdigit (*p)); + if (*p) + { + q = p; + if (*q == 's' && isdigit (*(q + 1))) + { + q++; + do + *slice = *slice * 10 + *q++ - '0'; + while (isdigit (*q)); + if (! *q) + goto find_major; + } + if (! isalpha (*q) || *(q + 1)) + return NULL; + *part = *q - 'a'; + } + +find_major: + /* Convert name to major number. */ + for (i = 0, np = name_to_major; i < NUM_NAMES; i++, np++) + { + len = strlen (np->name); + if (len == (p - name) && ! strncmp (np->name, name, len)) + return np; + } + return NULL; +} + +/* Attempt to read a BSD disklabel from device DEV. */ +static struct disklabel * +read_bsd_label (kdev_t dev) +{ + int bsize, bshift; + struct buffer_head *bh; + struct disklabel *dlp, *lp = NULL; + + get_block_size (dev, &bsize, &bshift); + bh = bread (dev, LBLLOC >> (bshift - 9), bsize); + if (bh) + { + dlp = (struct disklabel *) (bh->b_data + ((LBLLOC << 9) & (bsize - 1))); + if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC) + { + lp = (struct disklabel *) kalloc (sizeof (*lp)); + assert (lp); + memcpy (lp, dlp, sizeof (*lp)); + } + __brelse (bh); + } + return lp; +} + +/* Attempt to read a VTOC from device DEV. */ +static struct disklabel * +read_vtoc (kdev_t dev) +{ + int bshift, bsize, i; + struct buffer_head *bh; + struct evtoc *evp; + struct disklabel *lp = NULL; + + get_block_size (dev, &bsize, &bshift); + bh = bread (dev, PDLOCATION >> (bshift - 9), bsize); + if (bh) + { + evp = (struct evtoc *) (bh->b_data + ((PDLOCATION << 9) & (bsize - 1))); + if (evp->sanity == VTOC_SANE) + { + lp = (struct disklabel *) kalloc (sizeof (*lp)); + assert (lp); + lp->d_npartitions = evp->nparts; + if (lp->d_npartitions > MAXPARTITIONS) + lp->d_npartitions = MAXPARTITIONS; + for (i = 0; i < lp->d_npartitions; i++) + { + lp->d_partitions[i].p_size = evp->part[i].p_size; + lp->d_partitions[i].p_offset = evp->part[i].p_start; + lp->d_partitions[i].p_fstype = FS_BSDFFS; + } + } + __brelse (bh); + } + return lp; +} + +/* Initialize BSD/Mach partition table for device + specified by NP, DS and *DEV. Check SLICE and *PART for validity. */ +static kern_return_t +init_partition (struct name_map *np, kdev_t *dev, + struct device_struct *ds, int slice, int *part) +{ + int i, j; + struct disklabel *lp; + struct gendisk *gd = ds->gd; + struct partition *p; + struct temp_data *d = current_thread ()->pcb->data; + + if (! gd) + { + *part = -1; + return 0; + } + if (ds->labels) + goto check; + ds->labels = (struct disklabel **) kalloc (sizeof (struct disklabel *) + * gd->max_nr * gd->max_p); + if (! 
ds->labels) + return D_NO_MEMORY; + memset ((void *) ds->labels, 0, + sizeof (struct disklabel *) * gd->max_nr * gd->max_p); + for (i = 1; i < gd->max_p; i++) + { + d->inode.i_rdev = *dev | i; + if (gd->part[MINOR (d->inode.i_rdev)].nr_sects <= 0 + || gd->part[MINOR (d->inode.i_rdev)].start_sect < 0) + continue; + d->file.f_flags = 0; + d->file.f_mode = O_RDONLY; + if (ds->fops->open && (*ds->fops->open) (&d->inode, &d->file)) + continue; + lp = read_bsd_label (d->inode.i_rdev); + if (! lp && gd->part[MINOR (d->inode.i_rdev)].nr_sects > PDLOCATION) + lp = read_vtoc (d->inode.i_rdev); + if (ds->fops->release) + (*ds->fops->release) (&d->inode, &d->file); + if (lp) + { + if (ds->default_slice == 0) + ds->default_slice = i; + for (j = 0, p = lp->d_partitions; j < lp->d_npartitions; j++, p++) + { + if (p->p_offset < 0 || p->p_size <= 0) + continue; + + /* Sanity check. */ + if (p->p_size > gd->part[MINOR (d->inode.i_rdev)].nr_sects) + p->p_size = gd->part[MINOR (d->inode.i_rdev)].nr_sects; + } + } + ds->labels[MINOR (d->inode.i_rdev)] = lp; + } + +check: + if (*part >= 0 && slice == 0) + slice = ds->default_slice; + if (*part >= 0 && slice == 0) + return D_NO_SUCH_DEVICE; + *dev = MKDEV (MAJOR (*dev), MINOR (*dev) | slice); + if (slice >= gd->max_p + || gd->part[MINOR (*dev)].start_sect < 0 + || gd->part[MINOR (*dev)].nr_sects <= 0) + return D_NO_SUCH_DEVICE; + if (*part >= 0) + { + lp = ds->labels[MINOR (*dev)]; + if (! lp + || *part >= lp->d_npartitions + || lp->d_partitions[*part].p_offset < 0 + || lp->d_partitions[*part].p_size <= 0) + return D_NO_SUCH_DEVICE; + } + return 0; +} + +#define DECL_DATA struct temp_data td +#define INIT_DATA() \ +{ \ + list_init (&td.pages); \ + td.inode.i_rdev = bd->dev; \ + td.file.f_mode = bd->mode; \ + td.file.f_flags = bd->flags; \ + current_thread ()->pcb->data = &td; \ +} + +static io_return_t +device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type, + dev_mode_t mode, char *name, device_t *devp) +{ + int part, slice, err; + unsigned major, minor; + kdev_t dev; + ipc_port_t notify; + struct block_data *bd = NULL, *bdp; + struct device_struct *ds; + struct gendisk *gd; + struct name_map *np; + DECL_DATA; + + np = find_name (name, &slice, &part); + if (! np) + return D_NO_SUCH_DEVICE; + major = np->major; + ds = &blkdevs[major]; + + /* Check that driver exists. */ + if (! ds->fops) + return D_NO_SUCH_DEVICE; + + /* Wait for any other open/close calls to finish. */ + ds = &blkdevs[major]; + while (ds->busy) + { + ds->want = 1; + assert_wait ((event_t) ds, FALSE); + schedule (); + } + ds->busy = 1; + + /* Compute minor number. */ + if (! ds->gd) + { + for (gd = gendisk_head; gd && gd->major != major; gd = gd->next) + ; + ds->gd = gd; + } + minor = np->unit; + gd = ds->gd; + if (gd) + minor <<= gd->minor_shift; + dev = MKDEV (major, minor); + + list_init (&td.pages); + current_thread ()->pcb->data = &td; + + /* Check partition. */ + err = init_partition (np, &dev, ds, slice, &part); + if (err) + goto out; + + /* Initialize file structure. */ + switch (mode & (D_READ|D_WRITE)) + { + case D_WRITE: + td.file.f_mode = O_WRONLY; + break; + + case D_READ|D_WRITE: + td.file.f_mode = O_RDWR; + break; + + default: + td.file.f_mode = O_RDONLY; + break; + } + td.file.f_flags = (mode & D_NODELAY) ? O_NDELAY : 0; + + /* Check if the device is currently open. 
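+     An already-open instance is reused only when the device number, BSD
+     partition, mode and flags all match; in that case the code below merely
+     bumps open_count instead of calling the driver's open routine again.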
*/ + for (bdp = open_list; bdp; bdp = bdp->next) + if (bdp->dev == dev + && bdp->part == part + && bdp->mode == td.file.f_mode + && bdp->flags == td.file.f_flags) + { + bd = bdp; + goto out; + } + + /* Open the device. */ + if (ds->fops->open) + { + td.inode.i_rdev = dev; + err = (*ds->fops->open) (&td.inode, &td.file); + if (err) + { + err = linux_to_mach_error (err); + goto out; + } + } + + /* Allocate and initialize device data. */ + bd = (struct block_data *) kalloc (sizeof (struct block_data)); + if (! bd) + { + err = D_NO_MEMORY; + goto bad; + } + bd->want = 0; + bd->open_count = 0; + bd->iocount = 0; + bd->part = part; + bd->ds = ds; + bd->device.emul_data = bd; + bd->device.emul_ops = &linux_block_emulation_ops; + bd->dev = dev; + bd->mode = td.file.f_mode; + bd->flags = td.file.f_flags; + bd->port = ipc_port_alloc_kernel (); + if (bd->port == IP_NULL) + { + err = KERN_RESOURCE_SHORTAGE; + goto bad; + } + ipc_kobject_set (bd->port, (ipc_kobject_t) &bd->device, IKOT_DEVICE); + notify = ipc_port_make_sonce (bd->port); + ip_lock (bd->port); + ipc_port_nsrequest (bd->port, 1, notify, ¬ify); + assert (notify == IP_NULL); + goto out; + +bad: + if (ds->fops->release) + (*ds->fops->release) (&td.inode, &td.file); + +out: + ds->busy = 0; + if (ds->want) + { + ds->want = 0; + thread_wakeup ((event_t) ds); + } + + if (bd && bd->open_count > 0) + { + if (err) + *devp = NULL; + else + { + *devp = &bd->device; + bd->open_count++; + } + return err; + } + + if (err) + { + if (bd) + { + if (bd->port != IP_NULL) + { + ipc_kobject_set (bd->port, IKO_NULL, IKOT_NONE); + ipc_port_dealloc_kernel (bd->port); + *devp = (device_t) IP_NULL; + } + kfree ((vm_offset_t) bd, sizeof (struct block_data)); + bd = NULL; + } + } + else + { + bd->open_count = 1; + bd->next = open_list; + open_list = bd; + *devp = &bd -> device; + } + + if (!IP_VALID (reply_port) && ! err) + device_close (bd); + return err; +} + +static io_return_t +device_close_forced (void *d, int force) +{ + struct block_data *bd = d, *bdp, **prev; + struct device_struct *ds = bd->ds; + DECL_DATA; + + INIT_DATA (); + + /* Wait for any other open/close to complete. */ + while (ds->busy) + { + ds->want = 1; + assert_wait ((event_t) ds, FALSE); + schedule (); + } + ds->busy = 1; + + if (force || --bd->open_count == 0) + { + /* Wait for pending I/O to complete. */ + while (bd->iocount > 0) + { + bd->want = 1; + assert_wait ((event_t) bd, FALSE); + schedule (); + } + + /* Remove device from open list. */ + prev = &open_list; + bdp = open_list; + while (bdp) + { + if (bdp == bd) + { + *prev = bdp->next; + break; + } + prev = &bdp->next; + bdp = bdp->next; + } + + assert (bdp == bd); + + if (ds->fops->release) + (*ds->fops->release) (&td.inode, &td.file); + + ipc_kobject_set (bd->port, IKO_NULL, IKOT_NONE); + ipc_port_dealloc_kernel (bd->port); + kfree ((vm_offset_t) bd, sizeof (struct block_data)); + } + + ds->busy = 0; + if (ds->want) + { + ds->want = 0; + thread_wakeup ((event_t) ds); + } + return D_SUCCESS; +} + +static io_return_t +device_close (void *d) +{ + return device_close_forced (d, 0); +} + + +#define MAX_COPY (VM_MAP_COPY_PAGE_LIST_MAX << PAGE_SHIFT) + +/* Check block BN and size COUNT for I/O validity + to from device BD. Set *OFF to the byte offset + where I/O is to begin and return the size of transfer. 
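+   For illustration (sizes assumed): on a 1000-sector partition, a request
+   for 4096 bytes starting at block 998 is clipped to the two remaining
+   sectors, so 1024 bytes are transferred and *OFF becomes 998 << 9,
+   adjusted by the BSD partition offset when a disklabel is in use.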
*/ +static int +check_limit (struct block_data *bd, loff_t *off, long bn, int count) +{ + int major, minor; + long maxsz, sz; + struct disklabel *lp = NULL; + + if (count <= 0) + return count; + + major = MAJOR (bd->dev); + minor = MINOR (bd->dev); + + if (bd->ds->gd) + { + if (bd->part >= 0) + { + assert (bd->ds->labels); + assert (bd->ds->labels[minor]); + lp = bd->ds->labels[minor]; + maxsz = lp->d_partitions[bd->part].p_size; + } + else + maxsz = bd->ds->gd->part[minor].nr_sects; + } + else + { + assert (blk_size[major]); + maxsz = blk_size[major][minor] << (BLOCK_SIZE_BITS - 9); + } + assert (maxsz > 0); + sz = maxsz - bn; + if (sz <= 0) + return sz; + if (sz < ((count + 511) >> 9)) + count = sz << 9; + if (lp) + bn += (lp->d_partitions[bd->part].p_offset + - bd->ds->gd->part[minor].start_sect); + *off = (loff_t) bn << 9; + bd->iocount++; + return count; +} + +static io_return_t +device_write (void *d, ipc_port_t reply_port, + mach_msg_type_name_t reply_port_type, dev_mode_t mode, + recnum_t bn, io_buf_ptr_t data, unsigned int orig_count, + int *bytes_written) +{ + int resid, amt, i; + int count = (int) orig_count; + io_return_t err = 0; + vm_map_copy_t copy = (vm_map_copy_t) data; + vm_offset_t addr, uaddr; + vm_size_t len, size; + struct block_data *bd = d; + DECL_DATA; + + INIT_DATA (); + + *bytes_written = 0; + + if (bd->mode == O_RDONLY) + return D_INVALID_OPERATION; + if (! bd->ds->fops->write) + return D_READ_ONLY; + count = check_limit (bd, &td.file.f_pos, bn, count); + if (count < 0) + return D_INVALID_SIZE; + if (count == 0) + { + vm_map_copy_discard (copy); + return 0; + } + + resid = count; + uaddr = copy->offset; + + /* Allocate a kernel buffer. */ + size = round_page (uaddr + count) - trunc_page (uaddr); + if (size > MAX_COPY) + size = MAX_COPY; + addr = vm_map_min (device_io_map); + err = vm_map_enter (device_io_map, &addr, size, 0, TRUE, + NULL, 0, FALSE, VM_PROT_READ|VM_PROT_WRITE, + VM_PROT_READ|VM_PROT_WRITE, VM_INHERIT_NONE); + if (err) + { + vm_map_copy_discard (copy); + goto out; + } + + /* Determine size of I/O this time around. */ + len = size - (uaddr & PAGE_MASK); + if (len > resid) + len = resid; + + while (1) + { + /* Map user pages. */ + for (i = 0; i < copy->cpy_npages; i++) + pmap_enter (vm_map_pmap (device_io_map), + addr + (i << PAGE_SHIFT), + copy->cpy_page_list[i]->phys_addr, + VM_PROT_READ|VM_PROT_WRITE, TRUE); + + /* Do the write. */ + amt = (*bd->ds->fops->write) (&td.inode, &td.file, + (char *) addr + (uaddr & PAGE_MASK), len); + + /* Unmap pages and deallocate copy. */ + pmap_remove (vm_map_pmap (device_io_map), + addr, addr + (copy->cpy_npages << PAGE_SHIFT)); + vm_map_copy_discard (copy); + + /* Check result of write. */ + if (amt > 0) + { + resid -= amt; + if (resid == 0) + break; + uaddr += amt; + } + else + { + if (amt < 0) + err = linux_to_mach_error (amt); + break; + } + + /* Determine size of I/O this time around and copy in pages. */ + len = round_page (uaddr + resid) - trunc_page (uaddr); + if (len > MAX_COPY) + len = MAX_COPY; + len -= uaddr & PAGE_MASK; + if (len > resid) + len = resid; + err = vm_map_copyin_page_list (current_map (), uaddr, len, + FALSE, FALSE, ©, FALSE); + if (err) + break; + } + + /* Delete kernel buffer. 
*/ + vm_map_remove (device_io_map, addr, addr + size); + +out: + if (--bd->iocount == 0 && bd->want) + { + bd->want = 0; + thread_wakeup ((event_t) bd); + } + if (IP_VALID (reply_port)) + ds_device_write_reply (reply_port, reply_port_type, err, count - resid); + return MIG_NO_REPLY; +} + +static io_return_t +device_read (void *d, ipc_port_t reply_port, + mach_msg_type_name_t reply_port_type, dev_mode_t mode, + recnum_t bn, int count, io_buf_ptr_t *data, + unsigned *bytes_read) +{ + boolean_t dirty; + int resid, amt; + io_return_t err = 0; + struct list pages; + vm_map_copy_t copy; + vm_offset_t addr, offset, alloc_offset, o; + vm_object_t object; + vm_page_t m; + vm_size_t len, size; + struct block_data *bd = d; + DECL_DATA; + + INIT_DATA (); + + *data = 0; + *bytes_read = 0; + + if (! bd->ds->fops->read) + return D_INVALID_OPERATION; + count = check_limit (bd, &td.file.f_pos, bn, count); + if (count < 0) + return D_INVALID_SIZE; + if (count == 0) + return 0; + + /* Allocate an object to hold the data. */ + size = round_page (count); + object = vm_object_allocate (size); + if (! object) + { + err = D_NO_MEMORY; + goto out; + } + alloc_offset = offset = 0; + resid = count; + + /* Allocate a kernel buffer. */ + addr = vm_map_min (device_io_map); + if (size > MAX_COPY) + size = MAX_COPY; + err = vm_map_enter (device_io_map, &addr, size, 0, TRUE, NULL, + 0, FALSE, VM_PROT_READ|VM_PROT_WRITE, + VM_PROT_READ|VM_PROT_WRITE, VM_INHERIT_NONE); + if (err) + goto out; + + list_init (&pages); + + while (resid) + { + /* Determine size of I/O this time around. */ + len = round_page (offset + resid) - trunc_page (offset); + if (len > MAX_COPY) + len = MAX_COPY; + + /* Map any pages left from previous operation. */ + o = trunc_page (offset); + list_for_each_entry (&pages, m, node) + { + pmap_enter (vm_map_pmap (device_io_map), + addr + o - trunc_page (offset), + m->phys_addr, VM_PROT_READ|VM_PROT_WRITE, TRUE); + o += PAGE_SIZE; + } + assert (o == alloc_offset); + + /* Allocate and map pages. */ + while (alloc_offset < trunc_page (offset) + len) + { + while ((m = vm_page_grab (VM_PAGE_LINUX)) == 0) + VM_PAGE_WAIT (0); + assert (! m->active && ! m->inactive); + m->busy = TRUE; + list_insert_tail (&pages, &m->node); + pmap_enter (vm_map_pmap (device_io_map), + addr + alloc_offset - trunc_page (offset), + m->phys_addr, VM_PROT_READ|VM_PROT_WRITE, TRUE); + alloc_offset += PAGE_SIZE; + } + + /* Do the read. */ + amt = len - (offset & PAGE_MASK); + if (amt > resid) + amt = resid; + amt = (*bd->ds->fops->read) (&td.inode, &td.file, + (char *) addr + (offset & PAGE_MASK), amt); + + /* Compute number of pages to insert in object. */ + o = trunc_page (offset); + if (amt > 0) + { + dirty = TRUE; + resid -= amt; + if (resid == 0) + { + /* Zero any unused space. */ + if (offset + amt < o + len) + memset ((void *) (addr + offset - o + amt), + 0, o + len - offset - amt); + offset = o + len; + } + else + offset += amt; + } + else + { + dirty = FALSE; + offset = o + len; + } + + /* Unmap pages and add them to the object. */ + pmap_remove (vm_map_pmap (device_io_map), addr, addr + len); + vm_object_lock (object); + while (o < trunc_page (offset)) + { + m = list_first_entry (&pages, struct vm_page, node); + assert (! 
list_end (&pages, &m->node)); + list_remove (&m->node); + assert (m->busy); + vm_page_lock_queues (); + if (dirty) + { + PAGE_WAKEUP_DONE (m); + m->dirty = TRUE; + vm_page_insert (m, object, o); + } + else + vm_page_free (m); + vm_page_unlock_queues (); + o += PAGE_SIZE; + } + vm_object_unlock (object); + if (amt <= 0) + { + if (amt < 0) + err = linux_to_mach_error (amt); + break; + } + } + + /* Delete kernel buffer. */ + vm_map_remove (device_io_map, addr, addr + size); + + assert (list_empty (&pages)); + +out: + if (! err) + err = vm_map_copyin_object (object, 0, round_page (count), ©); + if (! err) + { + *data = (io_buf_ptr_t) copy; + *bytes_read = count - resid; + } + else + vm_object_deallocate (object); + if (--bd->iocount == 0 && bd->want) + { + bd->want = 0; + thread_wakeup ((event_t) bd); + } + return err; +} + +static io_return_t +device_get_status (void *d, dev_flavor_t flavor, dev_status_t status, + mach_msg_type_number_t *status_count) +{ + struct block_data *bd = d; + + switch (flavor) + { + case DEV_GET_SIZE: + if (disk_major (MAJOR (bd->dev))) + { + assert (bd->ds->gd); + + if (bd->part >= 0) + { + struct disklabel *lp; + + assert (bd->ds->labels); + lp = bd->ds->labels[MINOR (bd->dev)]; + assert (lp); + (status[DEV_GET_SIZE_DEVICE_SIZE] + = lp->d_partitions[bd->part].p_size << 9); + } + else + (status[DEV_GET_SIZE_DEVICE_SIZE] + = bd->ds->gd->part[MINOR (bd->dev)].nr_sects << 9); + } + else + { + assert (blk_size[MAJOR (bd->dev)]); + (status[DEV_GET_SIZE_DEVICE_SIZE] + = (blk_size[MAJOR (bd->dev)][MINOR (bd->dev)] + << BLOCK_SIZE_BITS)); + } + /* It would be nice to return the block size as reported by + the driver, but a lot of user level code assumes the sector + size to be 512. */ + status[DEV_GET_SIZE_RECORD_SIZE] = 512; + /* Always return DEV_GET_SIZE_COUNT. This is what all native + Mach drivers do, and makes it possible to detect the absence + of the call by setting it to a different value on input. MiG + makes sure that we will never return more integers than the + user asked for. */ + *status_count = DEV_GET_SIZE_COUNT; + break; + + case DEV_GET_RECORDS: + if (disk_major (MAJOR (bd->dev))) + { + assert (bd->ds->gd); + + if (bd->part >= 0) + { + struct disklabel *lp; + + assert (bd->ds->labels); + lp = bd->ds->labels[MINOR (bd->dev)]; + assert (lp); + (status[DEV_GET_RECORDS_DEVICE_RECORDS] + = lp->d_partitions[bd->part].p_size); + } + else + (status[DEV_GET_RECORDS_DEVICE_RECORDS] + = bd->ds->gd->part[MINOR (bd->dev)].nr_sects); + } + else + { + assert (blk_size[MAJOR (bd->dev)]); + status[DEV_GET_RECORDS_DEVICE_RECORDS] + = (blk_size[MAJOR (bd->dev)][MINOR (bd->dev)] + << (BLOCK_SIZE_BITS - 9)); + } + /* It would be nice to return the block size as reported by + the driver, but a lot of user level code assumes the sector + size to be 512. */ + status[DEV_GET_RECORDS_RECORD_SIZE] = 512; + /* Always return DEV_GET_RECORDS_COUNT. This is what all native + Mach drivers do, and makes it possible to detect the absence + of the call by setting it to a different value on input. MiG + makes sure that we will never return more integers than the + user asked for. 
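+     As an illustrative figure, a 1 GiB partition reports
+     DEV_GET_RECORDS_DEVICE_RECORDS = 2097152 together with the fixed
+     record size of 512 bytes.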
*/ + *status_count = DEV_GET_RECORDS_COUNT; + break; + + default: + return D_INVALID_OPERATION; + } + + return D_SUCCESS; +} + +static io_return_t +device_set_status (void *d, dev_flavor_t flavor, dev_status_t status, + mach_msg_type_number_t status_count) +{ + struct block_data *bd = d; + + switch (flavor) + { + case BLKRRPART: + { + DECL_DATA; + INIT_DATA(); + return (*bd->ds->fops->ioctl) (&td.inode, &td.file, flavor, 0); + } + } + + return D_INVALID_OPERATION; +} + + +static void +device_no_senders (mach_no_senders_notification_t *ns) +{ + device_t dev; + + dev = dev_port_lookup((ipc_port_t) ns->not_header.msgh_remote_port); + assert(dev); + device_close_forced (dev->emul_data, 1); +} + +struct device_emulation_ops linux_block_emulation_ops = +{ + NULL, + NULL, + dev_to_port, + device_open, + device_close, + device_write, + NULL, + device_read, + NULL, + device_set_status, + device_get_status, + NULL, + NULL, + device_no_senders, + NULL, + NULL +}; diff --git a/linux/dev/glue/glue.h b/linux/dev/glue/glue.h new file mode 100644 index 0000000..e94ff55 --- /dev/null +++ b/linux/dev/glue/glue.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2011 Free Software Foundation + * + * This program is free software ; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation ; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY ; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the program ; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef LINUX_DEV_GLUE_GLUE_H +#define LINUX_DEV_GLUE_GLUE_H + +#include <vm/vm_types.h> +#include <mach/machine/vm_types.h> + +extern int linux_auto_config; + +extern unsigned long alloc_contig_mem (unsigned, unsigned, unsigned, vm_page_t *); +extern void free_contig_mem (vm_page_t, unsigned); +extern void init_IRQ (void); +extern void restore_IRQ (void); +extern void linux_kmem_init (void); +extern void linux_net_emulation_init (void); +extern void device_setup (void); +extern void linux_timer_intr (void); +extern void linux_sched_init (void); +extern void pcmcia_init (void); +extern void linux_soft_intr (void); +extern int issig (void); +extern int linux_to_mach_error (int); +extern char *get_options(char *str, int *ints); + +#endif /* LINUX_DEV_GLUE_GLUE_H */ diff --git a/linux/dev/glue/kmem.c b/linux/dev/glue/kmem.c new file mode 100644 index 0000000..509229d --- /dev/null +++ b/linux/dev/glue/kmem.c @@ -0,0 +1,589 @@ +/* + * Linux memory allocation. + * + * Copyright (C) 1996 The University of Utah and the Computer Systems + * Laboratory at the University of Utah (CSL) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Shantanu Goel, University of Utah CSL + * + */ + +#include <sys/types.h> + +#include <mach/mach_types.h> +#include <mach/vm_param.h> + +#include <kern/assert.h> +#include <kern/kalloc.h> +#include <kern/printf.h> + +#include <vm/vm_page.h> +#include <vm/vm_kern.h> + +#define MACH_INCLUDE +#include <linux/sched.h> +#include <linux/malloc.h> +#include <linux/delay.h> + +#include <asm/system.h> + +#include <linux/dev/glue/glue.h> + +/* Amount of memory to reserve for Linux memory allocator. + We reserve 64K chunks to stay within DMA limits. + Increase MEM_CHUNKS if the kernel is running out of memory. */ +#define MEM_CHUNK_SIZE (64 * 1024) +#define MEM_CHUNKS 32 +#define MEM_DMA_LIMIT (16 * 1024 * 1024) + +/* Mininum amount that linux_kmalloc will allocate. */ +#define MIN_ALLOC 12 + +#ifndef NBPW +#define NBPW 32 +#endif + +/* Memory block header. */ +struct blkhdr +{ + unsigned short free; /* 1 if block is free */ + unsigned short size; /* size of block */ +}; + +/* This structure heads a page allocated by linux_kmalloc. */ +struct pagehdr +{ + unsigned size; /* size (multiple of PAGE_SIZE) */ + struct pagehdr *next; /* next header in list */ +}; + +/* This structure describes a memory chunk. */ +struct chunkhdr +{ + unsigned long start; /* start address */ + unsigned long end; /* end address */ + unsigned long bitmap; /* busy/free bitmap of pages */ +}; + +/* Chunks from which pages are allocated. */ +static struct chunkhdr pages_free[MEM_CHUNKS]; + +/* Memory list maintained by linux_kmalloc. */ +static struct pagehdr *memlist; + +/* Some statistics. */ +int num_block_coalesce = 0; +int num_page_collect = 0; +int linux_mem_avail; + +/* Initialize the Linux memory allocator. */ +void +linux_kmem_init () +{ + int i, j; + vm_page_t p, pages; + + for (i = 0; i < MEM_CHUNKS; i++) + { + /* Allocate memory. */ + pages_free[i].start = (unsigned long) alloc_contig_mem (MEM_CHUNK_SIZE, + MEM_DMA_LIMIT, + 0xffff, &pages); + + assert (pages_free[i].start); + assert ((pages_free[i].start & 0xffff) == 0); + + /* Sanity check: ensure pages are contiguous and within DMA limits. */ + for (p = pages, j = 0; j < MEM_CHUNK_SIZE - PAGE_SIZE; j += PAGE_SIZE) + { + assert (p->phys_addr < MEM_DMA_LIMIT); + assert (p->phys_addr + PAGE_SIZE == (p + 1)->phys_addr); + p++; + } + + pages_free[i].end = pages_free[i].start + MEM_CHUNK_SIZE; + + /* Initialize free page bitmap. */ + pages_free[i].bitmap = 0; + j = MEM_CHUNK_SIZE >> PAGE_SHIFT; + while (--j >= 0) + pages_free[i].bitmap |= 1 << j; + } + + linux_mem_avail = (MEM_CHUNKS * MEM_CHUNK_SIZE) >> PAGE_SHIFT; +} + +/* Return the number by which the page size should be + shifted such that the resulting value is >= SIZE. 
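+   For example, assuming 4 KiB pages: a SIZE of 4096 yields order 0,
+   5000 yields order 1 (8 KiB), and 70000 yields order 5 (128 KiB).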
*/ +static unsigned long +get_page_order (int size) +{ + unsigned long order; + + for (order = 0; (PAGE_SIZE << order) < size; order++) + ; + return order; +} + +#ifdef LINUX_DEV_DEBUG +static void +check_page_list (int line) +{ + unsigned size; + struct pagehdr *ph; + struct blkhdr *bh; + + for (ph = memlist; ph; ph = ph->next) + { + if ((int) ph & PAGE_MASK) + panic ("%s:%d: page header not aligned", __FILE__, line); + + size = 0; + bh = (struct blkhdr *) (ph + 1); + while (bh < (struct blkhdr *) ((void *) ph + ph->size)) + { + size += bh->size + sizeof (struct blkhdr); + bh = (void *) (bh + 1) + bh->size; + } + + if (size + sizeof (struct pagehdr) != ph->size) + panic ("%s:%d: memory list destroyed", __FILE__, line); + } +} +#else +#define check_page_list(line) +#endif + +/* Merge adjacent free blocks in the memory list. */ +static void +coalesce_blocks () +{ + struct pagehdr *ph; + struct blkhdr *bh, *bhp, *ebh; + + num_block_coalesce++; + + for (ph = memlist; ph; ph = ph->next) + { + bh = (struct blkhdr *) (ph + 1); + ebh = (struct blkhdr *) ((void *) ph + ph->size); + while (1) + { + /* Skip busy blocks. */ + while (bh < ebh && !bh->free) + bh = (struct blkhdr *) ((void *) (bh + 1) + bh->size); + if (bh == ebh) + break; + + /* Merge adjacent free blocks. */ + while (1) + { + bhp = (struct blkhdr *) ((void *) (bh + 1) + bh->size); + if (bhp == ebh) + { + bh = bhp; + break; + } + if (!bhp->free) + { + bh = (struct blkhdr *) ((void *) (bhp + 1) + bhp->size); + break; + } + bh->size += bhp->size + sizeof (struct blkhdr); + } + } + } +} + +/* Allocate SIZE bytes of memory. + The PRIORITY parameter specifies various flags + such as DMA, atomicity, etc. It is not used by Mach. */ +void * +linux_kmalloc (unsigned int size, int priority) +{ + int order, coalesced = 0; + unsigned long flags; + struct pagehdr *ph; + struct blkhdr *bh, *new_bh; + + if (size < MIN_ALLOC) + size = MIN_ALLOC; + else + size = (size + sizeof (int) - 1) & ~(sizeof (int) - 1); + + assert (size <= (MEM_CHUNK_SIZE + - sizeof (struct pagehdr) + - sizeof (struct blkhdr))); + + save_flags (flags); + cli (); + +again: + check_page_list (__LINE__); + + /* Walk the page list and find the first free block with size + greater than or equal to the one required. */ + for (ph = memlist; ph; ph = ph->next) + { + bh = (struct blkhdr *) (ph + 1); + while (bh < (struct blkhdr *) ((void *) ph + ph->size)) + { + if (bh->free && bh->size >= size) + { + bh->free = 0; + if (bh->size - size >= MIN_ALLOC + sizeof (struct blkhdr)) + { + /* Split the current block and create a new free block. */ + new_bh = (void *) (bh + 1) + size; + new_bh->free = 1; + new_bh->size = bh->size - size - sizeof (struct blkhdr); + bh->size = size; + } + + check_page_list (__LINE__); + + restore_flags (flags); + return bh + 1; + } + bh = (void *) (bh + 1) + bh->size; + } + } + + check_page_list (__LINE__); + + /* Allocation failed; coalesce free blocks and try again. */ + if (!coalesced) + { + coalesce_blocks (); + coalesced = 1; + goto again; + } + + /* Allocate more pages. 
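+     A fresh run of PAGE_SIZE << order bytes is obtained from
+     __get_free_pages, headed by a struct pagehdr and chained onto
+     memlist; the remainder becomes a single block that is split
+     below exactly as in the fast path above.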
*/ + order = get_page_order (size + + sizeof (struct pagehdr) + + sizeof (struct blkhdr)); + ph = (struct pagehdr *) __get_free_pages (GFP_KERNEL, order, ~0UL); + if (!ph) + { + restore_flags (flags); + return NULL; + } + + ph->size = PAGE_SIZE << order; + ph->next = memlist; + memlist = ph; + bh = (struct blkhdr *) (ph + 1); + bh->free = 0; + bh->size = ph->size - sizeof (struct pagehdr) - sizeof (struct blkhdr); + if (bh->size - size >= MIN_ALLOC + sizeof (struct blkhdr)) + { + new_bh = (void *) (bh + 1) + size; + new_bh->free = 1; + new_bh->size = bh->size - size - sizeof (struct blkhdr); + bh->size = size; + } + + check_page_list (__LINE__); + + restore_flags (flags); + return bh + 1; +} + +/* Free memory P previously allocated by linux_kmalloc. */ +void +linux_kfree (void *p) +{ + unsigned long flags; + struct blkhdr *bh; + struct pagehdr *ph; + + assert (((int) p & (sizeof (int) - 1)) == 0); + + save_flags (flags); + cli (); + + check_page_list (__LINE__); + + for (ph = memlist; ph; ph = ph->next) + if (p >= (void *) ph && p < (void *) ph + ph->size) + break; + + assert (ph); + + bh = (struct blkhdr *) p - 1; + + assert (!bh->free); + assert (bh->size >= MIN_ALLOC); + assert ((bh->size & (sizeof (int) - 1)) == 0); + + bh->free = 1; + + check_page_list (__LINE__); + + restore_flags (flags); +} + +/* Free any pages that are not in use. + Called by __get_free_pages when pages are running low. */ +static void +collect_kmalloc_pages () +{ + struct blkhdr *bh; + struct pagehdr *ph, **prev_ph; + + check_page_list (__LINE__); + + coalesce_blocks (); + + check_page_list (__LINE__); + + ph = memlist; + prev_ph = &memlist; + while (ph) + { + bh = (struct blkhdr *) (ph + 1); + if (bh->free && (void *) (bh + 1) + bh->size == (void *) ph + ph->size) + { + *prev_ph = ph->next; + free_pages ((unsigned long) ph, get_page_order (ph->size)); + ph = *prev_ph; + } + else + { + prev_ph = &ph->next; + ph = ph->next; + } + } + + check_page_list (__LINE__); +} + +/* Allocate ORDER + 1 number of physically contiguous pages. + PRIORITY and DMA are not used in Mach. + + XXX: This needs to be dynamic. To do that we need to make + the Mach page manipulation routines interrupt safe and they + must provide machine dependant hooks. */ +unsigned long +__get_free_pages (int priority, unsigned long order, int dma) +{ + int i, pages_collected = 0; + unsigned bits, off, j, len; + unsigned long flags; + + assert ((PAGE_SIZE << order) <= MEM_CHUNK_SIZE); + + /* Construct bitmap of contiguous pages. */ + bits = 0; + j = 0; + len = 0; + while (len < (PAGE_SIZE << order)) + { + bits |= 1 << j++; + len += PAGE_SIZE; + } + + save_flags (flags); + cli (); +again: + + /* Search each chunk for the required number of contiguous pages. */ + for (i = 0; i < MEM_CHUNKS; i++) + { + off = 0; + j = bits; + while (MEM_CHUNK_SIZE - off >= (PAGE_SIZE << order)) + { + if ((pages_free[i].bitmap & j) == j) + { + pages_free[i].bitmap &= ~j; + linux_mem_avail -= order + 1; + restore_flags (flags); + return pages_free[i].start + off; + } + j <<= 1; + off += PAGE_SIZE; + } + } + + /* Allocation failed; collect kmalloc and buffer pages + and try again. */ + if (!pages_collected) + { + num_page_collect++; + collect_kmalloc_pages (); + pages_collected = 1; + goto again; + } + + printf ("%s:%d: __get_free_pages: ran out of pages\n", __FILE__, __LINE__); + + restore_flags (flags); + return 0; +} + +/* Free ORDER + 1 number of physically + contiguous pages starting at address ADDR. 
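+   (Note: the bitmap constructed below actually spans PAGE_SIZE << ORDER
+   bytes, i.e. 1 << ORDER pages; the "ORDER + 1" wording mirrors the
+   linux_mem_avail accounting used in __get_free_pages rather than the
+   real page count.)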
*/ +void +free_pages (unsigned long addr, unsigned long order) +{ + int i; + unsigned bits, len, j; + unsigned long flags; + + assert ((addr & PAGE_MASK) == 0); + + for (i = 0; i < MEM_CHUNKS; i++) + if (addr >= pages_free[i].start && addr < pages_free[i].end) + break; + + assert (i < MEM_CHUNKS); + + /* Contruct bitmap of contiguous pages. */ + len = 0; + j = 0; + bits = 0; + while (len < (PAGE_SIZE << order)) + { + bits |= 1 << j++; + len += PAGE_SIZE; + } + bits <<= (addr - pages_free[i].start) >> PAGE_SHIFT; + + save_flags (flags); + cli (); + + assert ((pages_free[i].bitmap & bits) == 0); + + pages_free[i].bitmap |= bits; + linux_mem_avail += order + 1; + restore_flags (flags); +} + + +/* vmalloc management routines. */ +struct vmalloc_struct +{ + struct vmalloc_struct *prev; + struct vmalloc_struct *next; + vm_offset_t start; + vm_size_t size; +}; + +static struct vmalloc_struct +vmalloc_list = { &vmalloc_list, &vmalloc_list, 0, 0 }; + +static inline void +vmalloc_list_insert (vm_offset_t start, vm_size_t size) +{ + struct vmalloc_struct *p; + + p = (struct vmalloc_struct *) kalloc (sizeof (struct vmalloc_struct)); + if (p == NULL) + panic ("kernel memory is exhausted"); + + p->prev = vmalloc_list.prev; + p->next = &vmalloc_list; + vmalloc_list.prev->next = p; + vmalloc_list.prev = p; + + p->start = start; + p->size = size; +} + +static struct vmalloc_struct * +vmalloc_list_lookup (vm_offset_t start) +{ + struct vmalloc_struct *p; + + for (p = vmalloc_list.next; p != &vmalloc_list; p = p->next) + { + if (p->start == start) + return p; + } + + return NULL; +} + +static inline void +vmalloc_list_remove (struct vmalloc_struct *p) +{ + p->next->prev = p->prev; + p->prev->next = p->next; + + kfree ((vm_offset_t) p, sizeof (struct vmalloc_struct)); +} + +/* Allocate SIZE bytes of memory. The pages need not be contiguous. */ +void * +vmalloc (unsigned long size) +{ + kern_return_t ret; + vm_offset_t addr; + + ret = kmem_alloc_wired (kernel_map, &addr, round_page (size)); + if (ret != KERN_SUCCESS) + return NULL; + + vmalloc_list_insert (addr, round_page (size)); + return (void *) addr; +} + +/* Free vmalloc'ed and vremap'ed virtual address space. */ +void +vfree (void *addr) +{ + struct vmalloc_struct *p; + + p = vmalloc_list_lookup ((vm_offset_t) addr); + if (!p) + panic ("vmalloc_list_lookup failure"); + + kmem_free (kernel_map, (vm_offset_t) addr, p->size); + vmalloc_list_remove (p); +} + +unsigned long +vmtophys (void *addr) +{ + return kvtophys((vm_offset_t) addr); +} + +/* XXX: Quick hacking. */ +/* Remap physical address into virtual address. */ + +#include <vm/pmap.h> + +void * +vremap (unsigned long offset, unsigned long size) +{ + vm_offset_t addr; + kern_return_t ret; + + assert(page_aligned(offset)); + + ret = kmem_valloc (kernel_map, &addr, round_page (size)); + if (ret != KERN_SUCCESS) + return NULL; + + (void) pmap_map_bd (addr, offset, offset + round_page (size), + VM_PROT_READ | VM_PROT_WRITE); + + vmalloc_list_insert (addr, round_page (size)); + return (void *) addr; +} diff --git a/linux/dev/glue/misc.c b/linux/dev/glue/misc.c new file mode 100644 index 0000000..5646e5e --- /dev/null +++ b/linux/dev/glue/misc.c @@ -0,0 +1,248 @@ +/* + * Miscellaneous routines and data for Linux emulation. 
+ * + * Copyright (C) 1996 The University of Utah and the Computer Systems + * Laboratory at the University of Utah (CSL) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Shantanu Goel, University of Utah CSL + */ + +/* + * linux/fs/proc/scsi.c + * (c) 1995 Michael Neuffer neuffer@goofy.zdv.uni-mainz.de + * + * The original version was derived from linux/fs/proc/net.c, + * which is Copyright (C) 1991, 1992 Linus Torvalds. + * Much has been rewritten, but some of the code still remains. + * + * /proc/scsi directory handling functions + * + * last change: 95/07/04 + * + * Initial version: March '95 + * 95/05/15 Added subdirectories for each driver and show every + * registered HBA as a single file. + * 95/05/30 Added rudimentary write support for parameter passing + * 95/07/04 Fixed bugs in directory handling + * 95/09/13 Update to support the new proc-dir tree + * + * TODO: Improve support to write to the driver files + * Add some more comments + */ + +/* + * linux/fs/buffer.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +#include <sys/types.h> +#include <mach/vm_param.h> +#include <kern/thread.h> +#include <kern/printf.h> +#include <kern/mach_host.server.h> +#include <vm/vm_map.h> +#include <vm/vm_page.h> +#include <device/device_types.h> + +#define MACH_INCLUDE +#include <linux/types.h> +#include <linux/config.h> +#include <linux/errno.h> +#include <linux/mm.h> +#include <linux/fs.h> +#include <linux/blk.h> +#include <linux/proc_fs.h> +#include <linux/kernel_stat.h> +#include <linux/dev/glue/glue.h> + +int (*dispatch_scsi_info_ptr) (int ino, char *buffer, char **start, + off_t offset, int length, int inout) = 0; + +struct kernel_stat kstat; + +int +linux_to_mach_error (int err) +{ + switch (err) + { + case 0: + return D_SUCCESS; + + case -EPERM: + return D_INVALID_OPERATION; + + case -EIO: + return D_IO_ERROR; + + case -ENXIO: + return D_NO_SUCH_DEVICE; + + case -EACCES: + return D_INVALID_OPERATION; + + case -EFAULT: + return D_INVALID_SIZE; + + case -EBUSY: + return D_ALREADY_OPEN; + + case -EINVAL: + return D_INVALID_SIZE; + + case -EROFS: + return D_READ_ONLY; + + case -EWOULDBLOCK: + return D_WOULD_BLOCK; + + case -ENOMEM: + return D_NO_MEMORY; + + default: + printf ("linux_to_mach_error: unknown code %d\n", err); + return D_IO_ERROR; + } +} + +int +issig () +{ + if (!current_thread()) + return 0; + return current_thread ()->wait_result != THREAD_AWAKENED; +} + +int +block_fsync (struct inode *inode, struct file *filp) +{ + return 0; +} + +int +verify_area (int rw, const void *p, unsigned long size) +{ + vm_prot_t prot = (rw == VERIFY_WRITE) ? 
VM_PROT_WRITE : VM_PROT_READ; + vm_offset_t addr = trunc_page ((vm_offset_t) p); + vm_size_t len = round_page ((vm_size_t) size); + vm_map_entry_t entry; + + vm_map_lock_read (current_map ()); + + while (1) + { + if (!vm_map_lookup_entry (current_map (), addr, &entry) + || (entry->protection & prot) != prot) + { + vm_map_unlock_read (current_map ()); + return -EFAULT; + } + if (entry->vme_end - entry->vme_start >= len) + break; + len -= entry->vme_end - entry->vme_start; + addr += entry->vme_end - entry->vme_start; + } + + vm_map_unlock_read (current_map ()); + return 0; +} + +/* + * Print device name (in decimal, hexadecimal or symbolic) - + * at present hexadecimal only. + * Note: returns pointer to static data! + */ +char * +kdevname (kdev_t dev) +{ + static char buffer[32]; + linux_sprintf (buffer, "%02x:%02x", MAJOR (dev), MINOR (dev)); + return buffer; +} + +/* RO fail safe mechanism */ + +static long ro_bits[MAX_BLKDEV][8]; + +int +is_read_only (kdev_t dev) +{ + int minor, major; + + major = MAJOR (dev); + minor = MINOR (dev); + if (major < 0 || major >= MAX_BLKDEV) + return 0; + return ro_bits[major][minor >> 5] & (1 << (minor & 31)); +} + +void +set_device_ro (kdev_t dev, int flag) +{ + int minor, major; + + major = MAJOR (dev); + minor = MINOR (dev); + if (major < 0 || major >= MAX_BLKDEV) + return; + if (flag) + ro_bits[major][minor >> 5] |= 1 << (minor & 31); + else + ro_bits[major][minor >> 5] &= ~(1 << (minor & 31)); +} + +struct proc_dir_entry proc_scsi; +struct inode_operations proc_scsi_inode_operations; +struct proc_dir_entry proc_net; +struct inode_operations proc_net_inode_operations; + +int +proc_register (struct proc_dir_entry *xxx1, struct proc_dir_entry *xxx2) +{ + return 0; +} + +int +proc_unregister (struct proc_dir_entry *xxx1, int xxx2) +{ + return 0; +} + +void +add_blkdev_randomness (int major) +{ +} + +void +do_gettimeofday (struct timeval *tv) +{ + /* + * XXX: The first argument should be mach_host_self (), but that's too + * expensive, and the host argument is not used by host_get_time (), + * only checked not to be HOST_NULL. + */ + time_value64_t tv64; + host_get_time64 ((host_t) 1, &tv64); + tv->tv_sec = tv64.seconds; + tv->tv_usec = tv64.nanoseconds / 1000; +} + +int +dev_get_info (char *buffer, char **start, off_t offset, int length, int dummy) +{ + return 0; +} diff --git a/linux/dev/glue/net.c b/linux/dev/glue/net.c new file mode 100644 index 0000000..dd80622 --- /dev/null +++ b/linux/dev/glue/net.c @@ -0,0 +1,670 @@ +/* + * Linux network driver support. + * + * Copyright (C) 1996 The University of Utah and the Computer Systems + * Laboratory at the University of Utah (CSL) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Shantanu Goel, University of Utah CSL + */ + +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. 
INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Ethernet-type device handling. + * + * Version: @(#)eth.c 1.0.7 05/25/93 + * + * Authors: Ross Biro, <bir7@leland.Stanford.Edu> + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Mark Evans, <evansmp@uhura.aston.ac.uk> + * Florian La Roche, <rzsfl@rz.uni-sb.de> + * Alan Cox, <gw4pts@gw4pts.ampr.org> + * + * Fixes: + * Mr Linux : Arp problems + * Alan Cox : Generic queue tidyup (very tiny here) + * Alan Cox : eth_header ntohs should be htons + * Alan Cox : eth_rebuild_header missing an htons and + * minor other things. + * Tegge : Arp bug fixes. + * Florian : Removed many unnecessary functions, code cleanup + * and changes for new arp and skbuff. + * Alan Cox : Redid header building to reflect new format. + * Alan Cox : ARP only when compiled with CONFIG_INET + * Greg Page : 802.2 and SNAP stuff. + * Alan Cox : MAC layer pointers/new format. + * Paul Gortmaker : eth_copy_and_sum shouldn't csum padding. + * Alan Cox : Protect against forwarding explosions with + * older network drivers and IFF_ALLMULTI + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <sys/types.h> +#include <machine/spl.h> +#include <machine/vm_param.h> + +#include <mach/mach_types.h> +#include <mach/kern_return.h> +#include <mach/mig_errors.h> +#include <mach/port.h> +#include <mach/vm_param.h> +#include <mach/notify.h> + +#include <kern/kalloc.h> +#include <kern/printf.h> + +#include <ipc/ipc_port.h> +#include <ipc/ipc_space.h> + +#include <vm/vm_map.h> +#include <vm/vm_kern.h> +#include <vm/vm_page.h> + +#include <device/device_types.h> +#include <device/device_port.h> +#include <device/if_hdr.h> +#include <device/if_ether.h> +#include <device/if_hdr.h> +#include <device/net_io.h> +#include <device/device_reply.user.h> +#include <device/device_emul.h> +#include <device/ds_routines.h> + +#define MACH_INCLUDE +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/malloc.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/wireless.h> + +#include <linux/dev/glue/glue.h> + +/* One of these is associated with each instance of a device. */ +struct net_data +{ + ipc_port_t port; /* device port */ + struct ifnet ifnet; /* Mach ifnet structure (needed for filters) */ + struct device device; /* generic device structure */ + struct linux_device *dev; /* Linux network device structure */ +}; + +/* List of sk_buffs waiting to be freed. */ +static struct sk_buff_head skb_done_list; + +/* Forward declarations. */ + +extern struct device_emulation_ops linux_net_emulation_ops; + +static int print_packet_size = 0; + +/* Linux kernel network support routines. */ + +/* Requeue packet SKB for transmission after the interface DEV + has timed out. The priority of the packet is PRI. + In Mach, we simply drop the packet like the native drivers. */ +void +dev_queue_xmit (struct sk_buff *skb, struct linux_device *dev, int pri) +{ + dev_kfree_skb (skb, FREE_WRITE); +} + +/* Close the device DEV. */ +int +dev_close (struct linux_device *dev) +{ + return 0; +} + +/* Network software interrupt handler. 
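+   Drains the output queue of every interface: packets are handed to
+   the driver's hard_start_xmit; if the driver refuses one, it is put
+   back at the head of the queue and NET_BH is marked so the transmit
+   is retried on a later pass.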
*/ +void +net_bh (void) +{ + int len; + struct sk_buff *skb; + struct linux_device *dev; + + /* Start transmission on interfaces. */ + for (dev = dev_base; dev; dev = dev->next) + { + if (dev->base_addr && dev->base_addr != 0xffe0) + while (1) + { + skb = skb_dequeue (&dev->buffs[0]); + if (skb) + { + len = skb->len; + if ((*dev->hard_start_xmit) (skb, dev)) + { + skb_queue_head (&dev->buffs[0], skb); + mark_bh (NET_BH); + break; + } + else if (print_packet_size) + printf ("net_bh: length %d\n", len); + } + else + break; + } + } +} + +/* Free all sk_buffs on the done list. + This routine is called by the iodone thread in ds_routines.c. */ +void +free_skbuffs () +{ + struct sk_buff *skb; + + while (1) + { + skb = skb_dequeue (&skb_done_list); + if (skb) + { + if (skb->copy) + { + vm_map_copy_discard (skb->copy); + skb->copy = NULL; + } + if (IP_VALID (skb->reply)) + { + ds_device_write_reply (skb->reply, skb->reply_type, 0, skb->len); + skb->reply = IP_NULL; + } + dev_kfree_skb (skb, FREE_WRITE); + } + else + break; + } +} + +/* Allocate an sk_buff with SIZE bytes of data space. */ +struct sk_buff * +alloc_skb (unsigned int size, int priority) +{ + return dev_alloc_skb (size); +} + +/* Free SKB. */ +void +kfree_skb (struct sk_buff *skb, int priority) +{ + dev_kfree_skb (skb, priority); +} + +/* Allocate an sk_buff with SIZE bytes of data space. */ +struct sk_buff * +dev_alloc_skb (unsigned int size) +{ + struct sk_buff *skb; + unsigned char *bptr; + int len = size; + + size = (size + 15) & ~15; + size += sizeof (struct sk_buff); + + bptr = linux_kmalloc (size, GFP_KERNEL); + if (bptr == NULL) + return NULL; + + /* XXX: In Mach, a sk_buff is located at the head, + while it's located at the tail in Linux. */ + skb = bptr; + skb->dev = NULL; + skb->reply = IP_NULL; + skb->copy = NULL; + skb->len = 0; + skb->prev = skb->next = NULL; + skb->list = NULL; + skb->data = bptr + sizeof (struct sk_buff); + skb->tail = skb->data; + skb->head = skb->data; + skb->end = skb->data + len; + + return skb; +} + +/* Free the sk_buff SKB. */ +void +dev_kfree_skb (struct sk_buff *skb, int mode) +{ + unsigned flags; + + /* Queue sk_buff on done list if there is a + page list attached or we need to send a reply. + Wakeup the iodone thread to process the list. */ + if (skb->copy || IP_VALID (skb->reply)) + { + skb_queue_tail (&skb_done_list, skb); + save_flags (flags); + thread_wakeup ((event_t) & io_done_list); + restore_flags (flags); + return; + } + linux_kfree (skb); +} + +/* Accept packet SKB received on an interface. */ +void +netif_rx (struct sk_buff *skb) +{ + ipc_kmsg_t kmsg; + struct ether_header *eh; + struct packet_header *ph; + struct linux_device *dev = skb->dev; + + assert (skb != NULL); + + if (print_packet_size) + printf ("netif_rx: length %ld\n", skb->len); + + /* Allocate a kernel message buffer. */ + kmsg = net_kmsg_get (); + if (!kmsg) + { + dev_kfree_skb (skb, FREE_READ); + return; + } + + /* Copy packet into message buffer. */ + eh = (struct ether_header *) (net_kmsg (kmsg)->header); + ph = (struct packet_header *) (net_kmsg (kmsg)->packet); + memcpy (eh, skb->data, sizeof (struct ether_header)); + + /* packet is prefixed with a struct packet_header, + see include/device/net_status.h. 
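+     The Ethernet header itself is stored in the separate kmsg header
+     field; ph->length counts the payload plus the packet_header, not
+     the Ethernet header.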
*/ + memcpy (ph + 1, skb->data + sizeof (struct ether_header), + skb->len - sizeof (struct ether_header)); + ph->type = eh->ether_type; + ph->length = (skb->len - sizeof (struct ether_header) + + sizeof (struct packet_header)); + + dev_kfree_skb (skb, FREE_READ); + + net_kmsg(kmsg)->sent = FALSE; /* Mark packet as received. */ + + /* Pass packet up to the microkernel. */ + net_packet (&dev->net_data->ifnet, kmsg, + ph->length, ethernet_priority (kmsg)); +} + +/* Mach device interface routines. */ + +/* Return a send right associated with network device ND. */ +static ipc_port_t +dev_to_port (void *nd) +{ + return (nd + ? ipc_port_make_send (((struct net_data *) nd)->port) + : IP_NULL); +} + +static io_return_t +device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type, + dev_mode_t mode, char *name, device_t *devp) +{ + io_return_t err = D_SUCCESS; + ipc_port_t notify; + struct ifnet *ifp; + struct linux_device *dev; + struct net_data *nd; + + /* Search for the device. */ + for (dev = dev_base; dev; dev = dev->next) + if (dev->base_addr + && dev->base_addr != 0xffe0 + && !strcmp (name, dev->name)) + break; + if (!dev) + return D_NO_SUCH_DEVICE; + + /* Allocate and initialize device data if this is the first open. */ + nd = dev->net_data; + if (!nd) + { + dev->net_data = nd = ((struct net_data *) + kalloc (sizeof (struct net_data))); + if (!nd) + { + err = D_NO_MEMORY; + goto out; + } + nd->dev = dev; + nd->device.emul_data = nd; + nd->device.emul_ops = &linux_net_emulation_ops; + nd->port = ipc_port_alloc_kernel (); + if (nd->port == IP_NULL) + { + err = KERN_RESOURCE_SHORTAGE; + goto out; + } + ipc_kobject_set (nd->port, (ipc_kobject_t) & nd->device, IKOT_DEVICE); + notify = ipc_port_make_sonce (nd->port); + ip_lock (nd->port); + ipc_port_nsrequest (nd->port, 1, notify, ¬ify); + assert (notify == IP_NULL); + + ifp = &nd->ifnet; + ifp->if_unit = dev->name[strlen (dev->name) - 1] - '0'; + ifp->if_flags = IFF_UP | IFF_RUNNING; + ifp->if_mtu = dev->mtu; + ifp->if_header_size = dev->hard_header_len; + ifp->if_header_format = dev->type; + ifp->if_address_size = dev->addr_len; + ifp->if_address = dev->dev_addr; + if_init_queues (ifp); + + if (dev->open) + { + if ((*dev->open) (dev)) + err = D_NO_SUCH_DEVICE; + } + + out: + if (err) + { + if (nd) + { + if (nd->port != IP_NULL) + { + ipc_kobject_set (nd->port, IKO_NULL, IKOT_NONE); + ipc_port_dealloc_kernel (nd->port); + } + kfree ((vm_offset_t) nd, sizeof (struct net_data)); + nd = NULL; + dev->net_data = NULL; + } + } + else + { + /* IPv6 heavily relies on multicasting (especially router and + neighbor solicits and advertisements), so enable reception of + those multicast packets by setting `LINUX_IFF_ALLMULTI'. 
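+	     The flag only takes effect once dev->set_multicast_list pushes
+	     it to the hardware, which is done just below.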
*/ + dev->flags |= LINUX_IFF_UP | LINUX_IFF_RUNNING | LINUX_IFF_ALLMULTI; + skb_queue_head_init (&dev->buffs[0]); + + if (dev->set_multicast_list) + dev->set_multicast_list (dev); + } + if (IP_VALID (reply_port)) + ds_device_open_reply (reply_port, reply_port_type, + err, dev_to_port (nd)); + return MIG_NO_REPLY; + } + + *devp = &nd->device; + return D_SUCCESS; +} + +static io_return_t +device_write (void *d, ipc_port_t reply_port, + mach_msg_type_name_t reply_port_type, dev_mode_t mode, + recnum_t bn, io_buf_ptr_t data, unsigned int count, + int *bytes_written) +{ + int s; + vm_map_copy_t copy = (vm_map_copy_t) data; + char *map_data; + vm_offset_t map_addr; + vm_size_t map_size; + struct net_data *nd = d; + struct linux_device *dev = nd->dev; + struct sk_buff *skb; + kern_return_t kr; + + if (count == 0 || count > dev->mtu + dev->hard_header_len) + return D_INVALID_SIZE; + + /* Allocate a sk_buff. */ + skb = dev_alloc_skb (count); + if (!skb) + return D_NO_MEMORY; + + /* Map user data. */ + kr = kmem_io_map_copyout(device_io_map, (vm_offset_t *)&map_data, + &map_addr, &map_size, copy, count); + + if (kr) { + dev_kfree_skb (skb, FREE_WRITE); + return D_NO_MEMORY; + } + + /* XXX The underlying physical pages of the mapping could be highmem, + for which drivers require the use of a bounce buffer. */ + memcpy (skb->data, map_data, count); + kmem_io_map_deallocate (device_io_map, map_addr, map_size); + vm_map_copy_discard (copy); + + skb->len = count; + skb->head = skb->data; + skb->tail = skb->data + skb->len; + skb->end = skb->tail; + skb->dev = dev; + skb->reply = reply_port; + skb->reply_type = reply_port_type; + + /* Queue packet for transmission and schedule a software interrupt. */ + s = splimp (); + if (dev->buffs[0].next != (struct sk_buff *) &dev->buffs[0] + || (*dev->hard_start_xmit) (skb, dev)) + { + __skb_queue_tail (&dev->buffs[0], skb); + mark_bh (NET_BH); + } + splx (s); + + /* Send packet to filters. */ + { + struct packet_header *packet; + struct ether_header *header; + ipc_kmsg_t kmsg; + + kmsg = net_kmsg_get (); + + if (kmsg != IKM_NULL) + { + /* Suitable for Ethernet only. */ + header = (struct ether_header *) (net_kmsg (kmsg)->header); + packet = (struct packet_header *) (net_kmsg (kmsg)->packet); + memcpy (header, skb->data, sizeof (struct ether_header)); + + /* packet is prefixed with a struct packet_header, + see include/device/net_status.h. */ + memcpy (packet + 1, skb->data + sizeof (struct ether_header), + skb->len - sizeof (struct ether_header)); + packet->length = skb->len - sizeof (struct ether_header) + + sizeof (struct packet_header); + packet->type = header->ether_type; + net_kmsg (kmsg)->sent = TRUE; /* Mark packet as sent. */ + s = splimp (); + net_packet (&dev->net_data->ifnet, kmsg, packet->length, + ethernet_priority (kmsg)); + splx (s); + } + } + + return MIG_NO_REPLY; +} + + +static io_return_t +device_get_status (void *d, dev_flavor_t flavor, dev_status_t status, + mach_msg_type_number_t *count) +{ + if (flavor == NET_FLAGS) + { + struct net_data *net = (struct net_data *) d; + + if (*count != 1) + return D_INVALID_SIZE; + + status[0] = net->dev->flags; + return D_SUCCESS; + } + + if(flavor >= SIOCIWFIRST && flavor <= SIOCIWLAST) + { + /* handle wireless ioctl */ + if(! IW_IS_GET(flavor)) + return D_INVALID_OPERATION; + + if(*count * sizeof(int) < sizeof(struct ifreq)) + return D_INVALID_OPERATION; + + struct net_data *nd = d; + struct linux_device *dev = nd->dev; + + if(! 
dev->do_ioctl) + return D_INVALID_OPERATION; + + int result; + + if (flavor == SIOCGIWRANGE || flavor == SIOCGIWENCODE + || flavor == SIOCGIWESSID || flavor == SIOCGIWNICKN + || flavor == SIOCGIWSPY) + { + /* + * These ioctls require an `iw_point' as their argument (i.e. + * they want to return some data to userspace. + * Therefore supply some sane values and carry the data back + * to userspace right behind the `struct iwreq'. + */ + struct iw_point *iwp = &((struct iwreq *) status)->u.data; + iwp->length = *count * sizeof (dev_status_t) - sizeof (struct ifreq); + iwp->pointer = (void *) status + sizeof (struct ifreq); + + result = dev->do_ioctl (dev, (struct ifreq *) status, flavor); + + *count = ((sizeof (struct ifreq) + iwp->length) + / sizeof (dev_status_t)); + if (iwp->length % sizeof (dev_status_t)) + (*count) ++; + } + else + { + *count = sizeof(struct ifreq) / sizeof(int); + result = dev->do_ioctl(dev, (struct ifreq *) status, flavor); + } + + return result ? D_IO_ERROR : D_SUCCESS; + } + else + { + /* common get_status request */ + return net_getstat (&((struct net_data *) d)->ifnet, flavor, + status, count); + } +} + + +static io_return_t +device_set_status(void *d, dev_flavor_t flavor, dev_status_t status, + mach_msg_type_number_t count) +{ + if (flavor == NET_FLAGS) + { + if (count != 1) + return D_INVALID_SIZE; + + short flags = status[0]; + struct net_data *net = (struct net_data *) d; + + dev_change_flags (net->dev, flags); + + /* Change the flags of the Mach device, too. */ + net->ifnet.if_flags = net->dev->flags; + return D_SUCCESS; + } + + if(flavor < SIOCIWFIRST || flavor > SIOCIWLAST) + return D_INVALID_OPERATION; + + if(! IW_IS_SET(flavor)) + return D_INVALID_OPERATION; + + if(count * sizeof(int) < sizeof(struct ifreq)) + return D_INVALID_OPERATION; + + struct net_data *nd = d; + struct linux_device *dev = nd->dev; + + if(! dev->do_ioctl) + return D_INVALID_OPERATION; + + if((flavor == SIOCSIWENCODE || flavor == SIOCSIWESSID + || flavor == SIOCSIWNICKN || flavor == SIOCSIWSPY) + && ((struct iwreq *) status)->u.data.pointer) + { + struct iw_point *iwp = &((struct iwreq *) status)->u.data; + + /* safety check whether the status array is long enough ... */ + if(count * sizeof(int) < sizeof(struct ifreq) + iwp->length) + return D_INVALID_OPERATION; + + /* make sure, iwp->pointer points to the correct address */ + if(iwp->pointer) iwp->pointer = (void *) status + sizeof(struct ifreq); + } + + int result = dev->do_ioctl(dev, (struct ifreq *) status, flavor); + return result ? D_IO_ERROR : D_SUCCESS; +} + + +static io_return_t +device_set_filter (void *d, ipc_port_t port, int priority, + filter_t * filter, unsigned filter_count) +{ + return net_set_filter (&((struct net_data *) d)->ifnet, + port, priority, filter, filter_count); +} + +struct device_emulation_ops linux_net_emulation_ops = +{ + NULL, + NULL, + dev_to_port, + device_open, + NULL, + device_write, + NULL, + NULL, + NULL, + device_set_status, + device_get_status, + device_set_filter, + NULL, + NULL, + NULL, + NULL +}; + +/* Do any initialization required for network devices. 
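+   At present this only initializes skb_done_list, the queue of
+   completed sk_buffs that free_skbuffs () drains from the iodone
+   thread.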
*/ +void +linux_net_emulation_init () +{ + skb_queue_head_init (&skb_done_list); +} diff --git a/linux/dev/include/ahci.h b/linux/dev/include/ahci.h new file mode 100644 index 0000000..31977b6 --- /dev/null +++ b/linux/dev/include/ahci.h @@ -0,0 +1,268 @@ +#ifndef _GNUMACH_AHCI_H +#define _GNUMACH_AHCI_H +extern void ahci_probe_pci(void); + +/* From linux 3.9's drivers/ata/ahci.h */ + +/* + * ahci.h - Common AHCI SATA definitions and declarations + * + * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Please ALWAYS copy linux-ide@vger.kernel.org + * on emails. + * + * Copyright 2004-2005 Red Hat, Inc. + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * + * libata documentation is available via 'make {ps|pdf}docs', + * as Documentation/DocBook/libata.* + * + * AHCI hardware documentation: + * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf + * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf + * + */ + +enum { + AHCI_MAX_PORTS = 32, + AHCI_MAX_SG = 168, /* hardware max is 64K */ + AHCI_DMA_BOUNDARY = 0xffffffff, + AHCI_MAX_CMDS = 32, + AHCI_CMD_SZ = 32, + AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ, + AHCI_RX_FIS_SZ = 256, + AHCI_CMD_TBL_CDB = 0x40, + AHCI_CMD_TBL_HDR_SZ = 0x80, + AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16), + AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS, + AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ + + AHCI_RX_FIS_SZ, + AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ + + AHCI_CMD_TBL_AR_SZ + + (AHCI_RX_FIS_SZ * 16), + AHCI_IRQ_ON_SG = (1 << 31), + AHCI_CMD_ATAPI = (1 << 5), + AHCI_CMD_WRITE = (1 << 6), + AHCI_CMD_PREFETCH = (1 << 7), + AHCI_CMD_RESET = (1 << 8), + AHCI_CMD_CLR_BUSY = (1 << 10), + + RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */ + RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ + RX_FIS_SDB = 0x58, /* offset of SDB FIS data */ + RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */ + + /* global controller registers */ + HOST_CAP = 0x00, /* host capabilities */ + HOST_CTL = 0x04, /* global host control */ + HOST_IRQ_STAT = 0x08, /* interrupt status */ + HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */ + HOST_VERSION = 0x10, /* AHCI spec. 
version compliancy */ + HOST_EM_LOC = 0x1c, /* Enclosure Management location */ + HOST_EM_CTL = 0x20, /* Enclosure Management Control */ + HOST_CAP2 = 0x24, /* host capabilities, extended */ + + /* HOST_CTL bits */ + HOST_RESET = (1 << 0), /* reset controller; self-clear */ + HOST_IRQ_EN = (1 << 1), /* global IRQ enable */ + HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ + + /* HOST_CAP bits */ + HOST_CAP_SXS = (1 << 5), /* Supports External SATA */ + HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */ + HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */ + HOST_CAP_PART = (1 << 13), /* Partial state capable */ + HOST_CAP_SSC = (1 << 14), /* Slumber state capable */ + HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */ + HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */ + HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ + HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */ + HOST_CAP_CLO = (1 << 24), /* Command List Override support */ + HOST_CAP_LED = (1 << 25), /* Supports activity LED */ + HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */ + HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ + HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */ + HOST_CAP_SNTF = (1 << 29), /* SNotification register */ + HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ + HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ + + /* HOST_CAP2 bits */ + HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */ + HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */ + HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */ + HOST_CAP2_SDS = (1 << 3), /* Support device sleep */ + HOST_CAP2_SADM = (1 << 4), /* Support aggressive DevSlp */ + HOST_CAP2_DESO = (1 << 5), /* DevSlp from slumber only */ + + /* registers for each SATA port */ + PORT_LST_ADDR = 0x00, /* command list DMA addr */ + PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */ + PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */ + PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */ + PORT_IRQ_STAT = 0x10, /* interrupt status */ + PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */ + PORT_CMD = 0x18, /* port command */ + PORT_TFDATA = 0x20, /* taskfile data */ + PORT_SIG = 0x24, /* device TF signature */ + PORT_CMD_ISSUE = 0x38, /* command issue */ + PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */ + PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */ + PORT_SCR_ERR = 0x30, /* SATA phy register: SError */ + PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */ + PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */ + PORT_FBS = 0x40, /* FIS-based Switching */ + PORT_DEVSLP = 0x44, /* device sleep */ + + /* PORT_IRQ_{STAT,MASK} bits */ + PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */ + PORT_IRQ_TF_ERR = (1 << 30), /* task file error */ + PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */ + PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */ + PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */ + PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */ + PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */ + PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */ + + PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */ + PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */ + PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */ + PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */ + PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */ + PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS 
rx'd */ + PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */ + PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */ + PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */ + + PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR | + PORT_IRQ_IF_ERR | + PORT_IRQ_CONNECT | + PORT_IRQ_PHYRDY | + PORT_IRQ_UNK_FIS | + PORT_IRQ_BAD_PMP, + PORT_IRQ_ERROR = PORT_IRQ_FREEZE | + PORT_IRQ_TF_ERR | + PORT_IRQ_HBUS_DATA_ERR, + DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE | + PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS | + PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS, + + /* PORT_CMD bits */ + PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */ + PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */ + PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ + PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */ + PORT_CMD_PMP = (1 << 17), /* PMP attached */ + PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ + PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ + PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ + PORT_CMD_CLO = (1 << 3), /* Command list override */ + PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ + PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ + PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ + + PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */ + PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */ + PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ + PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ + + /* PORT_FBS bits */ + PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */ + PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */ + PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */ + PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */ + PORT_FBS_SDE = (1 << 2), /* FBS single device error */ + PORT_FBS_DEC = (1 << 1), /* FBS device error clear */ + PORT_FBS_EN = (1 << 0), /* Enable FBS */ + + /* PORT_DEVSLP bits */ + PORT_DEVSLP_DM_OFFSET = 25, /* DITO multiplier offset */ + PORT_DEVSLP_DM_MASK = (0xf << 25), /* DITO multiplier mask */ + PORT_DEVSLP_DITO_OFFSET = 15, /* DITO offset */ + PORT_DEVSLP_MDAT_OFFSET = 10, /* Minimum assertion time */ + PORT_DEVSLP_DETO_OFFSET = 2, /* DevSlp exit timeout */ + PORT_DEVSLP_DSP = (1 << 1), /* DevSlp present */ + PORT_DEVSLP_ADSE = (1 << 0), /* Aggressive DevSlp enable */ + + /* hpriv->flags bits */ + +#define AHCI_HFLAGS(flags) .private_data = (void *)(flags) + + AHCI_HFLAG_NO_NCQ = (1 << 0), + AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */ + AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */ + AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */ + AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ + AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ + AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ + AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ + AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ + AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ + AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as + link offline */ + AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */ + AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */ + AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */ + AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on + port start (wait until + error-handling stage) */ + AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */ + + /* ap->flags bits */ + + /* + AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | + ATA_FLAG_ACPI_SATA 
| ATA_FLAG_AN, + */ + + ICH_MAP = 0x90, /* ICH MAP register */ + + /* em constants */ + EM_MAX_SLOTS = 8, + EM_MAX_RETRY = 5, + + /* em_ctl bits */ + EM_CTL_RST = (1 << 9), /* Reset */ + EM_CTL_TM = (1 << 8), /* Transmit Message */ + EM_CTL_MR = (1 << 0), /* Message Received */ + EM_CTL_ALHD = (1 << 26), /* Activity LED */ + EM_CTL_XMT = (1 << 25), /* Transmit Only */ + EM_CTL_SMB = (1 << 24), /* Single Message Buffer */ + EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */ + EM_CTL_SES = (1 << 18), /* SES-2 messages supported */ + EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */ + EM_CTL_LED = (1 << 16), /* LED messages supported */ + + /* em message type */ + EM_MSG_TYPE_LED = (1 << 0), /* LED */ + EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */ + EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */ + EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */ + + FIS_TYPE_REG_H2D = 0x27, + FIS_TYPE_REG_D2H = 0x34, + FIS_TYPE_DMA_ACT = 0x39, + FIS_TYPE_DMA_SETUP = 0x41, + FIS_TYPE_DATA = 0x46, + FIS_TYPE_BIST = 0x58, + FIS_TYPE_PIO_SETUP = 0x5F, + FIS_TYPE_DEV_BITS = 0xA1, +}; + +/* End from linux 3.9 */ + +#endif /* _GNUMACH_AHCI_H */ diff --git a/linux/dev/include/asm-i386/page.h b/linux/dev/include/asm-i386/page.h new file mode 100644 index 0000000..be81848 --- /dev/null +++ b/linux/dev/include/asm-i386/page.h @@ -0,0 +1,59 @@ +#ifndef _I386_PAGE_H +#define _I386_PAGE_H + +#include <mach/vm_param.h> + +#ifdef __KERNEL__ + +#define STRICT_MM_TYPECHECKS + +#ifdef STRICT_MM_TYPECHECKS +/* + * These are used to make use of C type-checking.. + */ +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pmd; } pmd_t; +typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((x).pmd) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +#else +/* + * .. while these make it easier on the compiler + */ +typedef unsigned long pte_t; +typedef unsigned long pmd_t; +typedef unsigned long pgd_t; +typedef unsigned long pgprot_t; + +#define pte_val(x) (x) +#define pmd_val(x) (x) +#define pgd_val(x) (x) +#define pgprot_val(x) (x) + +#define __pte(x) (x) +#define __pmd(x) (x) +#define __pgd(x) (x) +#define __pgprot(x) (x) + +#endif + +/* to align the pointer to the (next) page boundary */ +#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) + +/* This handles the memory map.. */ +#define PAGE_OFFSET 0 +#define MAP_NR(addr) (((unsigned long)(addr)) >> PAGE_SHIFT) + +#endif /* __KERNEL__ */ + +#endif /* _I386_PAGE_H */ diff --git a/linux/dev/include/asm-i386/smp.h b/linux/dev/include/asm-i386/smp.h new file mode 100644 index 0000000..fabe01d --- /dev/null +++ b/linux/dev/include/asm-i386/smp.h @@ -0,0 +1,8 @@ +#ifndef _I386_SMP_H +#define _I386_SMP_H + +#include <machine/cpu_number.h> + +#define smp_processor_id() cpu_number() + +#endif /* _I386_SMP_H */ diff --git a/linux/dev/include/asm-i386/string.h b/linux/dev/include/asm-i386/string.h new file mode 100644 index 0000000..f41ca5c --- /dev/null +++ b/linux/dev/include/asm-i386/string.h @@ -0,0 +1,487 @@ +#ifndef _I386_STRING_H_ +#define _I386_STRING_H_ + +/* + * On a 486 or Pentium, we are better off not using the + * byte string operations. But on a 386 or a PPro the + * byte string ops are faster than doing it by hand + * (MUCH faster on a Pentium). 
+ * + * Also, the byte strings actually work correctly. Forget + * the i486 routines for now as they may be broken.. + */ +#if FIXED_486_STRING && (CPU == 486 || CPU == 586) +#include <asm/string-486.h> +#else + +/* + * This string-include defines all string functions as inline + * functions. Use gcc. It also assumes ds=es=data space, this should be + * normal. Most of the string-functions are rather heavily hand-optimized, + * see especially strtok,strstr,str[c]spn. They should work, but are not + * very easy to understand. Everything is done entirely within the register + * set, making the functions fast and clean. String instructions have been + * used through-out, making for "slightly" unclear code :-) + * + * NO Copyright (C) 1991, 1992 Linus Torvalds, + * consider these trivial functions to be PD. + */ + +#define __HAVE_ARCH_STRCPY +static inline char * strcpy(char * dest,const char *src) +{ +int d0, d1, d2; +__asm__ __volatile__( + "cld\n" + "1:\tlodsb\n\t" + "stosb\n\t" + "testb %%al,%%al\n\t" + "jne 1b" + : "=&S" (d0), "=&D" (d1), "=&a" (d2) + :"0" (src),"1" (dest) : "memory"); +return dest; +} + +#define __HAVE_ARCH_STRNCPY +static inline char * strncpy(char * dest,const char *src,size_t count) +{ +int d0, d1, d2, d3; +__asm__ __volatile__( + "cld\n" + "1:\tdecl %2\n\t" + "js 2f\n\t" + "lodsb\n\t" + "stosb\n\t" + "testb %%al,%%al\n\t" + "jne 1b\n\t" + "rep\n\t" + "stosb\n" + "2:" + : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) + :"0" (src),"1" (dest),"2" (count) : "memory"); +return dest; +} + +#define __HAVE_ARCH_STRCAT +static inline char * strcat(char * dest,const char * src) +{ +int d0, d1, d2, d3; +__asm__ __volatile__( + "cld\n\t" + "repne\n\t" + "scasb\n\t" + "decl %1\n" + "1:\tlodsb\n\t" + "stosb\n\t" + "testb %%al,%%al\n\t" + "jne 1b" + : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) + : "0" (src), "1" (dest), "2" (0), "3" (0xffffffff):"memory"); +return dest; +} + +#define __HAVE_ARCH_STRNCAT +static inline char * strncat(char * dest,const char * src,size_t count) +{ +int d0, d1, d2, d3; +__asm__ __volatile__( + "cld\n\t" + "repne\n\t" + "scasb\n\t" + "decl %1\n\t" + "movl %8,%3\n" + "1:\tdecl %3\n\t" + "js 2f\n\t" + "lodsb\n\t" + "stosb\n\t" + "testb %%al,%%al\n\t" + "jne 1b\n" + "2:\txorl %2,%2\n\t" + "stosb" + : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) + : "0" (src),"1" (dest),"2" (0),"3" (0xffffffff), "g" (count) + : "memory"); +return dest; +} + +#define __HAVE_ARCH_STRCMP +static inline int strcmp(const char * cs,const char * ct) +{ +int d0, d1; +register int __res; +__asm__ __volatile__( + "cld\n" + "1:\tlodsb\n\t" + "scasb\n\t" + "jne 2f\n\t" + "testb %%al,%%al\n\t" + "jne 1b\n\t" + "xorl %%eax,%%eax\n\t" + "jmp 3f\n" + "2:\tsbbl %%eax,%%eax\n\t" + "orb $1,%%al\n" + "3:" + :"=a" (__res), "=&S" (d0), "=&D" (d1) + :"1" (cs),"2" (ct)); +return __res; +} + +#define __HAVE_ARCH_STRNCMP +static inline int strncmp(const char * cs,const char * ct,size_t count) +{ +register int __res; +int d0, d1, d2; +__asm__ __volatile__( + "cld\n" + "1:\tdecl %3\n\t" + "js 2f\n\t" + "lodsb\n\t" + "scasb\n\t" + "jne 3f\n\t" + "testb %%al,%%al\n\t" + "jne 1b\n" + "2:\txorl %%eax,%%eax\n\t" + "jmp 4f\n" + "3:\tsbbl %%eax,%%eax\n\t" + "orb $1,%%al\n" + "4:" + :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2) + :"1" (cs),"2" (ct),"3" (count)); +return __res; +} + +#define __HAVE_ARCH_STRCHR +static inline char * strchr(const char * s, int c) +{ +int d0; +register char * __res; +__asm__ __volatile__( + "cld\n\t" + "movb %%al,%%ah\n" + "1:\tlodsb\n\t" + "cmpb %%ah,%%al\n\t" + "je 
2f\n\t" + "testb %%al,%%al\n\t" + "jne 1b\n\t" + "movl $1,%1\n" + "2:\tmovl %1,%0\n\t" + "decl %0" + :"=a" (__res), "=&S" (d0) : "1" (s),"0" (c)); +return __res; +} + +#define __HAVE_ARCH_STRRCHR +static inline char * strrchr(const char * s, int c) +{ +int d0, d1; +register char * __res; +__asm__ __volatile__( + "cld\n\t" + "movb %%al,%%ah\n" + "1:\tlodsb\n\t" + "cmpb %%ah,%%al\n\t" + "jne 2f\n\t" + "leal -1(%%esi),%0\n" + "2:\ttestb %%al,%%al\n\t" + "jne 1b" + :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c)); +return __res; +} + +#define __HAVE_ARCH_STRLEN +static inline size_t strlen(const char * s) +{ +int d0; +register int __res; +__asm__ __volatile__( + "cld\n\t" + "repne\n\t" + "scasb\n\t" + "notl %0\n\t" + "decl %0" + :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffff)); +return __res; +} + +static inline void * __memcpy(void * to, const void * from, size_t n) +{ +int d0, d1, d2; +__asm__ __volatile__( + "cld\n\t" + "rep ; movsl\n\t" + "testb $2,%b4\n\t" + "je 1f\n\t" + "movsw\n" + "1:\ttestb $1,%b4\n\t" + "je 2f\n\t" + "movsb\n" + "2:" + : "=&c" (d0), "=&D" (d1), "=&S" (d2) + :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from) + : "memory"); +return (to); +} + +/* + * This looks horribly ugly, but the compiler can optimize it totally, + * as the count is constant. + */ +static inline void * __constant_memcpy(void * to, const void * from, size_t n) +{ + switch (n) { + case 0: + return to; + case 1: + *(unsigned char *)to = *(const unsigned char *)from; + return to; + case 2: + *(unsigned short *)to = *(const unsigned short *)from; + return to; + case 3: + *(unsigned short *)to = *(const unsigned short *)from; + *(2+(unsigned char *)to) = *(2+(const unsigned char *)from); + return to; + case 4: + *(unsigned long *)to = *(const unsigned long *)from; + return to; + case 6: /* for Ethernet addresses */ + *(unsigned long *)to = *(const unsigned long *)from; + *(2+(unsigned short *)to) = *(2+(const unsigned short *)from); + return to; + case 8: + *(unsigned long *)to = *(const unsigned long *)from; + *(1+(unsigned long *)to) = *(1+(const unsigned long *)from); + return to; + case 12: + *(unsigned long *)to = *(const unsigned long *)from; + *(1+(unsigned long *)to) = *(1+(const unsigned long *)from); + *(2+(unsigned long *)to) = *(2+(const unsigned long *)from); + return to; + case 16: + *(unsigned long *)to = *(const unsigned long *)from; + *(1+(unsigned long *)to) = *(1+(const unsigned long *)from); + *(2+(unsigned long *)to) = *(2+(const unsigned long *)from); + *(3+(unsigned long *)to) = *(3+(const unsigned long *)from); + return to; + case 20: + *(unsigned long *)to = *(const unsigned long *)from; + *(1+(unsigned long *)to) = *(1+(const unsigned long *)from); + *(2+(unsigned long *)to) = *(2+(const unsigned long *)from); + *(3+(unsigned long *)to) = *(3+(const unsigned long *)from); + *(4+(unsigned long *)to) = *(4+(const unsigned long *)from); + return to; + } +#define COMMON(x) \ +__asm__ __volatile__( \ + "cld\n\t" \ + "rep ; movsl" \ + x \ + : "=&c" (d0), "=&D" (d1), "=&S" (d2) \ + : "0" (n/4),"1" ((long) to),"2" ((long) from) \ + : "memory"); +{ + int d0, d1, d2; + switch (n % 4) { + case 0: COMMON(""); return to; + case 1: COMMON("\n\tmovsb"); return to; + case 2: COMMON("\n\tmovsw"); return to; + default: COMMON("\n\tmovsw\n\tmovsb"); return to; + } +} + +#undef COMMON +} + +#define __HAVE_ARCH_MEMCPY +#define memcpy(t, f, n) \ +(__builtin_constant_p(n) ? 
\ + __constant_memcpy((t),(f),(n)) : \ + __memcpy((t),(f),(n))) + +#define __HAVE_ARCH_MEMMOVE +static inline void * memmove(void * dest,const void * src, size_t n) +{ +int d0, d1, d2; +if (dest<src) +__asm__ __volatile__( + "cld\n\t" + "rep\n\t" + "movsb" + : "=&c" (d0), "=&S" (d1), "=&D" (d2) + :"0" (n),"1" (src),"2" (dest) + : "memory"); +else +__asm__ __volatile__( + "std\n\t" + "rep\n\t" + "movsb\n\t" + "cld" + : "=&c" (d0), "=&S" (d1), "=&D" (d2) + :"0" (n), + "1" (n-1+(const char *)src), + "2" (n-1+(char *)dest) + :"memory"); +return dest; +} + +#define memcmp __builtin_memcmp + +#define __HAVE_ARCH_MEMCHR +static inline void * memchr(const void * cs,int c,size_t count) +{ +int d0; +register void * __res; +if (!count) + return NULL; +__asm__ __volatile__( + "cld\n\t" + "repne\n\t" + "scasb\n\t" + "je 1f\n\t" + "movl $1,%0\n" + "1:\tdecl %0" + :"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count)); +return __res; +} + +static inline void * __memset_generic(void * s, char c,size_t count) +{ +int d0, d1; +__asm__ __volatile__( + "cld\n\t" + "rep\n\t" + "stosb" + : "=&c" (d0), "=&D" (d1) + :"a" (c),"1" (s),"0" (count) + :"memory"); +return s; +} + +/* we might want to write optimized versions of these later */ +#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count)) + +/* + * memset(x,0,y) is a reasonably common thing to do, so we want to fill + * things 32 bits at a time even when we don't know the size of the + * area at compile-time.. + */ +static inline void * __constant_c_memset(void * s, unsigned long c, size_t count) +{ +int d0, d1; +__asm__ __volatile__( + "cld\n\t" + "rep ; stosl\n\t" + "testb $2,%b3\n\t" + "je 1f\n\t" + "stosw\n" + "1:\ttestb $1,%b3\n\t" + "je 2f\n\t" + "stosb\n" + "2:" + : "=&c" (d0), "=&D" (d1) + :"a" (c), "q" (count), "0" (count/4), "1" ((long) s) + :"memory"); +return (s); +} + +/* Added by Gertjan van Wingerde to make minix and sysv module work */ +#define __HAVE_ARCH_STRNLEN +static inline size_t strnlen(const char * s, size_t count) +{ +int d0; +register int __res; +__asm__ __volatile__( + "movl %2,%0\n\t" + "jmp 2f\n" + "1:\tcmpb $0,(%0)\n\t" + "je 3f\n\t" + "incl %0\n" + "2:\tdecl %1\n\t" + "cmpl $-1,%1\n\t" + "jne 1b\n" + "3:\tsubl %2,%0" + :"=a" (__res), "=&d" (d0) + :"c" (s),"1" (count)); +return __res; +} +/* end of additional stuff */ + +/* + * This looks horribly ugly, but the compiler can optimize it totally, + * as we by now know that both pattern and count is constant.. + */ +static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count) +{ + switch (count) { + case 0: + return s; + case 1: + *(unsigned char *)s = pattern; + return s; + case 2: + *(unsigned short *)s = pattern; + return s; + case 3: + *(unsigned short *)s = pattern; + *(2+(unsigned char *)s) = pattern; + return s; + case 4: + *(unsigned long *)s = pattern; + return s; + } +#define COMMON(x) \ +__asm__ __volatile__("cld\n\t" \ + "rep ; stosl" \ + x \ + : "=&c" (d0), "=&D" (d1) \ + : "a" (pattern),"0" (count/4),"1" ((long) s) \ + : "memory") +{ + int d0, d1; + switch (count % 4) { + case 0: COMMON(""); return s; + case 1: COMMON("\n\tstosb"); return s; + case 2: COMMON("\n\tstosw"); return s; + default: COMMON("\n\tstosw\n\tstosb"); return s; + } +} + +#undef COMMON +} + +#define __constant_c_x_memset(s, c, count) \ +(__builtin_constant_p(count) ? \ + __constant_c_and_count_memset((s),(c),(count)) : \ + __constant_c_memset((s),(c),(count))) + +#define __memset(s, c, count) \ +(__builtin_constant_p(count) ? 
\ + __constant_count_memset((s),(c),(count)) : \ + __memset_generic((s),(c),(count))) + +#define __HAVE_ARCH_MEMSET +#define memset(s, c, count) \ +(__builtin_constant_p(c) ? \ + __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \ + __memset((s),(c),(count))) + +/* + * find the first occurrence of byte 'c', or 1 past the area if none + */ +#define __HAVE_ARCH_MEMSCAN +static inline void * memscan(void * addr, int c, size_t size) +{ + if (!size) + return addr; + __asm__("cld\n" + "repnz; scasb\n" + "jnz 1f\n" + "dec %%edi\n" + "1:\n" + : "=D" (addr), "=c" (size) + : "0" (addr), "1" (size), "a" (c)); + return addr; +} + +#endif +#endif diff --git a/linux/dev/include/asm-i386/system.h b/linux/dev/include/asm-i386/system.h new file mode 100644 index 0000000..5187c5e --- /dev/null +++ b/linux/dev/include/asm-i386/system.h @@ -0,0 +1,356 @@ +#ifndef __ASM_SYSTEM_H +#define __ASM_SYSTEM_H + +#include <i386/ipl.h> /* curr_ipl[], splx */ +#include <kern/cpu_number.h> + +#include <asm/segment.h> + +/* + * Entry into gdt where to find first TSS. GDT layout: + * 0 - null + * 1 - not used + * 2 - kernel code segment + * 3 - kernel data segment + * 4 - user code segment + * 5 - user data segment + * ... + * 8 - TSS #0 + * 9 - LDT #0 + * 10 - TSS #1 + * 11 - LDT #1 + */ +#define FIRST_TSS_ENTRY 8 +#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1) +#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3)) +#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3)) +#define load_TR(n) __asm__("ltr %%ax": /* no output */ :"a" (_TSS(n))) +#define load_ldt(n) __asm__("lldt %%ax": /* no output */ :"a" (_LDT(n))) +#define store_TR(n) \ +__asm__("str %%ax\n\t" \ + "subl %2,%%eax\n\t" \ + "shrl $4,%%eax" \ + :"=a" (n) \ + :"0" (0),"i" (FIRST_TSS_ENTRY<<3)) + +/* This special macro can be used to load a debugging register */ + +#define loaddebug(tsk,register) \ + __asm__("movl %0,%%edx\n\t" \ + "movl %%edx,%%db" #register "\n\t" \ + : /* no output */ \ + :"m" (tsk->debugreg[register]) \ + :"dx"); + + +/* + * switch_to(n) should switch tasks to task nr n, first + * checking that n isn't the current task, in which case it does nothing. + * This also clears the TS-flag if the task we switched to has used + * the math co-processor latest. + * + * It also reloads the debug regs if necessary.. + */ + + +#ifdef __SMP__ + /* + * Keep the lock depth straight. If we switch on an interrupt from + * kernel->user task we need to lose a depth, and if we switch the + * other way we need to gain a depth. Same layer switches come out + * the same. + * + * We spot a switch in user mode because the kernel counter is the + * same as the interrupt counter depth. (We never switch during the + * message/invalidate IPI). + * + * We fsave/fwait so that an exception goes off at the right time + * (as a call from the fsave or fwait in effect) rather than to + * the wrong process. 
+ */ + +#define switch_to(prev,next) do { \ + cli();\ + if(prev->flags&PF_USEDFPU) \ + { \ + __asm__ __volatile__("fnsave %0":"=m" (prev->tss.i387.hard)); \ + __asm__ __volatile__("fwait"); \ + prev->flags&=~PF_USEDFPU; \ + } \ + prev->lock_depth=syscall_count; \ + kernel_counter+=next->lock_depth-prev->lock_depth; \ + syscall_count=next->lock_depth; \ +__asm__("pushl %%edx\n\t" \ + "movl "SYMBOL_NAME_STR(apic_reg)",%%edx\n\t" \ + "movl 0x20(%%edx), %%edx\n\t" \ + "shrl $22,%%edx\n\t" \ + "and $0x3C,%%edx\n\t" \ + "movl %%ecx,"SYMBOL_NAME_STR(current_set)"(,%%edx)\n\t" \ + "popl %%edx\n\t" \ + "ljmp %0\n\t" \ + "sti\n\t" \ + : /* no output */ \ + :"m" (*(((char *)&next->tss.tr)-4)), \ + "c" (next)); \ + /* Now maybe reload the debug registers */ \ + if(prev->debugreg[7]){ \ + loaddebug(prev,0); \ + loaddebug(prev,1); \ + loaddebug(prev,2); \ + loaddebug(prev,3); \ + loaddebug(prev,6); \ + } \ +} while (0) + +#else +#define switch_to(prev,next) do { \ +__asm__("movl %2,"SYMBOL_NAME_STR(current_set)"\n\t" \ + "ljmp %0\n\t" \ + "cmpl %1,"SYMBOL_NAME_STR(last_task_used_math)"\n\t" \ + "jne 1f\n\t" \ + "clts\n" \ + "1:" \ + : /* no outputs */ \ + :"m" (*(((char *)&next->tss.tr)-4)), \ + "r" (prev), "r" (next)); \ + /* Now maybe reload the debug registers */ \ + if(prev->debugreg[7]){ \ + loaddebug(prev,0); \ + loaddebug(prev,1); \ + loaddebug(prev,2); \ + loaddebug(prev,3); \ + loaddebug(prev,6); \ + } \ +} while (0) +#endif + +#define _set_base(addr,base) \ +__asm__("movw %%dx,%0\n\t" \ + "rorl $16,%%edx\n\t" \ + "movb %%dl,%1\n\t" \ + "movb %%dh,%2" \ + : /* no output */ \ + :"m" (*((addr)+2)), \ + "m" (*((addr)+4)), \ + "m" (*((addr)+7)), \ + "d" (base) \ + :"dx") + +#define _set_limit(addr,limit) \ +__asm__("movw %%dx,%0\n\t" \ + "rorl $16,%%edx\n\t" \ + "movb %1,%%dh\n\t" \ + "andb $0xf0,%%dh\n\t" \ + "orb %%dh,%%dl\n\t" \ + "movb %%dl,%1" \ + : /* no output */ \ + :"m" (*(addr)), \ + "m" (*((addr)+6)), \ + "d" (limit) \ + :"dx") + +#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , base ) +#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , (limit-1)>>12 ) + +static inline unsigned long _get_base(char * addr) +{ + unsigned long __base; + __asm__("movb %3,%%dh\n\t" + "movb %2,%%dl\n\t" + "shll $16,%%edx\n\t" + "movw %1,%%dx" + :"=&d" (__base) + :"m" (*((addr)+2)), + "m" (*((addr)+4)), + "m" (*((addr)+7))); + return __base; +} + +#define get_base(ldt) _get_base( ((char *)&(ldt)) ) + +static inline unsigned long get_limit(unsigned long segment) +{ + unsigned long __limit; + __asm__("lsll %1,%0" + :"=r" (__limit):"r" (segment)); + return __limit+1; +} + +#define nop() __asm__ __volatile__ ("nop") + +/* + * Clear and set 'TS' bit respectively + */ +#define clts() __asm__ __volatile__ ("clts") +#define stts() \ +__asm__ __volatile__ ( \ + "movl %%cr0,%%eax\n\t" \ + "orl $8,%%eax\n\t" \ + "movl %%eax,%%cr0" \ + : /* no outputs */ \ + : /* no inputs */ \ + :"ax") + + +#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +#define tas(ptr) (xchg((ptr),1)) + +struct __xchg_dummy { unsigned long a[100]; }; +#define __xg(x) ((struct __xchg_dummy *)(x)) + +static inline unsigned long __xchg(unsigned long x, void * ptr, int size) +{ + switch (size) { + case 1: + __asm__("xchgb %b0,%1" + :"=q" (x) + :"m" (*__xg(ptr)), "0" (x) + :"memory"); + break; + case 2: + __asm__("xchgw %w0,%1" + :"=r" (x) + :"m" (*__xg(ptr)), "0" (x) + :"memory"); + break; + case 4: + __asm__("xchgl %0,%1" + :"=r" (x) + :"m" (*__xg(ptr)), "0" (x) + :"memory"); + break; + } + return 
x; +} + +#define mb() __asm__ __volatile__ ("" : : :"memory") +#define __sti() __asm__ __volatile__ ("sti": : :"memory") +#define __cli() __asm__ __volatile__ ("cli": : :"memory") +#define __save_flags(x) (x = ((curr_ipl[cpu_number()] > 0) ? 0 : (1 << 9))) +#define __restore_flags(x) splx((x & (1 << 9)) ? 0 : 7) + +#ifdef __SMP__ + +extern void __global_cli(void); +extern void __global_sti(void); +extern unsigned long __global_save_flags(void); +extern void __global_restore_flags(unsigned long); +#define cli() __global_cli() +#define sti() __global_sti() +#define save_flags(x) ((x)=__global_save_flags()) +#define restore_flags(x) __global_restore_flags(x) + +#else + +#define cli() __cli() +#define sti() __sti() +#define save_flags(x) __save_flags(x) +#define restore_flags(x) __restore_flags(x) + +#endif + + +#define iret() __asm__ __volatile__ ("iret": : :"memory") + +#define _set_gate(gate_addr,type,dpl,addr) \ +__asm__ __volatile__ ("movw %%dx,%%ax\n\t" \ + "movw %2,%%dx\n\t" \ + "movl %%eax,%0\n\t" \ + "movl %%edx,%1" \ + :"=m" (*((long *) (gate_addr))), \ + "=m" (*(1+(long *) (gate_addr))) \ + :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \ + "d" ((char *) (addr)),"a" (KERNEL_CS << 16) \ + :"ax","dx") + +#define set_intr_gate(n,addr) \ + _set_gate(&idt[n],14,0,addr) + +#define set_trap_gate(n,addr) \ + _set_gate(&idt[n],15,0,addr) + +#define set_system_gate(n,addr) \ + _set_gate(&idt[n],15,3,addr) + +#define set_call_gate(a,addr) \ + _set_gate(a,12,3,addr) + +#define _set_seg_desc(gate_addr,type,dpl,base,limit) {\ + *((gate_addr)+1) = ((base) & 0xff000000) | \ + (((base) & 0x00ff0000)>>16) | \ + ((limit) & 0xf0000) | \ + ((dpl)<<13) | \ + (0x00408000) | \ + ((type)<<8); \ + *(gate_addr) = (((base) & 0x0000ffff)<<16) | \ + ((limit) & 0x0ffff); } + +#define _set_tssldt_desc(n,addr,limit,type) \ +__asm__ __volatile__ ("movw $" #limit ",%1\n\t" \ + "movw %%ax,%2\n\t" \ + "rorl $16,%%eax\n\t" \ + "movb %%al,%3\n\t" \ + "movb $" type ",%4\n\t" \ + "movb $0x00,%5\n\t" \ + "movb %%ah,%6\n\t" \ + "rorl $16,%%eax" \ + : /* no output */ \ + :"a" (addr+0xc0000000), "m" (*(n)), "m" (*(n+2)), "m" (*(n+4)), \ + "m" (*(n+5)), "m" (*(n+6)), "m" (*(n+7)) \ + ) + +#define set_tss_desc(n,addr) _set_tssldt_desc(((char *) (n)),((int)(addr)),235,"0x89") +#define set_ldt_desc(n,addr,size) \ + _set_tssldt_desc(((char *) (n)),((int)(addr)),((size << 3) - 1),"0x82") + +/* + * This is the ldt that every process will get unless we need + * something other than this. 
+ */ +extern struct desc_struct default_ldt; + +/* + * disable hlt during certain critical i/o operations + */ +#ifndef MACH +#define HAVE_DISABLE_HLT +#endif +void disable_hlt(void); +void enable_hlt(void); + +static __inline__ unsigned long long rdmsr(unsigned int msr) +{ + unsigned long long ret; + __asm__ __volatile__("rdmsr" + : "=A" (ret) + : "c" (msr)); + return ret; +} + +static __inline__ void wrmsr(unsigned int msr,unsigned long long val) +{ + __asm__ __volatile__("wrmsr" + : /* no Outputs */ + : "c" (msr), "A" (val)); +} + + +static __inline__ unsigned long long rdtsc(void) +{ + unsigned long long ret; + __asm__ __volatile__("rdtsc" + : "=A" (ret) + : /* no inputs */); + return ret; +} + +static __inline__ unsigned long long rdpmc(unsigned int counter) +{ + unsigned long long ret; + __asm__ __volatile__("rdpmc" + : "=A" (ret) + : "c" (counter)); + return ret; +} + +#endif diff --git a/linux/dev/include/asm-i386/uaccess.h b/linux/dev/include/asm-i386/uaccess.h new file mode 100644 index 0000000..9d841c9 --- /dev/null +++ b/linux/dev/include/asm-i386/uaccess.h @@ -0,0 +1 @@ +/* Dummy file. */ diff --git a/linux/dev/include/linux/blk.h b/linux/dev/include/linux/blk.h new file mode 100644 index 0000000..b924a14 --- /dev/null +++ b/linux/dev/include/linux/blk.h @@ -0,0 +1,471 @@ +/* Is this okay? by OKUJI Yoshinori */ +#ifndef _BLK_H +#define _BLK_H + +#include <linux/blkdev.h> +#include <linux/locks.h> +#include <linux/malloc.h> +#include <linux/config.h> +#include <linux/md.h> + +/* + * NR_REQUEST is the number of entries in the request-queue. + * NOTE that writes may use only the low 2/3 of these: reads + * take precedence. + */ +#define NR_REQUEST 64 + +/* + * This is used in the elevator algorithm. We don't prioritise reads + * over writes any more --- although reads are more time-critical than + * writes, by treating them equally we increase filesystem throughput. + * This turns out to give better overall performance. -- sct + */ +#define IN_ORDER(s1,s2) \ +((s1)->rq_dev < (s2)->rq_dev || (((s1)->rq_dev == (s2)->rq_dev && \ +(s1)->sector < (s2)->sector))) + +/* + * These will have to be changed to be aware of different buffer + * sizes etc.. It actually needs a major cleanup. + */ +#if defined(IDE_DRIVER) || defined(MD_DRIVER) +#define SECTOR_MASK ((BLOCK_SIZE >> 9) - 1) +#else +#define SECTOR_MASK (blksize_size[MAJOR_NR] && \ + blksize_size[MAJOR_NR][MINOR(CURRENT->rq_dev)] ? 
\ + ((blksize_size[MAJOR_NR][MINOR(CURRENT->rq_dev)] >> 9) - 1) : \ + ((BLOCK_SIZE >> 9) - 1)) +#endif /* IDE_DRIVER */ + +#define SUBSECTOR(block) (CURRENT->current_nr_sectors > 0) + +#ifdef CONFIG_CDU31A +extern int cdu31a_init(void); +#endif /* CONFIG_CDU31A */ +#ifdef CONFIG_MCD +extern int mcd_init(void); +#endif /* CONFIG_MCD */ +#ifdef CONFIG_MCDX +extern int mcdx_init(void); +#endif /* CONFIG_MCDX */ +#ifdef CONFIG_SBPCD +extern int sbpcd_init(void); +#endif /* CONFIG_SBPCD */ +#ifdef CONFIG_AZTCD +extern int aztcd_init(void); +#endif /* CONFIG_AZTCD */ +#ifdef CONFIG_CDU535 +extern int sony535_init(void); +#endif /* CONFIG_CDU535 */ +#ifdef CONFIG_GSCD +extern int gscd_init(void); +#endif /* CONFIG_GSCD */ +#ifdef CONFIG_CM206 +extern int cm206_init(void); +#endif /* CONFIG_CM206 */ +#ifdef CONFIG_OPTCD +extern int optcd_init(void); +#endif /* CONFIG_OPTCD */ +#ifdef CONFIG_SJCD +extern int sjcd_init(void); +#endif /* CONFIG_SJCD */ +#ifdef CONFIG_CDI_INIT +extern int cdi_init(void); +#endif /* CONFIG_CDI_INIT */ +#ifdef CONFIG_BLK_DEV_HD +extern int hd_init(void); +#endif +#ifdef CONFIG_BLK_DEV_IDE +extern int ide_init(void); +extern void ide_disable_base(unsigned base); +#endif +#ifdef CONFIG_BLK_DEV_XD +extern int xd_init(void); +#endif +#ifdef CONFIG_BLK_DEV_LOOP +extern int loop_init(void); +#endif +#ifdef CONFIG_BLK_DEV_MD +extern int md_init(void); +#endif /* CONFIG_BLK_DEV_MD */ + +extern void set_device_ro(kdev_t dev,int flag); +void add_blkdev_randomness(int major); + +extern int floppy_init(void); +extern void rd_load(void); +extern int rd_init(void); +extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */ +extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */ +extern int rd_image_start; /* starting block # of image */ + +#ifdef CONFIG_BLK_DEV_INITRD + +#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */ + +extern unsigned long initrd_start,initrd_end; +extern int mount_initrd; /* zero if initrd should not be mounted */ +void initrd_init(void); + +#endif + +#define RO_IOCTLS(dev,where) \ + case BLKROSET: { int __err; if (!suser()) return -EACCES; \ + __err = verify_area(VERIFY_READ, (void *) (where), sizeof(long)); \ + if (!__err) set_device_ro((dev),get_fs_long((long *) (where))); return __err; } \ + case BLKROGET: { int __err = verify_area(VERIFY_WRITE, (void *) (where), sizeof(long)); \ + if (!__err) put_fs_long(0!=is_read_only(dev),(long *) (where)); return __err; } + +#if defined(MAJOR_NR) || defined(IDE_DRIVER) + +/* + * Add entries as needed. + */ + +#ifdef IDE_DRIVER + +#define DEVICE_NR(device) (MINOR(device) >> PARTN_BITS) +#define DEVICE_ON(device) /* nothing */ +#define DEVICE_OFF(device) /* nothing */ + +#elif (MAJOR_NR == RAMDISK_MAJOR) + +/* ram disk */ +#define DEVICE_NAME "ramdisk" +#define DEVICE_REQUEST rd_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) +#define DEVICE_NO_RANDOM + +#elif (MAJOR_NR == FLOPPY_MAJOR) + +static void floppy_off(unsigned int nr); + +#define DEVICE_NAME "floppy" +#define DEVICE_INTR do_floppy +#define DEVICE_REQUEST do_fd_request +#define DEVICE_NR(device) ( (MINOR(device) & 3) | ((MINOR(device) & 0x80 ) >> 5 )) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) floppy_off(DEVICE_NR(device)) + +#elif (MAJOR_NR == HD_MAJOR) + +/* harddisk: timeout is 6 seconds.. 
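+   (TIMEOUT_VALUE below is expressed in jiffies, i.e. 6*HZ; SET_TIMER
+   further down arms timer_table[DEVICE_TIMEOUT] with jiffies +
+   TIMEOUT_VALUE, so a request that gets no interrupt within that time
+   can be timed out.)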
*/ +#define DEVICE_NAME "harddisk" +#define DEVICE_INTR do_hd +#define DEVICE_TIMEOUT HD_TIMER +#define TIMEOUT_VALUE (6*HZ) +#define DEVICE_REQUEST do_hd_request +#define DEVICE_NR(device) (MINOR(device)>>6) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == SCSI_DISK_MAJOR) + +#define DEVICE_NAME "scsidisk" +#define DEVICE_INTR do_sd +#define TIMEOUT_VALUE (2*HZ) +#define DEVICE_REQUEST do_sd_request +#define DEVICE_NR(device) (MINOR(device) >> 4) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +/* Kludge to use the same number for both char and block major numbers */ +#elif (MAJOR_NR == MD_MAJOR) && defined(MD_DRIVER) + +#define DEVICE_NAME "Multiple devices driver" +#define DEVICE_REQUEST do_md_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == SCSI_TAPE_MAJOR) + +#define DEVICE_NAME "scsitape" +#define DEVICE_INTR do_st +#define DEVICE_NR(device) (MINOR(device) & 0x7f) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == SCSI_CDROM_MAJOR) + +#define DEVICE_NAME "CD-ROM" +#define DEVICE_INTR do_sr +#define DEVICE_REQUEST do_sr_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == XT_DISK_MAJOR) + +#define DEVICE_NAME "xt disk" +#define DEVICE_REQUEST do_xd_request +#define DEVICE_NR(device) (MINOR(device) >> 6) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == CDU31A_CDROM_MAJOR) + +#define DEVICE_NAME "CDU31A" +#define DEVICE_REQUEST do_cdu31a_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == MITSUMI_CDROM_MAJOR) + +#define DEVICE_NAME "Mitsumi CD-ROM" +/* #define DEVICE_INTR do_mcd */ +#define DEVICE_REQUEST do_mcd_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == MITSUMI_X_CDROM_MAJOR) + +#define DEVICE_NAME "Mitsumi CD-ROM" +/* #define DEVICE_INTR do_mcdx */ +#define DEVICE_REQUEST do_mcdx_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == MATSUSHITA_CDROM_MAJOR) + +#define DEVICE_NAME "Matsushita CD-ROM controller #1" +#define DEVICE_REQUEST do_sbpcd_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == MATSUSHITA_CDROM2_MAJOR) + +#define DEVICE_NAME "Matsushita CD-ROM controller #2" +#define DEVICE_REQUEST do_sbpcd2_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == MATSUSHITA_CDROM3_MAJOR) + +#define DEVICE_NAME "Matsushita CD-ROM controller #3" +#define DEVICE_REQUEST do_sbpcd3_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == MATSUSHITA_CDROM4_MAJOR) + +#define DEVICE_NAME "Matsushita CD-ROM controller #4" +#define DEVICE_REQUEST do_sbpcd4_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == AZTECH_CDROM_MAJOR) + +#define DEVICE_NAME "Aztech CD-ROM" +#define DEVICE_REQUEST do_aztcd_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == CDU535_CDROM_MAJOR) + +#define DEVICE_NAME "SONY-CDU535" +#define DEVICE_INTR do_cdu535 +#define DEVICE_REQUEST 
do_cdu535_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == GOLDSTAR_CDROM_MAJOR) + +#define DEVICE_NAME "Goldstar R420" +#define DEVICE_REQUEST do_gscd_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == CM206_CDROM_MAJOR) +#define DEVICE_NAME "Philips/LMS cd-rom cm206" +#define DEVICE_REQUEST do_cm206_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == OPTICS_CDROM_MAJOR) + +#define DEVICE_NAME "DOLPHIN 8000AT CD-ROM" +#define DEVICE_REQUEST do_optcd_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#elif (MAJOR_NR == SANYO_CDROM_MAJOR) + +#define DEVICE_NAME "Sanyo H94A CD-ROM" +#define DEVICE_REQUEST do_sjcd_request +#define DEVICE_NR(device) (MINOR(device)) +#define DEVICE_ON(device) +#define DEVICE_OFF(device) + +#endif /* MAJOR_NR == whatever */ + +#if (MAJOR_NR != SCSI_TAPE_MAJOR) +#if !defined(IDE_DRIVER) + +#ifndef CURRENT +#define CURRENT (blk_dev[MAJOR_NR].current_request) +#endif + +#define CURRENT_DEV DEVICE_NR(CURRENT->rq_dev) + +#ifdef DEVICE_INTR +static void (*DEVICE_INTR)(void) = NULL; +#endif +#ifdef DEVICE_TIMEOUT + +#define SET_TIMER \ +((timer_table[DEVICE_TIMEOUT].expires = jiffies + TIMEOUT_VALUE), \ +(timer_active |= 1<<DEVICE_TIMEOUT)) + +#define CLEAR_TIMER \ +timer_active &= ~(1<<DEVICE_TIMEOUT) + +#define SET_INTR(x) \ +if ((DEVICE_INTR = (x)) != NULL) \ + SET_TIMER; \ +else \ + CLEAR_TIMER; + +#else + +#define SET_INTR(x) (DEVICE_INTR = (x)) + +#endif /* DEVICE_TIMEOUT */ + +static void (DEVICE_REQUEST)(void); + +#ifdef DEVICE_INTR +#define CLEAR_INTR SET_INTR(NULL) +#else +#define CLEAR_INTR +#endif + +#define INIT_REQUEST \ + if (!CURRENT) {\ + CLEAR_INTR; \ + return; \ + } \ + if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \ + panic(DEVICE_NAME ": request list destroyed"); \ + if (CURRENT->bh) { \ + if (!buffer_locked(CURRENT->bh)) \ + panic(DEVICE_NAME ": block not locked"); \ + } + +#endif /* !defined(IDE_DRIVER) */ + +/* end_request() - SCSI devices have their own version */ +/* - IDE drivers have their own copy too */ + +#if ! SCSI_BLK_MAJOR(MAJOR_NR) + +#if defined(IDE_DRIVER) && !defined(_IDE_C) /* shared copy for IDE modules */ +void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup); +#else + +#ifdef IDE_DRIVER +void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup) { + struct request *req = hwgroup->rq; +#else +static void end_request(int uptodate) { + struct request *req = CURRENT; +#endif /* IDE_DRIVER */ + struct buffer_head * bh; +#ifndef MACH + int nsect; +#endif + + req->errors = 0; + if (!uptodate) { + if (!req->quiet) + printk("end_request: I/O error, dev %s, sector %lu\n", + kdevname(req->rq_dev), req->sector); +#ifdef MACH + for (bh = req->bh; bh; ) + { + struct buffer_head *next = bh->b_reqnext; + bh->b_reqnext = NULL; + mark_buffer_uptodate (bh, 0); + unlock_buffer (bh); + bh = next; + } + req->bh = NULL; +#else + if ((bh = req->bh) != NULL) { + nsect = bh->b_size >> 9; + req->nr_sectors--; + req->nr_sectors &= ~(nsect - 1); + req->sector += nsect; + req->sector &= ~(nsect - 1); + } +#endif + } + + if ((bh = req->bh) != NULL) { + req->bh = bh->b_reqnext; + bh->b_reqnext = NULL; + + /* + * This is our 'MD IO has finished' event handler. 
+ * note that b_state should be cached in a register + * anyways, so the overhead if this checking is almost + * zero. But anyways .. we never get OO for free :) + */ + if (test_bit(BH_MD, &bh->b_state)) { + struct md_personality * pers=(struct md_personality *)bh->personality; + pers->end_request(bh,uptodate); + } + /* + * the normal (nonmirrored and no RAID5) case: + */ + else { + mark_buffer_uptodate(bh, uptodate); + unlock_buffer(bh); + } + if ((bh = req->bh) != NULL) { + req->current_nr_sectors = bh->b_size >> 9; + if (req->nr_sectors < req->current_nr_sectors) { + req->nr_sectors = req->current_nr_sectors; + printk("end_request: buffer-list destroyed\n"); + } + req->buffer = bh->b_data; + return; + } + } +#ifndef DEVICE_NO_RANDOM + add_blkdev_randomness(MAJOR(req->rq_dev)); +#endif +#ifdef IDE_DRIVER + blk_dev[MAJOR(req->rq_dev)].current_request = req->next; + hwgroup->rq = NULL; +#else + DEVICE_OFF(req->rq_dev); + CURRENT = req->next; +#endif /* IDE_DRIVER */ + if (req->sem != NULL) + up(req->sem); + req->rq_status = RQ_INACTIVE; + wake_up(&wait_for_request); +} +#endif /* defined(IDE_DRIVER) && !defined(_IDE_C) */ +#endif /* ! SCSI_BLK_MAJOR(MAJOR_NR) */ +#endif /* (MAJOR_NR != SCSI_TAPE_MAJOR) */ + +#endif /* defined(MAJOR_NR) || defined(IDE_DRIVER) */ + +#endif /* _BLK_H */ diff --git a/linux/dev/include/linux/blkdev.h b/linux/dev/include/linux/blkdev.h new file mode 100644 index 0000000..5bf0a28 --- /dev/null +++ b/linux/dev/include/linux/blkdev.h @@ -0,0 +1,73 @@ +#ifndef _LINUX_BLKDEV_H +#define _LINUX_BLKDEV_H + +#include <linux/major.h> +#include <linux/sched.h> +#include <linux/genhd.h> +#include <linux/tqueue.h> + +/* + * Ok, this is an expanded form so that we can use the same + * request for paging requests when that is implemented. In + * paging, 'bh' is NULL, and the semaphore is used to wait + * for read/write completion. 
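+ *
+ * A rough sketch of that bh-less style of use (illustrative only, not
+ * taken from these sources): the submitter points rq->sem at a locked
+ * semaphore, queues the request, and sleeps until end_request() releases
+ * it:
+ *
+ *	struct semaphore sem = MUTEX_LOCKED;
+ *	req->bh = NULL;
+ *	req->sem = &sem;
+ *	... queue the request on blk_dev[major] ...
+ *	down(&sem);	woken by the up(req->sem) in end_request()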
+ */ +struct request { + volatile int rq_status; /* should split this into a few status bits */ +#define RQ_INACTIVE (-1) +#define RQ_ACTIVE 1 +#define RQ_SCSI_BUSY 0xffff +#define RQ_SCSI_DONE 0xfffe +#define RQ_SCSI_DISCONNECTING 0xffe0 + + kdev_t rq_dev; + int cmd; /* READ or WRITE */ + int errors; + int quiet; + unsigned long sector; + unsigned long nr_sectors; + unsigned long current_nr_sectors; + char * buffer; + struct semaphore * sem; + struct buffer_head * bh; + struct buffer_head * bhtail; + struct request * next; +}; + +struct blk_dev_struct { + void (*request_fn)(void); + struct request * current_request; + struct request plug; + struct tq_struct plug_tq; +}; + +struct sec_size { + unsigned block_size; + unsigned block_size_bits; +}; + +extern struct sec_size * blk_sec[MAX_BLKDEV]; +extern struct blk_dev_struct blk_dev[MAX_BLKDEV]; +extern struct wait_queue * wait_for_request; +extern void resetup_one_dev(struct gendisk *dev, int drive); + +#ifdef MACH +extern inline void unplug_device(void *data) { } +#else +extern void unplug_device(void * data); +#endif + +extern void make_request(int major,int rw, struct buffer_head * bh); + +/* md needs this function to remap requests */ +extern int md_map (int minor, kdev_t *rdev, unsigned long *rsector, unsigned long size); +extern int md_make_request (int major, int rw, struct buffer_head * bh); +extern int md_error (kdev_t mddev, kdev_t rdev); + +extern int * blk_size[MAX_BLKDEV]; + +extern int * blksize_size[MAX_BLKDEV]; + +extern int * hardsect_size[MAX_BLKDEV]; + +#endif diff --git a/linux/dev/include/linux/compile.h b/linux/dev/include/linux/compile.h new file mode 100644 index 0000000..7d43a20 --- /dev/null +++ b/linux/dev/include/linux/compile.h @@ -0,0 +1,6 @@ +#define UTS_VERSION "#11 Fri Apr 24 23:03:10 JST 1998" +#define LINUX_COMPILE_TIME "23:03:10" +#define LINUX_COMPILE_BY "somebody" +#define LINUX_COMPILE_HOST "unknown" +#define LINUX_COMPILE_DOMAIN "somewhere.org" +#define LINUX_COMPILER "gcc version 2.7.2.3" diff --git a/linux/dev/include/linux/etherdevice.h b/linux/dev/include/linux/etherdevice.h new file mode 100644 index 0000000..eb262b2 --- /dev/null +++ b/linux/dev/include/linux/etherdevice.h @@ -0,0 +1,62 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. NET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the Ethernet handlers. + * + * Version: @(#)eth.h 1.0.4 05/13/93 + * + * Authors: Ross Biro, <bir7@leland.Stanford.Edu> + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * + * Relocated to include/linux where it belongs by Alan Cox + * <gw4pts@gw4pts.ampr.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * WARNING: This move may well be temporary. This file will get merged with others RSN. + * + */ +#ifndef _LINUX_ETHERDEVICE_H +#define _LINUX_ETHERDEVICE_H + + +#include <linux/if_ether.h> + +#ifdef __KERNEL__ +extern int eth_header(struct sk_buff *skb, struct device *dev, + unsigned short type, void *daddr, + void *saddr, unsigned len); +extern int eth_rebuild_header(void *buff, struct device *dev, + unsigned long dst, struct sk_buff *skb); + +/* This cause skb->protocol = 0. I don't sure if this is really ok. 
+ * Last modified: 19980402 by OKUJI Yoshinori <okuji@kmc.kyoto-u.ac.jp> + */ +#ifdef MACH +#define eth_type_trans(skb, dev) ((unsigned short)0) +#else +extern unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev); +#endif + +extern void eth_header_cache_bind(struct hh_cache ** hhp, struct device *dev, + unsigned short htype, __u32 daddr); +extern void eth_header_cache_update(struct hh_cache *hh, struct device *dev, unsigned char * haddr); + +#ifdef MACH +#define eth_copy_and_sum(dest, src, length, base) \ + memcpy((dest)->data, src, length) +#else +extern void eth_copy_and_sum(struct sk_buff *dest, + unsigned char *src, int length, int base); +#endif + +extern struct device * init_etherdev(struct device *, int); + +#endif + +#endif /* _LINUX_ETHERDEVICE_H */ diff --git a/linux/dev/include/linux/fs.h b/linux/dev/include/linux/fs.h new file mode 100644 index 0000000..def2bc9 --- /dev/null +++ b/linux/dev/include/linux/fs.h @@ -0,0 +1,803 @@ +#ifndef _LINUX_FS_H +#define _LINUX_FS_H + +/* + * This file has definitions for some important file table + * structures etc. + */ + +#include <linux/config.h> +#include <linux/linkage.h> +#include <linux/limits.h> +#include <linux/wait.h> +#include <linux/types.h> +#include <linux/vfs.h> +#include <linux/net.h> +#include <linux/kdev_t.h> +#include <linux/ioctl.h> + +/* + * It's silly to have NR_OPEN bigger than NR_FILE, but I'll fix + * that later. Anyway, now the file code is no longer dependent + * on bitmaps in unsigned longs, but uses the new fd_set structure.. + * + * Some programs (notably those using select()) may have to be + * recompiled to take full advantage of the new limits.. + */ + +/* Fixed constants first: */ +#undef NR_OPEN +#define NR_OPEN 256 + +#define NR_SUPER 64 +#define BLOCK_SIZE 1024 +#define BLOCK_SIZE_BITS 10 + +/* And dynamically-tunable limits and defaults: */ +extern int max_inodes, nr_inodes; +extern int max_files, nr_files; +#define NR_INODE 3072 /* this should be bigger than NR_FILE */ +#define NR_FILE 1024 /* this can well be larger on a larger system */ + +#define MAY_EXEC 1 +#define MAY_WRITE 2 +#define MAY_READ 4 + +#define FMODE_READ 1 +#define FMODE_WRITE 2 + +#define READ 0 +#define WRITE 1 +#define READA 2 /* read-ahead - don't block if no resources */ +#define WRITEA 3 /* write-ahead - don't block if no resources */ + +#ifndef NULL +#define NULL ((void *) 0) +#endif + +#define NIL_FILP ((struct file *)0) +#define SEL_IN 1 +#define SEL_OUT 2 +#define SEL_EX 4 + +/* + * These are the fs-independent mount-flags: up to 16 flags are supported + */ +#define MS_RDONLY 1 /* Mount read-only */ +#define MS_NOSUID 2 /* Ignore suid and sgid bits */ +#define MS_NODEV 4 /* Disallow access to device special files */ +#define MS_NOEXEC 8 /* Disallow program execution */ +#define MS_SYNCHRONOUS 16 /* Writes are synced at once */ +#define MS_REMOUNT 32 /* Alter flags of a mounted FS */ +#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ +#define S_WRITE 128 /* Write on file/directory/symlink */ +#define S_APPEND 256 /* Append-only file */ +#define S_IMMUTABLE 512 /* Immutable file */ +#define MS_NOATIME 1024 /* Do not update access times. */ +#define S_BAD_INODE 2048 /* Marker for unreadable inodes */ +#define S_ZERO_WR 4096 /* Device accepts 0 length writes */ +/* + * Flags that can be altered by MS_REMOUNT + */ +#define MS_RMT_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS|MS_MANDLOCK|MS_NOATIME) + +/* + * Magic mount flag number. Has to be or-ed to the flag values. 
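+ *
+ * For example (an illustrative call, not taken from these headers) a
+ * mounter passes something like
+ *	mount("/dev/hda2", "/mnt", "ext2", MS_MGC_VAL | MS_RDONLY, NULL);
+ * and the kernel strips the magic with MS_MGC_MSK before looking at the
+ * low 16 flag bits.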
+ */ +#define MS_MGC_VAL 0xC0ED0000 /* magic flag number to indicate "new" flags */ +#define MS_MGC_MSK 0xffff0000 /* magic flag number mask */ + +/* + * Note that read-only etc flags are inode-specific: setting some file-system + * flags just means all the inodes inherit those flags by default. It might be + * possible to override it selectively if you really wanted to with some + * ioctl() that is not currently implemented. + * + * Exception: MS_RDONLY is always applied to the entire file system. + */ +#define IS_RDONLY(inode) (((inode)->i_sb) && ((inode)->i_sb->s_flags & MS_RDONLY)) +#define IS_NOSUID(inode) ((inode)->i_flags & MS_NOSUID) +#define IS_NODEV(inode) ((inode)->i_flags & MS_NODEV) +#define IS_NOEXEC(inode) ((inode)->i_flags & MS_NOEXEC) +#define IS_SYNC(inode) ((inode)->i_flags & MS_SYNCHRONOUS) +#define IS_MANDLOCK(inode) ((inode)->i_flags & MS_MANDLOCK) + +#define IS_WRITABLE(inode) ((inode)->i_flags & S_WRITE) +#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) +#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE) +#define IS_NOATIME(inode) ((inode)->i_flags & MS_NOATIME) +#define IS_ZERO_WR(inode) ((inode)->i_flags & S_ZERO_WR) + +#define UPDATE_ATIME(inode) \ + if (!IS_NOATIME(inode) && !IS_RDONLY(inode)) { \ + inode->i_atime = CURRENT_TIME; \ + inode->i_dirt = 1; \ + } + +/* the read-only stuff doesn't really belong here, but any other place is + probably as bad and I don't want to create yet another include file. */ + +#define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */ +#define BLKROGET _IO(0x12,94) /* get read-only status (0 = read_write) */ +#define BLKRRPART _IO(0x12,95) /* re-read partition table */ +#define BLKGETSIZE _IO(0x12,96) /* return device size */ +#define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */ +#define BLKRASET _IO(0x12,98) /* Set read ahead for block device */ +#define BLKRAGET _IO(0x12,99) /* get current read ahead setting */ + +#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ +#define FIBMAP _IO(0x00,1) /* bmap access */ +#define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */ + +#ifdef __KERNEL__ + +#include <asm/semaphore.h> +#include <asm/bitops.h> + +extern void buffer_init(void); +extern unsigned long inode_init(unsigned long start, unsigned long end); +extern unsigned long file_table_init(unsigned long start, unsigned long end); +extern unsigned long name_cache_init(unsigned long start, unsigned long end); + +typedef char buffer_block[BLOCK_SIZE]; + +/* bh state bits */ +#define BH_Uptodate 0 /* 1 if the buffer contains valid data */ +#define BH_Dirty 1 /* 1 if the buffer is dirty */ +#define BH_Lock 2 /* 1 if the buffer is locked */ +#define BH_Req 3 /* 0 if the buffer has been invalidated */ +#define BH_Touched 4 /* 1 if the buffer has been touched (aging) */ +#define BH_Has_aged 5 /* 1 if the buffer has been aged (aging) */ +#define BH_Protected 6 /* 1 if the buffer is protected */ +#define BH_FreeOnIO 7 /* 1 to discard the buffer_head after IO */ +#define BH_MD 8 /* 1 if the buffer is an MD request */ + +/* + * Try to keep the most commonly used fields in single cache lines (16 + * bytes) to improve performance. This ordering should be + * particularly beneficial on 32-bit processors. + * + * We use the first 16 bytes for the data which is used in searches + * over the block hash lists (ie. getblk(), find_buffer() and + * friends). + * + * The second 16 bytes we use for lru buffer scans, as used by + * sync_buffers() and refill_freelist(). 
-- sct + */ +struct buffer_head { + /* First cache line: */ + unsigned long b_blocknr; /* block number */ + kdev_t b_dev; /* device (B_FREE = free) */ + kdev_t b_rdev; /* Real device */ + unsigned long b_rsector; /* Real buffer location on disk */ + struct buffer_head * b_next; /* Hash queue list */ + struct buffer_head * b_this_page; /* circular list of buffers in one page */ + + /* Second cache line: */ + unsigned long b_state; /* buffer state bitmap (see above) */ + struct buffer_head * b_next_free; + unsigned int b_count; /* users using this block */ + unsigned long b_size; /* block size */ + + /* Non-performance-critical data follows. */ + char * b_data; /* pointer to data block (1024 bytes) */ + unsigned int b_list; /* List that this buffer appears */ + unsigned long b_flushtime; /* Time when this (dirty) buffer + * should be written */ + unsigned long b_lru_time; /* Time when this buffer was + * last used. */ + struct wait_queue * b_wait; + struct buffer_head * b_prev; /* doubly linked list of hash-queue */ + struct buffer_head * b_prev_free; /* doubly linked list of buffers */ + struct buffer_head * b_reqnext; /* request queue */ + +/* + * Some MD stuff like RAID5 needs special event handlers and + * special private buffer_head fields: + */ + void * personality; + void * private_bh; +}; + +static inline int buffer_uptodate(struct buffer_head * bh) +{ + return test_bit(BH_Uptodate, &bh->b_state); +} + +static inline int buffer_dirty(struct buffer_head * bh) +{ + return test_bit(BH_Dirty, &bh->b_state); +} + +static inline int buffer_locked(struct buffer_head * bh) +{ + return test_bit(BH_Lock, &bh->b_state); +} + +static inline int buffer_req(struct buffer_head * bh) +{ + return test_bit(BH_Req, &bh->b_state); +} + +static inline int buffer_touched(struct buffer_head * bh) +{ + return test_bit(BH_Touched, &bh->b_state); +} + +static inline int buffer_has_aged(struct buffer_head * bh) +{ + return test_bit(BH_Has_aged, &bh->b_state); +} + +static inline int buffer_protected(struct buffer_head * bh) +{ + return test_bit(BH_Protected, &bh->b_state); +} + +#ifndef MACH +#include <linux/pipe_fs_i.h> +#include <linux/minix_fs_i.h> +#include <linux/ext_fs_i.h> +#include <linux/ext2_fs_i.h> +#include <linux/hpfs_fs_i.h> +#include <linux/msdos_fs_i.h> +#include <linux/umsdos_fs_i.h> +#include <linux/iso_fs_i.h> +#include <linux/nfs_fs_i.h> +#include <linux/xia_fs_i.h> +#include <linux/sysv_fs_i.h> +#include <linux/affs_fs_i.h> +#include <linux/ufs_fs_i.h> +#endif + +/* + * Attribute flags. These should be or-ed together to figure out what + * has been changed! + */ +#define ATTR_MODE 1 +#define ATTR_UID 2 +#define ATTR_GID 4 +#define ATTR_SIZE 8 +#define ATTR_ATIME 16 +#define ATTR_MTIME 32 +#define ATTR_CTIME 64 +#define ATTR_ATIME_SET 128 +#define ATTR_MTIME_SET 256 +#define ATTR_FORCE 512 /* Not a change, but a change it */ + +/* + * This is the Inode Attributes structure, used for notify_change(). It + * uses the above definitions as flags, to know which values have changed. + * Also, in this manner, a Filesystem can look at only the values it cares + * about. Basically, these are the attributes that the VFS layer can + * request to change from the FS layer. + * + * Derek Atkins <warlord@MIT.EDU> 94-10-20 + */ +struct iattr { + unsigned int ia_valid; + umode_t ia_mode; + uid_t ia_uid; + gid_t ia_gid; + off_t ia_size; + time_t ia_atime; + time_t ia_mtime; + time_t ia_ctime; +}; + +#include <linux/quota.h> + +#ifdef MACH +/* Supress GCC's warnings. by OKUJI Yoshinori. 
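+ * Under MACH the glue code only needs these trimmed-down stand-ins for
+ * struct inode and struct file, carrying just the fields the drivers
+ * actually touch; the full Linux definitions are kept in the !MACH
+ * branch below.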
*/ +struct vm_area_struct; +struct page; + +struct inode +{ + umode_t i_mode; + kdev_t i_rdev; +}; + +struct file +{ + mode_t f_mode; + loff_t f_pos; + unsigned short f_flags; + int f_resid; + void *f_object; + void *f_np; +}; + +#else /* !MACH */ + +struct inode { + kdev_t i_dev; + unsigned long i_ino; + umode_t i_mode; + nlink_t i_nlink; + uid_t i_uid; + gid_t i_gid; + kdev_t i_rdev; + off_t i_size; + time_t i_atime; + time_t i_mtime; + time_t i_ctime; + unsigned long i_blksize; + unsigned long i_blocks; + unsigned long i_version; + unsigned long i_nrpages; + struct semaphore i_sem; + struct inode_operations *i_op; + struct super_block *i_sb; + struct wait_queue *i_wait; + struct file_lock *i_flock; + struct vm_area_struct *i_mmap; + struct page *i_pages; + struct dquot *i_dquot[MAXQUOTAS]; + struct inode *i_next, *i_prev; + struct inode *i_hash_next, *i_hash_prev; + struct inode *i_bound_to, *i_bound_by; + struct inode *i_mount; + unsigned long i_count; + unsigned short i_flags; + unsigned short i_writecount; + unsigned char i_lock; + unsigned char i_dirt; + unsigned char i_pipe; + unsigned char i_sock; + unsigned char i_seek; + unsigned char i_update; + unsigned char i_condemned; + union { + struct pipe_inode_info pipe_i; + struct minix_inode_info minix_i; + struct ext_inode_info ext_i; + struct ext2_inode_info ext2_i; + struct hpfs_inode_info hpfs_i; + struct msdos_inode_info msdos_i; + struct umsdos_inode_info umsdos_i; + struct iso_inode_info isofs_i; + struct nfs_inode_info nfs_i; + struct xiafs_inode_info xiafs_i; + struct sysv_inode_info sysv_i; + struct affs_inode_info affs_i; + struct ufs_inode_info ufs_i; + struct socket socket_i; + void * generic_ip; + } u; +}; + +struct fown_struct { + int pid; /* pid or -pgrp where SIGIO should be sent */ + uid_t uid, euid; /* uid/euid of process setting the owner */ +}; + +struct file { + mode_t f_mode; + loff_t f_pos; + unsigned short f_flags; + unsigned short f_count; + unsigned long f_reada, f_ramax, f_raend, f_ralen, f_rawin; + struct file *f_next, *f_prev; + struct fown_struct f_owner; + struct inode * f_inode; + struct file_operations * f_op; + unsigned long f_version; + void *private_data; /* needed for tty driver, and maybe others */ +}; +#endif /* !MACH */ + +#define FL_POSIX 1 +#define FL_FLOCK 2 +#define FL_BROKEN 4 /* broken flock() emulation */ +#define FL_ACCESS 8 /* for processes suspended by mandatory locking */ + +struct file_lock { + struct file_lock *fl_next; /* singly linked list for this inode */ + struct file_lock *fl_nextlink; /* doubly linked list of all locks */ + struct file_lock *fl_prevlink; /* used to simplify lock removal */ + struct file_lock *fl_nextblock; /* circular list of blocked processes */ + struct file_lock *fl_prevblock; + struct task_struct *fl_owner; + struct wait_queue *fl_wait; + struct file *fl_file; + unsigned char fl_flags; + unsigned char fl_type; + off_t fl_start; + off_t fl_end; +}; + +#include <linux/fcntl.h> + +extern int fcntl_getlk(unsigned int fd, struct flock *l); +extern int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l); +extern void locks_remove_locks(struct task_struct *task, struct file *filp); + +#include <linux/stat.h> + +#define FLOCK_VERIFY_READ 1 +#define FLOCK_VERIFY_WRITE 2 + +extern int locks_mandatory_locked(struct inode *inode); +extern int locks_mandatory_area(int read_write, struct inode *inode, + struct file *filp, unsigned int offset, + unsigned int count); + +#ifndef MACH +extern inline int locks_verify_locked(struct inode *inode) +{ + /* 
Candidates for mandatory locking have the setgid bit set + * but no group execute bit - an otherwise meaningless combination. + */ + if (IS_MANDLOCK(inode) && + (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) + return (locks_mandatory_locked(inode)); + return (0); +} +extern inline int locks_verify_area(int read_write, struct inode *inode, + struct file *filp, unsigned int offset, + unsigned int count) +{ + /* Candidates for mandatory locking have the setgid bit set + * but no group execute bit - an otherwise meaningless combination. + */ + if (IS_MANDLOCK(inode) && + (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) + return (locks_mandatory_area(read_write, inode, filp, offset, + count)); + return (0); +} +#endif + +struct fasync_struct { + int magic; + struct fasync_struct *fa_next; /* singly linked list */ + struct file *fa_file; +}; + +#define FASYNC_MAGIC 0x4601 + +extern int fasync_helper(struct inode *, struct file *, int, struct fasync_struct **); + +#ifndef MACH +#include <linux/minix_fs_sb.h> +#include <linux/ext_fs_sb.h> +#include <linux/ext2_fs_sb.h> +#include <linux/hpfs_fs_sb.h> +#include <linux/msdos_fs_sb.h> +#include <linux/iso_fs_sb.h> +#include <linux/nfs_fs_sb.h> +#include <linux/xia_fs_sb.h> +#include <linux/sysv_fs_sb.h> +#include <linux/affs_fs_sb.h> +#include <linux/ufs_fs_sb.h> + +struct super_block { + kdev_t s_dev; + unsigned long s_blocksize; + unsigned char s_blocksize_bits; + unsigned char s_lock; + unsigned char s_rd_only; + unsigned char s_dirt; + struct file_system_type *s_type; + struct super_operations *s_op; + struct dquot_operations *dq_op; + unsigned long s_flags; + unsigned long s_magic; + unsigned long s_time; + struct inode * s_covered; + struct inode * s_mounted; + struct wait_queue * s_wait; + union { + struct minix_sb_info minix_sb; + struct ext_sb_info ext_sb; + struct ext2_sb_info ext2_sb; + struct hpfs_sb_info hpfs_sb; + struct msdos_sb_info msdos_sb; + struct isofs_sb_info isofs_sb; + struct nfs_sb_info nfs_sb; + struct xiafs_sb_info xiafs_sb; + struct sysv_sb_info sysv_sb; + struct affs_sb_info affs_sb; + struct ufs_sb_info ufs_sb; + void *generic_sbp; + } u; +}; +#endif /* !MACH */ + +/* + * This is the "filldir" function type, used by readdir() to let + * the kernel specify what kind of dirent layout it wants to have. + * This allows the kernel to read directories into kernel space or + * to have different dirent layouts depending on the binary type. 
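+ *
+ * As a rough usage sketch (an assumed shape, not copied from any one
+ * filesystem), a readdir() implementation hands each entry to the
+ * callback and stops once the callback reports that its buffer is full:
+ *
+ *	if (filldir(dirent, name, namelen, filp->f_pos, ino) < 0)
+ *		break;
+ *
+ * where `dirent' is the opaque cookie the caller passed down.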
+ */ +typedef int (*filldir_t)(void *, const char *, int, off_t, ino_t); + +struct file_operations { + int (*lseek) (struct inode *, struct file *, off_t, int); + int (*read) (struct inode *, struct file *, char *, int); + int (*write) (struct inode *, struct file *, const char *, int); + int (*readdir) (struct inode *, struct file *, void *, filldir_t); + int (*select) (struct inode *, struct file *, int, select_table *); + int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long); + int (*mmap) (struct inode *, struct file *, struct vm_area_struct *); + int (*open) (struct inode *, struct file *); + void (*release) (struct inode *, struct file *); + int (*fsync) (struct inode *, struct file *); + int (*fasync) (struct inode *, struct file *, int); + int (*check_media_change) (kdev_t dev); + int (*revalidate) (kdev_t dev); +}; + +struct inode_operations { + struct file_operations * default_file_ops; + int (*create) (struct inode *,const char *,int,int,struct inode **); + int (*lookup) (struct inode *,const char *,int,struct inode **); + int (*link) (struct inode *,struct inode *,const char *,int); + int (*unlink) (struct inode *,const char *,int); + int (*symlink) (struct inode *,const char *,int,const char *); + int (*mkdir) (struct inode *,const char *,int,int); + int (*rmdir) (struct inode *,const char *,int); + int (*mknod) (struct inode *,const char *,int,int,int); + int (*rename) (struct inode *,const char *,int,struct inode *,const char *,int, int); + int (*readlink) (struct inode *,char *,int); + int (*follow_link) (struct inode *,struct inode *,int,int,struct inode **); + int (*readpage) (struct inode *, struct page *); + int (*writepage) (struct inode *, struct page *); + int (*bmap) (struct inode *,int); + void (*truncate) (struct inode *); + int (*permission) (struct inode *, int); + int (*smap) (struct inode *,int); +}; + +struct super_operations { + void (*read_inode) (struct inode *); + int (*notify_change) (struct inode *, struct iattr *); + void (*write_inode) (struct inode *); + void (*put_inode) (struct inode *); + void (*put_super) (struct super_block *); + void (*write_super) (struct super_block *); + void (*statfs) (struct super_block *, struct statfs *, int); + int (*remount_fs) (struct super_block *, int *, char *); +}; + +struct dquot_operations { + void (*initialize) (struct inode *, short); + void (*drop) (struct inode *); + int (*alloc_block) (const struct inode *, unsigned long); + int (*alloc_inode) (const struct inode *, unsigned long); + void (*free_block) (const struct inode *, unsigned long); + void (*free_inode) (const struct inode *, unsigned long); + int (*transfer) (struct inode *, struct iattr *, char); +}; + +struct file_system_type { + struct super_block *(*read_super) (struct super_block *, void *, int); + const char *name; + int requires_dev; + struct file_system_type * next; +}; + +extern int register_filesystem(struct file_system_type *); +extern int unregister_filesystem(struct file_system_type *); + +asmlinkage int sys_open(const char *, int, int); +asmlinkage int sys_close(unsigned int); /* yes, it's really unsigned */ +asmlinkage int sys_read(unsigned int, char *, int); + +extern void kill_fasync(struct fasync_struct *fa, int sig); + +extern int getname(const char * filename, char **result); +extern void putname(char * name); +extern int do_truncate(struct inode *, unsigned long); +extern int register_blkdev(unsigned int, const char *, struct file_operations *); +extern int unregister_blkdev(unsigned int major, const char * 
name); +extern int blkdev_open(struct inode * inode, struct file * filp); +extern void blkdev_release (struct inode * inode); +extern struct file_operations def_blk_fops; +extern struct inode_operations blkdev_inode_operations; + +extern int register_chrdev(unsigned int, const char *, struct file_operations *); +extern int unregister_chrdev(unsigned int major, const char * name); +extern int chrdev_open(struct inode * inode, struct file * filp); +extern struct file_operations def_chr_fops; +extern struct inode_operations chrdev_inode_operations; + +extern void init_fifo(struct inode * inode); +extern struct inode_operations fifo_inode_operations; + +extern struct file_operations connecting_fifo_fops; +extern struct file_operations read_fifo_fops; +extern struct file_operations write_fifo_fops; +extern struct file_operations rdwr_fifo_fops; +extern struct file_operations read_pipe_fops; +extern struct file_operations write_pipe_fops; +extern struct file_operations rdwr_pipe_fops; + +extern struct file_system_type *get_fs_type(const char *name); + +extern int fs_may_mount(kdev_t dev); +extern int fs_may_umount(kdev_t dev, struct inode * mount_root); +extern int fs_may_remount_ro(kdev_t dev); + +extern struct file *first_file; +extern struct super_block *super_blocks; + +extern void refile_buffer(struct buffer_head * buf); +extern void set_writetime(struct buffer_head * buf, int flag); +extern int try_to_free_buffer(struct buffer_head*, struct buffer_head**, int); + +extern int nr_buffers; +extern int buffermem; +extern int nr_buffer_heads; + +#define BUF_CLEAN 0 +#define BUF_LOCKED 1 /* Buffers scheduled for write */ +#define BUF_LOCKED1 2 /* Supers, inodes */ +#define BUF_DIRTY 3 /* Dirty buffers, not yet scheduled for write */ +#define NR_LIST 4 + +#ifdef MACH +static inline void +mark_buffer_uptodate (struct buffer_head *bh, int on) +{ + if (on) + set_bit (BH_Uptodate, &bh->b_state); + else + clear_bit (BH_Uptodate, &bh->b_state); +} +#else +void mark_buffer_uptodate(struct buffer_head * bh, int on); +#endif + +static inline void mark_buffer_clean(struct buffer_head * bh) +{ +#ifdef MACH + clear_bit (BH_Dirty, &bh->b_state); +#else + if (clear_bit(BH_Dirty, &bh->b_state)) { + if (bh->b_list == BUF_DIRTY) + refile_buffer(bh); + } +#endif +} + +static inline void mark_buffer_dirty(struct buffer_head * bh, int flag) +{ +#ifdef MACH + set_bit (BH_Dirty, &bh->b_state); +#else + if (!set_bit(BH_Dirty, &bh->b_state)) { + set_writetime(bh, flag); + if (bh->b_list != BUF_DIRTY) + refile_buffer(bh); + } +#endif +} + +extern int check_disk_change(kdev_t dev); + +#ifdef MACH +#define invalidate_inodes(dev) +#else +extern void invalidate_inodes(kdev_t dev); +#endif + +extern void invalidate_inode_pages(struct inode *); + +#ifdef MACH +#define invalidate_buffers(dev) +#else +extern void invalidate_buffers(kdev_t dev); +#endif + +extern int floppy_is_wp(int minor); +extern void sync_inodes(kdev_t dev); + +#ifdef MACH +#define sync_dev(dev) +#define fsync_dev(dev) +#else +extern void sync_dev(kdev_t dev); +extern int fsync_dev(kdev_t dev); +#endif + +extern void sync_supers(kdev_t dev); +extern int bmap(struct inode * inode,int block); +extern int notify_change(struct inode *, struct iattr *); +extern int namei(const char * pathname, struct inode ** res_inode); +extern int lnamei(const char * pathname, struct inode ** res_inode); + +#ifdef MACH +#define permission(inode, mask) 0 +#else +extern int permission(struct inode * inode,int mask); +#endif + +extern int get_write_access(struct inode *inode); 
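+/* get_write_access() and put_write_access() are used as a pair around
+   anything that may write an inode, keeping i_writecount balanced.
+   A minimal sketch (assuming the usual convention that 0 means success):
+
+	error = get_write_access(inode);
+	if (!error) {
+		... do the write ...
+		put_write_access(inode);
+	}
+ */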
+extern void put_write_access(struct inode *inode); +extern int open_namei(const char * pathname, int flag, int mode, + struct inode ** res_inode, struct inode * base); +extern int do_mknod(const char * filename, int mode, dev_t dev); +extern int do_pipe(int *); +extern void iput(struct inode * inode); +extern struct inode * __iget(struct super_block * sb,int nr,int crsmnt); +extern struct inode * get_empty_inode(void); +extern void insert_inode_hash(struct inode *); +extern void clear_inode(struct inode *); +extern struct inode * get_pipe_inode(void); +extern void make_bad_inode(struct inode *); +extern int get_unused_fd(void); +extern void put_unused_fd(int); +extern struct file * get_empty_filp(void); +extern int close_fp(struct file *filp); +extern struct buffer_head * get_hash_table(kdev_t dev, int block, int size); +extern struct buffer_head * getblk(kdev_t dev, int block, int size); +extern void ll_rw_block(int rw, int nr, struct buffer_head * bh[], int quiet); +extern void ll_rw_page(int rw, kdev_t dev, unsigned long nr, char * buffer); +extern void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buffer); +extern int is_read_only(kdev_t dev); +extern void __brelse(struct buffer_head *buf); +extern inline void brelse(struct buffer_head *buf) +{ + if (buf) + __brelse(buf); +} +extern void __bforget(struct buffer_head *buf); +extern inline void bforget(struct buffer_head *buf) +{ + if (buf) + __bforget(buf); +} +extern void set_blocksize(kdev_t dev, int size); +extern struct buffer_head * bread(kdev_t dev, int block, int size); +extern struct buffer_head * breada(kdev_t dev,int block, int size, + unsigned int pos, unsigned int filesize); + +extern int generic_readpage(struct inode *, struct page *); +extern int generic_file_read(struct inode *, struct file *, char *, int); +extern int generic_file_mmap(struct inode *, struct file *, struct vm_area_struct *); +extern int brw_page(int, struct page *, kdev_t, int [], int, int); + +extern void put_super(kdev_t dev); +unsigned long generate_cluster(kdev_t dev, int b[], int size); +extern kdev_t ROOT_DEV; + +extern void show_buffers(void); +extern void mount_root(void); + +#ifdef CONFIG_BLK_DEV_INITRD +extern kdev_t real_root_dev; +extern int change_root(kdev_t new_root_dev,const char *put_old); +#endif + +extern int char_read(struct inode *, struct file *, char *, int); +extern int block_read(struct inode *, struct file *, char *, int); +extern int read_ahead[]; + +extern int char_write(struct inode *, struct file *, const char *, int); +extern int block_write(struct inode *, struct file *, const char *, int); + +extern int block_fsync(struct inode *, struct file *); +extern int file_fsync(struct inode *, struct file *); + +extern void dcache_add(struct inode *, const char *, int, unsigned long); +extern int dcache_lookup(struct inode *, const char *, int, unsigned long *); + +extern int inode_change_ok(struct inode *, struct iattr *); +extern void inode_setattr(struct inode *, struct iattr *); + +extern inline struct inode * iget(struct super_block * sb,int nr) +{ + return __iget(sb, nr, 1); +} + +/* kludge to get SCSI modules working */ +#ifndef MACH +#include <linux/minix_fs.h> +#include <linux/minix_fs_sb.h> +#endif + +#endif /* __KERNEL__ */ + +#endif diff --git a/linux/dev/include/linux/genhd.h b/linux/dev/include/linux/genhd.h new file mode 100644 index 0000000..f19015d --- /dev/null +++ b/linux/dev/include/linux/genhd.h @@ -0,0 +1,208 @@ +#ifndef _LINUX_GENHD_H +#define _LINUX_GENHD_H + +/* + * genhd.h Copyright 
(C) 1992 Drew Eckhardt + * Generic hard disk header file by + * Drew Eckhardt + * + * <drew@colorado.edu> + */ + +#include <linux/config.h> + +#define CONFIG_MSDOS_PARTITION 1 + +#ifdef __alpha__ +#define CONFIG_OSF_PARTITION 1 +#endif + +#if defined(__sparc__) || defined(CONFIG_SMD_DISKLABEL) +#define CONFIG_SUN_PARTITION 1 +#endif + +/* These three have identical behaviour; use the second one if DOS fdisk gets + confused about extended/logical partitions starting past cylinder 1023. */ +#define DOS_EXTENDED_PARTITION 5 +#define LINUX_EXTENDED_PARTITION 0x85 +#define WIN98_EXTENDED_PARTITION 0x0f + +#define DM6_PARTITION 0x54 /* has DDO: use xlated geom & offset */ +#define EZD_PARTITION 0x55 /* EZ-DRIVE: same as DM6 (we think) */ +#define DM6_AUX1PARTITION 0x51 /* no DDO: use xlated geom */ +#define DM6_AUX3PARTITION 0x53 /* no DDO: use xlated geom */ + +#ifdef MACH_INCLUDE +struct linux_partition +{ +#else +struct partition { +#endif + unsigned char boot_ind; /* 0x80 - active */ + unsigned char head; /* starting head */ + unsigned char sector; /* starting sector */ + unsigned char cyl; /* starting cylinder */ + unsigned char sys_ind; /* What partition type */ + unsigned char end_head; /* end head */ + unsigned char end_sector; /* end sector */ + unsigned char end_cyl; /* end cylinder */ + unsigned int start_sect; /* starting sector counting from 0 */ + unsigned int nr_sects; /* nr of sectors in partition */ +} __attribute((packed)); /* Give a polite hint to egcs/alpha to generate + unaligned operations */ + +struct hd_struct { + long start_sect; + long nr_sects; +}; + +struct gendisk { + int major; /* major number of driver */ + const char *major_name; /* name of major driver */ + int minor_shift; /* number of times minor is shifted to + get real minor */ + int max_p; /* maximum partitions per device */ + int max_nr; /* maximum number of real devices */ + + void (*init)(struct gendisk *); /* Initialization called before we do our thing */ + struct hd_struct *part; /* partition table */ + int *sizes; /* device size in blocks, copied to blk_size[] */ + int nr_real; /* number of real devices */ + + void *real_devices; /* internal use */ + struct gendisk *next; +}; + +#ifdef CONFIG_BSD_DISKLABEL +/* + * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il> + */ + +#define BSD_PARTITION 0xa5 /* Partition ID */ + +#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */ +#define BSD_MAXPARTITIONS 8 +#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */ +struct bsd_disklabel { + __u32 d_magic; /* the magic number */ + __s16 d_type; /* drive type */ + __s16 d_subtype; /* controller/d_type specific */ + char d_typename[16]; /* type name, e.g. "eagle" */ + char d_packname[16]; /* pack identifier */ + __u32 d_secsize; /* # of bytes per sector */ + __u32 d_nsectors; /* # of data sectors per track */ + __u32 d_ntracks; /* # of tracks per cylinder */ + __u32 d_ncylinders; /* # of data cylinders per unit */ + __u32 d_secpercyl; /* # of data sectors per cylinder */ + __u32 d_secperunit; /* # of data sectors per unit */ + __u16 d_sparespertrack; /* # of spare sectors per track */ + __u16 d_sparespercyl; /* # of spare sectors per cylinder */ + __u32 d_acylinders; /* # of alt. 
cylinders per unit */ + __u16 d_rpm; /* rotational speed */ + __u16 d_interleave; /* hardware sector interleave */ + __u16 d_trackskew; /* sector 0 skew, per track */ + __u16 d_cylskew; /* sector 0 skew, per cylinder */ + __u32 d_headswitch; /* head switch time, usec */ + __u32 d_trkseek; /* track-to-track seek, usec */ + __u32 d_flags; /* generic flags */ +#define NDDATA 5 + __u32 d_drivedata[NDDATA]; /* drive-type specific information */ +#define NSPARE 5 + __u32 d_spare[NSPARE]; /* reserved for future use */ + __u32 d_magic2; /* the magic number (again) */ + __u16 d_checksum; /* xor of data incl. partitions */ + + /* filesystem and partition information: */ + __u16 d_npartitions; /* number of partitions in following */ + __u32 d_bbsize; /* size of boot area at sn0, bytes */ + __u32 d_sbsize; /* max size of fs superblock, bytes */ + struct bsd_partition { /* the partition table */ + __u32 p_size; /* number of sectors in partition */ + __u32 p_offset; /* starting sector */ + __u32 p_fsize; /* filesystem basic fragment size */ + __u8 p_fstype; /* filesystem type, see below */ + __u8 p_frag; /* filesystem fragments per block */ + __u16 p_cpg; /* filesystem cylinders per group */ + } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */ +}; + +#endif /* CONFIG_BSD_DISKLABEL */ + +#ifdef CONFIG_GPT_DISKLABEL +/* + * GPT disklabel support by наб <nabijaczleweli@gmail.com> + * + * Based on UEFI specification 2.8A (current as of May 2020): + * https://uefi.org/specifications + * https://uefi.org/sites/default/files/resources/UEFI_Spec_2_8_A_Feb14.pdf + * + * CRC32 behaviour (final ^ ~0) courtesy of util-linux documentation: + * https://git.kernel.org/pub/scm/utils/util-linux/util-linux.git/tree/libblkid/src/partitions/gpt.c?id=042f62dfc514da177c148c257e4dcb32e5f8379d#n104 + */ + +#define GPT_PARTITION 0xee /* Partition ID in MBR */ + +#define GPT_GUID_SIZE 16 +struct gpt_guid { + __u32 g_time_low; /* Low field of timestamp */ + __u16 g_time_mid; /* Medium field of timestamp */ + __u16 g_time_high_version; /* High field of timestamp and version */ + __u8 g_clock_sec_high; /* High field of clock sequence and variant */ + __u8 g_clock_sec_low; /* Low field of clock sequence */ + __u8 g_node_id[6]; /* Spatially unique node identifier (MAC address or urandom) */ +} __attribute((packed)); +typedef char __gpt_guid_right_size[(sizeof(struct gpt_guid) == GPT_GUID_SIZE) ? 
1 : -1]; + +static const struct gpt_guid GPT_GUID_TYPE_UNUSED = {0,0,0,0,0,{0,0,0,0,0,0}}; + +#define GPT_SIGNATURE "EFI PART" /* The header signauture */ +#define GPT_REVISION (0x00010000UL) /* Little-endian on disk */ +#define GPT_HEADER_SIZE 92 +#define GPT_MAXPARTITIONS 128 +struct gpt_disklabel_header { + char h_signature[8]; /* Must match GPT_SIGNATURE */ + __u32 h_revision; /* Disklabel revision, must match GPT_REVISION */ + __u32 h_header_size; /* Must match GPT_HEADER_SIZE */ + __u32 h_header_crc; /* CRC32 of header, zero for calculation */ + __u32 h_reserved; /* Must be zero */ + __u64 h_lba_current; /* LBA of this copy of the header */ + __u64 h_lba_backup; /* LBA of the second (backup) copy of the header */ + __u64 h_lba_usable_first; /* First usable LBA for partitions (last LBA of primary table + 1) */ + __u64 h_lba_usable_last; /* Last usable LBA for partitions (first LBA of secondary table - 1) */ + struct gpt_guid h_guid; /* ID of the disk */ + __u64 h_part_table_lba; /* First LBA of the partition table (usually 2 in primary header) */ + __u32 h_part_table_len; /* Amount of entries in the partition table */ + __u32 h_part_table_entry_size; /* Size of each partition entry (usually 128) */ + __u32 h_part_table_crc; /* CRC32 of entire partition table, starts at h_part_table_lba, is h_part_table_len*h_part_table_entry_size long */ + /* Rest of block must be zero */ +} __attribute((packed)); +typedef char __gpt_header_right_size[(sizeof(struct gpt_disklabel_header) == GPT_HEADER_SIZE) ? 1 : -1]; + +/* 3-47: reserved; 48-63: defined for individual partition types. */ +#define GPT_PARTITION_ATTR_PLATFORM_REQUIRED (1ULL << 0) /* Required by the platform to function */ +#define GPT_PARTITION_ATTR_EFI_IGNORE (1ULL << 1) /* To be ignored by the EFI firmware */ +#define GPT_PARTITION_ATTR_BIOS_BOOTABLE (1ULL << 2) /* Equivalent to MBR active flag */ + +#define GPT_PARTITION_ENTRY_SIZE 128 /* Minimum size, implementations must respect bigger vendor-specific entries */ +struct gpt_disklabel_part { + struct gpt_guid p_type; /* Partition type GUID */ + struct gpt_guid p_guid; /* ID of the partition */ + __u64 p_lba_first; /* First LBA of the partition */ + __u64 p_lba_last; /* Last LBA of the partition */ + __u64 p_attrs; /* Partition attribute bitfield, see above */ + __u16 p_name[36]; /* Display name of partition, UTF-16 */ +} __attribute((packed)); +typedef char __gpt_part_entry_right_size[(sizeof(struct gpt_disklabel_part) == GPT_PARTITION_ENTRY_SIZE) ? 1 : -1]; +#endif /* CONFIG_GPT_DISKLABEL */ + +extern struct gendisk *gendisk_head; /* linked list of disks */ + +/* + * disk_name() is used by genhd.c and md.c. + * It formats the devicename of the indicated disk + * into the supplied buffer, and returns a pointer + * to that same buffer (for convenience). + */ +char *disk_name (struct gendisk *hd, int minor, char *buf); + +#endif diff --git a/linux/dev/include/linux/if.h b/linux/dev/include/linux/if.h new file mode 100644 index 0000000..50dd138 --- /dev/null +++ b/linux/dev/include/linux/if.h @@ -0,0 +1,184 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the INET interface module. + * + * Version: @(#)if.h 1.0.2 04/18/93 + * + * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1982-1988 + * Ross Biro, <bir7@leland.Stanford.Edu> + * Fred N. 
van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_IF_H +#define _LINUX_IF_H + +#include <linux/types.h> /* for "caddr_t" et al */ +#include <linux/socket.h> /* for "struct sockaddr" et al */ + +/* Standard interface flags. */ + +#ifdef MACH_INCLUDE + +#define LINUX_IFF_UP 0x1 /* interface is up */ +#define LINUX_IFF_BROADCAST 0x2 /* broadcast address valid */ +#define LINUX_IFF_DEBUG 0x4 /* turn on debugging */ +#define LINUX_IFF_LOOPBACK 0x8 /* is a loopback net */ +#define LINUX_IFF_POINTOPOINT 0x10 /* interface is has p-p link */ +#define LINUX_IFF_NOTRAILERS 0x20 /* avoid use of trailers */ +#define LINUX_IFF_RUNNING 0x40 /* resources allocated */ +#define LINUX_IFF_NOARP 0x80 /* no ARP protocol */ +#define LINUX_IFF_PROMISC 0x100 /* receive all packets */ +/* Not supported */ +#define LINUX_IFF_ALLMULTI 0x200 /* receive all multicast packets*/ + +#define LINUX_IFF_MASTER 0x400 /* master of a load balancer */ +#define LINUX_IFF_SLAVE 0x800 /* slave of a load balancer */ + +#define LINUX_IFF_MULTICAST 0x1000 /* Supports multicast */ +#define LINUX_IFF_SOFTHEADERS 0x2000 /* Device cannot construct headers + * until broadcast time. Therefore + * SOCK_PACKET must call header + * construction. Private flag. + * Never visible outside of kernel. + */ + +#else /* !MACH_INCLUDE */ + +#define IFF_UP 0x1 /* interface is up */ +#define IFF_BROADCAST 0x2 /* broadcast address valid */ +#define IFF_DEBUG 0x4 /* turn on debugging */ +#define IFF_LOOPBACK 0x8 /* is a loopback net */ +#define IFF_POINTOPOINT 0x10 /* interface is has p-p link */ +#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */ +#define IFF_RUNNING 0x40 /* resources allocated */ +#define IFF_NOARP 0x80 /* no ARP protocol */ +#define IFF_PROMISC 0x100 /* receive all packets */ +/* Not supported */ +#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/ + +#define IFF_MASTER 0x400 /* master of a load balancer */ +#define IFF_SLAVE 0x800 /* slave of a load balancer */ + +#define IFF_MULTICAST 0x1000 /* Supports multicast */ +#define IFF_SOFTHEADERS 0x2000 /* Device cannot construct headers + * until broadcast time. Therefore + * SOCK_PACKET must call header + * construction. Private flag. + * Never visible outside of kernel. + */ +#endif /* !MACH_INCLUDE */ + +/* + * The ifaddr structure contains information about one address + * of an interface. They are maintained by the different address + * families, are allocated and attached when an address is set, + * and are linked together so all addresses for an interface can + * be located. + */ + +struct ifaddr +{ + struct sockaddr ifa_addr; /* address of interface */ + union { + struct sockaddr ifu_broadaddr; + struct sockaddr ifu_dstaddr; + } ifa_ifu; + struct iface *ifa_ifp; /* back-pointer to interface */ + struct ifaddr *ifa_next; /* next address for interface */ +}; + +#define ifa_broadaddr ifa_ifu.ifu_broadaddr /* broadcast address */ +#define ifa_dstaddr ifa_ifu.ifu_dstaddr /* other end of link */ + +/* + * Device mapping structure. I'd just gone off and designed a + * beautiful scheme using only loadable modules with arguments + * for driver options and along come the PCMCIA people 8) + * + * Ah well. The get() side of this is good for WDSETUP, and it'll + * be handy for debugging things. 
The set side is fine for now and + * being very small might be worth keeping for clean configuration. + */ + +struct ifmap +{ + unsigned long mem_start; + unsigned long mem_end; + unsigned short base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; + /* 3 bytes spare */ +}; + +/* + * Interface request structure used for socket + * ioctl's. All interface ioctl's must have parameter + * definitions which begin with ifr_name. The + * remainder may be interface specific. + */ + +struct ifreq +{ +#define IFHWADDRLEN 6 +#define IFNAMSIZ 16 + union + { + char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + } ifr_ifrn; + + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short ifru_flags; + int ifru_metric; + int ifru_mtu; + struct ifmap ifru_map; + char ifru_slave[IFNAMSIZ]; /* Just fits the size */ + caddr_t ifru_data; + } ifr_ifru; +}; + +#define ifr_name ifr_ifrn.ifrn_name /* interface name */ +#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */ +#define ifr_addr ifr_ifru.ifru_addr /* address */ +#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-p lnk */ +#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */ +#define ifr_netmask ifr_ifru.ifru_netmask /* interface net mask */ +#define ifr_flags ifr_ifru.ifru_flags /* flags */ +#define ifr_metric ifr_ifru.ifru_metric /* metric */ +#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */ +#define ifr_map ifr_ifru.ifru_map /* device map */ +#define ifr_slave ifr_ifru.ifru_slave /* slave device */ +#define ifr_data ifr_ifru.ifru_data /* for use by interface */ + +/* + * Structure used in SIOCGIFCONF request. + * Used to retrieve interface configuration + * for machine (useful for programs which + * must know all networks accessible). + */ + +struct ifconf +{ + int ifc_len; /* size of buffer */ + union + { + caddr_t ifcu_buf; + struct ifreq *ifcu_req; + } ifc_ifcu; +}; +#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */ +#define ifc_req ifc_ifcu.ifcu_req /* array of structures */ + +#endif /* _LINUX_IF_H */ diff --git a/linux/dev/include/linux/kernel.h b/linux/dev/include/linux/kernel.h new file mode 100644 index 0000000..9c60b41 --- /dev/null +++ b/linux/dev/include/linux/kernel.h @@ -0,0 +1,107 @@ +#ifndef _LINUX_KERNEL_H +#define _LINUX_KERNEL_H + +/* + * 'kernel.h' contains some often-used function prototypes etc + */ + +#ifdef __KERNEL__ + +#include <stdarg.h> +#include <linux/linkage.h> +#include <linux/compiler.h> + +#define INT_MAX ((int)(~0U>>1)) +#define UINT_MAX (~0U) +#define LONG_MAX ((long)(~0UL>>1)) +#define ULONG_MAX (~0UL) + +#define STACK_MAGIC 0xdeadbeef + +#define KERN_EMERG "<0>" /* system is unusable */ +#define KERN_ALERT "<1>" /* action must be taken immediately */ +#define KERN_CRIT "<2>" /* critical conditions */ +#define KERN_ERR "<3>" /* error conditions */ +#define KERN_WARNING "<4>" /* warning conditions */ +#define KERN_NOTICE "<5>" /* normal but significant condition */ +#define KERN_INFO "<6>" /* informational */ +#define KERN_DEBUG "<7>" /* debug-level messages */ + +# define NORET_TYPE /**/ +# define ATTRIB_NORET __attribute__((noreturn)) +# define NORET_AND noreturn, + +extern void math_error(void); + +/* Use Mach's panic. 
*/ +#include <kern/debug.h> + +NORET_TYPE void do_exit(long error_code) + ATTRIB_NORET; +extern unsigned long simple_strtoul(const char *,char **,unsigned int); + +extern int linux_sprintf(char *buf, const char *fmt, ...); +extern int linux_vsprintf(char *buf, const char *fmt, va_list args); + +#ifndef MACH_INCLUDE +#define sprintf linux_sprintf +#define vsprintf linux_vsprintf +#endif + +extern int session_of_pgrp(int pgrp); + +extern int kill_proc(int pid, int sig, int priv); +extern int kill_pg(int pgrp, int sig, int priv); +extern int kill_sl(int sess, int sig, int priv); + +asmlinkage int printk(const char * fmt, ...) + __attribute__ ((format (printf, 1, 2))); + +#if DEBUG +#define pr_debug(fmt,arg...) \ + printk(KERN_DEBUG fmt,##arg) +#else +#define pr_debug(fmt,arg...) \ + do { } while (0) +#endif + +#define pr_info(fmt,arg...) \ + printk(KERN_INFO fmt,##arg) + +/* + * "suser()" checks against the effective user id, while "fsuser()" + * is used for file permission checking and checks against the fsuid.. + */ +#ifdef MACH +#define fsuser() 1 +#else +#define fsuser() (current->fsuid == 0) +#endif + +/* + * Display an IP address in readable format. + */ + +#define NIPQUAD(addr) \ + (((addr) >> 0) & 0xff), \ + (((addr) >> 8) & 0xff), \ + (((addr) >> 16) & 0xff), \ + (((addr) >> 24) & 0xff) + +#endif /* __KERNEL__ */ + +#define SI_LOAD_SHIFT 16 +struct sysinfo { + long uptime; /* Seconds since boot */ + unsigned long loads[3]; /* 1, 5, and 15 minute load averages */ + unsigned long totalram; /* Total usable main memory size */ + unsigned long freeram; /* Available memory size */ + unsigned long sharedram; /* Amount of shared memory */ + unsigned long bufferram; /* Memory used by buffers */ + unsigned long totalswap; /* Total swap space size */ + unsigned long freeswap; /* swap space still available */ + unsigned short procs; /* Number of current processes */ + char _f[22]; /* Pads structure to 64 bytes */ +}; + +#endif diff --git a/linux/dev/include/linux/locks.h b/linux/dev/include/linux/locks.h new file mode 100644 index 0000000..ae063fb --- /dev/null +++ b/linux/dev/include/linux/locks.h @@ -0,0 +1,66 @@ +#ifndef _LINUX_LOCKS_H +#define _LINUX_LOCKS_H + +#ifndef _LINUX_MM_H +#include <linux/mm.h> +#endif +#ifndef _LINUX_PAGEMAP_H +#include <linux/pagemap.h> +#endif + +/* + * Unlocked, temporary IO buffer_heads gets moved to the reuse_list + * once their page becomes unlocked. + */ +extern struct buffer_head *reuse_list; + +/* + * Buffer cache locking - note that interrupts may only unlock, not + * lock buffers. + */ +extern void __wait_on_buffer(struct buffer_head *); + +static inline void wait_on_buffer(struct buffer_head * bh) +{ + if (test_bit(BH_Lock, &bh->b_state)) + __wait_on_buffer(bh); +} + +static inline void lock_buffer(struct buffer_head * bh) +{ + while (set_bit(BH_Lock, &bh->b_state)) + __wait_on_buffer(bh); +} + +void unlock_buffer(struct buffer_head *); + +#ifndef MACH +/* + * super-block locking. Again, interrupts may only unlock + * a super-block (although even this isn't done right now. + * nfs may need it). 
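+ *
+ * Illustrative usage sketch, not from the original header (and note
+ * that this whole block is compiled out for GNU Mach by the
+ * #ifndef MACH above): a filesystem would bracket superblock
+ * updates as
+ *
+ *	lock_super(sb);
+ *	... update superblock fields ...
+ *	unlock_super(sb);
+ *
+ * lock_super() sleeps in __wait_on_super() while s_lock is set and
+ * then takes the lock; unlock_super() clears s_lock and wakes any
+ * sleepers on sb->s_wait.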
+ */ +extern void __wait_on_super(struct super_block *); + +static inline void wait_on_super(struct super_block * sb) +{ + if (sb->s_lock) + __wait_on_super(sb); +} + +static inline void lock_super(struct super_block * sb) +{ + if (sb->s_lock) + __wait_on_super(sb); + sb->s_lock = 1; +} + +static inline void unlock_super(struct super_block * sb) +{ + sb->s_lock = 0; + wake_up(&sb->s_wait); +} +#endif /* !MACH */ + +#endif /* _LINUX_LOCKS_H */ + diff --git a/linux/dev/include/linux/malloc.h b/linux/dev/include/linux/malloc.h new file mode 100644 index 0000000..50d8114 --- /dev/null +++ b/linux/dev/include/linux/malloc.h @@ -0,0 +1,18 @@ +#ifndef _LINUX_MALLOC_H +#define _LINUX_MALLOC_H + +#include <linux/mm.h> +#include <asm/cache.h> + +#ifndef MACH_INCLUDE +#define kmalloc linux_kmalloc +#define kfree linux_kfree +#define kfree_s linux_kfree_s +#endif + +extern void *linux_kmalloc (unsigned int size, int priority); +extern void linux_kfree (void *obj); + +#define linux_kfree_s(a,b) linux_kfree(a) + +#endif /* _LINUX_MALLOC_H */ diff --git a/linux/dev/include/linux/mm.h b/linux/dev/include/linux/mm.h new file mode 100644 index 0000000..b0c3ab0 --- /dev/null +++ b/linux/dev/include/linux/mm.h @@ -0,0 +1,378 @@ +#ifndef _LINUX_MM_H +#define _LINUX_MM_H + +#include <linux/sched.h> +#include <linux/errno.h> +#include <linux/kernel.h> + +#ifdef __KERNEL__ + +#include <linux/string.h> + +extern unsigned long high_memory; + +#include <asm/page.h> +#include <asm/atomic.h> + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +extern int verify_area(int, const void *, unsigned long); + +/* + * Linux kernel virtual memory manager primitives. + * The idea being to have a "virtual" mm in the same way + * we have a virtual fs - giving a cleaner interface to the + * mm details, and allowing different kinds of memory mappings + * (from shared memory to executable loading to arbitrary + * mmap() functions). + */ + +/* + * This struct defines a memory VMM memory area. There is one of these + * per VM-area/task. A VM area is any part of the process virtual memory + * space that has a special rule for the page-fault handlers (ie a shared + * library, the executable area etc). + */ +struct vm_area_struct { + struct mm_struct * vm_mm; /* VM area parameters */ + unsigned long vm_start; + unsigned long vm_end; + pgprot_t vm_page_prot; + unsigned short vm_flags; +/* AVL tree of VM areas per task, sorted by address */ + short vm_avl_height; + struct vm_area_struct * vm_avl_left; + struct vm_area_struct * vm_avl_right; +/* linked list of VM areas per task, sorted by address */ + struct vm_area_struct * vm_next; +/* for areas with inode, the circular list inode->i_mmap */ +/* for shm areas, the circular list of attaches */ +/* otherwise unused */ + struct vm_area_struct * vm_next_share; + struct vm_area_struct * vm_prev_share; +/* more */ + struct vm_operations_struct * vm_ops; + unsigned long vm_offset; + struct inode * vm_inode; + unsigned long vm_pte; /* shared mem */ +}; + +/* + * vm_flags.. + */ +#define VM_READ 0x0001 /* currently active flags */ +#define VM_WRITE 0x0002 +#define VM_EXEC 0x0004 +#define VM_SHARED 0x0008 + +#define VM_MAYREAD 0x0010 /* limits for mprotect() etc */ +#define VM_MAYWRITE 0x0020 +#define VM_MAYEXEC 0x0040 +#define VM_MAYSHARE 0x0080 + +#define VM_GROWSDOWN 0x0100 /* general info on the segment */ +#define VM_GROWSUP 0x0200 +#define VM_SHM 0x0400 /* shared memory area, don't swap out */ +#define VM_DENYWRITE 0x0800 /* ETXTBSY on write attempts.. 
*/ + +#define VM_EXECUTABLE 0x1000 +#define VM_LOCKED 0x2000 + +#define VM_STACK_FLAGS 0x0177 + +/* + * mapping from the currently active vm_flags protection bits (the + * low four bits) to a page protection mask.. + */ +extern pgprot_t protection_map[16]; + + +/* + * These are the virtual MM functions - opening of an area, closing and + * unmapping it (needed to keep files on disk up-to-date etc), pointer + * to the functions called when a no-page or a wp-page exception occurs. + */ +struct vm_operations_struct { + void (*open)(struct vm_area_struct * area); + void (*close)(struct vm_area_struct * area); + void (*unmap)(struct vm_area_struct *area, unsigned long, size_t); + void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot); + int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags); + void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise); + unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access); + unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address, + unsigned long page); + int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *); + pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long); +}; + +/* + * Try to keep the most commonly accessed fields in single cache lines + * here (16 bytes or greater). This ordering should be particularly + * beneficial on 32-bit processors. + * + * The first line is data used in page cache lookup, the second line + * is used for linear searches (eg. clock algorithm scans). + */ +typedef struct page { + /* these must be first (free area handling) */ + struct page *next; + struct page *prev; + struct inode *inode; + unsigned long offset; + struct page *next_hash; + atomic_t count; + unsigned flags; /* atomic flags, some possibly updated asynchronously */ + unsigned dirty:16, + age:8; + struct wait_queue *wait; + struct page *prev_hash; + struct buffer_head * buffers; + unsigned long swap_unlock_entry; + unsigned long map_nr; /* page->map_nr == page - mem_map */ +} mem_map_t; + +/* Page flag bit values */ +#define PG_locked 0 +#define PG_error 1 +#define PG_referenced 2 +#define PG_uptodate 3 +#define PG_free_after 4 +#define PG_decr_after 5 +#define PG_swap_unlock_after 6 +#define PG_DMA 7 +#define PG_reserved 31 + +/* Make it prettier to test the above... */ +#define PageLocked(page) (test_bit(PG_locked, &(page)->flags)) +#define PageError(page) (test_bit(PG_error, &(page)->flags)) +#define PageReferenced(page) (test_bit(PG_referenced, &(page)->flags)) +#define PageDirty(page) (test_bit(PG_dirty, &(page)->flags)) +#define PageUptodate(page) (test_bit(PG_uptodate, &(page)->flags)) +#define PageFreeAfter(page) (test_bit(PG_free_after, &(page)->flags)) +#define PageDecrAfter(page) (test_bit(PG_decr_after, &(page)->flags)) +#define PageSwapUnlockAfter(page) (test_bit(PG_swap_unlock_after, &(page)->flags)) +#define PageDMA(page) (test_bit(PG_DMA, &(page)->flags)) +#define PageReserved(page) (test_bit(PG_reserved, &(page)->flags)) + +/* + * page->reserved denotes a page which must never be accessed (which + * may not even be present). + * + * page->dma is set for those pages which lie in the range of + * physical addresses capable of carrying DMA transfers. + * + * Multiple processes may "see" the same page. E.g. 
for untouched + * mappings of /dev/null, all processes see the same page full of + * zeroes, and text pages of executables and shared libraries have + * only one copy in memory, at most, normally. + * + * For the non-reserved pages, page->count denotes a reference count. + * page->count == 0 means the page is free. + * page->count == 1 means the page is used for exactly one purpose + * (e.g. a private data page of one process). + * + * A page may be used for kmalloc() or anyone else who does a + * get_free_page(). In this case the page->count is at least 1, and + * all other fields are unused but should be 0 or NULL. The + * management of this page is the responsibility of the one who uses + * it. + * + * The other pages (we may call them "process pages") are completely + * managed by the Linux memory manager: I/O, buffers, swapping etc. + * The following discussion applies only to them. + * + * A page may belong to an inode's memory mapping. In this case, + * page->inode is the inode, and page->offset is the file offset + * of the page (not necessarily a multiple of PAGE_SIZE). + * + * A page may have buffers allocated to it. In this case, + * page->buffers is a circular list of these buffer heads. Else, + * page->buffers == NULL. + * + * For pages belonging to inodes, the page->count is the number of + * attaches, plus 1 if buffers are allocated to the page. + * + * All pages belonging to an inode make up a doubly linked list + * inode->i_pages, using the fields page->next and page->prev. (These + * fields are also used for freelist management when page->count==0.) + * There is also a hash table mapping (inode,offset) to the page + * in memory if present. The lists for this hash table use the fields + * page->next_hash and page->prev_hash. + * + * All process pages can do I/O: + * - inode pages may need to be read from disk, + * - inode pages which have been modified and are MAP_SHARED may need + * to be written to disk, + * - private pages which have been modified may need to be swapped out + * to swap space and (later) to be read back into memory. + * During disk I/O, page->locked is true. This bit is set before I/O + * and reset when I/O completes. page->wait is a wait queue of all + * tasks waiting for the I/O on this page to complete. + * page->uptodate tells whether the page's contents is valid. + * When a read completes, the page becomes uptodate, unless a disk I/O + * error happened. + * When a write completes, and page->free_after is true, the page is + * freed without any further delay. + * + * For choosing which pages to swap out, inode pages carry a + * page->referenced bit, which is set any time the system accesses + * that page through the (inode,offset) hash table. + * There is also the page->age counter, which implements a linear + * decay (why not an exponential decay?), see swapctl.h. + */ + +extern mem_map_t * mem_map; + +/* + * This is timing-critical - most of the time in getting a new page + * goes to clearing the page. If you want a page without the clearing + * overhead, just use __get_free_page() directly.. 
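+ *
+ * Illustrative example, not part of the original comment: a caller
+ * that will overwrite the whole page anyway can skip the clearing,
+ *
+ *	unsigned long buf = __get_free_page(GFP_KERNEL);
+ *	if (!buf)
+ *		return -ENOMEM;
+ *	... fill the page ...
+ *	free_page(buf);
+ *
+ * while get_free_page(GFP_KERNEL) below hands back the page already
+ * zeroed with memset().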
+ */ +#define __get_free_page(priority) __get_free_pages((priority),0,0) +#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),1) +extern unsigned long __get_free_pages(int priority, unsigned long gfporder, int dma); + +static inline unsigned long get_free_page(int priority) +{ + unsigned long page; + + page = __get_free_page(priority); + if (page) + memset((void *) page, 0, PAGE_SIZE); + return page; +} + +/* memory.c & swap.c*/ + +#define free_page(addr) free_pages((addr),0) +extern void free_pages(unsigned long addr, unsigned long order); +extern void __free_page(struct page *); + +extern void show_free_areas(void); +extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page, + unsigned long address); + +extern void free_page_tables(struct mm_struct * mm); +extern void clear_page_tables(struct task_struct * tsk); +extern int new_page_tables(struct task_struct * tsk); +extern int copy_page_tables(struct task_struct * to); + +extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size); +extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); +extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot); +extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot); + +extern void vmtruncate(struct inode * inode, unsigned long offset); +extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access); +extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access); +extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access); + +extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem); +extern void mem_init(unsigned long start_mem, unsigned long end_mem); +extern void show_mem(void); +extern void oom(struct task_struct * tsk); +extern void si_meminfo(struct sysinfo * val); + +/* vmalloc.c */ + +extern void * vmalloc(unsigned long size); +extern void * vremap(unsigned long offset, unsigned long size); +extern void vfree(void * addr); +extern int vread(char *buf, char *addr, int count); +extern unsigned long vmtophys (void *); + +/* mmap.c */ +extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, unsigned long off); +extern void merge_segments(struct mm_struct *, unsigned long, unsigned long); +extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *); +extern void remove_shared_vm_struct(struct vm_area_struct *); +extern void build_mmap_avl(struct mm_struct *); +extern void exit_mmap(struct mm_struct *); +extern int do_munmap(unsigned long, size_t); +extern unsigned long get_unmapped_area(unsigned long, unsigned long); + +/* filemap.c */ +extern unsigned long page_unuse(unsigned long); +extern int shrink_mmap(int, int, int); +extern void truncate_inode_pages(struct inode *, unsigned long); + +#define GFP_BUFFER 0x00 +#define GFP_ATOMIC 0x01 +#define GFP_USER 0x02 +#define GFP_KERNEL 0x03 +#define GFP_NOBUFFER 0x04 +#define GFP_NFS 0x05 +#define GFP_IO 0x06 + +/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some + platforms, used as appropriate on others */ + +#define GFP_DMA 0x80 + +#define GFP_LEVEL_MASK 0xf + +#ifndef MACH +/* vma is the first one with address < vma->vm_end, + * and even address < vma->vm_start. 
Have to extend vma. */ +static inline int expand_stack(struct vm_area_struct * vma, unsigned long address) +{ + unsigned long grow; + + address &= PAGE_MASK; + grow = vma->vm_start - address; + if (vma->vm_end - address + > (unsigned long) current->rlim[RLIMIT_STACK].rlim_cur || + (vma->vm_mm->total_vm << PAGE_SHIFT) + grow + > (unsigned long) current->rlim[RLIMIT_AS].rlim_cur) + return -ENOMEM; + vma->vm_start = address; + vma->vm_offset -= grow; + vma->vm_mm->total_vm += grow >> PAGE_SHIFT; + if (vma->vm_flags & VM_LOCKED) + vma->vm_mm->locked_vm += grow >> PAGE_SHIFT; + return 0; +} + +#define avl_empty (struct vm_area_struct *) NULL + +/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ +static inline struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr) +{ + struct vm_area_struct * result = NULL; + + if (mm) { + struct vm_area_struct * tree = mm->mmap_avl; + for (;;) { + if (tree == avl_empty) + break; + if (tree->vm_end > addr) { + result = tree; + if (tree->vm_start <= addr) + break; + tree = tree->vm_avl_left; + } else + tree = tree->vm_avl_right; + } + } + return result; +} + +/* Look up the first VMA which intersects the interval start_addr..end_addr-1, + NULL if none. Assume start_addr < end_addr. */ +static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) +{ + struct vm_area_struct * vma; + + vma = find_vma(mm,start_addr); + if (vma && end_addr <= vma->vm_start) + vma = NULL; + return vma; +} +#endif /* !MACH */ + +#endif /* __KERNEL__ */ + +#endif diff --git a/linux/dev/include/linux/modversions.h b/linux/dev/include/linux/modversions.h new file mode 100644 index 0000000..9d841c9 --- /dev/null +++ b/linux/dev/include/linux/modversions.h @@ -0,0 +1 @@ +/* Dummy file. */ diff --git a/linux/dev/include/linux/netdevice.h b/linux/dev/include/linux/netdevice.h new file mode 100644 index 0000000..e1a9a34 --- /dev/null +++ b/linux/dev/include/linux/netdevice.h @@ -0,0 +1,339 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the Interfaces handler. + * + * Version: @(#)dev.h 1.0.11 07/31/96 + * + * Authors: Ross Biro, <bir7@leland.Stanford.Edu> + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Corey Minyard <wf-rch!minyard@relay.EU.net> + * Donald J. Becker, <becker@super.org> + * Alan Cox, <A.Cox@swansea.ac.uk> + * Bjorn Ekwall. <bj0rn@blox.se> + * Lawrence V. Stefani, <stefani@lkg.dec.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Moved to /usr/include/linux for NET3 + * Added extern for fddi_setup() + */ +#ifndef _LINUX_NETDEVICE_H +#define _LINUX_NETDEVICE_H + +#include <linux/config.h> +#include <linux/if.h> +#include <linux/if_ether.h> + +/* for future expansion when we will have different priorities. */ +#define DEV_NUMBUFFS 3 +#define MAX_ADDR_LEN 7 +#ifndef CONFIG_AX25 +#ifndef CONFIG_TR +#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) +#define MAX_HEADER 32 /* We really need about 18 worst case .. 
so 32 is aligned */ +#else +#define MAX_HEADER 80 /* We need to allow for having tunnel headers */ +#endif /* IPIP */ +#else +#define MAX_HEADER 48 /* Token Ring header needs 40 bytes ... 48 is aligned */ +#endif /* TR */ +#else +#define MAX_HEADER 96 /* AX.25 + NetROM */ +#endif /* AX25 */ + +#define IS_MYADDR 1 /* address is (one of) our own */ +#define IS_LOOPBACK 2 /* address is for LOOPBACK */ +#define IS_BROADCAST 3 /* address is a valid broadcast */ +#define IS_INVBCAST 4 /* Wrong netmask bcast not for us (unused)*/ +#define IS_MULTICAST 5 /* Multicast IP address */ + +#ifdef __KERNEL__ + +#include <linux/skbuff.h> + +/* + * We tag multicasts with these structures. + */ + +struct dev_mc_list +{ + struct dev_mc_list *next; + char dmi_addr[MAX_ADDR_LEN]; + unsigned short dmi_addrlen; + unsigned short dmi_users; +}; + +struct hh_cache +{ + struct hh_cache *hh_next; + void *hh_arp; /* Opaque pointer, used by + * any address resolution module, + * not only ARP. + */ + int hh_refcnt; /* number of users */ + unsigned short hh_type; /* protocol identifier, f.e ETH_P_IP */ + char hh_uptodate; /* hh_data is valid */ + char hh_data[16]; /* cached hardware header */ +}; + +/* + * The DEVICE structure. + * Actually, this whole structure is a big mistake. It mixes I/O + * data with strictly "high-level" data, and it has to know about + * almost every data structure used in the INET module. + */ + +#ifdef MACH + +#ifndef MACH_INCLUDE +#define device linux_device +#endif + +struct linux_device + +#else + +struct device + +#endif +{ + + /* + * This is the first field of the "visible" part of this structure + * (i.e. as seen by users in the "Space.c" file). It is the name + * the interface. + */ + char *name; + + /* I/O specific fields - FIXME: Merge these and struct ifmap into one */ + unsigned long rmem_end; /* shmem "recv" end */ + unsigned long rmem_start; /* shmem "recv" start */ + unsigned long mem_end; /* shared mem end */ + unsigned long mem_start; /* shared mem start */ + unsigned long base_addr; /* device I/O address */ + unsigned char irq; /* device IRQ number */ + + /* Low-level status flags. */ + volatile unsigned char start, /* start an operation */ + interrupt; /* interrupt arrived */ + unsigned long tbusy; /* transmitter busy must be long for bitops */ + + struct linux_device *next; + + /* The device initialization function. Called only once. */ + int (*init)(struct linux_device *dev); + + /* Some hardware also needs these fields, but they are not part of the + usual set specified in Space.c. */ + unsigned char if_port; /* Selectable AUI, TP,..*/ + unsigned char dma; /* DMA channel */ + + struct enet_statistics* (*get_stats)(struct linux_device *dev); + + /* + * This marks the end of the "visible" part of the structure. All + * fields hereafter are internal to the system, and may change at + * will (read: may be cleaned up at will). + */ + + /* These may be needed for future network-power-down code. */ + unsigned long trans_start; /* Time (in jiffies) of last Tx */ + unsigned long last_rx; /* Time of last Rx */ + + unsigned short flags; /* interface flags (a la BSD) */ + unsigned short family; /* address family ID (AF_INET) */ + unsigned short metric; /* routing metric (not used) */ + unsigned short mtu; /* interface MTU value */ + unsigned short type; /* interface hardware type */ + unsigned short hard_header_len; /* hardware hdr length */ + void *priv; /* pointer to private data */ + + /* Interface address info. 
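+ * (Added note, not in the original header: dev_addr below is the
+ * link-level/MAC address, while the pa_* fields carry the protocol
+ * level - typically IPv4 - address, broadcast, peer and netmask.)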
*/ + unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ + unsigned char pad; /* make dev_addr aligned to 8 bytes */ + unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */ + unsigned char addr_len; /* hardware address length */ + unsigned long pa_addr; /* protocol address */ + unsigned long pa_brdaddr; /* protocol broadcast addr */ + unsigned long pa_dstaddr; /* protocol P-P other side addr */ + unsigned long pa_mask; /* protocol netmask */ + unsigned short pa_alen; /* protocol address length */ + + struct dev_mc_list *mc_list; /* Multicast mac addresses */ + int mc_count; /* Number of installed mcasts */ + + struct ip_mc_list *ip_mc_list; /* IP multicast filter chain */ + __u32 tx_queue_len; /* Max frames per queue allowed */ + + /* For load balancing driver pair support */ + + unsigned long pkt_queue; /* Packets queued */ + struct linux_device *slave; /* Slave device */ + struct net_alias_info *alias_info; /* main dev alias info */ + struct net_alias *my_alias; /* alias devs */ + + /* Pointer to the interface buffers. */ + struct sk_buff_head buffs[DEV_NUMBUFFS]; + + /* Pointers to interface service routines. */ + int (*open)(struct linux_device *dev); + int (*stop)(struct linux_device *dev); + int (*hard_start_xmit) (struct sk_buff *skb, + struct linux_device *dev); + int (*hard_header) (struct sk_buff *skb, + struct linux_device *dev, + unsigned short type, + void *daddr, + void *saddr, + unsigned len); + int (*rebuild_header)(void *eth, struct linux_device *dev, + unsigned long raddr, struct sk_buff *skb); +#define HAVE_MULTICAST + void (*set_multicast_list)(struct linux_device *dev); +#define HAVE_SET_MAC_ADDR + int (*set_mac_address)(struct linux_device *dev, void *addr); +#define HAVE_PRIVATE_IOCTL + int (*do_ioctl)(struct linux_device *dev, struct ifreq *ifr, int cmd); +#define HAVE_SET_CONFIG + int (*set_config)(struct linux_device *dev, struct ifmap *map); +#define HAVE_HEADER_CACHE + void (*header_cache_bind)(struct hh_cache **hhp, struct linux_device *dev, unsigned short htype, __u32 daddr); + void (*header_cache_update)(struct hh_cache *hh, struct linux_device *dev, unsigned char * haddr); +#define HAVE_CHANGE_MTU + int (*change_mtu)(struct linux_device *dev, int new_mtu); + + struct iw_statistics* (*get_wireless_stats)(struct linux_device *dev); + +#ifdef MACH + +#ifdef MACH_INCLUDE + struct net_data *net_data; +#else + void *net_data; +#endif + +#endif +}; + + +struct packet_type { + unsigned short type; /* This is really htons(ether_type). 
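+ * A protocol registers interest in one type here,
+ * e.g. htons(ETH_P_IP), before handing the structure
+ * to dev_add_pack() (added note, not in the original).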
*/ + struct linux_device * dev; + int (*func) (struct sk_buff *, struct linux_device *, + struct packet_type *); + void *data; + struct packet_type *next; +}; + + +#include <linux/interrupt.h> +#include <linux/notifier.h> + +/* Used by dev_rint */ +#define IN_SKBUFF 1 + +extern volatile unsigned long in_bh; + +extern struct linux_device loopback_dev; +extern struct linux_device *dev_base; +extern struct packet_type *ptype_base[16]; + + +extern int ip_addr_match(unsigned long addr1, unsigned long addr2); +extern int ip_chk_addr(unsigned long addr); +extern struct linux_device *ip_dev_bynet(unsigned long daddr, unsigned long mask); +extern unsigned long ip_my_addr(void); +extern unsigned long ip_get_mask(unsigned long addr); +extern struct linux_device *ip_dev_find(unsigned long addr); +extern struct linux_device *dev_getbytype(unsigned short type); + +extern void dev_add_pack(struct packet_type *pt); +extern void dev_remove_pack(struct packet_type *pt); +extern struct linux_device *dev_get(const char *name); +extern int dev_open(struct linux_device *dev); +extern int dev_close(struct linux_device *dev); +extern void dev_queue_xmit(struct sk_buff *skb, struct linux_device *dev, + int pri); + +#define HAVE_NETIF_RX 1 +extern void netif_rx(struct sk_buff *skb); +extern void net_bh(void); + +#ifdef MACH +#define dev_tint(dev) +#else +extern void dev_tint(struct linux_device *dev); +#endif + +extern int dev_change_flags(struct linux_device *dev, short flags); +extern int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy); +extern int dev_ioctl(unsigned int cmd, void *); + +extern void dev_init(void); + +/* Locking protection for page faults during outputs to devices unloaded during the fault */ + +extern int dev_lockct; + +/* + * These two don't currently need to be interrupt-safe + * but they may do soon. Do it properly anyway. 
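+ *
+ * Illustrative usage sketch (not from the original header) while
+ * walking the global device list:
+ *
+ *	struct linux_device *dev;
+ *	dev_lock_list();
+ *	for (dev = dev_base; dev; dev = dev->next)
+ *		... inspect dev ...
+ *	dev_unlock_list();
+ *
+ * The count is changed with interrupts disabled, so a driver waiting
+ * in dev_lock_wait() only proceeds once dev_lockct has dropped back
+ * to zero.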
+ */ + +extern __inline__ void dev_lock_list(void) +{ + unsigned long flags; + save_flags(flags); + cli(); + dev_lockct++; + restore_flags(flags); +} + +extern __inline__ void dev_unlock_list(void) +{ + unsigned long flags; + save_flags(flags); + cli(); + dev_lockct--; + restore_flags(flags); +} + +/* + * This almost never occurs, isn't in performance critical paths + * and we can thus be relaxed about it + */ + +extern __inline__ void dev_lock_wait(void) +{ + while(dev_lockct) + schedule(); +} + + +/* These functions live elsewhere (drivers/net/net_init.c, but related) */ + +extern void ether_setup(struct linux_device *dev); +extern void tr_setup(struct linux_device *dev); +extern void fddi_setup(struct linux_device *dev); +extern int ether_config(struct linux_device *dev, struct ifmap *map); +/* Support for loadable net-drivers */ +extern int register_netdev(struct linux_device *dev); +extern void unregister_netdev(struct linux_device *dev); +extern int register_netdevice_notifier(struct notifier_block *nb); +extern int unregister_netdevice_notifier(struct notifier_block *nb); +/* Functions used for multicast support */ +extern void dev_mc_upload(struct linux_device *dev); +extern void dev_mc_delete(struct linux_device *dev, void *addr, int alen, int all); +extern void dev_mc_add(struct linux_device *dev, void *addr, int alen, int newonly); +extern void dev_mc_discard(struct linux_device *dev); +/* This is the wrong place but it'll do for the moment */ +extern void ip_mc_allhost(struct linux_device *dev); +#endif /* __KERNEL__ */ + +#endif /* _LINUX_DEV_H */ diff --git a/linux/dev/include/linux/notifier.h b/linux/dev/include/linux/notifier.h new file mode 100644 index 0000000..b3c9ccf --- /dev/null +++ b/linux/dev/include/linux/notifier.h @@ -0,0 +1,96 @@ +/* + * Routines to manage notifier chains for passing status changes to any + * interested routines. We need this instead of hard coded call lists so + * that modules can poke their nose into the innards. The network devices + * needed them so here they are for the rest of you. + * + * Alan Cox <Alan.Cox@linux.org> + */ + +#ifndef _LINUX_NOTIFIER_H +#define _LINUX_NOTIFIER_H +#include <linux/errno.h> + +struct notifier_block +{ + int (*notifier_call)(struct notifier_block *this, unsigned long, void *); + struct notifier_block *next; + int priority; +}; + + +#ifdef __KERNEL__ + +#define NOTIFY_DONE 0x0000 /* Don't care */ +#define NOTIFY_OK 0x0001 /* Suits me */ +#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */ +#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */ + +extern __inline__ int notifier_chain_register(struct notifier_block **list, struct notifier_block *n) +{ + while(*list) + { + if(n->priority > (*list)->priority) + break; + list= &((*list)->next); + } + n->next = *list; + *list=n; + return 0; +} + +/* + * Warning to any non GPL module writers out there.. 
these functions are + * GPL'd + */ + +extern __inline__ int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n) +{ + while((*nl)!=NULL) + { + if((*nl)==n) + { + *nl=n->next; + return 0; + } + nl=&((*nl)->next); + } + return -ENOENT; +} + +/* + * This is one of these things that is generally shorter inline + */ + +extern __inline__ int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v) +{ + int ret=NOTIFY_DONE; + struct notifier_block *nb = *n; + while(nb) + { + ret=nb->notifier_call(nb,val,v); + if(ret&NOTIFY_STOP_MASK) + return ret; + nb=nb->next; + } + return ret; +} + + +/* + * Declared notifiers so far. I can imagine quite a few more chains + * over time (eg laptop power reset chains, reboot chain (to clean + * device units up), device [un]mount chain, module load/unload chain, + * low memory chain, screenblank chain (for plug in modular screenblankers) + * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... + */ + +/* netdevice notifier chain */ +#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */ +#define NETDEV_DOWN 0x0002 +#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface + detected a hardware crash and restarted + - we can use this eg to kick tcp sessions + once done */ +#endif +#endif diff --git a/linux/dev/include/linux/pagemap.h b/linux/dev/include/linux/pagemap.h new file mode 100644 index 0000000..6e21f3d --- /dev/null +++ b/linux/dev/include/linux/pagemap.h @@ -0,0 +1,150 @@ +#ifndef _LINUX_PAGEMAP_H +#define _LINUX_PAGEMAP_H + +#include <asm/system.h> + +/* + * Page-mapping primitive inline functions + * + * Copyright 1995 Linus Torvalds + */ + +#ifndef MACH + +#include <linux/mm.h> +#include <linux/fs.h> +#include <linux/swapctl.h> + +static inline unsigned long page_address(struct page * page) +{ + return PAGE_OFFSET + PAGE_SIZE * page->map_nr; +} + +#define PAGE_HASH_BITS 11 +#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS) + +#define PAGE_AGE_VALUE ((PAGE_INITIAL_AGE)+(PAGE_ADVANCE)) + +extern unsigned long page_cache_size; /* # of pages currently in the hash table */ +extern struct page * page_hash_table[PAGE_HASH_SIZE]; + +/* + * We use a power-of-two hash table to avoid a modulus, + * and get a reasonable hash by knowing roughly how the + * inode pointer and offsets are distributed (ie, we + * roughly know which bits are "significant") + */ +static inline unsigned long _page_hashfn(struct inode * inode, unsigned long offset) +{ +#define i (((unsigned long) inode)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1))) +#define o (offset >> PAGE_SHIFT) +#define s(x) ((x)+((x)>>PAGE_HASH_BITS)) + return s(i+o) & (PAGE_HASH_SIZE-1); +#undef i +#undef o +#undef s +} + +#define page_hash(inode,offset) (page_hash_table+_page_hashfn(inode,offset)) + +static inline struct page * __find_page(struct inode * inode, unsigned long offset, struct page *page) +{ + goto inside; + for (;;) { + page = page->next_hash; +inside: + if (!page) + goto not_found; + if (page->inode != inode) + continue; + if (page->offset == offset) + break; + } + /* Found the page. 
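+ * Take a reference and mark it recently referenced so the page
+ * ager keeps it around; the caller must drop the count again when
+ * it is done with the page (added note, not in the original).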
*/ + atomic_inc(&page->count); + set_bit(PG_referenced, &page->flags); +not_found: + return page; +} + +static inline struct page *find_page(struct inode * inode, unsigned long offset) +{ + return __find_page(inode, offset, *page_hash(inode, offset)); +} + +static inline void remove_page_from_hash_queue(struct page * page) +{ + struct page **p; + struct page *next_hash, *prev_hash; + + next_hash = page->next_hash; + prev_hash = page->prev_hash; + page->next_hash = NULL; + page->prev_hash = NULL; + if (next_hash) + next_hash->prev_hash = prev_hash; + if (prev_hash) + prev_hash->next_hash = next_hash; + p = page_hash(page->inode,page->offset); + if (*p == page) + *p = next_hash; + page_cache_size--; +} + +static inline void __add_page_to_hash_queue(struct page * page, struct page **p) +{ + page_cache_size++; + set_bit(PG_referenced, &page->flags); + page->age = PAGE_AGE_VALUE; + page->prev_hash = NULL; + if ((page->next_hash = *p) != NULL) + page->next_hash->prev_hash = page; + *p = page; +} + +static inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long offset) +{ + __add_page_to_hash_queue(page, page_hash(inode,offset)); +} + + +static inline void remove_page_from_inode_queue(struct page * page) +{ + struct inode * inode = page->inode; + + page->inode = NULL; + inode->i_nrpages--; + if (inode->i_pages == page) + inode->i_pages = page->next; + if (page->next) + page->next->prev = page->prev; + if (page->prev) + page->prev->next = page->next; + page->next = NULL; + page->prev = NULL; +} + +static inline void add_page_to_inode_queue(struct inode * inode, struct page * page) +{ + struct page **p = &inode->i_pages; + + inode->i_nrpages++; + page->inode = inode; + page->prev = NULL; + if ((page->next = *p) != NULL) + page->next->prev = page; + *p = page; +} + +extern void __wait_on_page(struct page *); +static inline void wait_on_page(struct page * page) +{ + if (PageLocked(page)) + __wait_on_page(page); +} + +extern void update_vm_cache(struct inode *, unsigned long, const char *, int); + +#endif /* !MACH */ + +#endif diff --git a/linux/dev/include/linux/pm.h b/linux/dev/include/linux/pm.h new file mode 100644 index 0000000..9d841c9 --- /dev/null +++ b/linux/dev/include/linux/pm.h @@ -0,0 +1 @@ +/* Dummy file. 
*/ diff --git a/linux/dev/include/linux/proc_fs.h b/linux/dev/include/linux/proc_fs.h new file mode 100644 index 0000000..8ce0bb2 --- /dev/null +++ b/linux/dev/include/linux/proc_fs.h @@ -0,0 +1,292 @@ +#ifndef _LINUX_PROC_FS_H +#define _LINUX_PROC_FS_H + +#include <linux/fs.h> +#include <linux/malloc.h> + +/* + * The proc filesystem constants/structures + */ + +/* + * We always define these enumerators + */ + +enum root_directory_inos { + PROC_ROOT_INO = 1, + PROC_LOADAVG, + PROC_UPTIME, + PROC_MEMINFO, + PROC_KMSG, + PROC_VERSION, + PROC_CPUINFO, + PROC_PCI, + PROC_SELF, /* will change inode # */ + PROC_NET, + PROC_SCSI, + PROC_MALLOC, + PROC_KCORE, + PROC_MODULES, + PROC_STAT, + PROC_DEVICES, + PROC_INTERRUPTS, + PROC_FILESYSTEMS, + PROC_KSYMS, + PROC_DMA, + PROC_IOPORTS, +#ifdef __SMP_PROF__ + PROC_SMP_PROF, +#endif + PROC_PROFILE, /* whether enabled or not */ + PROC_CMDLINE, + PROC_SYS, + PROC_MTAB, + PROC_MD, + PROC_RTC, + PROC_LOCKS +}; + +enum pid_directory_inos { + PROC_PID_INO = 2, + PROC_PID_STATUS, + PROC_PID_MEM, + PROC_PID_CWD, + PROC_PID_ROOT, + PROC_PID_EXE, + PROC_PID_FD, + PROC_PID_ENVIRON, + PROC_PID_CMDLINE, + PROC_PID_STAT, + PROC_PID_STATM, + PROC_PID_MAPS +}; + +enum pid_subdirectory_inos { + PROC_PID_FD_DIR = 1 +}; + +enum net_directory_inos { + PROC_NET_UNIX = 128, + PROC_NET_ARP, + PROC_NET_ROUTE, + PROC_NET_DEV, + PROC_NET_RAW, + PROC_NET_TCP, + PROC_NET_UDP, + PROC_NET_SNMP, + PROC_NET_RARP, + PROC_NET_IGMP, + PROC_NET_IPMR_VIF, + PROC_NET_IPMR_MFC, + PROC_NET_IPFWFWD, + PROC_NET_IPFWIN, + PROC_NET_IPFWOUT, + PROC_NET_IPACCT, + PROC_NET_IPMSQHST, + PROC_NET_WIRELESS, + PROC_NET_IPX_INTERFACE, + PROC_NET_IPX_ROUTE, + PROC_NET_IPX, + PROC_NET_ATALK, + PROC_NET_AT_ROUTE, + PROC_NET_ATIF, + PROC_NET_AX25_ROUTE, + PROC_NET_AX25, + PROC_NET_AX25_CALLS, + PROC_NET_NR_NODES, + PROC_NET_NR_NEIGH, + PROC_NET_NR, + PROC_NET_SOCKSTAT, + PROC_NET_RTCACHE, + PROC_NET_AX25_BPQETHER, + PROC_NET_ALIAS_TYPES, + PROC_NET_ALIASES, + PROC_NET_IP_MASQ_APP, + PROC_NET_STRIP_STATUS, + PROC_NET_STRIP_TRACE, + PROC_NET_IPAUTOFW, + PROC_NET_RS_NODES, + PROC_NET_RS_NEIGH, + PROC_NET_RS_ROUTES, + PROC_NET_RS, + PROC_NET_Z8530, + PROC_NET_LAST +}; + +enum scsi_directory_inos { + PROC_SCSI_SCSI = 256, + PROC_SCSI_ADVANSYS, + PROC_SCSI_EATA, + PROC_SCSI_EATA_PIO, + PROC_SCSI_AHA152X, + PROC_SCSI_AHA1542, + PROC_SCSI_AHA1740, + PROC_SCSI_AIC7XXX, + PROC_SCSI_BUSLOGIC, + PROC_SCSI_U14_34F, + PROC_SCSI_FDOMAIN, + PROC_SCSI_GENERIC_NCR5380, + PROC_SCSI_IN2000, + PROC_SCSI_PAS16, + PROC_SCSI_QLOGICFAS, + PROC_SCSI_QLOGICISP, + PROC_SCSI_SEAGATE, + PROC_SCSI_T128, + PROC_SCSI_DC390WUF, + PROC_SCSI_DC390T, + PROC_SCSI_NCR53C7xx, + PROC_SCSI_NCR53C8XX, + PROC_SCSI_ULTRASTOR, + PROC_SCSI_7000FASST, + PROC_SCSI_EATA2X, + PROC_SCSI_AM53C974, + PROC_SCSI_SSC, + PROC_SCSI_NCR53C406A, + PROC_SCSI_PPA, + PROC_SCSI_ESP, + PROC_SCSI_A3000, + PROC_SCSI_A2091, + PROC_SCSI_GVP11, + PROC_SCSI_ATARI, + PROC_SCSI_GDTH, + PROC_SCSI_IDESCSI, + PROC_SCSI_SCSI_DEBUG, + PROC_SCSI_NOT_PRESENT, + PROC_SCSI_FILE, /* I'm assuming here that we */ + PROC_SCSI_LAST = (PROC_SCSI_FILE + 16) /* won't ever see more than */ +}; /* 16 HBAs in one machine */ + +/* Finally, the dynamically allocatable proc entries are reserved: */ + +#define PROC_DYNAMIC_FIRST 4096 +#define PROC_NDYNAMIC 4096 + +#define PROC_SUPER_MAGIC 0x9fa0 + +/* + * This is not completely implemented yet. 
The idea is to + * create a in-memory tree (like the actual /proc filesystem + * tree) of these proc_dir_entries, so that we can dynamically + * add new files to /proc. + * + * The "next" pointer creates a linked list of one /proc directory, + * while parent/subdir create the directory structure (every + * /proc file has a parent, but "subdir" is NULL for all + * non-directory entries). + * + * "get_info" is called at "read", while "fill_inode" is used to + * fill in file type/protection/owner information specific to the + * particular /proc file. + */ +struct proc_dir_entry { + unsigned short low_ino; + unsigned short namelen; + const char *name; + mode_t mode; + nlink_t nlink; + uid_t uid; + gid_t gid; + unsigned long size; + struct inode_operations * ops; + int (*get_info)(char *, char **, off_t, int, int); + void (*fill_inode)(struct inode *); + struct proc_dir_entry *next, *parent, *subdir; + void *data; +}; + +extern int (* dispatch_scsi_info_ptr) (int ino, char *buffer, char **start, + off_t offset, int length, int inout); + +extern struct proc_dir_entry proc_root; +extern struct proc_dir_entry proc_net; +extern struct proc_dir_entry proc_scsi; +extern struct proc_dir_entry proc_sys; +extern struct proc_dir_entry proc_pid; +extern struct proc_dir_entry proc_pid_fd; + +extern struct inode_operations proc_scsi_inode_operations; + +extern void proc_root_init(void); +extern void proc_base_init(void); +extern void proc_net_init(void); + +extern int proc_register(struct proc_dir_entry *, struct proc_dir_entry *); +extern int proc_register_dynamic(struct proc_dir_entry *, + struct proc_dir_entry *); +extern int proc_unregister(struct proc_dir_entry *, int); + +static inline int proc_net_register(struct proc_dir_entry * x) +{ + return proc_register(&proc_net, x); +} + +static inline int proc_net_unregister(int x) +{ + return proc_unregister(&proc_net, x); +} + +static inline int proc_scsi_register(struct proc_dir_entry *driver, + struct proc_dir_entry *x) +{ + x->ops = &proc_scsi_inode_operations; + if(x->low_ino < PROC_SCSI_FILE){ + return(proc_register(&proc_scsi, x)); + }else{ + return(proc_register(driver, x)); + } +} + +static inline int proc_scsi_unregister(struct proc_dir_entry *driver, int x) +{ + extern void scsi_init_free(char *ptr, unsigned int size); + + if(x <= PROC_SCSI_FILE) + return(proc_unregister(&proc_scsi, x)); + else { + struct proc_dir_entry **p = &driver->subdir, *dp; + int ret; + + while ((dp = *p) != NULL) { + if (dp->low_ino == x) + break; + p = &dp->next; + } + ret = proc_unregister(driver, x); + scsi_init_free((char *) dp, sizeof(struct proc_dir_entry) + 4); + return(ret); + } +} + +extern struct super_block *proc_read_super(struct super_block *,void *,int); +extern int init_proc_fs(void); +extern struct inode * proc_get_inode(struct super_block *, int, struct proc_dir_entry *); +extern void proc_statfs(struct super_block *, struct statfs *, int); +extern void proc_read_inode(struct inode *); +extern void proc_write_inode(struct inode *); +extern int proc_match(int, const char *, struct proc_dir_entry *); + +/* + * These are generic /proc routines that use the internal + * "struct proc_dir_entry" tree to traverse the filesystem. + * + * The /proc root directory has extended versions to take care + * of the /proc/<pid> subdirectories. 
+ */ +extern int proc_readdir(struct inode *, struct file *, void *, filldir_t); +extern int proc_lookup(struct inode *, const char *, int, struct inode **); + +extern struct inode_operations proc_dir_inode_operations; +extern struct inode_operations proc_net_inode_operations; +extern struct inode_operations proc_netdir_inode_operations; +extern struct inode_operations proc_scsi_inode_operations; +extern struct inode_operations proc_mem_inode_operations; +extern struct inode_operations proc_sys_inode_operations; +extern struct inode_operations proc_array_inode_operations; +extern struct inode_operations proc_arraylong_inode_operations; +extern struct inode_operations proc_kcore_inode_operations; +extern struct inode_operations proc_profile_inode_operations; +extern struct inode_operations proc_kmsg_inode_operations; +extern struct inode_operations proc_link_inode_operations; +extern struct inode_operations proc_fd_inode_operations; + +#endif diff --git a/linux/dev/include/linux/sched.h b/linux/dev/include/linux/sched.h new file mode 100644 index 0000000..3e7bcd4 --- /dev/null +++ b/linux/dev/include/linux/sched.h @@ -0,0 +1,521 @@ +#ifndef _LINUX_SCHED_H +#define _LINUX_SCHED_H + +/* + * define DEBUG if you want the wait-queues to have some extra + * debugging code. It's not normally used, but might catch some + * wait-queue coding errors. + * + * #define DEBUG + */ + +#include <asm/param.h> /* for HZ */ + +extern unsigned long event; + +#include <linux/binfmts.h> +#include <linux/personality.h> +#include <linux/tasks.h> +#include <linux/kernel.h> + +#include <asm/system.h> +#include <asm/semaphore.h> +#include <asm/page.h> + +#include <linux/smp.h> +#include <linux/tty.h> +#include <linux/sem.h> + +/* + * cloning flags: + */ +#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */ +#define CLONE_VM 0x00000100 /* set if VM shared between processes */ +#define CLONE_FS 0x00000200 /* set if fs info shared between processes */ +#define CLONE_FILES 0x00000400 /* set if open files shared between processes */ +#define CLONE_SIGHAND 0x00000800 /* set if signal handlers shared */ +#define CLONE_PID 0x00001000 /* set if pid shared */ + +/* + * These are the constant used to fake the fixed-point load-average + * counting. Some notes: + * - 11 bit fractions expand to 22 bits by the multiplies: this gives + * a load-average precision of 10 bits integer + 11 bits fractional + * - if you want to count load-averages more often, you need more + * precision, or rounding will get you. With 2-second counting freq, + * the EXP_n values would be 1981, 2034 and 2043 if still using only + * 11 bit fractions. 
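+ *
+ * As a worked illustration of the constants below: FIXED_1 is
+ * 1 << 11 = 2048 and EXP_1 = 2048 / e^(5sec/1min) ~= 1884, so every
+ * LOAD_FREQ (5 seconds) the 1-minute average decays as an exponential
+ * moving average,
+ *
+ *	avenrun[0] = (avenrun[0]*EXP_1 + active*(FIXED_1-EXP_1)) >> FSHIFT;
+ *
+ * which is exactly CALC_LOAD(avenrun[0], EXP_1, active), with "active"
+ * being the count of runnable tasks already scaled by FIXED_1.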
+ */ +extern unsigned long avenrun[]; /* Load averages */ + +#define FSHIFT 11 /* nr of bits of precision */ +#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ +#define LOAD_FREQ (5*HZ) /* 5 sec intervals */ +#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ +#define EXP_5 2014 /* 1/exp(5sec/5min) */ +#define EXP_15 2037 /* 1/exp(5sec/15min) */ + +#define CALC_LOAD(load,exp,n) \ + load *= exp; \ + load += n*(FIXED_1-exp); \ + load >>= FSHIFT; + +#define CT_TO_SECS(x) ((x) / HZ) +#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ) + +extern int nr_running, nr_tasks; +extern int last_pid; + +#define FIRST_TASK task[0] +#define LAST_TASK task[NR_TASKS-1] + +#include <linux/head.h> +#include <linux/fs.h> +#include <linux/signal.h> +#include <linux/time.h> +#include <linux/param.h> +#include <linux/resource.h> +#include <linux/ptrace.h> +#include <linux/timer.h> + +#include <asm/processor.h> + +#define TASK_RUNNING 0 +#define TASK_INTERRUPTIBLE 1 +#define TASK_UNINTERRUPTIBLE 2 +#define TASK_ZOMBIE 3 +#define TASK_STOPPED 4 +#define TASK_SWAPPING 5 + +/* + * Scheduling policies + */ +#define SCHED_OTHER 0 +#define SCHED_FIFO 1 +#define SCHED_RR 2 + +struct sched_param { + int sched_priority; +}; + +#ifndef NULL +#define NULL ((void *) 0) +#endif + +#ifdef __KERNEL__ + +extern void sched_init(void); +extern void show_state(void); +extern void trap_init(void); + +asmlinkage void schedule(void); + +/* Open file table structure */ +struct files_struct { + int count; + fd_set close_on_exec; + fd_set open_fds; + struct file * fd[NR_OPEN]; +}; + +#define INIT_FILES { \ + 1, \ + { { 0, } }, \ + { { 0, } }, \ + { NULL, } \ +} + +struct fs_struct { + int count; + unsigned short umask; + struct inode * root, * pwd; +}; + +#define INIT_FS { \ + 1, \ + 0022, \ + NULL, NULL \ +} + +struct mm_struct { + int count; + pgd_t * pgd; + unsigned long context; + unsigned long start_code, end_code, start_data, end_data; + unsigned long start_brk, brk, start_stack, start_mmap; + unsigned long arg_start, arg_end, env_start, env_end; + unsigned long rss, total_vm, locked_vm; + unsigned long def_flags; + struct vm_area_struct * mmap; + struct vm_area_struct * mmap_avl; + struct semaphore mmap_sem; +}; + +#define INIT_MM { \ + 1, \ + swapper_pg_dir, \ + 0, \ + 0, 0, 0, 0, \ + 0, 0, 0, 0, \ + 0, 0, 0, 0, \ + 0, 0, 0, \ + 0, \ + &init_mmap, &init_mmap, MUTEX } + +struct signal_struct { + int count; + struct sigaction action[32]; +}; + +#define INIT_SIGNALS { \ + 1, \ + { {0,}, } } + +struct task_struct { +/* these are hardcoded - don't touch */ + volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ + long counter; + long priority; + unsigned long signal; + unsigned long blocked; /* bitmap of masked signals */ + unsigned long flags; /* per process flags, defined below */ + int errno; + long debugreg[8]; /* Hardware debugging registers */ + struct exec_domain *exec_domain; +/* various fields */ + struct linux_binfmt *binfmt; + struct task_struct *next_task, *prev_task; + struct task_struct *next_run, *prev_run; + unsigned long saved_kernel_stack; + unsigned long kernel_stack_page; + int exit_code, exit_signal; + /* ??? */ + unsigned long personality; + int dumpable:1; + int did_exec:1; + /* shouldn't this be pid_t? */ + int pid; + int pgrp; + int tty_old_pgrp; + int session; + /* boolean value for session group leader */ + int leader; + int groups[NGROUPS]; + /* + * pointers to (original) parent process, youngest child, younger sibling, + * older sibling, respectively. 
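+ * (Illustrative reading, matching SET_LINKS()/REMOVE_LINKS() further
+ * down: p_pptr is the parent, p_cptr its youngest child, and
+ * p_osptr/p_ysptr link the siblings, so all children of p can be
+ * walked with
+ *
+ *	for (c = p->p_cptr; c != NULL; c = c->p_osptr)
+ *		...;
+ *
+ * where c is just a local struct task_struct pointer.)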
(p->father can be replaced with + * p->p_pptr->pid) + */ + struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr; + struct wait_queue *wait_chldexit; /* for wait4() */ + unsigned short uid,euid,suid,fsuid; + unsigned short gid,egid,sgid,fsgid; + unsigned long timeout, policy, rt_priority; + unsigned long it_real_value, it_prof_value, it_virt_value; + unsigned long it_real_incr, it_prof_incr, it_virt_incr; + struct timer_list real_timer; + long utime, stime, cutime, cstime, start_time; +/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ + unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap; + int swappable:1; + unsigned long swap_address; + unsigned long old_maj_flt; /* old value of maj_flt */ + unsigned long dec_flt; /* page fault count of the last time */ + unsigned long swap_cnt; /* number of pages to swap on next pass */ +/* limits */ + struct rlimit rlim[RLIM_NLIMITS]; + unsigned short used_math; + char comm[16]; +/* file system info */ + int link_count; + struct tty_struct *tty; /* NULL if no tty */ +/* ipc stuff */ + struct sem_undo *semundo; + struct sem_queue *semsleeping; +/* ldt for this task - used by Wine. If NULL, default_ldt is used */ + struct desc_struct *ldt; +/* tss for this task */ + struct thread_struct tss; +/* filesystem information */ + struct fs_struct *fs; +/* open file information */ + struct files_struct *files; +/* memory management info */ + struct mm_struct *mm; +/* signal handlers */ + struct signal_struct *sig; +#ifdef __SMP__ + int processor; + int last_processor; + int lock_depth; /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */ +#endif +}; + +/* + * Per process flags + */ +#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */ + /* Not implemented yet, only for 486*/ +#define PF_PTRACED 0x00000010 /* set if ptrace (0) has been called. */ +#define PF_TRACESYS 0x00000020 /* tracing system calls */ +#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ +#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ +#define PF_DUMPCORE 0x00000200 /* dumped core */ +#define PF_SIGNALED 0x00000400 /* killed by a signal */ + +#define PF_STARTING 0x00000002 /* being created */ +#define PF_EXITING 0x00000004 /* getting shut down */ + +#define PF_USEDFPU 0x00100000 /* Process used the FPU this quantum (SMP only) */ +#define PF_DTRACE 0x00200000 /* delayed trace (used on m68k) */ + +/* + * Limit the stack by to some sane default: root can always + * increase this limit if needed.. 8MB seems reasonable. + */ +#define _STK_LIM (8*1024*1024) + +#define DEF_PRIORITY (20*HZ/100) /* 200 ms time slices */ + +/* + * INIT_TASK is used to set up the first task table, touch at + * your own risk!. Base=0, limit=0x1fffff (=2MB) + */ +#define INIT_TASK \ +/* state etc */ { 0,DEF_PRIORITY,DEF_PRIORITY,0,0,0,0, \ +/* debugregs */ { 0, }, \ +/* exec domain */&default_exec_domain, \ +/* binfmt */ NULL, \ +/* schedlink */ &init_task,&init_task, &init_task, &init_task, \ +/* stack */ 0,(unsigned long) &init_kernel_stack, \ +/* ec,brk... */ 0,0,0,0,0, \ +/* pid etc.. 
*/ 0,0,0,0,0, \ +/* suppl grps*/ {NOGROUP,}, \ +/* proc links*/ &init_task,&init_task,NULL,NULL,NULL,NULL, \ +/* uid etc */ 0,0,0,0,0,0,0,0, \ +/* timeout */ 0,SCHED_OTHER,0,0,0,0,0,0,0, \ +/* timer */ { NULL, NULL, 0, 0, it_real_fn }, \ +/* utime */ 0,0,0,0,0, \ +/* flt */ 0,0,0,0,0,0, \ +/* swp */ 0,0,0,0,0, \ +/* rlimits */ INIT_RLIMITS, \ +/* math */ 0, \ +/* comm */ "swapper", \ +/* fs info */ 0,NULL, \ +/* ipc */ NULL, NULL, \ +/* ldt */ NULL, \ +/* tss */ INIT_TSS, \ +/* fs */ &init_fs, \ +/* files */ &init_files, \ +/* mm */ &init_mm, \ +/* signals */ &init_signals, \ +} + +extern struct mm_struct init_mm; +extern struct task_struct init_task; +extern struct task_struct *task[NR_TASKS]; +extern struct task_struct *last_task_used_math; +extern struct task_struct *current_set[NR_CPUS]; +/* + * On a single processor system this comes out as current_set[0] when cpp + * has finished with it, which gcc will optimise away. + */ +#define current (0+current_set[smp_processor_id()]) /* Current on this processor */ +extern unsigned long volatile jiffies; +extern unsigned long itimer_ticks; +extern unsigned long itimer_next; +extern struct timeval xtime; +extern int need_resched; +extern void do_timer(struct pt_regs *); + +extern unsigned int * prof_buffer; +extern unsigned long prof_len; +extern unsigned long prof_shift; + +extern int securelevel; /* system security level */ + +#define CURRENT_TIME (xtime.tv_sec) + +extern void sleep_on(struct wait_queue ** p); +extern void interruptible_sleep_on(struct wait_queue ** p); +extern void wake_up(struct wait_queue ** p); +extern void wake_up_interruptible(struct wait_queue ** p); +extern void wake_up_process(struct task_struct * tsk); + +extern void notify_parent(struct task_struct * tsk, int signal); +extern void force_sig(unsigned long sig,struct task_struct * p); +extern int send_sig(unsigned long sig,struct task_struct * p,int priv); +extern int in_group_p(gid_t grp); + +extern int request_irq(unsigned int irq, + void (*handler)(int, void *, struct pt_regs *), + unsigned long flags, + const char *device, + void *dev_id); +extern void free_irq(unsigned int irq, void *dev_id); + +/* + * This has now become a routine instead of a macro, it sets a flag if + * it returns true (to do BSD-style accounting where the process is flagged + * if it uses root privs). The implication of this is that you should do + * normal permissions checks first, and check suser() last. + */ +#ifdef MACH + +extern inline int +suser(void) +{ + return 1; +} + +#else + +extern inline int suser(void) +{ + if (current->euid == 0) { + current->flags |= PF_SUPERPRIV; + return 1; + } + return 0; +} +#endif + +extern void copy_thread(int, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); +extern void flush_thread(void); +extern void exit_thread(void); + +extern void exit_mm(struct task_struct *); +extern void exit_fs(struct task_struct *); +extern void exit_files(struct task_struct *); +extern void exit_sighand(struct task_struct *); +extern void release_thread(struct task_struct *); + +extern int do_execve(char *, char **, char **, struct pt_regs *); +extern int do_fork(unsigned long, unsigned long, struct pt_regs *); + +extern void add_wait_queue(struct wait_queue **p, struct wait_queue *wait); +extern void remove_wait_queue(struct wait_queue **p, struct wait_queue *wait); + +/* See if we have a valid user level fd. + * If it makes sense, return the file structure it references. + * Otherwise return NULL. 
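+ *
+ * A minimal usage sketch (the caller is hypothetical, and the function
+ * itself is only defined in the !MACH branch below):
+ *
+ *	struct file *f = file_from_fd(fd);
+ *	if (!f)
+ *		return -EBADF;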
+ */ + +#ifdef MACH + +extern void __add_wait_queue (struct wait_queue **q, struct wait_queue *wait); +extern void add_wait_queue (struct wait_queue **q, struct wait_queue *wait); +extern void __remove_wait_queue (struct wait_queue **q, struct wait_queue *wait); +extern void remove_wait_queue (struct wait_queue **q, struct wait_queue *wait); + +#else /* !MACH */ + +extern inline struct file *file_from_fd(const unsigned int fd) +{ + + if (fd >= NR_OPEN) + return NULL; + /* either valid or null */ + return current->files->fd[fd]; +} + +/* + * The wait-queues are circular lists, and you have to be *very* sure + * to keep them correct. Use only these two functions to add/remove + * entries in the queues. + */ +extern inline void __add_wait_queue(struct wait_queue ** p, struct wait_queue * wait) +{ + struct wait_queue *head = *p; + struct wait_queue *next = WAIT_QUEUE_HEAD(p); + + if (head) + next = head; + *p = wait; + wait->next = next; +} + +extern inline void add_wait_queue(struct wait_queue ** p, struct wait_queue * wait) +{ + unsigned long flags; + + save_flags(flags); + cli(); + __add_wait_queue(p, wait); + restore_flags(flags); +} + +extern inline void __remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait) +{ + struct wait_queue * next = wait->next; + struct wait_queue * head = next; + + for (;;) { + struct wait_queue * nextlist = head->next; + if (nextlist == wait) + break; + head = nextlist; + } + head->next = next; +} + +extern inline void remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait) +{ + unsigned long flags; + + save_flags(flags); + cli(); + __remove_wait_queue(p, wait); + restore_flags(flags); +} + +extern inline void select_wait(struct wait_queue ** wait_address, select_table * p) +{ + struct select_table_entry * entry; + + if (!p || !wait_address) + return; + if (p->nr >= __MAX_SELECT_TABLE_ENTRIES) + return; + entry = p->entry + p->nr; + entry->wait_address = wait_address; + entry->wait.task = current; + entry->wait.next = NULL; + add_wait_queue(wait_address,&entry->wait); + p->nr++; +} + +#endif /* !MACH */ + +#define REMOVE_LINKS(p) do { unsigned long flags; \ + save_flags(flags) ; cli(); \ + (p)->next_task->prev_task = (p)->prev_task; \ + (p)->prev_task->next_task = (p)->next_task; \ + restore_flags(flags); \ + if ((p)->p_osptr) \ + (p)->p_osptr->p_ysptr = (p)->p_ysptr; \ + if ((p)->p_ysptr) \ + (p)->p_ysptr->p_osptr = (p)->p_osptr; \ + else \ + (p)->p_pptr->p_cptr = (p)->p_osptr; \ + } while (0) + +#define SET_LINKS(p) do { unsigned long flags; \ + save_flags(flags); cli(); \ + (p)->next_task = &init_task; \ + (p)->prev_task = init_task.prev_task; \ + init_task.prev_task->next_task = (p); \ + init_task.prev_task = (p); \ + restore_flags(flags); \ + (p)->p_ysptr = NULL; \ + if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \ + (p)->p_osptr->p_ysptr = p; \ + (p)->p_pptr->p_cptr = p; \ + } while (0) + +#define for_each_task(p) \ + for (p = &init_task ; (p = p->next_task) != &init_task ; ) + +#endif /* __KERNEL__ */ + +#endif diff --git a/linux/dev/include/linux/skbuff.h b/linux/dev/include/linux/skbuff.h new file mode 100644 index 0000000..c55e529 --- /dev/null +++ b/linux/dev/include/linux/skbuff.h @@ -0,0 +1,466 @@ +/* + * Definitions for the 'struct sk_buff' memory handlers. 
+ * + * Authors: + * Alan Cox, <gw4pts@gw4pts.ampr.org> + * Florian La Roche, <rzsfl@rz.uni-sb.de> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _LINUX_SKBUFF_H +#define _LINUX_SKBUFF_H + +#include <linux/config.h> +#include <linux/time.h> + +#include <asm/atomic.h> +#include <asm/types.h> + +#define CONFIG_SKB_CHECK 0 + +#define HAVE_ALLOC_SKB /* For the drivers to know */ +#define HAVE_ALIGNABLE_SKB /* Ditto 8) */ + + +#define FREE_READ 1 +#define FREE_WRITE 0 + +#define CHECKSUM_NONE 0 +#define CHECKSUM_HW 1 +#define CHECKSUM_UNNECESSARY 2 + +struct sk_buff_head +{ + struct sk_buff * next; + struct sk_buff * prev; + __u32 qlen; /* Must be same length as a pointer + for using debugging */ +#if CONFIG_SKB_CHECK + int magic_debug_cookie; +#endif +}; + + +struct sk_buff +{ + struct sk_buff * next; /* Next buffer in list */ + struct sk_buff * prev; /* Previous buffer in list */ + struct sk_buff_head * list; /* List we are on */ +#if CONFIG_SKB_CHECK + int magic_debug_cookie; +#endif + struct sk_buff *link3; /* Link for IP protocol level buffer chains */ + struct sock *sk; /* Socket we are owned by */ + unsigned long when; /* used to compute rtt's */ + struct timeval stamp; /* Time we arrived */ + struct linux_device *dev; /* Device we arrived on/are leaving by */ + union + { + struct tcphdr *th; + struct ethhdr *eth; + struct iphdr *iph; + struct udphdr *uh; + unsigned char *raw; + /* for passing file handles in a unix domain socket */ + void *filp; + } h; + + union + { + /* As yet incomplete physical layer views */ + unsigned char *raw; + struct ethhdr *ethernet; + } mac; + + struct iphdr *ip_hdr; /* For IPPROTO_RAW */ + unsigned long len; /* Length of actual data */ + unsigned long csum; /* Checksum */ + __u32 saddr; /* IP source address */ + __u32 daddr; /* IP target address */ + __u32 raddr; /* IP next hop address */ + __u32 seq; /* TCP sequence number */ + __u32 end_seq; /* seq [+ fin] [+ syn] + datalen */ + __u32 ack_seq; /* TCP ack sequence number */ + unsigned char proto_priv[16]; /* Protocol private data */ + volatile char acked, /* Are we acked ? */ + used, /* Are we in use ? */ + free, /* How to free this buffer */ + arp; /* Has IP/ARP resolution finished */ + unsigned char tries, /* Times tried */ + lock, /* Are we locked ? */ + localroute, /* Local routing asserted for this frame */ + pkt_type, /* Packet class */ + pkt_bridged, /* Tracker for bridging */ + ip_summed; /* Driver fed us an IP checksum */ +#define PACKET_HOST 0 /* To us */ +#define PACKET_BROADCAST 1 /* To all */ +#define PACKET_MULTICAST 2 /* To group */ +#define PACKET_OTHERHOST 3 /* To someone else */ + unsigned short users; /* User count - see datagram.c,tcp.c */ + unsigned short protocol; /* Packet protocol from driver. 
*/ + unsigned int truesize; /* Buffer size */ + + atomic_t count; /* reference count */ + struct sk_buff *data_skb; /* Link to the actual data skb */ + unsigned char *head; /* Head of buffer */ + unsigned char *data; /* Data head pointer */ + unsigned char *tail; /* Tail pointer */ + unsigned char *end; /* End pointer */ + void (*destructor)(struct sk_buff *); /* Destruct function */ + __u16 redirport; /* Redirect port */ +#ifdef MACH +#ifdef MACH_INCLUDE + ipc_port_t reply; + mach_msg_type_name_t reply_type; + vm_map_copy_t copy; +#else + void *reply; + unsigned reply_type; + void *copy; +#endif +#endif +}; + +#ifdef CONFIG_SKB_LARGE +#define SK_WMEM_MAX 65535 +#define SK_RMEM_MAX 65535 +#else +#define SK_WMEM_MAX 32767 +#define SK_RMEM_MAX 32767 +#endif + +#if CONFIG_SKB_CHECK +#define SK_FREED_SKB 0x0DE2C0DE +#define SK_GOOD_SKB 0xDEC0DED1 +#define SK_HEAD_SKB 0x12231298 +#endif + +#ifdef __KERNEL__ +/* + * Handling routines are only of interest to the kernel + */ +#include <linux/malloc.h> + +#include <asm/system.h> + +#if 0 +extern void print_skb(struct sk_buff *); +#endif +extern void kfree_skb(struct sk_buff *skb, int rw); +extern void skb_queue_head_init(struct sk_buff_head *list); +extern void skb_queue_head(struct sk_buff_head *list,struct sk_buff *buf); +extern void skb_queue_tail(struct sk_buff_head *list,struct sk_buff *buf); +extern struct sk_buff * skb_dequeue(struct sk_buff_head *list); +extern void skb_insert(struct sk_buff *old,struct sk_buff *newsk); +extern void skb_append(struct sk_buff *old,struct sk_buff *newsk); +extern void skb_unlink(struct sk_buff *buf); +extern __u32 skb_queue_len(struct sk_buff_head *list); +extern struct sk_buff * skb_peek_copy(struct sk_buff_head *list); +extern struct sk_buff * alloc_skb(unsigned int size, int priority); +extern struct sk_buff * dev_alloc_skb(unsigned int size); +extern void kfree_skbmem(struct sk_buff *skb); +extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority); +extern struct sk_buff * skb_copy(struct sk_buff *skb, int priority); +extern void skb_device_lock(struct sk_buff *skb); +extern void skb_device_unlock(struct sk_buff *skb); +extern void dev_kfree_skb(struct sk_buff *skb, int mode); +extern int skb_device_locked(struct sk_buff *skb); +extern unsigned char * skb_put(struct sk_buff *skb, int len); +extern unsigned char * skb_push(struct sk_buff *skb, int len); +extern unsigned char * skb_pull(struct sk_buff *skb, int len); +extern int skb_headroom(struct sk_buff *skb); +extern int skb_tailroom(struct sk_buff *skb); +extern void skb_reserve(struct sk_buff *skb, int len); +extern void skb_trim(struct sk_buff *skb, int len); + +extern __inline__ int skb_queue_empty(struct sk_buff_head *list) +{ + return (list->next == (struct sk_buff *) list); +} + +/* + * Peek an sk_buff. Unlike most other operations you _MUST_ + * be careful with this one. A peek leaves the buffer on the + * list and someone else may run off with it. 
For an interrupt + * type system cli() peek the buffer copy the data and sti(); + */ +extern __inline__ struct sk_buff *skb_peek(struct sk_buff_head *list_) +{ + struct sk_buff *list = ((struct sk_buff *)list_)->next; + if (list == (struct sk_buff *)list_) + list = NULL; + return list; +} + +/* + * Return the length of an sk_buff queue + */ + +extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_) +{ + return(list_->qlen); +} + +#if CONFIG_SKB_CHECK +extern int skb_check(struct sk_buff *skb,int,int, char *); +#define IS_SKB(skb) skb_check((skb), 0, __LINE__,__FILE__) +#define IS_SKB_HEAD(skb) skb_check((skb), 1, __LINE__,__FILE__) +#else +#define IS_SKB(skb) +#define IS_SKB_HEAD(skb) + +extern __inline__ void skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = (struct sk_buff *)list; + list->next = (struct sk_buff *)list; + list->qlen = 0; +} + +/* + * Insert an sk_buff at the start of a list. + * + * The "__skb_xxxx()" functions are the non-atomic ones that + * can only be called with interrupts disabled. + */ + +extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) +{ + struct sk_buff *prev, *next; + + newsk->list = list; + list->qlen++; + prev = (struct sk_buff *)list; + next = prev->next; + newsk->next = next; + newsk->prev = prev; + next->prev = newsk; + prev->next = newsk; +} + +extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) +{ + unsigned long flags; + + save_flags(flags); + cli(); + __skb_queue_head(list, newsk); + restore_flags(flags); +} + +/* + * Insert an sk_buff at the end of a list. + */ + +extern __inline__ void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) +{ + struct sk_buff *prev, *next; + + newsk->list = list; + list->qlen++; + next = (struct sk_buff *)list; + prev = next->prev; + newsk->next = next; + newsk->prev = prev; + next->prev = newsk; + prev->next = newsk; +} + +extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) +{ + unsigned long flags; + + save_flags(flags); + cli(); + __skb_queue_tail(list, newsk); + restore_flags(flags); +} + +/* + * Remove an sk_buff from a list. + */ + +extern __inline__ struct sk_buff *__skb_dequeue(struct sk_buff_head *list) +{ + struct sk_buff *next, *prev, *result; + + prev = (struct sk_buff *) list; + next = prev->next; + result = NULL; + if (next != prev) { + result = next; + next = next->next; + list->qlen--; + next->prev = prev; + prev->next = next; + result->next = NULL; + result->prev = NULL; + result->list = NULL; + } + return result; +} + +extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list) +{ + long flags; + struct sk_buff *result; + + save_flags(flags); + cli(); + result = __skb_dequeue(list); + restore_flags(flags); + return result; +} + +/* + * Insert a packet on a list. + */ + +extern __inline__ void __skb_insert(struct sk_buff *newsk, + struct sk_buff * prev, struct sk_buff *next, + struct sk_buff_head * list) +{ + newsk->next = next; + newsk->prev = prev; + next->prev = newsk; + prev->next = newsk; + newsk->list = list; + list->qlen++; +} + +/* + * Place a packet before a given packet in a list + */ +extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk) +{ + unsigned long flags; + + save_flags(flags); + cli(); + __skb_insert(newsk, old->prev, old, old->list); + restore_flags(flags); +} + +/* + * Place a packet after a given packet in a list. 
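+ *
+ * For instance (names are placeholders), a driver keeping fragments in
+ * arrival order on a queue could do
+ *
+ *	skb_append(prev_skb, new_skb);
+ *
+ * which links new_skb directly after prev_skb on whatever sk_buff_head
+ * prev_skb already sits on, with interrupts masked internally via
+ * save_flags()/cli().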
+ */ + +extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk) +{ + unsigned long flags; + + save_flags(flags); + cli(); + __skb_insert(newsk, old, old->next, old->list); + restore_flags(flags); +} + +/* + * remove sk_buff from list. _Must_ be called atomically, and with + * the list known.. + */ +extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) +{ + struct sk_buff * next, * prev; + + list->qlen--; + next = skb->next; + prev = skb->prev; + skb->next = NULL; + skb->prev = NULL; + skb->list = NULL; + next->prev = prev; + prev->next = next; +} + +/* + * Remove an sk_buff from its list. Works even without knowing the list it + * is sitting on, which can be handy at times. It also means that THE LIST + * MUST EXIST when you unlink. Thus a list must have its contents unlinked + * _FIRST_. + */ + +extern __inline__ void skb_unlink(struct sk_buff *skb) +{ + unsigned long flags; + + save_flags(flags); + cli(); + if(skb->list) + __skb_unlink(skb, skb->list); + restore_flags(flags); +} + +/* + * Add data to an sk_buff + */ +extern __inline__ unsigned char *skb_put(struct sk_buff *skb, int len) +{ + unsigned char *tmp=skb->tail; + skb->tail+=len; + skb->len+=len; + if(skb->tail>skb->end) + { + panic("skput:over: %d", len); + } + return tmp; +} + +extern __inline__ unsigned char *skb_push(struct sk_buff *skb, int len) +{ + skb->data-=len; + skb->len+=len; + if(skb->data<skb->head) + { + panic("skpush:under: %d", len); + } + return skb->data; +} + +extern __inline__ unsigned char * skb_pull(struct sk_buff *skb, int len) +{ + if(len > skb->len) + return NULL; + skb->data+=len; + skb->len-=len; + return skb->data; +} + +extern __inline__ int skb_headroom(struct sk_buff *skb) +{ + return skb->data-skb->head; +} + +extern __inline__ int skb_tailroom(struct sk_buff *skb) +{ + return skb->end-skb->tail; +} + +extern __inline__ void skb_reserve(struct sk_buff *skb, int len) +{ + skb->data+=len; + skb->tail+=len; +} + +extern __inline__ void skb_trim(struct sk_buff *skb, int len) +{ + if(skb->len>len) + { + skb->len=len; + skb->tail=skb->data+len; + } +} + +#endif + +extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err); +extern int datagram_select(struct sock *sk, int sel_type, select_table *wait); +extern void skb_copy_datagram(struct sk_buff *from, int offset, char *to,int size); +extern void skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to,int size); +extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb); + +#endif /* __KERNEL__ */ +#endif /* _LINUX_SKBUFF_H */ diff --git a/linux/dev/include/linux/threads.h b/linux/dev/include/linux/threads.h new file mode 100644 index 0000000..9d841c9 --- /dev/null +++ b/linux/dev/include/linux/threads.h @@ -0,0 +1 @@ +/* Dummy file. 
*/ diff --git a/linux/dev/include/linux/types.h b/linux/dev/include/linux/types.h new file mode 100644 index 0000000..eb086c2 --- /dev/null +++ b/linux/dev/include/linux/types.h @@ -0,0 +1,117 @@ +#ifndef _LINUX_TYPES_H +#define _LINUX_TYPES_H + +#include <linux/posix_types.h> +#include <asm/types.h> + +#ifndef __KERNEL_STRICT_NAMES + +typedef __kernel_fd_set fd_set; + +#ifndef MACH_INCLUDE +typedef __kernel_dev_t dev_t; +typedef __kernel_ino_t ino_t; +typedef __kernel_mode_t mode_t; +typedef __kernel_nlink_t nlink_t; +#endif + +#ifdef MACH_INCLUDE +#define off_t long +#else +typedef __kernel_off_t off_t; +#endif + +typedef __kernel_pid_t pid_t; + +#ifdef MACH_INCLUDE +#define uid_t unsigned short +#define gid_t unsigned short +#define daddr_t int +#else +typedef __kernel_uid_t uid_t; +typedef __kernel_gid_t gid_t; +typedef __kernel_daddr_t daddr_t; +#endif + +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +typedef __kernel_loff_t loff_t; +#endif + +/* + * The following typedefs are also protected by individual ifdefs for + * historical reasons: + */ +#ifndef _SIZE_T +#define _SIZE_T +#ifndef MACH_INCLUDE +typedef __kernel_size_t size_t; +#endif +#endif + +#ifndef _SSIZE_T +#define _SSIZE_T +#ifndef MACH_INCLUDE +typedef __kernel_ssize_t ssize_t; +#endif +#endif + +#ifndef _PTRDIFF_T +#define _PTRDIFF_T +typedef __kernel_ptrdiff_t ptrdiff_t; +#endif + +#ifndef _TIME_T +#define _TIME_T +#ifdef MACH_INCLUDE +#define time_t long +#else +typedef __kernel_time_t time_t; +#endif +#endif + +#ifndef _CLOCK_T +#define _CLOCK_T +typedef __kernel_clock_t clock_t; +#endif + +#ifndef _CADDR_T +#define _CADDR_T +#ifndef MACH_INCLUDE +typedef __kernel_caddr_t caddr_t; +#endif +#endif + +#ifndef MACH_INCLUDE +/* bsd */ +typedef unsigned char u_char; +typedef unsigned short u_short; +typedef unsigned int u_int; +typedef unsigned long u_long; +#endif + +/* sysv */ +typedef unsigned char unchar; +typedef unsigned short ushort; +typedef unsigned int uint; +typedef unsigned long ulong; + +#endif /* __KERNEL_STRICT_NAMES */ + +/* + * Below are truly Linux-specific types that should never collide with + * any application/library that wants linux/types.h. + */ + +struct ustat { + __kernel_daddr_t f_tfree; + __kernel_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +/* Yes, this is ugly. But that's why it is called glue code. */ + +#define _MACH_SA_SYS_TYPES_H_ + + +#endif /* _LINUX_TYPES_H */ diff --git a/linux/dev/init/main.c b/linux/dev/init/main.c new file mode 100644 index 0000000..6d85395 --- /dev/null +++ b/linux/dev/init/main.c @@ -0,0 +1,261 @@ +/* + * Linux initialization. + * + * Copyright (C) 1996 The University of Utah and the Computer Systems + * Laboratory at the University of Utah (CSL) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Author: Shantanu Goel, University of Utah CSL + */ + +/* + * linux/init/main.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +#include <sys/types.h> + +#include <mach/vm_param.h> +#include <mach/vm_prot.h> +#include <mach/machine.h> + +#include <vm/vm_page.h> +#include <kern/kalloc.h> + +#include <machine/spl.h> +#include <machine/pmap.h> +#include <machine/vm_param.h> +#include <machine/model_dep.h> + +#define MACH_INCLUDE +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/ioport.h> +#include <linux/string.h> +#include <linux/pci.h> +#include <linux/dev/glue/glue.h> + +#include <asm/system.h> +#include <asm/io.h> + +/* + * Timing loop count. + */ +unsigned long loops_per_sec = 1; + +#if defined(__SMP__) && defined(__i386__) +unsigned long smp_loops_per_tick = 1000000; +#endif + +/* + * End of physical memory. + */ +unsigned long high_memory; + +/* + * Flag to indicate auto-configuration is in progress. + */ +int linux_auto_config = 1; + +/* + * Hard drive parameters obtained from the BIOS. + */ +struct drive_info_struct +{ + char dummy[32]; +} drive_info; + +/* + * Forward declarations. + */ +static void calibrate_delay (void); + +/* + * Amount of contiguous memory to allocate for initialization. + */ +#define CONTIG_ALLOC (512 * 1024) + +/* + * Initialize Linux drivers. + */ +void +linux_init (void) +{ + int addr; + unsigned long memory_start, memory_end; + vm_page_t pages; + + /* + * Initialize memory size. + */ + high_memory = vm_page_seg_end(VM_PAGE_SEL_DIRECTMAP); + init_IRQ (); + linux_sched_init (); + + /* + * Set loop count. + */ + calibrate_delay (); + + /* + * Initialize drive info. + */ + addr = *((unsigned *) phystokv (0x104)); + memcpy (&drive_info, + (void *) ((addr & 0xffff) + ((addr >> 12) & 0xffff0)), 16); + addr = *((unsigned *) phystokv (0x118)); + memcpy ((char *) &drive_info + 16, + (void *) ((addr & 0xffff) + ((addr >> 12) & 0xffff0)), 16); + + /* + * Initialize Linux memory allocator. + */ + linux_kmem_init (); + + /* + * Allocate contiguous memory below 16 MB. + */ + memory_start = alloc_contig_mem (CONTIG_ALLOC, 16 * 1024 * 1024, 0, &pages); + if (memory_start == 0) + panic ("linux_init: alloc_contig_mem failed"); + memory_end = memory_start + CONTIG_ALLOC; + + /* + * Initialize PCI bus. + */ + memory_start = pci_init (memory_start, memory_end); + + if (memory_start > memory_end) + panic ("linux_init: ran out memory"); + + /* + * Initialize devices. + */ +#ifdef CONFIG_INET + linux_net_emulation_init (); +#endif + + device_setup (); + +#ifdef CONFIG_PCMCIA + /* + * Initialize pcmcia. + */ + pcmcia_init (); +#endif + + restore_IRQ (); + + linux_auto_config = 0; +} + +#ifndef NBPW +#define NBPW 32 +#endif + +/* + * Allocate contiguous memory with the given constraints. + */ +unsigned long +alloc_contig_mem (unsigned size, unsigned limit, + unsigned mask, vm_page_t * pages) +{ + vm_page_t p; + + p = vm_page_grab_contig(size, VM_PAGE_SEL_DMA); + + if (p == NULL) + return 0; + + if (pages) + *pages = p; + + return phystokv(vm_page_to_pa(p)); +} + +/* + * Free memory allocated by alloc_contig_mem. + */ +void +free_contig_mem (vm_page_t pages, unsigned size) +{ + vm_page_free_contig(pages, size); +} + +/* This is the number of bits of precision for the loops_per_second. Each + * bit takes on average 1.5/HZ seconds. 
This (like the original) is a little + * better than 1% + */ +#define LPS_PREC 8 + +static void +calibrate_delay (void) +{ + int ticks; + int loopbit; + int lps_precision = LPS_PREC; + + loops_per_sec = (1 << 12); + +#ifndef MACH + printk ("Calibrating delay loop.. "); +#endif + while (loops_per_sec <<= 1) + { + /* wait for "start of" clock tick */ + ticks = jiffies; + while (ticks == jiffies) + /* nothing */ ; + /* Go .. */ + ticks = jiffies; + __delay (loops_per_sec); + ticks = jiffies - ticks; + if (ticks) + break; + } + + /* Do a binary approximation to get loops_per_second set to equal one clock + * (up to lps_precision bits) + */ + loops_per_sec >>= 1; + loopbit = loops_per_sec; + while (lps_precision-- && (loopbit >>= 1)) + { + loops_per_sec |= loopbit; + ticks = jiffies; + while (ticks == jiffies); + ticks = jiffies; + __delay (loops_per_sec); + if (jiffies != ticks) /* longer than 1 tick */ + loops_per_sec &= ~loopbit; + } + + /* finally, adjust loops per second in terms of seconds instead of clocks */ + loops_per_sec *= HZ; + /* Round the value and print it */ +#ifndef MACH + printk ("ok - %lu.%02lu BogoMIPS\n", + (loops_per_sec + 2500) / 500000, + ((loops_per_sec + 2500) / 5000) % 100); +#endif + +#if defined(__SMP__) && defined(__i386__) + smp_loops_per_tick = loops_per_sec / 400; +#endif +} diff --git a/linux/dev/init/version.c b/linux/dev/init/version.c new file mode 100644 index 0000000..1989483 --- /dev/null +++ b/linux/dev/init/version.c @@ -0,0 +1,32 @@ +/* + * linux/version.c + * + * Copyright (C) 1992 Theodore Ts'o + * + * May be freely distributed as part of Linux. + */ + +#define MACH_INCLUDE +#include <linux/config.h> +#include <linux/utsname.h> +#include <linux/version.h> +#include <linux/compile.h> + +/* make the "checkconfig" script happy: we really need to include config.h */ +#ifdef CONFIG_BOGUS +#endif + +#define version(a) Version_ ## a +#define version_string(a) version(a) + +int version_string (LINUX_VERSION_CODE) = 0; + +struct new_utsname system_utsname = +{ + UTS_SYSNAME, UTS_NODENAME, UTS_RELEASE, UTS_VERSION, + UTS_MACHINE, UTS_DOMAINNAME +}; + +const char *linux_banner = +"Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@" +LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n"; diff --git a/linux/dev/kernel/dma.c b/linux/dev/kernel/dma.c new file mode 100644 index 0000000..bbda4bb --- /dev/null +++ b/linux/dev/kernel/dma.c @@ -0,0 +1,109 @@ +/* $Id: dma.c,v 1.1 1999/04/26 05:49:35 tb Exp $ + * linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c. + * + * Written by Hennus Bergman, 1992. + * + * 1994/12/26: Changes by Alex Nash to fix a minor bug in /proc/dma. + * In the previous version the reported device could end up being wrong, + * if a device requested a DMA channel that was already in use. + * [It also happened to remove the sizeof(char *) == sizeof(int) + * assumption introduced because of those /proc/dma patches. -- Hennus] + */ + +#define MACH_INCLUDE +#include <linux/kernel.h> +#include <linux/errno.h> +#include <asm/dma.h> +#include <asm/system.h> + + +/* A note on resource allocation: + * + * All drivers needing DMA channels, should allocate and release them + * through the public routines `request_dma()' and `free_dma()'. + * + * In order to avoid problems, all processes should allocate resources in + * the same sequence and release them in the reverse order. + * + * So, when allocating DMAs and IRQs, first allocate the IRQ, then the DMA. 
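+ * A typical probe sequence would therefore be (the device name, irq,
+ * dmanr and my_intr handler are placeholders):
+ *
+ *	if (request_irq(irq, my_intr, 0, "mydev", NULL))
+ *		return -EBUSY;
+ *	if (request_dma(dmanr, "mydev")) {
+ *		free_irq(irq, NULL);
+ *		return -EBUSY;
+ *	}
+ *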
+ * When releasing them, first release the DMA, then release the IRQ. + * If you don't, you may cause allocation requests to fail unnecessarily. + * This doesn't really matter now, but it will once we get real semaphores + * in the kernel. + */ + + + +/* Channel n is busy iff dma_chan_busy[n].lock != 0. + * DMA0 used to be reserved for DRAM refresh, but apparently not any more... + * DMA4 is reserved for cascading. + */ + +struct dma_chan +{ + int lock; + const char *device_id; +}; + +static struct dma_chan dma_chan_busy[MAX_DMA_CHANNELS] = +{ + { 0, 0 }, + { 0, 0 }, + { 0, 0 }, + { 0, 0 }, + { 1, "cascade" }, + { 0, 0 }, + { 0, 0 }, + { 0, 0 } +}; + +#ifndef MACH +int +get_dma_list (char *buf) +{ + int i, len = 0; + + for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) + { + if (dma_chan_busy[i].lock) + { + len += linux_sprintf (buf+len, "%2d: %s\n", + i, + dma_chan_busy[i].device_id); + } + } + return len; +} /* get_dma_list */ +#endif + +int +request_dma (unsigned int dmanr, const char *device_id) +{ + if (dmanr >= MAX_DMA_CHANNELS) + return -EINVAL; + + if (xchg (&dma_chan_busy[dmanr].lock, 1) != 0) + return -EBUSY; + + dma_chan_busy[dmanr].device_id = device_id; + + /* old flag was 0, now contains 1 to indicate busy */ + return 0; +} /* request_dma */ + + +void +free_dma (unsigned int dmanr) +{ + if (dmanr >= MAX_DMA_CHANNELS) + { + printk ("Trying to free DMA%d\n", dmanr); + return; + } + + if (xchg (&dma_chan_busy[dmanr].lock, 0) == 0) + { + printk ("Trying to free free DMA%d\n", dmanr); + return; + } +} /* free_dma */ diff --git a/linux/dev/kernel/printk.c b/linux/dev/kernel/printk.c new file mode 100644 index 0000000..7c65d30 --- /dev/null +++ b/linux/dev/kernel/printk.c @@ -0,0 +1,83 @@ +/* + * Linux kernel print routine. + * Copyright (C) 1995 Shantanu Goel. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* + * linux/kernel/printk.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +#define MACH_INCLUDE +#include <stdarg.h> +#include <asm/system.h> +#include <kern/assert.h> +#include <kern/printf.h> +#include <device/cons.h> + +static char buf[2048]; + +#define DEFAULT_MESSAGE_LOGLEVEL 4 +#define DEFAULT_CONSOLE_LOGLEVEL 7 + +int console_loglevel = DEFAULT_CONSOLE_LOGLEVEL; + +int +printk (char *fmt, ...) 
+{ + va_list args; + int n; + unsigned long flags; + char *p, *msg, *buf_end; + static int msg_level = -1; + + save_flags (flags); + cli (); + va_start (args, fmt); + n = vsnprintf (buf + 3, sizeof (buf) - 3, fmt, args); + assert (n <= sizeof (buf) - 3); + buf_end = buf + 3 + n; + va_end (args); + for (p = buf + 3; p < buf_end; p++) + { + msg = p; + if (msg_level < 0) + { + if (p[0] != '<' || p[1] < '0' || p[1] > '7' || p[2] != '>') + { + p -= 3; + p[0] = '<'; + p[1] = DEFAULT_MESSAGE_LOGLEVEL + '0'; + p[2] = '>'; + } + else + msg += 3; + msg_level = p[1] - '0'; + } + for (; p < buf_end; p++) + if (*p == '\n') + break; + if (msg_level < console_loglevel) + while (msg <= p) + cnputc (*msg++); + if (*p == '\n') + msg_level = -1; + } + restore_flags (flags); + return n; +} diff --git a/linux/dev/kernel/resource.c b/linux/dev/kernel/resource.c new file mode 100644 index 0000000..ba107e8 --- /dev/null +++ b/linux/dev/kernel/resource.c @@ -0,0 +1,145 @@ +/* + * linux/kernel/resource.c + * + * Copyright (C) 1995 Linus Torvalds + * David Hinds + * + * Kernel io-region resource management + */ + +#include <sys/types.h> + +#define MACH_INCLUDE +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/ioport.h> + +#define IOTABLE_SIZE 128 + +typedef struct resource_entry_t +{ + u_long from, num; + const char *name; + struct resource_entry_t *next; +} resource_entry_t; + +static resource_entry_t iolist = { 0, 0, "", NULL }; + +static resource_entry_t iotable[IOTABLE_SIZE]; + +/* + * This generates the report for /proc/ioports + */ +#ifndef MACH +int +get_ioport_list (char *buf) +{ + resource_entry_t *p; + int len = 0; + + for (p = iolist.next; (p) && (len < 4000); p = p->next) + len += linux_sprintf (buf+len, "%04lx-%04lx : %s\n", + p->from, p->from+p->num-1, p->name); + if (p) + len += linux_sprintf (buf+len, "4K limit reached!\n"); + return len; +} +#endif + +/* + * The workhorse function: find where to put a new entry + */ +static resource_entry_t * +find_gap (resource_entry_t *root, u_long from, u_long num) +{ + unsigned long flags; + resource_entry_t *p; + + if (from > from+num-1) + return NULL; + save_flags (flags); + cli (); + for (p = root; ; p = p->next) + { + if ((p != root) && (p->from+p->num-1 >= from)) + { + p = NULL; + break; + } + if ((p->next == NULL) || (p->next->from > from+num-1)) + break; + } + restore_flags (flags); + return p; +} + +/* + * Call this from the device driver to register the ioport region. + */ +void +request_region (unsigned int from, unsigned int num, const char *name) +{ + resource_entry_t *p; + int i; + + for (i = 0; i < IOTABLE_SIZE; i++) + if (iotable[i].num == 0) + break; + if (i == IOTABLE_SIZE) + printk ("warning: ioport table is full\n"); + else + { + p = find_gap (&iolist, from, num); + if (p == NULL) + return; + iotable[i].name = name; + iotable[i].from = from; + iotable[i].num = num; + iotable[i].next = p->next; + p->next = &iotable[i]; + return; + } +} + +/* + * Call this when the device driver is unloaded + */ +void +release_region (unsigned int from, unsigned int num) +{ + resource_entry_t *p, *q; + + for (p = &iolist; ; p = q) + { + q = p->next; + if (q == NULL) + break; + if ((q->from == from) && (q->num == num)) + { + q->num = 0; + p->next = q->next; + return; + } + } +} + +/* + * Call this to check the ioport region before probing + */ +int +check_region (unsigned int from, unsigned int num) +{ + return (find_gap (&iolist, from, num) == NULL) ? 
-EBUSY : 0; +} + +/* Called from init/main.c to reserve IO ports. */ +void +reserve_setup(char *str, int *ints) +{ + int i; + + for (i = 1; i < ints[0]; i += 2) + request_region (ints[i], ints[i+1], "reserved"); +} diff --git a/linux/dev/kernel/sched.c b/linux/dev/kernel/sched.c new file mode 100644 index 0000000..f87482e --- /dev/null +++ b/linux/dev/kernel/sched.c @@ -0,0 +1,630 @@ +/* + * Linux scheduling support. + * + * Copyright (C) 1996 The University of Utah and the Computer Systems + * Laboratory at the University of Utah (CSL) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Shantanu Goel, University of Utah CSL + */ + +/* + * linux/kernel/sched.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +#include <sys/types.h> +#include <machine/spl.h> + +#include <mach/boolean.h> + +#include <kern/thread.h> +#include <kern/sched_prim.h> +#include <kern/printf.h> + +#include <machine/machspl.h> + +#define MACH_INCLUDE +#include <linux/sched.h> +#include <linux/timer.h> +#include <linux/fs.h> +#include <linux/blkdev.h> +#include <linux/interrupt.h> +#include <linux/dev/glue/glue.h> + +#include <asm/system.h> +#include <asm/atomic.h> + +int securelevel = 0; + +static void timer_bh (void); + +DECLARE_TASK_QUEUE (tq_timer); +DECLARE_TASK_QUEUE (tq_immediate); +DECLARE_TASK_QUEUE (tq_scheduler); + +static struct wait_queue **auto_config_queue; + +static inline void +handle_soft_intr (void) +{ + if (bh_active & bh_mask) + { + intr_count = 1; + linux_soft_intr (); + intr_count = 0; + } +} + +static void +tqueue_bh (void) +{ + run_task_queue(&tq_timer); +} + +static void +immediate_bh (void) +{ + run_task_queue (&tq_immediate); +} + +void +add_wait_queue (struct wait_queue **q, struct wait_queue *wait) +{ + unsigned long flags; + + if (! linux_auto_config) + { + save_flags (flags); + cli (); + assert_wait ((event_t) q, FALSE); + restore_flags (flags); + return; + } + + if (auto_config_queue) + printf ("add_wait_queue: queue not empty\n"); + auto_config_queue = q; +} + +void +remove_wait_queue (struct wait_queue **q, struct wait_queue *wait) +{ + unsigned long flags; + + if (! 
linux_auto_config) + { + save_flags (flags); + thread_wakeup ((event_t) q); + restore_flags (flags); + return; + } + + auto_config_queue = NULL; +} + +static inline int +waking_non_zero (struct semaphore *sem) +{ + int ret; + unsigned long flags; + + get_buzz_lock (&sem->lock); + save_flags (flags); + cli (); + + if ((ret = (sem->waking > 0))) + sem->waking--; + + restore_flags (flags); + give_buzz_lock (&sem->lock); + return ret; +} + +void +__up (struct semaphore *sem) +{ + atomic_inc (&sem->waking); + wake_up (&sem->wait); +} + +int +__do_down (struct semaphore *sem, int task_state) +{ + unsigned long flags; + int ret = 0; + int s; + + if (!linux_auto_config) + { + save_flags (flags); + s = splhigh (); + for (;;) + { + if (waking_non_zero (sem)) + break; + + if (task_state == TASK_INTERRUPTIBLE && issig ()) + { + ret = -EINTR; + atomic_inc (&sem->count); + break; + } + + assert_wait ((event_t) &sem->wait, + task_state == TASK_INTERRUPTIBLE ? TRUE : FALSE); + splx (s); + schedule (); + s = splhigh (); + } + splx (s); + restore_flags (flags); + return ret; + } + + while (!waking_non_zero (sem)) + { + if (task_state == TASK_INTERRUPTIBLE && issig ()) + { + ret = -EINTR; + atomic_inc (&sem->count); + break; + } + schedule (); + } + + return ret; +} + +void +__down (struct semaphore *sem) +{ + __do_down(sem, TASK_UNINTERRUPTIBLE); +} + +int +__down_interruptible (struct semaphore *sem) +{ + return __do_down (sem, TASK_INTERRUPTIBLE); +} + +void +__sleep_on (struct wait_queue **q, int state) +{ + unsigned long flags; + + if (!q) + return; + save_flags (flags); + if (!linux_auto_config) + { + assert_wait ((event_t) q, state == TASK_INTERRUPTIBLE ? TRUE : FALSE); + sti (); + schedule (); + restore_flags (flags); + return; + } + + add_wait_queue (q, NULL); + sti (); + while (auto_config_queue) + schedule (); + restore_flags (flags); +} + +void +sleep_on (struct wait_queue **q) +{ + __sleep_on (q, TASK_UNINTERRUPTIBLE); +} + +void +interruptible_sleep_on (struct wait_queue **q) +{ + __sleep_on (q, TASK_INTERRUPTIBLE); +} + +void +wake_up (struct wait_queue **q) +{ + unsigned long flags; + + if (! linux_auto_config) + { + if (q != &wait_for_request) /* ??? by OKUJI Yoshinori. */ + { + save_flags (flags); + thread_wakeup ((event_t) q); + restore_flags (flags); + } + return; + } + + if (auto_config_queue == q) + auto_config_queue = NULL; +} + +void +__wait_on_buffer (struct buffer_head *bh) +{ + unsigned long flags; + + save_flags (flags); + if (! linux_auto_config) + { + while (1) + { + cli (); + run_task_queue (&tq_disk); + if (! buffer_locked (bh)) + break; + bh->b_wait = (struct wait_queue *) 1; + assert_wait ((event_t) bh, FALSE); + sti (); + schedule (); + } + restore_flags (flags); + return; + } + + sti (); + while (buffer_locked (bh)) + { + run_task_queue (&tq_disk); + schedule (); + } + restore_flags (flags); +} + +void +unlock_buffer (struct buffer_head *bh) +{ + unsigned long flags; + + save_flags (flags); + cli (); + clear_bit (BH_Lock, &bh->b_state); + if (bh->b_wait && ! linux_auto_config) + { + bh->b_wait = NULL; + thread_wakeup ((event_t) bh); + } + restore_flags (flags); +} + +void +schedule (void) +{ + if (intr_count) + printk ("Aiee: scheduling in interrupt %p\n", + __builtin_return_address (0)); + + handle_soft_intr (); + run_task_queue (&tq_scheduler); + + if (!linux_auto_config) + thread_block (0); +} + +void +linux_sched_init (void) +{ + /* + * Install software interrupt handlers. 
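+ *
+ * init_bh() records each handler in bh_base[] and enables its bit in
+ * bh_mask; the handler is then run from linux_soft_intr() once
+ * somebody marks it, e.g. linux_timer_intr() below does
+ *
+ *	mark_bh (TIMER_BH);
+ *
+ * so timer_bh() executes outside hard-interrupt context.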
+ */ + init_bh (TIMER_BH, timer_bh); + init_bh (TQUEUE_BH, tqueue_bh); + init_bh (IMMEDIATE_BH, immediate_bh); +} + +/* + * Linux timers. + * + * Copyright (C) 1996 The University of Utah and the Computer Systems + * Laboratory at the University of Utah (CSL) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Shantanu Goel, University of Utah CSL + */ + +unsigned long volatile jiffies = 0; + +/* + * Mask of active timers. + */ +unsigned long timer_active = 0; + +/* + * List of timeout routines. + */ +struct timer_struct timer_table[32]; + +#define TVN_BITS 6 +#define TVR_BITS 8 +#define TVN_SIZE (1 << TVN_BITS) +#define TVR_SIZE (1 << TVR_BITS) +#define TVN_MASK (TVN_SIZE - 1) +#define TVR_MASK (TVR_SIZE - 1) + +#define SLOW_BUT_DEBUGGING_TIMERS 0 + +struct timer_vec + { + int index; + struct timer_list *vec[TVN_SIZE]; + }; + +struct timer_vec_root + { + int index; + struct timer_list *vec[TVR_SIZE]; + }; + +static struct timer_vec tv5 = +{0}; +static struct timer_vec tv4 = +{0}; +static struct timer_vec tv3 = +{0}; +static struct timer_vec tv2 = +{0}; +static struct timer_vec_root tv1 = +{0}; + +static struct timer_vec *const tvecs[] = +{ + (struct timer_vec *) &tv1, &tv2, &tv3, &tv4, &tv5 +}; + +#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0])) + +static unsigned long timer_jiffies = 0; + +static inline void +insert_timer (struct timer_list *timer, struct timer_list **vec, int idx) +{ + if ((timer->next = vec[idx])) + vec[idx]->prev = timer; + vec[idx] = timer; + timer->prev = (struct timer_list *) &vec[idx]; +} + +static inline void +internal_add_timer (struct timer_list *timer) +{ + /* + * must be cli-ed when calling this + */ + unsigned long expires = timer->expires; + unsigned long idx = expires - timer_jiffies; + + if (idx < TVR_SIZE) + { + int i = expires & TVR_MASK; + insert_timer (timer, tv1.vec, i); + } + else if (idx < 1 << (TVR_BITS + TVN_BITS)) + { + int i = (expires >> TVR_BITS) & TVN_MASK; + insert_timer (timer, tv2.vec, i); + } + else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) + { + int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK; + insert_timer (timer, tv3.vec, i); + } + else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) + { + int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK; + insert_timer (timer, tv4.vec, i); + } + else if (expires < timer_jiffies) + { + /* can happen if you add a timer with expires == jiffies, + * or you set a timer to go off in the past + */ + insert_timer (timer, tv1.vec, tv1.index); + } + else if (idx < 0xffffffffUL) + { + int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK; + insert_timer (timer, tv5.vec, i); + } + else + { + /* Can only get here on architectures with 64-bit jiffies */ + timer->next = timer->prev = timer; + } +} + +void +add_timer (struct timer_list *timer) +{ + unsigned long flags; + + save_flags (flags); + cli (); +#if SLOW_BUT_DEBUGGING_TIMERS + if (timer->next || 
timer->prev) + { + printk ("add_timer() called with non-zero list from %p\n", + __builtin_return_address (0)); + goto out; + } +#endif + internal_add_timer (timer); +#if SLOW_BUT_DEBUGGING_TIMERS +out: +#endif + restore_flags (flags); +} + +static inline int +detach_timer (struct timer_list *timer) +{ + int ret = 0; + struct timer_list *next, *prev; + + next = timer->next; + prev = timer->prev; + if (next) + { + next->prev = prev; + } + if (prev) + { + ret = 1; + prev->next = next; + } + return ret; +} + +int +del_timer (struct timer_list *timer) +{ + int ret; + unsigned long flags; + + save_flags (flags); + cli (); + ret = detach_timer (timer); + timer->next = timer->prev = 0; + restore_flags (flags); + return ret; +} + +static inline void +run_old_timers (void) +{ + struct timer_struct *tp; + unsigned long mask; + + for (mask = 1, tp = timer_table + 0; mask; tp++, mask += mask) + { + if (mask > timer_active) + break; + if (!(mask & timer_active)) + continue; + if (tp->expires > jiffies) + continue; + timer_active &= ~mask; + tp->fn (); + sti (); + } +} + +static inline void +cascade_timers (struct timer_vec *tv) +{ + /* cascade all the timers from tv up one level */ + struct timer_list *timer; + + timer = tv->vec[tv->index]; + /* + * We are removing _all_ timers from the list, so we don't have to + * detach them individually, just clear the list afterwards. + */ + while (timer) + { + struct timer_list *tmp = timer; + timer = timer->next; + internal_add_timer (tmp); + } + tv->vec[tv->index] = NULL; + tv->index = (tv->index + 1) & TVN_MASK; +} + +static inline void +run_timer_list (void) +{ + cli (); + while ((long) (jiffies - timer_jiffies) >= 0) + { + struct timer_list *timer; + + if (!tv1.index) + { + int n = 1; + + do + { + cascade_timers (tvecs[n]); + } + while (tvecs[n]->index == 1 && ++n < NOOF_TVECS); + } + while ((timer = tv1.vec[tv1.index])) + { + void (*fn) (unsigned long) = timer->function; + unsigned long data = timer->data; + + detach_timer (timer); + timer->next = timer->prev = NULL; + sti (); + fn (data); + cli (); + } + ++timer_jiffies; + tv1.index = (tv1.index + 1) & TVR_MASK; + } + sti (); +} + +/* + * Timer software interrupt handler. + */ +static void +timer_bh (void) +{ + run_old_timers (); + run_timer_list (); +} + +#if 0 +int linux_timer_print = 0; +#endif + +/* + * Timer interrupt handler. + */ +void +linux_timer_intr (void) +{ + if (cpu_number() != master_cpu) + return; + + (*(unsigned long *) &jiffies)++; + mark_bh (TIMER_BH); + if (tq_timer) + mark_bh (TQUEUE_BH); +#if 0 + if (linux_timer_print) + printf ("linux_timer_intr: hello\n"); +#endif +} diff --git a/linux/dev/kernel/softirq.c b/linux/dev/kernel/softirq.c new file mode 100644 index 0000000..ac95a7d --- /dev/null +++ b/linux/dev/kernel/softirq.c @@ -0,0 +1,48 @@ +/* + * linux/kernel/softirq.c + * + * Copyright (C) 1992 Linus Torvalds + * + * do_bottom_half() runs at normal kernel priority: all interrupts + * enabled. do_bottom_half() is atomic with respect to itself: a + * bottom_half handler need not be re-entrant. 
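+ *
+ * Concretely, linux_soft_intr() below scans (bh_active & bh_mask) one
+ * bit at a time: with, say, bh_active == 0x05 and bh_mask == 0x01,
+ * only bh_base[0] is called, and its bh_active bit is cleared before
+ * the call so an interrupt can re-mark it while it runs.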
+ */ + +#define MACH_INCLUDE +#include <linux/ptrace.h> +#include <linux/interrupt.h> +#include <asm/system.h> + +#include <linux/dev/glue/glue.h> + +int bh_mask_count[32]; +unsigned int bh_active = 0; +unsigned int bh_mask = 0; +void (*bh_base[32]) (void); + +void +linux_soft_intr (void) +{ + unsigned int active; + unsigned int mask, left; + void (**bh) (void); + + sti (); + bh = bh_base; + active = bh_active & bh_mask; + for (mask = 1, left = ~0; left & active; bh++, mask += mask, left += left) + { + if (mask & active) + { + void (*fn) (void); + bh_active &= ~mask; + fn = *bh; + if (!fn) + goto bad_bh; + fn (); + } + } + return; +bad_bh: + printk ("linux_soft_intr:bad interrupt handler entry %08x\n", mask); +} diff --git a/linux/dev/lib/vsprintf.c b/linux/dev/lib/vsprintf.c new file mode 100644 index 0000000..541ec65 --- /dev/null +++ b/linux/dev/lib/vsprintf.c @@ -0,0 +1,354 @@ +/* + * linux/lib/vsprintf.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */ +/* + * Wirzenius wrote this portably, Torvalds fucked it up :-) + */ + +#include <sys/types.h> + +#define MACH_INCLUDE +#include <stdarg.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/ctype.h> + +unsigned long +simple_strtoul (const char *cp, char **endp, unsigned int base) +{ + unsigned long result = 0, value; + + if (!base) + { + base = 10; + if (*cp == '0') + { + base = 8; + cp++; + if ((*cp == 'x') && isxdigit (cp[1])) + { + cp++; + base = 16; + } + } + } + while (isxdigit (*cp) + && (value = isdigit (*cp) ? *cp - '0' + : (islower (*cp) ? toupper (*cp) : *cp) - 'A' + 10) < base) + { + result = result * base + value; + cp++; + } + if (endp) + *endp = (char *) cp; + return result; +} + +/* we use this so that we can do without the ctype library */ +#define is_digit(c) ((c) >= '0' && (c) <= '9') + +static int +skip_atoi (const char **s) +{ + int i = 0; + + while (is_digit (**s)) + i = i * 10 + *((*s)++) - '0'; + return i; +} + +#define ZEROPAD 1 /* pad with zero */ +#define SIGN 2 /* unsigned/signed long */ +#define PLUS 4 /* show plus */ +#define SPACE 8 /* space if plus */ +#define LEFT 16 /* left justified */ +#define SPECIAL 32 /* 0x */ +#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +#define do_div(n,base) ({ \ +int __res; \ +__res = ((unsigned long) n) % (unsigned) base; \ +n = ((unsigned long) n) / (unsigned) base; \ +__res; }) + +static char * +number (char *str, long num, int base, int size, int precision, int type) +{ + char c, sign, tmp[66]; + const char *digits = "0123456789abcdefghijklmnopqrstuvwxyz"; + int i; + + if (type & LARGE) + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + if (type & LEFT) + type &= ~ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & ZEROPAD) ? 
'0' : ' '; + sign = 0; + if (type & SIGN) + { + if (num < 0) + { + sign = '-'; + num = -num; + size--; + } + else if (type & PLUS) + { + sign = '+'; + size--; + } + else if (type & SPACE) + { + sign = ' '; + size--; + } + } + if (type & SPECIAL) + { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++] = '0'; + else + while (num != 0) + tmp[i++] = digits[do_div (num, base)]; + if (i > precision) + precision = i; + size -= precision; + if (!(type & (ZEROPAD + LEFT))) + while (size-- > 0) + *str++ = ' '; + if (sign) + *str++ = sign; + if (type & SPECIAL) + { + if (base == 8) + { + *str++ = '0'; + } + else if (base == 16) + { + *str++ = '0'; + *str++ = digits[33]; + } + } + if (!(type & LEFT)) + while (size-- > 0) + *str++ = c; + while (i < precision--) + *str++ = '0'; + while (i-- > 0) + *str++ = tmp[i]; + while (size-- > 0) + *str++ = ' '; + return str; +} + +int +linux_vsprintf (char *buf, const char *fmt, va_list args) +{ + int len; + unsigned long num; + int i, base; + char *str; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + * number of chars for from string + */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + + for (str = buf; *fmt; ++fmt) + { + if (*fmt != '%') + { + *str++ = *fmt; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) + { + case '-': + flags |= LEFT; + goto repeat; + case '+': + flags |= PLUS; + goto repeat; + case ' ': + flags |= SPACE; + goto repeat; + case '#': + flags |= SPECIAL; + goto repeat; + case '0': + flags |= ZEROPAD; + goto repeat; + } + + /* get field width */ + field_width = -1; + if (is_digit (*fmt)) + field_width = skip_atoi (&fmt); + else if (*fmt == '*') + { + ++fmt; + /* it's the next argument */ + field_width = va_arg (args, int); + if (field_width < 0) + { + field_width = -field_width; + flags |= LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') + { + ++fmt; + if (is_digit (*fmt)) + precision = skip_atoi (&fmt); + else if (*fmt == '*') + { + ++fmt; + /* it's the next argument */ + precision = va_arg (args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L') + { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) + { + case 'c': + if (!(flags & LEFT)) + while (--field_width > 0) + *str++ = ' '; + *str++ = (unsigned char) va_arg (args, int); + while (--field_width > 0) + *str++ = ' '; + continue; + + case 's': + s = va_arg (args, char *); + if (!s) + s = "<NULL>"; + + len = strnlen (s, precision); + + if (!(flags & LEFT)) + while (len < field_width--) + *str++ = ' '; + for (i = 0; i < len; ++i) + *str++ = *s++; + while (len < field_width--) + *str++ = ' '; + continue; + + case 'p': + if (field_width == -1) + { + field_width = 2 * sizeof (void *); + flags |= ZEROPAD; + } + str = number (str, + (unsigned long) va_arg (args, void *), 16, + field_width, precision, flags); + continue; + + + case 'n': + if (qualifier == 'l') + { + long *ip = va_arg (args, long *); + *ip = (str - buf); + } + else + { + int *ip = va_arg (args, int *); + *ip = (str - buf); + } + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 
'i': + flags |= SIGN; + case 'u': + break; + + default: + if (*fmt != '%') + *str++ = '%'; + if (*fmt) + *str++ = *fmt; + else + --fmt; + continue; + } + if (qualifier == 'l') + num = va_arg (args, unsigned long); + else if (qualifier == 'h') + if (flags & SIGN) + num = (short) va_arg (args, int); + else + num = (unsigned short) va_arg (args, unsigned int); + else if (flags & SIGN) + num = va_arg (args, int); + else + num = va_arg (args, unsigned int); + str = number (str, num, base, field_width, precision, flags); + } + *str = '\0'; + return str - buf; +} + +int +linux_sprintf (char *buf, const char *fmt,...) +{ + va_list args; + int i; + + va_start (args, fmt); + i = linux_vsprintf (buf, fmt, args); + va_end (args); + return i; +} diff --git a/linux/dev/net/core/dev.c b/linux/dev/net/core/dev.c new file mode 100644 index 0000000..cbdf8cc --- /dev/null +++ b/linux/dev/net/core/dev.c @@ -0,0 +1,1648 @@ +/* + * NET3 Protocol independent device support routines. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Derived from the non IP parts of dev.c 1.0.19 + * Authors: Ross Biro, <bir7@leland.Stanford.Edu> + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Mark Evans, <evansmp@uhura.aston.ac.uk> + * + * Additional Authors: + * Florian la Roche <rzsfl@rz.uni-sb.de> + * Alan Cox <gw4pts@gw4pts.ampr.org> + * David Hinds <dhinds@allegro.stanford.edu> + * + * Changes: + * Alan Cox : device private ioctl copies fields back. + * Alan Cox : Transmit queue code does relevant stunts to + * keep the queue safe. + * Alan Cox : Fixed double lock. + * Alan Cox : Fixed promisc NULL pointer trap + * ???????? : Support the full private ioctl range + * Alan Cox : Moved ioctl permission check into drivers + * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI + * Alan Cox : 100 backlog just doesn't cut it when + * you start doing multicast video 8) + * Alan Cox : Rewrote net_bh and list manager. + * Alan Cox : Fix ETH_P_ALL echoback lengths. + * Alan Cox : Took out transmit every packet pass + * Saved a few bytes in the ioctl handler + * Alan Cox : Network driver sets packet type before calling netif_rx. Saves + * a function call a packet. + * Alan Cox : Hashed net_bh() + * Richard Kooijman: Timestamp fixes. + * Alan Cox : Wrong field in SIOCGIFDSTADDR + * Alan Cox : Device lock protection. + * Alan Cox : Fixed nasty side effect of device close changes. + * Rudi Cilibrasi : Pass the right thing to set_mac_address() + * Dave Miller : 32bit quantity for the device lock to make it work out + * on a Sparc. + * Bjorn Ekwall : Added KERNELD hack. + * Alan Cox : Cleaned up the backlog initialise. + * Craig Metz : SIOCGIFCONF fix if space for under + * 1 device. + * Thomas Bogendoerfer : Return ENODEV for dev_open, if there + * is no device open function. + * Lawrence V. Stefani : Changed set MTU ioctl to not assume + * min MTU of 68 bytes for devices + * that have change MTU functions. 
+ * + */ + +#include <asm/segment.h> +#include <asm/system.h> +#include <asm/bitops.h> +#include <linux/config.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/socket.h> +#include <linux/sockios.h> +#include <linux/in.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/if_ether.h> +#include <linux/inet.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/notifier.h> +#include <net/ip.h> +#include <net/route.h> +#include <linux/skbuff.h> +#include <net/sock.h> +#include <net/arp.h> +#include <net/slhc.h> +#include <linux/proc_fs.h> +#include <linux/stat.h> +#include <net/br.h> +#ifdef CONFIG_NET_ALIAS +#include <linux/net_alias.h> +#endif +#ifdef CONFIG_KERNELD +#include <linux/kerneld.h> +#endif +#ifdef CONFIG_NET_RADIO +#include <linux/wireless.h> +#endif /* CONFIG_NET_RADIO */ + +#ifndef MACH +/* + * The list of packet types we will receive (as opposed to discard) + * and the routines to invoke. + */ + +struct packet_type *ptype_base[16]; +struct packet_type *ptype_all = NULL; /* Taps */ + +/* + * Device list lock + */ + +int dev_lockct=0; + +/* + * Our notifier list + */ + +struct notifier_block *netdev_chain=NULL; + +/* + * Device drivers call our routines to queue packets here. We empty the + * queue in the bottom half handler. + */ + +static struct sk_buff_head backlog; + +/* + * We don't overdo the queue or we will thrash memory badly. + */ + +static int backlog_size = 0; + +/* + * Return the lesser of the two values. + */ + +static __inline__ unsigned long min(unsigned long a, unsigned long b) +{ + return (a < b)? a : b; +} + + +/****************************************************************************************** + + Protocol management and registration routines + +*******************************************************************************************/ + +/* + * For efficiency + */ + +static int dev_nit=0; + +/* + * Add a protocol ID to the list. Now that the input handler is + * smarter we can dispense with all the messy stuff that used to be + * here. + */ + +void dev_add_pack(struct packet_type *pt) +{ + int hash; + if(pt->type==htons(ETH_P_ALL)) + { + dev_nit++; + pt->next=ptype_all; + ptype_all=pt; + } + else + { + hash=ntohs(pt->type)&15; + pt->next = ptype_base[hash]; + ptype_base[hash] = pt; + } +} + + +/* + * Remove a protocol ID from the list. + */ + +void dev_remove_pack(struct packet_type *pt) +{ + struct packet_type **pt1; + if(pt->type==htons(ETH_P_ALL)) + { + dev_nit--; + pt1=&ptype_all; + } + else + pt1=&ptype_base[ntohs(pt->type)&15]; + for(; (*pt1)!=NULL; pt1=&((*pt1)->next)) + { + if(pt==(*pt1)) + { + *pt1=pt->next; + return; + } + } + printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt); +} + +/***************************************************************************************** + + Device Interface Subroutines + +******************************************************************************************/ + +/* + * Find an interface by name. + */ + +struct device *dev_get(const char *name) +{ + struct device *dev; + + for (dev = dev_base; dev != NULL; dev = dev->next) + { + if (strcmp(dev->name, name) == 0) + return(dev); + } + return NULL; +} + +/* + * Find and possibly load an interface. 
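+ *
+ * dev_load() first tries dev_get(); only when the name is not yet
+ * registered, the caller is the superuser and (with CONFIG_NET_ALIAS)
+ * the name is not an alias such as "eth0:1" does it fall back to
+ * request_module(), letting KERNELD load the driver on demand.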
+ */ + +#ifdef CONFIG_KERNELD + +extern __inline__ void dev_load(const char *name) +{ + if(!dev_get(name) && suser()) { +#ifdef CONFIG_NET_ALIAS + const char *sptr; + + for (sptr=name ; *sptr ; sptr++) if(*sptr==':') break; + if (!(*sptr && *(sptr+1))) +#endif + request_module(name); + } +} + +#endif + +/* + * Prepare an interface for use. + */ + +int dev_open(struct device *dev) +{ + int ret = -ENODEV; + + /* + * Call device private open method + */ + if (dev->open) + ret = dev->open(dev); + + /* + * If it went open OK then set the flags + */ + + if (ret == 0) + { + dev->flags |= (IFF_UP | IFF_RUNNING); + /* + * Initialise multicasting status + */ + dev_mc_upload(dev); + notifier_call_chain(&netdev_chain, NETDEV_UP, dev); + } + return(ret); +} + + +/* + * Completely shutdown an interface. + */ + +int dev_close(struct device *dev) +{ + int ct=0; + + /* + * Call the device specific close. This cannot fail. + * Only if device is UP + */ + + if ((dev->flags & IFF_UP) && dev->stop) + dev->stop(dev); + + /* + * Device is now down. + */ + + dev->flags&=~(IFF_UP|IFF_RUNNING); + + /* + * Tell people we are going down + */ + notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev); + /* + * Flush the multicast chain + */ + dev_mc_discard(dev); + + /* + * Purge any queued packets when we down the link + */ + while(ct<DEV_NUMBUFFS) + { + struct sk_buff *skb; + while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL) + if(skb->free) + kfree_skb(skb,FREE_WRITE); + ct++; + } + return(0); +} + + +/* + * Device change register/unregister. These are not inline or static + * as we export them to the world. + */ + +int register_netdevice_notifier(struct notifier_block *nb) +{ + return notifier_chain_register(&netdev_chain, nb); +} + +int unregister_netdevice_notifier(struct notifier_block *nb) +{ + return notifier_chain_unregister(&netdev_chain,nb); +} + +/* + * Send (or queue for sending) a packet. + * + * IMPORTANT: When this is called to resend frames. The caller MUST + * already have locked the sk_buff. Apart from that we do the + * rest of the magic. + */ + +static void do_dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri) +{ + unsigned long flags; + struct sk_buff_head *list; + int retransmission = 0; /* used to say if the packet should go */ + /* at the front or the back of the */ + /* queue - front is a retransmit try */ + + if(pri>=0 && !skb_device_locked(skb)) + skb_device_lock(skb); /* Shove a lock on the frame */ +#if CONFIG_SKB_CHECK + IS_SKB(skb); +#endif + skb->dev = dev; + + /* + * Negative priority is used to flag a frame that is being pulled from the + * queue front as a retransmit attempt. It therefore goes back on the queue + * start on a failure. + */ + + if (pri < 0) + { + pri = -pri-1; + retransmission = 1; + } + +#ifdef CONFIG_NET_DEBUG + if (pri >= DEV_NUMBUFFS) + { + printk(KERN_WARNING "bad priority in dev_queue_xmit.\n"); + pri = 1; + } +#endif + + /* + * If the address has not been resolved. Call the device header rebuilder. + * This can cover all protocols and technically not just ARP either. + */ + + if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) { + return; + } + + /* + * + * If dev is an alias, switch to its main device. + * "arp" resolution has been made with alias device, so + * arp entries refer to alias, not main. + * + */ + +#ifdef CONFIG_NET_ALIAS + if (net_alias_is(dev)) + skb->dev = dev = net_alias_dev_tx(dev); +#endif + + /* + * If we are bridging and this is directly generated output + * pass the frame via the bridge. 
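+ * If br_tx_frame() accepts the frame the bridge now owns it and we
+ * simply return; otherwise it falls through to the normal per-device
+ * queueing below.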
+ */ + +#ifdef CONFIG_BRIDGE + if(skb->pkt_bridged!=IS_BRIDGED && br_stats.flags & BR_UP) + { + if(br_tx_frame(skb)) + return; + } +#endif + + list = dev->buffs + pri; + + save_flags(flags); + /* if this isn't a retransmission, use the first packet instead... */ + if (!retransmission) { + if (skb_queue_len(list)) { + /* avoid overrunning the device queue.. */ + if (skb_queue_len(list) > dev->tx_queue_len) { + dev_kfree_skb(skb, FREE_WRITE); + return; + } + } + + /* copy outgoing packets to any sniffer packet handlers */ + if (dev_nit) { + struct packet_type *ptype; + skb->stamp=xtime; + for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next) + { + /* Never send packets back to the socket + * they originated from - MvS (miquels@drinkel.ow.org) + */ + if ((ptype->dev == dev || !ptype->dev) && + ((struct sock *)ptype->data != skb->sk)) + { + struct sk_buff *skb2; + if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) + break; + /* FIXME?: Wrong when the hard_header_len + * is an upper bound. Is this even + * used anywhere? + */ + skb2->h.raw = skb2->data + dev->hard_header_len; + /* On soft header devices we + * yank the header before mac.raw + * back off. This is set by + * dev->hard_header(). + */ + if (dev->flags&IFF_SOFTHEADERS) + skb_pull(skb2,skb2->mac.raw-skb2->data); + skb2->mac.raw = skb2->data; + ptype->func(skb2, skb->dev, ptype); + } + } + } + + if (skb_queue_len(list)) { + cli(); + skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */ + __skb_queue_tail(list, skb); + skb = __skb_dequeue(list); + skb_device_lock(skb); /* New buffer needs locking down */ + restore_flags(flags); + } + } + if (dev->hard_start_xmit(skb, dev) == 0) { + /* + * Packet is now solely the responsibility of the driver + */ + return; + } + + /* + * Transmission failed, put skb back into a list. Once on the list it's safe and + * no longer device locked (it can be freed safely from the device queue) + */ + cli(); + skb_device_unlock(skb); + __skb_queue_head(list,skb); + restore_flags(flags); +} + +void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri) +{ + start_bh_atomic(); + do_dev_queue_xmit(skb, dev, pri); + end_bh_atomic(); +} + +/* + * Receive a packet from a device driver and queue it for the upper + * (protocol) levels. It always succeeds. This is the recommended + * interface to use. + */ + +void netif_rx(struct sk_buff *skb) +{ + static int dropping = 0; + + /* + * Any received buffers are un-owned and should be discarded + * when freed. These will be updated later as the frames get + * owners. + */ + + skb->sk = NULL; + skb->free = 1; + if(skb->stamp.tv_sec==0) + skb->stamp = xtime; + + /* + * Check that we aren't overdoing things. + */ + + if (!backlog_size) + dropping = 0; + else if (backlog_size > 300) + dropping = 1; + + if (dropping) + { + kfree_skb(skb, FREE_READ); + return; + } + + /* + * Add it to the "backlog" queue. + */ +#if CONFIG_SKB_CHECK + IS_SKB(skb); +#endif + skb_queue_tail(&backlog,skb); + backlog_size++; + + /* + * If any packet arrived, mark it for processing after the + * hardware interrupt returns. + */ + + mark_bh(NET_BH); + return; +} + +/* + * This routine causes all interfaces to try to send some data. 
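+ * It is called from net_bh() before the backlog is drained, and again
+ * afterwards when XMIT_AFTER is defined; dev_tint() feeds each
+ * non-busy device's queued buffers back to the hardware.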
+ */ + +static void dev_transmit(void) +{ + struct device *dev; + + for (dev = dev_base; dev != NULL; dev = dev->next) + { + if (dev->flags != 0 && !dev->tbusy) { + /* + * Kick the device + */ + dev_tint(dev); + } + } +} + + +/********************************************************************************** + + Receive Queue Processor + +***********************************************************************************/ + +/* + * When we are called the queue is ready to grab, the interrupts are + * on and hardware can interrupt and queue to the receive queue as we + * run with no problems. + * This is run as a bottom half after an interrupt handler that does + * mark_bh(NET_BH); + */ + +void net_bh(void) +{ + struct packet_type *ptype; + struct packet_type *pt_prev; + unsigned short type; + + /* + * Can we send anything now? We want to clear the + * decks for any more sends that get done as we + * process the input. This also minimises the + * latency on a transmit interrupt bh. + */ + + dev_transmit(); + + /* + * Any data left to process. This may occur because a + * mark_bh() is done after we empty the queue including + * that from the device which does a mark_bh() just after + */ + + /* + * While the queue is not empty.. + * + * Note that the queue never shrinks due to + * an interrupt, so we can do this test without + * disabling interrupts. + */ + + while (!skb_queue_empty(&backlog)) { + struct sk_buff * skb = backlog.next; + + /* + * We have a packet. Therefore the queue has shrunk + */ + cli(); + __skb_unlink(skb, &backlog); + backlog_size--; + sti(); + + +#ifdef CONFIG_BRIDGE + + /* + * If we are bridging then pass the frame up to the + * bridging code. If it is bridged then move on + */ + + if (br_stats.flags & BR_UP) + { + /* + * We pass the bridge a complete frame. This means + * recovering the MAC header first. + */ + + int offset=skb->data-skb->mac.raw; + cli(); + skb_push(skb,offset); /* Put header back on for bridge */ + if(br_receive_frame(skb)) + { + sti(); + continue; + } + /* + * Pull the MAC header off for the copy going to + * the upper layers. + */ + skb_pull(skb,offset); + sti(); + } +#endif + + /* + * Bump the pointer to the next structure. + * + * On entry to the protocol layer. skb->data and + * skb->h.raw point to the MAC and encapsulated data + */ + + skb->h.raw = skb->data; + + /* + * Fetch the packet protocol ID. + */ + + type = skb->protocol; + + /* + * We got a packet ID. Now loop over the "known protocols" + * list. There are two lists. The ptype_all list of taps (normally empty) + * and the main protocol list which is hashed perfectly for normal protocols. + */ + + pt_prev = NULL; + for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next) + { + if(!ptype->dev || ptype->dev == skb->dev) { + if(pt_prev) { + struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC); + if(skb2) + pt_prev->func(skb2,skb->dev, pt_prev); + } + pt_prev=ptype; + } + } + + for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next) + { + if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev)) + { + /* + * We already have a match queued. Deliver + * to it and then remember the new match + */ + if(pt_prev) + { + struct sk_buff *skb2; + + skb2=skb_clone(skb, GFP_ATOMIC); + + /* + * Kick the protocol handler. This should be fast + * and efficient code. + */ + + if(skb2) + pt_prev->func(skb2, skb->dev, pt_prev); + } + /* Remember the current last to do */ + pt_prev=ptype; + } + } /* End of protocol list loop */ + + /* + * Is there a last item to send to ? 
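+ * Every earlier match was given a clone; the last matching handler
+ * receives the original skb, which saves one copy on the common
+ * single-protocol path.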
+ */ + + if(pt_prev) + pt_prev->func(skb, skb->dev, pt_prev); + /* + * Has an unknown packet has been received ? + */ + + else + kfree_skb(skb, FREE_WRITE); + /* + * Again, see if we can transmit anything now. + * [Ought to take this out judging by tests it slows + * us down not speeds us up] + */ +#ifdef XMIT_EVERY + dev_transmit(); +#endif + } /* End of queue loop */ + + /* + * We have emptied the queue + */ + + /* + * One last output flush. + */ + +#ifdef XMIT_AFTER + dev_transmit(); +#endif +} + + +/* + * This routine is called when an device driver (i.e. an + * interface) is ready to transmit a packet. + */ + +void dev_tint(struct device *dev) +{ + int i; + unsigned long flags; + struct sk_buff_head * head; + + /* + * aliases do not transmit (for now :) ) + */ + +#ifdef CONFIG_NET_ALIAS + if (net_alias_is(dev)) return; +#endif + head = dev->buffs; + save_flags(flags); + cli(); + + /* + * Work the queues in priority order + */ + for(i = 0;i < DEV_NUMBUFFS; i++,head++) + { + + while (!skb_queue_empty(head)) { + struct sk_buff *skb; + + skb = head->next; + __skb_unlink(skb, head); + /* + * Stop anyone freeing the buffer while we retransmit it + */ + skb_device_lock(skb); + restore_flags(flags); + /* + * Feed them to the output stage and if it fails + * indicate they re-queue at the front. + */ + do_dev_queue_xmit(skb,dev,-i - 1); + /* + * If we can take no more then stop here. + */ + if (dev->tbusy) + return; + cli(); + } + } + restore_flags(flags); +} + + +/* + * Perform a SIOCGIFCONF call. This structure will change + * size shortly, and there is nothing I can do about it. + * Thus we will need a 'compatibility mode'. + */ + +static int dev_ifconf(char *arg) +{ + struct ifconf ifc; + struct ifreq ifr; + struct device *dev; + char *pos; + int len; + int err; + + /* + * Fetch the caller's info block. + */ + + err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf)); + if(err) + return err; + memcpy_fromfs(&ifc, arg, sizeof(struct ifconf)); + len = ifc.ifc_len; + pos = ifc.ifc_buf; + + /* + * We now walk the device list filling each active device + * into the array. + */ + + err=verify_area(VERIFY_WRITE,pos,len); + if(err) + return err; + + /* + * Loop over the interfaces, and write an info block for each. + */ + + for (dev = dev_base; dev != NULL; dev = dev->next) + { + if(!(dev->flags & IFF_UP)) /* Downed devices don't count */ + continue; + /* + * Have we run out of space here ? + */ + + if (len < sizeof(struct ifreq)) + break; + + memset(&ifr, 0, sizeof(struct ifreq)); + strcpy(ifr.ifr_name, dev->name); + (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family; + (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr; + + + /* + * Write this block to the caller's space. + */ + + memcpy_tofs(pos, &ifr, sizeof(struct ifreq)); + pos += sizeof(struct ifreq); + len -= sizeof(struct ifreq); + } + + /* + * All done. Write the updated control block back to the caller. + */ + + ifc.ifc_len = (pos - ifc.ifc_buf); + ifc.ifc_req = (struct ifreq *) ifc.ifc_buf; + memcpy_tofs(arg, &ifc, sizeof(struct ifconf)); + + /* + * Report how much was filled in + */ + + return(pos - arg); +} + + +/* + * This is invoked by the /proc filesystem handler to display a device + * in detail. + */ + +#ifdef CONFIG_PROC_FS +static int sprintf_stats(char *buffer, struct device *dev) +{ + struct enet_statistics *stats = (dev->get_stats ? 
dev->get_stats(dev): NULL); + int size; + + if (stats) + size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n", + dev->name, + stats->rx_packets, stats->rx_errors, + stats->rx_dropped + stats->rx_missed_errors, + stats->rx_fifo_errors, + stats->rx_length_errors + stats->rx_over_errors + + stats->rx_crc_errors + stats->rx_frame_errors, + stats->tx_packets, stats->tx_errors, stats->tx_dropped, + stats->tx_fifo_errors, stats->collisions, + stats->tx_carrier_errors + stats->tx_aborted_errors + + stats->tx_window_errors + stats->tx_heartbeat_errors); + else + size = sprintf(buffer, "%6s: No statistics available.\n", dev->name); + + return size; +} + +/* + * Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface + * to create /proc/net/dev + */ + +int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy) +{ + int len=0; + off_t begin=0; + off_t pos=0; + int size; + + struct device *dev; + + + size = sprintf(buffer, "Inter-| Receive | Transmit\n" + " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n"); + + pos+=size; + len+=size; + + + for (dev = dev_base; dev != NULL; dev = dev->next) + { + size = sprintf_stats(buffer+len, dev); + len+=size; + pos=begin+len; + + if(pos<offset) + { + len=0; + begin=pos; + } + if(pos>offset+length) + break; + } + + *start=buffer+(offset-begin); /* Start of wanted data */ + len-=(offset-begin); /* Start slop */ + if(len>length) + len=length; /* Ending slop */ + return len; +} +#endif /* CONFIG_PROC_FS */ + + +#ifdef CONFIG_NET_RADIO +#ifdef CONFIG_PROC_FS + +/* + * Print one entry of /proc/net/wireless + * This is a clone of /proc/net/dev (just above) + */ +static int +sprintf_wireless_stats(char * buffer, + struct device * dev) +{ + /* Get stats from the driver */ + struct iw_statistics *stats = (dev->get_wireless_stats ? + dev->get_wireless_stats(dev) : + (struct iw_statistics *) NULL); + int size; + + if(stats != (struct iw_statistics *) NULL) + size = sprintf(buffer, + "%6s: %02x %3d%c %3d%c %3d%c %5d %5d %5d\n", + dev->name, + stats->status, + stats->qual.qual, + stats->qual.updated & 1 ? '.' : ' ', + stats->qual.level, + stats->qual.updated & 2 ? '.' : ' ', + stats->qual.noise, + stats->qual.updated & 3 ? '.' : ' ', + stats->discard.nwid, + stats->discard.code, + stats->discard.misc); + else + size = 0; + + return size; +} + +/* + * Print info for /proc/net/wireless (print all entries) + * This is a clone of /proc/net/dev (just above) + */ +int +dev_get_wireless_info(char * buffer, + char ** start, + off_t offset, + int length, + int dummy) +{ + int len = 0; + off_t begin = 0; + off_t pos = 0; + int size; + + struct device * dev; + + size = sprintf(buffer, + "Inter-|sta| Quality | Discarded packets\n" + " face |tus|link level noise| nwid crypt misc\n"); + + pos+=size; + len+=size; + + + for(dev = dev_base; dev != NULL; dev = dev->next) + { + size = sprintf_wireless_stats(buffer+len, dev); + len+=size; + pos=begin+len; + + if(pos < offset) + { + len=0; + begin=pos; + } + if(pos > offset + length) + break; + } + + *start = buffer + (offset - begin); /* Start of wanted data */ + len -= (offset - begin); /* Start slop */ + if(len > length) + len = length; /* Ending slop */ + + return len; +} +#endif /* CONFIG_PROC_FS */ +#endif /* CONFIG_NET_RADIO */ + + +/* + * This checks bitmasks for the ioctl calls for devices. 
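+ *
+ * bad_mask() accepts only a netmask whose complement, in host order,
+ * has the form 2^n - 1, i.e. a contiguous run of one bits from the
+ * top of the word; it also rejects an address with bits set outside
+ * the mask.  For example 255.255.255.0 passes, 255.0.255.0 does not.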
+ */ + +static inline int bad_mask(unsigned long mask, unsigned long addr) +{ + if (addr & (mask = ~mask)) + return 1; + mask = ntohl(mask); + if (mask & (mask+1)) + return 1; + return 0; +} + +/* + * Perform the SIOCxIFxxx calls. + * + * The socket layer has seen an ioctl the address family thinks is + * for the device. At this point we get invoked to make a decision + */ + +static int dev_ifsioc(void *arg, unsigned int getset) +{ + struct ifreq ifr; + struct device *dev; + int ret; + + /* + * Fetch the caller's info block into kernel space + */ + + int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq)); + if(err) + return err; + + memcpy_fromfs(&ifr, arg, sizeof(struct ifreq)); + + /* + * See which interface the caller is talking about. + */ + + /* + * + * net_alias_dev_get(): dev_get() with added alias naming magic. + * only allow alias creation/deletion if (getset==SIOCSIFADDR) + * + */ + +#ifdef CONFIG_KERNELD + dev_load(ifr.ifr_name); +#endif + +#ifdef CONFIG_NET_ALIAS + if ((dev = net_alias_dev_get(ifr.ifr_name, getset == SIOCSIFADDR, &err, NULL, NULL)) == NULL) + return(err); +#else + if ((dev = dev_get(ifr.ifr_name)) == NULL) + return(-ENODEV); +#endif + switch(getset) + { + case SIOCGIFFLAGS: /* Get interface flags */ + ifr.ifr_flags = (dev->flags & ~IFF_SOFTHEADERS); + goto rarok; + + case SIOCSIFFLAGS: /* Set interface flags */ + { + int old_flags = dev->flags; + + if(securelevel>0) + ifr.ifr_flags&=~IFF_PROMISC; + /* + * We are not allowed to potentially close/unload + * a device until we get this lock. + */ + + dev_lock_wait(); + + /* + * Set the flags on our device. + */ + + dev->flags = (ifr.ifr_flags & ( + IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK | + IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING | + IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER + | IFF_MULTICAST)) | (dev->flags & (IFF_SOFTHEADERS|IFF_UP)); + /* + * Load in the correct multicast list now the flags have changed. + */ + + dev_mc_upload(dev); + + /* + * Have we downed the interface. We handle IFF_UP ourselves + * according to user attempts to set it, rather than blindly + * setting it. + */ + + if ((old_flags^ifr.ifr_flags)&IFF_UP) /* Bit is different ? */ + { + if(old_flags&IFF_UP) /* Gone down */ + ret=dev_close(dev); + else /* Come up */ + { + ret=dev_open(dev); + if(ret<0) + dev->flags&=~IFF_UP; /* Open failed */ + } + } + else + ret=0; + /* + * Load in the correct multicast list now the flags have changed. + */ + + dev_mc_upload(dev); + } + break; + + case SIOCGIFADDR: /* Get interface address (and family) */ + if(ifr.ifr_addr.sa_family==AF_UNSPEC) + { + memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN); + ifr.ifr_hwaddr.sa_family=dev->type; + goto rarok; + } + else + { + (*(struct sockaddr_in *) + &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr; + (*(struct sockaddr_in *) + &ifr.ifr_addr).sin_family = dev->family; + (*(struct sockaddr_in *) + &ifr.ifr_addr).sin_port = 0; + } + goto rarok; + + case SIOCSIFADDR: /* Set interface address (and family) */ + + /* + * BSDism. SIOCSIFADDR family=AF_UNSPEC sets the + * physical address. We can cope with this now. 
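+ * For any other family the protocol address is updated instead: when
+ * the interface is up the NETDEV_DOWN and NETDEV_UP notifiers are run
+ * around the change, and a default netmask and broadcast address are
+ * derived if none are set.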
+ */ + + if(ifr.ifr_addr.sa_family==AF_UNSPEC) + { + if(dev->set_mac_address==NULL) + return -EOPNOTSUPP; + if(securelevel>0) + return -EPERM; + ret=dev->set_mac_address(dev,&ifr.ifr_addr); + } + else + { + u32 new_pa_addr = (*(struct sockaddr_in *) + &ifr.ifr_addr).sin_addr.s_addr; + u16 new_family = ifr.ifr_addr.sa_family; + + if (new_family == dev->family && + new_pa_addr == dev->pa_addr) { + ret =0; + break; + } + if (dev->flags & IFF_UP) + notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev); + + /* + * if dev is an alias, must rehash to update + * address change + */ + +#ifdef CONFIG_NET_ALIAS + if (net_alias_is(dev)) + net_alias_dev_rehash(dev ,&ifr.ifr_addr); +#endif + dev->pa_addr = new_pa_addr; + dev->family = new_family; + +#ifdef CONFIG_INET + /* This is naughty. When net-032e comes out It wants moving into the net032 + code not the kernel. Till then it can sit here (SIGH) */ + if (!dev->pa_mask) + dev->pa_mask = ip_get_mask(dev->pa_addr); +#endif + if (!dev->pa_brdaddr) + dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask; + if (dev->flags & IFF_UP) + notifier_call_chain(&netdev_chain, NETDEV_UP, dev); + ret = 0; + } + break; + + case SIOCGIFBRDADDR: /* Get the broadcast address */ + (*(struct sockaddr_in *) + &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr; + (*(struct sockaddr_in *) + &ifr.ifr_broadaddr).sin_family = dev->family; + (*(struct sockaddr_in *) + &ifr.ifr_broadaddr).sin_port = 0; + goto rarok; + + case SIOCSIFBRDADDR: /* Set the broadcast address */ + dev->pa_brdaddr = (*(struct sockaddr_in *) + &ifr.ifr_broadaddr).sin_addr.s_addr; + ret = 0; + break; + + case SIOCGIFDSTADDR: /* Get the destination address (for point-to-point links) */ + (*(struct sockaddr_in *) + &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr; + (*(struct sockaddr_in *) + &ifr.ifr_dstaddr).sin_family = dev->family; + (*(struct sockaddr_in *) + &ifr.ifr_dstaddr).sin_port = 0; + goto rarok; + + case SIOCSIFDSTADDR: /* Set the destination address (for point-to-point links) */ + dev->pa_dstaddr = (*(struct sockaddr_in *) + &ifr.ifr_dstaddr).sin_addr.s_addr; + ret = 0; + break; + + case SIOCGIFNETMASK: /* Get the netmask for the interface */ + (*(struct sockaddr_in *) + &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask; + (*(struct sockaddr_in *) + &ifr.ifr_netmask).sin_family = dev->family; + (*(struct sockaddr_in *) + &ifr.ifr_netmask).sin_port = 0; + goto rarok; + + case SIOCSIFNETMASK: /* Set the netmask for the interface */ + { + unsigned long mask = (*(struct sockaddr_in *) + &ifr.ifr_netmask).sin_addr.s_addr; + ret = -EINVAL; + /* + * The mask we set must be legal. + */ + if (bad_mask(mask,0)) + break; + dev->pa_mask = mask; + ret = 0; + } + break; + + case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */ + + ifr.ifr_metric = dev->metric; + goto rarok; + + case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */ + dev->metric = ifr.ifr_metric; + ret=0; + break; + + case SIOCGIFMTU: /* Get the MTU of a device */ + ifr.ifr_mtu = dev->mtu; + goto rarok; + + case SIOCSIFMTU: /* Set the MTU of a device */ + + if (dev->change_mtu) + ret = dev->change_mtu(dev, ifr.ifr_mtu); + else + { + /* + * MTU must be positive. + */ + + if(ifr.ifr_mtu<68) + return -EINVAL; + + dev->mtu = ifr.ifr_mtu; + ret = 0; + } + break; + + case SIOCGIFMEM: /* Get the per device memory space. We can add this but currently + do not support it */ + ret = -EINVAL; + break; + + case SIOCSIFMEM: /* Set the per device memory buffer space. 
Not applicable in our case */ + ret = -EINVAL; + break; + + case SIOCGIFHWADDR: + memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN); + ifr.ifr_hwaddr.sa_family=dev->type; + goto rarok; + + case SIOCSIFHWADDR: + if(dev->set_mac_address==NULL) + return -EOPNOTSUPP; + if(securelevel > 0) + return -EPERM; + if(ifr.ifr_hwaddr.sa_family!=dev->type) + return -EINVAL; + ret=dev->set_mac_address(dev,&ifr.ifr_hwaddr); + break; + + case SIOCGIFMAP: + ifr.ifr_map.mem_start=dev->mem_start; + ifr.ifr_map.mem_end=dev->mem_end; + ifr.ifr_map.base_addr=dev->base_addr; + ifr.ifr_map.irq=dev->irq; + ifr.ifr_map.dma=dev->dma; + ifr.ifr_map.port=dev->if_port; + goto rarok; + + case SIOCSIFMAP: + if(dev->set_config==NULL) + return -EOPNOTSUPP; + return dev->set_config(dev,&ifr.ifr_map); + + case SIOCADDMULTI: + if(dev->set_multicast_list==NULL) + return -EINVAL; + if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC) + return -EINVAL; + dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1); + return 0; + + case SIOCDELMULTI: + if(dev->set_multicast_list==NULL) + return -EINVAL; + if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC) + return -EINVAL; + dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1); + return 0; + /* + * Unknown or private ioctl + */ + + default: + if((getset >= SIOCDEVPRIVATE) && + (getset <= (SIOCDEVPRIVATE + 15))) { + if(dev->do_ioctl==NULL) + return -EOPNOTSUPP; + ret=dev->do_ioctl(dev, &ifr, getset); + memcpy_tofs(arg,&ifr,sizeof(struct ifreq)); + break; + } + +#ifdef CONFIG_NET_RADIO + if((getset >= SIOCIWFIRST) && + (getset <= SIOCIWLAST)) + { + if(dev->do_ioctl==NULL) + return -EOPNOTSUPP; + /* Perform the ioctl */ + ret=dev->do_ioctl(dev, &ifr, getset); + /* If return args... */ + if(IW_IS_GET(getset)) + memcpy_tofs(arg, &ifr, + sizeof(struct ifreq)); + break; + } +#endif /* CONFIG_NET_RADIO */ + + ret = -EINVAL; + } + return(ret); +/* + * The load of calls that return an ifreq and ok (saves memory). + */ +rarok: + memcpy_tofs(arg, &ifr, sizeof(struct ifreq)); + return 0; +} + + +/* + * This function handles all "interface"-type I/O control requests. The actual + * 'doing' part of this is dev_ifsioc above. + */ + +int dev_ioctl(unsigned int cmd, void *arg) +{ + switch(cmd) + { + case SIOCGIFCONF: + (void) dev_ifconf((char *) arg); + return 0; + + /* + * Ioctl calls that can be done by all. + */ + + case SIOCGIFFLAGS: + case SIOCGIFADDR: + case SIOCGIFDSTADDR: + case SIOCGIFBRDADDR: + case SIOCGIFNETMASK: + case SIOCGIFMETRIC: + case SIOCGIFMTU: + case SIOCGIFMEM: + case SIOCGIFHWADDR: + case SIOCGIFSLAVE: + case SIOCGIFMAP: + return dev_ifsioc(arg, cmd); + + /* + * Ioctl calls requiring the power of a superuser + */ + + case SIOCSIFFLAGS: + case SIOCSIFADDR: + case SIOCSIFDSTADDR: + case SIOCSIFBRDADDR: + case SIOCSIFNETMASK: + case SIOCSIFMETRIC: + case SIOCSIFMTU: + case SIOCSIFMEM: + case SIOCSIFHWADDR: + case SIOCSIFMAP: + case SIOCSIFSLAVE: + case SIOCADDMULTI: + case SIOCDELMULTI: + if (!suser()) + return -EPERM; + return dev_ifsioc(arg, cmd); + + case SIOCSIFLINK: + return -EINVAL; + + /* + * Unknown or private ioctl. + */ + + default: + if((cmd >= SIOCDEVPRIVATE) && + (cmd <= (SIOCDEVPRIVATE + 15))) { + return dev_ifsioc(arg, cmd); + } +#ifdef CONFIG_NET_RADIO + if((cmd >= SIOCIWFIRST) && + (cmd <= SIOCIWLAST)) + { + if((IW_IS_SET(cmd)) && (!suser())) + return -EPERM; + return dev_ifsioc(arg, cmd); + } +#endif /* CONFIG_NET_RADIO */ + return -EINVAL; + } +} +#endif /* !MACH */ + + +/* + * Initialize the DEV module. 
At boot time this walks the device list and + * unhooks any devices that fail to initialise (normally hardware not + * present) and leaves us with a valid list of present and active devices. + * + */ +extern int lance_init(void); +extern int pi_init(void); +extern void sdla_setup(void); +extern int dlci_setup(void); + +int net_dev_init(void) +{ + struct device *dev, **dp; + + /* + * Initialise the packet receive queue. + */ + +#ifndef MACH + skb_queue_head_init(&backlog); +#endif + + /* + * The bridge has to be up before the devices + */ + +#ifdef CONFIG_BRIDGE + br_init(); +#endif + + /* + * This is Very Ugly(tm). + * + * Some devices want to be initialized early.. + */ +#if defined(CONFIG_PI) + pi_init(); +#endif +#if defined(CONFIG_PT) + pt_init(); +#endif +#if defined(CONFIG_DLCI) + dlci_setup(); +#endif +#if defined(CONFIG_SDLA) + sdla_setup(); +#endif + /* + * SLHC if present needs attaching so other people see it + * even if not opened. + */ +#if (defined(CONFIG_SLIP) && defined(CONFIG_SLIP_COMPRESSED)) \ + || defined(CONFIG_PPP) \ + || (defined(CONFIG_ISDN) && defined(CONFIG_ISDN_PPP)) + slhc_install(); +#endif + + /* + * Add the devices. + * If the call to dev->init fails, the dev is removed + * from the chain disconnecting the device until the + * next reboot. + */ + + dp = &dev_base; + while ((dev = *dp) != NULL) + { + int i; + for (i = 0; i < DEV_NUMBUFFS; i++) { + skb_queue_head_init(dev->buffs + i); + } + + if (dev->init && dev->init(dev)) + { + /* + * It failed to come up. Unhook it. + */ + *dp = dev->next; + } + else + { + dp = &dev->next; + } + } + +#ifdef CONFIG_PROC_FS + proc_net_register(&(struct proc_dir_entry) { + PROC_NET_DEV, 3, "dev", + S_IFREG | S_IRUGO, 1, 0, 0, + 0, &proc_net_inode_operations, + dev_get_info + }); +#endif + +#ifdef CONFIG_NET_RADIO +#ifdef CONFIG_PROC_FS + proc_net_register(&(struct proc_dir_entry) { + PROC_NET_WIRELESS, 8, "wireless", + S_IFREG | S_IRUGO, 1, 0, 0, + 0, &proc_net_inode_operations, + dev_get_wireless_info + }); +#endif /* CONFIG_PROC_FS */ +#endif /* CONFIG_NET_RADIO */ + + /* + * Initialise net_alias engine + * + * - register net_alias device notifier + * - register proc entries: /proc/net/alias_types + * /proc/net/aliases + */ + +#ifdef CONFIG_NET_ALIAS + net_alias_init(); +#endif + + init_bh(NET_BH, net_bh); + return 0; +} + +/* + * Change the flags of device DEV to FLAGS. + */ +int dev_change_flags (struct device *dev, short flags) +{ + if (securelevel > 0) + flags &= ~IFF_PROMISC; + + /* + * Set the flags on our device. + */ + + dev->flags = (flags & + (IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK | + IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING | + IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE + | IFF_MASTER | IFF_MULTICAST)) + | (dev->flags & (IFF_SOFTHEADERS|IFF_UP)); + + /* The flags are taken into account (multicast, promiscuous, ...) + in the set_multicast_list handler. */ + if ((dev->flags & IFF_UP) && dev->set_multicast_list != NULL) + dev->set_multicast_list (dev); + + return 0; +} + |
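A small, self-contained userland model of the masking step in dev_change_flags() above, for illustration only: the flag values, the change_flags() helper and the standalone main() are stand-ins rather than the kernel's definitions (those live in the Linux headers), and securelevel is modelled as an ordinary variable.

#include <stdio.h>

/* Illustrative stand-in values; the kernel's definitions live in <linux/if.h>. */
#define IFF_UP          0x0001
#define IFF_PROMISC     0x0100
#define IFF_SOFTHEADERS 0x4000

/* Bits a caller may toggle, collapsed into one mask for this sketch. */
#define IFF_SETTABLE    (0xffff & ~(IFF_UP | IFF_SOFTHEADERS))

static int securelevel = 1;

static unsigned short
change_flags (unsigned short cur, unsigned short req)
{
  if (securelevel > 0)
    req &= ~IFF_PROMISC;                       /* promiscuous mode is refused */
  return (req & IFF_SETTABLE)                  /* bits under the caller's control */
    | (cur & (IFF_SOFTHEADERS | IFF_UP));      /* bits the kernel preserves */
}

int
main (void)
{
  unsigned short cur = IFF_UP | IFF_SOFTHEADERS;
  unsigned short req = IFF_PROMISC;            /* ask only for promiscuous mode */

  /* Prints "old 0x4001 -> new 0x4001": the request is fully masked away. */
  printf ("old 0x%04x -> new 0x%04x\n", cur, change_flags (cur, req));
  return 0;
}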