aboutsummaryrefslogtreecommitdiff
path: root/i386/i386
diff options
context:
space:
mode:
Diffstat (limited to 'i386/i386')
-rw-r--r--i386/i386/.gitignore1
-rw-r--r--i386/i386/_setjmp.S63
-rw-r--r--i386/i386/apic.c453
-rw-r--r--i386/i386/apic.h337
-rw-r--r--i386/i386/ast.h47
-rw-r--r--i386/i386/ast_check.c56
-rw-r--r--i386/i386/ast_types.h36
-rw-r--r--i386/i386/copy_user.h100
-rw-r--r--i386/i386/cpu.h110
-rw-r--r--i386/i386/cpu_number.h119
-rw-r--r--i386/i386/cpuboot.S245
-rw-r--r--i386/i386/cswitch.S139
-rw-r--r--i386/i386/db_disasm.c1437
-rw-r--r--i386/i386/db_interface.c865
-rw-r--r--i386/i386/db_interface.h149
-rw-r--r--i386/i386/db_machdep.h105
-rw-r--r--i386/i386/db_trace.c586
-rw-r--r--i386/i386/db_trace.h33
-rw-r--r--i386/i386/debug.h73
-rw-r--r--i386/i386/debug_i386.c178
-rw-r--r--i386/i386/debug_trace.S56
-rw-r--r--i386/i386/eflags.h35
-rw-r--r--i386/i386/fpu.c948
-rw-r--r--i386/i386/fpu.h250
-rw-r--r--i386/i386/gdt.c166
-rw-r--r--i386/i386/gdt.h121
-rw-r--r--i386/i386/hardclock.c81
-rw-r--r--i386/i386/hardclock.h28
-rw-r--r--i386/i386/i386asm.sym194
-rw-r--r--i386/i386/idt-gen.h47
-rw-r--r--i386/i386/idt.c87
-rw-r--r--i386/i386/idt_inittab.S140
-rw-r--r--i386/i386/io_perm.c329
-rw-r--r--i386/i386/io_perm.h63
-rw-r--r--i386/i386/ipl.h83
-rw-r--r--i386/i386/irq.c73
-rw-r--r--i386/i386/irq.h31
-rw-r--r--i386/i386/ktss.c92
-rw-r--r--i386/i386/ktss.h33
-rw-r--r--i386/i386/kttd_interface.c574
-rw-r--r--i386/i386/kttd_machdep.h59
-rw-r--r--i386/i386/ldt.c117
-rw-r--r--i386/i386/ldt.h77
-rw-r--r--i386/i386/lock.h132
-rw-r--r--i386/i386/locore.S1603
-rw-r--r--i386/i386/locore.h98
-rw-r--r--i386/i386/loose_ends.c49
-rw-r--r--i386/i386/loose_ends.h33
-rw-r--r--i386/i386/mach_i386.srv27
-rw-r--r--i386/i386/mach_param.h31
-rw-r--r--i386/i386/machine_routines.h38
-rw-r--r--i386/i386/machine_task.c80
-rw-r--r--i386/i386/machspl.h29
-rw-r--r--i386/i386/model_dep.h68
-rw-r--r--i386/i386/mp_desc.c357
-rw-r--r--i386/i386/mp_desc.h98
-rw-r--r--i386/i386/msr.h56
-rw-r--r--i386/i386/pcb.c958
-rw-r--r--i386/i386/pcb.h90
-rw-r--r--i386/i386/percpu.c33
-rw-r--r--i386/i386/percpu.h98
-rw-r--r--i386/i386/phys.c187
-rw-r--r--i386/i386/pic.c262
-rw-r--r--i386/i386/pic.h191
-rw-r--r--i386/i386/pio.h61
-rw-r--r--i386/i386/pit.c140
-rw-r--r--i386/i386/pit.h98
-rw-r--r--i386/i386/pmap.h27
-rw-r--r--i386/i386/proc_reg.h407
-rw-r--r--i386/i386/sched_param.h40
-rw-r--r--i386/i386/seg.h264
-rw-r--r--i386/i386/setjmp.h44
-rw-r--r--i386/i386/smp.c199
-rw-r--r--i386/i386/smp.h34
-rw-r--r--i386/i386/spl.S264
-rw-r--r--i386/i386/spl.h78
-rw-r--r--i386/i386/strings.c96
-rw-r--r--i386/i386/task.h61
-rw-r--r--i386/i386/thread.h276
-rw-r--r--i386/i386/time_stamp.h30
-rw-r--r--i386/i386/trap.c675
-rw-r--r--i386/i386/trap.h71
-rw-r--r--i386/i386/tss.h109
-rw-r--r--i386/i386/user_ldt.c451
-rw-r--r--i386/i386/user_ldt.h50
-rw-r--r--i386/i386/vm_param.h200
-rw-r--r--i386/i386/xen.h412
-rw-r--r--i386/i386/xpr.h32
88 files changed, 17453 insertions, 0 deletions
diff --git a/i386/i386/.gitignore b/i386/i386/.gitignore
new file mode 100644
index 0000000..4520a2a
--- /dev/null
+++ b/i386/i386/.gitignore
@@ -0,0 +1 @@
+/i386asm.h
diff --git a/i386/i386/_setjmp.S b/i386/i386/_setjmp.S
new file mode 100644
index 0000000..efabeb6
--- /dev/null
+++ b/i386/i386/_setjmp.S
@@ -0,0 +1,63 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * C library -- _setjmp, _longjmp
+ *
+ * _longjmp(a,v)
+ * will generate a "return(v)" from
+ * the last call to
+ * _setjmp(a)
+ * by restoring registers from the stack,
+ * The previous signal state is NOT restored.
+ *
+ */
+
+#include <mach/machine/asm.h>
+
/*
 * int _setjmp(jmp_buf a)
 *
 * Save the callee-saved registers (%ebx, %esi, %edi, %ebp), the
 * caller's stack pointer and return address into the buffer, then
 * return 0.  Signal state is NOT saved.
 * Buffer layout: [0]=ebx [4]=esi [8]=edi [12]=ebp [16]=esp [20]=eip.
 */
ENTRY(_setjmp)
	movl	4(%esp),%ecx		/* fetch buffer */
	movl	%ebx,0(%ecx)
	movl	%esi,4(%ecx)
	movl	%edi,8(%ecx)
	movl	%ebp,12(%ecx)		/* save frame pointer of caller */
	popl	%edx			/* pop return address into %edx */
	movl	%esp,16(%ecx)		/* save stack pointer of caller */
	movl	%edx,20(%ecx)		/* save pc of caller */
	xorl	%eax,%eax		/* return value is 0 */
	jmp	*%edx			/* return to caller */
+
/*
 * void _longjmp(jmp_buf a, int v)
 *
 * Restore the register state saved by _setjmp(a) and resume at the
 * saved return address, making that _setjmp call return v.  A zero v
 * is coerced to 1 so the _setjmp site can always tell a longjmp
 * return apart from the initial return.
 */
ENTRY(_longjmp)
	movl	8(%esp),%eax		/* return(v) */
	movl	4(%esp),%ecx		/* fetch buffer */
	movl	0(%ecx),%ebx
	movl	4(%ecx),%esi
	movl	8(%ecx),%edi
	movl	12(%ecx),%ebp
	movl	16(%ecx),%esp		/* switch to the saved stack */
	orl	%eax,%eax		/* v == 0? */
	jnz	0f
	incl	%eax			/* force return value to 1 */
0:	jmp	*20(%ecx)		/* done, return.... */
diff --git a/i386/i386/apic.c b/i386/i386/apic.c
new file mode 100644
index 0000000..0b5be50
--- /dev/null
+++ b/i386/i386/apic.c
@@ -0,0 +1,453 @@
+/* apic.c - APIC controller management for Mach.
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Written by Almudena Garcia Jurado-Centurion
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#include <i386/apic.h>
+#include <i386/cpu.h>
+#include <i386at/idt.h>
+#include <string.h>
+#include <vm/vm_kern.h>
+#include <kern/printf.h>
+#include <kern/kalloc.h>
+
+/*
+ * Period of HPET timer in nanoseconds
+ */
+uint32_t hpet_period_nsec;
+
+/*
+ * This dummy structure is needed so that CPU_NUMBER can be called
+ * before the lapic pointer is initialized to point to the real Local Apic.
+ * It causes the apic_id to be faked as 0, which is the master processor.
+ */
+static ApicLocalUnit dummy_lapic = {0};
+volatile ApicLocalUnit* lapic = &dummy_lapic;
+
+/* This lookup table of [apic_id] -> kernel_id is initially populated with zeroes
+ * so every lookup results in master processor until real kernel ids are populated.
+ */
+int cpu_id_lut[UINT8_MAX + 1] = {0};
+
+ApicInfo apic_data;
+
+/*
+ * apic_data_init: initialize the apic_data structures to preliminary values.
+ * Reserve memory to the lapic list dynamic vector.
+ * Returns 0 if success, -1 if error.
+ */
+int
+apic_data_init(void)
+{
+ apic_data.cpu_lapic_list = NULL;
+ apic_data.ncpus = 0;
+ apic_data.nioapics = 0;
+ apic_data.nirqoverride = 0;
+
+ /* Reserve the vector memory for the maximum number of processors. */
+ apic_data.cpu_lapic_list = (uint16_t*) kalloc(NCPUS*sizeof(uint16_t));
+
+ /* If the memory reserve fails, return -1 to advice about the error. */
+ if (apic_data.cpu_lapic_list == NULL)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * apic_lapic_init: initialize lapic pointer to the memory common address.
+ * Receives as input a pointer to the virtual memory address, previously mapped in a page.
+ */
+void
+apic_lapic_init(ApicLocalUnit* lapic_ptr)
+{
+ lapic = lapic_ptr;
+}
+
+/*
+ * apic_add_cpu: add a new lapic/cpu entry to the cpu_lapic list.
+ * Receives as input the lapic's APIC ID.
+ */
+void
+apic_add_cpu(uint16_t apic_id)
+{
+ apic_data.cpu_lapic_list[apic_data.ncpus] = apic_id;
+ apic_data.ncpus++;
+}
+
+/*
+ * apic_add_ioapic: add a new ioapic entry to the ioapic list.
+ * Receives as input an ioapic_data structure, filled with the IOAPIC entry's data.
+ */
+void
+apic_add_ioapic(IoApicData ioapic)
+{
+ apic_data.ioapic_list[apic_data.nioapics] = ioapic;
+ apic_data.nioapics++;
+}
+
+/*
+ * apic_add_irq_override: add a new IRQ to the irq_override list.
+ * Receives as input an irq_override_data structure, filled with the IRQ entry's data.
+ */
+void
+apic_add_irq_override(IrqOverrideData irq_over)
+{
+ apic_data.irq_override_list[apic_data.nirqoverride] = irq_over;
+ apic_data.nirqoverride++;
+}
+
+IrqOverrideData *
+acpi_get_irq_override(uint8_t pin)
+{
+ int i;
+
+ for (i = 0; i < apic_data.nirqoverride; i++) {
+ if (apic_data.irq_override_list[i].irq == pin) {
+ return &apic_data.irq_override_list[i];
+ }
+ }
+ return NULL;
+}
+
+/*
+ * apic_get_cpu_apic_id: returns the apic_id of a cpu.
+ * Receives as input the kernel ID of a CPU.
+ */
+int
+apic_get_cpu_apic_id(int kernel_id)
+{
+ if (kernel_id >= NCPUS)
+ return -1;
+
+ return apic_data.cpu_lapic_list[kernel_id];
+}
+
+
+/*
+ * apic_get_cpu_kernel_id: returns the kernel_id of a cpu.
+ * Receives as input the APIC ID of a CPU.
+ */
+int
+apic_get_cpu_kernel_id(uint16_t apic_id)
+{
+ return cpu_id_lut[apic_id];
+}
+
/* apic_get_lapic: returns a reference to the common memory address for Local APIC.
 * Until apic_lapic_init runs this is the zero-filled dummy unit. */
volatile ApicLocalUnit*
apic_get_lapic(void)
{
    return lapic;
}
+
+/*
+ * apic_get_ioapic: returns the IOAPIC identified by its kernel ID.
+ * Receives as input the IOAPIC's Kernel ID.
+ * Returns a ioapic_data structure pointer with the IOAPIC's data.
+ */
+struct IoApicData *
+apic_get_ioapic(int kernel_id)
+{
+ if (kernel_id < MAX_IOAPICS)
+ return &apic_data.ioapic_list[kernel_id];
+ return NULL;
+}
+
/* apic_get_numcpus: returns the current number of cpus registered
 * via apic_add_cpu. */
uint8_t
apic_get_numcpus(void)
{
    return apic_data.ncpus;
}
+
/* apic_get_num_ioapics: returns the current number of ioapics registered
 * via apic_add_ioapic. */
uint8_t
apic_get_num_ioapics(void)
{
    return apic_data.nioapics;
}
+
+/* apic_get_total_gsis: returns the total number of GSIs in the system. */
+int
+apic_get_total_gsis(void)
+{
+ int id;
+ int gsis = 0;
+
+ for (id = 0; id < apic_get_num_ioapics(); id++)
+ gsis += apic_get_ioapic(id)->ngsis;
+
+ return gsis;
+}
+
+/*
+ * apic_get_current_cpu: returns the apic_id of current cpu.
+ */
+int
+apic_get_current_cpu(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ eax = 1;
+ ecx = 0;
+ cpuid(eax, ebx, ecx, edx);
+ return (ebx >> 24);
+}
+
+
+/*
+ * apic_refit_cpulist: adjust the size of cpu_lapic array to fit the real number of cpus
+ * instead the maximum number.
+ *
+ * Returns 0 if success, -1 if error.
+ */
+int apic_refit_cpulist(void)
+{
+ uint16_t* old_list = apic_data.cpu_lapic_list;
+ uint16_t* new_list = NULL;
+
+ if (old_list == NULL)
+ return -1;
+
+ new_list = (uint16_t*) kalloc(apic_data.ncpus*sizeof(uint16_t));
+
+ if (new_list == NULL)
+ return -1;
+
+ for (int i = 0; i < apic_data.ncpus; i++)
+ new_list[i] = old_list[i];
+
+ apic_data.cpu_lapic_list = new_list;
+ kfree((vm_offset_t) old_list, NCPUS*sizeof(uint16_t));
+
+ return 0;
+}
+
+/*
+ * apic_generate_cpu_id_lut: Generate lookup table of cpu kernel ids from apic ids
+ */
+void apic_generate_cpu_id_lut(void)
+{
+ int i, apic_id;
+
+ for (i = 0; i < apic_data.ncpus; i++) {
+ apic_id = apic_get_cpu_apic_id(i);
+ if (apic_id >= 0)
+ cpu_id_lut[apic_id] = i;
+ else
+ printf("apic_get_cpu_apic_id(%d) failed...\n", i);
+ }
+}
+
+/*
+ * apic_print_info: shows the list of Local APIC and IOAPIC.
+ * Shows each CPU and IOAPIC, with Its Kernel ID and APIC ID.
+ */
+void apic_print_info(void)
+{
+ int i;
+ int ncpus, nioapics;
+
+ ncpus = apic_get_numcpus();
+ nioapics = apic_get_num_ioapics();
+
+ uint16_t lapic_id;
+ uint16_t ioapic_id;
+
+ IoApicData *ioapic;
+
+ printf("CPUS:\n");
+ for (i = 0; i < ncpus; i++) {
+ lapic_id = apic_get_cpu_apic_id(i);
+ printf(" CPU %d - APIC ID %x - addr=0x%p\n", i, lapic_id, apic_get_lapic());
+ }
+
+ printf("IOAPICS:\n");
+ for (i = 0; i < nioapics; i++) {
+ ioapic = apic_get_ioapic(i);
+ if (!ioapic) {
+ printf("ERROR: invalid IOAPIC ID %x\n", i);
+ } else {
+ ioapic_id = ioapic->apic_id;
+ printf(" IOAPIC %d - APIC ID %x - addr=0x%p\n", i, ioapic_id, ioapic->ioapic);
+ }
+ }
+}
+
/*
 * apic_send_ipi: program the local APIC's Interrupt Command Register to
 * send an Inter-Processor Interrupt.  The parameters map one-to-one to
 * ICR fields; dest_id is the target APIC ID (ignored when a destination
 * shorthand other than NO_SHORTHAND is used).
 *
 * icr_high is written before icr_low: writing the low half is what
 * triggers the send, so the destination must already be in place.
 */
void apic_send_ipi(unsigned dest_shorthand, unsigned deliv_mode, unsigned dest_mode, unsigned level, unsigned trig_mode, unsigned vector, unsigned dest_id)
{
    IcrLReg icrl_values;
    IcrHReg icrh_values;

    /* Keep previous values and only overwrite known fields */
    icrl_values.r = lapic->icr_low.r;
    icrh_values.r = lapic->icr_high.r;

    icrl_values.destination_shorthand = dest_shorthand;
    icrl_values.delivery_mode = deliv_mode;
    icrl_values.destination_mode = dest_mode;
    icrl_values.level = level;
    icrl_values.trigger_mode = trig_mode;
    icrl_values.vector = vector;
    icrh_values.destination_field = dest_id;

    lapic->icr_high.r = icrh_values.r;
    lapic->icr_low.r = icrl_values.r;
}
+
/* lapic_enable: set the software-enable bit in the spurious-interrupt
 * vector register so the local APIC accepts interrupts. */
void
lapic_enable(void)
{
    lapic->spurious_vector.r |= LAPIC_ENABLE;
}
+
/* lapic_disable: clear the software-enable bit in the spurious-interrupt
 * vector register, softwaredisabling the local APIC. */
void
lapic_disable(void)
{
    lapic->spurious_vector.r &= ~LAPIC_ENABLE;
}
+
/*
 * lapic_setup: program the boot-time state of the current CPU's local APIC:
 * flat logical destination model targeting only this CPU, LINT0/LINT1 and
 * performance-monitor vectors masked (plus the timer on non-boot CPUs),
 * task priority 0 (accept everything), spurious vector set and directed
 * EOI enabled, error status cleared.  Runs with interrupts disabled.
 *
 * NOTE(review): every register is read into `dummy` before being written;
 * presumably this forces an MMIO read/write ordering some LAPICs need —
 * confirm before removing the reads.
 */
void
lapic_setup(void)
{
    unsigned long flags;
    int apic_id;
    volatile uint32_t dummy;

    cpu_intr_save(&flags);

    apic_id = apic_get_current_cpu();

    dummy = lapic->dest_format.r;
    lapic->dest_format.r = 0xffffffff; /* flat model */
    dummy = lapic->logical_dest.r;
    lapic->logical_dest.r = lapic->apic_id.r; /* target self */
    dummy = lapic->lvt_lint0.r;
    lapic->lvt_lint0.r = dummy | LAPIC_DISABLE;
    dummy = lapic->lvt_lint1.r;
    lapic->lvt_lint1.r = dummy | LAPIC_DISABLE;
    dummy = lapic->lvt_performance_monitor.r;
    lapic->lvt_performance_monitor.r = dummy | LAPIC_DISABLE;
    if (apic_id != 0)
      {
        /* Application processors: mask the timer too; only the BSP keeps it. */
        dummy = lapic->lvt_timer.r;
        lapic->lvt_timer.r = dummy | LAPIC_DISABLE;
      }
    dummy = lapic->task_pri.r;
    lapic->task_pri.r = 0;

    /* Enable LAPIC to send or receive IPI/SIPIs */
    dummy = lapic->spurious_vector.r;
    lapic->spurious_vector.r = IOAPIC_SPURIOUS_BASE
			     | LAPIC_ENABLE_DIRECTED_EOI;

    lapic->error_status.r = 0;

    cpu_intr_restore(flags);
}
+
/* lapic_eoi: signal end-of-interrupt to the local APIC; any write to the
 * EOI register completes the acknowledgement. */
void
lapic_eoi(void)
{
    lapic->eoi.r = 0;
}
+
+#define HPET32(x) *((volatile uint32_t *)((uint8_t *)hpet_addr + x))
+#define HPET_CAP_PERIOD 0x04
+#define HPET_CFG 0x10
+# define HPET_CFG_ENABLE (1 << 0)
+# define HPET_LEGACY_ROUTE (1 << 1)
+#define HPET_COUNTER 0xf0
+#define HPET_T0_CFG 0x100
+# define HPET_T0_32BIT_MODE (1 << 8)
+# define HPET_T0_VAL_SET (1 << 6)
+# define HPET_T0_TYPE_PERIODIC (1 << 3)
+# define HPET_T0_INT_ENABLE (1 << 2)
+#define HPET_T0_COMPARATOR 0x108
+
+#define FSEC_PER_NSEC 1000000
+#define NSEC_PER_USEC 1000
+
/* This function sets up the HPET timer to be in
 * 32 bit periodic mode and not generating any interrupts.
 * The timer counts upwards and when it reaches 0xffffffff it
 * wraps to zero. The timer ticks at a constant rate in nanoseconds which
 * is stored in hpet_period_nsec variable.
 */
void
hpet_init(void)
{
    uint32_t period;
    uint32_t val;

    assert(hpet_addr != 0);

    /* Find out how often the HPET ticks in nanoseconds.
       The capability register reports the tick period in femtoseconds. */
    period = HPET32(HPET_CAP_PERIOD);
    hpet_period_nsec = period / FSEC_PER_NSEC;
    printf("HPET ticks every %d nanoseconds\n", hpet_period_nsec);

    /* Disable HPET and legacy interrupt routing mode before reprogramming. */
    val = HPET32(HPET_CFG);
    val = val & ~(HPET_LEGACY_ROUTE | HPET_CFG_ENABLE);
    HPET32(HPET_CFG) = val;

    /* Clear the counter */
    HPET32(HPET_COUNTER) = 0;

    /* Set up 32 bit periodic timer with no interrupts */
    val = HPET32(HPET_T0_CFG);
    val = (val & ~HPET_T0_INT_ENABLE) | HPET_T0_32BIT_MODE | HPET_T0_TYPE_PERIODIC | HPET_T0_VAL_SET;
    HPET32(HPET_T0_CFG) = val;

    /* Set comparator to max so the full 32-bit range is used before wrap. */
    HPET32(HPET_T0_COMPARATOR) = 0xffffffff;

    /* Enable the HPET */
    HPET32(HPET_CFG) |= HPET_CFG_ENABLE;

    printf("HPET enabled\n");
}
+
+void
+hpet_udelay(uint32_t us)
+{
+ uint32_t start, now;
+ uint32_t max_delay_us = 0xffffffff / NSEC_PER_USEC;
+
+ if (us > max_delay_us) {
+ printf("HPET ERROR: Delay too long, %d usec, truncating to %d usec\n",
+ us, max_delay_us);
+ us = max_delay_us;
+ }
+
+ /* Convert us to HPET ticks */
+ us = (us * NSEC_PER_USEC) / hpet_period_nsec;
+
+ start = HPET32(HPET_COUNTER);
+ do {
+ now = HPET32(HPET_COUNTER);
+ } while (now - start < us);
+}
+
/*
 * hpet_mdelay: busy-wait for the given number of milliseconds.
 * Clamps the argument before converting so that ms * 1000 cannot
 * overflow uint32_t and silently produce a much shorter delay;
 * hpet_udelay then applies its own (smaller) truncation.
 */
void
hpet_mdelay(uint32_t ms)
{
    if (ms > UINT32_MAX / 1000)
        ms = UINT32_MAX / 1000;
    hpet_udelay(ms * 1000);
}
+
diff --git a/i386/i386/apic.h b/i386/i386/apic.h
new file mode 100644
index 0000000..9eef0d8
--- /dev/null
+++ b/i386/i386/apic.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _IMPS_APIC_
+#define _IMPS_APIC_
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
/* One local-APIC register: the hardware places each 32-bit register on a
   128-bit boundary, so pad each entry accordingly. */
typedef struct ApicReg {
	uint32_t r;	/* the actual register */
	uint32_t p[3];	/* pad to the next 128-bit boundary */
} ApicReg;

/* IOAPIC register window: registers are accessed indirectly by writing a
   register index to `select` and then reading/writing `window`. */
typedef struct ApicIoUnit {
	ApicReg select;
	ApicReg window;
	ApicReg unused[2];
	ApicReg eoi; /* write the vector you wish to EOI to this reg */
} ApicIoUnit;
+
/* One 64-bit IOAPIC redirection-table entry, as bit-fields: describes how
   a single interrupt pin is delivered. */
struct ioapic_route_entry {
	uint32_t vector : 8,
		delvmode : 3, /* 000=fixed 001=lowest 111=ExtInt */
		destmode : 1, /* 0=physical 1=logical */
		delvstatus : 1,
		polarity : 1, /* 0=activehigh 1=activelow */
		irr : 1,
		trigger : 1, /* 0=edge 1=level */
		mask : 1, /* 0=enabled 1=disabled */
		reserved1 : 15;
	uint32_t reserved2 : 24,
		dest : 8; /* destination APIC ID or logical MDA */
} __attribute__ ((packed));

/* View the same entry either as two raw 32-bit halves (the shape the
   IOAPIC register window wants) or as the structured fields above. */
union ioapic_route_entry_union {
	struct {
		uint32_t lo;
		uint32_t hi;
	};
	struct ioapic_route_entry both;
};
+
+
+/* Grateful to trasterlabs for this snippet */
+
+typedef union u_icr_low
+{
+ uint32_t value[4];
+ struct
+ {
+ uint32_t r; // FEE0 0300H - 4 bytes
+ unsigned :32; // FEE0 0304H
+ unsigned :32; // FEE0 0308H
+ unsigned :32; // FEE0 030CH
+ };
+ struct
+ {
+ unsigned vector: 8; /* Vector of interrupt. Lowest 8 bits of routine address */
+ unsigned delivery_mode : 3;
+ unsigned destination_mode: 1;
+ unsigned delivery_status: 1;
+ unsigned :1;
+ unsigned level: 1;
+ unsigned trigger_mode: 1;
+ unsigned remote_read_status: 2; /* Read-only field */
+ unsigned destination_shorthand: 2;
+ unsigned :12;
+ };
+} IcrLReg;
+
+typedef union u_icr_high
+{
+ uint32_t value[4];
+ struct
+ {
+ uint32_t r; // FEE0 0310H - 4 bytes
+ unsigned :32; // FEE0 0314H
+ unsigned :32; // FEE0 0318H
+ unsigned :32; // FEE0 031CH
+ };
+ struct
+ {
+ unsigned :24; // FEE0 0310H - 4 bytes
+ unsigned destination_field :8; /* APIC ID (in physical mode) or MDA (in logical) of destination processor */
+ };
+} IcrHReg;
+
+
+typedef enum e_icr_dest_shorthand
+{
+ NO_SHORTHAND = 0,
+ SELF = 1,
+ ALL_INCLUDING_SELF = 2,
+ ALL_EXCLUDING_SELF = 3
+} icr_dest_shorthand;
+
+typedef enum e_icr_deliv_mode
+{
+ FIXED = 0,
+ LOWEST_PRIORITY = 1,
+ SMI = 2,
+ NMI = 4,
+ INIT = 5,
+ STARTUP = 6,
+} icr_deliv_mode;
+
+typedef enum e_icr_dest_mode
+{
+ PHYSICAL = 0,
+ LOGICAL = 1
+} icr_dest_mode;
+
+typedef enum e_icr_deliv_status
+{
+ IDLE = 0,
+ SEND_PENDING = 1
+} icr_deliv_status;
+
+typedef enum e_icr_level
+{
+ DE_ASSERT = 0,
+ ASSERT = 1
+} icr_level;
+
+typedef enum e_irc_trigger_mode
+{
+ EDGE = 0,
+ LEVEL = 1
+} irc_trigger_mode;
+
+
+typedef struct ApicLocalUnit {
+ ApicReg reserved0; /* 0x000 */
+ ApicReg reserved1; /* 0x010 */
+ ApicReg apic_id; /* 0x020. Hardware ID of current processor */
+ ApicReg version; /* 0x030 */
+ ApicReg reserved4; /* 0x040 */
+ ApicReg reserved5; /* 0x050 */
+ ApicReg reserved6; /* 0x060 */
+ ApicReg reserved7; /* 0x070 */
+ ApicReg task_pri; /* 0x080 */
+ ApicReg arbitration_pri; /* 0x090 */
+ ApicReg processor_pri; /* 0x0a0 */
+ ApicReg eoi; /* 0x0b0 */
+ ApicReg remote; /* 0x0c0 */
+ ApicReg logical_dest; /* 0x0d0 */
+ ApicReg dest_format; /* 0x0e0 */
+ ApicReg spurious_vector; /* 0x0f0 */
+ ApicReg isr[8]; /* 0x100 */
+ ApicReg tmr[8]; /* 0x180 */
+ ApicReg irr[8]; /* 0x200 */
+ ApicReg error_status; /* 0x280 */
+ ApicReg reserved28[6]; /* 0x290 */
+ ApicReg lvt_cmci; /* 0x2f0 */
+ IcrLReg icr_low; /* 0x300. Store the information to send an IPI (Inter-processor Interrupt) */
+ IcrHReg icr_high; /* 0x310. Store the IPI destination */
+ ApicReg lvt_timer; /* 0x320 */
+ ApicReg lvt_thermal; /* 0x330 */
+ ApicReg lvt_performance_monitor; /* 0x340 */
+ ApicReg lvt_lint0; /* 0x350 */
+ ApicReg lvt_lint1; /* 0x360 */
+ ApicReg lvt_error; /* 0x370 */
+ ApicReg init_count; /* 0x380 */
+ ApicReg cur_count; /* 0x390 */
+ ApicReg reserved3a; /* 0x3a0 */
+ ApicReg reserved3b; /* 0x3b0 */
+ ApicReg reserved3c; /* 0x3c0 */
+ ApicReg reserved3d; /* 0x3d0 */
+ ApicReg divider_config; /* 0x3e0 */
+ ApicReg reserved3f; /* 0x3f0 */
+} ApicLocalUnit;
+
/* Description of one IOAPIC discovered at boot. */
typedef struct IoApicData {
	uint8_t apic_id;	/* hardware APIC ID */
	uint8_t ngsis;		/* number of GSIs (pins) this IOAPIC serves */
	uint32_t addr;		/* physical base address */
	uint32_t gsi_base;	/* first global system interrupt number */
	ApicIoUnit *ioapic;	/* mapped register window */
} IoApicData;

/* Flag bits for IrqOverrideData.flags. */
#define APIC_IRQ_OVERRIDE_POLARITY_MASK 1
#define APIC_IRQ_OVERRIDE_ACTIVE_LOW 2
#define APIC_IRQ_OVERRIDE_TRIGGER_MASK 4
#define APIC_IRQ_OVERRIDE_LEVEL_TRIGGERED 8

/* Interrupt-source override: maps a legacy bus IRQ to a GSI. */
typedef struct IrqOverrideData {
	uint8_t bus;	/* source bus; presumably 0 = ISA -- confirm against ACPI MADT */
	uint8_t irq;	/* source IRQ on that bus */
	uint32_t gsi;	/* global system interrupt it is routed to */
	uint16_t flags;	/* polarity/trigger flags, see above */
} IrqOverrideData;

#define MAX_IOAPICS 16
#define MAX_IRQ_OVERRIDE 24

/* Aggregated APIC topology collected during boot. */
typedef struct ApicInfo {
	uint8_t ncpus;		/* number of CPUs registered */
	uint8_t nioapics;	/* number of IOAPICs registered */
	int nirqoverride;	/* number of IRQ overrides registered */
	uint16_t* cpu_lapic_list;	/* [kernel id] -> APIC id */
	struct IoApicData ioapic_list[MAX_IOAPICS];
	struct IrqOverrideData irq_override_list[MAX_IRQ_OVERRIDE];
} ApicInfo;
+
+int apic_data_init(void);
+void apic_add_cpu(uint16_t apic_id);
+void apic_lapic_init(ApicLocalUnit* lapic_ptr);
+void apic_add_ioapic(struct IoApicData);
+void apic_add_irq_override(struct IrqOverrideData irq_over);
+void apic_send_ipi(unsigned dest_shorthand, unsigned deliv_mode, unsigned dest_mode, unsigned level, unsigned trig_mode, unsigned vector, unsigned dest_id);
+IrqOverrideData *acpi_get_irq_override(uint8_t gsi);
+int apic_get_cpu_apic_id(int kernel_id);
+int apic_get_cpu_kernel_id(uint16_t apic_id);
+volatile ApicLocalUnit* apic_get_lapic(void);
+struct IoApicData *apic_get_ioapic(int kernel_id);
+uint8_t apic_get_numcpus(void);
+uint8_t apic_get_num_ioapics(void);
+int apic_get_current_cpu(void);
+void apic_print_info(void);
+int apic_refit_cpulist(void);
+void apic_generate_cpu_id_lut(void);
+int apic_get_total_gsis(void);
+void picdisable(void);
+void lapic_eoi(void);
+void ioapic_irq_eoi(int pin);
+void lapic_setup(void);
+void lapic_disable(void);
+void lapic_enable(void);
+void lapic_enable_timer(void);
+void calibrate_lapic_timer(void);
+void ioapic_toggle(int pin, int mask);
+void ioapic_configure(void);
+
+void hpet_init(void);
+void hpet_udelay(uint32_t us);
+void hpet_mdelay(uint32_t ms);
+
+extern int timer_pin;
+extern void intnull(int unit);
+extern volatile ApicLocalUnit* lapic;
+extern int cpu_id_lut[];
+extern uint32_t *hpet_addr;
+
+#endif
+
+#define APIC_IO_UNIT_ID 0x00
+#define APIC_IO_VERSION 0x01
+# define APIC_IO_VERSION_SHIFT 0
+# define APIC_IO_ENTRIES_SHIFT 16
+#define APIC_IO_REDIR_LOW(int_pin) (0x10+(int_pin)*2)
+#define APIC_IO_REDIR_HIGH(int_pin) (0x11+(int_pin)*2)
+
+#define IMCR_SELECT 0x22
+#define IMCR_DATA 0x23
+#define MODE_IMCR 0x70
+# define IMCR_USE_PIC 0
+# define IMCR_USE_APIC 1
+
+#define LAPIC_LOW_PRIO 0x100
+#define LAPIC_NMI 0x400
+#define LAPIC_EXTINT 0x700
+#define LAPIC_LEVEL_TRIGGERED 0x8000
+
+#define LAPIC_ENABLE 0x100
+#define LAPIC_FOCUS 0x200
+#define LAPIC_ENABLE_DIRECTED_EOI 0x1000
+#define LAPIC_DISABLE 0x10000
+#define LAPIC_TIMER_PERIODIC 0x20000
+#define LAPIC_TIMER_DIVIDE_2 0
+#define LAPIC_TIMER_DIVIDE_4 1
+#define LAPIC_TIMER_DIVIDE_8 2
+#define LAPIC_TIMER_DIVIDE_16 3
+#define LAPIC_TIMER_BASEDIV 0x100000
+#define LAPIC_HAS_DIRECTED_EOI 0x1000000
+
+#define NINTR 64 /* Max 32 GSIs on each of two IOAPICs */
+#define IOAPIC_FIXED 0
+#define IOAPIC_PHYSICAL 0
+#define IOAPIC_LOGICAL 1
+#define IOAPIC_NMI 4
+#define IOAPIC_EXTINT 7
+#define IOAPIC_ACTIVE_HIGH 0
+#define IOAPIC_ACTIVE_LOW 1
+#define IOAPIC_EDGE_TRIGGERED 0
+#define IOAPIC_LEVEL_TRIGGERED 1
+#define IOAPIC_MASK_ENABLED 0
+#define IOAPIC_MASK_DISABLED 1
+
+#define APIC_MSR 0x1b
+#define APIC_MSR_BSP 0x100 /* Processor is a BSP */
+#define APIC_MSR_X2APIC 0x400 /* LAPIC is in x2APIC mode */
+#define APIC_MSR_ENABLE 0x800 /* LAPIC is enabled */
+
+/* Set or clear a bit in a 255-bit APIC mask register.
+ These registers are spread through eight 32-bit registers. */
+#define APIC_SET_MASK_BIT(reg, bit) \
+ ((reg)[(bit) >> 5].r |= 1 << ((bit) & 0x1f))
+#define APIC_CLEAR_MASK_BIT(reg, bit) \
+ ((reg)[(bit) >> 5].r &= ~(1 << ((bit) & 0x1f)))
+
+#ifndef __ASSEMBLER__
+
#ifdef APIC
/* Disable delivery of the given IRQ by masking its IOAPIC pin. */
static inline void mask_irq (unsigned int irq_nr) {
    ioapic_toggle(irq_nr, IOAPIC_MASK_DISABLED);
}

/* Re-enable delivery of the given IRQ by unmasking its IOAPIC pin. */
static inline void unmask_irq (unsigned int irq_nr) {
    ioapic_toggle(irq_nr, IOAPIC_MASK_ENABLED);
}
#endif
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /*_IMPS_APIC_*/
+
diff --git a/i386/i386/ast.h b/i386/i386/ast.h
new file mode 100644
index 0000000..7afaa41
--- /dev/null
+++ b/i386/i386/ast.h
@@ -0,0 +1,47 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_AST_H_
+#define _I386_AST_H_
+
+/*
+ * Machine-dependent AST file for machines with no hardware AST support.
+ *
+ * For the I386, we define AST_I386_FP to handle delayed
+ * floating-point exceptions. The FPU may interrupt on errors
+ * while the user is not running (in kernel or other thread running).
+ */
+
+#define AST_I386_FP 0x80000000
+
+#define MACHINE_AST_PER_THREAD AST_I386_FP
+
+
+/* Chain to the machine-independent header. */
+/* #include_next "ast.h" */
+
+
+#endif /* _I386_AST_H_ */
diff --git a/i386/i386/ast_check.c b/i386/i386/ast_check.c
new file mode 100644
index 0000000..61cd5e8
--- /dev/null
+++ b/i386/i386/ast_check.c
@@ -0,0 +1,56 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#if NCPUS > 1
+
+/*
+ * Handle signalling ASTs on other processors.
+ *
+ * Initial i386 implementation does nothing.
+ */
+
+#include <kern/ast.h>
+#include <kern/processor.h>
+#include <kern/smp.h>
+#include <machine/cpu_number.h>
+#include <machine/apic.h>
+
+/*
+ * Initialize for remote invocation of ast_check.
+ */
+void init_ast_check(const processor_t processor)
+{
+}
+
+/*
+ * Cause remote invocation of ast_check. Caller is at splsched().
+ */
+void cause_ast_check(const processor_t processor)
+{
+ smp_remote_ast(apic_get_cpu_apic_id(processor->slot_num));
+}
+
+#endif /* NCPUS > 1 */
diff --git a/i386/i386/ast_types.h b/i386/i386/ast_types.h
new file mode 100644
index 0000000..89e3182
--- /dev/null
+++ b/i386/i386/ast_types.h
@@ -0,0 +1,36 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_AST_TYPES_H_
+#define _I386_AST_TYPES_H_
+
+/*
+ * Data type for remote ast_check() invocation support. Currently
+ * not implemented. Do this first to avoid include problems.
+ */
+typedef int ast_check_t;
+
+#endif /* _I386_AST_TYPES_H_ */
diff --git a/i386/i386/copy_user.h b/i386/i386/copy_user.h
new file mode 100644
index 0000000..3d1c727
--- /dev/null
+++ b/i386/i386/copy_user.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2023 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef COPY_USER_H
+#define COPY_USER_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <machine/locore.h>
+#include <mach/message.h>
+
+/*
+ * The copyin_32to64() and copyout_64to32() routines are meant for data types
+ * that have different size in kernel and user space. They should be independent
+ * of endianness and hopefully can be reused in the future on other archs.
+ * These types are e.g.:
+ * - port names vs port pointers, on a 64-bit kernel
+ * - memory addresses, on a 64-bit kernel and 32-bit user
+ */
+
+static inline int copyin_32to64(const uint32_t *uaddr, uint64_t *kaddr)
+{
+ uint32_t rkaddr;
+ int ret;
+ ret = copyin(uaddr, &rkaddr, sizeof(uint32_t));
+ if (ret)
+ return ret;
+ *kaddr = rkaddr;
+ return 0;
+}
+
+static inline int copyout_64to32(const uint64_t *kaddr, uint32_t *uaddr)
+{
+ uint32_t rkaddr=*kaddr;
+ return copyout(&rkaddr, uaddr, sizeof(uint32_t));
+}
+
+static inline int copyin_address(const rpc_vm_offset_t *uaddr, vm_offset_t *kaddr)
+{
+#ifdef USER32
+ return copyin_32to64(uaddr, kaddr);
+#else /* USER32 */
+ return copyin(uaddr, kaddr, sizeof(*uaddr));
+#endif /* USER32 */
+}
+
+static inline int copyout_address(const vm_offset_t *kaddr, rpc_vm_offset_t *uaddr)
+{
+#ifdef USER32
+ return copyout_64to32(kaddr, uaddr);
+#else /* USER32 */
+ return copyout(kaddr, uaddr, sizeof(*kaddr));
+#endif /* USER32 */
+}
+
+static inline int copyin_port(const mach_port_name_t *uaddr, mach_port_t *kaddr)
+{
+#ifdef __x86_64__
+ return copyin_32to64(uaddr, kaddr);
+#else /* __x86_64__ */
+ return copyin(uaddr, kaddr, sizeof(*uaddr));
+#endif /* __x86_64__ */
+}
+
+static inline int copyout_port(const mach_port_t *kaddr, mach_port_name_t *uaddr)
+{
+#ifdef __x86_64__
+ return copyout_64to32(kaddr, uaddr);
+#else /* __x86_64__ */
+ return copyout(kaddr, uaddr, sizeof(*kaddr));
+#endif /* __x86_64__ */
+}
+
+#if defined(__x86_64__) && defined(USER32)
+/* For 32 bit userland, kernel and user land messages are not the same size. */
+size_t msg_usize(const mach_msg_header_t *kmsg);
+#else
+static inline size_t msg_usize(const mach_msg_header_t *kmsg)
+{
+ return kmsg->msgh_size;
+}
+#endif /* __x86_64__ && USER32 */
+
+#endif /* COPY_USER_H */
diff --git a/i386/i386/cpu.h b/i386/i386/cpu.h
new file mode 100644
index 0000000..1bf40dc
--- /dev/null
+++ b/i386/i386/cpu.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _X86_CPU_H
+#define _X86_CPU_H
+
+#include <kern/macros.h>
+
+/*
+ * EFLAGS register flags.
+ */
+#define CPU_EFL_ONE 0x00000002
+#define CPU_EFL_IF 0x00000200
+
+/*
+ * Return the content of the EFLAGS register.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline unsigned long
+cpu_get_eflags(void)
+{
+ unsigned long eflags;
+
+ asm volatile("pushf\n"
+ "pop %0\n"
+ : "=r" (eflags)
+ : : "memory");
+
+ return eflags;
+}
+
+/*
+ * Enable local interrupts.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline void
+cpu_intr_enable(void)
+{
+ asm volatile("sti" : : : "memory");
+}
+
+/*
+ * Disable local interrupts.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline void
+cpu_intr_disable(void)
+{
+ asm volatile("cli" : : : "memory");
+}
+
+/*
+ * Restore the content of the EFLAGS register, possibly enabling interrupts.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline void
+cpu_intr_restore(unsigned long flags)
+{
+ asm volatile("push %0\n"
+ "popf\n"
+ : : "r" (flags)
+ : "memory");
+}
+
+/*
+ * Disable local interrupts, returning the previous content of the EFLAGS
+ * register.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline void
+cpu_intr_save(unsigned long *flags)
+{
+ *flags = cpu_get_eflags();
+ cpu_intr_disable();
+}
+
+/*
+ * Return true if interrupts are enabled.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline int
+cpu_intr_enabled(void)
+{
+ unsigned long eflags;
+
+ eflags = cpu_get_eflags();
+ return (eflags & CPU_EFL_IF) ? 1 : 0;
+}
+
+#endif /* _X86_CPU_H */
diff --git a/i386/i386/cpu_number.h b/i386/i386/cpu_number.h
new file mode 100644
index 0000000..67c19e9
--- /dev/null
+++ b/i386/i386/cpu_number.h
@@ -0,0 +1,119 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Machine-dependent definitions for cpu identification.
+ *
+ */
+#ifndef _I386_CPU_NUMBER_H_
+#define _I386_CPU_NUMBER_H_
+
+#if NCPUS > 1
+
+#define MY(stm) %gs:PERCPU_##stm
+
+#ifdef __i386__
+#define CX(addr, reg) addr(,reg,4)
+#endif
+#ifdef __x86_64__
+#define CX(addr, reg) addr(,reg,8)
+#endif
+
+#define CPU_NUMBER_NO_STACK(reg) \
+ movl %cs:lapic, reg ;\
+ movl %cs:APIC_ID(reg), reg ;\
+ shrl $24, reg ;\
+ movl %cs:CX(cpu_id_lut, reg), reg ;\
+
+#ifdef __i386__
+/* Never call CPU_NUMBER_NO_GS(%esi) */
+#define CPU_NUMBER_NO_GS(reg) \
+ pushl %esi ;\
+ pushl %eax ;\
+ pushl %ebx ;\
+ pushl %ecx ;\
+ pushl %edx ;\
+ movl $1, %eax ;\
+ cpuid ;\
+ shrl $24, %ebx ;\
+ movl %cs:CX(cpu_id_lut, %ebx), %esi ;\
+ popl %edx ;\
+ popl %ecx ;\
+ popl %ebx ;\
+ popl %eax ;\
+ movl %esi, reg ;\
+ popl %esi
+#endif
+#ifdef __x86_64__
+/* Never call CPU_NUMBER_NO_GS(%esi) */
+#define CPU_NUMBER_NO_GS(reg) \
+ pushq %rsi ;\
+ pushq %rax ;\
+ pushq %rbx ;\
+ pushq %rcx ;\
+ pushq %rdx ;\
+ movl $1, %eax ;\
+ cpuid ;\
+ shrl $24, %ebx ;\
+ movl %cs:CX(cpu_id_lut, %ebx), %esi ;\
+ popq %rdx ;\
+ popq %rcx ;\
+ popq %rbx ;\
+ popq %rax ;\
+ movl %esi, reg ;\
+ popq %rsi
+#endif
+
+#define CPU_NUMBER(reg) \
+ movl MY(CPU_ID), reg;
+
+#ifndef __ASSEMBLER__
+#include <kern/cpu_number.h>
+#include <i386/apic.h>
+#include <i386/percpu.h>
+
+static inline int cpu_number_slow(void)
+{
+ return cpu_id_lut[apic_get_current_cpu()];
+}
+
+static inline int cpu_number(void)
+{
+ return percpu_get(int, cpu_id);
+}
+#endif
+
+#else /* NCPUS == 1 */
+
+#define MY(stm) (percpu_array + PERCPU_##stm)
+
+#define CPU_NUMBER_NO_STACK(reg)
+#define CPU_NUMBER_NO_GS(reg)
+#define CPU_NUMBER(reg)
+#define CX(addr,reg) addr
+
+#endif /* NCPUS == 1 */
+
+#endif /* _I386_CPU_NUMBER_H_ */
diff --git a/i386/i386/cpuboot.S b/i386/i386/cpuboot.S
new file mode 100644
index 0000000..7e6c477
--- /dev/null
+++ b/i386/i386/cpuboot.S
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2022 Free Software Foundation, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#if NCPUS > 1
+#include <mach/machine/asm.h>
+#include <i386/i386asm.h>
+#include <i386/proc_reg.h>
+#include <i386/apic.h>
+#include <i386/cpu_number.h>
+#include <i386/seg.h>
+#include <i386/gdt.h>
+
+#define M(addr) (addr - apboot)
+#define CR0_CLEAR_FLAGS_CACHE_ENABLE (CR0_CD | CR0_NW)
+#define CR0_SET_FLAGS (CR0_CLEAR_FLAGS_CACHE_ENABLE | CR0_PE)
+#define CR0_CLEAR_FLAGS (CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_TS | CR0_EM | CR0_MP)
+#define BOOT_CS 0x8
+#define BOOT_DS 0x10
+
+.data
+
+.align 16
+apboot_idt_ptr:
+ .long 0
+.align 16
+ .word 0
+apboot_gdt_descr:
+ .word 14*8-1
+ .long apboot_gdt - KERNELBASE
+.align 16
+apboot_gdt:
+ /* NULL segment = 0x0 */
+ .quad 0
+
+ /* KERNEL_CS = 0x8 */
+ .word 0xffff /* Segment limit first 0-15 bits*/
+ .word (-KERNELBASE) & 0xffff /*Base first 0-15 bits*/
+ .byte ((-KERNELBASE) >> 16) & 0xff /*Base 16-23 bits */
+ .byte ACC_PL_K | ACC_CODE_R | ACC_P /*Access byte */
+ .byte ((SZ_32 | SZ_G) << 4) | 0xf /* High 4 bits */
+ .byte ((-KERNELBASE) >> 24) & 0xff /*Base 24-31 bits */
+
+ /* KERNEL_DS = 0x10 */
+ .word 0xffff /*Segment limit */
+ .word (-KERNELBASE) & 0xffff /*Base first 0-15 bits*/
+ .byte ((-KERNELBASE) >> 16) & 0xff
+ .byte ACC_PL_K | ACC_DATA_W | ACC_P /*Access byte*/
+ .byte ((SZ_32 | SZ_G) << 4) | 0xf /* High 4 bits */
+ .byte ((-KERNELBASE) >> 24) & 0xff /*Base 24-31 bits */
+
+ /* LDT = 0x18 */
+ .quad 0
+
+ /* TSS = 0x20 */
+ .quad 0
+
+ /* USER_LDT = 0x28 */
+ .quad 0
+
+ /* USER_TSS = 0x30 */
+ .quad 0
+
+ /* LINEAR = 0x38 */
+ .quad 0
+
+ /* FPREGS = 0x40 */
+ .quad 0
+
+ /* USER_GDT = 0x48 and 0x50 */
+ .quad 0
+ .quad 0
+
+ /* USER_TSS64 = 0x58 */
+ .quad 0
+
+ /* USER_TSS64 = 0x60 */
+ .quad 0
+
+ /* boot GS = 0x68 */
+ .word 0xffff
+apboot_percpu_low:
+ .word 0
+apboot_percpu_med:
+ .byte 0
+ .byte ACC_PL_K | ACC_DATA_W | ACC_P
+ .byte ((SZ_32 | SZ_G) << 4) | 0xf
+apboot_percpu_high:
+ .byte 0
+
+.globl apboot, apbootend, gdt_descr_tmp, apboot_jmp_offset
+.align 16
+.code16
+
+apboot:
+_apboot:
+ /* This is now address CS:0 in real mode */
+
+ /* Set data seg same as code seg */
+ mov %cs, %dx
+ mov %dx, %ds
+
+ cli
+ xorl %eax, %eax
+ movl %eax, %cr3
+
+ mov %ax, %es
+ mov %ax, %fs
+ mov %ax, %gs
+ mov %ax, %ss
+
+ lgdt M(gdt_descr_tmp)
+
+ movl %cr0, %eax
+ andl $~CR0_CLEAR_FLAGS, %eax
+ orl $CR0_SET_FLAGS, %eax
+ movl %eax, %cr0
+
+ /* ljmpl with relocation from machine_init */
+ .byte 0x66
+ .byte 0xea
+apboot_jmp_offset:
+ .long M(0f)
+ .word BOOT_CS
+
+0:
+ .code32
+ /* Protected mode! */
+ movw $BOOT_DS, %ax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %ss
+
+ lgdtl apboot_gdt_descr - KERNELBASE
+ ljmpl $KERNEL_CS, $1f
+1:
+ xorl %eax, %eax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %fs
+ movw %ax, %gs
+ movw $KERNEL_DS, %ax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %fs
+ movw %ax, %gs
+ movw %ax, %ss
+
+ /* Get CPU number */
+ movl $1, %eax
+ cpuid
+ shrl $24, %ebx
+ movl %cs:CX(cpu_id_lut, %ebx), %ecx
+
+ /* Access per_cpu area */
+ movl %ecx,%eax
+ movl $PC_SIZE,%ebx
+ mul %ebx
+ addl $percpu_array - KERNELBASE, %eax
+
+ /* Record our cpu number */
+ movl %ecx, (PERCPU_CPU_ID + KERNELBASE)(%eax)
+
+ /* Set up temporary percpu descriptor */
+ movw %ax, apboot_percpu_low
+ shr $16, %eax
+ movb %al, apboot_percpu_med
+ shr $8, %ax
+ movb %al, apboot_percpu_high
+
+ movw $PERCPU_DS, %ax
+ movw %ax, %gs
+
+ /* Load null Interrupt descriptor table */
+ mov apboot_idt_ptr, %ebx
+ lidt (%ebx)
+
+ /* Enable local apic in xAPIC mode */
+ xorl %eax, %eax
+ xorl %edx, %edx
+ movl $APIC_MSR, %ecx
+ rdmsr
+ orl $APIC_MSR_ENABLE, %eax
+ andl $(~(APIC_MSR_BSP | APIC_MSR_X2APIC)), %eax
+ movl $APIC_MSR, %ecx
+ wrmsr
+
+ /* Load int_stack_top[cpu] -> esp */
+ CPU_NUMBER_NO_STACK(%edx)
+ movl CX(EXT(int_stack_top), %edx), %esp
+
+ /* Ensure stack alignment */
+ andl $0xfffffff0, %esp
+
+ /* Reset EFLAGS to a known state */
+ pushl $0
+ popfl
+
+ /* Finish the cpu configuration */
+ call EXT(cpu_ap_main)
+
+ /* NOT REACHED */
+ hlt
+
+.align 16
+ .word 0
+gdt_descr_tmp:
+ .short 3*8-1
+ .long M(gdt_tmp)
+
+.align 16
+gdt_tmp:
+ /* 0 */
+ .quad 0
+ /* BOOT_CS */
+ .word 0xffff
+ .word 0x0000
+ .byte 0x00
+ .byte ACC_PL_K | ACC_CODE_R | ACC_P
+ .byte ((SZ_32 | SZ_G) << 4) | 0xf
+ .byte 0x00
+ /* BOOT_DS */
+ .word 0xffff
+ .word 0x0000
+ .byte 0x00
+ .byte ACC_PL_K | ACC_DATA_W | ACC_P
+ .byte ((SZ_32 | SZ_G) << 4) | 0xf
+ .byte 0x00
+
+_apbootend:
+apbootend:
+#endif
diff --git a/i386/i386/cswitch.S b/i386/i386/cswitch.S
new file mode 100644
index 0000000..2dee309
--- /dev/null
+++ b/i386/i386/cswitch.S
@@ -0,0 +1,139 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/machine/asm.h>
+
+#include <i386/proc_reg.h>
+#include <i386/i386asm.h>
+#include <i386/cpu_number.h>
+#include <i386/gdt.h>
+
+/*
+ * Context switch routines for i386.
+ */
+
+ENTRY(Load_context)
+ movl S_ARG0,%ecx /* get thread */
+ movl TH_KERNEL_STACK(%ecx),%ecx /* get kernel stack */
+ lea KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%ecx),%edx
+ /* point to stack top */
+ CPU_NUMBER(%eax)
+ movl %ecx,MY(ACTIVE_STACK) /* store stack address */
+ movl %edx,CX(EXT(kernel_stack),%eax) /* store stack top */
+
+ movl KSS_ESP(%ecx),%esp /* switch stacks */
+ movl KSS_ESI(%ecx),%esi /* restore registers */
+ movl KSS_EDI(%ecx),%edi
+ movl KSS_EBP(%ecx),%ebp
+ movl KSS_EBX(%ecx),%ebx
+ xorl %eax,%eax /* return zero (no old thread) */
+ jmp *KSS_EIP(%ecx) /* resume thread */
+
+/*
+ * This really only has to save registers
+ * when there is no explicit continuation.
+ */
+
+ENTRY(Switch_context)
+ movl MY(ACTIVE_STACK),%ecx /* get old kernel stack */
+
+ movl %ebx,KSS_EBX(%ecx) /* save registers */
+ movl %ebp,KSS_EBP(%ecx)
+ movl %edi,KSS_EDI(%ecx)
+ movl %esi,KSS_ESI(%ecx)
+ popl KSS_EIP(%ecx) /* save return PC */
+ movl %esp,KSS_ESP(%ecx) /* save SP */
+
+ movl 0(%esp),%eax /* get old thread */
+ movl %ecx,TH_KERNEL_STACK(%eax) /* save old stack */
+ movl 4(%esp),%ebx /* get continuation */
+ movl %ebx,TH_SWAP_FUNC(%eax) /* save continuation */
+
+ movl 8(%esp),%esi /* get new thread */
+
+ movl TH_KERNEL_STACK(%esi),%ecx /* get its kernel stack */
+ lea KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%ecx),%ebx
+ /* point to stack top */
+
+ CPU_NUMBER(%edx)
+ movl %esi,MY(ACTIVE_THREAD) /* new thread is active */
+ movl %ecx,MY(ACTIVE_STACK) /* set current stack */
+ movl %ebx,CX(EXT(kernel_stack),%edx) /* set stack top */
+
+ movl KSS_ESP(%ecx),%esp /* switch stacks */
+ movl KSS_ESI(%ecx),%esi /* restore registers */
+ movl KSS_EDI(%ecx),%edi
+ movl KSS_EBP(%ecx),%ebp
+ movl KSS_EBX(%ecx),%ebx
+ jmp *KSS_EIP(%ecx) /* return old thread */
+
+ENTRY(Thread_continue)
+ pushl %eax /* push the thread argument */
+ xorl %ebp,%ebp /* zero frame pointer */
+ call *%ebx /* call real continuation */
+
+#if NCPUS > 1
+/*
+ * void switch_to_shutdown_context(thread_t thread,
+ * void (*routine)(processor_t),
+ * processor_t processor)
+ *
+ * saves the kernel context of the thread,
+ * switches to the interrupt stack,
+ * continues the thread (with thread_continue),
+ * then runs routine on the interrupt stack.
+ *
+ * Assumes that the thread is a kernel thread (thus
+ * has no FPU state)
+ */
+ENTRY(switch_to_shutdown_context)
+ movl MY(ACTIVE_STACK),%ecx /* get old kernel stack */
+ movl %ebx,KSS_EBX(%ecx) /* save registers */
+ movl %ebp,KSS_EBP(%ecx)
+ movl %edi,KSS_EDI(%ecx)
+ movl %esi,KSS_ESI(%ecx)
+ popl KSS_EIP(%ecx) /* save return PC */
+ movl %esp,KSS_ESP(%ecx) /* save SP */
+
+ movl 0(%esp),%eax /* get old thread */
+ movl %ecx,TH_KERNEL_STACK(%eax) /* save old stack */
+ movl $0,TH_SWAP_FUNC(%eax) /* clear continuation */
+ movl 4(%esp),%ebx /* get routine to run next */
+ movl 8(%esp),%esi /* get its argument */
+
+ CPU_NUMBER(%edx)
+ movl CX(EXT(int_stack_base),%edx),%ecx /* point to its interrupt stack */
+ lea -4+INTSTACK_SIZE(%ecx),%esp /* switch to it (top) */
+
+ pushl %eax /* push thread */
+ call EXT(thread_dispatch) /* reschedule thread */
+ addl $4,%esp /* clean stack */
+
+ pushl %esi /* push argument */
+ call *%ebx /* call routine to run */
+ hlt /* (should never return) */
+
+#endif /* NCPUS > 1 */
diff --git a/i386/i386/db_disasm.c b/i386/i386/db_disasm.c
new file mode 100644
index 0000000..303b462
--- /dev/null
+++ b/i386/i386/db_disasm.c
@@ -0,0 +1,1437 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#if MACH_KDB
+
+/*
+ * Instruction disassembler.
+ */
+#include <mach/boolean.h>
+#include <machine/db_machdep.h>
+
+#include <ddb/db_access.h>
+#include <ddb/db_examine.h>
+#include <ddb/db_output.h>
+#include <ddb/db_sym.h>
+
+#include <kern/task.h>
+
+/*
+ * Switch to disassemble 16-bit code.
+ */
+boolean_t db_disasm_16 = FALSE;
+
+/*
+ * Size attributes
+ */
+#define BYTE 0
+#define WORD 1
+#define LONG 2
+#define QUAD 3
+#define SNGL 4
+#define DBLR 5
+#define EXTR 6
+#define SDEP 7
+#define NONE 8
+
+/*
+ * Addressing modes
+ */
+#define E 1 /* general effective address */
+#define Eind 2 /* indirect address (jump, call) */
+#define El 3 /* address, long size */
+#define Ew 4 /* address, word size */
+#define Eb 5 /* address, byte size */
+#define R 6 /* register, in 'reg' field */
+#define Rw 7 /* word register, in 'reg' field */
+#define Ri 8 /* register in instruction */
+#define S 9 /* segment reg, in 'reg' field */
+#define Si 10 /* segment reg, in instruction */
+#define A 11 /* accumulator */
+#define BX 12 /* (bx) */
+#define CL 13 /* cl, for shifts */
+#define DX 14 /* dx, for IO */
+#define SI 15 /* si */
+#define DI 16 /* di */
+#define CR 17 /* control register */
+#define DR 18 /* debug register */
+#define TR 19 /* test register */
+#define I 20 /* immediate, unsigned */
+#define Is 21 /* immediate, signed */
+#define Ib 22 /* byte immediate, unsigned */
+#define Ibs 23 /* byte immediate, signed */
+#define Iw 24 /* word immediate, unsigned */
+#define Il 25 /* long immediate */
+#define O 26 /* direct address */
+#define Db 27 /* byte displacement from EIP */
+#define Dl 28 /* long displacement from EIP */
+#define o1 29 /* constant 1 */
+#define o3 30 /* constant 3 */
+#define OS 31 /* immediate offset/segment */
+#define ST 32 /* FP stack top */
+#define STI 33 /* FP stack */
+#define X 34 /* extended FP op */
+#define XA 35 /* for 'fstcw %ax' */
+#define Iba 36 /* byte immediate, don't print if 0xa */
+
+struct inst {
+ char * i_name; /* name */
+ short i_has_modrm; /* has regmodrm byte */
+ short i_size; /* operand size */
+ int i_mode; /* addressing modes */
+ char * i_extra; /* pointer to extra opcode table */
+};
+
+#define op1(x) (x)
+#define op2(x,y) ((x)|((y)<<8))
+#define op3(x,y,z) ((x)|((y)<<8)|((z)<<16))
+
+struct finst {
+ char * f_name; /* name for memory instruction */
+ int f_size; /* size for memory instruction */
+ int f_rrmode; /* mode for rr instruction */
+ char * f_rrname; /* name for rr instruction
+ (or pointer to table) */
+};
+
+char * db_Grp6[] = {
+ "sldt",
+ "str",
+ "lldt",
+ "ltr",
+ "verr",
+ "verw",
+ "",
+ ""
+};
+
+char * db_Grp7[] = {
+ "sgdt",
+ "sidt",
+ "lgdt",
+ "lidt",
+ "smsw",
+ "",
+ "lmsw",
+ "invlpg"
+};
+
+char * db_Grp8[] = {
+ "",
+ "",
+ "",
+ "",
+ "bt",
+ "bts",
+ "btr",
+ "btc"
+};
+
+struct inst db_inst_0f0x[] = {
+/*00*/ { "", TRUE, NONE, op1(Ew), (char *)db_Grp6 },
+/*01*/ { "", TRUE, NONE, op1(Ew), (char *)db_Grp7 },
+/*02*/ { "lar", TRUE, LONG, op2(E,R), 0 },
+/*03*/ { "lsl", TRUE, LONG, op2(E,R), 0 },
+/*04*/ { "", FALSE, NONE, 0, 0 },
+/*05*/ { "", FALSE, NONE, 0, 0 },
+/*06*/ { "clts", FALSE, NONE, 0, 0 },
+/*07*/ { "", FALSE, NONE, 0, 0 },
+
+/*08*/ { "invd", FALSE, NONE, 0, 0 },
+/*09*/ { "wbinvd",FALSE, NONE, 0, 0 },
+/*0a*/ { "", FALSE, NONE, 0, 0 },
+/*0b*/ { "ud2", FALSE, NONE, 0, 0 },
+/*0c*/ { "", FALSE, NONE, 0, 0 },
+/*0d*/ { "", FALSE, NONE, 0, 0 },
+/*0e*/ { "", FALSE, NONE, 0, 0 },
+/*0f*/ { "", FALSE, NONE, 0, 0 },
+};
+
+struct inst db_inst_0f2x[] = {
+/*20*/ { "mov", TRUE, LONG, op2(CR,El), 0 }, /* use El for reg */
+/*21*/ { "mov", TRUE, LONG, op2(DR,El), 0 }, /* since mod == 11 */
+/*22*/ { "mov", TRUE, LONG, op2(El,CR), 0 },
+/*23*/ { "mov", TRUE, LONG, op2(El,DR), 0 },
+/*24*/ { "mov", TRUE, LONG, op2(TR,El), 0 },
+/*25*/ { "", FALSE, NONE, 0, 0 },
+/*26*/ { "mov", TRUE, LONG, op2(El,TR), 0 },
+/*27*/ { "", FALSE, NONE, 0, 0 },
+
+/*28*/ { "", FALSE, NONE, 0, 0 },
+/*29*/ { "", FALSE, NONE, 0, 0 },
+/*2a*/ { "", FALSE, NONE, 0, 0 },
+/*2b*/ { "", FALSE, NONE, 0, 0 },
+/*2c*/ { "", FALSE, NONE, 0, 0 },
+/*2d*/ { "", FALSE, NONE, 0, 0 },
+/*2e*/ { "", FALSE, NONE, 0, 0 },
+/*2f*/ { "", FALSE, NONE, 0, 0 },
+};
+
+struct inst db_inst_0f8x[] = {
+/*80*/ { "jo", FALSE, NONE, op1(Dl), 0 },
+/*81*/ { "jno", FALSE, NONE, op1(Dl), 0 },
+/*82*/ { "jb", FALSE, NONE, op1(Dl), 0 },
+/*83*/ { "jnb", FALSE, NONE, op1(Dl), 0 },
+/*84*/ { "jz", FALSE, NONE, op1(Dl), 0 },
+/*85*/ { "jnz", FALSE, NONE, op1(Dl), 0 },
+/*86*/ { "jbe", FALSE, NONE, op1(Dl), 0 },
+/*87*/ { "jnbe", FALSE, NONE, op1(Dl), 0 },
+
+/*88*/ { "js", FALSE, NONE, op1(Dl), 0 },
+/*89*/ { "jns", FALSE, NONE, op1(Dl), 0 },
+/*8a*/ { "jp", FALSE, NONE, op1(Dl), 0 },
+/*8b*/ { "jnp", FALSE, NONE, op1(Dl), 0 },
+/*8c*/ { "jl", FALSE, NONE, op1(Dl), 0 },
+/*8d*/ { "jnl", FALSE, NONE, op1(Dl), 0 },
+/*8e*/ { "jle", FALSE, NONE, op1(Dl), 0 },
+/*8f*/ { "jnle", FALSE, NONE, op1(Dl), 0 },
+};
+
+struct inst db_inst_0f9x[] = {
+/*90*/ { "seto", TRUE, NONE, op1(Eb), 0 },
+/*91*/ { "setno", TRUE, NONE, op1(Eb), 0 },
+/*92*/ { "setb", TRUE, NONE, op1(Eb), 0 },
+/*93*/ { "setnb", TRUE, NONE, op1(Eb), 0 },
+/*94*/ { "setz", TRUE, NONE, op1(Eb), 0 },
+/*95*/ { "setnz", TRUE, NONE, op1(Eb), 0 },
+/*96*/ { "setbe", TRUE, NONE, op1(Eb), 0 },
+/*97*/ { "setnbe",TRUE, NONE, op1(Eb), 0 },
+
+/*98*/ { "sets", TRUE, NONE, op1(Eb), 0 },
+/*99*/ { "setns", TRUE, NONE, op1(Eb), 0 },
+/*9a*/ { "setp", TRUE, NONE, op1(Eb), 0 },
+/*9b*/ { "setnp", TRUE, NONE, op1(Eb), 0 },
+/*9c*/ { "setl", TRUE, NONE, op1(Eb), 0 },
+/*9d*/ { "setnl", TRUE, NONE, op1(Eb), 0 },
+/*9e*/ { "setle", TRUE, NONE, op1(Eb), 0 },
+/*9f*/ { "setnle",TRUE, NONE, op1(Eb), 0 },
+};
+
+struct inst db_inst_0fax[] = {
+/*a0*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*a1*/ { "pop", FALSE, NONE, op1(Si), 0 },
+/*a2*/ { "", FALSE, NONE, 0, 0 },
+/*a3*/ { "bt", TRUE, LONG, op2(R,E), 0 },
+/*a4*/ { "shld", TRUE, LONG, op3(Ib,E,R), 0 },
+/*a5*/ { "shld", TRUE, LONG, op3(CL,E,R), 0 },
+/*a6*/ { "", FALSE, NONE, 0, 0 },
+/*a7*/ { "", FALSE, NONE, 0, 0 },
+
+/*a8*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*a9*/ { "pop", FALSE, NONE, op1(Si), 0 },
+/*aa*/ { "", FALSE, NONE, 0, 0 },
+/*ab*/ { "bts", TRUE, LONG, op2(R,E), 0 },
+/*ac*/ { "shrd", TRUE, LONG, op3(Ib,E,R), 0 },
+/*ad*/ { "shrd", TRUE, LONG, op3(CL,E,R), 0 },
+/*ae*/ { "",      FALSE, NONE,  0,	      0 },
+/*af*/ { "imul",  TRUE,  LONG,  op2(E,R),    0 },
+};
+
+struct inst db_inst_0fbx[] = {
+/*b0*/ { "", FALSE, NONE, 0, 0 },
+/*b1*/ { "", FALSE, NONE, 0, 0 },
+/*b2*/ { "lss", TRUE, LONG, op2(E, R), 0 },
+/*b3*/ { "btr", TRUE, LONG, op2(R, E), 0 },
+/*b4*/ { "lfs", TRUE, LONG, op2(E, R), 0 },
+/*b5*/ { "lgs", TRUE, LONG, op2(E, R), 0 },
+/*b6*/ { "movzb", TRUE, LONG, op2(Eb,R), 0 },
+/*b7*/ { "movzw", TRUE, LONG, op2(Ew,R), 0 },
+
+/*b8*/ { "", FALSE, NONE, 0, 0 },
+/*b9*/ { "", FALSE, NONE, 0, 0 },
+/*ba*/ { "", TRUE, LONG, op2(Ibs,E), (char *)db_Grp8 },
+/*bb*/ { "btc", TRUE, LONG, op2(R, E), 0 },
+/*bc*/ { "bsf", TRUE, LONG, op2(E, R), 0 },
+/*bd*/ { "bsr", TRUE, LONG, op2(E, R), 0 },
+/*be*/ { "movsb", TRUE, LONG, op2(Eb,R), 0 },
+/*bf*/ { "movsw", TRUE, LONG, op2(Ew,R), 0 },
+};
+
+struct inst db_inst_0fcx[] = {
+/*c0*/ { "xadd", TRUE, BYTE, op2(R, E), 0 },
+/*c1*/ { "xadd", TRUE, LONG, op2(R, E), 0 },
+/*c2*/ { "", FALSE, NONE, 0, 0 },
+/*c3*/ { "", FALSE, NONE, 0, 0 },
+/*c4*/ { "", FALSE, NONE, 0, 0 },
+/*c5*/ { "", FALSE, NONE, 0, 0 },
+/*c6*/ { "", FALSE, NONE, 0, 0 },
+/*c7*/ { "", FALSE, NONE, 0, 0 },
+/*c8*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*c9*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*ca*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cb*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cc*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cd*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*ce*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+/*cf*/ { "bswap", FALSE, LONG, op1(Ri), 0 },
+};
+
+struct inst db_inst_0fdx[] = {
+/*d0*/	{ "cmpxchg",TRUE, BYTE,	 op2(R, E),   0 },
+/*d1*/	{ "cmpxchg",TRUE, LONG,	 op2(R, E),   0 },
+/*d2*/	{ "",	   FALSE, NONE,  0,	      0 },
+/*d3*/	{ "",	   FALSE, NONE,  0,	      0 },
+/*d4*/	{ "",	   FALSE, NONE,  0,	      0 },
+/*d5*/	{ "",	   FALSE, NONE,  0,	      0 },
+/*d6*/	{ "",	   FALSE, NONE,  0,	      0 },
+/*d7*/	{ "",	   FALSE, NONE,  0,	      0 },
+/*d8*/	{ "",	   FALSE, NONE,  0,	      0 },
+/*d9*/	{ "",	   FALSE, NONE,  0,	      0 },
+/*da*/	{ "",	   FALSE, NONE,  0,	      0 },
+/*db*/	{ "",	   FALSE, NONE,  0,	      0 },
+/*dc*/	{ "",	   FALSE, NONE,  0,	      0 },
+/*dd*/	{ "",	   FALSE, NONE,  0,	      0 },
+/*de*/	{ "",	   FALSE, NONE,  0,	      0 },
+/*df*/	{ "",	   FALSE, NONE,  0,	      0 },
+};
+
+struct inst *db_inst_0f[] = {
+ db_inst_0f0x,
+ 0,
+ db_inst_0f2x,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ db_inst_0f8x,
+ db_inst_0f9x,
+ db_inst_0fax,
+ db_inst_0fbx,
+ db_inst_0fcx,
+ db_inst_0fdx,
+ 0,
+ 0
+};
+
+char * db_Esc92[] = {
+ "fnop", "", "", "", "", "", "", ""
+};
+char * db_Esc93[] = {
+ "", "", "", "", "", "", "", ""
+};
+char * db_Esc94[] = {
+ "fchs", "fabs", "", "", "ftst", "fxam", "", ""
+};
+char * db_Esc95[] = {
+ "fld1", "fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz",""
+};
+char * db_Esc96[] = {
+ "f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp",
+ "fincstp"
+};
+char * db_Esc97[] = {
+ "fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos"
+};
+
+char * db_Esca4[] = {
+ "", "fucompp","", "", "", "", "", ""
+};
+
+char * db_Escb4[] = {
+ "", "", "fnclex","fninit","", "", "", ""
+};
+
+char * db_Esce3[] = {
+ "", "fcompp","", "", "", "", "", ""
+};
+
+char * db_Escf4[] = {
+ "fnstsw","", "", "", "", "", "", ""
+};
+
+struct finst db_Esc8[] = {
+/*0*/ { "fadd", SNGL, op2(STI,ST), 0 },
+/*1*/ { "fmul", SNGL, op2(STI,ST), 0 },
+/*2*/ { "fcom", SNGL, op2(STI,ST), 0 },
+/*3*/ { "fcomp", SNGL, op2(STI,ST), 0 },
+/*4*/ { "fsub", SNGL, op2(STI,ST), 0 },
+/*5*/ { "fsubr", SNGL, op2(STI,ST), 0 },
+/*6*/ { "fdiv", SNGL, op2(STI,ST), 0 },
+/*7*/ { "fdivr", SNGL, op2(STI,ST), 0 },
+};
+
+struct finst db_Esc9[] = {
+/*0*/ { "fld", SNGL, op1(STI), 0 },
+/*1*/ { "", NONE, op1(STI), "fxch" },
+/*2*/ { "fst", SNGL, op1(X), (char *)db_Esc92 },
+/*3*/ { "fstp", SNGL, op1(X), (char *)db_Esc93 },
+/*4*/ { "fldenv", NONE, op1(X), (char *)db_Esc94 },
+/*5*/ { "fldcw", NONE, op1(X), (char *)db_Esc95 },
+/*6*/ { "fnstenv",NONE, op1(X), (char *)db_Esc96 },
+/*7*/ { "fnstcw", NONE, op1(X), (char *)db_Esc97 },
+};
+
+struct finst db_Esca[] = {
+/*0*/ { "fiadd", WORD, 0, 0 },
+/*1*/ { "fimul", WORD, 0, 0 },
+/*2*/ { "ficom", WORD, 0, 0 },
+/*3*/ { "ficomp", WORD, 0, 0 },
+/*4*/ { "fisub", WORD, op1(X), (char *)db_Esca4 },
+/*5*/ { "fisubr", WORD, 0, 0 },
+/*6*/ { "fidiv", WORD, 0, 0 },
+/*7*/ { "fidivr", WORD, 0, 0 }
+};
+
+/*
+ * Floating-point escape tables: db_EscN[r] describes the instruction
+ * selected by the reg field of the modr/m byte that follows escape
+ * opcode 0xdN (rows 0xd8-0xdf of db_inst_table dispatch through
+ * db_Esc_inst below).  An op1(X)/op1(XA) f_rrmode entry means f_rrname
+ * is really a char *[8] indexed by the r/m field (see db_disasm_esc).
+ */
+struct finst db_Escb[] = {
+/*0*/ { "fild", WORD, 0, 0 },
+/*1*/ { "", NONE, 0, 0 },
+/*2*/ { "fist", WORD, 0, 0 },
+/*3*/ { "fistp", WORD, 0, 0 },
+/*4*/ { "", WORD, op1(X), (char *)db_Escb4 },
+/*5*/ { "fld", EXTR, 0, 0 },
+/*6*/ { "", WORD, 0, 0 },
+/*7*/ { "fstp", EXTR, 0, 0 },
+};
+
+struct finst db_Escc[] = {
+/*0*/ { "fadd", DBLR, op2(ST,STI), 0 },
+/*1*/ { "fmul", DBLR, op2(ST,STI), 0 },
+/*2*/ { "fcom", DBLR, op2(ST,STI), 0 },
+/*3*/ { "fcomp", DBLR, op2(ST,STI), 0 },
+/*4*/ { "fsub", DBLR, op2(ST,STI), "fsubr" },
+/*5*/ { "fsubr", DBLR, op2(ST,STI), "fsub" },
+/*6*/ { "fdiv", DBLR, op2(ST,STI), "fdivr" },
+/*7*/ { "fdivr", DBLR, op2(ST,STI), "fdiv" },
+};
+
+struct finst db_Escd[] = {
+/*0*/ { "fld", DBLR, op1(STI), "ffree" },
+/*1*/ { "", NONE, 0, 0 },
+/*2*/ { "fst", DBLR, op1(STI), 0 },
+/*3*/ { "fstp", DBLR, op1(STI), 0 },
+/*4*/ { "frstor", NONE, op1(STI), "fucom" },
+/*5*/ { "", NONE, op1(STI), "fucomp" },
+/*6*/ { "fnsave", NONE, 0, 0 },
+/*7*/ { "fnstsw", NONE, 0, 0 },
+};
+
+struct finst db_Esce[] = {
+/*0*/ { "fiadd", LONG, op2(ST,STI), "faddp" },
+/*1*/ { "fimul", LONG, op2(ST,STI), "fmulp" },
+/*2*/ { "ficom", LONG, 0, 0 },
+/*3*/ { "ficomp", LONG, op1(X), (char *)db_Esce3 },
+/*4*/ { "fisub", LONG, op2(ST,STI), "fsubrp" },
+/*5*/ { "fisubr", LONG, op2(ST,STI), "fsubp" },
+/*6*/ { "fidiv", LONG, op2(ST,STI), "fdivrp" },
+/*7*/ { "fidivr", LONG, op2(ST,STI), "fdivp" },
+};
+
+struct finst db_Escf[] = {
+/*0*/ { "fild", LONG, 0, 0 },
+/*1*/ { "", LONG, 0, 0 },
+/*2*/ { "fist", LONG, 0, 0 },
+/*3*/ { "fistp", LONG, 0, 0 },
+/*4*/ { "fbld", NONE, op1(XA), (char *)db_Escf4 },
+/*5*/ { "fld", QUAD, 0, 0 },
+/*6*/ { "fbstp", NONE, 0, 0 },
+/*7*/ { "fstp", QUAD, 0, 0 },
+};
+
+/* Map escape opcode (0xd8 .. 0xdf) to its per-reg-field table above. */
+struct finst *db_Esc_inst[] = {
+	db_Esc8, db_Esc9, db_Esca, db_Escb,
+	db_Escc, db_Escd, db_Esce, db_Escf
+};
+
+/* Group 1: immediate ALU ops, opcodes 0x80-0x83, selected by modr/m reg. */
+char * db_Grp1[] = {
+	"add",
+	"or",
+	"adc",
+	"sbb",
+	"and",
+	"sub",
+	"xor",
+	"cmp"
+};
+
+/* Group 2: shifts/rotates, opcodes 0xc0/0xc1 and 0xd0-0xd3.
+   Slot 6 repeats "shl": reg=6 is an undocumented alias encoding —
+   NOTE(review): intentional here, do not "fix" to a distinct name. */
+char * db_Grp2[] = {
+	"rol",
+	"ror",
+	"rcl",
+	"rcr",
+	"shl",
+	"shr",
+	"shl",
+	"sar"
+};
+
+/* Group 3: opcodes 0xf6/0xf7 (test/not/neg/mul/imul/div/idiv). */
+struct inst db_Grp3[] = {
+	{ "test",  TRUE, NONE, op2(I,E),   0 },
+	{ "test",  TRUE, NONE, op2(I,E),   0 },
+	{ "not",   TRUE, NONE, op1(E),     0 },
+	{ "neg",   TRUE, NONE, op1(E),     0 },
+	{ "mul",   TRUE, NONE, op2(E,A),   0 },
+	{ "imul",  TRUE, NONE, op2(E,A),   0 },
+	{ "div",   TRUE, NONE, op2(E,A),   0 },
+	{ "idiv",  TRUE, NONE, op2(E,A),   0 },
+};
+
+/* Group 4: opcode 0xfe (byte inc/dec); slots 2-7 are invalid. */
+struct inst db_Grp4[] = {
+	{ "inc",   TRUE, BYTE, op1(E),     0 },
+	{ "dec",   TRUE, BYTE, op1(E),     0 },
+	{ "",      TRUE, NONE, 0,	   0 },
+	{ "",      TRUE, NONE, 0,	   0 },
+	{ "",      TRUE, NONE, 0,	   0 },
+	{ "",      TRUE, NONE, 0,	   0 },
+	{ "",      TRUE, NONE, 0,	   0 },
+	{ "",      TRUE, NONE, 0,	   0 }
+};
+
+/* Group 5: opcode 0xff (inc/dec/call/jmp/push on r/m operands). */
+struct inst db_Grp5[] = {
+	{ "inc",   TRUE, LONG, op1(E),     0 },
+	{ "dec",   TRUE, LONG, op1(E),     0 },
+	{ "call",  TRUE, NONE, op1(Eind),0 },
+	{ "lcall", TRUE, NONE, op1(Eind),0 },
+	{ "jmp",   TRUE, NONE, op1(Eind),0 },
+	{ "ljmp",  TRUE, NONE, op1(Eind),0 },
+	{ "push",  TRUE, LONG, op1(E),     0 },
+	{ "",      TRUE, NONE, 0,	   0 }
+};
+
+/*
+ * Main one-byte opcode dispatch table, indexed by the opcode itself.
+ * An entry with an empty name and a db_GrpN or db_EscN pointer in the
+ * last (i_extra) field is decoded further via the modr/m reg field in
+ * db_disasm / db_disasm_esc.  The boolean column is i_has_modrm; the
+ * SDEP size means the mnemonic depends on the current operand size,
+ * with the 32-bit spelling in i_extra.
+ */
+struct inst db_inst_table[256] = {
+/*00*/ { "add", TRUE, BYTE, op2(R, E), 0 },
+/*01*/ { "add", TRUE, LONG, op2(R, E), 0 },
+/*02*/ { "add", TRUE, BYTE, op2(E, R), 0 },
+/*03*/ { "add", TRUE, LONG, op2(E, R), 0 },
+/*04*/ { "add", FALSE, BYTE, op2(Is, A), 0 },
+/*05*/ { "add", FALSE, LONG, op2(Is, A), 0 },
+/*06*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*07*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*08*/ { "or", TRUE, BYTE, op2(R, E), 0 },
+/*09*/ { "or", TRUE, LONG, op2(R, E), 0 },
+/*0a*/ { "or", TRUE, BYTE, op2(E, R), 0 },
+/*0b*/ { "or", TRUE, LONG, op2(E, R), 0 },
+/*0c*/ { "or", FALSE, BYTE, op2(I, A), 0 },
+/*0d*/ { "or", FALSE, LONG, op2(I, A), 0 },
+/*0e*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*0f*/ { "", FALSE, NONE, 0, 0 },
+
+/*10*/ { "adc", TRUE, BYTE, op2(R, E), 0 },
+/*11*/ { "adc", TRUE, LONG, op2(R, E), 0 },
+/*12*/ { "adc", TRUE, BYTE, op2(E, R), 0 },
+/*13*/ { "adc", TRUE, LONG, op2(E, R), 0 },
+/*14*/ { "adc", FALSE, BYTE, op2(Is, A), 0 },
+/*15*/ { "adc", FALSE, LONG, op2(Is, A), 0 },
+/*16*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*17*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*18*/ { "sbb", TRUE, BYTE, op2(R, E), 0 },
+/*19*/ { "sbb", TRUE, LONG, op2(R, E), 0 },
+/*1a*/ { "sbb", TRUE, BYTE, op2(E, R), 0 },
+/*1b*/ { "sbb", TRUE, LONG, op2(E, R), 0 },
+/*1c*/ { "sbb", FALSE, BYTE, op2(Is, A), 0 },
+/*1d*/ { "sbb", FALSE, LONG, op2(Is, A), 0 },
+/*1e*/ { "push", FALSE, NONE, op1(Si), 0 },
+/*1f*/ { "pop", FALSE, NONE, op1(Si), 0 },
+
+/*20*/ { "and", TRUE, BYTE, op2(R, E), 0 },
+/*21*/ { "and", TRUE, LONG, op2(R, E), 0 },
+/*22*/ { "and", TRUE, BYTE, op2(E, R), 0 },
+/*23*/ { "and", TRUE, LONG, op2(E, R), 0 },
+/*24*/ { "and", FALSE, BYTE, op2(I, A), 0 },
+/*25*/ { "and", FALSE, LONG, op2(I, A), 0 },
+/*26*/ { "", FALSE, NONE, 0, 0 },
+/*27*/ { "aaa", FALSE, NONE, 0, 0 },
+
+/*28*/ { "sub", TRUE, BYTE, op2(R, E), 0 },
+/*29*/ { "sub", TRUE, LONG, op2(R, E), 0 },
+/*2a*/ { "sub", TRUE, BYTE, op2(E, R), 0 },
+/*2b*/ { "sub", TRUE, LONG, op2(E, R), 0 },
+/*2c*/ { "sub", FALSE, BYTE, op2(Is, A), 0 },
+/*2d*/ { "sub", FALSE, LONG, op2(Is, A), 0 },
+/*2e*/ { "", FALSE, NONE, 0, 0 },
+/*2f*/ { "das", FALSE, NONE, 0, 0 },
+
+/*30*/ { "xor", TRUE, BYTE, op2(R, E), 0 },
+/*31*/ { "xor", TRUE, LONG, op2(R, E), 0 },
+/*32*/ { "xor", TRUE, BYTE, op2(E, R), 0 },
+/*33*/ { "xor", TRUE, LONG, op2(E, R), 0 },
+/*34*/ { "xor", FALSE, BYTE, op2(I, A), 0 },
+/*35*/ { "xor", FALSE, LONG, op2(I, A), 0 },
+/*36*/ { "", FALSE, NONE, 0, 0 },
+/*37*/ { "daa", FALSE, NONE, 0, 0 },
+
+/*38*/ { "cmp", TRUE, BYTE, op2(R, E), 0 },
+/*39*/ { "cmp", TRUE, LONG, op2(R, E), 0 },
+/*3a*/ { "cmp", TRUE, BYTE, op2(E, R), 0 },
+/*3b*/ { "cmp", TRUE, LONG, op2(E, R), 0 },
+/*3c*/ { "cmp", FALSE, BYTE, op2(Is, A), 0 },
+/*3d*/ { "cmp", FALSE, LONG, op2(Is, A), 0 },
+/*3e*/ { "", FALSE, NONE, 0, 0 },
+/*3f*/ { "aas", FALSE, NONE, 0, 0 },
+
+/*40*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*41*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*42*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*43*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*44*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*45*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*46*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+/*47*/ { "inc", FALSE, LONG, op1(Ri), 0 },
+
+/*48*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*49*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4a*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4b*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4c*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4d*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4e*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+/*4f*/ { "dec", FALSE, LONG, op1(Ri), 0 },
+
+/*50*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*51*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*52*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*53*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*54*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*55*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*56*/ { "push", FALSE, LONG, op1(Ri), 0 },
+/*57*/ { "push", FALSE, LONG, op1(Ri), 0 },
+
+/*58*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*59*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5a*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5b*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5c*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5d*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5e*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+/*5f*/ { "pop", FALSE, LONG, op1(Ri), 0 },
+
+/*60*/ { "pusha", FALSE, LONG, 0, 0 },
+/*61*/ { "popa", FALSE, LONG, 0, 0 },
+/*62*/ { "bound", TRUE, LONG, op2(E, R), 0 },
+/*63*/ { "arpl", TRUE, NONE, op2(Ew,Rw), 0 },
+
+/*64*/ { "", FALSE, NONE, 0, 0 },
+/*65*/ { "", FALSE, NONE, 0, 0 },
+/*66*/ { "", FALSE, NONE, 0, 0 },
+/*67*/ { "", FALSE, NONE, 0, 0 },
+
+/*68*/ { "push", FALSE, LONG, op1(I), 0 },
+/*69*/ { "imul", TRUE, LONG, op3(I,E,R), 0 },
+/*6a*/ { "push", FALSE, LONG, op1(Ib), 0 },
+/*6b*/ { "imul", TRUE, LONG, op3(Ibs,E,R),0 },
+/*6c*/ { "ins", FALSE, BYTE, op2(DX, DI), 0 },
+/*6d*/ { "ins", FALSE, LONG, op2(DX, DI), 0 },
+/*6e*/ { "outs", FALSE, BYTE, op2(SI, DX), 0 },
+/*6f*/ { "outs", FALSE, LONG, op2(SI, DX), 0 },
+
+/*70*/ { "jo", FALSE, NONE, op1(Db), 0 },
+/*71*/ { "jno", FALSE, NONE, op1(Db), 0 },
+/*72*/ { "jb", FALSE, NONE, op1(Db), 0 },
+/*73*/ { "jnb", FALSE, NONE, op1(Db), 0 },
+/*74*/ { "jz", FALSE, NONE, op1(Db), 0 },
+/*75*/ { "jnz", FALSE, NONE, op1(Db), 0 },
+/*76*/ { "jbe", FALSE, NONE, op1(Db), 0 },
+/*77*/ { "jnbe", FALSE, NONE, op1(Db), 0 },
+
+/*78*/ { "js", FALSE, NONE, op1(Db), 0 },
+/*79*/ { "jns", FALSE, NONE, op1(Db), 0 },
+/*7a*/ { "jp", FALSE, NONE, op1(Db), 0 },
+/*7b*/ { "jnp", FALSE, NONE, op1(Db), 0 },
+/*7c*/ { "jl", FALSE, NONE, op1(Db), 0 },
+/*7d*/ { "jnl", FALSE, NONE, op1(Db), 0 },
+/*7e*/ { "jle", FALSE, NONE, op1(Db), 0 },
+/*7f*/ { "jnle", FALSE, NONE, op1(Db), 0 },
+
+/*80*/ { "", TRUE, BYTE, op2(I, E), (char *)db_Grp1 },
+/*81*/ { "", TRUE, LONG, op2(I, E), (char *)db_Grp1 },
+/*82*/ { "", TRUE, BYTE, op2(Is,E), (char *)db_Grp1 },
+/*83*/ { "", TRUE, LONG, op2(Ibs,E), (char *)db_Grp1 },
+/*84*/ { "test", TRUE, BYTE, op2(R, E), 0 },
+/*85*/ { "test", TRUE, LONG, op2(R, E), 0 },
+/*86*/ { "xchg", TRUE, BYTE, op2(R, E), 0 },
+/*87*/ { "xchg", TRUE, LONG, op2(R, E), 0 },
+
+/*88*/ { "mov", TRUE, BYTE, op2(R, E), 0 },
+/*89*/ { "mov", TRUE, LONG, op2(R, E), 0 },
+/*8a*/ { "mov", TRUE, BYTE, op2(E, R), 0 },
+/*8b*/ { "mov", TRUE, LONG, op2(E, R), 0 },
+/*8c*/ { "mov", TRUE, NONE, op2(S, Ew), 0 },
+/*8d*/ { "lea", TRUE, LONG, op2(E, R), 0 },
+/*8e*/ { "mov", TRUE, NONE, op2(Ew, S), 0 },
+/*8f*/ { "pop", TRUE, LONG, op1(E), 0 },
+
+/*90*/ { "nop", FALSE, NONE, 0, 0 },
+/*91*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*92*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*93*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*94*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*95*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*96*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+/*97*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 },
+
+/*98*/ { "cbw", FALSE, SDEP, 0, "cwde" }, /* cbw/cwde */
+/*99*/ { "cwd", FALSE, SDEP, 0, "cdq" }, /* cwd/cdq */
+/*9a*/ { "lcall", FALSE, NONE, op1(OS), 0 },
+/*9b*/ { "wait", FALSE, NONE, 0, 0 },
+/*9c*/ { "pushf", FALSE, LONG, 0, 0 },
+/*9d*/ { "popf", FALSE, LONG, 0, 0 },
+/*9e*/ { "sahf", FALSE, NONE, 0, 0 },
+/*9f*/ { "lahf", FALSE, NONE, 0, 0 },
+
+/*a0*/ { "mov", FALSE, BYTE, op2(O, A), 0 },
+/*a1*/ { "mov", FALSE, LONG, op2(O, A), 0 },
+/*a2*/ { "mov", FALSE, BYTE, op2(A, O), 0 },
+/*a3*/ { "mov", FALSE, LONG, op2(A, O), 0 },
+/*a4*/ { "movs", FALSE, BYTE, op2(SI,DI), 0 },
+/*a5*/ { "movs", FALSE, LONG, op2(SI,DI), 0 },
+/*a6*/ { "cmps", FALSE, BYTE, op2(SI,DI), 0 },
+/*a7*/ { "cmps", FALSE, LONG, op2(SI,DI), 0 },
+
+/*a8*/ { "test", FALSE, BYTE, op2(I, A), 0 },
+/*a9*/ { "test", FALSE, LONG, op2(I, A), 0 },
+/*aa*/ { "stos", FALSE, BYTE, op1(DI), 0 },
+/*ab*/ { "stos", FALSE, LONG, op1(DI), 0 },
+/*ac*/ { "lods", FALSE, BYTE, op1(SI), 0 },
+/*ad*/ { "lods", FALSE, LONG, op1(SI), 0 },
+/*ae*/ { "scas", FALSE, BYTE, op1(DI), 0 },
+/*af*/ { "scas", FALSE, LONG, op1(DI), 0 },
+
+/*b0*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b1*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b2*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b3*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b4*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b5*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b6*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+/*b7*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 },
+
+/*b8*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*b9*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*ba*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bb*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bc*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bd*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*be*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+/*bf*/ { "mov", FALSE, LONG, op2(I, Ri), 0 },
+
+/*c0*/ { "", TRUE, BYTE, op2(Ib, E), (char *)db_Grp2 },
+/*c1*/ { "", TRUE, LONG, op2(Ib, E), (char *)db_Grp2 },
+/*c2*/ { "ret", FALSE, NONE, op1(Iw), 0 },
+/*c3*/ { "ret", FALSE, NONE, 0, 0 },
+/*c4*/ { "les", TRUE, LONG, op2(E, R), 0 },
+/*c5*/ { "lds", TRUE, LONG, op2(E, R), 0 },
+/*c6*/ { "mov", TRUE, BYTE, op2(I, E), 0 },
+/*c7*/ { "mov", TRUE, LONG, op2(I, E), 0 },
+
+/*c8*/ { "enter", FALSE, NONE, op2(Ib, Iw), 0 },
+/*c9*/ { "leave", FALSE, NONE, 0, 0 },
+/*ca*/ { "lret", FALSE, NONE, op1(Iw), 0 },
+/*cb*/ { "lret", FALSE, NONE, 0, 0 },
+/*cc*/ { "int", FALSE, NONE, op1(o3), 0 },
+/*cd*/ { "int", FALSE, NONE, op1(Ib), 0 },
+/*ce*/ { "into", FALSE, NONE, 0, 0 },
+/*cf*/ { "iret", FALSE, NONE, 0, 0 },
+
+/*d0*/ { "", TRUE, BYTE, op2(o1, E), (char *)db_Grp2 },
+/*d1*/ { "", TRUE, LONG, op2(o1, E), (char *)db_Grp2 },
+/*d2*/ { "", TRUE, BYTE, op2(CL, E), (char *)db_Grp2 },
+/*d3*/ { "", TRUE, LONG, op2(CL, E), (char *)db_Grp2 },
+/*d4*/ { "aam", FALSE, NONE, op1(Iba), 0 },
+/*d5*/ { "aad", FALSE, NONE, op1(Iba), 0 },
+/*d6*/ { "", FALSE, NONE, 0, 0 },
+/*d7*/ { "xlat", FALSE, BYTE, op1(BX), 0 },
+
+/*d8*/ { "", TRUE, NONE, 0, (char *)db_Esc8 },
+/*d9*/ { "", TRUE, NONE, 0, (char *)db_Esc9 },
+/*da*/ { "", TRUE, NONE, 0, (char *)db_Esca },
+/*db*/ { "", TRUE, NONE, 0, (char *)db_Escb },
+/*dc*/ { "", TRUE, NONE, 0, (char *)db_Escc },
+/*dd*/ { "", TRUE, NONE, 0, (char *)db_Escd },
+/*de*/ { "", TRUE, NONE, 0, (char *)db_Esce },
+/*df*/ { "", TRUE, NONE, 0, (char *)db_Escf },
+
+/*e0*/ { "loopne",FALSE, NONE, op1(Db), 0 },
+/*e1*/ { "loope", FALSE, NONE, op1(Db), 0 },
+/*e2*/ { "loop", FALSE, NONE, op1(Db), 0 },
+/*e3*/ { "jcxz", FALSE, SDEP, op1(Db), "jecxz" },
+/*e4*/ { "in", FALSE, BYTE, op2(Ib, A), 0 },
+/*e5*/ { "in", FALSE, LONG, op2(Ib, A) , 0 },
+/*e6*/ { "out", FALSE, BYTE, op2(A, Ib), 0 },
+/*e7*/ { "out", FALSE, LONG, op2(A, Ib) , 0 },
+
+/*e8*/ { "call", FALSE, NONE, op1(Dl), 0 },
+/*e9*/ { "jmp", FALSE, NONE, op1(Dl), 0 },
+/*ea*/ { "ljmp", FALSE, NONE, op1(OS), 0 },
+/*eb*/ { "jmp", FALSE, NONE, op1(Db), 0 },
+/*ec*/ { "in", FALSE, BYTE, op2(DX, A), 0 },
+/*ed*/ { "in", FALSE, LONG, op2(DX, A) , 0 },
+/*ee*/ { "out", FALSE, BYTE, op2(A, DX), 0 },
+/*ef*/ { "out", FALSE, LONG, op2(A, DX) , 0 },
+
+/*f0*/ { "", FALSE, NONE, 0, 0 },
+/*f1*/ { "", FALSE, NONE, 0, 0 },
+/*f2*/ { "", FALSE, NONE, 0, 0 },
+/*f3*/ { "", FALSE, NONE, 0, 0 },
+/*f4*/ { "hlt", FALSE, NONE, 0, 0 },
+/*f5*/ { "cmc", FALSE, NONE, 0, 0 },
+/*f6*/ { "", TRUE, BYTE, 0, (char *)db_Grp3 },
+/*f7*/ { "", TRUE, LONG, 0, (char *)db_Grp3 },
+
+/*f8*/ { "clc", FALSE, NONE, 0, 0 },
+/*f9*/ { "stc", FALSE, NONE, 0, 0 },
+/*fa*/ { "cli", FALSE, NONE, 0, 0 },
+/*fb*/ { "sti", FALSE, NONE, 0, 0 },
+/*fc*/ { "cld", FALSE, NONE, 0, 0 },
+/*fd*/ { "std", FALSE, NONE, 0, 0 },
+/*fe*/ { "", TRUE, NONE, 0, (char *)db_Grp4 },
+/*ff*/ { "", TRUE, NONE, 0, (char *)db_Grp5 },
+};
+
+/* Fallback entry for opcodes with no table row (see the 0x0f path in
+   db_disasm). */
+struct inst db_bad_inst =
+	{ "???", FALSE, NONE, 0, 0 }
+;
+
+/* Extract the mod / reg / r-m fields of a modr/m byte. */
+#define f_mod(byte) ((byte)>>6)
+#define f_reg(byte) (((byte)>>3)&0x7)
+#define f_rm(byte) ((byte)&0x7)
+
+/* Extract the scale / index / base fields of a SIB byte. */
+#define sib_ss(byte) ((byte)>>6)
+#define sib_index(byte) (((byte)>>3)&0x7)
+#define sib_base(byte) ((byte)&0x7)
+
+/* Decoded effective address, filled in by db_read_address and rendered
+   by db_print_address. */
+struct i_addr {
+	int is_reg; /* if reg, reg number is in 'disp' */
+	int disp;	/* displacement, or register number */
+	char * base;	/* base register name, or 0 for none */
+	char * index;	/* index register name, or 0 for none */
+	int ss;	/* scale: index is multiplied by 1<<ss */
+};
+
+/* 16-bit addressing: base/index register pair selected by the r/m
+   field (mod != 3). */
+char * db_index_reg_16[8] = {
+	"%bx,%si",
+	"%bx,%di",
+	"%bp,%si",
+	"%bp,%di",
+	"%si",
+	"%di",
+	"%bp",
+	"%bx"
+};
+
+/* Register names indexed by [size][reg]: rows are BYTE, WORD, LONG. */
+char * db_reg[3][8] = {
+	{ "%al", "%cl", "%dl", "%bl", "%ah", "%ch", "%dh", "%bh" },
+	{ "%ax", "%cx", "%dx", "%bx", "%sp", "%bp", "%si", "%di" },
+	{ "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi" }
+};
+
+/* Segment register names indexed by the modr/m reg field. */
+char * db_seg_reg[8] = {
+	"%es", "%cs", "%ss", "%ds", "%fs", "%gs", "", ""
+};
+
+/*
+ * lengths for size attributes
+ */
+int db_lengths[] = {
+	1, /* BYTE */
+	2, /* WORD */
+	4, /* LONG */
+	8, /* QUAD */
+	4, /* SNGL */
+	8, /* DBLR */
+	10, /* EXTR */
+};
+
+#define get_value_inc(result, loc, size, is_signed, task) \
+ result = db_get_task_value((loc), (size), (is_signed), (task)); \
+ (loc) += (size);
+
+/*
+ * Read address at location and return updated location.
+ *
+ * Decodes the memory/register operand described by 'regmodrm' (plus
+ * any SIB byte and displacement following at 'loc') into *addrp.
+ */
+static db_addr_t
+db_read_address(
+	db_addr_t	loc,		/* first byte after the modr/m byte */
+	int	short_addr,	/* TRUE: 16-bit addressing mode */
+	int	regmodrm,	/* the modr/m byte */
+	struct i_addr *addrp, /* out */
+	task_t	task)
+{
+	int mod, rm, sib, index, disp;
+
+	mod = f_mod(regmodrm);
+	rm = f_rm(regmodrm);
+
+	if (mod == 3) {
+	/* Register operand: register number carried in 'disp'. */
+	addrp->is_reg = TRUE;
+	addrp->disp = rm;
+	return loc;
+	}
+	addrp->is_reg = FALSE;
+	addrp->index = 0;
+
+	if (short_addr) {
+	addrp->index = 0;
+	addrp->ss = 0;
+	switch (mod) {
+	case 0:
+	if (rm == 6) {
+	/* mod=0 rm=6: 16-bit absolute displacement, no base. */
+	get_value_inc(disp, loc, 2, TRUE, task);
+	addrp->disp = disp;
+	addrp->base = 0;
+	}
+	else {
+	addrp->disp = 0;
+	addrp->base = db_index_reg_16[rm];
+	}
+	break;
+	case 1:
+	/* 8-bit signed displacement plus base/index pair. */
+	get_value_inc(disp, loc, 1, TRUE, task);
+	addrp->disp = disp;
+	addrp->base = db_index_reg_16[rm];
+	break;
+	case 2:
+	/* 16-bit displacement plus base/index pair. */
+	get_value_inc(disp, loc, 2, TRUE, task);
+	addrp->disp = disp;
+	addrp->base = db_index_reg_16[rm];
+	break;
+	}
+	}
+	else {
+	/* 32-bit addressing.  rm == 4 means a SIB byte follows.
+	   (mod != 3 is always true here: mod == 3 returned above.) */
+	if (mod != 3 && rm == 4) {
+	get_value_inc(sib, loc, 1, FALSE, task);
+	rm = sib_base(sib);
+	index = sib_index(sib);
+	if (index != 4)	/* index 4 encodes "no index" */
+	addrp->index = db_reg[LONG][index];
+	addrp->ss = sib_ss(sib);
+	}
+
+	switch (mod) {
+	case 0:
+	if (rm == 5) {
+	/* mod=0 rm=5: 32-bit absolute displacement, no base. */
+	get_value_inc(addrp->disp, loc, 4, FALSE, task);
+	addrp->base = 0;
+	}
+	else {
+	addrp->disp = 0;
+	addrp->base = db_reg[LONG][rm];
+	}
+	break;
+
+	case 1:
+	/* 8-bit signed displacement. */
+	get_value_inc(disp, loc, 1, TRUE, task);
+	addrp->disp = disp;
+	addrp->base = db_reg[LONG][rm];
+	break;
+
+	case 2:
+	/* 32-bit displacement. */
+	get_value_inc(disp, loc, 4, FALSE, task);
+	addrp->disp = disp;
+	addrp->base = db_reg[LONG][rm];
+	break;
+	}
+	}
+	return loc;
+}
+
+/*
+ * Print a decoded operand in AT&T syntax: either a plain register, or
+ * "seg:disp(base,index,scale)".  When there is neither base nor index
+ * the displacement is printed symbolically via db_task_printsym.
+ * ("%n" is ddb's own printf number format — presumably unsigned; see
+ * kern/printf.)
+ */
+static void
+db_print_address(
+	const char *	seg,		/* segment-override name, or 0 */
+	int		size,		/* row of db_reg for register operands */
+	const struct i_addr *addrp,
+	task_t		task)
+{
+	if (addrp->is_reg) {
+	    db_printf("%s", db_reg[size][addrp->disp]);
+	    return;
+	}
+
+	if (seg) {
+	    db_printf("%s:", seg);
+	}
+
+	if (addrp->base != 0 || addrp->index != 0) {
+	    db_printf("%#n", addrp->disp);
+	    db_printf("(");
+	    if (addrp->base)
+		db_printf("%s", addrp->base);
+	    if (addrp->index)
+		db_printf(",%s,%d", addrp->index, 1<<addrp->ss);
+	    db_printf(")");
+	} else
+	    db_task_printsym((db_addr_t)addrp->disp, DB_STGY_ANY, task);
+}
+
+/*
+ * Disassemble floating-point ("escape") instruction
+ * and return updated location.
+ *
+ * 'inst' is the escape opcode (0xd8..0xdf); the modr/m byte at 'loc'
+ * selects the row of db_Esc_inst[inst - 0xd8].  mod != 3 is a memory
+ * operand printed with a size suffix; mod == 3 uses the reg-reg
+ * formats encoded in f_rrmode.  'size' is accepted for symmetry with
+ * db_disasm but not used here.
+ */
+static db_addr_t
+db_disasm_esc(
+	db_addr_t	loc,
+	int		inst,
+	int		short_addr,
+	int		size,
+	const char *	seg,
+	task_t		task)
+{
+	int		regmodrm;
+	struct finst	*fp;
+	int		mod;
+	struct i_addr	address;
+	char *		name;
+
+	get_value_inc(regmodrm, loc, 1, FALSE, task);
+	fp = &db_Esc_inst[inst - 0xd8][f_reg(regmodrm)];
+	mod = f_mod(regmodrm);
+	if (mod != 3) {
+	    /*
+	     * Normal address modes.
+	     */
+	    loc = db_read_address(loc, short_addr, regmodrm, &address, task);
+	    /* Table names must not be used as format strings. */
+	    db_printf("%s", fp->f_name);
+	    switch(fp->f_size) {
+		case SNGL:
+		case WORD:
+		    db_printf("s");
+		    break;
+		case DBLR:
+		case LONG:
+		    db_printf("l");
+		    break;
+		case EXTR:
+		    db_printf("t");
+		    break;
+		case QUAD:
+		    db_printf("q");
+		    break;
+		default:
+		    break;
+	    }
+	    db_printf("\t");
+	    db_print_address(seg, BYTE, &address, task);
+	}
+	else {
+	    /*
+	     * 'reg-reg' - special formats
+	     */
+	    switch (fp->f_rrmode) {
+		case op2(ST,STI):
+		    name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+		    db_printf("%s\t%%st,%%st(%d)",name,f_rm(regmodrm));
+		    break;
+		case op2(STI,ST):
+		    name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+		    db_printf("%s\t%%st(%d),%%st",name, f_rm(regmodrm));
+		    break;
+		case op1(STI):
+		    name = (fp->f_rrname) ? fp->f_rrname : fp->f_name;
+		    db_printf("%s\t%%st(%d)",name, f_rm(regmodrm));
+		    break;
+		case op1(X):
+		    /* f_rrname is really a char *[8] indexed by r/m. */
+		    db_printf("%s", ((char **)fp->f_rrname)[f_rm(regmodrm)]);
+		    break;
+		case op1(XA):
+		    db_printf("%s\t%%ax",
+			      ((char **)fp->f_rrname)[f_rm(regmodrm)]);
+		    break;
+		default:
+		    db_printf("<bad instruction>");
+		    break;
+	    }
+	}
+
+	return loc;
+}
+
+/*
+ * Disassemble instruction at 'loc'.  'altfmt' specifies an
+ * (optional) alternate format.  Return address of start of
+ * next instruction.
+ *
+ * Decoding order: legacy prefixes, then either the FP escape range
+ * (0xd8-0xdf, handed to db_disasm_esc), the two-byte 0x0f table, or
+ * the main one-byte table; group opcodes are resolved through the
+ * modr/m reg field; finally each operand encoded in i_mode is printed.
+ */
+db_addr_t
+db_disasm(
+	db_addr_t	loc,
+	boolean_t	altfmt,
+	task_t	task)
+{
+	int	inst;
+	int	size;
+	int	short_addr;
+	char *	seg;
+	struct inst *	ip;
+	char *	i_name;
+	int	i_size;
+	int	i_mode;
+	int	regmodrm;
+	boolean_t	first;
+	int	displ;
+	int	prefix;
+	int	imm;
+	int	imm2;
+	int	len;
+	struct i_addr	address;
+
+#ifdef __x86_64__
+	/* The instruction set decoding needs an update, avoid showing bogus output. */
+	db_printf("TODO\n");
+	return loc+1;
+#endif
+
+	get_value_inc(inst, loc, 1, FALSE, task);
+	if (db_disasm_16) {
+	    short_addr = TRUE;
+	    size = WORD;
+	}
+	else {
+	    short_addr = FALSE;
+	    size = LONG;
+	}
+	seg = 0;
+	regmodrm = 0;
+
+	/*
+	 * Get prefixes.  0x66/0x67 toggle operand/address size; segment
+	 * overrides are remembered in 'seg'; lock/rep are printed now.
+	 */
+	prefix = TRUE;
+	do {
+	    switch (inst) {
+		case 0x66:		/* data16 */
+		    if (size == LONG)
+			size = WORD;
+		    else
+			size = LONG;
+		    break;
+		case 0x67:
+		    short_addr = !short_addr;
+		    break;
+		case 0x26:
+		    seg = "%es";
+		    break;
+		case 0x36:
+		    seg = "%ss";
+		    break;
+		case 0x2e:
+		    seg = "%cs";
+		    break;
+		case 0x3e:
+		    seg = "%ds";
+		    break;
+		case 0x64:
+		    seg = "%fs";
+		    break;
+		case 0x65:
+		    seg = "%gs";
+		    break;
+		case 0xf0:
+		    db_printf("lock ");
+		    break;
+		case 0xf2:
+		    db_printf("repne ");
+		    break;
+		case 0xf3:
+		    db_printf("repe ");	/* XXX repe VS rep */
+		    break;
+		default:
+		    prefix = FALSE;
+		    break;
+	    }
+	    if (prefix) {
+		get_value_inc(inst, loc, 1, FALSE, task);
+	    }
+	} while (prefix);
+
+	if (inst >= 0xd8 && inst <= 0xdf) {
+	    loc = db_disasm_esc(loc, inst, short_addr, size, seg, task);
+	    db_printf("\n");
+	    return loc;
+	}
+
+	if (inst == 0x0f) {
+	    /* Two-byte opcode: second byte indexes db_inst_0f. */
+	    get_value_inc(inst, loc, 1, FALSE, task);
+	    ip = db_inst_0f[inst>>4];
+	    if (ip == 0) {
+		ip = &db_bad_inst;
+	    }
+	    else {
+		ip = &ip[inst&0xf];
+	    }
+	}
+	else
+	    ip = &db_inst_table[inst];
+
+	if (ip->i_has_modrm) {
+	    get_value_inc(regmodrm, loc, 1, FALSE, task);
+	    loc = db_read_address(loc, short_addr, regmodrm, &address, task);
+	}
+
+	i_name = ip->i_name;
+	i_size = ip->i_size;
+	i_mode = ip->i_mode;
+
+	/* Group opcodes: real entry selected by the modr/m reg field. */
+	if (ip->i_extra == (char *)db_Grp1 ||
+	    ip->i_extra == (char *)db_Grp2 ||
+	    ip->i_extra == (char *)db_Grp6 ||
+	    ip->i_extra == (char *)db_Grp7 ||
+	    ip->i_extra == (char *)db_Grp8) {
+	    i_name = ((char **)ip->i_extra)[f_reg(regmodrm)];
+	}
+	else if (ip->i_extra == (char *)db_Grp3) {
+	    ip = (struct inst *)ip->i_extra;
+	    ip = &ip[f_reg(regmodrm)];
+	    i_name = ip->i_name;
+	    i_mode = ip->i_mode;
+	}
+	else if (ip->i_extra == (char *)db_Grp4 ||
+		 ip->i_extra == (char *)db_Grp5) {
+	    ip = (struct inst *)ip->i_extra;
+	    ip = &ip[f_reg(regmodrm)];
+	    i_name = ip->i_name;
+	    i_mode = ip->i_mode;
+	    i_size = ip->i_size;
+	}
+
+	/* Print the mnemonic.  Always via "%s": table strings must not
+	   be used as format strings.  SDEP picks the 16- or 32-bit
+	   spelling; otherwise a b/w/l suffix reflects the operand size. */
+	if (i_size == SDEP) {
+	    if (size == WORD)
+		db_printf("%s", i_name);
+	    else
+		db_printf("%s", ip->i_extra);
+	}
+	else {
+	    db_printf("%s", i_name);
+	    if (i_size != NONE) {
+		if (i_size == BYTE) {
+		    db_printf("b");
+		    size = BYTE;
+		}
+		else if (i_size == WORD) {
+		    db_printf("w");
+		    size = WORD;
+		}
+		else if (size == WORD)
+		    db_printf("w");
+		else
+		    db_printf("l");
+	    }
+	}
+	db_printf("\t");
+	for (first = TRUE;
+	     i_mode != 0;
+	     i_mode >>= 8, first = FALSE)
+	{
+	    if (!first)
+		db_printf(",");
+
+	    switch (i_mode & 0xFF) {
+
+		case E:
+		    db_print_address(seg, size, &address, task);
+		    break;
+
+		case Eind:
+		    db_printf("*");
+		    db_print_address(seg, size, &address, task);
+		    break;
+
+		case El:
+		    db_print_address(seg, LONG, &address, task);
+		    break;
+
+		case Ew:
+		    db_print_address(seg, WORD, &address, task);
+		    break;
+
+		case Eb:
+		    db_print_address(seg, BYTE, &address, task);
+		    break;
+
+		case R:
+		    db_printf("%s", db_reg[size][f_reg(regmodrm)]);
+		    break;
+
+		case Rw:
+		    db_printf("%s", db_reg[WORD][f_reg(regmodrm)]);
+		    break;
+
+		case Ri:
+		    db_printf("%s", db_reg[size][f_rm(inst)]);
+		    break;
+
+		case S:
+		    db_printf("%s", db_seg_reg[f_reg(regmodrm)]);
+		    break;
+
+		case Si:
+		    db_printf("%s", db_seg_reg[f_reg(inst)]);
+		    break;
+
+		case A:
+		    db_printf("%s", db_reg[size][0]);	/* acc */
+		    break;
+
+		case BX:
+		    if (seg)
+			db_printf("%s:", seg);
+		    db_printf("(%s)", short_addr ? "%bx" : "%ebx");
+		    break;
+
+		case CL:
+		    db_printf("%%cl");
+		    break;
+
+		case DX:
+		    db_printf("%%dx");
+		    break;
+
+		case SI:
+		    if (seg)
+			db_printf("%s:", seg);
+		    db_printf("(%s)", short_addr ? "%si" : "%esi");
+		    break;
+
+		case DI:
+		    db_printf("%%es:(%s)", short_addr ? "%di" : "%edi");
+		    break;
+
+		case CR:
+		    db_printf("%%cr%d", f_reg(regmodrm));
+		    break;
+
+		case DR:
+		    db_printf("%%dr%d", f_reg(regmodrm));
+		    break;
+
+		case TR:
+		    db_printf("%%tr%d", f_reg(regmodrm));
+		    break;
+
+		case I:
+		    len = db_lengths[size];
+		    get_value_inc(imm, loc, len, FALSE, task);/* unsigned */
+		    db_printf("$%#n", imm);
+		    break;
+
+		case Is:
+		    len = db_lengths[size];
+		    get_value_inc(imm, loc, len, TRUE, task); /* signed */
+		    db_printf("$%#r", imm);
+		    break;
+
+		case Ib:
+		    get_value_inc(imm, loc, 1, FALSE, task); /* unsigned */
+		    db_printf("$%#n", imm);
+		    break;
+
+		case Iba:
+		    /* aam/aad operand; the default base 10 is omitted. */
+		    get_value_inc(imm, loc, 1, FALSE, task);
+		    if (imm != 0x0a)
+			db_printf("$%#r", imm);
+		    break;
+
+		case Ibs:
+		    get_value_inc(imm, loc, 1, TRUE, task);	/* signed */
+		    db_printf("$%#r", imm);
+		    break;
+
+		case Iw:
+		    get_value_inc(imm, loc, 2, FALSE, task);	/* unsigned */
+		    db_printf("$%#n", imm);
+		    break;
+
+		case Il:
+		    get_value_inc(imm, loc, 4, FALSE, task);
+		    db_printf("$%#n", imm);
+		    break;
+
+		case O:
+		    if (short_addr) {
+			get_value_inc(displ, loc, 2, TRUE, task);
+		    }
+		    else {
+			get_value_inc(displ, loc, 4, TRUE, task);
+		    }
+		    if (seg)
+			db_printf("%s:%#r",seg, displ);
+		    else
+			db_task_printsym((db_addr_t)displ, DB_STGY_ANY, task);
+		    break;
+
+		case Db:
+		    get_value_inc(displ, loc, 1, TRUE, task);
+		    if (short_addr) {
+			/* offset only affects low 16 bits */
+			displ = (loc & 0xffff0000)
+				| ((loc + displ) & 0xffff);
+		    }
+		    else
+			displ = displ + loc;
+		    db_task_printsym((db_addr_t)displ,DB_STGY_XTRN,task);
+		    break;
+
+		case Dl:
+		    if (short_addr) {
+			get_value_inc(displ, loc, 2, TRUE, task);
+			/* offset only affects low 16 bits */
+			displ = (loc & 0xffff0000)
+				| ((loc + displ) & 0xffff);
+		    }
+		    else {
+			get_value_inc(displ, loc, 4, TRUE, task);
+			displ = displ + loc;
+		    }
+		    db_task_printsym((db_addr_t)displ, DB_STGY_XTRN, task);
+		    break;
+
+		case o1:
+		    db_printf("$1");
+		    break;
+
+		case o3:
+		    db_printf("$3");
+		    break;
+
+		case OS:
+		    if (short_addr) {
+			get_value_inc(imm, loc, 2, FALSE, task);	/* offset */
+		    }
+		    else {
+			get_value_inc(imm, loc, 4, FALSE, task);	/* offset */
+		    }
+		    get_value_inc(imm2, loc, 2, FALSE, task);	/* segment */
+		    db_printf("$%#n,%#n", imm2, imm);
+		    break;
+	    }
+	}
+
+	if (altfmt == 0 && !db_disasm_16) {
+	    if (inst == 0xe9 || inst == 0xeb) {
+		/*
+		 * GAS pads to longword boundary after unconditional jumps.
+		 */
+		loc = (loc + (4-1)) & ~(4-1);
+	    }
+	}
+	db_printf("\n");
+	return loc;
+}
+
+#endif /* MACH_KDB */
diff --git a/i386/i386/db_interface.c b/i386/i386/db_interface.c
new file mode 100644
index 0000000..483991d
--- /dev/null
+++ b/i386/i386/db_interface.c
@@ -0,0 +1,865 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Interface to new debugger.
+ */
+
+#include <string.h>
+#include <sys/reboot.h>
+#include <vm/pmap.h>
+
+#include <i386/thread.h>
+#include <i386/db_machdep.h>
+#include <i386/seg.h>
+#include <i386/trap.h>
+#include <i386/setjmp.h>
+#include <i386/pmap.h>
+#include <i386/proc_reg.h>
+#include <i386/locore.h>
+#include <i386at/biosmem.h>
+#include "gdt.h"
+#include "trap.h"
+
+#include "vm_param.h"
+#include <vm/vm_map.h>
+#include <vm/vm_fault.h>
+#include <kern/cpu_number.h>
+#include <kern/printf.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <ddb/db_access.h>
+#include <ddb/db_command.h>
+#include <ddb/db_output.h>
+#include <ddb/db_run.h>
+#include <ddb/db_task_thread.h>
+#include <ddb/db_trap.h>
+#include <ddb/db_watch.h>
+#include <ddb/db_mp.h>
+#include <machine/db_interface.h>
+#include <machine/machspl.h>
+
+#if MACH_KDB
+/* Whether the kernel uses any debugging register. */
+static boolean_t kernel_dr;
+#endif
+/* Whether the current debug registers are zero. */
+static boolean_t zero_dr;
+
+/* Register state examined and modified by the debugger; kdb_trap copies
+   the trap frame in here and writes any changes back on exit. */
+db_regs_t ddb_regs;
+
+/*
+ * Load the user debug registers saved in 'pcb' into the CPU.
+ *
+ * Does nothing while ddb owns the hardware debug registers (kernel_dr).
+ * The DR writes are skipped when both the previous contents and the new
+ * ones are all zero (tracked by zero_dr), keeping the common
+ * no-watchpoint context switch cheap.
+ */
+void db_load_context(pcb_t pcb)
+{
+#if MACH_KDB
+	int s = splhigh();
+
+	if (kernel_dr) {
+		splx(s);
+		return;
+	}
+#endif
+	/* Else set user debug registers, if any */
+	unsigned int *dr = pcb->ims.ids.dr;
+	boolean_t will_zero_dr = !dr[0] && !dr[1] && !dr[2] && !dr[3] && !dr[7];
+
+	if (!(zero_dr && will_zero_dr))
+	{
+		set_dr0(dr[0]);
+		set_dr1(dr[1]);
+		set_dr2(dr[2]);
+		set_dr3(dr[3]);
+		set_dr7(dr[7]);
+		zero_dr = will_zero_dr;
+	}
+
+#if MACH_KDB
+	splx(s);
+#endif
+}
+
+/* Calls db_on(i) when built with MACH_KDB && NCPUS > 1 (SMP debugger
+   entry for CPU 'i'); compiled to a no-op otherwise. */
+void cpu_interrupt_to_db(int i){
+#if MACH_KDB && NCPUS > 1
+	db_on(i);
+#endif
+}
+
+/* Copy the thread's saved debug-register state out of its pcb. */
+void db_get_debug_state(
+	pcb_t pcb,
+	struct i386_debug_state *state)
+{
+	*state = pcb->ims.ids;
+}
+
+/*
+ * Install a new user debug-register state into 'pcb'.
+ *
+ * Each breakpoint address dr[0..3] must lie within the user address
+ * range, otherwise KERN_INVALID_ARGUMENT is returned and nothing
+ * changes.  NOTE(review): a slot value of 0 (watchpoint unused) is
+ * also rejected by this check when VM_MIN_USER_ADDRESS > 0 — confirm
+ * callers always fill all four slots with valid addresses.
+ *
+ * If 'pcb' belongs to the current thread, the hardware registers are
+ * reloaded immediately via db_load_context().
+ */
+kern_return_t db_set_debug_state(
+	pcb_t pcb,
+	const struct i386_debug_state *state)
+{
+	int i;
+
+	for (i = 0; i <= 3; i++)
+		if (state->dr[i] < VM_MIN_USER_ADDRESS
+		    || state->dr[i] >= VM_MAX_USER_ADDRESS)
+			return KERN_INVALID_ARGUMENT;
+
+	pcb->ims.ids = *state;
+
+	if (pcb == current_thread()->pcb)
+		db_load_context(pcb);
+
+	return KERN_SUCCESS;
+}
+
+#if MACH_KDB
+
+struct i386_saved_state *i386_last_saved_statep; /* regs of the last kdb_trap */
+struct i386_saved_state i386_nested_saved_state; /* regs of a trap taken while db_recover is set */
+uintptr_t i386_last_kdb_sp;	/* kernel stack pointer at kdb_trap entry */
+
+extern thread_t db_default_thread;
+
+/* Debug-register contents while the kernel debugger owns them (kernel_dr). */
+static struct i386_debug_state ids;
+
+/*
+ * Program hardware debug register 'num' (0..3) for ddb's own use.
+ *
+ * linear_addr: breakpoint linear address, or 0 to clear the slot.
+ * type, len:   DR7 R/W and LEN field values for the slot.
+ * persistence: DR7 enable bits (I386_DB_LOCAL and/or I386_DB_GLOBAL).
+ *
+ * Setting the first kernel breakpoint takes ownership of the debug
+ * registers (kernel_dr) and clears any user contents; when the last
+ * slot is cleared, ownership is dropped and the current thread's user
+ * debug state is reloaded via db_load_context().
+ */
+void db_dr (
+	int		num,
+	vm_offset_t	linear_addr,
+	int		type,
+	int		len,
+	int		persistence)
+{
+	int s = splhigh();
+	unsigned long dr7;
+
+	if (!kernel_dr) {
+	    if (!linear_addr) {
+		splx(s);
+		return;
+	    }
+	    kernel_dr = TRUE;
+	    /* Clear user debugging registers */
+	    set_dr7(0);
+	    set_dr0(0);
+	    set_dr1(0);
+	    set_dr2(0);
+	    set_dr3(0);
+	}
+
+	ids.dr[num] = linear_addr;
+	switch (num) {
+	    case 0: set_dr0(linear_addr); break;
+	    case 1: set_dr1(linear_addr); break;
+	    case 2: set_dr2(linear_addr); break;
+	    case 3: set_dr3(linear_addr); break;
+	}
+
+	/* Replace type/len/persistence for DRnum in dr7:
+	   4 control bits per slot at bit 16+4*num, 2 enable bits at 2*num. */
+	dr7 = get_dr7 ();
+	dr7 &= ~(0xfUL << (4*num+16)) & ~(0x3UL << (2*num));
+	dr7 |= (((len << 2) | type) << (4*num+16)) | (persistence << (2*num));
+	set_dr7 (dr7);
+
+	if (kernel_dr) {
+	    if (!ids.dr[0] && !ids.dr[1] && !ids.dr[2] && !ids.dr[3]) {
+		/* Not used any more, switch back to user debugging registers */
+		set_dr7 (0);
+		kernel_dr = FALSE;
+		zero_dr = TRUE;
+		db_load_context(current_thread()->pcb);
+	    }
+	}
+	splx(s);
+}
+
+/*
+ * Arm hardware debug register 'num' as a write watchpoint covering
+ * [loaddr, hiaddr).  The range must be exactly 1, 2 or 4 bytes and
+ * naturally aligned (hardware restriction).  For a user task the
+ * address is first translated to a kernel address, then to a linear
+ * address for the DR register.  Returns TRUE on success.
+ */
+boolean_t
+db_set_hw_watchpoint(
+	const db_watchpoint_t	watch,
+	unsigned	num)
+{
+	vm_size_t	size = watch->hiaddr - watch->loaddr;
+	db_addr_t	addr = watch->loaddr;
+	vm_offset_t	kern_addr;
+
+	if (num >= 4)
+	    return FALSE;
+	if (size != 1 && size != 2 && size != 4)
+	    return FALSE;
+
+	if (addr & (size-1))
+	    /* Unaligned */
+	    return FALSE;
+
+	if (watch->task) {
+	    if (db_user_to_kernel_address(watch->task, addr, &kern_addr, 1) < 0)
+		return FALSE;
+	    addr = kern_addr;
+	}
+	addr = kvtolin(addr);
+
+	/* size-1 is the DR7 LEN encoding (0=1, 1=2, 3=4 bytes). */
+	db_dr (num, addr, I386_DB_TYPE_W, size-1, I386_DB_LOCAL|I386_DB_GLOBAL);
+
+	/* NOTE(review): "%x" takes an int while addr is db_addr_t and may
+	   be wider on 64-bit — confirm db_printf's handling. */
+	db_printf("Hardware watchpoint %d set for %x\n", num, addr);
+	return TRUE;
+}
+
+/*
+ * Disarm hardware watchpoint 'num'.  Returns FALSE for an invalid
+ * register number, TRUE once the slot has been cleared.
+ */
+boolean_t
+db_clear_hw_watchpoint(
+	unsigned	num)
+{
+	if (num > 3)
+	    return FALSE;
+
+	/* Zero address, type, length and enable bits for the slot. */
+	db_dr (num, 0, 0, 0, 0);
+	return TRUE;
+}
+
+/*
+ * Print trap reason.
+ */
+static void
+kdbprinttrap(
+	int	type,	/* trap type, resolved to a name by trap_name() */
+	int	code)	/* error code delivered with the trap */
+{
+	printf("kernel: %s (%d), code=%x\n",
+	       trap_name(type), type, code);
+}
+
+/*
+ * kdb_trap - field a TRACE or BPT trap
+ */
+
+/* Non-NULL while ddb has a recovery point for nested traps —
+   presumably the longjmp target used by db_error(); see ddb. */
+extern jmp_buf_t *db_recover;
+spl_t saved_ipl[NCPUS];	/* just to know what was IPL before trap */
+
+/*
+ * Enter the kernel debugger for trap 'type'.  Copies the trap frame
+ * into ddb_regs, runs the debugger, and writes any register changes
+ * made from within ddb back into *regs.  Returns 1 (trap handled).
+ */
+boolean_t
+kdb_trap(
+	int	type,
+	int	code,
+	struct i386_saved_state *regs)
+{
+	spl_t	s;
+
+	s = splhigh();
+	saved_ipl[cpu_number()] = s;
+
+	switch (type) {
+	    case T_DEBUG:	/* single_step */
+	    {
+		int addr;
+		/* DR6 low bits record which breakpoint register fired. */
+		int status = get_dr6();
+
+		if (status & 0xf) {	/* hmm hdw break */
+			addr =	status & 0x8 ? get_dr3() :
+				status & 0x4 ? get_dr2() :
+				status & 0x2 ? get_dr1() :
+					       get_dr0();
+			/* EFL_RF lets the faulting instruction resume
+			   without re-triggering the breakpoint. */
+			regs->efl |= EFL_RF;
+			db_single_step_cmd(addr, 0, 1, "p");
+		}
+	    }
+	    /* FALLTHROUGH */
+	    case T_INT3:	/* breakpoint */
+	    case T_WATCHPOINT:	/* watchpoint */
+	    case -1:	/* keyboard interrupt */
+		break;
+
+	    default:
+		if (db_recover) {
+		    /* A recovery point exists: report and unwind to it. */
+		    i386_nested_saved_state = *regs;
+		    db_printf("Caught %s (%d), code = %x, pc = %x\n",
+			      trap_name(type), type, code, regs->eip);
+		    db_error("");
+		    /*NOTREACHED*/
+		}
+		kdbprinttrap(type, code);
+	}
+
+#if	NCPUS > 1
+	if (db_enter())
+#endif	/* NCPUS > 1 */
+	{
+	    i386_last_saved_statep = regs;
+	    i386_last_kdb_sp = (uintptr_t) &type;
+
+	    /* XXX Should switch to ddb`s own stack here. */
+
+	    ddb_regs = *regs;
+	    if ((regs->cs & 0x3) == KERNEL_RING) {
+		/*
+		 * Kernel mode - esp and ss not saved
+		 */
+		ddb_regs.uesp = (uintptr_t)&regs->uesp;	/* kernel stack pointer */
+		ddb_regs.ss   = KERNEL_DS;
+	    }
+
+	    cnpollc(TRUE);
+	    db_task_trap(type, code, (regs->cs & 0x3) != 0);
+	    cnpollc(FALSE);
+
+	    /* Write back any register changes made from the debugger. */
+	    regs->eip    = ddb_regs.eip;
+	    regs->efl    = ddb_regs.efl;
+	    regs->eax    = ddb_regs.eax;
+	    regs->ecx    = ddb_regs.ecx;
+	    regs->edx    = ddb_regs.edx;
+	    regs->ebx    = ddb_regs.ebx;
+	    if ((regs->cs & 0x3) != KERNEL_RING) {
+		/*
+		 * user mode - saved esp and ss valid
+		 */
+		regs->uesp = ddb_regs.uesp;		/* user stack pointer */
+		regs->ss   = ddb_regs.ss & 0xffff;	/* user stack segment */
+	    }
+	    regs->ebp    = ddb_regs.ebp;
+	    regs->esi    = ddb_regs.esi;
+	    regs->edi    = ddb_regs.edi;
+	    regs->cs     = ddb_regs.cs & 0xffff;
+#if !defined(__x86_64__) || defined(USER32)
+	    regs->es     = ddb_regs.es & 0xffff;
+	    regs->ds     = ddb_regs.ds & 0xffff;
+	    regs->fs     = ddb_regs.fs & 0xffff;
+	    regs->gs     = ddb_regs.gs & 0xffff;
+#endif
+	    /* Step over the int3 byte if it is still there. */
+	    if ((type == T_INT3) &&
+		(db_get_task_value(regs->eip, BKPT_SIZE, FALSE, TASK_NULL)
+		 == BKPT_INST))
+		regs->eip += BKPT_SIZE;
+	}
+#if	NCPUS > 1
+	db_leave();
+#endif	/* NCPUS > 1 */
+
+	splx(s);
+	return 1;
+}
+
+/*
+ * Enter KDB through a keyboard trap.
+ * We show the registers as of the keyboard interrupt
+ * instead of those at its call to KDB.
+ */
+/*
+ * Registers saved by the kdb keyboard-entry stub, sitting just below
+ * the hardware/interrupt state on the stack.
+ */
+struct int_regs {
+#ifdef __i386__
+	long	edi;
+	long	esi;
+#endif
+	long	ebp;
+	long	ebx;
+	struct i386_interrupt_state *is;	/* interrupt frame above us */
+};
+
+void
+kdb_kentry(
+	struct int_regs	*int_regs)
+{
+	struct i386_interrupt_state *is = int_regs->is;
+	spl_t	s = splhigh();
+
+#if	NCPUS > 1
+	if (db_enter())
+#endif	/* NCPUS > 1 */
+	{
+	    /* Assemble ddb_regs from the interrupt frame plus the
+	     * registers the entry stub pushed (int_regs). */
+	    if ((is->cs & 0x3) != KERNEL_RING) {
+		/* user mode: hardware pushed esp/ss just above the
+		 * interrupt state */
+		ddb_regs.uesp = *(uintptr_t *)(is+1);
+		ddb_regs.ss   = *(int *)((uintptr_t *)(is+1)+1);
+	    }
+	    else {
+		/* kernel mode: no esp/ss on the frame; the stack top is
+		 * right after the interrupt state */
+		ddb_regs.ss  = KERNEL_DS;
+		ddb_regs.uesp= (uintptr_t)(is+1);
+	    }
+	    ddb_regs.efl = is->efl;
+	    ddb_regs.cs  = is->cs;
+	    ddb_regs.eip = is->eip;
+	    ddb_regs.eax = is->eax;
+	    ddb_regs.ecx = is->ecx;
+	    ddb_regs.edx = is->edx;
+	    ddb_regs.ebx = int_regs->ebx;
+	    ddb_regs.ebp = int_regs->ebp;
+#ifdef __i386__
+	    ddb_regs.esi = int_regs->esi;
+	    ddb_regs.edi = int_regs->edi;
+#endif
+#ifdef __x86_64__
+	    ddb_regs.esi = is->rsi;
+	    ddb_regs.edi = is->rdi;
+#endif
+#if !defined(__x86_64__) || defined(USER32)
+	    ddb_regs.ds = is->ds;
+	    ddb_regs.es = is->es;
+	    ddb_regs.fs = is->fs;
+	    ddb_regs.gs = is->gs;
+#endif
+	    cnpollc(TRUE);
+	    db_task_trap(-1, 0, (ddb_regs.cs & 0x3) != 0);
+	    cnpollc(FALSE);
+
+	    /* Write any register edits back into the saved frames. */
+	    if ((ddb_regs.cs & 0x3) != KERNEL_RING) {
+		((int *)(is+1))[0] = ddb_regs.uesp;
+		((int *)(is+1))[1] = ddb_regs.ss & 0xffff;
+	    }
+	    is->efl = ddb_regs.efl;
+	    is->cs  = ddb_regs.cs & 0xffff;
+	    is->eip = ddb_regs.eip;
+	    is->eax = ddb_regs.eax;
+	    is->ecx = ddb_regs.ecx;
+	    is->edx = ddb_regs.edx;
+	    int_regs->ebx = ddb_regs.ebx;
+	    int_regs->ebp = ddb_regs.ebp;
+#ifdef __i386__
+	    int_regs->esi = ddb_regs.esi;
+	    int_regs->edi = ddb_regs.edi;
+#endif
+#ifdef __x86_64__
+	    is->rsi = ddb_regs.esi;
+	    is->rdi = ddb_regs.edi;
+#endif
+#if !defined(__x86_64__) || defined(USER32)
+	    is->ds = ddb_regs.ds & 0xffff;
+	    is->es = ddb_regs.es & 0xffff;
+	    is->fs = ddb_regs.fs & 0xffff;
+	    is->gs = ddb_regs.gs & 0xffff;
+#endif
+	}
+#if	NCPUS > 1
+	db_leave();
+#endif	/* NCPUS > 1 */
+
+	(void) splx(s);
+}
+
+boolean_t db_no_vm_fault = TRUE;
+
+/*
+ * Translate a user virtual address in TASK into a physical address.
+ * Returns 0 and fills *PADDR on success, -1 on failure.  When FLAG is
+ * non-zero a diagnostic is printed if no mapping exists.  Unless
+ * db_no_vm_fault is set, an absent mapping is faulted in once before
+ * giving up.
+ */
+static int
+db_user_to_phys_address(
+	const task_t	task,
+	vm_offset_t	addr,
+	phys_addr_t	*paddr,
+	int	flag)
+{
+	pt_entry_t	*ptp;
+	boolean_t	faulted = FALSE;
+
+    retry:
+	ptp = pmap_pte(task->map->pmap, addr);
+	if (ptp == PT_ENTRY_NULL || (*ptp & INTEL_PTE_VALID) == 0) {
+	    if (!faulted && !db_no_vm_fault) {
+		kern_return_t	err;
+
+		/* Attempt to page the mapping in -- once only. */
+		faulted = TRUE;
+		err = vm_fault( task->map,
+				trunc_page(addr),
+				VM_PROT_READ,
+				FALSE, FALSE, 0);
+		if (err == KERN_SUCCESS)
+		    goto retry;
+	    }
+	    if (flag) {
+		db_printf("\nno memory is assigned to address %08x\n", addr);
+	    }
+	    return(-1);
+	}
+
+	/* Combine the page frame with the in-page offset. */
+	*paddr = pte_to_pa(*ptp) + (addr & (INTEL_PGBYTES-1));
+	return(0);
+}
+
+/*
+ * Translate a user virtual address in TASK into a kernel virtual
+ * address the debugger can dereference directly.  Returns 0 on
+ * success with *KADDR filled in, -1 otherwise; FLAG is forwarded to
+ * db_user_to_phys_address() to control diagnostics.
+ */
+int
+db_user_to_kernel_address(
+	const task_t task,
+	vm_offset_t addr,
+	vm_offset_t *kaddr,
+	int flag)
+{
+	phys_addr_t pa;
+
+	/* First map the user virtual address to a physical one. */
+	if (db_user_to_phys_address(task, addr, &pa, flag) < 0)
+	    return(-1);
+
+	/* Only pages inside the direct-mapped region have a kernel
+	 * alias reachable through phystokv(). */
+	if (pa < biosmem_directmap_end()) {
+	    *kaddr = phystokv(pa);
+	    return(0);
+	}
+
+	db_printf("\naddr %016llx is stored in highmem at physical %016llx, accessing it is not supported yet\n", (unsigned long long) addr, (unsigned long long) pa);
+	return(-1);
+}
+
+/*
+ * Read bytes from kernel address space for debugger.
+ */
+
+/*
+ * Read SIZE bytes at ADDR in TASK's address space into DATA.
+ * Kernel addresses (or a null task) are read directly; user
+ * addresses are translated page by page and copied out of the
+ * physical pages.  Returns TRUE on success.
+ */
+boolean_t
+db_read_bytes(
+	vm_offset_t	addr,
+	int	size,
+	char	*data,
+	task_t	task)
+{
+	char	*src;
+	int	n;
+	phys_addr_t	phys_addr;
+
+	src = (char *)addr;
+	if ((addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) || task == TASK_NULL) {
+	    if (task == TASK_NULL)
+		task = db_current_task();
+	    while (--size >= 0) {
+		/* NOTE(review): task was just refreshed from
+		 * db_current_task(), so this guard only fires when
+		 * there is no current task at all -- confirm intent. */
+		if (addr < VM_MIN_KERNEL_ADDRESS && task == TASK_NULL) {
+		    db_printf("\nbad address %x\n", addr);
+		    return FALSE;
+		}
+		addr++;
+		*data++ = *src++;
+	    }
+	    return TRUE;
+	}
+	while (size > 0) {
+	    if (db_user_to_phys_address(task, addr, &phys_addr, 1) < 0)
+		return FALSE;
+	    /* n = bytes remaining on the current page. */
+	    n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
+	    if (n > size)
+		n = size;
+	    size -= n;
+	    addr += n;
+	    copy_from_phys(phys_addr, (vm_offset_t) data, n);
+	    data += n;
+	}
+	return TRUE;
+}
+
+/*
+ * Write bytes to kernel address space for debugger.
+ */
+/*
+ * Write SIZE bytes from DATA to ADDR for the debugger.  Writes that
+ * land in the kernel text are made possible by temporarily setting
+ * the write bit in the affected PTE(s); PGE is disabled around the
+ * TLB flush so the flush also evicts global mappings.
+ */
+void
+db_write_bytes(
+	vm_offset_t	addr,
+	int	size,
+	char	*data,
+	task_t	task)
+{
+	char	*dst;
+
+	pt_entry_t *ptep0 = 0;
+	pt_entry_t	oldmap0 = 0;
+	vm_offset_t	addr1;
+	pt_entry_t *ptep1 = 0;
+	pt_entry_t	oldmap1 = 0;
+	extern char	etext;
+
+	/* Refuse a write that straddles the user/kernel boundary. */
+	if ((addr < VM_MIN_KERNEL_ADDRESS) ^
+	    ((addr + size) <= VM_MIN_KERNEL_ADDRESS)) {
+	    db_error("\ncannot write data into mixed space\n");
+	    /* NOTREACHED */
+	}
+	if (addr < VM_MIN_KERNEL_ADDRESS) {
+	    if (task) {
+		db_write_bytes_user_space(addr, size, data, task);
+		return;
+	    } else if (db_current_task() == TASK_NULL) {
+		db_printf("\nbad address %x\n", addr);
+		db_error(0);
+		/* NOTREACHED */
+	    }
+	}
+
+	if (addr >= VM_MIN_KERNEL_ADDRESS &&
+	    addr <= (vm_offset_t)&etext)
+	{
+	    /* Kernel text is mapped read-only: force the write bit on
+	     * for the page(s) we touch, restored below. */
+	    ptep0 = pmap_pte(kernel_pmap, addr);
+	    oldmap0 = *ptep0;
+	    *ptep0 |= INTEL_PTE_WRITE;
+
+	    addr1 = i386_trunc_page(addr + size - 1);
+	    if (i386_trunc_page(addr) != addr1) {
+		/* data crosses a page boundary */
+
+		ptep1 = pmap_pte(kernel_pmap, addr1);
+		oldmap1 = *ptep1;
+		*ptep1 |= INTEL_PTE_WRITE;
+	    }
+	    /* Drop PGE so flush_tlb() really evicts the stale
+	     * read-only (possibly global) entries. */
+	    if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
+		set_cr4(get_cr4() & ~CR4_PGE);
+	    flush_tlb();
+	}
+
+	dst = (char *)addr;
+
+	while (--size >= 0)
+	    *dst++ = *data++;
+
+	if (ptep0) {
+	    /* Restore the original protections and re-enable PGE. */
+	    *ptep0 = oldmap0;
+	    if (ptep1) {
+		*ptep1 = oldmap1;
+	    }
+	    flush_tlb();
+	    if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
+		set_cr4(get_cr4() | CR4_PGE);
+	}
+}
+
+/*
+ * Write SIZE bytes from DATA into TASK's user address space at ADDR.
+ * The range is translated and copied one page at a time; the write is
+ * silently truncated if a page of the range is unmapped (a diagnostic
+ * is printed by db_user_to_phys_address).
+ */
+void
+db_write_bytes_user_space(
+	vm_offset_t	addr,
+	int	size,
+	char	*data,
+	task_t	task)
+{
+	int	n;
+	phys_addr_t	phys_addr;
+
+	while (size > 0) {
+	    if (db_user_to_phys_address(task, addr, &phys_addr, 1) < 0)
+		return;
+	    /* n = bytes remaining on the current page. */
+	    n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
+	    if (n > size)
+		n = size;
+	    size -= n;
+	    addr += n;
+	    copy_to_phys((vm_offset_t) data, phys_addr, n);
+	    data += n;	/* advance source; was missing, so multi-page
+			 * writes repeated the first page's bytes
+			 * (compare db_read_bytes) */
+	}
+}
+
+/*
+ * Check that the SIZE bytes at ADDR are mapped (and therefore safe
+ * for the debugger to touch) in TASK's address space.  Kernel
+ * addresses are checked against the kernel task; a null TASK means
+ * the current thread's task.  Returns TRUE when every page of the
+ * range has a valid mapping.
+ */
+boolean_t
+db_check_access(
+	vm_offset_t	addr,
+	int	size,
+	task_t	task)
+{
+	phys_addr_t	pa;
+
+	/* Decide which task's mappings to probe. */
+	if (addr >= VM_MIN_KERNEL_ADDRESS) {
+	    if (kernel_task == TASK_NULL)
+		return TRUE;
+	    task = kernel_task;
+	} else if (task == TASK_NULL) {
+	    if (current_thread() == THREAD_NULL)
+		return FALSE;
+	    task = current_thread()->task;
+	}
+
+	/* Walk the range one page at a time. */
+	while (size > 0) {
+	    int chunk = intel_trunc_page(addr + INTEL_PGBYTES) - addr;
+
+	    if (chunk > size)
+		chunk = size;
+	    if (db_user_to_phys_address(task, addr, &pa, 0) < 0)
+		return FALSE;
+	    addr += chunk;
+	    size -= chunk;
+	}
+	return TRUE;
+}
+
+/*
+ * Decide whether two user virtual addresses (in possibly different
+ * tasks) refer to the same physical byte.  A null TASK1 means the
+ * current thread's task.  Returns FALSE whenever either address
+ * cannot be translated.
+ */
+boolean_t
+db_phys_eq(
+	task_t		task1,
+	vm_offset_t	addr1,
+	const task_t	task2,
+	vm_offset_t	addr2)
+{
+	phys_addr_t	pa1, pa2;
+
+	/* Only user-space addresses are comparable here. */
+	if (addr1 >= VM_MIN_KERNEL_ADDRESS)
+	    return FALSE;
+	if (addr2 >= VM_MIN_KERNEL_ADDRESS)
+	    return FALSE;
+	/* Different in-page offsets can never alias the same byte. */
+	if ((addr1 & (INTEL_PGBYTES-1)) != (addr2 & (INTEL_PGBYTES-1)))
+	    return FALSE;
+	if (task1 == TASK_NULL) {
+	    if (current_thread() == THREAD_NULL)
+		return FALSE;
+	    task1 = current_thread()->task;
+	}
+	if (db_user_to_phys_address(task1, addr1, &pa1, 0) < 0)
+	    return FALSE;
+	if (db_user_to_phys_address(task2, addr2, &pa2, 0) < 0)
+	    return FALSE;
+	return pa1 == pa2;
+}
+
+#define DB_USER_STACK_ADDR (VM_MIN_KERNEL_ADDRESS)
+#define DB_NAME_SEARCH_LIMIT (DB_USER_STACK_ADDR-(INTEL_PGBYTES*3))
+
+#define GNU
+
+#ifndef GNU
+/*
+ * Scan user memory backwards from *SVADDR (exclusive) down to EVADDR
+ * for a zero word (FLAG == 0) or a non-zero word (FLAG != 0).  On a
+ * hit, *SVADDR and *SKADDR are left pointing at the match.  *SKADDR
+ * is the kernel alias of the current word, refreshed whenever the
+ * scan crosses a page boundary.
+ */
+static boolean_t
+db_search_null(
+	const task_t task,
+	vm_offset_t *svaddr,
+	vm_offset_t evaddr,
+	vm_offset_t *skaddr,
+	int flag)
+{
+	unsigned vaddr;
+	unsigned *kaddr;
+
+	kaddr = (unsigned *)*skaddr;
+	for (vaddr = *svaddr; vaddr > evaddr; ) {
+	    if (vaddr % INTEL_PGBYTES == 0) {
+		/* Stepping onto the previous page: re-translate. */
+		vaddr -= sizeof(unsigned);
+		if (db_user_to_kernel_address(task, vaddr, skaddr, 0) < 0)
+		    return FALSE;
+		kaddr = (vm_offset_t *)*skaddr;
+	    } else {
+		vaddr -= sizeof(unsigned);
+		kaddr--;
+	    }
+	    if ((*kaddr == 0) ^ (flag == 0)) {
+		*svaddr = vaddr;
+		*skaddr = (unsigned)kaddr;
+		return TRUE;
+	    }
+	}
+	return FALSE;
+}
+#endif /* GNU */
+
+#ifdef GNU
+/*
+ * Heuristically decide whether the page at KADDR (the kernel alias of
+ * a page-aligned user address) holds a command line: NUL-separated
+ * words followed only by NULs, and not an environment block.
+ */
+static boolean_t
+looks_like_command(
+	const task_t task,
+	char* kaddr)
+{
+	char *c;
+
+	assert(!((vm_offset_t) kaddr & (INTEL_PGBYTES-1)));
+
+	/*
+	 * Must be the environment.
+	 */
+	if (!memcmp(kaddr, "PATH=", 5) || !memcmp(kaddr, "TERM=", 5) || !memcmp(kaddr, "SHELL=", 6) || !memcmp(kaddr, "LOCAL_PART=", 11) || !memcmp(kaddr, "LC_ALL=", 7))
+	    return FALSE;
+
+	/*
+	 * This is purely heuristical but works quite nicely.
+	 * We know that it should look like words separated by \0, and
+	 * eventually only \0s.
+	 */
+	c = kaddr;
+	while (c < kaddr + INTEL_PGBYTES) {
+	    if (!*c) {
+		if (c == kaddr)
+		    /* Starts by \0.  */
+		    return FALSE;
+		break;
+	    }
+	    /* Skip over one word and its terminating NUL. */
+	    while (c < kaddr + INTEL_PGBYTES && *c)
+		c++;
+	    if (c < kaddr + INTEL_PGBYTES)
+		c++;	/* Skip \0 */
+	}
+	/*
+	 * Check that the remainder is just \0s.
+	 */
+	while (c < kaddr + INTEL_PGBYTES)
+	    if (*c++)
+		return FALSE;
+
+	return TRUE;
+}
+#endif /* GNU */
+
+/*
+ * Print TASK's command name, padded to DB_TASK_NAME_LEN columns.
+ * Uses task->name when set; otherwise digs the argument strings out
+ * of the task's user stack with OS-specific heuristics.
+ */
+void
+db_task_name(
+	const task_t task)
+{
+	char *p;
+	int n;
+	vm_offset_t vaddr, kaddr;
+	unsigned sp;
+
+	if (task->name[0]) {
+	    db_printf("%s", task->name);
+	    return;
+	}
+
+#ifdef GNU
+	/*
+	 * GNU Hurd-specific heuristics.
+	 */
+
+	/* Heuristical address first. */
+	vaddr = 0x1026000;
+	if (db_user_to_kernel_address(task, vaddr, &kaddr, 0) >= 0 &&
+		looks_like_command(task, (char*) kaddr))
+	    goto ok;
+
+	/* Try to catch SP of the main thread. */
+	thread_t thread;
+
+	task_lock(task);
+	thread = (thread_t) queue_first(&task->thread_list);
+	if (!thread) {
+	    task_unlock(task);
+	    db_printf(DB_NULL_TASK_NAME);
+	    return;
+	}
+	sp = thread->pcb->iss.uesp;
+	task_unlock(task);
+
+	/* Walk pages upward from the stack pointer until one looks
+	 * like the argument area. */
+	vaddr = (sp & ~(INTEL_PGBYTES - 1)) + INTEL_PGBYTES;
+	while (1) {
+	    if (db_user_to_kernel_address(task, vaddr, &kaddr, 0) < 0)
+		return;
+	    if (looks_like_command(task, (char*) kaddr))
+		break;
+	    vaddr += INTEL_PGBYTES;
+	}
+#else /* GNU */
+	vaddr = DB_USER_STACK_ADDR;
+	kaddr = 0;
+
+	/*
+	 * skip nulls at the end
+	 */
+	if (!db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 0)) {
+	    db_printf(DB_NULL_TASK_NAME);
+	    return;
+	}
+	/*
+	 * search start of args
+	 */
+	if (!db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 1)) {
+	    db_printf(DB_NULL_TASK_NAME);
+	    return;
+	}
+#endif /* GNU */
+
+ok:
+	n = DB_TASK_NAME_LEN-1;
+#ifdef GNU
+	p = (char *)kaddr;
+	for (; n > 0; vaddr++, p++, n--) {
+#else /* GNU */
+	p = (char *)kaddr + sizeof(unsigned);
+	for (vaddr += sizeof(int); vaddr < DB_USER_STACK_ADDR && n > 0;
+	    vaddr++, p++, n--) {
+#endif /* GNU */
+	    /* Refresh the kernel alias at each page boundary. */
+	    if (vaddr % INTEL_PGBYTES == 0) {
+		(void)db_user_to_kernel_address(task, vaddr, &kaddr, 0);
+		p = (char*)kaddr;
+	    }
+	    db_printf("%c", (*p < ' ' || *p > '~')? ' ': *p);
+	}
+	while (n-- >= 0)	/* compare with >= 0 for one more space */
+	    db_printf(" ");
+}
+
+#endif /* MACH_KDB */
diff --git a/i386/i386/db_interface.h b/i386/i386/db_interface.h
new file mode 100644
index 0000000..69a277a
--- /dev/null
+++ b/i386/i386/db_interface.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2007 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+
+#ifndef _I386_DB_INTERFACE_H_
+#define _I386_DB_INTERFACE_H_
+
+#include <sys/types.h>
+#include <kern/task.h>
+#include <machine/thread.h>
+#include <ddb/db_watch.h>
+#include <ddb/db_variables.h>
+
+extern boolean_t kdb_trap (
+ int type,
+ int code,
+ struct i386_saved_state *regs);
+
+struct int_regs;
+
+extern void kdb_kentry(struct int_regs *int_regs);
+
+extern boolean_t db_read_bytes (
+ vm_offset_t addr,
+ int size,
+ char *data,
+ task_t task);
+
+extern void db_write_bytes (
+ vm_offset_t addr,
+ int size,
+ char *data,
+ task_t task);
+
+extern boolean_t db_check_access (
+ vm_offset_t addr,
+ int size,
+ task_t task);
+
+extern boolean_t db_phys_eq (
+ task_t task1,
+ vm_offset_t addr1,
+ task_t task2,
+ vm_offset_t addr2);
+
+extern int db_user_to_kernel_address(
+ task_t task,
+ vm_offset_t addr,
+ vm_offset_t *kaddr,
+ int flag);
+
+extern void db_task_name (task_t task);
+
+extern void cpu_interrupt_to_db(int i);
+
+#define I386_DB_TYPE_X 0
+#define I386_DB_TYPE_W 1
+#define I386_DB_TYPE_RW 3
+
+#define I386_DB_LEN_1 0
+#define I386_DB_LEN_2 1
+#define I386_DB_LEN_4 3
+#define I386_DB_LEN_8 2 /* For >= Pentium4 and Xen CPUID >= 15 only */
+
+#define I386_DB_LOCAL 1
+#define I386_DB_GLOBAL 2
+
+#if MACH_KDB
+extern boolean_t db_set_hw_watchpoint(
+ db_watchpoint_t watch,
+ unsigned num);
+
+extern boolean_t db_clear_hw_watchpoint(
+ unsigned num);
+
+extern void db_dr (
+ int num,
+ vm_offset_t linear_addr,
+ int type,
+ int len,
+ int persistence);
+
+extern void
+db_stack_trace_cmd(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char *modif);
+
+extern void
+db_halt_cpu(void);
+extern void
+db_reset_cpu(void);
+
+void
+db_i386_reg_value(
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ struct db_var_aux_param *ap);
+
+void feep(void);
+
+/*
+ * Put a debugging character on the screen.
+ * LOC=0 means put it in the bottom right corner, LOC=1 means put it
+ * one column to the left, etc.
+ */
+void kd_debug_put(int loc, char c);
+
+#endif
+
+extern void db_get_debug_state(
+ pcb_t pcb,
+ struct i386_debug_state *state);
+extern kern_return_t db_set_debug_state(
+ pcb_t pcb,
+ const struct i386_debug_state *state);
+
+extern void db_load_context(pcb_t pcb);
+
+extern void cnpollc(boolean_t on);
+
+void
+db_write_bytes_user_space(
+ vm_offset_t addr,
+ int size,
+ char *data,
+ task_t task);
+
+void db_debug_all_traps (boolean_t enable);
+
+#endif /* _I386_DB_INTERFACE_H_ */
diff --git a/i386/i386/db_machdep.h b/i386/i386/db_machdep.h
new file mode 100644
index 0000000..04c874b
--- /dev/null
+++ b/i386/i386/db_machdep.h
@@ -0,0 +1,105 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_DB_MACHDEP_H_
+#define _I386_DB_MACHDEP_H_
+
+/*
+ * Machine-dependent defines for new kernel debugger.
+ */
+
+#include <mach/machine/vm_types.h>
+#include <mach/machine/vm_param.h>
+#include <mach/machine/eflags.h>
+#include <i386/thread.h> /* for thread_status */
+#include <i386/trap.h>
+
+typedef vm_offset_t db_addr_t; /* address - unsigned */
+typedef long db_expr_t; /* expression - signed */
+
+typedef struct i386_saved_state db_regs_t;
+extern db_regs_t ddb_regs; /* register state */
+#define DDB_REGS (&ddb_regs)
+#define SAVE_DDB_REGS DB_SAVE(db_regs_t, ddb_regs)
+#define RESTORE_DDB_REGS DB_RESTORE(ddb_regs)
+
+#define PC_REGS(regs) ((db_addr_t)(regs)->eip)
+
+#define BKPT_INST 0xcc /* breakpoint instruction */
+#define BKPT_SIZE (1) /* size of breakpoint inst */
+#define BKPT_SET(inst) (BKPT_INST)
+
+#define FIXUP_PC_AFTER_BREAK ddb_regs.eip -= 1;
+
+#define db_clear_single_step(regs) ((regs)->efl &= ~EFL_TF)
+#define db_set_single_step(regs) ((regs)->efl |= EFL_TF)
+
+#define IS_BREAKPOINT_TRAP(type, code) ((type) == T_INT3)
+#define IS_WATCHPOINT_TRAP(type, code) ((type) == T_WATCHPOINT)
+
+#define I_CALL 0xe8
+#define I_CALLI 0xff
+#define I_RET 0xc3
+#define I_IRET 0xcf
+
+#define inst_trap_return(ins) (((ins)&0xff) == I_IRET)
+#define inst_return(ins) (((ins)&0xff) == I_RET)
+#define inst_call(ins) (((ins)&0xff) == I_CALL || \
+ (((ins)&0xff) == I_CALLI && \
+ ((ins)&0x3800) == 0x1000))
+#define inst_load(ins) 0
+#define inst_store(ins) 0
+
+/* access capability and access macros */
+
+#define DB_ACCESS_LEVEL 2 /* access any space */
+#define DB_CHECK_ACCESS(addr,size,task) \
+ db_check_access(addr,size,task)
+#define DB_PHYS_EQ(task1,addr1,task2,addr2) \
+ db_phys_eq(task1,addr1,task2,addr2)
+#define DB_VALID_KERN_ADDR(addr) \
+ ((addr) >= VM_MIN_KERNEL_ADDRESS && \
+ (addr) < VM_MAX_KERNEL_ADDRESS)
+#define DB_VALID_ADDRESS(addr,user) \
+ ((!(user) && DB_VALID_KERN_ADDR(addr)) || \
+ ((user) && (addr) < VM_MIN_KERNEL_ADDRESS))
+
+/* macros for printing OS server dependent task name */
+
+#define DB_TASK_NAME(task) db_task_name(task)
+#define DB_TASK_NAME_TITLE "COMMAND "
+#define DB_TASK_NAME_LEN 23
+#define DB_NULL_TASK_NAME "? "
+
+/* macro for checking if a thread has used floating-point */
+
+#define db_thread_fp_used(thread) ((thread)->pcb->ims.ifps != 0)
+
+/* only a.out symbol tables */
+
+#define DB_NO_COFF 1
+
+#endif /* _I386_DB_MACHDEP_H_ */
diff --git a/i386/i386/db_trace.c b/i386/i386/db_trace.c
new file mode 100644
index 0000000..0ef7251
--- /dev/null
+++ b/i386/i386/db_trace.c
@@ -0,0 +1,586 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#if MACH_KDB
+
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <vm/vm_map.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+
+#include <machine/db_machdep.h>
+#include <machine/machspl.h>
+#include <machine/db_interface.h>
+#include <machine/db_trace.h>
+#include <machine/cpu_number.h>
+#include <i386at/model_dep.h>
+
+#include <ddb/db_access.h>
+#include <ddb/db_command.h>
+#include <ddb/db_output.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_variables.h>
+#include <ddb/db_task_thread.h>
+
+#include "trap.h"
+
+/*
+ * Machine register set.
+ */
+struct db_variable db_regs[] = {
+ { "cs", (long *)&ddb_regs.cs, db_i386_reg_value },
+#if !defined(__x86_64__) || defined(USER32)
+ { "ds", (long *)&ddb_regs.ds, db_i386_reg_value },
+ { "es", (long *)&ddb_regs.es, db_i386_reg_value },
+ { "fs", (long *)&ddb_regs.fs, db_i386_reg_value },
+ { "gs", (long *)&ddb_regs.gs, db_i386_reg_value },
+#endif
+ { "ss", (long *)&ddb_regs.ss, db_i386_reg_value },
+ { "eax",(long *)&ddb_regs.eax, db_i386_reg_value },
+ { "ecx",(long *)&ddb_regs.ecx, db_i386_reg_value },
+ { "edx",(long *)&ddb_regs.edx, db_i386_reg_value },
+ { "ebx",(long *)&ddb_regs.ebx, db_i386_reg_value },
+ { "esp",(long *)&ddb_regs.uesp,db_i386_reg_value },
+ { "ebp",(long *)&ddb_regs.ebp, db_i386_reg_value },
+ { "esi",(long *)&ddb_regs.esi, db_i386_reg_value },
+ { "edi",(long *)&ddb_regs.edi, db_i386_reg_value },
+ { "eip",(long *)&ddb_regs.eip, db_i386_reg_value },
+ { "efl",(long *)&ddb_regs.efl, db_i386_reg_value },
+#ifdef __x86_64__
+ { "r8", (long *)&ddb_regs.r8, db_i386_reg_value },
+ { "r9", (long *)&ddb_regs.r9, db_i386_reg_value },
+ { "r10",(long *)&ddb_regs.r10, db_i386_reg_value },
+ { "r11",(long *)&ddb_regs.r11, db_i386_reg_value },
+ { "r12",(long *)&ddb_regs.r12, db_i386_reg_value },
+ { "r13",(long *)&ddb_regs.r13, db_i386_reg_value },
+ { "r14",(long *)&ddb_regs.r14, db_i386_reg_value },
+ { "r15",(long *)&ddb_regs.r15, db_i386_reg_value },
+#endif
+};
+struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
+
+/*
+ * Stack trace.
+ */
+#define INKERNEL(va) (((vm_offset_t)(va)) >= VM_MIN_KERNEL_ADDRESS)
+
+struct i386_frame {
+ struct i386_frame *f_frame;
+ long f_retaddr;
+ long f_arg0;
+};
+
+#define TRAP 1
+#define INTERRUPT 2
+#define SYSCALL 3
+
+db_addr_t db_user_trap_symbol_value = 0;
+db_addr_t db_kernel_trap_symbol_value = 0;
+db_addr_t db_interrupt_symbol_value = 0;
+db_addr_t db_return_to_iret_symbol_value = 0;
+db_addr_t db_syscall_symbol_value = 0;
+boolean_t db_trace_symbols_found = FALSE;
+
+struct i386_kregs {
+ char *name;
+ long offset;
+} i386_kregs[] = {
+ { "ebx", (long)(&((struct i386_kernel_state *)0)->k_ebx) },
+ { "esp", (long)(&((struct i386_kernel_state *)0)->k_esp) },
+ { "ebp", (long)(&((struct i386_kernel_state *)0)->k_ebp) },
+#ifdef __i386__
+ { "edi", (long)(&((struct i386_kernel_state *)0)->k_edi) },
+ { "esi", (long)(&((struct i386_kernel_state *)0)->k_esi) },
+#endif
+#ifdef __x86_64__
+ { "r12", (long)(&((struct i386_kernel_state *)0)->k_r12) },
+ { "r13", (long)(&((struct i386_kernel_state *)0)->k_r13) },
+ { "r14", (long)(&((struct i386_kernel_state *)0)->k_r14) },
+ { "r15", (long)(&((struct i386_kernel_state *)0)->k_r15) },
+#endif
+ { "eip", (long)(&((struct i386_kernel_state *)0)->k_eip) },
+ { 0 },
+};
+
+/*
+ * Look NAME up in the i386_kregs table and, on a match, return a
+ * pointer to that register's slot within the kernel state at KREGP.
+ * Returns 0 when NAME is not a saved kernel register.
+ */
+static long *
+db_lookup_i386_kreg(
+	const char	*name,
+	const long	*kregp)
+{
+	const struct i386_kregs *entry;
+
+	for (entry = i386_kregs; entry->name; entry++)
+	    if (strcmp(entry->name, name) == 0)
+		return (long *)((long)kregp + entry->offset);
+	return 0;
+}
+
+/*
+ * Get or set (per FLAG, DB_VAR_SET to store) a machine register for
+ * the thread identified by AP.  For the current thread the live
+ * ddb_regs copy is used; otherwise the value comes from the thread's
+ * saved kernel stack state or its pcb.
+ */
+void
+db_i386_reg_value(
+	struct	db_variable *vp,
+	db_expr_t	*valuep,
+	int	flag,
+	db_var_aux_param_t	ap)
+{
+	long			*dp = 0;
+	db_expr_t		null_reg = 0;
+	thread_t		thread = ap->thread;
+
+	if (db_option(ap->modif, 'u')) {
+	    /* 'u' modifier: caller wants the user-mode registers. */
+	    if (thread == THREAD_NULL) {
+		if ((thread = current_thread()) == THREAD_NULL)
+		    db_error("no user registers\n");
+	    }
+	    if (thread == current_thread()) {
+		if (ddb_regs.cs & 0x3)
+		    dp = vp->valuep;
+		else if (ON_INT_STACK(ddb_regs.ebp, cpu_number()))
+		    db_error("cannot get/set user registers in nested interrupt\n");
+	    }
+	} else {
+	    if (thread == THREAD_NULL || thread == current_thread()) {
+		dp = vp->valuep;
+	    } else if ((thread->state & TH_SWAPPED) == 0 &&
+			thread->kernel_stack) {
+		/* Blocked thread: only the callee-saved registers in
+		 * its kernel state are meaningful. */
+		dp = db_lookup_i386_kreg(vp->name,
+			(long *)(STACK_IKS(thread->kernel_stack)));
+		if (dp == 0)
+		    dp = &null_reg;
+	    } else if ((thread->state & TH_SWAPPED) &&
+			thread->swap_func != thread_exception_return) {
+/*.....this breaks t/t $taskN.0...*/
+		/* only EIP is valid */
+		if (vp->valuep == (long *) &ddb_regs.eip) {
+		    dp = (long *)(&thread->swap_func);
+		} else {
+		    dp = &null_reg;
+		}
+	    }
+	}
+	if (dp == 0) {
+	    /* Fall back to the thread's saved user state, at the same
+	     * offset within pcb->iss as vp->valuep has in ddb_regs. */
+	    if (thread->pcb == 0)
+		db_error("no pcb\n");
+	    dp = (long *)((long)(&thread->pcb->iss) +
+			  ((long)vp->valuep - (long)&ddb_regs));
+	}
+	if (flag == DB_VAR_SET)
+	    *dp = *valuep;
+	else
+	    *valuep = *dp;
+}
+
+/*
+ * Cache the addresses of the well-known kernel entry points that
+ * delimit trap, interrupt and syscall frames when unwinding a stack.
+ * Symbols that cannot be resolved keep their zero (disabled) value.
+ */
+static void
+db_find_trace_symbols(void)
+{
+#ifdef __ELF__
+#define P
+#else
+#define P "_"
+#endif
+	static const struct {
+	    const char	*name;
+	    db_addr_t	*valp;
+	} trace_syms[] = {
+	    { P"user_trap",	&db_user_trap_symbol_value },
+	    { P"kernel_trap",	&db_kernel_trap_symbol_value },
+	    { P"interrupt",	&db_interrupt_symbol_value },
+	    { P"return_to_iret",	&db_return_to_iret_symbol_value },
+	    { P"syscall",	&db_syscall_symbol_value },
+	};
+#undef P
+	db_expr_t	value;
+	unsigned	i;
+
+	for (i = 0; i < sizeof(trace_syms)/sizeof(trace_syms[0]); i++) {
+	    if (db_value_of_name(trace_syms[i].name, &value))
+		*trace_syms[i].valp = (db_addr_t) value;
+	}
+	db_trace_symbols_found = TRUE;
+}
+
+/*
+ * Figure out how many arguments were passed into the frame at "fp".
+ */
+const int db_numargs_default = 5;
+
+#ifdef __x86_64
+/* Args are in registers */
+#define db_numargs(fp, task) -1
+#else
+/*
+ * Guess how many longword arguments were passed to the function that
+ * built frame FP, by decoding the caller's stack-cleanup instruction
+ * at the return address: "popl %ecx" means one argument, "addl $n,
+ * %esp" means n/4.  Anything else yields db_numargs_default.
+ */
+static int
+db_numargs(
+	struct i386_frame *fp,
+	task_t task)
+{
+	long	*argp;
+	long	inst;
+	long	args;
+	extern char	etext[];
+
+	argp = (long *)db_get_task_value((long)&fp->f_retaddr, sizeof(long), FALSE, task);
+	/* The return address must land inside the kernel text. */
+	if (argp < (long *)VM_MIN_KERNEL_ADDRESS || argp > (long *)etext)
+	    args = db_numargs_default;
+	else if (!DB_CHECK_ACCESS((long)argp, sizeof(long), task))
+	    args = db_numargs_default;
+	else {
+	    inst = db_get_task_value((long)argp, sizeof(long), FALSE, task);
+	    if ((inst & 0xff) == 0x59)	/* popl %ecx */
+		args = 1;
+	    else if ((inst & 0xffff) == 0xc483)	/* addl %n, %esp */
+		args = ((inst >> 16) & 0xff) / 4;
+	    else
+		args = db_numargs_default;
+	}
+	return args;
+}
+#endif
+
+struct interrupt_frame {
+ struct i386_frame *if_frame; /* point to next frame */
+ long if_retaddr; /* return address to _interrupt */
+ long if_unit; /* unit number */
+ spl_t if_spl; /* saved spl */
+ long if_iretaddr; /* _return_to_{iret,iret_i} */
+ long if_edx; /* old sp(iret) or saved edx(iret_i) */
+ long if_ecx; /* saved ecx(iret_i) */
+ long if_eax; /* saved eax(iret_i) */
+ long if_eip; /* saved eip(iret_i) */
+ long if_cs; /* saved cs(iret_i) */
+ long if_efl; /* saved efl(iret_i) */
+};
+
+/*
+ * Figure out the next frame up in the call stack.
+ * For trap(), we print the address of the faulting instruction and
+ * proceed with the calling frame. We return the ip that faulted.
+ * If the trap was caused by jumping through a bogus pointer, then
+ * the next line in the backtrace will list some random function as
+ * being called. It should get the argument list correct, though.
+ * It might be possible to dig out from the next frame up the name
+ * of the function that faulted, but that could get hairy.
+ */
+static void
+db_nextframe(
+	struct i386_frame **lfp,	/* in/out */
+	struct i386_frame **fp,		/* in/out */
+	db_addr_t	*sp,		/* out */
+	db_addr_t	*ip,		/* out */
+	long	frame_type,		/* in */
+	const thread_t	thread)		/* in */
+{
+	struct i386_saved_state *saved_regs;
+	struct interrupt_frame *ifp;
+	task_t task = (thread != THREAD_NULL)? thread->task: TASK_NULL;
+
+	switch(frame_type) {
+	case TRAP:
+		/*
+		 * We know that trap() has 1 argument and we know that
+		 * it is an (struct i386_saved_state *).
+		 */
+		saved_regs = (struct i386_saved_state *)
+			db_get_task_value((long)&((*fp)->f_arg0),sizeof(long),FALSE,task);
+		db_printf(">>>>> %s (%d)",
+			trap_name(saved_regs->trapno), saved_regs->trapno);
+		if (saved_regs->trapno == T_PAGE_FAULT)
+			/* Decode the page-fault error code bits. */
+			db_printf(" for %s%s%s %lx",
+				saved_regs->err & T_PF_PROT ? "P" : "",
+				saved_regs->err & T_PF_WRITE ? "W" : "",
+				saved_regs->err & T_PF_USER ? "U" : "",
+				lintokv(saved_regs->cr2));
+		db_printf(" at ");
+		db_task_printsym(saved_regs->eip, DB_STGY_PROC, task);
+		db_printf(" <<<<<\n");
+		/* Resume unwinding from the interrupted context. */
+		*fp = (struct i386_frame *)saved_regs->ebp;
+		*sp = (db_addr_t)saved_regs->uesp;
+		*ip = (db_addr_t)saved_regs->eip;
+		break;
+	case INTERRUPT:
+		if (*lfp == 0) {
+			db_printf(">>>>> interrupt <<<<<\n");
+			goto miss_frame;
+		}
+		db_printf(">>>>> interrupt at ");
+		ifp = (struct interrupt_frame *)(*lfp);
+		*fp = ifp->if_frame;
+		*sp = (db_addr_t) ifp->if_frame;
+		if (ifp->if_iretaddr == db_return_to_iret_symbol_value)
+			/* if_edx holds the old stack pointer here. */
+			*ip = ((struct i386_interrupt_state *) ifp->if_edx)->eip;
+		else
+			*ip = (db_addr_t) ifp->if_eip;
+		db_task_printsym(*ip, DB_STGY_PROC, task);
+		db_printf(" <<<<<\n");
+		break;
+	case SYSCALL:
+		if (thread != THREAD_NULL && thread->pcb) {
+			/* Continue in the thread's saved user state. */
+			*ip = (db_addr_t) thread->pcb->iss.eip;
+			*sp = (db_addr_t) thread->pcb->iss.uesp;
+			*fp = (struct i386_frame *) thread->pcb->iss.ebp;
+			break;
+		}
+		/* falling down for unknown case */
+	default:
+	miss_frame:
+		/* Ordinary frame: follow the saved ebp chain. */
+		*ip = (db_addr_t)
+			db_get_task_value((long)&(*fp)->f_retaddr, sizeof(long), FALSE, task);
+		*lfp = *fp;
+		*fp = (struct i386_frame *)
+			db_get_task_value((long)&(*fp)->f_frame, sizeof(long), FALSE, task);
+		*sp = (db_addr_t) *fp;
+		break;
+	}
+}
+
+#define F_USER_TRACE 1
+#define F_TRACE_THREAD 2
+
+/*
+ * trace [/tu] [addr] [,count] -- print a stack backtrace.
+ * 't' traces the thread whose address is ADDR (or the default /
+ * current thread); 'u' lets the trace continue into user space.
+ * Without modifiers, ADDR is taken as a frame pointer.
+ */
+void
+db_stack_trace_cmd(
+	db_expr_t	addr,
+	boolean_t	have_addr,
+	db_expr_t	count,
+	const char	*modif)
+{
+	boolean_t	trace_thread = FALSE;
+	struct i386_frame *frame;
+	db_addr_t	callpc, sp;
+	int		flags = 0;
+	thread_t	th;
+
+	{
+	    const char *cp = modif;
+	    char c;
+
+	    while ((c = *cp++) != 0) {
+		if (c == 't')
+		    trace_thread = TRUE;
+		if (c == 'u')
+		    flags |= F_USER_TRACE;
+	    }
+	}
+
+	if (!have_addr && !trace_thread) {
+	    /* No arguments: start from the debugger's own registers. */
+	    frame = (struct i386_frame *)ddb_regs.ebp;
+	    sp = (db_addr_t)ddb_regs.uesp;
+	    callpc = (db_addr_t)ddb_regs.eip;
+	    th = current_thread();
+	} else if (trace_thread) {
+	    if (have_addr) {
+		th = (thread_t) addr;
+		if (!db_check_thread_address_valid(th))
+		    return;
+	    } else {
+		th = db_default_thread;
+		if (th == THREAD_NULL)
+		   th = current_thread();
+		if (th == THREAD_NULL) {
+		   db_printf("no active thread\n");
+		   return;
+		}
+	    }
+	    if (th == current_thread()) {
+	        frame = (struct i386_frame *)ddb_regs.ebp;
+	        sp = (db_addr_t)ddb_regs.uesp;
+	        callpc = (db_addr_t)ddb_regs.eip;
+	    } else {
+		if (th->pcb == 0) {
+		    db_printf("thread has no pcb\n");
+		    return;
+		}
+		if ((th->state & TH_SWAPPED) || th->kernel_stack == 0) {
+		    /* No kernel stack: show the continuation and start
+		     * from the saved user state. */
+		    struct i386_saved_state *iss = &th->pcb->iss;
+
+		    db_printf("Continuation ");
+		    db_task_printsym((db_addr_t)th->swap_func,
+				      DB_STGY_PROC,
+				      th->task);
+		    db_printf("\n");
+
+		    frame = (struct i386_frame *) (iss->ebp);
+		    sp = (db_addr_t) (iss->uesp);
+		    callpc = (db_addr_t) (iss->eip);
+		} else {
+		    /* Blocked in the kernel: use the saved switch state. */
+		    struct i386_kernel_state *iks;
+		    iks = STACK_IKS(th->kernel_stack);
+		    frame = (struct i386_frame *) (iks->k_ebp);
+		    sp = (db_addr_t) (iks->k_esp);
+		    callpc = (db_addr_t) (iks->k_eip);
+		}
+	    }
+	} else {
+	    /* ADDR is a raw frame pointer. */
+	    frame = (struct i386_frame *)addr;
+	    sp = (db_addr_t)addr;
+	    th = (db_default_thread)? db_default_thread: current_thread();
+	    callpc = (db_addr_t)db_get_task_value((long)&frame->f_retaddr, sizeof(long),
+						  FALSE,
+						  (th == THREAD_NULL) ? TASK_NULL : th->task);
+	}
+
+	db_i386_stack_trace( th, frame, sp, callpc, count, flags );
+}
+
+
+/*
+ * Walk and print up to COUNT stack frames of thread TH, starting from
+ * FRAME/SP/CALLPC.  Recognizes trap, interrupt and syscall boundaries
+ * via the symbol values cached by db_find_trace_symbols(); stops at
+ * the kernel/user transition unless F_USER_TRACE is set in FLAGS.
+ */
+void
+db_i386_stack_trace(
+	const thread_t	th,
+	struct i386_frame *frame,
+	db_addr_t	sp,
+	db_addr_t	callpc,
+	db_expr_t	count,
+	int		flags)
+{
+	task_t task;
+	boolean_t kernel_only;
+	long *argp;
+	long user_frame = 0;
+	struct i386_frame *lastframe;
+	int	frame_type;
+	char	*filename;
+	int	linenum;
+	extern unsigned long db_maxoff;
+
+	if (count == -1)
+	    count = 65535;
+
+	kernel_only = (flags & F_USER_TRACE) == 0;
+
+	task = (th == THREAD_NULL) ? TASK_NULL : th->task;
+
+	if (!db_trace_symbols_found)
+	    db_find_trace_symbols();
+
+	if (!INKERNEL(callpc) && !INKERNEL(frame)) {
+	    db_printf(">>>>> user space <<<<<\n");
+	    user_frame++;
+	}
+
+	lastframe = 0;
+	while (count--) {
+	    int narg;
+	    char *	name;
+	    db_expr_t	offset;
+
+	    if (INKERNEL(callpc) && user_frame == 0) {
+		db_addr_t call_func = 0;
+
+		/* Classify the frame by the function containing callpc:
+		 * trap/interrupt/syscall entry points need special
+		 * unwinding in db_nextframe(). */
+		db_sym_t sym_tmp;
+		db_symbol_values(0,
+				 sym_tmp = db_search_task_symbol(callpc,
+								 DB_STGY_XTRN,
+								 (db_addr_t *)&offset,
+								 TASK_NULL),
+				 &name, (db_expr_t *)&call_func);
+		db_free_symbol(sym_tmp);
+		if ((db_user_trap_symbol_value && call_func == db_user_trap_symbol_value) ||
+		    (db_kernel_trap_symbol_value && call_func == db_kernel_trap_symbol_value)) {
+		    frame_type = TRAP;
+		    narg = 1;
+		} else if (db_interrupt_symbol_value && call_func == db_interrupt_symbol_value) {
+		    frame_type = INTERRUPT;
+		    goto next_frame;
+		} else if (db_syscall_symbol_value && call_func == db_syscall_symbol_value) {
+		    frame_type = SYSCALL;
+		    goto next_frame;
+		} else {
+		    frame_type = 0;
+		    if (frame)
+			narg = db_numargs(frame, task);
+		    else
+			narg = -1;
+		}
+	    } else if (!frame || INKERNEL(callpc) ^ INKERNEL(frame)) {
+		frame_type = 0;
+		narg = -1;
+	    } else {
+		frame_type = 0;
+		narg = db_numargs(frame, task);
+	    }
+
+	    db_find_task_sym_and_offset(callpc, &name,
+					(db_addr_t *)&offset, task);
+	    if (name == 0 || offset > db_maxoff) {
+		db_printf("0x%x(", callpc);
+		offset = 0;
+	    } else
+	        db_printf("%s(", name);
+
+	    if (!frame) {
+		db_printf(")\n");
+	    }
+
+	    if (sp) {
+		unsigned char inst = db_get_task_value(callpc, sizeof(char), FALSE, task);
+		if (inst == 0xc3) {
+		    /* RET, unwind this directly */
+		    callpc = db_get_task_value(sp, sizeof(callpc), FALSE, task);
+		    sp += sizeof(callpc);
+		    continue;
+		}
+	    }
+
+	    if (!frame) {
+		break;
+	    }
+
+	    argp = &frame->f_arg0;
+	    while (narg > 0) {
+		db_printf("%x", db_get_task_value((long)argp,sizeof(long),FALSE,task));
+		argp++;
+		if (--narg != 0)
+		    db_printf(",");
+	    }
+	    if (narg < 0)
+		db_printf("...");
+	    db_printf(")");
+	    if (offset) {
+		db_printf("+0x%x", offset);
+            }
+	    if (db_line_at_pc(0, &filename, &linenum, callpc)) {
+		db_printf(" [%s", filename);
+		if (linenum > 0)
+		    db_printf(":%d", linenum);
+		db_printf("]");
+	    }
+	    db_printf("\n");
+
+	next_frame:
+	    db_nextframe(&lastframe, &frame, &sp, &callpc, frame_type, th);
+
+	    if (!INKERNEL(lastframe) ||
+		(!INKERNEL(callpc) && !INKERNEL(frame)))
+		user_frame++;
+	    if (user_frame == 1) {
+		db_printf(">>>>> user space <<<<<\n");
+		if (kernel_only)
+		    break;
+	    }
+	    /* Frames must strictly ascend; a non-increasing pointer
+	     * means a corrupt chain (except at the mode switch). */
+	    if (frame && frame <= lastframe) {
+		if (INKERNEL(lastframe) && !INKERNEL(frame))
+		    continue;
+		db_printf("Bad frame pointer: 0x%x\n", frame);
+		break;
+	    }
+	}
+}
+
+#endif /* MACH_KDB */
diff --git a/i386/i386/db_trace.h b/i386/i386/db_trace.h
new file mode 100644
index 0000000..4684f57
--- /dev/null
+++ b/i386/i386/db_trace.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _I386_DB_TRACE_H_
+#define _I386_DB_TRACE_H_
+
+struct i386_frame;
+
+void
+db_i386_stack_trace(
+ thread_t th,
+ struct i386_frame *frame,
+ db_addr_t sp,
+ db_addr_t callpc,
+ db_expr_t count,
+ int flags);
+
+#endif /* _I386_DB_TRACE_H_ */
diff --git a/i386/i386/debug.h b/i386/i386/debug.h
new file mode 100644
index 0000000..84397ba
--- /dev/null
+++ b/i386/i386/debug.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_DEBUG_
+#define _I386_DEBUG_
+
+#ifndef __ASSEMBLER__
+/* Dump a saved state.
+ Probably a good idea to have this around
+ even when DEBUG isn't turned on. */
+void dump_ss(const struct i386_saved_state *st);
+#endif /* __ASSEMBLER__ */
+
+#ifdef DEBUG
+
+
+/* Maximum number of entries in a debug trace.
+ If the buffer overflows, the oldest entries are forgotten. */
+#define DEBUG_TRACE_LEN 512
+
+/* Add the caller's current position to the debug trace buffer.
+ Only the kernel stack needs to be valid;
+ the other data segment registers are not needed
+ and all registers are saved. */
+#ifndef __ASSEMBLER__
+
+#define DEBUG_TRACE _debug_trace(__FILE__,__LINE__)
+
+/* Reset the debug trace buffer so it contains no valid entries. */
+void debug_trace_reset(void);
+
+/* Dump the contents of the trace buffer to the console.
+ Also clears the trace buffer. */
+void debug_trace_dump(void);
+
+#else /* __ASSEMBLER__ */
+
+#define DEBUG_TRACE \
+ pushl $__LINE__ ;\
+ pushl $9f ;\
+ call __debug_trace ;\
+ addl $8,%esp ;\
+ .data ;\
+9: .ascii __FILE__"\0" ;\
+ .text
+
+#endif /* __ASSEMBLER__ */
+
+
+#endif /* DEBUG */
+
+/* XXX #include_next "debug.h" */
+
+#endif /* _I386_DEBUG_ */
diff --git a/i386/i386/debug_i386.c b/i386/i386/debug_i386.c
new file mode 100644
index 0000000..41d032e
--- /dev/null
+++ b/i386/i386/debug_i386.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <kern/printf.h>
+
+#include "thread.h"
+#include "trap.h"
+#include "debug.h"
+#include "spl.h"
+
+void dump_ss(const struct i386_saved_state *st) /* Print a register dump of a saved CPU state to the console. */
+{
+	printf("Dump of i386_saved_state %p:\n", st);
+#if defined(__x86_64__) && ! defined(USER32)
+	printf("RAX %016lx RBX %016lx RCX %016lx RDX %016lx\n",
+	       st->eax, st->ebx, st->ecx, st->edx);
+	printf("RSI %016lx RDI %016lx RBP %016lx RSP %016lx\n",
+	       st->esi, st->edi, st->ebp, st->uesp);
+	printf("R8 %016lx R9 %016lx R10 %016lx R11 %016lx\n",
+	       st->r8, st->r9, st->r10, st->r11);
+	printf("R12 %016lx R13 %016lx R14 %016lx R15 %016lx\n",
+	       st->r12, st->r13, st->r14, st->r15);
+	printf("RIP %016lx EFLAGS %08lx\n", st->eip, st->efl);
+#else
+	printf("EAX %08lx EBX %08lx ECX %08lx EDX %08lx\n",
+	       st->eax, st->ebx, st->ecx, st->edx);
+	printf("ESI %08lx EDI %08lx EBP %08lx ESP %08lx\n",
+	       st->esi, st->edi, st->ebp, st->uesp);
+	printf("CS %04lx SS %04lx "
+	       "DS %04lx ES %04lx "
+	       "FS %04lx GS %04lx\n",
+	       st->cs & 0xffff, st->ss & 0xffff,
+	       st->ds & 0xffff, st->es & 0xffff,
+	       st->fs & 0xffff, st->gs & 0xffff);
+	printf("v86: DS %04lx ES %04lx FS %04lx GS %04lx\n",
+	       st->v86_segs.v86_ds & 0xffff, st->v86_segs.v86_es & 0xffff,
+	       st->v86_segs.v86_fs & 0xffff, st->v86_segs.v86_gs & 0xffff); /* was v86_gs twice: FS column printed GS */
+	printf("EIP %08lx EFLAGS %08lx\n", st->eip, st->efl);
+#endif
+	printf("trapno %ld: %s, error %08lx\n",
+	       st->trapno, trap_name(st->trapno),
+	       st->err);
+}
+
+#ifdef DEBUG
+
+struct debug_trace_entry
+{
+ char *filename;
+ int linenum;
+};
+struct debug_trace_entry debug_trace_buf[DEBUG_TRACE_LEN];
+int debug_trace_pos;
+
+void
+debug_trace_reset(void) /* Empty the debug trace ring buffer. */
+{
+	int s = splhigh(); /* block interrupts while mutating the ring */
+	debug_trace_pos = 0;
+	debug_trace_buf[DEBUG_TRACE_LEN-1].filename = 0; /* clear "wrapped" sentinel checked by debug_trace_dump */
+	splx(s);
+}
+
+static void
+print_entry(int i, int *col) /* Print trace entry i as "file:line", 5 entries per output row (*col tracks position). */
+{
+	char *fn, *p;
+
+	/* Strip off the path from the filename. */
+	fn = debug_trace_buf[i].filename;
+	for (p = fn; *p; p++)
+		if (*p == '/')
+			fn = p+1; /* keep text after the last '/' */
+
+	printf(" %9s:%-4d", fn, debug_trace_buf[i].linenum);
+	if (++*col == 5)
+	{
+		printf("\n");
+		*col = 0;
+	}
+}
+
+void
+debug_trace_dump(void) /* Dump the trace ring to the console in time order, then clear it. */
+{
+	int s = splhigh(); /* keep the ring stable while printing */
+	int i;
+	int col = 0;
+
+	printf("Debug trace dump ");
+
+	/* If the last entry is nonzero,
+	   the trace probably wrapped around.
+	   Print out all the entries after the current position
+	   before all the entries before it,
+	   so we get a total of DEBUG_TRACE_LEN entries
+	   in correct time order. */
+	if (debug_trace_buf[DEBUG_TRACE_LEN-1].filename != 0)
+	{
+		printf("(full):\n");
+
+		for (i = debug_trace_pos; i < DEBUG_TRACE_LEN; i++)
+		{
+			print_entry(i, &col); /* oldest entries first */
+		}
+	}
+	else
+		printf("(%d entries):\n", debug_trace_pos);
+
+	/* Print the entries before the current position. */
+	for (i = 0; i < debug_trace_pos; i++)
+	{
+		print_entry(i, &col);
+	}
+
+	if (col != 0)
+		printf("\n"); /* finish a partial row */
+
+	debug_trace_reset(); /* dumping also clears the buffer */
+
+	splx(s);
+}
+
+#include <kern/syscall_sw.h>
+
+int syscall_trace = 0;
+task_t syscall_trace_task;
+
+int
+syscall_trace_print(int syscallvec, ...) /* Log a Mach trap invocation (task, thread, name, args); returns syscallvec unchanged. */
+{
+	int syscallnum = syscallvec >> 4; /* trap vectors are spaced 16 apart */
+	int i;
+	const mach_trap_t *trap = &mach_trap_table[syscallnum];
+
+	if (syscall_trace_task && syscall_trace_task != current_task())
+		goto out; /* filtering to one task and this isn't it */
+
+	printf("0x%08x:0x%08x:%s(",
+	       current_task(), current_thread(), trap->mach_trap_name);
+	for (i = 0; i < trap->mach_trap_arg_count; i++) {
+		unsigned long value = (&syscallvec)[1+i]; /* assumes args sit contiguously on the stack after syscallvec — TODO confirm per-ABI */
+		/* Use a crude heuristic to format pointers. */
+		if (value > 1024)
+			printf("0x%08x", value);
+		else
+			printf("%d", value);
+
+		if (i + 1 < trap->mach_trap_arg_count)
+			printf(", ");
+	}
+	printf(")\n");
+
+    out:
+	return syscallvec; /* caller relies on the vector passing through untouched */
+}
+
+#endif /* DEBUG */
diff --git a/i386/i386/debug_trace.S b/i386/i386/debug_trace.S
new file mode 100644
index 0000000..f275e1b
--- /dev/null
+++ b/i386/i386/debug_trace.S
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#ifdef DEBUG
+
+#include <mach/machine/asm.h>
+#include <i386/xen.h>
+
+#include "debug.h"
+
+ .text
+ENTRY(_debug_trace) /* Append (filename, line) args to debug_trace_buf; clobbers nothing, IF restored by popf. */
+	pushf
+	cli			/* atomic w.r.t. interrupts on this CPU */
+	pushl	%eax
+	pushl	%ebx
+	.byte	0x36		/* SS: bug in gas? */
+	movl	%ss:EXT(debug_trace_pos),%eax
+	movl	16(%esp),%ebx	/* arg 0: filename pointer */
+	movl	%ebx,%ss:EXT(debug_trace_buf)(,%eax,8)
+	movl	20(%esp),%ebx	/* arg 1: line number */
+	movl	%ebx,%ss:EXT(debug_trace_buf)+4(,%eax,8)
+	incl	%eax
+	andl	$DEBUG_TRACE_LEN-1,%eax	/* ring wrap; DEBUG_TRACE_LEN must be a power of two */
+	.byte	0x36		/* SS: bug in gas? */
+	movl	%eax,%ss:EXT(debug_trace_pos)
+	popl	%ebx
+	popl	%eax
+	popf
+	ret
+
+#endif /* DEBUG */
+
+/* XXX gas bug? need at least one symbol... */
+foo:
+
diff --git a/i386/i386/eflags.h b/i386/i386/eflags.h
new file mode 100644
index 0000000..58ad968
--- /dev/null
+++ b/i386/i386/eflags.h
@@ -0,0 +1,35 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#ifndef _KERNEL_I386_EFLAGS_H_
+#define _KERNEL_I386_EFLAGS_H_
+
+#include <mach/machine/eflags.h>
+
+/* Eflags bit combinations used by the Mach kernel. */
+#define EFL_USER_SET (EFL_IF)
+#define EFL_USER_CLEAR (EFL_IOPL|EFL_NT|EFL_RF)
+
+#endif /* _KERNEL_I386_EFLAGS_H_ */
diff --git a/i386/i386/fpu.c b/i386/i386/fpu.c
new file mode 100644
index 0000000..4cd31dd
--- /dev/null
+++ b/i386/i386/fpu.c
@@ -0,0 +1,948 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992-1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * General FPU state handling cleanups
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+/*
+ * Support for 80387 floating point or FP emulator.
+ */
+
+#include <string.h>
+
+#include <mach/exception.h>
+#include <mach/machine/thread_status.h>
+#include <mach/machine/fp_reg.h>
+
+#include <kern/debug.h>
+#include <machine/machspl.h> /* spls */
+#include <kern/printf.h>
+#include <kern/thread.h>
+#include <kern/slab.h>
+
+#include <i386/thread.h>
+#include <i386/fpu.h>
+#include <i386/pio.h>
+#include <i386/irq.h>
+#include <i386/locore.h>
+#include <i386/trap.h>
+#include "cpu_number.h"
+
+#if 0
+#include <i386/ipl.h>
+#define ASSERT_IPL(L) \
+{ \
+ if (curr_ipl[cpu_number()] != L) { \
+ printf("IPL is %d, expected %d\n", curr_ipl[cpu_number()], L); \
+ panic("fpu: wrong ipl"); \
+ } \
+}
+#else
+#define ASSERT_IPL(L)
+#endif
+
+_Static_assert(sizeof(struct i386_xfp_xstate_header) == 8*8,
+ "struct i386_xfp_xstate_header size");
+_Static_assert(sizeof(struct i386_xfp_save) == 512 + 8*8,
+ "struct i386_xfp_save size");
+
+int fp_kind = FP_387; /* 80387 present */
+enum fp_save_kind fp_save_kind = FP_FNSAVE; /* Which instruction we use to save/restore FPU state */
+uint64_t fp_xsave_support; /* Bitmap of supported XSAVE save areas */
+unsigned fp_xsave_size = sizeof(struct i386_fpsave_state);
+struct i386_fpsave_state *fp_default_state;
+struct kmem_cache ifps_cache; /* cache for FPU save area */
+static unsigned long mxcsr_feature_mask = 0xffffffff; /* Always AND user-provided mxcsr with this security mask */
+
+#if NCPUS == 1
+volatile thread_t fp_thread = THREAD_NULL;
+ /* thread whose state is in FPU */
+ /* always THREAD_NULL if emulating
+ FPU */
+volatile thread_t fp_intr_thread = THREAD_NULL;
+
+
+#define clear_fpu() \
+ { \
+ set_ts(); \
+ fp_thread = THREAD_NULL; \
+ }
+
+#else /* NCPUS > 1 */
+#define clear_fpu() \
+ { \
+ set_ts(); \
+ }
+
+#endif
+
+
+/*
+ * Look for FPU and initialize it.
+ * Called on each CPU.
+ */
+void
+init_fpu(void) /* Probe the FPU, pick the best save mechanism (FNSAVE/FXSAVE/XSAVE*), set fp_kind/fp_save_kind/fp_xsave_size. Called on each CPU. */
+{
+	unsigned short status, control;
+
+#ifdef MACH_RING1
+	clear_ts();
+#else	/* MACH_RING1 */
+	unsigned int native = 0;
+
+	if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486)
+		native = CR0_NE; /* native FPU error reporting (#MF) on 486+ */
+
+	/*
+	 * Check for FPU by initializing it,
+	 * then trying to read the correct bit patterns from
+	 * the control and status registers.
+	 */
+	set_cr0((get_cr0() & ~(CR0_EM|CR0_TS)) | native);	/* allow use of FPU */
+#endif	/* MACH_RING1 */
+
+	fninit();
+	status = fnstsw();
+	fnstcw(&control);
+
+	if ((status & 0xff) == 0 &&
+	    (control & 0x103f) == 0x3f) /* expected post-fninit bit patterns */
+	{
+		/*
+		 * We have a FPU of some sort.
+		 * Compare -infinity against +infinity
+		 * to check whether we have a 287 or a 387.
+		 */
+		volatile double fp_infinity, fp_one, fp_zero;
+		fp_one = 1.0;
+		fp_zero = 0.0;
+		fp_infinity = fp_one / fp_zero;
+		if (fp_infinity == -fp_infinity) {
+			/*
+			 * We have an 80287.
+			 */
+			fp_kind = FP_287;
+			fp_save_kind = FP_FNSAVE;
+			asm volatile(".byte 0xdb; .byte 0xe4");	/* fnsetpm */
+		}
+		else {
+			/*
+			 * We have a 387.
+			 */
+			fp_kind = FP_387;
+			fp_save_kind = FP_FNSAVE;
+
+			if (CPU_HAS_FEATURE(CPU_FEATURE_XSAVE)) {
+				unsigned eax, ebx, ecx, edx;
+				unsigned xsave_cpu_features;
+
+				eax = 0xd; /* CPUID leaf 0xD: XSAVE enumeration */
+				ecx = 0x0;
+				cpuid(eax, ebx, ecx, edx);
+				fp_xsave_support = eax + (((uint64_t) edx) << 32); /* EDX:EAX = supported XCR0 bits */
+
+#ifndef MACH_RING1
+				set_cr4(get_cr4() | CR4_OSFXSR | CR4_OSXSAVE);
+				set_xcr0(fp_xsave_support);
+#endif /* MACH_RING1 */
+
+				eax = 0xd;
+				ecx = 0x1; /* sub-leaf 1: XSAVEOPT/XSAVEC/XSAVES feature flags */
+				cpuid(eax, ebx, ecx, edx);
+				xsave_cpu_features = eax;
+
+				if (xsave_cpu_features & CPU_FEATURE_XSAVES) {
+					// all states enabled by XCR0|IA32_XSS
+					fp_xsave_size = offsetof(struct i386_fpsave_state, xfp_save_state) + ebx;
+					if (fp_xsave_size < sizeof(struct i386_fpsave_state))
+						panic("CPU-provided xstate size %d "
+						      "is smaller than our minimum %d!\n",
+						      fp_xsave_size,
+						      (int) sizeof(struct i386_fpsave_state));
+
+					fp_save_kind = FP_XSAVES;
+				} else {
+					eax = 0xd;
+					ecx = 0x0;
+					cpuid(eax, ebx, ecx, edx);
+					// all states enabled by XCR0
+					fp_xsave_size = offsetof(struct i386_fpsave_state, xfp_save_state) + ebx;
+					if(fp_xsave_size < sizeof(struct i386_fpsave_state))
+						panic("CPU-provided xstate size %d "
+						      "is smaller than our minimum %d!\n",
+						      fp_xsave_size,
+						      (int) sizeof(struct i386_fpsave_state));
+
+					if (xsave_cpu_features & CPU_FEATURE_XSAVEOPT)
+						fp_save_kind = FP_XSAVEOPT;
+					else if (xsave_cpu_features & CPU_FEATURE_XSAVEC)
+						fp_save_kind = FP_XSAVEC;
+					else
+						fp_save_kind = FP_XSAVE;
+				}
+
+				fp_kind = FP_387X;
+			}
+
+			else if (CPU_HAS_FEATURE(CPU_FEATURE_FXSR)) {
+#ifndef MACH_RING1
+				set_cr4(get_cr4() | CR4_OSFXSR);
+#endif /* MACH_RING1 */
+				fp_kind = FP_387FX;
+				fp_save_kind = FP_FXSAVE;
+			}
+
+			if (fp_save_kind != FP_FNSAVE) {
+				/* Compute mxcsr_feature_mask. */
+				static /* because we _need_ alignment */
+				struct i386_xfp_save save;
+				unsigned long mask;
+				fxsave(&save);
+				mask = save.fp_mxcsr_mask;
+				if (!mask)
+					mask = 0x0000ffbf; /* default mask when CPU reports none */
+				mxcsr_feature_mask &= mask;
+			}
+		}
+#ifdef MACH_RING1
+		set_ts();
+#else	/* MACH_RING1 */
+		/*
+		 * Trap wait instructions. Turn off FPU for now.
+		 */
+		set_cr0(get_cr0() | CR0_TS | CR0_MP);
+#endif	/* MACH_RING1 */
+	}
+	else {
+		/*
+		 * NO FPU.
+		 */
+		panic("No FPU!");
+	}
+}
+
+/*
+ * Initialize FP handling.
+ */
+void
+fpu_module_init(void) /* Create the FPU save-area cache and capture the CPU's post-fninit state as fp_default_state. */
+{
+	kmem_cache_init(&ifps_cache, "i386_fpsave_state",
+			fp_xsave_size,
+			alignof(struct i386_fpsave_state),
+			NULL, 0);
+
+	fp_default_state = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
+	memset(fp_default_state, 0, fp_xsave_size);
+
+	/* Get default state from CPU. */
+	clear_ts();
+	fninit();
+	switch (fp_save_kind) {
+	    case FP_XSAVEC:
+	    case FP_XSAVES:
+		/* XRSTORS requires compact format, a bit faster anyway */
+		fp_default_state->xfp_save_state.header.xcomp_bv = XSAVE_XCOMP_BV_COMPACT;
+		/* Fallthrough */
+	    case FP_XSAVE:
+	    case FP_XSAVEOPT:
+	    case FP_FXSAVE:
+		fxsave(&fp_default_state->xfp_save_state);
+		break;
+	    case FP_FNSAVE:
+		fnsave(&fp_default_state->fp_save_state);
+		break;
+	}
+	set_ts(); /* FPU off again until a thread faults it in */
+
+	fp_default_state->fp_valid = TRUE;
+}
+
+/*
+ * Free a FPU save area.
+ * Called only when thread terminating - no locking necessary.
+ */
+void
+fp_free(struct i386_fpsave_state *fps) /* Release a save area; thread is terminating, so no locking needed. */
+{
+ASSERT_IPL(SPL0);
+#if NCPUS == 1
+	if ((fp_thread != THREAD_NULL) && (fp_thread->pcb->ims.ifps == fps)) {
+		/*
+		 * Make sure we don't get FPU interrupts later for
+		 * this thread
+		 */
+		clear_ts();
+		fwait(); /* flush any pending FPU exception now */
+
+		/* Mark it free and disable access */
+		clear_fpu();
+	}
+#endif	/* NCPUS == 1 */
+	kmem_cache_free(&ifps_cache, (vm_offset_t) fps);
+}
+
+/* The two following functions were stolen from Linux's i387.c */
+static inline unsigned short
+twd_i387_to_fxsr (unsigned short twd) /* Compress an i387 2-bit-per-reg tag word to the FXSR 1-bit-per-reg form. */
+{
+	unsigned int tmp; /* to avoid 16 bit prefixes in the code */
+
+	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
+	tmp = ~twd; /* i387 tag 11 = empty; invert so empty becomes 00 */
+	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
+	/* and move the valid bits to the lower byte. */
+	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
+	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
+	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
+	return tmp;
+}
+
+static inline unsigned long
+twd_fxsr_to_i387 (struct i386_xfp_save *fxsave) /* Expand the FXSR 1-bit tag word back to the i387 2-bit form by classifying each register. */
+{
+	struct {
+		unsigned short significand[4];
+		unsigned short exponent;
+		unsigned short padding[3];
+	} *st = NULL;
+	unsigned long tos = (fxsave->fp_status >> 11) & 7; /* top-of-stack from status word */
+	unsigned long twd = (unsigned long) fxsave->fp_tag;
+	unsigned long tag;
+	unsigned long ret = 0xffff0000u;
+	int i;
+
+#define FPREG_ADDR(f, n) ((void *)&(f)->fp_reg_word + (n) * 16) /* no trailing ';': keep the macro usable as an expression */
+
+	for (i = 0 ; i < 8 ; i++) {
+		if (twd & 0x1) {
+			st = FPREG_ADDR (fxsave, (i - tos) & 7);
+
+			switch (st->exponent & 0x7fff) {
+			    case 0x7fff:
+				tag = 2;		/* Special */
+				break;
+			    case 0x0000:
+				if (!st->significand[0] &&
+				    !st->significand[1] &&
+				    !st->significand[2] &&
+				    !st->significand[3] ) {
+					tag = 1;	/* Zero */
+				} else {
+					tag = 2;	/* Special */
+				}
+				break;
+			    default:
+				if (st->significand[3] & 0x8000) {
+					tag = 0;	/* Valid */
+				} else {
+					tag = 2;	/* Special */
+				}
+				break;
+			}
+		} else {
+			tag = 3;			/* Empty */
+		}
+		ret |= (tag << (2 * i));
+		twd = twd >> 1;
+	}
+	return ret;
+}
+
+/*
+ * Set the floating-point state for a thread.
+ * If the thread is not the current thread, it is
+ * not running (held). Locking needed against
+ * concurrent fpu_set_state or fpu_get_state.
+ */
+kern_return_t
+fpu_set_state(const thread_t thread, /* Install user-supplied FP state into thread's PCB; thread is held or current. */
+	      struct i386_float_state *state)
+{
+	pcb_t pcb = thread->pcb;
+	struct i386_fpsave_state *ifps;
+	struct i386_fpsave_state *new_ifps;
+
+ASSERT_IPL(SPL0);
+	if (fp_kind == FP_NO)
+		return KERN_FAILURE;
+
+#if NCPUS == 1
+
+	/*
+	 * If this thread`s state is in the FPU,
+	 * discard it; we are replacing the entire
+	 * FPU state.
+	 */
+	if (fp_thread == thread) {
+		clear_ts();
+		fwait();			/* wait for possible interrupt */
+		clear_fpu();			/* no state in FPU */
+	}
+#endif
+
+	if (state->initialized == 0) {
+		/*
+		 * new FPU state is 'invalid'.
+		 * Deallocate the fp state if it exists.
+		 */
+		simple_lock(&pcb->lock);
+		ifps = pcb->ims.ifps;
+		pcb->ims.ifps = 0;
+		simple_unlock(&pcb->lock);
+
+		if (ifps != 0) {
+			kmem_cache_free(&ifps_cache, (vm_offset_t) ifps);
+		}
+	}
+	else {
+		/*
+		 * Valid state. Allocate the fp state if there is none.
+		 */
+		struct i386_fp_save *user_fp_state;
+		struct i386_fp_regs *user_fp_regs;
+
+		user_fp_state = (struct i386_fp_save *) &state->hw_state[0];
+		user_fp_regs  = (struct i386_fp_regs *)
+			&state->hw_state[sizeof(struct i386_fp_save)];
+
+		new_ifps = 0;
+	    Retry:
+		simple_lock(&pcb->lock);
+		ifps = pcb->ims.ifps;
+		if (ifps == 0) {
+			if (new_ifps == 0) {
+				simple_unlock(&pcb->lock); /* cannot allocate with the lock held */
+				new_ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
+				goto Retry;
+			}
+			ifps = new_ifps;
+			new_ifps = 0;
+			pcb->ims.ifps = ifps;
+		}
+
+		/*
+		 * Ensure that reserved parts of the environment are 0.
+		 */
+		memset(ifps, 0, fp_xsave_size);
+
+		if (fp_save_kind != FP_FNSAVE) {
+			int i;
+
+			ifps->xfp_save_state.fp_control = user_fp_state->fp_control;
+			ifps->xfp_save_state.fp_status  = user_fp_state->fp_status;
+			ifps->xfp_save_state.fp_tag     = twd_i387_to_fxsr(user_fp_state->fp_tag); /* convert tag word format */
+			ifps->xfp_save_state.fp_eip     = user_fp_state->fp_eip;
+			ifps->xfp_save_state.fp_cs      = user_fp_state->fp_cs;
+			ifps->xfp_save_state.fp_opcode  = user_fp_state->fp_opcode;
+			ifps->xfp_save_state.fp_dp      = user_fp_state->fp_dp;
+			ifps->xfp_save_state.fp_ds      = user_fp_state->fp_ds;
+			for (i=0; i<8; i++)
+				memcpy(&ifps->xfp_save_state.fp_reg_word[i], &user_fp_regs->fp_reg_word[i], sizeof(user_fp_regs->fp_reg_word[i]));
+		} else {
+			ifps->fp_save_state.fp_control = user_fp_state->fp_control;
+			ifps->fp_save_state.fp_status  = user_fp_state->fp_status;
+			ifps->fp_save_state.fp_tag     = user_fp_state->fp_tag;
+			ifps->fp_save_state.fp_eip     = user_fp_state->fp_eip;
+			ifps->fp_save_state.fp_cs      = user_fp_state->fp_cs;
+			ifps->fp_save_state.fp_opcode  = user_fp_state->fp_opcode;
+			ifps->fp_save_state.fp_dp      = user_fp_state->fp_dp;
+			ifps->fp_save_state.fp_ds      = user_fp_state->fp_ds;
+			ifps->fp_regs = *user_fp_regs;
+		}
+
+		simple_unlock(&pcb->lock);
+		if (new_ifps != 0)
+			kmem_cache_free(&ifps_cache, (vm_offset_t) new_ifps); /* lost the race: free the spare */
+	}
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * Get the floating-point state for a thread.
+ * If the thread is not the current thread, it is
+ * not running (held). Locking needed against
+ * concurrent fpu_set_state or fpu_get_state.
+ */
+kern_return_t
+fpu_get_state(const thread_t thread, /* Copy thread's FP state out to user format; thread is held or current. */
+	      struct i386_float_state *state)
+{
+	pcb_t pcb = thread->pcb;
+	struct i386_fpsave_state *ifps;
+
+ASSERT_IPL(SPL0);
+	if (fp_kind == FP_NO)
+		return KERN_FAILURE;
+
+	simple_lock(&pcb->lock);
+	ifps = pcb->ims.ifps;
+	if (ifps == 0) {
+		/*
+		 * No valid floating-point state.
+		 */
+		simple_unlock(&pcb->lock);
+		memset(state, 0, sizeof(struct i386_float_state));
+		return KERN_SUCCESS;
+	}
+
+	/* Make sure we`ve got the latest fp state info */
+	/* If the live fpu state belongs to our target */
+#if NCPUS == 1
+	if (thread == fp_thread)
+#else
+	if (thread == current_thread())
+#endif
+	{
+		clear_ts();
+		fp_save(thread); /* flush live registers into the PCB save area */
+		clear_fpu();
+	}
+
+	state->fpkind = fp_kind;
+	state->exc_status = 0;
+
+	{
+		struct i386_fp_save *user_fp_state;
+		struct i386_fp_regs *user_fp_regs;
+
+		state->initialized = ifps->fp_valid;
+
+		user_fp_state = (struct i386_fp_save *) &state->hw_state[0];
+		user_fp_regs  = (struct i386_fp_regs *)
+			&state->hw_state[sizeof(struct i386_fp_save)];
+
+		/*
+		 * Ensure that reserved parts of the environment are 0.
+		 */
+		memset(user_fp_state, 0, sizeof(struct i386_fp_save));
+
+		if (fp_save_kind != FP_FNSAVE) {
+			int i;
+
+			user_fp_state->fp_control = ifps->xfp_save_state.fp_control;
+			user_fp_state->fp_status  = ifps->xfp_save_state.fp_status;
+			user_fp_state->fp_tag     = twd_fxsr_to_i387(&ifps->xfp_save_state); /* expand tag word format */
+			user_fp_state->fp_eip     = ifps->xfp_save_state.fp_eip;
+			user_fp_state->fp_cs      = ifps->xfp_save_state.fp_cs;
+			user_fp_state->fp_opcode  = ifps->xfp_save_state.fp_opcode;
+			user_fp_state->fp_dp      = ifps->xfp_save_state.fp_dp;
+			user_fp_state->fp_ds      = ifps->xfp_save_state.fp_ds;
+			for (i=0; i<8; i++)
+				memcpy(&user_fp_regs->fp_reg_word[i], &ifps->xfp_save_state.fp_reg_word[i], sizeof(user_fp_regs->fp_reg_word[i]));
+		} else {
+			user_fp_state->fp_control = ifps->fp_save_state.fp_control;
+			user_fp_state->fp_status  = ifps->fp_save_state.fp_status;
+			user_fp_state->fp_tag     = ifps->fp_save_state.fp_tag;
+			user_fp_state->fp_eip     = ifps->fp_save_state.fp_eip;
+			user_fp_state->fp_cs      = ifps->fp_save_state.fp_cs;
+			user_fp_state->fp_opcode  = ifps->fp_save_state.fp_opcode;
+			user_fp_state->fp_dp      = ifps->fp_save_state.fp_dp;
+			user_fp_state->fp_ds      = ifps->fp_save_state.fp_ds;
+			*user_fp_regs = ifps->fp_regs;
+		}
+	}
+	simple_unlock(&pcb->lock);
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * Initialize FPU for an already-running thread.
+ */
+static void fpinit(thread_t thread) /* Load the default FPU state, then the thread's inherited control word if any. */
+{
+	unsigned short control;
+
+ASSERT_IPL(SPL0);
+	clear_ts();
+	fpu_rstor(fp_default_state);
+
+	control = thread->pcb->init_control; /* set by fpinherit(); 0 means "no inherited control word" */
+	if (control)
+		fldcw(control);
+}
+
+/*
+ * Inherit FPU state from a parent to a child, if any
+ */
+void fpinherit(thread_t parent_thread, thread_t thread) /* Propagate the parent's FP control word to a new thread's init_control. */
+{
+	pcb_t pcb = parent_thread->pcb;
+	struct i386_fpsave_state *ifps;
+
+	ifps = pcb->ims.ifps;
+	if (ifps) {
+		/* Parent does have a state, inherit it */
+		if (ifps->fp_valid == TRUE)
+			thread->pcb->init_control = ifps->fp_save_state.fp_control;
+		else
+			/* State is in the FPU, fetch from there */
+			fnstcw(&thread->pcb->init_control);
+	}
+}
+
+/*
+ * Coprocessor not present.
+ */
+void
+fpnoextflt(void) /* #NM (device-not-available) handler: hand the FPU to the current thread. */
+{
+	/*
+	 * Enable FPU use.
+	 */
+ASSERT_IPL(SPL0);
+	clear_ts();
+#if NCPUS == 1
+
+	/*
+	 * If this thread`s state is in the FPU, we are done.
+	 */
+	if (fp_thread == current_thread())
+		return;
+
+	/* Make sure we don't do fpsave() in fp_intr while doing fpsave()
+	 * here if the current fpu instruction generates an error.
+	 */
+	fwait();
+	/*
+	 * If another thread`s state is in the FPU, save it.
+	 */
+	if (fp_thread != THREAD_NULL) {
+		fp_save(fp_thread);
+	}
+
+	/*
+	 * Give this thread the FPU.
+	 */
+	fp_thread = current_thread();
+
+#endif	/* NCPUS == 1 */
+
+	/*
+	 * Load this thread`s state into the FPU.
+	 */
+	fp_load(current_thread());
+}
+
+/*
+ * FPU overran end of segment.
+ * Re-initialize FPU. Floating point state is not valid.
+ */
+void
+fpextovrflt(void) /* Segment overrun: FP state unrecoverable — reset FPU, drop the state, raise EXC_BAD_ACCESS. */
+{
+	thread_t thread = current_thread();
+	pcb_t pcb;
+	struct i386_fpsave_state *ifps;
+
+#if NCPUS == 1
+
+	/*
+	 * Is exception for the currently running thread?
+	 */
+	if (fp_thread != thread) {
+		/* Uh oh... */
+		panic("fpextovrflt");
+	}
+#endif
+
+	/*
+	 * This is a non-recoverable error.
+	 * Invalidate the thread`s FPU state.
+	 */
+	pcb = thread->pcb;
+	simple_lock(&pcb->lock);
+	ifps = pcb->ims.ifps;
+	pcb->ims.ifps = 0;
+	simple_unlock(&pcb->lock);
+
+	/*
+	 * Re-initialize the FPU.
+	 */
+	clear_ts();
+	fninit();
+
+	/*
+	 * And disable access.
+	 */
+	clear_fpu();
+
+	if (ifps)
+		kmem_cache_free(&ifps_cache, (vm_offset_t) ifps);
+
+	/*
+	 * Raise exception.
+	 */
+	i386_exception(EXC_BAD_ACCESS, VM_PROT_READ|VM_PROT_EXECUTE, 0);
+	/*NOTREACHED*/
+}
+
+static int
+fphandleerr(void) /* Save state of the FPU's owner after an FP error; returns 1 if the fault was not for the current thread. */
+{
+	thread_t thread = current_thread();
+
+	/*
+	 * Save the FPU context to the thread using it.
+	 */
+#if NCPUS == 1
+	if (fp_thread == THREAD_NULL) {
+		printf("fphandleerr: FPU not belonging to anyone!\n");
+		clear_ts();
+		fninit();
+		clear_fpu();
+		return 1;
+	}
+
+	if (fp_thread != thread) {
+		/*
+		 * FPU exception is for a different thread.
+		 * When that thread again uses the FPU an exception will be
+		 * raised in fp_load. Remember the condition in fp_valid (== 2).
+		 */
+		clear_ts();
+		fp_save(fp_thread);
+		fp_thread->pcb->ims.ifps->fp_valid = 2; /* 2 = delayed-exception marker, checked in fp_load */
+		fninit();
+		clear_fpu();
+		/* leave fp_intr_thread THREAD_NULL */
+		return 1;
+	}
+#endif	/* NCPUS == 1 */
+
+	/*
+	 * Save the FPU state and turn off the FPU.
+	 */
+	clear_ts();
+	fp_save(thread);
+	fninit();
+	clear_fpu();
+
+	return 0;
+}
+
+/*
+ * FPU error. Called by exception handler.
+ */
+void
+fpexterrflt(void) /* #MF exception handler: save state, then raise EXC_ARITHMETIC with the FP status word. */
+{
+	thread_t thread = current_thread();
+
+	if (fphandleerr())
+		return; /* fault belonged to another (or no) thread; handled there */
+
+	/*
+	 * Raise FPU exception.
+	 * Locking not needed on pcb->ims.ifps,
+	 * since thread is running.
+	 */
+	i386_exception(EXC_ARITHMETIC,
+		       EXC_I386_EXTERR,
+		       fp_save_kind != FP_FNSAVE ?
+			       thread->pcb->ims.ifps->xfp_save_state.fp_status :
+			       thread->pcb->ims.ifps->fp_save_state.fp_status);
+	/*NOTREACHED*/
+}
+
+#ifndef MACH_RING1
+/*
+ * FPU error. Called by AST.
+ */
+void
+fpastintr(void) /* AST-time delivery of an FPU error noticed in fpintr(); raises EXC_ARITHMETIC. */
+{
+	thread_t thread = current_thread();
+
+ASSERT_IPL(SPL0);
+#if NCPUS == 1
+	/*
+	 * Since FPU errors only occur on ESC or WAIT instructions,
+	 * the current thread should own the FPU. If it didn`t,
+	 * we should have gotten the task-switched interrupt first.
+	 */
+	if (fp_thread != THREAD_NULL) {
+		panic("fpastintr"); /* was "fpexterrflt": misnamed the faulting function */
+		return;
+	}
+
+	/*
+	 * Check if we got a context switch between the interrupt and the AST
+	 * This can happen if the interrupt arrived after the FPU AST was
+	 * checked. In this case, raise the exception in fp_load when this
+	 * thread next time uses the FPU. Remember exception condition in
+	 * fp_valid (extended boolean 2).
+	 */
+	if (fp_intr_thread != thread) {
+		if (fp_intr_thread == THREAD_NULL) {
+			panic("fpastintr: fp_intr_thread == THREAD_NULL"); /* was "fpexterrflt: ..." */
+			return;
+		}
+		fp_intr_thread->pcb->ims.ifps->fp_valid = 2; /* delayed exception, delivered in fp_load */
+		fp_intr_thread = THREAD_NULL;
+		return;
+	}
+	fp_intr_thread = THREAD_NULL;
+#else	/* NCPUS == 1 */
+	/*
+	 * Save the FPU state and turn off the FPU.
+	 */
+	fp_save(thread);
+#endif	/* NCPUS == 1 */
+
+	/*
+	 * Raise FPU exception.
+	 * Locking not needed on pcb->ims.ifps,
+	 * since thread is running.
+	 */
+	i386_exception(EXC_ARITHMETIC,
+		       EXC_I386_EXTERR,
+		       fp_save_kind != FP_FNSAVE ?
+			       thread->pcb->ims.ifps->xfp_save_state.fp_status :
+			       thread->pcb->ims.ifps->fp_save_state.fp_status);
+	/*NOTREACHED*/
+}
+#endif /* MACH_RING1 */
+
+/*
+ * Save FPU state.
+ *
+ * Locking not needed:
+ * . if called from fpu_get_state, pcb already locked.
+ * . if called from fpnoextflt or fp_intr, we are single-cpu
+ * . otherwise, thread is running.
+ */
+void
+fp_save(thread_t thread) /* Flush live FPU registers into the thread's PCB save area, if not already there. */
+{
+	pcb_t pcb = thread->pcb;
+	struct i386_fpsave_state *ifps = pcb->ims.ifps;
+
+	if (ifps != 0 && !ifps->fp_valid)
+		/* registers are in FPU */
+		fpu_save(ifps);
+}
+
+/*
+ * Restore FPU state from PCB.
+ *
+ * Locking not needed; always called on the current thread.
+ */
+void
+fp_load(thread_t thread) /* Restore the current thread's FPU state (allocating a default one on first use). */
+{
+	pcb_t pcb = thread->pcb;
+	struct i386_fpsave_state *ifps;
+
+ASSERT_IPL(SPL0);
+	ifps = pcb->ims.ifps;
+	if (ifps == 0) {
+		ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
+		memcpy(ifps, fp_default_state, fp_xsave_size); /* first use: start from the captured defaults */
+		pcb->ims.ifps = ifps;
+		fpinit(thread);
+#if 1
+/*
+ * I'm not sure this is needed. Does the fpu regenerate the interrupt in
+ * frstor or not? Without this code we may miss some exceptions, with it
+ * we might send too many exceptions.
+ */
+	} else if (ifps->fp_valid == 2) {
+		/* delayed exception pending */
+
+		ifps->fp_valid = TRUE;
+		clear_fpu();
+		/*
+		 * Raise FPU exception.
+		 * Locking not needed on pcb->ims.ifps,
+		 * since thread is running.
+		 */
+		i386_exception(EXC_ARITHMETIC,
+			       EXC_I386_EXTERR,
+			       fp_save_kind != FP_FNSAVE ?
+				       thread->pcb->ims.ifps->xfp_save_state.fp_status :
+				       thread->pcb->ims.ifps->fp_save_state.fp_status);
+		/*NOTREACHED*/
+#endif
+	} else if (! ifps->fp_valid) {
+		printf("fp_load: invalid FPU state!\n");
+		fninit ();
+	} else {
+		fpu_rstor(ifps);
+	}
+	ifps->fp_valid = FALSE;		/* in FPU */
+}
+
+#if (defined(AT386) || defined(ATX86_64)) && !defined(MACH_XEN)
+/*
+ * Handle a coprocessor error interrupt on the AT386.
+ * This comes in on line 5 of the slave PIC at SPL1.
+ */
+void
+fpintr(int unit) /* AT386 coprocessor-error interrupt (slave PIC line 5); 'unit' is unused. */
+{
+	spl_t s;
+#if NCPUS == 1
+	thread_t thread = current_thread();
+#endif	/* NCPUS == 1 */
+
+ASSERT_IPL(SPL1);
+	/*
+	 * Turn off the extended 'busy' line.
+	 */
+	outb(0xf0, 0);
+
+	if (fphandleerr())
+		return;
+
+#if NCPUS == 1
+	if (fp_intr_thread != THREAD_NULL && fp_intr_thread != thread)
+		panic("fp_intr: already caught intr");
+	fp_intr_thread = thread; /* remembered for fpastintr() */
+#endif	/* NCPUS == 1 */
+
+	/*
+	 * Since we are running on the interrupt stack, we must
+	 * signal the thread to take the exception when we return
+	 * to user mode. Use an AST to do this.
+	 *
+	 * Don`t set the thread`s AST field. If the thread is
+	 * descheduled before it takes the AST, it will notice
+	 * the FPU error when it reloads its FPU state.
+	 */
+	s = splsched();
+	ast_on(cpu_number(), AST_I386_FP);
+	splx(s);
+}
+#endif /* AT386 */
diff --git a/i386/i386/fpu.h b/i386/i386/fpu.h
new file mode 100644
index 0000000..51e0f31
--- /dev/null
+++ b/i386/i386/fpu.h
@@ -0,0 +1,250 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_FPU_H_
+#define _I386_FPU_H_
+
+/*
+ * Macro definitions for routines to manipulate the
+ * floating-point processor.
+ */
+
+#include <sys/types.h>
+#include <i386/proc_reg.h>
+#include <kern/thread.h>
+
+/*
+ * FPU instructions.
+ */
+#define fninit() \
+ asm volatile("fninit")
+
+#define fnstcw(control) \
+ asm("fnstcw %0" : "=m" (*(unsigned short *)(control)))
+
+#define fstcw(control) \
+ asm volatile("fstcw %0" : "=m" (*(unsigned short *)(control)))
+
+#define fldcw(control) \
+ asm volatile("fldcw %0" : : "m" (*(unsigned short *) &(control)) )
+
+#define fnstsw() \
+ ({ \
+ unsigned short _status__; \
+ asm("fnstsw %0" : "=ma" (_status__)); \
+ _status__; \
+ })
+
+#define fnclex() \
+ asm volatile("fnclex")
+
+#define fnsave(state) \
+ asm volatile("fnsave %0" : "=m" (*state))
+
+#define frstor(state) \
+ asm volatile("frstor %0" : : "m" (state))
+
+#define fxsave(state) \
+ asm volatile("fxsave %0" : "=m" (*state))
+
+#define fxrstor(state) \
+ asm volatile("fxrstor %0" : : "m" (state))
+
+/*
+ * Read 64-bit extended control register `n' with the XGETBV
+ * instruction (low half in eax, high half in edx).
+ * NOTE(review): per the ISA, XGETBV faults unless CR4.OSXSAVE has
+ * been enabled first — callers must guarantee that.
+ */
+static inline uint64_t xgetbv(uint32_t n) {
+ uint32_t eax, edx;
+ asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (n));
+ return eax + ((uint64_t) edx << 32);
+}
+
+/* Read XCR0, the XSAVE feature-enable mask (XGETBV with ecx == 0). */
+static inline uint64_t get_xcr0(void) {
+ return xgetbv(0);
+}
+
+/*
+ * Write 64-bit extended control register `n' with the XSETBV
+ * instruction (low half in eax, high half in edx).
+ * NOTE(review): XSETBV is privileged (CPL 0) and faults on an
+ * invalid bit combination — callers must pass a supported mask.
+ */
+static inline void xsetbv(uint32_t n, uint64_t value) {
+ uint32_t eax, edx;
+
+ eax = value;
+ edx = value >> 32;
+
+ asm volatile("xsetbv" : : "c" (n), "a" (eax), "d" (edx));
+}
+
+/* Write XCR0, enabling the given XSAVE state components (XSETBV, ecx == 0). */
+static inline void set_xcr0(uint64_t value) {
+ xsetbv(0, value);
+}
+
+#define CPU_XCR0_X87 (1 << 0)
+#define CPU_XCR0_SSE (1 << 1)
+#define CPU_XCR0_AVX (1 << 2)
+#define CPU_XCR0_MPX (3 << 3)
+#define CPU_XCR0_AVX512 (7 << 5)
+
+#define CPU_FEATURE_XSAVEOPT (1 << 0)
+#define CPU_FEATURE_XSAVEC (1 << 1)
+#define CPU_FEATURE_XGETBV1 (1 << 2)
+#define CPU_FEATURE_XSAVES (1 << 3)
+
+#define xsave(state) \
+ asm volatile("xsave %0" \
+ : "=m" (*state) \
+ : "a" ((unsigned) fp_xsave_support) \
+ , "d" ((unsigned) (fp_xsave_support >> 32)))
+
+#define xsaveopt(state) \
+ asm volatile("xsaveopt %0" \
+ : "=m" (*state) \
+ : "a" ((unsigned) fp_xsave_support) \
+ , "d" ((unsigned) (fp_xsave_support >> 32)))
+
+#define xsavec(state) \
+ asm volatile("xsavec %0" \
+ : "=m" (*state) \
+ : "a" ((unsigned) fp_xsave_support) \
+ , "d" ((unsigned) (fp_xsave_support >> 32)))
+
+#define xsaves(state) \
+ asm volatile("xsaves %0" \
+ : "=m" (*state) \
+ : "a" ((unsigned) fp_xsave_support) \
+ , "d" ((unsigned) (fp_xsave_support >> 32)))
+
+#define xrstor(state) \
+ asm volatile("xrstor %0" : : "m" (state) \
+ , "a" ((unsigned) fp_xsave_support) \
+ , "d" ((unsigned) (fp_xsave_support >> 32)))
+
+#define xrstors(state) \
+ asm volatile("xrstors %0" : : "m" (state) \
+ , "a" ((unsigned) fp_xsave_support) \
+ , "d" ((unsigned) (fp_xsave_support >> 32)))
+
+/*
+ * Wait for any pending x87 exception to be reported.
+ * No trailing semicolon in the expansion: call sites already write
+ * fwait(); — the old embedded ";" made `if (x) fwait(); else ...'
+ * mis-parse (empty statement swallowed the else).
+ */
+#define fwait() \
+ asm("fwait")
+
+#define fpu_load_context(pcb)
+
+#define fpu_save(ifps) \
+ do { \
+ switch (fp_save_kind) { \
+ case FP_XSAVE: \
+ xsave(&(ifps)->xfp_save_state); \
+ break; \
+ case FP_XSAVEOPT: \
+ xsaveopt(&(ifps)->xfp_save_state); \
+ break; \
+ case FP_XSAVEC: \
+ xsavec(&(ifps)->xfp_save_state); \
+ break; \
+ case FP_XSAVES: \
+ xsaves(&(ifps)->xfp_save_state); \
+ break; \
+ case FP_FXSAVE: \
+ fxsave(&(ifps)->xfp_save_state); \
+ break; \
+ case FP_FNSAVE: \
+ fnsave(&(ifps)->fp_save_state); \
+ break; \
+ } \
+ (ifps)->fp_valid = TRUE; \
+ } while (0)
+
+#define fpu_rstor(ifps) \
+ do { \
+ switch (fp_save_kind) { \
+ case FP_XSAVE: \
+ case FP_XSAVEOPT: \
+ case FP_XSAVEC: \
+ xrstor((ifps)->xfp_save_state); \
+ break; \
+ case FP_XSAVES: \
+ xrstors((ifps)->xfp_save_state); \
+ break; \
+ case FP_FXSAVE: \
+ fxrstor((ifps)->xfp_save_state); \
+ break; \
+ case FP_FNSAVE: \
+ frstor((ifps)->fp_save_state); \
+ break; \
+ } \
+ } while (0)
+
+/*
+ * Save thread`s FPU context.
+ * If only one CPU, we just set the task-switched bit,
+ * to keep the new thread from using the coprocessor.
+ * If multiple CPUs, we save the entire state.
+ */
+#if NCPUS > 1
+#define fpu_save_context(thread) \
+ { \
+ struct i386_fpsave_state *ifps; \
+ ifps = (thread)->pcb->ims.ifps; \
+ if (ifps != 0 && !ifps->fp_valid) { \
+ /* registers are in FPU - save to memory */ \
+ fpu_save(ifps); \
+ set_ts(); \
+ } \
+ }
+
+#else /* NCPUS == 1 */
+#define fpu_save_context(thread) \
+ { \
+ set_ts(); \
+ }
+
+#endif /* NCPUS == 1 */
+
+enum fp_save_kind {
+ FP_FNSAVE,
+ FP_FXSAVE,
+ FP_XSAVE,
+ FP_XSAVEOPT,
+ FP_XSAVEC,
+ FP_XSAVES,
+};
+extern int fp_kind;
+extern enum fp_save_kind fp_save_kind;
+extern struct i386_fpsave_state *fp_default_state;
+extern uint64_t fp_xsave_support;
+extern void fp_save(thread_t thread);
+extern void fp_load(thread_t thread);
+extern void fp_free(struct i386_fpsave_state *fps);
+extern void fpu_module_init(void);
+extern kern_return_t fpu_set_state(
+ thread_t thread,
+ struct i386_float_state *state);
+extern kern_return_t fpu_get_state(
+ thread_t thread,
+ struct i386_float_state *state);
+extern void fpnoextflt(void);
+extern void fpextovrflt(void);
+extern void fpexterrflt(void);
+extern void fpastintr(void);
+extern void init_fpu(void);
+extern void fpintr(int unit);
+extern void fpinherit(thread_t parent_thread, thread_t thread);
+
+#endif /* _I386_FPU_H_ */
diff --git a/i386/i386/gdt.c b/i386/i386/gdt.c
new file mode 100644
index 0000000..4edd3ec
--- /dev/null
+++ b/i386/i386/gdt.c
@@ -0,0 +1,166 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Global descriptor table.
+ */
+#include <mach/machine/vm_types.h>
+#include <mach/xen.h>
+
+#include <kern/assert.h>
+#include <intel/pmap.h>
+#include <kern/cpu_number.h>
+#include <machine/percpu.h>
+
+#include "vm_param.h"
+#include "seg.h"
+#include "gdt.h"
+#include "mp_desc.h"
+
+#ifdef MACH_PV_DESCRIPTORS
+/* It is actually defined in xen_boothdr.S */
+extern
+#endif /* MACH_PV_DESCRIPTORS */
+struct real_descriptor gdt[GDTSZ];
+
+/*
+ * Populate `mygdt' with the kernel segment descriptors for CPU `cpu'
+ * and install it as the live GDT (via the hypervisor on Xen with
+ * MACH_PV_DESCRIPTORS, via lgdt otherwise).
+ */
+static void
+gdt_fill(int cpu, struct real_descriptor *mygdt)
+{
+ /* Initialize the kernel code and data segment descriptors. */
+#ifdef __x86_64__
+ /* 64-bit kernel runs with flat, zero-based segments; that is only
+ valid because the kernel linear/virtual offset is zero. */
+ assert(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS == 0);
+ _fill_gdt_descriptor(mygdt, KERNEL_CS, 0, 0, ACC_PL_K|ACC_CODE_R, SZ_64);
+ _fill_gdt_descriptor(mygdt, KERNEL_DS, 0, 0, ACC_PL_K|ACC_DATA_W, SZ_64);
+#ifndef MACH_PV_DESCRIPTORS
+ _fill_gdt_descriptor(mygdt, LINEAR_DS, 0, 0, ACC_PL_K|ACC_DATA_W, SZ_64);
+#endif /* MACH_PV_DESCRIPTORS */
+#else
+ /* 32-bit kernel: base the segments so that kernel virtual addresses
+ map to the kernel's linear addresses. */
+ _fill_gdt_descriptor(mygdt, KERNEL_CS,
+ LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
+ LINEAR_MAX_KERNEL_ADDRESS - (LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) - 1,
+ ACC_PL_K|ACC_CODE_R, SZ_32);
+ _fill_gdt_descriptor(mygdt, KERNEL_DS,
+ LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
+ LINEAR_MAX_KERNEL_ADDRESS - (LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) - 1,
+ ACC_PL_K|ACC_DATA_W, SZ_32);
+#ifndef MACH_PV_DESCRIPTORS
+ _fill_gdt_descriptor(mygdt, LINEAR_DS,
+ 0,
+ 0xffffffff,
+ ACC_PL_K|ACC_DATA_W, SZ_32);
+#endif /* MACH_PV_DESCRIPTORS */
+ /* Per-CPU data segment, so %gs-relative accesses reach this CPU's
+ percpu area. */
+ vm_offset_t thiscpu = kvtolin(&percpu_array[cpu]);
+ _fill_gdt_descriptor(mygdt, PERCPU_DS,
+ thiscpu,
+ thiscpu + sizeof(struct percpu) - 1,
+#ifdef __x86_64__
+ /* NOTE(review): this arm appears unreachable — we are inside the
+ #else of #ifdef __x86_64__ above; verify against upstream. */
+ ACC_PL_K|ACC_DATA_W, SZ_64
+#else
+ ACC_PL_K|ACC_DATA_W, SZ_32
+#endif
+ );
+#endif
+
+#ifdef MACH_PV_DESCRIPTORS
+ /* Xen PV: the GDT page must be read-only before handing it to the
+ hypervisor, which installs it on our behalf. */
+ unsigned long frame = kv_to_mfn(mygdt);
+ pmap_set_page_readonly(mygdt);
+ if (hyp_set_gdt(kv_to_la(&frame), GDTSZ))
+ panic("couldn't set gdt\n");
+#endif
+#ifdef MACH_PV_PAGETABLES
+ if (hyp_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments))
+ panic("couldn't set 4gb segments vm assist");
+#if 0
+ if (hyp_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify))
+ panic("couldn't set 4gb segments vm assist notify");
+#endif
+#endif /* MACH_PV_PAGETABLES */
+
+#ifndef MACH_PV_DESCRIPTORS
+ /* Load the new GDT. */
+ {
+ struct pseudo_descriptor pdesc;
+
+ pdesc.limit = (GDTSZ * sizeof(struct real_descriptor))-1;
+ pdesc.linear_base = kvtolin(mygdt);
+ lgdt(&pdesc);
+ }
+#endif /* MACH_PV_DESCRIPTORS */
+}
+
+/*
+ * Reload the segment registers after a new GDT has been installed.
+ * On x86_64 the body is compiled out entirely, so this is a no-op
+ * there; only the 32-bit kernel needs the explicit reloads.
+ */
+static void
+reload_segs(void)
+{
+ /* Reload all the segment registers from the new GDT.
+ We must load ds and es with 0 before loading them with KERNEL_DS
+ because some processors will "optimize out" the loads
+ if the previous selector values happen to be the same. */
+#ifndef __x86_64__
+ /* The far jump reloads %cs; %gs ends up with PERCPU_DS rather than
+ KERNEL_DS so per-CPU data stays reachable via %gs. */
+ asm volatile("ljmp %0,$1f\n"
+ "1:\n"
+ "movw %w2,%%ds\n"
+ "movw %w2,%%es\n"
+ "movw %w2,%%fs\n"
+ "movw %w2,%%gs\n"
+
+ "movw %w1,%%ds\n"
+ "movw %w1,%%es\n"
+ "movw %w3,%%gs\n"
+ "movw %w1,%%ss\n"
+ : : "i" (KERNEL_CS), "r" (KERNEL_DS), "r" (0), "r" (PERCPU_DS));
+#endif
+}
+
+/*
+ * Set up and activate the bootstrap (CPU 0) GDT.
+ * On Xen PV pagetables, address-translation globals are rebased here
+ * because the new segmentation changes the kernel's linear mapping.
+ */
+void
+gdt_init(void)
+{
+ gdt_fill(0, gdt);
+
+ reload_segs();
+
+#ifdef MACH_PV_PAGETABLES
+#if VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
+ /* things now get shifted */
+#ifdef MACH_PSEUDO_PHYS
+ pfn_list = (void*) pfn_list + VM_MIN_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS;
+#endif /* MACH_PSEUDO_PHYS */
+ la_shift += LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
+#endif
+#endif /* MACH_PV_PAGETABLES */
+}
+
+#if NCPUS > 1
+/*
+ * Set up and activate the GDT for application processor `cpu',
+ * using that CPU's dedicated table from mp_gdt.
+ */
+void
+ap_gdt_init(int cpu)
+{
+ gdt_fill(cpu, mp_gdt[cpu]);
+
+ reload_segs();
+}
+#endif
diff --git a/i386/i386/gdt.h b/i386/i386/gdt.h
new file mode 100644
index 0000000..c7da012
--- /dev/null
+++ b/i386/i386/gdt.h
@@ -0,0 +1,121 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON, IBM, AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON, IBM, AND CSL DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_GDT_
+#define _I386_GDT_
+
+#include "seg.h"
+
+/*
+ * Kernel descriptors for Mach - 32-bit flat address space.
+ */
+#define KERNEL_CS (0x08 | KERNEL_RING) /* kernel code */
+#define KERNEL_DS (0x10 | KERNEL_RING) /* kernel data */
+
+
+#ifndef MACH_PV_DESCRIPTORS
+#define KERNEL_LDT 0x18 /* master LDT */
+#endif /* MACH_PV_DESCRIPTORS */
+
+#ifdef __x86_64__
+/* LDT needs two entries */
+#define KERNEL_TSS 0x40 /* master TSS (uniprocessor) */
+#else
+#define KERNEL_TSS 0x20 /* master TSS (uniprocessor) */
+#endif
+
+
+#define USER_LDT 0x28 /* place for per-thread LDT */
+
+#ifdef __x86_64__
+/* LDT needs two entries */
+#define USER_TSS 0x58 /* place for per-thread TSS
+ that holds IO bitmap */
+#else
+#define USER_TSS 0x30 /* place for per-thread TSS
+ that holds IO bitmap */
+#endif
+
+
+#ifndef MACH_PV_DESCRIPTORS
+#define LINEAR_DS 0x38 /* linear mapping */
+#endif /* MACH_PV_DESCRIPTORS */
+
+/* 0x40 was USER_FPREGS, now used by TSS in 64bit mode */
+
+#define USER_GDT 0x48 /* user-defined 32bit GDT entries */
+#define USER_GDT_SLOTS 2
+
+/* 0x58 used by user TSS in 64bit mode */
+
+#define PERCPU_DS 0x68 /* per-cpu data mapping */
+
+#define GDTSZ sel_idx(0x70)
+
+#ifndef __ASSEMBLER__
+
+extern struct real_descriptor gdt[GDTSZ];
+
+/* Fill a segment descriptor in the GDT. */
+#define _fill_gdt_descriptor(_gdt, segment, base, limit, access, sizebits) \
+ fill_descriptor(&_gdt[sel_idx(segment)], base, limit, access, sizebits)
+
+#define fill_gdt_descriptor(segment, base, limit, access, sizebits) \
+ _fill_gdt_descriptor(gdt, segment, base, limit, access, sizebits)
+
+/* 64bit variant */
+#ifdef __x86_64__
+#define _fill_gdt_descriptor64(_gdt, segment, base, limit, access, sizebits) \
+ fill_descriptor64((struct real_descriptor64 *) &_gdt[sel_idx(segment)], base, limit, access, sizebits)
+
+#define fill_gdt_descriptor64(segment, base, limit, access, sizebits) \
+ _fill_gdt_descriptor64(gdt, segment, base, limit, access, sizebits)
+#endif
+
+/* System descriptor variants */
+#ifdef __x86_64__
+#define _fill_gdt_sys_descriptor(_gdt, segment, base, limit, access, sizebits) \
+ _fill_gdt_descriptor64(_gdt, segment, base, limit, access, sizebits)
+#define fill_gdt_sys_descriptor(segment, base, limit, access, sizebits) \
+ fill_gdt_descriptor64(segment, base, limit, access, sizebits)
+#else
+#define _fill_gdt_sys_descriptor(_gdt, segment, base, limit, access, sizebits) \
+ _fill_gdt_descriptor(_gdt, segment, base, limit, access, sizebits)
+#define fill_gdt_sys_descriptor(segment, base, limit, access, sizebits) \
+ fill_gdt_descriptor(segment, base, limit, access, sizebits)
+#endif
+
+extern void gdt_init(void);
+extern void ap_gdt_init(int cpu);
+
+#endif /* __ASSEMBLER__ */
+#endif /* _I386_GDT_ */
diff --git a/i386/i386/hardclock.c b/i386/i386/hardclock.c
new file mode 100644
index 0000000..9ac4f51
--- /dev/null
+++ b/i386/i386/hardclock.c
@@ -0,0 +1,81 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Clock interrupt.
+ */
+#include <mach/machine/eflags.h>
+
+#include <kern/mach_clock.h>
+#include <i386/thread.h>
+#include <i386/hardclock.h>
+
+#if defined(AT386) || defined(ATX86_64)
+#include <i386/ipl.h>
+#endif
+
+#ifdef LINUX_DEV
+#include <linux/dev/glue/glue.h>
+#endif
+
+extern char return_to_iret[];
+
+/*
+ * Clock (timer) interrupt handler.
+ *
+ * iunit: interrupt 'unit' number (unused here).
+ * old_ipl: interrupt priority level before this interrupt.
+ * ret_addr: return address inside the interrupt glue; comparing it
+ * against return_to_iret distinguishes an interrupt taken
+ * from a thread from one taken on the interrupt stack.
+ * regs: saved register state at the point of interruption.
+ */
+void
+hardclock(int iunit, /* 'unit' number */
+ int old_ipl, /* old interrupt level */
+ const char *ret_addr, /* return address in interrupt handler */
+ struct i386_interrupt_state *regs /* saved registers */
+ )
+{
+ if (ret_addr == return_to_iret)
+ /*
+ * Interrupt from user mode or from thread stack.
+ */
+ clock_interrupt(tick, /* usec per tick */
+ (regs->efl & EFL_VM) || /* user mode */
+ ((regs->cs & 0x03) != 0), /* user mode */
+#if defined(LINUX_DEV)
+ FALSE, /* ignore SPL0 */
+#else /* LINUX_DEV */
+ old_ipl == SPL0, /* base priority */
+#endif /* LINUX_DEV */
+ regs->eip); /* interrupted eip */
+ else
+ /*
+ * Interrupt from interrupt stack.
+ */
+ clock_interrupt(tick, /* usec per tick */
+ FALSE, /* kernel mode */
+ FALSE, /* not SPL0 */
+ 0); /* interrupted eip */
+
+#ifdef LINUX_DEV
+ /* Drive the Linux device-glue timer machinery as well. */
+ linux_timer_intr();
+#endif /* LINUX_DEV */
+}
diff --git a/i386/i386/hardclock.h b/i386/i386/hardclock.h
new file mode 100644
index 0000000..b326c3c
--- /dev/null
+++ b/i386/i386/hardclock.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _I386_HARDCLOCK_H_
+#define _I386_HARDCLOCK_H_
+
+void hardclock(
+ int iunit,
+ int old_ipl,
+ const char *ret_addr,
+ struct i386_interrupt_state *regs);
+
+#endif /* _I386_HARDCLOCK_H_ */
diff --git a/i386/i386/i386asm.sym b/i386/i386/i386asm.sym
new file mode 100644
index 0000000..e1f5c6b
--- /dev/null
+++ b/i386/i386/i386asm.sym
@@ -0,0 +1,194 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * Pass field offsets to assembly code.
+ */
+#include <sys/reboot.h>
+
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <kern/syscall_emulation.h>
+#include <i386/thread.h>
+#include <i386/pmap.h>
+#include <i386/vm_param.h>
+#include <i386/seg.h>
+#include <i386/tss.h>
+#include <i386at/idt.h>
+#include <i386/gdt.h>
+#include <i386/ldt.h>
+#include <i386/mp_desc.h>
+#include <i386/apic.h>
+#include <i386/xen.h>
+
+expr CALL_AST_CHECK
+expr CALL_PMAP_UPDATE
+
+offset ApicLocalUnit lu apic_id APIC_ID
+
+offset percpu pc cpu_id PERCPU_CPU_ID
+offset percpu pc active_thread PERCPU_ACTIVE_THREAD
+offset percpu pc active_stack PERCPU_ACTIVE_STACK
+
+offset pcb pcb iss
+
+size percpu pc
+
+offset thread th pcb
+offset thread th task
+offset thread th recover
+offset thread th kernel_stack
+offset thread th swap_func
+
+offset task task eml_dispatch TASK_EMUL
+
+offset eml_dispatch eml disp_min DISP_MIN
+offset eml_dispatch eml disp_count DISP_COUNT
+offset eml_dispatch eml disp_vector DISP_VECTOR
+
+expr &STACK_IKS(0)->k_ebx KSS_EBX
+expr &STACK_IKS(0)->k_esp KSS_ESP
+expr &STACK_IKS(0)->k_ebp KSS_EBP
+#ifdef __i386__
+expr &STACK_IKS(0)->k_esi KSS_ESI
+expr &STACK_IKS(0)->k_edi KSS_EDI
+#endif
+expr &STACK_IKS(0)->k_eip KSS_EIP
+#ifdef __x86_64__
+expr &STACK_IKS(0)->k_r12 KSS_R12
+expr &STACK_IKS(0)->k_r13 KSS_R13
+expr &STACK_IKS(0)->k_r14 KSS_R14
+expr &STACK_IKS(0)->k_r15 KSS_R15
+#endif
+size i386_kernel_state iks
+
+size i386_exception_link iel
+
+#if !defined(__x86_64__) || defined(USER32)
+offset i386_saved_state r gs
+offset i386_saved_state r fs
+#endif
+offset i386_saved_state r cs
+offset i386_saved_state r uesp
+offset i386_saved_state r eax
+offset i386_saved_state r ebx
+offset i386_saved_state r ecx
+offset i386_saved_state r edx
+offset i386_saved_state r ebp
+offset i386_saved_state r trapno
+offset i386_saved_state r err
+offset i386_saved_state r efl R_EFLAGS
+offset i386_saved_state r eip
+offset i386_saved_state r cr2
+offset i386_saved_state r edi
+offset i386_saved_state r esi
+#ifdef __x86_64__
+offset i386_saved_state r r8
+offset i386_saved_state r r9
+offset i386_saved_state r r10
+offset i386_saved_state r r12
+offset i386_saved_state r r13
+offset i386_saved_state r r14
+offset i386_saved_state r r15
+#endif
+
+offset i386_interrupt_state i eip
+offset i386_interrupt_state i cs
+offset i386_interrupt_state i efl
+
+#ifdef __x86_64__
+offset i386_tss tss rsp0
+#else
+offset i386_tss tss esp0
+offset i386_tss tss ss0
+#endif
+
+offset machine_slot sub_type cpu_type
+
+expr I386_PGBYTES NBPG
+expr VM_MIN_ADDRESS
+expr VM_MAX_ADDRESS
+expr VM_MIN_KERNEL_ADDRESS KERNELBASE
+expr KERNEL_STACK_SIZE
+#if defined MACH_PSEUDO_PHYS && (VM_MIN_KERNEL_ADDRESS == LINEAR_MIN_KERNEL_ADDRESS)
+expr PFN_LIST pfn_list
+#endif
+
+#if PAE
+expr PDPSHIFT
+#endif /* PAE */
+expr PDESHIFT
+expr PDEMASK
+expr PTESHIFT
+expr PTEMASK
+
+expr sizeof(pt_entry_t) PTE_SIZE
+
+expr INTEL_PTE_PFN PTE_PFN
+expr INTEL_PTE_VALID PTE_V
+expr INTEL_PTE_WRITE PTE_W
+expr INTEL_PTE_PS PTE_S
+expr ~INTEL_PTE_VALID PTE_INVALID
+expr NPTES PTES_PER_PAGE
+expr INTEL_PTE_VALID|INTEL_PTE_WRITE INTEL_PTE_KERNEL
+
+expr IDTSZ
+
+expr KERNEL_RING
+
+expr (VM_MIN_KERNEL_ADDRESS>>PDESHIFT)*sizeof(pt_entry_t) KERNELBASEPDE
+
+#if MACH_KDB
+expr RB_KDB
+#endif /* MACH_KDB */
+
+expr INTSTACK_SIZE
+
+#if !STAT_TIME
+offset timer tm low_bits LOW_BITS
+offset timer tm high_bits HIGH_BITS
+offset timer tm high_bits_check HIGH_BITS_CHECK
+expr TIMER_HIGH_UNIT
+offset thread th system_timer
+offset thread th user_timer
+#endif
+
+#ifdef MACH_XEN
+offset shared_info si vcpu_info[0].evtchn_upcall_mask CPU_CLI
+offset shared_info si vcpu_info[0].evtchn_upcall_pending CPU_PENDING
+offset shared_info si vcpu_info[0].evtchn_pending_sel CPU_PENDING_SEL
+offset shared_info si evtchn_pending PENDING
+offset shared_info si evtchn_mask EVTMASK
+#ifdef MACH_PV_PAGETABLES
+offset shared_info si vcpu_info[0].arch.cr2 CR2
+#endif /* MACH_PV_PAGETABLES */
+#endif /* MACH_XEN */
+
+offset mach_msg_header msgh msgh_size
diff --git a/i386/i386/idt-gen.h b/i386/i386/idt-gen.h
new file mode 100644
index 0000000..daa6aaf
--- /dev/null
+++ b/i386/i386/idt-gen.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#ifndef _I386_IDT_
+#define _I386_IDT_
+
+#include <mach/vm_param.h>
+
+#include "seg.h"
+
+/*
+ * Interrupt table must always be at least 32 entries long,
+ * to cover the basic i386 exception vectors.
+ * More-specific code will probably define it to be longer,
+ * to allow separate entrypoints for hardware interrupts.
+ */
+#ifndef IDTSZ
+#error you need to define IDTSZ
+#endif
+
+extern struct real_gate idt[IDTSZ];
+
+/* Fill a gate in the IDT. */
+#define fill_idt_gate(_idt, int_num, entry, selector, access, dword_count) \
+ fill_gate(&_idt[int_num], entry, selector, access, dword_count)
+
+#endif /* _I386_IDT_ */
diff --git a/i386/i386/idt.c b/i386/i386/idt.c
new file mode 100644
index 0000000..caa44d7
--- /dev/null
+++ b/i386/i386/idt.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+
+#include <i386/vm_param.h>
+#include <i386/seg.h>
+#include <i386at/idt.h>
+#include <i386/gdt.h>
+#include <i386/mp_desc.h>
+
+struct real_gate idt[IDTSZ];
+
+struct idt_init_entry
+{
+ unsigned long entrypoint;
+ unsigned short vector;
+ unsigned short type;
+#ifdef __x86_64__
+ unsigned short ist;
+ unsigned short pad_0;
+#endif
+};
+extern struct idt_init_entry idt_inittab[];
+
+/*
+ * Populate `myidt' from the idt_inittab exception table and load it
+ * into the processor (or hand the table to the hypervisor on Xen
+ * with MACH_PV_DESCRIPTORS).
+ */
+static void
+idt_fill(struct real_gate *myidt)
+{
+#ifdef MACH_PV_DESCRIPTORS
+ if (hyp_set_trap_table(kvtolin(idt_inittab)))
+ panic("couldn't set trap table\n");
+#else /* MACH_PV_DESCRIPTORS */
+ struct idt_init_entry *iie = idt_inittab;
+
+ /* Initialize the exception vectors from the idt_inittab.
+ The table is terminated by an entry with a zero entrypoint. */
+ while (iie->entrypoint)
+ {
+ fill_idt_gate(myidt, iie->vector, iie->entrypoint, KERNEL_CS, iie->type,
+#ifdef __x86_64__
+ iie->ist
+#else
+ 0
+#endif
+ );
+ iie++;
+ }
+
+ /* Load the IDT pointer into the processor. */
+ {
+ struct pseudo_descriptor pdesc;
+
+ pdesc.limit = (IDTSZ * sizeof(struct real_gate))-1;
+ pdesc.linear_base = kvtolin(myidt);
+ lidt(&pdesc);
+ }
+#endif /* MACH_PV_DESCRIPTORS */
+}
+
+/* Set up and load the bootstrap processor's IDT. */
+void idt_init(void)
+{
+ idt_fill(idt);
+}
+
+#if NCPUS > 1
+/* Set up and load the IDT for application processor `cpu'. */
+void ap_idt_init(int cpu)
+{
+ idt_fill(mp_desc_table[cpu]->idt);
+}
+#endif
diff --git a/i386/i386/idt_inittab.S b/i386/i386/idt_inittab.S
new file mode 100644
index 0000000..fc80e21
--- /dev/null
+++ b/i386/i386/idt_inittab.S
@@ -0,0 +1,140 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#include <mach/machine/asm.h>
+
+#include <i386/seg.h>
+#include <i386/i386asm.h>
+#include <i386/gdt.h>
+
+
+/* We'll be using macros to fill in a table in data hunk 2
+ while writing trap entrypoint routines at the same time.
+ Here's the header that comes before everything else. */
+ .data 2
+ENTRY(idt_inittab)
+ .text
+
+/*
+ * Interrupt descriptor table and code vectors for it.
+ */
+#ifdef MACH_PV_DESCRIPTORS
+#define IDT_ENTRY(n,entry,type) \
+ .data 2 ;\
+ .byte n ;\
+ .byte (((type)&ACC_PL)>>5)|((((type)&(ACC_TYPE|ACC_A))==ACC_INTR_GATE)<<2) ;\
+ .word KERNEL_CS ;\
+ .long entry ;\
+ .text
+#else /* MACH_PV_DESCRIPTORS */
+#define IDT_ENTRY(n,entry,type) \
+ .data 2 ;\
+ .long entry ;\
+ .word n ;\
+ .word type ;\
+ .text
+#endif /* MACH_PV_DESCRIPTORS */
+
+/*
+ * No error code. Clear error code and push trap number.
+ */
+#define EXCEPTION(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_TRAP_GATE);\
+ENTRY(name) ;\
+ pushl $(0) ;\
+ pushl $(n) ;\
+ jmp EXT(alltraps)
+
+/*
+ * User-accessible exception. Otherwise, same as above.
+ */
+#define EXCEP_USR(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_U|ACC_TRAP_GATE);\
+ENTRY(name) ;\
+ pushl $(0) ;\
+ pushl $(n) ;\
+ jmp EXT(alltraps)
+
+/*
+ * Error code has been pushed. Just push trap number.
+ */
+#define EXCEP_ERR(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_INTR_GATE);\
+ENTRY(name) ;\
+ pushl $(n) ;\
+ jmp EXT(alltraps)
+
+/*
+ * Special interrupt code: dispatches to a unique entrypoint,
+ * not defined automatically here.
+ */
+#define EXCEP_SPC(n,name) \
+ IDT_ENTRY(n,EXT(name),ACC_PL_K|ACC_TRAP_GATE)
+
+
+EXCEPTION(0x00,t_zero_div)
+EXCEP_SPC(0x01,t_debug)
+/* skip NMI interrupt - let more specific code figure that out. */
+EXCEP_USR(0x03,t_int3)
+EXCEP_USR(0x04,t_into)
+EXCEP_USR(0x05,t_bounds)
+EXCEPTION(0x06,t_invop)
+EXCEPTION(0x07,t_nofpu)
+EXCEPTION(0x08,a_dbl_fault)
+EXCEPTION(0x09,a_fpu_over)
+EXCEPTION(0x0a,a_inv_tss)
+EXCEP_SPC(0x0b,t_segnp)
+EXCEP_ERR(0x0c,t_stack_fault)
+EXCEP_SPC(0x0d,t_gen_prot)
+EXCEP_SPC(0x0e,t_page_fault)
+#ifdef MACH_PV_DESCRIPTORS
+EXCEP_ERR(0x0f,t_trap_0f)
+#else
+EXCEPTION(0x0f,t_trap_0f)
+#endif
+EXCEPTION(0x10,t_fpu_err)
+EXCEPTION(0x11,t_trap_11)
+EXCEPTION(0x12,t_trap_12)
+EXCEPTION(0x13,t_trap_13)
+EXCEPTION(0x14,t_trap_14)
+EXCEPTION(0x15,t_trap_15)
+EXCEPTION(0x16,t_trap_16)
+EXCEPTION(0x17,t_trap_17)
+EXCEPTION(0x18,t_trap_18)
+EXCEPTION(0x19,t_trap_19)
+EXCEPTION(0x1a,t_trap_1a)
+EXCEPTION(0x1b,t_trap_1b)
+EXCEPTION(0x1c,t_trap_1c)
+EXCEPTION(0x1d,t_trap_1d)
+EXCEPTION(0x1e,t_trap_1e)
+EXCEPTION(0x1f,t_trap_1f)
+
+/* Terminator */
+ .data 2
+ .long 0
+#ifdef MACH_PV_DESCRIPTORS
+ .long 0
+#endif /* MACH_PV_DESCRIPTORS */
+
diff --git a/i386/i386/io_perm.c b/i386/i386/io_perm.c
new file mode 100644
index 0000000..aabff49
--- /dev/null
+++ b/i386/i386/io_perm.c
@@ -0,0 +1,329 @@
+/* Manipulate I/O permission bitmap objects.
+
+ Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+
+ Written by Marcus Brinkmann. Glued into GNU Mach by Thomas Schwinge.
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <kern/slab.h>
+#include <kern/kalloc.h>
+#include <kern/lock.h>
+#include <kern/queue.h>
+#include <kern/thread.h>
+
+#include <device/dev_hdr.h>
+#include <device/device_emul.h>
+#include <device/device_port.h>
+
+#include <i386/i386/mach_i386.server.h>
+
+#include "io_perm.h"
+#include "gdt.h"
+#include "pcb.h"
+
+#define PCI_CFG1_START 0xcf8
+#define PCI_CFG1_END 0xcff
+
+#define CONTAINS_PCI_CFG(from, to) \
+ ( ( from <= PCI_CFG1_END ) && ( to >= PCI_CFG1_START ) )
+
+
+/* Our device emulation ops. See below, at the bottom of this file. */
+static struct device_emulation_ops io_perm_device_emulation_ops;
+
+/* Flag to hold PCI io cfg access lock */
+static boolean_t taken_pci_cfg = FALSE;
+
+/* The outtran which allows MIG to convert an io_perm_t object to a port
+ representing it. */
+ipc_port_t
+convert_io_perm_to_port (io_perm_t io_perm)
+{
+ if (io_perm == IO_PERM_NULL)
+ return IP_NULL;
+
+ ipc_port_t port;
+
+ port = ipc_port_make_send (io_perm->port);
+
+ return port;
+}
+
+
+/* The intran which allows MIG to convert a port representing an
+ io_perm_t object to the object itself. */
+io_perm_t
+convert_port_to_io_perm (ipc_port_t port)
+{
+ device_t device;
+
+ device = dev_port_lookup (port);
+
+ if (device == DEVICE_NULL)
+ return IO_PERM_NULL;
+
+ io_perm_t io_perm;
+
+ io_perm = device->emul_data;
+
+ return io_perm;
+}
+
+/* The destructor which is called when the last send right to a port
+ representing an io_perm_t object vanishes. */
+void
+io_perm_deallocate (io_perm_t io_perm)
+{
+ /* We need to check if the io_perm was a PCI cfg one and release it */
+ if (CONTAINS_PCI_CFG(io_perm->from, io_perm->to))
+ taken_pci_cfg = FALSE;
+}
+
+/* Our ``no senders'' handling routine. Deallocate the object. */
+static
+void
+no_senders (mach_no_senders_notification_t *notification)
+{
+ io_perm_t io_perm;
+
+ io_perm = convert_port_to_io_perm
+ ((ipc_port_t) notification->not_header.msgh_remote_port);
+
+ assert (io_perm != IO_PERM_NULL);
+
+ ipc_kobject_set (io_perm->port, IKO_NULL, IKOT_NONE);
+ ipc_port_dealloc_kernel (io_perm->port);
+
+ kfree ((vm_offset_t) io_perm, sizeof *io_perm);
+}
+
+
+/* Initialize bitmap by setting all bits to OFF == 1. */
+static inline void
+io_bitmap_init (unsigned char *iopb)
+{
+ memset (iopb, ~0, IOPB_BYTES);
+}
+
+
+/* Set selected bits in bitmap to ON == 0. */
+static inline void
+io_bitmap_set (unsigned char *iopb, io_port_t from, io_port_t to)
+{
+ do
+ iopb[from >> 3] &= ~(1 << (from & 0x7));
+ while (from++ != to);
+}
+
+
+/* Set selected bits in bitmap to OFF == 1. */
+static inline void
+io_bitmap_clear (unsigned char *iopb, io_port_t from, io_port_t to)
+{
+ do
+ iopb[from >> 3] |= (1 << (from & 0x7));
+ while (from++ != to);
+}
+
+
+/* Request a new port IO_PERM that represents the capability to access
+ the I/O ports [FROM; TO] directly. MASTER_PORT is the master device port.
+
+ The function returns KERN_INVALID_ARGUMENT if TARGET_TASK is not a task,
+ or FROM is greater than TO.
+
+ The function is exported. */
+kern_return_t
+i386_io_perm_create (const ipc_port_t master_port, io_port_t from, io_port_t to,
+ io_perm_t *new)
+{
+ if (master_port != master_device_port)
+ return KERN_INVALID_ARGUMENT;
+
+ /* We do not have to check FROM and TO for the limits [0;IOPB_MAX], as
+ they're short integers and all values are within these very limits. */
+ if (from > to)
+ return KERN_INVALID_ARGUMENT;
+
+ /* Only one process may take a range that includes PCI cfg registers */
+ if (taken_pci_cfg && CONTAINS_PCI_CFG(from, to))
+ return KERN_PROTECTION_FAILURE;
+
+ io_perm_t io_perm;
+
+ io_perm = (io_perm_t) kalloc (sizeof *io_perm);
+ if (io_perm == NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ io_perm->from = from;
+ io_perm->to = to;
+
+ io_perm->port = ipc_port_alloc_kernel ();
+ if (io_perm->port == IP_NULL)
+ {
+ kfree ((vm_offset_t) io_perm, sizeof *io_perm);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /* Set up the dummy device. */
+ ipc_kobject_set(io_perm->port,
+ (ipc_kobject_t) &io_perm->device, IKOT_DEVICE);
+ io_perm->device.emul_data = io_perm;
+ io_perm->device.emul_ops = &io_perm_device_emulation_ops;
+
+ ipc_port_t notify;
+
+ notify = ipc_port_make_sonce(io_perm->port);
+ ip_lock(io_perm->port);
+ ipc_port_nsrequest(io_perm->port, 1, notify, &notify);
+ assert(notify == IP_NULL);
+
+ *new = io_perm;
+
+ if (CONTAINS_PCI_CFG(from, to))
+ taken_pci_cfg = TRUE;
+
+ return KERN_SUCCESS;
+}
+
+/* Modify the I/O permissions for TARGET_TASK. If ENABLE is TRUE, the
+ permission to access the I/O ports specified by IO_PERM is granted,
+ otherwise it is withdrawn.
+
+ The function returns KERN_INVALID_ARGUMENT if TARGET_TASK is not a valid
+ task or IO_PERM not a valid I/O permission port.
+
+ The function is exported. */
+kern_return_t
+i386_io_perm_modify (task_t target_task, io_perm_t io_perm, boolean_t enable)
+{
+ io_port_t from, to;
+ unsigned char *iopb;
+ io_port_t iopb_size;
+
+ if (target_task == TASK_NULL || io_perm == IO_PERM_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ from = io_perm->from;
+ to = io_perm->to;
+
+ simple_lock (&target_task->machine.iopb_lock);
+ iopb = target_task->machine.iopb;
+ iopb_size = target_task->machine.iopb_size;
+
+ if (!enable && !iopb_size)
+ {
+ simple_unlock (&target_task->machine.iopb_lock);
+ return KERN_SUCCESS;
+ }
+
+ if (!iopb)
+ {
+ simple_unlock (&target_task->machine.iopb_lock);
+ iopb = (unsigned char *) kmem_cache_alloc (&machine_task_iopb_cache);
+ simple_lock (&target_task->machine.iopb_lock);
+ if (target_task->machine.iopb)
+ {
+ if (iopb)
+ kmem_cache_free (&machine_task_iopb_cache, (vm_offset_t) iopb);
+ iopb = target_task->machine.iopb;
+ iopb_size = target_task->machine.iopb_size;
+ }
+ else if (iopb)
+ {
+ target_task->machine.iopb = iopb;
+ io_bitmap_init (iopb);
+ }
+ else
+ {
+ simple_unlock (&target_task->machine.iopb_lock);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+ }
+
+ if (enable)
+ {
+ io_bitmap_set (iopb, from, to);
+ if ((to >> 3) + 1 > iopb_size)
+ target_task->machine.iopb_size = (to >> 3) + 1;
+ }
+ else
+ {
+ if ((from >> 3) + 1 > iopb_size)
+ {
+ simple_unlock (&target_task->machine.iopb_lock);
+ return KERN_SUCCESS;
+ }
+
+ io_bitmap_clear (iopb, from, to);
+ while (iopb_size > 0 && iopb[iopb_size - 1] == 0xff)
+ iopb_size--;
+ target_task->machine.iopb_size = iopb_size;
+ }
+
+#if NCPUS>1
+#warning SMP support missing (notify all CPUs running threads in that of the I/O bitmap change).
+#endif
+ if (target_task == current_task())
+ update_ktss_iopb (iopb, target_task->machine.iopb_size);
+
+ simple_unlock (&target_task->machine.iopb_lock);
+ return KERN_SUCCESS;
+}
+
+/* We are some sort of Mach device... */
+static struct device_emulation_ops io_perm_device_emulation_ops =
+{
+ /* ... in order to be easily able to receive a ``no senders'' notification
+ which we then use to deallocate ourselves. */
+ .no_senders = no_senders
+};
diff --git a/i386/i386/io_perm.h b/i386/i386/io_perm.h
new file mode 100644
index 0000000..b97cf97
--- /dev/null
+++ b/i386/i386/io_perm.h
@@ -0,0 +1,63 @@
+/* Data types for I/O permission bitmap objects.
+
+ Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+
+ Written by Marcus Brinkmann. Glued into GNU Mach by Thomas Schwinge.
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef _I386_IO_PERM_H_
+#define _I386_IO_PERM_H_
+
+#include <device/dev_hdr.h>
+#include <ipc/ipc_types.h>
+
+
+/* The highest possible I/O port. */
+#define IOPB_MAX 0xffff
+
+/* The number of bytes needed to hold all permission bits. */
+#define IOPB_BYTES (((IOPB_MAX + 1) + 7) / 8)
+
+/* An offset that points outside of the permission bitmap, used to
+   disable all permissions.  */
+#define IOPB_INVAL 0x2fff
+
+
+/* The type of an I/O port address. */
+typedef unsigned short io_port_t;
+
+
+struct io_perm
+{
+ /* We use a ``struct device'' for easy management. */
+ struct device device;
+
+ ipc_port_t port;
+
+ io_port_t from, to;
+};
+
+typedef struct io_perm *io_perm_t;
+
+#define IO_PERM_NULL ((io_perm_t) 0)
+
+extern io_perm_t convert_port_to_io_perm (ipc_port_t);
+extern ipc_port_t convert_io_perm_to_port (io_perm_t);
+extern void io_perm_deallocate (io_perm_t);
+
+#endif /* _I386_IO_PERM_H_ */
diff --git a/i386/i386/ipl.h b/i386/i386/ipl.h
new file mode 100644
index 0000000..6e59b36
--- /dev/null
+++ b/i386/i386/ipl.h
@@ -0,0 +1,83 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _I386_IPL_H_
+#define _I386_IPL_H_
+
+#define SPL0 0
+#define SPL1 1
+#define SPL2 2
+#define SPL3 3
+#define SPL4 4
+#define SPL5 5
+#define SPL6 6
+#define SPL7 7
+
+#define SPLPP 5
+#define SPLTTY 6
+#define SPLNI 6
+#define SPLHI 7
+#define IPLHI SPLHI
+
+#define NSPL (SPL7 + 1)
+
+#ifdef KERNEL
+#ifndef __ASSEMBLER__
+#include <machine/machspl.h>
+/* Note that interrupts have varying signatures */
+typedef void (*interrupt_handler_fn)(int);
+extern interrupt_handler_fn ivect[];
+extern int iunit[];
+extern spl_t curr_ipl[NCPUS];
+#endif /* __ASSEMBLER__ */
+#endif /* KERNEL */
+
+#endif /* _I386_IPL_H_ */
diff --git a/i386/i386/irq.c b/i386/i386/irq.c
new file mode 100644
index 0000000..a7c9889
--- /dev/null
+++ b/i386/i386/irq.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 1995 Shantanu Goel
+ * Copyright (C) 2020 Free Software Foundation, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <i386/irq.h>
+#include <device/intr.h>
+#include <mach/kern_return.h>
+#include <kern/queue.h>
+#include <kern/assert.h>
+#include <machine/machspl.h>
+
+extern queue_head_t main_intr_queue;
+
+static void
+irq_eoi (struct irqdev *dev, int id)
+{
+#ifdef APIC
+ ioapic_irq_eoi (dev->irq[id]);
+#endif
+}
+
+static unsigned int ndisabled_irq[NINTR];
+
+void
+__disable_irq (irq_t irq_nr)
+{
+ assert (irq_nr < NINTR);
+
+ spl_t s = splhigh();
+ ndisabled_irq[irq_nr]++;
+ assert (ndisabled_irq[irq_nr] > 0);
+ if (ndisabled_irq[irq_nr] == 1)
+ mask_irq (irq_nr);
+ splx(s);
+}
+
+void
+__enable_irq (irq_t irq_nr)
+{
+ assert (irq_nr < NINTR);
+
+ spl_t s = splhigh();
+ assert (ndisabled_irq[irq_nr] > 0);
+ ndisabled_irq[irq_nr]--;
+ if (ndisabled_irq[irq_nr] == 0)
+ unmask_irq (irq_nr);
+ splx(s);
+}
+
+struct irqdev irqtab = {
+ "irq", irq_eoi, &main_intr_queue, 0,
+#ifdef APIC
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
+#else
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+#endif
+};
+
diff --git a/i386/i386/irq.h b/i386/i386/irq.h
new file mode 100644
index 0000000..72bbe57
--- /dev/null
+++ b/i386/i386/irq.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2020 Free Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+#ifndef _I386_IRQ_H
+#define _I386_IRQ_H
+
+#ifdef APIC
+# include <i386/apic.h>
+#else
+# include <i386/pic.h>
+#endif
+
+typedef unsigned int irq_t;
+
+void __enable_irq (irq_t irq);
+void __disable_irq (irq_t irq);
+
+extern struct irqdev irqtab;
+
+#endif
diff --git a/i386/i386/ktss.c b/i386/i386/ktss.c
new file mode 100644
index 0000000..34cb6df
--- /dev/null
+++ b/i386/i386/ktss.c
@@ -0,0 +1,92 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Kernel task state segment.
+ *
+ * We don't use the i386 task switch mechanism. We need a TSS
+ * only to hold the kernel stack pointer for the current thread.
+ *
+ * XXX multiprocessor??
+ */
+#include "vm_param.h"
+#include "seg.h"
+#include "gdt.h"
+#include "ktss.h"
+#include "mp_desc.h"
+
+/* A kernel TSS with a complete I/O bitmap. */
+struct task_tss ktss;
+
+void
+ktss_fill(struct task_tss *myktss, struct real_descriptor *mygdt)
+{
+ /* XXX temporary exception stacks */
+ /* FIXME: make it per-processor */
+ static int exception_stack[1024];
+ static int double_fault_stack[1024];
+
+#ifdef MACH_RING1
+ /* Xen won't allow us to do any I/O by default anyway, just register
+ * exception stack */
+ if (hyp_stack_switch(KERNEL_DS, (unsigned long)(exception_stack+1024)))
+ panic("couldn't register exception stack\n");
+#else /* MACH_RING1 */
+ /* Initialize the master TSS descriptor. */
+ _fill_gdt_sys_descriptor(mygdt, KERNEL_TSS,
+ kvtolin(myktss), sizeof(struct task_tss) - 1,
+ ACC_PL_K|ACC_TSS, 0);
+
+ /* Initialize the master TSS. */
+#ifdef __x86_64__
+ myktss->tss.rsp0 = (unsigned long)(exception_stack+1024);
+ myktss->tss.ist1 = (unsigned long)(double_fault_stack+1024);
+#else /* ! __x86_64__ */
+ myktss->tss.ss0 = KERNEL_DS;
+ myktss->tss.esp0 = (unsigned long)(exception_stack+1024);
+#endif /* __x86_64__ */
+
+ myktss->tss.io_bit_map_offset = IOPB_INVAL;
+ /* Set the last byte in the I/O bitmap to all 1's. */
+ myktss->barrier = 0xff;
+
+ /* Load the TSS. */
+ ltr(KERNEL_TSS);
+#endif /* MACH_RING1 */
+}
+
+void
+ktss_init(void)
+{
+ ktss_fill(&ktss, gdt);
+}
+
+#if NCPUS > 1
+void
+ap_ktss_init(int cpu)
+{
+ ktss_fill(&mp_desc_table[cpu]->ktss, mp_gdt[cpu]);
+}
+#endif
diff --git a/i386/i386/ktss.h b/i386/i386/ktss.h
new file mode 100644
index 0000000..171332d
--- /dev/null
+++ b/i386/i386/ktss.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_KTSS_
+#define _I386_KTSS_
+
+#include "tss.h"
+
+extern struct task_tss ktss;
+
+extern void ktss_init(void);
+extern void ap_ktss_init(int cpu);
+
+#endif /* _I386_KTSS_ */
diff --git a/i386/i386/kttd_interface.c b/i386/i386/kttd_interface.c
new file mode 100644
index 0000000..f48fe8e
--- /dev/null
+++ b/i386/i386/kttd_interface.c
@@ -0,0 +1,574 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#if MACH_TTD
+
+#include <sys/types.h>
+#include <kern/printf.h>
+
+#include <mach/machine/eflags.h>
+
+#include <kern/thread.h>
+#include <kern/processor.h>
+#include <mach/thread_status.h>
+#include <mach/vm_param.h>
+#include <i386/seg.h>
+
+#include <ttd/ttd_types.h>
+#include <ttd/ttd_stub.h>
+#include <machine/kttd_machdep.h>
+
+/*
+ * Shamelessly copied from the ddb sources:
+ */
+struct i386_saved_state *kttd_last_saved_statep;
+struct i386_saved_state kttd_nested_saved_state;
+unsigned last_kttd_sp;
+
+struct i386_saved_state kttd_regs; /* was ddb_regs */
+
+extern int kttd_debug;
+extern boolean_t kttd_enabled;
+extern vm_offset_t virtual_end;
+
+#define I386_BREAKPOINT 0xcc
+
+/*
+ * kernel map
+ */
+extern vm_map_t kernel_map;
+
+boolean_t kttd_console_init(void)
+{
+ /*
+ * Get local machine's IP address via bootp.
+ */
+ return(ttd_ip_bootp());
+}
+
+/*
+ * Execute a break instruction that will invoke ttd
+ */
+void kttd_break(void)
+{
+ if (!kttd_enabled)
+ return;
+ asm("int3");
+}
+
+/*
+ * Halt all processors on the 386at (not really applicable).
+ */
+void kttd_halt_processors(void)
+{
+ /* XXX Fix for Sequent!!! */
+ /* Only one on AT386, so ignore for now... */
+}
+
+/*
+ * Determine whether or not the ethernet device driver supports
+ * ttd.
+ */
+boolean_t kttd_supported(void)
+{
+ return ((int)ttd_get_packet != NULL);
+}
+
+/*
+ * Return the ttd machine type for the i386at
+ */
+ttd_machine_type get_ttd_machine_type(void)
+{
+ return TTD_AT386;
+}
+
+void kttd_machine_getregs(struct i386_gdb_register_state *ttd_state)
+{
+ ttd_state->gs = kttd_regs.gs;
+ ttd_state->fs = kttd_regs.fs;
+ ttd_state->es = kttd_regs.es;
+ ttd_state->ds = kttd_regs.ds;
+ ttd_state->edi = kttd_regs.edi;
+ ttd_state->esi = kttd_regs.esi;
+ ttd_state->ebp = kttd_regs.ebp;
+
+	/*
+	 * This is set up to point to the right place in
+	 * kttd_trap.
+	 */
+ ttd_state->esp = kttd_regs.uesp;
+
+ ttd_state->ebx = kttd_regs.ebx;
+ ttd_state->edx = kttd_regs.edx;
+ ttd_state->ecx = kttd_regs.ecx;
+ ttd_state->eax = kttd_regs.eax;
+ ttd_state->eip = kttd_regs.eip;
+ ttd_state->cs = kttd_regs.cs;
+ ttd_state->efl = kttd_regs.efl;
+ ttd_state->ss = kttd_regs.ss;
+}
+
+void kttd_machine_setregs(struct i386_gdb_register_state *ttd_state)
+{
+ if (kttd_regs.gs != ttd_state->gs) {
+ if (kttd_debug)
+ printf("gs 0x%x:0x%x, ", kttd_regs.gs, ttd_state->gs);
+ kttd_regs.gs = ttd_state->gs;
+ }
+ if (kttd_regs.fs != ttd_state->fs) {
+ if (kttd_debug)
+ printf("fs 0x%x:0x%x, ", kttd_regs.fs, ttd_state->fs);
+ kttd_regs.fs = ttd_state->fs;
+ }
+ if (kttd_regs.es != ttd_state->es) {
+ if (kttd_debug)
+ printf("es 0x%x:0x%x, ", kttd_regs.es, ttd_state->es);
+ kttd_regs.es = ttd_state->es;
+ }
+ if (kttd_regs.ds != ttd_state->ds) {
+ if (kttd_debug)
+ printf("ds 0x%x:0x%x, ", kttd_regs.ds, ttd_state->ds);
+ kttd_regs.ds = ttd_state->ds;
+ }
+ if (kttd_regs.edi != ttd_state->edi) {
+ if (kttd_debug)
+ printf("edi 0x%x:0x%x, ", kttd_regs.edi, ttd_state->edi);
+ kttd_regs.edi = ttd_state->edi;
+ }
+ if (kttd_regs.esi != ttd_state->esi) {
+ if (kttd_debug)
+ printf("esi 0x%x:0x%x, ", kttd_regs.esi, ttd_state->esi);
+ kttd_regs.esi = ttd_state->esi;
+ }
+ if (kttd_regs.ebp != ttd_state->ebp) {
+ if (kttd_debug)
+ printf("ebp 0x%x:0x%x, ", kttd_regs.ebp, ttd_state->ebp);
+ kttd_regs.ebp = ttd_state->ebp;
+ }
+ if (kttd_regs.ebx != ttd_state->ebx) {
+ if (kttd_debug)
+ printf("ebx 0x%x:0x%x, ", kttd_regs.ebx, ttd_state->ebx);
+ kttd_regs.ebx = ttd_state->ebx;
+ }
+ if (kttd_regs.edx != ttd_state->edx) {
+ if (kttd_debug)
+ printf("edx 0x%x:0x%x, ", kttd_regs.edx, ttd_state->edx);
+ kttd_regs.edx = ttd_state->edx;
+ }
+ if (kttd_regs.ecx != ttd_state->ecx) {
+ if (kttd_debug)
+ printf("ecx 0x%x:0x%x, ", kttd_regs.ecx, ttd_state->ecx);
+ kttd_regs.ecx = ttd_state->ecx;
+ }
+ if (kttd_regs.eax != ttd_state->eax) {
+ if (kttd_debug)
+ printf("eax 0x%x:0x%x, ", kttd_regs.eax, ttd_state->eax);
+ kttd_regs.eax = ttd_state->eax;
+ }
+ if (kttd_regs.eip != ttd_state->eip) {
+ if (kttd_debug)
+ printf("eip 0x%x:0x%x, ", kttd_regs.eip, ttd_state->eip);
+ kttd_regs.eip = ttd_state->eip;
+ }
+ if (kttd_regs.cs != ttd_state->cs) {
+ if (kttd_debug)
+ printf("cs 0x%x:0x%x, ", kttd_regs.cs, ttd_state->cs);
+ kttd_regs.cs = ttd_state->cs;
+ }
+ if (kttd_regs.efl != ttd_state->efl) {
+ if (kttd_debug)
+ printf("efl 0x%x:0x%x, ", kttd_regs.efl, ttd_state->efl);
+ kttd_regs.efl = ttd_state->efl;
+ }
+#if 0
+ /*
+ * We probably shouldn't mess with the uesp or the ss? XXX
+ */
+ if (kttd_regs.ss != ttd_state->ss) {
+ if (kttd_debug)
+ printf("ss 0x%x:0x%x, ", kttd_regs.ss, ttd_state->ss);
+ kttd_regs.ss = ttd_state->ss;
+ }
+#endif /* 0 */
+
+}
+
+/*
+ * Enable a page for access, faulting it in if necessary
+ */
+boolean_t kttd_mem_access(vm_offset_t offset, vm_prot_t access)
+{
+ kern_return_t code;
+
+	/*
+	 * VM_MIN_KERNEL_ADDRESS is the beginning of equiv
+	 * mapped kernel memory.  virtual_end is the end.
+	 * If it's in between it's always accessible
+	 */
+ if (offset >= VM_MIN_KERNEL_ADDRESS && offset < virtual_end)
+ return TRUE;
+
+ if (offset >= virtual_end) {
+ /*
+ * fault in the memory just to make sure we can access it
+ */
+ if (kttd_debug)
+ printf(">>>>>>>>>>Faulting in memory: 0x%x, 0x%x\n",
+ trunc_page(offset), access);
+ code = vm_fault(kernel_map, trunc_page(offset), access, FALSE,
+ FALSE, (void (*)()) 0);
+ } else {
+ /*
+ * Check for user thread
+ */
+#if 1
+ if ((current_thread() != THREAD_NULL) &&
+ (current_thread()->task->map->pmap != kernel_pmap) &&
+ (current_thread()->task->map->pmap != PMAP_NULL)) {
+ code = vm_fault(current_thread()->task->map,
+ trunc_page(offset), access, FALSE,
+ FALSE, (void (*)()) 0);
+ }else{
+ /*
+ * Invalid kernel address (below VM_MIN_KERNEL_ADDRESS)
+ */
+ return FALSE;
+ }
+#else
+ if (kttd_debug)
+ printf("==========Would've tried to map in user area 0x%x\n",
+ trunc_page(offset));
+ return FALSE;
+#endif /* 0 */
+ }
+
+ return (code == KERN_SUCCESS);
+}
+
+/*
+ * See if we modified the kernel text and if so flush the caches.
+ * This routine is never called with a range that crosses a page
+ * boundary.
+ */
+void kttd_flush_cache(vm_offset_t offset, vm_size_t length)
+{
+ /* 386 doesn't need this */
+ return;
+}
+
+/*
+ * Insert a breakpoint into memory.
+ */
+boolean_t kttd_insert_breakpoint(vm_address_t address,
+ ttd_saved_inst *saved_inst)
+{
+ /*
+ * Saved old memory data:
+ */
+ *saved_inst = *(unsigned char *)address;
+
+ /*
+ * Put in a Breakpoint:
+ */
+ *(unsigned char *)address = I386_BREAKPOINT;
+
+ return TRUE;
+}
+
+/*
+ * Remove breakpoint from memory.
+ */
+boolean_t kttd_remove_breakpoint(vm_address_t address,
+ ttd_saved_inst saved_inst)
+{
+ /*
+ * replace it:
+ */
+ *(unsigned char *)address = (saved_inst & 0xff);
+
+ return TRUE;
+}
+
+/*
+ * Set single stepping mode. Assumes that program counter is set
+ * to the location where single stepping is to begin. The 386 is
+ * an easy single stepping machine, ie. built into the processor.
+ */
+boolean_t kttd_set_machine_single_step(void)
+{
+ /* Turn on Single Stepping */
+ kttd_regs.efl |= EFL_TF;
+
+ return TRUE;
+}
+
+/*
+ * Clear single stepping mode.
+ */
+boolean_t kttd_clear_machine_single_step(void)
+{
+ /* Turn off the trace flag */
+ kttd_regs.efl &= ~EFL_TF;
+
+ return TRUE;
+}
+
+
+/*
+ * kttd_type_to_ttdtrap:
+ *
+ * Fills in the task and thread info structures with the reason
+ * for entering the Teledebugger (bp, single step, pg flt, etc.)
+ *
+ */
+void kttd_type_to_ttdtrap(int type)
+{
+ /* XXX Fill this in sometime for i386 */
+}
+
+/*
+ * kttd_trap:
+ *
+ * This routine is called from the trap or interrupt handler when a
+ * breakpoint instruction is encountered or a single step operation
+ * completes. The argument is a pointer to a machine dependent
+ * saved_state structure that was built on the interrupt or kernel stack.
+ *
+ */
+boolean_t kttd_trap(int type, int code, struct i386_saved_state *regs)
+{
+ int s;
+
+ if (kttd_debug)
+ printf("kttd_TRAP, before splhigh()\n");
+
+ /*
+ * TTD isn't supported by the driver.
+ *
+ * Try to switch off to kdb if it is resident.
+ * Otherwise just hang (this might be panic).
+ *
+ * Check to make sure that TTD is supported.
+ * (Both by the machine's driver's, and bootp if using ether).
+ */
+ if (!kttd_supported()) {
+ kttd_enabled = FALSE;
+ return FALSE;
+ }
+
+ s = splhigh();
+
+ /*
+ * We are already in TTD!
+ */
+ if (++kttd_active > MAX_KTTD_ACTIVE) {
+ printf("kttd_trap: RE-ENTERED!!!\n");
+ }
+
+ if (kttd_debug)
+ printf("kttd_TRAP, after splhigh()\n");
+
+ /* Should switch to kttd's own stack here. */
+
+ kttd_regs = *regs;
+
+ if ((regs->cs & 0x3) == KERNEL_RING) {
+ /*
+ * Kernel mode - esp and ss not saved
+ */
+ kttd_regs.uesp = (int)&regs->uesp; /* kernel stack pointer */
+ kttd_regs.ss = KERNEL_DS;
+ }
+
+ /*
+ * If this was not entered via an interrupt (type != -1)
+ * then we've entered via a bpt, single, etc. and must
+ * set the globals.
+ *
+ * Setup the kttd globals for entry....
+ */
+ if (type != -1) {
+ kttd_current_request = NULL;
+ kttd_current_length = 0;
+ kttd_current_kmsg = NULL;
+ kttd_run_status = FULL_STOP;
+ }else{
+ /*
+ * We know that we can only get here if we did a kttd_intr
+ * since it's the way that we are called with type -1 (via
+ * the trampoline), so we don't have to worry about entering
+ * from Cntl-Alt-D like the mips does.
+ */
+ /*
+ * Perform sanity check!
+ */
+ if ((kttd_current_request == NULL) ||
+ (kttd_current_length == 0) ||
+ (kttd_current_kmsg == NULL) ||
+ (kttd_run_status != ONE_STOP)) {
+
+ printf("kttd_trap: INSANITY!!!\n");
+ }
+ }
+
+ kttd_task_trap(type, code, (regs->cs & 0x3) != 0);
+
+ regs->eip = kttd_regs.eip;
+ regs->efl = kttd_regs.efl;
+ regs->eax = kttd_regs.eax;
+ regs->ecx = kttd_regs.ecx;
+ regs->edx = kttd_regs.edx;
+ regs->ebx = kttd_regs.ebx;
+ if ((regs->cs & 0x3) != KERNEL_RING) {
+ /*
+ * user mode - saved esp and ss valid
+ */
+ regs->uesp = kttd_regs.uesp; /* user stack pointer */
+ regs->ss = kttd_regs.ss & 0xffff; /* user stack segment */
+ }
+ regs->ebp = kttd_regs.ebp;
+ regs->esi = kttd_regs.esi;
+ regs->edi = kttd_regs.edi;
+ regs->es = kttd_regs.es & 0xffff;
+ regs->cs = kttd_regs.cs & 0xffff;
+ regs->ds = kttd_regs.ds & 0xffff;
+ regs->fs = kttd_regs.fs & 0xffff;
+ regs->gs = kttd_regs.gs & 0xffff;
+
+ if (--kttd_active < MIN_KTTD_ACTIVE)
+ printf("ttd_trap: kttd_active < 0\n");
+
+ if (kttd_debug) {
+ printf("Leaving kttd_trap, kttd_active = %d\n", kttd_active);
+ }
+
+ /*
+ * Only reset this if we entered kttd_trap via an async trampoline.
+ */
+ if (type == -1) {
+ if (kttd_run_status == RUNNING)
+ printf("kttd_trap: $$$$$ run_status already RUNNING! $$$$$\n");
+ kttd_run_status = RUNNING;
+ }
+
+ /* Is this right? XXX */
+ kttd_run_status = RUNNING;
+
+ (void) splx(s);
+
+ /*
+ * Return true, that yes we handled the trap.
+ */
+ return TRUE;
+}
+
+/*
+ * Enter KTTD through a network packet trap.
+ * We show the registers as of the network interrupt
+ * instead of those at its call to KDB.
+ */
+struct int_regs {
+ int edi;
+ int esi;
+ int ebp;
+ int ebx;
+ struct i386_interrupt_state *is;
+};
+
+void
+kttd_netentry(struct int_regs *int_regs)
+{
+ struct i386_interrupt_state *is = int_regs->is;
+ int s;
+
+ if (kttd_debug)
+ printf("kttd_NETENTRY before slphigh()\n");
+
+ s = splhigh();
+
+ if (kttd_debug)
+ printf("kttd_NETENTRY after slphigh()\n");
+
+ if ((is->cs & 0x3) != KERNEL_RING) {
+ /*
+ * Interrupted from User Space
+ */
+ kttd_regs.uesp = ((int *)(is+1))[0];
+ kttd_regs.ss = ((int *)(is+1))[1];
+ }
+ else {
+ /*
+ * Interrupted from Kernel Space
+ */
+ kttd_regs.ss = KERNEL_DS;
+ kttd_regs.uesp= (int)(is+1);
+ }
+ kttd_regs.efl = is->efl;
+ kttd_regs.cs = is->cs;
+ kttd_regs.eip = is->eip;
+ kttd_regs.eax = is->eax;
+ kttd_regs.ecx = is->ecx;
+ kttd_regs.edx = is->edx;
+ kttd_regs.ebx = int_regs->ebx;
+ kttd_regs.ebp = int_regs->ebp;
+ kttd_regs.esi = int_regs->esi;
+ kttd_regs.edi = int_regs->edi;
+ kttd_regs.ds = is->ds;
+ kttd_regs.es = is->es;
+ kttd_regs.fs = is->fs;
+ kttd_regs.gs = is->gs;
+
+ kttd_active++;
+ kttd_task_trap(-1, 0, (kttd_regs.cs & 0x3) != 0);
+ kttd_active--;
+
+ if ((kttd_regs.cs & 0x3) != KERNEL_RING) {
+ ((int *)(is+1))[0] = kttd_regs.uesp;
+ ((int *)(is+1))[1] = kttd_regs.ss & 0xffff;
+ }
+ is->efl = kttd_regs.efl;
+ is->cs = kttd_regs.cs & 0xffff;
+ is->eip = kttd_regs.eip;
+ is->eax = kttd_regs.eax;
+ is->ecx = kttd_regs.ecx;
+ is->edx = kttd_regs.edx;
+ int_regs->ebx = kttd_regs.ebx;
+ int_regs->ebp = kttd_regs.ebp;
+ int_regs->esi = kttd_regs.esi;
+ int_regs->edi = kttd_regs.edi;
+ is->ds = kttd_regs.ds & 0xffff;
+ is->es = kttd_regs.es & 0xffff;
+ is->fs = kttd_regs.fs & 0xffff;
+ is->gs = kttd_regs.gs & 0xffff;
+
+ if (kttd_run_status == RUNNING)
+ printf("kttd_netentry: %%%%% run_status already RUNNING! %%%%%\n");
+ kttd_run_status = RUNNING;
+
+ (void) splx(s);
+}
+
+#endif /* MACH_TTD */
diff --git a/i386/i386/kttd_machdep.h b/i386/i386/kttd_machdep.h
new file mode 100644
index 0000000..8ac7de1
--- /dev/null
+++ b/i386/i386/kttd_machdep.h
@@ -0,0 +1,59 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _KTTD_MACHDEP_H_
+#define _KTTD_MACHDEP_H_
+
+#define MAX_KTTD_ACTIVE 2
+#define MIN_KTTD_ACTIVE 0
+
+/*
+ * Register state for gdb (numbers presumably mark gdb's i386 regnums — confirm)
+ */
+struct i386_gdb_register_state {
+ int eax;
+ int ecx;
+ int edx;
+ int ebx;
+ int esp; /* 4 */
+ int ebp; /* 5 */
+ int esi;
+ int edi;
+ int eip; /* 8 */
+ int efl; /* 9 */
+ int cs;
+ int ss;
+ int ds;
+ int es;
+ int fs;
+ int gs;
+};
+
+typedef struct i386_gdb_register_state ttd_machine_state;
+
+typedef unsigned long ttd_saved_inst;
+
+#endif /* _KTTD_MACHDEP_H_ */
diff --git a/i386/i386/ldt.c b/i386/i386/ldt.c
new file mode 100644
index 0000000..5db3642
--- /dev/null
+++ b/i386/i386/ldt.c
@@ -0,0 +1,117 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * "Local" descriptor table. At the moment, all tasks use the
+ * same LDT.
+ */
+#include <mach/machine/eflags.h>
+#include <mach/machine/vm_types.h>
+#include <mach/xen.h>
+
+#include <intel/pmap.h>
+#include <kern/debug.h>
+
+#include "vm_param.h"
+#include "seg.h"
+#include "gdt.h"
+#include "ldt.h"
+#include "locore.h"
+#include "mp_desc.h"
+#include "msr.h"
+
+#ifdef MACH_PV_DESCRIPTORS
+/* It is actually defined in xen_boothdr.S */
+extern
+#endif /* MACH_PV_DESCRIPTORS */
+struct real_descriptor ldt[LDTSZ];
+
+#if defined(__x86_64__) && ! defined(USER32)
+#define USER_SEGMENT_SIZEBITS SZ_64
+#else
+#define USER_SEGMENT_SIZEBITS SZ_32
+#endif
+
+void
+ldt_fill(struct real_descriptor *myldt, struct real_descriptor *mygdt)
+{
+#ifdef MACH_PV_DESCRIPTORS
+#ifdef MACH_PV_PAGETABLES
+ pmap_set_page_readwrite(myldt); /* hypervisor needs the LDT page writable */
+#endif /* MACH_PV_PAGETABLES */
+#else /* MACH_PV_DESCRIPTORS */
+ /* Initialize the master LDT descriptor in the GDT. */
+ _fill_gdt_sys_descriptor(mygdt, KERNEL_LDT,
+ kvtolin(myldt), (LDTSZ * sizeof(struct real_descriptor))-1,
+ ACC_PL_K|ACC_LDT, 0);
+#endif /* MACH_PV_DESCRIPTORS */
+
+ /* Initialize the syscall entry point */
+#if defined(__x86_64__) && ! defined(USER32)
+ if (!CPU_HAS_FEATURE(CPU_FEATURE_SEP)) /* NOTE(review): SEP is the sysenter bit; code below enables SYSCALL (EFER.SCE) — confirm intended feature check */
+ panic("syscall support is missing on 64 bit");
+ /* Enable 64-bit syscalls */
+ wrmsr(MSR_REG_EFER, rdmsr(MSR_REG_EFER) | MSR_EFER_SCE);
+ wrmsr(MSR_REG_LSTAR, (vm_offset_t)syscall64); /* 64-bit syscall entry point */
+ wrmsr(MSR_REG_STAR, ((((long)USER_CS - 16) << 16) | (long)KERNEL_CS) << 32); /* STAR: CS/SS selector bases for syscall/sysret */
+ wrmsr(MSR_REG_FMASK, EFL_IF | EFL_IOPL_USER); /* rflags bits cleared on syscall entry */
+#else /* defined(__x86_64__) && ! defined(USER32) */
+ fill_ldt_gate(myldt, USER_SCALL,
+ (vm_offset_t)&syscall, KERNEL_CS,
+ ACC_PL_U|ACC_CALL_GATE, 0);
+#endif /* defined(__x86_64__) && ! defined(USER32) */
+
+ /* Initialize the 32bit LDT descriptors. */
+ fill_ldt_descriptor(myldt, USER_CS,
+ VM_MIN_USER_ADDRESS,
+ VM_MAX_USER_ADDRESS-VM_MIN_USER_ADDRESS-4096,
+ /* XXX LINEAR_... */
+ ACC_PL_U|ACC_CODE_R, USER_SEGMENT_SIZEBITS);
+ fill_ldt_descriptor(myldt, USER_DS,
+ VM_MIN_USER_ADDRESS,
+ VM_MAX_USER_ADDRESS-VM_MIN_USER_ADDRESS-4096,
+ ACC_PL_U|ACC_DATA_W, USER_SEGMENT_SIZEBITS);
+
+ /* Activate the LDT. */
+#ifdef MACH_PV_DESCRIPTORS
+ hyp_set_ldt(myldt, LDTSZ);
+#else /* MACH_PV_DESCRIPTORS */
+ lldt(KERNEL_LDT);
+#endif /* MACH_PV_DESCRIPTORS */
+}
+
+void
+ldt_init(void)
+{
+ ldt_fill(ldt, gdt); /* boot CPU: fill the master LDT into the master GDT */
+}
+
+#if NCPUS > 1
+void
+ap_ldt_init(int cpu)
+{
+ ldt_fill(mp_desc_table[cpu]->ldt, mp_gdt[cpu]); /* per-AP LDT/GDT copies */
+}
+#endif
diff --git a/i386/i386/ldt.h b/i386/i386/ldt.h
new file mode 100644
index 0000000..51867f4
--- /dev/null
+++ b/i386/i386/ldt.h
@@ -0,0 +1,77 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON, IBM, AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON, IBM, AND CSL DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * This file describes the standard LDT provided by default
+ * to all user-level Mach tasks.
+ */
+#ifndef _I386_LDT_
+#define _I386_LDT_
+
+#include "seg.h"
+
+/*
+ * User descriptors for Mach - 32-bit flat address space
+ */
+#define USER_SCALL 0x07 /* system call gate */
+#if defined(__x86_64__) && ! defined(USER32)
+/* Call gate needs two entries */
+
+/* The sysret instruction puts some constraints on the user segment indexes */
+#define USER_CS 0x1f /* user code segment */
+#define USER_DS 0x17 /* user data segment */
+#else
+#define USER_CS 0x17 /* user code segment */
+#define USER_DS 0x1f /* user data segment */
+#endif
+
+#define LDTSZ 4
+
+
+#ifndef __ASSEMBLER__
+
+extern struct real_descriptor ldt[LDTSZ];
+
+/* Fill a 32bit segment descriptor in the LDT. */
+#define fill_ldt_descriptor(_ldt, selector, base, limit, access, sizebits) \
+ fill_descriptor(&_ldt[sel_idx(selector)], base, limit, access, sizebits)
+
+#define fill_ldt_gate(_ldt, selector, offset, dest_selector, access, word_count) \
+ fill_gate((struct real_gate*)&_ldt[sel_idx(selector)], \
+ offset, dest_selector, access, word_count)
+
+void ldt_init(void);
+void ap_ldt_init(int cpu);
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* _I386_LDT_ */
diff --git a/i386/i386/lock.h b/i386/i386/lock.h
new file mode 100644
index 0000000..b325ae0
--- /dev/null
+++ b/i386/i386/lock.h
@@ -0,0 +1,132 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Machine-dependent simple locks for the i386.
+ */
+#ifndef _I386_LOCK_H_
+#define _I386_LOCK_H_
+
+#if NCPUS > 1
+#include <i386/smp.h>
+
+/*
+ * All of the locking routines are built from calls on
+ * a locked-exchange operation. Values of the lock are
+ * 0 for unlocked, 1 for locked.
+ */
+
+#ifdef __GNUC__
+
+/*
+ * The code here depends on the GNU C compiler.
+ */
+
+#define _simple_lock_xchg_(lock, new_val) \
+({ natural_t _old_val_; \
+ asm volatile("xchg %0, %2" \
+ : "=r" (_old_val_) \
+ : "0" ((natural_t)(new_val)), "m" (*(lock)) : "memory" \
+ ); \
+ _old_val_; \
+ })
+
+#define simple_lock_init(l) \
+ ((l)->lock_data = 0)
+
+#define SIMPLE_LOCK_INITIALIZER(l) \
+ {.lock_data = 0}
+
+#define _simple_lock(l) \
+ ({ \
+ while(_simple_lock_xchg_(l, 1)) \
+ while (*(volatile natural_t *)&(l)->lock_data) \
+ cpu_pause(); \
+ 0; \
+ })
+
+#define _simple_unlock(l) \
+ (_simple_lock_xchg_(l, 0))
+
+#define _simple_lock_try(l) \
+ (!_simple_lock_xchg_(l, 1))
+
+/*
+ * General bit-lock routines.
+ */
+#define bit_lock(bit, l) \
+ ({ \
+ asm volatile(" jmp 1f \n\
+ 0: btl %0, %1 \n\
+ jb 0b \n\
+ 1: lock \n\
+ btsl %0, %1 \n\
+ jb 0b" \
+ : \
+ : "r" ((int)(bit)), "m" (*(volatile int *)(l)) : "memory"); \
+ 0; \
+ })
+
+#define bit_unlock(bit, l) \
+ ({ \
+ asm volatile(" lock \n\
+ btrl %0, %1" \
+ : \
+ : "r" ((int)(bit)), "m" (*(volatile int *)(l)) : "memory"); \
+ 0; \
+ })
+
+/*
+ * Set or clear individual bits in a long word.
+ * The locked access is needed only to lock access
+ * to the word, not to individual bits.
+ */
+#define i_bit_set(bit, l) \
+ ({ \
+ asm volatile(" lock \n\
+ btsl %0, %1" \
+ : \
+ : "r" ((int)(bit)), "m" (*(l)) ); \
+ 0; \
+ })
+
+#define i_bit_clear(bit, l) \
+ ({ \
+ asm volatile(" lock \n\
+ btrl %0, %1" \
+ : \
+ : "r" ((int)(bit)), "m" (*(l)) ); \
+ 0; \
+ })
+
+#endif /* __GNUC__ */
+
+extern void simple_lock_pause(void);
+
+#endif /* NCPUS > 1 */
+
+
+
+#endif /* _I386_LOCK_H_ */
diff --git a/i386/i386/locore.S b/i386/i386/locore.S
new file mode 100644
index 0000000..9d0513a
--- /dev/null
+++ b/i386/i386/locore.S
@@ -0,0 +1,1603 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <mach/machine/asm.h>
+#include <mach/machine/eflags.h>
+#include <i386/proc_reg.h>
+#include <i386/trap.h>
+#include <i386/seg.h>
+#include <i386/gdt.h>
+#include <i386/ldt.h>
+#include <i386/i386asm.h>
+#include <i386/cpu_number.h>
+#include <i386/xen.h>
+
+#define PUSH_REGS_ISR \
+ pushl %ecx ;\
+ pushl %edx
+
+#define PUSH_AREGS_ISR \
+ pushl %eax ;\
+ PUSH_REGS_ISR
+
+
+#define POP_REGS_ISR \
+ popl %edx ;\
+ popl %ecx
+
+#define POP_AREGS_ISR \
+ POP_REGS_ISR ;\
+ popl %eax
+
+/*
+ * Note that we have to load the kernel segment registers even if this
+ * is a trap from the kernel, because the kernel uses user segment
+ * registers for copyin/copyout.
+ * (XXX Would it be smarter just to use fs or gs for that?)
+ */
+#define PUSH_SEGMENTS \
+ pushl %ds ;\
+ pushl %es ;\
+ pushl %fs ;\
+ pushl %gs
+
+#define POP_SEGMENTS \
+ popl %gs ;\
+ popl %fs ;\
+ popl %es ;\
+ popl %ds
+
+#define PUSH_SEGMENTS_ISR \
+ pushl %ds ;\
+ pushl %es ;\
+ pushl %fs ;\
+ pushl %gs
+
+#define POP_SEGMENTS_ISR \
+ popl %gs ;\
+ popl %fs ;\
+ popl %es ;\
+ popl %ds
+
+#define SET_KERNEL_SEGMENTS(reg) \
+ mov %ss,reg /* switch to kernel segments */ ;\
+ mov reg,%ds /* (same as kernel stack segment) */ ;\
+ mov reg,%es ;\
+ mov reg,%fs ;\
+ mov $(PERCPU_DS),reg ;\
+ mov reg,%gs
+
+/*
+ * Fault recovery.
+ */
+#define RECOVER_TABLE_START \
+ .text 2 ;\
+DATA(recover_table) ;\
+ .text
+
+#define RECOVER(addr) \
+ .text 2 ;\
+ .long 9f ;\
+ .long addr ;\
+ .text ;\
+9:
+
+#define RECOVER_TABLE_END \
+ .text 2 ;\
+ .globl EXT(recover_table_end) ;\
+LEXT(recover_table_end) ;\
+ .text
+
+/*
+ * Retry table for certain successful faults.
+ */
+#define RETRY_TABLE_START \
+ .text 3 ;\
+DATA(retry_table) ;\
+ .text
+
+#define RETRY(addr) \
+ .text 3 ;\
+ .long 9f ;\
+ .long addr ;\
+ .text ;\
+9:
+
+#define RETRY_TABLE_END \
+ .text 3 ;\
+ .globl EXT(retry_table_end) ;\
+LEXT(retry_table_end) ;\
+ .text
+
+/*
+ * Allocate recovery and retry tables.
+ */
+ RECOVER_TABLE_START
+ RETRY_TABLE_START
+
+/*
+ * Timing routines.
+ */
+#if STAT_TIME
+
+#define TIME_TRAP_UENTRY
+#define TIME_TRAP_SENTRY
+#define TIME_TRAP_UEXIT
+#define TIME_INT_ENTRY
+#define TIME_INT_EXIT
+
+#else /* microsecond timing */
+
+/*
+ * Microsecond timing.
+ * Assumes a free-running microsecond counter.
+ * no TIMER_MAX check needed.
+ */
+
+/*
+ * There is only one current time-stamp per CPU, since only
+ * the time-stamp in the current timer is used.
+ * To save time, we allocate the current time-stamps here.
+ */
+ .comm EXT(current_tstamp), 4*NCPUS
+
+/*
+ * Update time on user trap entry.
+ * 11 instructions (including cli on entry)
+ * Assumes CPU number in %edx.
+ * Uses %eax, %ebx, %ecx.
+ */
+#define TIME_TRAP_UENTRY \
+ pushf /* Save flags */ ;\
+ cli /* block interrupts */ ;\
+ movl VA_ETC,%ebx /* get timer value */ ;\
+ movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ subl %ecx,%ebx /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%edx),%ecx /* get current timer */ ;\
+ addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* if overflow, */ ;\
+ call timer_normalize /* normalize timer */ ;\
+0: addl $(TH_SYSTEM_TIMER-TH_USER_TIMER),%ecx ;\
+ /* switch to sys timer */;\
+ movl %ecx,CX(EXT(current_timer),%edx) /* make it current */ ;\
+ popf /* allow interrupts */
+
+/*
+ * Update time on system call entry.
+ * 11 instructions (including cli on entry)
+ * Assumes CPU number in %edx.
+ * Uses %ebx, %ecx.
+ * Same as TIME_TRAP_UENTRY, but preserves %eax.
+ */
+#define TIME_TRAP_SENTRY \
+ pushf /* Save flags */ ;\
+ cli /* block interrupts */ ;\
+ movl VA_ETC,%ebx /* get timer value */ ;\
+ movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ subl %ecx,%ebx /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%edx),%ecx /* get current timer */ ;\
+ addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* if overflow, */ ;\
+ pushl %eax /* save %eax */ ;\
+ call timer_normalize /* normalize timer */ ;\
+ popl %eax /* restore %eax */ ;\
+0: addl $(TH_SYSTEM_TIMER-TH_USER_TIMER),%ecx ;\
+ /* switch to sys timer */;\
+ movl %ecx,CX(EXT(current_timer),%edx) /* make it current */ ;\
+ popf /* allow interrupts */
+
+/*
+ * update time on user trap exit.
+ * 10 instructions.
+ * Assumes CPU number in %edx.
+ * Uses %ebx, %ecx.
+ */
+#define TIME_TRAP_UEXIT \
+ cli /* block interrupts */ ;\
+ movl VA_ETC,%ebx /* get timer */ ;\
+ movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ subl %ecx,%ebx /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%edx),%ecx /* get current timer */ ;\
+ addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* if overflow, */ ;\
+ call timer_normalize /* normalize timer */ ;\
+0: addl $(TH_USER_TIMER-TH_SYSTEM_TIMER),%ecx ;\
+ /* switch to user timer */;\
+ movl %ecx,CX(EXT(current_timer),%edx) /* make it current */
+
+/*
+ * update time on interrupt entry.
+ * 9 instructions.
+ * Assumes CPU number in %edx.
+ * Leaves old timer in %ebx.
+ * Uses %ecx.
+ */
+#define TIME_INT_ENTRY \
+ movl VA_ETC,%ecx /* get timer */ ;\
+ movl CX(EXT(current_tstamp),%edx),%ebx /* get old time stamp */;\
+ movl %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ subl %ebx,%ecx /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%edx),%ebx /* get current timer */ ;\
+ addl %ecx,LOW_BITS(%ebx) /* add to low bits */ ;\
+ leal CX(0,%edx),%ecx /* timer is 16 bytes */ ;\
+ lea CX(EXT(kernel_timer),%edx),%ecx /* get interrupt timer*/;\
+ movl %ecx,CX(EXT(current_timer),%edx) /* set timer */
+
+/*
+ * update time on interrupt exit.
+ * 11 instructions
+ * Assumes CPU number in %edx, old timer in %ebx.
+ * Uses %eax, %ecx.
+ */
+#define TIME_INT_EXIT \
+ movl VA_ETC,%eax /* get timer */ ;\
+ movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
+ movl %eax,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ subl %ecx,%eax /* elapsed = new-old */ ;\
+ movl CX(EXT(current_timer),%edx),%ecx /* get current timer */ ;\
+ addl %eax,LOW_BITS(%ecx) /* add to low bits */ ;\
+ jns 0f /* if overflow, */ ;\
+ call timer_normalize /* normalize timer */ ;\
+0: testb $0x80,LOW_BITS+3(%ebx) /* old timer overflow? */;\
+ jz 0f /* if overflow, */ ;\
+ movl %ebx,%ecx /* get old timer */ ;\
+ call timer_normalize /* normalize timer */ ;\
+0: movl %ebx,CX(EXT(current_timer),%edx) /* set timer */
+
+
+/*
+ * Normalize timer in ecx.
+ * Preserves edx; clobbers eax.
+ */
+ .align 2
+timer_high_unit:
+ .long TIMER_HIGH_UNIT /* div has no immediate opnd */
+
+timer_normalize:
+ pushl %edx /* save register */
+ xorl %edx,%edx /* clear divisor high */
+ movl LOW_BITS(%ecx),%eax /* get divisor low */
+ divl timer_high_unit,%eax /* quotient in eax */
+ /* remainder in edx */
+ addl %eax,HIGH_BITS_CHECK(%ecx) /* add high_inc to check */
+ movl %edx,LOW_BITS(%ecx) /* remainder to low_bits */
+ addl %eax,HIGH_BITS(%ecx) /* add high_inc to high bits */
+ popl %edx /* restore register */
+ ret
+
+/*
+ * Switch to a new timer.
+ */
+ENTRY(timer_switch)
+ CPU_NUMBER(%edx) /* get this CPU */
+ movl VA_ETC,%ecx /* get timer */
+ movl CX(EXT(current_tstamp),%edx),%eax /* get old time stamp */
+ movl %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */
+ subl %ecx,%eax /* elapsed = new - old */
+ movl CX(EXT(current_timer),%edx),%ecx /* get current timer */
+ addl %eax,LOW_BITS(%ecx) /* add to low bits */
+ jns 0f /* if overflow, */
+ call timer_normalize /* normalize timer */
+0:
+ movl S_ARG0,%ecx /* get new timer */
+ movl %ecx,CX(EXT(current_timer),%edx) /* set timer */
+ ret
+
+/*
+ * Initialize the first timer for a CPU.
+ */
+ENTRY(start_timer)
+ CPU_NUMBER(%edx) /* get this CPU */
+ movl VA_ETC,%ecx /* get timer */
+ movl %ecx,CX(EXT(current_tstamp),%edx) /* set initial time stamp */
+ movl S_ARG0,%ecx /* get timer */
+ movl %ecx,CX(EXT(current_timer),%edx) /* set initial timer */
+ ret
+
+#endif /* accurate timing */
+
+/* */
+
+/*
+ * Trap/interrupt entry points.
+ *
+ * All traps must create the following save area on the kernel stack:
+ *
+ * gs
+ * fs
+ * es
+ * ds
+ * edi
+ * esi
+ * ebp
+ * cr2 if page fault - otherwise unused
+ * ebx
+ * edx
+ * ecx
+ * eax
+ * trap number
+ * error code
+ * eip
+ * cs
+ * eflags
+ * user esp - if from user
+ * user ss - if from user
+ * es - if from V86 thread
+ * ds - if from V86 thread
+ * fs - if from V86 thread
+ * gs - if from V86 thread
+ *
+ */
+
+/*
+ * General protection or segment-not-present fault.
+ * Check for a GP/NP fault in the kernel_return
+ * sequence; if there, report it as a GP/NP fault on the user's instruction.
+ *
+ * esp-> 0: trap code (NP or GP)
+ * 4: segment number in error
+ * 8 eip
+ * 12 cs
+ * 16 eflags
+ * 20 old registers (trap is from kernel)
+ */
+ENTRY(t_gen_prot)
+ pushl $(T_GENERAL_PROTECTION) /* indicate fault type */
+ jmp trap_check_kernel_exit /* check for kernel exit sequence */
+
+ENTRY(t_segnp)
+ pushl $(T_SEGMENT_NOT_PRESENT)
+ /* indicate fault type */
+
+trap_check_kernel_exit:
+ testl $(EFL_VM),16(%esp) /* is trap from V86 mode? */
+ jnz EXT(alltraps) /* isn`t kernel trap if so */
+ /* Note: handling KERNEL_RING value by hand */
+ testl $2,12(%esp) /* is trap from kernel mode? */
+ jnz EXT(alltraps) /* if so: */
+ /* check for the kernel exit sequence */
+ cmpl $_kret_iret,8(%esp) /* on IRET? */
+ je fault_iret
+ cmpl $_kret_popl_ds,8(%esp) /* popping DS? */
+ je fault_popl_ds
+ cmpl $_kret_popl_es,8(%esp) /* popping ES? */
+ je fault_popl_es
+ cmpl $_kret_popl_fs,8(%esp) /* popping FS? */
+ je fault_popl_fs
+ cmpl $_kret_popl_gs,8(%esp) /* popping GS? */
+ je fault_popl_gs
+take_fault: /* if none of the above: */
+ jmp EXT(alltraps) /* treat as normal trap. */
+
+/*
+ * GP/NP fault on IRET: CS or SS is in error.
+ * All registers contain the user's values.
+ *
+ * on SP is
+ * 0 trap number
+ * 4 errcode
+ * 8 eip
+ * 12 cs --> trapno
+ * 16 efl --> errcode
+ * 20 user eip
+ * 24 user cs
+ * 28 user eflags
+ * 32 user esp
+ * 36 user ss
+ */
+fault_iret:
+ movl %eax,8(%esp) /* save eax (we don`t need saved eip) */
+ popl %eax /* get trap number */
+ movl %eax,12-4(%esp) /* put in user trap number */
+ popl %eax /* get error code */
+ movl %eax,16-8(%esp) /* put in user errcode */
+ popl %eax /* restore eax */
+ jmp EXT(alltraps) /* take fault */
+
+/*
+ * Fault restoring a segment register. The user's registers are still
+ * saved on the stack. The offending segment register has not been
+ * popped.
+ */
+fault_popl_ds:
+ popl %eax /* get trap number */
+ popl %edx /* get error code */
+ addl $12,%esp /* pop stack to user regs */
+ jmp push_es /* (DS on top of stack) */
+fault_popl_es:
+ popl %eax /* get trap number */
+ popl %edx /* get error code */
+ addl $12,%esp /* pop stack to user regs */
+ jmp push_fs /* (ES on top of stack) */
+fault_popl_fs:
+ popl %eax /* get trap number */
+ popl %edx /* get error code */
+ addl $12,%esp /* pop stack to user regs */
+ jmp push_gs /* (FS on top of stack) */
+fault_popl_gs:
+ popl %eax /* get trap number */
+ popl %edx /* get error code */
+ addl $12,%esp /* pop stack to user regs */
+ jmp push_segregs /* (GS on top of stack) */
+
+push_es:
+ pushl %es /* restore es, */
+push_fs:
+ pushl %fs /* restore fs, */
+push_gs:
+ pushl %gs /* restore gs. */
+push_segregs:
+ movl %eax,R_TRAPNO(%esp) /* set trap number */
+ movl %edx,R_ERR(%esp) /* set error code */
+ jmp trap_set_segs /* take trap */
+
+/*
+ * Debug trap. Check for single-stepping across system call into
+ * kernel. If this is the case, taking the debug trap has turned
+ * off single-stepping - save the flags register with the trace
+ * bit set.
+ */
+ENTRY(t_debug)
+ testl $(EFL_VM),8(%esp) /* is trap from V86 mode? */
+ jnz 0f /* isn`t kernel trap if so */
+ /* Note: handling KERNEL_RING value by hand */
+ testl $2,4(%esp) /* is trap from kernel mode? */
+ jnz 0f /* if not (user mode): normal trap */
+ cmpl $syscall_entry,(%esp) /* system call entry? */
+ jne 0f /* if not: normal trap */
+ /* flags are sitting where syscall */
+ /* wants them */
+ addl $8,%esp /* remove eip/cs */
+ jmp syscall_entry_2 /* continue system call entry */
+
+0: pushl $0 /* otherwise: */
+ pushl $(T_DEBUG) /* handle as normal */
+ jmp EXT(alltraps) /* debug fault */
+
+/*
+ * Page fault traps save cr2.
+ */
+ENTRY(t_page_fault)
+ pushl $(T_PAGE_FAULT) /* mark a page fault trap */
+ pusha /* save the general registers */
+#ifdef MACH_PV_PAGETABLES
+ movl %ss:hyp_shared_info+CR2,%eax
+#else /* MACH_PV_PAGETABLES */
+ movl %cr2,%eax /* get the faulting address */
+#endif /* MACH_PV_PAGETABLES */
+ movl %eax,R_CR2-R_EDI(%esp) /* save in esp save slot */
+ jmp trap_push_segs /* continue fault */
+
+/*
+ * All 'exceptions' enter here with:
+ * esp-> trap number
+ * error code
+ * old eip
+ * old cs
+ * old eflags
+ * old esp if trapped from user
+ * old ss if trapped from user
+ */
+ENTRY(alltraps)
+ pusha /* save the general registers */
+trap_push_segs:
+ PUSH_SEGMENTS /* and the segment registers */
+ SET_KERNEL_SEGMENTS(%ax) /* switch to kernel data segment */
+trap_set_segs:
+ cld /* clear direction flag */
+ testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
+ jnz trap_from_user /* user mode trap if so */
+ /* Note: handling KERNEL_RING value by hand */
+ testb $2,R_CS(%esp) /* user mode trap? */
+ jz trap_from_kernel /* kernel trap if not */
+trap_from_user:
+
+ CPU_NUMBER(%edx)
+ TIME_TRAP_UENTRY
+
+ movl CX(EXT(kernel_stack),%edx),%ebx
+ xchgl %ebx,%esp /* switch to kernel stack */
+ /* user regs pointer already set */
+_take_trap:
+ pushl %ebx /* pass register save area to trap */
+ call EXT(user_trap) /* call user trap routine */
+ movl 4(%esp),%esp /* switch back to PCB stack */
+
+ orl %eax,%eax /* emulated syscall? */
+ jz _return_from_trap /* no, just return */
+ movl R_EAX(%ebx),%eax /* yes, get syscall number */
+ jmp syscall_entry_3 /* and emulate it */
+
+/*
+ * Return from trap or system call, checking for ASTs.
+ * On PCB stack.
+ */
+
+_return_from_trap:
+ CPU_NUMBER(%edx)
+ cmpl $0,CX(EXT(need_ast),%edx)
+ jz _return_to_user /* if we need an AST: */
+
+ movl CX(EXT(kernel_stack),%edx),%esp
+ /* switch to kernel stack */
+ call EXT(i386_astintr) /* take the AST */
+ popl %esp /* switch back to PCB stack */
+ jmp _return_from_trap /* and check again (rare) */
+ /* ASTs after this point will */
+ /* have to wait */
+
+_return_to_user:
+ TIME_TRAP_UEXIT
+
+/*
+ * Return from kernel mode to interrupted thread.
+ */
+
+_return_from_kernel:
+_kret_popl_gs:
+ popl %gs /* restore segment registers */
+_kret_popl_fs:
+ popl %fs
+_kret_popl_es:
+ popl %es
+_kret_popl_ds:
+ popl %ds
+ popa /* restore general registers */
+ addl $8,%esp /* discard trap number and error code */
+_kret_iret:
+ iret /* return from interrupt */
+
+
+/*
+ * Trap from kernel mode. No need to switch stacks.
+ */
+trap_from_kernel:
+#if MACH_KDB || MACH_TTD
+ movl %esp,%ebx /* save current stack */
+ movl %esp,%edx /* on an interrupt stack? */
+
+ CPU_NUMBER(%ecx)
+ and $(~(INTSTACK_SIZE-1)),%edx
+ cmpl CX(EXT(int_stack_base),%ecx),%edx
+ je 1f /* OK if so */
+
+ movl %ecx,%edx
+ cmpl CX(EXT(kernel_stack),%edx),%esp
+ /* already on kernel stack? */
+ ja 0f
+ cmpl MY(ACTIVE_STACK),%esp
+ ja 1f /* switch if not */
+0:
+ movl CX(EXT(kernel_stack),%edx),%esp
+1:
+ pushl %ebx /* save old stack */
+ pushl %ebx /* pass as parameter */
+ call EXT(kernel_trap) /* to kernel trap routine */
+ addl $4,%esp /* pop parameter */
+ popl %esp /* return to old stack */
+#else /* MACH_KDB || MACH_TTD */
+
+ pushl %esp /* pass parameter */
+ call EXT(kernel_trap) /* to kernel trap routine */
+ addl $4,%esp /* pop parameter */
+#endif /* MACH_KDB || MACH_TTD */
+
+ jmp _return_from_kernel
+
+
+/*
+ * Called as a function, makes the current thread
+ * return from the kernel as if from an exception.
+ */
+
+ENTRY(thread_exception_return)
+ENTRY(thread_bootstrap_return)
+ movl %esp,%ecx /* get kernel stack */
+ or $(KERNEL_STACK_SIZE-1),%ecx
+ movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
+ jmp _return_from_trap
+
+/*
+ * Called as a function, makes the current thread
+ * return from the kernel as if from a syscall.
+ * Takes the syscall's return code as an argument.
+ */
+
+ENTRY(thread_syscall_return)
+ movl S_ARG0,%eax /* get return value */
+ movl %esp,%ecx /* get kernel stack */
+ or $(KERNEL_STACK_SIZE-1),%ecx
+ movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */
+ movl %eax,R_EAX(%esp) /* save return value */
+ jmp _return_from_trap
+
+ENTRY(call_continuation)
+ movl S_ARG0,%eax /* get continuation */
+ movl %esp,%ecx /* get kernel stack */
+ or $(KERNEL_STACK_SIZE-1),%ecx
+ addl $(-3-IKS_SIZE),%ecx
+ movl %ecx,%esp /* pop the stack */
+ xorl %ebp,%ebp /* zero frame pointer */
+ pushl $0 /* Dummy return address */
+ jmp *%eax /* goto continuation */
+
+
+/* IOAPIC has 24 interrupts, put spurious in the same array */
+
+#define INTERRUPT(n) \
+ .data 2 ;\
+ .long 0f ;\
+ .text ;\
+ P2ALIGN(TEXT_ALIGN) ;\
+0: ;\
+ pushl %eax ;\
+ movl $(n),%eax ;\
+ jmp EXT(all_intrs)
+
+ .data 2
+DATA(int_entry_table)
+ .text
+/* Legacy APIC interrupts or PIC interrupts */
+INTERRUPT(0)
+INTERRUPT(1)
+INTERRUPT(2)
+INTERRUPT(3)
+INTERRUPT(4)
+INTERRUPT(5)
+INTERRUPT(6)
+INTERRUPT(7)
+INTERRUPT(8)
+INTERRUPT(9)
+INTERRUPT(10)
+INTERRUPT(11)
+INTERRUPT(12)
+INTERRUPT(13)
+INTERRUPT(14)
+INTERRUPT(15)
+#ifdef APIC
+/* APIC PCI interrupts PIRQ A-H */
+INTERRUPT(16)
+INTERRUPT(17)
+INTERRUPT(18)
+INTERRUPT(19)
+INTERRUPT(20)
+INTERRUPT(21)
+INTERRUPT(22)
+INTERRUPT(23)
+/* Possibly 8 more GSIs */
+INTERRUPT(24)
+INTERRUPT(25)
+INTERRUPT(26)
+INTERRUPT(27)
+INTERRUPT(28)
+INTERRUPT(29)
+INTERRUPT(30)
+INTERRUPT(31)
+/* ... APIC IOAPIC #2 */
+INTERRUPT(32)
+INTERRUPT(33)
+INTERRUPT(34)
+INTERRUPT(35)
+INTERRUPT(36)
+INTERRUPT(37)
+INTERRUPT(38)
+INTERRUPT(39)
+INTERRUPT(40)
+INTERRUPT(41)
+INTERRUPT(42)
+INTERRUPT(43)
+INTERRUPT(44)
+INTERRUPT(45)
+INTERRUPT(46)
+INTERRUPT(47)
+INTERRUPT(48)
+INTERRUPT(49)
+INTERRUPT(50)
+INTERRUPT(51)
+INTERRUPT(52)
+INTERRUPT(53)
+INTERRUPT(54)
+INTERRUPT(55)
+/* Possibly 8 more GSIs */
+INTERRUPT(56)
+INTERRUPT(57)
+INTERRUPT(58)
+INTERRUPT(59)
+INTERRUPT(60)
+INTERRUPT(61)
+INTERRUPT(62)
+INTERRUPT(63)
+#endif
+#if NCPUS > 1
+INTERRUPT(CALL_AST_CHECK)
+INTERRUPT(CALL_PMAP_UPDATE)
+#endif
+#ifdef APIC
+/* Spurious interrupt, set irq number to vect number */
+INTERRUPT(255)
+#endif
+
+/* XXX handle NMI - at least print a warning like Linux does. */
+
+/*
+ * All interrupts enter here.
+ * old %eax on stack; interrupt number in %eax.
+ *
+ * If we arrive on a thread (PCB) stack we switch to this CPU's
+ * interrupt stack; if we were already on the interrupt stack (nested
+ * interrupt) we branch to int_from_intstack instead.
+ */
+ENTRY(all_intrs)
+	PUSH_REGS_ISR			/* save registers */
+	cld				/* clear direction flag */
+
+	CPU_NUMBER_NO_GS(%ecx)
+	movl	%esp,%edx		/* on an interrupt stack? */
+	and	$(~(INTSTACK_SIZE-1)),%edx
+	cmpl	%ss:CX(EXT(int_stack_base),%ecx),%edx
+	je	int_from_intstack	/* if not: */
+
+	PUSH_SEGMENTS_ISR		/* save segment registers */
+	SET_KERNEL_SEGMENTS(%dx)	/* switch to kernel segments */
+
+	CPU_NUMBER(%edx)
+
+	movl	CX(EXT(int_stack_top),%edx),%ecx
+
+	xchgl	%ecx,%esp		/* switch to interrupt stack */
+
+#if	STAT_TIME
+	pushl	%ecx			/* save pointer to old stack */
+#else
+	pushl	%ebx			/* save %ebx - out of the way */
+					/* so stack looks the same */
+	pushl	%ecx			/* save pointer to old stack */
+	TIME_INT_ENTRY			/* do timing */
+#endif
+
+#ifdef MACH_LDEBUG
+	incl	CX(EXT(in_interrupt),%edx)
+#endif
+
+	call	EXT(interrupt)		/* call generic interrupt routine */
+	.globl	EXT(return_to_iret)	/* ( label for kdb_kintr and hardclock */
+LEXT(return_to_iret)			/* to find the return from calling interrupt) */
+
+	CPU_NUMBER(%edx)
+#ifdef MACH_LDEBUG
+	decl	CX(EXT(in_interrupt),%edx)
+#endif
+
+#if	STAT_TIME
+#else
+	TIME_INT_EXIT			/* do timing */
+	movl	4(%esp),%ebx		/* restore the extra reg we saved */
+#endif
+
+	popl	%esp			/* switch back to old stack */
+
+	testl	$(EFL_VM),I_EFL(%esp)	/* if in V86 */
+	jnz	0f			/* or */
+	/* Note: handling KERNEL_RING value by hand */
+	testb	$2,I_CS(%esp)		/* user mode, */
+	jz	1f			/* check for ASTs */
+0:
+	/* NOTE(review): relies on %edx still holding the cpu number loaded
+	   at return_to_iret above -- confirm TIME_INT_EXIT preserves %edx. */
+	cmpl	$0,CX(EXT(need_ast),%edx)
+	jnz	ast_from_interrupt	/* take it if so */
+1:
+	POP_SEGMENTS_ISR		/* restore segment regs */
+	POP_AREGS_ISR			/* restore registers */
+
+	iret				/* return to caller */
+
+/* Nested interrupt: already running on this CPU's interrupt stack. */
+int_from_intstack:
+	CPU_NUMBER_NO_GS(%edx)
+	cmpl	CX(EXT(int_stack_base),%edx),%esp /* seemingly looping? */
+	jb	stack_overflowed	/* if not: */
+	call	EXT(interrupt)		/* call interrupt routine */
+_return_to_iret_i:			/* ( label for kdb_kintr) */
+					/* must have been on kernel segs */
+	POP_AREGS_ISR			/* restore registers */
+					/* no ASTs */
+
+	iret
+
+stack_overflowed:
+	ud2				/* unrecoverable: fault with invalid opcode */
+
+/*
+ * Take an AST from an interrupt.
+ * On PCB stack.
+ * sp->	gs	-> edx
+ *	fs	-> ecx
+ *	es	-> eax
+ *	ds	-> trapno
+ *	edx	-> code
+ *	ecx
+ *	eax
+ *	eip
+ *	cs
+ *	efl
+ *	esp
+ *	ss
+ */
+ast_from_interrupt:
+	/* Pop the interrupt-style save area, then rebuild a trap-style
+	   frame (zero trapno/code) so the AST can be delivered and
+	   unwound through the normal trap return path. */
+	POP_SEGMENTS_ISR		/* restore all registers ... */
+	POP_AREGS_ISR
+	pushl	$0			/* zero code */
+	pushl	$0			/* zero trap number */
+	pusha				/* save general registers */
+	PUSH_SEGMENTS_ISR		/* save segment registers */
+	SET_KERNEL_SEGMENTS(%dx)	/* switch to kernel segments */
+	CPU_NUMBER(%edx)
+	TIME_TRAP_UENTRY
+
+	movl	CX(EXT(kernel_stack),%edx),%esp
+					/* switch to kernel stack */
+	call	EXT(i386_astintr)	/* take the AST */
+	popl	%esp			/* back to PCB stack */
+	jmp	_return_from_trap	/* return */
+
+#if MACH_KDB
+/*
+ * kdb_kintr:	enter kdb from keyboard interrupt.
+ * Chase down the stack frames until we find one whose return
+ * address is the interrupt handler.   At that point, we have:
+ *
+ * frame->	saved %ebp
+ *		return address in interrupt handler
+ * #ifndef MACH_XEN
+ *		1st parameter iunit
+ *		2nd parameter saved SPL
+ *		3rd parameter return address
+ *		4th parameter registers
+ *		saved SPL
+ *		saved IRQ
+ * #endif
+ *		return address == return_to_iret_i
+ *		saved %edx
+ *		saved %ecx
+ *		saved %eax
+ *		saved %eip
+ *		saved %cs
+ *		saved %efl
+ *
+ * OR:
+ * frame->	saved %ebp
+ *		return address in interrupt handler
+ * #ifndef MACH_XEN
+ *		iunit
+ *		saved SPL
+ *		irq
+ * #endif
+ *		return address == return_to_iret
+ *		pointer to save area on old stack
+ *	      [ saved %ebx, if accurate timing ]
+ *
+ * old stack:	saved %gs
+ *		saved %fs
+ *		saved %es
+ *		saved %ds
+ *		saved %edx
+ *		saved %ecx
+ *		saved %eax
+ *		saved %eip
+ *		saved %cs
+ *		saved %efl
+ *
+ * Call kdb, passing it that register save area.
+ */
+
+/* Distance from a frame pointer to its interrupt-handler return address
+   (fewer saved words on the Xen frame). */
+#ifdef MACH_XEN
+#define RET_OFFSET 8
+#else	/* MACH_XEN */
+#define RET_OFFSET 32
+#endif	/* MACH_XEN */
+
+ENTRY(kdb_kintr)
+	movl	%ebp,%eax		/* save caller`s frame pointer */
+	movl	$EXT(return_to_iret),%ecx /* interrupt return address 1 */
+	movl	$_return_to_iret_i,%edx /* interrupt return address 2 */
+
+0:	cmpl	RET_OFFSET(%eax),%ecx	/* does this frame return to */
+					/* interrupt handler (1)? */
+	je	1f
+	cmpl	RET_OFFSET(%eax),%edx	/* interrupt handler (2)? */
+	je	2f			/* if not: */
+	movl	(%eax),%eax		/* try next frame */
+	testl	%eax,%eax
+	jnz	0b
+	ud2				/* oops, didn't find frame, fix me :/ */
+
+/* Hijack the found return address so the interrupt returns into kdb. */
+1:	movl	$kdb_from_iret,RET_OFFSET(%eax)
+	ret				/* returns to kernel/user stack */
+
+2:	movl	$kdb_from_iret_i,RET_OFFSET(%eax)
+					/* returns to interrupt stack */
+	ret
+
+/*
+ * On return from keyboard interrupt, we will execute
+ * kdb_from_iret_i
+ *	if returning to an interrupt on the interrupt stack
+ * kdb_from_iret
+ *	if returning to an interrupt on the user or kernel stack
+ */
+kdb_from_iret:
+			/* save regs in known locations */
+#if	STAT_TIME
+	pushl	%ebx			/* caller`s %ebx is in reg */
+#else
+	movl	4(%esp),%eax		/* get caller`s %ebx */
+	pushl	%eax			/* push on stack */
+#endif
+	pushl	%ebp
+	pushl	%esi
+	pushl	%edi
+	pushl	%esp			/* pass regs */
+	call	EXT(kdb_kentry)		/* to kdb */
+	addl	$4,%esp			/* pop parameters */
+	popl	%edi			/* restore registers */
+	popl	%esi
+	popl	%ebp
+#if	STAT_TIME
+	popl	%ebx
+#else
+	popl	%eax
+	movl	%eax,4(%esp)
+#endif
+	jmp	EXT(return_to_iret)	/* normal interrupt return */
+
+kdb_from_iret_i:			/* on interrupt stack */
+	pop	%edx			/* restore saved registers */
+	pop	%ecx
+	pop	%eax
+	pushl	$0			/* zero error code */
+	pushl	$0			/* zero trap number */
+	pusha				/* save general registers */
+	PUSH_SEGMENTS			/* save segment registers */
+	pushl	%esp			/* pass regs, */
+	pushl	$0			/* code, */
+	pushl	$-1			/* type to kdb */
+	call	EXT(kdb_trap)
+	addl	$12,%esp		/* remove parameters */
+	POP_SEGMENTS			/* restore segment registers */
+	popa				/* restore general registers */
+	addl	$8,%esp			/* discard trap number and error code */
+	iret
+
+#endif	/* MACH_KDB */
+
+#if MACH_TTD
+/*
+ * Same code as that above for the keyboard entry into kdb.
+ *
+ * NOTE(review): unlike kdb_kintr this frame walk has no null-frame
+ * check (kdb_kintr tests %eax and stops with ud2), so an unmatched
+ * frame chain loops until it faults; it also uses a fixed offset of 16
+ * rather than RET_OFFSET (which differs under MACH_XEN) -- confirm.
+ */
+ENTRY(kttd_intr)
+	movl	%ebp,%eax		/* save caller`s frame pointer */
+	movl	$EXT(return_to_iret),%ecx /* interrupt return address 1 */
+	movl	$_return_to_iret_i,%edx /* interrupt return address 2 */
+
+0:	cmpl	16(%eax),%ecx		/* does this frame return to */
+					/* interrupt handler (1)? */
+	je	1f
+	cmpl	16(%eax),%edx		/* interrupt handler (2)? */
+	je	2f			/* if not: */
+	movl	(%eax),%eax		/* try next frame */
+	jmp	0b
+
+1:	movl	$ttd_from_iret,16(%eax) /* returns to kernel/user stack */
+	ret
+
+2:	movl	$ttd_from_iret_i,16(%eax)
+					/* returns to interrupt stack */
+	ret
+
+/*
+ * On return from keyboard interrupt, we will execute
+ * ttd_from_iret_i
+ *	if returning to an interrupt on the interrupt stack
+ * ttd_from_iret
+ *	if returning to an interrupt on the user or kernel stack
+ */
+ttd_from_iret:
+			/* save regs in known locations */
+#if	STAT_TIME
+	pushl	%ebx			/* caller`s %ebx is in reg */
+#else
+	movl	4(%esp),%eax		/* get caller`s %ebx */
+	pushl	%eax			/* push on stack */
+#endif
+	pushl	%ebp
+	pushl	%esi
+	pushl	%edi
+	pushl	%esp			/* pass regs */
+	call	_kttd_netentry		/* to kdb */
+	addl	$4,%esp			/* pop parameters */
+	popl	%edi			/* restore registers */
+	popl	%esi
+	popl	%ebp
+#if	STAT_TIME
+	popl	%ebx
+#else
+	popl	%eax
+	movl	%eax,4(%esp)
+#endif
+	jmp	EXT(return_to_iret)	/* normal interrupt return */
+
+ttd_from_iret_i:			/* on interrupt stack */
+	pop	%edx			/* restore saved registers */
+	pop	%ecx
+	pop	%eax
+	pushl	$0			/* zero error code */
+	pushl	$0			/* zero trap number */
+	pusha				/* save general registers */
+	PUSH_SEGMENTS_ISR		/* save segment registers */
+	pushl	%esp			/* pass regs, */
+	pushl	$0			/* code, */
+	pushl	$-1			/* type to kdb */
+	call	_kttd_trap
+	addl	$12,%esp		/* remove parameters */
+	POP_SEGMENTS_ISR		/* restore segment registers */
+	popa				/* restore general registers */
+	addl	$8,%esp			/* discard trap number and error code */
+	iret
+
+#endif	/* MACH_TTD */
+
+/*
+ * System call enters through a call gate. Flags are not saved -
+ * we must shuffle stack to look like trap save area.
+ *
+ * esp->	old eip
+ *		old cs
+ *		old esp
+ *		old ss
+ *
+ * eax contains system call number.
+ */
+ENTRY(syscall)
+syscall_entry:
+	pushf				/* save flags as soon as possible */
+syscall_entry_2:
+	cld				/* clear direction flag */
+
+	pushl	%eax			/* save system call number */
+	pushl	$0			/* clear trap number slot */
+
+	pusha				/* save the general registers */
+	PUSH_SEGMENTS			/* and the segment registers */
+	SET_KERNEL_SEGMENTS(%dx)	/* switch to kernel data segment */
+
+/*
+ * Shuffle eflags,eip,cs into proper places
+ * (the call gate pushed them in a different order than a trap would).
+ */
+
+	movl	R_EIP(%esp),%ebx	/* eflags are in EIP slot */
+	movl	R_CS(%esp),%ecx		/* eip is in CS slot */
+	movl	R_EFLAGS(%esp),%edx	/* cs is in EFLAGS slot */
+	movl	%ecx,R_EIP(%esp)	/* fix eip */
+	movl	%edx,R_CS(%esp)		/* fix cs */
+	movl	%ebx,R_EFLAGS(%esp)	/* fix eflags */
+
+	CPU_NUMBER(%edx)
+	TIME_TRAP_SENTRY
+
+	movl	CX(EXT(kernel_stack),%edx),%ebx
+					/* get current kernel stack */
+	xchgl	%ebx,%esp		/* switch stacks - %ebx points to */
+					/* user registers. */
+					/* user regs pointer already set */
+
+/*
+ * Check for MACH or emulated system call
+ */
+syscall_entry_3:
+	movl	MY(ACTIVE_THREAD),%edx
+					/* point to current thread */
+	movl	TH_TASK(%edx),%edx	/* point to task */
+	movl	TASK_EMUL(%edx),%edx	/* get emulation vector */
+	orl	%edx,%edx		/* if none, */
+	je	syscall_native		/* do native system call */
+	movl	%eax,%ecx		/* copy system call number */
+	subl	DISP_MIN(%edx),%ecx	/* get displacement into syscall */
+					/* vector table */
+	jl	syscall_native		/* too low - native system call */
+	cmpl	DISP_COUNT(%edx),%ecx	/* check range */
+	jnl	syscall_native		/* too high - native system call */
+	movl	DISP_VECTOR(%edx,%ecx,4),%edx
+					/* get the emulation vector */
+	orl	%edx,%edx		/* emulated system call if not zero */
+	jnz	syscall_emul
+
+/*
+ * Native system call.
+ * Mach syscall numbers are negative; negate to index the trap table.
+ */
+syscall_native:
+	negl	%eax			/* get system call number */
+	jl	mach_call_range		/* out of range if it was positive */
+	cmpl	EXT(mach_trap_count),%eax /* check system call table bounds */
+	jg	mach_call_range		/* error if out of range */
+#if 0 /* debug hack to show the syscall number on the screen */
+	movb	%al,%dl
+	shrb	$4,%dl
+	orb	$0x30,%dl
+	movb	$0x0f,%dh
+	movw	%dx,0xb800a
+	movb	%al,%dl
+	andb	$0xf,%dl
+	orb	$0x30,%dl
+	movb	$0xf,%dh
+	movw	%dx,0xb800c
+#endif
+	shll	$4,%eax			/* manual indexing of mach_trap_t (16-byte entries) */
+	movl	EXT(mach_trap_table)(%eax),%ecx
+					/* get number of arguments */
+	jecxz	mach_call_call		/* skip argument copy if none */
+
+	movl	R_UESP(%ebx),%esi	/* get user stack pointer */
+	lea	4(%esi,%ecx,4),%esi	/* skip user return address, */
+					/* and point past last argument */
+	movl	$USER_DS,%edx		/* use user data segment for accesses */
+	mov	%dx,%fs
+	movl	%esp,%edx		/* save kernel ESP for error recovery */
+
+0:	subl	$4,%esi
+	RECOVER(mach_call_addr_push)
+	pushl	%fs:(%esi)		/* push argument on stack */
+	loop	0b			/* loop for all arguments */
+
+mach_call_call:
+
+#ifdef DEBUG
+	testb	$0xff,EXT(syscall_trace)
+	jz	0f
+	pushl	%eax
+	call	EXT(syscall_trace_print)
+	/* will return with syscallofs still (or again) in eax */
+	addl	$4,%esp
+0:
+#endif /* DEBUG */
+
+	call	*EXT(mach_trap_table)+4(%eax)
+					/* call procedure */
+	movl	%esp,%ecx		/* get kernel stack */
+	or	$(KERNEL_STACK_SIZE-1),%ecx /* %ecx = last byte of kernel stack */
+	movl	-3-IKS_SIZE(%ecx),%esp	/* switch back to PCB stack */
+	movl	%eax,R_EAX(%esp)	/* save return value */
+	jmp	_return_from_trap	/* return to user */
+
+/*
+ * Address out of range.  Change to page fault.
+ * %esi holds failing address.
+ */
+mach_call_addr_push:
+	movl	%edx,%esp		/* clean parameters from stack */
+mach_call_addr:
+	/* Forge a user-read page-fault trap frame and deliver it through
+	   the normal trap path (%ebx still points at the saved user regs). */
+	movl	%esi,R_CR2(%ebx)	/* set fault address */
+	movl	$(T_PAGE_FAULT),R_TRAPNO(%ebx)
+					/* set page-fault trap */
+	movl	$(T_PF_USER),R_ERR(%ebx)
+					/* set error code - read user space */
+	jmp	_take_trap		/* treat as a trap */
+
+/*
+ * System call out of range.  Treat as invalid-instruction trap.
+ * (? general protection?)
+ */
+mach_call_range:
+	movl	$(T_INVALID_OPCODE),R_TRAPNO(%ebx)
+					/* set invalid-operation trap */
+	movl	$0,R_ERR(%ebx)		/* clear error code */
+	jmp	_take_trap		/* treat as a trap */
+
+/*
+ * User space emulation of system calls.
+ * edx - user address to handle syscall
+ *
+ * User stack will become:
+ * uesp->	eflags
+ *		eip
+ * eax still contains syscall number.
+ *
+ * Pushes the saved eflags/eip onto the user stack and redirects the
+ * user-mode return address to the emulation handler, so the handler
+ * can "iret" back to the original call site.
+ */
+syscall_emul:
+	movl	$USER_DS,%edi		/* use user data segment for accesses */
+	mov	%di,%fs
+
+/* XXX what about write-protected pages? */
+	movl	R_UESP(%ebx),%edi	/* get user stack pointer */
+	subl	$8,%edi			/* push space for new arguments */
+	movl	R_EFLAGS(%ebx),%eax	/* move flags */
+	RECOVER(syscall_addr)
+	movl	%eax,%fs:0(%edi)	/* to user stack */
+	movl	R_EIP(%ebx),%eax	/* move eip */
+	RECOVER(syscall_addr)
+	movl	%eax,%fs:4(%edi)	/* to user stack */
+	movl	%edi,R_UESP(%ebx)	/* set new user stack pointer */
+	movl	%edx,R_EIP(%ebx)	/* change return address to trap */
+	movl	%ebx,%esp		/* back to PCB stack */
+	jmp	_return_from_trap	/* return to user */
+
+/*
+ * Address error - address is in %edi.
+ */
+syscall_addr:
+	movl	%edi,R_CR2(%ebx)	/* set fault address */
+	movl	$(T_PAGE_FAULT),R_TRAPNO(%ebx)
+					/* set page-fault trap */
+	movl	$(T_PF_USER),R_ERR(%ebx)
+					/* set error code - read user space */
+	jmp	_take_trap		/* treat as a trap */
+
+
+	.data
+/*
+ * cpu_features aliases the two words that follow (it carries no storage
+ * of its own): cpu_features[0] == cpu_features_edx and
+ * cpu_features[1] == cpu_features_ecx.  See CPU_HAS_FEATURE in locore.h.
+ */
+DATA(cpu_features)
+DATA(cpu_features_edx)
+	.long	0
+DATA(cpu_features_ecx)
+	.long	0
+	.text
+
+END(syscall)
+
+/* Discover what kind of cpu we have; return the family number
+   (3, 4, 5, 6, for 386, 486, 586, 686 respectively).
+   A 386 cannot toggle EFLAGS.AC; a pre-CPUID 486 cannot toggle
+   EFLAGS.ID; otherwise CPUID is available and gives the family. */
+ENTRY(discover_x86_cpu_type)
+	pushl	%ebp			/* Save frame pointer */
+	movl	%esp,%ebp		/* Save stack pointer */
+	and	$~0x3,%esp		/* Align stack pointer */
+
+#if 0
+/* Seems to hang with kvm linux 4.3.0 */
+#ifdef	MACH_HYP
+#warning Assuming not Cyrix CPU
+#else	/* MACH_HYP */
+	inb	$0xe8,%al		/* Enable ID flag for Cyrix CPU ... */
+	andb	$0x80,%al		/* ... in CCR4 reg bit7 */
+	outb	%al,$0xe8
+#endif	/* MACH_HYP */
+#endif
+
+	pushfl				/* Fetch flags ... */
+	popl	%eax			/*  ... into eax */
+	movl	%eax,%ecx		/* Save original flags for return */
+	xorl	$(EFL_AC+EFL_ID),%eax	/* Attempt to toggle ID and AC bits */
+	pushl	%eax			/* Save flags... */
+	popfl				/*  ... In EFLAGS */
+	pushfl				/* Fetch flags back ... */
+	popl	%eax			/*  ... into eax */
+	pushl	%ecx			/* From ecx... */
+	popfl				/* ... restore original flags */
+
+	xorl	%ecx,%eax		/* See if any bits didn't change */
+	testl	$EFL_AC,%eax		/* Test AC bit */
+	jnz	0f			/* Skip next bit if AC toggled */
+	movl	$3,%eax			/* Return value is 386 */
+	jmp	9f			/* And RETURN */
+
+0:	testl	$EFL_ID,%eax		/* Test ID bit */
+	jnz	0f			/* Skip next bit if ID toggled */
+	movl	$4,%eax			/* Return value is 486 */
+	jmp	9f			/* And RETURN */
+
+	/* We are a modern enough processor to have the CPUID instruction;
+	   use it to find out what we are. */
+0:	movl	$1,%eax			/* Fetch CPU type info ... */
+	cpuid				/*  ... into eax */
+	movl	%ecx,cpu_features_ecx	/* Keep a copy */
+	movl	%edx,cpu_features_edx	/* Keep a copy */
+	shrl	$8,%eax			/* Slide family bits down */
+	andl	$15,%eax		/* And select them */
+
+9:	movl	%ebp,%esp		/* Restore stack pointer */
+	popl	%ebp			/* Restore frame pointer */
+	ret				/* And return */
+
+
+/* */
+/*
+ * Utility routines.
+ */
+
+/*
+ * Copy from user address space - generic version.
+ * arg0:	user address
+ * arg1:	kernel address
+ * arg2:	byte count
+ *
+ * Returns 0 on success, 1 if a fault occurred on the user address.
+ * RECOVER registers a fault-recovery entry so a bad user pointer
+ * lands in copyin_fail instead of panicking (see recover_table).
+ */
+ENTRY(copyin)
+	pushl	%esi
+	pushl	%edi			/* save registers */
+
+	movl	8+S_ARG0,%esi		/* get user start address */
+	movl	8+S_ARG1,%edi		/* get kernel destination address */
+	movl	8+S_ARG2,%edx		/* get count */
+
+	movl	$USER_DS,%eax		/* use user data segment for accesses */
+	mov	%ax,%ds
+
+	/*cld*/				/* count up: default mode in all GCC code */
+	movl	%edx,%ecx		/* move by longwords first */
+	shrl	$2,%ecx
+	RECOVER(copyin_fail)
+	rep
+	movsl				/* move longwords */
+	movl	%edx,%ecx		/* now move remaining bytes */
+	andl	$3,%ecx
+	RECOVER(copyin_fail)
+	rep
+	movsb
+	xorl	%eax,%eax		/* return 0 for success */
+
+copyin_ret:
+	mov	%ss,%di			/* restore DS to kernel segment */
+	mov	%di,%ds
+
+	popl	%edi			/* restore registers */
+	popl	%esi
+	ret				/* and return */
+
+copyin_fail:
+	movl	$1,%eax			/* return 1 for failure */
+	jmp	copyin_ret		/* pop frame and return */
+
+/*
+ * Copy from user address space - version for copying messages.
+ * arg0:	user address
+ * arg1:	kernel address
+ * arg2:	byte count - must be a multiple of four
+ * arg3:	kernel byte count
+ *
+ * NOTE(review): arg3 is documented but never read by this code -- confirm.
+ */
+ENTRY(copyinmsg)
+	pushl	%esi
+	pushl	%edi			/* save registers */
+
+	movl	8+S_ARG0,%esi		/* get user start address */
+	movl	8+S_ARG1,%edi		/* get kernel destination address */
+	movl	8+S_ARG2,%ecx		/* get count */
+	movl	%ecx,%edx		/* save count */
+
+	movl	$USER_DS,%eax		/* use user data segment for accesses */
+	mov	%ax,%ds
+
+	/*cld*/				/* count up: default mode in all GCC code */
+	shrl	$2,%ecx
+	RECOVER(copyinmsg_fail)
+	rep
+	movsl				/* move longwords */
+	xorl	%eax,%eax		/* return 0 for success */
+
+	/* %ds is still USER_DS here, so write the header size through %es
+	   (kernel segment). */
+	movl	8+S_ARG1,%edi
+	movl	%edx,%es:MSGH_MSGH_SIZE(%edi) /* set msgh_size */
+
+copyinmsg_ret:
+	mov	%ss,%di			/* restore DS to kernel segment */
+	mov	%di,%ds
+
+	popl	%edi			/* restore registers */
+	popl	%esi
+	ret				/* and return */
+
+copyinmsg_fail:
+	movl	$1,%eax			/* return 1 for failure */
+	jmp	copyinmsg_ret		/* pop frame and return */
+
+/*
+ * Copy to user address space - generic version.
+ * arg0:	kernel address
+ * arg1:	user address
+ * arg2:	byte count
+ *
+ * Returns 0 on success, 1 on fault.  On a plain i386 (CPU family 3)
+ * the kernel ignores user write protection, so the slow path
+ * (copyout_retry) checks the ptes by hand first.
+ */
+ENTRY(copyout)
+	pushl	%esi
+	pushl	%edi			/* save registers */
+
+	movl	8+S_ARG0,%esi		/* get kernel start address */
+	movl	8+S_ARG1,%edi		/* get user start address */
+	movl	8+S_ARG2,%edx		/* get count */
+
+	movl	$USER_DS,%eax		/* use user data segment for accesses */
+	mov	%ax,%es
+
+#if !defined(MACH_HYP) && !PAE
+	cmpl	$3,machine_slot+SUB_TYPE_CPU_TYPE
+	jbe	copyout_retry		/* Use slow version on i386 */
+#endif	/* !defined(MACH_HYP) && !PAE */
+
+	/*cld*/				/* count up: always this way in GCC code */
+	movl	%edx,%ecx		/* move by longwords first */
+	shrl	$2,%ecx
+	RECOVER(copyout_fail)
+	rep
+	movsl
+	movl	%edx,%ecx		/* now move remaining bytes */
+	andl	$3,%ecx
+	RECOVER(copyout_fail)
+	rep
+	movsb				/* move */
+	xorl	%eax,%eax		/* return 0 for success */
+
+copyout_ret:
+	mov	%ss,%di			/* restore ES to kernel segment */
+	mov	%di,%es
+
+	popl	%edi			/* restore registers */
+	popl	%esi
+	ret				/* and return */
+
+copyout_fail:
+	movl	$1,%eax			/* return 1 for failure */
+	jmp	copyout_ret		/* pop frame and return */
+
+/*
+ * Copy to user address space - version for copying messages.
+ * arg0:	kernel address
+ * arg1:	user address
+ * arg2:	byte count - must be a multiple of four
+ *
+ * Returns 0 on success, 1 on fault.
+ */
+ENTRY(copyoutmsg)
+	pushl	%esi
+	pushl	%edi			/* save registers */
+
+	movl	8+S_ARG0,%esi		/* get kernel start address */
+	movl	8+S_ARG1,%edi		/* get user start address */
+	movl	8+S_ARG2,%ecx		/* get count */
+
+	movl	$USER_DS,%eax		/* use user data segment for accesses */
+	mov	%ax,%es
+
+#if !defined(MACH_HYP) && !PAE
+	movl	8+S_ARG2,%edx		/* copyout_retry expects count here */
+	cmpl	$3,machine_slot+SUB_TYPE_CPU_TYPE
+	jbe	copyout_retry		/* Use slow version on i386 */
+#endif	/* !defined(MACH_HYP) && !PAE */
+
+	shrl	$2,%ecx			/* move by longwords */
+	RECOVER(copyoutmsg_fail)
+	rep
+	movsl
+	xorl	%eax,%eax		/* return 0 for success */
+
+copyoutmsg_ret:
+	mov	%ss,%di			/* restore ES to kernel segment */
+	mov	%di,%es
+
+	popl	%edi			/* restore registers */
+	popl	%esi
+	ret				/* and return */
+
+copyoutmsg_fail:
+	movl	$1,%eax			/* return 1 for failure */
+	jmp	copyoutmsg_ret		/* pop frame and return */
+
+#if !defined(MACH_HYP) && !PAE
+/*
+ * Check whether user address space is writable
+ * before writing to it - i386 hardware is broken.
+ *
+ * Slow copyout path for CPU family 3 (386): walk the page directory
+ * and page table for the destination, and if the page is present but
+ * not writable, clear its valid bit so the write takes a real fault
+ * that the VM system can resolve.  Copies at most one page per
+ * iteration, re-checking on each page boundary.
+ */
+copyout_retry:
+	movl	%cr3,%ecx		/* point to page directory */
+	movl	%edi,%eax		/* get page directory bits */
+	shrl	$(PDESHIFT),%eax	/* from user address */
+	movl	KERNELBASE(%ecx,%eax,PTE_SIZE),%ecx
+					/* get page directory pointer */
+	testl	$(PTE_V),%ecx		/* present? */
+	jz	0f			/* if not, fault is OK */
+	andl	$(PTE_PFN),%ecx		/* isolate page frame address */
+	movl	%edi,%eax		/* get page table bits */
+	shrl	$(PTESHIFT),%eax
+	andl	$(PTEMASK),%eax		/* from user address */
+	leal	KERNELBASE(%ecx,%eax,PTE_SIZE),%ecx
+					/* point to page table entry */
+	movl	(%ecx),%eax		/* get it */
+	testl	$(PTE_V),%eax		/* present? */
+	jz	0f			/* if not, fault is OK */
+	testl	$(PTE_W),%eax		/* writable? */
+	jnz	0f			/* OK if so */
+/*
+ * Not writable - must fake a fault.  Turn off access to the page.
+ */
+	andl	$(PTE_INVALID),(%ecx)	/* turn off valid bit */
+	movl	%cr3,%eax		/* invalidate TLB */
+	movl	%eax,%cr3
+0:
+
+/*
+ * Copy only what fits on the current destination page.
+ * Check for write-fault again on the next page.
+ */
+	leal	NBPG(%edi),%eax		/* point to */
+	andl	$(-NBPG),%eax		/* start of next page */
+	subl	%edi,%eax		/* get number of bytes to that point */
+	cmpl	%edx,%eax		/* bigger than count? */
+	jle	1f			/* if so, */
+	movl	%edx,%eax		/* use count */
+1:
+
+	/*cld*/				/* count up: always this way in GCC code */
+	movl	%eax,%ecx		/* move by longwords first */
+	shrl	$2,%ecx
+	RECOVER(copyout_fail)
+	RETRY(copyout_retry)
+	rep
+	movsl
+	movl	%eax,%ecx		/* now move remaining bytes */
+	andl	$3,%ecx
+	RECOVER(copyout_fail)
+	RETRY(copyout_retry)
+	rep
+	movsb				/* move */
+	subl	%eax,%edx		/* decrement count */
+	jg	copyout_retry		/* restart on next page if not done */
+	xorl	%eax,%eax		/* return 0 for success */
+	jmp	copyout_ret
+#endif	/* !defined(MACH_HYP) && !PAE */
+
+/*
+ * int inst_fetch(int eip, int cs);
+ *
+ * Fetch instruction byte.  Return -1 if invalid address.
+ */
+ENTRY(inst_fetch)
+	movl	S_ARG1, %eax		/* get segment */
+	movw	%ax,%fs			/* into FS */
+	movl	S_ARG0, %eax		/* get offset */
+	RETRY(EXT(inst_fetch))		/* re-load FS on retry */
+	RECOVER(_inst_fetch_fault)
+	movzbl	%fs:(%eax),%eax		/* load instruction byte */
+	ret
+
+_inst_fetch_fault:
+	movl	$-1,%eax		/* return -1 if error */
+	ret
+
+
+/*
+ * Done with recovery and retry tables.
+ */
+	RECOVER_TABLE_END
+	RETRY_TABLE_END
+
+
+
+/*
+ * cpu_shutdown()
+ * Force reboot
+ *
+ * Load an empty IDT and then divide by zero: the #DE exception cannot
+ * be delivered, which escalates to a triple fault and resets the CPU.
+ */
+null_idt:
+	.space	8 * 32
+
+null_idtr:
+	.word	8 * 32 - 1
+	.long	null_idt
+
+Entry(cpu_shutdown)
+	lidt	null_idtr		/* disable the interrupt handler */
+	xor	%ecx,%ecx		/* generate a divide by zero */
+	div	%ecx,%eax		/* reboot now */
+	ret				/* this will "never" be executed */
diff --git a/i386/i386/locore.h b/i386/i386/locore.h
new file mode 100644
index 0000000..374c8cf
--- /dev/null
+++ b/i386/i386/locore.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2006, 2011 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _MACHINE_LOCORE_H_
+#define _MACHINE_LOCORE_H_
+
+#include <sys/types.h>
+
+#include <kern/sched_prim.h>
+
+/*
+ * Fault recovery in copyin/copyout routines.
+ */
+struct recovery {
+	vm_offset_t	fault_addr;
+	vm_offset_t	recover_addr;
+};
+
+extern struct recovery	recover_table[];
+extern struct recovery	recover_table_end[];
+
+/*
+ * Recovery from Successful fault in copyout does not
+ * return directly - it retries the pte check, since
+ * the 386 ignores write protection in kernel mode.
+ */
+extern struct recovery	retry_table[];
+extern struct recovery	retry_table_end[];
+
+
+extern int call_continuation (continuation_t continuation);
+
+extern int discover_x86_cpu_type (void);
+
+/* User/kernel copy primitives (locore.S): all return 0 on success and
+   non-zero if a fault occurred while accessing user memory. */
+extern int copyin (const void *userbuf, void *kernelbuf, size_t cn);
+extern int copyinmsg (const void *userbuf, void *kernelbuf, size_t cn, size_t kn);
+extern int copyout (const void *kernelbuf, void *userbuf, size_t cn);
+extern int copyoutmsg (const void *kernelbuf, void *userbuf, size_t cn);
+
+extern int inst_fetch (int eip, int cs);
+
+extern void cpu_shutdown (void);
+
+extern int syscall (void);
+extern int syscall64 (void);
+
+/* cpu_features[0] holds the CPUID.1 EDX flags, cpu_features[1] the ECX
+   flags; both are filled in by discover_x86_cpu_type (locore.S). */
+extern unsigned int cpu_features[2];
+
+#define CPU_FEATURE_FPU 0
+#define CPU_FEATURE_VME 1
+#define CPU_FEATURE_DE 2
+#define CPU_FEATURE_PSE 3
+#define CPU_FEATURE_TSC 4
+#define CPU_FEATURE_MSR 5
+#define CPU_FEATURE_PAE 6
+#define CPU_FEATURE_MCE 7
+#define CPU_FEATURE_CX8 8
+#define CPU_FEATURE_APIC 9
+#define CPU_FEATURE_SEP 11
+#define CPU_FEATURE_MTRR 12
+#define CPU_FEATURE_PGE 13
+#define CPU_FEATURE_MCA 14
+#define CPU_FEATURE_CMOV 15
+#define CPU_FEATURE_PAT 16
+#define CPU_FEATURE_PSE_36 17
+#define CPU_FEATURE_PSN 18
+#define CPU_FEATURE_CFLSH 19
+#define CPU_FEATURE_DS 21
+#define CPU_FEATURE_ACPI 22
+#define CPU_FEATURE_MMX 23
+#define CPU_FEATURE_FXSR 24
+#define CPU_FEATURE_SSE 25
+#define CPU_FEATURE_SSE2 26
+#define CPU_FEATURE_SS 27
+#define CPU_FEATURE_HTT 28
+#define CPU_FEATURE_TM 29
+#define CPU_FEATURE_PBE 31
+#define CPU_FEATURE_XSAVE (1*32 + 26)
+
+#define CPU_HAS_FEATURE(feature) (cpu_features[(feature) / 32] & (1 << ((feature) % 32)))
+
+#endif /* _MACHINE_LOCORE_H_ */
+
diff --git a/i386/i386/loose_ends.c b/i386/i386/loose_ends.c
new file mode 100644
index 0000000..7e7f943
--- /dev/null
+++ b/i386/i386/loose_ends.c
@@ -0,0 +1,49 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+
+#include <i386/i386/loose_ends.h>
+
+#ifndef NDEBUG
+#define MACH_ASSERT 1
+#else
+#define MACH_ASSERT 0
+#endif /* NDEBUG */
+
+ /*
+  * For now we will always go to single user mode, since there is
+  * no way to pass this request through the boot.
+  */
+
+/* Someone with time should write code to set cpuspeed automagically */
+int cpuspeed = 4;
+/* Busy-wait loop: the volatile counter prevents the compiler from
+   optimizing the delay away. */
+#define DELAY(n) { volatile int N = cpuspeed * (n); while (--N > 0); }
+/* Spin for roughly N units, scaled by the cpuspeed calibration above. */
+void
+delay(int n)
+{
+	DELAY(n);
+}
diff --git a/i386/i386/loose_ends.h b/i386/i386/loose_ends.h
new file mode 100644
index 0000000..c085527
--- /dev/null
+++ b/i386/i386/loose_ends.h
@@ -0,0 +1,33 @@
+/*
+ * Other useful functions?
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Other useful functions?
+ *
+ */
+
+#ifndef _LOOSE_ENDS_H_
+#define _LOOSE_ENDS_H_
+
+#include <mach/std_types.h>
+
+/* Busy-wait for roughly N units; scaled by the cpuspeed calibration
+   variable in loose_ends.c. */
+extern void delay (int n);
+
+#endif /* _LOOSE_ENDS_H_ */
diff --git a/i386/i386/mach_i386.srv b/i386/i386/mach_i386.srv
new file mode 100644
index 0000000..48d16ba
--- /dev/null
+++ b/i386/i386/mach_i386.srv
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+/* Build the kernel-side (server) stubs for the mach_i386 interface. */
+#define KERNEL_SERVER 1
+
+#include <mach/machine/mach_i386.defs>
diff --git a/i386/i386/mach_param.h b/i386/i386/mach_param.h
new file mode 100644
index 0000000..d7d4dee
--- /dev/null
+++ b/i386/i386/mach_param.h
@@ -0,0 +1,31 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Machine-dependent parameters for i386.
+ */
+
+/* Clock interrupt frequency, in ticks per second. */
+#define HZ (100)
+	/* clock tick each 10 ms. */
diff --git a/i386/i386/machine_routines.h b/i386/i386/machine_routines.h
new file mode 100644
index 0000000..d9dd94b
--- /dev/null
+++ b/i386/i386/machine_routines.h
@@ -0,0 +1,38 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_MACHINE_ROUTINES_H_
+#define _I386_MACHINE_ROUTINES_H_
+
+/*
+ * The i386 has a set of machine-dependent interfaces.
+ */
+/* Kernel-side server symbols for the mach_i386 interface (generated
+   from i386/i386/mach_i386.srv). */
+#define MACHINE_SERVER		mach_i386_server
+#define MACHINE_SERVER_HEADER	"i386/i386/mach_i386.server.h"
+#define MACHINE_SERVER_ROUTINE	mach_i386_server_routine
+
+#endif /* _I386_MACHINE_ROUTINES_H_ */
+
diff --git a/i386/i386/machine_task.c b/i386/i386/machine_task.c
new file mode 100644
index 0000000..8bebf36
--- /dev/null
+++ b/i386/i386/machine_task.c
@@ -0,0 +1,80 @@
+/* Machine specific data for a task on i386.
+
+ Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+
+ Written by Marcus Brinkmann. Glued into GNU Mach by Thomas Schwinge.
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <kern/lock.h>
+#include <mach/mach_types.h>
+#include <kern/slab.h>
+#include <kern/task.h>
+#include <machine/task.h>
+
+#include <machine/io_perm.h>
+
+
+/* The cache which holds our IO permission bitmaps. */
+struct kmem_cache machine_task_iopb_cache;
+
+
+/* Initialize the machine task module. The function is called once at
+ start up by task_init in kern/task.c. */
/* Initialize the machine task module.  The function is called once at
   start up by task_init in kern/task.c.  Creates the slab cache from
   which per-task IO permission bitmaps (IOPB_BYTES each) are drawn.  */
void
machine_task_module_init (void)
{
  kmem_cache_init (&machine_task_iopb_cache, "i386_task_iopb", IOPB_BYTES, 0,
		   NULL, 0);
}
+
+
+/* Initialize the machine specific part of task TASK. */
/* Initialize the machine specific part of task TASK: no IO permission
   bitmap is allocated until the task first asks for IO ports.  */
void
machine_task_init (task_t task)
{
  task->machine.iopb_size = 0;
  task->machine.iopb = 0;		/* bitmap allocated lazily by io_perm code */
  simple_lock_init (&task->machine.iopb_lock);
}
+
+
+/* Destroy the machine specific part of task TASK and release all
+ associated resources. */
+void
+machine_task_terminate (const task_t task)
+{
+ if (task->machine.iopb)
+ kmem_cache_free (&machine_task_iopb_cache,
+ (vm_offset_t) task->machine.iopb);
+}
+
+
+/* Try to release as much memory from the machine specific data in
+ task TASK. */
/* Try to release as much memory from the machine specific data in
   task TASK.  A bitmap whose effective size has dropped to zero is
   returned to the cache; the lock protects against a concurrent
   io_perm update re-populating it while we free.  */
void
machine_task_collect (task_t task)
{
  simple_lock (&task->machine.iopb_lock);
  if (task->machine.iopb_size == 0 && task->machine.iopb)
    {
      kmem_cache_free (&machine_task_iopb_cache,
		       (vm_offset_t) task->machine.iopb);
      task->machine.iopb = 0;
    }
  simple_unlock (&task->machine.iopb_lock);
}
diff --git a/i386/i386/machspl.h b/i386/i386/machspl.h
new file mode 100644
index 0000000..bbb2675
--- /dev/null
+++ b/i386/i386/machspl.h
@@ -0,0 +1,29 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/* XXX replaced by... */
+#include <i386/spl.h>
+
diff --git a/i386/i386/model_dep.h b/i386/i386/model_dep.h
new file mode 100644
index 0000000..5369e28
--- /dev/null
+++ b/i386/i386/model_dep.h
@@ -0,0 +1,68 @@
+/*
+ * Arch dependent functions
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Arch dependent functions.
+ *
+ */
+
+#ifndef _I386AT_MODEL_DEP_H_
+#define _I386AT_MODEL_DEP_H_
+
+#include <mach/std_types.h>
+
+/*
+ * Address to hold AP boot code, held in ASM
+ */
+extern phys_addr_t apboot_addr;
+
+/*
+ * Find devices. The system is alive.
+ */
+extern void machine_init (void);
+
+/* Conserve power on processor CPU. */
+extern void machine_idle (int cpu);
+
+extern void resettodr (void);
+
+extern void startrtclock (void);
+
+/*
+ * Halt a cpu.
+ */
+extern void halt_cpu (void) __attribute__ ((noreturn));
+
+/*
+ * Halt the system or reboot.
+ */
+extern void halt_all_cpus (boolean_t reboot) __attribute__ ((noreturn));
+
+/*
+ * Make cpu pause a bit.
+ */
+extern void machine_relax (void);
+
+/*
+ * C boot entrypoint - called by boot_entry in boothdr.S.
+ */
+extern void c_boot_entry(vm_offset_t bi);
+
+#endif /* _I386AT_MODEL_DEP_H_ */
diff --git a/i386/i386/mp_desc.c b/i386/i386/mp_desc.c
new file mode 100644
index 0000000..61a7607
--- /dev/null
+++ b/i386/i386/mp_desc.c
@@ -0,0 +1,357 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <kern/cpu_number.h>
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <kern/smp.h>
+#include <kern/startup.h>
+#include <kern/kmutex.h>
+#include <mach/machine.h>
+#include <mach/xen.h>
+#include <vm/vm_kern.h>
+
+#include <i386/mp_desc.h>
+#include <i386/lock.h>
+#include <i386/apic.h>
+#include <i386/locore.h>
+#include <i386/fpu.h>
+#include <i386/gdt.h>
+#include <i386at/idt.h>
+#include <i386at/int_init.h>
+#include <i386/cpu.h>
+#include <i386/smp.h>
+
+#include <i386at/model_dep.h>
+#include <machine/ktss.h>
+#include <machine/smp.h>
+#include <machine/tss.h>
+#include <machine/io_perm.h>
+#include <machine/vm_param.h>
+
+#include <i386at/acpi_parse_apic.h>
+#include <string.h>
+
+/*
+ * The i386 needs an interrupt stack to keep the PCB stack from being
+ * overrun by interrupts. All interrupt stacks MUST lie at lower addresses
+ * than any thread`s kernel stack.
+ */
+
+/*
+ * Addresses of bottom and top of interrupt stacks.
+ */
+vm_offset_t int_stack_top[NCPUS];
+vm_offset_t int_stack_base[NCPUS];
+
+/*
+ * Whether we are currently handling an interrupt.
+ * To catch code erroneously taking non-irq-safe locks.
+ */
+#ifdef MACH_LDEBUG
+unsigned long in_interrupt[NCPUS];
+#endif
+
+/* Interrupt stack allocation */
+uint8_t solid_intstack[NCPUS*INTSTACK_SIZE] __aligned(NCPUS*INTSTACK_SIZE);
+
+void
+interrupt_stack_alloc(void)
+{
+ int i;
+
+ /*
+ * Set up pointers to the top of the interrupt stack.
+ */
+
+ for (i = 0; i < NCPUS; i++) {
+ int_stack_base[i] = (vm_offset_t) &solid_intstack[i * INTSTACK_SIZE];
+ int_stack_top[i] = (vm_offset_t) &solid_intstack[(i + 1) * INTSTACK_SIZE] - 4;
+ }
+}
+
+#if NCPUS > 1
+/*
+ * Flag to mark SMP init by BSP complete
+ */
+int bspdone;
+
+phys_addr_t apboot_addr;
+extern void *apboot, *apbootend;
+extern volatile ApicLocalUnit* lapic;
+
+/*
+ * Multiprocessor i386/i486 systems use a separate copy of the
+ * GDT, IDT, LDT, and kernel TSS per processor. The first three
+ * are separate to avoid lock contention: the i386 uses locked
+ * memory cycles to access the descriptor tables. The TSS is
+ * separate since each processor needs its own kernel stack,
+ * and since using a TSS marks it busy.
+ */
+
+/*
+ * Descriptor tables.
+ */
+struct mp_desc_table *mp_desc_table[NCPUS] = { 0 };
+
+/*
+ * Pointer to TSS for access in load_context.
+ */
+struct task_tss *mp_ktss[NCPUS] = { 0 };
+
+/*
+ * Pointer to GDT to reset the KTSS busy bit.
+ */
+struct real_descriptor *mp_gdt[NCPUS] = { 0 };
+
+/*
+ * Boot-time tables, for initialization and master processor.
+ */
+extern struct real_gate idt[IDTSZ];
+extern struct real_descriptor gdt[GDTSZ];
+extern struct real_descriptor ldt[LDTSZ];
+
+/*
+ * Allocate and initialize the per-processor descriptor tables.
+ */
+
/*
 * Set up the descriptor tables for processor MYCPU.
 *
 * CPU 0 (the master) keeps using the boot-time idt/gdt/ldt/ktss;
 * only the mp_ktss/mp_gdt pointers are recorded for it.  Every other
 * CPU gets a freshly allocated, zero-filled mp_desc_table whose
 * contents are filled in later by the ap_*_init calls in cpu_setup.
 * Returns 0 for the master, MYCPU otherwise.
 */
int
mp_desc_init(int mycpu)
{
	struct mp_desc_table *mpt;
	vm_offset_t mem;

	if (mycpu == 0) {
		/*
		 * Master CPU uses the tables built at boot time.
		 * Just set the TSS and GDT pointers.
		 */
		mp_ktss[mycpu] = (struct task_tss *) &ktss;
		mp_gdt[mycpu] = gdt;
		return 0;
	}
	else {
		/*
		 * Allocate tables for other CPUs
		 */
		if (!init_alloc_aligned(sizeof(struct mp_desc_table), &mem))
			panic("not enough memory for descriptor tables");
		mpt = (struct mp_desc_table *)phystokv(mem);

		mp_desc_table[mycpu] = mpt;
		mp_ktss[mycpu] = &mpt->ktss;
		mp_gdt[mycpu] = mpt->gdt;

		/*
		 * Zero the tables.  sizeof(idt)/sizeof(gdt)/sizeof(ldt)
		 * refer to the boot-time arrays, which have the same
		 * element counts as the per-CPU copies in mp_desc_table.
		 */
		memset(mpt->idt, 0, sizeof(idt));
		memset(mpt->gdt, 0, sizeof(gdt));
		memset(mpt->ldt, 0, sizeof(ldt));
		memset(&mpt->ktss, 0, sizeof(struct task_tss));

		return mycpu;
	}
}
+
/* XXX should be adjusted per CPU speed */
int simple_lock_pause_loop = 100;

unsigned int simple_lock_pause_count = 0;	/* debugging */

/*
 * Brief busy-wait used by loops that try to acquire locks
 * out of order, giving the current holder a chance to release.
 */
void
simple_lock_pause(void)
{
	static volatile int dummy;
	int spin;

	simple_lock_pause_count++;

	/*
	 * Burn a fixed number of iterations; the volatile sink
	 * keeps the compiler from deleting the delay loop.
	 */
	for (spin = simple_lock_pause_loop; spin > 0; spin--)
		dummy++;
}
+
/*
 * Processor control interface (unimplemented stub on i386):
 * logs the request and fails.
 */
kern_return_t
cpu_control(int cpu, const int *info, unsigned int count)
{
	printf("cpu_control(%d, %p, %d) not implemented\n",
	       cpu, info, count);
	return KERN_FAILURE;
}
+
/*
 * Poke processor CPU remotely: translates the kernel cpu number to
 * its local APIC id and asks the SMP layer to deliver a pmap-update
 * interrupt to it.
 */
void
interrupt_processor(int cpu)
{
	smp_pmap_update(apic_get_cpu_apic_id(cpu));
}
+
/*
 * Enable paging on the calling (application) processor: optionally
 * turn on PAE, set CR0.PG, clear the cache-disable bits, and enable
 * global pages when the CPU supports them.  A no-op under MACH_HYP,
 * where the hypervisor owns the control registers.
 */
static void
paging_enable(void)
{
#ifndef MACH_HYP
    /* Turn paging on.
     * TODO: Why does setting the WP bit here cause a crash?
     */
#if PAE
    set_cr4(get_cr4() | CR4_PAE);
#endif
    set_cr0(get_cr0() | CR0_PG /* | CR0_WP */);
    set_cr0(get_cr0() & ~(CR0_CD | CR0_NW));
    if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
        set_cr4(get_cr4() | CR4_PGE);
#endif	/* MACH_HYP */
}
+
/*
 * Per-AP initialization sequence, run on the application processor
 * itself (from cpu_ap_main).  The order matters: a temporary
 * identity mapping is needed until paging is on and the permanent
 * page directory is installed; the descriptor tables must exist
 * before the TSS/LDT that reference them are loaded.
 * Ends by launching this CPU's first thread, so it presumably does
 * not return to the caller.
 */
void
cpu_setup(int cpu)
{
    pmap_make_temporary_mapping();
    printf("AP=(%u) tempmap done\n", cpu);

    paging_enable();
    flush_instr_queue();
    printf("AP=(%u) paging done\n", cpu);

    init_percpu(cpu);
    mp_desc_init(cpu);
    printf("AP=(%u) mpdesc done\n", cpu);

    ap_gdt_init(cpu);
    printf("AP=(%u) gdt done\n", cpu);

    ap_idt_init(cpu);
    printf("AP=(%u) idt done\n", cpu);

    ap_int_init(cpu);
    printf("AP=(%u) int done\n", cpu);

    ap_ldt_init(cpu);
    printf("AP=(%u) ldt done\n", cpu);

    ap_ktss_init(cpu);
    printf("AP=(%u) ktss done\n", cpu);

    pmap_remove_temporary_mapping();
    printf("AP=(%u) remove tempmap done\n", cpu);

    pmap_set_page_dir();
    flush_tlb();
    printf("AP=(%u) reset page dir done\n", cpu);

    /* Initialize machine_slot fields with the cpu data */
    machine_slot[cpu].cpu_subtype = CPU_SUBTYPE_AT386;
    machine_slot[cpu].cpu_type = machine_slot[0].cpu_type;

    init_fpu();
    lapic_setup();
    lapic_enable();
    cpu_launch_first_thread(THREAD_NULL);
}
+
+void
+cpu_ap_main()
+{
+ int cpu = cpu_number();
+
+ do {
+ cpu_pause();
+ } while (bspdone != cpu);
+
+ __sync_synchronize();
+
+ cpu_setup(cpu);
+}
+
/*
 * Start application processor CPU: translate to its local APIC id
 * and ask the SMP layer to run the trampoline previously copied to
 * apboot_addr.  Returns KERN_SUCCESS on success; on failure the
 * boot cannot proceed, so we print and hang instead of returning.
 */
kern_return_t
cpu_start(int cpu)
{
    int err;

    assert(machine_slot[cpu].running != TRUE);

    uint16_t apic_id = apic_get_cpu_apic_id(cpu);

    printf("Trying to enable: %d at 0x%lx\n", apic_id, apboot_addr);

    err = smp_startup_cpu(apic_id, apboot_addr);

    if (!err) {
        printf("Started cpu %d (lapic id %04x)\n", cpu, apic_id);
        return KERN_SUCCESS;
    }
    /* Fatal: spin forever rather than continue with a half-started AP. */
    printf("FATAL: Cannot init AP %d\n", cpu);
    for (;;);
}
+
/*
 * BSP-side SMP bring-up: copy the real-mode trampoline into place,
 * then start each AP in turn.  The bspdone counter serializes the
 * APs (each one spins in cpu_ap_main until bspdone reaches its cpu
 * number), and the BSP waits for machine_slot[cpu].running before
 * moving on, so only one CPU initializes at a time.
 */
void
start_other_cpus(void)
{
	int ncpus = smp_get_numcpus();

	//Copy cpu initialization assembly routine
	memcpy((void*) phystokv(apboot_addr), (void*) &apboot,
	       (uint32_t)&apbootend - (uint32_t)&apboot);

	unsigned cpu;

	splhigh();

	/* Disable IOAPIC interrupts (IPIs not affected).
	 * Clearing this flag is similar to masking all
	 * IOAPIC interrupts individually.
	 *
	 * This is done to prevent IOAPIC interrupts from
	 * interfering with SMP startup. splhigh() may be enough for BSP,
	 * but I'm not sure. We cannot control the lapic
	 * on APs because we don't have execution on them yet.
	 */
	lapic_disable();

	bspdone = 0;
	for (cpu = 1; cpu < ncpus; cpu++) {
		machine_slot[cpu].running = FALSE;

		//Start cpu
		printf("Starting AP %d\n", cpu);
		cpu_start(cpu);

		/* Release the AP we just started... */
		bspdone++;
		/* ...and wait until it reports itself running. */
		do {
			cpu_pause();
		} while (machine_slot[cpu].running == FALSE);

		__sync_synchronize();
	}
	printf("BSP: Completed SMP init\n");

	/* Re-enable IOAPIC interrupts as per setup */
	lapic_enable();
}
+#endif /* NCPUS > 1 */
diff --git a/i386/i386/mp_desc.h b/i386/i386/mp_desc.h
new file mode 100644
index 0000000..dc3a7dc
--- /dev/null
+++ b/i386/i386/mp_desc.h
@@ -0,0 +1,98 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_MP_DESC_H_
+#define _I386_MP_DESC_H_
+
+#include <mach/kern_return.h>
+
+#if MULTIPROCESSOR
+
+/*
+ * Multiprocessor i386/i486 systems use a separate copy of the
+ * GDT, IDT, LDT, and kernel TSS per processor. The first three
+ * are separate to avoid lock contention: the i386 uses locked
+ * memory cycles to access the descriptor tables. The TSS is
+ * separate since each processor needs its own kernel stack,
+ * and since using a TSS marks it busy.
+ */
+
+#include "seg.h"
+#include "tss.h"
+#include <i386at/idt.h>
+#include "gdt.h"
+#include "ldt.h"
+
+/*
+ * The descriptor tables are together in a structure
+ * allocated one per processor (except for the boot processor).
+ */
+struct mp_desc_table {
+ struct real_gate idt[IDTSZ]; /* IDT */
+ struct real_descriptor gdt[GDTSZ]; /* GDT */
+ struct real_descriptor ldt[LDTSZ]; /* LDT */
+ struct task_tss ktss;
+};
+
+/*
+ * They are pointed to by a per-processor array.
+ */
+extern struct mp_desc_table *mp_desc_table[NCPUS];
+
+/*
+ * The kernel TSS gets its own pointer.
+ */
+extern struct task_tss *mp_ktss[NCPUS];
+
+/*
+ * So does the GDT.
+ */
+extern struct real_descriptor *mp_gdt[NCPUS];
+
+extern uint8_t solid_intstack[];
+
+extern int bspdone;
+
+/*
+ * Each CPU calls this routine to set up its descriptor tables.
+ */
+extern int mp_desc_init(int);
+
+
+extern void interrupt_processor(int cpu);
+
+
+#endif /* MULTIPROCESSOR */
+
+extern void start_other_cpus(void);
+
+extern kern_return_t cpu_start(int cpu);
+
+extern kern_return_t cpu_control(int cpu, const int *info, unsigned int count);
+
+extern void interrupt_stack_alloc(void);
+
+#endif /* _I386_MP_DESC_H_ */
diff --git a/i386/i386/msr.h b/i386/i386/msr.h
new file mode 100644
index 0000000..8f09b80
--- /dev/null
+++ b/i386/i386/msr.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2023 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _MACHINE_MSR_H_
+#define _MACHINE_MSR_H_
+
+#define MSR_REG_EFER 0xC0000080
+#define MSR_REG_STAR 0xC0000081
+#define MSR_REG_LSTAR 0xC0000082
+#define MSR_REG_CSTAR 0xC0000083
+#define MSR_REG_FMASK 0xC0000084
+#define MSR_REG_FSBASE 0xC0000100
+#define MSR_REG_GSBASE 0xC0000101
+
+#define MSR_EFER_SCE 0x00000001
+
+#ifndef __ASSEMBLER__
+
/* Write VALUE to model-specific register REGADDR.  The WRMSR
   instruction takes the MSR index in %ecx and the value split into
   %edx:%eax (high:low 32 bits).  */
static inline void wrmsr(uint32_t regaddr, uint64_t value)
{
  uint32_t low = (uint32_t) value, high = ((uint32_t) (value >> 32));
  asm volatile("wrmsr"
               :
               : "c" (regaddr), "a" (low), "d" (high)
               : "memory" /* wrmsr may cause a read from memory, so
                           * make the compiler flush any changes */
               );
}
+
/* Read model-specific register REGADDR.  RDMSR takes the MSR index
   in %ecx and returns the value in %edx:%eax, which we reassemble
   into a 64-bit result.  */
static inline uint64_t rdmsr(uint32_t regaddr)
{
  uint32_t low, high;
  asm volatile("rdmsr"
               : "=a" (low), "=d" (high)
               : "c" (regaddr)
               );
  return ((uint64_t)high << 32) | low;
}
+#endif /* __ASSEMBLER__ */
+
+#endif /* _MACHINE_MSR_H_ */
diff --git a/i386/i386/pcb.c b/i386/i386/pcb.c
new file mode 100644
index 0000000..e890155
--- /dev/null
+++ b/i386/i386/pcb.c
@@ -0,0 +1,958 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <stddef.h>
+#include <string.h>
+
+#include <mach/std_types.h>
+#include <mach/kern_return.h>
+#include <mach/thread_status.h>
+#include <mach/exec/exec.h>
+#include <mach/xen.h>
+
+#include "vm_param.h"
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
+#include <kern/slab.h>
+#include <vm/vm_kern.h>
+#include <vm/pmap.h>
+
+#include <i386/thread.h>
+#include <i386/proc_reg.h>
+#include <i386/seg.h>
+#include <i386/user_ldt.h>
+#include <i386/db_interface.h>
+#include <i386/fpu.h>
+#include "eflags.h"
+#include "gdt.h"
+#include "ldt.h"
+#include "msr.h"
+#include "ktss.h"
+#include "pcb.h"
+
+#include <machine/tss.h>
+
+#if NCPUS > 1
+#include <i386/mp_desc.h>
+#endif
+
+struct kmem_cache pcb_cache;
+
+vm_offset_t kernel_stack[NCPUS]; /* top of active_stack */
+
+/*
+ * stack_attach:
+ *
+ * Attach a kernel stack to a thread.
+ */
+
/*
 * Attach kernel stack STACK to THREAD and forge an initial kernel
 * frame on it so that the first switch to this thread "returns"
 * into Thread_continue with CONTINUATION in a register.
 */
void stack_attach(
	thread_t	thread,
	vm_offset_t	stack,
	void		(*continuation)(thread_t))
{
	counter(if (++c_stacks_current > c_stacks_max)
		c_stacks_max = c_stacks_current);

	thread->kernel_stack = stack;

	/*
	 *	We want to run continuation, giving it as an argument
	 *	the return value from Load_context/Switch_context.
	 *	Thread_continue takes care of the mismatch between
	 *	the argument-passing/return-value conventions.
	 *	This function will not return normally,
	 *	so we don`t have to worry about a return address.
	 */
	/* Fake saved kernel context: resume at Thread_continue, with
	   the continuation in %ebx and an empty frame pointer chain. */
	STACK_IKS(stack)->k_eip = (long) Thread_continue;
	STACK_IKS(stack)->k_ebx = (long) continuation;
	STACK_IKS(stack)->k_esp = (long) STACK_IEL(stack);
	STACK_IKS(stack)->k_ebp = (long) 0;

	/*
	 *	Point top of kernel stack to user`s registers.
	 */
	STACK_IEL(stack)->saved_state = USER_REGS(thread);
}
+
+/*
+ * stack_detach:
+ *
+ * Detaches a kernel stack from a thread, returning the old stack.
+ */
+
+vm_offset_t stack_detach(thread_t thread)
+{
+ vm_offset_t stack;
+
+ counter(if (--c_stacks_current < c_stacks_min)
+ c_stacks_min = c_stacks_current);
+
+ stack = thread->kernel_stack;
+ thread->kernel_stack = 0;
+
+ return stack;
+}
+
+#if NCPUS > 1
+#define curr_gdt(mycpu) (mp_gdt[mycpu])
+#define curr_ktss(mycpu) (mp_ktss[mycpu])
+#else
+#define curr_gdt(mycpu) ((void)(mycpu), gdt)
+#define curr_ktss(mycpu) ((void)(mycpu), (struct task_tss *)&ktss)
+#endif
+
+#define gdt_desc_p(mycpu,sel) \
+ ((struct real_descriptor *)&curr_gdt(mycpu)[sel_idx(sel)])
+
/*
 * switch_ktss:
 *
 * Point the current CPU's privileged state at PCB: the kernel-entry
 * stack pointer in the TSS, the LDT, the per-thread GDT slots, the
 * fs/gs bases (64-bit native), the debug registers and the FPU
 * context.  Called on every context switch before the new thread
 * runs.
 */
void switch_ktss(pcb_t pcb)
{
	int			mycpu = cpu_number();
    {
	vm_offset_t		pcb_stack_top;

	/*
	 *	Save a pointer to the top of the "kernel" stack -
	 *	actually the place in the PCB where a trap into
	 *	kernel mode will push the registers.
	 *	The location depends on V8086 mode.  If we are
	 *	not in V8086 mode, then a trap into the kernel
	 *	won`t save the v86 segments, so we leave room.
	 */

#if !defined(__x86_64__) || defined(USER32)
	pcb_stack_top = (pcb->iss.efl & EFL_VM)
			? (long) (&pcb->iss + 1)
			: (long) (&pcb->iss.v86_segs);
#else
	pcb_stack_top = (vm_offset_t) (&pcb->iss + 1);
#endif

#ifdef __x86_64__
	/* x86_64 requires a 16-byte aligned stack at kernel entry.  */
	assert((pcb_stack_top & 0xF) == 0);
#endif

#ifdef	MACH_RING1
	/* No IO mask here */
	if (hyp_stack_switch(KERNEL_DS, pcb_stack_top))
		panic("stack_switch");
#else	/* MACH_RING1 */
	/* Traps to ring 0 will now push registers into this PCB. */
#ifdef __x86_64__
	curr_ktss(mycpu)->tss.rsp0 = pcb_stack_top;
#else /* __x86_64__ */
	curr_ktss(mycpu)->tss.esp0 = pcb_stack_top;
#endif /* __x86_64__ */
#endif	/* MACH_RING1 */
    }

    {
	user_ldt_t	tldt = pcb->ims.ldt;
	/*
	 * Set the thread`s LDT.
	 */
	if (tldt == 0) {
	    /*
	     * Use system LDT.
	     */
#ifdef	MACH_PV_DESCRIPTORS
	    hyp_set_ldt(&ldt, LDTSZ);
#else	/* MACH_PV_DESCRIPTORS */
	    /* Avoid the reload when the system LDT is already active. */
	    if (get_ldt() != KERNEL_LDT)
		set_ldt(KERNEL_LDT);
#endif	/* MACH_PV_DESCRIPTORS */
	}
	else {
	    /*
	     * Thread has its own LDT.
	     */
#ifdef	MACH_PV_DESCRIPTORS
	    hyp_set_ldt(tldt->ldt,
			(tldt->desc.limit_low|(tldt->desc.limit_high<<16)) /
			sizeof(struct real_descriptor));
#else	/* MACH_PV_DESCRIPTORS */
	    /* Install the thread's LDT descriptor in this CPU's GDT,
	       then load it. */
	    *gdt_desc_p(mycpu,USER_LDT) = tldt->desc;
	    set_ldt(USER_LDT);
#endif	/* MACH_PV_DESCRIPTORS */
	}
    }

#ifdef	MACH_PV_DESCRIPTORS
    {
	int i;
	for (i=0; i < USER_GDT_SLOTS; i++) {
	    /* Only ask the hypervisor to update slots that changed. */
	    if (memcmp(gdt_desc_p (mycpu, USER_GDT + (i << 3)),
			&pcb->ims.user_gdt[i], sizeof pcb->ims.user_gdt[i])) {
		union {
		    struct real_descriptor real_descriptor;
		    uint64_t descriptor;
		} user_gdt;
		user_gdt.real_descriptor = pcb->ims.user_gdt[i];

		if (hyp_do_update_descriptor(kv_to_ma(gdt_desc_p (mycpu, USER_GDT + (i << 3))),
					user_gdt.descriptor))
		    panic("couldn't set user gdt %d\n",i);
	    }
	}
    }
#else /* MACH_PV_DESCRIPTORS */

    /* Copy in the per-thread GDT slots.  No reloading is necessary
       because just restoring the segment registers on the way back to
       user mode reloads the shadow registers from the in-memory GDT.  */
    memcpy (gdt_desc_p (mycpu, USER_GDT),
        pcb->ims.user_gdt, sizeof pcb->ims.user_gdt);
#endif /* MACH_PV_DESCRIPTORS */

#if defined(__x86_64__) && !defined(USER32)
    /* 64-bit user threads address TLS via the fs/gs base MSRs. */
    wrmsr(MSR_REG_FSBASE, pcb->ims.sbs.fsbase);
    wrmsr(MSR_REG_GSBASE, pcb->ims.sbs.gsbase);
#endif

    db_load_context(pcb);

    /*
     * Load the floating-point context, if necessary.
     */
    fpu_load_context(pcb);

}
+
/* If NEW_IOPB is not null, the SIZE denotes the number of bytes in
   the new bitmap.  Installs it as the IO permission bitmap of the
   current CPU's kernel TSS; otherwise marks the TSS as having no
   bitmap.  Expects iopb_lock to be held.  */
void
update_ktss_iopb (unsigned char *new_iopb, io_port_t size)
{
  struct task_tss *tss = curr_ktss (cpu_number ());

  if (new_iopb && size > 0)
    {
      /* Position the bitmap so it ends right at the barrier byte;
	 the offset is relative to the start of the TSS.  */
      tss->tss.io_bit_map_offset
	= offsetof (struct task_tss, barrier) - size;
      memcpy (((char *) tss) + tss->tss.io_bit_map_offset,
	      new_iopb, size);
    }
  else
    /* Out-of-range offset: no IO bitmap in effect for this task.  */
    tss->tss.io_bit_map_offset = IOPB_INVAL;
}
+
+/*
+ * stack_handoff:
+ *
+ * Move the current thread's kernel stack to the new thread.
+ */
+
/*
 * Move the current thread's kernel stack directly to NEW without a
 * full context switch: save OLD's FPU state, switch address spaces
 * and the kernel-TSS IO bitmap if the task changes, load NEW's
 * per-thread CPU state, and transfer the stack.
 */
void stack_handoff(
	thread_t	old,
	thread_t	new)
{
	int		mycpu = cpu_number();
	vm_offset_t	stack;

	/*
	 *	Save FP registers if in use.
	 */
	fpu_save_context(old);

	/*
	 *	Switch address maps if switching tasks.
	 */
    {
	task_t old_task, new_task;

	if ((old_task = old->task) != (new_task = new->task)) {
		PMAP_DEACTIVATE_USER(vm_map_pmap(old_task->map),
				     old, mycpu);
		PMAP_ACTIVATE_USER(vm_map_pmap(new_task->map),
				   new, mycpu);

		simple_lock (&new_task->machine.iopb_lock);
#if NCPUS>1
#warning SMP support missing (avoid races with io_perm_modify).
#else
	/* This optimization only works on a single processor
	   machine, where old_task's iopb can not change while
	   we are switching.  */
	if (old_task->machine.iopb || new_task->machine.iopb)
#endif
	    update_ktss_iopb (new_task->machine.iopb,
			      new_task->machine.iopb_size);
	        simple_unlock (&new_task->machine.iopb_lock);
	}
    }

	/*
	 *	Load the rest of the user state for the new thread
	 */
	switch_ktss(new->pcb);

	/*
	 *	Switch to new thread
	 */
	stack = current_stack();
	old->kernel_stack = 0;
	new->kernel_stack = stack;
	percpu_assign(active_thread, new);

	/*
	 *	Switch exception link to point to new
	 *	user registers.
	 */

	STACK_IEL(stack)->saved_state = USER_REGS(new);

}
+
+/*
+ * Switch to the first thread on a CPU.
+ */
/*
 * Switch to the first thread on a CPU: install NEW's per-thread CPU
 * state, then jump into its saved kernel context (Load_context is
 * the assembly half and does not return here).
 */
void load_context(thread_t new)
{
	switch_ktss(new->pcb);
	Load_context(new);
}
+
+/*
+ * Switch to a new thread.
+ * Save the old thread`s kernel state or continuation,
+ * and return it.
+ */
/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.  Mirrors stack_handoff's task-switch logic, but
 * performs a full register switch via Switch_context.
 */
thread_t switch_context(
	thread_t	old,
	continuation_t	continuation,
	thread_t	new)
{
	/*
	 *	Save FP registers if in use.
	 */
	fpu_save_context(old);

	/*
	 *	Switch address maps if switching tasks.
	 */
    {
	task_t old_task, new_task;
	int	mycpu = cpu_number();

	if ((old_task = old->task) != (new_task = new->task)) {
		PMAP_DEACTIVATE_USER(vm_map_pmap(old_task->map),
				     old, mycpu);
		PMAP_ACTIVATE_USER(vm_map_pmap(new_task->map),
				   new, mycpu);

		simple_lock (&new_task->machine.iopb_lock);
#if NCPUS>1
#warning SMP support missing (avoid races with io_perm_modify).
#else
	/* This optimization only works on a single processor
	   machine, where old_task's iopb can not change while
	   we are switching.  */
	if (old_task->machine.iopb || new_task->machine.iopb)
#endif
	    update_ktss_iopb (new_task->machine.iopb,
			      new_task->machine.iopb_size);
	        simple_unlock (&new_task->machine.iopb_lock);
	}
    }

	/*
	 *	Load the rest of the user state for the new thread
	 */
	switch_ktss(new->pcb);
	return Switch_context(old, continuation, new);
}
+
/*
 * One-time module initialization: create the PCB slab cache
 * (stack-aligned, so the saved-state layout assumptions hold) and
 * initialize the FPU module.
 */
void pcb_module_init(void)
{
	kmem_cache_init(&pcb_cache, "pcb", sizeof(struct pcb),
			KERNEL_STACK_ALIGN, NULL, 0);

	fpu_module_init();
}
+
/*
 * Allocate and initialize the PCB for THREAD, a new thread in
 * PARENT_TASK.  The saved state is zeroed (so no kernel data leaks
 * to user space) and preset so the thread starts in user mode with
 * flat user segments.
 */
void pcb_init(task_t parent_task, thread_t thread)
{
	pcb_t		pcb;

	pcb = (pcb_t) kmem_cache_alloc(&pcb_cache);
	if (pcb == 0)
		panic("pcb_init");

	counter(if (++c_threads_current > c_threads_max)
			c_threads_max = c_threads_current);

	/*
	 *	We can't let random values leak out to the user.
	 */
	memset(pcb, 0, sizeof *pcb);
	simple_lock_init(&pcb->lock);

	/*
	 *	Guarantee that the bootstrapped thread will be in user
	 *	mode.
	 */
	pcb->iss.cs = USER_CS;
	pcb->iss.ss = USER_DS;
#if !defined(__x86_64__) || defined(USER32)
	pcb->iss.ds = USER_DS;
	pcb->iss.es = USER_DS;
	pcb->iss.fs = USER_DS;
	pcb->iss.gs = USER_DS;
#endif
	pcb->iss.efl = EFL_USER_SET;

	thread->pcb = pcb;

	/* This is a new thread for the current task, make it inherit our FPU
	   state.  */
	if (current_thread() && parent_task == current_task())
		fpinherit(current_thread(), thread);
}
+
/*
 * Release THREAD's PCB and everything hanging off it: the FPU save
 * area and the private LDT, if they were ever allocated.
 */
void pcb_terminate(thread_t thread)
{
	pcb_t		pcb = thread->pcb;

	counter(if (--c_threads_current < c_threads_min)
			c_threads_min = c_threads_current);

	if (pcb->ims.ifps != 0)
		fp_free(pcb->ims.ifps);
	if (pcb->ims.ldt != 0)
		user_ldt_free(pcb->ims.ldt);
	kmem_cache_free(&pcb_cache, (vm_offset_t) pcb);
	thread->pcb = 0;
}
+
+/*
+ * pcb_collect:
+ *
+ * Attempt to free excess pcb memory.
+ */
+
/* Attempt to free excess pcb memory.  Nothing to reclaim on i386;
   the hook exists for the machine-independent layer.  */
void pcb_collect(__attribute__((unused)) const thread_t thread)
{
}
+
+
+/*
+ * thread_setstatus:
+ *
+ * Set the status of the specified thread.
+ */
+
+kern_return_t thread_setstatus(
+ thread_t thread,
+ int flavor,
+ thread_state_t tstate,
+ unsigned int count)
+{
+ switch (flavor) {
+ case i386_THREAD_STATE:
+ case i386_REGS_SEGS_STATE:
+ {
+ struct i386_thread_state *state;
+ struct i386_saved_state *saved_state;
+
+ if (count < i386_THREAD_STATE_COUNT) {
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ state = (struct i386_thread_state *) tstate;
+
+ if (flavor == i386_REGS_SEGS_STATE) {
+ /*
+ * Code and stack selectors must not be null,
+ * and must have user protection levels.
+ * Only the low 16 bits are valid.
+ */
+ state->cs &= 0xffff;
+ state->ss &= 0xffff;
+#if !defined(__x86_64__) || defined(USER32)
+ state->ds &= 0xffff;
+ state->es &= 0xffff;
+ state->fs &= 0xffff;
+ state->gs &= 0xffff;
+#endif
+
+ if (state->cs == 0 || (state->cs & SEL_PL) != SEL_PL_U
+ || state->ss == 0 || (state->ss & SEL_PL) != SEL_PL_U)
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ saved_state = USER_REGS(thread);
+
+ /*
+ * General registers
+ */
+#if defined(__x86_64__) && !defined(USER32)
+ saved_state->r8 = state->r8;
+ saved_state->r9 = state->r9;
+ saved_state->r10 = state->r10;
+ saved_state->r11 = state->r11;
+ saved_state->r12 = state->r12;
+ saved_state->r13 = state->r13;
+ saved_state->r14 = state->r14;
+ saved_state->r15 = state->r15;
+ saved_state->edi = state->rdi;
+ saved_state->esi = state->rsi;
+ saved_state->ebp = state->rbp;
+ saved_state->uesp = state->ursp;
+ saved_state->ebx = state->rbx;
+ saved_state->edx = state->rdx;
+ saved_state->ecx = state->rcx;
+ saved_state->eax = state->rax;
+ saved_state->eip = state->rip;
+ saved_state->efl = (state->rfl & ~EFL_USER_CLEAR)
+ | EFL_USER_SET;
+#else
+ saved_state->edi = state->edi;
+ saved_state->esi = state->esi;
+ saved_state->ebp = state->ebp;
+ saved_state->uesp = state->uesp;
+ saved_state->ebx = state->ebx;
+ saved_state->edx = state->edx;
+ saved_state->ecx = state->ecx;
+ saved_state->eax = state->eax;
+ saved_state->eip = state->eip;
+ saved_state->efl = (state->efl & ~EFL_USER_CLEAR)
+ | EFL_USER_SET;
+#endif /* __x86_64__ && !USER32 */
+
+#if !defined(__x86_64__) || defined(USER32)
+ /*
+ * Segment registers. Set differently in V8086 mode.
+ */
+ if (saved_state->efl & EFL_VM) {
+ /*
+ * Set V8086 mode segment registers.
+ */
+ saved_state->cs = state->cs & 0xffff;
+ saved_state->ss = state->ss & 0xffff;
+ saved_state->v86_segs.v86_ds = state->ds & 0xffff;
+ saved_state->v86_segs.v86_es = state->es & 0xffff;
+ saved_state->v86_segs.v86_fs = state->fs & 0xffff;
+ saved_state->v86_segs.v86_gs = state->gs & 0xffff;
+
+ /*
+ * Zero protected mode segment registers.
+ */
+ saved_state->ds = 0;
+ saved_state->es = 0;
+ saved_state->fs = 0;
+ saved_state->gs = 0;
+
+ if (thread->pcb->ims.v86s.int_table) {
+ /*
+ * Hardware assist on.
+ */
+ thread->pcb->ims.v86s.flags =
+ saved_state->efl & (EFL_TF | EFL_IF);
+ }
+ } else
+#endif
+ if (flavor == i386_THREAD_STATE) {
+ /*
+ * 386 mode. Set segment registers for flat
+ * 32-bit address space.
+ */
+ saved_state->cs = USER_CS;
+ saved_state->ss = USER_DS;
+#if !defined(__x86_64__) || defined(USER32)
+ saved_state->ds = USER_DS;
+ saved_state->es = USER_DS;
+ saved_state->fs = USER_DS;
+ saved_state->gs = USER_DS;
+#endif
+ }
+ else {
+ /*
+ * User setting segment registers.
+ * Code and stack selectors have already been
+ * checked. Others will be reset by 'iret'
+ * if they are not valid.
+ */
+ saved_state->cs = state->cs;
+ saved_state->ss = state->ss;
+#if !defined(__x86_64__) || defined(USER32)
+ saved_state->ds = state->ds;
+ saved_state->es = state->es;
+ saved_state->fs = state->fs;
+ saved_state->gs = state->gs;
+#endif
+ }
+ break;
+ }
+
+ case i386_FLOAT_STATE: {
+
+ if (count < i386_FLOAT_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ return fpu_set_state(thread,
+ (struct i386_float_state *) tstate);
+ }
+
+ /*
+ * Temporary - replace by i386_io_map
+ */
+ case i386_ISA_PORT_MAP_STATE: {
+ //register struct i386_isa_port_map_state *state;
+
+ if (count < i386_ISA_PORT_MAP_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+#if 0
+ /*
+ * If the thread has no ktss yet,
+ * we must allocate one.
+ */
+
+ state = (struct i386_isa_port_map_state *) tstate;
+ tss = thread->pcb->ims.io_tss;
+ if (tss == 0) {
+ tss = iopb_create();
+ thread->pcb->ims.io_tss = tss;
+ }
+
+ memcpy(tss->bitmap,
+ state->pm,
+ sizeof state->pm);
+#endif
+ break;
+ }
+#if !defined(__x86_64__) || defined(USER32)
+ case i386_V86_ASSIST_STATE:
+ {
+ struct i386_v86_assist_state *state;
+ vm_offset_t int_table;
+ int int_count;
+
+ if (count < i386_V86_ASSIST_STATE_COUNT)
+ return KERN_INVALID_ARGUMENT;
+
+ state = (struct i386_v86_assist_state *) tstate;
+ int_table = state->int_table;
+ int_count = state->int_count;
+
+ if (int_table >= VM_MAX_USER_ADDRESS ||
+ int_table +
+ int_count * sizeof(struct v86_interrupt_table)
+ > VM_MAX_USER_ADDRESS)
+ return KERN_INVALID_ARGUMENT;
+
+ thread->pcb->ims.v86s.int_table = int_table;
+ thread->pcb->ims.v86s.int_count = int_count;
+
+ thread->pcb->ims.v86s.flags =
+ USER_REGS(thread)->efl & (EFL_TF | EFL_IF);
+ break;
+ }
+#endif
+ case i386_DEBUG_STATE:
+ {
+ struct i386_debug_state *state;
+ kern_return_t ret;
+
+ if (count < i386_DEBUG_STATE_COUNT)
+ return KERN_INVALID_ARGUMENT;
+
+ state = (struct i386_debug_state *) tstate;
+ ret = db_set_debug_state(thread->pcb, state);
+ if (ret)
+ return ret;
+ break;
+ }
+#if defined(__x86_64__) && !defined(USER32)
+ case i386_FSGS_BASE_STATE:
+ {
+ struct i386_fsgs_base_state *state;
+ if (count < i386_FSGS_BASE_STATE_COUNT)
+ return KERN_INVALID_ARGUMENT;
+
+ state = (struct i386_fsgs_base_state *) tstate;
+ thread->pcb->ims.sbs.fsbase = state->fs_base;
+ thread->pcb->ims.sbs.gsbase = state->gs_base;
+ if (thread == current_thread()) {
+ wrmsr(MSR_REG_FSBASE, state->fs_base);
+ wrmsr(MSR_REG_GSBASE, state->gs_base);
+ }
+ break;
+ }
+#endif
+ default:
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * thread_getstatus:
+ *
+ * Get the status of the specified thread.
+ */
+
+kern_return_t thread_getstatus(
+ thread_t thread,
+ int flavor,
+ thread_state_t tstate, /* pointer to OUT array */
+ unsigned int *count) /* IN/OUT */
+{
+ switch (flavor) {
+ case THREAD_STATE_FLAVOR_LIST:
+#if !defined(__x86_64__) || defined(USER32)
+ unsigned int ncount = 4;
+#else
+ unsigned int ncount = 3;
+#endif
+ if (*count < ncount)
+ return (KERN_INVALID_ARGUMENT);
+ tstate[0] = i386_THREAD_STATE;
+ tstate[1] = i386_FLOAT_STATE;
+ tstate[2] = i386_ISA_PORT_MAP_STATE;
+#if !defined(__x86_64__) || defined(USER32)
+ tstate[3] = i386_V86_ASSIST_STATE;
+#endif
+ *count = ncount;
+ break;
+
+ case i386_THREAD_STATE:
+ case i386_REGS_SEGS_STATE:
+ {
+ struct i386_thread_state *state;
+ struct i386_saved_state *saved_state;
+
+ if (*count < i386_THREAD_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ state = (struct i386_thread_state *) tstate;
+ saved_state = USER_REGS(thread);
+
+ /*
+ * General registers.
+ */
+#if defined(__x86_64__) && !defined(USER32)
+ state->r8 = saved_state->r8;
+ state->r9 = saved_state->r9;
+ state->r10 = saved_state->r10;
+ state->r11 = saved_state->r11;
+ state->r12 = saved_state->r12;
+ state->r13 = saved_state->r13;
+ state->r14 = saved_state->r14;
+ state->r15 = saved_state->r15;
+ state->rdi = saved_state->edi;
+ state->rsi = saved_state->esi;
+ state->rbp = saved_state->ebp;
+ state->rbx = saved_state->ebx;
+ state->rdx = saved_state->edx;
+ state->rcx = saved_state->ecx;
+ state->rax = saved_state->eax;
+ state->rip = saved_state->eip;
+ state->ursp = saved_state->uesp;
+ state->rfl = saved_state->efl;
+ state->rsp = 0; /* unused */
+#else
+ state->edi = saved_state->edi;
+ state->esi = saved_state->esi;
+ state->ebp = saved_state->ebp;
+ state->ebx = saved_state->ebx;
+ state->edx = saved_state->edx;
+ state->ecx = saved_state->ecx;
+ state->eax = saved_state->eax;
+ state->eip = saved_state->eip;
+ state->uesp = saved_state->uesp;
+ state->efl = saved_state->efl;
+ state->esp = 0; /* unused */
+#endif /* __x86_64__ && !USER32 */
+
+ state->cs = saved_state->cs;
+ state->ss = saved_state->ss;
+#if !defined(__x86_64__) || defined(USER32)
+ if (saved_state->efl & EFL_VM) {
+ /*
+ * V8086 mode.
+ */
+ state->ds = saved_state->v86_segs.v86_ds & 0xffff;
+ state->es = saved_state->v86_segs.v86_es & 0xffff;
+ state->fs = saved_state->v86_segs.v86_fs & 0xffff;
+ state->gs = saved_state->v86_segs.v86_gs & 0xffff;
+
+ if (thread->pcb->ims.v86s.int_table) {
+ /*
+ * Hardware assist on
+ */
+ if ((thread->pcb->ims.v86s.flags &
+ (EFL_IF|V86_IF_PENDING))
+ == 0)
+ saved_state->efl &= ~EFL_IF;
+ }
+ } else {
+ /*
+ * 386 mode.
+ */
+ state->ds = saved_state->ds & 0xffff;
+ state->es = saved_state->es & 0xffff;
+ state->fs = saved_state->fs & 0xffff;
+ state->gs = saved_state->gs & 0xffff;
+ }
+#endif
+ *count = i386_THREAD_STATE_COUNT;
+ break;
+ }
+
+ case i386_FLOAT_STATE: {
+
+ if (*count < i386_FLOAT_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ *count = i386_FLOAT_STATE_COUNT;
+ return fpu_get_state(thread,
+ (struct i386_float_state *)tstate);
+ }
+
+ /*
+ * Temporary - replace by i386_io_map
+ */
+ case i386_ISA_PORT_MAP_STATE: {
+ struct i386_isa_port_map_state *state;
+
+ if (*count < i386_ISA_PORT_MAP_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ state = (struct i386_isa_port_map_state *) tstate;
+
+ simple_lock (&thread->task->machine.iopb_lock);
+ if (thread->task->machine.iopb == 0)
+ memset (state->pm, 0xff, sizeof state->pm);
+ else
+ memcpy(state->pm,
+ thread->task->machine.iopb,
+ sizeof state->pm);
+ simple_unlock (&thread->task->machine.iopb_lock);
+
+ *count = i386_ISA_PORT_MAP_STATE_COUNT;
+ break;
+ }
+#if !defined(__x86_64__) || defined(USER32)
+ case i386_V86_ASSIST_STATE:
+ {
+ struct i386_v86_assist_state *state;
+
+ if (*count < i386_V86_ASSIST_STATE_COUNT)
+ return KERN_INVALID_ARGUMENT;
+
+ state = (struct i386_v86_assist_state *) tstate;
+ state->int_table = thread->pcb->ims.v86s.int_table;
+ state->int_count = thread->pcb->ims.v86s.int_count;
+
+ *count = i386_V86_ASSIST_STATE_COUNT;
+ break;
+ }
+#endif
+ case i386_DEBUG_STATE:
+ {
+ struct i386_debug_state *state;
+
+ if (*count < i386_DEBUG_STATE_COUNT)
+ return KERN_INVALID_ARGUMENT;
+
+ state = (struct i386_debug_state *) tstate;
+ db_get_debug_state(thread->pcb, state);
+
+ *count = i386_DEBUG_STATE_COUNT;
+ break;
+ }
+#if defined(__x86_64__) && !defined(USER32)
+ case i386_FSGS_BASE_STATE:
+ {
+ struct i386_fsgs_base_state *state;
+ if (*count < i386_FSGS_BASE_STATE_COUNT)
+ return KERN_INVALID_ARGUMENT;
+
+ state = (struct i386_fsgs_base_state *) tstate;
+ state->fs_base = thread->pcb->ims.sbs.fsbase;
+ state->gs_base = thread->pcb->ims.sbs.gsbase;
+ *count = i386_FSGS_BASE_STATE_COUNT;
+ break;
+ }
+#endif
+ default:
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ return(KERN_SUCCESS);
+}
+
/*
 * Alter the thread`s state so that a following thread_exception_return
 * will make the thread return 'retval' from a syscall.
 *
 * Works by overwriting the saved eax in the thread's interrupt saved
 * state, which is where the syscall return value is reloaded from.
 */
void
thread_set_syscall_return(
	thread_t	thread,
	kern_return_t	retval)
{
	thread->pcb->iss.eax = retval;
}
+
+/*
+ * Return preferred address of user stack.
+ * Always returns low address. If stack grows up,
+ * the stack grows away from this address;
+ * if stack grows down, the stack grows towards this
+ * address.
+ */
+vm_offset_t
+user_stack_low(vm_size_t stack_size)
+{
+ return (VM_MAX_USER_ADDRESS - stack_size);
+}
+
+/*
+ * Allocate argument area and set registers for first user thread.
+ */
+vm_offset_t
+set_user_regs(vm_offset_t stack_base, /* low address */
+ vm_offset_t stack_size,
+ const struct exec_info *exec_info,
+ vm_size_t arg_size)
+{
+ vm_offset_t arg_addr;
+ struct i386_saved_state *saved_state;
+
+ assert(P2ALIGNED(stack_size, USER_STACK_ALIGN));
+ assert(P2ALIGNED(stack_base, USER_STACK_ALIGN));
+ arg_size = P2ROUND(arg_size, USER_STACK_ALIGN);
+ arg_addr = stack_base + stack_size - arg_size;
+
+ saved_state = USER_REGS(current_thread());
+ saved_state->uesp = (rpc_vm_offset_t)arg_addr;
+ saved_state->eip = exec_info->entry;
+
+ return (arg_addr);
+}
diff --git a/i386/i386/pcb.h b/i386/i386/pcb.h
new file mode 100644
index 0000000..4d48b9f
--- /dev/null
+++ b/i386/i386/pcb.h
@@ -0,0 +1,90 @@
+/*
+ *
+ * Copyright (C) 2006 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ *
+ *
+ */
+
#ifndef _I386_PCB_H_
#define _I386_PCB_H_

#include <sys/types.h>
#include <mach/exec/exec.h>
#include <mach/thread_status.h>
#include <machine/thread.h>
#include <machine/io_perm.h>

/* Initialize the PCB of a newly created thread in 'parent_task'. */
extern void pcb_init (task_t parent_task, thread_t thread);

/* Release PCB resources when a thread is destroyed. */
extern void pcb_terminate (thread_t thread);

/* Reclaim unused PCB storage (may be a no-op on this architecture). */
extern void pcb_collect (thread_t thread);

/* Set the machine state of a thread; 'count' is the size of 'tstate'
   in words.  Flavors are defined in mach/thread_status.h. */
extern kern_return_t thread_setstatus (
	thread_t	thread,
	int		flavor,
	thread_state_t	tstate,
	unsigned int	count);

/* Get the machine state of a thread; '*count' is IN capacity / OUT
   words written. */
extern kern_return_t thread_getstatus (
	thread_t	thread,
	int		flavor,
	thread_state_t	tstate,
	unsigned int	*count);

/* Arrange for 'retval' to be the thread's syscall return value. */
extern void thread_set_syscall_return (
	thread_t	thread,
	kern_return_t	retval);

/* Preferred low address for a user stack of the given size. */
extern vm_offset_t user_stack_low (vm_size_t stack_size);

/* Set up argument area and initial registers for the first user thread. */
extern vm_offset_t set_user_regs (
	vm_offset_t	stack_base,
	vm_offset_t	stack_size,
	const struct exec_info *exec_info,
	vm_size_t	arg_size);

/* Context-switch primitives (implemented in assembly). */
extern void load_context (thread_t new);

/* Attach a kernel stack to a thread, to resume at 'continuation'. */
extern void stack_attach (
	thread_t	thread,
	vm_offset_t	stack,
	void		(*continuation)(thread_t));

/* Detach and return a thread's kernel stack. */
extern vm_offset_t stack_detach (thread_t thread);

/* Load per-thread state (e.g. kernel TSS fields) for 'pcb'. */
extern void switch_ktss (pcb_t pcb);

/* Install a new I/O permission bitmap of 'size' ports in the kernel TSS. */
extern void update_ktss_iopb (unsigned char *new_iopb, io_port_t size);

extern thread_t Load_context (thread_t new);

extern thread_t Switch_context (thread_t old, continuation_t continuation, thread_t new);

/* Switch to a private stack to run 'routine' during processor shutdown. */
extern void switch_to_shutdown_context(thread_t thread,
                                       void (*routine)(processor_t),
                                       processor_t processor);

extern void Thread_continue (void);

/* One-time initialization of the PCB module. */
extern void pcb_module_init (void);

#endif /* _I386_PCB_H_ */
diff --git a/i386/i386/percpu.c b/i386/i386/percpu.c
new file mode 100644
index 0000000..c6b728b
--- /dev/null
+++ b/i386/i386/percpu.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2023 Free Software Foundation, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <i386/smp.h>
+#include <i386/apic.h>
+#include <kern/cpu_number.h>
+#include <i386/percpu.h>
+
/* One statically allocated per-CPU slot per processor; slot 0 is used
   directly in the single-CPU build (see percpu.h). */
struct percpu percpu_array[NCPUS] = {0};

#ifndef MACH_XEN
/*
 * Initialize the per-CPU slot for kernel CPU number 'cpu'.
 *
 * The 'self' pointer (first struct member, offset 0) is what the
 * %gs-relative accessors in percpu.h rely on to recover the slot's
 * address.
 *
 * NOTE(review): apic_get_current_cpu() presumably reads the local APIC
 * of the running processor, so this should be called on the CPU being
 * initialized — confirm against callers.
 */
void init_percpu(int cpu)
{
    int apic_id = apic_get_current_cpu();

    percpu_array[cpu].self = &percpu_array[cpu];
    percpu_array[cpu].apic_id = apic_id;
    percpu_array[cpu].cpu_id = cpu;
}
#endif
diff --git a/i386/i386/percpu.h b/i386/i386/percpu.h
new file mode 100644
index 0000000..637d2ca
--- /dev/null
+++ b/i386/i386/percpu.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2023 Free Software Foundation, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#ifndef _PERCPU_H_
#define _PERCPU_H_

struct percpu;

#if NCPUS > 1

/*
 * SMP build: per-CPU data is reached through the %gs segment, whose
 * base is assumed to point at the current CPU's struct percpu
 * (NOTE(review): the %gs base setup happens elsewhere — confirm in the
 * CPU bring-up code).  The "e" constraint makes the field offset a
 * compile-time immediate.
 */

/* Store 'val' into field 'stm' of the current CPU's percpu slot. */
#define percpu_assign(stm, val)     \
    asm("mov %[src], %%gs:%c[offs]" \
         : /* No outputs */ \
         : [src] "r" (val), [offs] "e" (__builtin_offsetof(struct percpu, stm)) \
         : );

/* Load and yield field 'stm' (of type 'typ') from the current CPU's slot. */
#define percpu_get(typ, stm)        \
MACRO_BEGIN                         \
    typ val_;                       \
                                    \
    asm("mov %%gs:%c[offs], %[dst]" \
         : [dst] "=r" (val_)        \
         : [offs] "e" (__builtin_offsetof(struct percpu, stm)) \
         : );                       \
                                    \
    val_;                           \
MACRO_END

/* Yield a pointer to field 'stm' of the current CPU's slot.  Adds the
   'self' pointer stored at %gs:0 (first member of struct percpu) to
   the field's offset to form an ordinary linear address. */
#define percpu_ptr(typ, stm)        \
MACRO_BEGIN                         \
    typ *ptr_ = (typ *)__builtin_offsetof(struct percpu, stm);  \
                                    \
    asm("add %%gs:0, %[pointer]"    \
         : [pointer] "+r" (ptr_)    \
         : /* No inputs */          \
         : );                       \
                                    \
    ptr_;                           \
MACRO_END

#else

/* Uniprocessor build: everything degenerates to plain accesses of
   slot 0 of percpu_array. */

#define percpu_assign(stm, val)     \
MACRO_BEGIN                         \
        percpu_array[0].stm = val;  \
MACRO_END
#define percpu_get(typ, stm)        \
        (percpu_array[0].stm)
#define percpu_ptr(typ, stm)        \
        (&percpu_array[0].stm)

#endif

#include <kern/processor.h>
#include <mach/mach_types.h>

struct percpu {
    struct percpu	*self;		/* linear address of this slot; MUST
					   stay first (offset 0), percpu_ptr
					   depends on it */
    int			apic_id;	/* local APIC id of this CPU */
    int			cpu_id;		/* kernel CPU number */
    struct processor	processor;
    thread_t		active_thread;
    vm_offset_t		active_stack;
/* Candidates for future migration into the per-CPU area:
    struct machine_slot	machine_slot;
    struct mp_desc_table mp_desc_table;
    vm_offset_t		int_stack_top;
    vm_offset_t		int_stack_base;
    ast_t		need_ast;
    ipc_kmsg_t		ipc_kmsg_cache;
    pmap_update_list	cpu_update_list;
    spl_t		saved_ipl;
    spl_t		curr_ipl;
    timer_data_t	kernel_timer;
    timer_t		current_timer;
    unsigned long	in_interrupt;
*/
};

extern struct percpu percpu_array[NCPUS];

void init_percpu(int cpu);

#endif /* _PERCPU_H_ */
diff --git a/i386/i386/phys.c b/i386/i386/phys.c
new file mode 100644
index 0000000..e864489
--- /dev/null
+++ b/i386/i386/phys.c
@@ -0,0 +1,187 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <mach/xen.h>
+#include <kern/task.h>
+#include <kern/thread.h>
+#include <vm/vm_map.h>
+#include "vm_param.h"
+#include <mach/vm_prot.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+
+#include <i386/pmap.h>
+#include <i386/model_dep.h>
+#include <mach/machine/vm_param.h>
+
+#define INTEL_PTE_W(p) (INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_REF | INTEL_PTE_MOD | pa_to_pte(p))
+#define INTEL_PTE_R(p) (INTEL_PTE_VALID | INTEL_PTE_REF | pa_to_pte(p))
+
+/*
+ * pmap_zero_page zeros the specified (machine independent) page.
+ */
+void
+pmap_zero_page(phys_addr_t p)
+{
+ assert(p != vm_page_fictitious_addr);
+ vm_offset_t v;
+ pmap_mapwindow_t *map;
+ boolean_t mapped = p >= VM_PAGE_DIRECTMAP_LIMIT;
+
+ if (mapped)
+ {
+ map = pmap_get_mapwindow(INTEL_PTE_W(p));
+ v = map->vaddr;
+ }
+ else
+ v = phystokv(p);
+
+ memset((void*) v, 0, PAGE_SIZE);
+
+ if (mapped)
+ pmap_put_mapwindow(map);
+}
+
+/*
+ * pmap_copy_page copies the specified (machine independent) pages.
+ */
+void
+pmap_copy_page(
+ phys_addr_t src,
+ phys_addr_t dst)
+{
+ vm_offset_t src_addr_v, dst_addr_v;
+ pmap_mapwindow_t *src_map = NULL;
+ pmap_mapwindow_t *dst_map;
+ boolean_t src_mapped = src >= VM_PAGE_DIRECTMAP_LIMIT;
+ boolean_t dst_mapped = dst >= VM_PAGE_DIRECTMAP_LIMIT;
+ assert(src != vm_page_fictitious_addr);
+ assert(dst != vm_page_fictitious_addr);
+
+ if (src_mapped)
+ {
+ src_map = pmap_get_mapwindow(INTEL_PTE_R(src));
+ src_addr_v = src_map->vaddr;
+ }
+ else
+ src_addr_v = phystokv(src);
+
+ if (dst_mapped)
+ {
+ dst_map = pmap_get_mapwindow(INTEL_PTE_W(dst));
+ dst_addr_v = dst_map->vaddr;
+ }
+ else
+ dst_addr_v = phystokv(dst);
+
+ memcpy((void *) dst_addr_v, (void *) src_addr_v, PAGE_SIZE);
+
+ if (src_mapped)
+ pmap_put_mapwindow(src_map);
+ if (dst_mapped)
+ pmap_put_mapwindow(dst_map);
+}
+
+/*
+ * copy_to_phys(src_addr_v, dst_addr_p, count)
+ *
+ * Copy virtual memory to physical memory
+ */
+void
+copy_to_phys(
+ vm_offset_t src_addr_v,
+ phys_addr_t dst_addr_p,
+ int count)
+{
+ vm_offset_t dst_addr_v;
+ pmap_mapwindow_t *dst_map;
+ boolean_t mapped = dst_addr_p >= VM_PAGE_DIRECTMAP_LIMIT;
+ assert(dst_addr_p != vm_page_fictitious_addr);
+ assert(pa_to_pte(dst_addr_p + count-1) == pa_to_pte(dst_addr_p));
+
+ if (mapped)
+ {
+ dst_map = pmap_get_mapwindow(INTEL_PTE_W(dst_addr_p));
+ dst_addr_v = dst_map->vaddr + (dst_addr_p & (INTEL_PGBYTES-1));
+ }
+ else
+ dst_addr_v = phystokv(dst_addr_p);
+
+ memcpy((void *)dst_addr_v, (void *)src_addr_v, count);
+
+ if (mapped)
+ pmap_put_mapwindow(dst_map);
+}
+
+/*
+ * copy_from_phys(src_addr_p, dst_addr_v, count)
+ *
+ * Copy physical memory to virtual memory. The virtual memory
+ * is assumed to be present (e.g. the buffer pool).
+ */
+void
+copy_from_phys(
+ phys_addr_t src_addr_p,
+ vm_offset_t dst_addr_v,
+ int count)
+{
+ vm_offset_t src_addr_v;
+ pmap_mapwindow_t *src_map;
+ boolean_t mapped = src_addr_p >= VM_PAGE_DIRECTMAP_LIMIT;
+ assert(src_addr_p != vm_page_fictitious_addr);
+ assert(pa_to_pte(src_addr_p + count-1) == pa_to_pte(src_addr_p));
+
+ if (mapped)
+ {
+ src_map = pmap_get_mapwindow(INTEL_PTE_R(src_addr_p));
+ src_addr_v = src_map->vaddr + (src_addr_p & (INTEL_PGBYTES-1));
+ }
+ else
+ src_addr_v = phystokv(src_addr_p);
+
+ memcpy((void *)dst_addr_v, (void *)src_addr_v, count);
+
+ if (mapped)
+ pmap_put_mapwindow(src_map);
+}
+
+/*
+ * kvtophys(addr)
+ *
+ * Convert a kernel virtual address to a physical address
+ */
+phys_addr_t
+kvtophys(vm_offset_t addr)
+{
+ pt_entry_t *pte;
+
+ if ((pte = pmap_pte(kernel_pmap, addr)) == PT_ENTRY_NULL)
+ return 0;
+ return pte_to_pa(*pte) | (addr & INTEL_OFFMASK);
+}
diff --git a/i386/i386/pic.c b/i386/i386/pic.c
new file mode 100644
index 0000000..66fbc04
--- /dev/null
+++ b/i386/i386/pic.c
@@ -0,0 +1,262 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * Copyright (C) 1995 Shantanu Goel.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <sys/types.h>
+#include <kern/printf.h>
+#include <i386/ipl.h>
+#include <i386/pic.h>
+#include <i386/machspl.h>
+#include <i386/pio.h>
+
/* Current interrupt priority level, per CPU. */
spl_t curr_ipl[NCPUS] = {0};
/* Software copy of the combined master (low byte) / slave (high byte)
   PIC interrupt mask. */
int curr_pic_mask;
int spl_init = 0;

/* Unit argument passed to each IRQ's handler; identity-mapped here. */
int iunit[NINTR] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};

/* I/O port addresses of the two PICs' command/data registers,
   filled in by picinit(). */
unsigned short master_icw, master_ocw, slaves_icw, slaves_ocw;

/* Initialization (ICW) and operation (OCW) command words sent to the
   PICs by picinit(). */
u_short PICM_ICW1, PICM_OCW1, PICS_ICW1, PICS_OCW1 ;
u_short PICM_ICW2, PICM_OCW2, PICS_ICW2, PICS_OCW2 ;
u_short PICM_ICW3, PICM_OCW3, PICS_ICW3, PICS_OCW3 ;
u_short PICM_ICW4, PICS_ICW4 ;
+
/*
** picinit() - This routine
** 	* Establishes a table of interrupt vectors
** 	* Establishes location of PICs in the system
** 	* Unmasks all interrupts in the PICs
** 	* Initialises them
**
**	At this stage the interrupt functionality of this system should be
**	complete.
**
**	NOTE(review): interrupts are disabled on entry (cli) and not
**	re-enabled here — presumably a later stage of boot does that;
**	the ICW1..ICW4 write sequence is mandated by the 8259A and must
**	not be reordered.
*/

/*
** Initialise the PICs , master first, then the slave.
** All the register field definitions are described in pic.h also
** the settings of these fields for the various registers are selected.
*/

void
picinit(void)
{

	asm("cli");

	/*
	** 0. Initialise the current level to match cli()
	*/
	int i;

	for (i = 0; i < NCPUS; i++)
		curr_ipl[i] = SPLHI;
	curr_pic_mask = 0;

	/*
	** 1. Generate addresses to each PIC port.
	*/

	master_icw = PIC_MASTER_ICW;
	master_ocw = PIC_MASTER_OCW;
	slaves_icw = PIC_SLAVE_ICW;
	slaves_ocw = PIC_SLAVE_OCW;

	/*
	** 2. Select options for each ICW and each OCW for each PIC.
	*/

	PICM_ICW1 =
 	(ICW_TEMPLATE | EDGE_TRIGGER | ADDR_INTRVL8 | CASCADE_MODE | ICW4__NEEDED);

	PICS_ICW1 =
	(ICW_TEMPLATE | EDGE_TRIGGER | ADDR_INTRVL8 | CASCADE_MODE | ICW4__NEEDED);

	/* Vector bases: master at PICM_VECTBASE, slave 8 vectors above. */
	PICM_ICW2 = PICM_VECTBASE;
	PICS_ICW2 = PICS_VECTBASE;

#ifdef	AT386
	/* Slave PIC cascades through the master's IR2 line. */
	PICM_ICW3 = ( SLAVE_ON_IR2 );
	PICS_ICW3 = ( I_AM_SLAVE_2 );
#endif	/* AT386 */

	PICM_ICW4 =
 	(SNF_MODE_DIS | NONBUFD_MODE | NRML_EOI_MOD | I8086_EMM_MOD);
	PICS_ICW4 =
	(SNF_MODE_DIS | NONBUFD_MODE | NRML_EOI_MOD | I8086_EMM_MOD);

	/* OCW1 is the interrupt mask; curr_pic_mask == 0 unmasks all. */
	PICM_OCW1 = (curr_pic_mask & 0x00FF);
	PICS_OCW1 = ((curr_pic_mask & 0xFF00)>>8);

	PICM_OCW2 = NON_SPEC_EOI;
	PICS_OCW2 = NON_SPEC_EOI;

	PICM_OCW3 = (OCW_TEMPLATE | READ_NEXT_RD | READ_IR_ONRD );
	PICS_OCW3 = (OCW_TEMPLATE | READ_NEXT_RD | READ_IR_ONRD );

	/*
	** 3. Initialise master - send commands to master PIC
	** (ICW1 to the command port, ICW2..ICW4 to the data port,
	** in this exact order)
	*/

	outb ( master_icw, PICM_ICW1 );
	outb ( master_ocw, PICM_ICW2 );
	outb ( master_ocw, PICM_ICW3 );
	outb ( master_ocw, PICM_ICW4 );

	/* Mask everything on the master until the slave is set up too. */
	outb ( master_ocw, PICM_MASK );
	outb ( master_icw, PICM_OCW3 );

	/*
	** 4. Initialise slave - send commands to slave PIC
	*/

	outb ( slaves_icw, PICS_ICW1 );
	outb ( slaves_ocw, PICS_ICW2 );
	outb ( slaves_ocw, PICS_ICW3 );
	outb ( slaves_ocw, PICS_ICW4 );


	outb ( slaves_ocw, PICS_OCW1 );
	outb ( slaves_icw, PICS_OCW3 );

	/*
	** 5. Initialise interrupts (apply the real mask to the master)
	*/
	outb ( master_ocw, PICM_OCW1 );

}
+
+void
+intnull(int unit_dev)
+{
+ static char warned[NINTR];
+
+ if (unit_dev >= NINTR)
+ printf("Unknown interrupt %d\n", unit_dev);
+ else if (!warned[unit_dev])
+ {
+ printf("intnull(%d)\n", unit_dev);
+ warned[unit_dev] = 1;
+ }
+
+}
+
+/*
+ * Mask a PIC IRQ.
+ */
+void
+mask_irq (unsigned int irq_nr)
+{
+ int new_pic_mask = curr_pic_mask | 1 << irq_nr;
+
+ if (curr_pic_mask != new_pic_mask)
+ {
+ curr_pic_mask = new_pic_mask;
+ if (irq_nr < 8)
+ {
+ outb (PIC_MASTER_OCW, curr_pic_mask & 0xff);
+ }
+ else
+ {
+ outb (PIC_SLAVE_OCW, curr_pic_mask >> 8);
+ }
+ }
+}
+
+/*
+ * Unmask a PIC IRQ.
+ */
+void
+unmask_irq (unsigned int irq_nr)
+{
+ int mask;
+ int new_pic_mask;
+
+ mask = 1 << irq_nr;
+ if (irq_nr >= 8)
+ {
+ mask |= 1 << 2;
+ }
+
+ new_pic_mask = curr_pic_mask & ~mask;
+
+ if (curr_pic_mask != new_pic_mask)
+ {
+ curr_pic_mask = new_pic_mask;
+ if (irq_nr < 8)
+ {
+ outb (PIC_MASTER_OCW, curr_pic_mask & 0xff);
+ }
+ else
+ {
+ outb (PIC_SLAVE_OCW, curr_pic_mask >> 8);
+ }
+ }
+}
+
diff --git a/i386/i386/pic.h b/i386/i386/pic.h
new file mode 100644
index 0000000..aec0ef6
--- /dev/null
+++ b/i386/i386/pic.h
@@ -0,0 +1,191 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _I386_PIC_H_
+#define _I386_PIC_H_
+
+#ifndef APIC
+#define NINTR 0x10
+#endif
+#define NPICS 0x02
+
+/*
+** The following are definitions used to locate the PICs in the system
+*/
+
+#if defined(AT386) || defined(ATX86_64)
+#define ADDR_PIC_BASE 0x20
+#define OFF_ICW 0x00
+#define OFF_OCW 0x01
+#define SIZE_PIC 0x80
+#endif /* defined(AT386) */
+
+#define PIC_MASTER_ICW (ADDR_PIC_BASE + OFF_ICW)
+#define PIC_MASTER_OCW (ADDR_PIC_BASE + OFF_OCW)
+#define PIC_SLAVE_ICW (PIC_MASTER_ICW + SIZE_PIC)
+#define PIC_SLAVE_OCW (PIC_MASTER_OCW + SIZE_PIC)
+
+/*
+** The following banks of definitions ICW1, ICW2, ICW3, and ICW4 are used
+** to define the fields of the various ICWs for initialisation of the PICs
+*/
+
+/*
+** ICW1
+*/
+
+#define ICW_TEMPLATE 0x10
+
+#define LEVL_TRIGGER 0x08
+#define EDGE_TRIGGER 0x00
+#define ADDR_INTRVL4 0x04
+#define ADDR_INTRVL8 0x00
+#define SINGLE__MODE 0x02
+#define CASCADE_MODE 0x00
+#define ICW4__NEEDED 0x01
+#define NO_ICW4_NEED 0x00
+
+/*
+** ICW2
+*/
+
+#if defined(AT386) || defined(ATX86_64)
+#define PICM_VECTBASE 0x20
+#define PICS_VECTBASE PICM_VECTBASE + 0x08
+#endif /* defined(AT386) */
+
+/*
+** ICW3
+*/
+
+#define SLAVE_ON_IR0 0x01
+#define SLAVE_ON_IR1 0x02
+#define SLAVE_ON_IR2 0x04
+#define SLAVE_ON_IR3 0x08
+#define SLAVE_ON_IR4 0x10
+#define SLAVE_ON_IR5 0x20
+#define SLAVE_ON_IR6 0x40
+#define SLAVE_ON_IR7 0x80
+
+#define I_AM_SLAVE_0 0x00
+#define I_AM_SLAVE_1 0x01
+#define I_AM_SLAVE_2 0x02
+#define I_AM_SLAVE_3 0x03
+#define I_AM_SLAVE_4 0x04
+#define I_AM_SLAVE_5 0x05
+#define I_AM_SLAVE_6 0x06
+#define I_AM_SLAVE_7 0x07
+
+/*
+** ICW4
+*/
+
+#define SNF_MODE_ENA 0x10
+#define SNF_MODE_DIS 0x00
+#define BUFFERD_MODE 0x08
+#define NONBUFD_MODE 0x00
+#define AUTO_EOI_MOD 0x02
+#define NRML_EOI_MOD 0x00
+#define I8086_EMM_MOD 0x01
+#define SET_MCS_MODE 0x00
+
+/*
+** OCW1
+*/
+#define PICM_MASK 0xFF
+#define PICS_MASK 0xFF
+/*
+** OCW2
+*/
+
+#define NON_SPEC_EOI 0x20
+#define SPECIFIC_EOI 0x60
+#define ROT_NON_SPEC 0xA0
+#define SET_ROT_AEOI 0x80
+#define RSET_ROTAEOI 0x00
+#define ROT_SPEC_EOI 0xE0
+#define SET_PRIORITY 0xC0
+#define NO_OPERATION 0x40
+
+#define SEND_EOI_IR0 0x00
+#define SEND_EOI_IR1 0x01
+#define SEND_EOI_IR2 0x02
+#define SEND_EOI_IR3 0x03
+#define SEND_EOI_IR4 0x04
+#define SEND_EOI_IR5 0x05
+#define SEND_EOI_IR6 0x06
+#define SEND_EOI_IR7 0x07
+
+/*
+** OCW3
+*/
+
+#define OCW_TEMPLATE 0x08
+#define SPECIAL_MASK 0x40
+#define MASK_MDE_SET 0x20
+#define MASK_MDE_RST 0x00
+#define POLL_COMMAND 0x04
+#define NO_POLL_CMND 0x00
+#define READ_NEXT_RD 0x02
+#define READ_IR_ONRD 0x00
+#define READ_IS_ONRD 0x01
+
+#define PIC_MASK_ZERO 0x00
+
+#if !defined(__ASSEMBLER__) && !defined(APIC)
+extern void picinit (void);
+extern int curr_pic_mask;
+extern void intnull(int unit);
+extern void mask_irq (unsigned int irq_nr);
+extern void unmask_irq (unsigned int irq_nr);
#endif /* !defined(__ASSEMBLER__) && !defined(APIC) */
+
+#endif /* _I386_PIC_H_ */
diff --git a/i386/i386/pio.h b/i386/i386/pio.h
new file mode 100644
index 0000000..c488fbb
--- /dev/null
+++ b/i386/i386/pio.h
@@ -0,0 +1,61 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_PIO_H_
+#define _I386_PIO_H_
+
+#ifndef __GNUC__
+#error You do not stand a chance. This file is gcc only.
+#endif /* __GNUC__ */
+
+/*
+ * Programmed-I/O port accessors, implemented as GCC statement
+ * expressions around in/out instructions.  in{b,w,l}(port) read an
+ * 8/16/32-bit value from an I/O port; out{b,w,l}(port, value) write
+ * one.  Note the out* argument order: the PORT is the first argument
+ * (x) and the VALUE is the second (y).  The "dN" constraint lets gcc
+ * encode small constant ports as immediates, otherwise uses %dx.
+ */
+#define inl(y) \
+({ unsigned int _tmp__; \
+ asm volatile("inl %1, %0" : "=a" (_tmp__) : "dN" ((unsigned short)(y))); \
+ _tmp__; })
+
+#define inw(y) \
+({ unsigned short _tmp__; \
+ asm volatile("inw %1, %0" : "=a" (_tmp__) : "dN" ((unsigned short)(y))); \
+ _tmp__; })
+
+#define inb(y) \
+({ unsigned char _tmp__; \
+ asm volatile("inb %1, %0" : "=a" (_tmp__) : "dN" ((unsigned short)(y))); \
+ _tmp__; })
+
+
+#define outl(x, y) \
+{ asm volatile("outl %0, %1" : : "a" ((unsigned int)(y)) , "dN" ((unsigned short)(x))); }
+
+
+#define outw(x, y) \
+{ asm volatile("outw %0, %1" : : "a" ((unsigned short)(y)) , "dN" ((unsigned short)(x))); }
+
+
+#define outb(x, y) \
+{ asm volatile("outb %0, %1" : : "a" ((unsigned char)(y)) , "dN" ((unsigned short)(x))); }
+
+#endif /* _I386_PIO_H_ */
diff --git a/i386/i386/pit.c b/i386/i386/pit.c
new file mode 100644
index 0000000..6c006a9
--- /dev/null
+++ b/i386/i386/pit.c
@@ -0,0 +1,140 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#include <kern/mach_clock.h>
+#include <i386/ipl.h>
+#include <machine/irq.h>
+#include <i386/pit.h>
+#include <i386/pio.h>
+#include <kern/cpu_number.h>
+
+int pitctl_port = PITCTL_PORT; /* For 386/20 Board */
+int pitctr0_port = PITCTR0_PORT; /* For 386/20 Board */
+/* We want PIT 0 in square wave mode */
+
+int pit0_mode = PIT_C0|PIT_SQUAREMODE|PIT_READMODE ;
+
+
+unsigned int clknumb = CLKNUM; /* interrupt interval for timer 0 */
+
+/*
+ * Program PIT counter 2 for a one-shot countdown lasting 1/persec
+ * seconds.  Only configures the counter; pit_sleep() actually starts
+ * and waits out the countdown.  The divisor CLKNUM/persec must fit in
+ * 16 bits, so very small persec values will be truncated by the
+ * lsb/msb split — callers pass rates of 1 Hz or more.
+ */
+void
+pit_prepare_sleep(int persec)
+{
+ /* Prepare to sleep for 1/persec seconds */
+ uint32_t val = 0;
+ uint8_t lsb, msb;
+
+ /* Enable the counter-2 gate input, keep the speaker output off. */
+ val = inb(PITAUX_PORT);
+ val &= ~PITAUX_OUT2;
+ val |= PITAUX_GATE2;
+ outb (PITAUX_PORT, val);
+ /* Counter 2, load LSB then MSB, one-shot mode. */
+ outb (PITCTL_PORT, PIT_C2 | PIT_LOADMODE | PIT_ONESHOTMODE);
+ val = CLKNUM / persec;
+ lsb = val & 0xff;
+ msb = val >> 8;
+ /* The count must be written in two byte-wide accesses (LOADMODE). */
+ outb (PITCTR2_PORT, lsb);
+ val = inb(POST_PORT); /* ~1us i/o delay */
+ outb (PITCTR2_PORT, msb);
+}
+
+/*
+ * Trigger the countdown programmed by pit_prepare_sleep() and
+ * busy-wait until it expires.  A low-to-high transition on the gate
+ * retriggers the one-shot; PITAUX_VAL then reads low until the count
+ * reaches terminal — presumably the counter-2 OUT pin mirrored in the
+ * aux port (8254 one-shot semantics; confirm against the chipset doc).
+ */
+void
+pit_sleep(void)
+{
+ uint8_t val;
+
+ /* Start counting down */
+ val = inb(PITAUX_PORT);
+ val &= ~PITAUX_GATE2;
+ outb (PITAUX_PORT, val); /* Gate low */
+ val |= PITAUX_GATE2;
+ outb (PITAUX_PORT, val); /* Gate high */
+
+ /* Wait until counter reaches zero */
+ while ((inb(PITAUX_PORT) & PITAUX_VAL) == 0);
+}
+
+/*
+ * Busy-wait for approximately usec microseconds using PIT counter 2.
+ * The bare 1000000/usec expression divides by zero for usec == 0 and
+ * produces a zero rate (a divide-by-zero inside pit_prepare_sleep)
+ * for usec > 1000000, so clamp both ends: non-positive delays return
+ * immediately, delays over one second are limited to one second.
+ */
+void
+pit_udelay(int usec)
+{
+ int persec;
+
+ if (usec <= 0)
+ return; /* nothing to wait for */
+ persec = 1000000 / usec;
+ if (persec < 1)
+ persec = 1; /* clamp to the 1 Hz minimum rate */
+ pit_prepare_sleep(persec);
+ pit_sleep();
+}
+
+/*
+ * Busy-wait for approximately msec milliseconds using PIT counter 2.
+ * The bare 1000/msec expression divides by zero for msec == 0 and
+ * produces a zero rate (a divide-by-zero inside pit_prepare_sleep)
+ * for msec > 1000, so clamp both ends like pit_udelay().
+ */
+void
+pit_mdelay(int msec)
+{
+ int persec;
+
+ if (msec <= 0)
+ return; /* nothing to wait for */
+ persec = 1000 / msec;
+ if (persec < 1)
+ persec = 1; /* clamp delays longer than one second */
+ pit_prepare_sleep(persec);
+ pit_sleep();
+}
+
+/*
+ * Program PIT counter 0 as the periodic clock-interrupt source.
+ * Runs with interrupts disabled (sploff/splon) and only on CPU 0 —
+ * the PIT is a single shared device, so secondary CPUs return early.
+ */
+void
+clkstart(void)
+{
+ if (cpu_number() != 0)
+ /* Only one PIT initialization is needed */
+ return;
+ unsigned char byte;
+ unsigned long s;
+
+ s = sploff(); /* disable interrupts */
+
+ /* Since we use only timer 0, we program that.
+ * 8254 Manual specifically says you do not need to program
+ * timers you do not use
+ */
+ outb(pitctl_port, pit0_mode);
+ /* Divisor for hz ticks/sec, rounded to the nearest integer. */
+ clknumb = (CLKNUM + hz / 2) / hz;
+ /* pit0_mode selects READMODE: write LSB first, then MSB. */
+ byte = clknumb;
+ outb(pitctr0_port, byte);
+ byte = clknumb>>8;
+ outb(pitctr0_port, byte);
+ splon(s); /* restore interrupt state */
+}
diff --git a/i386/i386/pit.h b/i386/i386/pit.h
new file mode 100644
index 0000000..49e1051
--- /dev/null
+++ b/i386/i386/pit.h
@@ -0,0 +1,98 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and
+its documentation for any purpose and without fee is hereby
+granted, provided that the above copyright notice appears in all
+copies and that both the copyright notice and this permission notice
+appear in supporting documentation, and that the name of Intel
+not be used in advertising or publicity pertaining to distribution
+of the software without specific, written prior permission.
+
+INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _I386_PIT_H_
+#define _I386_PIT_H_
+
+#if defined(AT386) || defined(ATX86_64)
+/* Definitions for 8254 Programmable Interrupt Timer ports on AT 386 */
+#define PITCTR0_PORT 0x40 /* counter 0 port */
+#define PITCTR1_PORT 0x41 /* counter 1 port */
+#define PITCTR2_PORT 0x42 /* counter 2 port */
+#define PITCTL_PORT 0x43 /* PIT control port */
+#define PITAUX_PORT 0x61 /* PIT auxiliary port */
+/* bits used in auxiliary control port for timer 2 */
+#define PITAUX_GATE2 0x01 /* aux port, PIT gate 2 input */
+#define PITAUX_OUT2 0x02 /* aux port, PIT clock out 2 enable */
+#define PITAUX_VAL 0x20 /* aux port, output */
+#endif /* defined(AT386) || defined(ATX86_64) */
+
+/* Following are used for Timer 0 */
+#define PIT_C0 0x00 /* select counter 0 */
+#define PIT_LOADMODE 0x30 /* load least significant byte followed
+ * by most significant byte */
+#define PIT_NDIVMODE 0x04 /*divide by N counter */
+
+/* Used for Timer 1. Used for delay calculations in countdown mode */
+#define PIT_C1 0x40 /* select counter 1 */
+#define PIT_READMODE 0x30 /* read or load least significant byte
+ * followed by most significant byte */
+
+#define PIT_SQUAREMODE 0x06 /* square-wave mode */
+#define PIT_RATEMODE 0x04 /* rate generator mode */
+#define PIT_ONESHOTMODE 0x02 /* one-shot mode */
+
+/* Used for Timer 2. */
+#define PIT_C2 0x80 /* select counter 2 */
+
+#define POST_PORT 0x80 /* used for tiny i/o delay */
+
+/*
+ * Clock speed for the timer in hz divided by the constant HZ
+ * (defined in param.h)
+ */
+#if defined(AT386) || defined(ATX86_64)
+#define CLKNUM 1193182
+#endif /* AT386 || ATX86_64 */
+
+extern void clkstart(void);
+extern void pit_prepare_sleep(int hz);
+extern void pit_sleep(void);
+extern void pit_udelay(int usec);
+extern void pit_mdelay(int msec);
+
+#endif /* _I386_PIT_H_ */
diff --git a/i386/i386/pmap.h b/i386/i386/pmap.h
new file mode 100644
index 0000000..a989923
--- /dev/null
+++ b/i386/i386/pmap.h
@@ -0,0 +1,27 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <intel/pmap.h>
diff --git a/i386/i386/proc_reg.h b/i386/i386/proc_reg.h
new file mode 100644
index 0000000..704676c
--- /dev/null
+++ b/i386/i386/proc_reg.h
@@ -0,0 +1,407 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Processor registers for i386 and i486.
+ */
+#ifndef _I386_PROC_REG_H_
+#define _I386_PROC_REG_H_
+
+/*
+ * CR0
+ */
+#define CR0_PG 0x80000000 /* enable paging */
+#define CR0_CD 0x40000000 /* i486: cache disable */
+#define CR0_NW 0x20000000 /* i486: no write-through */
+#define CR0_AM 0x00040000 /* i486: alignment check mask */
+#define CR0_WP 0x00010000 /* i486: write-protect kernel access */
+#define CR0_NE 0x00000020 /* i486: handle numeric exceptions */
+#define CR0_ET 0x00000010 /* extension type is 80387 */
+ /* (not official) */
+#define CR0_TS 0x00000008 /* task switch */
+#define CR0_EM 0x00000004 /* emulate coprocessor */
+#define CR0_MP 0x00000002 /* monitor coprocessor */
+#define CR0_PE 0x00000001 /* enable protected mode */
+
+/*
+ * CR3
+ */
+#define CR3_PCD 0x0010 /* Page-level Cache Disable */
+#define CR3_PWT 0x0008 /* Page-level Writes Transparent */
+
+/*
+ * CR4
+ */
+#define CR4_VME 0x0001 /* Virtual-8086 Mode Extensions */
+#define CR4_PVI 0x0002 /* Protected-Mode Virtual Interrupts */
+#define CR4_TSD 0x0004 /* Time Stamp Disable */
+#define CR4_DE 0x0008 /* Debugging Extensions */
+#define CR4_PSE 0x0010 /* Page Size Extensions */
+#define CR4_PAE 0x0020 /* Physical Address Extension */
+#define CR4_MCE 0x0040 /* Machine-Check Enable */
+#define CR4_PGE 0x0080 /* Page Global Enable */
+#define CR4_PCE 0x0100 /* Performance-Monitoring Counter
+ * Enable */
+#define CR4_OSFXSR 0x0200 /* Operating System Support for FXSAVE
+ * and FXRSTOR instructions */
+#define CR4_OSXMMEXCPT 0x0400 /* Operating System Support for Unmasked
+ * SIMD Floating-Point Exceptions */
+#define CR4_OSXSAVE 0x40000 /* Operating System Support for XSAVE
+ * and XRSTOR instructions */
+
+#ifndef __ASSEMBLER__
+#ifdef __GNUC__
+
+#ifndef MACH_HYP
+#include <i386/gdt.h>
+#include <i386/ldt.h>
+#endif /* MACH_HYP */
+
+/* Read the processor flags register (RFLAGS/EFLAGS) by pushing it and
+ popping it into a general register. */
+static inline unsigned long
+get_eflags(void)
+{
+ unsigned long eflags;
+#ifdef __x86_64__
+ asm("pushfq; popq %0" : "=r" (eflags));
+#else
+ asm("pushfl; popl %0" : "=r" (eflags));
+#endif
+ return eflags;
+}
+
+/* Write the processor flags register (RFLAGS/EFLAGS) by pushing the
+ new value and popping it into the flags. */
+static inline void
+set_eflags(unsigned long eflags)
+{
+#ifdef __x86_64__
+ asm volatile("pushq %0; popfq" : : "r" (eflags));
+#else
+ asm volatile("pushl %0; popfl" : : "r" (eflags));
+#endif
+}
+
+/* Current stack pointer, read via a register variable bound to %esp. */
+#define get_esp() \
+ ({ \
+ register unsigned long _temp__ asm("esp"); \
+ _temp__; \
+ })
+
+/*
+ * NOTE(review): this function-like macro shadows the static inline
+ * get_eflags() defined above at every later call site in this header's
+ * includers — confirm the duplication is intentional before removing
+ * either definition.
+ */
+#ifdef __x86_64__
+#define get_eflags() \
+ ({ \
+ register unsigned long _temp__; \
+ asm("pushfq; popq %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#else
+#define get_eflags() \
+ ({ \
+ register unsigned long _temp__; \
+ asm("pushfl; popl %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#define get_cr0() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%cr0, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define set_cr0(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0, %%cr0" : : "r" (_temp__)); \
+ })
+
+#define get_cr2() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%cr2, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#ifdef MACH_PV_PAGETABLES
+extern unsigned long cr3;
+#define get_cr3() (cr3)
+#define set_cr3(value) \
+ ({ \
+ cr3 = (value); \
+ if (!hyp_set_cr3(value)) \
+ panic("set_cr3"); \
+ })
+#else /* MACH_PV_PAGETABLES */
+#define get_cr3() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%cr3, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define set_cr3(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0, %%cr3" : : "r" (_temp__) : "memory"); \
+ })
+#endif /* MACH_PV_PAGETABLES */
+
+#define flush_tlb() set_cr3(get_cr3())
+
+#ifndef MACH_PV_PAGETABLES
+#define invlpg(addr) \
+ ({ \
+ asm volatile("invlpg (%0)" : : "r" (addr)); \
+ })
+
+#define invlpg_linear(start) \
+ ({ \
+ asm volatile( \
+ "movw %w1,%%es\n" \
+ "\tinvlpg %%es:(%0)\n" \
+ "\tmovw %w2,%%es" \
+ :: "r" (start), "q" (LINEAR_DS), "q" (KERNEL_DS)); \
+ })
+
+#define invlpg_linear_range(start, end) \
+ ({ \
+ register unsigned long var = trunc_page(start); \
+ asm volatile( \
+ "movw %w2,%%es\n" \
+ "1:\tinvlpg %%es:(%0)\n" \
+ "\taddl %c4,%0\n" \
+ "\tcmpl %0,%1\n" \
+ "\tjb 1b\n" \
+ "\tmovw %w3,%%es" \
+ : "+r" (var) : "r" (end), \
+ "q" (LINEAR_DS), "q" (KERNEL_DS), "i" (PAGE_SIZE)); \
+ })
+#endif /* MACH_PV_PAGETABLES */
+
+#define get_cr4() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%cr4, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define set_cr4(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0, %%cr4" : : "r" (_temp__)); \
+ })
+
+
+#ifdef MACH_RING1
+#define set_ts() \
+ hyp_fpu_taskswitch(1)
+#define clear_ts() \
+ hyp_fpu_taskswitch(0)
+#else /* MACH_RING1 */
+#define set_ts() \
+ set_cr0(get_cr0() | CR0_TS)
+
+#define clear_ts() \
+ asm volatile("clts")
+#endif /* MACH_RING1 */
+
+#define get_tr() \
+ ({ \
+ unsigned short _seg__; \
+ asm volatile("str %0" : "=rm" (_seg__) ); \
+ _seg__; \
+ })
+
+#define set_tr(seg) \
+ asm volatile("ltr %0" : : "rm" ((unsigned short)(seg)) )
+
+#define get_ldt() \
+ ({ \
+ unsigned short _seg__; \
+ asm volatile("sldt %0" : "=rm" (_seg__) ); \
+ _seg__; \
+ })
+
+#define set_ldt(seg) \
+ asm volatile("lldt %0" : : "rm" ((unsigned short)(seg)) )
+
+/* This doesn't set a processor register,
+ but it's often used immediately after setting one,
+ to flush the instruction queue. */
+#define flush_instr_queue() \
+ asm("jmp 0f\n" \
+ "0:\n")
+
+#ifdef MACH_RING1
+#define get_dr0() hyp_get_debugreg(0)
+#else
+#define get_dr0() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr0, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr0(value) hyp_set_debugreg(0, value)
+#else
+#define set_dr0(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr0" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr1() hyp_get_debugreg(1)
+#else
+#define get_dr1() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr1, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr1(value) hyp_set_debugreg(1, value)
+#else
+#define set_dr1(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr1" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr2() hyp_get_debugreg(2)
+#else
+#define get_dr2() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr2, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr2(value) hyp_set_debugreg(2, value)
+#else
+#define set_dr2(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr2" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr3() hyp_get_debugreg(3)
+#else
+#define get_dr3() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr3, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr3(value) hyp_set_debugreg(3, value)
+#else
+#define set_dr3(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr3" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr6() hyp_get_debugreg(6)
+#else
+#define get_dr6() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr6, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr6(value) hyp_set_debugreg(6, value)
+#else
+#define set_dr6(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr6" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr7() hyp_get_debugreg(7)
+#else
+#define get_dr7() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr7, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr7(value) hyp_set_debugreg(7, value)
+#else
+#define set_dr7(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr7" : : "r" (_temp__)); \
+ })
+#endif
+
+/* Note: gcc might want to use bx or the stack for %1 addressing, so we can't
+ * use them :/ */
+#ifdef __x86_64__
+#define cpuid(eax, ebx, ecx, edx) \
+{ \
+ uint64_t sav_rbx; \
+ asm( "mov %%rbx,%2\n\t" \
+ "cpuid\n\t" \
+ "xchg %2,%%rbx\n\t" \
+ "movl %k2,%1\n\t" \
+ : "+a" (eax), "=m" (ebx), "=&r" (sav_rbx), "+c" (ecx), "=&d" (edx)); \
+}
+#else
+#define cpuid(eax, ebx, ecx, edx) \
+{ \
+ asm ( "mov %%ebx,%1\n\t" \
+ "cpuid\n\t" \
+ "xchg %%ebx,%1\n\t" \
+ : "+a" (eax), "=&SD" (ebx), "+c" (ecx), "=&d" (edx)); \
+}
+#endif
+
+#endif /* __GNUC__ */
+#endif /* __ASSEMBLER__ */
+
+#endif /* _I386_PROC_REG_H_ */
diff --git a/i386/i386/sched_param.h b/i386/i386/sched_param.h
new file mode 100644
index 0000000..c93ed8a
--- /dev/null
+++ b/i386/i386/sched_param.h
@@ -0,0 +1,40 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Scheduler parameters.
+ */
+
+#ifndef _I386_SCHED_PARAM_H_
+#define _I386_SCHED_PARAM_H_
+
+/*
+ * Sequent requires a right shift of 17 bits to convert
+ * microseconds to priorities.
+ */
+
+#define PRI_SHIFT 17
+
+#endif /* _I386_SCHED_PARAM_H_ */
diff --git a/i386/i386/seg.h b/i386/i386/seg.h
new file mode 100644
index 0000000..673d1d9
--- /dev/null
+++ b/i386/i386/seg.h
@@ -0,0 +1,264 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_SEG_H_
+#define _I386_SEG_H_
+
+#include <mach/inline.h>
+#include <mach/machine/vm_types.h>
+
+/*
+ * i386 segmentation.
+ */
+
+/* Note: the value of KERNEL_RING is handled by hand in locore.S */
+#ifdef MACH_RING1
+#define KERNEL_RING 1
+#else /* MACH_RING1 */
+#define KERNEL_RING 0
+#endif /* MACH_RING1 */
+
+#ifndef __ASSEMBLER__
+
+/*
+ * Real segment descriptor.
+ */
+struct real_descriptor {
+ unsigned int limit_low:16, /* limit 0..15 */
+ base_low:16, /* base 0..15 */
+ base_med:8, /* base 16..23 */
+ access:8, /* access byte */
+ limit_high:4, /* limit 16..19 */
+ granularity:4, /* granularity */
+ base_high:8; /* base 24..31 */
+};
+typedef struct real_descriptor real_descriptor_t;
+typedef real_descriptor_t *real_descriptor_list_t;
+typedef const real_descriptor_list_t const_real_descriptor_list_t;
+
+#ifdef __x86_64__
+struct real_descriptor64 {
+ unsigned int limit_low:16, /* limit 0..15 */
+ base_low:16, /* base 0..15 */
+ base_med:8, /* base 16..23 */
+ access:8, /* access byte */
+ limit_high:4, /* limit 16..19 */
+ granularity:4, /* granularity */
+ base_high:8, /* base 24..31 */
+ base_ext:32, /* base 32..63 */
+ reserved1:8,
+ zero:5,
+ reserved2:19;
+};
+#endif
+
+struct real_gate {
+ unsigned int offset_low:16, /* offset 0..15 */
+ selector:16,
+ word_count:8,
+ access:8,
+ offset_high:16; /* offset 16..31 */
+#ifdef __x86_64__
+ unsigned int offset_ext:32, /* offset 32..63 */
+ reserved:32;
+#endif
+};
+
+#endif /* !__ASSEMBLER__ */
+
+#define SZ_64 0x2 /* 64-bit segment */
+#define SZ_32 0x4 /* 32-bit segment */
+#define SZ_16 0x0 /* 16-bit segment */
+#define SZ_G 0x8 /* 4K limit field */
+
+#define ACC_A 0x01 /* accessed */
+#define ACC_TYPE 0x1e /* type field: */
+
+#define ACC_TYPE_SYSTEM 0x00 /* system descriptors: */
+
+#define ACC_LDT 0x02 /* LDT */
+#define ACC_CALL_GATE_16 0x04 /* 16-bit call gate */
+#define ACC_TASK_GATE 0x05 /* task gate */
+#define ACC_TSS 0x09 /* task segment */
+#define ACC_CALL_GATE 0x0c /* call gate */
+#define ACC_INTR_GATE 0x0e /* interrupt gate */
+#define ACC_TRAP_GATE 0x0f /* trap gate */
+
+#define ACC_TSS_BUSY 0x02 /* task busy */
+
+#define ACC_TYPE_USER 0x10 /* user descriptors */
+
+#define ACC_DATA 0x10 /* data */
+#define ACC_DATA_W 0x12 /* data, writable */
+#define ACC_DATA_E 0x14 /* data, expand-down */
+#define ACC_DATA_EW 0x16 /* data, expand-down,
+ writable */
+#define ACC_CODE 0x18 /* code */
+#define ACC_CODE_R 0x1a /* code, readable */
+#define ACC_CODE_C 0x1c /* code, conforming */
+#define ACC_CODE_CR 0x1e /* code, conforming,
+ readable */
+#define ACC_PL 0x60 /* access rights: */
+#define ACC_PL_K (KERNEL_RING << 5) /* kernel access only */
+#define ACC_PL_U 0x60 /* user access */
+#define ACC_P 0x80 /* segment present */
+
+/*
+ * Components of a selector
+ */
+#define SEL_LDT 0x04 /* local selector */
+#define SEL_PL 0x03 /* privilege level: */
+#define SEL_PL_K KERNEL_RING /* kernel selector */
+#define SEL_PL_U 0x03 /* user selector */
+
+/*
+ * Convert selector to descriptor table index.
+ */
+#define sel_idx(sel) ((sel)>>3)
+
+
+#ifndef __ASSEMBLER__
+
+#include <mach/inline.h>
+#include <mach/xen.h>
+
+
+/* Format of a "pseudo-descriptor", used for loading the IDT and GDT. */
+struct pseudo_descriptor
+{
+ unsigned short limit;
+ unsigned long linear_base;
+ short pad;
+} __attribute__((packed));
+
+
+/* Load the processor's IDT, GDT, or LDT pointers. */
+/* Install a new global descriptor table from a pseudo-descriptor. */
+static inline void lgdt(struct pseudo_descriptor *pdesc)
+{
+ __asm volatile("lgdt %0" : : "m" (*pdesc));
+}
+/* Install a new interrupt descriptor table from a pseudo-descriptor. */
+static inline void lidt(struct pseudo_descriptor *pdesc)
+{
+ __asm volatile("lidt %0" : : "m" (*pdesc));
+}
+/* Select the active local descriptor table by its GDT selector. */
+static inline void lldt(unsigned short ldt_selector)
+{
+ __asm volatile("lldt %w0" : : "r" (ldt_selector) : "memory");
+}
+
+#ifdef CODE16
+#define i16_lgdt lgdt
+#define i16_lidt lidt
+#define i16_lldt lldt
+#endif
+
+
+/*
+ * Fill a segment descriptor: pack base, limit, access byte and size
+ * bits into the 8-byte descriptor format.  When the limit exceeds 20
+ * bits it is re-expressed in 4K pages and the SZ_G granularity bit is
+ * set.  Under MACH_PV_DESCRIPTORS the live table is owned by the
+ * hypervisor, so the descriptor is built locally and installed via
+ * hyp_do_update_descriptor.
+ */
+static inline void
+fill_descriptor(struct real_descriptor *_desc, vm_offset_t base, vm_offset_t limit,
+  unsigned char access, unsigned char sizebits)
+{
+ /* TODO: when !MACH_PV_DESCRIPTORS, setting desc and just memcpy isn't simpler actually */
+#ifdef MACH_PV_DESCRIPTORS
+ struct real_descriptor __desc, *desc = &__desc;
+#else /* MACH_PV_DESCRIPTORS */
+ struct real_descriptor *desc = _desc;
+#endif /* MACH_PV_DESCRIPTORS */
+ if (limit > 0xfffff)
+ {
+ /* Limit field is only 20 bits: switch to page granularity. */
+ limit >>= 12;
+ sizebits |= SZ_G;
+ }
+ desc->limit_low = limit & 0xffff;
+ desc->base_low = base & 0xffff;
+ desc->base_med = (base >> 16) & 0xff;
+ desc->access = access | ACC_P; /* always mark the segment present */
+ desc->limit_high = limit >> 16;
+ desc->granularity = sizebits;
+ desc->base_high = base >> 24;
+#ifdef MACH_PV_DESCRIPTORS
+ if (hyp_do_update_descriptor(kv_to_ma(_desc), *(uint64_t*)desc))
+ panic("couldn't update descriptor(%zu to %08lx%08lx)\n", (vm_offset_t) kv_to_ma(_desc), *(((unsigned long*)desc)+1), *(unsigned long *)desc);
+#endif /* MACH_PV_DESCRIPTORS */
+}
+
+#ifdef __x86_64__
+/*
+ * 64-bit variant of fill_descriptor() for 16-byte system descriptors
+ * (e.g. the TSS): additionally stores base bits 32..63 in base_ext and
+ * zeroes the reserved fields.  Same granularity switch and PV
+ * hypervisor-update path as the 32-bit version.
+ */
+static inline void
+fill_descriptor64(struct real_descriptor64 *_desc, unsigned long base, unsigned limit,
+  unsigned char access, unsigned char sizebits)
+{
+ /* TODO: when !MACH_PV_DESCRIPTORS, setting desc and just memcpy isn't simpler actually */
+#ifdef MACH_PV_DESCRIPTORS
+ struct real_descriptor64 __desc, *desc = &__desc;
+#else /* MACH_PV_DESCRIPTORS */
+ struct real_descriptor64 *desc = _desc;
+#endif /* MACH_PV_DESCRIPTORS */
+ if (limit > 0xfffff)
+ {
+ /* Limit field is only 20 bits: switch to page granularity. */
+ limit >>= 12;
+ sizebits |= SZ_G;
+ }
+ desc->limit_low = limit & 0xffff;
+ desc->base_low = base & 0xffff;
+ desc->base_med = (base >> 16) & 0xff;
+ desc->access = access | ACC_P; /* always mark the segment present */
+ desc->limit_high = limit >> 16;
+ desc->granularity = sizebits;
+ desc->base_high = base >> 24;
+ desc->base_ext = base >> 32;
+ desc->reserved1 = 0;
+ desc->zero = 0;
+ desc->reserved2 = 0;
+#ifdef MACH_PV_DESCRIPTORS
+ if (hyp_do_update_descriptor(kv_to_ma(_desc), *(uint64_t*)desc))
+ panic("couldn't update descriptor(%lu to %08lx%08lx)\n", (vm_offset_t) kv_to_ma(_desc), *(((unsigned long*)desc)+1), *(unsigned long *)desc);
+#endif /* MACH_PV_DESCRIPTORS */
+}
+#endif
+
+/*
+ * Initialize an interrupt/trap/call gate descriptor.  The handler
+ * offset is split across offset_low and offset_high (plus offset_ext
+ * on x86_64); the present bit (ACC_P) is always folded into the
+ * access byte.
+ */
+static inline void
+fill_gate(struct real_gate *gate, unsigned long offset, unsigned short selector,
+   unsigned char access, unsigned char word_count)
+{
+ gate->selector = selector;
+ gate->word_count = word_count;
+ gate->access = ACC_P | access; /* gate is always marked present */
+ gate->offset_low = (unsigned short)(offset & 0xffff);
+ gate->offset_high = (unsigned short)((offset >> 16) & 0xffff);
+#ifdef __x86_64__
+ gate->offset_ext = (unsigned int)(offset >> 32);
+ gate->reserved = 0;
+#endif
+}
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* _I386_SEG_H_ */
diff --git a/i386/i386/setjmp.h b/i386/i386/setjmp.h
new file mode 100644
index 0000000..eacc8e4
--- /dev/null
+++ b/i386/i386/setjmp.h
@@ -0,0 +1,44 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Setjmp/longjmp buffer for i386.
+ */
+#ifndef _I386_SETJMP_H_
+#define _I386_SETJMP_H_
+
/* Register buffer for _setjmp/_longjmp.  Only the callee-saved
 * registers plus the stack and instruction pointers are stored, in the
 * order listed in the comments (must match _setjmp.S). */
typedef struct jmp_buf {
#ifdef __i386__
	int jmp_buf[6];	/* ebx, esi, edi, ebp, esp, eip */
#else
	long jmp_buf[8];	/* rbx, rbp, r12, r13, r14, r15, rsp, rip */
#endif
} jmp_buf_t;

/* Save the calling context; returns 0 directly, nonzero when resumed
 * through _longjmp. */
extern int _setjmp(jmp_buf_t*);

/* Resume at the context saved in the buffer, making _setjmp return the
 * given (nonzero) value. */
extern void _longjmp(jmp_buf_t*, int) __attribute__ ((noreturn));
+
+#endif /* _I386_SETJMP_H_ */
diff --git a/i386/i386/smp.c b/i386/i386/smp.c
new file mode 100644
index 0000000..05e9de6
--- /dev/null
+++ b/i386/i386/smp.c
@@ -0,0 +1,199 @@
/* smp.c - i386 SMP controller for Mach
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Written by Almudena Garcia Jurado-Centurion
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#include <string.h>
+#include <i386/apic.h>
+#include <i386/smp.h>
+#include <i386/cpu.h>
+#include <i386/pio.h>
+#include <i386/vm_param.h>
+#include <i386at/idt.h>
+#include <i386at/cram.h>
+#include <i386at/acpi_parse_apic.h>
+#include <kern/printf.h>
+#include <mach/machine.h>
+
+#include <kern/smp.h>
+
+/*
+ * smp_data_init: initialize smp_data structure
+ * Must be called after smp_init(), once all APIC structures
+ * has been initialized
+ */
+static void smp_data_init(void)
+{
+ uint8_t numcpus = apic_get_numcpus();
+ smp_set_numcpus(numcpus);
+
+ for(int i = 0; i < numcpus; i++){
+ machine_slot[i].is_cpu = TRUE;
+ }
+
+}
+
/* Deliver a fixed (vectored) IPI to the CPU whose local APIC id is
 * apic_id.  Local interrupts are disabled around the whole
 * assert/deassert sequence; after each ICR write we spin until the
 * local APIC clears its "send pending" bit.
 * NOTE(review): fixed IPIs are edge-triggered; confirm whether the
 * explicit DE_ASSERT write is still required on modern APICs. */
static void smp_send_ipi(unsigned apic_id, unsigned vector)
{
    unsigned long flags;

    cpu_intr_save(&flags);

    apic_send_ipi(NO_SHORTHAND, FIXED, PHYSICAL, ASSERT, EDGE, vector, apic_id);

    /* Wait until the APIC has accepted the IPI. */
    do {
        cpu_pause();
    } while(lapic->icr_low.delivery_status == SEND_PENDING);

    apic_send_ipi(NO_SHORTHAND, FIXED, PHYSICAL, DE_ASSERT, EDGE, vector, apic_id);

    do {
        cpu_pause();
    } while(lapic->icr_low.delivery_status == SEND_PENDING);

    cpu_intr_restore(flags);
}
+
/* Ask the CPU with the given APIC id to run its AST check. */
void smp_remote_ast(unsigned apic_id)
{
    smp_send_ipi(apic_id, CALL_AST_CHECK);
}
+
/* Ask the CPU with the given APIC id to process pending pmap updates. */
void smp_pmap_update(unsigned apic_id)
{
    smp_send_ipi(apic_id, CALL_PMAP_UPDATE);
}
+
+static void
+wait_for_ipi(void)
+{
+ /* This could have a timeout, but if the IPI
+ * is never delivered, its a disaster anyway */
+ while (lapic->icr_low.delivery_status == SEND_PENDING) {
+ cpu_pause();
+ }
+}
+
+static int
+smp_send_ipi_init(int apic_id)
+{
+ int err;
+
+ lapic->error_status.r = 0;
+
+ /* Assert INIT IPI:
+ *
+ * This is EDGE triggered to match the deassert
+ */
+ apic_send_ipi(NO_SHORTHAND, INIT, PHYSICAL, ASSERT, EDGE, 0, apic_id);
+
+ /* Wait for delivery */
+ wait_for_ipi();
+ hpet_mdelay(10);
+
+ /* Deassert INIT IPI:
+ *
+ * NB: This must be an EDGE triggered deassert signal.
+ * A LEVEL triggered deassert is only supported on very old hardware
+ * that does not support STARTUP IPIs at all, and instead jump
+ * via a warm reset vector.
+ */
+ apic_send_ipi(NO_SHORTHAND, INIT, PHYSICAL, DE_ASSERT, EDGE, 0, apic_id);
+
+ /* Wait for delivery */
+ wait_for_ipi();
+
+ err = lapic->error_status.r;
+ if (err) {
+ printf("ESR error upon INIT 0x%x\n", err);
+ }
+ return 0;
+}
+
+static int
+smp_send_ipi_startup(int apic_id, int vector)
+{
+ int err;
+
+ lapic->error_status.r = 0;
+
+ /* StartUp IPI:
+ *
+ * Have not seen any documentation for trigger mode for this IPI
+ * but it seems to work with EDGE. (AMD BKDG FAM16h document specifies dont care)
+ */
+ apic_send_ipi(NO_SHORTHAND, STARTUP, PHYSICAL, ASSERT, EDGE, vector, apic_id);
+
+ /* Wait for delivery */
+ wait_for_ipi();
+
+ err = lapic->error_status.r;
+ if (err) {
+ printf("ESR error upon STARTUP 0x%x\n", err);
+ }
+ return 0;
+}
+
/* See Intel IA32/64 Software Developer's Manual 3A Section 8.4.4.1 */
/* Boot an application processor: flush local caches, then issue the
 * INIT - SIPI - SIPI sequence with the manual's recommended delays.
 * The SIPI vector is the 4 KiB page number of start_eip.
 * Always returns 0. */
int smp_startup_cpu(unsigned apic_id, phys_addr_t start_eip)
{
#if 0
    /* This block goes with a legacy method of INIT that only works with
     * old hardware that does not support SIPIs.
     * Must use INIT DEASSERT LEVEL triggered IPI to use this block.
     * (At least one AMD FCH does not support this IPI mode,
     * See AMD BKDG FAM16h document # 48751 page 461).
     */

    /* Tell CMOS to warm reset through 40:67 */
    outb(CMOS_ADDR, CMOS_SHUTDOWN);
    outb(CMOS_DATA, CM_JMP_467);

    /* Set warm reset vector to point to AP startup code */
    uint16_t dword[2];
    dword[0] = 0;
    dword[1] = start_eip >> 4;
    memcpy((uint8_t *)phystokv(0x467), dword, 4);
#endif

    /* Local cache flush */
    asm("wbinvd":::"memory");

    printf("Sending IPIs to APIC ID %u...\n", apic_id);

    smp_send_ipi_init(apic_id);
    hpet_mdelay(10);
    smp_send_ipi_startup(apic_id, start_eip >> STARTUP_VECTOR_SHIFT);
    hpet_udelay(200);
    smp_send_ipi_startup(apic_id, start_eip >> STARTUP_VECTOR_SHIFT);
    hpet_udelay(200);

    printf("done\n");
    return 0;
}
+
/*
 * smp_init: initialize the SMP support, starting the cpus searching
 * and enumeration.
 * (Currently this only publishes the CPU count discovered by the APIC
 * code; always returns 0.)
 */
int smp_init(void)
{
    smp_data_init();

    return 0;
}
diff --git a/i386/i386/smp.h b/i386/i386/smp.h
new file mode 100644
index 0000000..73d273e
--- /dev/null
+++ b/i386/i386/smp.h
@@ -0,0 +1,34 @@
+/* smp.h - i386 SMP controller for Mach. Header file
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Written by Almudena Garcia Jurado-Centurion
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#ifndef _SMP_H_
+#define _SMP_H_
+
+#include <mach/machine/vm_types.h>
+
+int smp_init(void);
+void smp_remote_ast(unsigned apic_id);
+void smp_pmap_update(unsigned apic_id);
+int smp_startup_cpu(unsigned apic_id, phys_addr_t start_eip);
+
/* Spin-wait hint for busy loops (x86 PAUSE instruction). */
#define cpu_pause() asm volatile ("pause" : : : "memory")
/* A STARTUP IPI vector is the 4 KiB page number of the below-1-MiB
 * entry point: the 8-bit vector selects one of the 256 pages in the
 * first MiB, hence a shift of 20 - 8 = 12 bits. */
#define STARTUP_VECTOR_SHIFT (20 - 8)
+
+#endif
diff --git a/i386/i386/spl.S b/i386/i386/spl.S
new file mode 100644
index 0000000..2f2c8e3
--- /dev/null
+++ b/i386/i386/spl.S
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 1995 Shantanu Goel
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE AUTHOR ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE AUTHOR DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+/*
+ * spl routines for the i386at.
+ */
+
+#include <mach/machine/asm.h>
+#include <i386/ipl.h>
+#include <i386/i386asm.h>
+#include <i386/xen.h>
+#include <i386/cpu_number.h>
+#include <i386/gdt.h>
+
+#if NCPUS > 1
+#define mb lock; addl $0,(%esp)
+#else
+#define mb
+#endif
+
/*
 * Program XEN evt masks from %eax.
 * Installs %eax as the new event mask (the xchg leaves the previous
 * mask in %eax) and, if an event that just became unmasked is pending,
 * flags it so the hypervisor upcall will be taken.  Preserves %ebx.
 */
#define XEN_SETMASK() \
	pushl %ebx; \
	movl %eax,%ebx; \
	xchgl %eax,hyp_shared_info+EVTMASK; \
	notl %ebx; \
	andl %eax,%ebx; /* Get unmasked events */ \
	testl hyp_shared_info+PENDING, %ebx; \
	popl %ebx; \
	jz 9f; /* Check whether there was some pending */ \
lock	orl $1,hyp_shared_info+CPU_PENDING_SEL; /* Yes, activate it */ \
	movb $1,hyp_shared_info+CPU_PENDING; \
9:
+
/*
 * spl0() - lower the interrupt priority level to SPL0.
 * Before lowering, run any pending Linux bottom halves (LINUX_DEV)
 * and any pending softclock callout at spl1, then enable interrupts.
 * Returns the previous ipl.
 */
ENTRY(spl0)
	mb;
	CPU_NUMBER(%edx)
	movl	CX(EXT(curr_ipl),%edx),%eax	/* save current ipl */
	pushl	%eax
	cli				/* disable interrupts */
#ifdef LINUX_DEV
	movl	EXT(bh_active),%eax
					/* get pending mask */
	andl	EXT(bh_mask),%eax	/* any pending unmasked interrupts? */
	jz	1f			/* no, skip */
	call	EXT(spl1)		/* block further interrupts */
	incl	EXT(intr_count)		/* set interrupt flag */
	call	EXT(linux_soft_intr)	/* go handle interrupt */
	decl	EXT(intr_count)		/* decrement interrupt flag */
	cli				/* disable interrupts */
1:
#endif
	cmpl	$0,softclkpending	/* softclock pending? */
	je	1f			/* no, skip */
	movl	$0,softclkpending	/* clear flag */
	call	EXT(spl1)		/* block further interrupts */
#ifdef LINUX_DEV
	incl	EXT(intr_count)		/* set interrupt flag */
#endif
	call	EXT(softclock)		/* go handle interrupt */
#ifdef LINUX_DEV
	decl	EXT(intr_count)		/* decrement interrupt flag */
#endif
	cli				/* disable interrupts */
1:
	CPU_NUMBER(%edx)
	cmpl	$(SPL0),CX(EXT(curr_ipl),%edx)	/* are we at spl0? */
	je	1f			/* yes, all done */
	movl	$(SPL0),CX(EXT(curr_ipl),%edx)	/* set ipl */
#ifdef MACH_XEN
	movl	EXT(int_mask)+SPL0*4,%eax
					/* get xen mask */
	XEN_SETMASK()			/* program xen evts */
#endif
1:
	sti				/* enable interrupts */
	popl	%eax			/* return previous mask */
	ret
+
+
/*
 * Historically, SETIPL(level) was called
 * for spl levels 1-6, now we have combined
 * all the intermediate levels into the highest level
 * such that interrupts are either on or off,
 * since modern hardware can handle it.
 * This simplifies the interrupt handling
 * especially for the linux drivers.
 *
 * All of the entry points below therefore raise to SPL7 (interrupts
 * disabled) and return the previous ipl.
 */
Entry(splsoftclock)
ENTRY(spl1)
ENTRY(spl2)
ENTRY(spl3)
Entry(splnet)
Entry(splhdw)
ENTRY(spl4)
Entry(splbio)
Entry(spldcm)
ENTRY(spl5)
Entry(spltty)
Entry(splimp)
Entry(splvm)
ENTRY(spl6)
Entry(splclock)
Entry(splsched)
Entry(splhigh)
Entry(splhi)
ENTRY(spl7)
	mb;
	/* just clear IF */
	cli
	CPU_NUMBER(%edx)
	movl	$SPL7,%eax
	xchgl	CX(EXT(curr_ipl),%edx),%eax	/* set SPL7, fetch old ipl */
	ret
+
/*
 * splx(ipl) - restore a previously saved interrupt priority level.
 * Tail-calls spl0/spl for real transitions; when the requested level
 * equals the current one, only the interrupt flag is fixed up.
 * Returns the previous ipl.
 */
ENTRY(splx)
	movl	S_ARG0,%edx		/* get ipl */
	CPU_NUMBER(%eax)
#if (MACH_KDB || MACH_TTD) && !defined(MACH_XEN)
	/* First make sure that if we're exiting from ipl7, IF is still cleared */
	cmpl	$SPL7,CX(EXT(curr_ipl),%eax)	/* from ipl7? */
	jne	0f
	pushfl
	popl	%eax
	testl	$0x200,%eax		/* IF? */
	jz	0f
	int3				/* Oops, interrupts got enabled?! */

0:
#endif	/* (MACH_KDB || MACH_TTD) && !MACH_XEN */
	testl	%edx,%edx		/* spl0? */
	jz	EXT(spl0)		/* yes, handle specially */
	CPU_NUMBER(%eax)		/* (%eax may be clobbered above) */
	cmpl	CX(EXT(curr_ipl),%eax),%edx	/* same ipl as current? */
	jne	spl			/* no */
	cmpl	$SPL7,%edx		/* spl7? */
	je	1f			/* to ipl7, don't enable interrupts */
	sti				/* ensure interrupts are enabled */
1:
	movl	%edx,%eax		/* return previous ipl */
	ret
+
/*
 * Like splx() but returns with interrupts disabled and does
 * not return the previous ipl.  This should only be called
 * when returning from an interrupt.
 * When dropping to spl0 it first runs pending Linux bottom halves
 * and the softclock callout, just as spl0 does.
 */
	.align	TEXT_ALIGN
	.globl	splx_cli
splx_cli:
	movl	S_ARG0,%edx		/* get ipl */
	cli				/* disable interrupts */
	testl	%edx,%edx		/* spl0? */
	jnz	2f			/* no, skip */
#ifdef LINUX_DEV
	movl	EXT(bh_active),%eax
					/* get pending mask */
	andl	EXT(bh_mask),%eax	/* any pending unmasked interrupts? */
	jz	1f			/* no, skip */
	call	EXT(spl1)		/* block further interrupts */
	incl	EXT(intr_count)		/* set interrupt flag */
	call	EXT(linux_soft_intr)	/* go handle interrupt */
	decl	EXT(intr_count)		/* decrement interrupt flag */
	cli				/* disable interrupts */
1:
#endif
	cmpl	$0,softclkpending	/* softclock pending? */
	je	1f			/* no, skip */
	movl	$0,softclkpending	/* clear flag */
	call	EXT(spl1)		/* block further interrupts */
#ifdef LINUX_DEV
	incl	EXT(intr_count)		/* set interrupt flag */
#endif
	call	EXT(softclock)		/* go handle interrupt */
#ifdef LINUX_DEV
	decl	EXT(intr_count)		/* decrement interrupt flag */
#endif
	cli				/* disable interrupts */
1:
	xorl	%edx,%edx		/* edx = ipl 0 */
2:
	CPU_NUMBER(%eax)
	cmpl	CX(EXT(curr_ipl),%eax),%edx	/* same ipl as current? */
	je	1f			/* yes, all done */
	movl	%edx,CX(EXT(curr_ipl),%eax)	/* set ipl */
#ifdef MACH_XEN
	movl	EXT(int_mask)(,%edx,4),%eax
					/* get int mask */
	XEN_SETMASK()			/* program xen evts with new mask */
#endif
1:
	ret
+
/*
 * spl - internal helper jumped to from splx() with the new ipl in
 * %edx.  Switches curr_ipl with interrupts disabled during the switch
 * and returns the previous ipl with interrupts enabled.
 * NOTE: This routine must *not* use %ecx, otherwise
 * the interrupt code will break.
 * NOTE(review): under MACH_XEN the mask is loaded into %eax before
 * CPU_NUMBER(%eax); with NCPUS > 1 that would clobber it -- confirm
 * that Xen builds keep CPU_NUMBER a no-op.
 */
	.align	TEXT_ALIGN
	.globl	spl
spl:
	CPU_NUMBER(%eax)
#if (MACH_KDB || MACH_TTD) && !defined(MACH_XEN)
	/* First make sure that if we're exiting from ipl7, IF is still cleared */
	cmpl	$SPL7,CX(EXT(curr_ipl),%eax)	/* from ipl7? */
	jne	0f
	pushfl
	popl	%eax
	testl	$0x200,%eax		/* IF? */
	jz	0f
	int3				/* Oops, interrupts got enabled?! */

0:
#endif	/* (MACH_KDB || MACH_TTD) && !MACH_XEN */
	cmpl	$SPL7,%edx		/* spl7? */
	je	EXT(spl7)		/* yes, handle specially */
#ifdef MACH_XEN
	movl	EXT(int_mask)(,%edx,4),%eax
					/* get int mask */
#endif
	cli				/* disable interrupts */
	CPU_NUMBER(%eax)
	xchgl	CX(EXT(curr_ipl),%eax),%edx	/* set ipl */
#ifdef MACH_XEN
	XEN_SETMASK()			/* program PICs with new mask */
#endif
	sti				/* enable interrupts */
	movl	%edx,%eax		/* return previous ipl */
	ret
+
/* sploff() - return the current EFLAGS and disable interrupts. */
ENTRY(sploff)
	pushfl
	popl	%eax
	cli
	ret

/* splon(flags) - restore EFLAGS as saved by sploff(). */
ENTRY(splon)
	pushl	4(%esp)
	popfl
	ret
+
	.data
	.align	DATA_ALIGN
/* Nonzero when a softclock callout has been requested but not yet run
 * (consumed by spl0/splx_cli above). */
softclkpending:
	.long	0
	.text

/* setsoftclock() - request a softclock callout. */
ENTRY(setsoftclock)
	incl	softclkpending
	ret
diff --git a/i386/i386/spl.h b/i386/i386/spl.h
new file mode 100644
index 0000000..41ad225
--- /dev/null
+++ b/i386/i386/spl.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 1995, 1994, 1993, 1992, 1991, 1990
+ * Open Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of ("OSF") or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL OSF BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE
+ */
+/*
+ * OSF Research Institute MK6.1 (unencumbered) 1/31/1995
+ */
+
+#ifndef _MACHINE_SPL_H_
+#define _MACHINE_SPL_H_
+
+/*
+ * This file defines the interrupt priority levels used by
+ * machine-dependent code.
+ */
+
/* An interrupt priority level value. */
typedef int spl_t;

/* All of the level-raising entry points below map onto the same
 * on/off scheme implemented in spl.S (the intermediate levels 1-6 are
 * aliases of the highest level); each returns the previous level. */
extern spl_t (splhi)(void);

/* Lower to level 0, running pending soft interrupts and enabling
 * hardware interrupts. */
extern spl_t (spl0)(void);

extern spl_t (spl1)(void);
extern spl_t (splsoftclock)(void);

extern spl_t (spl2)(void);

extern spl_t (spl3)(void);

extern spl_t (spl4)(void);
extern spl_t (splnet)(void);
extern spl_t (splhdw)(void);

extern spl_t (spl5)(void);
extern spl_t (splbio)(void);
extern spl_t (spldcm)(void);

extern spl_t (spl6)(void);
extern spl_t (spltty)(void);
extern spl_t (splimp)(void);
extern spl_t (splvm)(void);

extern spl_t (spl7)(void);
extern spl_t (splclock)(void);
extern spl_t (splsched)(void);
#define assert_splsched()	assert(splsched() == SPL7)
extern spl_t (splhigh)(void);

/* Restore a previously saved level.  splx_cli additionally returns
 * with interrupts disabled (for interrupt-return paths). */
extern spl_t (splx)(spl_t n);
extern spl_t (splx_cli)(spl_t n);

/* Restore/save the raw EFLAGS interrupt state. */
extern void splon (unsigned long n);

extern unsigned long sploff (void);

/* Request a softclock callout, run when dropping to spl0. */
extern void setsoftclock (void);
extern int spl_init;
+
+/* XXX Include each other... */
+#include <i386/ipl.h>
+
+#endif /* _MACHINE_SPL_H_ */
diff --git a/i386/i386/strings.c b/i386/i386/strings.c
new file mode 100644
index 0000000..f1752de
--- /dev/null
+++ b/i386/i386/strings.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2014 Richard Braun.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stddef.h>
+#include <string.h>
+
+#define ARCH_STRING_MEMCPY
+#define ARCH_STRING_MEMMOVE
+#define ARCH_STRING_MEMSET
+#define ARCH_STRING_MEMCMP
+
+#ifdef ARCH_STRING_MEMCPY
+void *
+memcpy(void *dest, const void *src, size_t n)
+{
+ void *orig_dest;
+
+ orig_dest = dest;
+ asm volatile("rep movsb"
+ : "+D" (dest), "+S" (src), "+c" (n)
+ : : "memory");
+ return orig_dest;
+}
+#endif /* ARCH_STRING_MEMCPY */
+
+#ifdef ARCH_STRING_MEMMOVE
+void *
+memmove(void *dest, const void *src, size_t n)
+{
+ void *orig_dest;
+
+ orig_dest = dest;
+
+ if (dest <= src)
+ asm volatile("rep movsb"
+ : "+D" (dest), "+S" (src), "+c" (n)
+ : : "memory");
+ else {
+ dest += n - 1;
+ src += n - 1;
+ asm volatile("std; rep movsb; cld"
+ : "+D" (dest), "+S" (src), "+c" (n)
+ : : "memory");
+ }
+
+ return orig_dest;
+}
+#endif /* ARCH_STRING_MEMMOVE */
+
+#ifdef ARCH_STRING_MEMSET
+void *
+memset(void *s, int c, size_t n)
+{
+ void *orig_s;
+
+ orig_s = s;
+ asm volatile("rep stosb"
+ : "+D" (s), "+c" (n)
+ : "a" (c)
+ : "memory");
+ return orig_s;
+}
+#endif /* ARCH_STRING_MEMSET */
+
+#ifdef ARCH_STRING_MEMCMP
+int
+memcmp(const void *s1, const void *s2, size_t n)
+{
+ unsigned char c1, c2;
+
+ if (n == 0)
+ return 0;
+
+ asm volatile("repe cmpsb"
+ : "+D" (s1), "+S" (s2), "+c" (n)
+ : : "memory");
+ c1 = *(((const unsigned char *)s1) - 1);
+ c2 = *(((const unsigned char *)s2) - 1);
+ return (int)c1 - (int)c2;
+}
+#endif /* ARCH_STRING_MEMCMP */
diff --git a/i386/i386/task.h b/i386/i386/task.h
new file mode 100644
index 0000000..0060ad4
--- /dev/null
+++ b/i386/i386/task.h
@@ -0,0 +1,61 @@
+/* Data types for machine specific parts of tasks on i386.
+
+ Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+
+ Written by Marcus Brinkmann. Glued into GNU Mach by Thomas Schwinge.
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef _I386_TASK_H_
+#define _I386_TASK_H_
+
+#include <kern/kern_types.h>
+#include <kern/slab.h>
+
/* The machine specific data of a task. */
struct machine_task
{
  /* A lock protecting iopb_size and iopb. */
  decl_simple_lock_data (, iopb_lock);

  /* The highest I/O port number enabled. */
  int iopb_size;

  /* The I/O permission bitmap (presumably allocated from
     machine_task_iopb_cache below -- confirm in machine_task.c). */
  unsigned char *iopb;
};
typedef struct machine_task machine_task_t;
+
+
+extern struct kmem_cache machine_task_iopb_cache;
+
+/* Initialize the machine task module. The function is called once at
+ start up by task_init in kern/task.c. */
+void machine_task_module_init (void);
+
+/* Initialize the machine specific part of task TASK. */
+void machine_task_init (task_t);
+
+/* Destroy the machine specific part of task TASK and release all
+ associated resources. */
+void machine_task_terminate (task_t);
+
+/* Try to release as much memory from the machine specific data in
+ task TASK. */
+void machine_task_collect (task_t);
+
+#endif /* _I386_TASK_H_ */
diff --git a/i386/i386/thread.h b/i386/i386/thread.h
new file mode 100644
index 0000000..9c88d09
--- /dev/null
+++ b/i386/i386/thread.h
@@ -0,0 +1,276 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: machine/thread.h
+ *
+ * This file contains the structure definitions for the thread
+ * state as applied to I386 processors.
+ */
+
+#ifndef _I386_THREAD_H_
+#define _I386_THREAD_H_
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+#include <mach/machine/fp_reg.h>
+#include <mach/machine/thread_status.h>
+
+#include <kern/lock.h>
+
+#include "gdt.h"
+
/*
 * i386_saved_state:
 *
 *	This structure corresponds to the state of user registers
 *	as saved upon kernel entry.  It lives in the pcb.
 *	It is also pushed onto the stack for exceptions in the kernel.
 *	Fields keep their i386 names; on x86_64 each `unsigned long'
 *	holds the corresponding 64-bit register.
 */

struct i386_saved_state {
#if !defined(__x86_64__) || defined(USER32)
	/* Data segment registers (32-bit kernels / 32-bit userland only). */
	unsigned long	gs;
	unsigned long	fs;
	unsigned long	es;
	unsigned long	ds;
#endif
#ifdef __x86_64__
	unsigned long	r15;
	unsigned long	r14;
	unsigned long	r13;
	unsigned long	r12;
	unsigned long	r11;
	unsigned long	r10;
	unsigned long	r9;
	unsigned long	r8;
#endif
	unsigned long	edi;
	unsigned long	esi;
	unsigned long	ebp;
	unsigned long	cr2;	/* kernel esp stored by pusha -
				   we save cr2 here later */
	unsigned long	ebx;
	unsigned long	edx;
	unsigned long	ecx;
	unsigned long	eax;
	unsigned long	trapno;	/* trap number */
	unsigned long	err;	/* error code (hardware or simulated) */
	/* Exception frame (eip/cs/efl/esp/ss). */
	unsigned long	eip;
	unsigned long	cs;
	unsigned long	efl;
	unsigned long	uesp;
	unsigned long	ss;
#if !defined(__x86_64__) || defined(USER32)
	struct v86_segs {
		unsigned long v86_es;	/* virtual 8086 segment registers */
		unsigned long v86_ds;
		unsigned long v86_fs;
		unsigned long v86_gs;
	} v86_segs;
#endif
};
+
/*
 * i386_exception_link:
 *
 *	This structure lives at the high end of the kernel stack.
 *	It points to the current thread`s user registers.
 *	(Located via the STACK_IEL() macro below.)
 */
struct i386_exception_link {
	struct i386_saved_state *saved_state;
};
+
/*
 * i386_kernel_state:
 *
 *	This structure corresponds to the state of kernel registers
 *	as saved in a context-switch.  It lives at the base of the stack.
 *	Only the callee-saved registers plus the resume address are kept.
 */

struct i386_kernel_state {
	long		k_ebx;	/* kernel context */
	long		k_esp;
	long		k_ebp;
#ifdef __i386__
	long		k_edi;
	long		k_esi;
#endif
	long		k_eip;	/* resume address */
#ifdef __x86_64__
	long		k_r12;
	long		k_r13;
	long		k_r14;
	long		k_r15;
#endif
};
+
/*
 * Save area for user floating-point state.
 * Allocated only when necessary.
 */

struct i386_fpsave_state {
	boolean_t	fp_valid;	/* whether the state below is valid
					   -- NOTE(review): confirm exact
					   protocol in fpu.c */

	union {
		struct {
			struct i386_fp_save	fp_save_state;	/* basic FP state */
			struct i386_fp_regs	fp_regs;
		};
		struct i386_xfp_save	xfp_save_state;	/* extended (xfp) state */
	};
};
+
#if !defined(__x86_64__) || defined(USER32)
/*
 * v86_assist_state:
 *
 *	This structure provides data to simulate 8086 mode
 *	interrupts.  It lives in the pcb.
 *	(Only present for 32-bit userland.)
 */

struct v86_assist_state {
	vm_offset_t		int_table;
	unsigned short		int_count;
	unsigned short		flags;	/* 8086 flag bits */
};
#define	V86_IF_PENDING	0x8000	/* unused bit */
#endif
+
#if defined(__x86_64__) && !defined(USER32)
/* Per-thread %fs/%gs segment base addresses (64-bit userland only). */
struct i386_segment_base_state {
	unsigned long fsbase;
	unsigned long gsbase;
};
#endif
+
/*
 * i386_interrupt_state:
 *
 *	This structure describes the set of registers that must
 *	be pushed on the current ring-0 stack by an interrupt before
 *	we can switch to the interrupt stack.
 *	(Caller-saved registers plus the hardware-pushed frame.)
 */

struct i386_interrupt_state {
#if !defined(__x86_64__) || defined(USER32)
	long	gs;
	long	fs;
	long	es;
	long	ds;
#endif
#ifdef __x86_64__
	long	r11;
	long	r10;
	long	r9;
	long	r8;
	long	rdi;
	long	rsi;
#endif
	long	edx;
	long	ecx;
	long	eax;
	/* Frame pushed by the CPU on interrupt entry. */
	long	eip;
	long	cs;
	long	efl;
};
+
/*
 * i386_machine_state:
 *
 *	This structure corresponds to special machine state.
 *	It lives in the pcb.  It is not saved by default.
 */

struct i386_machine_state {
	struct user_ldt	*	ldt;	/* user LDT, if one has been set */
	struct i386_fpsave_state *ifps;	/* FP state; "allocated only when
					   necessary" (see above) */
#if !defined(__x86_64__) || defined(USER32)
	struct v86_assist_state	v86s;	/* 8086 interrupt simulation data */
#endif
	struct real_descriptor user_gdt[USER_GDT_SLOTS];	/* per-thread GDT entries */
	struct i386_debug_state ids;	/* debug register state */
#if defined(__x86_64__) && !defined(USER32)
	struct i386_segment_base_state sbs;	/* fs/gs bases */
#endif
};
+
typedef struct pcb {
	/* START of the exception stack.
	 * NOTE: this area is used as exception stack when switching
	 * CPL, and it MUST be big enough to save the thread state and
	 * switch to a proper stack area, even considering recursive
	 * exceptions, otherwise it could corrupt nearby memory */
	struct i386_interrupt_state iis[2];	/* interrupt and NMI */
#ifdef __x86_64__
	unsigned long pad;	/* ensure exception stack is aligned to 16 */
#endif
	struct i386_saved_state iss;	/* user register state (USER_REGS) */
	/* END of exception stack*/
	struct i386_machine_state ims;
	decl_simple_lock_data(, lock)
	unsigned short init_control;	/* Initial FPU control to set */
#ifdef LINUX_DEV
	void *data;	/* Linux driver glue private data */
#endif /* LINUX_DEV */
} *pcb_t;
+
/*
 * On the kernel stack is:
 *	stack:	...
 *		struct i386_exception_link
 *		struct i386_kernel_state
 *	stack+KERNEL_STACK_SIZE
 */

/* Address of the i386_kernel_state at the top end of a kernel stack. */
#define STACK_IKS(stack) \
	((struct i386_kernel_state *)((stack) + KERNEL_STACK_SIZE) - 1)
/* Address of the exception link, just below the kernel state. */
#define STACK_IEL(stack) \
	((struct i386_exception_link *)STACK_IKS(stack) - 1)

#ifdef __x86_64__
#define KERNEL_STACK_ALIGN 16
#else
#define KERNEL_STACK_ALIGN 4
#endif

#if defined(__x86_64__) && !defined(USER32)
/* Follow System V AMD64 ABI guidelines. */
#define USER_STACK_ALIGN 16
#else
#define USER_STACK_ALIGN 4
#endif

/* A thread's user register state lives in its pcb (see struct pcb). */
#define USER_REGS(thread) (&(thread)->pcb->iss)


#define syscall_emulation_sync(task)	/* do nothing */
+
+
+/* #include_next "thread.h" */
+
+
+#endif /* _I386_THREAD_H_ */
diff --git a/i386/i386/time_stamp.h b/i386/i386/time_stamp.h
new file mode 100644
index 0000000..43bb956
--- /dev/null
+++ b/i386/i386/time_stamp.h
@@ -0,0 +1,30 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * The i386 timestamp implementation uses the default, so we don't
+ * need to do anything here.
+ */
+
diff --git a/i386/i386/trap.c b/i386/i386/trap.c
new file mode 100644
index 0000000..db4c702
--- /dev/null
+++ b/i386/i386/trap.c
@@ -0,0 +1,675 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Hardware trap/fault handler.
+ */
+
+#include <sys/types.h>
+#include <string.h>
+
+#include <mach/machine/eflags.h>
+#include <i386/trap.h>
+#include <i386/fpu.h>
+#include <i386/locore.h>
+#include <i386/model_dep.h>
+#include <intel/read_fault.h>
+#include <machine/machspl.h> /* for spl_t */
+#include <machine/db_interface.h>
+
+#include <mach/exception.h>
+#include <mach/kern_return.h>
+#include "vm_param.h"
+#include <mach/machine/thread_status.h>
+
+#include <vm/vm_fault.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+
+#include <kern/ast.h>
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/exception.h>
+
+#if MACH_KDB
+#include <ddb/db_break.h>
+#include <ddb/db_run.h>
+#include <ddb/db_watch.h>
+#endif
+
+#include "debug.h"
+
+#if MACH_KDB
+boolean_t debug_all_traps_with_kdb = FALSE;
+extern struct db_watchpoint *db_watchpoint_list;
+extern boolean_t db_watchpoints_inserted;
+
+/*
+ * Re-enter the kernel debugger on the current thread's saved
+ * user-mode state; if kdb resumes the thread, return straight
+ * to user mode (thread_exception_return does not come back).
+ */
+void
+thread_kdb_return(void)
+{
+ thread_t thread = current_thread();
+ struct i386_saved_state *regs = USER_REGS(thread);
+
+ if (kdb_trap(regs->trapno, regs->err, regs)) {
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+}
+#endif /* MACH_KDB */
+
+#if MACH_TTD
+extern boolean_t kttd_enabled;
+boolean_t debug_all_traps_with_kttd = TRUE;
+#endif /* MACH_TTD */
+
+/*
+ * Continuation run once a user-mode page fault has been resolved
+ * (or has definitely failed): resume the thread on success, or
+ * convert the fault into an EXC_BAD_ACCESS exception.  The faulting
+ * linear address is still available in the saved cr2.
+ */
+static void
+user_page_fault_continue(kern_return_t kr)
+{
+ thread_t thread = current_thread();
+ struct i386_saved_state *regs = USER_REGS(thread);
+
+ if (kr == KERN_SUCCESS) {
+#if MACH_KDB
+ /* Successful write fault on a watched address: drop into
+ the debugger before resuming. */
+ if (db_watchpoint_list &&
+ db_watchpoints_inserted &&
+ (regs->err & T_PF_WRITE) &&
+ db_find_watchpoint(thread->task->map,
+ (vm_offset_t)regs->cr2,
+ regs))
+ kdb_trap(T_WATCHPOINT, 0, regs);
+#endif /* MACH_KDB */
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+
+#if MACH_KDB
+ if (debug_all_traps_with_kdb &&
+ kdb_trap(regs->trapno, regs->err, regs)) {
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+#endif /* MACH_KDB */
+
+ i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
+ /*NOTREACHED*/
+}
+
+
+/* Human-readable names for the hardware trap vectors, indexed by
+   trap number (vectors 0..16). */
+static char *trap_type[] = {
+ "Divide error",
+ "Debug trap",
+ "NMI",
+ "Breakpoint",
+ "Overflow",
+ "Bounds check",
+ "Invalid opcode",
+ "No coprocessor",
+ "Double fault",
+ "Coprocessor overrun",
+ "Invalid TSS",
+ "Segment not present",
+ "Stack bounds",
+ "General protection",
+ "Page fault",
+ "(reserved)",
+ "Coprocessor error"
+};
+#define TRAP_TYPES (sizeof(trap_type)/sizeof(trap_type[0]))
+
+/* Return the printable name of TRAPNUM, or "(unknown)" when the
+   vector is outside the table. */
+char *trap_name(unsigned int trapnum)
+{
+ return trapnum < TRAP_TYPES ? trap_type[trapnum] : "(unknown)";
+}
+
+/*
+ * Trap from kernel mode. Only page-fault errors are recoverable,
+ * and then only in special circumstances. All other errors are
+ * fatal.
+ */
+void kernel_trap(struct i386_saved_state *regs)
+{
+ unsigned long code;
+ unsigned long subcode;
+ unsigned long type;
+ vm_map_t map;
+ kern_return_t result;
+ thread_t thread;
+ extern char _start[], etext[];
+
+ type = regs->trapno;
+ code = regs->err;
+ thread = current_thread();
+
+#if 0
+((short*)0xb8700)[0] = 0x0f00+'K';
+((short*)0xb8700)[1] = 0x0f30+(type / 10);
+((short*)0xb8700)[2] = 0x0f30+(type % 10);
+#endif
+#if 0
+printf("kernel trap %d error %d\n", (int) type, (int) code);
+dump_ss(regs);
+#endif
+
+ switch (type) {
+ case T_NO_FPU:
+ fpnoextflt();
+ return;
+
+ case T_FPU_FAULT:
+ fpextovrflt();
+ return;
+
+ case T_FLOATING_POINT_ERROR:
+ fpexterrflt();
+ return;
+
+ case T_PAGE_FAULT:
+
+ /* Get faulting linear address */
+ subcode = regs->cr2;
+#if 0
+ printf("kernel page fault at linear address %08x\n", subcode);
+#endif
+
+ /* If it's in the kernel linear address region,
+ convert it to a kernel virtual address
+ and use the kernel map to process the fault. */
+ if (lintokv(subcode) == 0 ||
+ subcode >= LINEAR_MIN_KERNEL_ADDRESS) {
+#if 0
+ printf("%08x in kernel linear address range\n", subcode);
+#endif
+ map = kernel_map;
+ subcode = lintokv(subcode);
+#if 0
+ printf("now %08x\n", subcode);
+#endif
+ /* Faults on the zero page or inside the kernel
+ text itself are never legitimate. */
+ if (trunc_page(subcode) == 0
+ || (subcode >= (long)_start
+ && subcode < (long)etext)) {
+ printf("Kernel page fault at address 0x%lx, "
+ "eip = 0x%lx\n",
+ subcode, regs->eip);
+ goto badtrap;
+ }
+ } else {
+ if (thread)
+ map = thread->task->map;
+ /* 'map' is set only when thread != 0; the ||
+ below short-circuits, so it is never read
+ uninitialized. */
+ if (!thread || map == kernel_map) {
+ printf("kernel page fault at %08lx:\n", subcode);
+ dump_ss(regs);
+ panic("kernel thread accessed user space!\n");
+ }
+ }
+
+ /*
+ * Since the 386 ignores write protection in
+ * kernel mode, always try for write permission
+ * first. If that fails and the fault was a
+ * read fault, retry with read permission.
+ */
+ result = vm_fault(map,
+ trunc_page((vm_offset_t)subcode),
+#if !(__i486__ || __i586__ || __i686__)
+ VM_PROT_READ|VM_PROT_WRITE,
+#else
+ (code & T_PF_WRITE)
+ ? VM_PROT_READ|VM_PROT_WRITE
+ : VM_PROT_READ,
+#endif
+ FALSE,
+ FALSE,
+ (void (*)()) 0);
+#if MACH_KDB
+ if (result == KERN_SUCCESS) {
+ /* Look for watchpoints */
+ if (db_watchpoint_list &&
+ db_watchpoints_inserted &&
+ (code & T_PF_WRITE) &&
+ db_find_watchpoint(map,
+ (vm_offset_t)subcode, regs))
+ kdb_trap(T_WATCHPOINT, 0, regs);
+ }
+ else
+#endif /* MACH_KDB */
+#if !(__i486__ || __i586__ || __i686__)
+ if ((code & T_PF_WRITE) == 0 &&
+ result == KERN_PROTECTION_FAILURE)
+ {
+ /*
+ * Must expand vm_fault by hand,
+ * so that we can ask for read-only access
+ * but enter a (kernel)writable mapping.
+ */
+ result = intel_read_fault(map,
+ trunc_page((vm_offset_t)subcode));
+ }
+#else
+ ;
+#endif
+
+ if (result == KERN_SUCCESS) {
+ /*
+ * Certain faults require that we back up
+ * the EIP.
+ */
+ struct recovery *rp;
+
+ /* Linear searching; but the list is small enough. */
+ for (rp = retry_table; rp < retry_table_end; rp++) {
+ if (regs->eip == rp->fault_addr) {
+ regs->eip = rp->recover_addr;
+ break;
+ }
+ }
+ return;
+ }
+
+ /*
+ * If there is a failure recovery address
+ * for this fault, go there.
+ */
+ {
+ struct recovery *rp;
+
+ /* Linear searching; but the list is small enough. */
+ for (rp = recover_table;
+ rp < recover_table_end;
+ rp++) {
+ if (regs->eip == rp->fault_addr) {
+ regs->eip = rp->recover_addr;
+ return;
+ }
+ }
+ }
+
+ /*
+ * Check thread recovery address also -
+ * v86 assist uses it.
+ */
+ /* NOTE(review): 'thread' can be 0 early in boot; this
+ dereference assumes faults reaching this point only
+ occur once threads exist — confirm. */
+ if (thread->recover) {
+ regs->eip = thread->recover;
+ thread->recover = 0;
+ return;
+ }
+
+ /*
+ * Unanticipated page-fault errors in kernel
+ * should not happen.
+ */
+ /* fall through */
+
+ /* Unrecoverable kernel trap: give the debugger(s) a
+ chance, then dump state and panic. */
+ default:
+ badtrap:
+ printf("Kernel ");
+ if (type < TRAP_TYPES)
+ printf("%s trap", trap_type[type]);
+ else
+ printf("trap %ld", type);
+ printf(", eip 0x%lx, code %lx, cr2 %lx\n", regs->eip, code, regs->cr2);
+#if MACH_TTD
+ if (kttd_enabled && kttd_trap(type, code, regs))
+ return;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if (kdb_trap(type, code, regs))
+ return;
+#endif /* MACH_KDB */
+ splhigh();
+ printf("kernel trap, type %ld, code = %lx\n",
+ type, code);
+ dump_ss(regs);
+ panic("trap");
+ return;
+ }
+}
+
+
+/*
+ * Trap from user mode.
+ * Return TRUE if from emulated system call.
+ */
+int user_trap(struct i386_saved_state *regs)
+{
+ int exc = 0; /* Suppress gcc warning */
+ unsigned long code;
+ unsigned long subcode;
+ unsigned long type;
+ thread_t thread = current_thread();
+
+#ifdef __x86_64__
+ assert(regs == &thread->pcb->iss);
+#endif
+
+ type = regs->trapno;
+ code = 0;
+ subcode = 0;
+
+#if 0
+ ((short*)0xb8700)[3] = 0x0f00+'U';
+ ((short*)0xb8700)[4] = 0x0f30+(type / 10);
+ ((short*)0xb8700)[5] = 0x0f30+(type % 10);
+#endif
+#if 0
+ printf("user trap %d error %d\n", type, code);
+ dump_ss(regs);
+#endif
+
+ /* Map the hardware trap vector to a Mach exception code;
+ the switch falls out to the i386_exception call below. */
+ switch (type) {
+
+ case T_DIVIDE_ERROR:
+ exc = EXC_ARITHMETIC;
+ code = EXC_I386_DIV;
+ break;
+
+ case T_DEBUG:
+#if MACH_TTD
+ if (kttd_enabled && kttd_in_single_step()) {
+ if (kttd_trap(type, regs->err, regs))
+ return 0;
+ }
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if (db_in_single_step()) {
+ if (kdb_trap(type, regs->err, regs))
+ return 0;
+ }
+#endif /* MACH_KDB */
+ /* Make the content of the debug status register (DR6)
+ available to user space. */
+ if (thread->pcb)
+ thread->pcb->ims.ids.dr[6] = get_dr6() & 0x600F;
+ set_dr6(0);
+ exc = EXC_BREAKPOINT;
+ code = EXC_I386_SGL;
+ break;
+
+ case T_INT3:
+#if MACH_TTD
+ if (kttd_enabled && kttd_trap(type, regs->err, regs))
+ return 0;
+ break;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ {
+ if (db_find_breakpoint_here(
+ (current_thread())? current_thread()->task: TASK_NULL,
+ regs->eip - 1)) {
+ if (kdb_trap(type, regs->err, regs))
+ return 0;
+ }
+ }
+#endif /* MACH_KDB */
+ exc = EXC_BREAKPOINT;
+ code = EXC_I386_BPT;
+ break;
+
+ case T_OVERFLOW:
+ exc = EXC_ARITHMETIC;
+ code = EXC_I386_INTO;
+ break;
+
+ case T_OUT_OF_BOUNDS:
+ exc = EXC_SOFTWARE;
+ code = EXC_I386_BOUND;
+ break;
+
+ case T_INVALID_OPCODE:
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_INVOP;
+ break;
+
+ case T_NO_FPU:
+ case 32: /* XXX */
+ fpnoextflt();
+ return 0;
+
+ case T_FPU_FAULT:
+ fpextovrflt();
+ return 0;
+
+ case 10: /* invalid TSS == iret with NT flag set */
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_INVTSSFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_SEGMENT_NOT_PRESENT:
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_SEGNPFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_STACK_FAULT:
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_STKFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_GENERAL_PROTECTION:
+ /* Check for an emulated int80 system call.
+ NetBSD-current and Linux use trap instead of call gate. */
+ if (thread->task->eml_dispatch) {
+ unsigned char opcode, intno;
+
+ opcode = inst_fetch(regs->eip, regs->cs);
+ intno = inst_fetch(regs->eip+1, regs->cs);
+ if (opcode == 0xcd && intno == 0x80) {
+ regs->eip += 2;
+ return 1;
+ }
+ }
+#ifdef __x86_64__
+ /* NOTE(review): recognizes a 7-byte far call (opcode
+ 0x9a) to segment 7 and treats it as an emulated Mach
+ system call sequence — confirm this matches the
+ 32-bit "lcall" syscall convention. */
+ {
+ unsigned char opcode, addr[4], seg[2];
+ int i;
+
+ opcode = inst_fetch(regs->eip, regs->cs);
+ for (i = 0; i < 4; i++)
+ addr[i] = inst_fetch(regs->eip+i+1, regs->cs);
+ (void) addr;
+ for (i = 0; i < 2; i++)
+ seg[i] = inst_fetch(regs->eip+i+5, regs->cs);
+ if (opcode == 0x9a && seg[0] == 0x7 && seg[1] == 0) {
+ regs->eip += 7;
+ return 1;
+ }
+ }
+#endif
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_GPFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_PAGE_FAULT:
+ subcode = regs->cr2;
+#if 0
+ printf("user page fault at linear address %08x\n", subcode);
+ dump_ss (regs);
+
+#endif
+ /* Faults on kernel addresses are never forwarded to
+ vm_fault; i386_exception does not return. */
+ if (subcode >= LINEAR_MIN_KERNEL_ADDRESS)
+ i386_exception(EXC_BAD_ACCESS, EXC_I386_PGFLT, subcode);
+ (void) vm_fault(thread->task->map,
+ trunc_page((vm_offset_t)subcode),
+ (regs->err & T_PF_WRITE)
+ ? VM_PROT_READ|VM_PROT_WRITE
+ : VM_PROT_READ,
+ FALSE,
+ FALSE,
+ user_page_fault_continue);
+ /*NOTREACHED*/
+ break;
+
+#ifdef MACH_PV_PAGETABLES
+ case 15:
+ {
+ static unsigned count = 0;
+ count++;
+ if (!(count % 10000))
+ printf("%d 4gb segments accesses\n", count);
+ if (count > 1000000) {
+ printf("A million 4gb segment accesses, stopping reporting them.");
+ if (hyp_vm_assist(VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify))
+ panic("couldn't disable 4gb segments vm assist notify");
+ }
+ return 0;
+ }
+#endif /* MACH_PV_PAGETABLES */
+
+ case T_FLOATING_POINT_ERROR:
+ fpexterrflt();
+ return 0;
+
+ default:
+#if MACH_TTD
+ if (kttd_enabled && kttd_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if (kdb_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_KDB */
+ splhigh();
+ printf("user trap, type %ld, code = %lx\n",
+ type, regs->err);
+ dump_ss(regs);
+ panic("trap");
+ return 0;
+ }
+
+#if MACH_TTD
+ if ((debug_all_traps_with_kttd || thread->task->essential) &&
+ kttd_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if ((debug_all_traps_with_kdb || thread->task->essential) &&
+ kdb_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_KDB */
+
+ i386_exception(exc, code, subcode);
+ /*NOTREACHED*/
+}
+
+#define V86_IRET_PENDING 0x4000
+
+/*
+ * Handle AST traps for i386.
+ * Check for delayed floating-point exception from
+ * AT-bus machines.
+ */
+void
+i386_astintr(void)
+{
+ (void) splsched(); /* block interrupts to check reasons */
+#ifndef MACH_RING1
+ int mycpu = cpu_number();
+
+ if (need_ast[mycpu] & AST_I386_FP) {
+ /*
+ * AST was for delayed floating-point exception -
+ * FP interrupt occurred while in kernel.
+ * Turn off this AST reason and handle the FPU error.
+ */
+ ast_off(mycpu, AST_I386_FP);
+ (void) spl0();
+
+ fpastintr();
+ }
+ else
+#endif /* MACH_RING1 */
+ {
+ /*
+ * Not an FPU trap. Handle the AST.
+ * Interrupts are still blocked.
+ */
+ /* NOTE(review): the elevated spl is deliberately left
+ for ast_taken to deal with — it may block and resume
+ at a different level; confirm. */
+ ast_taken();
+ }
+}
+
+/*
+ * Handle exceptions for i386.
+ *
+ * If we are an AT bus machine, we must turn off the AST for a
+ * delayed floating-point exception.
+ *
+ * If we are providing floating-point emulation, we may have
+ * to retrieve the real register values from the floating point
+ * emulator.
+ */
+void
+i386_exception(
+ int exc, /* Mach exception type (EXC_*) */
+ int code, /* machine-dependent code (EXC_I386_*) */
+ long subcode) /* additional detail, e.g. fault address */
+{
+ spl_t s;
+
+ /*
+ * Turn off delayed FPU error handling.
+ */
+ s = splsched();
+ ast_off(cpu_number(), AST_I386_FP);
+ splx(s);
+
+ /* Deliver the exception to the thread; does not return. */
+ exception(exc, code, subcode);
+ /*NOTREACHED*/
+}
+
+#if MACH_PCSAMPLE > 0
+/*
+ * return saved state for interrupted user thread
+ */
+unsigned
+interrupted_pc(const thread_t t)
+{
+ struct i386_saved_state *iss;
+
+ iss = USER_REGS(t);
+ /* NOTE(review): the return type is 'unsigned'; on x86_64 this
+ truncates the saved instruction pointer — confirm callers
+ only need the low 32 bits. */
+ return iss->eip;
+}
+#endif /* MACH_PCSAMPLE > 0 */
+
+#if MACH_KDB
+
+/* ddb hook: when ENABLE is true, every user trap enters the kernel
+   debugger before being converted into a Mach exception. */
+void
+db_debug_all_traps (boolean_t enable)
+{
+ debug_all_traps_with_kdb = enable;
+}
+
+#endif /* MACH_KDB */
+
+/* Double-fault handler: the machine state is unrecoverable, so just
+   dump the saved registers and panic. */
+void handle_double_fault(struct i386_saved_state *regs)
+{
+ dump_ss(regs);
+ panic("DOUBLE FAULT! This is critical\n");
+}
diff --git a/i386/i386/trap.h b/i386/i386/trap.h
new file mode 100644
index 0000000..db22273
--- /dev/null
+++ b/i386/i386/trap.h
@@ -0,0 +1,71 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_TRAP_H_
+#define _I386_TRAP_H_
+
+#include <mach/machine/trap.h>
+
+#ifndef __ASSEMBLER__
+#include <i386/thread.h>
+#include <mach/mach_types.h>
+
+/* Printable name for hardware trap vector TRAPNUM. */
+char *trap_name(unsigned int trapnum);
+
+/* Saved user-mode instruction pointer of an interrupted thread. */
+unsigned int interrupted_pc(thread_t);
+
+/* Deliver a Mach exception for the current thread; does not return. */
+void
+i386_exception(
+ int exc,
+ int code,
+ long subcode) __attribute__ ((noreturn));
+
+/* Re-enter the kernel debugger on the current thread's user state. */
+extern void
+thread_kdb_return(void);
+
+/*
+ * Trap from kernel mode. Only page-fault errors are recoverable,
+ * and then only in special circumstances. All other errors are
+ * fatal.
+ */
+void kernel_trap(struct i386_saved_state *regs);
+
+/*
+ * Trap from user mode.
+ * Return TRUE if from emulated system call.
+ */
+int user_trap(struct i386_saved_state *regs);
+
+/*
+ * Handle AST traps for i386.
+ * Check for delayed floating-point exception from
+ * AT-bus machines.
+ */
+void i386_astintr(void);
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* _I386_TRAP_H_ */
diff --git a/i386/i386/tss.h b/i386/i386/tss.h
new file mode 100644
index 0000000..fd7e714
--- /dev/null
+++ b/i386/i386/tss.h
@@ -0,0 +1,109 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_TSS_H_
+#define _I386_TSS_H_
+
+#include <sys/types.h>
+#include <mach/inline.h>
+
+#include <machine/io_perm.h>
+
+/*
+ * x86 Task State Segment
+ */
+#ifdef __x86_64__
+/* 64-bit TSS layout; field offsets are fixed by the hardware,
+   hence the packing. */
+struct i386_tss {
+ uint32_t _reserved0;
+ uint64_t rsp0; /* stack pointer for ring 0 */
+ uint64_t rsp1; /* stack pointer for ring 1 */
+ uint64_t rsp2; /* stack pointer for ring 2 */
+ uint64_t _reserved1;
+ uint64_t ist1; /* interrupt stack table entries */
+ uint64_t ist2;
+ uint64_t ist3;
+ uint64_t ist4;
+ uint64_t ist5;
+ uint64_t ist6;
+ uint64_t ist7;
+ uint64_t _reserved2;
+ uint16_t _reserved3;
+ uint16_t io_bit_map_offset;
+ /* offset to start of IO permission
+ bit map */
+} __attribute__((__packed__));
+#else /* ! __x86_64__ */
+struct i386_tss {
+ int back_link; /* segment number of previous task,
+ if nested */
+ int esp0; /* initial stack pointer ... */
+ int ss0; /* and segment for ring 0 */
+ int esp1; /* initial stack pointer ... */
+ int ss1; /* and segment for ring 1 */
+ int esp2; /* initial stack pointer ... */
+ int ss2; /* and segment for ring 2 */
+ int cr3; /* CR3 - page table directory
+ physical address */
+ int eip;
+ int eflags;
+ int eax;
+ int ecx;
+ int edx;
+ int ebx;
+ int esp; /* current stack pointer */
+ int ebp;
+ int esi;
+ int edi;
+ int es;
+ int cs;
+ int ss; /* current stack segment */
+ int ds;
+ int fs;
+ int gs;
+ int ldt; /* local descriptor table segment */
+ unsigned short trace_trap; /* trap on switch to this task */
+ unsigned short io_bit_map_offset;
+ /* offset to start of IO permission
+ bit map */
+};
+#endif /* __x86_64__ */
+
+/* The structure extends the above TSS structure by an I/O permission bitmap
+ and the barrier. */
+struct task_tss
+ {
+ struct i386_tss tss;
+ unsigned char iopb[IOPB_BYTES]; /* I/O permission bitmap */
+ unsigned char barrier; /* terminates the bitmap; presumably
+ set to all-ones by TSS setup —
+ confirm in ktss/mp_desc init */
+};
+
+
+/* Load the task register (TR) with SEGMENT, a TSS selector. */
+static inline void
+ltr(unsigned short segment)
+{
+ __asm volatile("ltr %0" : : "r" (segment) : "memory");
+}
+
+#endif /* _I386_TSS_H_ */
diff --git a/i386/i386/user_ldt.c b/i386/i386/user_ldt.c
new file mode 100644
index 0000000..4c89bd4
--- /dev/null
+++ b/i386/i386/user_ldt.c
@@ -0,0 +1,451 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994,1993,1992,1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * User LDT management.
+ * Each thread in a task may have its own LDT.
+ */
+
+#include <string.h>
+
+#include <kern/kalloc.h>
+#include <kern/thread.h>
+
+#include <vm/vm_kern.h>
+
+#include <i386/pcb.h>
+#include <i386/seg.h>
+#include <i386/thread.h>
+#include <i386/user_ldt.h>
+#include <i386/i386/mach_i386.server.h>
+#include <stddef.h>
+#include "ldt.h"
+#include "vm_param.h"
+
+/*
+ * Add the descriptors to the LDT, starting with
+ * the descriptor for 'first_selector'.
+ */
+kern_return_t
+i386_set_ldt(
+ thread_t thread,
+ int first_selector,
+ const struct descriptor *descriptor_list,
+ unsigned int count,
+ boolean_t desc_list_inline)
+{
+ struct real_descriptor* desc_list = (struct real_descriptor *)descriptor_list;
+ user_ldt_t new_ldt, old_ldt, temp;
+ struct real_descriptor *dp;
+ unsigned i;
+ unsigned min_selector = 0;
+ pcb_t pcb;
+ vm_size_t ldt_size_needed;
+ unsigned first_desc = sel_idx(first_selector);
+ vm_map_copy_t old_copy_object = NULL; /* Suppress gcc warning */
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+ /* The current thread may not overwrite the default LDT slots. */
+ if (thread == current_thread())
+ min_selector = LDTSZ;
+ if (first_desc < min_selector || first_desc > 8191)
+ return KERN_INVALID_ARGUMENT;
+ if (first_desc + count >= 8192)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * If desc_list is not inline, it is in copyin form.
+ * We must copy it out to the kernel map, and wire
+ * it down (we touch it while the PCB is locked).
+ *
+ * We make a copy of the copyin object, and clear
+ * out the old one, so that returning KERN_INVALID_ARGUMENT
+ * will not try to deallocate the data twice.
+ */
+ if (!desc_list_inline) {
+ kern_return_t kr;
+ vm_offset_t dst_addr;
+
+ old_copy_object = (vm_map_copy_t) desc_list;
+
+ kr = vm_map_copyout(ipc_kernel_map, &dst_addr,
+ vm_map_copy_copy(old_copy_object));
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ (void) vm_map_pageable(ipc_kernel_map,
+ dst_addr,
+ dst_addr + count * sizeof(struct real_descriptor),
+ VM_PROT_READ|VM_PROT_WRITE, TRUE, TRUE);
+ desc_list = (struct real_descriptor *)dst_addr;
+ }
+
+ /* Validate every descriptor: only empty, user-privilege, or
+ Mach call-gate entries are accepted. */
+ for (i = 0, dp = desc_list;
+ i < count;
+ i++, dp++)
+ {
+ switch (dp->access & ~ACC_A) {
+ case 0:
+ case ACC_P:
+ /* valid empty descriptor */
+ break;
+ case ACC_P | ACC_CALL_GATE:
+ /* Mach kernel call */
+ *dp = *(struct real_descriptor *)
+ &ldt[sel_idx(USER_SCALL)];
+ break;
+ case ACC_P | ACC_PL_U | ACC_DATA:
+ case ACC_P | ACC_PL_U | ACC_DATA_W:
+ case ACC_P | ACC_PL_U | ACC_DATA_E:
+ case ACC_P | ACC_PL_U | ACC_DATA_EW:
+ case ACC_P | ACC_PL_U | ACC_CODE:
+ case ACC_P | ACC_PL_U | ACC_CODE_R:
+ case ACC_P | ACC_PL_U | ACC_CODE_C:
+ case ACC_P | ACC_PL_U | ACC_CODE_CR:
+ case ACC_P | ACC_PL_U | ACC_CALL_GATE_16:
+ case ACC_P | ACC_PL_U | ACC_CALL_GATE:
+ break;
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+ }
+ ldt_size_needed = sizeof(struct real_descriptor)
+ * (first_desc + count);
+
+ pcb = thread->pcb;
+ new_ldt = 0;
+ /*
+ * The LDT memory is allocated with the pcb unlocked, then the
+ * lock is retaken and the required size rechecked, looping
+ * until the allocation is big enough.
+ */
+ Retry:
+ simple_lock(&pcb->lock);
+ old_ldt = pcb->ims.ldt;
+ if (old_ldt == 0 ||
+ old_ldt->desc.limit_low + 1 < ldt_size_needed)
+ {
+ /*
+ * No old LDT, or not big enough
+ */
+ if (new_ldt == 0) {
+ simple_unlock(&pcb->lock);
+
+#ifdef MACH_PV_DESCRIPTORS
+ /* LDT needs to be aligned on a page */
+ vm_offset_t alloc = kalloc(ldt_size_needed + PAGE_SIZE + offsetof(struct user_ldt, ldt));
+ new_ldt = (user_ldt_t) (round_page((alloc + offsetof(struct user_ldt, ldt))) - offsetof(struct user_ldt, ldt));
+ new_ldt->alloc = alloc;
+
+#else /* MACH_PV_DESCRIPTORS */
+ new_ldt = (user_ldt_t)
+ kalloc(ldt_size_needed
+ + sizeof(struct real_descriptor));
+#endif /* MACH_PV_DESCRIPTORS */
+ /* NOTE(review): a failed kalloc (0) is not checked
+ before the descriptor below is written — confirm
+ kalloc cannot fail here, or add a check. */
+ /*
+ * Build a descriptor that describes the
+ * LDT itself
+ */
+ {
+ vm_offset_t ldt_base;
+
+ ldt_base = kvtolin(&new_ldt->ldt[0]);
+
+ new_ldt->desc.limit_low = ldt_size_needed - 1;
+ new_ldt->desc.limit_high = 0;
+ new_ldt->desc.base_low = ldt_base & 0xffff;
+ new_ldt->desc.base_med = (ldt_base >> 16) & 0xff;
+ new_ldt->desc.base_high = ldt_base >> 24;
+ new_ldt->desc.access = ACC_P | ACC_LDT;
+ new_ldt->desc.granularity = 0;
+ }
+
+ goto Retry;
+ }
+
+ /*
+ * Have new LDT. If there was an old ldt, copy descriptors
+ * from old to new. Otherwise copy the default ldt.
+ */
+ if (old_ldt) {
+ memcpy(&new_ldt->ldt[0],
+ &old_ldt->ldt[0],
+ old_ldt->desc.limit_low + 1);
+ }
+ else {
+ struct real_descriptor template = {0, 0, 0, ACC_P, 0, 0 ,0};
+
+ for (dp = &new_ldt->ldt[0], i = 0; i < first_desc; i++, dp++) {
+ if (i < LDTSZ)
+ *dp = *(struct real_descriptor *) &ldt[i];
+ else
+ *dp = template;
+ }
+ }
+
+ temp = old_ldt;
+ old_ldt = new_ldt; /* use new LDT from now on */
+ new_ldt = temp; /* discard old LDT */
+
+ pcb->ims.ldt = old_ldt; /* set LDT for thread */
+
+ /*
+ * If we are modifying the LDT for the current thread,
+ * make sure it is properly set.
+ */
+ if (thread == current_thread())
+ switch_ktss(pcb);
+ }
+
+ /*
+ * Install new descriptors.
+ */
+ memcpy(&old_ldt->ldt[first_desc],
+ desc_list,
+ count * sizeof(struct real_descriptor));
+
+ simple_unlock(&pcb->lock);
+
+ /* Free whichever LDT is no longer in use (the replaced one,
+ or the unused fresh allocation). */
+ if (new_ldt)
+#ifdef MACH_PV_DESCRIPTORS
+ {
+#ifdef MACH_PV_PAGETABLES
+ for (i=0; i<(new_ldt->desc.limit_low + 1)/sizeof(struct real_descriptor); i+=PAGE_SIZE/sizeof(struct real_descriptor))
+ pmap_set_page_readwrite(&new_ldt->ldt[i]);
+#endif /* MACH_PV_PAGETABLES*/
+ kfree(new_ldt->alloc, new_ldt->desc.limit_low + 1
+ + PAGE_SIZE + offsetof(struct user_ldt, ldt));
+ }
+#else /* MACH_PV_DESCRIPTORS */
+ kfree((vm_offset_t)new_ldt,
+ new_ldt->desc.limit_low + 1
+ + sizeof(struct real_descriptor));
+#endif /* MACH_PV_DESCRIPTORS */
+
+ /*
+ * Free the descriptor list, if it was
+ * out-of-line. Also discard the original
+ * copy object for it.
+ */
+ if (!desc_list_inline) {
+ (void) kmem_free(ipc_kernel_map,
+ (vm_offset_t) desc_list,
+ count * sizeof(struct real_descriptor));
+ vm_map_copy_discard(old_copy_object);
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Return up to selector_count descriptors of THREAD's LDT, starting
+ * at first_selector, either in-line in *descriptor_list or as a
+ * freshly allocated out-of-line copy object.
+ */
+kern_return_t
+i386_get_ldt(const thread_t thread,
+ int first_selector,
+ int selector_count, /* number wanted */
+ struct descriptor **descriptor_list, /* in/out */
+ unsigned int *count /* in/out */
+ )
+{
+ struct real_descriptor** desc_list = (struct real_descriptor **)descriptor_list;
+ struct user_ldt *user_ldt;
+ pcb_t pcb;
+ int first_desc = sel_idx(first_selector);
+ unsigned ldt_count;
+ vm_size_t ldt_size;
+ vm_size_t size, size_needed;
+ vm_offset_t addr;
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+ if (first_desc < 0 || first_desc > 8191)
+ return KERN_INVALID_ARGUMENT;
+ if (first_desc + selector_count >= 8192)
+ return KERN_INVALID_ARGUMENT;
+
+ pcb = thread->pcb;
+ addr = 0;
+ size = 0;
+
+ /* Loop: size the result under the pcb lock, and if more memory
+ is needed, drop the lock, allocate, and try again. */
+ for (;;) {
+ simple_lock(&pcb->lock);
+ user_ldt = pcb->ims.ldt;
+ if (user_ldt == 0) {
+ simple_unlock(&pcb->lock);
+ if (addr)
+ kmem_free(ipc_kernel_map, addr, size);
+ *count = 0;
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * Find how many descriptors we should return.
+ */
+ ldt_count = (user_ldt->desc.limit_low + 1) /
+ sizeof (struct real_descriptor);
+ ldt_count -= first_desc;
+ /* NOTE(review): when first_desc exceeds the number of
+ descriptors actually in the LDT, this unsigned
+ subtraction wraps; ldt_count is then clamped to
+ selector_count below, which would let the memcpy
+ read past the end of the LDT — confirm and add a
+ bound check. */
+ if (ldt_count > selector_count)
+ ldt_count = selector_count;
+
+ ldt_size = ldt_count * sizeof(struct real_descriptor);
+
+ /*
+ * Do we have the memory we need?
+ */
+ if (ldt_count <= *count)
+ break; /* fits in-line */
+
+ size_needed = round_page(ldt_size);
+ if (size_needed <= size)
+ break;
+
+ /*
+ * Unlock the pcb and allocate more memory
+ */
+ simple_unlock(&pcb->lock);
+
+ if (size != 0)
+ kmem_free(ipc_kernel_map, addr, size);
+
+ size = size_needed;
+
+ if (kmem_alloc(ipc_kernel_map, &addr, size)
+ != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /*
+ * copy out the descriptors
+ */
+ memcpy(*desc_list,
+ &user_ldt->ldt[first_desc],
+ ldt_size);
+ *count = ldt_count;
+ simple_unlock(&pcb->lock);
+
+ if (addr) {
+ vm_size_t size_used, size_left;
+ vm_map_copy_t memory;
+
+ /*
+ * Free any unused memory beyond the end of the last page used
+ */
+ size_used = round_page(ldt_size);
+ if (size_used != size)
+ kmem_free(ipc_kernel_map,
+ addr + size_used, size - size_used);
+
+ /*
+ * Zero the remainder of the page being returned.
+ */
+ size_left = size_used - ldt_size;
+ if (size_left > 0)
+ memset((char *)addr + ldt_size, 0, size_left);
+
+ /*
+ * Make memory into copyin form - this unwires it.
+ */
+ (void) vm_map_copyin(ipc_kernel_map, addr, size_used,
+ TRUE, &memory);
+ *desc_list = (struct real_descriptor *)memory;
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Release the memory of a per-thread LDT, restoring write access to
+ * its pages first under MACH_PV_PAGETABLES.
+ */
+void
+user_ldt_free(user_ldt_t user_ldt)
+{
+#ifdef MACH_PV_DESCRIPTORS
+ unsigned i;
+#ifdef MACH_PV_PAGETABLES
+ for (i=0; i<(user_ldt->desc.limit_low + 1)/sizeof(struct real_descriptor); i+=PAGE_SIZE/sizeof(struct real_descriptor))
+ pmap_set_page_readwrite(&user_ldt->ldt[i]);
+#endif /* MACH_PV_PAGETABLES */
+ kfree(user_ldt->alloc, user_ldt->desc.limit_low + 1
+ + PAGE_SIZE + offsetof(struct user_ldt, ldt));
+#else /* MACH_PV_DESCRIPTORS */
+ kfree((vm_offset_t)user_ldt,
+ user_ldt->desc.limit_low + 1
+ + sizeof(struct real_descriptor));
+#endif /* MACH_PV_DESCRIPTORS */
+}
+
+
+/*
+ * Install DESCRIPTOR in one of THREAD's user GDT slots.
+ * *selector == -1 requests a free slot (the chosen selector is
+ * returned through *selector); otherwise *selector names the slot
+ * to overwrite.  A descriptor with ACC_P clear empties the slot.
+ */
+kern_return_t
+i386_set_gdt (thread_t thread, int *selector, struct descriptor descriptor)
+{
+ const struct real_descriptor *desc = (struct real_descriptor *)&descriptor;
+ int idx;
+
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if (*selector == -1)
+ {
+ /* Pick the first slot whose present bit is clear. */
+ for (idx = 0; idx < USER_GDT_SLOTS; ++idx)
+ if ((thread->pcb->ims.user_gdt[idx].access & ACC_P) == 0)
+ {
+ *selector = ((idx + sel_idx(USER_GDT)) << 3) | SEL_PL_U;
+ break;
+ }
+ if (idx == USER_GDT_SLOTS)
+ return KERN_NO_SPACE; /* ? */
+ }
+ else if ((*selector & (SEL_LDT|SEL_PL)) != SEL_PL_U
+ || sel_idx (*selector) < sel_idx(USER_GDT)
+ || sel_idx (*selector) >= sel_idx(USER_GDT) + USER_GDT_SLOTS)
+ return KERN_INVALID_ARGUMENT;
+ else
+ idx = sel_idx (*selector) - sel_idx(USER_GDT);
+
+ if ((desc->access & ACC_P) == 0)
+ memset (&thread->pcb->ims.user_gdt[idx], 0,
+ sizeof thread->pcb->ims.user_gdt[idx]);
+ /* Only user-privilege, non-64-bit descriptors are accepted. */
+ else if ((desc->access & (ACC_TYPE_USER|ACC_PL)) != (ACC_TYPE_USER|ACC_PL_U) || (desc->granularity & SZ_64))
+
+ return KERN_INVALID_ARGUMENT;
+ else
+ memcpy (&thread->pcb->ims.user_gdt[idx], desc, sizeof (struct descriptor));
+
+ /*
+ * If we are modifying the GDT for the current thread,
+ * make sure it is properly set.
+ */
+ if (thread == current_thread())
+ switch_ktss(thread->pcb);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Fetch the descriptor held in the user GDT slot named by SELECTOR
+ * into *descriptor.
+ */
+kern_return_t
+i386_get_gdt (const thread_t thread, int selector, struct descriptor *descriptor)
+{
+ struct real_descriptor *desc = (struct real_descriptor *)descriptor;
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ if ((selector & (SEL_LDT|SEL_PL)) != SEL_PL_U
+ || sel_idx (selector) < sel_idx(USER_GDT)
+ || sel_idx (selector) >= sel_idx(USER_GDT) + USER_GDT_SLOTS)
+ return KERN_INVALID_ARGUMENT;
+
+ *desc = thread->pcb->ims.user_gdt[sel_idx (selector) - sel_idx(USER_GDT)];
+
+ return KERN_SUCCESS;
+}
diff --git a/i386/i386/user_ldt.h b/i386/i386/user_ldt.h
new file mode 100644
index 0000000..26caa27
--- /dev/null
+++ b/i386/i386/user_ldt.h
@@ -0,0 +1,50 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_USER_LDT_H_
+#define _I386_USER_LDT_H_
+
+/*
+ * User LDT management.
+ *
+ * Each thread in a task may have its own LDT.
+ */
+
+#include <i386/seg.h>
+
+/*
+ * Per-thread user LDT.  `ldt[1]' is the pre-C99 idiom for a trailing
+ * variable-length array: the structure is presumably over-allocated so
+ * the table can hold more than one descriptor (allocator not in view).
+ */
+struct user_ldt {
+#ifdef MACH_PV_DESCRIPTORS
+ vm_offset_t alloc; /* allocation before alignment */
+#endif /* MACH_PV_DESCRIPTORS */
+ struct real_descriptor desc; /* descriptor for self */
+ struct real_descriptor ldt[1]; /* descriptor table (variable) */
+};
+typedef struct user_ldt * user_ldt_t;
+
+/* Release a user_ldt (deallocation counterpart; definition not in view). */
+extern void
+user_ldt_free(user_ldt_t user_ldt);
+
+#endif /* _I386_USER_LDT_H_ */
diff --git a/i386/i386/vm_param.h b/i386/i386/vm_param.h
new file mode 100644
index 0000000..056aa52
--- /dev/null
+++ b/i386/i386/vm_param.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_KERNEL_I386_VM_PARAM_
+#define _I386_KERNEL_I386_VM_PARAM_
+
+#include <kern/macros.h>
+
+/* XXX use xu/vm_param.h */
+#include <mach/vm_param.h>
+#ifdef MACH_PV_PAGETABLES
+#include <xen/public/xen.h>
+#endif
+
+/* To avoid ambiguity in kernel code, make the name explicit */
+#define VM_MIN_USER_ADDRESS VM_MIN_ADDRESS
+#define VM_MAX_USER_ADDRESS VM_MAX_ADDRESS
+
+/* The kernel address space is usually 1GB, usually starting at virtual address 0. */
+/* This can be changed freely to separate kernel addresses from user addresses
+ * for better trace support in kdb; the _START symbol has to be offset by the
+ * same amount. */
+#ifdef __x86_64__
+#define VM_MIN_KERNEL_ADDRESS KERNEL_MAP_BASE
+#else
+#define VM_MIN_KERNEL_ADDRESS 0xC0000000UL
+#endif
+
+#if defined(MACH_XEN) || defined (__x86_64__)
+/* PV kernels can be loaded directly to the target virtual address */
+#define INIT_VM_MIN_KERNEL_ADDRESS VM_MIN_KERNEL_ADDRESS
+#else /* MACH_XEN || __x86_64__ */
+/* This must remain 0 */
+#define INIT_VM_MIN_KERNEL_ADDRESS 0x00000000UL
+#endif /* MACH_XEN || __x86_64__ */
+
+#ifdef MACH_PV_PAGETABLES
+#ifdef __i386__
+#if PAE
+#define HYP_VIRT_START HYPERVISOR_VIRT_START_PAE
+#else /* PAE */
+#define HYP_VIRT_START HYPERVISOR_VIRT_START_NONPAE
+#endif /* PAE */
+#define VM_MAX_KERNEL_ADDRESS (HYP_VIRT_START - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
+#else
+#define HYP_VIRT_START HYPERVISOR_VIRT_START
+#define VM_MAX_KERNEL_ADDRESS (LINEAR_MAX_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
+#endif
+#else /* MACH_PV_PAGETABLES */
+#define VM_MAX_KERNEL_ADDRESS (LINEAR_MAX_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
+#endif /* MACH_PV_PAGETABLES */
+
+/*
+ * Reserve mapping room for the kernel map, which includes
+ * the device I/O map and the IPC map.
+ */
+#ifdef __x86_64__
+/*
+ * Vm structures are quite bigger on 64 bit.
+ * This should be well enough for 8G of physical memory; on the other hand,
+ * maybe not all of them need to be in directly-mapped memory, see the parts
+ * allocated with pmap_steal_memory().
+ */
+#define VM_KERNEL_MAP_SIZE (512 * 1024 * 1024)
+#else
+#define VM_KERNEL_MAP_SIZE (152 * 1024 * 1024)
+#endif
+
+/* This is the kernel address range in linear addresses. */
+#ifdef __x86_64__
+#define LINEAR_MIN_KERNEL_ADDRESS VM_MIN_KERNEL_ADDRESS
+#define LINEAR_MAX_KERNEL_ADDRESS (0xffffffffffffffffUL)
+#else
+/* On x86, the kernel virtual address space is actually located
+ at high linear addresses. */
+#define LINEAR_MIN_KERNEL_ADDRESS (VM_MAX_USER_ADDRESS)
+#define LINEAR_MAX_KERNEL_ADDRESS (0xffffffffUL)
+#endif
+
+#ifdef MACH_PV_PAGETABLES
+/* need room for mmu updates (2*8bytes) */
+#define KERNEL_STACK_SIZE (4*I386_PGBYTES)
+#define INTSTACK_SIZE (4*I386_PGBYTES)
+#else /* MACH_PV_PAGETABLES */
+#define KERNEL_STACK_SIZE (1*I386_PGBYTES)
+#define INTSTACK_SIZE (1*I386_PGBYTES)
+#endif /* MACH_PV_PAGETABLES */
+ /* interrupt stack size */
+
+/*
+ * Conversion between 80386 pages and VM pages
+ */
+
+#define trunc_i386_to_vm(p) (atop(trunc_page(i386_ptob(p))))
+#define round_i386_to_vm(p) (atop(round_page(i386_ptob(p))))
+#define vm_to_i386(p) (i386_btop(ptoa(p)))
+
+/*
+ * Physical memory is direct-mapped to virtual memory
+ * starting at virtual address VM_MIN_KERNEL_ADDRESS.
+ */
+#define phystokv(a) ((vm_offset_t)(a) + VM_MIN_KERNEL_ADDRESS)
+/*
+ * This can not be used with virtual mappings, but can be used during bootstrap
+ */
+#define _kvtophys(a) ((vm_offset_t)(a) - VM_MIN_KERNEL_ADDRESS)
+
+/*
+ * Kernel virtual memory starts at LINEAR_MIN_KERNEL_ADDRESS in linear
+ * addresses (0xc0000000 on i386; see the x86_64 case above).
+ */
+#define kvtolin(a) ((vm_offset_t)(a) - VM_MIN_KERNEL_ADDRESS + LINEAR_MIN_KERNEL_ADDRESS)
+#define lintokv(a) ((vm_offset_t)(a) - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
+
+/*
+ * Physical memory properties.
+ */
+#define VM_PAGE_DMA_LIMIT DECL_CONST(0x1000000, UL)
+
+#ifdef MACH_XEN
+/* TODO Completely check Xen physical/virtual layout */
+#ifdef __LP64__
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_DIRECTMAP_LIMIT DECL_CONST(0x400000000000, UL)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#else
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_DIRECTMAP_LIMIT (VM_MAX_KERNEL_ADDRESS \
+ - VM_MIN_KERNEL_ADDRESS \
+ - VM_KERNEL_MAP_SIZE)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#endif
+#else /* MACH_XEN */
+#ifdef __LP64__
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_DIRECTMAP_LIMIT (VM_MAX_KERNEL_ADDRESS \
+ - VM_MIN_KERNEL_ADDRESS \
+ - VM_KERNEL_MAP_SIZE + 1)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, UL)
+#else /* __LP64__ */
+#define VM_PAGE_DIRECTMAP_LIMIT (VM_MAX_KERNEL_ADDRESS \
+ - VM_MIN_KERNEL_ADDRESS \
+ - VM_KERNEL_MAP_SIZE + 1)
+#ifdef PAE
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#else /* PAE */
+#define VM_PAGE_MAX_SEGS 3
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0xfffff000, UL)
+#endif /* PAE */
+#endif /* __LP64__ */
+#endif /* MACH_XEN */
+
+/*
+ * Physical segment indexes.
+ */
+#define VM_PAGE_SEG_DMA 0
+
+#if defined(VM_PAGE_DMA32_LIMIT) && (VM_PAGE_DMA32_LIMIT != VM_PAGE_DIRECTMAP_LIMIT)
+
+#if VM_PAGE_DMA32_LIMIT < VM_PAGE_DIRECTMAP_LIMIT
+#define VM_PAGE_SEG_DMA32 (VM_PAGE_SEG_DMA+1)
+#define VM_PAGE_SEG_DIRECTMAP (VM_PAGE_SEG_DMA32+1)
+#define VM_PAGE_SEG_HIGHMEM (VM_PAGE_SEG_DIRECTMAP+1)
+#else /* VM_PAGE_DMA32_LIMIT > VM_PAGE_DIRECTMAP_LIMIT */
+#define VM_PAGE_SEG_DIRECTMAP (VM_PAGE_SEG_DMA+1)
+#define VM_PAGE_SEG_DMA32 (VM_PAGE_SEG_DIRECTMAP+1)
+#define VM_PAGE_SEG_HIGHMEM (VM_PAGE_SEG_DMA32+1)
+#endif
+
+#else
+
+#define VM_PAGE_SEG_DIRECTMAP (VM_PAGE_SEG_DMA+1)
+#define VM_PAGE_SEG_DMA32 VM_PAGE_SEG_DIRECTMAP /* Alias for the DIRECTMAP segment */
+#define VM_PAGE_SEG_HIGHMEM (VM_PAGE_SEG_DIRECTMAP+1)
+#endif
+
+#endif /* _I386_KERNEL_I386_VM_PARAM_ */
diff --git a/i386/i386/xen.h b/i386/i386/xen.h
new file mode 100644
index 0000000..2cd81be
--- /dev/null
+++ b/i386/i386/xen.h
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2006-2011 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef XEN_HYPCALL_H
+#define XEN_HYPCALL_H
+
+#ifdef MACH_XEN
+#ifndef __ASSEMBLER__
+#include <kern/macros.h>
+#include <kern/printf.h>
+#include <mach/machine/vm_types.h>
+#include <mach/vm_param.h>
+#include <mach/inline.h>
+#include <mach/xen.h>
+#include <machine/vm_param.h>
+#include <intel/pmap.h>
+#include <kern/debug.h>
+#include <xen/public/xen.h>
+
+/* TODO: this should be moved in appropriate non-Xen place. */
+#define mb() __asm__ __volatile__("lock; addl $0,0(%%esp)":::"memory")
+#define rmb() mb()
+#define wmb() mb()
+/* Atomically exchange *ptr with x and return the previous value.
+   x86 `xchg' with a memory operand is implicitly locked. */
+static inline unsigned long xchgl(volatile unsigned long *ptr, unsigned long x)
+{
+ __asm__ __volatile__("xchg %0, %1"
+ : "=r" (x)
+ : "m" (*(ptr)), "0" (x): "memory");
+ return x;
+}
+#define _TOSTR(x) #x
+#define TOSTR(x) _TOSTR (x)
+
+#ifdef __i386__
+#define _hypcall_ret "=a"
+#define _hypcall_arg1 "ebx"
+#define _hypcall_arg2 "ecx"
+#define _hypcall_arg3 "edx"
+#define _hypcall_arg4 "esi"
+#define _hypcall_arg5 "edi"
+#endif
+#ifdef __x86_64__
+#define _hypcall_ret "=a"
+#define _hypcall_arg1 "rdi"
+#define _hypcall_arg2 "rsi"
+#define _hypcall_arg3 "rdx"
+#define _hypcall_arg4 "r10"
+#define _hypcall_arg5 "r8"
+#endif
+
+
+/* x86-specific hypercall interface. */
+#define _hypcall0(type, name) \
+static inline type hyp_##name(void) \
+{ \
+ unsigned long __ret; \
+ asm volatile ("call hypcalls+("TOSTR(__HYPERVISOR_##name)"*32)" \
+ : "=a" (__ret) \
+ : : "memory"); \
+ return __ret; \
+}
+
+#define _hypcall1(type, name, type1, arg1) \
+static inline type hyp_##name(type1 arg1) \
+{ \
+ unsigned long __ret; \
+ register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
+ asm volatile ("call hypcalls+("TOSTR(__HYPERVISOR_##name)"*32)" \
+ : "=a" (__ret), \
+ "+r" (__arg1) \
+ : : "memory"); \
+ return __ret; \
+}
+
+#define _hypcall2(type, name, type1, arg1, type2, arg2) \
+static inline type hyp_##name(type1 arg1, type2 arg2) \
+{ \
+ unsigned long __ret; \
+ register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
+ register unsigned long __arg2 asm(_hypcall_arg2) = (unsigned long) arg2; \
+ asm volatile ("call hypcalls+("TOSTR(__HYPERVISOR_##name)"*32)" \
+ : "=a" (__ret), \
+ "+r" (__arg1), \
+ "+r" (__arg2) \
+ : : "memory"); \
+ return __ret; \
+}
+
+#define _hypcall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
+static inline type hyp_##name(type1 arg1, type2 arg2, type3 arg3) \
+{ \
+ unsigned long __ret; \
+ register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
+ register unsigned long __arg2 asm(_hypcall_arg2) = (unsigned long) arg2; \
+ register unsigned long __arg3 asm(_hypcall_arg3) = (unsigned long) arg3; \
+ asm volatile ("call hypcalls+("TOSTR(__HYPERVISOR_##name)"*32)" \
+ : "=a" (__ret), \
+ "+r" (__arg1), \
+ "+r" (__arg2), \
+ "+r" (__arg3) \
+ : : "memory"); \
+ return __ret; \
+}
+
+#define _hypcall4(type, name, type1, arg1, type2, arg2, type3, arg3, type4, arg4) \
+static inline type hyp_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+ unsigned long __ret; \
+ register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
+ register unsigned long __arg2 asm(_hypcall_arg2) = (unsigned long) arg2; \
+ register unsigned long __arg3 asm(_hypcall_arg3) = (unsigned long) arg3; \
+ register unsigned long __arg4 asm(_hypcall_arg4) = (unsigned long) arg4; \
+ asm volatile ("call hypcalls+("TOSTR(__HYPERVISOR_##name)"*32)" \
+ : "=a" (__ret), \
+ "+r" (__arg1), \
+ "+r" (__arg2), \
+ "+r" (__arg3), \
+ "+r" (__arg4) \
+ : : "memory"); \
+ return __ret; \
+}
+
+#define _hypcall5(type, name, type1, arg1, type2, arg2, type3, arg3, type4, arg4, type5, arg5) \
+static inline type hyp_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+ unsigned long __ret; \
+ register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
+ register unsigned long __arg2 asm(_hypcall_arg2) = (unsigned long) arg2; \
+ register unsigned long __arg3 asm(_hypcall_arg3) = (unsigned long) arg3; \
+ register unsigned long __arg4 asm(_hypcall_arg4) = (unsigned long) arg4; \
+ register unsigned long __arg5 asm(_hypcall_arg5) = (unsigned long) arg5; \
+ asm volatile ("call hypcalls+("TOSTR(__HYPERVISOR_##name)"*32)" \
+ : "=a" (__ret), \
+ "+r" (__arg1), \
+ "+r" (__arg2), \
+ "+r" (__arg3), \
+ "+r" (__arg4), \
+ "+r" (__arg5) \
+ : : "memory"); \
+ return __ret; \
+}
+
+/* x86 Hypercalls */
+
+/* Note: since Hypervisor uses flat memory model, remember to always use
+ * kvtolin when giving pointers as parameters for the hypercall to read data
+ * at. Use kv_to_la when they may be used before GDT got set up. */
+
+_hypcall1(long, set_trap_table, vm_offset_t /* struct trap_info * */, traps);
+
+#ifdef MACH_PV_PAGETABLES
+_hypcall4(int, mmu_update, vm_offset_t /* struct mmu_update * */, req, int, count, vm_offset_t /* int * */, success_count, domid_t, domid)
+/* Ask the hypervisor to set the page-table entry at machine address
+   `pte' to `val'.  Returns the hypervisor's success count (1 on
+   success, 0 otherwise).  Uses kv_to_la so it is usable before the
+   GDT is set up (see note above the hypercall list). */
+static inline int hyp_mmu_update_pte(pt_entry_t pte, pt_entry_t val)
+{
+ struct mmu_update update =
+ {
+ .ptr = pte,
+ .val = val,
+ };
+ int count;
+ hyp_mmu_update(kv_to_la(&update), 1, kv_to_la(&count), DOMID_SELF);
+ return count;
+}
+/* Note: make sure this fits in KERNEL_STACK_SIZE */
+#define HYP_BATCH_MMU_UPDATES 256
+
+#define hyp_mmu_update_la(la, val) hyp_mmu_update_pte( \
+ (kernel_page_dir[lin2pdenum_cont((vm_offset_t)(la))] & INTEL_PTE_PFN) \
+ + ptenum((vm_offset_t)(la)) * sizeof(pt_entry_t), val)
+#endif
+
+_hypcall2(long, set_gdt, vm_offset_t /* unsigned long * */, frame_list, unsigned int, entries)
+
+_hypcall2(long, stack_switch, unsigned long, ss, unsigned long, esp);
+
+#ifdef __i386__
+_hypcall4(long, set_callbacks, unsigned long, es, void *, ea,
+ unsigned long, fss, void *, fsa);
+#endif
+#ifdef __x86_64__
+_hypcall3(long, set_callbacks, void *, ea, void *, fsa, void *, sc);
+#endif
+_hypcall1(long, fpu_taskswitch, int, set);
+
+#ifdef PAE
+#define hyp_high(pte) ((pte) >> 32)
+#else
+#define hyp_high(pte) 0
+#endif
+#ifdef __i386__
+_hypcall4(long, update_descriptor, unsigned long, ma_lo, unsigned long, ma_hi, unsigned long, desc_lo, unsigned long, desc_hi);
+#define hyp_do_update_descriptor(ma, desc) ({ \
+ pt_entry_t __ma = (ma); \
+ uint64_t __desc = (desc); \
+ hyp_update_descriptor(__ma & 0xffffffffU, hyp_high(__ma), __desc & 0xffffffffU, __desc >> 32); \
+})
+#endif
+#ifdef __x86_64__
+_hypcall2(long, update_descriptor, unsigned long, ma, unsigned long, desc);
+#define hyp_do_update_descriptor(ma, desc) hyp_update_descriptor(ma, desc)
+#endif
+
+#ifdef __x86_64__
+_hypcall2(long, set_segment_base, int, reg, unsigned long, value);
+#endif
+
+#include <xen/public/memory.h>
+_hypcall2(long, memory_op, unsigned long, cmd, vm_offset_t /* void * */, arg);
+/* Give the machine frame `mfn' back to Xen via
+   XENMEM_decrease_reservation; panics unless the hypervisor reports
+   exactly one extent released. */
+static inline void hyp_free_mfn(unsigned long mfn)
+{
+ struct xen_memory_reservation reservation;
+ reservation.extent_start = (void*) kvtolin(&mfn);
+ reservation.nr_extents = 1;
+ reservation.extent_order = 0; /* single 4K page */
+ reservation.address_bits = 0; /* no placement restriction */
+ reservation.domid = DOMID_SELF;
+ if (hyp_memory_op(XENMEM_decrease_reservation, kvtolin(&reservation)) != 1)
+ panic("couldn't free page %lu\n", mfn);
+}
+
+#ifdef __i386__
+_hypcall4(int, update_va_mapping, unsigned long, va, unsigned long, val_lo, unsigned long, val_hi, unsigned long, flags);
+#define hyp_do_update_va_mapping(va, val, flags) ({ \
+ pt_entry_t __val = (val); \
+ hyp_update_va_mapping(va, __val & 0xffffffffU, hyp_high(__val), flags); \
+})
+#endif
+#ifdef __x86_64__
+_hypcall3(int, update_va_mapping, unsigned long, va, unsigned long, val, unsigned long, flags);
+#define hyp_do_update_va_mapping(va, val, flags) hyp_update_va_mapping(va, val, flags)
+#endif
+
+/* Return the page at pseudo-physical frame `pfn', mapped at kernel
+   virtual address `va', to the hypervisor.  With PV pagetables the
+   virtual mapping is torn down first (and the pseudo-physical entry
+   invalidated) before the machine frame itself is freed. */
+static inline void hyp_free_page(unsigned long pfn, void *va)
+{
+ /* save mfn */
+ unsigned long mfn = pfn_to_mfn(pfn);
+
+#ifdef MACH_PV_PAGETABLES
+ /* remove from mappings */
+ if (hyp_do_update_va_mapping(kvtolin(va), 0, UVMF_INVLPG|UVMF_ALL))
+ panic("couldn't clear page %lu at %p\n", pfn, va);
+
+#ifdef MACH_PSEUDO_PHYS
+ /* drop machine page */
+ mfn_list[pfn] = ~0;
+#endif /* MACH_PSEUDO_PHYS */
+#endif
+
+ /* and free from Xen */
+ hyp_free_mfn(mfn);
+}
+
+#ifdef MACH_PV_PAGETABLES
+_hypcall4(int, mmuext_op, vm_offset_t /* struct mmuext_op * */, op, int, count, vm_offset_t /* int * */, success_count, domid_t, domid);
+/* Issue a single argument-less MMUEXT operation `cmd'.  Returns the
+   hypervisor's success count (0 on failure). */
+static inline int hyp_mmuext_op_void(unsigned int cmd)
+{
+ struct mmuext_op op = {
+ .cmd = cmd,
+ };
+ int count;
+ hyp_mmuext_op(kv_to_la(&op), 1, kv_to_la(&count), DOMID_SELF);
+ return count;
+}
+/* Issue a single MMUEXT operation `cmd' taking a machine frame number
+   argument.  Returns the hypervisor's success count (0 on failure). */
+static inline int hyp_mmuext_op_mfn(unsigned int cmd, unsigned long mfn)
+{
+ struct mmuext_op op = {
+ .cmd = cmd,
+ .arg1.mfn = mfn,
+ };
+ int count;
+ hyp_mmuext_op(kv_to_la(&op), 1, kv_to_la(&count), DOMID_SELF);
+ return count;
+}
+/* Point the LDT at `ldt' (nbentries descriptors) via MMUEXT_SET_LDT.
+   The table must be page-aligned, and each backing page is first made
+   read-only as Xen requires for descriptor tables.  Panics on failure.
+   Note `count' is reused: loop counter first, then success count. */
+static inline void hyp_set_ldt(void *ldt, unsigned long nbentries) {
+ struct mmuext_op op = {
+ .cmd = MMUEXT_SET_LDT,
+ .arg1.linear_addr = kvtolin(ldt),
+ .arg2.nr_ents = nbentries,
+ };
+ unsigned long count;
+ if (((unsigned long)ldt) & PAGE_MASK)
+ panic("ldt %p is not aligned on a page\n", ldt);
+ /* Descriptors are 8 bytes, so PAGE_SIZE/8 entries per page. */
+ for (count=0; count<nbentries; count+= PAGE_SIZE/8)
+ pmap_set_page_readonly(ldt+count*8);
+ hyp_mmuext_op(kvtolin(&op), 1, kvtolin(&count), DOMID_SELF);
+ if (!count)
+ panic("couldn't set LDT\n");
+}
+#define hyp_set_cr3(value) hyp_mmuext_op_mfn(MMUEXT_NEW_BASEPTR, pa_to_mfn(value))
+#define hyp_set_user_cr3(value) hyp_mmuext_op_mfn(MMUEXT_NEW_USER_BASEPTR, pa_to_mfn(value))
+/* Flush the TLB entry for linear address `lin' — on all vcpus, per the
+   MMUEXT_INVLPG_ALL command used.  Panics if the operation fails. */
+static inline void hyp_invlpg(vm_offset_t lin) {
+ struct mmuext_op ops;
+ int n;
+ ops.cmd = MMUEXT_INVLPG_ALL;
+ ops.arg1.linear_addr = lin;
+ hyp_mmuext_op(kvtolin(&ops), 1, kvtolin(&n), DOMID_SELF);
+ if (n < 1)
+ panic("couldn't invlpg\n");
+}
+#endif
+
+#ifdef __i386__
+_hypcall2(long, set_timer_op, unsigned long, absolute_lo, unsigned long, absolute_hi);
+#define hyp_do_set_timer_op(absolute_nsec) ({ \
+ uint64_t __absolute = (absolute_nsec); \
+ hyp_set_timer_op(__absolute & 0xffffffffU, __absolute >> 32); \
+})
+#endif
+#ifdef __x86_64__
+_hypcall1(long, set_timer_op, unsigned long, absolute);
+#define hyp_do_set_timer_op(absolute_nsec) hyp_set_timer_op(absolute_nsec)
+#endif
+
+#include <xen/public/event_channel.h>
+_hypcall1(int, event_channel_op, vm_offset_t /* evtchn_op_t * */, op);
+/* Notify the remote end of event channel `port'.  Returns the
+   hypercall's status (0 on success). */
+static inline int hyp_event_channel_send(evtchn_port_t port) {
+ evtchn_op_t op = {
+ .cmd = EVTCHNOP_send,
+ .u.send.port = port,
+ };
+ return hyp_event_channel_op(kvtolin(&op));
+}
+/* Allocate an unbound local event channel that domain `domid' may
+   later bind to.  Returns the new local port; panics on failure. */
+static inline evtchn_port_t hyp_event_channel_alloc(domid_t domid) {
+ evtchn_op_t op = {
+ .cmd = EVTCHNOP_alloc_unbound,
+ .u.alloc_unbound.dom = DOMID_SELF,
+ .u.alloc_unbound.remote_dom = domid,
+ };
+ if (hyp_event_channel_op(kvtolin(&op)))
+ panic("couldn't allocate event channel");
+ return op.u.alloc_unbound.port;
+}
+/* Bind virtual IRQ `virq' on `vcpu' to a fresh event channel and
+   return its port.  Panics on failure. */
+static inline evtchn_port_t hyp_event_channel_bind_virq(uint32_t virq, uint32_t vcpu) {
+ evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq, .u.bind_virq = { .virq = virq, .vcpu = vcpu }};
+ if (hyp_event_channel_op(kvtolin(&op)))
+ panic("can't bind virq %d\n",virq);
+ return op.u.bind_virq.port;
+}
+
+_hypcall3(int, console_io, int, cmd, int, count, vm_offset_t /* const char * */, buffer);
+
+_hypcall3(long, grant_table_op, unsigned int, cmd, vm_offset_t /* void * */, uop, unsigned int, count);
+
+_hypcall2(long, vm_assist, unsigned int, cmd, unsigned int, type);
+
+_hypcall0(long, iret);
+
+#include <xen/public/sched.h>
+_hypcall2(long, sched_op, int, cmd, vm_offset_t /* void* */, arg)
+#define hyp_yield() hyp_sched_op(SCHEDOP_yield, 0)
+#define hyp_block() hyp_sched_op(SCHEDOP_block, 0)
+/* Ask Xen to crash this domain (SHUTDOWN_crash).  Must not return;
+   the trailing loop guards against a misbehaving hypervisor. */
+static inline void __attribute__((noreturn)) hyp_crash(void)
+{
+ unsigned int shut = SHUTDOWN_crash;
+ hyp_sched_op(SCHEDOP_shutdown, kvtolin(&shut));
+ /* really shouldn't return */
+ printf("uh, shutdown returned?!\n");
+ for(;;);
+}
+
+/* Ask Xen to power off this domain (SHUTDOWN_poweroff).  Must not
+   return; the trailing loop guards against that anyway. */
+static inline void __attribute__((noreturn)) hyp_halt(void)
+{
+ unsigned int shut = SHUTDOWN_poweroff;
+ hyp_sched_op(SCHEDOP_shutdown, kvtolin(&shut));
+ /* really shouldn't return */
+ printf("uh, shutdown returned?!\n");
+ for(;;);
+}
+
+/* Ask Xen to reboot this domain (SHUTDOWN_reboot).  Must not return;
+   the trailing loop guards against that anyway. */
+static inline void __attribute__((noreturn)) hyp_reboot(void)
+{
+ unsigned int shut = SHUTDOWN_reboot;
+ hyp_sched_op(SCHEDOP_shutdown, kvtolin(&shut));
+ /* really shouldn't return */
+ printf("uh, reboot returned?!\n");
+ for(;;);
+}
+
+_hypcall2(int, set_debugreg, int, reg, unsigned long, value);
+_hypcall1(unsigned long, get_debugreg, int, reg);
+
+/* x86-specific */
+/* Read the CPU timestamp counter (rdtsc: edx:eax) as one 64-bit value. */
+static inline uint64_t hyp_cpu_clock(void) {
+ uint32_t hi, lo;
+ asm volatile("rdtsc" : "=d"(hi), "=a"(lo));
+ return (((uint64_t) hi) << 32) | lo;
+}
+
+#else /* __ASSEMBLER__ */
+/* TODO: SMP */
+#define cli movb $0xff,hyp_shared_info+CPU_CLI
+#define sti call hyp_sti
+#define iretq jmp hyp_iretq
+#endif /* __ASSEMBLER__ */
+#endif /* MACH_XEN */
+
+#endif /* XEN_HYPCALL_H */
diff --git a/i386/i386/xpr.h b/i386/i386/xpr.h
new file mode 100644
index 0000000..19ef026
--- /dev/null
+++ b/i386/i386/xpr.h
@@ -0,0 +1,32 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: xpr.h
+ *
+ * Machine dependent module for the XPR tracing facility.
+ */
+
+#define XPR_TIMESTAMP (0)