From 5e0b8d508ed51004bd836384293be00950ee62c9 Mon Sep 17 00:00:00 2001 From: Pasha Date: Tue, 20 Feb 2024 18:49:50 +0000 Subject: init gnumach copy --- kern/.gitignore | 2 + kern/act.c | 1118 ++++++++++++++++++++ kern/act.h | 192 ++++ kern/assert.h | 54 + kern/ast.c | 235 ++++ kern/ast.h | 139 +++ kern/atomic.h | 54 + kern/boot_script.c | 791 ++++++++++++++ kern/boot_script.h | 111 ++ kern/bootstrap.c | 918 ++++++++++++++++ kern/bootstrap.h | 26 + kern/counters.c | 82 ++ kern/counters.h | 107 ++ kern/cpu_number.h | 47 + kern/debug.c | 207 ++++ kern/debug.h | 72 ++ kern/elf-load.c | 97 ++ kern/eventcount.c | 361 +++++++ kern/eventcount.h | 66 ++ kern/exc.defs | 22 + kern/exception.c | 1023 ++++++++++++++++++ kern/exception.h | 66 ++ kern/experimental.srv | 3 + kern/gnumach.srv | 23 + kern/gsync.c | 517 +++++++++ kern/gsync.h | 41 + kern/host.c | 389 +++++++ kern/host.h | 48 + kern/ipc_host.c | 451 ++++++++ kern/ipc_host.h | 72 ++ kern/ipc_kobject.c | 365 +++++++ kern/ipc_kobject.h | 123 +++ kern/ipc_mig.c | 984 +++++++++++++++++ kern/ipc_mig.h | 143 +++ kern/ipc_sched.c | 283 +++++ kern/ipc_sched.h | 32 + kern/ipc_tt.c | 1113 +++++++++++++++++++ kern/ipc_tt.h | 92 ++ kern/kalloc.h | 38 + kern/kern_types.h | 70 ++ kern/kmutex.c | 76 ++ kern/kmutex.h | 52 + kern/list.h | 357 +++++++ kern/lock.c | 689 ++++++++++++ kern/lock.h | 316 ++++++ kern/lock_mon.c | 364 +++++++ kern/log2.h | 50 + kern/mach.srv | 40 + kern/mach4.srv | 32 + kern/mach_clock.c | 657 ++++++++++++ kern/mach_clock.h | 112 ++ kern/mach_debug.srv | 26 + kern/mach_factor.c | 150 +++ kern/mach_factor.h | 31 + kern/mach_host.srv | 37 + kern/machine.c | 672 ++++++++++++ kern/machine.h | 59 ++ kern/macros.h | 93 ++ kern/pc_sample.c | 306 ++++++ kern/pc_sample.h | 94 ++ kern/printf.c | 656 ++++++++++++ kern/printf.h | 68 ++ kern/priority.c | 223 ++++ kern/priority.h | 28 + kern/processor.c | 1034 ++++++++++++++++++ kern/processor.h | 326 ++++++ kern/profile.c | 408 +++++++ kern/queue.c | 121 +++ kern/queue.h | 391 +++++++ kern/rbtree.c | 483 +++++++++ kern/rbtree.h | 306 ++++++ kern/rbtree_i.h | 186 ++++ kern/rdxtree.c | 830 +++++++++++++++ kern/rdxtree.h | 209 ++++ kern/rdxtree_i.h | 74 ++ kern/refcount.h | 68 ++ kern/sched.h | 186 ++++ kern/sched_prim.c | 2059 ++++++++++++++++++++++++++++++++++++ kern/sched_prim.h | 189 ++++ kern/shuttle.h | 71 ++ kern/slab.c | 1686 +++++++++++++++++++++++++++++ kern/slab.h | 243 +++++ kern/smp.c | 49 + kern/smp.h | 24 + kern/startup.c | 316 ++++++ kern/startup.h | 28 + kern/strings.c | 275 +++++ kern/syscall_emulation.c | 453 ++++++++ kern/syscall_emulation.h | 67 ++ kern/syscall_subr.c | 386 +++++++ kern/syscall_subr.h | 42 + kern/syscall_sw.c | 224 ++++ kern/syscall_sw.h | 57 + kern/task.c | 1351 +++++++++++++++++++++++ kern/task.h | 197 ++++ kern/task_notify.cli | 7 + kern/thread.c | 2646 ++++++++++++++++++++++++++++++++++++++++++++++ kern/thread.h | 437 ++++++++ kern/thread_swap.c | 200 ++++ kern/thread_swap.h | 43 + kern/timer.c | 501 +++++++++ kern/timer.h | 195 ++++ kern/xpr.c | 197 ++++ kern/xpr.h | 97 ++ 104 files changed, 32427 insertions(+) create mode 100644 kern/.gitignore create mode 100644 kern/act.c create mode 100644 kern/act.h create mode 100644 kern/assert.h create mode 100644 kern/ast.c create mode 100644 kern/ast.h create mode 100644 kern/atomic.h create mode 100644 kern/boot_script.c create mode 100644 kern/boot_script.h create mode 100644 kern/bootstrap.c create mode 100644 kern/bootstrap.h create mode 100644 kern/counters.c create mode 100644 kern/counters.h create mode 
100644 kern/cpu_number.h create mode 100644 kern/debug.c create mode 100644 kern/debug.h create mode 100644 kern/elf-load.c create mode 100644 kern/eventcount.c create mode 100644 kern/eventcount.h create mode 100644 kern/exc.defs create mode 100644 kern/exception.c create mode 100644 kern/exception.h create mode 100644 kern/experimental.srv create mode 100644 kern/gnumach.srv create mode 100644 kern/gsync.c create mode 100644 kern/gsync.h create mode 100644 kern/host.c create mode 100644 kern/host.h create mode 100644 kern/ipc_host.c create mode 100644 kern/ipc_host.h create mode 100644 kern/ipc_kobject.c create mode 100644 kern/ipc_kobject.h create mode 100644 kern/ipc_mig.c create mode 100644 kern/ipc_mig.h create mode 100644 kern/ipc_sched.c create mode 100644 kern/ipc_sched.h create mode 100644 kern/ipc_tt.c create mode 100644 kern/ipc_tt.h create mode 100644 kern/kalloc.h create mode 100644 kern/kern_types.h create mode 100644 kern/kmutex.c create mode 100644 kern/kmutex.h create mode 100644 kern/list.h create mode 100644 kern/lock.c create mode 100644 kern/lock.h create mode 100644 kern/lock_mon.c create mode 100644 kern/log2.h create mode 100644 kern/mach.srv create mode 100644 kern/mach4.srv create mode 100644 kern/mach_clock.c create mode 100644 kern/mach_clock.h create mode 100644 kern/mach_debug.srv create mode 100644 kern/mach_factor.c create mode 100644 kern/mach_factor.h create mode 100644 kern/mach_host.srv create mode 100644 kern/machine.c create mode 100644 kern/machine.h create mode 100644 kern/macros.h create mode 100644 kern/pc_sample.c create mode 100644 kern/pc_sample.h create mode 100644 kern/printf.c create mode 100644 kern/printf.h create mode 100644 kern/priority.c create mode 100644 kern/priority.h create mode 100644 kern/processor.c create mode 100644 kern/processor.h create mode 100644 kern/profile.c create mode 100644 kern/queue.c create mode 100644 kern/queue.h create mode 100644 kern/rbtree.c create mode 100644 kern/rbtree.h create mode 100644 kern/rbtree_i.h create mode 100644 kern/rdxtree.c create mode 100644 kern/rdxtree.h create mode 100644 kern/rdxtree_i.h create mode 100644 kern/refcount.h create mode 100644 kern/sched.h create mode 100644 kern/sched_prim.c create mode 100644 kern/sched_prim.h create mode 100644 kern/shuttle.h create mode 100644 kern/slab.c create mode 100644 kern/slab.h create mode 100644 kern/smp.c create mode 100644 kern/smp.h create mode 100644 kern/startup.c create mode 100644 kern/startup.h create mode 100644 kern/strings.c create mode 100644 kern/syscall_emulation.c create mode 100644 kern/syscall_emulation.h create mode 100644 kern/syscall_subr.c create mode 100644 kern/syscall_subr.h create mode 100644 kern/syscall_sw.c create mode 100644 kern/syscall_sw.h create mode 100644 kern/task.c create mode 100644 kern/task.h create mode 100644 kern/task_notify.cli create mode 100644 kern/thread.c create mode 100644 kern/thread.h create mode 100644 kern/thread_swap.c create mode 100644 kern/thread_swap.h create mode 100644 kern/timer.c create mode 100644 kern/timer.h create mode 100644 kern/xpr.c create mode 100644 kern/xpr.h (limited to 'kern') diff --git a/kern/.gitignore b/kern/.gitignore new file mode 100644 index 0000000..72bccc6 --- /dev/null +++ b/kern/.gitignore @@ -0,0 +1,2 @@ +exc.none.defs.c +exc.none.msgids diff --git a/kern/act.c b/kern/act.c new file mode 100644 index 0000000..3819ef3 --- /dev/null +++ b/kern/act.c @@ -0,0 +1,1118 @@ +/* + * Copyright (c) 1993,1994 The University of Utah and + * the Computer Systems 
Laboratory (CSL). All rights reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Bryan Ford, University of Utah CSL + */ +/* + * File: act.c + * + * Activation management routines + * + */ + +#ifdef MIGRATING_THREADS + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipc_target.h" + +static void special_handler(ReturnHandler *rh, struct Act *act); + +#ifdef ACT_STATIC_KLUDGE +#undef ACT_STATIC_KLUDGE +#define ACT_STATIC_KLUDGE 300 +#endif + +#ifndef ACT_STATIC_KLUDGE +static struct kmem_cache act_cache; +#else +static Act *act_freelist; +static Act free_acts[ACT_STATIC_KLUDGE]; +#endif + +/* This is a rather special activation + which resides at the top and bottom of every thread. + When the last "real" activation on a thread is destroyed, + the null_act on the bottom gets invoked, destroying the thread. + At the top, the null_act acts as an "invalid" cached activation, + which will always fail the cached-activation test on RPC paths. + + As you might expect, most of its members have no particular value. + alerts is zero. */ +Act null_act; + +void +global_act_init(void) +{ +#ifndef ACT_STATIC_KLUDGE + kmem_cache_init(&act_cache, "Act", sizeof(struct Act), 0, + NULL, 0); +#else + int i; + +printf("activations: [%x-%x]\n", &free_acts[0], &free_acts[ACT_STATIC_KLUDGE]); + act_freelist = &free_acts[0]; + free_acts[0].ipt_next = 0; + for (i = 1; i < ACT_STATIC_KLUDGE; i++) { + free_acts[i].ipt_next = act_freelist; + act_freelist = &free_acts[i]; + } + /* XXX simple_lock_init(&act_freelist->lock); */ +#endif + +#if 0 + simple_lock_init(&null_act.lock); + refcount_init(&null_act.ref_count, 1); +#endif + + act_machine_init(); +} + +/* Create a new activation in a specific task. + Locking: Task */ +kern_return_t act_create(task_t task, vm_offset_t user_stack, + vm_offset_t user_rbuf, vm_size_t user_rbuf_size, + struct Act **new_act) +{ + Act *act; + +#ifndef ACT_STATIC_KLUDGE + act = (Act*)kmem_cache_alloc(&act_cache); + if (act == 0) + return(KERN_RESOURCE_SHORTAGE); +#else + /* XXX ipt_lock(act_freelist); */ + act = act_freelist; + if (act == 0) panic("out of activations"); + act_freelist = act->ipt_next; + /* XXX ipt_unlock(act_freelist); */ + act->ipt_next = 0; +#endif + memset(act, 0, sizeof(*act)); /*XXX shouldn't be needed */ + +#ifdef DEBUG + act->lower = act->higher = 0; +#endif + + /* Start with one reference for being active, another for the caller */ + simple_lock_init(&act->lock); + refcount_init(&act->ref_count, 2); + + /* Latch onto the task. */ + act->task = task; + task_reference(task); + + /* Other simple setup */ + act->ipt = 0; + act->thread = 0; + act->suspend_count = 0; + act->active = 1; + act->handlers = 0; + + /* The special_handler will always be last on the returnhandlers list. 
*/ + act->special_handler.next = 0; + act->special_handler.handler = special_handler; + + ipc_act_init(task, act); + act_machine_create(task, act, user_stack, user_rbuf, user_rbuf_size); + + task_lock(task); + + /* Chain the act onto the task's list */ + act->task_links.next = task->acts.next; + act->task_links.prev = &task->acts; + task->acts.next->prev = &act->task_links; + task->acts.next = &act->task_links; + task->act_count++; + + task_unlock(task); + + *new_act = act; + return KERN_SUCCESS; +} + +/* This is called when an act's ref_count drops to zero. + This can only happen when thread is zero (not in use), + ipt is zero (not attached to any ipt), + and active is false (terminated). */ +static void act_free(Act *inc) +{ + act_machine_destroy(inc); + ipc_act_destroy(inc); + + /* Drop the task reference. */ + task_deallocate(inc->task); + + /* Put the act back on the act cache */ +#ifndef ACT_STATIC_KLUDGE + kmem_cache_free(&act_cache, (vm_offset_t)inc); +#else + /* XXX ipt_lock(act_freelist); */ + inc->ipt_next = act_freelist; + act_freelist = inc; + /* XXX ipt_unlock(act_freelist); */ +#endif +} + +void act_deallocate(Act *inc) +{ + refcount_drop(&inc->ref_count, act_free(inc)); +} + +/* Attach an act to the top of a thread ("push the stack"). + The thread must be either the current one or a brand-new one. + Assumes the act is active but not in use. + Assumes that if it is attached to an ipt (i.e. the ipt pointer is nonzero), + the act has already been taken off the ipt's list. + + Already locked: cur_thread, act */ +void act_attach(Act *act, thread_t thread, unsigned init_alert_mask) +{ + Act *lower; + + act->thread = thread; + + /* The thread holds a reference to the activation while using it. */ + refcount_take(&act->ref_count); + + /* XXX detach any cached activations from above the target */ + + /* Chain the act onto the thread's act stack. */ + lower = thread->top_act; + act->lower = lower; + lower->higher = act; + thread->top_act = act; + + act->alert_mask = init_alert_mask; + act->alerts = lower->alerts & init_alert_mask; +} + +/* Remove the current act from the top of the current thread ("pop the stack"). + Return it to the ipt it lives on, if any. + Locking: Thread > Act(not on ipt) > ipc_target */ +void act_detach(Act *cur_act) +{ + thread_t cur_thread = cur_act->thread; + + thread_lock(cur_thread); + act_lock(cur_act); + + /* Unlink the act from the thread's act stack */ + cur_thread->top_act = cur_act->lower; + cur_act->thread = 0; +#ifdef DEBUG + cur_act->lower = cur_act->higher = 0; +#endif + + thread_unlock(cur_thread); + + /* Return it to the ipt's list */ + if (cur_act->ipt) + { + ipt_lock(cur_act->ipt); + cur_act->ipt_next = cur_act->ipt->ipt_acts; + cur_act->ipt->ipt_acts = cur_act; + ipt_unlock(cur_act->ipt); +#if 0 + printf(" return to ipt %x\n", cur_act->ipt); +#endif + } + + act_unlock(cur_act); + + /* Drop the act reference taken for being in use. */ + refcount_drop(&cur_act->ref_count, act_free(cur_act)); +} + + + +/*** Activation control support routines ***/ + +/* This is called by system-dependent code + when it detects that act->handlers is non-null + while returning into user mode. + Activations linked onto an ipt always have null act->handlers, + so RPC entry paths need not check it. 
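Editorial sketch: act_attach() and act_detach() above push and pop activations on a thread's activation stack purely through the top_act pointer and the higher/lower links. The following standalone C sketch shows just that push/pop discipline; the names are hypothetical and the locking, reference counting and alert propagation of the real routines are deliberately left out.

#include <assert.h>
#include <stddef.h>

struct act {
    struct act *higher, *lower;   /* neighbours on the activation stack */
};

struct thread {
    struct act *top_act;          /* topmost activation, like thread->top_act */
};

/* Push: the new activation goes on top, mirroring act_attach(). */
static void act_push(struct thread *t, struct act *a)
{
    a->lower = t->top_act;
    if (t->top_act)
        t->top_act->higher = a;
    t->top_act = a;
}

/* Pop: remove the topmost activation, mirroring act_detach(). */
static struct act *act_pop(struct thread *t)
{
    struct act *a = t->top_act;
    t->top_act = a->lower;
    a->lower = a->higher = NULL;
    return a;
}

int main(void)
{
    struct thread th = { NULL };
    struct act null_act = { NULL, NULL }, a = { NULL, NULL };

    act_push(&th, &null_act);     /* the null_act always sits at the bottom */
    act_push(&th, &a);
    assert(act_pop(&th) == &a);
    assert(th.top_act == &null_act);
    return 0;
}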
+ + Locking: Act */ +void act_execute_returnhandlers(void) +{ + Act *act = current_act(); + +#if 0 + printf("execute_returnhandlers\n"); +#endif + while (1) { + ReturnHandler *rh; + + /* Grab the next returnhandler */ + act_lock(act); + rh = act->handlers; + if (!rh) { + act_unlock(act); + return; + } + act->handlers = rh->next; + act_unlock(act); + + /* Execute it */ + (*rh->handler)(rh, act); + } +} + +/* Try to nudge an act into executing its returnhandler chain. + Ensures that the activation will execute its returnhandlers + before it next executes any of its user-level code. + Also ensures that it is safe to break the thread's activation chain + immediately above this activation, + by rolling out of any outstanding two-way-optimized RPC. + + The target activation is not necessarily active + or even in use by a thread. + If it isn't, this routine does nothing. + + Already locked: Act */ +static void act_nudge(struct Act *act) +{ + /* If it's suspended, wake it up. */ + thread_wakeup(&act->suspend_count); + + /* Do a machine-dependent low-level nudge. + If we're on a multiprocessor, + this may mean sending an interprocessor interrupt. + In any case, it means rolling out of two-way-optimized RPC paths. */ + act_machine_nudge(act); +} + +/* Install the special returnhandler that handles suspension and termination, + if it hasn't been installed already. + + Already locked: Act */ +static void install_special_handler(struct Act *act) +{ + ReturnHandler **rh; + + /* The work handler must always be the last ReturnHandler on the list, + because it can do tricky things like detach the act. */ + for (rh = &act->handlers; *rh; rh = &(*rh)->next); + if (rh != &act->special_handler.next) { + *rh = &act->special_handler; + } + + /* Nudge the target activation, + to ensure that it will see the returnhandler we're adding. */ + act_nudge(act); +} + +/* Locking: Act */ +static void special_handler(ReturnHandler *rh, struct Act *cur_act) +{ + retry: + + act_lock(cur_act); + + /* If someone has killed this invocation, + invoke the return path with a terminated exception. */ + if (!cur_act->active) { + act_unlock(cur_act); + act_machine_return(KERN_TERMINATED); + /* XXX should just set the activation's reentry_routine + and then return from special_handler(). + The magic reentry_routine should just pop its own activation + and chain to the reentry_routine of the _lower_ activation. + If that lower activation is the null_act, + the thread will then be terminated. */ + } + + /* If we're suspended, go to sleep and wait for someone to wake us up. */ + if (cur_act->suspend_count) { + act_unlock(cur_act); + /* XXX mp unsafe */ + thread_wait((int)&cur_act->suspend_count, FALSE); + + act_lock(cur_act); + + /* If we're still (or again) suspended, + go to sleep again after executing any new returnhandlers that may have appeared. 
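Editorial sketch: install_special_handler() above appends at the tail of act->handlers so that the special handler always runs last, and act_execute_returnhandlers() detaches one handler at a time from the head, under the act lock, before calling it. The same append-at-tail / drain-from-head idiom in a standalone form (hypothetical names, single-threaded, no locking):

#include <stddef.h>
#include <stdio.h>

struct handler {
    struct handler *next;
    void (*fn)(struct handler *self);
};

static struct handler *pending;   /* head of the chain; drained first */

/* Append at the tail, like install_special_handler(): walk the next
   pointers until the terminating NULL slot is found. */
static void handler_append(struct handler *h)
{
    struct handler **tail;
    for (tail = &pending; *tail; tail = &(*tail)->next)
        ;
    h->next = NULL;
    *tail = h;
}

/* Drain from the head, like act_execute_returnhandlers(): re-read the
   head on every iteration, because a handler may queue more work. */
static void handler_drain(void)
{
    struct handler *h;
    while ((h = pending) != NULL) {
        pending = h->next;
        (*h->fn)(h);
    }
}

static void say_hello(struct handler *self) { (void) self; printf("hello\n"); }

int main(void)
{
    struct handler h = { NULL, say_hello };
    handler_append(&h);
    handler_drain();
    return 0;
}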
*/ + if (cur_act->suspend_count) + install_special_handler(cur_act); + } + + act_unlock(cur_act); +} + +#if 0 /************************ OLD SEMI-OBSOLETE CODE *********************/ +static __dead void act_throughcall_return(Act *act) +{ + /* Done - destroy the act and return */ + act_detach(act); + act_terminate(act); + act_deallocate(act); + + /* XXX */ + thread_terminate_self(); +} + +__dead void act_throughcall(task_t task, void (*infunc)()) +{ + thread_t thread = current_thread(); + Act *act; + ReturnHandler rh; + int rc; + + rc = act_create(task, 0, 0, 0, &act); + if (rc) return rc; + + act->return_routine = act_throughcall_return; + + thread_lock(thread); + act_lock(act); + + act_attach(thread, act, 0); + + rh.handler = infunc; + rh.next = act->handlers; + act->handlers = &rh; + + act_unlock(act); + thread_unlock(thread); + + /* Call through the act into the returnhandler list */ + act_machine_throughcall(act); +} + + +/* Grab an act from the specified pool, to pass to act_upcall. + Returns with the act locked, since it's in an inconsistent state + (not on its ipt but not on a thread either). + Returns null if no acts are available on the ipt. + + Locking: ipc_target > Act(on ipt) */ +Act *act_grab(struct ipc_target *ipt) +{ + Act *act; + + ipt_lock(ipt); + + retry: + + /* Pull an act off the ipt's list. */ + act = ipt->acts; + if (!act) + goto none_avail; + ipt->acts = act->ipt_next; + + act_lock(act); + + /* If it's been terminated, drop it and get another one. */ + if (!act->active) { +#if 0 + printf("dropping terminated act %08x\n", act); +#endif + /* XXX ipt_deallocate(ipt); */ + act->ipt = 0; + act_unlock(act); + act_deallocate(act); + goto retry; + } + +none_avail: + ipt_unlock(ipt); + + return act; +} + +/* Try to make an upcall with an act on the specified ipt. + If the ipt is empty, returns KERN_RESOURCE_SHORTAGE. XXX??? + + Locking: ipc_target > Act > Thread */ +kern_return_t act_upcall(struct Act *act, unsigned init_alert_mask, + vm_offset_t user_entrypoint, vm_offset_t user_data) +{ + thread_t cur_thread = current_thread(); + int rc; + + /* XXX locking */ + + act_attach(cur_thread, act, init_alert_mask); + + /* Make the upcall into the destination task */ + rc = act_machine_upcall(act, user_entrypoint, user_data); + + /* Done - detach the act and return */ + act_detach(act); + + return rc; +} +#endif /************************ END OF OLD SEMI-OBSOLETE CODE *********************/ + + + + +/*** Act service routines ***/ + +/* Lock this act and its current thread. + We can only find the thread from the act + and the thread must be locked before the act, + requiring a little icky juggling. + + If the thread is not currently on any thread, + returns with only the act locked. + + Note that this routine is not called on any performance-critical path. + It is only for explicit act operations + which don't happen often. + + Locking: Thread > Act */ +static thread_t act_lock_thread(Act *act) +{ + thread_t thread; + + retry: + + /* Find the thread */ + act_lock(act); + thread = act->thread; + if (thread == 0) + { + act_unlock(act); + return 0; + } + thread_reference(thread); + act_unlock(act); + + /* Lock the thread and re-lock the act, + and make sure the thread didn't change. 
*/ + thread_lock(thread); + act_lock(act); + if (act->thread != thread) + { + act_unlock(act); + thread_unlock(thread); + thread_deallocate(thread); + goto retry; + } + + thread_deallocate(thread); + + return thread; +} + +/* Already locked: act->task + Locking: Task > Act */ +kern_return_t act_terminate_task_locked(struct Act *act) +{ + act_lock(act); + + if (act->active) + { + /* Unlink the act from the task's act list, + so it doesn't appear in calls to task_acts and such. + The act still keeps its ref on the task, however, + until it loses all its own references and is freed. */ + act->task_links.next->prev = act->task_links.prev; + act->task_links.prev->next = act->task_links.next; + act->task->act_count--; + + /* Remove it from any ipc_target. XXX is this right? */ + act_set_target(act, 0); + + /* This will allow no more control operations on this act. */ + act->active = 0; + + /* When the special_handler gets executed, + it will see the terminated condition and exit immediately. */ + install_special_handler(act); + + /* Drop the act reference taken for being active. + (There is still at least one reference left: the one we were passed.) */ + act_deallocate(act); + } + + act_unlock(act); + + return KERN_SUCCESS; +} + +/* Locking: Task > Act */ +kern_return_t act_terminate(struct Act *act) +{ + task_t task = act->task; + kern_return_t rc; + + /* act->task never changes, + so we can read it before locking the act. */ + task_lock(act->task); + + rc = act_terminate_task_locked(act); + + task_unlock(act->task); + + return rc; +} + +/* If this Act is on a Thread and is not the topmost, + yank it and everything below it off of the thread's stack + and put it all on a new thread forked from the original one. + May fail due to resource shortage, but can always be retried. + + Locking: Thread > Act */ +kern_return_t act_yank(Act *act) +{ + thread_t thread = act_lock_thread(act); + +#if 0 + printf("act_yank inc %08x thread %08x\n", act, thread); +#endif + if (thread) + { + if (thread->top_act != act) + { + printf("detaching act %08x from thread %08x\n", act, thread); + + /* Nudge the activation into a clean point for detachment. */ + act_nudge(act); + + /* Now detach the activation + and give the orphan its own flow of control. */ + /*XXX*/ + } + + thread_unlock(thread); + } + act_unlock(act); + + /* Ask the thread to return as quickly as possible, + because its results are now useless. */ + act_abort(act); + + return KERN_SUCCESS; +} + +/* Assign an activation to a specific ipc_target. + Fails if the activation is already assigned to another pool. + If ipt == 0, we remove the from its ipt. + + Locking: Act(not on ipt) > ipc_target > Act(on ipt) */ +kern_return_t act_set_target(Act *act, struct ipc_target *ipt) +{ + act_lock(act); + + if (ipt == 0) + { + Act **lact; + + ipt = act->ipt; + if (ipt == 0) + return; + + /* XXX This is a violation of the locking order. */ + ipt_lock(ipt); + for (lact = &ipt->ipt_acts; *lact; lact = &((*lact)->ipt_next)) + if (act == *lact) + { + *lact = act->ipt_next; + break; + } + ipt_unlock(ipt); + + act->ipt = 0; + /* XXX ipt_deallocate(ipt); */ + act_deallocate(act); + return; + } + if (act->ipt != ipt) + { + if (act->ipt != 0) + { + act_unlock(act); + return KERN_FAILURE; /*XXX*/ + } + act->ipt = ipt; + ipt->ipt_type |= IPT_TYPE_MIGRATE_RPC; + + /* They get references to each other. */ + act_reference(act); + ipt_reference(ipt); + + /* If it is available, + add it to the ipt's available-activation list. 
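Editorial sketch: act_lock_thread() above resolves a classic lock-ordering problem. The thread can only be found through the act, yet the documented order is Thread > Act, so the routine peeks under the act lock, drops it, locks both in the correct order, and retries if the binding changed in the meantime. The same idiom in a standalone form, using pthread mutexes in place of the kernel's simple locks; all names here are illustrative, not gnumach APIs.

#include <pthread.h>
#include <stddef.h>

struct thread_s { pthread_mutex_t lock; };
struct act_s {
    pthread_mutex_t lock;
    struct thread_s *thread;    /* may change while we juggle locks */
};

/* Lock order is thread before act, but the thread is only reachable
   through the act: peek, unlock, relock in order, then verify. */
static struct thread_s *lookup_and_lock_thread(struct act_s *act)
{
    struct thread_s *thread;

    for (;;) {
        pthread_mutex_lock(&act->lock);
        thread = act->thread;
        if (thread == NULL)
            return NULL;                    /* act stays locked, as in the original */
        pthread_mutex_unlock(&act->lock);   /* the kernel also takes a thread ref here */

        pthread_mutex_lock(&thread->lock);
        pthread_mutex_lock(&act->lock);
        if (act->thread == thread)
            return thread;                  /* both locks held, binding unchanged */

        /* The act migrated while we were juggling locks: undo and retry. */
        pthread_mutex_unlock(&act->lock);
        pthread_mutex_unlock(&thread->lock);
    }
}

int main(void)
{
    struct thread_s th = { PTHREAD_MUTEX_INITIALIZER };
    struct act_s a = { PTHREAD_MUTEX_INITIALIZER, &th };
    struct thread_s *t = lookup_and_lock_thread(&a);

    /* Both a.lock and t->lock are held here. */
    pthread_mutex_unlock(&a.lock);
    if (t)
        pthread_mutex_unlock(&t->lock);
    return 0;
}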
*/ + if ((act->thread == 0) && (act->suspend_count == 0)) + { + ipt_lock(ipt); + act->ipt_next = ipt->ipt_acts; + act->ipt->ipt_acts = act; + ipt_unlock(ipt); + } + } + act_unlock(act); + + return KERN_SUCCESS; +} + +/* Register an alert from this activation. + Each set bit is propagated upward from (but not including) this activation, + until the top of the chain is reached or the bit is masked. + + Locking: Thread > Act */ +kern_return_t act_alert(struct Act *act, unsigned alerts) +{ + thread_t thread = act_lock_thread(act); + +#if 0 + printf("act_alert %08x: %08x\n", act, alerts); +#endif + if (thread) + { + struct Act *act_up = act; + while ((alerts) && (act_up != thread->top_act)) + { + act_up = act_up->higher; + alerts &= act_up->alert_mask; + act_up->alerts |= alerts; + } + + /* XXX If we reach the top, and it is blocked in glue code, do something. */ + + thread_unlock(thread); + } + act_unlock(act); + + return KERN_SUCCESS; +} + +/* Locking: Thread > Act */ +kern_return_t act_abort(struct Act *act) +{ + return act_alert(act, ALERT_ABORT_STRONG); +} + +/* Locking: Thread > Act */ +kern_return_t act_abort_safely(struct Act *act) +{ + return act_alert(act, ALERT_ABORT_SAFE); +} + +/* Locking: Thread > Act */ +kern_return_t act_alert_mask(struct Act *act, unsigned alert_mask) +{ + panic("act_alert_mask\n"); + return KERN_SUCCESS; +} + +/* Locking: Thread > Act */ +kern_return_t act_suspend(struct Act *act) +{ + thread_t thread = act_lock_thread(act); + kern_return_t rc = KERN_SUCCESS; + +#if 0 + printf("act_suspend %08x\n", act); +#endif + if (act->active) + { + if (act->suspend_count++ == 0) + { + /* XXX remove from ipt */ + install_special_handler(act); + act_nudge(act); + } + } + else + rc = KERN_TERMINATED; + + if (thread) + thread_unlock(thread); + act_unlock(act); + + return rc; +} + +/* Locking: Act */ +kern_return_t act_resume(struct Act *act) +{ +#if 0 + printf("act_resume %08x from %d\n", act, act->suspend_count); +#endif + + act_lock(act); + if (!act->active) + { + act_unlock(act); + return KERN_TERMINATED; + } + + if (act->suspend_count > 0) { + if (--act->suspend_count == 0) { + thread_wakeup(&act->suspend_count); + /* XXX return to ipt */ + } + } + + act_unlock(act); + + return KERN_SUCCESS; +} + +typedef struct GetSetState { + struct ReturnHandler rh; + int flavor; + void *state; + int *pcount; + int result; +} GetSetState; + +/* Locking: Thread */ +kern_return_t get_set_state(struct Act *act, int flavor, void *state, int *pcount, + void (*handler)(ReturnHandler *rh, struct Act *act)) +{ + GetSetState gss; + + /* Initialize a small parameter structure */ + gss.rh.handler = handler; + gss.flavor = flavor; + gss.state = state; + gss.pcount = pcount; + + /* Add it to the act's return handler list */ + act_lock(act); + gss.rh.next = act->handlers; + act->handlers = &gss.rh; + + act_nudge(act); + + act_unlock(act); + /* XXX mp unsafe */ + thread_wait((int)&gss, 0); /* XXX could be interruptible */ + + return gss.result; +} + +static void get_state_handler(ReturnHandler *rh, struct Act *act) +{ + GetSetState *gss = (GetSetState*)rh; + + gss->result = act_machine_get_state(act, gss->flavor, gss->state, gss->pcount); + thread_wakeup((int)gss); +} + +/* Locking: Thread */ +kern_return_t act_get_state(struct Act *act, int flavor, natural_t *state, natural_t *pcount) +{ + return get_set_state(act, flavor, state, pcount, get_state_handler); +} + +static void set_state_handler(ReturnHandler *rh, struct Act *act) +{ + GetSetState *gss = (GetSetState*)rh; + + gss->result = 
act_machine_set_state(act, gss->flavor, gss->state, *gss->pcount); + thread_wakeup((int)gss); +} + +/* Locking: Thread */ +kern_return_t act_set_state(struct Act *act, int flavor, natural_t *state, natural_t count) +{ + return get_set_state(act, flavor, state, &count, set_state_handler); +} + + + +/*** backward compatibility hacks ***/ + +#include +#include +#include + +kern_return_t act_thread_info(Act *act, int flavor, + thread_info_t thread_info_out, unsigned *thread_info_count) +{ + return thread_info(act->thread, flavor, thread_info_out, thread_info_count); +} + +kern_return_t +act_thread_assign(Act *act, processor_set_t new_pset) +{ + return thread_assign(act->thread, new_pset); +} + +kern_return_t +act_thread_assign_default(Act *act) +{ + return thread_assign_default(act->thread); +} + +kern_return_t +act_thread_get_assignment(Act *act, processor_set_t *pset) +{ + return thread_get_assignment(act->thread, pset); +} + +kern_return_t +act_thread_priority(Act *act, int priority, boolean_t set_max) +{ + return thread_priority(act->thread, priority, set_max); +} + +kern_return_t +act_thread_max_priority(Act *act, processor_set_t *pset, int max_priority) +{ + return thread_max_priority(act->thread, pset, max_priority); +} + +kern_return_t +act_thread_policy(Act *act, int policy, int data) +{ + return thread_policy(act->thread, policy, data); +} + +kern_return_t +act_thread_wire(struct host *host, Act *act, boolean_t wired) +{ + return thread_wire(host, act->thread, wired); +} + +kern_return_t +act_thread_depress_abort(Act *act) +{ + return thread_depress_abort(act->thread); +} + +/* + * Routine: act_get_special_port [kernel call] + * Purpose: + * Clones a send right for one of the thread's + * special ports. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Extracted a send right. + * KERN_INVALID_ARGUMENT The thread is null. + * KERN_FAILURE The thread is dead. + * KERN_INVALID_ARGUMENT Invalid special port. + */ + +kern_return_t +act_get_special_port(Act *act, int which, ipc_port_t *portp) +{ + ipc_port_t *whichp; + ipc_port_t port; + +#if 0 + printf("act_get_special_port\n"); +#endif + if (act == 0) + return KERN_INVALID_ARGUMENT; + + switch (which) { + case THREAD_KERNEL_PORT: + whichp = &act->self_port; + break; + + case THREAD_EXCEPTION_PORT: + whichp = &act->exception_port; + break; + + default: + return KERN_INVALID_ARGUMENT; + } + + thread_lock(act->thread); + + if (act->self_port == IP_NULL) { + thread_unlock(act->thread); + return KERN_FAILURE; + } + + port = ipc_port_copy_send(*whichp); + thread_unlock(act->thread); + + *portp = port; + return KERN_SUCCESS; +} + +/* + * Routine: act_set_special_port [kernel call] + * Purpose: + * Changes one of the thread's special ports, + * setting it to the supplied send right. + * Conditions: + * Nothing locked. If successful, consumes + * the supplied send right. + * Returns: + * KERN_SUCCESS Changed the special port. + * KERN_INVALID_ARGUMENT The thread is null. + * KERN_FAILURE The thread is dead. + * KERN_INVALID_ARGUMENT Invalid special port. 
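Editorial sketch: get_set_state() above is a small synchronous request-to-a-thread pattern. The request lives on the caller's stack, is linked onto the target's return-handler chain, and the caller sleeps on its address until the handler, running in the target's own context, fills in the result and wakes it. Here is the same handshake in standalone form, with a pthread mutex/condvar pair substituted for thread_wait()/thread_wakeup(); the substitution and all names are illustrative only.

#include <pthread.h>
#include <stdio.h>

struct request {
    pthread_mutex_t lock;
    pthread_cond_t  done_cv;
    int             done;
    int             result;      /* filled in by the target thread */
};

/* Runs in the target thread, like get_state_handler(): do the work,
   publish the result, wake the waiter. */
static void *target_thread(void *arg)
{
    struct request *req = arg;
    pthread_mutex_lock(&req->lock);
    req->result = 42;            /* stands in for the machine-level state fetch */
    req->done = 1;
    pthread_cond_signal(&req->done_cv);
    pthread_mutex_unlock(&req->lock);
    return NULL;
}

int main(void)
{
    struct request req = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0
    };
    pthread_t tid;

    pthread_create(&tid, NULL, target_thread, &req);

    /* Caller side, like get_set_state(): sleep until the handler ran. */
    pthread_mutex_lock(&req.lock);
    while (!req.done)
        pthread_cond_wait(&req.done_cv, &req.lock);
    pthread_mutex_unlock(&req.lock);
    pthread_join(tid, NULL);

    printf("result = %d\n", req.result);
    return 0;
}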
+ */ + +kern_return_t +act_set_special_port(Act *act, int which, ipc_port_t port) +{ + ipc_port_t *whichp; + ipc_port_t old; + +#if 0 + printf("act_set_special_port\n"); +#endif + if (act == 0) + return KERN_INVALID_ARGUMENT; + + switch (which) { + case THREAD_KERNEL_PORT: + whichp = &act->self_port; + break; + + case THREAD_EXCEPTION_PORT: + whichp = &act->exception_port; + break; + + default: + return KERN_INVALID_ARGUMENT; + } + + thread_lock(act->thread); + if (act->self_port == IP_NULL) { + thread_unlock(act->thread); + return KERN_FAILURE; + } + + old = *whichp; + *whichp = port; + thread_unlock(act->thread); + + if (IP_VALID(old)) + ipc_port_release_send(old); + return KERN_SUCCESS; +} + +/* + * XXX lame, non-blocking ways to get/set state. + * Return thread's machine-dependent state. + */ +kern_return_t +act_get_state_immediate( + Act *act, + int flavor, + void *old_state, /* pointer to OUT array */ + unsigned int *old_state_count) /*IN/OUT*/ +{ + kern_return_t ret; + + act_lock(act); + /* not the top activation, return current state */ + if (act->thread && act->thread->top_act != act) { + ret = act_machine_get_state(act, flavor, + old_state, old_state_count); + act_unlock(act); + return ret; + } + act_unlock(act); + + /* not sure this makes sense */ + return act_get_state(act, flavor, old_state, old_state_count); +} + +/* + * Change thread's machine-dependent state. + */ +kern_return_t +act_set_state_immediate( + Act *act, + int flavor, + void *new_state, + unsigned int new_state_count) +{ + kern_return_t ret; + + act_lock(act); + /* not the top activation, set it now */ + if (act->thread && act->thread->top_act != act) { + ret = act_machine_set_state(act, flavor, + new_state, new_state_count); + act_unlock(act); + return ret; + } + act_unlock(act); + + /* not sure this makes sense */ + return act_set_state(act, flavor, new_state, new_state_count); +} + +void act_count(void) +{ + int i; + Act *act; + static int amin = ACT_STATIC_KLUDGE; + + i = 0; + for (act = act_freelist; act; act = act->ipt_next) + i++; + if (i < amin) + amin = i; + printf("%d of %d activations in use, %d max\n", + ACT_STATIC_KLUDGE-i, ACT_STATIC_KLUDGE, ACT_STATIC_KLUDGE-amin); +} + +void dump_act(act) + Act *act; +{ + act_count(); + kact_count(); + while (act) { + printf("%08.8x: thread=%x, task=%x, hi=%x, lo=%x, ref=%x\n", + act, act->thread, act->task, + act->higher, act->lower, act->ref_count); + printf("\talerts=%x, mask=%x, susp=%x, active=%x\n", + act->alerts, act->alert_mask, + act->suspend_count, act->active); + machine_dump_act(&act->mact); + if (act == act->lower) + break; + act = act->lower; + } +} + +#ifdef ACTWATCH +Act * +get_next_act(int sp) +{ + static int i; + Act *act; + + while (1) { + if (i == ACT_STATIC_KLUDGE) { + i = 0; + return 0; + } + act = &free_acts[i]; + i++; + if (act->mact.space == sp) + return act; + } +} +#endif /* ACTWATCH */ + +#endif /* MIGRATING_THREADS */ diff --git a/kern/act.h b/kern/act.h new file mode 100644 index 0000000..f46f53a --- /dev/null +++ b/kern/act.h @@ -0,0 +1,192 @@ +/* + * Copyright (c) 1993,1994 The University of Utah and + * the Computer Systems Laboratory (CSL). All rights reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Bryan Ford, University of Utah CSL + */ +/* + * File: act.h + * + * This defines the Act structure, + * which is the kernel representation of a user-space activation. + * + */ + +#ifndef _KERN_ACT_H_ +#define _KERN_ACT_H_ + +#ifdef MIGRATING_THREADS + +#ifndef __dead /* XXX */ +#define __dead +#endif + +#include +#include +#include +#include +#include + +struct task; +struct thread; +struct Act; + + +struct ReturnHandler { + struct ReturnHandler *next; + void (*handler)(struct ReturnHandler *rh, struct Act *act); +}; +typedef struct ReturnHandler ReturnHandler; + + + +struct Act { + + /*** Task linkage ***/ + + /* Links for task's circular list of activations. + The activation is only on the task's activation list while active. + Must be first. */ + queue_chain_t task_links; + + /* Reference to the task this activation is in. + This is constant as long as the activation is allocated. */ + struct task *task; + + + + /*** Machine-dependent state ***/ + /* XXX should be first to allow maximum flexibility to MD code */ + MachineAct mact; + + + + /*** Consistency ***/ + RefCount ref_count; + decl_simple_lock_data(,lock) + + + + /*** ipc_target-related stuff ***/ + + /* ActPool this activation normally lives on, zero if none. + The activation and actpool hold references to each other as long as this is nonzero + (even when the activation isn't actually on the actpool's list). */ + struct ipc_target *ipt; + + /* Link on the ipt's list of activations. + The activation is only actually on the ipt's list (and hence this is valid) + when we're not in use (thread == 0) and not suspended (suspend_count == 0). */ + struct Act *ipt_next; + + + + /*** Thread linkage ***/ + + /* Thread this activation is in, zero if not in use. + The thread holds a reference on the activation while this is nonzero. */ + struct thread *thread; + + /* The rest in this section is only valid when thread is nonzero. */ + + /* Next higher and next lower activation on the thread's activation stack. + For a topmost activation or the null_act, higher is undefined. + The bottommost activation is always the null_act. */ + struct Act *higher, *lower; + + /* Alert bits pending at this activation; + some of them may have propagated from lower activations. */ + unsigned alerts; + + /* Mask of alert bits to be allowed to pass through from lower levels. */ + unsigned alert_mask; + + + + /*** Control information ***/ + + /* Number of outstanding suspensions on this activation. */ + int suspend_count; + + /* This is normally true, but is set to false when the activation is terminated. 
*/ + int active; + + /* Chain of return handlers to be called + before the thread is allowed to return to this invocation */ + ReturnHandler *handlers; + + /* A special ReturnHandler attached to the above chain to handle suspension and such */ + ReturnHandler special_handler; + + + + /* Special ports attached to this activation */ + struct ipc_port *self; /* not a right, doesn't hold ref */ + struct ipc_port *self_port; /* a send right */ + struct ipc_port *exception_port; /* a send right */ + struct ipc_port *syscall_port; /* a send right */ +}; +typedef struct Act Act; +typedef struct Act *act_t; +typedef mach_port_t *act_array_t; + +#define ACT_NULL ((Act*)0) + + +/* Exported to world */ +kern_return_t act_create(struct task *task, vm_offset_t user_stack, vm_offset_t user_rbuf, vm_size_t user_rbuf_size, struct Act **new_act); +kern_return_t act_alert_mask(struct Act *act, unsigned alert_mask); +kern_return_t act_alert(struct Act *act, unsigned alerts); +kern_return_t act_abort(struct Act *act); +kern_return_t act_abort_safely(struct Act *act); +kern_return_t act_terminate(struct Act *act); +kern_return_t act_suspend(struct Act *act); +kern_return_t act_resume(struct Act *act); +kern_return_t act_get_state(struct Act *act, int flavor, + natural_t *state, natural_t *pcount); +kern_return_t act_set_state(struct Act *act, int flavor, + natural_t *state, natural_t count); + +#define act_lock(act) simple_lock(&(act)->lock) +#define act_unlock(act) simple_unlock(&(act)->lock) + +#define act_reference(act) refcount_take(&(act)->ref_count) +void act_deallocate(struct Act *act); + +/* Exported to startup.c */ +void act_init(void); + +/* Exported to task.c */ +kern_return_t act_terminate_task_locked(struct Act *act); + +/* Exported to thread.c */ +extern Act null_act; + +/* Exported to machine-dependent activation code */ +void act_execute_returnhandlers(void); + + + +/* System-dependent functions */ +kern_return_t act_machine_create(struct task *task, Act *inc, vm_offset_t user_stack, vm_offset_t user_rbuf, vm_size_t user_rbuf_size); +void act_machine_destroy(Act *inc); +kern_return_t act_machine_set_state(Act *inc, int flavor, int *tstate, unsigned count); +kern_return_t act_machine_get_state(Act *inc, int flavor, int *tstate, unsigned *count); + + + +#endif /* MIGRATING_THREADS */ +#endif /* _KERN_ACT_H_ */ diff --git a/kern/assert.h b/kern/assert.h new file mode 100644 index 0000000..fed2a20 --- /dev/null +++ b/kern/assert.h @@ -0,0 +1,54 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ + +#ifndef _KERN_ASSERT_H_ +#define _KERN_ASSERT_H_ + +/* assert.h 4.2 85/01/21 */ + +#include + +#ifndef NDEBUG +#define MACH_ASSERT 1 +#endif + +#if MACH_ASSERT +extern void Assert(const char *exp, const char *filename, int line, + const char *fun) __attribute__ ((noreturn)); + +#define assert(ex) \ + (likely(ex) \ + ? (void) (0) \ + : Assert (#ex, __FILE__, __LINE__, __FUNCTION__)) + +#define assert_static(x) assert(x) + +#else /* MACH_ASSERT */ +#define assert(ex) +#define assert_static(ex) +#endif /* MACH_ASSERT */ + +#endif /* _KERN_ASSERT_H_ */ diff --git a/kern/ast.c b/kern/ast.c new file mode 100644 index 0000000..8c514b3 --- /dev/null +++ b/kern/ast.c @@ -0,0 +1,235 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University. + * Copyright (c) 1993,1994 The University of Utah and + * the Computer Systems Laboratory (CSL). + * All rights reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF + * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY + * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF + * THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * + * This file contains routines to check whether an ast is needed. + * + * ast_check() - check whether ast is needed for interrupt or context + * switch. Usually called by clock interrupt handler. + * + */ + +#include +#include +#include +#include "cpu_number.h" +#include +#include +#include +#include +#include +#include + +#include /* for splsched */ + +#if MACH_FIXPRI +#include +#endif /* MACH_FIXPRI */ + + +volatile ast_t need_ast[NCPUS]; + +void +ast_init(void) +{ +#ifndef MACHINE_AST + int i; + + for (i=0; iidle_thread) { +#ifndef MIGRATING_THREADS + while (thread_should_halt(self)) + thread_halt_self(thread_exception_return); +#endif + + /* + * One of the previous actions might well have + * woken a high-priority thread, so we use + * csw_needed in addition to AST_BLOCK. + */ + + if ((reasons & AST_BLOCK) || + csw_needed(self, current_processor())) { + counter(c_ast_taken_block++); + thread_block(thread_exception_return); + } + } +} + +void +ast_check(void) +{ + int mycpu = cpu_number(); + processor_t myprocessor; + thread_t thread = current_thread(); + run_queue_t rq; + spl_t s = splsched(); + + /* + * Check processor state for ast conditions. + */ + myprocessor = cpu_to_processor(mycpu); + switch(myprocessor->state) { + case PROCESSOR_OFF_LINE: + case PROCESSOR_IDLE: + case PROCESSOR_DISPATCHING: + /* + * No ast. + */ + break; + +#if NCPUS > 1 + case PROCESSOR_ASSIGN: + case PROCESSOR_SHUTDOWN: + /* + * Need ast to force action thread onto processor. + * + * XXX Should check if action thread is already there. 
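Editorial sketch: the assert() macro in kern/assert.h above expands into a call to Assert() carrying the stringified expression plus file, line and function. The standalone example below reproduces the same expansion technique with a local abort-style handler in place of the kernel's Assert(), and a plain (ex) test since likely() is defined elsewhere in gnumach; it only illustrates the mechanism, not the kernel's panic path.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's Assert(): report and abort. */
static void my_assert_fail(const char *exp, const char *file, int line,
                           const char *fun)
{
    fprintf(stderr, "Assertion `%s' failed in %s() at %s:%d\n",
            exp, fun, file, line);
    abort();
}

/* Same shape as kern/assert.h: stringify the expression with #ex and
   capture the location with __FILE__/__LINE__/__FUNCTION__. */
#define my_assert(ex) \
    ((ex) ? (void) 0 : my_assert_fail(#ex, __FILE__, __LINE__, __FUNCTION__))

int main(void)
{
    int count = 1;
    my_assert(count > 0);     /* passes silently */
    my_assert(count == 2);    /* prints the message and aborts */
    return 0;
}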
+ */ + ast_on(mycpu, AST_BLOCK); + break; +#endif /* NCPUS > 1 */ + + case PROCESSOR_RUNNING: + + /* + * Propagate thread ast to processor. If we already + * need an ast, don't look for more reasons. + */ + ast_propagate(thread, mycpu); + if (ast_needed(mycpu)) + break; + + /* + * Context switch check. The csw_needed macro isn't + * used here because the rq->low hint may be wrong, + * and fixing it here avoids an extra ast. + * First check the easy cases. + */ + if (thread->state & TH_SUSP || myprocessor->runq.count > 0) { + ast_on(mycpu, AST_BLOCK); + break; + } + + /* + * Update lazy evaluated runq->low if only timesharing. + */ +#if MACH_FIXPRI + if (myprocessor->processor_set->policies & POLICY_FIXEDPRI) { + if (csw_needed(thread,myprocessor)) { + ast_on(mycpu, AST_BLOCK); + break; + } + else { + /* + * For fixed priority threads, set first_quantum + * so entire new quantum is used. + */ + if (thread->policy == POLICY_FIXEDPRI) + myprocessor->first_quantum = TRUE; + } + } + else { +#endif /* MACH_FIXPRI */ + rq = &(myprocessor->processor_set->runq); + if (!(myprocessor->first_quantum) && (rq->count > 0)) { + queue_t q; + /* + * This is not the first quantum, and there may + * be something in the processor_set runq. + * Check whether low hint is accurate. + */ + q = rq->runq + *(volatile int *)&rq->low; + if (queue_empty(q)) { + int i; + + /* + * Need to recheck and possibly update hint. + */ + runq_lock(rq); + q = rq->runq + rq->low; + if (rq->count > 0) { + for (i = rq->low; i < NRQS; i++) { + if(!(queue_empty(q))) + break; + q++; + } + rq->low = i; + } + runq_unlock(rq); + } + + if (rq->low <= thread->sched_pri) { + ast_on(mycpu, AST_BLOCK); + break; + } + } +#if MACH_FIXPRI + } +#endif /* MACH_FIXPRI */ + break; + + default: + panic("ast_check: Bad processor state (cpu %d processor %p) state: %d", + mycpu, myprocessor, myprocessor->state); + } + + (void) splx(s); +} diff --git a/kern/ast.h b/kern/ast.h new file mode 100644 index 0000000..aded167 --- /dev/null +++ b/kern/ast.h @@ -0,0 +1,139 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University. + * Copyright (c) 1993,1994 The University of Utah and + * the Computer Systems Laboratory (CSL). + * All rights reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF + * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY + * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF + * THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * kern/ast.h: Definitions for Asynchronous System Traps. + */ + +#ifndef _KERN_AST_H_ +#define _KERN_AST_H_ + +/* + * A CPU takes an AST when it is about to return to user code. + * Instead of going back to user code, it calls ast_taken. 
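Editorial sketch: ast_check() above requests a context-switch AST with ast_on(mycpu, AST_BLOCK). The ast_on()/ast_off() macros (defined in kern/ast.h further down in this patch) only maintain the per-CPU need_ast mask and poke the machine layer through aston()/astoff() when the mask becomes non-empty or empty. Below is a standalone single-CPU model of that bookkeeping, with printf stubs standing in for the machine hooks and without the splsched() interrupt masking the real macros assume.

#include <stdio.h>

#define AST_ZILCH  0x0
#define AST_HALT   0x1
#define AST_BLOCK  0x4

typedef unsigned long ast_t;
static volatile ast_t need_ast;        /* one CPU's pending-reason mask */

static void aston(void)  { printf("arm AST trap on return to user\n"); }
static void astoff(void) { printf("disarm AST trap\n"); }

/* Like ast_on(): accumulate reasons; arm the trap while any are pending. */
static void ast_on(ast_t reasons)
{
    if ((need_ast |= reasons) != AST_ZILCH)
        aston();
}

/* Like ast_off(): retire reasons; disarm the trap once none remain. */
static void ast_off(ast_t reasons)
{
    if ((need_ast &= ~reasons) == AST_ZILCH)
        astoff();
}

int main(void)
{
    ast_on(AST_BLOCK);                 /* e.g. a higher-priority thread woke up */
    ast_on(AST_HALT);
    ast_off(AST_BLOCK);                /* still armed: AST_HALT remains pending */
    ast_off(AST_HALT);                 /* now disarmed */
    printf("pending mask: %#lx\n", (unsigned long) need_ast);
    return 0;
}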
+ * Machine-dependent code is responsible for maintaining + * a set of reasons for an AST, and passing this set to ast_taken. + */ + +#include +#include +#include + +/* + * Bits for reasons + */ + +#define AST_ZILCH 0x0 +#define AST_HALT 0x1 +#define AST_TERMINATE 0x2 +#define AST_BLOCK 0x4 +#define AST_NETWORK 0x8 +#define AST_NETIPC 0x10 + +#define AST_SCHEDULING (AST_HALT|AST_TERMINATE|AST_BLOCK) + +/* + * Per-thread ASTs are reset at context-switch time. + * machine/ast.h can define MACHINE_AST_PER_THREAD. + */ + +#ifndef MACHINE_AST_PER_THREAD +#define MACHINE_AST_PER_THREAD 0 +#endif + +#define AST_PER_THREAD (AST_HALT | AST_TERMINATE | MACHINE_AST_PER_THREAD) + +typedef unsigned long ast_t; + +extern volatile ast_t need_ast[NCPUS]; + +#ifdef MACHINE_AST +/* + * machine/ast.h is responsible for defining aston and astoff. + */ +#else /* MACHINE_AST */ + +#define aston(mycpu) +#define astoff(mycpu) + +#endif /* MACHINE_AST */ + +extern void ast_taken(void); + +/* + * ast_needed, ast_on, ast_off, ast_context, and ast_propagate + * assume splsched. mycpu is always cpu_number(). It is an + * argument in case cpu_number() is expensive. + */ + +#define ast_needed(mycpu) need_ast[mycpu] + +#define ast_on(mycpu, reasons) \ +MACRO_BEGIN \ + if ((need_ast[mycpu] |= (reasons)) != AST_ZILCH) \ + { aston(mycpu); } \ +MACRO_END + +#define ast_off(mycpu, reasons) \ +MACRO_BEGIN \ + if ((need_ast[mycpu] &= ~(reasons)) == AST_ZILCH) \ + { astoff(mycpu); } \ +MACRO_END + +#define ast_propagate(thread, mycpu) ast_on((mycpu), (thread)->ast) + +#define ast_context(thread, mycpu) \ +MACRO_BEGIN \ + if ((need_ast[mycpu] = \ + (need_ast[mycpu] &~ AST_PER_THREAD) | (thread)->ast) \ + != AST_ZILCH) \ + { aston(mycpu); } \ + else \ + { astoff(mycpu); } \ +MACRO_END + + +#define thread_ast_set(thread, reason) (thread)->ast |= (reason) +#define thread_ast_clear(thread, reason) (thread)->ast &= ~(reason) +#define thread_ast_clear_all(thread) (thread)->ast = AST_ZILCH + +/* + * NOTE: if thread is the current thread, thread_ast_set should + * be followed by ast_propagate(). + */ + +extern void ast_init (void); + +extern void ast_check (void); + +#if NCPUS > 1 +extern void init_ast_check(const processor_t processor); +extern void cause_ast_check(const processor_t processor); +#endif + +#endif /* _KERN_AST_H_ */ diff --git a/kern/atomic.h b/kern/atomic.h new file mode 100644 index 0000000..00da164 --- /dev/null +++ b/kern/atomic.h @@ -0,0 +1,54 @@ +/* Copyright (C) 2017 Free Software Foundation, Inc. + Contributed by Agustina Arzille , 2017. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either + version 2 of the license, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public + License along with this program; if not, see + . +*/ + +#ifndef _KERN_ATOMIC_H_ +#define _KERN_ATOMIC_H_ 1 + +/* Atomically compare *PTR with EXP and set it to NVAL if they're equal. 
+ * Evaluates to a boolean, indicating whether the comparison was successful.*/ +#define __atomic_cas_helper(ptr, exp, nval, mo) \ + ({ \ + typeof(exp) __e = (exp); \ + __atomic_compare_exchange_n ((ptr), &__e, (nval), 0, \ + __ATOMIC_##mo, __ATOMIC_RELAXED); \ + }) + +#define atomic_cas_acq(ptr, exp, nval) \ + __atomic_cas_helper (ptr, exp, nval, ACQUIRE) + +#define atomic_cas_rel(ptr, exp, nval) \ + __atomic_cas_helper (ptr, exp, nval, RELEASE) + +#define atomic_cas_seq(ptr, exp, nval) \ + __atomic_cas_helper (ptr, exp, nval, SEQ_CST) + +/* Atomically exchange the value of *PTR with VAL, evaluating to + * its previous value. */ +#define __atomic_swap_helper(ptr, val, mo) \ + __atomic_exchange_n ((ptr), (val), __ATOMIC_##mo) + +#define atomic_swap_acq(ptr, val) \ + __atomic_swap_helper (ptr, val, ACQUIRE) + +#define atomic_swap_rel(ptr, val) \ + __atomic_swap_helper (ptr, val, RELEASE) + +#define atomic_swap_seq(ptr, val) \ + __atomic_swap_helper (ptr, val, SEQ_CST) + +#endif diff --git a/kern/boot_script.c b/kern/boot_script.c new file mode 100644 index 0000000..07ce4b3 --- /dev/null +++ b/kern/boot_script.c @@ -0,0 +1,791 @@ +/* Boot script parser for Mach. */ + +/* Written by Shantanu Goel (goel@cs.columbia.edu). */ + +#include +#include +#include +#include "boot_script.h" +#include "bootstrap.h" + + +/* This structure describes a symbol. */ +struct sym +{ + /* Symbol name. */ + const char *name; + + /* Type of value returned by function. */ + int type; + + /* Symbol value. */ + long val; + + /* For function symbols; type of value returned by function. */ + int ret_type; + + /* For function symbols; if set, execute function at the time + of command execution, not during parsing. A function with + this field set must also have `no_arg' set. Also, the function's + `val' argument will always be NULL. */ + int run_on_exec; +}; + +/* Additional values symbols can take. + These are only used internally. */ +#define VAL_SYM 10 /* symbol table entry */ +#define VAL_FUNC 11 /* function pointer */ + +/* This structure describes an argument. */ +struct arg +{ + /* Argument text copied verbatim. 0 if none. */ + char *text; + + /* Type of value assigned. 0 if none. */ + int type; + + /* Argument value. */ + long val; +}; + +/* List of commands. */ +static struct cmd **cmds = 0; + +/* Amount allocated for `cmds'. */ +static int cmds_alloc = 0; + +/* Next available slot in `cmds'. */ +static int cmds_index = 0; + +/* Symbol table. */ +static struct sym **symtab = 0; + +/* Amount allocated for `symtab'. */ +static int symtab_alloc = 0; + +/* Next available slot in `symtab'. */ +static int symtab_index = 0; + +/* Create a task and suspend it. */ +static int +create_task (struct cmd *cmd, long *val) +{ + int err = boot_script_task_create (cmd); + *val = (long) cmd->task; + return err; +} + +/* Resume a task. */ +static int +resume_task (struct cmd *cmd, const long *val) +{ + return boot_script_task_resume (cmd); +} + +/* Resume a task when the user hits return. */ +static int +prompt_resume_task (struct cmd *cmd, const long *val) +{ + return boot_script_prompt_task_resume (cmd); +} + +/* List of builtin symbols. */ +static struct sym builtin_symbols[] = +{ + { "task-create", VAL_FUNC, (long) create_task, VAL_TASK, 0 }, + { "task-resume", VAL_FUNC, (long) resume_task, VAL_NONE, 1 }, + { "prompt-task-resume", VAL_FUNC, (long) prompt_resume_task, VAL_NONE, 1 }, +}; +#define NUM_BUILTIN (sizeof (builtin_symbols) / sizeof (builtin_symbols[0])) + +/* Free CMD and all storage associated with it. 
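Editorial sketch: the wrappers in kern/atomic.h above are thin layers over GCC's __atomic builtins with an explicit memory order. As a standalone illustration of how they compose, here is a minimal test-and-set spinlock built from an acquire swap and a release CAS. The two macro bodies are repeated from the header so the example compiles on its own; this is only a sketch, not how gnumach implements its own locks (those are in kern/lock.c in this same patch).

#include <stdio.h>

/* Same shape as kern/atomic.h: exchange with acquire, CAS with release. */
#define atomic_swap_acq(ptr, val) \
    __atomic_exchange_n ((ptr), (val), __ATOMIC_ACQUIRE)
#define atomic_cas_rel(ptr, exp, nval)                                    \
    ({                                                                    \
        typeof(exp) __e = (exp);                                          \
        __atomic_compare_exchange_n ((ptr), &__e, (nval), 0,              \
                                     __ATOMIC_RELEASE, __ATOMIC_RELAXED); \
    })

static int lock_word;   /* 0 = free, 1 = held */

static void spin_lock(int *l)
{
    /* Acquire ordering: later loads/stores may not move above this. */
    while (atomic_swap_acq(l, 1) != 0)
        ;   /* spin until the previous value was 0, i.e. we took the lock */
}

static void spin_unlock(int *l)
{
    /* Release ordering: earlier writes become visible before the unlock. */
    (void) atomic_cas_rel(l, 1, 0);
}

int main(void)
{
    spin_lock(&lock_word);
    printf("critical section\n");
    spin_unlock(&lock_word);
    return 0;
}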
+ If ABORTING is set, terminate the task associated with CMD, + otherwise just deallocate the send right. */ +static void +free_cmd (struct cmd *cmd, int aborting) +{ + if (cmd->task) + boot_script_free_task (cmd->task, aborting); + if (cmd->args) + { + int i; + for (i = 0; i < cmd->args_index; i++) + boot_script_free (cmd->args[i], sizeof *cmd->args[i]); + boot_script_free (cmd->args, sizeof cmd->args[0] * cmd->args_alloc); + } + if (cmd->exec_funcs) + boot_script_free (cmd->exec_funcs, + sizeof cmd->exec_funcs[0] * cmd->exec_funcs_alloc); + boot_script_free (cmd, sizeof *cmd); +} + +/* Free all storage allocated by the parser. + If ABORTING is set, terminate all tasks. */ +static void +cleanup (int aborting) +{ + int i; + + for (i = 0; i < cmds_index; i++) + free_cmd (cmds[i], aborting); + boot_script_free (cmds, sizeof cmds[0] * cmds_alloc); + cmds = 0; + cmds_index = cmds_alloc = 0; + + for (i = 0; i < symtab_index; i++) + boot_script_free (symtab[i], sizeof *symtab[i]); + boot_script_free (symtab, sizeof symtab[0] * symtab_alloc); + symtab = 0; + symtab_index = symtab_alloc = 0; +} + +/* Add PTR to the list of pointers PTR_LIST, which + currently has ALLOC amount of space allocated to it, and + whose next available slot is INDEX. If more space + needs to to allocated, INCR is the amount by which + to increase it. Return 0 on success, non-zero otherwise. */ +static int +add_list (void *ptr, void ***ptr_list, int *alloc, int *index, int incr) +{ + if (*index == *alloc) + { + void **p; + + *alloc += incr; + p = boot_script_malloc (*alloc * sizeof (void *)); + if (! p) + { + *alloc -= incr; + return 1; + } + if (*ptr_list) + { + memcpy (p, *ptr_list, *index * sizeof (void *)); + boot_script_free (*ptr_list, (*alloc - incr) * sizeof (void *)); + } + *ptr_list = p; + } + *(*ptr_list + *index) = ptr; + *index += 1; + return 0; +} + +/* Create an argument with TEXT, value type TYPE, and value VAL. + Add the argument to the argument list of CMD. */ +static struct arg * +add_arg (struct cmd *cmd, char *text, int type, long val) +{ + struct arg *arg; + + arg = boot_script_malloc (sizeof (struct arg)); + if (arg) + { + arg->text = text; + arg->type = type; + arg->val = val; + if (add_list (arg, (void ***) &cmd->args, + &cmd->args_alloc, &cmd->args_index, 5)) + { + boot_script_free (arg, sizeof *arg); + return 0; + } + } + return arg; +} + +/* Search for the symbol NAME in the symbol table. */ +static struct sym * +sym_lookup (const char *name) +{ + int i; + + for (i = 0; i < symtab_index; i++) + if (! strcmp (name, symtab[i]->name)) + return symtab[i]; + return 0; +} + +/* Create an entry for symbol NAME in the symbol table. */ +static struct sym * +sym_enter (const char *name) +{ + struct sym *sym; + + sym = boot_script_malloc (sizeof (struct sym)); + if (sym) + { + memset (sym, 0, sizeof (struct sym)); + sym->name = name; + if (add_list (sym, (void ***) &symtab, &symtab_alloc, &symtab_index, 20)) + { + boot_script_free (sym, sizeof *sym); + return 0; + } + } + return sym; +} + +/* Parse the command line CMDLINE. */ +int +boot_script_parse_line (void *hook, char *cmdline) +{ + char *p, *q; + int error; + struct cmd *cmd; + struct arg *arg; + + /* Extract command name. Ignore line if it lacks a command. */ + for (p = cmdline; *p == ' ' || *p == '\t'; p++) + ; + if (*p == '#') + /* Ignore comment line. 
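Editorial sketch: add_list() above is the parser's single growth primitive; every growable array in this file (cmds, args, symtab, exec_funcs) is just a void ** plus alloc/index counters, extended in fixed increments. The sketch below shows the calling convention with boot_script_malloc()/boot_script_free() stubbed out on top of malloc()/free(); in gnumach those two are supplied by the bootstrap glue, so the stubs are an assumption made only to keep the example self-contained.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stubs: in gnumach these come from the bootstrap environment. */
static void *boot_script_malloc (unsigned size) { return malloc (size); }
static void  boot_script_free (void *ptr, unsigned size) { (void) size; free (ptr); }

/* Same contract as add_list() in boot_script.c: append PTR, growing the
   array by INCR slots whenever index catches up with alloc.  Returns 0
   on success, non-zero on allocation failure. */
static int
add_list (void *ptr, void ***ptr_list, int *alloc, int *index, int incr)
{
  if (*index == *alloc)
    {
      void **p = boot_script_malloc ((*alloc + incr) * sizeof (void *));
      if (! p)
	return 1;
      if (*ptr_list)
	{
	  memcpy (p, *ptr_list, *index * sizeof (void *));
	  boot_script_free (*ptr_list, *alloc * sizeof (void *));
	}
      *ptr_list = p;
      *alloc += incr;
    }
  (*ptr_list)[(*index)++] = ptr;
  return 0;
}

int
main (void)
{
  char **args = 0;
  int alloc = 0, index = 0, i;

  /* Append a few argument strings, the way add_arg() grows cmd->args. */
  add_list ("--kernel", (void ***) &args, &alloc, &index, 5);
  add_list ("root=device:hd0s1", (void ***) &args, &alloc, &index, 5);

  for (i = 0; i < index; i++)
    printf ("arg[%d] = %s\n", i, args[i]);
  boot_script_free (args, alloc * sizeof (void *));
  return 0;
}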
*/ + return 0; + +#if 0 + if (*p && *p != ' ' && *p != '\t' && *p != '\n') + { + printf ("(bootstrap): %s\n", cmdline); + } +#endif + + for (q = p; *q && *q != ' ' && *q != '\t' && *q != '\n'; q++) + ; + if (p == q) + return 0; + + *q = '\0'; + + /* Allocate a command structure. */ + cmd = boot_script_malloc (sizeof (struct cmd)); + if (! cmd) + return BOOT_SCRIPT_NOMEM; + memset (cmd, 0, sizeof (struct cmd)); + cmd->hook = hook; + cmd->path = p; + p = q + 1; + + for (arg = 0;;) + { + if (! arg) + { + /* Skip whitespace. */ + while (*p == ' ' || *p == '\t') + p++; + + /* End of command line. */ + if (! *p || *p == '\n') + { + /* Add command to list. */ + if (add_list (cmd, (void ***) &cmds, + &cmds_alloc, &cmds_index, 10)) + { + error = BOOT_SCRIPT_NOMEM; + goto bad; + } + return 0; + } + } + + /* Look for a symbol. */ + if (arg || (*p == '$' && (*(p + 1) == '{' || *(p + 1) == '('))) + { + char end_char = (*(p + 1) == '{') ? '}' : ')'; + struct sym *sym = 0; + + for (p += 2;;) + { + char c; + unsigned i; + int type; + long val; + struct sym *s; + + /* Parse symbol name. */ + for (q = p; *q && *q != '\n' && *q != end_char && *q != '='; q++) + ; + if (p == q || ! *q || *q == '\n' + || (end_char == '}' && *q != '}')) + { + error = BOOT_SCRIPT_SYNTAX_ERROR; + goto bad; + } + c = *q; + *q = '\0'; + + /* See if this is a builtin symbol. */ + for (i = 0; i < NUM_BUILTIN; i++) + if (! strcmp (p, builtin_symbols[i].name)) + break; + + if (i < NUM_BUILTIN) + s = &builtin_symbols[i]; + else + { + /* Look up symbol in symbol table. + If no entry exists, create one. */ + s = sym_lookup (p); + if (! s) + { + s = sym_enter (p); + if (! s) + { + error = BOOT_SCRIPT_NOMEM; + goto bad; + } + } + } + + /* Only values are allowed in ${...} constructs. */ + if (end_char == '}' && s->type == VAL_FUNC) + return BOOT_SCRIPT_INVALID_SYM; + + /* Check that assignment is valid. */ + if (c == '=' && s->type == VAL_FUNC) + { + error = BOOT_SCRIPT_INVALID_ASG; + goto bad; + } + + /* For function symbols, execute the function. */ + if (s->type == VAL_FUNC) + { + if (! s->run_on_exec) + { + (error + = ((*((int (*) (struct cmd *, long *)) s->val)) + (cmd, &val))); + if (error) + goto bad; + type = s->ret_type; + } + else + { + if (add_list (s, (void ***) &cmd->exec_funcs, + &cmd->exec_funcs_alloc, + &cmd->exec_funcs_index, 5)) + { + error = BOOT_SCRIPT_NOMEM; + goto bad; + } + type = VAL_NONE; + goto out; + } + } + else if (s->type == VAL_NONE) + { + type = VAL_SYM; + val = (long) s; + } + else + { + type = s->type; + val = s->val; + } + + if (sym) + { + sym->type = type; + sym->val = val; + } + else if (arg) + { + arg->type = type; + arg->val = val; + } + + out: + p = q + 1; + if (c == end_char) + { + /* Create an argument if necessary. + We create an argument if the symbol appears + in the expression by itself. + + NOTE: This is temporary till the boot filesystem + servers support arguments. When that happens, + symbol values will only be printed if they're + associated with an argument. */ + if (! arg && end_char == '}') + { + if (! add_arg (cmd, 0, type, val)) + { + error = BOOT_SCRIPT_NOMEM; + goto bad; + } + } + arg = 0; + break; + } + if (s->type != VAL_FUNC) + sym = s; + } + } + else + { + char c; + + /* Command argument; just copy the text. */ + for (q = p;; q++) + { + if (! *q || *q == ' ' || *q == '\t' || *q == '\n') + break; + if (*q == '$' && *(q + 1) == '{') + break; + } + c = *q; + *q = '\0'; + + /* Add argument to list. */ + arg = add_arg (cmd, p, VAL_NONE, 0); + if (! 
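To make the two substitution forms concrete, here is a hedged sketch of feeding one hypothetical script line to the parser. ${name} attaches a variable's value to an argument (and, standing alone, becomes an argument of its own), while $(name) invokes a builtin: task-create runs immediately during parsing, whereas run_on_exec builtins such as task-resume are queued on cmd->exec_funcs and run from boot_script_exec. The script line, example_line and example_parse are illustrative names, not part of the patch; any ${...} variable used this way must be defined with boot_script_set_variable before boot_script_exec is called.

/* Sketch only: parsing one hypothetical command line.  The buffer must be
   writable and must stay alive until boot_script_exec has run, because the
   parser keeps pointers into it and overwrites separators with '\0'.  */
static char example_line[] =
  "/hurd/ext2fs.static --host-priv-port=${host-port} ${root} "
  "$(task-create) $(task-resume)";

static void
example_parse (void)
{
  int err = boot_script_parse_line (0, example_line);	/* no hook */

  if (err)
    printf ("boot script: %s\n", boot_script_error_string (err));
}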
arg) + { + error = BOOT_SCRIPT_NOMEM; + goto bad; + } + if (c == '$') + p = q; + else + { + if (c) + p = q + 1; + else + p = q; + arg = 0; + } + } + } + + + bad: + free_cmd (cmd, 1); + cleanup (1); + return error; +} + +/* Ensure that the command line buffer can accommodate LEN bytes of space. */ +#define CHECK_CMDLINE_LEN(len) \ +{ \ + if (cmdline_alloc - cmdline_index < len) \ + { \ + char *ptr; \ + int alloc, i; \ + alloc = cmdline_alloc + len - (cmdline_alloc - cmdline_index) + 100; \ + ptr = boot_script_malloc (alloc); \ + if (! ptr) \ + { \ + error = BOOT_SCRIPT_NOMEM; \ + goto done; \ + } \ + memcpy (ptr, cmdline, cmdline_index); \ + for (i = 0; i < argc; ++i) \ + argv[i] = ptr + (argv[i] - cmdline); \ + boot_script_free (cmdline, cmdline_alloc); \ + cmdline = ptr; \ + cmdline_alloc = alloc; \ + } \ +} + +/* Execute commands previously parsed. */ +int +boot_script_exec (void) +{ + int cmd_index; + + for (cmd_index = 0; cmd_index < cmds_index; cmd_index++) + { + char **argv, *cmdline; + int i, argc, cmdline_alloc; + int cmdline_index, error, arg_index; + struct cmd *cmd = cmds[cmd_index]; + + /* Skip command if it doesn't have an associated task. */ + if (cmd->task == 0) + continue; + + /* Allocate a command line and copy command name. */ + cmdline_index = strlen (cmd->path) + 1; + cmdline_alloc = cmdline_index + 100; + cmdline = boot_script_malloc (cmdline_alloc); + if (! cmdline) + { + cleanup (1); + return BOOT_SCRIPT_NOMEM; + } + memcpy (cmdline, cmd->path, cmdline_index); + + /* Allocate argument vector. */ + argv = boot_script_malloc (sizeof (char *) * (cmd->args_index + 2)); + if (! argv) + { + boot_script_free (cmdline, cmdline_alloc); + cleanup (1); + return BOOT_SCRIPT_NOMEM; + } + argv[0] = cmdline; + argc = 1; + + /* Build arguments. */ + for (arg_index = 0; arg_index < cmd->args_index; arg_index++) + { + struct arg *arg = cmd->args[arg_index]; + + /* Copy argument text. */ + if (arg->text) + { + int len = strlen (arg->text); + + if (arg->type == VAL_NONE) + len++; + CHECK_CMDLINE_LEN (len); + memcpy (cmdline + cmdline_index, arg->text, len); + argv[argc++] = &cmdline[cmdline_index]; + cmdline_index += len; + } + + /* Add value of any symbol associated with this argument. */ + if (arg->type != VAL_NONE) + { + char *p, buf[50]; + int len; + mach_port_name_t name; + + if (arg->type == VAL_SYM) + { + struct sym *sym = (struct sym *) arg->val; + + /* Resolve symbol value. */ + while (sym->type == VAL_SYM) + sym = (struct sym *) sym->val; + if (sym->type == VAL_NONE) + { + error = BOOT_SCRIPT_UNDEF_SYM; + printf("bootstrap script missing symbol '%s'\n", sym->name); + goto done; + } + arg->type = sym->type; + arg->val = sym->val; + } + + /* Print argument value. */ + switch (arg->type) + { + case VAL_STR: + p = (char *) arg->val; + len = strlen (p); + break; + + case VAL_TASK: + case VAL_PORT: + if (arg->type == VAL_TASK) + /* Insert send right to task port. */ + error = boot_script_insert_task_port + (cmd, (task_t) arg->val, &name); + else + /* Insert send right. */ + error = boot_script_insert_right (cmd, + (mach_port_t) arg->val, + &name); + if (error) + goto done; + + i = name; + p = buf + sizeof (buf); + len = 0; + do + { + *--p = i % 10 + '0'; + len++; + } + while (i /= 10); + break; + + default: + error = BOOT_SCRIPT_BAD_TYPE; + goto done; + } + len++; + CHECK_CMDLINE_LEN (len); + memcpy (cmdline + cmdline_index, p, len - 1); + *(cmdline + cmdline_index + len - 1) = '\0'; + if (! 
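The "missing symbol" diagnostic above fires when a script expands a ${...} variable that nothing ever defined. The following is a hedged sketch of how a caller is expected to supply those definitions before running the script; example_define_variables, example_host_port and the "device:hd0s1" string are placeholders rather than the patch's actual bootstrap code, VAL_PORT and VAL_STR are the value types declared in boot_script.h, and panic stands in for whatever fatal-error handling the caller prefers.

/* Sketch only: defining the variables consumed by ${...} expansions and
   then executing the parsed commands.  */
static void
example_define_variables (mach_port_t example_host_port)
{
  int err;

  if (boot_script_set_variable ("host-port", VAL_PORT,
				(long) example_host_port)
      || boot_script_set_variable ("root", VAL_STR, (long) "device:hd0s1"))
    panic ("boot script: cannot set variables");

  /* With the variables in place, run everything parsed so far.  */
  err = boot_script_exec ();
  if (err)
    panic ("boot script: %s", boot_script_error_string (err));
}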
arg->text) + argv[argc++] = &cmdline[cmdline_index]; + cmdline_index += len; + } + } + + /* Terminate argument vector. */ + argv[argc] = 0; + + /* Execute the command. */ + if (boot_script_exec_cmd (cmd->hook, cmd->task, cmd->path, + argc, argv, cmdline, cmdline_index)) + { + error = BOOT_SCRIPT_EXEC_ERROR; + goto done; + } + + error = 0; + + done: + boot_script_free (cmdline, cmdline_alloc); + boot_script_free (argv, sizeof (char *) * (cmd->args_index + 2)); + if (error) + { + cleanup (1); + return error; + } + } + + for (cmd_index = 0; cmd_index < cmds_index; cmd_index++) + { + int i; + struct cmd *cmd = cmds[cmd_index]; + + /* Execute functions that want to be run on exec. */ + for (i = 0; i < cmd->exec_funcs_index; i++) + { + struct sym *sym = cmd->exec_funcs[i]; + int error = ((*((int (*) (struct cmd *, int *)) sym->val)) + (cmd, 0)); + if (error) + { + cleanup (1); + return error; + } + } + } + + cleanup (0); + return 0; +} + +/* Create an entry for the variable NAME with TYPE and value VAL, + in the symbol table. */ +int +boot_script_set_variable (const char *name, int type, long val) +{ + struct sym *sym = sym_enter (name); + + if (sym) + { + sym->type = type; + sym->val = val; + } + return sym ? 0 : 1; +} + + +/* Define the function NAME, which will return type RET_TYPE. */ +int +boot_script_define_function (const char *name, int ret_type, + int (*func) (const struct cmd *cmd, int *val)) +{ + struct sym *sym = sym_enter (name); + + if (sym) + { + sym->type = VAL_FUNC; + sym->val = (long) func; + sym->ret_type = ret_type; + sym->run_on_exec = ret_type == VAL_NONE; + } + return sym ? 0 : 1; +} + + +/* Return a string describing ERR. */ +char * +boot_script_error_string (int err) +{ + switch (err) + { + case BOOT_SCRIPT_NOMEM: + return "no memory"; + + case BOOT_SCRIPT_SYNTAX_ERROR: + return "syntax error"; + + case BOOT_SCRIPT_INVALID_ASG: + return "invalid variable in assignment"; + + case BOOT_SCRIPT_MACH_ERROR: + return "mach error"; + + case BOOT_SCRIPT_UNDEF_SYM: + return "undefined symbol"; + + case BOOT_SCRIPT_EXEC_ERROR: + return "exec error"; + + case BOOT_SCRIPT_INVALID_SYM: + return "invalid variable in expression"; + + case BOOT_SCRIPT_BAD_TYPE: + return "invalid value type"; + } + return 0; +} + +#ifdef BOOT_SCRIPT_TEST +#include + +int +boot_script_exec_cmd (void *hook, + mach_port_t task, char *path, int argc, + char **argv, char *strings, int stringlen) +{ + int i; + + printf ("port = %d: ", (int) task); + for (i = 0; i < argc; i++) + printf ("%s ", argv[i]); + printf ("\n"); + return 0; +} + +void +main (int argc, char **argv) +{ + char buf[500], *p; + int len; + FILE *fp; + mach_port_name_t host_port, device_port; + + if (argc < 2) + { + fprintf (stderr, "Usage: %s