kernel-win4lin-2.4.25.5mdk-3-7mdk.src.rpm

diff -Naur mki-adapter-old/arch/i386/Makefile mki-adapter-new/arch/i386/Makefile
--- mki-adapter-old/arch/i386/Makefile	2002-10-27 14:49:30.000000000 -0800
+++ mki-adapter-new/arch/i386/Makefile	2002-11-13 12:56:47.000000000 -0800
@@ -0,0 +1,5 @@
+# Added by mki-adapter patch:
+ifdef CONFIG_MKI
+SUBDIRS += arch/i386/mki-adapter
+MOD_SUB_DIRS += arch/i386/mki-adapter
+endif
diff -Naur mki-adapter-old/arch/i386/mki-adapter/get-version.c mki-adapter-new/arch/i386/mki-adapter/get-version.c
--- mki-adapter-old/arch/i386/mki-adapter/get-version.c	1969-12-31 16:00:00.000000000 -0800
+++ mki-adapter-new/arch/i386/mki-adapter/get-version.c	2003-09-08 11:26:24.000000000 -0700
@@ -0,0 +1,22 @@
+/*
+ ****************************************************************************
+ * Copyright 2001 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: get-version.c,v 1.3 2001/08/15 23:29:38 rlawrence Exp $
+ ***************************************************************************
+ * get-version.c - simple prog to print the UTS_RELEASE string that
+ * /usr/include/linux/version.h defines
+ ***************************************************************************
+ */
+
+#include <stdio.h>
+#include <linux/version.h>
+
+int main()
+{
+	printf(UTS_RELEASE);
+	return 0;
+}
+
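The Makefile below uses get-version to compare the kernel release compiled
into the headers against the running kernel (see its check-version target).
A hypothetical standalone sketch of the same check, assuming a 2.4-era
<linux/version.h> that defines UTS_RELEASE as a string literal:

	#include <stdio.h>
	#include <string.h>
	#include <sys/utsname.h>
	#include <linux/version.h>	/* assumed to define UTS_RELEASE */

	int main(void)
	{
		struct utsname un;

		if (uname(&un) != 0) {
			perror("uname");
			return 2;
		}
		if (strcmp(un.release, UTS_RELEASE) != 0) {
			fprintf(stderr, "headers say %s, running %s\n",
			    UTS_RELEASE, un.release);
			return 1;
		}
		return 0;
	}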
diff -Naur mki-adapter-old/arch/i386/mki-adapter/LICENSE mki-adapter-new/arch/i386/mki-adapter/LICENSE
--- mki-adapter-old/arch/i386/mki-adapter/LICENSE	1969-12-31 16:00:00.000000000 -0800
+++ mki-adapter-new/arch/i386/mki-adapter/LICENSE	2003-09-08 11:26:24.000000000 -0700
@@ -0,0 +1,340 @@
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+                       59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+	    How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) 19yy  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) 19yy name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff -Naur mki-adapter-old/arch/i386/mki-adapter/Makefile mki-adapter-new/arch/i386/mki-adapter/Makefile
--- mki-adapter-old/arch/i386/mki-adapter/Makefile	1969-12-31 16:00:00.000000000 -0800
+++ mki-adapter-new/arch/i386/mki-adapter/Makefile	2003-09-08 11:32:32.000000000 -0700
@@ -0,0 +1,233 @@
+##############################################################################
+# Copyright 2001 by NeTraverse, Inc.
+# This software is distributed under the terms of the GPL
+# which is supplied in the LICENSE file with this distribution
+##############################################################################
+# $Id: Makefile,v 1.11 2003/02/13 23:23:52 rwb Exp $
+##############################################################################
+# This file will live inside or outside of the kernel tree, and should 
+# "do the right thing" in either case. 
+##############################################################################
+
+ifneq ($(TOPDIR),)  # we are in the kernel tree
+
+MKI_OBJ_DIR=
+
+#if called from the kernel, use the TOPDIR as KERN_SRC
+KERN_SRC=$(TOPDIR)
+
+faketarget: 
+
+# required targets to live in the kernel tree:
+modules: mki-adapter.o
+	rm -f $(TOPDIR)/modules/mki-adapter.o
+	@ln -sf ../arch/i386/mki-adapter/mki-adapter.o $(KERN_SRC)/modules/. || echo "no symlink for 2.4"
+
+fastdep: 
+	@echo "no deps needed"
+
+VERSION=$(shell grep '^VERSION' $(TOPDIR)/Makefile|\
+	sed -e's/VERSION *= *//g'|sed -e's/ //g')
+PATCHLEVEL=$(shell grep '^PATCHLEVEL' $(TOPDIR)/Makefile|\
+	sed -e's/PATCHLEVEL *= *//g'|sed -e's/ //g')
+
+else # we are not called from the kernel tree, so set everything up.
+############################################################################
+# OVERRIDE THIS HERE OR ON THE MAKE COMMAND LINE TO GET THE RIGHT KERNEL
+# SOURCES FOR YOUR NETRAVERSE KERNEL.  If the patch is applied to a kernel
+# tree then this default should work.
+############################################################################
+ifeq ($(KERN_SRC),)
+KERN_SRC=../../..
+endif
+
+CC=gcc
+LD=ld
+
+.PHONY: mki-all
+mki-all: all-mki-targets
+
+export KERN_SRC
+	
+Makefile.kernel: makefile-made
+	sed -e 's:^include :include \$$\(TOPDIR\)/:'<$(KERN_SRC)/Makefile | \
+	sed -e "s:^TOPDIR.*:TOPDIR=$(KERN_SRC):" | \
+	sed -e "s/^clean:/old-clean:/" >Makefile.kernel
+
+makefile-made:
+	touch makefile-made
+
+# OK, get the config included; we don't really care about deps.  What we
+# are after here is just the CFLAGS.
+ifeq ($(KERN_SRC)/.config, $(wildcard $(KERN_SRC)/.config))
+include $(KERN_SRC)/.config
+endif
+
+include Makefile.kernel # rule to create it is above...
+
+MKI_OBJ_DIR=$(KERNELRELEASE)/
+
+endif # end of set up based on kernel/non-kernel location
+
+prefix=
+STD_INC=/usr/include
+
+KERNVER=$(VERSION)$(PATCHLEVEL)
+
+ifeq ($(KERN_INC),)
+KERN_INC=$(KERN_SRC)/include
+endif
+
+# Test for "new" version 3 mki
+MKI=$(shell if ! grep -q "MKI_HOOK_TABLE_EXISTS" $(KERN_INC)/asm*/mki.h ; then echo mki ; fi)
+# if so, then use the mkinn.c instead of the mkimki.c version
+ifeq ($(MKI),mki)
+	MKIMOD=mki$(MKI)
+else
+	MKIMOD=mki$(KERNVER)
+endif
+
+REGCFLAGS=-I. -I$(KERN_INC) -I$(STD_INC) -Wall -Wno-format -O2 -pipe -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -m486
+
+MKICFLAGS = -I. -I$(KERN_INC)
+# get rid of -Wstrict-prototypes, because it is a problem for the network structures
+MKICFLAGS += $(shell echo $(CFLAGS)|sed -e's/-Wstrict-prototypes//g')
+
+ifdef CONFIG_SMP
+MKICFLAGS += -D__SMP__
+endif
+
+MKI_FOUND_VMA_PATCH=$(shell if grep -q 'remap_page_range.*vm_area_struct' \
+    $(KERN_INC)/linux/mm.h ; then echo -DMKI_FOUND_VMA_PATCH ; fi)
+
+MKICFLAGS += $(MKI_FOUND_VMA_PATCH)
+
+TARGETS=$(MKI_OBJ_DIR)mki-adapter.o rmmakefile
+
+MOD_OBJS=$(MKI_OBJ_DIR)mki-main.o \
+	$(MKI_OBJ_DIR)mkivnet.o \
+	$(MKI_OBJ_DIR)timer.o \
+	$(MKI_OBJ_DIR)mki.o 
+
+.PHONY: all-mki-targets
+all-mki-targets: check-version $(TARGETS)
+
+force: $(TARGETS)
+
+$(MKI_OBJ_DIR)mki-adapter.o: $(MKI_OBJ_DIR) $(MOD_OBJS)
+	$(LD) -r $(LDFLAGS) -o $@ $(MOD_OBJS)
+
+.PHONY: rmmakefile
+rmmakefile:
+	rm -f makefile-made
+
+modules_install:
+	mkdir -p $(MODLIB)/kernel/arch/i386/mki-adapter
+	cp mki-adapter.o $(MODLIB)/kernel/arch/i386/mki-adapter/mki-adapter.o
+
+local_modules_install:
+	mkdir -p $(MODLIB)/kernel/arch/i386/mki-adapter
+	cp $(MKI_OBJ_DIR)mki-adapter.o $(MODLIB)/kernel/arch/i386/mki-adapter/mki-adapter.o
+
+$(MKI_OBJ_DIR):
+	mkdir -p $(MKI_OBJ_DIR)
+
+$(MKI_OBJ_DIR)mki-main.o: mki-main.c mki-adapter.h mkifunc.h
+	$(CC) $(MKICFLAGS) $(MODFLAGS) -c mki-main.c -o $@
+
+$(MKI_OBJ_DIR)mki.o: $(MKIMOD).c mkifunc.h mki-adapter.h $(KERN_INC)/asm-i386/mki.h
+	$(CC) $(MKICFLAGS) $(MODFLAGS) -c $(MKIMOD).c -o $@
+
+$(MKI_OBJ_DIR)timer.o: timer.c mkifunc.h mki-adapter.h
+	$(CC) $(MKICFLAGS) $(MODFLAGS) -c timer.c -o $@
+
+$(MKI_OBJ_DIR)mkivnet.o: mkivnet.c mkifunc.h mki-adapter.h vnetint-pub.h vneteth.h
+	$(CC) $(MKICFLAGS) $(MODFLAGS) -c mkivnet.c -o $@
+
+get-version: get-version.c
+	$(CC) $(REGCFLAGS) -g -o $@ get-version.c
+
+check-version: version.header version.running
+	@cmp version.header version.running >/dev/null 2>&1 ; \
+	if [ $$? -ne 0 ] ; then \
+	  echo ; \
+	  echo "WARNING - The kernel version you are running:" ; \
+	  echo `cat version.running` ; \
+	  echo "does not match the version of the headers you are using:" ; \
+	  echo `cat version.header` ; \
+	  echo ; \
+	  echo "If you are sure you know what you are doing, just run \"make force\" "; \
+	  echo "but you probably want to use KERN_INC=/path/to/linux/include "; \
+	  echo "after your make target (i.e. make install KERN_INC=/usr/src/linux/include )"; \
+	  echo ; \
+	  rm -f version.header version.running ; \
+	  exit 1 ;\
+	fi
+	rm -f version.header version.running
+
+version.header: get-version
+	@echo `./get-version` > version.header
+
+version.running:
+	@uname -r > version.running
+
+
+clean:
+	rm -f $(TARGETS) $(MOD_OBJS) get-version *.o *~ core ID tags TAGS *.tar.gz version.* Makefile.kernel makefile-made
+
+install_mki: mki-adapter.o get-version
+	mkdir -p $(prefix)/lib/modules/`./get-version`/kernel/arch/i386/kernel
+	mkdir -p $(prefix)/lib/modules/`./get-version`/misc
+	install -m 555 mki-adapter.o $(prefix)/lib/modules/`./get-version`/kernel/arch/i386/kernel
+	ln -s ../kernel/arch/i386/kernel/mki-adapter.o $(prefix)/lib/modules/`./get-version`/misc/.
+	ln -s ./`./get-version`/kernel/arch/i386/kernel/mki-adapter.o $(prefix)/lib/modules/.
+
+install_headers: 
+	mkdir -p $(prefix)/usr/include/mki-adapter
+	install -m 666 mkifunc.h $(prefix)/usr/include/mki-adapter/mkifunc.h
+	install -m 666 mki-adapter.h $(prefix)/usr/include/mki-adapter/mki-adapter.h
+	install -m 666 vnetint-pub.h $(prefix)/usr/include/mki-adapter/vnetint-pub.h
+	install -m 666 vneteth.h $(prefix)/usr/include/mki-adapter/vneteth.h
+
+#
+# these rules will create a patch file for a particular kernel tree that 
+# already has an mki version 2 or greater patch applied to it.
+#
+
+DIFFDIR=$(HOME)/tmp
+
+DISTFILES=README LICENSE get-version.c vneteth.h vnetint-pub.h Makefile mkifunc.h mkivnet.c timer.c mki-adapter.h mki-main.c mki22.c mki24.c mkimki.c
+
+patch:
+	mkdir -p $(DIFFDIR)/kernelold/arch/i386
+	mkdir -p $(DIFFDIR)/kernelnew/arch/i386/mki-adapter
+	@if grep '^MOD_SUB_DIRS += arch/i386/mki-adapter' \
+		$(KERN_SRC)/arch/i386/Makefile >/dev/null 2>&1 ; \
+	then \
+		echo "" ; \
+		echo "The mki-adapter is already in this kernel tree" ; \
+		echo "Please point to a kernel tree that does not" ; \
+		echo "contain the adapter to generate a patchfile" ; \
+		echo "" ; \
+		exit 1 ; \
+	else \
+		cp $(KERN_SRC)/arch/i386/Makefile \
+			$(DIFFDIR)/kernelold/arch/i386 ;\
+		echo "# Added by mki-adapter patch:" \
+			>$(DIFFDIR)/kernelnew/arch/i386/Makefile ; \
+		echo "ifdef CONFIG_MKI" \
+			>>$(DIFFDIR)/kernelnew/arch/i386/Makefile ; \
+		echo "SUBDIRS += arch/i386/mki-adapter" \
+			>>$(DIFFDIR)/kernelnew/arch/i386/Makefile ; \
+		echo "MOD_SUB_DIRS += arch/i386/mki-adapter" \
+			>>$(DIFFDIR)/kernelnew/arch/i386/Makefile ; \
+		echo "endif" \
+			>>$(DIFFDIR)/kernelnew/arch/i386/Makefile ; \
+		cat $(KERN_SRC)/arch/i386/Makefile \
+		>> $(DIFFDIR)/kernelnew/arch/i386/Makefile ; \
+	fi
+	cp -R $(DISTFILES) $(DIFFDIR)/kernelnew/arch/i386/mki-adapter/.
+	@(cd $(DIFFDIR);diff -ruN kernelold kernelnew) >mki-adapter.patch \
+		|| echo "mki-adapter.patch created"
+	rm -fr $(DIFFDIR)/kernelold $(DIFFDIR)/kernelnew
+
diff -Naur mki-adapter-old/arch/i386/mki-adapter/mki22.c mki-adapter-new/arch/i386/mki-adapter/mki22.c
--- mki-adapter-old/arch/i386/mki-adapter/mki22.c	1969-12-31 16:00:00.000000000 -0800
+++ mki-adapter-new/arch/i386/mki-adapter/mki22.c	2003-09-08 11:26:25.000000000 -0700
@@ -0,0 +1,1613 @@
+/*
+ ****************************************************************************
+ * Copyright 2001 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: mki22.c,v 1.7 2003/06/20 03:51:48 rwb Exp $
+ ***************************************************************************
+ */
+/*
+ * Implementation of the Linux-specific Merge Kernel Interface (mki).
+ */
+
+#include <linux/config.h>
+#include <linux/modversions.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <linux/smp_lock.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/file.h>
+#include <linux/swap.h>
+
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/desc.h>
+#include <asm/softirq.h>
+#include <asm/hardirq.h>
+#include <asm/uaccess.h>
+#include <asm/ldt.h>
+#include <asm/desc.h>
+#include <asm/mman.h>
+#include <asm/io.h>
+
+#include <asm/mki.h>
+#include <mkifunc.h>
+
+#define DBG(a)
+//#define DBG(a)	printk a
+
+/*
+ * merge function offset into the hook functions table.
+ */
+#define SWITCH_AWAY   0
+#define SWITCH_TO     1
+#define THREAD_EXIT   2
+#define RET_USER      3
+#define SIGNAL	4
+#define QUERY	 5
+#define SWAP_PAGES    6
+#define NUM_HOOKS     7
+
+/* Additional mmap*() routine related constants */
+#define PROT_USER	 0x08 	/* page is USER accessible */
+#define PAGESIZE    PAGE_SIZE	/* compatibility */
+
+extern int filemap_swapout(struct vm_area_struct * vma, struct page * page);
+
+/*
+ * This is the address where the MKI expects the Linux kernel to live.
+ * If someone makes a kernel with PAGE_OFFSET at a different address,
+ * then we probably won't work, because the Windows address space
+ * is very tight as it is!
+ */
+#if __PAGE_OFFSET != MKI_END_USER_ADDR
+	#error MKI will not work if __PAGE_OFFSET is not MKI_END_USER_ADDR
+#endif
+
+atomic_t mkia_context_rss = ATOMIC_INIT(0);
+
+#define MKI_FC_SIZE	6
+#define FC_FAULT_EIP	0
+#define FC_SAVE_FS	1
+
+typedef struct {
+	int mkifc_catching_user_fault; /* Boolean */
+	int mkifc_os_dependent[MKI_FC_SIZE]; /* OS dependent state */
+} mkia_fault_catch_t;
+
+/* Performance/debugging counters */
+unsigned int mkia_cnt_rpr_pagereserved = 0;
+unsigned int mkia_cnt_rpr_not_dirty_acc = 0;
+unsigned int mkia_cnt_rpr_not_context_transfer = 0;
+unsigned int mkia_cnt_rpr_swapout = 0;
+
+/*
+ * Linux does not have a constant for the task struct size.  This
+ * number should match what is encoded in get_current() found in
+ * include/asm-i386/current.h
+ */
+#define LINUX_TASK_STRUCT_SIZE	(8192)
+
+/*
+ * Used in the implementation of the MKI functions
+ */
+void mkia_assert_failure(char *exprstr, char *filename, int linenum);
+
+#define	MKIA_ASSERT(expression) do { \
+	if (!(expression)) \
+		mkia_assert_failure(#expression, __FILE__, __LINE__); \
+} while (0)
+
+
+/*
+ * Global data.  Could be static except that we want it visible
+ * for debugging.  Note a given element of mkia_task_table[] only
+ * gets modified by the owning task (i.e. "current"), and thus
+ * needs no MP locking.  The same applies for mkia_task_flags[].
+ */
+struct mkia_task_info {
+	void *mti_vm86p;
+	void *mti_save_ldtp;
+	void *mti_merge_gdtp;
+	size_t mti_current_ldt_limit;
+} mkia_task_table[NR_TASKS];
+
+unsigned char mkia_task_flags[NR_TASKS];
+
+#define MKIF_MARKED		0x01
+#define MKIF_SETLDT_DONE	0x02
+#define MKIF_SETGDT_DONE	0x04
+
+/*
+ * mkia_event_pending can be set out of an interrupt routine which
+ * is why we've separated it out from mkia_task_flags.  Since all
+ * modifications are via direct assignment (rather than read-modified
+ * write as would be a bit set), we don't need a BUS-LOCK prefix.
+ * Its test in mhia_ret_user() constitutes a fair race.
+ */
+unsigned char mkia_event_pending[NR_TASKS];
+
+
+/* mkia_calling_version: the version of the user of the MKI */
+unsigned long mkia_calling_version;
+
+struct idt_gdt_desc_struct {
+	unsigned short limit;
+	struct desc_struct __attribute__((packed)) *table;
+	unsigned short pad;	/* Align for stack variables */
+};
+
+/*
+ * Note: we use the Linux kernel_lock to protect mkia_idt and mkia_idt_rw
+ * for SMP.
+ */
+struct idt_gdt_desc_struct mkia_idt_ = {(256 * 8) - 1, 0};
+#define mkia_idt mkia_idt_.table
+
+/* The next one might as well go into bss */
+static struct desc_struct * mkia_idt_rw;
+
+/* Forward function declarations */
+static inline void mkia_cleanup_idt(void);
+static inline void mkia_cleanup_gdt(void);
+void _caller_not_smp_safe(char *);
+
+static int
+mhia_void(void *parm)
+{
+	return -1;
+}
+
+int (*mhia_table[])(void *) = {
+	&mhia_void,	     /* SWITCH_AWAY */
+	&mhia_void,	     /* SWITCH_TO */
+	&mhia_void,	     /* THREAD_EXIT */
+	&mhia_void,	     /* RET_USER */
+	&mhia_void,	     /* SIGNAL */
+	&mhia_void,	     /* QUERY */
+	&mhia_void,	     /* SWAP_PAGES */
+};
+
+/*
+ * Lock to protect mhia_table.  In reality this is not needed because
+ * the hooks only get modified on driver load/unload and the OS should
+ * prevent concurrency.  However, we do the locks just to be safe, since
+ * this is not a frequently occurring situation (the drivers are rarely
+ * unloaded).
+ */
+spinlock_t mkia_hook_table_lock = SPIN_LOCK_UNLOCKED;
+
+int
+mkia_install_hook(int id, int (*hook_fn)(void *))
+{
+	if ((id >= 0) && (id < NUM_HOOKS)) {
+		spin_lock(&mkia_hook_table_lock);
+		mhia_table[id] = hook_fn;
+		spin_unlock(&mkia_hook_table_lock);
+		return 0;
+	}
+	return -1;
+}
+
+void
+mkia_remove_hook(int id)
+{
+	/* 
+	 * For now all the dummy hooks return the same value.
+	 * If we ever add hooks where mhia_void() is not appropriate
+	 * we need to change the code below to a switch() {} statement
+	 */
+	MKIA_ASSERT(mhia_table != NULL);
+	if ((id >= 0) && (id < NUM_HOOKS)) {
+		spin_lock(&mkia_hook_table_lock);
+		mhia_table[id] = mhia_void;
+		spin_unlock(&mkia_hook_table_lock);
+		if (id == SWITCH_TO) {
+			/*
+			 * If we are removing the SWITCH_TO hook, then
+			 * merge is unloading so clean up the IDT as well.
+			 */
+			mkia_cleanup_idt();
+		}
+	}
+}
+
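+/*
+ * Illustrative sketch, not part of the original patch: how a consumer
+ * driver might register one of the hooks above on load and remove it
+ * on unload.  The names my_switch_to/my_load/my_unload are
+ * hypothetical.
+ */
+#if 0	/* example only */
+static int my_switch_to(void *vm86p)
+{
+	/* called via mhia_switch() whenever a MARKED task is switched to */
+	return 0;
+}
+
+static int my_load(void)
+{
+	return mkia_install_hook(SWITCH_TO, my_switch_to);
+}
+
+static void my_unload(void)
+{
+	/* removing SWITCH_TO also triggers mkia_cleanup_idt() above */
+	mkia_remove_hook(SWITCH_TO);
+}
+#endif
+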
+void
+mhia_switch(struct task_struct *prev, struct task_struct *next)
+{
+	struct idt_gdt_desc_struct cur_desc;
+	int nr = prev->tarray_ptr - &task[0]; 
+
+	/* If previous task is MARKED, call the SWITCH_AWAY hook */
+	if (mkia_task_flags[nr] & MKIF_MARKED)  {
+		int zero = 0;
+		/* switch in linux gdt and idt */
+		asm volatile("movl %0, %%fs": : "m" (zero));
+		asm volatile("movl %0, %%gs": : "m" (zero));
+		asm volatile("lgdt %0": : "m" (gdt_descr));
+		asm volatile("lidt %0": : "m" (idt_descr));
+		(void) (*mhia_table[SWITCH_AWAY])(mkia_task_table[nr].mti_vm86p);
+	}
+
+	/* If the next task is MARKED, call the SWITCH_TO hook */
+	nr = next->tarray_ptr - &task[0]; 
+	if (mkia_task_flags[nr] & MKIF_MARKED)  {
+		/* switch in private gdt and idt */
+		if (mkia_task_flags[nr] & MKIF_SETGDT_DONE) {
+			asm volatile("sgdt %0": "=m" (cur_desc));
+			cur_desc.table = mkia_task_table[nr].mti_merge_gdtp;
+			asm volatile("lgdt %0": : "m" (cur_desc));
+		}
+		if ( mkia_idt != NULL ) {
+			asm volatile("lidt %0": : "m" (mkia_idt_.limit));
+		}
+
+		/*
+		 * The lldt in __switch_to (process.c), only occurs if
+		 * prev->mm->segments and next->mm->segments are different.
+		 * In our case, they may be the same, but may have different
+		 * limits, so we force a reload here.
+		 */
+		if (next->mm->segments)
+			load_ldt(nr);
+		(void) (*mhia_table[SWITCH_TO])(mkia_task_table[nr].mti_vm86p);
+	}
+}
+
+void
+mhia_exit(void)
+{
+	int nr = current->tarray_ptr - &task[0]; 
+
+	/* Call the EXIT hook for MARKED tasks */
+	if (mkia_task_flags[nr] & MKIF_MARKED)  {
+		(void) (*mhia_table[THREAD_EXIT])(mkia_task_table[nr].mti_vm86p);
+	}
+}
+
+/*
+ * mhia_ret_user
+ *
+ * This routine gets called just before the kernel is going to make
+ * a kernel mode to user mode transition.  If this is a MARKED task,
+ * and there is an event pending, we call the RET_USER hook.
+ */
+void
+mhia_ret_user(unsigned long *r0ptr)
+{
+	int nr = current->tarray_ptr - &task[0];
+
+	if (!(mkia_task_flags[nr] & MKIF_MARKED))
+		return;
+
+	if (current->sigpending) {
+		/*
+		 * We catch signals here so that the lower layer does
+		 * not try to do the Linux DOSEMU vm86 handling.  On
+		 * merge a signal in the VM86 process is always a reason
+		 * to exit.
+		 */
+		__sti();
+		do_exit(1);
+	}
+	if (mkia_event_pending[nr]) {
+		mkia_event_pending[nr] = 0;
+		(void) (*mhia_table[RET_USER])(r0ptr);
+	}
+}
+
+/*
+ * mhia_swap
+ */
+void
+mhia_swap(int priority, int gfp_mask)
+{
+	int hard_flag;
+
+	/*
+	 * A "HARD" swap means get rid of all mappings rather than
+	 * just aging them.
+	 */
+	hard_flag = (priority < 6);
+	(void) (*mhia_table[SWAP_PAGES])((void *) hard_flag);
+}
+
+
+/* mkia_pgvfree assumes PAGE_SIZE <= size <= 4M */
+/* mkia_pgvfree assumes size is multiple of PAGE_SIZE */
+void
+mkia_pgvfree(void * addr, unsigned long size)
+{
+	pgd_t * dir;
+	pmd_t * pmd;
+	pte_t * pte;
+	unsigned long end;
+	unsigned long address = (unsigned long) addr;
+
+	MKIA_ASSERT((size >= PAGE_SIZE) && (size < 0x400000) &&
+		((size & 0xfff) == 0));
+
+	dir = pgd_offset_k(address);
+	pmd = pmd_offset(dir, address);
+	pte = pte_offset(pmd, address);
+	address &= ~PGDIR_MASK;
+	end = address + size;
+	if (end > PGDIR_SIZE) {
+		end = PGDIR_SIZE;
+	}	
+
+	/* skip first page and just clear pte of others */
+
+	pte++;
+	address += PAGE_SIZE;
+	size -= PAGE_SIZE;
+	while (address < end) {
+		pte_clear(pte);
+		address += PAGE_SIZE;
+		size -= PAGE_SIZE;
+		pte++;
+	}
+	if (size) {
+		dir++;
+		pmd = pmd_offset(dir, address);
+		pte = pte_offset(pmd, address);
+		while (size) {
+			pte_clear(pte);
+			size -= PAGE_SIZE;
+			pte++;
+		}
+	}
+	vfree(addr);
+}
+
+
+void
+mkia_set_idt_dpl (void)
+{
+	int i;
+	struct desc_struct *p;
+
+	/*
+	 * Go make all IDT descriptors DPL 0.  Note that Merge
+	 * has special case code to enable Linux's "int 0x80"
+	 * system calls.
+	 */
+	lock_kernel();
+	p = mkia_idt_rw;
+	for (i = 0; i < 256; i++, p++) {
+		if ( p->a | p->b ) {
+			p->b &= ~(3 << 13);
+		}
+	}
+	unlock_kernel();
+}
+
+void
+mkia_set_idt_entry (unsigned short vect_num, unsigned long *new_entry,
+			unsigned long *prev_entry)
+{
+	lock_kernel();
+	if ( mkia_idt == NULL ) {
+		/* No private IDT yet. Make private copy of IDT.
+		 * For F00F bug systems allocate two pages. 
+		 * Make a ro version, which points to the rw.
+		 */
+		if (boot_cpu_data.f00f_bug) {
+			pte_t * pte;
+			pte_t * pte_rw;
+			unsigned long page;
+
+			mkia_idt_rw = vmalloc(2*PAGE_SIZE);
+			page = (unsigned long) mkia_idt_rw;
+			pte_rw = pte_offset(pmd_offset(pgd_offset(&init_mm,
+				page), page), page);
+			page += PAGE_SIZE;
+			mkia_idt = (struct desc_struct *) page;
+			pte = pte_offset(pmd_offset(pgd_offset(&init_mm,
+				page), page), page);
+			free_page(pte_page(*pte));
+			*pte = *pte_rw;
+			pte_modify(*pte, PAGE_KERNEL_RO);
+			flush_tlb_all();
+		}
+		else {
+			mkia_idt = mkia_idt_rw = vmalloc(PAGE_SIZE);
+		}
+		if ( mkia_idt != NULL ) {
+			memcpy(mkia_idt_rw, idt, PAGE_SIZE);
+			asm volatile("lidt %0": : "m" (mkia_idt_.limit));
+		}
+		else {
+			unlock_kernel();
+			return;
+		}
+	}
+	*(struct desc_struct *) prev_entry  = mkia_idt[vect_num];
+
+	/*
+	 * Other ports seem to get away without cli() around the
+	 * next instruction, so we'll do the same for Linux.
+	 */
+	mkia_idt_rw[vect_num] = *(struct desc_struct *) new_entry;
+	unlock_kernel();
+}
+
+
+/* Called from mkia_remove_hook */
+static inline void
+mkia_cleanup_idt( void )
+{
+	lock_kernel();
+	if (mkia_idt_rw != NULL) {
+		if (boot_cpu_data.f00f_bug) {
+			mkia_pgvfree(mkia_idt_rw, 2*PAGE_SIZE);
+		}
+		else {
+			vfree(mkia_idt_rw);
+		}
+		mkia_idt_rw = NULL;
+	}
+	mkia_idt = NULL;
+	unlock_kernel();
+}
+
+
+/*
+ * mkia_set_gdt_entry()
+ *
+ * This function is always called in task context.  Since the merge
+ * task is not multi-threaded, we don't need any locking since a task
+ * can only be running on one processor at a time.
+ */
+int 
+mkia_set_gdt_entry(unsigned short sel, unsigned long *new_entry)
+{
+
+	struct idt_gdt_desc_struct cur_desc;
+	int i, nr;
+	unsigned long newpage;
+	unsigned long origpage;
+	unsigned long size;
+	unsigned short tbllimit;
+	struct desc_struct *pgdt_table;
+	pgd_t * pgd;
+	pmd_t * pmd;
+	pte_t * pte;     
+
+	asm volatile("sgdt %0": "=m" (cur_desc));
+	tbllimit = cur_desc.limit;
+	if ( sel < 1 || sel > tbllimit ) {
+		return EINVAL;
+	}
+	nr = current->tarray_ptr - &task[0]; 
+	if ((pgdt_table = mkia_task_table[nr].mti_merge_gdtp) == NULL) {
+		/* this task does not have priv gdt yet */
+		size = PAGE_ALIGN(tbllimit + 1);
+		if ( !(pgdt_table = vmalloc(size))) {
+			return ENOMEM;
+		}
+
+		memset(pgdt_table, 0, size);
+
+		for (i = PAGE_SIZE; i < size; i += PAGE_SIZE) {
+			origpage = (unsigned long) gdt_table + i;
+			newpage = (unsigned long) pgdt_table + i;
+
+			pgd = pgd_offset(&init_mm, newpage);
+			pmd = pmd_offset(pgd, newpage);
+			pte = pte_offset(pmd, newpage);
+
+			free_page(pte_page(*pte));
+			*pte = mk_pte(origpage, PAGE_KERNEL);
+		}
+		flush_tlb_all();
+		mkia_task_flags[nr] |= MKIF_SETGDT_DONE;
+		mkia_task_table[nr].mti_merge_gdtp = pgdt_table;
+		cur_desc.table = pgdt_table;
+		asm volatile("lgdt %0": : "m" (cur_desc));
+	}
+	if (sel >= PAGE_SIZE) {
+		return EBUSY;
+	}
+
+	/* selector -> index */
+	i = sel >> 3;
+	pgdt_table[i] = *(struct desc_struct *) new_entry;
+	return 0;
+}
+
+/*
+ * mkia_cleanup_gdt()
+ *    needs to be called for each dead session
+ *
+ * This function is always called in task context so no locking is
+ * necessary.
+ */
+static inline void
+mkia_cleanup_gdt( void )
+{
+	struct idt_gdt_desc_struct cur_desc;
+	unsigned long size;
+	void *address;
+	int nr;
+
+	asm volatile("sgdt %0": "=m" (cur_desc));
+	size = PAGE_ALIGN(cur_desc.limit + 1);
+	nr = current->tarray_ptr - &task[0]; 
+	if ((address = mkia_task_table[nr].mti_merge_gdtp) != NULL) {
+		mkia_pgvfree(address, size);
+		mkia_task_table[nr].mti_merge_gdtp = NULL;
+	}
+}
+
+
+int
+_mkia_mem_lock_unlock(unsigned long start, unsigned long end, int lock)
+{
+	struct task_struct *tsk;
+	struct mm_struct *mm;
+	struct vm_area_struct * vma;
+	unsigned long curaddr;
+	unsigned long curend;
+	int pages;
+
+	tsk = current;
+	mm = tsk->mm;
+
+	/*
+	 * If we're in an interrupt or have no user
+	 * context, we must not take the fault..
+	 */
+	if (in_interrupt() || mm == &init_mm)
+		return -EBUSY;
+
+	DBG(("down in _mkia_mem_lock_unlock\n"));
+	down(&mm->mmap_sem);
+
+	/* Do page alignment */
+	start &= PAGE_MASK;
+	end = (end + ~PAGE_MASK) & PAGE_MASK;
+
+	/*
+	 * Make sure that there are no overlaps, i.e. that the
+	 * area we are talking about is completely filled in and
+	 * that a given vma does not span the area we want to lock,
+	 * since we don't support splitting any VMAs.
+	 */
+	curaddr = start;
+	while (curaddr < end) {
+		vma = find_vma(mm, curaddr);
+		if (!vma)
+			goto up_and_fail;
+		if (vma->vm_start != curaddr)
+			goto up_and_fail;
+		if (vma->vm_end > end)
+			goto up_and_fail;
+		curaddr = vma->vm_end;
+	}
+
+	/*
+	 * OK.  Now that we've done that, we can go ahead and lock (or unlock)
+	 * the pages.  If we are locking we just have to set VM_LOCKED and
+	 * then call make_pages_present to make sure the ptes get filled in.
+	 * VM_LOCKED will prevent swap_out_vma() from stealing the pages.
+	 * For unlock, we just have to clear VM_LOCKED.
+	 */
+	curaddr = start;
+	while (curaddr < end) {
+		vma = find_vma(mm, curaddr);
+		curend = vma->vm_end;
+		pages = (curend - curaddr) >> PAGE_SHIFT; 
+		if (lock) {
+			MKIA_ASSERT((vma->vm_flags & VM_LOCKED) == 0);
+			vma->vm_flags |= VM_LOCKED;
+			mm->locked_vm += pages;
+			make_pages_present(curaddr, curend);
+		} else {
+			MKIA_ASSERT(vma->vm_flags & VM_LOCKED);
+			vma->vm_flags &= ~VM_LOCKED;
+			MKIA_ASSERT(mm->locked_vm >= pages);
+			mm->locked_vm -= pages;
+		}
+		curaddr = curend;
+	}
+	up(&mm->mmap_sem);
+	DBG(("  up in _mkia_mem_lock_unlock\n"));
+	return 0;
+
+up_and_fail:
+	up(&mm->mmap_sem);
+	DBG(("  up in _mkia_mem_lock_unlock\n"));
+	printk("_mkia_mem_lock_unlock: bad params: "
+	    "start %lx, end %lx, lock %x\n", start, end, lock);
+	return -EINVAL;
+}
+
+
+/*
+ * mkia_set_ldt_entry()
+ *
+ * This function is always called in task context so no locking is
+ * necessary.
+ */
+int 
+mkia_set_ldt_entry(unsigned short sel, unsigned long *new_entry)
+{
+	struct mm_struct * mm = current->mm;
+	struct desc_struct *pldte;
+
+	if (!mm->segments) {
+		void * ldt;
+		int i = current->tarray_ptr - &task[0];
+
+		ldt = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
+		if (!ldt)
+			return ENOMEM;
+
+		memset(ldt, 0, LDT_ENTRIES*LDT_ENTRY_SIZE);
+		mm->segments = ldt;
+		mkia_task_table[i].mti_current_ldt_limit =
+			LDT_ENTRIES*LDT_ENTRY_SIZE - 1;
+		set_ldt_desc(i, ldt, LDT_ENTRIES);
+		current->tss.ldt = _LDT(i);
+		load_ldt(i);
+	}
+	pldte = (struct desc_struct *) ((char *) mm->segments + (sel & ~(0x7)));
+	*pldte = *(struct desc_struct *) new_entry;
+
+	return 0;
+}
+
+
+/*
+ * int
+ * mkia_set_private_ldt(void *ldtp, size_t limit)
+ *
+ * This function is always called in task context so no locking is
+ * necessary.
+ */
+int
+mkia_set_private_ldt(void *ldtp, size_t limit)
+{
+	unsigned long ldtaddr;
+	unsigned long endaddr;
+	int nr;
+
+	if (current->mm == NULL)
+		return EINVAL;
+
+	nr = current->tarray_ptr - &task[0]; 
+
+	if (! (mkia_task_flags[nr] & MKIF_SETLDT_DONE)) {
+		mkia_task_table[nr].mti_save_ldtp =
+			current->mm->segments;
+		mkia_task_flags[nr] |= MKIF_SETLDT_DONE;
+		if (ldtp == NULL)
+			return 0;
+	}
+	if (ldtp == NULL) {
+		/* NULL means restore the saved original LDT */
+		ldtp = mkia_task_table[nr].mti_save_ldtp;
+		limit = (LDT_ENTRIES * LDT_ENTRY_SIZE) - 1;
+	}
+
+	/* Unlock previous LDT */
+	ldtaddr = (unsigned long) current->mm->segments;
+	if (ldtaddr < MKI_END_USER_ADDR) {
+		endaddr = ldtaddr +
+		    mkia_task_table[nr].mti_current_ldt_limit + 1;
+		_mkia_mem_lock_unlock(ldtaddr, endaddr, 0 /* Unlock */);
+	}
+
+	/* Lock the new LDT */
+	ldtaddr = (unsigned long) ldtp;
+	if (ldtaddr < MKI_END_USER_ADDR) {
+		endaddr = ldtaddr + limit + 1;
+		_mkia_mem_lock_unlock(ldtaddr, endaddr, 1 /* Lock */);
+	}
+
+	current->mm->segments = ldtp;
+	mkia_task_table[nr].mti_current_ldt_limit = limit;
+	set_ldt_desc(nr, ldtp, (limit + 1) >> 3);
+	current->tss.ldt = _LDT(nr);
+	load_ldt(nr);
+	return 0;
+}
+
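+/*
+ * Illustrative sketch, not part of the original patch, of the calling
+ * convention above: a non-NULL pointer installs (and locks) a private
+ * LDT, and a later NULL call restores the Linux LDT saved on the first
+ * call.  my_run_with_ldt and its arguments are hypothetical.
+ */
+#if 0	/* example only */
+static void my_run_with_ldt(void *my_ldt, size_t my_limit)
+{
+	if (mkia_set_private_ldt(my_ldt, my_limit) != 0)
+		return;
+	/* ... run with the private LDT loaded ... */
+	mkia_set_private_ldt(NULL, 0);	/* NULL restores the saved LDT */
+}
+#endif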
+
+
+/*
+ * mkia_set_vm86p()
+ *    Set a per-task value to point to a vm86 structure
+ *
+ * This function is always called in task context so no locking is necessary.
+ */
+void
+mkia_set_vm86p(void *vm86p)
+{
+	int nr;
+
+	nr = current->tarray_ptr - &task[0]; 
+	mkia_task_table[nr].mti_vm86p = vm86p;
+	if (vm86p == NULL) {
+	    mkia_task_flags[nr] &= ~MKIF_SETLDT_DONE;
+	    mkia_task_table[nr].mti_save_ldtp = 0;
+	}
+}
+
+/*
+ * Get a per-task pointer to a vm86 structure
+ */
+void *
+mkia_get_vm86p()
+{
+	int nr;
+
+	nr = current->tarray_ptr - &task[0]; 
+	return mkia_task_table[nr].mti_vm86p;
+}
+
+/*
+ * mkia_privatep is a single global persistent pointer that the
+ * merge driver can use to stash something it wants to last across
+ * driver loads and unloads.
+ */
+void *mkia_privatep = NULL;
+
+/*
+ * void
+ * mkia_getparm(int request, void *)
+ *
+ * Get values needed by MERGE
+ */
+void
+mkia_getparm(int request, void *parm)
+{
+	switch (request) {
+	case PARM_POST_COOKIE:
+		*(void **)parm = current;
+		break;
+	case PARM_FRAME_BASE:
+		/*
+		 * This really is just the very bottom of the
+		 * stack.  To turn this into frame ptr, Merge
+		 * has to subtract off its idea of the frame size.
+		 */
+		*(void **)parm = ((char *)current) + LINUX_TASK_STRUCT_SIZE;
+		break;
+	case PARM_TASK_MASK:
+		/*
+		 * An old mki consumer will not have MCV_MAGIC in the *parm
+		 * variable.  For a new consumer, we signal we are a new mki
+		 * by returning zero to this call.  The consumer will then
+		 * use the new calls including PARM_TASK_MASK_V2.
+		 */
+		mkia_calling_version = *(unsigned long *)parm;
+		if ((mkia_calling_version & MCV_MAGIC_MASK) == MCV_MAGIC) {
+			mkia_calling_version &= MCV_VERSION_MASK;
+			if (mkia_calling_version >= 2) {
+				*(unsigned long *)parm = 0;
+				break;
+			}
+		}
+
+		/*
+		 * This is an old consumer.  We need to return the real
+		 * task mask.
+		 */
+		mkia_calling_version = 1;
+		*(unsigned long *)parm = ~(LINUX_TASK_STRUCT_SIZE - 1);
+		if (smp_num_cpus <= 1)
+			break;
+
+		/*
+		 * Uh oh.  We have a Version 1 MKI consumer, but more
+		 * than one CPU and Version 1 Win4Lin was not SMP safe.
+		 */
+		_caller_not_smp_safe("mkia_getparm");
+		break;
+	case PARM_PRIVATE:
+		*(void **) parm = &mkia_privatep;
+		break;
+	case PARM_LDTP:
+		if (current->mm != NULL)
+			*(void **) parm = current->mm->segments;
+	    	else
+			*(void **) parm = NULL;
+		break;
+	case PARM_TSSP:
+		*(void **) parm = (void *) &current->tss;
+		break;
+	case PARM_RUID:
+		*(uid_t *) parm = current->uid;
+		break;
+
+	/* ========================================================= */
+	/* Begin Version 2 MKI calls */
+
+	case PARM_TASK_MASK_V2:
+		*(unsigned long *)parm = ~(LINUX_TASK_STRUCT_SIZE - 1);
+		break;
+
+	case PARM_MKI_VERSION:
+		*(unsigned int *)parm = 3;
+		break;
+
+	case PARM_NUM_CPUS:
+		*(unsigned int *)parm = smp_num_cpus;
+		break;
+
+	case PARM_MAX_NUMPROCS:
+		*(unsigned int *)parm = NR_TASKS;
+		break;
+
+	case PARM_CURPROC_INDEX:
+		*(unsigned int *)parm =
+			current->tarray_ptr - &task[0]; 
+		break;
+
+	default:
+		printk("mkia_getparm: no support for this request %d\n",
+			request);
+		*(int *) parm = 0;		/* for sanity */
+		break;
+	}
+}
+
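+/*
+ * Illustrative sketch, not part of the original patch: how a consumer
+ * reaches mkia_privatep through PARM_PRIVATE to keep one pointer alive
+ * across its own load/unload cycles.  my_stash_state is hypothetical.
+ */
+#if 0	/* example only */
+static void my_stash_state(void *state)
+{
+	void **slot;
+
+	mkia_getparm(PARM_PRIVATE, &slot);	/* slot = &mkia_privatep */
+	*slot = state;				/* survives module unload */
+}
+#endif
+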
+static char starline[] =
+    "*****************************************************************";
+
+void
+_caller_not_smp_safe(char *funcname)
+{
+	(void) printk(KERN_ERR "\n\n%s\n", starline);
+	(void) printk(KERN_ERR "WARNING: %s: "
+	    "Win4Lin Version 1 is NOT SMP safe!\n", funcname);
+	(void) printk(KERN_ERR "Reboot with the \"nosmp\" option\n");
+	(void) printk(KERN_ERR "%s\n\n", starline);
+}
+
+
+/*
+ * void
+ * mkia_post_event(void *cookie)
+ *
+ * Sets an event pending.  NOTE this routine may be called OUT of
+ * context and, in fact, out of an interrupt.  We can get away without
+ * a BUS-LOCK prefix because we do an assignment rather than a Read-
+ * Modify-Write.
+ */
+void
+mkia_post_event(void *cookie)
+{
+	struct task_struct *t;
+#ifdef __SMP__
+	unsigned long flags;
+#endif
+	int nr;
+
+	t = (struct task_struct *) cookie;
+	nr = t->tarray_ptr - &task[0]; 
+	mkia_event_pending[nr] = 1;
+
+#ifdef __SMP__
+	/*
+	 * If the task is running on a different CPU, force a reschedule
+	 * on that CPU.  This will force that task into the kernel if it
+	 * is not already there, and on the way back to user mode,
+	 * mkia_event_pending[nr] will get checked.
+	 */
+	if (smp_num_cpus > 1) {
+		spin_lock_irqsave(&runqueue_lock, flags);
+		if (t->has_cpu && t->processor != smp_processor_id())
+			smp_send_reschedule(t->processor);
+		spin_unlock_irqrestore(&runqueue_lock, flags);
+	}
+#endif /* __SMP__ */
+}
+
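+/*
+ * Illustrative sketch, not part of the original patch, of the intended
+ * calling pattern: capture the opaque task cookie in task context via
+ * PARM_POST_COOKIE, then post to it later, even from an interrupt; the
+ * RET_USER hook fires on that task's next return to user mode.  The
+ * names my_cookie/my_setup/my_irq are hypothetical.
+ */
+#if 0	/* example only */
+static void *my_cookie;
+
+static void my_setup(void)		/* task context */
+{
+	mkia_getparm(PARM_POST_COOKIE, &my_cookie);
+}
+
+static void my_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	mkia_post_event(my_cookie);	/* safe even out of an interrupt */
+}
+#endif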
+
+/*
+ * void
+ * mkia_mark_vm86(void)
+ *
+ * Note that this function is always called in task context so no
+ * locking is necessary.
+ */
+void
+mkia_mark_vm86(void)
+{
+	struct idt_gdt_desc_struct cur_desc;
+	int nr;
+
+	nr = current->tarray_ptr - &task[0]; 
+	mkia_task_flags[nr] |= MKIF_MARKED;
+	if (mkia_task_flags[nr] & MKIF_SETGDT_DONE) {
+		asm volatile("sgdt %0": "=m" (cur_desc));
+		cur_desc.table = mkia_task_table[nr].mti_merge_gdtp;
+		asm volatile("lgdt %0": : "m" (cur_desc));
+	}
+	if ( mkia_idt != NULL ) {
+		asm volatile("lidt %0": : "m" (mkia_idt_.limit));
+	}
+}
+
+/*
+ * int
+ * mkia_check_vm86(void)
+ *
+ * Note that this function is always called in task context so no
+ * locking is necessary.
+ */
+int
+mkia_check_vm86(void)
+{
+	int nr;
+
+	nr = current->tarray_ptr - &task[0]; 
+	return (mkia_task_flags[nr] & MKIF_MARKED) ? 1 : 0;
+}
+
+/*
+ *
+ * void
+ * mkia_clear_vm86(void)
+ *
+ * Note that this function is always called in task context so no
+ * locking is necessary.
+ */
+void
+mkia_clear_vm86(void)
+{
+	int nr;
+
+	nr = current->tarray_ptr - &task[0]; 
+
+	/* Make sure we go back to the linux gdt and idt */
+	asm volatile("lgdt %0": : "m" (gdt_descr));
+	asm volatile("lidt %0": : "m" (idt_descr));
+	mkia_cleanup_gdt();
+	mkia_task_flags[nr] &= ~(MKIF_MARKED | MKIF_SETGDT_DONE);
+}
+
+
+/*
+ * void
+ * mkia_pgfault_get_state(int *pfault_ok, mkia_fault_catch_t *fcstate)
+ */
+void
+mkia_pgfault_get_state(int *pfault_ok, mkia_fault_catch_t *fcstate)
+{
+	unsigned long faulting_eip;
+
+	/*
+	 *  Context check.  Make sure that we are not in the middle of
+	 *  servicing an interrupt.  If so, then the fault catch information
+	 *  is not valid.
+	 */
+	if (in_interrupt()) {
+		*pfault_ok = 0;
+		return;
+	}
+		
+	/*
+	 *  Save the old state and clear the current.
+	 */
+	*pfault_ok = 1;
+	faulting_eip = (unsigned long) 
+		fcstate->mkifc_os_dependent[FC_FAULT_EIP];
+
+	fcstate->mkifc_catching_user_fault = 
+		(search_exception_table(faulting_eip) ? 1 : 0);
+
+	fcstate->mkifc_os_dependent[FC_SAVE_FS] = (int) (get_fs()).seg;
+	set_fs(KERNEL_DS);
+}
+
+/*
+ * void
+ * mkia_pgfault_restore_state(mkia_fault_catch_t *fcstate)
+ */
+void
+mkia_pgfault_restore_state(mkia_fault_catch_t *fcstate)
+{
+	set_fs(MAKE_MM_SEG(fcstate->mkifc_os_dependent[FC_SAVE_FS]));
+	fcstate->mkifc_os_dependent[FC_SAVE_FS] = 0;
+}
+
+/*
+ * void *
+ *	mkia_alloc_priv_tss(void)
+ */
+void *
+mkia_alloc_priv_tss(void)
+{
+	struct thread_struct *t;
+
+	if ((mkia_calling_version < 2) && (smp_num_cpus > 1)) {
+		/*
+		 * Uh oh.  We have a Version 1 MKI consumer, but more
+		 * than one CPU and Version 1 Win4Lin was not SMP safe.
+		 * Returning NULL will prevent session startup.
+		 */
+		_caller_not_smp_safe("mkia_alloc_priv_tss");
+		return NULL;
+	}
+
+	/*
+	 * On Linux, each task already has a private TSS,
+	 * so we just return it, but make sure the iobitmap
+	 * is properly initialized.
+	 */
+	t = &current->tss;
+	t->bitmap = offsetof(struct thread_struct, io_bitmap);
+	memset(t->io_bitmap, 0xff, (IO_BITMAP_SIZE + 1) * 4);
+	return (void *) t;
+}
+
+/*
+ * void
+ *	mkia_yield(void)
+ */
+void
+mkia_yield(void)
+{
+	/*
+	 * Zero our remaining time slice and then call schedule()
+	 * to yield the processor to other processes.
+	 */
+	current->counter = 0;
+	schedule();
+}
+
+#if defined(CONFIG_KDB)
+#include <linux/kdb.h>
+int mkia_assert_debugger = 1;
+#endif
+
+void
+mkia_assert_failure(char *exprstr, char *filename, int linenum)
+{
+	/*
+	 * Note that we make this a separate function so that
+	 * we can put a breakpoint here to trace the problems.
+	 */
+	(void) printk(KERN_ERR
+	    "MKI Assertion \"%s\" failed: file \"%s\", line %d\n", \
+		    exprstr, filename, linenum);
+#if defined(CONFIG_KDB)
+	if (mkia_assert_debugger)
+		kdb(KDB_REASON_ENTER, 0, 0);
+#endif
+}
+
+/*
+ * void
+ *	mkia_enter_debugger(int reason, int error, struct pt_regs *regs)
+ */
+void
+mkia_enter_debugger(int reason, int error, struct pt_regs *regs)
+{
+	/*
+	 * Enter the debugger that is currently in use, if any.
+	 */
+#if defined(CONFIG_KDB)
+	switch (reason) {
+	case 1:		/* Called with error and regs values */
+		kdb(KDB_REASON_DEBUG, error, regs);
+		break;
+	default:
+		/*FALLTHRU*/
+	case 0:		/* Called from an "assert" or some other place that
+			 * does not have an error code or regs associated.
+			 */
+		kdb(KDB_REASON_ENTER, 0, 0);
+		break;
+	}
+#else
+	/* (void) printk("mkia_enter_debugger: no debugger available\n"); */
+#endif
+}
+
+int
+mkia_ftruncate_k(int fd, off_t length)
+{
+	struct dentry *dentry;
+	struct file * file;
+	int error;
+
+	lock_kernel();
+	error = -EBADF;
+	if ((file = fget(fd))) {
+		if ((dentry = file->f_dentry))
+			error = do_truncate(dentry, length);
+		else
+			error = -ENOENT;
+		fput(file);
+	}
+	unlock_kernel();
+	return error;
+}
+
+/*
+ *	Support for the virtual memory aspects of Linux specific
+ *	Merge Kernel Interface (mki). 
+ */
+
+/* Beginning of function implementations */
+
+mkia_64bit_t 
+mkia_virt_to_phys(void *kaddr)
+{
+	unsigned long vaddr;
+	pte_t *pte;
+	pte_t entry;
+
+	if (kaddr < high_memory)
+		return virt_to_phys(kaddr);
+
+	vaddr = (unsigned long) kaddr;
+	pte = pte_offset(pmd_offset(pgd_offset_k(vaddr), vaddr), vaddr);
+	if (pte == NULL)
+		return (mkia_64bit_t) -1;
+
+	entry = *pte;
+	if (! pte_present(entry))
+		return (mkia_64bit_t) -1;
+
+	return (mkia_64bit_t) (pte_val(entry) & PAGE_MASK);
+}
+
+
+mkia_64bit_t *
+mkia_get_pagedir(void)
+{
+	struct mm_struct *mm;
+
+	mm = current->mm;
+	MKIA_ASSERT(atomic_read(&mm->count) == 1);
+
+	return (mkia_64bit_t *) current->mm->pgd;
+}
+
+static int
+_mkia_transfer_mod_acc_bits(unsigned long vaddr, unsigned long physpte)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *ptep;
+
+	ptep = 0;
+	vaddr &= ~(PAGE_SIZE - 1);
+	mm = current->mm;
+
+	DBG(("down in _mkia_transfer_mod_acc_bits\n"));
+	down(&mm->mmap_sem);
+	vma = find_vma(mm, vaddr);
+	if (!vma) {
+		printk("_mkia_transfer_mod_acc_bits: find_vma failed "
+			"for %lx\n", vaddr);
+		goto _mkia_transfer_mod_acc_bits_done;
+	}
+
+	pgd = pgd_offset(vma->vm_mm, vaddr);
+	if ((pmd = pmd_alloc(pgd, vaddr)) != 0)
+		ptep = pte_alloc(pmd, vaddr);
+
+	if (ptep == 0)
+		goto _mkia_transfer_mod_acc_bits_done;
+		
+	if (!(pte_val(*ptep) & _PAGE_PRESENT)) {
+		ptep = 0;
+		goto _mkia_transfer_mod_acc_bits_done;
+	}
+
+	MKIA_ASSERT((physpte & PAGE_MASK) == (pte_val(*ptep) & PAGE_MASK));
+	pte_val(*ptep) |= (physpte & (_PAGE_DIRTY | _PAGE_ACCESSED));
+
+_mkia_transfer_mod_acc_bits_done:
+	up(&mm->mmap_sem);
+	DBG(("  up in _mkia_transfer_mod_acc_bits\n"));
+	return (ptep != 0);
+}
+
+int
+mkia_remove_page_ref(unsigned long vaddr, mkia_64bit_t physpte,
+	void *file_cookie, off_t offset)
+{
+	struct vm_area_struct fake_vma;
+	struct page *pageinfop;
+	unsigned long physaddr;
+	struct file *file;
+	unsigned long mapnr;
+	int retcode;
+	pte_t pte;
+
+	retcode = 0;
+	physaddr = physpte & PAGE_MASK;
+	offset &= PAGE_MASK;
+	file = (struct file *) file_cookie;
+	MKIA_ASSERT(file != NULL);
+	if (file == NULL) {
+		/* Just in case: handle it gracefully */
+		goto rem_page_ref_done;
+	}
+
+	pte_val(pte) = physpte;
+	mapnr = MAP_NR(pte_page(pte));
+	if (mapnr >= max_mapnr)
+		goto rem_page_ref_done;
+
+	pageinfop = &mem_map[mapnr];
+	MKIA_ASSERT(atomic_read(&pageinfop->count) >= 2);
+	if (PageReserved(pageinfop)) {
+		mkia_cnt_rpr_pagereserved++;
+		goto rem_page_ref_free;
+	}
+
+
+	MKIA_ASSERT(pageinfop->inode != NULL);
+	MKIA_ASSERT(file->f_dentry->d_inode == pageinfop->inode);
+	MKIA_ASSERT(pageinfop->offset == offset);
+
+	if ((physpte & (_PAGE_DIRTY | _PAGE_ACCESSED)) == 0) {
+		/*
+		 * If this is a clean mapping, i.e. no mod or acc bits,
+		 * then we can just decrement the page use counts and
+		 * be done with it!
+		 */
+		mkia_cnt_rpr_not_dirty_acc++;
+		goto rem_page_ref_free;
+	}
+		
+	/*
+	 * If we're removing the ref and this is the vm86 proc in
+	 * context, see if the vaddr pte is present.  If so, then
+	 * just transfer the ref/dirty info there to avoid any I/O.
+	 * This is a frequent case since most of the time, page ref
+	 * This is a frequent case since, most of the time, page ref
+	 * drops are the result of virtual TLB flushes rather than
+	 */
+	if ((! mkia_check_vm86()) ||
+	    (! _mkia_transfer_mod_acc_bits(vaddr, physpte))) {
+		/*
+		 * Either this is not the vm86 proc in context (i.e. it
+		 * is the ager), or the global-area pte for this page
+		 * is not present; so if the page is dirty, we have
+		 * to do the I/O.
+		 */
+		mkia_cnt_rpr_not_context_transfer++;
+		if (physpte & _PAGE_DIRTY) {
+			mkia_cnt_rpr_swapout++;
+			memset(&fake_vma, 0, sizeof(fake_vma));
+			fake_vma.vm_file = file;
+			retcode = filemap_swapout(&fake_vma, pageinfop);
+		}
+	}
+
+rem_page_ref_free:
+	atomic_dec(&pageinfop->count);
+	atomic_dec(&mkia_context_rss);
+	MKIA_ASSERT(atomic_read(&mkia_context_rss) >= 0);
+
+rem_page_ref_done:
+	return retcode;
+}
+
+
+unsigned long
+mkia_mmap_k(unsigned long addr, size_t len, int prot, int flags,
+   int filedes, off_t offset, int *errnop)
+{
+	struct file * file = NULL;
+	int error = -EFAULT;
+
+	if ((prot & PROT_USER) == 0) {
+		printk("mmap_k: Non-USER mapping requested\n");
+	}
+
+	DBG(("down in mkia_mmap_k: %p\n", &current->mm->mmap_sem));
+	down(&current->mm->mmap_sem);
+	lock_kernel();
+
+	if (!(flags & MAP_ANONYMOUS)) {
+		error = -EBADF;
+		file = fget(filedes);
+		if (!file)
+			goto out;
+	}
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+	error = do_mmap(file, addr, len, prot, flags, offset);
+	if (file)
+		fput(file);
+out:
+	unlock_kernel();
+	up(&current->mm->mmap_sem);
+	DBG(("  up in mkia_mmap_k\n"));
+
+	if ((error < 0) && (error > -4096)) {
+		*errnop = error;
+		error = -1;
+	} else {
+		*errnop = 0;
+	}
+	return ((unsigned long) error);
+}
+
+int
+mkia_munmap_k(unsigned long addr, size_t len)
+{
+	int ret; 
+ 
+	DBG(("down in mkia_mmunap_k\n"));
+	down(&current->mm->mmap_sem);
+	lock_kernel();
+	ret = do_munmap(addr, len);
+	unlock_kernel();
+	up(&current->mm->mmap_sem);
+	DBG(("  up in mkia_mmunap_k\n"));
+	return ret;
+}
+
+extern int do_mprotect(unsigned long, size_t, unsigned int);
+
+int
+mkia_mprotect_k(unsigned long addr, size_t len, unsigned int prot)
+{
+	int ret;
+
+	DBG(("down in mkia_mprotect_k\n"));
+	down(&current->mm->mmap_sem); 
+	lock_kernel();
+	ret = do_mprotect(addr, len, (unsigned long) prot);
+	unlock_kernel();
+	up(&current->mm->mmap_sem);
+	DBG(("up in mkia_mprotect_k\n"));
+	return ret;
+}
+
+/*
+ * int
+ * mkia_upageflt(unsigned long address, int error_code)
+ *
+ * returns:
+ *	0 if the pagefault could not be resolved
+ *	1 if the pagefault could be resolved
+ */
+int
+mkia_upageflt(unsigned long address, int error_code)
+{
+	struct task_struct *tsk;
+	struct mm_struct *mm;
+	struct vm_area_struct * vma;
+	int write;
+
+	tsk = current;
+	mm = tsk->mm;
+
+	/*
+	 * If we're in an interrupt or have no user
+	 * context, we must not take the fault..
+	 */
+	if (in_interrupt() || mm == &init_mm)
+		goto return_fail;
+
+	DBG(("down in mkia_upageflt\n"));
+	down(&mm->mmap_sem);
+
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto up_and_fail;
+	if (vma->vm_start <= address)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto up_and_fail;
+
+	/*
+	 * The do_page_fault() code handles stack growth here but
+	 * we don't do this because we don't have a standard process
+	 * whose stack we can grow.  The emulator has a fixed-size
+	 * stack, and it's not our job to grow the Windows stacks.
+	 */
+
+good_area:
+	write = 0;
+	switch (error_code & 3) {
+		default:	/* 3: write, present */
+			/* fall through */
+		case 2:		/* write, not present */
+			if (!(vma->vm_flags & VM_WRITE))
+				goto up_and_fail;
+			write++;
+			break;
+		case 1:		/* read, present */
+			goto up_and_fail;
+		case 0:		/* read, not present */
+			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+				goto up_and_fail;
+	}
+
+	if (!handle_mm_fault(tsk, vma, address, write))
+		goto up_and_fail;
+
+	up(&mm->mmap_sem);
+	DBG(("  up in mkia_upageflt\n"));
+	return 1;
+
+up_and_fail:
+	up(&mm->mmap_sem);
+	DBG(("  up in mkia_upageflt\n"));
+	return 0;
+
+return_fail:
+	return 0;
+}
+
+
+/*
+ * int
+ * mkia_uaddr_mapped(unsigned long address)
+ *
+ * returns:
+ *	0 if the address is not mapped
+ *	1 if the address is mapped
+ */
+int
+mkia_uaddr_mapped(unsigned long address)
+{
+	struct mm_struct *mm;
+	struct vm_area_struct * vma;
+
+	mm = current->mm;
+
+	DBG(("down in mkia_uaddr_mapped\n"));
+	down(&mm->mmap_sem);
+	vma = find_vma(mm, address);
+	up(&mm->mmap_sem);
+	DBG(("  up in mkia_uaddr_mapped\n"));
+
+	return (vma != NULL);
+}
+
+
+void *
+mkia_get_file_cookie(int filedes)
+{
+	struct file *file;
+	
+	file = fget(filedes);
+	return file;
+}
+
+void
+mkia_put_file_cookie(void *farg)
+{
+	struct file *file;
+	
+	if ((file = (struct file *) farg) != NULL)
+		fput(file);
+}
+
+
+mkia_64bit_t 
+mkia_add_page_ref(unsigned long vaddr)
+{
+	struct vm_area_struct *vma;
+	mkia_64bit_t  retval;
+	struct mm_struct *mm;
+	unsigned long mapnr;
+	int tries;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *ptep;
+
+	vaddr &= ~(PAGE_SIZE - 1);
+	mm = current->mm;
+	retval = 0;
+
+	DBG(("down in mkia_add_page_ref\n"));
+	down(&mm->mmap_sem);
+	vma = find_vma(mm, vaddr);
+	if (!vma) {
+		printk("mkia_add_page_ref: find_vma failed for %lx\n", vaddr);
+		goto mkia_add_page_ref_done;
+	}
+
+	for (tries = 0, ptep = 0; tries < 2; tries++) {
+		pgd = pgd_offset(vma->vm_mm, vaddr);
+		pmd = pmd_alloc(pgd, vaddr);
+		if (pmd) {
+			ptep = pte_alloc(pmd, vaddr);
+		}
+		if ((ptep != 0) && pte_present(*ptep) && pte_write(*ptep))
+			break;
+
+		make_pages_present(vaddr, vaddr + PAGE_SIZE);
+		ptep = 0;
+	}
+	if (ptep == 0) {
+		printk("mkia_add_page_ref: couldn't make %lx present\n", vaddr);
+		goto mkia_add_page_ref_done;
+	}
+	mapnr = MAP_NR(pte_page(*ptep));
+	if (mapnr < max_mapnr) {
+		atomic_inc(&mem_map[mapnr].count);
+	}
+	retval =  pte_val(*ptep);
+	atomic_inc(&mkia_context_rss);
+
+mkia_add_page_ref_done:
+	up(&mm->mmap_sem);
+	DBG(("  up in mkia_add_page_ref\n"));
+	return retval;
+}
+
+void
+mkia_adjust_esp0(int numlongs)
+{
+	current->tss.esp0 -= (numlongs * sizeof(unsigned long));
+}
+
+int
+mkia_get_current_task_index(void)
+{
+	return (unsigned int) (current->tarray_ptr - &task[0]);
+}
+
+/*
+ * void mkia_wake_up()
+ *
+ * Synchronization variable wakeup.
+ * The standard wake_up() works for 2.4, but on 2.2 interrupts
+ * are not disabled, which can cause a deadlock; hence this
+ * open-coded version.
+ */
+void
+mkia_wake_up(void *wqp)
+{
+	struct task_struct *p;
+	struct wait_queue *head, *next;
+	unsigned long flags;
+
+	if (!wqp)
+		goto out;
+	/*
+	 * this is safe to do before the check because it involves
+	 * no dereference, just pointer arithmetic.
+	 */
+	head = WAIT_QUEUE_HEAD((struct wait_queue **)wqp);
+
+	read_lock_irqsave(&_mkia_waitqueue_lock, flags);
+	next = *((struct wait_queue **)wqp);
+	if (!next)
+		goto out_unlock;
+
+	while (next != head) {
+		p = next->task;
+		next = next->next;
+		if (p->state & (TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)) {
+			/*
+			 * We can drop the read-lock early if this
+			 * is the only/last process.
+			 */
+			if (next == head) {
+				read_unlock_irqrestore(&_mkia_waitqueue_lock,
+					flags);
+				wake_up_process(p);
+				goto out;
+			}
+			wake_up_process(p);
+		}
+	}
+out_unlock:
+	read_unlock_irqrestore(&_mkia_waitqueue_lock, flags);
+out:
+	return;
+}
+
diff -Naur mki-adapter-old/arch/i386/mki-adapter/mki24.c mki-adapter-new/arch/i386/mki-adapter/mki24.c
--- mki-adapter-old/arch/i386/mki-adapter/mki24.c	1969-12-31 16:00:00.000000000 -0800
+++ mki-adapter-new/arch/i386/mki-adapter/mki24.c	2003-12-01 17:40:18.000000000 -0800
@@ -0,0 +1,1931 @@
+/*
+ ****************************************************************************
+ * Copyright 2001 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: mki24.c,v 1.31 2003/12/02 01:40:18 rwb Exp $
+ ***************************************************************************
+ */
+#include <linux/config.h>
+#include <linux/modversions.h>
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,9)
+#include <linux/compiler.h>
+#else
+#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
+#define __builtin_expect(x, expected_value) (x)
+#endif
+#define likely(x)       __builtin_expect((x),1)
+#define unlikely(x)     __builtin_expect((x),0)
+#endif
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/smp_lock.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/file.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/pgtable.h>
+#include <asm/desc.h>
+#include <asm/softirq.h>
+#include <asm/hardirq.h>
+#include <asm/uaccess.h>
+#include <asm/ldt.h>
+#include <asm/desc.h>
+#include <asm/pgalloc.h>
+#include <asm/io.h>
+#include <asm/mman.h>
+
+#include <asm/mki.h>
+#include <mkifunc.h>
+
+#if __KERNEL_CS >= PAGE_SIZE
+#define LINUX_IDTP  idt
+#else
+extern struct desc_struct idt_table[], gdt_table[];
+#define LINUX_IDTP  idt_table
+#endif
+
+/******************************/
+/* Linux version changes      */
+/******************************/
+/* The mm_struct semaphore type and the pte/pmd allocators changed in
+ * 2.4.3, but OpenLinux back-ported that change, so for OpenLinux the
+ * cutoff is 2.4.2.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
+#define MM_SEM_DOWN_WRITE(a) down(a)
+#define MM_SEM_UP_WRITE(a) up(a)
+#define MM_SEM_DOWN_READ(a) down(a)
+#define MM_SEM_UP_READ(a) up(a)
+#define PMD_ALLOC(mm, pgd, vaddr) pmd_alloc(pgd, vaddr)
+#define PTE_ALLOC(mm, dir, addr) pte_alloc(dir, addr)
+#define PAGE_TABLE_LOCK(a) 
+#define PAGE_TABLE_UNLOCK(a) 
+
+#else  /* >= 2.4.3 */
+
+#define MM_SEM_DOWN_WRITE(a) down_write(a)
+#define MM_SEM_UP_WRITE(a) up_write(a)
+#define MM_SEM_DOWN_READ(a) down_read(a)
+#define MM_SEM_UP_READ(a) up_read(a)
+#define PMD_ALLOC(mm, pgd, vaddr) pmd_alloc(mm, pgd, vaddr)
+
+#ifdef pte_offset_map	  /* HIGHPTE patch present */
+#define PTE_ALLOC(mm, dir, addr) pte_alloc_map(mm, dir, addr)
+#define PTE_UNMAP(ptep)  pte_unmap(ptep)
+
+#elif defined(pte_kunmap) /* SuSE Highmem patch */
+#define PTE_ALLOC(mm, dir, addr) pte_alloc(mm, dir, addr)
+#define PTE_UNMAP(ptep)  pte_kunmap(ptep)
+
+#else
+#define PTE_ALLOC(mm, dir, addr) pte_alloc(mm, dir, addr)
+#define PTE_UNMAP(ptep)   /* Nothing */
+#endif
+
+#define PAGE_TABLE_LOCK(a) spin_lock(a)
+#define PAGE_TABLE_UNLOCK(a) spin_unlock(a)
+
+#endif /* >= 2.4.3 */
+
+#ifndef pte_offset_kernel
+#define pte_offset_kernel(dir, addr)  pte_offset(dir, addr)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,15)) && !defined(task_has_cpu)
+#define task_has_cpu(p)  ((p)->has_cpu)
+#endif
+
+/* externals */
+extern asmlinkage int sys_ftruncate(int, unsigned long);
+extern asmlinkage long sys_munmap(unsigned long, size_t);
+extern asmlinkage long sys_mprotect(unsigned long, size_t, unsigned long);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20)
+extern asmlinkage long sys_sched_yield(void);
+#endif
+
+
+#ifdef MAX_LDT_PAGES
+#define GET_LINUX_LDTP(c) 		(0)
+#define GET_LINUX_LDT_NUM_ENTRIES(c) 	(0)
+#elif defined(MM_CONTEXT_HAS_LDT_FIELD)
+#define GET_LINUX_LDTP(c) 		((c)->ldt)
+#define GET_LINUX_LDT_NUM_ENTRIES(c) 	((c)->size)
+#else
+#define GET_LINUX_LDTP(c) 		((c)->segments)
+#define GET_LINUX_LDT_NUM_ENTRIES(c) 	(((c)->segments) ? LDT_ENTRIES : 0 )
+#endif
+
+/*
+ * Linux does not have a constant for the task struct size.  This
+ * number should match what is encoded in get_current() found in
+ * include/asm-i386/current.h
+ */
+#define LINUX_TASK_STRUCT_SIZE  (8192)
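+/*
+ * For reference, a sketch of the 2.4 get_current() idiom this constant
+ * mirrors (the kernel masks %esp down to the 8K-aligned task struct):
+ *
+ *	static inline struct task_struct *get_current(void)
+ *	{
+ *		struct task_struct *current;
+ *		__asm__("andl %%esp,%0" : "=r" (current)
+ *			: "0" (~(LINUX_TASK_STRUCT_SIZE - 1)));
+ *		return current;
+ *	}
+ */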
+
+atomic_t mkia_context_rss = ATOMIC_INIT(0);
+
+/* Performance/debugging counters */
+unsigned int mkia_cnt_rpr_pagereserved = 0;
+unsigned int mkia_cnt_rpr_not_dirty_acc = 0;
+unsigned int mkia_cnt_rpr_dirty = 0;
+unsigned int mkia_cnt_rpr_accessed = 0;
+
+struct idt_gdt_desc_struct {
+	unsigned short limit;
+	struct desc_struct __attribute__((packed)) *table;
+	unsigned short pad;     /* Align for stack variables */
+};
+
+/*
+ * Note: we use the Linux kernel_lock to protect mkia_idt and mkia_idt_rw
+ * for SMP.
+ */
+struct idt_gdt_desc_struct mkia_idt_ = {(256 * 8) - 1, 0};
+#define mkia_idt mkia_idt_.table
+
+#define MKI_FC_SIZE	6
+#define FC_FAULT_EIP	0
+#define FC_SAVE_FS	1
+
+typedef struct {
+	int mkifc_catching_user_fault; /* Boolean */
+	int mkifc_os_dependent[MKI_FC_SIZE]; /* OS dependent state */
+} mkia_fault_catch_t;
+
+
+static int
+mhia_void(void *parm)
+{
+	return -1;
+}
+
+int (*mhia_table[])(void *) = {
+	&mhia_void,	     /* SWITCH_AWAY (index 0, per the #defines below) */
+	&mhia_void,	     /* SWITCH_TO   (index 1) */
+	&mhia_void,	     /* THREAD_EXIT */
+	&mhia_void,	     /* RET_USER */
+	&mhia_void,	     /* SIGNAL */
+	&mhia_void,	     /* QUERY */
+	&mhia_void,	     /* SWAP_PAGES */
+};
+
+/* The next one might as well go into bss */
+static struct desc_struct * mkia_idt_rw;
+
+/*
+ * merge function offset into the hook functions table.
+ */
+#define SWITCH_AWAY   0
+#define SWITCH_TO     1
+#define THREAD_EXIT   2
+#define RET_USER      3
+#define SIGNAL        4
+#define QUERY         5
+#define SWAP_PAGES    6
+#define NUM_HOOKS     7
+
+/*
+ * Used in the implementation of the MKI functions
+ */
+void mkia_assert_failure(char *exprstr, char *filename, int linenum);
+
+#define	MKIA_ASSERT(expression) do { \
+	if (!(expression)) \
+		mkia_assert_failure(#expression, __FILE__, __LINE__); \
+} while (0)
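+/* The do { } while (0) wrapper makes the macro expand to a single
+ * statement, so MKIA_ASSERT() composes safely with unbraced if/else. */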
+
+
+/* Additional mmap*() routine related constants */
+#define PROT_USER	 0x08 	/* page is USER accessible */
+#define PAGESIZE    PAGE_SIZE	/* compatibility */
+
+/* TSS is 104 bytes long for 32 bit TSS */
+#define TSS_SIZE_32 104
+/* There are this many bits in the TSS's iobitmap */
+#define MKI_TSS_IOBITMAP_ENTRIES 1024
+/* fudge factor to add to the tss limit */
+#define MKI_TSS_FUDGE 8
+/* this will be used as limit for the TSS: */
+#define MKI_TSS_LIMIT(x) (TSS_SIZE_32 + ((x) >> 3) + MKI_TSS_FUDGE)
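+/* e.g. MKI_TSS_LIMIT(MKI_TSS_IOBITMAP_ENTRIES) = 104 + 1024/8 + 8 = 240 */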
+
+/*
+ * This is the address where the MKI expects the Linux kernel to live.
+ * If someone makes a kernel with PAGE_OFFSET at a different address,
+ * then we probably won't work, because the Windows address space
+ * is very tight as it is!
+ */
+
+#if __PAGE_OFFSET < MKI_END_USER_ADDR
+	#error MKI will not work if __PAGE_OFFSET is not >= MKI_END_USER_ADDR
+#endif
+
+void
+mkia_init(void)
+{
+}
+
+mkia_task_t * mkia_alloc_task_info(void);
+void mkia_pgvfree(void *, unsigned long);
+void mkia_alloc_tss(void *, int);
+
+/***************************************************************************/
+
+#ifdef CONFIG_PREEMPT
+#ifdef PREEMPT_DEBUG
+unsigned long mkia_preempt_enable_counter = 0;
+unsigned long mkia_preempt_enable_nosched_counter = 0;
+unsigned long mkia_preempt_disable_counter = 0;
+#define PREEMPT_COUNT(a) a++;
+#else
+#define PREEMPT_COUNT(a)
+#endif
+#endif
+
+void
+mkia_preempt_disable(void)
+{
+#ifdef CONFIG_PREEMPT
+	PREEMPT_COUNT(mkia_preempt_disable_counter);
+	preempt_disable();
+#endif
+}
+
+void
+mkia_preempt_enable(void)
+{
+#ifdef CONFIG_PREEMPT
+	PREEMPT_COUNT(mkia_preempt_enable_counter);
+	preempt_enable();
+#endif
+}
+
+void
+mkia_preempt_enable_nosched(void)
+{
+#ifdef CONFIG_PREEMPT
+	PREEMPT_COUNT(mkia_preempt_enable_nosched_counter);
+	preempt_enable_no_resched();
+#endif
+}
+
+int
+mkia_get_preempt_count(void)
+{
+#ifdef CONFIG_PREEMPT
+	return current->preempt_count & ~PREEMPT_ACTIVE;
+#else
+	return 0;
+#endif
+}
+
+
+/*
+ * mkia_get_task_info
+ * allocates the task_info struct if it's null in the current task
+ */
+static inline mkia_task_t *
+mkia_get_task_info(struct task_struct *curr)
+{
+	mkia_task_t *mtip;
+	if((mtip=curr->mki_task_info) == NULL) {
+		mtip=mkia_alloc_task_info();
+		curr->mki_task_info = mtip;
+	}
+	return mtip;
+}
+
+static inline void
+mkia_free_ldt( mkia_task_t *mtip )
+{
+	if (mtip->mti_merge_ldtp != NULL) {
+		vfree(mtip->mti_merge_ldtp);
+		mtip->mti_merge_ldtp = NULL;
+		mtip->mti_save_ldtp = NULL;
+	}
+}
+
+static inline void
+mkia_free_tss( mkia_task_t *mtip )
+{
+	if (mtip->mti_merge_tssp != NULL) {
+		kfree(mtip->mti_merge_tssp);
+		mtip->mti_merge_tssp = NULL;
+	}
+}
+
+static inline void
+mkia_free_gdt( mkia_task_t *mtip )
+{
+	struct idt_gdt_desc_struct cur_desc;
+	unsigned long size;
+
+	asm volatile("sgdt %0": "=m" (cur_desc));
+	size = PAGE_ALIGN(cur_desc.limit + 1);
+#if __KERNEL_CS < PAGE_SIZE
+	if (size != PAGE_SIZE) {
+		printk("mkia_free_gdt: unexpected GDT size %lx\n", size);
+		return;
+	}
+#endif
+	if (mtip->mti_merge_gdtp != NULL) {
+		mkia_pgvfree(mtip->mti_merge_gdtp, size);
+		mtip->mti_merge_gdtp = NULL;
+	}
+}
+
+/* Called from mkia_remove_hook */
+static inline void
+mkia_cleanup_idt( void )
+{
+	lock_kernel();
+	if (mkia_idt_rw != NULL) {
+		if (boot_cpu_data.f00f_bug) {
+			mkia_pgvfree(mkia_idt_rw, 2*PAGE_SIZE);
+		}
+		else {
+			vfree(mkia_idt_rw);
+		}
+		mkia_idt_rw = NULL;
+	}
+	mkia_idt = NULL;
+	unlock_kernel();
+}
+
+/*
+ * mkia_load_linux_descriptors
+ *
+ * switch in the linux gdt, idt, ldt, and tr
+ */
+static inline void
+mkia_load_linux_descriptors( void )
+{
+	int smp_id = smp_processor_id();
+#if __KERNEL_CS >= PAGE_SIZE
+	/*
+	 * Original 2.4 approach with shared GDT past 1 page
+	 */
+	unsigned int *gdttable;
+
+	asm volatile("lgdt %0": : "m" (gdt_descr) );
+	asm volatile("lidt %0": : "m" (idt_descr) );
+	__load_LDT(smp_id);
+	/* point to second part of TSS descriptor 
+	 * and clear busy bit prior to ltr */
+	gdttable = (unsigned int *) gdt_table;
+	gdttable[(__TSS(smp_id) << 1) + 1] &= (unsigned int) 0xfffffdff;
+	load_TR(smp_id);
+
+#else
+	/*
+	 * KERNEL gdt elements at the tail end because Linux has
+	 * per-CPU GDTs in this version.
+	 */
+	struct desc_struct *gdttable;
+
+	asm volatile("lgdt %0": : "m" (cpu_gdt_descr[smp_id]) );
+	asm volatile("lidt %0": : "m" (idt_descr) );
+	load_LDT_desc();
+
+	/* Clear busy bit prior to ltr */
+	gdttable = (struct desc_struct *) (cpu_gdt_descr[smp_id].address);
+	gdttable[GDT_ENTRY_TSS].b &= 0xfffffdff;
+	load_TR_desc();
+#endif
+}
+
+static inline 
+void
+mkia_alloc_gdt( mkia_task_t *mtip )
+{
+	unsigned long size;
+	unsigned long i;
+	unsigned long origpage, newpage;
+	pgd_t * pgd;
+	pmd_t * pmd;
+	pte_t * pte;
+	struct idt_gdt_desc_struct cur_desc;
+	struct desc_struct *pgdt_table;
+	unsigned short tbllimit;
+
+	MKIA_ASSERT(mtip->mti_merge_gdtp == NULL);
+	asm volatile("sgdt %0": "=m" (cur_desc));
+	tbllimit = cur_desc.limit;
+	size = PAGE_ALIGN(tbllimit + 1);
+	if ( !(pgdt_table = vmalloc(size))) {
+		return;
+	}
+
+#if __KERNEL_CS < PAGE_SIZE
+	if (size != PAGE_SIZE) {
+		printk("mkia_alloc_gdt: unexpected GDT size %lx\n", size);
+		vfree(pgdt_table);	/* don't leak the fresh allocation */
+		return;
+	}
+#endif
+
+	/* Initially a copy of the Linux GDT; the first page is cleared
+	 * after the first driver call to mkia_set_gdt_entry() */
+	memcpy(pgdt_table, cur_desc.table, size);
+	
+	for (i = PAGE_SIZE; i < size; i += PAGE_SIZE) {
+		origpage = (unsigned long) gdt_table + i;
+		newpage = (unsigned long) pgdt_table + i;
+
+		pgd = pgd_offset(&init_mm, newpage);
+		pmd = pmd_offset(pgd, newpage);
+		pte = pte_offset_kernel(pmd, newpage);
+
+		__free_page(pte_page(*pte));
+		*pte = mk_pte(virt_to_page(origpage), PAGE_KERNEL);
+	}
+	flush_tlb_all();
+	mtip->mti_merge_gdtp = pgdt_table;
+}
+
+/*
+ * mkia_internal_set_gdt_entry
+ * 
+ * this is only called within mki.c to set up the descriptors for the 
+ * newly allocated gdt during initialization, and is called from 
+ * mkia_set_gdt_entry below
+ */
+static inline int
+mkia_internal_set_gdt_entry(unsigned short sel, unsigned long *new_entry, mkia_task_t *mtip)
+{
+	struct idt_gdt_desc_struct cur_desc;
+	int i;
+	unsigned short tbllimit;
+	struct desc_struct *pgdt_table;
+
+	asm volatile("sgdt %0": "=m" (cur_desc));
+	tbllimit = cur_desc.limit;
+	if ( sel < 1 || sel > tbllimit ) {
+		return EINVAL;
+	}
+	pgdt_table = mtip->mti_merge_gdtp;
+	if (sel >= PAGE_SIZE) {
+		return EBUSY;
+	}
+#if __KERNEL_CS < PAGE_SIZE
+	if (sel >= __KERNEL_CS) {
+		return EBUSY;
+	}
+#endif
+	/* selector -> index */
+	i = sel >> 3;
+	pgdt_table[i] = *(struct desc_struct *) new_entry;
+	return 0;
+}
+
+static inline
+void
+mkia_setup_ldt_descriptor(mkia_task_t *mtip, void * pldt_table, int limit)
+{
+	struct desc_struct ldtdesc;
+	mtip->mti_current_ldtp = pldt_table;
+	mtip->mti_current_ldt_limit = limit;
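+	/*
+	 * Hand-pack the 8-byte LDT descriptor below: .a holds base[15:0]
+	 * in its high word and limit[15:0] in its low word; .b holds
+	 * base[31:24], limit[19:16], base[23:16], and 0x8200
+	 * (present, DPL 0, system type 2 = LDT).
+	 */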
+	ldtdesc.a = (0xffff0000 & ((unsigned long) pldt_table << 16)) | 
+	  (0x0000ffff & (limit));
+	ldtdesc.b = (0xff000000 & (unsigned long) pldt_table) |
+	  (0x000f0000 & limit) | (0x00008200) | 
+	  (0x000000ff & ((unsigned long) pldt_table >> 16));
+	if (mtip->mti_merge_gdtp == NULL) {
+		printk("mki error: gdtp not allocated and mkia_setup_ldt_descriptor called\n");
+	} else {
+		mkia_internal_set_gdt_entry(MKI_LDT_DESC, (unsigned long *) &ldtdesc, mtip);
+	}
+}
+
+static inline 
+void
+mkia_alloc_ldt( mkia_task_t *mtip )
+{
+	struct desc_struct *pldt_table;
+	char *copydest;
+	mm_context_t *contextp;
+	void *orig_ldt;
+	unsigned short limit;
+	int orig_ldt_size;
+	int new_ldt_size;
+
+	MKIA_ASSERT(mtip->mti_merge_ldtp == NULL);
+	mtip->mti_save_ldtp = NULL;
+	mtip->mti_save_ldt_size = 0;
+
+	limit = (LDT_ENTRIES * LDT_ENTRY_SIZE) - 1;
+	new_ldt_size = PAGE_ALIGN(limit + 1);
+	if ( !(pldt_table = vmalloc(new_ldt_size))) {
+		printk("mkia_alloc_ldt: vmalloc failed\n");
+		return;
+	}
+	mtip->mti_merge_ldtp = pldt_table;
+
+	contextp = &current->mm->context;
+	orig_ldt = GET_LINUX_LDTP(contextp);
+	orig_ldt_size = GET_LINUX_LDT_NUM_ENTRIES(contextp) * LDT_ENTRY_SIZE;
+
+	if (orig_ldt_size) {
+		memcpy(pldt_table, orig_ldt, orig_ldt_size);
+	}
+	if (orig_ldt_size < new_ldt_size) {
+		copydest = ((char *) pldt_table) + orig_ldt_size;
+		memset(copydest, 0, new_ldt_size - orig_ldt_size);
+	}
+	mkia_setup_ldt_descriptor(mtip, pldt_table, limit);
+}
+
+/*
+ * mkia_load_mki_descriptors
+ *
+ * switch in the task's gdt, idt, ldt, and tr
+ */
+static inline void
+mkia_load_mki_descriptors( mkia_task_t *mtip )
+{
+	struct idt_gdt_desc_struct cur_desc;
+	unsigned int *gdttable;
+
+	MKIA_ASSERT(mtip->mti_flags & MKIF_DESC_ALLOCATED);
+	asm volatile("sgdt %0": "=m" (cur_desc));
+	cur_desc.table = mtip->mti_merge_gdtp;
+	asm volatile("lgdt %0": : "m" (cur_desc));
+	asm volatile("lldt %%ax": : "a" (MKI_LDT_DESC));
+	/* point to second part of TSS descriptor 
+	 * and clear busy bit prior to ltr */
+	gdttable = mtip->mti_merge_gdtp;
+	gdttable[(MKI_TSS_ENTRY << 1) + 1] &= (unsigned int) 0xfffffdff;
+	asm volatile("ltr %%ax": : "a" (MKI_TSS_DESC));
+	if ( mkia_idt != NULL ) {
+		asm volatile("lidt %0": : "m" (mkia_idt_.limit));
+	}
+}
+
+/*******************************************/
+/*******************************************/
+/*******************************************/
+
+/*
+ * Lock to protect mhia_table.  In reality this is not needed because
+ * the hooks only get modified on driver load/unload and the OS should
+ * prevent concurrency.  However, we do the locks just to be safe, since
+ * this is not a frequently occurring situation (the drivers are rarely
+ * unloaded).
+ */
+spinlock_t mkia_hook_table_lock = SPIN_LOCK_UNLOCKED;
+
+int
+mkia_install_hook(int id, int (*hook_fn)(void *))
+{
+	if ((id >= 0) && (id < NUM_HOOKS)) {
+		spin_lock(&mkia_hook_table_lock);
+		mhia_table[id] = hook_fn;
+		spin_unlock(&mkia_hook_table_lock);
+		return 0;
+	}
+	return -1;
+}
+
+void
+mkia_alloc_descriptors( struct task_struct *curr)
+{
+	mkia_task_t *mtip=mkia_get_task_info(curr);
+
+	MKIA_ASSERT(!(mtip->mti_flags & MKIF_DESC_ALLOCATED));
+	mkia_alloc_gdt(mtip);
+	mkia_alloc_ldt(mtip);
+	mkia_alloc_tss(curr, MKI_TSS_IOBITMAP_ENTRIES);
+	MKIA_ASSERT(mtip->mti_merge_gdtp != NULL);
+	MKIA_ASSERT(mtip->mti_merge_ldtp != NULL);
+	MKIA_ASSERT(mtip->mti_merge_tssp != NULL);
+	mtip->mti_flags |= MKIF_DESC_ALLOCATED;
+}
+
+/*
+ * mkia_cleanup_descriptors()
+ *    needs to be called for each dead session
+ *
+ * This function is always called in task context so no locking is
+ * necessary.
+ */
+void
+mkia_cleanup_descriptors( mkia_task_t *mtip )
+{
+	MKIA_ASSERT(mtip->mti_flags & MKIF_DESC_ALLOCATED);
+	mkia_free_ldt(mtip);
+	mkia_free_tss(mtip);
+	mkia_free_gdt(mtip);
+	MKIA_ASSERT(mtip->mti_merge_gdtp == NULL);
+	MKIA_ASSERT(mtip->mti_merge_ldtp == NULL);
+	MKIA_ASSERT(mtip->mti_merge_tssp == NULL);
+	mtip->mti_flags &= ~MKIF_DESC_ALLOCATED;
+}
+
+/*
+ * mkia_free_task_info()
+ */
+void mkia_free_task_info(mkia_task_t *mtip)
+{
+	if(mtip->mti_merge_gdtp != NULL) {
+		printk("WARNING! freeing MKI task info struct without first freeing gdt. Freeing gdt/ldt/tss\n");
+		mkia_load_linux_descriptors();
+		mkia_cleanup_descriptors(mtip);
+	}
+	kfree(mtip);
+}
+
+void
+mkia_remove_hook(int id)
+{
+	/* 
+	 * For now all the dummy hooks return the same value.
+	 * If we ever add hooks where mhia_void() is not appropriate
+	 * we need to change the code below to a switch() {} statement
+	 */
+	MKIA_ASSERT(mhia_table != NULL);
+	if ((id >= 0) && (id < NUM_HOOKS)) {
+		mhia_table[id] = mhia_void;
+		if (id == SWITCH_TO) {
+			/*
+			 * If we are removing the SWITCH_TO hook, then
+			 * merge is unloading so clean up the IDT as well.
+			 */
+			mkia_cleanup_idt();
+		}
+	}
+}
+
+/*
+ * mhi_switch_to
+ */
+void
+mhia_switch_to(struct task_struct *next)
+{
+	mkia_task_t *mtip;
+
+	/* If the next task is MARKED, switch in our descriptor tables
+	   and call the SWITCH_TO hook */
+	/* don't use mki_get_task_info, because it allocates it if NULL */
+	if (unlikely((mtip=next->mki_task_info) != NULL &&
+	    mtip->mti_flags & MKIF_MARKED)) {
+		/* switch in private gdt, idt, ldt and tr */
+		mkia_load_mki_descriptors(mtip);
+		mtip->mti_flags |= MKIF_IN_SWITCH;
+		(void) (*mhia_table[SWITCH_TO])(mtip->mti_vm86p);
+		mtip->mti_flags &= ~MKIF_IN_SWITCH;
+		next->thread.fs = mtip->mti_fs;
+		next->thread.gs = mtip->mti_gs;
+	}
+}
+
+/*
+ * mhi_switch_away
+ */
+void
+mhia_switch_away(struct task_struct *prev)
+{
+	mkia_task_t *mtip;
+
+	/* don't use mki_get_task_info, because it allocates it
+	 * if NULL, and we don't need _every_ task having an mtip */
+	if (unlikely((mtip=prev->mki_task_info) != NULL &&
+	    mtip->mti_flags & MKIF_MARKED)) {
+		int zero = 0;
+		asm volatile("movl %%fs,%0":"=m" (*(int *)&mtip->mti_fs));
+		asm volatile("movl %%gs,%0":"=m" (*(int *)&mtip->mti_gs));
+		asm volatile("movl %0, %%fs": : "m" (zero));
+		asm volatile("movl %0, %%gs": : "m" (zero)); 
+
+		mkia_load_linux_descriptors();
+		mtip->mti_flags |= MKIF_IN_SWITCH;
+		(void) (*mhia_table[SWITCH_AWAY])(mtip->mti_vm86p);
+		mtip->mti_flags &= ~MKIF_IN_SWITCH;
+	}
+}
+
+void
+mhia_exit(void)
+{
+	/* Call the EXIT hook for MARKED tasks */
+	mkia_task_t *mtip;
+	struct task_struct *curr;
+	curr=current;
+	/* don't use mki_get_task_info, because it allocates it if NULL */
+	if (unlikely((mtip=curr->mki_task_info) != NULL)) {
+		if (likely(mtip->mti_flags & MKIF_MARKED)) {
+			(void) (*mhia_table[THREAD_EXIT])(mtip->mti_vm86p);
+			/* put everything back */
+			mkia_load_linux_descriptors();
+			if (mtip->mti_flags & MKIF_DESC_ALLOCATED) 
+				mkia_cleanup_descriptors(mtip);
+			mkia_free_task_info(mtip);
+		}
+		curr->mki_task_info = NULL;
+	}
+}
+
+/*
+ * mhia_ret_user
+ *
+ * This routine gets called just before the kernel is going to make
+ * a kernel mode to user mode transition.  If this is a MARKED task,
+ * and there is an event pending, we call the RET_USER hook.
+ */
+void
+mhia_ret_user(unsigned long *r0ptr)
+{
+	mkia_task_t *mtip;
+	struct task_struct *curr=current;
+	/* don't use mki_get_task_info, because it allocates it if NULL */
+	if (likely((mtip=curr->mki_task_info) == NULL)) {
+		return;
+	}
+
+	if (unlikely(!(mtip->mti_flags & MKIF_MARKED)))
+		return;
+
+	if (curr->sigpending) {
+		/*
+		 * We catch signals here so that the lower layer does
+		 * not try to do the Linux DOSEMU vm86 handling.  On
+		 * merge a signal in the VM86 process is always a reason
+		 * to exit.
+		 */
+		__sti();
+		do_exit(1);
+	}
+	if (mtip->mti_event_pending) {
+		mtip->mti_event_pending = 0;
+		(void) (*mhia_table[RET_USER])(r0ptr);
+	}
+	MKIA_ASSERT(mkia_get_preempt_count() == 0);
+}
+
+/*
+ * mhia_swap
+ */
+void
+mhia_swap(int priority, int gfp_mask)
+{
+	int hard_flag;
+
+	/*
+	 * A "HARD" swap means get rid of all mappings rather than
+	 * just aging them.
+	 */
+	hard_flag = (priority < 6);
+	(void) (*mhia_table[SWAP_PAGES])((void *) hard_flag);
+}
+
+/* mkia_pgvfree assumes PAGE_SIZE <= size <= 4M */
+/* mkia_pgvfree assumes size is multiple of PAGE_SIZE */
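+/* The 4M bound presumably reflects the non-PAE i386 layout, where
+ * PGDIR_SIZE is 4MB, so the region can cross at most one page-table
+ * directory boundary -- which is all the loop below handles. */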
+void
+mkia_pgvfree(void * addr, unsigned long size)
+{
+	pgd_t * dir;
+	pmd_t * pmd;
+	pte_t * pte;
+	unsigned long end;
+	unsigned long address = (unsigned long) addr;
+
+	MKIA_ASSERT((size >= PAGE_SIZE) && (size < 0x400000) &&
+		((size & 0xfff) == 0));
+
+	dir = pgd_offset_k(address);
+	pmd = pmd_offset(dir, address);
+	pte = pte_offset_kernel(pmd, address);
+	address &= ~PGDIR_MASK;
+	end = address + size;
+	if (end > PGDIR_SIZE) {
+		end = PGDIR_SIZE;
+	}
+
+	/* skip first page and just clear pte of others */
+
+	pte++;
+	address += PAGE_SIZE;
+	size -= PAGE_SIZE;
+	while (address < end) {
+		pte_clear(pte);
+		address += PAGE_SIZE;
+		size -= PAGE_SIZE;
+		pte++;
+	}
+	if (size) {
+		dir++;
+		pmd = pmd_offset(dir, address);
+		pte = pte_offset_kernel(pmd, address);
+		while (size) {
+			pte_clear(pte);
+			size -= PAGE_SIZE;
+			pte++;
+		}
+	}
+	vfree(addr);
+}
+
+/*
+ * void
+ * mkia_post_event(void *cookie)
+ *
+ * Sets an event pending.  NOTE: this routine may be called OUT of
+ * context and, in fact, from an interrupt.  We can get away without
+ * a LOCK prefix because we do a plain assignment rather than a
+ * read-modify-write.
+ */
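+/*
+ * (On i386 an aligned 32-bit store is atomic by itself; only a
+ * read-modify-write such as "|=" would need the LOCK prefix on SMP.)
+ */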
+void 
+mkia_post_event(void *cookie)
+{
+	struct task_struct *t;
+	mkia_task_t *mtip;
+
+	unsigned long flags;
+
+	flags=0;
+	t = (struct task_struct *) cookie;
+
+	(mtip=(mkia_task_t *)t->mki_task_info)->mti_event_pending = 1;
+   
+#ifdef CONFIG_SMP
+	/*
+	 * If the task is running on a different CPU, force a reschedule
+	 * on that CPU.  This will force that task into the kernel if it
+	 * is not already there, and on the way back to user mode,
+	 * mti_event_pending will get checked. Only do this if we are not
+	 * being called from a SWITCH_TO or SWITCH_AWAY hook. Otherwise
+	 * we can get a deadlock, since interrupts could be disabled on
+	 * this cpu and we could block on the run_queue lock or the reschedule.
+	 */
+	if (!(mtip->mti_flags & MKIF_IN_SWITCH) && (smp_num_cpus > 1)) {
+#if defined(MKI_HAS_MKI_KICK_IF_RUNNING)
+		mki_kick_if_running(t);
+#elif defined(cpu) && !defined(SCHED_YIELD)
+		if ((t->state == TASK_RUNNING) && (t->cpu != cpu()))
+			kick_if_running(t);
+#else /* !cpu || SCHED_YIELD */
+		spin_lock_irqsave(&runqueue_lock, flags);
+		if (task_has_cpu(t) && t->processor != smp_processor_id())
+			smp_send_reschedule(t->processor);
+		spin_unlock_irqrestore(&runqueue_lock, flags);
+#endif /* !cpu || SCHED_YIELD */
+	}
+
+#endif /* CONFIG_SMP */
+
+}
+
+int  mkia_set_gdt_entry(unsigned short sel, unsigned long *new_entry)
+{
+	int ret;
+	mkia_task_t *mtip;
+
+	mtip = mkia_get_task_info(current);
+	if (!(mtip->mti_flags & MKIF_DESC_ALLOCATED)) {
+		printk("mki warning: allocating descriptors in "
+			"mkia_set_gdt_entry\n");
+		mkia_alloc_descriptors(current);
+	}
+	/* if this is the first time we are setting a descriptor, 
+	 * be sure the old copies of the linux descriptors are 
+	 * cleared out and that current->mm->context.segments is correct */
+	if (!(mtip->mti_flags & MKIF_GDT_SELECTOR_ADDED)) {
+		/* selectors 0-12 */
+		mtip->mti_flags |= MKIF_GDT_SELECTOR_ADDED;
+		memset( mtip->mti_merge_gdtp, 0, MKI_CLEAR_GDT_AMOUNT);
+	}
+	if (sel >= MKI_TSS_DESC) {
+		return EBUSY;
+	}
+	if (!(ret = mkia_internal_set_gdt_entry(sel, new_entry, mtip))) {
+		/* everything went ok, so switch in our private descriptors */
+		mkia_load_mki_descriptors(mtip);
+	}
+	return ret;
+}
+
+int  mkia_set_ldt_entry(unsigned short sel, unsigned long *new_entry)
+{
+	struct desc_struct *pldte;
+	mkia_task_t *mtip;
+	struct task_struct *curr = current;
+
+	mtip = mkia_get_task_info(curr);
+	MKIA_ASSERT(mtip->mti_merge_ldtp);
+
+	pldte = (struct desc_struct *)
+		    ((char *) mtip->mti_current_ldtp + (sel & ~(0x7)));
+
+	*pldte = *(struct desc_struct *) new_entry;
+	return 0;
+}
+
+int  mkia_check_vm86(void)
+{
+	return ((mkia_get_task_info(current))->mti_flags & MKIF_MARKED) 
+	    ? 1 : 0;
+}
+
+static inline
+int
+mkia_alloc_idt(void)
+{
+	/* No private IDT yet. Make private copy of IDT.
+	 * For F00F bug systems allocate two pages. 
+	 * Make a ro version, which points to the rw.
+	 */
+	if (boot_cpu_data.f00f_bug) {
+		pte_t * pte;
+		pte_t * pte_rw;
+		unsigned long page;
+
+		mkia_idt_rw = vmalloc(2*PAGE_SIZE);
+		if (mkia_idt_rw == NULL)
+			return 0;
+		page = (unsigned long) mkia_idt_rw;
+		pte_rw = pte_offset_kernel(pmd_offset(pgd_offset(&init_mm,
+							  page), page), page);
+		page += PAGE_SIZE;
+		mkia_idt = (struct desc_struct *) page;
+		pte = pte_offset_kernel(pmd_offset(pgd_offset(&init_mm,
+						       page), page), page);
+		__free_page(pte_page(*pte));
+		*pte = *pte_rw;
+		*pte = pte_modify(*pte, PAGE_KERNEL_RO);
+		flush_tlb_all();
+	}
+	else {
+		mkia_idt = mkia_idt_rw = vmalloc(PAGE_SIZE);
+	}
+	if ( mkia_idt == NULL )
+		return 0;
+
+	memcpy(mkia_idt_rw, LINUX_IDTP, PAGE_SIZE);
+	asm volatile("lidt %0": : "m" (mkia_idt_.limit));
+	return 1;
+}
+
+void mkia_set_idt_entry(unsigned short vect_num, unsigned long *new_entry, unsigned long *prev_entry)
+{
+	if ( mkia_idt == NULL )
+		if (! mkia_alloc_idt()) return;
+
+	lock_kernel();
+	*(struct desc_struct *) prev_entry  = mkia_idt[vect_num];
+
+	/*
+	 * Other ports seem to get away without cli() around the
+	 * next instruction, so we'll do the same for Linux.
+	 */
+	mkia_idt_rw[vect_num] = *(struct desc_struct *) new_entry;
+	unlock_kernel();
+}
+
+void mkia_set_idt_dpl(void)
+{
+	int i;
+	struct desc_struct *p;
+
+	/*
+	 * Go make all IDT descriptors DPL 0.  Note that Merge
+	 * has special case code to enable Linux's "int 0x80"
+	 * system calls.
+	 */
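+	/* DPL is bits 13-14 of the descriptor's high dword, hence (3 << 13). */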
+	lock_kernel();
+	p = mkia_idt_rw;
+	for (i = 0; i < 256; i++, p++) {
+		if ( p->a | p->b ) {
+			p->b &= ~(3 << 13);
+		}
+	}
+	unlock_kernel();
+}
+
+void mkia_getparm(int request, void *parm)
+{
+	extern int mkia_disable_kmalloc;
+	mkia_task_t *mtip;
+	switch (request) {
+	case PARM_POST_COOKIE:
+		*(void **)parm = current;
+		break;
+	case PARM_FRAME_BASE:
+		/*
+		 * This really is just the very bottom of the
+		 * stack.  To turn this into frame ptr, Merge
+		 * has to subtract off its idea of the frame size.
+		 */
+		*(void **)parm = ((char *)current) + LINUX_TASK_STRUCT_SIZE;
+		break;
+	case PARM_TASK_MASK:
+		/*
+		 * Since 2.4 requires 5.2.x of merge, just return 0
+		 */
+		*(unsigned long *)parm = 0;
+		break;
+	case PARM_TSSP:
+		/* This should not be called in 2.4, 
+		   mkia_adjust_esp0 is used instead */
+		*(void **) parm = NULL;
+		break;
+	case PARM_PRIVATE:
+		*(void **) parm = NULL;
+		break;
+	case PARM_LDTP:
+		if ((mtip = current->mki_task_info) == NULL)
+			*(void **) parm = NULL;
+		else
+			*(void **) parm = mtip->mti_current_ldtp;
+		break;
+	case PARM_RUID:
+		*(uid_t *) parm = current->uid;
+		break;
+
+	/* ========================================================= */
+	/* Begin Version 2 MKI calls */
+
+	case PARM_TASK_MASK_V2:
+		*(unsigned long *)parm = ~(LINUX_TASK_STRUCT_SIZE - 1);
+		break;
+
+	case PARM_MKI_VERSION:
+		*(unsigned int *)parm = 5;
+		break;
+
+	case PARM_NUM_CPUS:
+		*(unsigned int *)parm = smp_num_cpus;
+		break;
+
+	case PARM_MAX_NUMPROCS:
+#ifdef PID_MAX
+		*(unsigned int *)parm = PID_MAX;
+#elif defined(PID_NR)
+		*(unsigned int *)parm = PID_NR;
+#else
+		*(unsigned int *)parm = 65535;
+#endif
+		break;
+
+	case PARM_PREEMPT_ENABLE:
+#ifdef CONFIG_PREEMPT
+		*(void **) parm = mkia_preempt_enable;
+#else
+		*(int *) parm = 0;
+#endif
+		break;
+
+	case PARM_PREEMPT_DISABLE:
+#ifdef CONFIG_PREEMPT
+		*(void **) parm = mkia_preempt_disable;
+#else
+		*(int *) parm = 0;
+#endif
+		break;
+
+	case PARM_PREEMPT_COUNT:
+		mkia_disable_kmalloc = 0;
+#ifdef CONFIG_PREEMPT
+		*(void **) parm = mkia_get_preempt_count;
+#else
+		*(int *) parm = 0;
+#endif
+		break;
+
+	case PARM_HZ:
+		*(int *) parm = HZ;
+		break;
+
+	default:
+		printk("mkia_getparm: no support for this request %d\n",
+			request);
+		*(int *) parm = 0;		/* for sanity */
+		break;
+	}
+}
+
+int
+mkia_mem_lock_unlock(unsigned long start, unsigned long end, int lock)
+{
+	struct task_struct *tsk;
+	struct mm_struct *mm;
+	struct vm_area_struct * vma;
+	unsigned long curaddr;
+	unsigned long curend;
+	unsigned long pages;
+
+	tsk = current;
+	mm = tsk->mm;
+
+	/*
+	 * If we're in an interrupt or have no user
+	 * context, we must not take the fault..
+	 */
+	if (in_interrupt() || mm == &init_mm)
+		return -EBUSY;
+
+	MM_SEM_DOWN_WRITE(&mm->mmap_sem);
+
+	/* Do page alignment */
+	start &= PAGE_MASK;
+	end = (end + ~PAGE_MASK) & PAGE_MASK;
+
+	curaddr = start;
+	while (curaddr < end) {
+		vma = find_vma(mm, curaddr);
+		if (!vma) {
+			printk("mkia_mem_lock_unlock: no vma found! (mm=0x%p curaddr=0x%lx)\n",
+			       mm, curaddr);
+			goto up_and_fail;
+		}
+		if (vma->vm_start != curaddr) {
+			goto up_and_fail;
+		}
+		if (vma->vm_end > end) {
+			goto up_and_fail;
+		}
+		curaddr = vma->vm_end;
+	}
+
+	/*
+	 * OK.  Now that we've done that, we can go ahead and lock (or unlock)
+	 * the pages.  If we are locking we just have to set VM_LOCKED and
+	 * then call make_pages_present to make sure the ptes get filled in.
+	 * VM_LOCKED will prevent swap_out_vma() from stealing the pages.
+	 * For unlock, we just have to clear VM_LOCKED.
+	 */
+	curaddr = start;
+	while (curaddr < end) {
+		vma = find_vma(mm, curaddr);
+		curend = vma->vm_end;
+		pages = (curend - curaddr) >> PAGE_SHIFT; 
+		if (lock) {
+			MKIA_ASSERT((vma->vm_flags & VM_LOCKED) == 0);
+			vma->vm_flags |= VM_LOCKED;
+			mm->locked_vm += pages;
+			make_pages_present(curaddr, curend);
+		} else {
+			MKIA_ASSERT(vma->vm_flags & VM_LOCKED);
+			vma->vm_flags &= ~VM_LOCKED;
+			MKIA_ASSERT(mm->locked_vm >= pages);
+			mm->locked_vm -= pages;
+		}
+		curaddr = curend;
+	}
+	MM_SEM_UP_WRITE(&mm->mmap_sem);
+	return 0;
+
+up_and_fail:
+	MM_SEM_UP_WRITE(&mm->mmap_sem);
+	printk("mkia_mem_lock_unlock: bad params: "
+	    "start %lx, end %lx, lock %x\n", start, end, lock);
+	return -EINVAL;
+}
+
+int
+mkia_set_private_ldt(void *ldtp, size_t limit)
+{
+	unsigned long ldtaddr;
+	unsigned long endaddr;
+	struct task_struct *curr;
+	mm_context_t *contextp;
+	mkia_task_t *mtip;
+
+	curr=current;
+	mtip = mkia_get_task_info(curr);
+	MKIA_ASSERT((mtip->mti_flags & MKIF_MARKED) != 0);
+	if (curr->mm == NULL)
+		return EINVAL;
+
+	contextp = &curr->mm->context;
+	if (! (mtip->mti_flags & MKIF_SETLDT_DONE)) {
+		mtip->mti_save_ldtp = mtip->mti_current_ldtp;
+		mtip->mti_save_ldt_size = mtip->mti_current_ldt_limit + 1;
+		mtip->mti_flags |= MKIF_SETLDT_DONE;
+		if (ldtp == NULL)
+			return 0;
+	}
+	if (ldtp == NULL) {
+		/* NULL means restore the saved original LDT */
+		ldtp = mtip->mti_save_ldtp;
+		/* mti_save_ldt_size is stored in bytes above, so no
+		 * LDT_ENTRY_SIZE scaling is needed here */
+		limit = mtip->mti_save_ldt_size - 1;
+	}
+
+	/* Unlock previous LDT */
+	ldtaddr = (unsigned long) mtip->mti_current_ldtp;
+	if (ldtaddr < MKI_END_USER_ADDR) {
+		endaddr = ldtaddr +
+			mtip->mti_current_ldt_limit + 1;
+		mkia_mem_lock_unlock(ldtaddr, endaddr, 0 /* Unlock */);
+	}
+
+	/* Lock the new LDT */
+	ldtaddr = (unsigned long) ldtp;
+	if (ldtaddr < MKI_END_USER_ADDR) {
+		endaddr = ldtaddr + limit + 1;
+		mkia_mem_lock_unlock(ldtaddr, endaddr, 1 /* Lock */);
+	}
+
+	mkia_setup_ldt_descriptor(mtip, ldtp, limit);
+	asm volatile("lldt %%ax": : "a" (MKI_LDT_DESC));
+	return 0;
+}
+
+void mkia_pgfault_get_state(int *pfault_ok, mkia_fault_catch_t *fcstate)
+{
+	unsigned long faulting_eip;
+
+	/*
+	 *  Context check.  Make sure that we are not in the middle of
+	 *  servicing an interrupt.  If so, then the fault catch information
+	 *  is not valid.
+	 */
+	if (in_interrupt()) {
+		*pfault_ok = 0;
+		return;
+	}
+
+	/*
+	 *  Save the old state and clear the current.
+	 */
+	*pfault_ok = 1;
+	faulting_eip = (unsigned long) 
+		fcstate->mkifc_os_dependent[FC_FAULT_EIP];
+
+	fcstate->mkifc_catching_user_fault = 
+		(search_exception_table(faulting_eip) ? 1 : 0);
+
+	fcstate->mkifc_os_dependent[FC_SAVE_FS] = (int) (get_fs()).seg;
+	set_fs(KERNEL_DS);
+}
+
+void mkia_pgfault_restore_state(mkia_fault_catch_t *fcstate)
+{
+	set_fs(MAKE_MM_SEG(fcstate->mkifc_os_dependent[FC_SAVE_FS]));
+	fcstate->mkifc_os_dependent[FC_SAVE_FS] = 0;
+}
+
+/*
+ * void *
+ *	mkia_alloc_priv_tss(void)
+ */
+void *
+mkia_alloc_priv_tss(void)
+{
+	struct tss_struct *t;
+	mkia_task_t *mtip;
+
+	mtip = mkia_get_task_info(current);
+	if (mtip->mti_vm86p == NULL) {
+		printk("current task has no vm86p on entry to mki_alloc_priv_tss\n");
+		return NULL;
+	}
+	
+	MKIA_ASSERT(mtip->mti_flags & MKIF_DESC_ALLOCATED);
+	/* Just in case,
+	 * if we have not done so already, alloc the tss 
+	 * and make sure the iobitmap is properly initialized.
+	 */
+	if (!(mtip->mti_flags & MKIF_DESC_ALLOCATED)) {
+		printk("mki: warning! allocating descriptors from alloc_priv_tss\n");
+		mkia_alloc_descriptors(current);
+	}
+
+	t = mtip->mti_merge_tssp;
+
+	return (void *) t;
+}
+
+void mkia_set_vm86p(void *vm86p)
+{
+	struct task_struct *curr=current;
+	(mkia_get_task_info(curr))->mti_vm86p = vm86p;
+}
+
+void *
+mkia_get_vm86p(void)
+{
+	return (mkia_get_task_info(current))->mti_vm86p;
+}
+
+void mkia_mark_vm86(void)
+{
+	mkia_task_t *mtip;
+	mtip = mkia_get_task_info(current);
+	mtip->mti_flags |= MKIF_MARKED;
+	/* ok, now switch in the mki descriptors */	
+	if (!(mtip->mti_flags & MKIF_DESC_ALLOCATED)) {
+		printk("mki: warning! allocating descriptors from mki_mark_vm86\n");
+		mkia_alloc_descriptors(current);
+	}
+	mkia_load_mki_descriptors(mtip);
+}
+
+void mkia_yield(void)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20)
+	/*
+	 * Call sys_sched_yield
+	 * to yield the processor to other processes.
+	 */
+	sys_sched_yield();
+#else
+	/*
+	 * Starting in 2.4.20, modules are supposed to just call yield()
+	 * instead of sys_sched_yield().
+	 */
+	yield();
+#endif
+}
+
+void mkia_clear_vm86(void)
+{
+	mkia_task_t *mtip;
+
+	mtip=mkia_get_task_info(current);
+	/* Make sure we release our descriptors and go back to the 
+	   linux gdt, idt, tss, and ldt */
+	mkia_load_linux_descriptors();
+	mkia_cleanup_descriptors(mtip);
+	
+	mtip->mti_flags &= ~MKIF_MARKED;
+}
+
+int
+mkia_ftruncate_k(int fd, off_t length)
+{
+	return (int) sys_ftruncate(fd, (unsigned long) length);
+}
+
+/*
+ * mkia_alloc_task_info()
+ */
+mkia_task_t * 
+mkia_alloc_task_info(void)
+{
+	mkia_task_t *mtip;
+	mtip = (mkia_task_t *) kmalloc(sizeof(mkia_task_t), GFP_KERNEL);
+	if (mtip) memset(mtip, 0, sizeof(mkia_task_t));
+	return mtip;
+}
+
+void
+mkia_alloc_tss(void *cookie, int iob_size )
+{
+	struct desc_struct tssdesc;
+	unsigned short tbllimit;
+	int size;
+	struct tss_struct *ptss_table;
+	struct tss_struct *oldtss;
+	mkia_task_t *mtip = mkia_get_task_info((struct task_struct *) cookie);
+
+	MKIA_ASSERT(mtip->mti_merge_tssp == NULL);
+	if (( iob_size < 0 ) ||
+	    ( iob_size > 0x0ffff )) {
+		printk("mkia_alloc_tss: iob_size of 0x%x ignored\n", iob_size);
+		return;
+	}
+	oldtss = init_tss + smp_processor_id();
+	tbllimit = MKI_TSS_LIMIT(iob_size);
+	size = PAGE_ALIGN(tbllimit + 1);
+	ptss_table = kmalloc(size,GFP_KERNEL);
+	MKIA_ASSERT(ptss_table != NULL);
+	/* Copy only the real TSS; copying `size` (page-aligned) bytes
+	 * would read past this CPU's init_tss entry. */
+	memcpy(ptss_table, oldtss, sizeof(*oldtss));
+	/* make sure that the iobitmap is properly initialized */
+	ptss_table->ldt = (unsigned short) MKI_LDT_ENTRY;
+	ptss_table->bitmap = IO_BITMAP_OFFSET;
+	memset(ptss_table->io_bitmap, 0xff, (iob_size >> 3) + 1);
+	mtip->mti_merge_tssp = ptss_table;
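+	/* 0x8900 below marks the descriptor present, DPL 0, type 9
+	 * (available 32-bit TSS). */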
+	tssdesc.a = (0xffff0000 & ((unsigned long) ptss_table << 16)) | 
+	  (0x0000ffff & (tbllimit));
+	tssdesc.b = (0xff000000 & (unsigned long) ptss_table) |
+	  (0x000f0000 & tbllimit) | (0x00008900) | 
+	  (0x000000ff & ((unsigned long) ptss_table >> 16));
+	if (mtip->mti_merge_gdtp == NULL) {
+		printk("mki error: gdtp not allocated and mkia_alloc_tss called\n");
+	} else {
+		mkia_internal_set_gdt_entry(MKI_TSS_DESC, (unsigned long *) &tssdesc, mtip);
+	}
+}
+
+#if defined(CONFIG_KDB)
+#include <linux/kdb.h>
+int mkia_assert_debugger = 1;
+#else
+int mkia_assert_debugger = 0;
+#endif
+
+/*
+ * void
+ *	mkia_enter_debugger(int reason, int error, struct pt_regs *regs)
+ */
+
+void
+mkia_enter_debugger(int reason, int error, struct pt_regs *regs)
+{
+	/*
+	 * Enter the debugger that is currently in use, if any.
+	 */
+#ifdef CONFIG_KDB
+	switch (reason) {
+	case 1:		/* Called with error and regs values */
+		kdb(KDB_REASON_DEBUG, error, regs);
+		break;
+	default:
+		/*FALLTHRU*/
+	case 0:		/* Called from an "assert" or some other place that
+			 * does not have an error code or regs associated.
+			 */
+		if (in_interrupt()) {
+			kdb(KDB_REASON_CALL, 0, 0);
+		} else {
+			KDB_ENTER();
+		}
+		break;
+	}
+#else
+	(void) printk("mkia_enter_debugger: no debugger available\n"); 
+#endif
+}
+
+void
+mkia_assert_failure(char *exprstr, char *filename, int linenum)
+{
+	/*
+	 * Note that we make this a separate function so that
+	 * we can put a breakpoint here to trace the problems.
+	 */
+	mkia_preempt_disable();
+	(void) printk(KERN_ERR
+	    "MKI Assertion \"%s\" failed: file \"%s\", line %d\n", 
+		    exprstr, filename, linenum);
+	if (mkia_assert_debugger) {
+		mkia_enter_debugger(0, 0, 0);
+	} else {
+		panic("MKI Assertion \"%s\" failed: file \"%s\", line %d\n", 
+		    exprstr, filename, linenum);
+	}
+	mkia_preempt_enable_nosched();
+}
+
+mkia_64bit_t 
+mkia_virt_to_phys(void *kaddr)
+{
+	unsigned long vaddr;
+	pte_t *pte;
+	pte_t entry;
+
+	if (kaddr < high_memory)
+		return virt_to_phys(kaddr);
+
+	vaddr = (unsigned long) kaddr;
+	pte = pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), vaddr), vaddr);
+	if (pte == NULL)
+		return (mkia_64bit_t) -1;
+		
+	entry = *pte;
+	if (! pte_present(entry))
+		return (mkia_64bit_t) -1;
+
+    	return (mkia_64bit_t) (pte_val(entry) & PAGE_MASK);
+}
+
+#if defined( CONFIG_X86_PAE) && defined(EMPTY_PGD)
+/* ensure that the PMDs are all allocated; merge PAE support depends on this */
+static inline void mkia_alloc_pmds(struct mm_struct *mm)
+{
+	int i;
+	pgd_t *pgd = pgd_offset(mm, 0);
+	for (i=0;i<USER_PTRS_PER_PGD;i++) {
+		if (!pgd_present(*pgd)) {
+			PAGE_TABLE_LOCK(&mm->page_table_lock);
+			PMD_ALLOC(mm, pgd, i << PGDIR_SHIFT);
+			PAGE_TABLE_UNLOCK(&mm->page_table_lock);
+		}
+		pgd++;
+	}
+}
+#endif
+
+mkia_64bit_t *
+mkia_get_pagedir(void)
+{
+	struct mm_struct *mm;
+
+	mm = current->mm;
+
+#if defined( CONFIG_X86_PAE) && defined(EMPTY_PGD)
+	mkia_alloc_pmds(mm); /* ensure all the pmds are allocated */
+#endif
+	return (mkia_64bit_t *) mm->pgd;
+}
+
+unsigned long
+mkia_mmap_k(unsigned long addr, size_t len, int prot, int flags,
+   int filedes, off_t offset, int *errnop)
+{
+	struct file * file = NULL;
+	int error = -EFAULT;
+
+	if ((prot & PROT_USER) == 0) {
+		printk("mmap_k: Non-USER mapping requested\n");
+	}
+
+	if (!(flags & MAP_ANONYMOUS)) {
+		error = -EBADF;
+		file = fget(filedes);
+		if (!file)
+			goto out;
+	}
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+	MM_SEM_DOWN_WRITE(&current->mm->mmap_sem);
+	error = do_mmap(file, addr, len, prot, flags, offset);
+	if (file)
+		fput(file);
+	MM_SEM_UP_WRITE(&current->mm->mmap_sem);
+out:
+
+/*	error = sys_mmap2(addr, (unsigned long) len, 
+	(unsigned long) prot, (unsigned long) flags,
+	(unsigned long) filedes, (unsigned long) offset);
+*/
+	if ((error < 0) && (error > -4096)) {
+		*errnop = error;
+		error = -1;
+	} else {
+		*errnop = 0;
+	}
+	return ((unsigned long) error);
+}
+
+int
+mkia_munmap_k(unsigned long addr, size_t len)
+{
+	return sys_munmap(addr, len);
+}
+
+int
+mkia_mprotect_k(unsigned long addr, size_t len, unsigned int prot)
+{
+	return sys_mprotect(addr, len, prot);
+}
+
+void * 
+mkia_get_file_cookie(int filedes)
+{
+	struct file *file;
+	
+	file = fget(filedes);
+	return file;
+}
+
+void mkia_put_file_cookie(void *farg)
+{
+	struct file *file;
+	
+	if ((file = (struct file *) farg) != NULL)
+		fput(file);
+}
+
+mkia_64bit_t mkia_add_page_ref(unsigned long vaddr)
+{
+	struct vm_area_struct *vma;
+	mkia_64bit_t retval;
+	struct mm_struct *mm;
+	int page_pres;
+	int tries;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *ptep;
+	pte_t entry;
+
+	vaddr &= ~(PAGE_SIZE - 1);
+	mm = current->mm;
+	retval = 0;
+
+	MM_SEM_DOWN_WRITE(&mm->mmap_sem);
+	vma = find_vma(mm, vaddr);
+	if (!vma) {
+		printk("mkia_add_page_ref: find_vma failed for %lx\n", vaddr);
+	        goto mki_add_page_ref_done;
+	}
+
+	entry = __pte(0);
+	page_pres = 0;
+	ptep = NULL;
+	for (tries = 0; tries < 2; tries++) {
+		PAGE_TABLE_LOCK(&mm->page_table_lock);
+		pgd = pgd_offset(vma->vm_mm, vaddr);
+		pmd = PMD_ALLOC(mm, pgd, vaddr);
+		if (pmd && ((ptep = PTE_ALLOC(mm, pmd, vaddr)) != 0)) {
+			entry = *ptep;
+			page_pres = (pte_present(entry) && pte_write(entry));
+			if (page_pres) {
+				/* break with the page_table_lock held! */
+				break;
+			}
+		}
+		PAGE_TABLE_UNLOCK(&mm->page_table_lock);
+		if (ptep) {
+			PTE_UNMAP(ptep);
+			ptep = NULL;
+		}
+		make_pages_present(vaddr, vaddr + PAGE_SIZE);
+	}
+	if (! page_pres) {
+		printk("mkia_add_page_ref: couldn't make %lx present\n", vaddr);
+	        goto mki_add_page_ref_done;
+	}
+	MKIA_ASSERT(VALID_PAGE(pte_page(entry)));
+	get_page(pte_page(entry));
+	PAGE_TABLE_UNLOCK(&mm->page_table_lock);
+	if (ptep) {
+		PTE_UNMAP(ptep);
+		ptep = NULL;
+	}
+	atomic_inc(&mkia_context_rss);
+	retval = pte_val(entry);
+
+mki_add_page_ref_done:
+	MM_SEM_UP_WRITE(&mm->mmap_sem);
+	return retval;
+}
+
+int  mkia_remove_page_ref(unsigned long vaddr, mkia_64bit_t physpte, void *file_cookie, off_t offset)
+{
+	struct page *pageinfop;
+	unsigned long physaddr;
+	struct file *file;
+	int retcode;
+	pte_t pte;
+
+	retcode = 0;
+	physaddr = physpte & PAGE_MASK;
+	offset &= PAGE_MASK;
+	file = (struct file *) file_cookie;
+	MKIA_ASSERT(file != NULL);
+	if (file == NULL) {
+		/* Just in case: handle it gracefully */
+		goto rem_page_ref_done;
+	}
+
+	pte = *((pte_t *)&physpte);
+	if (!VALID_PAGE(pte_page(pte)))
+	        goto rem_page_ref_done;
+
+	pageinfop = pte_page(pte);
+	if (PageReserved(pageinfop)) {
+		mkia_cnt_rpr_pagereserved++;
+		goto rem_page_ref_free;
+	}
+
+	MKIA_ASSERT(pageinfop->mapping->host != NULL);
+	MKIA_ASSERT(file->f_dentry->d_inode == pageinfop->mapping->host);
+	MKIA_ASSERT(pageinfop->index == (unsigned long)(offset >> PAGE_SHIFT));
+
+	if ((physpte & (_PAGE_DIRTY | _PAGE_ACCESSED)) == 0) {
+		/*
+		 * If this is a clean mapping, i.e. no mod or acc bits,
+		 * then we can just decrement the page use counts and
+		 * be done with it!
+		 */
+		mkia_cnt_rpr_not_dirty_acc++;
+	}
+	if (physpte & _PAGE_ACCESSED) {
+		mkia_cnt_rpr_accessed++;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10)
+		pageinfop->age += PAGE_AGE_ADV;
+		if (pageinfop->age > PAGE_AGE_MAX)
+			pageinfop->age = PAGE_AGE_MAX;
+#else
+		flush_tlb_page(find_vma(current->mm,vaddr), vaddr);
+#endif
+	}
+	if (physpte & _PAGE_DIRTY) {
+		mkia_cnt_rpr_dirty++;
+		set_page_dirty(pageinfop);
+	}
+
+rem_page_ref_free:
+	MKIA_ASSERT(atomic_read(&pageinfop->count) >= 2);
+	put_page(pageinfop);
+	atomic_dec(&mkia_context_rss);
+	MKIA_ASSERT(atomic_read(&mkia_context_rss) >= 0);
+
+rem_page_ref_done:
+	return retcode;
+}
+
+/*
+ * int
+ * mki_uaddr_mapped(unsigned long address)
+ *
+ * returns:
+ *	0 if the address is not mapped
+ *	1 if the address is mapped
+ */
+int
+mkia_uaddr_mapped(unsigned long address)
+{
+	struct mm_struct *mm;
+	struct vm_area_struct * vma;
+
+	mm = current->mm;
+
+	MM_SEM_DOWN_READ(&mm->mmap_sem);
+	vma = find_vma(mm, address);
+	MM_SEM_UP_READ(&mm->mmap_sem);
+
+	return (vma != NULL);
+}
+
+/*
+ * int
+ * mki_upageflt(unsigned long address, int error_code)
+ *
+ * returns:
+ *	0 if the pagefault could not be resolved
+ *	1 if the pagefault could be resolved
+ */
+int
+mkia_upageflt(unsigned long address, int error_code)
+{
+	struct task_struct *tsk;
+	struct mm_struct *mm;
+	struct vm_area_struct * vma;
+	int write;
+
+	tsk = current;
+	mm = tsk->mm;
+
+	/*
+	 * If we're in an interrupt or have no user
+	 * context, we must not take the fault..
+	 */
+	if (in_interrupt() || mm == &init_mm)
+		goto return_fail;
+
+	MM_SEM_DOWN_READ(&mm->mmap_sem);
+
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto up_and_fail;
+	if (vma->vm_start <= address)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto up_and_fail;
+
+	/*
+	 * The do_page_fault() code handles stack growth here but
+	 * we don't do this because we don't have a standard process
+	 * whose stack we can grow.  The emulator has a fixed-size
+	 * stack, and it's not our job to grow the Windows stacks.
+	 */
+
+good_area:
+	write = 0;
+	switch (error_code & 3) {
+		default:	/* 3: write, present */
+			/* fall through */
+		case 2:		/* write, not present */
+			if (!(vma->vm_flags & VM_WRITE))
+				goto up_and_fail;
+			write++;
+			break;
+		case 1:		/* read, present */
+			goto up_and_fail;
+		case 0:		/* read, not present */
+			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+				goto up_and_fail;
+	}
+
+
+	switch (handle_mm_fault(mm, vma, address, write)) {
+	case 1:
+		tsk->min_flt++;
+		break;
+	case 2:
+		tsk->maj_flt++;
+		break;
+	default:
+		goto up_and_fail;
+	}
+
+	MM_SEM_UP_READ(&mm->mmap_sem);
+	return 1;
+
+up_and_fail:
+	MM_SEM_UP_READ(&mm->mmap_sem);
+	return 0;
+
+return_fail:
+	return 0;
+}
+
+void
+mkia_adjust_esp0(int numlongs)
+{
+	struct tss_struct *tss = init_tss + smp_processor_id();
+	struct task_struct *curr=current;
+	mkia_task_t *mtip;
+
+	tss->esp0 -= (numlongs * sizeof(unsigned long));
+	(&curr->thread)->esp0 -= (numlongs * sizeof(unsigned long));
+	mtip = mkia_get_task_info(curr);
+	/* MKIF_DESC_ALLOCATED should already be set by this point;
+	 * if it is not, allocate the descriptors as a fallback. */
+	if (!(mtip->mti_flags & MKIF_DESC_ALLOCATED)) {
+		mkia_alloc_descriptors(curr);
+	}
+}
+
+int
+mkia_get_current_task_index(void)
+{
+	return current->pid;
+}
+
+
+/*
+ * void mkia_wake_up()
+ *
+ * Synchronization variable wakeup.
+ * The standard wake_up() works on 2.4; on 2.2 it does not disable
+ * interrupts, which can cause a deadlock.
+ */
+void
+mkia_wake_up(void *wqp)
+{
+	wake_up((wait_queue_head_t *) wqp);
+}
+
+
+/*
+ * int mkia_call_svwait()
+ *
+ * Synchronization variable wait.
+ *
+ * This must be implemented on the mki side of the world because
+ * write_lock_irqsave() compiles differently if __SMP__ is defined.
+ * The trick here (and why we can't just use sleep_on() or
+ * interruptible_sleep_on()) is that we need to give up the lock
+ * AFTER we have set the task state and added ourselves to the wait
+ * queue.
+ *
+ * This function is called indirectly (its address is obtained via
+ * mkia_getparm()).  This is so that a version 2 win4lin can still run
+ * with a version 1 mki, even though a version 1 mki does not support
+ * a mkia_sv_wait() function.
+ */
+int
+mkia_call_svwait(void *wq, volatile int *lockp, int interruptible)
+{
+	struct task_struct *taskp;
+	wait_queue_t wait;
+	unsigned long wqflags;
+	unsigned long flags;
+	int retval;
+
+#ifdef CONFIG_PREEMPT
+	MKIA_ASSERT(mkia_get_preempt_count() != 0);
+#endif
+	taskp = current;
+	init_waitqueue_entry(&wait, taskp);
+	__save_flags(flags);
+	if (interruptible) {
+		taskp->state = TASK_INTERRUPTIBLE;
+	} else {
+		taskp->state = TASK_UNINTERRUPTIBLE;
+	}
+/* code taken from SLEEP_ON_HEAD start */
+	wq_write_lock_irqsave(&((wait_queue_head_t *)wq)->lock, wqflags);
+	__add_wait_queue((wait_queue_head_t *)wq, &wait);
+	wq_write_unlock(&((wait_queue_head_t *) wq)->lock);
+/* code from SLEEP_ON_HEAD end */
+	*lockp = 0;     /* Unlock */
+	mkia_preempt_enable_nosched();
+	schedule();
+/* code taken from SLEEP_ON_TAIL start */
+	wq_write_lock_irq(&((wait_queue_head_t *) wq)->lock);
+	__remove_wait_queue((wait_queue_head_t *) wq, &wait);
+	wq_write_unlock_irqrestore(&((wait_queue_head_t *) wq)->lock, wqflags);
+/* code from SLEEP_ON_TAIL end */
+
+	taskp->state = TASK_RUNNING;
+	if (interruptible) {
+		retval = (signal_pending(taskp)) ? 1 : 0;
+	} else {
+		retval = 0;
+	}
+	__restore_flags(flags);
+	return retval;
+}
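+
+/*
+ * Illustrative caller protocol (hypothetical caller; the real callers
+ * live on the merge side of the interface):
+ *
+ *	acquire driver lock, set *lockp;
+ *	while (!condition) {
+ *		if (mkia_call_svwait(wqp, lockp, 1))
+ *			break;		// interrupted by a signal
+ *		re-acquire driver lock, set *lockp;
+ *	}
+ *
+ * *lockp is cleared only after the task state is set and the task is
+ * on the wait queue, which is what closes the lost-wakeup window
+ * described above.
+ */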
diff -Naur mki-adapter-old/arch/i386/mki-adapter/mki-adapter.h mki-adapter-new/arch/i386/mki-adapter/mki-adapter.h
--- mki-adapter-old/arch/i386/mki-adapter/mki-adapter.h	1969-12-31 16:00:00.000000000 -0800
+++ mki-adapter-new/arch/i386/mki-adapter/mki-adapter.h	2003-12-01 17:29:08.000000000 -0800
@@ -0,0 +1,213 @@
+/*
+ ****************************************************************************
+ * Copyright 2001 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: mki-adapter.h,v 1.27 2003/10/29 17:49:46 rwb Exp $
+ ***************************************************************************
+ */
+#ifndef MKI_ADAPTER_H
+#define MKI_ADAPTER_H
+
+#ifdef IN_ADAPTER
+int mkia_ver_1_2_10;
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("NeTraverse");
+MODULE_DESCRIPTION("NeTraverse MKI Adapter");
+#endif
+#endif
+
+#ifndef PROT_USER
+#define PROT_USER       0x8             /* page is accessible from user mode */
+#endif
+
+#define PAGE_SHIFT      12
+#define PAGE_SIZE       (1UL << PAGE_SHIFT)
+#define PAGE_MASK       (~(PAGE_SIZE-1))
+#define PAGESIZE PAGE_SIZE
+
+#define ERESTARTSYS 512
+#define ENOIOCTLCMD 515
+
+#ifdef MODULE
+#ifndef IN_ADAPTER
+extern void __this_module;
+
+#define MKIA_MOD_INC_USE_COUNT mkia_mod_inc_use_count(&__this_module)
+#define MKIA_MOD_DEC_USE_COUNT mkia_mod_dec_use_count(&__this_module)
+
+#ifndef MKIA_LINUX_26
+static const char __module_kernel_version[]
+__attribute__((section(".modinfo"))) = "kernel_version=99.99.99-win4lin";
+
+static const char __module_license[] 
+__attribute__((section(".modinfo"))) = "license=Proprietary";
+
+static const char __module_author[]
+__attribute__((section(".modinfo"))) = "author=NeTraverse";
+
+static const char __module_description[] 
+__attribute__((section(".modinfo"))) = "description=NeTraverse binary module";
+#endif /* ! MKIA_LINUX_26 */
+
+#endif
+#else
+#define MKIA_MOD_INC_USE_COUNT
+#define MKIA_MOD_DEC_USE_COUNT
+#endif
+
+
+/**************/
+/* structures */
+/**************/
+struct mergevma {
+	unsigned long offset;
+	unsigned long vm_start;
+	unsigned long vm_end;
+	unsigned long nbytes;
+	unsigned long npages;
+	unsigned long page_size;
+	/* ... */
+};
+
+struct pt_regs;
+
+/**************/
+/* functions  */
+/**************/
+extern int mkia_process_owns_fpu(void *vm86p);
+
+extern void * mkia_kmalloc(int size, int flags);
+extern void * mkia_get_free_pages(int foo, int flags);
+/* values for flags */
+#define MKIA_SLEEP	0x0001
+#define MKIA_NOSLEEP	0x0002
+#define MKIA_DMA	0x0004
+#define MKIA_ZERO	0x0100
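+/*
+ * Example (illustrative): mkia_kmalloc(len, MKIA_SLEEP | MKIA_ZERO)
+ * maps onto kmalloc(len, GFP_KERNEL) followed by a memset() to zero
+ * the buffer (see mkia_kmalloc() in mki-main.c).
+ */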
+
+extern void mkia_kfree(void *pointer);
+extern void mkia_free_pages(unsigned long addr, int size);
+
+extern void * mkia_vmalloc(int size);
+extern void mkia_vfree(void *pointer);
+
+extern int mkia_strncmp(char *s1, char *s2, int len);
+extern int mkia_strcmp(char *s1, char *s2);
+extern void * mkia_strncpy(char *s1, char *s2, int len);
+extern void * mkia_strcpy(char *s1, char *s2);
+extern void mkia_memset(void *mem, int val, int len);
+extern void mkia_memcpy(void *mem, void *mem2, int len);
+
+
+extern int mkia_register_chrdev(int maj, char * name, void *fops);
+extern void mkia_unregister_chrdev(int major, char * name);
+
+extern int mkia_get_fops_size(void);
+extern void *
+    mkia_alloc_file_ops(void *read, void *write, void *readdir, 
+		    void *poll, void *ioctl, void *mmap, void *open, 
+		    void *release);
+extern void mkia_free_file_ops(void *fops);
+
+extern void * mkia_mrgioctl_unlock_kernel(void);
+extern void mkia_mrgioctl_lock_kernel(void *cookie);
+extern int mkia_remap_page_range(unsigned long user_addr, 
+						    unsigned long phys_addr, 
+						    unsigned long size,
+						    void *vma);
+extern unsigned long mkia_get_vma_offset(void *vmap);
+extern unsigned long mkia_get_vma_page_prot(void *vmap);
+extern unsigned long mkia_get_vma_vm_start(void *vmap);
+extern unsigned long mkia_get_vma_vm_end(void *vmap);
+extern int mkia_get_inode_minor(void *inode);
+extern int mkia_get_file_minor(void *file);
+
+extern int mkia_install_hook(int id, int (*hook_fn)(void *));
+extern void * mkia_alloc_waitqueuep(void);
+extern void mkia_free_waitqueuep(void *wqp);
+extern void * mkia_current(void);
+extern int mkia_signal_pending(void *foo);
+extern void * mkia_get_current_task(void);
+extern int mkia_get_current_pid(void);
+extern int mkia_get_current_task_index(void);
+extern int mkia_call_svwait(void *wqp, volatile int *lockp, 
+				   int interruptible);
+extern void mkia_wake_up(void *wqp);
+extern void mkia_poll_wait(void *file, void *wqp, 
+					      void *wait);
+extern void mkia_poll_wake(void *wqp); 
+extern void mkia_adjust_esp0(int numlongs);
+extern void mkia_mod_inc_use_count(void * module);
+extern void mkia_mod_dec_use_count(void * module);
+extern int mkia_request_irq(int irq,
+		void (*handler)(int, void *, struct pt_regs *), 
+		unsigned long foo, char *bar, void *baz);
+extern void mkia_free_irq(int irq, void *baz);
+extern void mkia_kill_proc(int procref, int sig, int flag);
+extern void mkia_file_set_private_data(void * filp, void *data);
+extern void * mkia_file_get_private_data(void * filp);
+extern void * mkia_file_get_f_pos_addr(void * filp);
+extern long mkia_file_get_f_mode(void * filp);
+extern int mkia_is_file_nonblock(void *file);
+extern int mkia_copy_to_user(void *dest, void *src, int len);
+extern int mkia_copy_from_user(void *dest, void *src, int len);
+extern int mkia_get_user(void *data, int len);
+extern int mkia_put_user(unsigned long flags, void *data, int len);
+
+extern int mkia_init_vnetint(void);
+extern void mkia_cleanup_vnetint(void);
+extern int mkia_populate_mrgvma(struct mergevma *mrgvma, void * vmap);
+
+extern int timeout(void (*timeout_func)(void *), 
+		void *timeout_arg, long ticks);
+extern void untimeout(int id);
+
+extern void mkia_post_event(void *cookie);
+extern int  mkia_set_gdt_entry(unsigned short sel, unsigned long *new_entry);
+extern int  mkia_set_ldt_entry(unsigned short sel, unsigned long *new_entry);
+extern int  mkia_check_vm86(void);
+extern void * mkia_get_vm86p(void);
+extern void mkia_set_vm86p(void *vm86p);
+extern void mkia_mark_vm86(void);
+extern void mkia_clear_vm86(void);
+extern void mkia_set_idt_entry(unsigned short vect_num, 
+		unsigned long *new_entry, unsigned long *prev_entry);
+extern void mkia_set_idt_dpl(void);
+extern void mkia_enter_debugger(int reason, int error, void *regs);
+extern void mkia_getparm(int request, void *parm);
+extern int  mkia_set_private_ldt(void *ldtp, size_t limit);
+extern void mkia_pgfault_get_state(int *pfault_ok, void *fcstate);
+extern void mkia_pgfault_restore_state(void *fcstate);
+extern void mkia_yield(void);
+extern void mkia_remove_hook(int id);
+
+extern mkia_64bit_t mkia_virt_to_phys(void *kaddr);
+extern mkia_64bit_t * mkia_get_pagedir(void);
+extern int  mkia_remove_page_ref(unsigned long vaddr, mkia_64bit_t physpte, 
+		void *file_cookie, off_t offset);
+extern mkia_64bit_t mkia_add_page_ref(unsigned long vaddr);
+
+extern void * mkia_get_file_cookie(int filedes);
+extern void mkia_put_file_cookie(void *farg);
+extern int  mkia_upageflt(unsigned long address, int error_code);
+extern void * mkia_alloc_priv_tss(void);
+extern unsigned long mkia_mmap_k(unsigned long, size_t, 
+		int, int, int, off_t, int *);
+extern unsigned long mkia_mprotect_k(unsigned long, size_t, int);
+extern int mkia_munmap_k(unsigned long, size_t);
+extern int mkia_ftruncate_k(int, off_t);
+extern void mhia_switch(void *, void *);
+extern void mhia_switch_to(void *, void *);
+extern void mhia_switch_away(void *, void *);
+extern void mhia_ret_user(void *, void *);
+extern void mhia_exit(void *, void *);
+extern void mhia_swap(void *, void *);
+
+extern int printk(const char * fmt, ...);
+extern void panic(const char * fmt, ...);
+
+#endif /* MKI_ADAPTER_H */
+
diff -Naur mki-adapter-old/arch/i386/mki-adapter/mkifunc.h mki-adapter-new/arch/i386/mki-adapter/mkifunc.h
--- mki-adapter-old/arch/i386/mki-adapter/mkifunc.h	1969-12-31 16:00:00.000000000 -0800
+++ mki-adapter-new/arch/i386/mki-adapter/mkifunc.h	2003-09-08 11:26:26.000000000 -0700
@@ -0,0 +1,153 @@
+/*
+ ****************************************************************************
+ * Copyright 2001 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: mkifunc.h,v 1.8 2003/05/20 04:58:55 rwb Exp $
+ ***************************************************************************
+ * Function declarations and constants of the Linux-specific Merge/Kernel 
+ * Interface (mki).
+ */
+
+#ifndef MKIFUNC_H
+#define MKIFUNC_H
+
+/* take care of some types if not defined. */
+#ifndef _LINUX_TYPES_H
+#include <sys/types.h>
+#endif
+
+typedef unsigned long long mkia_64bit_t;
+
+#ifndef MKI_END_USER_ADDR
+#define MKI_END_USER_ADDR 0xC0000000
+#endif
+
+#ifndef PARM_POST_COOKIE
+/* mki_getparm() parameter identifiers  */
+#define PARM_POST_COOKIE     0  /* pointer to current LWP (lwp_t) */
+#define PARM_FRAME_BASE      1  /* stack frame base */
+#define PARM_CPU_TYPE        2  /* pointer to CPU type */
+#define PARM_PRIVATE         3  /* pointer to the location of a
+                                   scratch memory pointer */
+#define PARM_GDTP            4  /* pointer to current GDT for this LWP */
+#define PARM_LDTP            5  /* pointer to current LDT for this LWP */
+#define PARM_IDTP            6  /* pointer to current IDT for this LWP */
+#define PARM_TSSP            7  /* pointer to current TSS for this LWP */
+#define PARM_RUID            8  /* real UID for this process */
+#define PARM_TASK_MASK       9  /* Old V1 request to get Task Mask */
+/* ===== End of MKI Version 1 supported calls ====== */
+
+#define PARM_TASK_MASK_V2   10  /* New V2 request to get the Task Mask, i.e. */
+                                /*   the value for esp to get to task struct */
+#define PARM_MKI_VERSION    11  /* MKI version number */
+#define PARM_NUM_CPUS       12  /* Number of CPUs present */
+#define PARM_MAX_NUMPROCS   13  /* Maximum number of tasks */
+#define PARM_CURPROC_INDEX  14  /* Index of current task */
+#define PARM_SVWAIT_FUNC    15  /* Address of _mki_sv_wait() routine */
+#define PARM_SVWAKEUP_FUNC  16  /* Address of _mki_sv_wakeup() routine */
+#define PARM_POLLWAKE_FUNC  17  /* Address of _mki_poll_wake() routine */
+#define PARM_PREEMPT_ENABLE 18  /* preemption enable function */
+#define PARM_PREEMPT_DISABLE 19 /* preemption disable function */
+#define PARM_PREEMPT_COUNT  20  /* preemption count function */
+#define PARM_HZ             21  /* get HZ value */
+/*
+ * merge function offset into the hook functions table.
+ */
+#define SWITCH_AWAY   0
+#define SWITCH_TO     1
+#define THREAD_EXIT   2
+#define RET_USER      3
+#define SIGNAL        4
+#define QUERY         5
+#define SWAP_PAGES    6
+#define NUM_HOOKS     7
+
+/*
+ *  Index values for the os dependent portion of mki_fault_catch_t
+ */
+#define MKI_FC_SIZE	6
+
+#define FC_FAULT_EIP	0
+#define FC_SAVE_FS	1
+
+typedef struct {
+	int mkifc_catching_user_fault; /* Boolean */
+	int mkifc_os_dependent[MKI_FC_SIZE]; /* OS dependent state */
+} mki_fault_catch_t;
+
+/* MKI version of vaddr_t */
+typedef unsigned long mkiul_t;
+
+/* MCV: MKI Caller Version */
+#define MCV_MAGIC		(0xfabc0000)
+#define MCV_MAGIC_MASK		(0xffff0000)
+#define MCV_VERSION_MASK	(0x0000ffff)
+#define MCV_MAKE_VER(ver)	(((ver) & MCV_VERSION_MASK) | MCV_MAGIC)
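+/* e.g. MCV_MAKE_VER(2) == 0xfabc0002; the fixed magic in the high
+ * half lets the callee distinguish a tagged version from a stray
+ * value. */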
+
+/*
+ * mki_enable_context_cookies data type
+ * supports fast merge context switch
+ * see platinum MKI interface doc for details.
+ */
+typedef struct mki_ecc {
+	mkiul_t 	mkiecc_addr;  /* context base addr */
+	size_t		mkiecc_len;   /* context size */
+	void *		mkiecc_cookie;/* outarg */
+} mki_ecc_t;
+#endif
+
+#ifdef MKI_HOOK_TABLE_EXISTS /* i.e. no defs in mki.h */
+
+#ifdef __GDT_SLOTS_RESERVED
+
+#define MKI_CLEAR_GDT_AMOUNT 0x100
+#define MKI_TSS_ENTRY (__GDT_SLOTS_RESERVED - 2)
+#define MKI_TSS_DESC  (MKI_TSS_ENTRY << 3)
+#define MKI_LDT_ENTRY (__GDT_SLOTS_RESERVED - 1)
+#define MKI_LDT_DESC  (MKI_LDT_ENTRY << 3)
+
+#else /* !__GDT_SLOTS_RESERVED */
+
+#define MKI_TSS_ENTRY (GDT_ENTRY_KERNEL_BASE - 2)
+#define MKI_TSS_DESC  (MKI_TSS_ENTRY << 3)
+#define MKI_LDT_ENTRY (GDT_ENTRY_KERNEL_BASE - 1)
+#define MKI_LDT_DESC  (MKI_LDT_ENTRY << 3)
+#define MKI_CLEAR_GDT_AMOUNT MKI_TSS_DESC
+
+#endif /* !__GDT_SLOTS_RESERVED */
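+/* An x86 selector is its descriptor-table index shifted left by 3;
+ * the low three bits hold the RPL and table-indicator flags, hence
+ * the "<< 3" above. */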
+
+
+#ifndef MKIF_MARKED
+/* new 2.4 kernel defs */
+struct mki_task_info {
+        void *mti_vm86p;
+        void *mti_merge_gdtp;
+        void *mti_save_ldtp;
+	int mti_save_ldt_size;
+        void *mti_merge_ldtp;
+        void *mti_merge_tssp;
+        void *mti_current_ldtp;
+        size_t mti_current_ldt_limit;
+        size_t mti_current_tss_limit;
+        unsigned char mti_flags;
+        unsigned char mti_event_pending;
+        int mti_fs;
+        int mti_gs;
+};
+
+typedef struct mki_task_info mkia_task_t;
+
+/* defines for the mki_task_flags field */
+#define MKIF_MARKED             0x01
+#define MKIF_SETLDT_DONE        0x02
+#define MKIF_DESC_ALLOCATED     0x04
+#define MKIF_GDT_SELECTOR_ADDED 0x08
+#define MKIF_TASK_CLEARED       0x10
+#define MKIF_IN_SWITCH          0x20
+#endif
+
+#endif /* MKI_HOOK_TABLE_EXISTS */
+
+#endif /* MKIFUNC_H */
diff -Naur mki-adapter-old/arch/i386/mki-adapter/mki-main.c mki-adapter-new/arch/i386/mki-adapter/mki-main.c
--- mki-adapter-old/arch/i386/mki-adapter/mki-main.c	1969-12-31 16:00:00.000000000 -0800
+++ mki-adapter-new/arch/i386/mki-adapter/mki-main.c	2003-09-08 11:26:24.000000000 -0700
@@ -0,0 +1,601 @@
+/*
+ ****************************************************************************
+ * Copyright 2001 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: mki-main.c,v 1.10 2003/02/13 23:23:52 rwb Exp $
+ ***************************************************************************
+ * This file contains functions that are largely similar between 2.2 and 
+ * 2.4
+ ***************************************************************************
+ *
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/kdev_t.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/poll.h>
+#include <asm/mki.h>
+#include <asm/uaccess.h>
+#include <asm/mman.h>
+
+#include <mkifunc.h>
+#define IN_ADAPTER 1
+#include <mki-adapter.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+#define VMA_OFFSET(a) (((struct vm_area_struct *) a)->vm_offset)
+#else 
+#define VMA_OFFSET(a) (((struct vm_area_struct *) a)->vm_pgoff << PAGE_SHIFT)
+#endif
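+/* vm_offset (2.2) is a byte offset, while vm_pgoff (2.4) counts whole
+ * pages, so the latter is shifted up by PAGE_SHIFT to keep byte units. */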
+
+
+/************* KEEP THIS FIRST! ****************
+ * this is to allow old versions of modutils 
+ * to still load this properly */
+static void hackheader(void) __attribute__ ((unused));
+static void hackheader()
+{
+	asm volatile("nop;nop;nop;nop");
+	asm volatile("nop;nop;nop;nop");
+	asm volatile("nop;nop;nop;nop");
+	asm volatile("nop;nop;nop;nop");
+}
+
+int
+mkia_process_owns_fpu(void *vm86p)
+{
+	return ((current->flags & PF_USEDFPU) != 0);
+}
+
+int 
+mkia_register_chrdev(int maj, 
+		    char *name, 
+		    void *fops)
+{
+	return register_chrdev(maj, name, (struct file_operations *) fops);
+}
+
+void
+mkia_unregister_chrdev(int major, char * name)
+{
+	unregister_chrdev(major, name);
+}
+
+
+/*
+ * populate a mergevma struct from a vmarea
+ *
+ */
+int
+mkia_populate_mrgvma(struct mergevma *mrgvma, void * vmap)
+{
+
+
+	mrgvma->offset = VMA_OFFSET(vmap);
+
+	mrgvma->vm_start = ((struct vm_area_struct *) vmap)->vm_start;
+	mrgvma->vm_end = ((struct vm_area_struct *) vmap)->vm_end;
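+	/* Round the mapping length up to a whole number of pages. */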
+	mrgvma->nbytes = 
+		(((mrgvma->vm_end - mrgvma->vm_start) + 
+		  PAGE_SIZE - 1) & PAGE_MASK);
+	mrgvma->npages = mrgvma->nbytes >> PAGE_SHIFT;
+	mrgvma->page_size = PAGE_SIZE;
+	return 0;
+}
+
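+/*
+ * Later kernels (and some patched 2.4 trees) take the vma as the
+ * first argument to remap_page_range(); MKI_FOUND_VMA_PATCH selects
+ * that signature.
+ */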
+int
+mkia_remap_page_range(unsigned long user_addr, 
+			     unsigned long phys_addr, 
+			     unsigned long size,
+			     void *vma) 
+{
+	return remap_page_range(
+#ifdef MKI_FOUND_VMA_PATCH
+			(struct vm_area_struct *) vma,
+#endif
+			user_addr, 
+			phys_addr, 
+			size, 
+			((struct vm_area_struct *) vma)->vm_page_prot);
+}
+
+void *
+mkia_alloc_file_ops(void *read, 
+			   void *write, 
+			   void *readdir, 
+			   void *poll, 
+			   void *ioctl, 
+			   void *mmap, 
+			   void *open, 
+			   void *release)
+{
+	struct file_operations *ret=kmalloc(sizeof(struct file_operations), 
+					    GFP_ATOMIC);
+	if (!ret)
+		return ret;
+
+	memset(ret, 0, sizeof(struct file_operations));
+
+	ret->read = read;
+	ret->write = write;
+	ret->readdir = readdir;
+	ret->poll = poll;
+	ret->ioctl = ioctl;
+	ret->mmap = mmap;
+	ret->open = open;
+	ret->release = release;
+	return ret;
+}
+
+void
+mkia_free_file_ops(void * fops)
+{
+	kfree(fops);
+}
+
+int 
+mkia_get_inode_minor(void *inode)
+{
+	return MINOR(((struct inode *) inode)->i_rdev);
+}
+
+int 
+mkia_get_file_minor(void *file)
+{
+	return MINOR(((struct file *)file)->f_dentry->d_inode->i_rdev);
+}
+
+void *
+mkia_get_free_pages(int pages, int flags)
+{
+	int fpflags=0;
+	unsigned long ret;
+	if (flags & MKIA_NOSLEEP) fpflags |= GFP_ATOMIC;
+	if (flags & MKIA_SLEEP) fpflags |= GFP_KERNEL;
+	if (flags & MKIA_DMA) fpflags |= GFP_DMA;
+	/* "pages" is an allocation order: 2^pages pages are returned */
+	ret = __get_free_pages(fpflags, pages);
+	if (ret && (flags & MKIA_ZERO))
+		memset((void *) ret, 0, PAGE_SIZE << pages);
+	return (void *) ret;
+}
+
+void 
+mkia_free_pages(unsigned long addr, int size)
+{
+	free_pages(addr, size);
+}
+
+#ifdef CONFIG_PREEMPT
+/* This is set to 0 on the first call to mkia_getparm if the drivers
+ * know about preempt
+ */
+int mkia_disable_kmalloc = 1;
+#else
+int mkia_disable_kmalloc = 0;
+#endif
+
+void * 
+mkia_kmalloc(int size, int flags)
+{
+	int fpflags;
+	unsigned long ret;
+
+	fpflags=0;
+	if (mkia_disable_kmalloc) {
+		printk("mki-adapter: trying to run a non-preemption "
+			"capable Win4Lin on a preemptible kernel\n");
+		return NULL;
+	}
+
+	if (flags & MKIA_NOSLEEP) fpflags |= GFP_ATOMIC;
+	if (flags & MKIA_SLEEP) fpflags |= GFP_KERNEL;
+	if (flags & MKIA_DMA) fpflags |= GFP_DMA;
+	ret = (unsigned long) kmalloc(size, fpflags);
+	if (ret && (flags & MKIA_ZERO))
+		memset((void *) ret, 0, size);
+	return (void *) ret;
+}
+
+int
+mkia_signal_pending(void *foo)
+{
+	return signal_pending((struct task_struct *) foo);
+}
+
+void * 
+mkia_current()
+{
+	return current;
+}
+
+
+void
+mkia_mod_inc_use_count(void * module) 
+{
+	atomic_inc(&((struct module *) module)->uc.usecount);
+}
+
+void
+mkia_mod_dec_use_count(void * module) 
+{
+	atomic_dec(&((struct module *) module)->uc.usecount);
+}
+
+int
+mkia_get_current_pid()
+{
+	return current->pid;
+}
+
+void *
+mkia_get_current_task()
+{
+	return current;
+}
+
+void
+mkia_kill_proc(int procref, int sig, int foo)
+{
+	kill_proc(procref, sig, foo);
+}
+
+void
+mkia_file_set_private_data(void * filp, void *data)
+{
+    ((struct file *) filp)->private_data = data;
+}
+
+void *
+mkia_file_get_private_data(void * filp)
+{
+	return ((struct file *) filp)->private_data;
+}
+
+void *
+mkia_file_get_f_pos_addr(void * filp)
+{
+	return &(((struct file *) filp)->f_pos);
+}
+
+long
+mkia_file_get_f_mode(void * filp)
+{
+	return ((struct file *) filp)->f_mode;
+}
+
+int
+mkia_is_file_nonblock(void *file)
+{
+	return ((((struct file *)file)->f_flags & O_NONBLOCK) != 0);
+}
+
+int 
+mkia_copy_to_user(void *dest, void *src, int len)
+{
+	return copy_to_user(dest, src, len);
+}
+
+int 
+mkia_copy_from_user(void *dest, void *src, int len)
+{
+	return copy_from_user(dest, src, len);
+}
+
+int
+mkia_request_irq(int irq, void (*handler)(int, void *, struct pt_regs *), 
+		unsigned long foo, char *bar, void *baz)
+{
+	return request_irq(irq, handler, foo, bar, baz);
+}
+
+void
+mkia_free_irq(int irq, void *baz)
+{
+	free_irq(irq, baz);
+}
+
+void  
+mkia_kfree(void *pointer)
+{
+	kfree(pointer);
+}
+
+void * 
+mkia_vmalloc(int size)
+{
+	return vmalloc(size);
+}
+
+void  
+mkia_vfree(void *pointer)
+{
+	vfree(pointer);
+}
+
+
+int
+mkia_get_user(void * ptr, int len)
+{
+	if (len == sizeof(unsigned char)) {
+		unsigned char flags;
+		if (get_user(flags, (unsigned char *) ptr))
+			return -1;
+		else
+			return flags;
+	}
+	if (len == sizeof(unsigned short)) {
+		unsigned short flags;
+		if (get_user(flags, (unsigned short *) ptr))
+			return -1;
+		else
+			return flags;
+	}
+	if (len == sizeof(unsigned long)) {
+		unsigned long flags;
+		if (get_user(flags, (unsigned long *) ptr))
+			return -1;
+		else
+			return flags;
+	}
+	return 0;
+}
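+
+/*
+ * Note: -1 is also a legal fetched value, so callers of mkia_get_user()
+ * cannot reliably distinguish a fault from reading ~0UL.
+ */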
+
+int
+mkia_put_user(unsigned long flags, void * ptr, int len)
+{
+	if (len == 4) return put_user((unsigned long) flags, 
+			(unsigned long *) ptr);
+	if (len == 2) return put_user((unsigned short) flags, 
+			(unsigned short *) ptr);
+	if (len == 1) return put_user((unsigned char) flags, 
+			(unsigned char *) ptr);
+	return 0;
+}
+
+int
+mkia_strncmp(char *s1, char *s2, int len)
+{
+	return strncmp(s1,s2,len);
+}
+
+int
+mkia_strcmp(char *s1, char *s2)
+{
+	return strcmp(s1,s2);
+}
+
+void *
+mkia_strncpy(char *s1, char *s2, int len)
+{
+	return strncpy(s1,s2,len);
+}
+
+void *
+mkia_strcpy(char *s1, char *s2)
+{
+	return strcpy(s1,s2);
+}
+
+void 
+mkia_memset(void *mem, int val, int len)
+{
+	memset(mem, val, len);
+}
+
+void 
+mkia_memcpy(void *mem, void *mem2, int len)
+{
+	memcpy(mem, mem2, len);
+}
+
+void *
+mkia_alloc_waitqueuep(void)
+{
+	void *wq = NULL;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+	wq = kmalloc(sizeof(struct wait_queue *), GFP_ATOMIC);
+	if (!wq) return NULL;
+	memset(wq, 0, sizeof(struct wait_queue *));
+#else
+	wq = kmalloc(sizeof(wait_queue_head_t), GFP_ATOMIC);
+	if (!wq) return NULL;
+	memset(wq, 0, sizeof(wait_queue_head_t));
+	init_waitqueue_head((wait_queue_head_t *) wq);
+#endif
+	return wq;
+}
+
+void
+mkia_free_waitqueuep(void *waitqp)
+{
+	kfree(waitqp);
+}
+
+rwlock_t _mkia_waitqueue_lock = RW_LOCK_UNLOCKED;
+
+void 
+mkia_sleep_on(void *wqp)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+	sleep_on((struct wait_queue **) wqp);
+#else
+	sleep_on((wait_queue_head_t *) wqp);
+#endif
+}
+
+void 
+mkia_interruptible_sleep_on(void *wqp)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+	interruptible_sleep_on((struct wait_queue **) wqp);
+#else
+	interruptible_sleep_on((wait_queue_head_t *) wqp);
+#endif
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+/*
+ * int mkia_call_svwait()  
+ *
+ * Synchronization variable wait.
+ *
+ * This must be implemented on the mki side of the world because
+ * write_lock_irqsave() compiles differently if __SMP__ is defined.
+ * The trick here (and why we can't just use sleep_on() or
+ * interruptible_sleep_on()) is that we need to give up the lock
+ * AFTER we have set the task state and added ourselves to the wait
+ * queue.
+ *
+ * This function is called indirectly (its address is obtained via
+ * mkia_getparm()).  This is so that a version 2 win4lin can still run
+ * with a version 1 mki, even though a version 1 mki does not support
+ * a mkia_sv_wait() function.
+ */
+int
+mkia_call_svwait(void *wq, volatile int *lockp, int interruptible)
+{
+	struct task_struct *taskp;
+	struct wait_queue wait;
+	unsigned long wqflags;
+	unsigned long flags;
+	int retval;
+
+	taskp = current;
+	__save_flags(flags);
+	if (interruptible) {
+		taskp->state = TASK_INTERRUPTIBLE;
+	} else {
+		taskp->state = TASK_UNINTERRUPTIBLE;
+	}
+	wait.task = taskp;
+	write_lock_irqsave(&_mkia_waitqueue_lock, wqflags);
+	__add_wait_queue((struct wait_queue **) wq, &wait);
+	write_unlock_irqrestore(&_mkia_waitqueue_lock, wqflags);
+	*lockp = 0;	/* Unlock */
+	schedule();
+	write_lock_irqsave(&_mkia_waitqueue_lock, wqflags);
+	__remove_wait_queue((struct wait_queue **) wq, &wait);
+	write_unlock_irqrestore(&_mkia_waitqueue_lock, wqflags);
+	taskp->state = TASK_RUNNING;
+	if (interruptible) {
+		retval = (signal_pending(taskp)) ? 1 : 0;
+	} else {
+		retval = 0;
+	}
+	__restore_flags(flags);
+	return retval;
+}
+#else /* it's 2.4 - moved to mki24.c */
+#endif
+
+void
+mkia_poll_wait(void *file, void *wqp, void *wait)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+	poll_wait((struct file *) file, 
+		  (struct wait_queue **) wqp, 
+		  (poll_table *) wait);
+#else
+	poll_wait((struct file *) file, 
+		  (wait_queue_head_t *) wqp, 
+		  (poll_table *) wait);
+#endif
+}
+
+void
+mkia_poll_wake(void *wqp)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+	wake_up_interruptible((struct wait_queue **) wqp);
+#else
+	wake_up_interruptible((wait_queue_head_t *) wqp);
+#endif
+}
+
+/* routine to unlock the kernel in mrgioctl for some kernel versions since
+ * we use finer locking than linux does and we need the kernel lock to be free
+ */
+void *
+mkia_mrgioctl_unlock_kernel(void) 
+{
+
+#ifdef CONFIG_SMP
+	if (current->lock_depth > 0) {
+		unlock_kernel();
+		return (void *) 1;
+	}
+#endif
+	return NULL;
+}
+
+/* routine to re-lock the kernel in mrgioctl for 2.4 after the ioctl so
+ * that linux is happy */
+void 
+mkia_mrgioctl_lock_kernel(void * cookie)
+{
+#ifdef CONFIG_SMP
+	if (cookie)
+		lock_kernel();
+#endif
+}
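+
+/*
+ * Sketch of the intended pairing (illustrative only):
+ *
+ *	void *cookie = mkia_mrgioctl_unlock_kernel();
+ *	... ioctl work under the driver's own finer-grained locks ...
+ *	mkia_mrgioctl_lock_kernel(cookie);
+ */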
+
+#ifdef MODULE
+#ifdef MKI_HOOK_TABLE_EXISTS
+
+int
+install_mhi_hooks(void)
+{
+	mhi_hooks[MKI_HOOK_RET_USER] = &mhia_ret_user;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+	mhi_hooks[MKI_HOOK_SWITCH] = &mhia_switch;
+#else
+	mhi_hooks[MKI_HOOK_SWITCH_TO] = &mhia_switch_to;
+	mhi_hooks[MKI_HOOK_SWITCH_AWAY] = &mhia_switch_away;
+#endif
+	mhi_hooks[MKI_HOOK_EXIT] = &mhia_exit;
+	mhi_hooks[MKI_HOOK_SWAP] = &mhia_swap;
+	return 0;
+}
+
+int
+remove_mhi_hooks(void)
+{
+	mhi_hooks[MKI_HOOK_RET_USER] = &mhi_void_hook;
+	/* Mirror the version conditional in install_mhi_hooks() */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+	mhi_hooks[MKI_HOOK_SWITCH] = &mhi_void_hook;
+#else
+	mhi_hooks[MKI_HOOK_SWITCH_TO] = &mhi_void_hook;
+	mhi_hooks[MKI_HOOK_SWITCH_AWAY] = &mhi_void_hook;
+#endif
+	mhi_hooks[MKI_HOOK_EXIT] = &mhi_void_hook;
+	mhi_hooks[MKI_HOOK_SWAP] = &mhi_void_hook;
+	return 0;
+}
+
+#endif
+
+int 
+init_module(void)
+{
+#ifdef MKI_HOOK_TABLE_EXISTS
+	return install_mhi_hooks();
+#endif
+	return 0;
+}
+
+int 
+cleanup_module(void)
+{
+#ifdef MKI_HOOK_TABLE_EXISTS
+	return remove_mhi_hooks();
+#endif
+	return 0;
+}
+
+#endif
diff -Naur mki-adapter-old/arch/i386/mki-adapter/mkimki.c mki-adapter-new/arch/i386/mki-adapter/mkimki.c
--- mki-adapter-old/arch/i386/mki-adapter/mkimki.c	1969-12-31 16:00:00.000000000 -0800
+++ mki-adapter-new/arch/i386/mki-adapter/mkimki.c	2003-09-08 11:26:26.000000000 -0700
@@ -0,0 +1,298 @@
+/*
+ ****************************************************************************
+ * Copyright 2001 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: mkimki.c,v 1.6 2002/03/07 19:59:29 rlawrence Exp $
+ ***************************************************************************
+ * This one just calls the mki functions
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/modversions.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#include <asm/mki.h>
+#include <mkifunc.h>
+/*
+ * MKI (merge kernel interface) function prototypes
+ */
+extern mkiul_t  mki_add_page_ref(unsigned long);
+extern void    *mki_alloc_priv_tss(void);
+extern int      mki_check_vm86(void);
+extern void     mki_clear_vm86(void);
+extern void     mki_enter_debugger(int, int, struct pt_regs *);
+extern void    *mki_get_file_cookie(int);
+extern mkiul_t *mki_get_pagedir(void);
+extern void    *mki_get_vm86p(void);
+extern void     mki_getparm(int, void *);
+extern int      mki_install_hook(int, int (*)(void *));
+extern int      mki_install_hook_2_4(int, int (*)(void *));
+extern void     mki_mark_vm86(void);
+extern void     mki_pgfault_get_state(int *, mki_fault_catch_t *);
+extern void     mki_pgfault_restore_state(mki_fault_catch_t *);
+extern mkiul_t  mki_physmap(unsigned long, unsigned long);
+extern void     mki_put_file_cookie(void *);
+extern void     mki_post_event(void *);
+extern void     mki_process_trapret(void);
+extern void     mki_remove_hook(int);
+extern int      mki_remove_page_ref(unsigned long, unsigned long, void *, off_t);
+extern int      mki_set_gdt_entry(unsigned short, unsigned long *);
+extern void     mki_set_idt_dpl(void);
+extern void     mki_set_idt_entry(unsigned short, unsigned long *, unsigned long *);
+extern int      mki_set_ldt_entry(unsigned short, unsigned long *);
+extern int      mki_set_private_ldt(void *, size_t);
+extern void     mki_set_vm86p(void *);
+extern int      mki_uaddr_mapped(mkiul_t);
+extern int      mki_upageflt(mkiul_t, int);
+extern mkiul_t  mki_virt_to_phys(void *);
+extern void     mki_yield(void);
+extern mkiul_t  mmap_k(mkiul_t, size_t, int, int, int, off_t, int *);
+extern int      mprotect_k(unsigned long, unsigned int, unsigned int);
+extern int      munmap_k(mkiul_t, size_t);
+extern int      ftruncate_k(int, off_t);
+
+int
+mkia_install_hook(int id, int (*hook_fn)(void *))
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+	return mki_install_hook(id, hook_fn);
+#else
+	return mki_install_hook_2_4(id, hook_fn);
+#endif
+}
+
+void
+mkia_remove_hook(int id)
+{
+	mki_remove_hook(id);
+}
+
+void
+mkia_set_idt_dpl (void)
+{
+	mki_set_idt_dpl();
+}
+
+void
+mkia_set_idt_entry (unsigned short vect_num, unsigned long *new_entry,
+			unsigned long *prev_entry)
+{
+	mki_set_idt_entry(vect_num, new_entry, prev_entry);
+}
+
+
+int 
+mkia_set_gdt_entry(unsigned short sel, unsigned long *new_entry)
+{
+	return mki_set_gdt_entry(sel, new_entry);
+}
+
+int 
+mkia_set_ldt_entry(unsigned short sel, unsigned long *new_entry)
+{
+	return mki_set_ldt_entry(sel, new_entry);
+}
+
+int
+mkia_set_private_ldt(void *ldtp, size_t limit)
+{
+	return mki_set_private_ldt(ldtp, limit);
+}
+
+void
+mkia_set_vm86p(void *vm86p)
+{
+	mki_set_vm86p(vm86p);
+}
+
+void *
+mkia_get_vm86p()
+{
+	return mki_get_vm86p();
+}
+
+/*
+ * void
+ * mkia_getparm(int request, void *)
+ *
+ * Get values needed by MERGE
+ */
+void
+mkia_getparm(int request, void *parm)
+{
+	mki_getparm(request, parm);
+}
+
+void
+mkia_post_event(void *cookie)
+{
+	mki_post_event(cookie);
+}
+
+void
+mkia_mark_vm86(void)
+{
+	mki_mark_vm86();
+}
+
+int
+mkia_check_vm86(void)
+{
+	return mki_check_vm86();
+}
+
+void
+mkia_clear_vm86(void)
+{
+	mki_clear_vm86();
+}
+
+void
+mkia_pgfault_get_state(int *pfault_ok, mki_fault_catch_t *fcstate)
+{
+	mki_pgfault_get_state(pfault_ok, fcstate);
+}
+
+void
+mkia_pgfault_restore_state(mki_fault_catch_t *fcstate)
+{
+	mki_pgfault_restore_state(fcstate);
+}
+
+void *
+mkia_alloc_priv_tss(void)
+{
+	return mki_alloc_priv_tss();
+}
+
+void
+mkia_yield(void)
+{
+	mki_yield();
+}
+
+void
+mkia_enter_debugger(int reason, int error, struct pt_regs *regs)
+{
+	mki_enter_debugger(reason, error, regs);
+}
+
+void
+mkia_assert_failure(char *exprstr, char *filename, int linenum)
+{
+	(void) printk(KERN_ERR
+		"MKI Assertion \"%s\" failed: file \"%s\", line %d\n",
+		exprstr, filename, linenum);
+	mkia_enter_debugger(0, 0, 0);
+}
+
+int
+mkia_ftruncate_k(int fd, off_t length)
+{
+	return ftruncate_k(fd, length);
+}
+
+mkia_64bit_t
+mkia_virt_to_phys(void *kaddr)
+{
+    	return (mkia_64bit_t) mki_virt_to_phys(kaddr);
+}
+
+mkia_64bit_t *
+mkia_get_pagedir(void)
+{
+	return (mkia_64bit_t *) mki_get_pagedir();
+}
+
+int
+mkia_remove_page_ref(unsigned long vaddr, mkia_64bit_t physpte,
+	void *file_cookie, off_t offset)
+{
+	return mki_remove_page_ref(vaddr, (unsigned long) physpte, file_cookie, offset);
+}
+
+unsigned long
+mkia_mmap_k(unsigned long addr, size_t len, int prot, int flags,
+   int filedes, off_t offset, int *errnop)
+{
+	return mmap_k(addr, len, prot, flags, filedes, offset, errnop);
+}
+
+int
+mkia_munmap_k(unsigned long addr, size_t len)
+{
+        return munmap_k(addr, len);
+}
+
+int
+mkia_mprotect_k(unsigned long addr, size_t len, unsigned int prot)
+{
+        return mprotect_k(addr, len, prot);
+}
+
+int
+mkia_upageflt(unsigned long address, int error_code)
+{
+	return mki_upageflt(address, error_code);
+}
+
+int
+mkia_uaddr_mapped(unsigned long address)
+{
+	return mki_uaddr_mapped(address);
+}
+
+void *
+mkia_get_file_cookie(int filedes)
+{
+	return mki_get_file_cookie(filedes);
+}
+
+void
+mkia_put_file_cookie(void *farg)
+{
+	mki_put_file_cookie(farg);
+}
+
+mkia_64bit_t
+mkia_add_page_ref(unsigned long vaddr)
+{
+	return ( mkia_64bit_t ) mki_add_page_ref(vaddr);
+}
+
+void
+mkia_adjust_esp0(int numlongs)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+	struct thread_struct *tss;
+	mki_getparm(PARM_TSSP, &tss);
+	tss->esp0 -= (numlongs * sizeof(unsigned long));
+#else
+	mki_adjust_esp0(numlongs);
+#endif
+}
+
+int
+mkia_get_current_task_index(void)
+{
+	int ret=0;
+	mki_getparm(PARM_CURPROC_INDEX, &ret);
+	return ret;
+}
+
+/*
+ * void mkia_wake_up()
+ *
+ * The wakeup routine itself is fetched from the mki side via
+ * mki_getparm(PARM_SVWAKEUP_FUNC, ...) rather than called directly.
+ */
+void
+mkia_wake_up(void *wqp)
+{
+	void (*svw) (struct wait_queue **);
+	
+	mki_getparm(PARM_SVWAKEUP_FUNC, &svw);
+	svw((struct wait_queue **) wqp);
+}
+
diff -Naur mki-adapter-old/arch/i386/mki-adapter/mkivnet.c mki-adapter-new/arch/i386/mki-adapter/mkivnet.c
--- mki-adapter-old/arch/i386/mki-adapter/mkivnet.c	1969-12-31 16:00:00.000000000 -0800
+++ mki-adapter-new/arch/i386/mki-adapter/mkivnet.c	2003-09-08 11:26:27.000000000 -0700
@@ -0,0 +1,928 @@
+/*
+ ****************************************************************************
+ * Copyright 2001 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: mkivnet.c,v 1.7 2002/03/20 20:09:48 rlawrence Exp $
+ ***************************************************************************
+ * This is part of a module that will be loaded on 2.2 systems, and included 
+ * in 2.4 MKIs, to handle all of the Linux-specific structures and function 
+ * calls for virtual networking (vnet).
+ ***************************************************************************
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/modversions.h>
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/smp_lock.h>
+#include <linux/smp.h>
+#include <vneteth.h>
+#include <vnetint-pub.h>
+
+#include <mkifunc.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+#define LINUX_22X_LOCK_KERNEL() lock_kernel()
+#define LINUX_22X_UNLOCK_KERNEL() unlock_kernel()
+#define MKI_DEV_T struct device
+#define LINUX_DEV_GET(ifname) dev_get(ifname)
+#define LINUX_DEV_PUT(dev) 
+#else 
+extern void	mki_adjust_esp0(int numlongs);
+#define LINUX_DEV_GET(ifname) dev_get_by_name(ifname)
+#define LINUX_DEV_PUT(dev) dev_put(dev)
+#define MKI_DEV_T struct net_device
+#define LINUX_22X_LOCK_KERNEL()
+#define LINUX_22X_UNLOCK_KERNEL()
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) */
+
+//#define DBGVNET(str1) printk str1
+#define DBGVNET(str1)
+
+spinlock_t vnet_inter_lock = SPIN_LOCK_UNLOCKED;
+#define DECLARELOCKSAVE(flags) unsigned long flags=0
+#define LOCKINTERCEPTOR(flags) spin_lock_irqsave(&vnet_inter_lock, flags)
+#define UNLOCKINTERCEPTOR(flags) spin_unlock_irqrestore(&vnet_inter_lock, flags)
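+/*
+ * vnet_inter_lock guards the interceptor, protocol and device lists
+ * below.  Note that it is dropped around calls out to filter functions
+ * and the real packet handlers, presumably so those paths can re-enter
+ * the interceptor.
+ */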
+
+int 
+mkia_ntohs(int protocol)
+{
+	return ntohs(protocol);
+}
+
+
+int *mkia_vni_vnetdebug = 0;
+void (*mkia_vni_logfunc)(char *, ...) = 0;
+
+extern unsigned short eth_type_trans(struct sk_buff *skb, MKI_DEV_T *dev);
+int mkia_vnetint_incoming(struct sk_buff *skb, MKI_DEV_T *dev, struct packet_type *pt);
+int mkia_vnetint_outgoing(struct sk_buff *skb, MKI_DEV_T *dev);
+
+struct notifier_block mkia_nb;
+int mkia_interceptor_count;
+struct vnetint_ifdev            *mkia_vnetint_ifdev_base;
+struct vnetint_proto            *mkia_vnetint_proto_base;
+struct vnetint_pcb              *mkia_vnetint_inter_base;
+
+/***************************/
+/* VNET interceptor code   */
+/***************************/
+
+/*********************************/
+/* dummy incoming packet handler */
+int
+mkia_vnetint_dummy(struct sk_buff *skb, 
+			  MKI_DEV_T *dev, 
+			  struct packet_type *pt)
+{
+	kfree_skb(skb);
+	return 0;
+}
+
+struct vnetint_proto *
+mkia_vnetint_find_proto(int type)
+{
+	struct vnetint_proto *vp;
+	
+	for (vp = mkia_vnetint_proto_base; vp != NULL; vp = vp->next)
+		if (vp->ptype->type == type)
+			break;
+	
+	return vp;
+}
+
+
+/* Add a protocol to the list. */
+struct vnetint_proto *
+mkia_vnetint_add_proto(int protocol)
+{
+	int type = ntohs(protocol);
+	struct packet_type *pt, *node;
+	struct vnetint_proto *vp;
+	DECLARELOCKSAVE(flags);
+		
+	LOCKINTERCEPTOR(flags);
+	if ( (vp = mkia_vnetint_find_proto(type)) ) {
+		vp->count++;
+		UNLOCKINTERCEPTOR(flags);
+		return vp;
+	}
+	UNLOCKINTERCEPTOR(flags);
+			
+	if (!(vp = kmalloc(sizeof(struct vnetint_proto), GFP_KERNEL))) {
+		return NULL;
+	}
+	vp->real_func = NULL;
+	
+	if (!(pt = kmalloc(sizeof(struct packet_type), GFP_KERNEL))) {
+		kfree(vp);
+		return NULL;
+	}
+	
+	pt->type = type;
+	pt->func = mkia_vnetint_dummy;
+	pt->dev = NULL;
+	
+	dev_add_pack(pt);
+	for (node = pt->next; node != NULL; node = node->next) {
+		if (node->type == type && node->dev == NULL) {
+			vp->ptype = node;
+			vp->real_func = node->func;
+			node->func = mkia_vnetint_incoming;	
+			DBGVNET(("vnetint_add_proto: found the real protocol handler\n"));
+			dev_remove_pack(pt);
+			kfree(pt);
+			break;
+		}
+	}
+	
+	if (node == NULL) {
+		pt->func = mkia_vnetint_incoming;
+		pt->data = vp;
+		vp->ptype = pt;
+	}
+	
+	vp->protocol = protocol;
+	
+	LOCKINTERCEPTOR(flags);
+	vp->next = mkia_vnetint_proto_base;
+	vp->count = 1;
+	mkia_vnetint_proto_base = vp;
+	UNLOCKINTERCEPTOR(flags);
+
+	return vp;
+}
+
+/* Remove a protocol from the list. */
+void
+mkia_vnetint_rmv_proto(int protocol)
+{
+	struct vnetint_proto *vp, **pnode;
+	struct packet_type pt, *node;
+	DECLARELOCKSAVE(flags);
+	
+	DBGVNET(("vnetint_rmv_proto: protocol type = 0x%0x\n", protocol));
+	LOCKINTERCEPTOR(flags);
+	for (pnode = &mkia_vnetint_proto_base; (*pnode) != NULL; pnode = &((*pnode)->next)) {
+		vp = (*pnode);
+		if (vp->protocol == protocol) {
+			vp->count--;
+			if (vp->count > 0) {
+				UNLOCKINTERCEPTOR(flags);
+				return;
+			}
+			if (vp->real_func != NULL) {
+				/* Check that the protocol is still registered */
+				pt.type = htons(protocol);
+				pt.func = mkia_vnetint_dummy;
+				pt.dev = NULL;
+				dev_add_pack(&pt);
+				for (node = pt.next; node != NULL; node = node->next) {
+					if (node == vp->ptype && node->func == mkia_vnetint_incoming) {
+						node->func = vp->real_func;
+						DBGVNET(("vnetint_rmv_proto: restored the real protocol handler\n"));
+					}
+				}
+				dev_remove_pack(&pt);
+			}
+			else {
+				if (vp->ptype && vp->ptype->data == vp) {
+					dev_remove_pack(vp->ptype);
+					kfree(vp->ptype);
+				}
+			}
+			*pnode = (*pnode)->next;
+			UNLOCKINTERCEPTOR(flags);
+			kfree(vp);
+			return;
+		}
+	}	
+	UNLOCKINTERCEPTOR(flags);
+	
+	DBGVNET(("vnetint_rmv_proto: protocol 0x%x not found.\n", protocol));
+}
+
+/* Find device from list - call with lock set */
+struct vnetint_ifdev *
+mkia_vnetint_find_dev(char *ifname)
+{
+	struct vnetint_ifdev *vd;
+	
+	for (vd = mkia_vnetint_ifdev_base; vd != NULL; vd = vd->next) {
+		DBGVNET(("vnetint_find_dev: comparing %s with %s\n", vd->ifname, ifname));
+		if (strcmp(vd->ifname, ifname) == 0) 
+			break;
+	}
+	
+	return vd;
+}
+
+/* FastFind device from list - call with lock set */
+struct vnetint_ifdev *
+mkia_vnetint_fastfind_dev(MKI_DEV_T *dev)
+{
+	struct vnetint_ifdev *vd;
+	
+	for (vd = mkia_vnetint_ifdev_base; vd != NULL; vd = vd->next)
+		if (vd->dev == dev)
+			break;
+	
+	return vd;
+}
+
+/* Add a device to the list. */
+struct vnetint_ifdev *
+mkia_vnetint_add_dev(char *ifname)
+{
+	struct vnetint_ifdev *vd;
+	MKI_DEV_T *dev = LINUX_DEV_GET(ifname);
+	DECLARELOCKSAVE(flags);
+	
+	DBGVNET(("vnetint_add_dev: device = %s\n", ifname));
+
+	if (!dev) {
+		DBGVNET(("vnetint_add_dev: no such device %s\n", ifname));
+		return NULL;
+	}
+	
+	LOCKINTERCEPTOR(flags);
+	if ( (vd = mkia_vnetint_find_dev(ifname)) ) {
+		vd->count++;
+		UNLOCKINTERCEPTOR(flags);
+		LINUX_DEV_PUT(dev);
+		return vd;
+	}
+	UNLOCKINTERCEPTOR(flags);
+			
+	if (!(vd = kmalloc(sizeof(struct vnetint_ifdev), GFP_KERNEL))) {
+		LINUX_DEV_PUT(dev);
+		return NULL;
+	}
+	
+	LOCKINTERCEPTOR(flags);
+	vd->dev = dev;
+	strncpy(vd->ifname, ifname, MAXIFNAMELEN);
+	vd->real_xmit = dev->hard_start_xmit;
+	dev->hard_start_xmit = mkia_vnetint_outgoing;
+	vd->next = mkia_vnetint_ifdev_base;
+	vd->count = 1;
+	mkia_vnetint_ifdev_base = vd;
+	UNLOCKINTERCEPTOR(flags);
+	
+	return vd;
+}
+
+
+/* Remove a device from the list. */
+void
+mkia_vnetint_rmv_dev(char *ifname)
+{
+	
+	struct vnetint_ifdev *vd, **pnode;
+	DECLARELOCKSAVE(flags);
+		
+	DBGVNET(("vnetint_rmv_dev: device = %s\n", ifname));
+	
+	LOCKINTERCEPTOR(flags);
+	for (pnode = &mkia_vnetint_ifdev_base; (*pnode) != NULL; pnode = &((*pnode)->next)) {
+		vd = (*pnode);
+		if (strcmp(vd->ifname, ifname) == 0) {
+			vd->count--;
+			if (vd->count > 0) {
+				UNLOCKINTERCEPTOR(flags);
+				return;
+			}
+			if (vd->dev != NULL && ((MKI_DEV_T *) (vd->dev))->hard_start_xmit == mkia_vnetint_outgoing) {
+				((MKI_DEV_T *) (vd->dev))->hard_start_xmit = vd->real_xmit;
+			}
+			*pnode = (*pnode)->next;
+			UNLOCKINTERCEPTOR(flags);
+			if (vd->dev != NULL) {
+				LINUX_DEV_PUT(vd->dev);
+			}
+			kfree(vd);
+			return;
+		}
+	}	
+	UNLOCKINTERCEPTOR(flags);
+	
+	DBGVNET(("vnetint_rmv_dev: dev %s not found.\n", ifname));
+}
+
+
+
+/* Called when network device changes state */
+int
+mkia_vnetint_dev_notifier(struct notifier_block *self, 
+				 unsigned long status, 
+				 void *ptr)
+{
+	
+	MKI_DEV_T *dev = (MKI_DEV_T *)ptr;
+
+	struct vnetint_ifdev *vd;
+	DECLARELOCKSAVE(flags);
+
+	DBGVNET(("vnetint_dev_notifier: Device %s changed state\n", dev->name));
+		
+	LOCKINTERCEPTOR(flags);
+	switch (status) {
+		case NETDEV_CHANGE:
+			DBGVNET(("vnetint_dev_notifier: NETDEV_CHANGE\n"));
+			break;
+		case NETDEV_UP:
+			DBGVNET(("vnetint_dev_notifier: NETDEV_UP\n"));
+			vd = mkia_vnetint_fastfind_dev(dev);
+			if (vd) {
+				vd->real_xmit = dev->hard_start_xmit;
+				dev->hard_start_xmit = mkia_vnetint_outgoing;
+			}
+			else {
+				DBGVNET(("vnetint_dev_notifier: dev %s not found\n", dev->name));
+			}
+			break;		
+		case NETDEV_DOWN:
+			DBGVNET(("vnetint_dev_notifier: NETDEV_DOWN\n"));
+			vd = mkia_vnetint_fastfind_dev(dev);
+			if (vd) {
+				DBGVNET(("vnetint_dev_notifier: found device\n"));
+				if (dev->hard_start_xmit == mkia_vnetint_outgoing) {
+					DBGVNET(("vnetint_dev_notifier: restore real_xmit\n"));
+					dev->hard_start_xmit = vd->real_xmit;
+				}
+				vd->real_xmit = NULL;
+			}
+			else {
+				DBGVNET(("vnetint_dev_notifier: dev %s not found\n", dev->name));
+			}
+			break;
+		case NETDEV_CHANGEMTU:
+			DBGVNET(("vnetint_dev_notifier: NETDEV_CHANGEMTU\n"));
+			break;
+		case NETDEV_CHANGEADDR:
+			DBGVNET(("vnetint_dev_notifier: NETDEV_CHANGEADDR\n"));
+			break;
+		case NETDEV_CHANGENAME:
+			DBGVNET(("vnetint_dev_notifier: NETDEV_CHANGENAME\n"));
+			break;
+		case NETDEV_REGISTER:
+			DBGVNET(("vnetint_dev_notifier: NETDEV_REGISTER\n"));
+			vd = mkia_vnetint_find_dev(dev->name);
+			if (vd) {
+				vd->dev = LINUX_DEV_GET(dev->name);
+			}
+			break;
+		case NETDEV_UNREGISTER:
+			DBGVNET(("vnetint_dev_notifier: NETDEV_UNREGISTER\n"));
+			vd = mkia_vnetint_find_dev(dev->name);
+			if (vd) {
+				vd->dev = NULL;
+				if (dev->hard_start_xmit == mkia_vnetint_outgoing) {
+					dev->hard_start_xmit = vd->real_xmit;
+				}
+				vd->real_xmit = NULL;
+				LINUX_DEV_PUT(dev);
+			}
+			else {
+				DBGVNET(("vnetint_dev_notifier: dev %s not found\n", dev->name));
+			}
+			break;
+		default:
+			DBGVNET(("vnetint_dev_notifier: Unknown device status\n"));
+			break;
+	}
+	UNLOCKINTERCEPTOR(flags);
+	
+	DBGVNET(("vnetint_dev_notifier: done\n"));
+
+	return 0;
+}
+
+
+/* Find interceptor on list. - call with lock set */
+struct vnetint_pcb *
+mkia_vnetint_find_inter(int protocol, char *ifname)
+{
+	struct vnetint_pcb *vi;
+	
+	for (vi = mkia_vnetint_inter_base; vi != NULL; vi = vi->next) {
+		if ((strncmp(vi->ifdev->ifname, ifname, MAXIFNAMELEN) == 0) &&
+			(vi->proto->protocol == protocol)) {
+			break;
+		}
+	}
+	
+	return vi;
+}
+
+
+/* FastFind interceptor on list. - call with lock set */
+static inline struct vnetint_pcb *
+mkia_vnetint_fastfind_inter(int type, MKI_DEV_T *dev)
+{
+	struct vnetint_pcb *vi;
+	
+	for (vi = mkia_vnetint_inter_base; vi != NULL; vi = vi->next) {
+		if ((vi->ifdev->dev == dev) && (vi->proto->ptype->type == type)) {
+			break;
+		}
+	}
+	
+	return vi;
+}
+
+
+/* Add an interceptor to the list */
+int
+mkia_vnetint_add_interceptor(struct vnetint_filter *filter)
+{
+	
+	struct vnetint_pcb *vi;
+	DECLARELOCKSAVE(flags);
+	
+	DBGVNET(("vnetint_add_interceptor: device = %8s protocol type = 0x%0x filter_func = 0x%x\n",
+		       filter->ifname, filter->protocol, filter->func));
+
+	if (filter->func == NULL) {
+		return -EINVAL;
+	}
+	
+	if (mkia_interceptor_count == MAX_INTERCEPTORS) {
+		return -ENOENT;
+	}
+
+	if ((vi = mkia_vnetint_find_inter(filter->protocol, filter->ifname))) {
+		/*
+		 * An interceptor already exists for this dev/proto pair.
+		 * Let the caller have the interceptor reference but return
+		 * EEXIST to warn that this interceptor was already defined.
+		 */
+		LOCKINTERCEPTOR(flags);
+		vi->count++; /* Increment the interceptor usage count */
+		filter->handle = vi;
+		UNLOCKINTERCEPTOR(flags);
+		return -EEXIST;
+	}
+				
+	if (!(vi = kmalloc(sizeof(struct vnetint_pcb), GFP_KERNEL))) {
+		return -ENOMEM;
+	}
+	
+	vi->ifdev = mkia_vnetint_add_dev(filter->ifname);
+	if (vi->ifdev == NULL) {
+		kfree(vi);
+		return -ENXIO;
+	}
+	
+	vi->proto = mkia_vnetint_add_proto(filter->protocol);
+	if (vi->proto == NULL) {
+		mkia_vnetint_rmv_dev(filter->ifname);
+		kfree(vi);
+		return -ENXIO;
+	}
+	
+	vi->filter_func = filter->func;
+	memset(&(vi->status), 0, sizeof(struct vnetint_status));
+	vi->status.flags = 0;
+	memcpy(&(vi->status.PhysMacAddr), ((MKI_DEV_T *) vi->ifdev->dev)->dev_addr, ETH_ALEN);
+	vi->status.flags |= VNETINTF_MACSET;
+	LOCKINTERCEPTOR(flags);
+	vi->next = mkia_vnetint_inter_base;
+	vi->count = 1;
+	mkia_vnetint_inter_base = vi;
+	mkia_interceptor_count++;
+	filter->handle = vi;
+	UNLOCKINTERCEPTOR(flags);
+	
+	return(0);
+}
+
+
+/* Remove an interceptor from the list */
+void
+mkia_vnetint_rmv_interceptor(struct vnetint_filter *filter)
+{
+	
+	char *ifname;
+	int protocol;
+	struct vnetint_pcb *vi, **pnode;
+	DECLARELOCKSAVE(flags);
+	
+	if (filter == NULL) {
+		DBGVNET(("vnetint_rmv_interceptor: remove all interceptors\n"));
+		LOCKINTERCEPTOR(flags);
+		/* Unlink each node before dropping the lock so the list is
+		 * never walked through freed memory. */
+		while ((vi = mkia_vnetint_inter_base) != NULL) {
+			mkia_vnetint_inter_base = vi->next;
+			ifname = vi->ifdev->ifname;
+			protocol = vi->proto->protocol;
+			DBGVNET(("vnetint_rmv_interceptor: device = %s, protocol type = 0x%0x\n", ifname, protocol));
+			mkia_interceptor_count--;
+			UNLOCKINTERCEPTOR(flags);
+			mkia_vnetint_rmv_dev(ifname);
+			mkia_vnetint_rmv_proto(protocol);
+			kfree(vi);
+			LOCKINTERCEPTOR(flags);
+		}
+		UNLOCKINTERCEPTOR(flags);
+		return;
+	}
+	
+	vi = filter->handle;
+	if (vi) {
+		DBGVNET(("vnetint_rmv_interceptor: device = %s, protocol type = 0x%0x\n",
+						filter->ifname, filter->protocol));
+		LOCKINTERCEPTOR(flags);
+		for (pnode = &mkia_vnetint_inter_base; (*pnode) != NULL; pnode = &((*pnode)->next)) {
+			if ((*pnode) == vi) {
+				vi->count--;
+				if ( vi->count > 0 ) {
+					UNLOCKINTERCEPTOR(flags);
+					return;
+				}
+				*pnode = (*pnode)->next;
+				mkia_interceptor_count--;
+				UNLOCKINTERCEPTOR(flags);
+				mkia_vnetint_rmv_dev(filter->ifname);
+				mkia_vnetint_rmv_proto(filter->protocol);
+				kfree(vi);
+				return;
+			}
+		}
+		UNLOCKINTERCEPTOR(flags);
+
+	}
+	
+	DBGVNET(("vnetint_rmv_interceptor: no interceptor found\n"));
+	
+	return;
+}
+
+/* Incoming Packet Handler */
+int
+mkia_vnetint_incoming(struct sk_buff *skb, MKI_DEV_T *dev, struct packet_type *pt)
+{
+	
+	struct vnetint_proto *vp;
+	struct vnetint_pcb *vi;
+	int (*filter_func)() = 0;
+	int (*real_func)() = 0;
+	int consumed;
+	DECLARELOCKSAVE(flags);
+
+	DBGVNET(("vnetint_incoming: got packet from dev %s for "
+	    "protocol 0x%04x\n", dev->name, ntohs(pt->type)));
+	
+	LOCKINTERCEPTOR(flags);
+	vp = mkia_vnetint_find_proto(pt->type);
+	if (vp == NULL) {
+		/*
+		 * This should never happen but just to play safe ...
+		 * Drop the packet
+		 */
+		printk("vnetint_incoming: protocol 0x%04x not registered!!!\n",
+			ntohs(pt->type));
+		goto incoming_free_pkt;
+	}
+	real_func = vp->real_func;
+	if ((skb->pkt_type & (PACKET_HOST | PACKET_OTHERHOST))
+	    		== (PACKET_HOST | PACKET_OTHERHOST)) {
+		DBGVNET(("vnetint_incoming: ignore pkt from filter)\n"
+			"- send it on through\n"));
+		skb->pkt_type &= ~PACKET_OTHERHOST;
+		goto pass_to_original_handler;
+	}
+
+	vi = mkia_vnetint_fastfind_inter(pt->type, dev);
+	if (vi == NULL) {
+		DBGVNET(("vnetint_incoming: no interceptor for "
+		    "dev %s, protocol 0x%x\n", dev->name, ntohs(pt->type)));
+		goto pass_to_original_handler;
+	}
+
+	if (! VNETINT_ACT(vi)) {
+		DBGVNET(("vnetint_incoming: interceptor not active\n"));
+		goto pass_to_original_handler;
+	}
+
+	vi->status.NumPktsUp++;
+	filter_func = vi->filter_func;
+	if (filter_func == NULL) {
+		DBGVNET(("vnetint_incoming: missing filter_func\n"));
+		goto pass_to_original_handler;
+	}
+
+	UNLOCKINTERCEPTOR(flags);
+
+	consumed = filter_func(skb->mac.raw,
+				skb->len + dev->hard_header_len, skb);
+
+	LOCKINTERCEPTOR(flags);
+
+	if (consumed) {
+		vi->status.NumPktsConsumed++;
+		goto incoming_free_pkt;
+	}
+
+	/*
+	 * Fall through and send the packet to the original handler since
+	 * the filter did not fully consume it.
+	 */
+pass_to_original_handler:
+	/* Pass packet to original protocol handler */
+	if (!real_func) {
+		DBGVNET(("vnetint_incoming: no protocol - "
+		    "dropped packet\n"));
+		goto incoming_free_pkt;
+	}
+	UNLOCKINTERCEPTOR(flags);
+	return (*real_func)(skb, dev, pt);
+
+incoming_free_pkt:
+	UNLOCKINTERCEPTOR(flags);
+	kfree_skb(skb);
+	return 0;
+}
+
+
+/* Outgoing Packet Handler */
+int
+mkia_vnetint_outgoing(struct sk_buff *skb, MKI_DEV_T *dev)
+{
+	
+	struct vnetint_ifdev *vd;
+	struct vnetint_pcb *vi;
+	int (*real_xmit)() = 0;
+	ETH_HDR_T *eth;
+	unsigned short protocol;
+	int consumed;
+	DECLARELOCKSAVE(flags);
+
+	/*
+	 * We can't rely on skb->protocol to hold a valid protocol
+	 * type so go look in the ethernet header.
+	 */
+	eth = (ETH_HDR_T *)skb->data;
+	protocol = eth->type;
+
+	DBGVNET(("vnetint_outgoing: got packet for dev %s from protocol "
+		 "0x%04x\n", dev->name, ntohs(protocol)));
+
+	LOCKINTERCEPTOR(flags);
+	
+	vd = mkia_vnetint_fastfind_dev(dev);
+	if (vd == NULL) {
+		/* This should never happen but just to play safe ... */
+		/* Drop the packet */
+		printk("vnetint_outgoing: device %p not registered!!!\n", dev);
+		goto outgoing_free_pkt;
+	}
+	real_xmit = vd->real_xmit;
+	
+	if ((skb->pkt_type & (PACKET_OUTGOING | PACKET_OTHERHOST))
+	    		== (PACKET_OUTGOING | PACKET_OTHERHOST)) {
+		DBGVNET(("vnetint_outgoing: ignore pkt from filter "
+			"- send it on through\n"));
+		skb->pkt_type &= ~PACKET_OTHERHOST;
+		goto pass_to_device;
+	}
+	vi = mkia_vnetint_fastfind_inter(protocol, dev);
+	if (vi == NULL) {
+		DBGVNET(("vnetint_outgoing: no interceptor for dev %s, "
+			"protocol 0x%04x\n", dev->name, ntohs(protocol)));
+		goto pass_to_device;
+	}
+	if (! VNETINT_ACT(vi)) {
+		DBGVNET(("vnetint_outgoing: interceptor not active\n"));
+		goto pass_to_device;
+	}
+
+	vi->status.NumPktsDown++;
+	if (vi->filter_func) {
+		/* mac.raw is not set by protocol driver */
+		skb->mac.raw = skb->data;
+
+		UNLOCKINTERCEPTOR(flags);
+		consumed = vi->filter_func(skb->data, skb->len, skb);
+		LOCKINTERCEPTOR(flags);
+		if (consumed) {
+			DBGVNET(("vnetinf_outgoing: filter consumed "
+				"packet\n"));
+			vi->status.NumPktsConsumed++;
+			goto outgoing_free_pkt;
+		}
+	}
+	
+pass_to_device:
+	DBGVNET(("vnetint_outgoing: give packet to real NIC xmitfunc\n"));
+	/* Pass packet to the network device */
+	if (!real_xmit) {
+		printk("vnetint_outgoing: bad device - dropped packet\n");
+		goto outgoing_free_pkt;
+	}
+	UNLOCKINTERCEPTOR(flags);
+	return (*real_xmit)(skb, dev);
+
+outgoing_free_pkt:
+	UNLOCKINTERCEPTOR(flags);
+	kfree_skb(skb);
+	return 0;
+}
+
+
+/* Convert Ethernet address to printable (loggable) representation. */
+char *
+mkia_ether_sprintf(ETH_ADDR_T *addr)
+{
+	register int		i;
+	static char	 etherbuf[18];
+	register char  *cp = etherbuf;
+	register unsigned char  *ap = (unsigned char *)addr;
+	static char	 digits[] = "0123456789abcdef";
+
+	for (i = 0; i < 6; i++) {
+		if (*ap > 0x0f)
+			*cp++ = digits[*ap >> 4];
+		*cp++ = digits[*ap++ & 0xf];
+		*cp++ = ':';
+	}
+	*--cp = 0;
+	return (etherbuf);
+}
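+
+/*
+ * Note: the static etherbuf makes mkia_ether_sprintf() non-reentrant,
+ * so it is only safe for serialized debug logging.
+ */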
+
+/* Interceptor process packet */
+void
+vnetint_ProcessPacket(char *pPacket, 
+		      unsigned size, 
+		      int direction, 
+		      void *handle)
+{
+	
+	struct sk_buff *skb = dev_alloc_skb(size + 2);
+	struct vnetint_pcb *vi = (struct vnetint_pcb *)handle;
+	MKI_DEV_T *dev = vi->ifdev->dev;
+	DECLARELOCKSAVE(flags);
+	
+	DBGVNET(("vnetint_ProcessPacket: direction 0x%x:\n",direction));
+
+	if (dev == NULL || skb == NULL) {
+		/* No can do - drop packet */
+		return;
+	}
+
+	skb->dev = dev;
+	skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+	memcpy(skb_put(skb, size), pPacket, size);
+
+	/* net/core/dev.c expects this field to be initialized,
+	 * so force it the same way the kernel does when it is
+	 * not.  This avoids the "protocol buggy" messages in
+	 * the syslog.
+	 */
+	if (skb->nh.raw < skb->data || skb->nh.raw > skb->tail) {
+		skb->nh.raw = skb->data;
+	}
+
+	LOCKINTERCEPTOR(flags);
+	vi->status.NumPktsFromVnet++;
+	if (VNETINT_ACT(vi)) {
+		UNLOCKINTERCEPTOR(flags);
+		/*
+		 * NOTE: in the following code, use of PACKET_OTHERHOST
+		 * is nonsensical. This fact is being used to mark the
+		 * packet as having originated in the VNET so that the
+		 * packet can be recognized and ignored by the interceptor
+		 * incoming and outgoing packet handlers.
+		 */
+		if (direction == DOWNSTREAM) {
+			/* Pass packet to the network device outbound queue */
+			DBGVNET(("vnetint_ProcessPacket: sending packet down to net device\n"));
+			skb->pkt_type = (PACKET_OUTGOING | PACKET_OTHERHOST);
+
+			LINUX_22X_LOCK_KERNEL();
+			dev_queue_xmit(skb);
+			LINUX_22X_UNLOCK_KERNEL();
+		}
+		else {
+			/* Pass packet to the network device inbound queue */
+			DBGVNET(("vnetint_ProcessPacket: sending packet up to protocol\n"));
+			skb->protocol = eth_type_trans(skb, dev);
+			skb->pkt_type = (PACKET_HOST | PACKET_OTHERHOST);
+			netif_rx(skb);
+		}
+		DBGVNET(("vnetint_ProcessPacket done\n"));
+		return;
+	}
+	else {
+		DBGVNET(("vnetint_ProcessPacket: Dropped packet - interceptor down\n"));
+	}
+	UNLOCKINTERCEPTOR(flags);
+
+	kfree_skb(skb);
+}
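+
+/*
+ * Usage sketch (illustration only): the VNET side hands a raw frame to
+ * this entry point together with the handle it obtained when the filter
+ * was registered via INTERCEPTOR_SETFILTER.  The function and variable
+ * names below are hypothetical.
+ */
+#if 0
+static void example_send_frame(char *frame, unsigned len, void *handle)
+{
+	/* DOWNSTREAM queues on the real device via dev_queue_xmit();
+	 * UPSTREAM would hand the frame to the protocols via netif_rx() */
+	vnetint_ProcessPacket(frame, len, DOWNSTREAM, handle);
+}
+#endif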
+
+/* Interceptor control status */
+void
+vnetint_CtrlStatus(unsigned MsgType, struct vnetint_filter *filter)
+{
+	
+	struct vnetint_pcb *vi = filter->handle;
+	DECLARELOCKSAVE(flags);
+	
+	DBGVNET(("vnetint_CtrlStatus: MsgType=%x, filter=0x%p\n", MsgType, filter));
+	LOCKINTERCEPTOR(flags);
+	switch (MsgType) {
+		case INTERCEPTOR_GOACTIVE:
+			DBGVNET(("vnetint_CtrlStatus: INTERCEPTOR_GOACTIVE\n"));
+			vi->status.flags |= VNETINTF_ACTIVE;
+			break;
+		case INTERCEPTOR_GOPASSIVE:
+			DBGVNET(("vnetint_CtrlStatus: INTERCEPTOR_GOPASSIVE\n"));
+			vi->status.flags &= ~VNETINTF_ACTIVE;
+			break;
+		case INTERCEPTOR_GETSTATS:
+			DBGVNET(("vnetint_CtrlStatus: INTERCEPTOR_GETSTATS\n"));
+			if (vi->ifdev->dev) {
+				memcpy(&(vi->status.PhysMacAddr), ((MKI_DEV_T *) vi->ifdev->dev)->dev_addr, ETH_ALEN);
+			}
+			else {
+				memset(&(vi->status.PhysMacAddr), 0, ETH_ALEN);
+			}
+			memcpy(&(filter->status), &(vi->status), sizeof(struct vnetint_status));
+			break;
+		case INTERCEPTOR_SETFILTER:
+			DBGVNET(("vnetint_CtrlStatus: INTERCEPTOR_SETFILTER "));
+			UNLOCKINTERCEPTOR(flags);
+			if (filter->func) {
+				DBGVNET(("- add interceptor\n"));
+				mkia_vnetint_add_interceptor(filter);
+			}
+			else {
+				DBGVNET(("- remove interceptor\n"));
+				mkia_vnetint_rmv_interceptor(filter);
+			}
+			return;
+		default:
+			break;
+	}
+	UNLOCKINTERCEPTOR(flags);
+	return;
+}
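+
+/*
+ * Usage sketch (illustration only): activate an interceptor and read
+ * back its counters.  Assumes the filter was registered earlier with
+ * INTERCEPTOR_SETFILTER so that filter->handle is valid.  The function
+ * name is hypothetical.
+ */
+#if 0
+static void example_query(struct vnetint_filter *filter)
+{
+	vnetint_CtrlStatus(INTERCEPTOR_GOACTIVE, filter);
+	vnetint_CtrlStatus(INTERCEPTOR_GETSTATS, filter);
+	printk("pkts up %u down %u consumed %u\n",
+	       filter->status.NumPktsUp,
+	       filter->status.NumPktsDown,
+	       filter->status.NumPktsConsumed);
+}
+#endif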
+
+void
+vnetint_setdebug(int *debug_mask_ptr, void (*debug_log_func)(char *, ...))
+{
+	mkia_vni_vnetdebug = debug_mask_ptr;
+	mkia_vni_logfunc = debug_log_func;
+}
+
+
+/* Interceptor copy packet*/
+unsigned
+vnetint_CopyPacket(void *cookie, 
+		   unsigned offset, 
+		   char *dest, 
+		   unsigned maxpktsz)
+{
+	struct sk_buff *skb = cookie;
+	unsigned data_len = skb->tail - skb->mac.raw;
+	
+	DBGVNET(("vnetint_CopyPacket: cookie = %p, offset = %d, dest = %p, maxpktsz = %d\n",
+		 cookie, offset, dest, maxpktsz));
+
+	if (offset >= data_len)
+		return 0;	/* offset past end of frame - nothing to copy */
+	data_len -= offset;
+	if (data_len > maxpktsz)
+		data_len = maxpktsz;
+	memcpy(dest, skb->mac.raw + offset, data_len);
+	return data_len;
+}
+
+int mkia_init_vnetint()
+{
+	mkia_vnetint_ifdev_base = NULL;
+	mkia_vnetint_proto_base = NULL;
+	mkia_vnetint_inter_base = NULL;
+	mkia_interceptor_count = 0;
+	
+	mkia_nb.notifier_call = mkia_vnetint_dev_notifier;
+	mkia_nb.next = NULL;
+	mkia_nb.priority = 0;
+
+	register_netdevice_notifier(&mkia_nb);
+	
+	return 0;
+}
+
+void mkia_cleanup_vnetint()
+{
+	unregister_netdevice_notifier(&mkia_nb);
+	mkia_vnetint_rmv_interceptor(NULL);
+}
+
diff -Naur mki-adapter-old/arch/i386/mki-adapter/README mki-adapter-new/arch/i386/mki-adapter/README
--- mki-adapter-old/arch/i386/mki-adapter/README	1969-12-31 16:00:00.000000000 -0800
+++ mki-adapter-new/arch/i386/mki-adapter/README	2003-09-08 11:26:24.000000000 -0700
@@ -0,0 +1,35 @@
+ ***************************************************************************
+ * Copyright 2001 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ****************************************************************************
+ * $Id: README,v 1.3 2001/08/15 23:29:38 rlawrence Exp $
+ ****************************************************************************
+
+This kernel module attempts to isolate all of the functions and structures
+that NeTraverse utilizes in its binary kernel modules.
+
+If you are running a kernel with the netraverse patch applied, the adapter
+can be built by pointing KERN_SRC at the top of the running kernel's source
+tree and using the default make target:
+
+  make KERN_SRC=/home/jdoe/kernels/netraverse-2.2.16
+
+If you are building the adapter for a kernel other than the one currently
+running, you must use the "force" target of make:
+
+  make force KERN_SRC=/home/jdoe/kernels/netraverse-2.4.2
+
+If you want to create a patch file for a kernel tree that you are about to
+build, you can use the "patch" target.  The kernel tree must already have
+the mki patch applied to it.
+
+  make patch KERN_SRC=/home/jdoe/kernels/netraverse-2.4.6
+
+The above will create "mki-adapter.patch", which can be applied to the tree 
+with:
+
+  cd /home/jdoe/kernels/netraverse-2.4.6
+  patch -p1 -i /home/jdoe/mki-adapter/mki-adapter.patch
+
+
diff -Naur mki-adapter-old/arch/i386/mki-adapter/timer.c mki-adapter-new/arch/i386/mki-adapter/timer.c
--- mki-adapter-old/arch/i386/mki-adapter/timer.c	1969-12-31 16:00:00.000000000 -0800
+++ mki-adapter-new/arch/i386/mki-adapter/timer.c	2003-09-08 11:26:27.000000000 -0700
@@ -0,0 +1,136 @@
+/*
+ ****************************************************************************
+ * Copyright 2001 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: timer.c,v 1.5 2001/09/06 20:54:04 rlawrence Exp $
+ ***************************************************************************
+ * This is a module that will be loaded on 2.2 systems, and included in 2.4
+ * MKIs to handle all of the timeout calls.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/modversions.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <mkifunc.h>
+
+struct linux_unix_timer {
+	struct linux_unix_timer *lut_next;
+	struct linux_unix_timer *lut_prev;
+	void (*lut_unix_timeout_func)(void *);
+	int lut_timeoutid;
+	void *lut_unix_timeout_arg;
+	struct timer_list lut_linux_timer;
+};
+
+struct linux_unix_timer lut_list_head = { &lut_list_head, &lut_list_head };
+int lut_unique_id = 1;
+
+spinlock_t mkia_lut_lock = SPIN_LOCK_UNLOCKED;
+#define DECLARELOCKSAVE(flags) unsigned long flags=0	/* irqsave flags are unsigned long */
+#define LOCKTIMER(flags) spin_lock_irqsave(&mkia_lut_lock, flags)
+#define UNLOCKTIMER(flags) spin_unlock_irqrestore(&mkia_lut_lock, flags)
+
+void
+do_mki_timeout(unsigned long timer_arg)
+{
+	struct linux_unix_timer *target_lutp;
+	struct linux_unix_timer *lutp;
+	void (*timeout_func)(void *);
+	void *timeout_arg;
+	DECLARELOCKSAVE(flags);
+
+	target_lutp = (struct linux_unix_timer *) timer_arg;
+
+	/* See if the element is still on the active list; only then is it
+	 * safe to dereference, since untimeout() may already have freed it */
+	LOCKTIMER(flags);
+	for (lutp = lut_list_head.lut_next; lutp != &lut_list_head;
+	     lutp = lutp->lut_next) {
+	     	if (lutp == target_lutp)
+			break;
+	}
+	if (lutp != &lut_list_head) {
+		lutp->lut_next->lut_prev = lutp->lut_prev;
+		lutp->lut_prev->lut_next = lutp->lut_next;
+		UNLOCKTIMER(flags);
+
+		del_timer(&lutp->lut_linux_timer);
+		kfree(lutp);
+		(*timeout_func)(timeout_arg);
+	} else {
+		UNLOCKTIMER(flags);
+		printk("Merge: mki-adapter: not doing timeout func\n");
+		printk("Merge: mki-adapter: func %p, arg %p\n",
+			timeout_func, timeout_arg);
+		(*timeout_func)(timeout_arg);
+	}
+}
+
+int
+timeout(void (*timeout_func)(void *), void *timeout_arg, long ticks)
+{
+	struct linux_unix_timer *lutp;
+	DECLARELOCKSAVE(flags);
+
+	lutp = kmalloc(sizeof(*lutp), GFP_ATOMIC);
+	if (lutp == NULL) {
+		printk("Merge: timeout: kmalloc failed\n");
+		return 0;
+	}
+	memset(lutp, 0, sizeof(*lutp));
+	init_timer(&lutp->lut_linux_timer);
+	lutp->lut_linux_timer.expires = jiffies + ticks;
+	lutp->lut_linux_timer.data = (unsigned long) lutp;
+	lutp->lut_linux_timer.function =  do_mki_timeout;
+	lutp->lut_unix_timeout_func = timeout_func;
+	lutp->lut_unix_timeout_arg = timeout_arg;
+
+	LOCKTIMER(flags);
+	lutp->lut_timeoutid = lut_unique_id++;
+	lutp->lut_next = &lut_list_head;
+	lutp->lut_prev = lut_list_head.lut_prev;
+	lut_list_head.lut_prev->lut_next = lutp;
+	lut_list_head.lut_prev = lutp;
+	UNLOCKTIMER(flags);
+
+	add_timer(&lutp->lut_linux_timer);
+
+	return (lutp->lut_timeoutid);
+}
+
+void
+untimeout(int id)
+{
+	struct linux_unix_timer *lutp;
+	DECLARELOCKSAVE(flags);
+
+	if (id == 0) 
+		return;
+
+	LOCKTIMER(flags);
+	for (lutp = lut_list_head.lut_next; lutp != &lut_list_head;
+	     lutp = lutp->lut_next) {
+	     	if (lutp->lut_timeoutid == id) {
+			lutp->lut_next->lut_prev = lutp->lut_prev;
+			lutp->lut_prev->lut_next = lutp->lut_next;
+			break;
+		}
+	}
+	UNLOCKTIMER(flags);
+
+	if (lutp != &lut_list_head) {
+		del_timer(&lutp->lut_linux_timer);
+		memset(lutp, 0, sizeof(*lutp));
+		kfree(lutp);
+	}
+}
+
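+/*
+ * Usage sketch (illustration only): the Unix-style API above schedules a
+ * callback `ticks' jiffies in the future with timeout() and cancels it by
+ * id with untimeout().  The callback and its argument are hypothetical.
+ */
+#if 0
+static void example_cb(void *arg)
+{
+	printk("Merge: example timeout fired, arg %p\n", arg);
+}
+
+static int example_id;
+
+static void example_arm(void *arg)
+{
+	example_id = timeout(example_cb, arg, HZ);	/* ~1 second */
+}
+
+static void example_disarm(void)
+{
+	untimeout(example_id);	/* no-op if already fired or id is 0 */
+	example_id = 0;
+}
+#endif
+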
diff -Naur mki-adapter-old/arch/i386/mki-adapter/vneteth.h mki-adapter-new/arch/i386/mki-adapter/vneteth.h
--- mki-adapter-old/arch/i386/mki-adapter/vneteth.h	1969-12-31 16:00:00.000000000 -0800
+++ mki-adapter-new/arch/i386/mki-adapter/vneteth.h	2003-09-08 11:26:27.000000000 -0700
@@ -0,0 +1,36 @@
+/*
+ ****************************************************************************
+ * Copyright 2001 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: vneteth.h,v 1.3 2001/08/15 23:29:38 rlawrence Exp $
+ ***************************************************************************
+ * Ethernet address structure for ease of 
+ * access to individual components. 
+ *
+ */ 
+#pragma pack(1)
+typedef	union {
+	struct {
+		u_char b0, b1, b2, b3, b4, b5;
+	} eab;
+	struct {
+		u_short w0, w1, w2;
+	} eaw;
+	struct {
+  		u_long	ls4;
+		u_short	ms2;
+	} eal;
+} ETH_ADDR_T;
+
+/* 
+ * Ethernet Header 
+ */
+typedef struct {
+	ETH_ADDR_T	dest;
+	ETH_ADDR_T	src;
+	u_short		type;
+} ETH_HDR_T;
+#pragma pack()
+
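+/*
+ * Usage sketch (illustration only): the three views of the packed union
+ * let an address be compared without a byte loop.  The function name is
+ * hypothetical.
+ */
+#if 0
+static int example_eth_addr_equal(ETH_ADDR_T *a, ETH_ADDR_T *b)
+{
+	/* eal.ls4 covers bytes 0-3, eal.ms2 covers bytes 4-5 */
+	return a->eal.ls4 == b->eal.ls4 && a->eal.ms2 == b->eal.ms2;
+}
+#endif
+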
diff -Naur mki-adapter-old/arch/i386/mki-adapter/vnetint-pub.h mki-adapter-new/arch/i386/mki-adapter/vnetint-pub.h
--- mki-adapter-old/arch/i386/mki-adapter/vnetint-pub.h	1969-12-31 16:00:00.000000000 -0800
+++ mki-adapter-new/arch/i386/mki-adapter/vnetint-pub.h	2003-09-08 11:26:27.000000000 -0700
@@ -0,0 +1,71 @@
+/*
+ ****************************************************************************
+ * Copyright 2001 by NeTraverse, Inc.
+ * This software is distributed under the terms of the GPL
+ * which is supplied in the LICENSE file with this distribution
+ ***************************************************************************
+ * $Id: vnetint-pub.h,v 1.4 2001/08/16 04:15:25 rlawrence Exp $
+ ***************************************************************************
+ */
+/* Interceptor status */
+struct vnetint_status {
+        int             flags;
+        unsigned        NumPktsConsumed;
+        unsigned        NumPktsFromVnet;
+        unsigned        NumPktsUp;
+        unsigned        NumPktsDown;
+        ETH_ADDR_T      PhysMacAddr;
+};
+
+#define MAX_INTERCEPTORS 16
+#define MAXIFNAMELEN 8
+/* Interceptor state flags */
+#define VNETINTF_ACTIVE       0x2                /* interceptor is active */
+#define VNETINTF_MACSET       0x10               /* we got a mac address */
+#define VNETINT_ACT(s) \
+        (((s)->status.flags & VNETINTF_ACTIVE))
+/* Interceptor device */
+struct vnetint_ifdev {
+        char                    ifname[MAXIFNAMELEN];
+        int                     count;
+        void                    *dev;
+        int                     (*real_xmit)();
+        struct vnetint_ifdev    *next;
+};
+
+/* Interceptor protocol */
+struct vnetint_proto {
+        int                     protocol;
+        int                     count;
+        struct packet_type      *ptype;
+        int                     (*real_func)();
+        struct vnetint_proto    *next;
+};
+/* Interceptor device/protocol pairs */
+struct vnetint_pcb {
+        int                     count;
+        struct vnetint_status   status;
+        struct vnetint_ifdev    *ifdev;
+        struct vnetint_proto    *proto;
+        int                     (*filter_func)();
+        struct vnetint_pcb      *next;
+};
+/* Interceptor filter */
+struct vnetint_filter {
+        struct vnetint_pcb      *handle;
+        struct vnetint_status   status;
+        char                    ifname[MAXIFNAMELEN];
+        int                     protocol;
+        int                     (*func)();
+        void                    *criteria;
+};
+/* vnetint_CtrlStatus function codes */
+#define INTERCEPTOR_GOACTIVE  1
+#define INTERCEPTOR_GOPASSIVE 2
+#define INTERCEPTOR_GETSTATS  3
+#define INTERCEPTOR_NUMBER    4
+#define INTERCEPTOR_SETFILTER 5
+/* vnetint_ProcessPacket defines */
+#define UPSTREAM   1
+#define DOWNSTREAM 0
+