1 diff -X ignore -urpNb bochs-2.1.1/Makefile.in checkbochs-2.1.1/Makefile.in
2 --- bochs-2.1.1/Makefile.in 2004-02-11 14:28:02.000000000 -0800
3 +++ checkbochs-2.1.1/Makefile.in 2005-07-02 17:25:47.000000000 -0700
4 @@ -177,11 +177,11 @@ all: @PRIMARY_TARGET@ @PLUGIN_TARGET@ bx
7 bochs@EXE@: @IODEV_LIB_VAR@ @DEBUGGER_VAR@ \
8 - cpu/libcpu.a memory/libmemory.a gui/libgui.a \
9 + cpu/libcpu.a memory/libmemory.a gui/libgui.a taint/libtaint.a \
10 @DISASM_VAR@ @INSTRUMENT_VAR@ $(BX_OBJS) \
11 $(SIMX86_OBJS) @FPU_VAR@ @GDBSTUB_VAR@ @PLUGIN_VAR@
12 @LINK@ -export-dynamic $(BX_OBJS) $(SIMX86_OBJS) \
13 - iodev/libiodev.a cpu/libcpu.a memory/libmemory.a gui/libgui.a \
14 + iodev/libiodev.a cpu/libcpu.a memory/libmemory.a gui/libgui.a taint/libtaint.a \
15 @DEBUGGER_VAR@ @DISASM_VAR@ @INSTRUMENT_VAR@ @PLUGIN_VAR@ \
16 @GDBSTUB_VAR@ @FPU_VAR@ \
17 @NONPLUGIN_GUI_LINK_OPTS@ \
18 @@ -195,19 +195,19 @@ bochs@EXE@: @IODEV_LIB_VAR@ @DEBUGGER_VA
19 # libtool. This creates a .DEF file, and exports file, an import library,
20 # and then links bochs.exe with the exports file.
21 .win32_dll_plugin_target: @IODEV_LIB_VAR@ @DEBUGGER_VAR@ \
22 - cpu/libcpu.a memory/libmemory.a gui/libgui.a \
23 + cpu/libcpu.a memory/libmemory.a gui/libgui.a taint/libtaint.a \
24 @DISASM_VAR@ @INSTRUMENT_VAR@ $(BX_OBJS) \
25 $(SIMX86_OBJS) @FPU_VAR@ @GDBSTUB_VAR@ @PLUGIN_VAR@
26 $(DLLTOOL) --export-all-symbols --output-def bochs.def \
27 $(BX_OBJS) $(SIMX86_OBJS) \
28 - @IODEV_LIB_VAR@ cpu/libcpu.a memory/libmemory.a gui/libgui.a \
29 + @IODEV_LIB_VAR@ cpu/libcpu.a memory/libmemory.a gui/libgui.a taint/libtaint.a \
30 @DEBUGGER_VAR@ @DISASM_VAR@ @INSTRUMENT_VAR@ @PLUGIN_VAR@ \
31 @GDBSTUB_VAR@ @FPU_VAR@
32 $(DLLTOOL) --dllname bochs.exe --def bochs.def --output-lib dllexports.a
33 $(DLLTOOL) --dllname bochs.exe --output-exp bochs.exp --def bochs.def
34 $(CXX) -o bochs.exe $(CXXFLAGS) $(LDFLAGS) -export-dynamic \
35 $(BX_OBJS) bochs.exp $(SIMX86_OBJS) \
36 - @IODEV_LIB_VAR@ cpu/libcpu.a memory/libmemory.a gui/libgui.a \
37 + @IODEV_LIB_VAR@ cpu/libcpu.a memory/libmemory.a gui/libgui.a taint/libtaint.a \
38 @DEBUGGER_VAR@ @DISASM_VAR@ @INSTRUMENT_VAR@ @PLUGIN_VAR@ \
39 @GDBSTUB_VAR@ @FPU_VAR@ \
41 @@ -274,6 +274,11 @@ gui/libgui.a::
42 $(MAKE) $(MDEFINES) libgui.a
46 + cd taint @COMMAND_SEPARATOR@
47 + $(MAKE) $(MDEFINES) libtaint.a
51 cd disasm @COMMAND_SEPARATOR@
52 $(MAKE) $(MDEFINES) libdisasm.a
53 @@ -503,6 +508,9 @@ all-clean: clean
54 cd fpu @COMMAND_SEPARATOR@
57 + cd taint @COMMAND_SEPARATOR@
60 cd doc/docbook @COMMAND_SEPARATOR@
63 @@ -538,6 +546,9 @@ dist-clean: local-dist-clean
64 cd fpu @COMMAND_SEPARATOR@
67 + cd taint @COMMAND_SEPARATOR@
70 cd doc/docbook @COMMAND_SEPARATOR@
73 diff -X ignore -urpNb bochs-2.1.1/bochs.h checkbochs-2.1.1/bochs.h
74 --- bochs-2.1.1/bochs.h 2005-07-02 17:23:03.000000000 -0700
75 +++ checkbochs-2.1.1/bochs.h 2005-07-02 17:25:47.000000000 -0700
76 @@ -671,6 +671,7 @@ typedef struct BOCHSAPI {
78 bx_param_enum_c *Osel_config;
79 bx_param_enum_c *Osel_displaylib;
80 + bx_param_enum_c *Otaint_type ;
83 BOCHSAPI extern bx_options_t bx_options;
84 diff -X ignore -urpNb bochs-2.1.1/configure.in checkbochs-2.1.1/configure.in
85 --- bochs-2.1.1/configure.in 2004-02-11 14:28:40.000000000 -0800
86 +++ checkbochs-2.1.1/configure.in 2005-07-19 15:51:35.000000000 -0700
87 @@ -2244,6 +2244,11 @@ if test "$with_rfb" = yes; then
91 +AC_CHECK_LIB(gdbm, gdbm_open, , [dnl
92 +echo 'Error: checkbochs requires libgdbm'
96 # The ACX_PTHREAD function was written by
97 # Steven G. Johnson <stevenj@alum.mit.edu> and
98 # Alejandro Forero Cuervo <bachue@bachue.com>
99 @@ -2516,4 +2521,5 @@ AC_OUTPUT(Makefile iodev/Makefile bx_deb
100 fpu/Makefile doc/docbook/Makefile \
101 build/linux/bochs-dlx \
102 bxversion.h build/macosx/Info.plist \
103 - build/win32/nsis/Makefile build/win32/nsis/bochs.nsi)
104 + build/win32/nsis/Makefile build/win32/nsis/bochs.nsi \
106 diff -X ignore -urpNb bochs-2.1.1/cpu/cpu.cc checkbochs-2.1.1/cpu/cpu.cc
107 --- bochs-2.1.1/cpu/cpu.cc 2004-02-11 14:28:51.000000000 -0800
108 +++ checkbochs-2.1.1/cpu/cpu.cc 2005-07-19 14:47:38.000000000 -0700
111 #define LOG_THIS BX_CPU_THIS_PTR
113 +#include "taint/globals.h"
114 +#include "taint/mydebug.h"
115 +#include "taint/lockset.h"
118 #define this (BX_CPU(0))
120 @@ -104,6 +108,25 @@ extern void REGISTER_IADDR(bx_addr addr)
125 +clear_stack_taints(void)
127 + BX_CPU_C *cpu = BX_CPU(0);
129 + Bit32u old_esp = cpu->prev_esp;
130 + Bit32u new_esp = ESP;
131 + if (old_esp >= new_esp
132 + || (old_esp & 0xfffff000) != (new_esp & 0xfffff000))
135 + Bit32u start = old_esp - PHYS_BASE;
136 + Bit32u length = new_esp - old_esp;
137 + if (start > cpu->mem->len || start + length > cpu->mem->len)
139 + memset (cpu->mem->taint_vector + start, 0,
140 + sizeof *cpu->mem->taint_vector * length);
144 BX_CPU_C::cpu_loop(Bit32s max_instr_count)
146 @@ -111,7 +134,9 @@ BX_CPU_C::cpu_loop(Bit32s max_instr_coun
147 bxInstruction_c iStorage BX_CPP_AlignN(32);
148 bxInstruction_c *i = &iStorage;
150 - BxExecutePtr_t execute;
151 + BxExecutePtr_t execute, taint_execute ;
153 + BX_CPU_THIS_PTR curInstruction = i ;
156 BX_CPU_THIS_PTR break_point = 0;
157 @@ -209,6 +234,10 @@ BX_CPU_C::cpu_loop(Bit32s max_instr_coun
158 BxExecutePtr_tR resolveModRM = i->ResolveModrm; // Get as soon as possible for speculation.
160 execute = i->execute; // fetch as soon as possible for speculation.
162 + taint_execute = i->taint_execute ;
163 + if (!taint_execute) taint_execute = &BX_CPU_C::NOP ;
166 BX_CPU_CALL_METHODR(resolveModRM, (i));
168 @@ -281,6 +310,10 @@ BX_CPU_C::cpu_loop(Bit32s max_instr_coun
171 execute = i->execute; // fetch as soon as possible for speculation.
173 + taint_execute = i->taint_execute ;
174 + if (!taint_execute) taint_execute = &BX_CPU_C::NOP ;
177 BX_CPU_CALL_METHODR(resolveModRM, (i));
179 @@ -303,6 +336,8 @@ BX_CPU_C::cpu_loop(Bit32s max_instr_coun
180 BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID);
182 BX_CPU_CALL_METHOD(execute, (i));
183 + BX_CPU_CALL_METHOD(taint_execute, (i));
184 + clear_stack_taints ();
185 BX_CPU_THIS_PTR prev_eip = RIP; // commit new EIP
186 BX_CPU_THIS_PTR prev_esp = RSP; // commit new ESP
187 BX_INSTR_AFTER_EXECUTION(BX_CPU_ID);
188 @@ -323,6 +358,7 @@ repeat_loop:
191 BX_CPU_CALL_METHOD(execute, (i));
192 + BX_CPU_CALL_METHOD(taint_execute, (i));
195 if ((i->repUsedValue()==3) && (get_ZF()==0)) goto repeat_done;
196 @@ -335,6 +371,7 @@ repeat_loop:
199 BX_CPU_CALL_METHOD(execute, (i));
200 + BX_CPU_CALL_METHOD(taint_execute, (i));
203 if ((i->repUsedValue()==3) && (get_ZF()==0)) goto repeat_done;
204 @@ -345,6 +382,7 @@ repeat_loop:
207 BX_CPU_CALL_METHOD(execute, (i));
208 + BX_CPU_CALL_METHOD(taint_execute, (i));
211 if ((i->repUsedValue()==3) && (get_ZF()==0)) goto repeat_done;
212 @@ -358,6 +396,7 @@ repeat_loop:
215 BX_CPU_CALL_METHOD(execute, (i));
216 + BX_CPU_CALL_METHOD(taint_execute, (i));
219 if (RCX == 0) goto repeat_done;
220 @@ -368,6 +407,7 @@ repeat_loop:
223 BX_CPU_CALL_METHOD(execute, (i));
224 + BX_CPU_CALL_METHOD(taint_execute, (i));
227 if (ECX == 0) goto repeat_done;
228 @@ -376,6 +416,7 @@ repeat_loop:
229 else { // 16bit addrsize
231 BX_CPU_CALL_METHOD(execute, (i));
232 + BX_CPU_CALL_METHOD(taint_execute, (i));
235 if (CX == 0) goto repeat_done;
236 @@ -405,6 +446,7 @@ repeat_not_done:
240 + clear_stack_taints ();
241 BX_CPU_THIS_PTR prev_eip = RIP; // commit new EIP
242 BX_CPU_THIS_PTR prev_esp = RSP; // commit new ESP
243 BX_INSTR_REPEAT_ITERATION(BX_CPU_ID);
244 @@ -617,6 +659,7 @@ BX_CPU_C::handleAsyncEvent(void)
245 // the new EIP/ESP values. But here, we call interrupt() much like
246 // it was a sofware interrupt instruction, and need to effect the
247 // commit here. This code mirrors similar code above.
248 + clear_stack_taints ();
249 BX_CPU_THIS_PTR prev_eip = RIP; // commit new RIP
250 BX_CPU_THIS_PTR prev_esp = RSP; // commit new RSP
251 BX_CPU_THIS_PTR EXT = 0;
252 @@ -865,6 +908,17 @@ BX_CPU_THIS_PTR eipPageWindowSize = 0; /
253 BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b);
257 +BX_CPU_C::panic(const char *fmt, ...)
261 + printf("backtrace: %s.\n",backtrace(btstr));
264 + logfunctions::panic(fmt,arg);
268 #if BX_EXTERNAL_DEBUGGER
270 diff -X ignore -urpNb bochs-2.1.1/cpu/cpu.h checkbochs-2.1.1/cpu/cpu.h
271 --- bochs-2.1.1/cpu/cpu.h 2004-02-11 14:28:51.000000000 -0800
272 +++ checkbochs-2.1.1/cpu/cpu.h 2005-07-19 12:24:02.000000000 -0700
273 @@ -739,9 +739,11 @@ public:
275 void (*ResolveModrm)(bxInstruction_c *) BX_CPP_AttrRegparmN(1);
276 void (*execute)(bxInstruction_c *);
277 + void (*taint_execute)(bxInstruction_c *);
279 void (BX_CPU_C::*ResolveModrm)(bxInstruction_c *) BX_CPP_AttrRegparmN(1);
280 void (BX_CPU_C::*execute)(bxInstruction_c *);
281 + void (BX_CPU_C::*taint_execute)(bxInstruction_c *);
284 // 26..23 ilen (0..15). Leave this one on top so no mask is needed.
285 @@ -821,6 +823,11 @@ public:
289 + /* sorav: to check if the instruction has a lock prefix */
290 + bool locked; //whether lock prefix is held
291 + BX_CPP_INLINE void setLocked(bool val) { locked = val; }
292 + BX_CPP_INLINE bool isLocked(void) { return locked ; }
294 BX_CPP_INLINE unsigned opcodeReg() {
295 // The opcodeReg form (low 3 bits of the opcode byte (extended
296 // by REX.B on x86-64) can be accessed by IxForm or IqForm. They
297 @@ -1428,8 +1435,18 @@ union {
298 // is greated than 2 (the maximum possible for
299 // normal cases) it is a native pointer and is used
300 // for a direct write access.
303 + Bit32u taint_paddress1;
304 + Bit32u taint_paddress2;
307 + bx_ptr_equiv_t taint_pages;
311 + bxInstruction_c *curInstruction ;
313 #if BX_SUPPORT_X86_64
314 // data upper 32 bits - not used any longer
315 //Bit32s daddr_upper; // upper bits must be canonical (-virtmax --> + virtmax)
316 @@ -2952,6 +2969,35 @@ union {
318 bx_local_apic_c local_apic;
321 + /* taint functions */
322 + void panic(const char *fmt, ...);
323 + BX_SMF Bit32u thread_current(void) ;
324 + BX_SMF Bit32s BX_CPP_AttrRegparmN(3) BX_CPU_C::taint_dtranslate_linear(bx_address laddr, unsigned pl, unsigned rw);
325 + BX_SMF Bit32u BX_CPP_AttrRegparmN(2) BX_CPU_C::taint_itranslate_linear(bx_address laddr, unsigned pl);
326 + BX_SMF struct lockset *BX_CPU_C::access_linear_taint(bx_address laddr);
327 +//SHADOW STATE FUNCTIONS
328 + BX_SMF void TT_TaintSaveRegs(bxInstruction_c *i);
329 + BX_SMF void TT_TaintRestoreRegs(bxInstruction_c *i);
330 + BX_SMF void TT_Lock(bxInstruction_c *i);
331 + BX_SMF void TT_Unlock(bxInstruction_c *i);
332 + BX_SMF void TT_CommonOps(bxInstruction_c *i);
334 + BX_SMF int read_virtual_checks_silent(bx_segment_reg_t *seg, bx_address offset, unsigned length) BX_CPP_AttrRegparmN(3);
335 + BX_SMF int read_virtual_byte_silent(unsigned s, bx_address offset, Bit8u *data);
336 + BX_SMF int read_virtual_word_silent(unsigned s, bx_address offset, Bit16u *data);
337 + BX_SMF int read_virtual_dword_silent(unsigned s, bx_address offset, Bit32u *data);
338 + BX_SMF int access_linear_silent(bx_address laddr, unsigned length, unsigned pl, unsigned rw, void *data);
340 + BX_SMF char *backtrace(char *s);
341 + BX_SMF void backtrace_eips(Bit32u *eips, int n);
342 + BX_SMF Bit32u callingEIP(void);
344 + BX_SMF void eraser_access_linear(bx_address laddr, unsigned len, unsigned pl, unsigned rw, void *data);
346 + BX_SMF void eraser_init_globals (void) ;
347 + BX_SMF void eraser_done_globals (void) ;
352 @@ -3299,6 +3345,7 @@ IMPLEMENT_EFLAG_ACCESSOR (TF, 8)
353 #define BxGroup14 BxGroupN
354 #define BxGroup15 BxGroupN
355 #define BxGroup16 BxGroupN
356 +#define BxGroupTaint BxGroupN
359 typedef enum _show_flags {
360 diff -X ignore -urpNb bochs-2.1.1/cpu/cpuid.cc checkbochs-2.1.1/cpu/cpuid.cc
361 --- bochs-2.1.1/cpu/cpuid.cc 2003-12-31 09:35:43.000000000 -0800
362 +++ checkbochs-2.1.1/cpu/cpuid.cc 2005-07-02 17:25:48.000000000 -0700
363 @@ -251,6 +251,12 @@ void BX_CPU_C::CPUID(bxInstruction_c *i)
364 RDX = get_std_cpuid_features ();
367 + case 3: /*added by sorav */
368 + RBX = 0x6e696154; // "Tain"
369 + RDX = 0x49646574; // "tedI"
370 + RCX = 0x6c65746e; // "ntel"
374 #if BX_CPU_LEVEL >= 6
376 diff -X ignore -urpNb bochs-2.1.1/cpu/debugstuff.cc checkbochs-2.1.1/cpu/debugstuff.cc
377 --- bochs-2.1.1/cpu/debugstuff.cc 2003-12-24 12:32:59.000000000 -0800
378 +++ checkbochs-2.1.1/cpu/debugstuff.cc 2005-07-18 21:40:23.000000000 -0700
379 @@ -1023,4 +1023,5 @@ BX_CPU_C::atexit(void)
380 BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b ? 32 : 16));
382 debug(BX_CPU_THIS_PTR prev_eip);
383 + eraser_done_globals();
385 diff -X ignore -urpNb bochs-2.1.1/cpu/fetchdecode.cc checkbochs-2.1.1/cpu/fetchdecode.cc
386 --- bochs-2.1.1/cpu/fetchdecode.cc 2003-12-28 10:19:41.000000000 -0800
387 +++ checkbochs-2.1.1/cpu/fetchdecode.cc 2005-07-02 17:25:48.000000000 -0700
390 #define LOG_THIS BX_CPU_THIS_PTR
392 +#include "taint/eraser.h"
395 ///////////////////////////
397 @@ -156,6 +158,7 @@ typedef struct BxOpcodeInfo_t {
399 BxExecutePtr_t ExecutePtr;
400 struct BxOpcodeInfo_t *AnotherArray;
401 + BxExecutePtr_t TaintExecutePtr ;
405 @@ -458,6 +461,17 @@ static BxOpcodeInfo_t BxOpcodeInfoG16[8]
406 /* 7 */ { 0, &BX_CPU_C::BxError }
409 +BxOpcodeInfo_t BxOpcodeInfoGTaint[8] = {
410 + /* 0 */ { BxImmediate_Iv, &BX_CPU_C::NOP, NULL, &BX_CPU_C::TT_TaintSaveRegs},
411 + /* 1 */ { BxImmediate_Iv, &BX_CPU_C::NOP, NULL, &BX_CPU_C::TT_TaintRestoreRegs},
412 + /* 2 */ { BxImmediate_Iv, &BX_CPU_C::NOP, NULL, &BX_CPU_C::TT_Lock /*&BX_CPU_C::TT_RegionTaint*/},
413 + /* 3 */ { BxImmediate_Iv, &BX_CPU_C::NOP, NULL, &BX_CPU_C::TT_Unlock /*&BX_CPU_C::TT_RegionCheck*/},
414 + /* 4 */ { BxImmediate_Iv, &BX_CPU_C::NOP, NULL, &BX_CPU_C::TT_CommonOps },
415 + /* 5 */ { 0, &BX_CPU_C::NOP, NULL, NULL /*&BX_CPU_C::TT_Taint*/ },
416 + /* 6 */ { 0, &BX_CPU_C::NOP, NULL, NULL /*&BX_CPU_C::TT_Untaint*/ },
417 + /* 7 */ { 0, &BX_CPU_C::NOP, NULL, NULL /*&BX_CPU_C::TT_Check*/}
421 /* ************************** */
422 /* 512 entries for 16bit mode */
423 @@ -728,7 +742,8 @@ static BxOpcodeInfo_t BxOpcodeInfo[512*2
424 /* 0F 01 */ { BxAnother | BxGroup7, NULL, BxOpcodeInfoG7 },
425 /* 0F 02 */ { BxAnother, &BX_CPU_C::LAR_GvEw },
426 /* 0F 03 */ { BxAnother, &BX_CPU_C::LSL_GvEw },
427 - /* 0F 04 */ { 0, &BX_CPU_C::BxError },
428 + ///* 0F 04 */ { 0, &BX_CPU_C::BxError },
429 + /* 0F 04 : sorav */ { BxAnother | BxGroupTaint, NULL, BxOpcodeInfoGTaint }, // 2-byte escape
430 #if BX_SUPPORT_X86_64
431 /* 0F 05 */ { 0, &BX_CPU_C::SYSCALL },
433 @@ -1263,7 +1278,7 @@ static BxOpcodeInfo_t BxOpcodeInfo[512*2
434 /* 0F 01 */ { BxAnother | BxGroup7, NULL, BxOpcodeInfoG7 },
435 /* 0F 02 */ { BxAnother, &BX_CPU_C::LAR_GvEw },
436 /* 0F 03 */ { BxAnother, &BX_CPU_C::LSL_GvEw },
437 - /* 0F 04 */ { 0, &BX_CPU_C::BxError },
438 + /* 0F 04 : sorav */ { BxAnother | BxGroupTaint, NULL, BxOpcodeInfoGTaint }, // 2-byte escape
439 #if BX_SUPPORT_X86_64
440 /* 0F 05 */ { 0, &BX_CPU_C::SYSCALL },
442 @@ -1564,6 +1579,8 @@ BX_CPU_C::fetchDecode(Bit8u *iptr, bxIns
443 /*os64*/ 0, /*as64*/ 0,
444 /*extend8bit*/ 0, /*repUsed*/ 0);
446 + instruction->setLocked (false) ;
448 sse_prefix = SSE_PREFIX_NONE;
451 @@ -1669,6 +1686,7 @@ another_byte:
453 BX_INSTR_PREFIX_LOCK(BX_CPU_ID);
455 + instruction->setLocked (false) ;
459 @@ -1883,6 +1901,7 @@ modrm_done:
462 instruction->execute = OpcodeInfoPtr->ExecutePtr;
463 + instruction->taint_execute = OpcodeInfoPtr->TaintExecutePtr;
464 instruction->setRepAttr(attr & (BxRepeatable | BxRepeatableZF));
467 @@ -1891,6 +1910,7 @@ modrm_done:
468 // the if() above after fetching the 2nd byte, so this path is
469 // taken in all cases if a modrm byte is NOT required.
470 instruction->execute = BxOpcodeInfo[b1+offset].ExecutePtr;
471 + instruction->taint_execute = BxOpcodeInfo[b1+offset].TaintExecutePtr;
472 instruction->IxForm.opcodeReg = b1 & 7;
475 diff -X ignore -urpNb bochs-2.1.1/cpu/paging.cc checkbochs-2.1.1/cpu/paging.cc
476 --- bochs-2.1.1/cpu/paging.cc 2003-12-30 14:12:45.000000000 -0800
477 +++ checkbochs-2.1.1/cpu/paging.cc 2005-07-02 17:25:48.000000000 -0700
480 #define LOG_THIS BX_CPU_THIS_PTR
482 +#include "taint/globals.h"
485 #define this (BX_CPU(0))
487 @@ -1124,6 +1126,7 @@ BX_CPU_C::access_linear(bx_address laddr
488 BX_CPU_THIS_PTR mem->writePhysicalPage(this,
489 BX_CPU_THIS_PTR address_xlation.paddress1, length, data);
491 + BX_CPU_THIS_PTR eraser_access_linear(laddr,length,pl,rw,data);
495 @@ -1195,6 +1198,7 @@ BX_CPU_C::access_linear(bx_address laddr
499 + BX_CPU_THIS_PTR eraser_access_linear(laddr,length,pl,rw,data);
503 @@ -1216,6 +1220,7 @@ BX_CPU_C::access_linear(bx_address laddr
504 lpf = laddr & 0xfffff000;
505 if (BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == BX_TLB_LPF_VALUE(lpf)) {
506 BX_CPU_THIS_PTR mem->readPhysicalPage(this, laddr, length, data);
507 + BX_CPU_THIS_PTR eraser_access_linear(laddr,length,pl,rw,data);
510 // We haven't seen this page, or it's been bumped before.
511 @@ -1258,6 +1263,7 @@ BX_CPU_C::access_linear(bx_address laddr
512 lpf = laddr & 0xfffff000;
513 if (BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == BX_TLB_LPF_VALUE(lpf)) {
514 BX_CPU_THIS_PTR mem->writePhysicalPage(this, laddr, length, data);
515 + BX_CPU_THIS_PTR eraser_access_linear(laddr,length,pl,rw,data);
518 // We haven't seen this page, or it's been bumped before.
519 @@ -1401,6 +1407,8 @@ BX_CPU_C::access_linear(Bit32u laddr, un
520 BX_CPU_THIS_PTR mem->readPhysicalPage(this, laddr, length, data);
522 BX_CPU_THIS_PTR mem->writePhysicalPage(this, laddr, length, data);
524 + BX_CPU_THIS_PTR eraser_access_linear(laddr,length,pl,rw,data);
528 diff -X ignore -urpNb bochs-2.1.1/gui/Makefile.in checkbochs-2.1.1/gui/Makefile.in
529 --- bochs-2.1.1/gui/Makefile.in 2003-11-28 07:07:28.000000000 -0800
530 +++ checkbochs-2.1.1/gui/Makefile.in 2005-07-02 17:25:48.000000000 -0700
531 @@ -44,7 +44,7 @@ SHELL = /bin/sh
535 -CXXFLAGS = $(BX_INCDIRS) @CXXFLAGS@ @GUI_CXXFLAGS@
536 +CXXFLAGS = $(BX_INCDIRS) @CXXFLAGS@ @GUI_CXXFLAGS@ -fms-extensions
540 diff -X ignore -urpNb bochs-2.1.1/gui/siminterface.h checkbochs-2.1.1/gui/siminterface.h
541 --- bochs-2.1.1/gui/siminterface.h 2004-02-11 14:28:52.000000000 -0800
542 +++ checkbochs-2.1.1/gui/siminterface.h 2005-07-02 17:25:48.000000000 -0700
543 @@ -464,6 +464,7 @@ typedef enum {
545 BXP_SEL_CONFIG_INTERFACE,
546 BXP_SEL_DISPLAY_LIBRARY,
547 + BXP_SEL_TAINT_TYPE,
548 BXP_THIS_IS_THE_LAST // used to determine length of list
551 diff -X ignore -urpNb bochs-2.1.1/iodev/vmware3.h checkbochs-2.1.1/iodev/vmware3.h
552 --- bochs-2.1.1/iodev/vmware3.h 2004-02-11 14:28:54.000000000 -0800
553 +++ checkbochs-2.1.1/iodev/vmware3.h 2005-07-19 12:23:26.000000000 -0700
554 @@ -75,7 +75,7 @@ class vmware3_image_t : public device_im
555 Bit32u vmware_version;
558 -#if !defined(_MSC_VER)
559 +#if 0 && !defined(_MSC_VER)
560 GCC_ATTRIBUTE((packed))
563 diff -X ignore -urpNb bochs-2.1.1/main.cc checkbochs-2.1.1/main.cc
564 --- bochs-2.1.1/main.cc 2005-07-02 17:23:03.000000000 -0700
565 +++ checkbochs-2.1.1/main.cc 2005-07-03 11:01:28.000000000 -0700
568 #include "state_file.h"
570 +#include "taint/taint_type.h"
571 +#include "taint/mydebug.h"
572 +#include "taint/globals.h"
577 @@ -1770,6 +1774,7 @@ int bxmain () {
578 SIM->set_quit_context (&context);
579 if (bx_init_main (bx_startup_flags.argc, bx_startup_flags.argv) < 0)
581 + BX_CPU(0)->eraser_init_globals() ;
582 // read a param to decide which config interface to start.
583 // If one exists, start it. If not, just begin.
584 bx_param_enum_c *ci_param = SIM->get_param_enum (BXP_SEL_CONFIG_INTERFACE);
585 @@ -2317,6 +2322,18 @@ bx_begin_simulation (int argc, char *arg
586 SIM->set_init_done (1);
588 // update headerbar buttons since drive status can change during init
589 + static char *taint_type_list[] = {
594 + bx_options.Otaint_type = new bx_param_enum_c (BXP_SEL_TAINT_TYPE,
595 + "Taint Type (Eraser,..)",
596 + "Select Taint Type",
601 bx_gui->update_drive_status_buttons ();
603 // The set handler for mouse_enabled does not actually update the gui
604 @@ -2515,7 +2532,7 @@ bx_init_hardware()
606 signal(SIGINT, bx_signal_handler);
609 + assign_taint_functions ("eraser") ;
612 signal(SIGALRM, bx_signal_handler);
613 @@ -3979,6 +3996,20 @@ parse_line_formatted(char *context, int
614 if (!bx_options.Osel_config->set_by_name (params[1]))
615 PARSE_ERR(("%s: config_interface '%s' not available", context, params[1]));
617 + else if (!strcmp (params[0], "taint")) {
618 + if (num_params!=2) {
619 + PARSE_ERR(("%s: taint directive: wrong # of args. Usage: taint <option>",context)) ;
621 + if (!bx_options.Otaint_type->set_by_name (params[1])) {
622 + PARSE_ERR(("%s: taint type '%s' not available.", context, params[1]));
625 + else if (!strcmp (params[0], "logfile")) {
626 + if (num_params!=2) {
627 + PARSE_ERR(("%s: logfile directive: wrong # of args. Usage- logfile: <filename>",context)) ;
629 + strncpy (g_logfn, params[1], 128) ;
631 else if (!strcmp(params[0], "display_library")) {
632 if (num_params != 2) {
633 PARSE_ERR(("%s: display_library directive: wrong # args.", context));
634 diff -X ignore -urpNb bochs-2.1.1/memory/memory.h checkbochs-2.1.1/memory/memory.h
635 --- bochs-2.1.1/memory/memory.h 2004-02-11 14:28:54.000000000 -0800
636 +++ checkbochs-2.1.1/memory/memory.h 2005-07-02 17:25:48.000000000 -0700
637 @@ -45,6 +45,10 @@ class BOCHSAPI BX_MEM_C : public logfunc
639 Bit8u *actual_vector;
640 Bit8u *vector; // aligned correctly
642 + Bit32u *actual_taint_vector; //keep a word for every byte
643 + Bit32u *taint_vector; // aligned correctly
646 size_t megabytes; // (len in Megabytes)
648 @@ -77,6 +81,12 @@ public:
649 unsigned long (*f)(unsigned char *buf, int len),
650 Bit32u addr1, Bit32u addr2, Bit32u *crc);
651 BX_MEM_SMF Bit8u * getHostMemAddr(BX_CPU_C *cpu, Bit32u a20Addr, unsigned op) BX_CPP_AttrRegparmN(3);
654 + BX_MEM_SMF void readPhysicalTaintPage(BX_CPU_C *cpu, Bit32u addr,
655 + unsigned len, void *data) BX_CPP_AttrRegparmN(3);
656 + BX_MEM_SMF void writePhysicalTaintPage(BX_CPU_C *cpu, Bit32u addr,
657 + unsigned len, void *data) BX_CPP_AttrRegparmN(3);
660 #if BX_PROVIDE_CPU_MEMORY==1
661 diff -X ignore -urpNb bochs-2.1.1/memory/misc_mem.cc checkbochs-2.1.1/memory/misc_mem.cc
662 --- bochs-2.1.1/memory/misc_mem.cc 2004-02-11 14:28:54.000000000 -0800
663 +++ checkbochs-2.1.1/memory/misc_mem.cc 2005-07-02 17:25:48.000000000 -0700
664 @@ -54,7 +54,9 @@ BX_MEM_C::BX_MEM_C(void)
668 + taint_vector = NULL;
669 actual_vector = NULL;
670 + actual_taint_vector = NULL;
674 @@ -69,11 +71,15 @@ BX_MEM_C::alloc_vector_aligned (size_t b
675 if (actual_vector != NULL) {
676 BX_INFO (("freeing existing memory vector"));
677 delete [] actual_vector;
678 + delete [] actual_taint_vector;
679 actual_vector = NULL;
680 + actual_taint_vector = NULL;
682 + taint_vector = NULL;
684 Bit64u test_mask = alignment - 1;
685 actual_vector = new Bit8u [bytes+test_mask];
686 + actual_taint_vector = new Bit32u [bytes+test_mask];
687 // round address forward to nearest multiple of alignment. Alignment
688 // MUST BE a power of two for this to work.
689 Bit64u masked = ((Bit64u)(actual_vector + test_mask)) & ~test_mask;
690 @@ -84,6 +90,13 @@ BX_MEM_C::alloc_vector_aligned (size_t b
691 BX_ASSERT (vector+bytes <= actual_vector+bytes+test_mask);
692 BX_INFO (("allocated memory at %p. after alignment, vector=%p",
693 actual_vector, vector));
696 + unsigned int wasted_memory = masked - (Bit64u)vector ;
697 + BX_ASSERT(wasted_memory<=test_mask);
698 + taint_vector = &(actual_taint_vector[wasted_memory]);
699 + //sanity check: after realignment, everything fits in allocated space
700 + BX_ASSERT(&(taint_vector[bytes]) <= &(actual_taint_vector[bytes+test_mask]));
704 @@ -136,6 +149,7 @@ BX_MEM_C::init_memory(int memsize)
706 if (BX_MEM_THIS vector == NULL) {
707 // memory not already allocated, do now...
708 + assert (taint_vector==NULL) ;
709 alloc_vector_aligned (memsize, BX_MEM_VECTOR_ALIGN);
710 BX_MEM_THIS len = memsize;
711 BX_MEM_THIS megabytes = memsize / (1024*1024);
712 diff -X ignore -urpNb bochs-2.1.1/taint/Makefile.in checkbochs-2.1.1/taint/Makefile.in
713 --- bochs-2.1.1/taint/Makefile.in 1969-12-31 16:00:00.000000000 -0800
714 +++ checkbochs-2.1.1/taint/Makefile.in 2005-07-02 17:25:48.000000000 -0700
722 +top_builddir = $(srcdir)/..
723 +top_srcdir = $(srcdir)/..
730 +CXXFLAGS = -g -O2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILES $(X_CFLAGS)
734 +X_LIBS = -L/usr/X11R6/lib
735 +X_PRE_LIBS = -lSM -lICE
740 +BX_INCDIRS = -I.. -I$(srcdir)/.. -I../instrument/stubs -I$(srcdir)/../instrument/stubs
745 +# Objects which are synced between the cpu and cpu64 code and
746 +# are used for either compile.
747 +OBJS = common.o globals.o taint_type.o eraser.o paging.o memory.o \
748 + lockset.o list.o hash.o silent_access.o silent_paging.o
753 +BX_INCLUDES = ../bochs.h ../config.h
759 + $(CXX) -c $(BX_INCDIRS) $(CXXFLAGS) $< -o $@
765 + $(RANLIB) libtaint.a
767 +$(OBJS): $(BX_INCLUDES)
769 +$(OBJS64): $(BX_INCLUDES)
778 +common.o: common.cc ../bochs.h ../config.h ../osdep.h ../bx_debug/debug.h \
779 + ../bxversion.h ../gui/siminterface.h ../state_file.h ../cpu/cpu.h \
780 + ../cpu/lazy_flags.h ../cpu/i387.h ../cpu/xmm.h ../memory/memory.h \
781 + ../pc_system.h ../plugin.h ../extplugin.h ../gui/gui.h \
782 + ../gui/textconfig.h ../gui/keymap.h ../iodev/iodev.h ../iodev/pci.h \
783 + ../iodev/pci2isa.h ../iodev/pcivga.h ../iodev/vga.h ../iodev/ioapic.h \
784 + ../iodev/biosdev.h ../iodev/cmos.h ../iodev/dma.h ../iodev/floppy.h \
785 + ../iodev/harddrv.h ../iodev/cdrom.h ../iodev/vmware3.h \
786 + ../iodev/keyboard.h ../iodev/parallel.h ../iodev/pic.h ../iodev/pit.h \
787 + ../iodev/pit_wrap.h ../iodev/pit82c54.h ../iodev/virt_timer.h \
788 + ../iodev/serial.h ../iodev/unmapped.h ../iodev/eth.h ../iodev/ne2k.h \
789 + ../iodev/guest2host.h ../iodev/slowdown_timer.h ../iodev/extfpuirq.h \
790 + ../instrument/stubs/instrument.h mydebug.h
792 +taint_type.o: taint_type.cc taint_type.h \
793 + ../bochs.h ../config.h ../osdep.h ../bx_debug/debug.h \
794 + ../bxversion.h ../gui/siminterface.h ../state_file.h ../cpu/cpu.h \
795 + ../cpu/lazy_flags.h ../cpu/i387.h ../cpu/xmm.h ../memory/memory.h \
796 + ../pc_system.h ../plugin.h ../extplugin.h ../gui/gui.h \
797 + ../gui/textconfig.h ../gui/keymap.h ../iodev/iodev.h ../iodev/pci.h \
798 + ../iodev/pci2isa.h ../iodev/pcivga.h ../iodev/vga.h ../iodev/ioapic.h \
799 + ../iodev/biosdev.h ../iodev/cmos.h ../iodev/dma.h ../iodev/floppy.h \
800 + ../iodev/harddrv.h ../iodev/cdrom.h ../iodev/vmware3.h \
801 + ../iodev/keyboard.h ../iodev/parallel.h ../iodev/pic.h ../iodev/pit.h \
802 + ../iodev/pit_wrap.h ../iodev/pit82c54.h ../iodev/virt_timer.h \
803 + ../iodev/serial.h ../iodev/unmapped.h ../iodev/eth.h ../iodev/ne2k.h \
804 + ../iodev/guest2host.h ../iodev/slowdown_timer.h ../iodev/extfpuirq.h \
805 + ../instrument/stubs/instrument.h mydebug.h
807 +memory.o: memory.cc ../bochs.h ../config.h ../osdep.h ../bx_debug/debug.h \
808 + ../bxversion.h ../gui/siminterface.h ../state_file.h ../cpu/cpu.h \
809 + ../cpu/lazy_flags.h ../cpu/i387.h ../cpu/xmm.h ../memory/memory.h \
810 + ../pc_system.h ../plugin.h ../extplugin.h ../gui/gui.h \
811 + ../gui/textconfig.h ../gui/keymap.h ../iodev/iodev.h ../iodev/pci.h \
812 + ../iodev/pci2isa.h ../iodev/pcivga.h ../iodev/vga.h ../iodev/ioapic.h \
813 + ../iodev/biosdev.h ../iodev/cmos.h ../iodev/dma.h ../iodev/floppy.h \
814 + ../iodev/harddrv.h ../iodev/cdrom.h ../iodev/vmware3.h \
815 + ../iodev/keyboard.h ../iodev/parallel.h ../iodev/pic.h ../iodev/pit.h \
816 + ../iodev/pit_wrap.h ../iodev/pit82c54.h ../iodev/virt_timer.h \
817 + ../iodev/serial.h ../iodev/unmapped.h ../iodev/eth.h ../iodev/ne2k.h \
818 + ../iodev/guest2host.h ../iodev/slowdown_timer.h ../iodev/extfpuirq.h \
819 + ../instrument/stubs/instrument.h mydebug.h
821 +paging.o: paging.cc ../bochs.h ../config.h ../osdep.h ../bx_debug/debug.h \
822 + ../bxversion.h ../gui/siminterface.h ../state_file.h ../cpu/cpu.h \
823 + ../cpu/lazy_flags.h ../cpu/i387.h ../cpu/xmm.h ../memory/memory.h \
824 + ../pc_system.h ../plugin.h ../extplugin.h ../gui/gui.h \
825 + ../gui/textconfig.h ../gui/keymap.h ../iodev/iodev.h ../iodev/pci.h \
826 + ../iodev/pci2isa.h ../iodev/pcivga.h ../iodev/vga.h ../iodev/ioapic.h \
827 + ../iodev/biosdev.h ../iodev/cmos.h ../iodev/dma.h ../iodev/floppy.h \
828 + ../iodev/harddrv.h ../iodev/cdrom.h ../iodev/vmware3.h \
829 + ../iodev/keyboard.h ../iodev/parallel.h ../iodev/pic.h ../iodev/pit.h \
830 + ../iodev/pit_wrap.h ../iodev/pit82c54.h ../iodev/virt_timer.h \
831 + ../iodev/serial.h ../iodev/unmapped.h ../iodev/eth.h ../iodev/ne2k.h \
832 + ../iodev/guest2host.h ../iodev/slowdown_timer.h ../iodev/extfpuirq.h \
833 + ../instrument/stubs/instrument.h mydebug.h
835 +eraser.o: eraser.cc lockset.h eraser.h \
836 + ../bochs.h ../config.h ../osdep.h ../bx_debug/debug.h \
837 + ../bxversion.h ../gui/siminterface.h ../state_file.h ../cpu/cpu.h \
838 + ../cpu/lazy_flags.h ../cpu/i387.h ../cpu/xmm.h ../memory/memory.h \
839 + ../pc_system.h ../plugin.h ../extplugin.h ../gui/gui.h \
840 + ../gui/textconfig.h ../gui/keymap.h ../iodev/iodev.h ../iodev/pci.h \
841 + ../iodev/pci2isa.h ../iodev/pcivga.h ../iodev/vga.h ../iodev/ioapic.h \
842 + ../iodev/biosdev.h ../iodev/cmos.h ../iodev/dma.h ../iodev/floppy.h \
843 + ../iodev/harddrv.h ../iodev/cdrom.h ../iodev/vmware3.h \
844 + ../iodev/keyboard.h ../iodev/parallel.h ../iodev/pic.h ../iodev/pit.h \
845 + ../iodev/pit_wrap.h ../iodev/pit82c54.h ../iodev/virt_timer.h \
846 + ../iodev/serial.h ../iodev/unmapped.h ../iodev/eth.h ../iodev/ne2k.h \
847 + ../iodev/guest2host.h ../iodev/slowdown_timer.h ../iodev/extfpuirq.h \
848 + ../instrument/stubs/instrument.h mydebug.h
850 +silent_paging.o: silent_paging.cc \
851 + ../bochs.h ../config.h ../osdep.h ../bx_debug/debug.h \
852 + ../bxversion.h ../gui/siminterface.h ../state_file.h ../cpu/cpu.h \
853 + ../cpu/lazy_flags.h ../cpu/i387.h ../cpu/xmm.h ../memory/memory.h \
854 + ../pc_system.h ../plugin.h ../extplugin.h ../gui/gui.h \
855 + ../gui/textconfig.h ../gui/keymap.h ../iodev/iodev.h ../iodev/pci.h \
856 + ../iodev/pci2isa.h ../iodev/pcivga.h ../iodev/vga.h ../iodev/ioapic.h \
857 + ../iodev/biosdev.h ../iodev/cmos.h ../iodev/dma.h ../iodev/floppy.h \
858 + ../iodev/harddrv.h ../iodev/cdrom.h ../iodev/vmware3.h \
859 + ../iodev/keyboard.h ../iodev/parallel.h ../iodev/pic.h ../iodev/pit.h \
860 + ../iodev/pit_wrap.h ../iodev/pit82c54.h ../iodev/virt_timer.h \
861 + ../iodev/serial.h ../iodev/unmapped.h ../iodev/eth.h ../iodev/ne2k.h \
862 + ../iodev/guest2host.h ../iodev/slowdown_timer.h ../iodev/extfpuirq.h \
863 + ../instrument/stubs/instrument.h mydebug.h
865 +silent_access.o: silent_access.cc \
866 + ../bochs.h ../config.h ../osdep.h ../bx_debug/debug.h \
867 + ../bxversion.h ../gui/siminterface.h ../state_file.h ../cpu/cpu.h \
868 + ../cpu/lazy_flags.h ../cpu/i387.h ../cpu/xmm.h ../memory/memory.h \
869 + ../pc_system.h ../plugin.h ../extplugin.h ../gui/gui.h \
870 + ../gui/textconfig.h ../gui/keymap.h ../iodev/iodev.h ../iodev/pci.h \
871 + ../iodev/pci2isa.h ../iodev/pcivga.h ../iodev/vga.h ../iodev/ioapic.h \
872 + ../iodev/biosdev.h ../iodev/cmos.h ../iodev/dma.h ../iodev/floppy.h \
873 + ../iodev/harddrv.h ../iodev/cdrom.h ../iodev/vmware3.h \
874 + ../iodev/keyboard.h ../iodev/parallel.h ../iodev/pic.h ../iodev/pit.h \
875 + ../iodev/pit_wrap.h ../iodev/pit82c54.h ../iodev/virt_timer.h \
876 + ../iodev/serial.h ../iodev/unmapped.h ../iodev/eth.h ../iodev/ne2k.h \
877 + ../iodev/guest2host.h ../iodev/slowdown_timer.h ../iodev/extfpuirq.h \
878 + ../instrument/stubs/instrument.h mydebug.h
880 +globals.o: globals.cc \
881 + ../bochs.h ../config.h ../osdep.h ../bx_debug/debug.h \
882 + ../bxversion.h ../gui/siminterface.h ../state_file.h ../cpu/cpu.h \
883 + ../cpu/lazy_flags.h ../cpu/i387.h ../cpu/xmm.h ../memory/memory.h \
884 + ../pc_system.h ../plugin.h ../extplugin.h ../gui/gui.h \
885 + ../gui/textconfig.h ../gui/keymap.h ../iodev/iodev.h ../iodev/pci.h \
886 + ../iodev/pci2isa.h ../iodev/pcivga.h ../iodev/vga.h ../iodev/ioapic.h \
887 + ../iodev/biosdev.h ../iodev/cmos.h ../iodev/dma.h ../iodev/floppy.h \
888 + ../iodev/harddrv.h ../iodev/cdrom.h ../iodev/vmware3.h \
889 + ../iodev/keyboard.h ../iodev/parallel.h ../iodev/pic.h ../iodev/pit.h \
890 + ../iodev/pit_wrap.h ../iodev/pit82c54.h ../iodev/virt_timer.h \
891 + ../iodev/serial.h ../iodev/unmapped.h ../iodev/eth.h ../iodev/ne2k.h \
892 + ../iodev/guest2host.h ../iodev/slowdown_timer.h ../iodev/extfpuirq.h \
893 + ../instrument/stubs/instrument.h mydebug.h
895 +lockset.o: lockset.h lockset.cc hash.h mydebug.h
897 +hash.o: hash.h hash.cc
899 +list.o: list.h list.cc
900 diff -X ignore -urpNb bochs-2.1.1/taint/common.cc checkbochs-2.1.1/taint/common.cc
901 --- bochs-2.1.1/taint/common.cc 1969-12-31 16:00:00.000000000 -0800
902 +++ checkbochs-2.1.1/taint/common.cc 2005-07-19 15:33:53.000000000 -0700
904 +#define NEED_CPU_REG_SHORTCUTS 1
906 +#define LOG_THIS BX_CPU_THIS_PTR
908 +#include "taint/mydebug.h"
909 +#include "taint/globals.h"
913 +#define STACK_DEPTH 256
916 +#define this (BX_CPU(0))
919 +#define MASK(SHIFT, CNT) (((1ul << (CNT)) - 1) << (SHIFT))
921 +/* Page offset (bits 0:12). */
922 +#define PGSHIFT 0 /* Index of first offset bit. */
925 +#define PGBITS 13 /* Number of offset bits. */
930 +#define PGBITS 13 /* Number of offset bits. */
931 +#warning "Dont know whether compiling for Pintos or Linux. Assuming Linux (PGBITS=13)"
935 +#define PGMASK MASK(PGSHIFT, PGBITS) /* Page offset bits (0:12). */
936 +#define PGSIZE (1 << PGBITS) /* Bytes in a page. */
939 + Bit32u arr[STACK_DEPTH];
942 +bool savedRegsStackInitialized = false;
944 +void push(struct stack *s, Bit32u val) {
946 + if (s->top++==STACK_DEPTH) {
947 + DBG(ERR,("Stack Overflow. Exiting.."));
950 + s->arr[s->top-1] = val;
953 +Bit32u pop(struct stack *s) {
957 + DBG(ERR,("Stack Underflow. Exiting.."));
960 + ret = s->arr[s->top-1];
965 +Bit32u stack_init(struct stack *s) {
969 +void BX_CPU_C::TT_TaintSaveRegs(bxInstruction_c *i) {
970 + Bit32u opId = i->Id();
972 + if (!savedRegsStackInitialized) {
973 + stack_init(&savedRegsStack);
974 + savedRegsStackInitialized = true;
976 + //if (opId==999) mylog(D1,("%s %d: called with opId 999.\n",__func__,__LINE__));
977 + push(&savedRegsStack, EAX);
978 + push(&savedRegsStack, EBX);
979 + push(&savedRegsStack, ECX);
980 + push(&savedRegsStack, EDX);
981 + //DBG(L1,("pushing EAX=%x, EBX=%x, ECX=%x, EDX=%x.\n",EAX,EBX,ECX,EDX));
982 + //g_instruction_display_count = 10;
985 +void BX_CPU_C::TT_TaintRestoreRegs(bxInstruction_c *i) {
986 + assert(savedRegsStackInitialized);
987 + //mylog(D1,("%s %d: ECX=%x, EDX=%x\n",__func__,__LINE__,ECX,EDX));
988 + EDX = pop(&savedRegsStack);
989 + ECX = pop(&savedRegsStack);
990 + EBX = pop(&savedRegsStack);
991 + EAX = pop(&savedRegsStack);
992 + //DBG(L1,("popping EAX=%x, EBX=%x, ECX=%x, EDX=%x.\n",EAX,EBX,ECX,EDX));
996 +BX_CPU_C::thread_current(void) {
997 + unsigned kernelPL = 0;
999 + if (CPL==kernelPL) {
1000 + pid = ((ESP-1) - 0xc0000000) >> 12;
1007 +char *BX_CPU_C::backtrace(char *s) {
1009 + int stackdepth = 0, readsuccessful = 1;
1011 + //printf("EIP = %x.\n",EIP);
1012 + snprintf(s,16,"%x ",EIP);
1014 + //while (ebp>0xc0000000 && stackdepth<10 && readsuccessful) {
1015 + while (ebp>0xc0000000 && stackdepth<10) {
1016 + readsuccessful &= read_virtual_dword_silent(BX_SEG_REG_SS, ebp+4, &eip);
1017 + readsuccessful &= read_virtual_dword_silent(BX_SEG_REG_SS, ebp, &ebp);
1018 + snprintf(tmp,16,"%x ",eip);
1019 + s = strncat(s,tmp,16);
1026 +read_dword(Bit32u laddr)
1028 + Bit32u paddr = laddr - PHYS_BASE;
1029 + if (paddr <= BX_MEM(0)->len - 4) {
1031 + ReadHostDWordFromLittleEndian(BX_MEM(0)->vector + paddr, tmp);
1039 +read_2_dwords(Bit32u laddr, Bit32u *dst0, Bit32u *dst1)
1041 + Bit32u paddr = laddr - PHYS_BASE;
1042 + if (paddr <= BX_MEM(0)->len - 8) {
1043 + ReadHostDWordFromLittleEndian(BX_MEM(0)->vector + paddr, *dst0);
1044 + ReadHostDWordFromLittleEndian(BX_MEM(0)->vector + paddr + 4, *dst1);
1051 +void BX_CPU_C::backtrace_eips(Bit32u *eips, int n) {
1055 + while (ebp>0xc0000000 && --n > 0) {
1056 + read_2_dwords(ebp, &ebp, &eip);
1058 + eip = read_dword (ebp+4);
1059 + ebp = read_dword (ebp);
1068 +Bit32u BX_CPU_C::callingEIP(void) {
1070 + int stackdepth = 0, readsuccessful = 1;
1073 + readsuccessful = read_virtual_dword_silent(BX_SEG_REG_SS, ebp+4, &eip);
1074 + if (readsuccessful) return eip;
1078 +void checkbochs_log (const char *fmt, ...)
1085 + va_start (ap, fmt) ;
1086 + vfprintf (g_logfp, fmt, ap) ;
1089 diff -X ignore -urpNb bochs-2.1.1/taint/eraser.cc checkbochs-2.1.1/taint/eraser.cc
1090 --- bochs-2.1.1/taint/eraser.cc 1969-12-31 16:00:00.000000000 -0800
1091 +++ checkbochs-2.1.1/taint/eraser.cc 2005-07-19 21:10:55.000000000 -0700
1093 +#define NEED_CPU_REG_SHORTCUTS 1
1095 +#define LOG_THIS BX_CPU_THIS_PTR
1098 +#define this (BX_CPU(0))
1102 +#include "mydebug.h"
1103 +#include "taint_type.h"
1104 +#include "lockset.h"
1106 +#include "eraser.h"
1107 +#include "globals.h"
1108 +#include "mydebug.h"
1110 +/* Backtrace database file name. */
1111 +char btdb_fn[] = "/tmp/eraserXXXXXX";
1113 +/* Backtrace database. */
1116 +struct backtrace_key {
1123 + Bit32u thread; /* Thread ID. */
1124 + Bit32u eips[16]; /* Backtrace EIPs. */
1125 + Bit32u locks[16]; /* Locks held. */
1129 +get_key(Bit32u address, unsigned seq, struct backtrace_key *key)
1131 + key->laddr = address;
1136 +mk_datum(void *block, size_t length)
1139 + d.dptr = (char *) block;
1145 +record_backtrace(const char *type, Bit32u laddr)
1147 + lockset &tv = *BX_CPU(0)->access_linear_taint(laddr);
1148 + struct backtrace_key key;
1149 + struct backtrace bt;
1151 + /* Fill in backtrace struct. */
1152 + strncpy(bt.type, type, sizeof bt.type);
1153 + bt.type[sizeof bt.type - 1] = '\0';
1154 + bt.thread = BX_CPU(0)->thread_current();
1155 + BX_CPU(0)->backtrace_eips(bt.eips, sizeof bt.eips / sizeof *bt.eips);
1156 + lockset_dump(cur_held(bt.thread),
1157 + bt.locks, sizeof bt.locks / sizeof *bt.locks);
1159 + /* Store backtrace struct. */
1160 + get_key(laddr, tv.seq, &key);
1161 + if (gdbm_store(btdb, mk_datum(&key, sizeof key), mk_datum(&bt, sizeof bt),
1162 + GDBM_REPLACE) != 0) {
1163 + DBG(ERR, ("gdbm_store failed"));
1167 + /* Increment sequence number. */
1172 +dump_backtrace(Bit32u laddr, unsigned seq)
1174 + struct backtrace_key key;
1175 + struct backtrace *bt;
1178 + /* Fetch backtrace. */
1179 + get_key(laddr, seq, &key);
1180 + content = gdbm_fetch(btdb, mk_datum(&key, sizeof key));
1181 + if (content.dptr == NULL) {
1184 + bt = (backtrace *) content.dptr;
1187 + fprintf(g_logfp, "%s: thread %x, backtrace", bt->type, bt->thread);
1188 + for (unsigned i = 0; i < sizeof bt->eips / sizeof *bt->eips; i++) {
1189 + if (bt->eips[i] == 0)
1191 + fprintf(g_logfp, " %x", bt->eips[i]);
1193 + fprintf(g_logfp, ", locks");
1194 + for (unsigned i = 0; i < sizeof bt->locks / sizeof *bt->locks; i++) {
1195 + if (bt->locks[i] == 0)
1197 + fprintf(g_logfp, " %x", bt->locks[i]);
1199 + fprintf(g_logfp, "\n");
1210 +#define WRN_UNINIT(loc) do { \
1211 + /* DBG(WRN,("Thread %x: Read on uninitialized location %x, backtrace: %s\n",myid,loc, backtrace(btstr))); */ \
1214 +#define WRN_ERASER(myid,loc,tval) do { \
1215 + if (!already_warned(loc)) { \
1217 + DBG(WRN,("Thread %x: Warning on location %x, backtrace: %s\n",myid,loc, backtrace(btstr))); \
1222 +struct warn_table_entry {
1227 +static int warn_table_initialized = 0;
1228 +static struct hash warn_table ;
1230 +unsigned warn_hash (const hash_elem *e, void *aux)
1232 + struct warn_table_entry *h = hash_entry (e, struct warn_table_entry, h_elem);
1237 +warn_less (const hash_elem *a_, const hash_elem *b_,
1240 + struct warn_table_entry *a = hash_entry (a_, struct warn_table_entry, h_elem);
1241 + struct warn_table_entry *b = hash_entry (b_, struct warn_table_entry, h_elem);
1242 + return (a->loc < b->loc);
1246 +already_warned(unsigned loc) {
1247 + struct warn_table_entry tmp;
1248 + hash_elem *h_element;
1249 + if (!warn_table_initialized) return 0;
1251 + h_element = hash_find(&warn_table, &tmp.h_elem);
1252 + if (h_element) return 1;
1257 +warn(unsigned loc) {
1258 + struct warn_table_entry *tmp = (struct warn_table_entry*)malloc(sizeof(struct warn_table_entry));
1259 + if (!warn_table_initialized) {
1260 + hash_init(&warn_table, warn_hash, warn_less, NULL);
1261 + warn_table_initialized = 1;
1264 + hash_insert(&warn_table, &tmp->h_elem);
1268 +void BX_CPU_C::TT_Lock(bxInstruction_c *i) {
1269 + Bit32u opId = i->Id();
1270 + if (opId!=ERASER_ID) return;
1272 + Bit32u myid = (BX_CPU_THIS_PTR thread_current())&0x3fffffff;
1273 + DBG(LOCKS,("%x: acquiring lock %x. backtrace: %s\n",myid,ECX,backtrace(btstr)));
1275 + //lockset_t lset = add_lock(cur_held(myid),ECX);
1276 + //update_lockset(myid,lset);
1279 +void BX_CPU_C::TT_Unlock(bxInstruction_c *i) {
1280 + Bit32u opId = i->Id();
1281 + if (opId!=ERASER_ID) return;
1283 + Bit32u myid = (BX_CPU_THIS_PTR thread_current())&0x3fffffff;
1284 + DBG(LOCKS,("%x: releasing lock %x. backtrace: %s\n",myid,ECX,backtrace(btstr)));
1285 + eraser_unlock(ECX);
1286 + //Bit32u myid = (BX_CPU_THIS_PTR thread_current())&0x3fffffff;
1287 + //lockset_t lset = remove_lock(cur_held(myid),ECX);
1288 + //update_lockset(myid,lset);
1291 +void BX_CPU_C::eraser_access_linear(bx_address laddr, unsigned len, unsigned pl, unsigned rw, void *notused)
1293 + Bit32u tid = BX_CPU_THIS_PTR thread_current ();
1294 + if (ignore_on (tid))
1297 + // Acquire a dummy lock for disabled interrupts.
1298 + if (!BX_CPU_THIS_PTR get_IF())
1299 + eraser_lock(INTERRUPT_LOCK);
1301 + // Acquire a dummy lock for h/w prefix "LOCK".
1302 + if (BX_CPU_THIS_PTR curInstruction->isLocked())
1303 + eraser_lock(HW_PREFIX_LOCK);
1305 + for (; len-- > 0; laddr++) {
1306 + lockset *tvp = access_linear_taint(laddr);
1309 + lockset &tv = *tvp;
1310 + lockset orig_tv = tv;
1312 + switch (tv.state) {
1314 + if (rw==BX_WRITE || rw==BX_RW) {
1315 + tv.state = S_EXCLUSIVE;
1318 + record_backtrace("V->E", laddr);
1320 + //WRN_UNINIT(laddr) ;
1325 + if (tv.value == tid) {
1326 + // Still exclusive to same thread.
1328 + tv.value = cur_held (tid);
1329 + if (rw==BX_WRITE || rw==BX_RW) {
1330 + tv.state = S_SH_MOD;
1331 + record_backtrace("E->SM", laddr);
1333 + tv.state = S_SHARED;
1334 + record_backtrace("E->S", laddr);
1340 + tv.value = intersect_locksets (tv.value, cur_held (tid));
1341 + if (rw==BX_WRITE || rw==BX_RW) {
1342 + tv.state = S_SH_MOD;
1343 + record_backtrace("S->SM", laddr);
1344 + } else if (tv.value != orig_tv.value) {
1345 + record_backtrace("S", laddr);
1350 + tv.value = intersect_locksets (tv.value, cur_held (tid));
1351 + if (tv.value != orig_tv.value) {
1352 + record_backtrace("SM", laddr);
1356 + /* Warn if needed */
1357 + if (tv.state == S_SH_MOD
1358 + && tv.value == LOCKSET_EMPTY
1359 + && !already_warned(laddr)) {
1364 + fprintf(g_logfp, "Warning on location %x:\n", laddr);
1366 + for (unsigned i = 0; i < tv.seq; i++)
1367 + dump_backtrace(laddr, i);
1369 + fprintf(g_logfp, "\n");
1373 + // Release interrupt lock.
1374 + if (!BX_CPU_THIS_PTR get_IF())
1375 + eraser_unlock(INTERRUPT_LOCK);
1377 + // Release LOCK prefix lock.
1378 + if (BX_CPU_THIS_PTR curInstruction->isLocked())
1379 + eraser_unlock(HW_PREFIX_LOCK);
1382 +void BX_CPU_C::TT_CommonOps(bxInstruction_c *i) {
1383 + Bit32u opId = i->Id();
1384 + Bit32u myid = (BX_CPU_THIS_PTR thread_current())&0x3fffffff;
1385 + if (opId!=ERASER_ID) return;
1387 + if (EAX==IGNORE_OP) {
1388 + for (int i=0;i<ECX;i++) warn(EDX+i);
1389 + DBG(L1,("ignoring location %x (%d).\n",EDX,ECX));
1391 + else if (EAX==LOCKINIT_OP) {
1394 + else if (EAX==IGNOREON_OP) {
1395 + if (global_startup_ignore) {
1398 + set_ignore(myid,true);
1399 + DBG(L1,("setting ignore on for thread %x. backtrace: %s\n",myid,backtrace(btstr)));
1401 + else if (EAX==IGNOREOFF_OP) {
1402 + if (global_startup_ignore) {
1405 + set_ignore(myid,false);
1406 + DBG(L1,("setting ignore off for thread %x. backtrace: %s\n",myid,backtrace(btstr)));
1408 + else if (EAX==REUSE_OP) {
1409 + int pl = 0; //kernel privileges
1410 + DBG(L1,("%x: reusing location %x (%d). ESP=%x\n",myid,EDX,ECX,ESP));
1411 + for (int i=0;i<ECX;i++) {
1412 + lockset *tv = access_linear_taint(EDX+i);
1414 + tv->state = S_VIRGIN;
1416 + } else if (EAX==DBG_MARK_OP) {
1417 + Bit32u taintval = 0;
1418 + char str[MAX_STRLEN];
1419 + int pl = 0; //kernel privileges
1422 + ret = access_linear_silent(EDX+i,1,pl,BX_READ,&str[i]) ;
1423 + } while (ret && str[i] && ++i<MAX_STRLEN);
1425 + DBG(L1,("%x: dbg mark at %s:%d. EIP=%x\n",myid,str,ECX,EIP));
1426 + } else if (EAX==GLOBAL_STARTUP_IGNOREOFF_OP) {
1427 + assert (global_startup_ignore) ;
1428 + global_startup_ignore = false ;
1429 + DBG(L1,("%x: setting global_startup_ignore to off\n",myid));
1433 +void BX_CPU_C::eraser_init_globals(void) {
1434 + g_logfp = fopen (g_logfn, "w") ;
1435 + if (g_logfp==NULL) {
1436 + DBG (ERR, ("%s(): Error opening checkbochs log %s for writing.\n",__func__,g_logfn)) ;
1439 + /* Create temporary file and set name into btdb_fn. */
1440 + int old_umask = umask(0077);
1441 + int fd = mkstemp(btdb_fn);
1443 + DBG(ERR, ("mkstemp: Couldn't create temp file: %s.\n", strerror(errno)));
1449 + /* Create database. */
1450 + btdb = gdbm_open(btdb_fn, 0, GDBM_NEWDB | GDBM_NOLOCK, 0600, NULL);
1451 + if (btdb == NULL) {
1452 + DBG(ERR, ("gdbm_open: Couldn't create backtrace database: %s.\n",
1453 + gdbm_strerror(gdbm_errno)));
1459 +void BX_CPU_C::eraser_done_globals(void)
1461 + if (global_startup_ignore) {
1463 + "WARNING: Eraser never enabled. Did you really apply "
1464 + "pintos/src/misc/checkbochs.patch?\n");
1468 diff -X ignore -urpNb bochs-2.1.1/taint/eraser.h checkbochs-2.1.1/taint/eraser.h
1469 --- bochs-2.1.1/taint/eraser.h 1969-12-31 16:00:00.000000000 -0800
1470 +++ checkbochs-2.1.1/taint/eraser.h 2005-07-19 13:59:02.000000000 -0700
1475 +#define IGNORE_OP 0
1476 +#define LOCKINIT_OP 1
1477 +#define IGNOREON_OP 2
1478 +#define IGNOREOFF_OP 3
1480 +#define DBG_MARK_OP 5
1481 +#define GLOBAL_STARTUP_IGNOREOFF_OP 6
1483 +#define eraser_lock(x) do { \
1484 + Bit32u myid = (BX_CPU_THIS_PTR thread_current())&0x3fffffff; \
1485 + lockset_t lset = add_lock(cur_held(myid),x); \
1486 + update_lockset(myid,lset); \
1489 +#define eraser_unlock(x) do { \
1490 + Bit32u myid = (BX_CPU_THIS_PTR thread_current())&0x3fffffff; \
1491 + lockset_t lset = remove_lock(cur_held(myid),x); \
1492 + update_lockset(myid,lset); \
1495 +#define eraser_init(x) do { \
1496 + Bit32u myid = (BX_CPU_THIS_PTR thread_current())&0x3fffffff; \
1497 + lockset_t mylocks = cur_held(myid); \
1499 + if (belongs(mylocks,x)) { \
1500 + lset = remove_lock(mylocks,x); \
1501 + update_lockset(myid,lset); \
1505 +#define INTERRUPT_LOCK 0xffff0001
1506 +#define HW_PREFIX_LOCK 0xffff0002
1509 diff -X ignore -urpNb bochs-2.1.1/taint/globals.cc checkbochs-2.1.1/taint/globals.cc
1510 --- bochs-2.1.1/taint/globals.cc 1969-12-31 16:00:00.000000000 -0800
1511 +++ checkbochs-2.1.1/taint/globals.cc 2005-07-02 17:25:48.000000000 -0700
1513 +#define NEED_CPU_REG_SHORTCUTS 1
1515 +#define LOG_THIS BX_CPU_THIS_PTR
1517 +#include "taint/globals.h"
1519 +void (*g_access_linear_fptr)(bx_address laddr, unsigned length, unsigned pl, unsigned rw, void *taint_value) = NULL;
1521 +char btstr[512]; //a global string to print the backtrace
1522 +int disassemble_num = 0;
1523 +bool global_startup_ignore = true ;
1524 +FILE *g_logfp = NULL ;
1525 +char g_logfn [128] = "checkbochs.log" ;
1526 diff -X ignore -urpNb bochs-2.1.1/taint/globals.h checkbochs-2.1.1/taint/globals.h
1527 --- bochs-2.1.1/taint/globals.h 1969-12-31 16:00:00.000000000 -0800
1528 +++ checkbochs-2.1.1/taint/globals.h 2005-07-19 12:09:53.000000000 -0700
1530 +#ifndef __GLOBALS_H
1531 +#define __GLOBALS_H
1533 +#define PHYS_BASE 0xc0000000
1535 +#define MAX_STRLEN 128
1537 +extern void (*g_access_linear_fptr)(bx_address laddr, unsigned length, unsigned pl, unsigned rw, void *taint_value);
1538 +extern char btstr[512]; //a global string to print the backtrace
1540 +extern bool global_startup_ignore ;
1541 +extern int disassemble_num ;
1543 +extern FILE *g_logfp ;
1544 +extern char g_logfn[128] ;
1547 diff -X ignore -urpNb bochs-2.1.1/taint/hash.cc checkbochs-2.1.1/taint/hash.cc
1548 --- bochs-2.1.1/taint/hash.cc 1969-12-31 16:00:00.000000000 -0800
1549 +++ checkbochs-2.1.1/taint/hash.cc 2005-07-02 17:25:48.000000000 -0700
1551 +#include <stdlib.h>
1553 +#include <assert.h>
1556 +static struct list *find_bucket (struct hash *, hash_elem *);
1557 +static struct list_elem *find_elem (struct hash *, struct list *, hash_elem *);
1558 +static void insert_elem (struct hash *, struct list *, hash_elem *);
1559 +static void remove_elem (struct hash *, hash_elem *);
1560 +static void rehash (struct hash *);
1562 +/* Initializes hash table H to compute hash values using HASH and
1563 + compare hash elements using LESS, given auxiliary data AUX.
1564 + this function can sleep on malloc. hence, CANNOT be called from thread_init.
1567 +hash_init (struct hash *h,
1568 + hash_hash_func *hash, hash_less_func *less, void *aux)
1571 + h->bucket_cnt = 4;
1572 + h->buckets = (list *)(malloc (sizeof *h->buckets * h->bucket_cnt));
1577 + if (h->buckets != NULL)
1586 +/* Removes all the elements from H. */
1588 +hash_clear (struct hash *h)
1592 + for (i = 0; i < h->bucket_cnt; i++)
1593 + list_init (&h->buckets[i]);
1597 +/* Destroys hash table H. */
1599 +hash_destroy (struct hash *h)
1601 + free (h->buckets);
1604 +/* Inserts NEW into hash table H and returns a null pointer, if
1605 + no equal element is already in the table.
1606 + If an equal element is already in the table, returns it
1607 + without inserting NEW. */
1609 +hash_insert (struct hash *h, hash_elem *newelem)
1611 + struct list *bucket = find_bucket (h, newelem);
1612 + struct list_elem *old = find_elem (h, bucket, newelem);
1615 + insert_elem (h, bucket, newelem);
1622 +/* Inserts NEW into hash table H, replacing any equal element
1623 + already in the table, which is returned. */
1625 +hash_replace (struct hash *h, hash_elem *newelem)
1627 + struct list *bucket = find_bucket (h, newelem);
1628 + struct list_elem *old = find_elem (h, bucket, newelem);
1631 + remove_elem (h, old);
1632 + insert_elem (h, bucket, newelem);
1639 +/* Finds and returns an element equal to E in hash table H, or a
1640 + null pointer if no equal element exists in the table. */
1642 +hash_find (struct hash *h, hash_elem *e)
1644 + return find_elem (h, find_bucket (h, e), e);
1647 +/* Finds, removes, and returns an element equal to E in hash
1648 + table H. Returns a null pointer if no equal element existed
1651 +hash_delete (struct hash *h, hash_elem *e)
1653 + struct list_elem *found = find_elem (h, find_bucket (h, e), e);
1654 + if (found != NULL)
1656 + remove_elem (h, found);
1662 +/* Initializes I for iterating hash table H.
1666 + struct hash_iterator i;
1668 + hash_first (&i, h);
1669 + while (hash_next (&i))
1671 + struct foo *f = hash_entry (hash_cur (&i), struct foo, elem);
1672 + ...do something with f...
1675 + NOTE: Modifying a hash table during iteration invalidates all
1679 +hash_first (struct hash_iterator *i, struct hash *h)
1681 + assert (i != NULL);
1682 + assert (h != NULL);
1685 + i->bucket = i->hash->buckets;
1686 + i->elem = list_head (i->bucket);
1689 +/* Advances I to the next element in the hash table and returns
1690 + it. Returns a null pointer if no elements are left. Elements
1691 + are returned in arbitrary order.
1693 + NOTE: Modifying a hash table during iteration invalidates all
1696 +hash_next (struct hash_iterator *i)
1698 + assert (i != NULL);
1700 + i->elem = list_next (i->elem);
1701 + while (i->elem == list_end (i->bucket))
1703 + if (++i->bucket >= i->hash->buckets + i->hash->bucket_cnt)
1708 + i->elem = list_begin (i->bucket);
1714 +/* Returns the current element in the hash table iteration, or a
1715 + null pointer at the end of the table. Undefined behavior
1716 + after calling hash_first() but before hash_next(). */
1718 +hash_cur (struct hash_iterator *i)
1723 +/* Returns the number of elements in H. */
1725 +hash_size (struct hash *h)
1727 + return h->elem_cnt;
1730 +/* Returns true if H contains no elements, false otherwise. */
1732 +hash_empty (struct hash *h)
1734 + return h->elem_cnt == 0;
1737 +/* Fowler-Noll-Vo hash constants, for 32-bit word sizes. */
1738 +#define FNV_32_PRIME 16777619u
1739 +#define FNV_32_BASIS 2166136261u
1741 +/* Returns a hash of the SIZE bytes in BUF. */
1743 +hash_bytes (const void *buf_, size_t size)
1745 + /* Fowler-Noll-Vo 32-bit hash, for bytes. */
1746 + const unsigned char *buf = (unsigned char *)buf_;
1749 + assert (buf != NULL);
1751 + hash = FNV_32_BASIS;
1752 + while (size-- > 0)
1753 + hash = (hash * FNV_32_PRIME) ^ *buf++;
1758 +/* Returns a hash of string S. */
1760 +hash_string (const char *s_)
1762 + const unsigned char *s = (unsigned char *)s_;
1765 + assert (s != NULL);
1767 + hash = FNV_32_BASIS;
1768 + while (*s != '\0')
1769 + hash = (hash * FNV_32_PRIME) ^ *s++;
1774 +/* Returns a hash of integer I. */
1778 + return hash_bytes (&i, sizeof i);
1781 +/* Returns the bucket in H that E belongs in. */
1782 +static struct list *
1783 +find_bucket (struct hash *h, hash_elem *e)
1785 + size_t bucket_idx = h->hash (e, h->aux) & (h->bucket_cnt - 1);
1786 + return &h->buckets[bucket_idx];
1789 +/* Searches BUCKET in H for a hash element equal to E. Returns
1790 + it if found or a null pointer otherwise. */
1791 +static struct list_elem *
1792 +find_elem (struct hash *h, struct list *bucket, hash_elem *e)
1794 + struct list_elem *i;
1796 + for (i = list_begin (bucket); i != list_end (bucket); i = list_next (i))
1797 + if (!h->less (i, e, h->aux) && !h->less (e, i, h->aux))
1802 +/* Returns X with its lowest-order bit set to 1 turned off. */
1803 +static inline size_t
1804 +turn_off_least_1bit (size_t x)
1806 + return x & (x - 1);
1809 +/* Returns true if X is a power of 2, otherwise false. */
1810 +static inline size_t
1811 +is_power_of_2 (size_t x)
1813 + return x != 0 && turn_off_least_1bit (x) == 0;
1816 +/* Element per bucket ratios. */
1817 +#define MIN_ELEMS_PER_BUCKET 1 /* Elems/bucket < 1: reduce # of buckets. */
1818 +#define BEST_ELEMS_PER_BUCKET 2 /* Ideal elems/bucket. */
1819 +#define MAX_ELEMS_PER_BUCKET 4 /* Elems/bucket > 4: increase # of buckets. */
1821 +/* Changes the number of buckets in hash table H to match the
1822 + ideal. This function can fail because of an out-of-memory
1823 + condition, but that'll just make hash accesses less efficient;
1824 + we can still continue. */
1826 +rehash (struct hash *h)
1828 + size_t old_bucket_cnt, new_bucket_cnt;
1829 + struct list *new_buckets, *old_buckets;
1832 + assert (h != NULL);
1834 + /* Save old bucket info for later use. */
1835 + old_buckets = h->buckets;
1836 + old_bucket_cnt = h->bucket_cnt;
1838 + /* Calculate the number of buckets to use now.
1839 + We want one bucket for about every BEST_ELEMS_PER_BUCKET.
1840 + We must have at least four buckets, and the number of
1841 + buckets must be a power of 2. */
1842 + new_bucket_cnt = h->elem_cnt / BEST_ELEMS_PER_BUCKET;
1843 + if (new_bucket_cnt < 4)
1844 + new_bucket_cnt = 4;
1845 + while (!is_power_of_2 (new_bucket_cnt))
1846 + new_bucket_cnt = turn_off_least_1bit (new_bucket_cnt);
1848 + /* Don't do anything if the bucket count wouldn't change. */
1849 + if (new_bucket_cnt == old_bucket_cnt)
1852 + /* Allocate new buckets and initialize them as empty. */
1853 + new_buckets = (struct list *)malloc (sizeof *new_buckets * new_bucket_cnt);
1854 + if (new_buckets == NULL)
1856 + /* Allocation failed. This means that use of the hash table will
1857 + be less efficient. However, it is still usable, so
1858 + there's no reason for it to be an error. */
1861 + for (i = 0; i < new_bucket_cnt; i++)
1862 + list_init (&new_buckets[i]);
1864 + /* Install new bucket info. */
1865 + h->buckets = new_buckets;
1866 + h->bucket_cnt = new_bucket_cnt;
1868 + /* Move each old element into the appropriate new bucket. */
1869 + for (i = 0; i < old_bucket_cnt; i++)
1871 + struct list *old_bucket;
1872 + struct list_elem *elem, *next;
1874 + old_bucket = &old_buckets[i];
1875 + for (elem = list_begin (old_bucket);
1876 + elem != list_end (old_bucket); elem = next)
1878 + struct list *new_bucket = find_bucket (h, elem);
1879 + next = list_next (elem);
1880 + list_remove (elem);
1881 + list_push_front (new_bucket, elem);
1885 + free (old_buckets);
1888 +/* Inserts E into BUCKET (in hash table H). */
1890 +insert_elem (struct hash *h, struct list *bucket, hash_elem *e)
1893 + list_push_front (bucket, e);
1896 +/* Removes E from hash table H. */
1898 +remove_elem (struct hash *h, hash_elem *e)
1904 diff -X ignore -urpNb bochs-2.1.1/taint/hash.h checkbochs-2.1.1/taint/hash.h
1905 --- bochs-2.1.1/taint/hash.h 1969-12-31 16:00:00.000000000 -0800
1906 +++ checkbochs-2.1.1/taint/hash.h 2005-07-02 17:25:48.000000000 -0700
1913 + This is a standard hash table with chaining. To locate an
1914 + element in the table, we compute a hash function over the
1915 + element's data and use that as an index into an array of
1916 + doubly linked lists, then linearly search the list.
1918 + The chain lists do not use dynamic allocation. Instead, each
1919 + structure that can potentially be in a hash must embed a
1920 + hash_elem member. All of the hash functions operate on these
1921 + `hash_elem's. The hash_entry macro allows conversion from a
1922 + hash_elem back to a structure object that contains it. This
1923 + is the same technique used in the linked list implementation.
1924 + Refer to lib/kernel/list.h for a detailed explanation.
1926 + The FAQ for the VM project contains a detailed example of how
1927 + to use the hash table. */
1929 +#include <stdbool.h>
1930 +#include <stddef.h>
1931 +#include <inttypes.h>
1934 +/* Hash element. */
1935 +typedef list_elem hash_elem;
1937 +/* Converts pointer to hash element HASH_ELEM into a pointer to
1938 + the structure that HASH_ELEM is embedded inside. Supply the
1939 + name of the outer structure STRUCT and the member name MEMBER
1940 + of the hash element. See the big comment at the top of the
1941 + file for an example. */
1942 +#define hash_entry(HASH_ELEM, STRUCT, MEMBER) \
1943 + ((STRUCT *) ((uint8_t *) (HASH_ELEM) - offsetof (STRUCT, MEMBER)))
1945 +/* Computes and returns the hash value for hash element E, given
1946 + auxiliary data AUX. */
1947 +typedef unsigned hash_hash_func (const hash_elem *e, void *aux);
1949 +/* Compares the value of two hash elements A and B, given
1950 + auxiliary data AUX. Returns true if A is less than B, or
1951 + false if A is greater than or equal to B. */
1952 +typedef bool hash_less_func (const hash_elem *a, const hash_elem *b,
1958 + size_t elem_cnt; /* Number of elements in table. */
1959 + size_t bucket_cnt; /* Number of buckets, a power of 2. */
1960 + struct list *buckets; /* Array of `bucket_cnt' lists. */
1961 + hash_hash_func *hash; /* Hash function. */
1962 + hash_less_func *less; /* Comparison function. */
1963 + void *aux; /* Auxiliary data for `hash' and `less'. */
1966 +/* A hash table iterator. */
1967 +struct hash_iterator
1969 + struct hash *hash; /* The hash table. */
1970 + struct list *bucket; /* Current bucket. */
1971 + hash_elem *elem; /* Current hash element in current bucket. */
1974 +/* Basic life cycle. */
1975 +bool hash_init (struct hash *, hash_hash_func *, hash_less_func *, void *aux);
1976 +void hash_clear (struct hash *);
1977 +void hash_destroy (struct hash *);
1979 +/* Search, insertion, deletion. */
1980 +hash_elem *hash_insert (struct hash *, hash_elem *);
1981 +hash_elem *hash_replace (struct hash *, hash_elem *);
1982 +hash_elem *hash_find (struct hash *, hash_elem *);
1983 +hash_elem *hash_delete (struct hash *, hash_elem *);
1986 +void hash_first (struct hash_iterator *, struct hash *);
1987 +hash_elem *hash_next (struct hash_iterator *);
1988 +hash_elem *hash_cur (struct hash_iterator *);
1991 +size_t hash_size (struct hash *);
1992 +bool hash_empty (struct hash *);
1994 +/* Sample hash functions. */
1995 +unsigned hash_bytes (const void *, size_t);
1996 +unsigned hash_string (const char *);
1997 +unsigned hash_int (int);
1999 +#endif /* lib/kernel/hash.h */
2000 diff -X ignore -urpNb bochs-2.1.1/taint/list.cc checkbochs-2.1.1/taint/list.cc
2001 --- bochs-2.1.1/taint/list.cc 1969-12-31 16:00:00.000000000 -0800
2002 +++ checkbochs-2.1.1/taint/list.cc 2005-07-02 17:25:48.000000000 -0700
2004 +#include <stdlib.h>
2005 +#include <assert.h>
2008 +/* Our doubly linked lists have two header elements: the "head"
2009 + just before the first element and the "tail" just after the
2010 + last element. The `prev' link of the front header is null, as
2011 + is the `next' link of the back header. Their other two links
2012 + point toward each other via the interior elements of the list.
2014 + An empty list looks like this:
2017 + <---| head |<--->| tail |--->
2020 + A list with two elements in it looks like this:
2022 + +------+ +-------+ +-------+ +------+
2023 + <---| head |<--->| 1 |<--->| 2 |<--->| tail |<--->
2024 + +------+ +-------+ +-------+ +------+
2026 + The symmetry of this arrangement eliminates lots of special
2027 + cases in list processing. For example, take a look at
2028 + list_remove(): it takes only two pointer assignments and no
2029 + conditionals. That's a lot simpler than the code would be
2030 + without header elements.
2032 + (Because only one of the pointers in each header element is used,
2033 + we could in fact combine them into a single header element
2034 + without sacrificing this simplicity. But using two separate
2035 + elements allows us to do a little bit of checking on some
2036 + operations, which can be valuable.) */
2038 +/* Returns true if ELEM is a head, false otherwise. */
2040 +is_head (list_elem *elem)
2042 + return elem != NULL && elem->prev == NULL && elem->next != NULL;
2045 +/* Returns true if ELEM is an interior element,
2046 + false otherwise. */
2048 +is_interior (list_elem *elem)
2050 + return elem != NULL && elem->prev != NULL && elem->next != NULL;
2053 +/* Returns true if ELEM is a tail, false otherwise. */
2055 +is_tail (list_elem *elem)
2057 + return elem != NULL && elem->prev != NULL && elem->next == NULL;
2060 +/* Initializes LIST as an empty list. */
2062 +list_init (struct list *list)
2064 + assert (list != NULL);
2065 + list->head.prev = NULL;
2066 + list->head.next = &list->tail;
2067 + list->tail.prev = &list->head;
2068 + list->tail.next = NULL;
2071 +/* Returns the beginning of LIST. */
2073 +list_begin (struct list *list)
2075 + assert (list != NULL);
2076 + return list->head.next;
2079 +/* Returns the element after ELEM in its list. If ELEM is the
2080 + last element in its list, returns the list tail. Results are
2081 + undefined if ELEM is itself a list tail. */
2083 +list_next (list_elem *elem)
2085 + assert (is_head (elem) || is_interior (elem));
2086 + return elem->next;
2089 +/* Returns LIST's tail.
2091 + list_end() is often used in iterating through a list from
2092 + front to back. See the big comment at the top of list.h for
2095 +list_end (struct list *list)
2097 + assert (list != NULL);
2098 + return &list->tail;
2101 +/* Returns the LIST's reverse beginning, for iterating through
2102 + LIST in reverse order, from back to front. */
2104 +list_rbegin (struct list *list)
2106 + assert (list != NULL);
2107 + return list->tail.prev;
2110 +/* Returns the element before ELEM in its list. If ELEM is the
2111 + first element in its list, returns the list head. Results are
2112 + undefined if ELEM is itself a list head. */
2114 +list_prev (list_elem *elem)
2116 + assert (is_interior (elem) || is_tail (elem));
2117 + return elem->prev;
2120 +/* Returns LIST's head.
2122 + list_rend() is often used in iterating through a list in
2123 + reverse order, from back to front. Here's typical usage,
2124 + following the example from the top of list.h:
2126 + for (e = list_rbegin (&foo_list); e != list_rend (&foo_list);
2127 + e = list_prev (e))
2129 + struct foo *f = list_entry (e, struct foo, elem);
2130 + ...do something with f...
2134 +list_rend (struct list *list)
2136 + assert (list != NULL);
2137 + return &list->head;
2140 +/* Returns LIST's head.
2142 + list_head() can be used for an alternate style of iterating
2143 + through a list, e.g.:
2145 + e = list_head (&list);
2146 + while ((e = list_next (e)) != list_end (&list))
2152 +list_head (struct list *list)
2154 + assert (list != NULL);
2155 + return &list->head;
2158 +/* Returns LIST's tail. */
2160 +list_tail (struct list *list)
2162 + assert (list != NULL);
2163 + return &list->tail;
2166 +/* Inserts ELEM just before BEFORE, which may be either an
2167 + interior element or a tail. The latter case is equivalent to
2168 + list_push_back(). */
2170 +list_insert (list_elem *before, list_elem *elem)
2172 + assert (is_interior (before) || is_tail (before));
2173 + assert (elem != NULL);
2175 + elem->prev = before->prev;
2176 + elem->next = before;
2177 + before->prev->next = elem;
2178 + before->prev = elem;
2181 +/* Removes elements FIRST through LAST (exclusive) from their
2182 + current list, then inserts them just before BEFORE, which may
2183 + be either an interior element or a tail. */
2185 +list_splice (list_elem *before,
2186 + list_elem *first, list_elem *last)
2188 + assert (is_interior (before) || is_tail (before));
2189 + if (first == last)
2191 + last = list_prev (last);
2193 + assert (is_interior (first));
2194 + assert (is_interior (last));
2196 + /* Cleanly remove FIRST...LAST from its current list. */
2197 + first->prev->next = last->next;
2198 + last->next->prev = first->prev;
2200 + /* Splice FIRST...LAST into new list. */
2201 + first->prev = before->prev;
2202 + last->next = before;
2203 + before->prev->next = first;
2204 + before->prev = last;
2207 +/* Inserts ELEM at the beginning of LIST, so that it becomes the
2210 +list_push_front (struct list *list, list_elem *elem)
2212 + list_insert (list_begin (list), elem);
2215 +/* Inserts ELEM at the end of LIST, so that it becomes the
2218 +list_push_back (struct list *list, list_elem *elem)
2220 + list_insert (list_end (list), elem);
2223 +/* Removes ELEM from its list and returns the element that
2224 + followed it. Undefined behavior if ELEM is not in a list. */
2226 +list_remove (list_elem *elem)
2228 + assert (is_interior (elem));
2229 + elem->prev->next = elem->next;
2230 + elem->next->prev = elem->prev;
2231 + return elem->next;
2234 +/* Removes the front element from LIST and returns it.
2235 + Undefined behavior if LIST is empty before removal. */
2237 +list_pop_front (struct list *list)
2239 + list_elem *front = list_front (list);
2240 + list_remove (front);
2244 +/* Removes the back element from LIST and returns it.
2245 + Undefined behavior if LIST is empty before removal. */
2247 +list_pop_back (struct list *list)
2249 + list_elem *back = list_back (list);
2250 + list_remove (back);
2254 +/* Returns the front element in LIST.
2255 + Undefined behavior if LIST is empty. */
2257 +list_front (struct list *list)
2259 + assert (!list_empty (list));
2260 + return list->head.next;
2263 +/* Returns the back element in LIST.
2264 + Undefined behavior if LIST is empty. */
2266 +list_back (struct list *list)
2268 + assert (!list_empty (list));
2269 + return list->tail.prev;
2272 +/* Returns the number of elements in LIST.
2273 + Runs in O(n) in the number of elements. */
2275 +list_size (struct list *list)
2280 + for (e = list_begin (list); e != list_end (list); e = list_next (e))
2285 +/* Returns true if LIST is empty, false otherwise. */
2287 +list_empty (struct list *list)
2289 + return list_begin (list) == list_end (list);
2292 +/* Swaps the `list_elem *'s that A and B point to. */
2294 +swap (list_elem **a, list_elem **b)
2296 + list_elem *t = *a;
2301 +/* Reverses the order of LIST. */
2303 +list_reverse (struct list *list)
2305 + if (!list_empty (list))
2309 + for (e = list_begin (list); e != list_end (list); e = e->prev)
2310 + swap (&e->prev, &e->next);
2311 + swap (&list->head.next, &list->tail.prev);
2312 + swap (&list->head.next->prev, &list->tail.prev->next);
2316 +/* Merges lists AL and BL, which must each be sorted according to
2317 + LESS given auxiliary data AUX, by inserting each element of BL
2318 + at the proper place in AL to preserve the ordering.
2319 + Runs in O(n) in the combined length of AL and BL. */
2321 +list_merge (struct list *al, struct list *bl,
2322 + list_less_func *less, void *aux)
2326 + assert (al != NULL);
2327 + assert (bl != NULL);
2328 + assert (less != NULL);
2330 + a = list_begin (al);
2331 + while (a != list_end (al))
2333 + list_elem *b = list_begin (bl);
2334 + if (less (b, a, aux))
2336 + list_splice (a, b, list_next (b));
2337 + if (list_empty (bl))
2341 + a = list_next (a);
2343 + list_splice (list_end (al), list_begin (bl), list_end (bl));
2346 +/* Returns the middle element in LIST, that is, the N/2'th
2347 + element (rounding down) in a N-element list.
2348 + Given an empty list, returns the list tail. */
2350 +middle_of_list (struct list *list)
2352 + list_elem *middle, *last;
2354 + middle = last = list_begin (list);
2355 + while (last != list_end (list) && list_next (last) != list_end (list))
2357 + middle = list_next (middle);
2358 + last = list_next (list_next (last));
2363 +/* Sorts LIST according to LESS given auxiliary data AUX.
2364 + Runs in O(n lg n) time in the number of elements in LIST. */
2366 +list_sort (struct list *list,
2367 + list_less_func *less, void *aux)
2369 + /* Find the middle of the list. */
2370 + list_elem *middle = middle_of_list (list);
2371 + if (middle != list_begin (list))
2373 + /* Extract first half of LIST into a temporary list. */
2376 + list_splice (list_begin (&tmp), list_begin (list), middle);
2378 + /* Sort each half-list and merge the result. */
2379 + list_sort (&tmp, less, aux);
2380 + list_sort (list, less, aux);
2381 + list_merge (list, &tmp, less, aux);
2385 + /* The middle is at the beginning of the list.
2386 + This only happens in empty lists and 1-element lists.
2387 + Because such lists are already sorted, we have nothing
2392 +/* Inserts ELEM in the proper position in LIST, which must be
2393 + sorted according to LESS given auxiliary data AUX.
2394 + Runs in O(n) average case in the number of elements in LIST. */
2396 +list_insert_ordered (struct list *list, list_elem *elem,
2397 + list_less_func *less, void *aux)
2401 + assert (list != NULL);
2402 + assert (elem != NULL);
2403 + assert (less != NULL);
2405 + for (e = list_begin (list); e != list_end (list); e = list_next (e))
2406 + if (less (elem, e, aux))
2408 + return list_insert (e, elem);
2411 +/* Iterates through LIST and removes all but the first in each
2412 + set of adjacent elements that are equal according to LESS
2413 + given auxiliary data AUX. If DUPLICATES is non-null, then the
2414 + elements from LIST are appended to DUPLICATES. */
2416 +list_unique (struct list *list, struct list *duplicates,
2417 + list_less_func *less, void *aux)
2419 + list_elem *elem, *next;
2421 + assert (list != NULL);
2422 + assert (less != NULL);
2423 + if (list_empty (list))
2426 + elem = list_begin (list);
2427 + while ((next = list_next (elem)) != list_end (list))
2428 + if (!less (elem, next, aux) && !less (next, elem, aux))
2430 + list_remove (next);
2431 + if (duplicates != NULL)
2432 + list_push_back (duplicates, next);
2438 +/* Returns the element in LIST with the largest value according
2439 + to LESS given auxiliary data AUX. If there is more than one
2440 + maximum, returns the one that appears earlier in the list. If
2441 + the list is empty, returns its tail. */
2443 +list_max (struct list *list, list_less_func *less, void *aux)
2445 + list_elem *max = list_begin (list);
2446 + if (max != list_end (list))
2450 + for (e = list_next (max); e != list_end (list); e = list_next (e))
2451 + if (less (max, e, aux))
2457 +/* Returns the element in LIST with the smallest value according
2458 + to LESS given auxiliary data AUX. If there is more than one
2459 + minimum, returns the one that appears earlier in the list. If
2460 + the list is empty, returns its tail. */
2462 +list_min (struct list *list, list_less_func *less, void *aux)
2464 + list_elem *min = list_begin (list);
2465 + if (min != list_end (list))
2469 + for (e = list_next (min); e != list_end (list); e = list_next (e))
2470 + if (less (e, min, aux))
2475 diff -X ignore -urpNb bochs-2.1.1/taint/list.h checkbochs-2.1.1/taint/list.h
2476 --- bochs-2.1.1/taint/list.h 1969-12-31 16:00:00.000000000 -0800
2477 +++ checkbochs-2.1.1/taint/list.h 2005-07-02 17:25:48.000000000 -0700
2479 +#ifndef __LIB_KERNEL_LIST_H
2480 +#define __LIB_KERNEL_LIST_H
2482 +/* Doubly linked list.
2484 + This implementation of a doubly linked list does not require
2485 + use of dynamically allocated memory. Instead, each structure
2486 + that is a potential list element must embed a list_elem
2487 + member. All of the list functions operate on these
2488 + `list_elem's. The list_entry macro allows conversion from a
2489 + list_elem back to a structure object that contains it.
2491 + For example, suppose there is a need for a list of `struct
2492 + foo'. `struct foo' should contain a `list_elem' member, like
2499 + ...other members...
2502 + Then a list of `struct foo' can be declared and initialized
2505 + struct list foo_list;
2507 + list_init (&foo_list);
2509 + Iteration is a typical situation where it is necessary to
2510 + convert from a list_elem back to its enclosing structure.
2511 + Here's an example using foo_list:
2515 + for (e = list_begin (&foo_list); e != list_end (&foo_list);
2516 + e = list_next (e))
2518 + struct foo *f = list_entry (e, struct foo, elem);
2519 + ...do something with f...
2522 + You can find real examples of list usage throughout the
2523 + source; for example, malloc.c, palloc.c, and thread.c in the
2524 + threads directory all use lists.
2526 + The interface for this list is inspired by the list<> template
2527 + in the C++ STL. If you're familiar with list<>, you should
2528 + find this easy to use. However, it should be emphasized that
2529 + these lists do *no* type checking and can't do much other
2530 + correctness checking. If you screw up, it will bite you.
2532 + Glossary of list terms:
2534 + - "front": The first element in a list. Undefined in an
2535 + empty list. Returned by list_front().
2537 + - "back": The last element in a list. Undefined in an empty
2538 + list. Returned by list_back().
2540 + - "tail": The element figuratively just after the last
2541 + element of a list. Well defined even in an empty list.
2542 + Returned by list_end(). Used as the end sentinel for an
2543 + iteration from front to back.
2545 + - "beginning": In a non-empty list, the front. In an empty
2546 + list, the tail. Returned by list_begin(). Used as the
2547 + starting point for an iteration from front to back.
2549 + - "head": The element figuratively just before the first
2550 + element of a list. Well defined even in an empty list.
2551 + Returned by list_rend(). Used as the end sentinel for an
2552 + iteration from back to front.
2554 + - "reverse beginning": In a non-empty list, the back. In an
2555 + empty list, the head. Returned by list_rbegin(). Used as
2556 + the starting point for an iteration from back to front.
2558 + - "interior element": An element that is not the head or
2559 + tail, that is, a real list element. An empty list does
2560 + not have any interior elements.
2563 +#include <stdbool.h>
2564 +#include <stddef.h>
2565 +#include <inttypes.h>
2567 +/* List element. */
2568 +typedef struct list_elem
2570 + struct list_elem *prev; /* Previous list element. */
2571 + struct list_elem *next; /* Next list element. */
2578 + list_elem head; /* List head. */
2579 + list_elem tail; /* List tail. */
2582 +/* Converts pointer to list element LIST_ELEM into a pointer to
2583 + the structure that LIST_ELEM is embedded inside. Supply the
2584 + name of the outer structure STRUCT and the member name MEMBER
2585 + of the list element. See the big comment at the top of the
2586 + file for an example. */
2587 +#define list_entry(LIST_ELEM, STRUCT, MEMBER) \
2588 + ((STRUCT *) ((uint8_t *) (LIST_ELEM) - offsetof (STRUCT, MEMBER)))
2590 +void list_init (struct list *);
2592 +/* List traversal. */
2593 +list_elem *list_begin (struct list *);
2594 +list_elem *list_next (list_elem *);
2595 +list_elem *list_end (struct list *);
2597 +list_elem *list_rbegin (struct list *);
2598 +list_elem *list_prev (list_elem *);
2599 +list_elem *list_rend (struct list *);
2601 +list_elem *list_head (struct list *);
2602 +list_elem *list_tail (struct list *);
2604 +/* List insertion. */
2605 +void list_insert (list_elem *, list_elem *);
2606 +void list_splice (list_elem *before,
2607 + list_elem *first, list_elem *last);
2608 +void list_push_front (struct list *, list_elem *);
2609 +void list_push_back (struct list *, list_elem *);
2611 +/* List removal. */
2612 +list_elem *list_remove (list_elem *);
2613 +list_elem *list_pop_front (struct list *);
2614 +list_elem *list_pop_back (struct list *);
2616 +/* List elements. */
2617 +list_elem *list_front (struct list *);
2618 +list_elem *list_back (struct list *);
2620 +/* List properties. */
2621 +size_t list_size (struct list *);
2622 +bool list_empty (struct list *);
2624 +/* Miscellaneous. */
2625 +void list_reverse (struct list *);
2627 +/* Compares the value of two list elements A and B, given
2628 + auxiliary data AUX. Returns true if A is less than B, or
2629 + false if A is greater than or equal to B. */
2630 +typedef bool list_less_func (const list_elem *a, const list_elem *b,
2633 +/* Operations on lists with ordered elements. */
2634 +void list_merge (struct list *, struct list *,
2635 + list_less_func *, void *aux);
2636 +void list_sort (struct list *,
2637 + list_less_func *, void *aux);
2638 +void list_insert_ordered (struct list *, list_elem *,
2639 + list_less_func *, void *aux);
2640 +void list_unique (struct list *, struct list *duplicates,
2641 + list_less_func *, void *aux);
2644 +list_elem *list_max (struct list *, list_less_func *, void *aux);
2645 +list_elem *list_min (struct list *, list_less_func *, void *aux);
2648 +is_head (list_elem *elem);
2649 +#endif /* lib/kernel/list.h */
2650 diff -X ignore -urpNb bochs-2.1.1/taint/lockset.cc checkbochs-2.1.1/taint/lockset.cc
2651 --- bochs-2.1.1/taint/lockset.cc 1969-12-31 16:00:00.000000000 -0800
2652 +++ checkbochs-2.1.1/taint/lockset.cc 2005-07-19 22:38:45.000000000 -0700
2655 +#include <stdlib.h>
2656 +#include <assert.h>
2658 +#define NEED_CPU_REG_SHORTCUTS 1
2660 +#define LOG_THIS BX_CPU_THIS_PTR
2661 +#include "lockset.h"
2663 +#include "eraser.h"
2664 +#include "globals.h"
2665 +#include "mydebug.h"
2668 +#define MAX_LOCKSETS ((unsigned int)100000)
2669 +#define HTABLE_SIZE ((unsigned int)4*MAX_LOCKSETS) //overprovision for closed hashing
2671 +typedef unsigned int hval_t;
2673 +struct add_lock_entry {
2674 + locksetidx_t oldindex;
2675 + address_t newlock;
2676 + locksetidx_t newindex;
2677 + hash_elem h_elem; /* Hash element */
2680 +struct remove_lock_entry {
2681 + locksetidx_t oldindex;
2682 + address_t oldlock;
2683 + locksetidx_t newindex;
2684 + hash_elem h_elem; /* Hash element */
2688 +struct intersect_entry {
2689 + locksetidx_t index1, index2, newindex;
2690 + hash_elem h_elem; /* Hash element */
2693 +unsigned add_lock_hash (const hash_elem *e, void *aux)
2695 + struct add_lock_entry *h = hash_entry (e, struct add_lock_entry, h_elem);
2696 + return hash_int ((h->oldindex << 16) | (h->newlock&0x0000ffff) );
2700 +add_lock_less (const hash_elem *a_, const hash_elem *b_,
2703 + struct add_lock_entry *a = hash_entry (a_, struct add_lock_entry, h_elem);
2704 + struct add_lock_entry *b = hash_entry (b_, struct add_lock_entry, h_elem);
2705 + return (a->oldindex < b->oldindex || (a->oldindex==b->oldindex && a->newlock < b->newlock));
2708 +unsigned remove_lock_hash (const hash_elem *e, void *aux)
2710 + struct remove_lock_entry *h = hash_entry (e, struct remove_lock_entry, h_elem);
2711 + return hash_int ((h->oldindex << 16) | (h->oldlock&0x0000ffff));
2715 +remove_lock_less (const hash_elem *a_, const hash_elem *b_,
2718 + struct remove_lock_entry *a = hash_entry (a_, struct remove_lock_entry, h_elem);
2719 + struct remove_lock_entry *b = hash_entry (b_, struct remove_lock_entry, h_elem);
2720 + return (a->oldindex < b->oldindex || (a->oldindex==b->oldindex && a->oldlock < b->oldlock));
2723 +unsigned intersect_hash (const hash_elem *e, void *aux)
2725 + struct intersect_entry *h = hash_entry (e, struct intersect_entry, h_elem);
2726 + return hash_int ((h->index1 << 16) | (h->index2&0x0000ffff));
2730 +intersect_less (const hash_elem *a_, const hash_elem *b_,
2733 + struct intersect_entry *a = hash_entry (a_, struct intersect_entry, h_elem);
2734 + struct intersect_entry *b = hash_entry (b_, struct intersect_entry, h_elem);
2735 + return (a->index1 < b->index1 || (a->index1==b->index1 && a->index2 < b->index2));
2741 +static int add_lock_cache_initialized = 0;
2742 +static struct hash add_lock_cache;
2744 +static int remove_lock_cache_initialized = 0;
2745 +static struct hash remove_lock_cache;
2747 +static int intersect_cache_initialized = 0;
2748 +static struct hash intersect_cache;
2750 +typedef struct lockvector {
2751 + address_t lockaddress;
2752 + struct lockvector *next;
2756 +static locksetidx_t lockset_index = 1;
2757 +static lockvector_t *index_table[MAX_LOCKSETS];
2758 +static locksetidx_t hash_table[HTABLE_SIZE];
2760 +static unsigned int cum_hash(unsigned int a, address_t b) {
2764 +static unsigned int hashfn(lockvector_t *vec) {
2765 + lockvector_t *cur;
2766 + unsigned int ret=0;
2770 + ret = cum_hash(ret,cur->lockaddress);
2777 +freevector(lockvector_t *lockvec) {
2778 + static lockvector_t *cur, *prev;
2787 +static lockvector_t *
2788 +clonevector(lockvector_t *lockvec) {
2789 + static lockvector_t *cur, *newlockvec,*newprev;
2790 + if (!lockvec) return NULL;
2792 + newlockvec = (lockvector_t*)malloc(sizeof(lockvector_t));
2793 + newlockvec->lockaddress = lockvec->lockaddress;
2794 + newprev = newlockvec;
2795 + cur = lockvec->next;
2797 + newprev->next = (lockvector_t*)malloc(sizeof(lockvector_t));
2798 + newprev->next->lockaddress = cur->lockaddress;
2800 + newprev = newprev->next;
2802 + newprev->next = NULL;
2803 + return newlockvec;
2806 +static lockvector_t *
2807 +remove_from_lockvector(lockvector_t *lockvec, address_t oldlock) {
2808 + lockvector_t *ret, *cur, *newlockvec, *tmp;
2811 + if (oldlock<lockvec->lockaddress) assert(0);
2812 + if (oldlock==lockvec->lockaddress) {
2813 + ret = clonevector(lockvec->next);
2817 + cur = newlockvec = clonevector(lockvec);
2818 + while (cur->next && oldlock>cur->next->lockaddress) {
2821 + assert(oldlock==cur->next->lockaddress);
2824 + cur->next = cur->next->next;
2825 + tmp->lockaddress = 0;
2828 + return newlockvec;
2831 +static lockvector_t *
2832 +intersect_lockvectors(lockvector_t *lockvec1, lockvector_t *lockvec2) {
2833 + lockvector_t *cur1, *cur2, *out, *ret;
2837 + while (cur1 && cur2) {
2838 + if (cur1->lockaddress < cur2->lockaddress) {
2839 + cur1 = cur1->next;
2840 + } else if (cur1->lockaddress > cur2->lockaddress) {
2841 + cur2 = cur2->next;
2844 + out->next = (lockvector_t *)malloc(sizeof(lockvector_t));
2847 + ret = out = (lockvector_t *)malloc(sizeof(lockvector_t));
2849 + out->lockaddress = cur1->lockaddress;
2851 + cur1 = cur1->next;
2852 + cur2 = cur2->next;
2861 +static lockvector_t *
2862 +add_to_lockvector(lockvector_t *lockvec, address_t newlock) {
2863 + lockvector_t *ret, *cur, *newlockvec;
2864 + if (lockvec==NULL) {
2865 + ret = (lockvector_t *)malloc(sizeof(lockvector_t));
2866 + ret->lockaddress = newlock;
2870 + if (newlock<lockvec->lockaddress) {
2871 + ret = (lockvector_t *)malloc(sizeof(lockvector_t));
2872 + ret->lockaddress = newlock;
2873 + ret->next = clonevector(lockvec);
2876 + if (newlock==lockvec->lockaddress) {
2877 + if (BX_CPU(0)) BX_CPU(0)->backtrace(btstr);
2879 + return clonevector(lockvec);
2881 + assert(newlock>lockvec->lockaddress);
2883 + cur = newlockvec = clonevector(lockvec);
2884 + while (cur->next && newlock>cur->next->lockaddress) {
2889 + ret = (lockvector_t *)malloc(sizeof(lockvector_t));
2890 + ret->lockaddress = newlock;
2893 + return newlockvec;
2896 + if (newlock==cur->next->lockaddress) {
2897 + BX_CPU(0)->backtrace(btstr);
2898 + assert(0); //acquiring the same lock twice
2899 + return newlockvec;
2902 + ret = (lockvector_t *)malloc(sizeof(lockvector_t));
2903 + ret->lockaddress = newlock;
2904 + ret->next = cur->next;
2906 + return newlockvec;
2909 +void add_to_hashtable(lockvector_t *newlockset, locksetidx_t index) {
2910 + hval_t hval = (hashfn(newlockset))%HTABLE_SIZE;
2912 + while (hash_table[hval]) {
2913 + hval = (hval+1)%HTABLE_SIZE; //closed hashing
2914 + //printf("hval=%d.\n",hval);
2917 + if (numtries>20) printf("%s: Warning: numtries too large for closed hashing (=%d).\n",__func__,numtries);
2918 + hash_table[hval] = index;
2921 +//returns true if they are equal
2923 +compare_locksets(lockvector_t *vec1, lockvector_t *vec2) {
2924 + lockvector_t *cur1, *cur2;
2931 + while (cur1 && cur2 && cur1->lockaddress==cur2->lockaddress) {
2932 + cur1 = cur1->next;
2933 + cur2 = cur2->next;
2936 + if (cur1) return 0;
2937 + if (cur2) return 0;
2942 +static locksetidx_t
2943 +find_in_add_cache(locksetidx_t index, address_t newlock) {
2945 + locksetidx_t new_index;
2946 + hash_elem *h_element = NULL;
2947 + struct add_lock_entry tmp;
2948 + tmp.oldindex = index; tmp.newlock = newlock;
2949 + if (add_lock_cache_initialized) h_element = hash_find(&add_lock_cache, &tmp.h_elem);
2951 + struct add_lock_entry *answer = hash_entry(h_element, struct add_lock_entry, h_elem);
2952 + //printf("%s: returning (%d, %x)->%d from cache.\n",__func__,index,newlock,answer->newindex);
2953 + return answer->newindex;
2959 +static locksetidx_t
2960 +find_in_remove_cache(locksetidx_t index, address_t oldlock) {
2962 + locksetidx_t new_index;
2963 + hash_elem *h_element = NULL;
2964 + struct remove_lock_entry tmp;
2965 + tmp.oldindex = index; tmp.oldlock = oldlock;
2966 + if (remove_lock_cache_initialized) h_element = hash_find(&remove_lock_cache, &tmp.h_elem);
2968 + struct remove_lock_entry *answer = hash_entry(h_element, struct remove_lock_entry, h_elem);
2969 + //printf("%s: returning (%d, %d)->%d from cache.\n",__func__,index,oldlock,answer->newindex);
2970 + return answer->newindex;
2976 +static locksetidx_t
2977 +find_in_intersect_cache(locksetidx_t index1, locksetidx_t index2) {
2979 + locksetidx_t new_index;
2980 + hash_elem *h_element = NULL;
2981 + struct intersect_entry tmp;
2982 + tmp.index1 = index1; tmp.index2 = index2;
2983 + if (intersect_cache_initialized) h_element = hash_find(&intersect_cache, &tmp.h_elem);
2985 + struct intersect_entry *answer = hash_entry(h_element, struct intersect_entry, h_elem);
2986 + //printf("%s: returning (%d, %x)->%d from cache.\n",__func__,index1,index2,answer->newindex);
2987 + return answer->newindex;
2993 +void insert_in_remove_cache(locksetidx_t index, address_t lock, locksetidx_t new_index)
2995 + struct remove_lock_entry *e;
2996 + if (!remove_lock_cache_initialized) {
2997 + hash_init(&remove_lock_cache, remove_lock_hash, remove_lock_less, NULL);
2998 + remove_lock_cache_initialized = 1;
3000 + e = (struct remove_lock_entry *)malloc(sizeof(struct remove_lock_entry));
3001 + e->oldindex = index; e->oldlock = lock; e->newindex = new_index;
3002 + hash_insert(&remove_lock_cache, &e->h_elem);
3006 +insert_in_add_cache(locksetidx_t index, address_t lock, locksetidx_t new_index)
3008 + struct add_lock_entry *e;
3009 + if (!add_lock_cache_initialized) {
3010 + hash_init(&add_lock_cache, add_lock_hash, add_lock_less, NULL);
3011 + add_lock_cache_initialized = 1;
3013 + e = (struct add_lock_entry *)malloc(sizeof(struct add_lock_entry));
3014 + e->oldindex = index; e->newlock = lock; e->newindex = new_index;
3015 + hash_insert(&add_lock_cache, &e->h_elem);
3019 +insert_in_intersect_cache(locksetidx_t index1, locksetidx_t index2, locksetidx_t new_index)
3021 + struct intersect_entry *e;
3022 + if (!intersect_cache_initialized) {
3023 + hash_init(&intersect_cache, intersect_hash, intersect_less, NULL);
3024 + intersect_cache_initialized = 1;
3026 + e = (struct intersect_entry *)malloc(sizeof(struct intersect_entry));
3027 + e->index1 = index1; e->index2 = index2; e->newindex = new_index;
3028 + hash_insert(&intersect_cache, &e->h_elem);
3031 +static locksetidx_t
3032 +find_duplicate(lockvector_t *vec) {
3034 + locksetidx_t new_index;
3035 + hash_elem *h_element = NULL;
3037 + hval = (hashfn(vec))%HTABLE_SIZE;
3038 + new_index = hash_table[hval];
3040 + while (new_index) {
3041 + if (compare_locksets(vec,index_table[new_index])) {
3044 + hval = (hval+1)%HTABLE_SIZE;
3045 + new_index = hash_table[hval];
3050 +locksetidx_t add_lock(locksetidx_t index, address_t newlock) {
3051 + lockvector_t *newlockset;
3052 + locksetidx_t ret_index;
3053 + ret_index = find_in_add_cache(index,newlock);
3054 + if (ret_index) return ret_index;
3056 + if (lockset_index >= MAX_LOCKSETS) {
3057 + printf("%s: Number of locksets exceed %d. returning 0\n",__func__,MAX_LOCKSETS);
3061 + if (!index) { //adding a lock to an empty lockset
3062 + newlockset = (lockvector_t *)malloc(sizeof(lockvector_t));
3063 + newlockset->lockaddress = newlock;
3064 + newlockset->next = NULL;
3066 + newlockset = add_to_lockvector(index_table[index],newlock);
3068 + assert(newlockset);
3069 + ret_index = find_duplicate(newlockset);
3071 + insert_in_add_cache(index,newlock,ret_index);
3072 + freevector(newlockset);
3076 + index_table[lockset_index] = newlockset;
3077 + add_to_hashtable(newlockset,lockset_index);
3078 + ret_index = lockset_index;
3084 +singleton_lockset(lockvector_t *vec) {
3086 + if (vec->next==NULL) return 1;
3090 +locksetidx_t remove_lock(locksetidx_t index, address_t oldlock) {
3091 + lockvector_t *newlockset;
3092 + locksetidx_t ret_index;
3093 + if (!index_table[index]) {
3094 + assert(oldlock==INTERRUPT_LOCK);
3098 + if (singleton_lockset(index_table[index])) {
3099 + if (index_table[index]->lockaddress!=oldlock) {
3100 + BX_CPU(0)->backtrace(btstr);
3101 + printf("ERROR: index=%d, index_table[index]->lockaddress=%x, oldlock=%x.\n",index,index_table[index]->lockaddress,oldlock);
3103 + assert(index_table[index]->lockaddress==oldlock);
3106 + ret_index = find_in_remove_cache(index,oldlock);
3107 + if (ret_index) return ret_index;
3109 + if (lockset_index >= MAX_LOCKSETS) {
3110 + printf("%s: Number of locksets exceed %d. returning 0\n",__func__,MAX_LOCKSETS);
3114 + assert(index); //you cannot remove from an empty lockset
3115 + newlockset = remove_from_lockvector(index_table[index],oldlock);
3116 + assert(newlockset);
3118 + ret_index = find_duplicate(newlockset);
3120 + insert_in_remove_cache(index,oldlock,ret_index);
3121 + freevector(newlockset);
3125 + index_table[lockset_index] = newlockset;
3126 + add_to_hashtable(newlockset,lockset_index);
3127 + ret_index = lockset_index;
3132 +locksetidx_t intersect_locksets(locksetidx_t index1, locksetidx_t index2) {
3133 + lockvector_t *newlockset;
3134 + locksetidx_t ret_index;
3136 + if (index1==0) return 0;
3137 + if (index2==0) return 0;
3139 + ret_index = find_in_intersect_cache(index1,index2);
3140 + if (ret_index) return ret_index;
3142 + if (lockset_index >= MAX_LOCKSETS) {
3143 + printf("%s: Number of locksets exceed %d. returning 0\n",__func__,MAX_LOCKSETS);
3149 + newlockset = intersect_lockvectors(index_table[index1],index_table[index2]);
3150 + if (!newlockset) return 0; //empty lockset
3151 + ret_index = find_duplicate(newlockset);
3153 + insert_in_intersect_cache(index1,index2,ret_index);
3154 + freevector(newlockset);
3158 + index_table[lockset_index] = newlockset;
3159 + add_to_hashtable(newlockset,lockset_index);
3160 + ret_index = lockset_index;
3165 +void lockset_dump(locksetidx_t idx, Bit32u *locks, int cnt)
3167 + lockvector_t *vec = index_table[idx];
3168 + while (vec != NULL && cnt-- > 0) {
3169 + *locks++ = vec->lockaddress;
3172 + while (cnt-- > 0) {
3177 +typedef struct locks_held {
3178 + unsigned threadId;
3179 + locksetidx_t locks_held;
3180 + int ignore; //whether this thread should be ignored. =0 if not. otherwise stores ignore depth
3181 + hash_elem h_elem; /* Hash element */
3184 +static int locks_held_table_initialized = 0;
3185 +static struct hash locks_held_table ;
3187 +unsigned locks_held_hash (const hash_elem *e, void *aux)
3189 + locks_held_t *h = hash_entry (e, struct locks_held, h_elem);
3190 + return hash_int (h->threadId);
3194 +locks_held_less (const hash_elem *a_, const hash_elem *b_,
3197 + locks_held_t *a = hash_entry (a_, struct locks_held, h_elem);
3198 + locks_held_t *b = hash_entry (b_, struct locks_held, h_elem);
3199 + return (a->threadId < b->threadId);
3204 +cur_held(unsigned threadId) {
3205 + hash_elem *h_element;
3207 + if (!locks_held_table_initialized) return 0;
3208 + tmp.threadId = threadId;
3209 + h_element = hash_find(&locks_held_table, &tmp.h_elem);
3211 + locks_held_t *answer = hash_entry(h_element, struct locks_held, h_elem);
3212 + assert(answer->locks_held<lockset_index);
3213 + return answer->locks_held;
3218 +void update_lockset(unsigned threadId, locksetidx_t lset) {
3220 + hash_elem *h_element;
3222 + assert(lset<lockset_index);
3223 + if (!locks_held_table_initialized) {
3224 + hash_init(&locks_held_table, locks_held_hash, locks_held_less, NULL);
3225 + locks_held_table_initialized = 1;
3227 + tmp.threadId = threadId;
3228 + h_element = hash_find(&locks_held_table, &tmp.h_elem);
3230 + e = (struct locks_held *)malloc(sizeof(struct locks_held));
3231 + e->threadId = threadId; e->locks_held = lset; e->ignore = 0;
3232 + hash_insert(&locks_held_table, &e->h_elem);
3234 + e = hash_entry(h_element, struct locks_held, h_elem);
3235 + assert(e->threadId==threadId);
3236 + e->locks_held = lset;
3242 +find_in_lockvector(lockvector_t *lockvec, address_t lock) {
3243 + lockvector_t *cur;
3244 + assert(lockvec!=NULL);
3245 + if (lock<lockvec->lockaddress) return false;
3246 + if (lock==lockvec->lockaddress) return true;
3248 + while (cur->next && lock>cur->next->lockaddress) {
3251 + if (!cur->next) return false;
3252 + if (cur->next->lockaddress==lock) return true;
3256 +bool belongs(locksetidx_t index, address_t lock) {
3257 + if (!index) return false;
3258 + return (find_in_lockvector(index_table[index],lock));
3261 +void set_ignore(unsigned threadId, bool val) {
3263 + hash_elem *h_element;
3265 + if (!locks_held_table_initialized) {
3266 + hash_init(&locks_held_table, locks_held_hash, locks_held_less, NULL);
3267 + locks_held_table_initialized = 1;
3269 + tmp.threadId = threadId;
3270 + h_element = hash_find(&locks_held_table, &tmp.h_elem);
3272 + e = (struct locks_held *)malloc(sizeof(struct locks_held));
3273 + e->threadId = threadId; e->locks_held = LOCKSET_EMPTY; e->ignore=0;
3274 + if (val) e->ignore++; else if (e->ignore) e->ignore--;
3275 + assert(e->ignore>=0);
3276 + hash_insert(&locks_held_table, &e->h_elem);
3278 + e = hash_entry(h_element, struct locks_held, h_elem);
3279 + assert(e->threadId==threadId);
3280 + if (val) e->ignore++; else if (e->ignore) e->ignore--;
3281 + assert(e->ignore>=0);
3283 + DBG(L1,("ignore value for thread %x = %d.\n",threadId,e->ignore));
3287 +ignore_on(unsigned threadId) {
3288 + hash_elem *h_element;
3290 + if (global_startup_ignore) return true;
3291 + if (!locks_held_table_initialized) return 0;
3292 + tmp.threadId = threadId;
3293 + h_element = hash_find(&locks_held_table, &tmp.h_elem);
3295 + locks_held_t *answer = hash_entry(h_element, struct locks_held, h_elem);
3296 + assert(answer->ignore>=0);
3297 + return answer->ignore > 0;
3301 diff -X ignore -urpNb bochs-2.1.1/taint/lockset.h checkbochs-2.1.1/taint/lockset.h
3302 --- bochs-2.1.1/taint/lockset.h 1969-12-31 16:00:00.000000000 -0800
3303 +++ checkbochs-2.1.1/taint/lockset.h 2005-07-19 13:40:21.000000000 -0700
3305 +#ifndef __LOCKSET_H
3306 +#define __LOCKSET_H
3308 +#define LOCKSET_EMPTY 0
3312 +#define S_EXCLUSIVE 1
3318 + unsigned int state : 2;
3319 + unsigned int seq : 10;
3320 + unsigned int value : 20;
3323 +typedef unsigned int address_t;
3324 +typedef unsigned long locksetidx_t;
3325 +typedef locksetidx_t lockset_t;
3327 +locksetidx_t add_lock(locksetidx_t index, address_t newlock);
3328 +locksetidx_t remove_lock(locksetidx_t index, address_t newlock);
3329 +locksetidx_t intersect_locksets(locksetidx_t index1, locksetidx_t index2);
3330 +bool belongs(locksetidx_t index, address_t lock);
3331 +void lockset_dump(locksetidx_t, Bit32u locks[], int cnt);
3333 +locksetidx_t cur_held(unsigned threadId);
3334 +void update_lockset(unsigned threadId, locksetidx_t lockset);
3336 +void set_ignore(unsigned threadId, bool val);
3337 +bool ignore_on(unsigned threadId);
3340 diff -X ignore -urpNb bochs-2.1.1/taint/main.c checkbochs-2.1.1/taint/main.c
3341 --- bochs-2.1.1/taint/main.c 1969-12-31 16:00:00.000000000 -0800
3342 +++ checkbochs-2.1.1/taint/main.c 2005-07-02 17:25:48.000000000 -0700
3344 +#include "lockset.h"
3347 + int i1, i2, i3, i12, i21, i31, i32, i123, i312, i321, i312_2, i321m2, i312m2;
3348 + int i312i123, i312i21, i312i1, i32i1;
3350 + i1 = add_lock(0,1);
3351 + i2 = add_lock(0,2);
3352 + i3 = add_lock(0,3);
3353 + i12 = add_lock(i1,2);
3354 + i21 = add_lock(i2,1);
3355 + i21 = add_lock(i2,1);
3356 + i31 = add_lock(i3,1);
3357 + i32 = add_lock(i3,2);
3358 + i123 = add_lock(i12,3);
3359 + i312 = add_lock(i31,2);
3360 + i321 = add_lock(i32,1);
3361 + i321m2 = remove_lock(i321,2);
3362 + i312m2 = remove_lock(i312,2);
3363 + i312i123 = intersect_locksets(i312,i321);
3364 + i312i123 = intersect_locksets(i312,i321);
3365 + i312i123 = intersect_locksets(i312,i321);
3366 + i312i21 = intersect_locksets(i312,i21);
3367 + i312i1 = intersect_locksets(i312,i1);
3368 + i312i1 = intersect_locksets(i312,i1);
3369 + i312i1 = intersect_locksets(i312,i1);
3370 + i32i1 = intersect_locksets(i32,i1);
3371 + i32i1 = intersect_locksets(i32,i1);
3373 + printf("i12 = %d, i21 = %d, i123=%d, i312=%d, i321=%d, i321m2=%d, i312m2=%d.\n",i12,i21,i123,i312,i321,i321m2,i312m2);
3374 + printf("i312i123 = %d, i312i21 = %d, i312i1=%d, i32i1=%d.\n",i312i123,i312i21,i312i1,i32i1);
3376 diff -X ignore -urpNb bochs-2.1.1/taint/memory.cc checkbochs-2.1.1/taint/memory.cc
3377 --- bochs-2.1.1/taint/memory.cc 1969-12-31 16:00:00.000000000 -0800
3378 +++ checkbochs-2.1.1/taint/memory.cc 2005-07-02 17:25:48.000000000 -0700
3382 +#define LOG_THIS BX_MEM_THIS
3385 + void BX_CPP_AttrRegparmN(3)
3386 +BX_MEM_C::readPhysicalTaintPage(BX_CPU_C *cpu, Bit32u addr, unsigned len, void *data)
3391 + a20addr = A20ADDR(addr);
3393 + *(Bit32u *)data = 0x0; //initialize, so that if we are unable to read mem, we return 0
3395 + if ( (a20addr + len) <= BX_MEM_THIS len ) {
3396 + // all of data is within limits of physical memory
3397 + if ( (a20addr & 0xfff80000) != 0x00080000 ) {
3400 + *(Bit32u *)data = taint_vector[a20addr];
3401 + *((Bit32u *)data+1) = taint_vector[a20addr+1];
3402 + *((Bit32u *)data+2) = taint_vector[a20addr+2];
3403 + *((Bit32u *)data+3) = taint_vector[a20addr+3];
3405 + printf("Not yet supported for big endian host platforms.\n");
3412 + *(Bit32u *)data = taint_vector[a20addr];
3413 + *((Bit32u *)data+1) = taint_vector[a20addr+1];
3415 + printf("Not yet supported for big endian host platforms.\n");
3419 + if (*(Bit16u *)data && g_instruction_display_count>0) {
3420 + printf("%s %d: tainted dword read from addr %x. data=%x.\n",__func__,__LINE__,addr,*(Bit16u*)data);
3421 + g_instruction_display_count = 512;
3428 + *(Bit32u *)data = taint_vector[a20addr];
3430 + printf("Not yet supported for big endian host platforms.\n");
3435 + // len == 3 case can just fall thru to special cases handling
3439 +#ifdef BX_LITTLE_ENDIAN
3440 + data_ptr = (Bit32u *) data;
3441 +#else // BX_BIG_ENDIAN
3442 + data_ptr = (Bit32u *) data + (len - 1);
3448 + if ( (a20addr & 0xfff80000) != 0x00080000 ) {
3449 + // addr *not* in range 00080000 .. 000FFFFF
3450 + *data_ptr = taint_vector[a20addr];
3452 + if (len == 1) return;
3455 +#ifdef BX_LITTLE_ENDIAN
3457 +#else // BX_BIG_ENDIAN
3463 + // addr in range 00080000 .. 000FFFFF
3464 +#if BX_PCI_SUPPORT == 0
3465 + if ((a20addr <= 0x0009ffff) || (a20addr >= 0x000c0000) ) {
3466 + // regular memory 80000 .. 9FFFF, C0000 .. F0000
3467 + *data_ptr = taint_vector[a20addr];
3470 + return; //assert(0);
3471 + // VGA memory A0000 .. BFFFF
3472 + //*data_ptr = DEV_vga_mem_read(a20addr);
3473 + //BX_DBG_UCMEM_REPORT(a20addr, 1, BX_READ, *data_ptr); // obsolete
3475 +#else // #if BX_PCI_SUPPORT == 0
3476 + if (a20addr <= 0x0009ffff) {
3477 + *data_ptr = taint_vector[a20addr];
3480 + if (a20addr <= 0x000BFFFF) {
3482 + // VGA memory A0000 .. BFFFF
3483 + //*data_ptr = DEV_vga_mem_read(a20addr);
3484 + //BX_DBG_UCMEM_REPORT(a20addr, 1, BX_READ, *data_ptr);
3488 + // a20addr in C0000 .. FFFFF
3489 + if (!bx_options.Oi440FXSupport->get ()) {
3490 + *data_ptr = taint_vector[a20addr];
3495 +#endif // #if BX_PCI_SUPPORT == 0
3498 + // some or all of data is outside limits of physical memory
3501 +#ifdef BX_LITTLE_ENDIAN
3502 + data_ptr = (Bit32u *) data;
3503 +#else // BX_BIG_ENDIAN
3504 + data_ptr = (Bit32u *) data + (len - 1);
3508 + // Check VBE LFB support
3509 + if ((a20addr >= VBE_DISPI_LFB_PHYSICAL_ADDRESS) &&
3510 + (a20addr < (VBE_DISPI_LFB_PHYSICAL_ADDRESS + VBE_DISPI_TOTAL_VIDEO_MEMORY_BYTES)))
3517 +#if BX_SUPPORT_APIC
3521 + for (i = 0; i < len; i++) {
3522 +#if BX_PCI_SUPPORT == 0
3523 + if (a20addr < BX_MEM_THIS len)
3524 + *data_ptr = taint_vector[a20addr];
3526 + *data_ptr = 0xffffffff;
3527 +#else // BX_PCI_SUPPORT == 0
3528 + if (a20addr < BX_MEM_THIS len) {
3529 + if ((a20addr >= 0x000C0000) && (a20addr <= 0x000FFFFF)) {
3530 + if (!bx_options.Oi440FXSupport->get ())
3531 + *data_ptr = taint_vector[a20addr];
3533 + switch (DEV_pci_rd_memtype(a20addr & 0xFC000)) {
3534 + case 0x0: // Read from ROM
3535 + *data_ptr = taint_vector[a20addr];
3536 + //BX_INFO(("Reading from ROM %08x, Data %02x ", (unsigned) a20addr, *data_ptr));
3539 + case 0x1: // Read from Shadow RAM
3543 + BX_PANIC(("readPhysicalPage: default case"));
3548 + *data_ptr = taint_vector[a20addr];
3549 + BX_INFO(("Reading from Norm %08x, Data %02x ", (unsigned) a20addr, *data_ptr));
3553 + *data_ptr = 0xffffffff;
3554 +#endif // BX_PCI_SUPPORT == 0
3557 +#ifdef BX_LITTLE_ENDIAN
3559 +#else // BX_BIG_ENDIAN
3567 + void BX_CPP_AttrRegparmN(3)
3568 +BX_MEM_C::writePhysicalTaintPage(BX_CPU_C *cpu, Bit32u addr, unsigned len, void *data)
3575 + a20addr = A20ADDR(addr);
3577 + // Note: accesses should always be contained within a single page now.
3579 + //if (*(Bit8u *)data!=0xff && *(Bit8u *)data!=0) assert(0);
3580 + //if (*(Bit8u *)data==0xff) assert(0);
3582 + if ( a20addr <= BX_MEM_THIS len ) {
3584 + //LOG_MEM_TAINT(cpu,addr,len,&taint_vector[a20addr],data,true);
3586 + // all of data is within limits of physical memory
3587 + if ( (a20addr & 0xfff80000) != 0x00080000 ) {
3590 + taint_vector[a20addr] = *(Bit32u*)data;
3591 + taint_vector[a20addr+1] = *((Bit32u*)data+1);
3592 + taint_vector[a20addr+2] = *((Bit32u*)data+2);
3593 + taint_vector[a20addr+3] = *((Bit32u*)data+3);
3595 + printf("Not yet supported for big endian host platforms.\n");
3602 + taint_vector[a20addr] = *(Bit32u*)data;
3603 + taint_vector[a20addr+1] = *((Bit32u*)data+1);
3605 + printf("Not yet supported for big endian host platforms.\n");
3612 + taint_vector[a20addr] = *(Bit32u*)data;
3614 + printf("Not yet supported for big endian host platforms.\n");
3619 + // len == other, just fall thru to special cases handling
3622 +#ifdef BX_LITTLE_ENDIAN
3623 + data_ptr = (Bit32u *) data;
3624 +#else // BX_BIG_ENDIAN
3625 + data_ptr = (Bit32u *) data + (len - 1);
3629 + if ( (a20addr & 0xfff80000) != 0x00080000 ) {
3630 + // addr *not* in range 00080000 .. 000FFFFF
3631 + taint_vector[a20addr] = *data_ptr;
3633 + if (len == 1) return;
3636 +#ifdef BX_LITTLE_ENDIAN
3638 +#else // BX_BIG_ENDIAN
3644 + // addr in range 00080000 .. 000FFFFF
3646 + if (a20addr <= 0x0009ffff) {
3647 + // regular memory 80000 .. 9FFFF
3648 + taint_vector[a20addr] = *data_ptr;
3651 + if (a20addr <= 0x000bffff) {
3652 + // VGA memory A0000 .. BFFFF
3653 + return; //assert(0);
3654 + //DEV_vga_mem_write(a20addr, *data_ptr);
3655 + //BX_DBG_DIRTY_PAGE(a20addr >> 12);
3656 + //BX_DBG_UCMEM_REPORT(a20addr, 1, BX_WRITE, *data_ptr); // obsolete
3659 + // adapter ROM C0000 .. DFFFF
3660 + // ROM BIOS memory E0000 .. FFFFF
3662 + //BX_INFO(("ROM lock %08x: len=%u",
3663 + // (unsigned) a20addr, (unsigned) len));
3664 +#if BX_PCI_SUPPORT == 0
3666 + // Write it since it's in shadow RAM
3667 + taint_vector[a20addr] = *data_ptr;
3669 + // ignore write to ROM
3672 + // Write Based on 440fx Programming
3673 + if (bx_options.Oi440FXSupport->get () &&
3674 + ((a20addr >= 0xC0000) && (a20addr <= 0xFFFFF))) {
3675 + switch (DEV_pci_wr_memtype(a20addr & 0xFC000)) {
3676 + case 0x1: // Writes to ShadowRAM
3678 +// BX_INFO(("Writing to ShadowRAM %08x, len %u ! ", (unsigned) a20addr, (unsigned) len));
3679 + shadow[a20addr - 0xc0000] = *data_ptr;
3680 + BX_DBG_DIRTY_PAGE(a20addr >> 12);
3683 + case 0x0: // Writes to ROM, Inhibit
3685 + //BX_DEBUG(("Write to ROM ignored: address %08x, data %02x", (unsigned) a20addr, *data_ptr));
3688 + BX_PANIC(("writePhysicalPage: default case"));
3697 + // some or all of data is outside limits of physical memory
3700 +#ifdef BX_LITTLE_ENDIAN
3701 + data_ptr = (Bit32u *) data;
3702 +#else // BX_BIG_ENDIAN
3703 + data_ptr = (Bit32u *) data + (len - 1);
3708 + // Check VBE LFB support
3710 + if ((a20addr >= VBE_DISPI_LFB_PHYSICAL_ADDRESS) &&
3711 + (a20addr < (VBE_DISPI_LFB_PHYSICAL_ADDRESS + VBE_DISPI_TOTAL_VIDEO_MEMORY_BYTES)))
3720 +#if BX_SUPPORT_APIC
3724 + for (i = 0; i < len; i++) {
3725 + if (a20addr < BX_MEM_THIS len) {
3726 + taint_vector[a20addr] = *data_ptr;
3728 + // otherwise ignore byte, since it overruns memory
3731 +#ifdef BX_LITTLE_ENDIAN
3733 +#else // BX_BIG_ENDIAN
3740 diff -X ignore -urpNb bochs-2.1.1/taint/mydebug.h checkbochs-2.1.1/taint/mydebug.h
3741 --- bochs-2.1.1/taint/mydebug.h 1969-12-31 16:00:00.000000000 -0800
3742 +++ checkbochs-2.1.1/taint/mydebug.h 2005-07-02 17:25:48.000000000 -0700
3744 +#ifndef __MYDEBUG_H
3745 +#define __MYDEBUG_H
3747 +#define ACCESS_LINEAR 0
3750 +#define CHECKBOCHS 1
3754 +void checkbochs_log (const char *fmt, ...) ;
3756 +#define DBG(l,x) if (l) { \
3757 + checkbochs_log x; \
3758 + /*bx_dbg_disassemble_current(-1,0);*/ \
3761 +#define PANIC(...) do { \
3762 + printf("PANIC at %s:%d in %s(): ",__FILE__,__LINE__,__func__); \
3763 + BX_CPU(0)->panic(__VA_ARGS__); \
3766 +#define ASSERT(CONDITION) \
3767 + if (CONDITION) { } else { \
3768 + PANIC ("assertion `%s' failed.", #CONDITION); \
3772 diff -X ignore -urpNb bochs-2.1.1/taint/paging.cc checkbochs-2.1.1/taint/paging.cc
3773 --- bochs-2.1.1/taint/paging.cc 1969-12-31 16:00:00.000000000 -0800
3774 +++ checkbochs-2.1.1/taint/paging.cc 2005-07-19 14:40:31.000000000 -0700
3776 +#define NEED_CPU_REG_SHORTCUTS 1
3778 +#define LOG_THIS BX_CPU_THIS_PTR
3780 +#include "taint/globals.h"
3781 +#include "taint/mydebug.h"
3782 +#include "taint/lockset.h"
3785 +#define this (BX_CPU(0))
3789 +#if BX_SUPPORT_PAGING
3791 +#define InstrTLB_Stats()
3792 +#define InstrTLB_Increment(v)
3794 +// ==============================================================
3797 +// Translate a linear address to a physical address, for
3798 +// a data access (D)
3800 + Bit32s BX_CPP_AttrRegparmN(3)
3801 +BX_CPU_C::taint_dtranslate_linear(bx_address laddr, unsigned pl, unsigned rw)
3804 + Bit32u ppf, poffset, TLB_index, error_code, paddress;
3805 + Bit32u pde, pde_addr;
3807 + Bit32u accessBits, combined_access;
3808 + unsigned priv_index;
3810 + // CR4.PAE==0 (and MSR.LMA==0)
3812 + lpf = laddr & 0xfffff000; // linear page frame
3813 + poffset = laddr & 0x00000fff; // physical offset
3814 + TLB_index = BX_TLB_INDEX_OF(lpf);
3817 + //isWrite = (rw>=BX_WRITE); // write or r-m-w
3818 + isWrite = 0; // sorav: allow write accesses even if you have only read permissions on the address
3820 + if (BX_CPU_THIS_PTR TLB.entry[TLB_index].lpf == BX_TLB_LPF_VALUE(lpf)) {
3821 + paddress = BX_CPU_THIS_PTR TLB.entry[TLB_index].ppf | poffset;
3822 + accessBits = BX_CPU_THIS_PTR TLB.entry[TLB_index].accessBits;
3823 + if (accessBits & (1 << ((isWrite<<1) | pl)) ) {
3827 + // The current access does not have permission according to the info
3828 + // in our TLB cache entry. Re-walk the page tables, in case there is
3829 + // updated information in the memory image, and let the long path code
3830 + // generate an exception if one is warranted.
3833 + return(-1); //return -1 for failure
3837 +// Translate a linear address to a physical address, for
3838 +// an instruction fetch access (I)
3840 + Bit32u BX_CPP_AttrRegparmN(2)
3841 +BX_CPU_C::taint_itranslate_linear(bx_address laddr, unsigned pl)
3843 + //assign_type(type8,generic_type_executed_code8);
3845 + ////Bit32u pAddr = dtranslate_linear_type(laddr, pl, BX_READ);
3846 + //printf("Assigning a codebyte.\n");
3847 + //access_linear_type(laddr,1,pl,BX_WRITE,type8);
3848 + //typestats_t stats;
3849 + //stats.print(BX_CPU_THIS_PTR mem->type_vector,BX_CPU_THIS_PTR mem->len);
3855 +BX_CPU_C::access_linear_taint(bx_address laddr)
3857 + if (!BX_CPU_THIS_PTR cr0.pg)
3860 + BX_MEM_C *m = BX_CPU_THIS_PTR mem;
3861 + Bit32u paddr = laddr - PHYS_BASE;
3862 + return paddr < m->len ? (lockset *) &m->taint_vector[paddr] : NULL;
3866 diff -X ignore -urpNb bochs-2.1.1/taint/silent_access.cc checkbochs-2.1.1/taint/silent_access.cc
3867 --- bochs-2.1.1/taint/silent_access.cc 1969-12-31 16:00:00.000000000 -0800
3868 +++ checkbochs-2.1.1/taint/silent_access.cc 2005-07-02 17:25:48.000000000 -0700
3870 +#define NEED_CPU_REG_SHORTCUTS 1
3872 +#define LOG_THIS BX_CPU_THIS_PTR
3874 +#if BX_SUPPORT_X86_64
3875 +#define IsLongMode() (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
3876 +#define LPFOf(laddr) ((laddr) & BX_CONST64(0xfffffffffffff000))
3877 +#define BX_CANONICAL_BITS 48
3878 +#define IsCanonical(offset) ((Bit64u)((((Bit64s)(offset)) >> (BX_CANONICAL_BITS-1)) + 1) < 2)
3880 +//#define BX_CANONICAL_LO BX_CONST64(0xffff800000000000)
3881 +//#define BX_CANONICAL_HI BX_CONST64(0x0000800000000000)
3882 +//#define IsCanonical(offset) ((Bit64u)(offset-BX_CANONICAL_LO) < (Bit64u)(BX_CANONICAL_HI-BX_CANONICAL_LO))
3885 +#define IsLongMode() (0)
3886 +#define LPFOf(laddr) ((laddr) & 0xfffff000)
3887 +#define IsCanonical(offset) (0)
3891 + int BX_CPP_AttrRegparmN(3)
3892 +BX_CPU_C::read_virtual_checks_silent(bx_segment_reg_t *seg, bx_address offset,
3895 + Bit32u upper_limit;
3897 + if (protected_mode()) {
3898 + if (seg->cache.valid==0) {
3899 + BX_ERROR(("seg = %s", BX_CPU_THIS_PTR strseg(seg)));
3900 + BX_ERROR(("seg->selector.value = %04x", (unsigned) seg->selector.value));
3901 + //exception(BX_GP_EXCEPTION, 0, 0);
3905 + if (seg->cache.p == 0) { /* not present */
3906 + BX_INFO(("read_virtual_checks(): segment not present"));
3907 + //exception(int_number(seg), 0, 0);
3911 + switch (seg->cache.type) {
3912 + case 0: case 1: /* read only */
3913 + case 10: case 11: /* execute/read */
3914 + case 14: case 15: /* execute/read-only, conforming */
3915 + if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
3916 + || (length-1 > seg->cache.u.segment.limit_scaled)) {
3917 + BX_INFO(("read_virtual_checks(): write beyond limit"));
3918 + //exception(int_number(seg), 0, 0);
3921 + if (seg->cache.u.segment.limit_scaled >= 7) {
3922 + // Mark cache as being OK type for succeeding writes. See notes for
3923 + // write checks; similar code.
3924 + seg->cache.valid |= SegAccessROK;
3928 + case 2: case 3: /* read/write */
3929 + if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
3930 + || (length-1 > seg->cache.u.segment.limit_scaled)) {
3931 + BX_INFO(("read_virtual_checks(): write beyond limit"));
3932 + //exception(int_number(seg), 0, 0);
3935 + if (seg->cache.u.segment.limit_scaled >= 7) {
3936 + // Mark cache as being OK type for succeeding writes. See notes for
3937 + // write checks; similar code.
3938 + seg->cache.valid |= SegAccessROK;
3942 + case 4: case 5: /* read only, expand down */
3943 + if (seg->cache.u.segment.d_b)
3944 + upper_limit = 0xffffffff;
3946 + upper_limit = 0x0000ffff;
3947 + if ((offset <= seg->cache.u.segment.limit_scaled) ||
3948 + (offset > upper_limit) ||
3949 + ((upper_limit - offset) < (length - 1))) {
3950 + BX_INFO(("read_virtual_checks(): write beyond limit"));
3951 + //exception(int_number(seg), 0, 0);
3956 + case 6: case 7: /* read write, expand down */
3957 + if (seg->cache.u.segment.d_b)
3958 + upper_limit = 0xffffffff;
3960 + upper_limit = 0x0000ffff;
3961 + if ((offset <= seg->cache.u.segment.limit_scaled) ||
3962 + (offset > upper_limit) ||
3963 + ((upper_limit - offset) < (length - 1))) {
3964 + BX_INFO(("read_virtual_checks(): write beyond limit"));
3965 + //exception(int_number(seg), 0, 0);
3970 + case 8: case 9: /* execute only */
3971 + case 12: case 13: /* execute only, conforming */
3972 + /* can't read or write an execute-only segment */
3973 + BX_INFO(("read_virtual_checks(): execute only"));
3974 + //exception(int_number(seg), 0, 0);
3981 + else { /* real mode */
3982 + if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
3983 + || (length-1 > seg->cache.u.segment.limit_scaled)) {
3984 + //BX_ERROR(("read_virtual_checks() SEG EXCEPTION: %x:%x + %x",
3985 + // (unsigned) seg->selector.value, (unsigned) offset, (unsigned) length));
3986 + if (seg == & BX_CPU_THIS_PTR sregs[2]) {
3987 + //exception(BX_SS_EXCEPTION, 0, 0);
3990 + //exception(BX_GP_EXCEPTION, 0, 0);
3994 + if (seg->cache.u.segment.limit_scaled >= 7) {
3995 + // Mark cache as being OK type for succeeding writes. See notes for
3996 + // write checks; similar code.
3997 + seg->cache.valid |= SegAccessROK;
4003 + int BX_CPP_AttrRegparmN(3)
4004 +BX_CPU_C::read_virtual_byte_silent(unsigned s, bx_address offset, Bit8u *data)
4007 + bx_segment_reg_t *seg;
4010 + seg = &BX_CPU_THIS_PTR sregs[s];
4011 + if (seg->cache.valid & SegAccessROK) {
4012 + if ((IsLongMode() && IsCanonical(offset))
4013 + || (offset <= seg->cache.u.segment.limit_scaled)) {
4016 + laddr = seg->cache.u.segment.base + offset;
4017 + BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 1, BX_READ);
4020 + ret = access_linear_silent(laddr, 1, pl, BX_READ, (void *) data);
4024 + ret = read_virtual_checks_silent(seg, offset, 1); //if exception would be raised return 0
4025 + if (!ret) return 0;
4029 + int BX_CPP_AttrRegparmN(3)
4030 +BX_CPU_C::read_virtual_word_silent(unsigned s, bx_address offset, Bit16u *data)
4033 + bx_segment_reg_t *seg;
4036 + seg = &BX_CPU_THIS_PTR sregs[s];
4037 + if (seg->cache.valid & SegAccessROK) {
4038 + if ((IsLongMode() && IsCanonical(offset))
4039 + || (offset < seg->cache.u.segment.limit_scaled)) {
4042 + laddr = seg->cache.u.segment.base + offset;
4043 + BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 2, BX_READ);
4046 + ret = access_linear_silent(laddr, 2, pl, BX_READ, (void *) data);
4050 + ret = read_virtual_checks_silent(seg, offset, 2);
4051 + if (!ret) return 0;
4055 + int BX_CPP_AttrRegparmN(3)
4056 +BX_CPU_C::read_virtual_dword_silent(unsigned s, bx_address offset, Bit32u *data)
4059 + bx_segment_reg_t *seg;
4062 + seg = &BX_CPU_THIS_PTR sregs[s];
4063 + if (seg->cache.valid & SegAccessROK) {
4064 + if ((IsLongMode() && IsCanonical(offset))
4065 + || (offset < (seg->cache.u.segment.limit_scaled-2))) {
4068 + laddr = seg->cache.u.segment.base + offset;
4069 + BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 4, BX_READ);
4072 + ret = access_linear_silent(laddr, 4, pl, BX_READ, (void *) data);
4076 + ret = read_virtual_checks_silent(seg, offset, 4);
4077 + if (!ret) return 0;
4080 diff -X ignore -urpNb bochs-2.1.1/taint/silent_paging.cc checkbochs-2.1.1/taint/silent_paging.cc
4081 --- bochs-2.1.1/taint/silent_paging.cc 1969-12-31 16:00:00.000000000 -0800
4082 +++ checkbochs-2.1.1/taint/silent_paging.cc 2005-07-02 17:25:48.000000000 -0700
4084 +#define NEED_CPU_REG_SHORTCUTS 1
4086 +#define LOG_THIS BX_CPU_THIS_PTR
4089 +#define this (BX_CPU(0))
4093 +#if BX_SUPPORT_PAGING
4095 +#define InstrTLB_Stats()
4096 +#define InstrTLB_Increment(v)
4098 +// ==============================================================
4101 + int BX_CPP_AttrRegparmN(3)
4102 +BX_CPU_C::access_linear_silent(bx_address laddr, unsigned length, unsigned pl,
4103 + unsigned rw, void *data)
4105 + Bit32u pageOffset;
4106 + unsigned xlate_rw;
4108 + assert(rw==BX_READ);
4117 + pageOffset = laddr & 0x00000fff;
4119 + if (BX_CPU_THIS_PTR cr0.pg) {
4120 + /* check for reference across multiple pages */
4121 + if ( (pageOffset + length) <= 4096 ) {
4122 + // Access within single page.
4123 + BX_CPU_THIS_PTR address_xlation.taint_paddress1 =
4124 + taint_dtranslate_linear(laddr, pl, xlate_rw);
4125 + BX_CPU_THIS_PTR address_xlation.taint_pages = 1;
4127 + if (rw == BX_READ) {
4128 + BX_INSTR_LIN_READ(BX_CPU_ID, laddr, BX_CPU_THIS_PTR address_xlation.taint_paddress1, length);
4129 + BX_CPU_THIS_PTR mem->readPhysicalPage(this,
4130 + BX_CPU_THIS_PTR address_xlation.taint_paddress1, length, data );
4133 + BX_INSTR_LIN_WRITE(BX_CPU_ID, laddr, BX_CPU_THIS_PTR address_xlation.taint_paddress1, length);
4134 + BX_CPU_THIS_PTR mem->writePhysicalPage(this,
4135 + BX_CPU_THIS_PTR address_xlation.taint_paddress1, length, data);
4137 + //if (rw==BX_WRITE) BX_CPU_THIS_PTR address_xlation.taint_write_paddress1 = BX_CPU_THIS_PTR address_xlation.taint_paddress1;
4140 + // access across 2 pages
4141 + BX_CPU_THIS_PTR address_xlation.taint_paddress1 =
4142 + taint_dtranslate_linear(laddr, pl, xlate_rw);
4143 + BX_CPU_THIS_PTR address_xlation.taint_len1 = 4096 - pageOffset;
4144 + BX_CPU_THIS_PTR address_xlation.taint_len2 = length -
4145 + BX_CPU_THIS_PTR address_xlation.taint_len1;
4146 + BX_CPU_THIS_PTR address_xlation.taint_pages = 2;
4147 + BX_CPU_THIS_PTR address_xlation.taint_paddress2 =
4148 + taint_dtranslate_linear(laddr + BX_CPU_THIS_PTR address_xlation.taint_len1,
4151 +#ifdef BX_LITTLE_ENDIAN
4152 + if (rw == BX_READ) {
4153 + BX_CPU_THIS_PTR mem->readPhysicalPage(this, BX_CPU_THIS_PTR address_xlation.taint_paddress1,
4154 + BX_CPU_THIS_PTR address_xlation.taint_len1, data);
4155 + BX_CPU_THIS_PTR mem->readPhysicalPage(this, BX_CPU_THIS_PTR address_xlation.taint_paddress2,
4156 + BX_CPU_THIS_PTR address_xlation.taint_len2,
4157 + ((Bit32u*)data) + BX_CPU_THIS_PTR address_xlation.taint_len1);
4160 + BX_CPU_THIS_PTR mem->writePhysicalPage(this, BX_CPU_THIS_PTR address_xlation.taint_paddress1,
4161 + BX_CPU_THIS_PTR address_xlation.taint_len1, data);
4162 + BX_CPU_THIS_PTR mem->writePhysicalPage(this, BX_CPU_THIS_PTR address_xlation.taint_paddress2,
4163 + BX_CPU_THIS_PTR address_xlation.taint_len2,
4164 + ((Bit32u*)data) + BX_CPU_THIS_PTR address_xlation.taint_len1);
4167 +#else // BX_BIG_ENDIAN
4168 + if (rw == BX_READ) {
4169 + BX_CPU_THIS_PTR mem->readPhysicalPage(this, BX_CPU_THIS_PTR address_xlation.taint_paddress1,
4170 + BX_CPU_THIS_PTR address_xlation.taint_len1,
4171 + ((Bit32u*)data) + (length - BX_CPU_THIS_PTR address_xlation.taint_len1));
4172 + BX_CPU_THIS_PTR mem->readPhysicalPage(this, BX_CPU_THIS_PTR address_xlation.taint_paddress2,
4173 + BX_CPU_THIS_PTR address_xlation.taint_len2, data);
4176 + BX_CPU_THIS_PTR mem->writePhysicalPage(this, BX_CPU_THIS_PTR address_xlation.taint_paddress1,
4177 + BX_CPU_THIS_PTR address_xlation.taint_len1,
4178 + ((Bit32u*)data) + (length - BX_CPU_THIS_PTR address_xlation.taint_len1));
4179 + BX_CPU_THIS_PTR mem->writePhysicalPage(this, BX_CPU_THIS_PTR address_xlation.taint_paddress2,
4180 + BX_CPU_THIS_PTR address_xlation.taint_len2, data);
4184 + //if (rw==BX_WRITE) BX_CPU_THIS_PTR address_xlation.taint_write_paddress1 = BX_CPU_THIS_PTR address_xlation.taint_paddress1;
4190 + if ( (pageOffset + length) <= 4096 ) {
4191 + // Access within single page.
4192 + BX_CPU_THIS_PTR address_xlation.taint_paddress1 = laddr;
4193 + BX_CPU_THIS_PTR address_xlation.taint_pages = 1;
4194 + if (rw == BX_READ) {
4196 + // Let access fall through to the following for this iteration.
4197 + BX_CPU_THIS_PTR mem->readPhysicalPage(this, laddr, length, data);
4200 + BX_CPU_THIS_PTR mem->writePhysicalPage(this, laddr, length, data);
4204 + // Access spans two pages.
4205 + BX_CPU_THIS_PTR address_xlation.taint_paddress1 = laddr;
4206 + BX_CPU_THIS_PTR address_xlation.taint_len1 = 4096 - pageOffset;
4207 + BX_CPU_THIS_PTR address_xlation.taint_len2 = length -
4208 + BX_CPU_THIS_PTR address_xlation.taint_len1;
4209 + BX_CPU_THIS_PTR address_xlation.taint_pages = 2;
4210 + BX_CPU_THIS_PTR address_xlation.taint_paddress2 = laddr +
4211 + BX_CPU_THIS_PTR address_xlation.taint_len1;
4213 +#ifdef BX_LITTLE_ENDIAN
4214 + if (rw == BX_READ) {
4215 + BX_CPU_THIS_PTR mem->readPhysicalPage(this,
4216 + BX_CPU_THIS_PTR address_xlation.taint_paddress1,
4217 + BX_CPU_THIS_PTR address_xlation.taint_len1, data);
4218 + BX_CPU_THIS_PTR mem->readPhysicalPage(this,
4219 + BX_CPU_THIS_PTR address_xlation.taint_paddress2,
4220 + BX_CPU_THIS_PTR address_xlation.taint_len2,
4221 + ((Bit32u*)data) + BX_CPU_THIS_PTR address_xlation.taint_len1);
4224 + BX_CPU_THIS_PTR mem->writePhysicalPage(this,
4225 + BX_CPU_THIS_PTR address_xlation.taint_paddress1,
4226 + BX_CPU_THIS_PTR address_xlation.taint_len1, data);
4227 + BX_CPU_THIS_PTR mem->writePhysicalPage(this,
4228 + BX_CPU_THIS_PTR address_xlation.taint_paddress2,
4229 + BX_CPU_THIS_PTR address_xlation.taint_len2,
4230 + ((Bit32u*)data) + BX_CPU_THIS_PTR address_xlation.taint_len1);
4233 +#else // BX_BIG_ENDIAN
4234 + if (rw == BX_READ) {
4235 + BX_CPU_THIS_PTR mem->readPhysicalPage(this,
4236 + BX_CPU_THIS_PTR address_xlation.taint_paddress1,
4237 + BX_CPU_THIS_PTR address_xlation.taint_len1,
4238 + ((Bit32u*)data) + (length - BX_CPU_THIS_PTR address_xlation.taint_len1));
4239 + BX_CPU_THIS_PTR mem->readPhysicalPage(this,
4240 + BX_CPU_THIS_PTR address_xlation.taint_paddress2,
4241 + BX_CPU_THIS_PTR address_xlation.taint_len2, data);
4244 + BX_CPU_THIS_PTR mem->writePhysicalPage(this,
4245 + BX_CPU_THIS_PTR address_xlation.taint_paddress1,
4246 + BX_CPU_THIS_PTR address_xlation.taint_len1,
4247 + ((Bit32u*)data) + (length - BX_CPU_THIS_PTR address_xlation.taint_len1));
4248 + BX_CPU_THIS_PTR mem->writePhysicalPage(this,
4249 + BX_CPU_THIS_PTR address_xlation.taint_paddress2,
4250 + BX_CPU_THIS_PTR address_xlation.taint_len2, data);
4254 + //if (rw==BX_WRITE) BX_CPU_THIS_PTR address_xlation.taint_write_paddress1 = BX_CPU_THIS_PTR address_xlation.taint_paddress1;
4259 diff -X ignore -urpNb bochs-2.1.1/taint/taint_type.cc checkbochs-2.1.1/taint/taint_type.cc
4260 --- bochs-2.1.1/taint/taint_type.cc 1969-12-31 16:00:00.000000000 -0800
4261 +++ checkbochs-2.1.1/taint/taint_type.cc 2005-07-02 17:25:48.000000000 -0700
4263 +#define NEED_CPU_REG_SHORTCUTS 1
4265 +#define LOG_THIS BX_CPU_THIS_PTR
4267 +#include "taint/taint_type.h"
4268 +#include "taint/globals.h"
4270 +void assign_taint_functions(char *type) {
4271 + if (!strcmp(type,"none")) return;
4272 + if (!strcmp(type,"eraser")) {
4273 + //g_access_linear_fptr = &(BX_CPU_C::eraser_access_linear);
4276 diff -X ignore -urpNb bochs-2.1.1/taint/taint_type.h checkbochs-2.1.1/taint/taint_type.h
4277 --- bochs-2.1.1/taint/taint_type.h 1969-12-31 16:00:00.000000000 -0800
4278 +++ checkbochs-2.1.1/taint/taint_type.h 2005-07-02 17:25:48.000000000 -0700
4280 +#ifndef __TAINT_TYPE_H
4281 +#define __TAINT_TYPE_H
4283 +#define ERASER_ID 0
4285 +void assign_taint_functions(char *taint_type);