5 #include "intr-stubs.h"
13 #define THREAD_MAGIC 0x1234abcdu
15 /* List of processes in THREAD_READY state, that is, processes
16 that are ready to run but not actually running. */
17 static struct list run_queue;
20 static struct thread *idle_thread; /* Thread. */
21 static void idle (void *aux UNUSED); /* Thread function. */
/* Stack frame for kernel_thread().
   Laid out on a new thread's stack by thread_create() so that,
   when the thread first runs, it "returns" into kernel_thread()
   with FUNCTION and AUX as its arguments. */
struct kernel_thread_frame
  {
    void *eip;                  /* Return address. */
    void (*function) (void *);  /* Function to call. */
    void *aux;                  /* Auxiliary data for function. */
  };
31 static void kernel_thread (void (*function) (void *aux), void *aux);
33 static struct thread *next_thread_to_run (void);
34 static struct thread *new_thread (const char *name);
35 static bool is_thread (struct thread *t);
36 static void *alloc_frame (struct thread *t, size_t size);
37 static void destroy_thread (struct thread *t);
38 static void schedule (void);
39 void schedule_tail (struct thread *prev);
41 /* Initializes the threading system. After calling, create some
42 threads with thread_create() or thread_execute(), then start
43 the scheduler with thread_start(). */
47 ASSERT (intr_get_level () == INTR_OFF);
49 /* Initialize run queue. */
50 list_init (&run_queue);
52 /* Create idle thread. */
53 idle_thread = thread_create ("idle", idle, NULL);
54 idle_thread->status = THREAD_BLOCKED;
57 /* Starts the thread scheduler. The caller should have created
58 some threads with thread_create() or thread_execute(). Never
59 returns to the caller. */
63 struct thread *t = next_thread_to_run ();
64 if (t->status == THREAD_READY)
65 list_remove (&t->rq_elem);
66 t->status = THREAD_RUNNING;
67 switch_threads (NULL, t);
72 /* Creates a new kernel thread named NAME, which executes
73 FUNCTION passing AUX as the argument, and adds it to the ready
74 queue. If thread_start() has been called, then the new thread
75 may be scheduled before thread_create() returns. Use a
76 semaphore or some other form of synchronization if you need to
79 thread_create (const char *name, void (*function) (void *aux), void *aux)
82 struct kernel_thread_frame *kf;
83 struct switch_entry_frame *ef;
84 struct switch_threads_frame *sf;
86 ASSERT (function != NULL);
88 t = new_thread (name);
90 /* Stack frame for kernel_thread(). */
91 kf = alloc_frame (t, sizeof *kf);
93 kf->function = function;
96 /* Stack frame for switch_entry(). */
97 ef = alloc_frame (t, sizeof *ef);
98 ef->eip = (void (*) (void)) kernel_thread;
100 /* Stack frame for switch_threads(). */
101 sf = alloc_frame (t, sizeof *sf);
102 sf->eip = switch_entry;
104 /* Add to run queue. */
111 /* Starts a new thread running a user program loaded from
112 FILENAME, and adds it to the ready queue. If thread_start()
113 has been called, then new thread may be scheduled before
114 thread_execute() returns.*/
116 thread_execute (const char *filename)
119 struct intr_frame *if_;
120 struct switch_entry_frame *ef;
121 struct switch_threads_frame *sf;
122 void (*start) (void);
124 ASSERT (filename != NULL);
126 t = new_thread (filename);
130 if (!addrspace_load (t, filename, &start))
131 PANIC ("%s: program load failed", filename);
133 /* Interrupt frame. */
134 if_ = alloc_frame (t, sizeof *if_);
139 if_->eflags = FLAG_IF | FLAG_MBS;
140 if_->esp = PHYS_BASE;
143 /* Stack frame for switch_entry(). */
144 ef = alloc_frame (t, sizeof *ef);
147 /* Stack frame for switch_threads(). */
148 sf = alloc_frame (t, sizeof *sf);
149 sf->eip = switch_entry;
151 /* Add to run queue. */
158 /* Transitions T from its current state to THREAD_READY, the
159 ready-to-run state. On entry, T must be ready or blocked.
160 (Use thread_yield() to make the running thread ready.) */
162 thread_wake (struct thread *t)
164 ASSERT (is_thread (t));
165 ASSERT (t->status == THREAD_READY || t->status == THREAD_BLOCKED);
166 if (t->status != THREAD_READY)
168 list_push_back (&run_queue, &t->rq_elem);
169 t->status = THREAD_READY;
173 /* Returns the name of thread T. */
175 thread_name (struct thread *t)
177 ASSERT (is_thread (t));
181 /* Returns the running thread. */
183 thread_current (void)
188 /* Copy the CPU's stack pointer into `esp', and then round that
189 down to the start of a page. Because `struct thread' is
190 always at the beginning of a page and the stack pointer is
191 somewhere in the middle, this locates the curent thread. */
192 asm ("movl %%esp, %0\n" : "=g" (esp));
193 t = pg_round_down (esp);
195 /* Make sure T is really a thread.
196 If this assertion fires, then your thread may have
197 overflowed its stack. Each thread has less than 4 kB of
198 stack, so a few big automatic arrays or moderate recursion
199 can cause stack overflow. */
200 ASSERT (is_thread (t));
205 /* Deschedules the current thread and destroys it. Never
206 returns to the caller. */
210 ASSERT (!intr_context ());
213 thread_current ()->status = THREAD_DYING;
218 /* Yields the CPU. The current thread is not put to sleep and
219 may be scheduled again immediately at the scheduler's whim. */
223 struct thread *cur = thread_current ();
224 enum intr_level old_level;
226 ASSERT (!intr_context ());
228 old_level = intr_disable ();
229 list_push_back (&run_queue, &cur->rq_elem);
230 cur->status = THREAD_READY;
232 intr_set_level (old_level);
235 /* Puts the current thread to sleep. It will not be scheduled
236 again until awoken by thread_wake(). */
240 ASSERT (!intr_context ());
241 ASSERT (intr_get_level () == INTR_OFF);
243 thread_current ()->status = THREAD_BLOCKED;
247 /* Idle thread. Executes when no other thread is ready to run. */
249 idle (void *aux UNUSED)
253 /* Wait for an interrupt. */
254 DEBUG (idle, "idle");
257 /* Let someone else run. */
264 /* Function used as the basis for a kernel thread. */
266 kernel_thread (void (*function) (void *aux), void *aux)
268 ASSERT (function != NULL);
270 intr_enable (); /* The scheduler runs with interrupts off. */
271 function (aux); /* Execute the thread function. */
272 thread_exit (); /* If function() returns, kill the thread. */
275 /* Returns true if T appears to point to a valid thread. */
277 is_thread (struct thread *t)
279 return t != NULL && t->magic == THREAD_MAGIC;
282 /* Creates a new thread named NAME and initializes its fields.
283 Returns the new thread if successful or a null pointer on
285 static struct thread *
286 new_thread (const char *name)
290 ASSERT (name != NULL);
292 t = palloc_get (PAL_ZERO);
295 strlcpy (t->name, name, sizeof t->name);
296 t->stack = (uint8_t *) t + PGSIZE;
297 t->status = THREAD_BLOCKED;
298 t->magic = THREAD_MAGIC;
304 /* Allocates a SIZE-byte frame at the top of thread T's stack and
305 returns a pointer to the frame's base. */
307 alloc_frame (struct thread *t, size_t size)
309 /* Stack data is always allocated in word-size units. */
310 ASSERT (is_thread (t));
311 ASSERT (size % sizeof (uint32_t) == 0);
317 /* Chooses and returns the next thread to be scheduled. Should
318 return a thread from the run queue, unless the run queue is
319 empty. (If the running thread can continue running, then it
320 will be in the run queue.) If the run queue is empty, return
322 static struct thread *
323 next_thread_to_run (void)
325 if (list_empty (&run_queue))
328 return list_entry (list_pop_front (&run_queue), struct thread, rq_elem);
331 /* Destroys T, which must be in the dying state and must not be
332 the running thread. */
334 destroy_thread (struct thread *t)
336 ASSERT (is_thread (t));
337 ASSERT (t->status == THREAD_DYING);
338 ASSERT (t != thread_current ());
340 addrspace_destroy (t);
344 /* Completes a thread switch by activating the new thread's page
345 tables, and, if the previous thread is dying, destroying it.
347 At this function's invocation, we just switched from thread
348 PREV, the new thread is already running, and interrupts are
349 still disabled. This function is normally invoked by
350 thread_schedule() as its final action before returning, but
351 the first time a thread is scheduled it is called by
352 switch_entry() (see switch.S).
354 After this function and its caller returns, the thread switch
357 schedule_tail (struct thread *prev)
359 struct thread *cur = thread_current ();
361 ASSERT (intr_get_level () == INTR_OFF);
363 cur->status = THREAD_RUNNING;
364 if (prev != NULL && prev->status == THREAD_DYING)
365 destroy_thread (prev);
368 addrspace_activate (cur);
372 /* Schedules a new process. At entry, interrupts must be off and
373 the running process's state must have been changed from
374 running to some other state. This function finds another
375 thread to run and switches to it. */
379 struct thread *cur = thread_current ();
380 struct thread *next = next_thread_to_run ();
382 ASSERT (intr_get_level () == INTR_OFF);
383 ASSERT (cur->status != THREAD_RUNNING);
384 ASSERT (is_thread (next));
388 struct thread *prev = switch_threads (cur, next);
389 schedule_tail (prev);
393 /* Offset of `stack' member within `struct thread'.
394 Used by switch.S, which can't figure it out on its own. */
395 uint32_t thread_stack_ofs = offsetof (struct thread, stack);