5 #include "intr-stubs.h"
/* Magic value stamped into every struct thread (see init_thread());
   is_thread() checks it to detect a thread whose stack has
   overflowed into its struct thread. */
15 #define THREAD_MAGIC 0x1234abcdu
17 /* List of processes in THREAD_READY state, that is, processes
18 that are ready to run but not actually running. */
19 static struct list run_queue;
22 static struct thread *idle_thread; /* Thread run when no other thread is ready. */
23 static void idle (void *aux UNUSED); /* Thread function for the idle thread. */
25 /* Stack frame for kernel_thread().
   Pushed onto a new thread's stack by thread_create() so that the
   first switch to the thread "returns" into kernel_thread() with
   FUNCTION and AUX already on the stack as its arguments. */
26 struct kernel_thread_frame
28 void *eip; /* Return address. */
29 thread_func *function; /* Function to call. */
30 void *aux; /* Auxiliary data for function. */
/* Forward declarations for internal scheduler helpers. */
33 static void kernel_thread (thread_func *, void *aux);
35 static struct thread *running_thread (void);
36 static struct thread *next_thread_to_run (void);
37 static struct thread *new_thread (const char *name);
38 static void init_thread (struct thread *, const char *name);
39 static bool is_thread (struct thread *);
40 static void *alloc_frame (struct thread *, size_t size);
41 static void destroy_thread (struct thread *);
42 static void schedule (void);
/* Not static: also invoked from switch_entry() in switch.S, not
   just from schedule(). */
43 void schedule_tail (struct thread *prev);
45 /* Initializes the threading system by transforming the code
46 that's currently running into a thread. Note that this is
47 possible only because the loader was careful to put the bottom
48 of the stack at a page boundary; it won't work in general.
49 Also initializes the run queue.
51 After calling this function, be sure to initialize the page
52 allocator before trying to create any threads with
/* Must run with interrupts off: we are mutating global scheduler
   state before the scheduler can cope with preemption. */
59 ASSERT (intr_get_level () == INTR_OFF);
61 /* Set up a thread structure for the running thread. */
62 t = running_thread ();
63 init_thread (t, "main");
/* init_thread() leaves status THREAD_BLOCKED; override it, since
   this thread is in fact the one currently executing. */
64 t->status = THREAD_RUNNING;
66 /* Initialize run queue. */
67 list_init (&run_queue);
70 /* Starts preemptive thread scheduling by enabling interrupts.
71 Also creates the idle thread. */
75 /* Create idle thread. */
76 idle_thread = thread_create ("idle", idle, NULL);
/* Force the idle thread BLOCKED so it is not chosen while real
   work is queued.  NOTE(review): thread_create()'s state handling
   for the new thread is elided here -- confirm this override is
   race-free (interrupts are presumably still off at this point). */
77 idle_thread->status = THREAD_BLOCKED;
79 /* Enable interrupts. */
83 /* Creates a new kernel thread named NAME, which executes
84 FUNCTION passing AUX as the argument, and adds it to the ready
85 queue. If thread_start() has been called, then the new thread
86 may be scheduled before thread_create() returns. Use a
87 semaphore or some other form of synchronization if you need to
90 thread_create (const char *name, thread_func *function, void *aux)
93 struct kernel_thread_frame *kf;
94 struct switch_entry_frame *ef;
95 struct switch_threads_frame *sf;
97 ASSERT (function != NULL);
99 t = new_thread (name);
/* Build a fake call stack so that the first switch_threads() to
   this thread "returns" through switch_entry() and lands in
   kernel_thread(FUNCTION, AUX).  Frames are pushed in reverse of
   the order in which they will be consumed. */
101 /* Stack frame for kernel_thread(). */
102 kf = alloc_frame (t, sizeof *kf);
104 kf->function = function;
107 /* Stack frame for switch_entry(). */
108 ef = alloc_frame (t, sizeof *ef);
109 ef->eip = (void (*) (void)) kernel_thread;
111 /* Stack frame for switch_threads(). */
112 sf = alloc_frame (t, sizeof *sf);
113 sf->eip = switch_entry;
115 /* Add to run queue. */
122 /* Starts a new thread running a user program loaded from
123 FILENAME, and adds it to the ready queue. If thread_start()
124 has been called, then new thread may be scheduled before
125 thread_execute() returns. */
127 thread_execute (const char *filename)
130 struct intr_frame *if_;
131 struct switch_entry_frame *ef;
132 struct switch_threads_frame *sf;
133 void (*start) (void);
135 ASSERT (filename != NULL);
137 t = new_thread (filename);
/* Load the executable into T's address space; START receives the
   program's entry point. */
141 if (!addrspace_load (t, filename, &start))
142 PANIC ("%s: program load failed", filename);
144 /* Interrupt frame. */
145 if_ = alloc_frame (t, sizeof *if_);
/* FLAG_IF: user code runs with interrupts enabled.
   FLAG_MBS: EFLAGS bit that must always be set. */
150 if_->eflags = FLAG_IF | FLAG_MBS;
151 if_->esp = PHYS_BASE;
154 /* Stack frame for switch_entry(). */
155 ef = alloc_frame (t, sizeof *ef);
158 /* Stack frame for switch_threads(). */
159 sf = alloc_frame (t, sizeof *sf);
160 sf->eip = switch_entry;
162 /* Add to run queue. */
169 /* Transitions a blocked thread T from its current state to the
170 ready-to-run state. If T is not blocked, there is no effect.
171 (Use thread_yield() to make the running thread ready.) */
173 thread_unblock (struct thread *t)
175 enum intr_level old_level;
177 ASSERT (is_thread (t));
/* Interrupts off: run_queue and T->status are also manipulated
   by the scheduler with interrupts disabled, so this makes the
   queue insertion and status change atomic w.r.t. preemption. */
179 old_level = intr_disable ();
180 if (t->status == THREAD_BLOCKED)
182 list_push_back (&run_queue, &t->rq_elem);
183 t->status = THREAD_READY;
185 intr_set_level (old_level);
188 /* Returns the name of thread T.
   T must be a valid, live thread (checked via its magic field). */
190 thread_name (struct thread *t)
192 ASSERT (is_thread (t));
196 /* Returns the running thread.
197 This is running_thread() plus a couple of sanity checks. */
199 thread_current (void)
201 struct thread *t = running_thread ();
203 /* Make sure T is really a thread.
204 If either of these assertions fire, then your thread may
205 have overflowed its stack. Each thread has less than 4 kB
206 of stack, so a few big automatic arrays or moderate
207 recursion can cause stack overflow. */
/* Overflow clobbers the struct thread at the base of the page,
   which destroys the magic value that is_thread() checks. */
208 ASSERT (is_thread (t));
209 ASSERT (t->status == THREAD_RUNNING);
214 /* Deschedules the current thread and destroys it. Never
215 returns to the caller. */
219 ASSERT (!intr_context ());
/* Mark ourselves dying; the actual teardown happens in
   schedule_tail() after another thread has been switched in --
   we cannot free the stack we are currently running on. */
222 thread_current ()->status = THREAD_DYING;
227 /* Yields the CPU. The current thread is not put to sleep and
228 may be scheduled again immediately at the scheduler's whim. */
232 struct thread *cur = thread_current ();
233 enum intr_level old_level;
235 ASSERT (!intr_context ());
/* Re-queue ourselves as READY with interrupts off.
   NOTE(review): the call to schedule() is in elided lines between
   the status change and intr_set_level() -- confirm ordering. */
237 old_level = intr_disable ();
238 list_push_back (&run_queue, &cur->rq_elem);
239 cur->status = THREAD_READY;
241 intr_set_level (old_level);
244 /* Puts the current thread to sleep. It will not be scheduled
245 again until awoken by thread_unblock(). */
/* Unlike thread_yield(), the caller must already have interrupts
   disabled -- this function asserts it rather than doing it. */
249 ASSERT (!intr_context ());
250 ASSERT (intr_get_level () == INTR_OFF);
252 thread_current ()->status = THREAD_BLOCKED;
256 /* Idle thread. Executes when no other thread is ready to run. */
258 idle (void *aux UNUSED)
/* NOTE(review): the loop structure and the instructions that
   actually wait and yield are elided from this view -- verify
   against the full source. */
262 /* Wait for an interrupt. */
263 DEBUG (idle, "idle");
266 /* Let someone else run. */
273 /* Function used as the basis for a kernel thread.
   Entered the first time a new thread is scheduled: the fake
   frames built by thread_create() make switch_entry() "return"
   here with FUNCTION and AUX as arguments. */
275 kernel_thread (thread_func *function, void *aux)
277 ASSERT (function != NULL);
279 intr_enable (); /* The scheduler runs with interrupts off. */
280 function (aux); /* Execute the thread function. */
281 thread_exit (); /* If function() returns, kill the thread. */
284 /* Returns the running thread. */
286 running_thread (void)
290 /* Copy the CPU's stack pointer into `esp', and then round that
291 down to the start of a page. Because `struct thread' is
292 always at the beginning of a page and the stack pointer is
293 somewhere in the middle, this locates the current thread. */
294 asm ("movl %%esp, %0\n" : "=g" (esp));
295 return pg_round_down (esp);
298 /* Returns true if T appears to point to a valid thread.
   The magic value is set by init_thread() and is destroyed if the
   thread's stack overflows into its struct thread. */
300 is_thread (struct thread *t)
302 return t != NULL && t->magic == THREAD_MAGIC;
305 /* Creates a new thread named NAME and initializes its fields.
306 Returns the new thread if successful or a null pointer on
308 static struct thread *
309 new_thread (const char *name)
313 ASSERT (name != NULL);
/* Allocate a zeroed page: the struct thread lives at its base and
   the thread's kernel stack grows down from its top (see
   init_thread()). */
315 t = palloc_get (PAL_ZERO);
/* NOTE(review): the NULL check implied by the doc comment above
   must be in elided lines between here and init_thread() --
   verify that palloc_get() failure is actually handled. */
317 init_thread (t, name);
322 /* Initializes T as a new thread named NAME. */
324 init_thread (struct thread *t, const char *name)
326 memset (t, 0, sizeof *t);
327 strlcpy (t->name, name, sizeof t->name);
/* Kernel stack starts at the top of T's page and grows downward. */
328 t->stack = (uint8_t *) t + PGSIZE;
/* New threads start blocked; callers set the real state afterward
   (e.g. thread_init() marks the main thread RUNNING). */
329 t->status = THREAD_BLOCKED;
330 t->magic = THREAD_MAGIC;
333 /* Allocates a SIZE-byte frame at the top of thread T's stack and
334 returns a pointer to the frame's base.
   Used by thread_create()/thread_execute() to fabricate the
   initial stack contents of a new thread. */
336 alloc_frame (struct thread *t, size_t size)
338 /* Stack data is always allocated in word-size units. */
339 ASSERT (is_thread (t));
340 ASSERT (size % sizeof (uint32_t) == 0);
346 /* Chooses and returns the next thread to be scheduled. Should
347 return a thread from the run queue, unless the run queue is
348 empty. (If the running thread can continue running, then it
349 will be in the run queue.) If the run queue is empty, return
351 static struct thread *
352 next_thread_to_run (void)
354 if (list_empty (&run_queue))
/* Empty-queue branch elided; per the (truncated) comment above it
   presumably returns idle_thread -- confirm. */
357 return list_entry (list_pop_front (&run_queue), struct thread, rq_elem);
360 /* Destroys T, which must be in the dying state and must not be
361 the running thread. */
363 destroy_thread (struct thread *t)
365 ASSERT (is_thread (t));
366 ASSERT (t->status == THREAD_DYING);
/* Never the running thread: its kernel stack occupies T's page,
   so freeing it out from under ourselves would be fatal. */
367 ASSERT (t != thread_current ());
370 addrspace_destroy (t);
375 /* Completes a thread switch by activating the new thread's page
376 tables, and, if the previous thread is dying, destroying it.
378 At this function's invocation, we just switched from thread
379 PREV, the new thread is already running, and interrupts are
380 still disabled. This function is normally invoked by
381 thread_schedule() as its final action before returning, but
382 the first time a thread is scheduled it is called by
383 switch_entry() (see switch.S).
385 After this function and its caller returns, the thread switch
388 schedule_tail (struct thread *prev)
390 struct thread *cur = running_thread ();
392 ASSERT (intr_get_level () == INTR_OFF);
394 cur->status = THREAD_RUNNING;
397 addrspace_activate (cur);
/* Only now, running on CUR's stack, is it safe to free PREV.
   PREV is presumably NULL on the very first switch via
   switch_entry() -- hence the NULL check. */
400 if (prev != NULL && prev->status == THREAD_DYING)
401 destroy_thread (prev);
404 /* Schedules a new process. At entry, interrupts must be off and
405 the running process's state must have been changed from
406 running to some other state. This function finds another
407 thread to run and switches to it. */
411 struct thread *cur = running_thread ();
412 struct thread *next = next_thread_to_run ();
413 struct thread *prev = NULL;
415 ASSERT (intr_get_level () == INTR_OFF);
416 ASSERT (cur->status != THREAD_RUNNING);
417 ASSERT (is_thread (next));
/* switch_threads() does not return until this thread is next
   scheduled; its return value is the thread we switched away
   from, which schedule_tail() may need to destroy. */
420 prev = switch_threads (cur, next);
421 schedule_tail (prev);
424 /* Offset of `stack' member within `struct thread'.
425 Used by switch.S, which can't figure it out on its own.
   Must stay consistent with the struct thread layout. */
426 uint32_t thread_stack_ofs = offsetof (struct thread, stack);