GitHub Repository: script3r/os161
Path: blob/master/kern/thread/thread.c

/*
 * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
 *	The President and Fellows of Harvard College.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Core kernel-level thread system.
 */

#include <types.h>
#include <kern/errno.h>
#include <lib.h>
#include <array.h>
#include <cpu.h>
#include <spl.h>
#include <spinlock.h>
#include <wchan.h>
#include <thread.h>
#include <threadlist.h>
#include <threadprivate.h>
#include <current.h>
#include <synch.h>
#include <addrspace.h>
#include <mainbus.h>
#include <vnode.h>

#include "opt-synchprobs.h"
#include "opt-defaultscheduler.h"


/* Magic number used as a guard value on kernel thread stacks. */
#define THREAD_STACK_MAGIC 0xbaadf00d

/* Wait channel. */
struct wchan {
        const char *wc_name;            /* name for this channel */
        struct threadlist wc_threads;   /* list of waiting threads */
        struct spinlock wc_lock;        /* lock for mutual exclusion */
};

/* Master array of CPUs. */
DECLARRAY(cpu);
DEFARRAY(cpu, /*no inline*/ );
static struct cpuarray allcpus;

/* Used to wait for secondary CPUs to come online. */
static struct semaphore *cpu_startup_sem;

////////////////////////////////////////////////////////////

/*
 * Stick a magic number on the bottom end of the stack. This will
 * (sometimes) catch kernel stack overflows. Use thread_checkstack()
 * to test this.
 */
static
void
thread_checkstack_init(struct thread *thread)
{
        ((uint32_t *)thread->t_stack)[0] = THREAD_STACK_MAGIC;
        ((uint32_t *)thread->t_stack)[1] = THREAD_STACK_MAGIC;
        ((uint32_t *)thread->t_stack)[2] = THREAD_STACK_MAGIC;
        ((uint32_t *)thread->t_stack)[3] = THREAD_STACK_MAGIC;
}

/*
 * Check the magic number we put on the bottom end of the stack in
 * thread_checkstack_init. If these assertions go off, it most likely
 * means you overflowed your stack at some point, which can cause all
 * kinds of mysterious other things to happen.
 *
 * Note that when ->t_stack is NULL, which is the case if the stack
 * cannot be freed (which in turn is the case if the stack is the boot
 * stack, and the thread is the boot thread) this doesn't do anything.
 */
static
void
thread_checkstack(struct thread *thread)
{
        if (thread->t_stack != NULL) {
                KASSERT(((uint32_t*)thread->t_stack)[0] == THREAD_STACK_MAGIC);
                KASSERT(((uint32_t*)thread->t_stack)[1] == THREAD_STACK_MAGIC);
                KASSERT(((uint32_t*)thread->t_stack)[2] == THREAD_STACK_MAGIC);
                KASSERT(((uint32_t*)thread->t_stack)[3] == THREAD_STACK_MAGIC);
        }
}
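
/*
 * Illustrative note (a sketch added for clarity, not part of the
 * original source): the guard words sit at the lowest addresses of
 * the kmalloc'd stack region, so a runaway local allocation is what
 * typically clobbers them. A hypothetical helper like
 *
 *      static void overflow_me(void) {
 *              char buf[STACK_SIZE];   // comparable to the whole stack
 *              bzero(buf, sizeof(buf));
 *      }
 *
 * would write past the bottom of the stack; the KASSERTs in
 * thread_checkstack() then fire at the next context switch or at
 * thread_exit(), rather than at the moment of the overflow.
 */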

/*
 * Create a thread. This is used both to create a first thread
 * for each CPU and to create subsequent forked threads.
 */
static
struct thread *
thread_create(const char *name)
{
        struct thread *thread;

        DEBUGASSERT(name != NULL);

        thread = kmalloc(sizeof(*thread));
        if (thread == NULL) {
                return NULL;
        }

        thread->t_name = kstrdup(name);
        if (thread->t_name == NULL) {
                kfree(thread);
                return NULL;
        }
        thread->t_wchan_name = "NEW";
        thread->t_state = S_READY;

        /* Thread subsystem fields */
        thread_machdep_init(&thread->t_machdep);
        threadlistnode_init(&thread->t_listnode, thread);
        thread->t_stack = NULL;
        thread->t_context = NULL;
        thread->t_cpu = NULL;
        /* Interrupt state fields */
        thread->t_in_interrupt = false;
        thread->t_curspl = IPL_HIGH;
        thread->t_iplhigh_count = 1; /* corresponding to t_curspl */
        thread->t_vmp_count = 0;
        thread->t_clone = 0;

        /* VM fields */
        thread->t_addrspace = NULL;

        /* VFS fields */
        thread->t_cwd = NULL;

        /* If you add to struct thread, be sure to initialize here */

        return thread;
}

/*
 * Create a CPU structure. This is used for the bootup CPU and
 * also for secondary CPUs.
 *
 * The hardware number (the number assigned by firmware or system
 * board config or whatnot) is tracked separately because it is not
 * necessarily anything sane or meaningful.
 */
struct cpu *
cpu_create(unsigned hardware_number)
{
        struct cpu *c;
        int result;
        char namebuf[16];

        c = kmalloc(sizeof(*c));
        if (c == NULL) {
                panic("cpu_create: Out of memory\n");
        }

        c->c_self = c;
        c->c_hardware_number = hardware_number;

        c->c_curthread = NULL;
        threadlist_init(&c->c_zombies);
        c->c_hardclocks = 0;

        c->c_isidle = false;
        threadlist_init(&c->c_runqueue);
        spinlock_init(&c->c_runqueue_lock);

        c->c_ipi_pending = 0;
        c->c_numshootdown = 0;
        spinlock_init(&c->c_ipi_lock);

        result = cpuarray_add(&allcpus, c, &c->c_number);
        if (result != 0) {
                panic("cpu_create: array_add: %s\n", strerror(result));
        }

        snprintf(namebuf, sizeof(namebuf), "<boot #%d>", c->c_number);
        c->c_curthread = thread_create(namebuf);
        if (c->c_curthread == NULL) {
                panic("cpu_create: thread_create failed\n");
        }

        if (c->c_number == 0) {
                /*
                 * Leave c->c_curthread->t_stack NULL for the boot
                 * cpu. This means we're using the boot stack, which
                 * can't be freed. (Exercise: what would it take to
                 * make it possible to free the boot stack?)
                 */
                /*c->c_curthread->t_stack = ... */
        }
        else {
                c->c_curthread->t_stack = kmalloc(STACK_SIZE);
                if (c->c_curthread->t_stack == NULL) {
                        panic("cpu_create: couldn't allocate stack");
                }
                thread_checkstack_init(c->c_curthread);
        }
        c->c_curthread->t_cpu = c;

        cpu_machdep_init(c);

        return c;
}

/*
 * Destroy a thread.
 *
 * This function cannot be called in the victim thread's own context.
 * Nor can it be called on a running thread.
 *
 * (Freeing the stack you're actually using to run is ... inadvisable.)
 */
static
void
thread_destroy(struct thread *thread)
{
        KASSERT(thread != curthread);
        KASSERT(thread->t_state != S_RUN);

        /*
         * If you add things to struct thread, be sure to clean them up
         * either here or in thread_exit(). (And not both...)
         */

        /* VFS fields, cleaned up in thread_exit */
        KASSERT(thread->t_cwd == NULL);

        /* VM fields, cleaned up in thread_exit */
        KASSERT(thread->t_addrspace == NULL);

        /* Thread subsystem fields */
        if (thread->t_stack != NULL) {
                kfree(thread->t_stack);
        }
        threadlistnode_cleanup(&thread->t_listnode);
        thread_machdep_cleanup(&thread->t_machdep);

        /* sheer paranoia */
        thread->t_wchan_name = "DESTROYED";

        kfree(thread->t_name);
        kfree(thread);
}

/*
 * Clean up zombies. (Zombies are threads that have exited but still
 * need to have thread_destroy called on them.)
 *
 * The list of zombies is per-cpu.
 */
static
void
exorcise(void)
{
        struct thread *z;

        while ((z = threadlist_remhead(&curcpu->c_zombies)) != NULL) {
                KASSERT(z != curthread);
                KASSERT(z->t_state == S_ZOMBIE);
                thread_destroy(z);
        }
}

/*
 * On panic, stop the thread system (as much as is reasonably
 * possible) to make sure we don't end up letting any other threads
 * run.
 */
void
thread_panic(void)
{
        /*
         * Kill off other CPUs.
         *
         * We could wait for them to stop, except that they might not.
         */
        ipi_broadcast(IPI_PANIC);

        /*
         * Drop runnable threads on the floor.
         *
         * Don't try to get the run queue lock; we might not be able
         * to. Instead, blat the list structure by hand, and take the
         * risk that it might not be quite atomic.
         */
        curcpu->c_runqueue.tl_count = 0;
        curcpu->c_runqueue.tl_head.tln_next = NULL;
        curcpu->c_runqueue.tl_tail.tln_prev = NULL;

        /*
         * Ideally, we want to make sure sleeping threads don't wake
         * up and start running. However, there's no good way to track
         * down all the wchans floating around the system. Another
         * alternative would be to set a global flag to make the wchan
         * wakeup operations do nothing; but that would mean we
         * ourselves couldn't sleep to wait for an I/O completion
         * interrupt, and we'd like to be able to do that if the
         * system isn't that badly hosed.
         *
         * So, do nothing else here.
         *
         * This may prove inadequate in practice and further steps
         * might be needed. It may also be necessary to go through and
         * forcibly unlock all locks or the like...
         */
}

/*
 * At system shutdown, ask the other CPUs to switch off.
 */
void
thread_shutdown(void)
{
        /*
         * Stop the other CPUs.
         *
         * We should probably wait for them to stop and shut them off
         * on the system board.
         */
        ipi_broadcast(IPI_OFFLINE);
}

/*
 * Thread system initialization.
 */
void
thread_bootstrap(void)
{
        struct cpu *bootcpu;
        struct thread *bootthread;

        cpuarray_init(&allcpus);

        /*
         * Create the cpu structure for the bootup CPU, the one we're
         * currently running on. Assume the hardware number is 0; that
         * might be updated later by mainbus-type code. This also
         * creates a thread structure for the first thread, the one
         * that's already implicitly running when the kernel is
         * started from the bootloader.
         */
        bootcpu = cpu_create(0);
        bootthread = bootcpu->c_curthread;

        /*
         * Initializing curcpu and curthread is machine-dependent
         * because either of curcpu and curthread might be defined in
         * terms of the other.
         */
        INIT_CURCPU(bootcpu, bootthread);

        /*
         * Now make sure both t_cpu and c_curthread are set. This
         * might be partially redundant with INIT_CURCPU depending on
         * how things are defined.
         */
        curthread->t_cpu = curcpu;
        curcpu->c_curthread = curthread;

        /* Done */
}

/*
 * New CPUs come here once MD initialization is finished. curthread
 * and curcpu should already be initialized.
 *
 * Other than clearing thread_start_cpus() to continue, we don't need
 * to do anything. The startup thread can just exit; we only need it
 * to be able to get into thread_switch() properly.
 */
void
cpu_hatch(unsigned software_number)
{
        KASSERT(curcpu != NULL);
        KASSERT(curthread != NULL);
        KASSERT(curcpu->c_number == software_number);

        spl0();

        kprintf("cpu%u: %s\n", software_number, cpu_identify());

        V(cpu_startup_sem);
        thread_exit();
}

/*
 * Start up secondary cpus. Called from boot().
 */
void
thread_start_cpus(void)
{
        unsigned i;

        kprintf("cpu0: %s\n", cpu_identify());

        cpu_startup_sem = sem_create("cpu_hatch", 0);
        mainbus_start_cpus();

        for (i=0; i<cpuarray_num(&allcpus) - 1; i++) {
                P(cpu_startup_sem);
        }
        sem_destroy(cpu_startup_sem);
        cpu_startup_sem = NULL;
}

/*
 * Make a thread runnable.
 *
 * targetcpu might be curcpu; it might not be, too.
 */
static
void
thread_make_runnable(struct thread *target, bool already_have_lock)
{
        struct cpu *targetcpu;
        bool isidle;

        /* Lock the run queue of the target thread's cpu. */
        targetcpu = target->t_cpu;

        if (already_have_lock) {
                /* The target thread's cpu should be already locked. */
                KASSERT(spinlock_do_i_hold(&targetcpu->c_runqueue_lock));
        }
        else {
                spinlock_acquire(&targetcpu->c_runqueue_lock);
        }

        isidle = targetcpu->c_isidle;
        threadlist_addtail(&targetcpu->c_runqueue, target);
        if (isidle) {
                /*
                 * Other processor is idle; send interrupt to make
                 * sure it unidles.
                 */
                ipi_send(targetcpu, IPI_UNIDLE);
        }

        if (!already_have_lock) {
                spinlock_release(&targetcpu->c_runqueue_lock);
        }
}

/*
 * Create a new thread based on an existing one.
 *
 * The new thread has name NAME, and starts executing in function
 * ENTRYPOINT. DATA1 and DATA2 are passed to ENTRYPOINT.
 *
 * The new thread is given no address space (the caller decides that)
 * but inherits its current working directory from the caller. It will
 * start on the same CPU as the caller, unless the scheduler
 * intervenes first.
 */
int
thread_fork(const char *name,
            void (*entrypoint)(void *data1, unsigned long data2),
            void *data1, unsigned long data2,
            struct thread **ret)
{
        struct thread *newthread;

        newthread = thread_create(name);
        if (newthread == NULL) {
                return ENOMEM;
        }

        /* Allocate a stack */
        newthread->t_stack = kmalloc(STACK_SIZE);
        if (newthread->t_stack == NULL) {
                thread_destroy(newthread);
                return ENOMEM;
        }
        thread_checkstack_init(newthread);

        /*
         * Now we clone various fields from the parent thread.
         */

        /* Thread subsystem fields */
        newthread->t_cpu = curthread->t_cpu;

        /* VM fields */
        /* do not clone address space -- let caller decide on that */

        /* VFS fields */
        if (curthread->t_cwd != NULL) {
                VOP_INCREF(curthread->t_cwd);
                newthread->t_cwd = curthread->t_cwd;
        }

        /*
         * Because new threads come out holding the cpu runqueue lock
         * (see notes at bottom of thread_switch), we need to account
         * for the spllower() that will be done releasing it.
         */
        newthread->t_iplhigh_count++;

        /* Set up the switchframe so entrypoint() gets called */
        switchframe_init(newthread, entrypoint, data1, data2);

        /* Lock the current cpu's run queue and make the new thread runnable */
        thread_make_runnable(newthread, false);

        /*
         * Return new thread structure if it's wanted. Note that using
         * the thread structure from the parent thread should be done
         * only with caution, because in general the child thread
         * might exit at any time.
         */
        if (ret != NULL) {
                *ret = newthread;
        }

        return 0;
}
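
/*
 * Example use of thread_fork (an illustrative sketch added here, not
 * part of the original file; the names "hello" and "err" are
 * hypothetical). The entry point must match
 * void (*)(void *, unsigned long):
 *
 *      static void hello(void *data1, unsigned long data2) {
 *              (void)data1;
 *              kprintf("hello from forked thread %lu\n", data2);
 *      }
 *
 *      int err = thread_fork("hello thread", hello, NULL, 42, NULL);
 *      if (err) {
 *              kprintf("thread_fork failed: %s\n", strerror(err));
 *      }
 *
 * Passing NULL for the last argument discards the struct thread
 * pointer; as noted above, holding onto it is only safe with care,
 * since the child may exit at any time.
 */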

/*
 * High level, machine-independent context switch code.
 *
 * The current thread is queued appropriately and its state is changed
 * to NEWSTATE; another thread to run is selected and switched to.
 *
 * If NEWSTATE is S_SLEEP, the thread is queued on the wait channel
 * WC. Otherwise WC should be NULL.
 */
static
void
thread_switch(threadstate_t newstate, struct wchan *wc)
{
        struct thread *cur, *next;
        int spl;

        DEBUGASSERT(curcpu->c_curthread == curthread);
        DEBUGASSERT(curthread->t_cpu == curcpu->c_self);

        /* Explicitly disable interrupts on this processor */
        spl = splhigh();

        cur = curthread;

        /*
         * If we're idle, return without doing anything. This happens
         * when the timer interrupt interrupts the idle loop.
         */
        if (curcpu->c_isidle) {
                splx(spl);
                return;
        }

        /* Check the stack guard band. */
        thread_checkstack(cur);

        /* Lock the run queue. */
        spinlock_acquire(&curcpu->c_runqueue_lock);

        /* Micro-optimization: if nothing to do, just return */
        if (newstate == S_READY && threadlist_isempty(&curcpu->c_runqueue)) {
                spinlock_release(&curcpu->c_runqueue_lock);
                splx(spl);
                return;
        }

        /* Put the thread in the right place. */
        switch (newstate) {
            case S_RUN:
                panic("Illegal S_RUN in thread_switch\n");
            case S_READY:
                thread_make_runnable(cur, true /*have lock*/);
                break;
            case S_SLEEP:
                cur->t_wchan_name = wc->wc_name;
                /*
                 * Add the thread to the list in the wait channel, and
                 * unlock same. To avoid a race with someone else
                 * calling wchan_wake*, we must keep the wchan locked
                 * from the point the caller of wchan_sleep locked it
                 * until the thread is on the list.
                 *
                 * (We could for symmetry relock the channel before
                 * returning from wchan_sleep, but we don't, for two
                 * reasons. One is that the caller is unlikely to need
                 * or want it locked and if it does can lock it itself
                 * without racing. Exercise: what's the other?)
                 */
                threadlist_addtail(&wc->wc_threads, cur);
                wchan_unlock(wc);
                break;
            case S_ZOMBIE:
                cur->t_wchan_name = "ZOMBIE";
                threadlist_addtail(&curcpu->c_zombies, cur);
                break;
        }
        cur->t_state = newstate;

        /*
         * Get the next thread. While there isn't one, call md_idle().
         * curcpu->c_isidle must be true when md_idle is
         * called. Unlock the runqueue while idling too, to make sure
         * things can be added to it.
         *
         * Note that we don't need to unlock the runqueue atomically
         * with idling; becoming unidle requires receiving an
         * interrupt (either a hardware interrupt or an interprocessor
         * interrupt from another cpu posting a wakeup) and idling
         * *is* atomic with respect to re-enabling interrupts.
         *
         * Note that c_isidle becomes true briefly even if we don't go
         * idle. However, because one is supposed to hold the runqueue
         * lock to look at it, this should not be visible or matter.
         */

        /* The current cpu is now idle. */
        curcpu->c_isidle = true;
        do {
                next = threadlist_remhead(&curcpu->c_runqueue);
                if (next == NULL) {
                        spinlock_release(&curcpu->c_runqueue_lock);
                        cpu_idle();
                        spinlock_acquire(&curcpu->c_runqueue_lock);
                }
        } while (next == NULL);
        curcpu->c_isidle = false;

        /*
         * Note that curcpu->c_curthread may be the same variable as
         * curthread and it may not be, depending on how curthread and
         * curcpu are defined by the MD code. We'll assign both and
         * assume the compiler will optimize one away if they're the
         * same.
         */
        curcpu->c_curthread = next;
        curthread = next;

        /* do the switch (in assembler in switch.S) */
        switchframe_switch(&cur->t_context, &next->t_context);

        /*
         * When we get to this point we are either running in the next
         * thread, or have come back to the same thread again,
         * depending on how you look at it. That is,
         * switchframe_switch returns immediately in another thread
         * context, which in general will be executing here with a
         * different stack and different values in the local
         * variables. (Although new threads go to thread_startup
         * instead.) But, later on when the processor, or some
         * processor, comes back to the previous thread, it's also
         * executing here with the *same* value in the local
         * variables.
         *
         * The upshot, however, is as follows:
         *
         *    - The thread now currently running is "cur", not "next",
         *      because when we return from switchframe_switch on the
         *      same stack, we're back to the thread that the
         *      switchframe_switch call switched away from, which is
         *      "cur".
         *
         *    - "cur" is _not_ the thread that just *called*
         *      switchframe_switch.
         *
         *    - If newstate is S_ZOMBIE we never get back here in that
         *      context at all.
         *
         *    - If the thread just chosen to run ("next") was a new
         *      thread, we don't get to this code again until
         *      *another* context switch happens, because when new
         *      threads return from switchframe_switch they teleport
         *      to thread_startup.
         *
         *    - At this point the thread whose stack we're now on may
         *      have been migrated to another cpu since it last ran.
         *
         * The above is inherently confusing and will probably take a
         * while to get used to.
         *
         * However, the important part is that code placed here, after
         * the call to switchframe_switch, does not necessarily run on
         * every context switch. Thus any such code must be either
         * skippable on some switches or also called from
         * thread_startup.
         */


        /* Clear the wait channel and set the thread state. */
        cur->t_wchan_name = NULL;
        cur->t_state = S_RUN;

        /* Unlock the run queue. */
        spinlock_release(&curcpu->c_runqueue_lock);

        /* If we have an address space, activate it in the MMU. */
        if (cur->t_addrspace != NULL) {
                as_activate(cur->t_addrspace);
        }

        /* Clean up dead threads. */
        exorcise();

        /* Turn interrupts back on. */
        splx(spl);
}

/*
 * This function is where new threads start running. The arguments
 * ENTRYPOINT, DATA1, and DATA2 are passed through from thread_fork.
 *
 * Because new code comes here from inside the middle of
 * thread_switch, the beginning part of this function must match the
 * tail of thread_switch.
 */
void
thread_startup(void (*entrypoint)(void *data1, unsigned long data2),
               void *data1, unsigned long data2)
{
        struct thread *cur;

        cur = curthread;

        /* Clear the wait channel and set the thread state. */
        cur->t_wchan_name = NULL;
        cur->t_state = S_RUN;

        /* Release the runqueue lock acquired in thread_switch. */
        spinlock_release(&curcpu->c_runqueue_lock);

        /* If we have an address space, activate it in the MMU. */
        if (cur->t_addrspace != NULL) {
                as_activate(cur->t_addrspace);
        }

        /* Clean up dead threads. */
        exorcise();

        /* Enable interrupts. */
        spl0();

#if OPT_SYNCHPROBS
        /* Yield a random number of times to get a good mix of threads. */
        {
                int i, n;
                n = random()%161 + random()%161;
                for (i=0; i<n; i++) {
                        thread_yield();
                }
        }
#endif

        /* Call the function. */
        entrypoint(data1, data2);

        /* Done. */
        thread_exit();
}

/*
 * Cause the current thread to exit.
 *
 * The parts of the thread structure we don't actually need to run
 * should be cleaned up right away. The rest has to wait until
 * thread_destroy is called from exorcise().
 *
 * Does not return.
 */
void
thread_exit(void)
{
        struct thread *cur;

        cur = curthread;

        /* VFS fields */
        if (cur->t_cwd) {
                VOP_DECREF(cur->t_cwd);
                cur->t_cwd = NULL;
        }

        /* VM fields */
        if (cur->t_addrspace) {
                /*
                 * Clear t_addrspace before calling as_destroy. Otherwise
                 * if as_destroy sleeps (which is quite possible) when we
                 * come back we'll call as_activate on a half-destroyed
                 * address space, which is usually messily fatal.
                 */
                struct addrspace *as = cur->t_addrspace;
                cur->t_addrspace = NULL;
                as_activate(NULL);
                as_destroy(as);
        }

        /* Check the stack guard band. */
        thread_checkstack(cur);

        /* Interrupts off on this processor */
        splhigh();
        thread_switch(S_ZOMBIE, NULL);
        panic("The zombie walks!\n");
}

/*
 * Yield the cpu to another process, but stay runnable.
 */
void
thread_yield(void)
{
        thread_switch(S_READY, NULL);
}

////////////////////////////////////////////////////////////

/*
 * Scheduler.
 *
 * This is called periodically from hardclock(). It should reshuffle
 * the current CPU's run queue by job priority.
 */

#if OPT_DEFAULTSCHEDULER
void
schedule(void)
{
        // 28 Feb 2012 : GWA : Leave the default scheduler alone!
}
#else
void
schedule(void)
{

}

#endif
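
/*
 * Sketch of the kind of reshuffling schedule() could do (an
 * assumption for illustration only, not the stock policy): a trivial
 * round-robin rotation moves the head of the local run queue to the
 * tail under the run queue lock, so no runnable thread sits at the
 * back forever.
 *
 *      struct thread *t;
 *
 *      spinlock_acquire(&curcpu->c_runqueue_lock);
 *      t = threadlist_remhead(&curcpu->c_runqueue);
 *      if (t != NULL) {
 *              threadlist_addtail(&curcpu->c_runqueue, t);
 *      }
 *      spinlock_release(&curcpu->c_runqueue_lock);
 *
 * A priority scheduler would instead reorder the queue by some
 * per-thread priority value (not shown in this file).
 */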

/*
 * Thread migration.
 *
 * This is also called periodically from hardclock(). If the current
 * CPU is busy and other CPUs are idle, or less busy, it should move
 * threads across to those other CPUs.
 *
 * Migrating threads isn't free because of cache affinity; a thread's
 * working cache set will end up having to be moved to the other CPU,
 * which is fairly slow. The tradeoff between this performance loss
 * and the performance loss due to underutilization of some CPUs is
 * something that needs to be tuned and probably is workload-specific.
 *
 * For here and now, because we know we're running on System/161 and
 * System/161 does not (yet) model such cache effects, we'll be very
 * aggressive.
 */
void
thread_consider_migration(void)
{
        unsigned my_count, total_count, one_share, to_send;
        unsigned i, numcpus;
        struct cpu *c;
        struct threadlist victims;
        struct thread *t;

        my_count = total_count = 0;
        numcpus = cpuarray_num(&allcpus);
        for (i=0; i<numcpus; i++) {
                c = cpuarray_get(&allcpus, i);
                spinlock_acquire(&c->c_runqueue_lock);
                total_count += c->c_runqueue.tl_count;
                if (c == curcpu->c_self) {
                        my_count = c->c_runqueue.tl_count;
                }
                spinlock_release(&c->c_runqueue_lock);
        }

        one_share = DIVROUNDUP(total_count, numcpus);
        if (my_count < one_share) {
                return;
        }

        to_send = my_count - one_share;
        threadlist_init(&victims);
        spinlock_acquire(&curcpu->c_runqueue_lock);
        for (i=0; i<to_send; i++) {
                t = threadlist_remtail(&curcpu->c_runqueue);
                threadlist_addhead(&victims, t);
        }
        spinlock_release(&curcpu->c_runqueue_lock);

        for (i=0; i < numcpus && to_send > 0; i++) {
                c = cpuarray_get(&allcpus, i);
                if (c == curcpu->c_self) {
                        continue;
                }
                spinlock_acquire(&c->c_runqueue_lock);
                while (c->c_runqueue.tl_count < one_share && to_send > 0) {
                        t = threadlist_remhead(&victims);
                        /*
                         * Ordinarily, curthread will not appear on
                         * the run queue. However, it can under the
                         * following circumstances:
                         *   - it went to sleep;
                         *   - the processor became idle, so it
                         *     remained curthread;
                         *   - it was reawakened, so it was put on the
                         *     run queue;
                         *   - and the processor hasn't fully unidled
                         *     yet, so all these things are still true.
                         *
                         * If the timer interrupt happens at (almost)
                         * exactly the proper moment, we can come here
                         * while things are in this state and see
                         * curthread. However, *migrating* curthread
                         * can cause bad things to happen (Exercise:
                         * Why? And what?) so shuffle it to the end of
                         * the list and decrement to_send in order to
                         * skip it. Then it goes back on our own run
                         * queue below.
                         */
                        if (t == curthread) {
                                threadlist_addtail(&victims, t);
                                to_send--;
                                continue;
                        }

                        t->t_cpu = c;
                        threadlist_addtail(&c->c_runqueue, t);
                        DEBUG(DB_THREADS,
                              "Migrated thread %s: cpu %u -> %u",
                              t->t_name, curcpu->c_number, c->c_number);
                        to_send--;
                        if (c->c_isidle) {
                                /*
                                 * Other processor is idle; send
                                 * interrupt to make sure it unidles.
                                 */
                                ipi_send(c, IPI_UNIDLE);
                        }
                }
                spinlock_release(&c->c_runqueue_lock);
        }

        /*
         * Because the code above isn't atomic, the thread counts may have
         * changed while we were working and we may end up with leftovers.
         * Don't panic; just put them back on our own run queue.
         */
        if (!threadlist_isempty(&victims)) {
                spinlock_acquire(&curcpu->c_runqueue_lock);
                while ((t = threadlist_remhead(&victims)) != NULL) {
                        threadlist_addtail(&curcpu->c_runqueue, t);
                }
                spinlock_release(&curcpu->c_runqueue_lock);
        }

        KASSERT(threadlist_isempty(&victims));
        threadlist_cleanup(&victims);
}
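
/*
 * Worked example of the balancing arithmetic above (illustrative
 * numbers, not from the source): with total_count = 10 runnable
 * threads spread across numcpus = 4 CPUs, DIVROUNDUP(10, 4) gives
 * one_share = 3. A CPU whose own queue holds my_count = 6 threads
 * therefore offers to_send = 6 - 3 = 3 victims to its less-loaded
 * peers, while a CPU holding only 2 returns early and keeps them.
 */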

////////////////////////////////////////////////////////////

/*
 * Wait channel functions
 */

/*
 * Create a wait channel. NAME is a symbolic string name for it.
 * This is what's displayed by ps -alx in Unix.
 *
 * NAME should generally be a string constant. If it isn't, alternate
 * arrangements should be made to free it after the wait channel is
 * destroyed.
 */
struct wchan *
wchan_create(const char *name)
{
        struct wchan *wc;

        wc = kmalloc(sizeof(*wc));
        if (wc == NULL) {
                return NULL;
        }
        spinlock_init(&wc->wc_lock);
        threadlist_init(&wc->wc_threads);
        wc->wc_name = name;
        return wc;
}

/*
 * Destroy a wait channel. Must be empty and unlocked.
 * (The corresponding cleanup functions require this.)
 */
void
wchan_destroy(struct wchan *wc)
{
        spinlock_cleanup(&wc->wc_lock);
        threadlist_cleanup(&wc->wc_threads);
        kfree(wc);
}

/*
 * Lock and unlock a wait channel, respectively.
 */
void
wchan_lock(struct wchan *wc)
{
        spinlock_acquire(&wc->wc_lock);
}

void
wchan_unlock(struct wchan *wc)
{
        spinlock_release(&wc->wc_lock);
}

/*
 * Yield the cpu to another process, and go to sleep, on the specified
 * wait channel WC. Calling wakeup on the channel will make the thread
 * runnable again. The channel must be locked, and will be *unlocked*
 * upon return.
 */
void
wchan_sleep(struct wchan *wc)
{
        /* may not sleep in an interrupt handler */
        KASSERT(!curthread->t_in_interrupt);

        thread_switch(S_SLEEP, wc);
}
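
/*
 * Typical sleep/wakeup pattern (a sketch added for clarity; "ready"
 * stands in for whatever condition the caller is actually waiting
 * on). The channel is locked before the condition is tested so that
 * a wakeup cannot slip in between the test and the sleep;
 * wchan_sleep() returns with the channel unlocked, so it must be
 * re-locked before re-testing:
 *
 *      wchan_lock(wc);
 *      while (!ready) {
 *              wchan_sleep(wc);        // sleeps; returns unlocked
 *              wchan_lock(wc);
 *      }
 *      wchan_unlock(wc);
 *
 * The waking side sets the condition and then calls wchan_wakeone(wc)
 * or wchan_wakeall(wc); see below.
 */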

/*
 * Wake up one thread sleeping on a wait channel.
 */
void
wchan_wakeone(struct wchan *wc)
{
        struct thread *target;

        /* Lock the channel and grab a thread from it */
        spinlock_acquire(&wc->wc_lock);
        target = threadlist_remhead(&wc->wc_threads);
        /*
         * Nobody else can wake up this thread now, so we don't need
         * to hang onto the lock.
         */
        spinlock_release(&wc->wc_lock);

        if (target == NULL) {
                /* Nobody was sleeping. */
                return;
        }

        thread_make_runnable(target, false);
}

/*
 * Wake up all threads sleeping on a wait channel.
 */
void
wchan_wakeall(struct wchan *wc)
{
        struct thread *target;
        struct threadlist list;

        threadlist_init(&list);

        /*
         * Lock the channel and grab all the threads, moving them to a
         * private list.
         */
        spinlock_acquire(&wc->wc_lock);
        while ((target = threadlist_remhead(&wc->wc_threads)) != NULL) {
                threadlist_addtail(&list, target);
        }
        /*
         * Nobody else can wake up these threads now, so we don't need
         * to hang onto the lock.
         */
        spinlock_release(&wc->wc_lock);

        /*
         * We could conceivably sort by cpu first to cause fewer lock
         * ops and fewer IPIs, but for now at least don't bother. Just
         * make each thread runnable.
         */
        while ((target = threadlist_remhead(&list)) != NULL) {
                thread_make_runnable(target, false);
        }

        threadlist_cleanup(&list);
}

/*
 * Return nonzero if there are no threads sleeping on the channel.
 * This is meant to be used only for diagnostic purposes.
 */
bool
wchan_isempty(struct wchan *wc)
{
        bool ret;

        spinlock_acquire(&wc->wc_lock);
        ret = threadlist_isempty(&wc->wc_threads);
        spinlock_release(&wc->wc_lock);

        return ret;
}

////////////////////////////////////////////////////////////

/*
 * Machine-independent IPI handling
 */

/*
 * Send an IPI (inter-processor interrupt) to the specified CPU.
 */
void
ipi_send(struct cpu *target, int code)
{
        KASSERT(code >= 0 && code < 32);

        spinlock_acquire(&target->c_ipi_lock);
        target->c_ipi_pending |= (uint32_t)1 << code;
        mainbus_send_ipi(target);
        spinlock_release(&target->c_ipi_lock);
}

void
ipi_broadcast(int code)
{
        unsigned i;
        struct cpu *c;

        for (i=0; i < cpuarray_num(&allcpus); i++) {
                c = cpuarray_get(&allcpus, i);
                if (c != curcpu->c_self) {
                        ipi_send(c, code);
                }
        }
}

void
ipi_tlbshootdown_by_num(unsigned cpunum, const struct tlbshootdown *mapping)
{
        struct cpu *target;

        target = cpuarray_get(&allcpus, cpunum);
        ipi_tlbshootdown(target, mapping);
}

void
ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping)
{
        int n;

        spinlock_acquire(&target->c_ipi_lock);

        n = target->c_numshootdown;
        if (n == TLBSHOOTDOWN_MAX) {
                target->c_numshootdown = TLBSHOOTDOWN_ALL;
        }
        else {
                target->c_shootdown[n] = *mapping;
                target->c_numshootdown = n+1;
        }

        target->c_ipi_pending |= (uint32_t)1 << IPI_TLBSHOOTDOWN;
        mainbus_send_ipi(target);

        spinlock_release(&target->c_ipi_lock);
}

void
interprocessor_interrupt(void)
{
        uint32_t bits;
        int i;

        spinlock_acquire(&curcpu->c_ipi_lock);
        bits = curcpu->c_ipi_pending;

        if (bits & (1U << IPI_PANIC)) {
                /* panic on another cpu - just stop dead */
                cpu_halt();
        }
        if (bits & (1U << IPI_OFFLINE)) {
                /* offline request */
                spinlock_acquire(&curcpu->c_runqueue_lock);
                if (!curcpu->c_isidle) {
                        kprintf("cpu%d: offline: warning: not idle\n",
                                curcpu->c_number);
                }
                spinlock_release(&curcpu->c_runqueue_lock);
                kprintf("cpu%d: offline.\n", curcpu->c_number);
                cpu_halt();
        }
        if (bits & (1U << IPI_UNIDLE)) {
                /*
                 * The cpu has already unidled itself to take the
                 * interrupt; don't need to do anything else.
                 */
        }
        if (bits & (1U << IPI_TLBSHOOTDOWN)) {
                if (curcpu->c_numshootdown == TLBSHOOTDOWN_ALL) {
                        vm_tlbshootdown_all();
                }
                else {
                        for (i=0; i<curcpu->c_numshootdown; i++) {
                                vm_tlbshootdown(&curcpu->c_shootdown[i]);
                        }
                }
                curcpu->c_numshootdown = 0;
        }

        curcpu->c_ipi_pending = 0;
        spinlock_release(&curcpu->c_ipi_lock);
}