bfs1patch.patch
Index: linux-2.6.32.27-bfs/Documentation/sysctl/kernel.txt
===================================================================
--- linux-2.6.32.27-bfs.orig/Documentation/sysctl/kernel.txt 2009-12-03 21:39:54.000000000 +1100
+++ linux-2.6.32.27-bfs/Documentation/sysctl/kernel.txt 2010-12-16 16:07:44.247620714 +1100
@@ -29,6 +29,7 @@ show up in /proc/sys/kernel:
- domainname
- hostname
- hotplug
+- iso_cpu
- java-appletviewer [ binfmt_java, obsolete ]
- java-interpreter [ binfmt_java, obsolete ]
- kstack_depth_to_print [ X86 only ]
@@ -51,6 +52,7 @@ show up in /proc/sys/kernel:
- randomize_va_space
- real-root-dev ==> Documentation/initrd.txt
- reboot-cmd [ SPARC only ]
+- rr_interval
- rtsig-max
- rtsig-nr
- sem
@@ -209,6 +211,16 @@ Default value is "/sbin/hotplug".
==============================================================
+iso_cpu: (BFS CPU scheduler only).
+
+This sets the percentage of cpu time that unprivileged SCHED_ISO tasks
+can run at effectively realtime priority, averaged over a rolling five
+seconds across the -whole- system, meaning all cpus.
+
+Set to 70 (percent) by default.
+
+==============================================================
+
l2cr: (PPC only)
This flag controls the L2 cache of G3 processor boards. If
@@ -383,6 +395,20 @@ rebooting. ???
==============================================================
+rr_interval: (BFS CPU scheduler only)
+
+This is the smallest duration that any cpu process scheduling unit
+will run for. Increasing this value can increase throughput of cpu
+bound tasks substantially but at the expense of increased latencies
+overall. Conversely, decreasing it will decrease average and maximum
+latencies but at the expense of throughput. This value is in
+milliseconds and the default value chosen depends on the number of
+cpus available at scheduler initialisation with a minimum of 6.
+
+Valid values are from 1-1000, the range enforced by the sysctl handler.
+
+==============================================================
+
rtsig-max & rtsig-nr:
The file rtsig-max can be used to tune the maximum number
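Both tunables documented above land in /proc/sys/kernel, so they can be
inspected and adjusted with plain file I/O. A minimal sketch, assuming a
kernel built with CONFIG_SCHED_BFS (the paths follow from the sysctl
table added to kernel/sysctl.c later in this patch; writing needs root):

/* bfs_tune.c - read and adjust the BFS sysctls described above. */
#include <stdio.h>

static int read_sysctl(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

static int write_sysctl(const char *path, int val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	fclose(f);
	return 0;
}

int main(void)
{
	printf("rr_interval = %d ms\n",
	       read_sysctl("/proc/sys/kernel/rr_interval"));
	printf("iso_cpu     = %d %%\n",
	       read_sysctl("/proc/sys/kernel/iso_cpu"));

	/* Trade some latency for throughput; out-of-range writes fail. */
	if (write_sysctl("/proc/sys/kernel/rr_interval", 30))
		perror("rr_interval");
	return 0;
}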
Index: linux-2.6.32.27-bfs/include/linux/init_task.h
===================================================================
--- linux-2.6.32.27-bfs.orig/include/linux/init_task.h 2009-12-03 21:40:09.000000000 +1100
+++ linux-2.6.32.27-bfs/include/linux/init_task.h 2010-12-16 16:07:44.247620714 +1100
@@ -119,6 +119,69 @@ extern struct cred init_cred;
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
+#ifdef CONFIG_SCHED_BFS
+#define INIT_TASK(tsk) \
+{ \
+ .state = 0, \
+ .stack = &init_thread_info, \
+ .usage = ATOMIC_INIT(2), \
+ .flags = PF_KTHREAD, \
+ .lock_depth = -1, \
+ .prio = NORMAL_PRIO, \
+ .static_prio = MAX_PRIO-20, \
+ .normal_prio = NORMAL_PRIO, \
+ .deadline = 0, \
+ .policy = SCHED_NORMAL, \
+ .cpus_allowed = CPU_MASK_ALL, \
+ .mm = NULL, \
+ .active_mm = &init_mm, \
+ .run_list = LIST_HEAD_INIT(tsk.run_list), \
+ .time_slice = HZ, \
+ .tasks = LIST_HEAD_INIT(tsk.tasks), \
+ .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
+ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
+ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
+ .real_parent = &tsk, \
+ .parent = &tsk, \
+ .children = LIST_HEAD_INIT(tsk.children), \
+ .sibling = LIST_HEAD_INIT(tsk.sibling), \
+ .group_leader = &tsk, \
+ .real_cred = &init_cred, \
+ .cred = &init_cred, \
+ .cred_guard_mutex = \
+ __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
+ .comm = "swapper", \
+ .thread = INIT_THREAD, \
+ .fs = &init_fs, \
+ .files = &init_files, \
+ .signal = &init_signals, \
+ .sighand = &init_sighand, \
+ .nsproxy = &init_nsproxy, \
+ .pending = { \
+ .list = LIST_HEAD_INIT(tsk.pending.list), \
+ .signal = {{0}}}, \
+ .blocked = {{0}}, \
+ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
+ .journal_info = NULL, \
+ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+ .fs_excl = ATOMIC_INIT(0), \
+ .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
+ .pids = { \
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
+ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
+ }, \
+ .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
+ INIT_IDS \
+ INIT_PERF_EVENTS(tsk) \
+ INIT_TRACE_IRQFLAGS \
+ INIT_LOCKDEP \
+ INIT_FTRACE_GRAPH \
+ INIT_TRACE_RECURSION \
+ INIT_TASK_RCU_PREEMPT(tsk) \
+}
+#else /* CONFIG_SCHED_BFS */
#define INIT_TASK(tsk) \
{ \
.state = 0, \
@@ -185,7 +248,7 @@ extern struct cred init_cred;
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
}
-
+#endif /* CONFIG_SCHED_BFS */
#define INIT_CPU_TIMERS(cpu_timers) \
{ \
Index: linux-2.6.32.27-bfs/include/linux/sched.h
===================================================================
--- linux-2.6.32.27-bfs.orig/include/linux/sched.h 2010-12-16 16:06:47.662212109 +1100
+++ linux-2.6.32.27-bfs/include/linux/sched.h 2011-01-01 15:00:05.064923587 +1100
@@ -36,8 +36,15 @@
#define SCHED_FIFO 1
#define SCHED_RR 2
#define SCHED_BATCH 3
-/* SCHED_ISO: reserved but not implemented yet */
+/* SCHED_ISO: Implemented on BFS only */
#define SCHED_IDLE 5
+#ifdef CONFIG_SCHED_BFS
+#define SCHED_ISO 4
+#define SCHED_IDLEPRIO SCHED_IDLE
+#define SCHED_MAX (SCHED_IDLEPRIO)
+#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX)
+#endif
+
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK 0x40000000
@@ -260,9 +267,6 @@ extern asmlinkage void schedule_tail(str
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);
-extern int runqueue_is_locked(int cpu);
-extern void task_rq_unlock_wait(struct task_struct *p);
-
extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
@@ -1230,17 +1234,31 @@ struct task_struct {
int lock_depth; /* BKL lock depth */
+#ifndef CONFIG_SCHED_BFS
#ifdef CONFIG_SMP
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
int oncpu;
#endif
#endif
+#else /* CONFIG_SCHED_BFS */
+ int oncpu;
+#endif
int prio, static_prio, normal_prio;
unsigned int rt_priority;
+#ifdef CONFIG_SCHED_BFS
+ int time_slice;
+ u64 deadline;
+ struct list_head run_list;
+ u64 last_ran;
+ u64 sched_time; /* sched_clock time spent running */
+
+ unsigned long rt_timeout;
+#else /* CONFIG_SCHED_BFS */
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+#endif
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
@@ -1339,6 +1357,9 @@ struct task_struct {
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
cputime_t utime, stime, utimescaled, stimescaled;
+#ifdef CONFIG_SCHED_BFS
+ unsigned long utime_pc, stime_pc;
+#endif
cputime_t gtime;
cputime_t prev_utime, prev_stime;
unsigned long nvcsw, nivcsw; /* context switch counts */
@@ -1549,6 +1570,64 @@ struct task_struct {
#endif /* CONFIG_TRACING */
};
+#ifdef CONFIG_SCHED_BFS
+extern int grunqueue_is_locked(void);
+extern void grq_unlock_wait(void);
+#define tsk_seruntime(t) ((t)->sched_time)
+#define tsk_rttimeout(t) ((t)->rt_timeout)
+#define task_rq_unlock_wait(tsk) grq_unlock_wait()
+
+static inline void set_oom_timeslice(struct task_struct *p)
+{
+ p->time_slice = HZ;
+}
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+}
+
+#define runqueue_is_locked(cpu) grunqueue_is_locked()
+
+static inline void print_scheduler_version(void)
+{
+ printk(KERN_INFO"BFS CPU scheduler v0.363 by Con Kolivas.\n");
+}
+
+static inline int iso_task(struct task_struct *p)
+{
+ return (p->policy == SCHED_ISO);
+}
+#else
+extern int runqueue_is_locked(int cpu);
+extern void task_rq_unlock_wait(struct task_struct *p);
+#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
+#define tsk_rttimeout(t) ((t)->rt.timeout)
+
+static inline void sched_exit(struct task_struct *p)
+{
+}
+
+static inline void set_oom_timeslice(struct task_struct *p)
+{
+ p->rt.time_slice = HZ;
+}
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+ p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
+}
+
+static inline void print_scheduler_version(void)
+{
+ printk(KERN_INFO"CFS CPU scheduler.\n");
+}
+
+static inline int iso_task(struct task_struct *p)
+{
+ return 0;
+}
+#endif
+
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
@@ -1567,9 +1646,19 @@ struct task_struct {
#define MAX_USER_RT_PRIO 100
#define MAX_RT_PRIO MAX_USER_RT_PRIO
+#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
+#ifdef CONFIG_SCHED_BFS
+#define PRIO_RANGE (40)
+#define MAX_PRIO (MAX_RT_PRIO + PRIO_RANGE)
+#define ISO_PRIO (MAX_RT_PRIO)
+#define NORMAL_PRIO (MAX_RT_PRIO + 1)
+#define IDLE_PRIO (MAX_RT_PRIO + 2)
+#define PRIO_LIMIT ((IDLE_PRIO) + 1)
+#else /* CONFIG_SCHED_BFS */
#define MAX_PRIO (MAX_RT_PRIO + 40)
-#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
+#define NORMAL_PRIO DEFAULT_PRIO
+#endif /* CONFIG_SCHED_BFS */
static inline int rt_prio(int prio)
{
@@ -1879,7 +1968,7 @@ task_sched_runtime(struct task_struct *t
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_BFS)
extern void sched_exec(void);
#else
#define sched_exec() {}
@@ -2035,6 +2124,9 @@ extern void wake_up_new_task(struct task
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void sched_fork(struct task_struct *p, int clone_flags);
+#ifdef CONFIG_SCHED_BFS
+extern void sched_exit(struct task_struct *p);
+#endif
extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
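Userspace headers have no definition for SCHED_ISO, since mainline only
reserves the policy number; the value 4 comes from the sched.h hunk
above. A minimal sketch of opting the calling process into isochronous
scheduling (a hypothetical demo, not part of the patch; on a kernel
without CONFIG_SCHED_BFS the call simply fails with EINVAL):

/* iso_demo.c - request SCHED_ISO for the current process. */
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

#ifndef SCHED_ISO
#define SCHED_ISO 4	/* value assigned when CONFIG_SCHED_BFS is set */
#endif

int main(void)
{
	struct sched_param sp;

	memset(&sp, 0, sizeof(sp));
	sp.sched_priority = 0;	/* SCHED_ISO carries no rt_priority */

	/* No privilege needed, per the documentation hunk earlier. */
	if (sched_setscheduler(0, SCHED_ISO, &sp) == -1) {
		fprintf(stderr, "SCHED_ISO not accepted: %s\n",
			strerror(errno));
		return 1;
	}
	printf("now scheduled as SCHED_ISO\n");
	return 0;
}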
Index: linux-2.6.32.27-bfs/kernel/sysctl.c
===================================================================
--- linux-2.6.32.27-bfs.orig/kernel/sysctl.c 2010-12-16 16:06:47.687212289 +1100
+++ linux-2.6.32.27-bfs/kernel/sysctl.c 2010-12-16 16:07:44.248620721 +1100
@@ -105,7 +105,12 @@ static int zero;
static int __maybe_unused one = 1;
static int __maybe_unused two = 2;
static unsigned long one_ul = 1;
-static int one_hundred = 100;
+static int __maybe_unused one_hundred = 100;
+#ifdef CONFIG_SCHED_BFS
+extern int rr_interval;
+extern int sched_iso_cpu;
+static int __read_mostly one_thousand = 1000;
+#endif
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
@@ -243,7 +248,7 @@ static struct ctl_table root_table[] = {
{ .ctl_name = 0 }
};
-#ifdef CONFIG_SCHED_DEBUG
+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_BFS)
static int min_sched_granularity_ns = 100000; /* 100 usecs */
static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
static int min_wakeup_granularity_ns; /* 0 usecs */
@@ -251,6 +256,7 @@ static int max_wakeup_granularity_ns = N
#endif
static struct ctl_table kern_table[] = {
+#ifndef CONFIG_SCHED_BFS
{
.ctl_name = CTL_UNNUMBERED,
.procname = "sched_child_runs_first",
@@ -379,6 +385,7 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+#endif /* !CONFIG_SCHED_BFS */
#ifdef CONFIG_PROVE_LOCKING
{
.ctl_name = CTL_UNNUMBERED,
@@ -830,6 +837,30 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
+#ifdef CONFIG_SCHED_BFS
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "rr_interval",
+ .data = &rr_interval,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &one,
+ .extra2 = &one_thousand,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "iso_cpu",
+ .data = &sched_iso_cpu,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &one_hundred,
+ },
+#endif
#if defined(CONFIG_S390) && defined(CONFIG_SMP)
{
.ctl_name = KERN_SPIN_RETRY,
Index: linux-2.6.32.27-bfs/kernel/sched_bfs.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.32.27-bfs/kernel/sched_bfs.c 2011-01-01 15:00:43.208785908 +1100
@@ -0,0 +1,6889 @@
+/*
+ * kernel/sched_bfs.c, was sched.c
+ *
+ * Kernel scheduler and related syscalls
+ *
+ * Copyright (C) 1991-2002 Linus Torvalds
+ *
+ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
+ * make semaphores SMP safe
+ * 1998-11-19 Implemented schedule_timeout() and related stuff
+ * by Andrea Arcangeli
+ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
+ * hybrid priority-list and round-robin design with
+ * an array-switch method of distributing timeslices
+ * and per-CPU runqueues. Cleanups and useful suggestions
+ * by Davide Libenzi, preemptible kernel bits by Robert Love.
+ * 2003-09-03 Interactivity tuning by Con Kolivas.
+ * 2004-04-02 Scheduler domains code by Nick Piggin
+ * 2007-04-15 Work begun on replacing all interactivity tuning with a
+ * fair scheduling design by Con Kolivas.
+ * 2007-05-05 Load balancing (smp-nice) and other improvements
+ * by Peter Williams
+ * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
+ * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
+ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
+ * Thomas Gleixner, Mike Kravetz
+ * now Brainfuck deadline scheduling policy by Con Kolivas deletes
+ * a whole lot of those previous things.
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/nmi.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <linux/highmem.h>
+#include <linux/smp_lock.h>
+#include <asm/mmu_context.h>
+#include <linux/interrupt.h>
+#include <linux/capability.h>
+#include <linux/completion.h>
+#include <linux/kernel_stat.h>
+#include <linux/debug_locks.h>
+#include <linux/perf_event.h>
+#include <linux/security.h>
+#include <linux/notifier.h>
+#include <linux/profile.h>
+#include <linux/freezer.h>
+#include <linux/vmalloc.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/timer.h>
+#include <linux/rcupdate.h>
+#include <linux/cpu.h>
+#include <linux/cpuset.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+#include <linux/kthread.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/syscalls.h>
+#include <linux/times.h>
+#include <linux/tsacct_kern.h>
+#include <linux/kprobes.h>
+#include <linux/delayacct.h>
+#include <linux/log2.h>
+#include <linux/bootmem.h>
+#include <linux/ftrace.h>
+
+#include <asm/tlb.h>
+#include <asm/unistd.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/sched.h>
+
+#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
+#define rt_task(p) rt_prio((p)->prio)
+#define rt_queue(rq) rt_prio((rq)->rq_prio)
+#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
+#define is_rt_policy(policy) ((policy) == SCHED_FIFO || \
+ (policy) == SCHED_RR)
+#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy))
+#define idleprio_task(p) unlikely((p)->policy == SCHED_IDLEPRIO)
+#define iso_task(p) unlikely((p)->policy == SCHED_ISO)
+#define iso_queue(rq) unlikely((rq)->rq_policy == SCHED_ISO)
+#define ISO_PERIOD ((5 * HZ * num_online_cpus()) + 1)
+
+/*
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+ * and back.
+ */
+#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
+#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
+#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
+#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
+#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
+#define SCHED_PRIO(p) ((p)+MAX_RT_PRIO)
+
+/*
+ * Some helpers for converting to/from various scales. Use shifts to get
+ * approximate multiples of ten for less overhead.
+ */
+#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
+#define JIFFY_NS (1000000000 / HZ)
+#define HALF_JIFFY_NS (1000000000 / HZ / 2)
+#define HALF_JIFFY_US (1000000 / HZ / 2)
+#define MS_TO_NS(TIME) ((TIME) << 20)
+#define MS_TO_US(TIME) ((TIME) << 10)
+#define US_TO_NS(TIME) ((TIME) >> 10)
+#define NS_TO_MS(TIME) ((TIME) >> 20)
+#define NS_TO_US(TIME) ((TIME) >> 10)
+
+#define RESCHED_US (100) /* Reschedule if less than this many μs left */
+
+/*
+ * This is the time all tasks within the same priority round robin.
+ * Value is in ms and set to a minimum of 6ms. Scales with number of cpus.
+ * Tunable via /proc interface.
+ */
+int rr_interval __read_mostly = 6;
+
+/*
+ * sched_iso_cpu - sysctl which determines the percentage of cpu time that
+ * SCHED_ISO tasks may run as realtime tasks, averaged over a rolling five
+ * seconds. This is the total over all online cpus.
+ */
+int sched_iso_cpu __read_mostly = 70;
+
+/*
+ * The relative length of deadline for each priority (nice) level.
+ */
+static int prio_ratios[PRIO_RANGE] __read_mostly;
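+/*
+ * prio_ratios[] is initialised further down this file: each successive
+ * nice level is given a deadline ratio roughly 10% longer than the one
+ * before it, so higher nice values earn proportionally later deadlines.
+ */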
+
+/*
+ * The quota handed out to tasks of all priority levels when refilling their
+ * time_slice.
+ */
+static inline unsigned long timeslice(void)
+{
+ return MS_TO_US(rr_interval);
+}
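+/*
+ * Worked example: with the default rr_interval of 6, timeslice() hands
+ * out MS_TO_US(6) = 6 << 10 = 6144us of quota, i.e. roughly 6ms, per
+ * the cheap approximate shift-based conversions above.
+ */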
+
+/*
+ * The global runqueue data that all CPUs work off. Data is protected either
+ * by the global grq lock, or the discrete lock that precedes the data in this
+ * struct.
+ */
+struct global_rq {
+ spinlock_t lock;
+ unsigned long nr_running;
+ unsigned long nr_uninterruptible;
+ unsigned long long nr_switches;
+ struct list_head queue[PRIO_LIMIT];
+ DECLARE_BITMAP(prio_bitmap, PRIO_LIMIT + 1);
+#ifdef CONFIG_SMP
+ unsigned long qnr; /* queued not running */
+ cpumask_t cpu_idle_map;
+ int idle_cpus;
+#endif
+ u64 niffies; /* Nanosecond jiffies */
+ unsigned long last_jiffy; /* Last jiffy we updated niffies */
+
+ spinlock_t iso_lock;
+ int iso_ticks;
+ int iso_refractory;
+};
+
+/* There can be only one */
+static struct global_rq grq;
+
+/*
+ * This is the main, per-CPU runqueue data structure.
+ * This data should only be modified by the local cpu.
+ */
+struct rq {
+#ifdef CONFIG_SMP
+#ifdef CONFIG_NO_HZ
+ unsigned char in_nohz_recently;
+#endif
+#endif
+
+ struct task_struct *curr, *idle;
+ struct mm_struct *prev_mm;
+
+ /* Stored data about rq->curr to work outside grq lock */
+ u64 rq_deadline;
+ unsigned int rq_policy;
+ int rq_time_slice;
+ u64 rq_last_ran;
+ int rq_prio;
+ int rq_running; /* There is a task running */
+
+ /* Accurate timekeeping data */
+ u64 timekeep_clock;
+ unsigned long user_pc, nice_pc, irq_pc, softirq_pc, system_pc,
+ iowait_pc, idle_pc;
+ atomic_t nr_iowait;
+
+#ifdef CONFIG_SMP
+ int cpu; /* cpu of this runqueue */
+ int online;
+
+ struct root_domain *rd;
+ struct sched_domain *sd;
+ unsigned long *cpu_locality; /* CPU relative cache distance */
+#ifdef CONFIG_SCHED_SMT
+ int (*siblings_idle)(unsigned long cpu);
+ /* See if all smt siblings are idle */
+ cpumask_t smt_siblings;
+#endif
+#ifdef CONFIG_SCHED_MC
+ int (*cache_idle)(unsigned long cpu);
+ /* See if all cache siblings are idle */
+ cpumask_t cache_siblings;
+#endif
+ u64 last_niffy; /* Last time this RQ updated grq.niffies */
+#endif
+ u64 clock, old_clock, last_tick;
+ int dither;
+
+#ifdef CONFIG_SCHEDSTATS
+
+ /* latency stats */
+ struct sched_info rq_sched_info;
+ unsigned long long rq_cpu_time;
+ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
+
+ /* sys_sched_yield() stats */
+ unsigned int yld_count;
+
+ /* schedule() stats */
+ unsigned int sched_switch;
+ unsigned int sched_count;
+ unsigned int sched_goidle;
+
+ /* try_to_wake_up() stats */
+ unsigned int ttwu_count;
+ unsigned int ttwu_local;
+
+ /* BKL stats */
+ unsigned int bkl_count;
+#endif
+};
+
+static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_MUTEX(sched_hotcpu_mutex);
+
+#ifdef CONFIG_SMP
+
+/*
+ * We add the notion of a root-domain which will be used to define per-domain
+ * variables. Each exclusive cpuset essentially defines an island domain by
+ * fully partitioning the member cpus from any other cpuset. Whenever a new
+ * exclusive cpuset is created, we also create and attach a new root-domain
+ * object.
+ *
+ */
+struct root_domain {
+ atomic_t refcount;
+ cpumask_var_t span;
+ cpumask_var_t online;
+
+ /*
+ * The "RT overload" flag: it gets set if a CPU has more than
+ * one runnable RT task.
+ */
+ cpumask_var_t rto_mask;
+ atomic_t rto_count;
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ /*
+ * Preferred wake up cpu nominated by sched_mc balance that will be
+ * used when most cpus are idle in the system indicating overall very
+ * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2)
+ */
+ unsigned int sched_mc_preferred_wakeup_cpu;
+#endif
+};
+
+/*
+ * By default the system creates a single root-domain with all cpus as
+ * members (mimicking the global state we have today).
+ */
+static struct root_domain def_root_domain;
+#endif
+
+/*
+ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
+ * See detach_destroy_domains: synchronize_sched for details.
+ *
+ * The domain tree of any CPU may only be accessed from within
+ * preempt-disabled sections.
+ */
+#define for_each_domain(cpu, __sd) \
+ for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
+
+static inline void update_rq_clock(struct rq *rq);
+
+/*
+ * Sanity check should sched_clock return bogus values. We make sure it does
+ * not appear to go backwards, and use jiffies to determine the maximum it
+ * could possibly have increased. At least 1us will have always passed so we
+ * use that when we don't trust the difference.
+ */
+static inline void niffy_diff(s64 *niff_diff, int jiff_diff)
+{
+ unsigned long max_diff;
+
+ /* Round up to the nearest tick for maximum */
+ max_diff = JIFFIES_TO_NS(jiff_diff + 1);
+
+ if (unlikely(*niff_diff < 1 || *niff_diff > max_diff))
+ *niff_diff = US_TO_NS(1);
+}
+
+#ifdef CONFIG_SMP
+#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
+#define this_rq() (&__get_cpu_var(runqueues))
+#define task_rq(p) cpu_rq(task_cpu(p))
+#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+static inline int cpu_of(struct rq *rq)
+{
+ return rq->cpu;
+}
+
+/*
+ * Niffies are a globally increasing nanosecond counter. Whenever a runqueue
+ * clock is updated with the grq.lock held, it is an opportunity to update the
+ * niffies value. Any CPU can update it by adding how much its clock has
+ * increased since it last updated niffies, minus any added niffies by other
+ * CPUs.
+ */
+static inline void update_clocks(struct rq *rq)
+{
+ s64 ndiff;
+ long jdiff;
+
+ update_rq_clock(rq);
+ ndiff = rq->clock - rq->old_clock;
+ /* old_clock is only updated when we are updating niffies */
+ rq->old_clock = rq->clock;
+ ndiff -= grq.niffies - rq->last_niffy;
+ jdiff = jiffies - grq.last_jiffy;
+ niffy_diff(&ndiff, jdiff);
+ grq.last_jiffy += jdiff;
+ grq.niffies += ndiff;
+ rq->last_niffy = grq.niffies;
+}
+#else /* CONFIG_SMP */
+static struct rq *uprq;
+#define cpu_rq(cpu) (uprq)
+#define this_rq() (uprq)
+#define task_rq(p) (uprq)
+#define cpu_curr(cpu) ((uprq)->curr)
+static inline int cpu_of(struct rq *rq)
+{
+ return 0;
+}
+
+static inline void update_clocks(struct rq *rq)
+{
+ s64 ndiff;
+ long jdiff;
+
+ update_rq_clock(rq);
+ ndiff = rq->clock - rq->old_clock;
+ rq->old_clock = rq->clock;
+ jdiff = jiffies - grq.last_jiffy;
+ niffy_diff(&ndiff, jdiff);
+ grq.last_jiffy += jdiff;
+ grq.niffies += ndiff;
+}
+#endif
+#define raw_rq() (&__raw_get_cpu_var(runqueues))
+
+#include "sched_stats.h"
+
+#ifndef prepare_arch_switch
+# define prepare_arch_switch(next) do { } while (0)
+#endif
+#ifndef finish_arch_switch
+# define finish_arch_switch(prev) do { } while (0)
+#endif
+
+/*
+ * All common locking functions performed on grq.lock. rq->clock is local to
+ * the CPU accessing it so it can be modified just with interrupts disabled
+ * when we're not updating niffies.
+ * Looking up task_rq must be done under grq.lock to be safe.
+ */
+static inline void update_rq_clock(struct rq *rq)
+{
+ rq->clock = sched_clock_cpu(cpu_of(rq));
+}
+
+static inline int task_running(struct task_struct *p)
+{
+ return p->oncpu;
+}
+
+static inline void grq_lock(void)
+ __acquires(grq.lock)
+{
+ spin_lock(&grq.lock);
+}
+
+static inline void grq_unlock(void)
+ __releases(grq.lock)
+{
+ spin_unlock(&grq.lock);
+}
+
+static inline void grq_lock_irq(void)
+ __acquires(grq.lock)
+{
+ spin_lock_irq(&grq.lock);
+}
+
+static inline void time_lock_grq(struct rq *rq)
+ __acquires(grq.lock)
+{
+ grq_lock();
+ update_clocks(rq);
+}
+
+static inline void grq_unlock_irq(void)
+ __releases(grq.lock)
+{
+ spin_unlock_irq(&grq.lock);
+}
+
+static inline void grq_lock_irqsave(unsigned long *flags)
+ __acquires(grq.lock)
+{
+ spin_lock_irqsave(&grq.lock, *flags);
+}
+
+static inline void grq_unlock_irqrestore(unsigned long *flags)
+ __releases(grq.lock)
+{
+ spin_unlock_irqrestore(&grq.lock, *flags);
+}
+
+static inline struct rq
+*task_grq_lock(struct task_struct *p, unsigned long *flags)
+ __acquires(grq.lock)
+{
+ grq_lock_irqsave(flags);
+ return task_rq(p);
+}
+
+static inline struct rq
+*time_task_grq_lock(struct task_struct *p, unsigned long *flags)
+ __acquires(grq.lock)
+{
+ struct rq *rq = task_grq_lock(p, flags);
+ update_clocks(rq);
+ return rq;
+}
+
+static inline struct rq *task_grq_lock_irq(struct task_struct *p)
+ __acquires(grq.lock)
+{
+ grq_lock_irq();
+ return task_rq(p);
+}
+
+static inline void time_task_grq_lock_irq(struct task_struct *p)
+ __acquires(grq.lock)
+{
+ struct rq *rq = task_grq_lock_irq(p);
+ update_clocks(rq);
+}
+
+static inline void task_grq_unlock_irq(void)
+ __releases(grq.lock)
+{
+ grq_unlock_irq();
+}
+
+static inline void task_grq_unlock(unsigned long *flags)
+ __releases(grq.lock)
+{
+ grq_unlock_irqrestore(flags);
+}
+
+/**
+ * grunqueue_is_locked
+ *
+ * Returns true if the global runqueue is locked.
+ * This interface allows printk to be called with the runqueue lock
+ * held and know whether or not it is OK to wake up the klogd.
+ */
+inline int grunqueue_is_locked(void)
+{
+ return spin_is_locked(&grq.lock);
+}
+
+inline void grq_unlock_wait(void)
+ __releases(grq.lock)
+{
+ smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+ spin_unlock_wait(&grq.lock);
+}
+
+static inline void time_grq_lock(struct rq *rq, unsigned long *flags)
+ __acquires(grq.lock)
+{
+ local_irq_save(*flags);
+ time_lock_grq(rq);
+}
+
+static inline struct rq *__task_grq_lock(struct task_struct *p)
+ __acquires(grq.lock)
+{
+ grq_lock();
+ return task_rq(p);
+}
+
+static inline void __task_grq_unlock(void)
+ __releases(grq.lock)
+{
+ grq_unlock();
+}
+
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
+{
+}
+
+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ /* this is a valid case when another task releases the spinlock */
+ grq.lock.owner = current;
+#endif
+ /*
+ * If we are tracking spinlock dependencies then we have to
+ * fix up the runqueue lock - which gets 'carried over' from
+ * prev into current:
+ */
+ spin_acquire(&grq.lock.dep_map, 0, 0, _THIS_IP_);
+
+ grq_unlock_irq();
+}
+
+#else /* __ARCH_WANT_UNLOCKED_CTXSW */
+
+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
+{
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+ grq_unlock_irq();
+#else
+ grq_unlock();
+#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+{
+ smp_wmb();
+#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+ local_irq_enable();
+#endif
+}
+#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
+
+static inline int deadline_before(u64 deadline, u64 time)
+{
+ return (deadline < time);
+}
+
+static inline int deadline_after(u64 deadline, u64 time)
+{
+ return (deadline > time);
+}
+
+/*
+ * A task that is queued but not running will be on the grq run list.
+ * A task that is not running or queued will not be on the grq run list.
+ * A task that is currently running will have ->oncpu set but not on the
+ * grq run list.
+ */
+static inline int task_queued(struct task_struct *p)
+{
+ return (!list_empty(&p->run_list));
+}
+
+/*
+ * Removing from the global runqueue. Enter with grq locked.
+ */
+static void dequeue_task(struct task_struct *p)
+{
+ list_del_init(&p->run_list);