/* file: kernel/sched/fair.c */
void trigger_load_balance(struct rq *rq)
{
        /*
         * Don't need to rebalance while attached to NULL domain or
         * runqueue CPU is not active
         */
        if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq))))
                return;

        /*
         * Check whether it is time for the next periodic balance: once
         * jiffies reaches rq->next_balance, raise SCHED_SOFTIRQ. The
         * handler for this softirq, run_rebalance_domains, is registered
         * in init_sched_fair_class.
         */
        if (time_after_eq(jiffies, rq->next_balance))
                raise_softirq(SCHED_SOFTIRQ);

        /* Kick off nohz idle balancing if needed */
        nohz_balancer_kick(rq);
}
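For reference, this is (abridged) how init_sched_fair_class wires run_rebalance_domains up as the SCHED_SOFTIRQ handler; the exact body varies slightly across kernel versions:

/* file: kernel/sched/fair.c (abridged) */
__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
        /* SCHED_SOFTIRQ is handled by run_rebalance_domains */
        open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ_COMMON
        nohz.next_balance = jiffies;
        zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOFS);
#endif
#endif /* SMP */
}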
/* file: kernel/sched/fair.c */
static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
{
        struct rq *this_rq = this_rq();
        enum cpu_idle_type idle =
                this_rq->idle_balance ? CPU_IDLE : CPU_NOT_IDLE;

        /*
         * If this CPU has a pending nohz_balance_kick, then do the
         * balancing on behalf of the other idle CPUs whose ticks are
         * stopped. Do nohz_idle_balance *before* rebalance_domains to
         * give the idle CPUs a chance to load balance. Else we may
         * load balance only within the local sched_domain hierarchy
         * and abort nohz_idle_balance altogether if we pull some load.
         */
        if (nohz_idle_balance(this_rq, idle))
                return;

        /* normal load balance */
        update_blocked_averages(this_rq->cpu);
        rebalance_domains(this_rq, idle);
}
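nohz_idle_balance itself is short: it only proceeds when kick_ilb() has left a pending kick on this runqueue and this CPU really is idle, then walks the other nohz-idle CPUs. A rough sketch, abridged from the same area of fair.c (flag handling differs a little between versions):

/* file: kernel/sched/fair.c (abridged) */
static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
{
        unsigned int flags = this_rq->nohz_idle_balance;

        /* No pending kick from kick_ilb(): nothing to do for others */
        if (!flags)
                return false;

        this_rq->nohz_idle_balance = 0;

        /* Only an idle CPU balances on behalf of other idle CPUs */
        if (idle != CPU_IDLE)
                return false;

        /* Rebalance each idle CPU in nohz.idle_cpus_mask in turn */
        _nohz_idle_balance(this_rq, flags, idle);

        return true;
}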
/* file: kernel/sched/fair.c */
/*
 * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
 * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
 */
static void kick_ilb(unsigned int flags)
{
        int ilb_cpu;

        /* Pick an idle CPU to act as the idle load balancer (ILB) */
        ilb_cpu = find_new_ilb();

        /* No suitable idle CPU: nothing to kick */
        if (ilb_cpu >= nr_cpu_ids)
                return;

        /*
         * Notify the chosen idle CPU via IPI; its csd callback raises
         * SCHED_SOFTIRQ on that CPU.
         */
        smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
}
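find_new_ilb() is equally simple; abridged, it scans the nohz-idle CPUs that are also in the HK_FLAG_MISC housekeeping set and returns the first one that is still idle, or nr_cpu_ids if none exists (hence the check above):

/* file: kernel/sched/fair.c (abridged) */
static inline int find_new_ilb(void)
{
        int ilb;

        /* Consider only idle CPUs allowed to do housekeeping work */
        for_each_cpu_and(ilb, nohz.idle_cpus_mask,
                         housekeeping_cpumask(HK_FLAG_MISC)) {
                if (idle_cpu(ilb))
                        return ilb;
        }

        /* No idle CPU found */
        return nr_cpu_ids;
}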
/* file: kernel/sched/fair.c */
/*
 * newidle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
 *
 * Returns:
 *   < 0 - we released the lock and there are !fair tasks present
 *     0 - failed, no new tasks
 *   > 0 - success, new (fair) tasks present
 */
static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
{
        unsigned long next_balance = jiffies + HZ;
        int this_cpu = this_rq->cpu;
        struct sched_domain *sd;
        int pulled_task = 0;
        u64 curr_cost = 0;

        /* ... (early-exit checks and rq->lock handling elided) ... */

        for_each_domain(this_cpu, sd) {
                int continue_balancing = 1;
                u64 t0, domain_cost;

                /*
                 * Stop if balancing would cost more than the remaining
                 * expected idle time: it would outlast the idle period
                 * it is trying to fill.
                 */
                if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
                        update_next_balance(sd, &next_balance);
                        break;
                }

                if (sd->flags & SD_BALANCE_NEWIDLE) {
                        t0 = sched_clock_cpu(this_cpu);

                        /* Try to pull tasks from a busier CPU in this domain */
                        pulled_task = load_balance(this_cpu, this_rq, sd,
                                                   CPU_NEWLY_IDLE,
                                                   &continue_balancing);

                        /* Track how expensive newidle balancing is here */
                        domain_cost = sched_clock_cpu(this_cpu) - t0;
                        if (domain_cost > sd->max_newidle_lb_cost)
                                sd->max_newidle_lb_cost = domain_cost;

                        curr_cost += domain_cost;
                }

                update_next_balance(sd, &next_balance);

                /*
                 * Stop searching for tasks to pull if there are
                 * now runnable tasks on this rq.
                 */
                if (pulled_task || this_rq->nr_running > 0)
                        break;
        }

        /* ... (next_balance / max_idle_balance_cost bookkeeping elided) ... */

        return pulled_task;
}
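The update_next_balance() helper used above just pulls next_balance forward to the earliest pending balance time among the domains visited; abridged:

/* file: kernel/sched/fair.c (abridged) */
static inline void
update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
{
        unsigned long interval, next;

        /* Called from the newly-idle path, so treat the CPU as not busy */
        interval = get_sd_balance_interval(sd, 0);
        next = sd->last_balance + interval;

        /* Keep the earliest deadline seen so far */
        if (time_after(*next_balance, next))
                *next_balance = next;
}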
/* file: kernel/sched/fair.c */
struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev,
                                        struct rq_flags *rf)
{
        struct cfs_rq *cfs_rq = &rq->cfs;
        struct sched_entity *se;
        struct task_struct *p;
        int new_tasks;

again:
        if (!sched_fair_runnable(rq))
                goto idle;

        /* ... (main pick_next_entity() logic elided) ... */

idle:
        if (!rf)
                return NULL;

        /* Trigger newly-idle balancing: try to pull tasks from other CPUs */
        new_tasks = newidle_balance(rq, rf);

        /*
         * Because newidle_balance() releases (and re-acquires) rq->lock, it is
         * possible for any higher priority task to appear. In that case we
         * must re-start the pick_next_entity() loop.
         */
        if (new_tasks < 0)
                return RETRY_TASK;

        /* If we pulled some fair tasks, retry the pick */
        if (new_tasks > 0)
                goto again;

        return NULL;
}
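For context, RETRY_TASK is handled by the caller in the core scheduler, pick_next_task() in kernel/sched/core.c: when the fair-class fast path gets RETRY_TASK back, it falls through to the full walk over all scheduling classes. A rough, heavily abridged sketch (the real fast-path condition also checks prev's class, and details shift between versions):

/* file: kernel/sched/core.c (abridged sketch) */
static inline struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
        struct task_struct *p;

        /* Fast path: all runnable tasks belong to the fair class */
        if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
                p = pick_next_task_fair(rq, prev, rf);

                /*
                 * newidle_balance() dropped rq->lock and a !fair task
                 * appeared: redo the pick across all classes.
                 */
                if (unlikely(p == RETRY_TASK))
                        goto restart;
                if (p)
                        return p;
        }

restart:
        /* ... (walk every sched_class in priority order; the idle class
         * always yields a task, so this never returns NULL) ... */
}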