Commit 262f7eed authored by Linus Torvalds
Browse files

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "This fixes an ABI bug introduced this cycle, plus fixes a throttling
  bug"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/core: Fix uclamp ABI bug, clean up and robustify sched_read_attr() ABI logic and code
  sched/fair: Don't assign runtime for throttled cfs_rq
parents 13133f93 1251201c
...@@ -5105,37 +5105,40 @@ out_unlock: ...@@ -5105,37 +5105,40 @@ out_unlock:
return retval; return retval;
} }
static int sched_read_attr(struct sched_attr __user *uattr, /*
struct sched_attr *attr, * Copy the kernel size attribute structure (which might be larger
unsigned int usize) * than what user-space knows about) to user-space.
*
* Note that all cases are valid: user-space buffer can be larger or
* smaller than the kernel-space buffer. The usual case is that both
* have the same size.
*/
static int
sched_attr_copy_to_user(struct sched_attr __user *uattr,
struct sched_attr *kattr,
unsigned int usize)
{ {
int ret; unsigned int ksize = sizeof(*kattr);
if (!access_ok(uattr, usize)) if (!access_ok(uattr, usize))
return -EFAULT; return -EFAULT;
/* /*
* If we're handed a smaller struct than we know of, * sched_getattr() ABI forwards and backwards compatibility:
* ensure all the unknown bits are 0 - i.e. old *
* user-space does not get uncomplete information. * If usize == ksize then we just copy everything to user-space and all is good.
*
* If usize < ksize then we only copy as much as user-space has space for,
* this keeps ABI compatibility as well. We skip the rest.
*
* If usize > ksize then user-space is using a newer version of the ABI,
* which part the kernel doesn't know about. Just ignore it - tooling can
* detect the kernel's knowledge of attributes from the attr->size value
* which is set to ksize in this case.
*/ */
if (usize < sizeof(*attr)) { kattr->size = min(usize, ksize);
unsigned char *addr;
unsigned char *end;
addr = (void *)attr + usize; if (copy_to_user(uattr, kattr, kattr->size))
end = (void *)attr + sizeof(*attr);
for (; addr < end; addr++) {
if (*addr)
return -EFBIG;
}
attr->size = usize;
}
ret = copy_to_user(uattr, attr, attr->size);
if (ret)
return -EFAULT; return -EFAULT;
return 0; return 0;
...@@ -5145,20 +5148,18 @@ static int sched_read_attr(struct sched_attr __user *uattr, ...@@ -5145,20 +5148,18 @@ static int sched_read_attr(struct sched_attr __user *uattr,
* sys_sched_getattr - similar to sched_getparam, but with sched_attr * sys_sched_getattr - similar to sched_getparam, but with sched_attr
* @pid: the pid in question. * @pid: the pid in question.
* @uattr: structure containing the extended parameters. * @uattr: structure containing the extended parameters.
* @size: sizeof(attr) for fwd/bwd comp. * @usize: sizeof(attr) that user-space knows about, for forwards and backwards compatibility.
* @flags: for future extension. * @flags: for future extension.
*/ */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
unsigned int, size, unsigned int, flags) unsigned int, usize, unsigned int, flags)
{ {
struct sched_attr attr = { struct sched_attr kattr = { };
.size = sizeof(struct sched_attr),
};
struct task_struct *p; struct task_struct *p;
int retval; int retval;
if (!uattr || pid < 0 || size > PAGE_SIZE || if (!uattr || pid < 0 || usize > PAGE_SIZE ||
size < SCHED_ATTR_SIZE_VER0 || flags) usize < SCHED_ATTR_SIZE_VER0 || flags)
return -EINVAL; return -EINVAL;
rcu_read_lock(); rcu_read_lock();
...@@ -5171,25 +5172,24 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, ...@@ -5171,25 +5172,24 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
if (retval) if (retval)
goto out_unlock; goto out_unlock;
attr.sched_policy = p->policy; kattr.sched_policy = p->policy;
if (p->sched_reset_on_fork) if (p->sched_reset_on_fork)
attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
if (task_has_dl_policy(p)) if (task_has_dl_policy(p))
__getparam_dl(p, &attr); __getparam_dl(p, &kattr);
else if (task_has_rt_policy(p)) else if (task_has_rt_policy(p))
attr.sched_priority = p->rt_priority; kattr.sched_priority = p->rt_priority;
else else
attr.sched_nice = task_nice(p); kattr.sched_nice = task_nice(p);
#ifdef CONFIG_UCLAMP_TASK #ifdef CONFIG_UCLAMP_TASK
attr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
attr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif #endif
rcu_read_unlock(); rcu_read_unlock();
retval = sched_read_attr(uattr, &attr, size); return sched_attr_copy_to_user(uattr, &kattr, usize);
return retval;
out_unlock: out_unlock:
rcu_read_unlock(); rcu_read_unlock();
......
...@@ -4470,6 +4470,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) ...@@ -4470,6 +4470,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
if (likely(cfs_rq->runtime_remaining > 0)) if (likely(cfs_rq->runtime_remaining > 0))
return; return;
if (cfs_rq->throttled)
return;
/* /*
* if we're unable to extend our runtime we resched so that the active * if we're unable to extend our runtime we resched so that the active
* hierarchy can be throttled * hierarchy can be throttled
...@@ -4673,6 +4675,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, ...@@ -4673,6 +4675,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
if (!cfs_rq_throttled(cfs_rq)) if (!cfs_rq_throttled(cfs_rq))
goto next; goto next;
/* By the above check, this should never be true */
SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
runtime = -cfs_rq->runtime_remaining + 1; runtime = -cfs_rq->runtime_remaining + 1;
if (runtime > remaining) if (runtime > remaining)
runtime = remaining; runtime = remaining;
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment