Commit 291abfea authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Unbalanced locking in mwifiex_process_country_ie, from Brian Norris.

 2) Fix thermal zone registration in iwlwifi, from Andrei
    Otcheretianski.

 3) Fix double free_irq in sgi ioc3 eth, from Thomas Bogendoerfer.

 4) Use after free in mptcp, from Florian Westphal.

 5) Use after free in wireguard's root_remove_peer_lists, from Eric
    Dumazet.

 6) Properly access packet headers in bonding alb code, from Eric
    Dumazet.

 7) Fix data race in skb_queue_len(), from Qian Cai.

 8) Fix regression in r8169 on some chips, from Heiner Kallweit.

 9) Fix XDP program ref counting in hv_netvsc, from Haiyang Zhang.

10) Certain kinds of set link netlink operations can cause a NULL deref
    in the ipv6 addrconf code. Fix from Eric Dumazet.

11) Don't cancel uninitialized work queue in drop monitor, from Ido
    Schimmel.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (84 commits)
  net: thunderx: use proper interface type for RGMII
  mt76: mt7615: fix max_nss in mt7615_eeprom_parse_hw_cap
  bpf: Improve bucket_log calculation logic
  selftests/bpf: Test freeing sockmap/sockhash with a socket in it
  bpf, sockhash: Synchronize_rcu before free'ing map
  bpf, sockmap: Don't sleep while holding RCU lock on tear-down
  bpftool: Don't crash on missing xlated program instructions
  bpf, sockmap: Check update requirements after locking
  drop_monitor: Do not cancel uninitialized work item
  mlxsw: spectrum_dpipe: Add missing error path
  mlxsw: core: Add validation of hardware device types for MGPIR register
  mlxsw: spectrum_router: Clear offload indication from IPv6 nexthops on abort
  selftests: mlxsw: Add test cases for local table route replacement
  mlxsw: spectrum_router: Prevent incorrect replacement of local table routes
  net: dsa: microchip: enable module autoprobe
  ipv6/addrconf: fix potential NULL deref in inet6_set_link_af()
  dpaa_eth: support all modes with rate adapting PHYs
  net: stmmac: update pci platform data to use phy_interface
  net: stmmac: xgmac: fix missing IFF_MULTICAST check in dwxgmac2_set_filter
  net: stmmac: fix missing IFF_MULTICAST check in dwmac4_set_filter
  ...
parents d4f309ca 29ca3b31
......@@ -1000,8 +1000,10 @@ static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack)
{
int cpu;
if (!monitor_hw)
if (!monitor_hw) {
NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled");
return;
}
monitor_hw = false;
......
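
The drop_monitor hunk above turns a silent early return into a braced block that also fills in an extack message, so userspace learns why the stop request was a no-op. A minimal userspace sketch of the same idea, with an out-parameter standing in for the netlink extack (the names below are illustrative, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

static bool monitor_hw;

/* When stopping is a no-op, report a human-readable reason through an
 * out-parameter (standing in for NL_SET_ERR_MSG_MOD/extack) instead of
 * returning silently. */
static void monitor_stop(const char **errmsg)
{
        if (!monitor_hw) {
                *errmsg = "Hardware monitoring already disabled";
                return;
        }
        monitor_hw = false;
}

int main(void)
{
        const char *errmsg = NULL;

        monitor_stop(&errmsg);
        if (errmsg)
                printf("stop failed: %s\n", errmsg);
        return 0;
}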
......@@ -234,7 +234,6 @@ static void sock_map_free(struct bpf_map *map)
int i;
synchronize_rcu();
rcu_read_lock();
raw_spin_lock_bh(&stab->lock);
for (i = 0; i < stab->map.max_entries; i++) {
struct sock **psk = &stab->sks[i];
......@@ -243,13 +242,15 @@ static void sock_map_free(struct bpf_map *map)
sk = xchg(psk, NULL);
if (sk) {
lock_sock(sk);
rcu_read_lock();
sock_map_unref(sk, psk);
rcu_read_unlock();
release_sock(sk);
}
}
raw_spin_unlock_bh(&stab->lock);
rcu_read_unlock();
/* wait for psock readers accessing its map link */
synchronize_rcu();
bpf_map_area_free(stab->sks);
......@@ -416,14 +417,16 @@ static int sock_map_update_elem(struct bpf_map *map, void *key,
ret = -EINVAL;
goto out;
}
if (!sock_map_sk_is_suitable(sk) ||
sk->sk_state != TCP_ESTABLISHED) {
if (!sock_map_sk_is_suitable(sk)) {
ret = -EOPNOTSUPP;
goto out;
}
sock_map_sk_acquire(sk);
ret = sock_map_update_common(map, idx, sk, flags);
if (sk->sk_state != TCP_ESTABLISHED)
ret = -EOPNOTSUPP;
else
ret = sock_map_update_common(map, idx, sk, flags);
sock_map_sk_release(sk);
out:
fput(sock->file);
......@@ -739,14 +742,16 @@ static int sock_hash_update_elem(struct bpf_map *map, void *key,
ret = -EINVAL;
goto out;
}
if (!sock_map_sk_is_suitable(sk) ||
sk->sk_state != TCP_ESTABLISHED) {
if (!sock_map_sk_is_suitable(sk)) {
ret = -EOPNOTSUPP;
goto out;
}
sock_map_sk_acquire(sk);
ret = sock_hash_update_common(map, key, sk, flags);
if (sk->sk_state != TCP_ESTABLISHED)
ret = -EOPNOTSUPP;
else
ret = sock_hash_update_common(map, key, sk, flags);
sock_map_sk_release(sk);
out:
fput(sock->file);
......@@ -859,19 +864,22 @@ static void sock_hash_free(struct bpf_map *map)
int i;
synchronize_rcu();
rcu_read_lock();
for (i = 0; i < htab->buckets_num; i++) {
bucket = sock_hash_select_bucket(htab, i);
raw_spin_lock_bh(&bucket->lock);
hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
hlist_del_rcu(&elem->node);
lock_sock(elem->sk);
rcu_read_lock();
sock_map_unref(elem->sk, elem);
rcu_read_unlock();
release_sock(elem->sk);
}
raw_spin_unlock_bh(&bucket->lock);
}
rcu_read_unlock();
/* wait for psock readers accessing its map link */
synchronize_rcu();
bpf_map_area_free(htab->buckets);
kfree(htab);
......
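
Two things happen in the sockmap/sockhash hunks above: the teardown paths take rcu_read_lock() only around the non-sleeping sock_map_unref() call, because lock_sock() can sleep and must not run under the RCU read lock; and the update paths check sk_state == TCP_ESTABLISHED only after sock_map_sk_acquire(), so the state cannot change between the check and the update. A small userspace sketch of the second pattern, re-validating a precondition only once the lock is held (pthreads stand in for the socket lock; none of these names are kernel APIs):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct entry {
        pthread_mutex_t lock;
        int established;   /* stands in for sk->sk_state == TCP_ESTABLISHED */
        int value;
};

/* Re-check the precondition only after the lock is held, so another
 * thread cannot invalidate it between the check and the update. */
static int entry_update(struct entry *e, int value)
{
        int ret;

        pthread_mutex_lock(&e->lock);
        if (!e->established) {
                ret = -EOPNOTSUPP;
        } else {
                e->value = value;
                ret = 0;
        }
        pthread_mutex_unlock(&e->lock);
        return ret;
}

int main(void)
{
        struct entry e = { .lock = PTHREAD_MUTEX_INITIALIZER, .established = 1 };

        printf("update: %d\n", entry_update(&e, 42));
        return 0;
}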
......@@ -5718,6 +5718,9 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
struct nlattr *tb[IFLA_INET6_MAX + 1];
int err;
if (!idev)
return -EAFNOSUPPORT;
if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
BUG();
......
......@@ -24,57 +24,12 @@
#define MPTCP_SAME_STATE TCP_MAX_STATES
static void __mptcp_close(struct sock *sk, long timeout);
static const struct proto_ops *tcp_proto_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
if (sk->sk_family == AF_INET6)
return &inet6_stream_ops;
struct mptcp6_sock {
struct mptcp_sock msk;
struct ipv6_pinfo np;
};
#endif
return &inet_stream_ops;
}
/* MP_CAPABLE handshake failed, convert msk to plain tcp, replacing
* socket->sk and stream ops and destroying msk
* return the msk socket, as we can't access msk anymore after this function
* completes
* Called with msk lock held, releases such lock before returning
*/
static struct socket *__mptcp_fallback_to_tcp(struct mptcp_sock *msk,
struct sock *ssk)
{
struct mptcp_subflow_context *subflow;
struct socket *sock;
struct sock *sk;
sk = (struct sock *)msk;
sock = sk->sk_socket;
subflow = mptcp_subflow_ctx(ssk);
/* detach the msk socket */
list_del_init(&subflow->node);
sock_orphan(sk);
sock->sk = NULL;
/* socket is now TCP */
lock_sock(ssk);
sock_graft(ssk, sock);
if (subflow->conn) {
/* We can't release the ULP data on a live socket,
* restore the tcp callback
*/
mptcp_subflow_tcp_fallback(ssk, subflow);
sock_put(subflow->conn);
subflow->conn = NULL;
}
release_sock(ssk);
sock->ops = tcp_proto_ops(ssk);
/* destroy the left-over msk sock */
__mptcp_close(sk, 0);
return sock;
}
/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
* completed yet or has failed, return the subflow socket.
......@@ -93,10 +48,6 @@ static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk)
return msk->first && !sk_is_mptcp(msk->first);
}
/* if the mp_capable handshake has failed, it fallbacks msk to plain TCP,
* releases the socket lock and returns a reference to the now TCP socket.
* Otherwise returns NULL
*/
static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
sock_owned_by_me((const struct sock *)msk);
......@@ -105,15 +56,11 @@ static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
return NULL;
if (msk->subflow) {
/* the first subflow is an active connection, discart the
* paired socket
*/
msk->subflow->sk = NULL;
sock_release(msk->subflow);
msk->subflow = NULL;
release_sock((struct sock *)msk);
return msk->subflow;
}
return __mptcp_fallback_to_tcp(msk, msk->first);
return NULL;
}
static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk)
......@@ -640,12 +587,14 @@ static void mptcp_subflow_shutdown(struct sock *ssk, int how)
}
/* Called with msk lock held, releases such lock before returning */
static void __mptcp_close(struct sock *sk, long timeout)
static void mptcp_close(struct sock *sk, long timeout)
{
struct mptcp_subflow_context *subflow, *tmp;
struct mptcp_sock *msk = mptcp_sk(sk);
LIST_HEAD(conn_list);
lock_sock(sk);
mptcp_token_destroy(msk->token);
inet_sk_state_store(sk, TCP_CLOSE);
......@@ -662,12 +611,6 @@ static void __mptcp_close(struct sock *sk, long timeout)
sk_common_release(sk);
}
static void mptcp_close(struct sock *sk, long timeout)
{
lock_sock(sk);
__mptcp_close(sk, timeout);
}
static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
......@@ -691,6 +634,30 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
{
unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);
return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
#endif
struct sock *mptcp_sk_clone_lock(const struct sock *sk)
{
struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
if (!nsk)
return NULL;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
if (nsk->sk_family == AF_INET6)
inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif
return nsk;
}
static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
bool kern)
{
......@@ -721,7 +688,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
lock_sock(sk);
local_bh_disable();
new_mptcp_sock = sk_clone_lock(sk, GFP_ATOMIC);
new_mptcp_sock = mptcp_sk_clone_lock(sk);
if (!new_mptcp_sock) {
*err = -ENOBUFS;
local_bh_enable();
......@@ -1270,8 +1237,7 @@ int mptcp_proto_v6_init(void)
strcpy(mptcp_v6_prot.name, "MPTCPv6");
mptcp_v6_prot.slab = NULL;
mptcp_v6_prot.destroy = mptcp_v6_destroy;
mptcp_v6_prot.obj_size = sizeof(struct mptcp_sock) +
sizeof(struct ipv6_pinfo);
mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
err = proto_register(&mptcp_v6_prot, 1);
if (err)
......
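
The mptcp hunks above drop the old TCP-fallback path and introduce a mptcp6_sock wrapper that carries ipv6_pinfo at its tail: mptcp_v6_prot.obj_size becomes sizeof(struct mptcp6_sock), and mptcp_inet6_sk() recovers the pinet6 pointer from a fixed offset computed as sizeof(outer) - sizeof(tail member). A standalone sketch of that offset arithmetic, with placeholder structs rather than the kernel definitions (it relies on there being no padding after the tail member, as the kernel code does):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pinfo { long flow_label; };                  /* stands in for ipv6_pinfo */
struct outer { long base[4]; struct pinfo np; };    /* stands in for mptcp6_sock */

/* Recover the tail member from the containing object, mirroring
 * mptcp_inet6_sk(): offset = sizeof(outer) - sizeof(pinfo). */
static struct pinfo *tail_pinfo(struct outer *o)
{
        size_t offset = sizeof(struct outer) - sizeof(struct pinfo);

        return (struct pinfo *)((uint8_t *)o + offset);
}

int main(void)
{
        struct outer o = { .np = { .flow_label = 7 } };

        printf("same address: %d  value: %ld\n",
               tail_pinfo(&o) == &o.np, tail_pinfo(&o)->flow_label);
        return 0;
}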
......@@ -562,11 +562,11 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
}
/*
* Final call destruction under RCU.
* Final call destruction - but must be done in process context.
*/
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
static void rxrpc_destroy_call(struct work_struct *work)
{
struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
struct rxrpc_net *rxnet = call->rxnet;
rxrpc_put_connection(call->conn);
......@@ -578,6 +578,22 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
wake_up_var(&rxnet->nr_calls);
}
/*
* Final call destruction under RCU.
*/
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
if (in_softirq()) {
INIT_WORK(&call->processor, rxrpc_destroy_call);
if (!rxrpc_queue_work(&call->processor))
BUG();
} else {
rxrpc_destroy_call(&call->processor);
}
}
/*
* clean up a call
*/
......
......@@ -171,8 +171,6 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
_enter("%d,%x", conn->debug_id, call->cid);
set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
if (rcu_access_pointer(chan->call) == call) {
/* Save the result of the call so that we can repeat it if necessary
* through the channel, whilst disposing of the actual call record.
......@@ -225,6 +223,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
__rxrpc_disconnect_call(conn, call);
spin_unlock(&conn->channel_lock);
set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
conn->idle_timestamp = jiffies;
}
......
......@@ -365,7 +365,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
err = tcindex_filter_result_init(&new_filter_result, net);
if (err < 0)
goto errout1;
goto errout_alloc;
if (old_r)
cr = r->res;
......@@ -484,7 +484,6 @@ errout_alloc:
tcindex_free_perfect_hash(cp);
else if (balloc == 2)
kfree(cp->h);
errout1:
tcf_exts_destroy(&new_filter_result.exts);
errout:
kfree(cp);
......
......@@ -349,9 +349,9 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);
kfree_skb(skb);
len_dropped += qdisc_pkt_len(skb);
num_dropped += 1;
rtnl_kfree_skbs(skb, skb);
}
qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);
......
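
The fq_pie hunk above moves the free of the dequeued skb after the qdisc_pkt_len(skb) accounting (and switches to rtnl_kfree_skbs()), so the length is no longer read from freed memory. A trivial userspace sketch of the corrected ordering, with malloc/free standing in for the skb lifetime:

#include <stdio.h>
#include <stdlib.h>

struct pkt { size_t len; };

int main(void)
{
        size_t bytes_dropped = 0;
        struct pkt *p = malloc(sizeof(*p));

        if (!p)
                return 1;
        p->len = 1500;

        /* Record the length first, then free: reading p->len after free(p)
         * would be a use-after-free, which is what the hunk above avoids by
         * moving the accounting before the free. */
        bytes_dropped += p->len;
        free(p);

        printf("dropped %zu bytes\n", bytes_dropped);
        return 0;
}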
......@@ -31,6 +31,7 @@ static DEFINE_SPINLOCK(taprio_list_lock);
#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
struct sched_entry {
struct list_head list;
......@@ -766,6 +767,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
[TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 },
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 },
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
[TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
};
static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
......@@ -1367,6 +1369,33 @@ static int taprio_mqprio_cmp(const struct net_device *dev,
return 0;
}
/* The semantics of the 'flags' argument in relation to 'change()'
* requests, are interpreted following two rules (which are applied in
* this order): (1) an omitted 'flags' argument is interpreted as
* zero; (2) the 'flags' of a "running" taprio instance cannot be
* changed.
*/
static int taprio_new_flags(const struct nlattr *attr, u32 old,
struct netlink_ext_ack *extack)
{
u32 new = 0;
if (attr)
new = nla_get_u32(attr);
if (old != TAPRIO_FLAGS_INVALID && old != new) {
NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
return -EOPNOTSUPP;
}
if (!taprio_flags_valid(new)) {
NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
return -EINVAL;
}
return new;
}
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
......@@ -1375,7 +1404,6 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
struct tc_mqprio_qopt *mqprio = NULL;
u32 taprio_flags = 0;
unsigned long flags;
ktime_t start;
int i, err;
......@@ -1388,21 +1416,14 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
if (tb[TCA_TAPRIO_ATTR_FLAGS]) {
taprio_flags = nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]);
if (q->flags != 0 && q->flags != taprio_flags) {
NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
return -EOPNOTSUPP;
} else if (!taprio_flags_valid(taprio_flags)) {
NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
return -EINVAL;
}
err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
q->flags, extack);
if (err < 0)
return err;
q->flags = taprio_flags;
}
q->flags = err;
err = taprio_parse_mqprio_opt(dev, mqprio, extack, taprio_flags);
err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
if (err < 0)
return err;
......@@ -1444,7 +1465,20 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
taprio_set_picos_per_byte(dev, q);
if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
if (mqprio) {
netdev_set_num_tc(dev, mqprio->num_tc);
for (i = 0; i < mqprio->num_tc; i++)
netdev_set_tc_queue(dev, i,
mqprio->count[i],
mqprio->offset[i]);
/* Always use supplied priority mappings */
for (i = 0; i <= TC_BITMASK; i++)
netdev_set_prio_tc_map(dev, i,
mqprio->prio_tc_map[i]);
}
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
err = taprio_enable_offload(dev, mqprio, q, new_admin, extack);
else
err = taprio_disable_offload(dev, q, extack);
......@@ -1464,27 +1498,14 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
}
if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) &&
!FULL_OFFLOAD_IS_ENABLED(taprio_flags) &&
if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
!hrtimer_active(&q->advance_timer)) {
hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
q->advance_timer.function = advance_sched;
}
if (mqprio) {
netdev_set_num_tc(dev, mqprio->num_tc);
for (i = 0; i < mqprio->num_tc; i++)
netdev_set_tc_queue(dev, i,
mqprio->count[i],
mqprio->offset[i]);
/* Always use supplied priority mappings */
for (i = 0; i <= TC_BITMASK; i++)
netdev_set_prio_tc_map(dev, i,
mqprio->prio_tc_map[i]);
}
if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) {
if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
q->dequeue = taprio_dequeue_offload;
q->peek = taprio_peek_offload;
} else {
......@@ -1501,9 +1522,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
goto unlock;
}
if (TXTIME_ASSIST_IS_ENABLED(taprio_flags)) {
setup_txtime(q, new_admin, start);
setup_txtime(q, new_admin, start);
if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
if (!oper) {
rcu_assign_pointer(q->oper_sched, new_admin);
err = 0;
......@@ -1528,7 +1549,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
spin_unlock_irqrestore(&q->current_entry_lock, flags);
if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
taprio_offload_config_changed(q);
}
......@@ -1567,7 +1588,7 @@ static void taprio_destroy(struct Qdisc *sch)
}
q->qdiscs = NULL;
netdev_set_num_tc(dev, 0);
netdev_reset_tc(dev);
if (q->oper_sched)
call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);
......@@ -1597,6 +1618,7 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
* and get the valid one on taprio_change().
*/
q->clockid = -1;
q->flags = TAPRIO_FLAGS_INVALID;
spin_lock(&taprio_list_lock);
list_add(&q->taprio_list, &taprio_list);
......
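
The taprio hunks above add TAPRIO_FLAGS_INVALID (U32_MAX) as a sentinel meaning "flags never configured", so a configured value of 0 can be told apart from "not set", and factor the validation into taprio_new_flags(), which returns either a negative errno or the accepted flags value that the caller stores with "if (err < 0) return err; q->flags = err;". A small sketch of that convention, with made-up flag names and values:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define FLAGS_INVALID  UINT32_MAX  /* sentinel: "never set", distinct from 0 */
#define FLAGS_SUPPORTED 0x3u       /* illustrative mask of known bits */

/* Return the new flags value, or a negative errno on failure, mirroring the
 * way taprio_new_flags() is consumed by its caller. */
static int new_flags(uint32_t requested, uint32_t old)
{
        if (old != FLAGS_INVALID && old != requested)
                return -EOPNOTSUPP;        /* can't change a running config */
        if (requested & ~FLAGS_SUPPORTED)
                return -EINVAL;            /* unknown bits */
        return (int)requested;
}

int main(void)
{
        uint32_t flags = FLAGS_INVALID;
        int err = new_flags(0x1, flags);

        if (err < 0) {
                printf("rejected: %d\n", err);
                return 1;
        }
        flags = (uint32_t)err;
        printf("flags = %u\n", flags);
        return 0;
}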
......@@ -189,11 +189,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}
static inline int unix_recvq_full(struct sock const *sk)
static inline int unix_recvq_full(const struct sock *sk)
{
return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
static inline int unix_recvq_full_lockless(const struct sock *sk)
{
return skb_queue_len_lockless(&sk->sk_receive_queue) >
READ_ONCE(sk->sk_max_ack_backlog);
}
struct sock *unix_peer_get(struct sock *s)
{
struct sock *peer;
......@@ -1758,7 +1764,8 @@ restart_locked:
* - unix_peer(sk) == sk by time of get but disconnected before lock
*/
if (other != sk &&
unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
unlikely(unix_peer(other) != sk &&
unix_recvq_full_lockless(other))) {
if (timeo) {
timeo = unix_wait_for_peer(other, timeo);
......
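
The af_unix hunk above compares the peer's receive-queue length against sk_max_ack_backlog without holding the queue lock, so the new unix_recvq_full_lockless() uses skb_queue_len_lockless() and READ_ONCE() to make both sides of the comparison annotated lockless reads. A userspace analogy with C11 atomics (not the kernel primitives) showing the same shape, where the lockless check accepts a possibly stale snapshot:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
        atomic_uint qlen;     /* written by one thread, read locklessly here */
        unsigned int backlog;
};

/* Lockless fullness check: an explicit relaxed atomic load replaces a plain
 * racy read, analogous to skb_queue_len_lockless()/READ_ONCE() above. The
 * result is a snapshot and may be stale, which is acceptable for this test. */
static bool queue_full_lockless(struct queue *q)
{
        return atomic_load_explicit(&q->qlen, memory_order_relaxed) > q->backlog;
}

int main(void)
{
        struct queue q = { .backlog = 10 };

        atomic_store_explicit(&q.qlen, 11, memory_order_relaxed);
        printf("full: %d\n", queue_full_lockless(&q));
        return 0;
}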
......@@ -83,7 +83,6 @@ static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
static u32 opt_umem_flags;
static int opt_unaligned_chunks;
static int opt_mmap_flags;
static u32 opt_xdp_bind_flags;
static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
static int opt_timeout = 1000;
static bool opt_need_wakeup = true;
......@@ -789,7 +788,8 @@ static void kick_tx(struct xsk_socket_info *xsk)
int ret;
ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN ||
errno == EBUSY || errno == ENETDOWN)
return;
exit_with_error(errno);
}
......
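
The xdpsock sample hunk above makes kick_tx() tolerate ENETDOWN from sendto() alongside ENOBUFS, EAGAIN and EBUSY, treating all of them as transient rather than fatal. A sketch of that errno triage around sendto(); the socket setup is simplified and only the error-handling pattern matters here:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Return 0 if the send succeeded or failed with a transient error the caller
 * should simply retry later (the set the sample now accepts: ENOBUFS, EAGAIN,
 * EBUSY, ENETDOWN); return -1 on anything else. */
static int kick(int fd)
{
        ssize_t ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);

        if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN ||
            errno == EBUSY || errno == ENETDOWN)
                return 0;

        fprintf(stderr, "sendto: %s\n", strerror(errno));
        return -1;
}

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        /* With no destination the send fails (EDESTADDRREQ), exercising the
         * "fatal error" branch of the triage above. */
        return kick(fd) == 0 ? 0 : 1;
}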
......@@ -580,7 +580,7 @@ probe_large_insn_limit(const char *define_prefix, __u32 ifindex)
res = bpf_probe_large_insn_limit(ifindex);
print_bool_feature("have_large_insn_limit",
"Large program size limit",
"HAVE_LARGE_INSN_LIMIT",
"LARGE_INSN_LIMIT",
res, define_prefix);
}
......
......@@ -536,7 +536,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
buf = (unsigned char *)(info->jited_prog_insns);
member_len = info->jited_prog_len;
} else { /* DUMP_XLATED */
if (info->xlated_prog_len == 0) {
if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
p_err("error retrieving insn dump: kernel.kptr_restrict set?");
return -1;
}
......
......@@ -41,7 +41,7 @@ clean:
$(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ)
$(call msg,BINARY,$@)
$(Q)$(CC) $(CFLAGS) -lelf -lz $^ -o $@
$(Q)$(CC) $(CFLAGS) $^ -lelf -lz -o $@
$(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \
$(OUTPUT)/runqslower.bpf.o
......@@ -75,7 +75,7 @@ $(OUTPUT)/vmlinux.h: $(VMLINUX_BTF_PATH) | $(OUTPUT) $(BPFTOOL)
fi
$(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF_PATH) format c > $@