@@ -239,8 +239,6 @@ enum bpf_stack_slot_type {
STACK_ITER,
};
-#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
-
#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
(1 << BPF_REG_3) | (1 << BPF_REG_4) | \
(1 << BPF_REG_5))
@@ -39,6 +39,8 @@ struct sock_reuseport;
struct ctl_table;
struct ctl_table_header;
+#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+
/* ArgX, context and stack frame pointer register positions. Note,
* Arg1, Arg2, Arg3, etc are used as argument mappings of function
* calls in BPF_CALL instruction.
@@ -881,6 +883,16 @@ bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
+static inline bool
+bpf_stack_narrow_access_ok(int off, int size, int spill_size)
+{
+#ifdef __BIG_ENDIAN
+ off -= spill_size - size;
+#endif
+
+ return !(off % BPF_REG_SIZE);
+}
+
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
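
The #ifdef above exists because the least-significant bytes of a spilled value sit at different stack offsets depending on endianness. A minimal user-space sketch of that layout (not kernel code; it only relies on the compiler-predefined __BYTE_ORDER__ macros, the same ones the selftests below use):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint32_t spilled = 20; /* models *(u32 *)(r10 - 8) = r4: four bytes written at fp-8 */
        uint16_t low;
        /* Byte offset of the low 16 bits inside the spilled u32:
         * 0 on little endian (a fill from fp-8), 2 on big endian (a fill from fp-6).
         */
        size_t lo_off = (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) ? 0 : 2;

        memcpy(&low, (unsigned char *)&spilled + lo_off, sizeof(low));
        printf("low 16 bits: %u\n", (unsigned)low); /* prints 20 on either endianness */
        return 0;
    }

With the big-endian adjustment, bpf_stack_narrow_access_ok(-6, 2, 4) reduces to the same -8 % BPF_REG_SIZE == 0 check that the little-endian read from fp-8 passes.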
@@ -4774,7 +4774,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
if (dst_regno < 0)
return 0;
- if (!(off % BPF_REG_SIZE) && size == spill_size) {
+ if (size <= spill_size &&
+ bpf_stack_narrow_access_ok(off, size, spill_size)) {
/* The earlier check_reg_arg() has decided the
* subreg_def for this insn. Save it first.
*/
@@ -4782,6 +4783,12 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
copy_register_state(&state->regs[dst_regno], reg);
state->regs[dst_regno].subreg_def = subreg_def;
+
+ /* Break the relation on a narrowing fill.
+ * coerce_reg_to_size will adjust the boundaries.
+ */
+ if (get_reg_width(reg) > size * BITS_PER_BYTE)
+ state->regs[dst_regno].id = 0;
} else {
int spill_cnt = 0, zero_cnt = 0;
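
The id is cleared because a narrowing fill keeps only the low bytes of the spilled scalar, so the filled register may no longer equal other registers that shared the spill's id, and range propagation between them would become unsound. A user-space sketch of the value change (illustrative only; assumes a little-endian host):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        uint64_t spilled = 0x100000014ULL; /* 64-bit scalar spilled to an 8-byte slot */
        uint32_t filled;

        /* a 4-byte fill of the 8-byte spill reads only the low half (on little endian) */
        memcpy(&filled, &spilled, sizeof(filled));
        assert(filled == 0x14); /* no longer equal to the spilled value */
        return 0;
    }

The boundary truncation itself is deferred to coerce_reg_to_size(), as the comment notes.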
@@ -6057,10 +6064,10 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
* values are also truncated so we push 64-bit bounds into
* 32-bit bounds. Above were truncated < 32-bits already.
*/
- if (size < 4) {
+ if (size < 4)
__mark_reg32_unbounded(reg);
- reg_bounds_sync(reg);
- }
+
+ reg_bounds_sync(reg);
}
static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
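
Pulling reg_bounds_sync() out of the size < 4 branch means the 32-bit bounds are re-derived from the truncated 64-bit bounds for 4-byte coercions too, not only for 1- and 2-byte ones. A simplified sketch of the truncation the synced bounds must reflect (the truncate_to_size() helper is hypothetical; the verifier masks a scalar's bounds only when that is sound and otherwise widens them):

    #include <stdint.h>
    #include <stdio.h>

    /* keep only the low size*8 bits, roughly what a narrow load leaves behind */
    static uint64_t truncate_to_size(uint64_t v, int size)
    {
        uint64_t mask = (size == 8) ? ~0ULL : (((uint64_t)1 << (size * 8)) - 1);

        return v & mask;
    }

    int main(void)
    {
        /* 20 fits in 32 bits: truncation keeps it, so the synced 32-bit bounds stay [20, 20] */
        printf("%llu\n", (unsigned long long)truncate_to_size(20, 4));
        /* high bits are dropped for wider values */
        printf("%llu\n", (unsigned long long)truncate_to_size(0x100000014ULL, 4));
        return 0;
    }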
@@ -217,7 +217,7 @@ __naked void uninit_u32_from_the_stack(void)
SEC("tc")
__description("Spill a u32 const scalar. Refill as u16. Offset to skb->data")
-__failure __msg("invalid access to packet")
+__success __retval(0)
__naked void u16_offset_to_skb_data(void)
{
asm volatile (" \
@@ -225,19 +225,24 @@ __naked void u16_offset_to_skb_data(void)
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
- r4 = *(u16*)(r10 - 8); \
+ r4 = *(u16*)(r10 - %[offset]); \
r0 = r2; \
- /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */\
r0 += r4; \
- /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
+ /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
if r0 > r3 goto l0_%=; \
- /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
+ /* r0 = *(u32 *)r2 R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
- __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ __imm_const(offset, 8)
+#else
+ __imm_const(offset, 6)
+#endif
: __clobber_all);
}
@@ -268,7 +273,7 @@ l0_%=: r0 = 0; \
}
SEC("tc")
-__description("Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data")
+__description("Spill a u32 const scalar. Refill as u16 from MSB. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void _6_offset_to_skb_data(void)
{
@@ -277,7 +282,7 @@ __naked void _6_offset_to_skb_data(void)
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
- r4 = *(u16*)(r10 - 6); \
+ r4 = *(u16*)(r10 - %[offset]); \
r0 = r2; \
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
r0 += r4; \
@@ -289,7 +294,12 @@ l0_%=: r0 = 0; \
exit; \
" :
: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
- __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ __imm_const(offset, 6)
+#else
+ __imm_const(offset, 8)
+#endif
: __clobber_all);
}