#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>

#include <sys/resource.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include <bpf/bpf.h>

#include "../../../include/linux/filter.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS 512
#define MAX_MATCHES 16

struct bpf_reg_match {
	unsigned int line;
	const char *match;
};

struct bpf_align_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result;
	enum bpf_prog_type prog_type;
	/* Matches must be in order of increasing line */
	struct bpf_reg_match matches[MAX_MATCHES];
};

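/* Each test program below is loaded with verifier log level 2, and every
 * {line, match} pair is then checked by locating the verifier state dump
 * for that instruction index and looking for the register string as a
 * substring (see do_test_single() below).  In the match strings,
 * "var_off=(value; mask)" is the verifier's tnum dump: bits set in the
 * mask are unknown, bits set in the value are known to be one, so e.g.
 * (0x2; 0x7fc) describes an unknown (4n+2) value no larger than 0x7fe.
 * The alignment arithmetic in the comments assumes NET_IP_ALIGN == 2.
 */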
static struct bpf_align_test tests[] = {
	/* Four tests of known constants.  These aren't staggeringly
	 * interesting since we track exact values now.
	 */
	{
		.descr = "mov",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 16),
			BPF_MOV64_IMM(BPF_REG_3, 32),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3=inv2"},
			{2, "R3=inv4"},
			{3, "R3=inv8"},
			{4, "R3=inv16"},
			{5, "R3=inv32"},
		},
	},
	{
		.descr = "shift",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_4, 32),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3=inv1"},
			{2, "R3=inv2"},
			{3, "R3=inv4"},
			{4, "R3=inv8"},
			{5, "R3=inv16"},
			{6, "R3=inv1"},
			{7, "R4=inv32"},
			{8, "R4=inv16"},
			{9, "R4=inv8"},
			{10, "R4=inv4"},
			{11, "R4=inv2"},
		},
	},
	{
		.descr = "addsub",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3=inv4"},
			{2, "R3=inv8"},
			{3, "R3=inv10"},
			{4, "R4=inv8"},
			{5, "R4=inv12"},
			{6, "R4=inv14"},
		},
	},
	{
		.descr = "mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 7),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3=inv7"},
			{2, "R3=inv7"},
			{3, "R3=inv14"},
			{4, "R3=inv56"},
		},
	},

	/* Tests using unknown values */
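/* PREP_PKT_POINTERS loads skb->data into R2 and skb->data_end into R3;
 * every packet-pointer test below starts from these two registers.
 */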
#define PREP_PKT_POINTERS \
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
		    offsetof(struct __sk_buff, data)), \
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
		    offsetof(struct __sk_buff, data_end))

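/* LOAD_UNKNOWN bounds-checks that at least 8 bytes of packet data are
 * present (exiting early if not) and then loads one byte of packet data
 * into DST_REG, giving an unknown value in the range [0, 255].
 */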
#define LOAD_UNKNOWN(DST_REG) \
	PREP_PKT_POINTERS, \
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
	BPF_EXIT_INSN(), \
	BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)

	{
		.descr = "unknown shift",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			LOAD_UNKNOWN(BPF_REG_4),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R0=pkt(id=0,off=8,r=8,imm=0)"},
			{7, "R3=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R3=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{9, "R3=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{10, "R3=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{11, "R3=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{18, "R3=pkt_end(id=0,off=0,imm=0)"},
			{18, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{19, "R4=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
			{20, "R4=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{21, "R4=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{22, "R4=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R4=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
		},
	},
	{
		.descr = "unknown mul",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R3=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{9, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{10, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{11, "R4=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{12, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{13, "R4=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{14, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{15, "R4=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{16, "R4=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
		},
	},
	{
		.descr = "packet const offset",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),

			BPF_MOV64_IMM(BPF_REG_0, 0),

			/* Skip over ethernet header. */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),

			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
			{5, "R5=pkt(id=0,off=14,r=0,imm=0)"},
			{6, "R4=pkt(id=0,off=14,r=0,imm=0)"},
			{10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
			{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
			{10, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{14, "R4=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
			{15, "R4=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
		},
	},
	{
		.descr = "packet variable offset",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),

			/* First, add a constant to the R5 packet pointer,
			 * then a variable with a known alignment.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Now test in the other direction: first add the
			 * variable offset to R5, then the constant.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Test multiple accumulations of unknown values
			 * into a packet pointer.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Offset is added to packet pointer R5, resulting in
			 * known fixed offset, and variable offset from R6.
			 */
			{11, "R5=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off
			 * (14), which is 16. Then the variable offset is
			 * 4-byte aligned, so the total offset is 4-byte
			 * aligned and meets the load's requirements.
			 */
			{15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Variable offset is added to R5 packet pointer,
			 * resulting in a variable offset with known
			 * alignment of 4.
			 */
			{18, "R5=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5, resulting in
			 * reg->off of 14.
			 */
			{19, "R5=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off
			 * (14) which is 16. Then the variable offset is 4-byte
			 * aligned, so the total offset is 4-byte aligned and
			 * meets the load's requirements.
			 */
			{23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5 packet pointer,
			 * resulting in reg->off value of 14.
			 */
			{26, "R5=pkt(id=0,off=14,r=8"},
			/* Variable offset is added to R5, resulting in a
			 * variable offset of (4n).
			 */
			{27, "R5=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant is added to R5 again, setting reg->off to 18. */
			{28, "R5=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* And once more we add a variable; resulting var_off
			 * is still (4n), fixed offset is not changed.
			 * Also, we create a new reg->id.
			 */
			{29, "R5=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
			 * which is 20. Then the variable offset is (4n), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
			{33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
		},
	},
	{
		.descr = "packet variable offset 2",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			/* Make a (4n) offset from the value we just read */
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{9, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Packet pointer has (4n+2) offset */
			{11, "R5=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2. Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Newly read value in R6 was shifted left by 2, so has
			 * known alignment of 4.
			 */
			{18, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Added (4n) to packet pointer's (4n+2) var_off, giving
			 * another (4n+2).
			 */
			{19, "R5=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
			{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2. Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
		},
	},
	{
		.descr = "dubious pointer arithmetic",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_IMM(BPF_REG_0, 0),
			/* ptr & const => unknown & const */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 0x40),
			/* ptr << const => unknown << const */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
			/* We have a (4n) value. Let's make a packet offset
			 * out of it. First add 14, to make it a (4n+2)
			 */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			/* Then make sure it's nonnegative */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to packet pointer */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.matches = {
			{4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
			/* ptr & 0x40 == either 0 or 0x40 */
			{5, "R5=inv(id=0,umax_value=64,var_off=(0x0; 0x40))"},
			/* ptr << 2 == unknown, (4n) */
			{7, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
			/* (4n) + 14 == (4n+2). We blow our bounds, because
			 * the add could overflow.
			 */
			{8, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
			/* Checked s>=0 */
			{10, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* packet pointer + nonnegative (4n+2) */
			{12, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			{14, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* NET_IP_ALIGN + (4n+2) == (4n), so alignment is fine.
			 * We checked the bounds, but the add could still have
			 * overflowed if the packet pointer started in the
			 * upper half of the address space.
			 * So we did not get a 'range' on R6, and the access
			 * attempt will fail.
			 */
			{16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
		}
	},
	{
		.descr = "variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Create another unknown, (4n)-aligned, and subtract
			 * it from the first one
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
			/* Bounds-check the result */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{9, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{10, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* New unknown value in R7 is (4n) */
			{11, "R7=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Subtracting it from R6 blows our unsigned bounds */
			{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
			/* Checked s >= 0 */
			{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2. Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
		},
	},
	{
		.descr = "pointer variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned and bounded
			 * to [14,74]
			 */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Subtract it from the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
			/* Create another unknown, (4n)-aligned and >= 74.
			 * That in fact means >= 76, since 74 % 4 == 2
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
			{10, "R6=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
			/* Adding 14 makes R6 be (4n+2) */
			{11, "R6=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
			/* Subtracting from packet pointer overflows ubounds */
			{13, "R5=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
			/* New unknown value in R7 is (4n), >= 76 */
			{15, "R7=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
			/* Adding it to packet pointer gives nice bounds again */
			{16, "R5=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2. Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
		},
	},
};

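/* Determine how many instructions a test program actually contains by
 * scanning backwards for the last instruction that is not all zeroes.
 */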
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static char bpf_vlog[32768];

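/* Load a single test program (requesting strict alignment checking and a
 * verbose, level-2 verifier log), then walk the log and check that each
 * expected register state appears on the log line for the corresponding
 * instruction index.
 */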
static int do_test_single(struct bpf_align_test *test)
{
	struct bpf_insn *prog = test->insns;
	int prog_type = test->prog_type;
	char bpf_vlog_copy[32768];
	const char *line_ptr;
	int cur_line = -1;
	int prog_len, i;
	int fd_prog;
	int ret;

	prog_len = probe_filter_length(prog);
	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				     prog, prog_len, 1, "GPL", 0,
				     bpf_vlog, sizeof(bpf_vlog), 2);
	if (fd_prog < 0 && test->result != REJECT) {
		printf("Failed to load program.\n");
		printf("%s", bpf_vlog);
		ret = 1;
	} else if (fd_prog >= 0 && test->result == REJECT) {
		printf("Unexpected success loading program!\n");
		printf("%s", bpf_vlog);
		ret = 1;
		close(fd_prog);
	} else {
		ret = 0;
		/* We make a local copy so that we can strtok() it */
		strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
		line_ptr = strtok(bpf_vlog_copy, "\n");
		for (i = 0; i < MAX_MATCHES; i++) {
			struct bpf_reg_match m = test->matches[i];

			if (!m.match)
				break;
			while (line_ptr) {
				cur_line = -1;
				sscanf(line_ptr, "%u: ", &cur_line);
				if (cur_line == m.line)
					break;
				line_ptr = strtok(NULL, "\n");
			}
			if (!line_ptr) {
				printf("Failed to find line %u for match: %s\n",
				       m.line, m.match);
				ret = 1;
				printf("%s", bpf_vlog);
				break;
			}
			if (!strstr(line_ptr, m.match)) {
				printf("Failed to find match %u: %s\n",
				       m.line, m.match);
				ret = 1;
				printf("%s", bpf_vlog);
				break;
			}
		}
		if (fd_prog >= 0)
			close(fd_prog);
	}
	return ret;
}

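/* Run tests[from] through tests[to - 1], printing a per-test PASS/FAIL
 * line and an overall summary.
 */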
static int do_test(unsigned int from, unsigned int to)
{
	int all_pass = 0;
	int all_fail = 0;
	unsigned int i;

	for (i = from; i < to; i++) {
		struct bpf_align_test *test = &tests[i];
		int fail;

		printf("Test %3d: %s ... ",
		       i, test->descr);
		fail = do_test_single(test);
		if (fail) {
			all_fail++;
			printf("FAIL\n");
		} else {
			all_pass++;
			printf("PASS\n");
		}
	}
	printf("Results: %d pass %d fail\n",
	       all_pass, all_fail);
	return all_fail ? EXIT_FAILURE : EXIT_SUCCESS;
}

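/* With no arguments, run every test.  A single numeric argument selects
 * one test and two arguments select an inclusive range (for example,
 * assuming the binary is built as "test_align": ./test_align 5, or
 * ./test_align 2 7).  RLIMIT_MEMLOCK is raised first because loaded BPF
 * programs are charged against the locked-memory limit.
 */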
int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };

	setrlimit(RLIMIT_MEMLOCK, &rinf);

	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to = t + 1;
		}
	}
	return do_test(from, to);
}