diff --git a/Documentation/bpf/bpf_licensing.rst b/Documentation/bpf/bpf_licensing.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b19c433f41d2c5cfae6e3bce8273b68b1effe7f3
--- /dev/null
+++ b/Documentation/bpf/bpf_licensing.rst
@@ -0,0 +1,100 @@
+=============
+BPF licensing
+=============
+
+Background
+==========
+
+* Classic BPF was BSD licensed
+
+"BPF" was originally introduced as BSD Packet Filter in
+http://www.tcpdump.org/papers/bpf-usenix93.pdf. The corresponding instruction
+set and its implementation came from BSD with BSD license. That original
+instruction set is now known as "classic BPF".
+
+However, an instruction set is a specification for machine-language
+interaction, similar to a programming language. It is not code. Therefore,
+applying a BSD license to it may be misleading in certain contexts, as an
+instruction set may enjoy no copyright protection.
+
+* eBPF (extended BPF) instruction set continues to be BSD licensed
+
+In 2014, the classic BPF instruction set was significantly extended. We
+typically refer to this extended instruction set as eBPF to disambiguate it
+from classic BPF (cBPF). The eBPF instruction set is still BSD licensed.
+
+Implementations of eBPF
+=======================
+
+Using the eBPF instruction set requires implementing code in both kernel space
+and user space.
+
+In Linux Kernel
+---------------
+
+The reference implementations of the eBPF interpreter and various just-in-time
+compilers are part of Linux and are GPLv2 licensed. The implementation of
+eBPF helper functions is also GPLv2 licensed. Interpreters, JITs, helpers,
+and verifiers are collectively referred to as the eBPF runtime.
+
+In User Space
+-------------
+
+There are also implementations of the eBPF runtime (interpreter, JITs, helper
+functions) under
+Apache2 (https://github.com/iovisor/ubpf),
+MIT (https://github.com/qmonnet/rbpf), and
+BSD (https://github.com/DPDK/dpdk/blob/main/lib/librte_bpf).
+
+In HW
+-----
+
+Hardware can choose to execute eBPF instructions natively and provide the eBPF
+runtime in HW, or implement it in firmware that carries a proprietary license.
+
+In other operating systems
+--------------------------
+
+Other kernel or user space implementations of the eBPF instruction set and
+runtime can have proprietary licenses.
+
+Using BPF programs in the Linux kernel
+======================================
+
+The Linux kernel (while being GPLv2) allows linking of proprietary kernel
+modules under the rules described in:
+Documentation/process/license-rules.rst
+
+When a kernel module is loaded, the Linux kernel checks which functions it
+intends to use. If any function is marked as "GPL only," the corresponding
+module or program must have a GPL-compatible license.
+
+Loading a BPF program into the Linux kernel is similar to loading a kernel
+module. BPF programs are loaded at run time rather than statically linked to
+the Linux kernel. BPF program loading follows the same license checking rules
+as kernel modules. BPF programs can be proprietary if they don't use "GPL
+only" BPF helper functions.
+
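+For example, a minimal sketch (using the libbpf-style SEC() macro) of how a
+BPF program declares its license for this check::
+
+   /* The kernel inspects this string at load time; "GPL only" helpers
+    * are rejected unless it names a GPL-compatible license.
+    */
+   char LICENSE[] SEC("license") = "GPL";
+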
+Further, some BPF program types - Linux Security Modules (LSM) and TCP
+Congestion Control (struct_ops), as of Aug 2021 - are required to be GPL
+compatible even if they don't use "GPL only" helper functions directly. The
+registration step for LSM and TCP congestion control modules in the Linux
+kernel is done through EXPORT_SYMBOL_GPL kernel functions. In that sense, LSM
+and struct_ops BPF programs implicitly call "GPL only" functions.
+The same restriction applies to BPF programs that call kernel functions
+directly via the unstable interface also known as "kfunc".
+
+Packaging BPF programs with user space applications
+====================================================
+
+Generally, proprietary-licensed applications and GPL-licensed BPF programs
+written for the Linux kernel in the same package can co-exist because they are
+separate executable processes. This applies to both cBPF and eBPF programs.
diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst
index 1ceb5d704a9780380de6e154143d1b9eb87a1a92..37f273a7e8b65c74f3bd7ba75aa4cb8f2750f547 100644
--- a/Documentation/bpf/index.rst
+++ b/Documentation/bpf/index.rst
@@ -82,6 +82,15 @@ Testing and debugging BPF
    s390
 
 
+Licensing
+=========
+
+.. toctree::
+   :maxdepth: 1
+
+   bpf_licensing
+
+
 Other
 =====
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index e7e778ca074c0f3fd8920f7aeee9f953db797901..6f85879ba9933c489c81f7d86303fbe228fa186e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -193,42 +193,40 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
 {
 	u16 ntu = rx_ring->next_to_use;
 	union i40e_rx_desc *rx_desc;
-	struct xdp_buff **bi, *xdp;
+	struct xdp_buff **xdp;
+	u32 nb_buffs, i;
 	dma_addr_t dma;
-	bool ok = true;
 
 	rx_desc = I40E_RX_DESC(rx_ring, ntu);
-	bi = i40e_rx_bi(rx_ring, ntu);
-	do {
-		xdp = xsk_buff_alloc(rx_ring->xsk_pool);
-		if (!xdp) {
-			ok = false;
-			goto no_buffers;
-		}
-		*bi = xdp;
-		dma = xsk_buff_xdp_get_dma(xdp);
+	xdp = i40e_rx_bi(rx_ring, ntu);
+
+	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
+	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
+	if (!nb_buffs)
+		return false;
+
+	i = nb_buffs;
+	while (i--) {
+		dma = xsk_buff_xdp_get_dma(*xdp);
 		rx_desc->read.pkt_addr = cpu_to_le64(dma);
 		rx_desc->read.hdr_addr = 0;
 
 		rx_desc++;
-		bi++;
-		ntu++;
-
-		if (unlikely(ntu == rx_ring->count)) {
-			rx_desc = I40E_RX_DESC(rx_ring, 0);
-			bi = i40e_rx_bi(rx_ring, 0);
-			ntu = 0;
-		}
-	} while (--count);
+		xdp++;
+	}
 
-no_buffers:
-	if (rx_ring->next_to_use != ntu) {
-		/* clear the status bits for the next_to_use descriptor */
-		rx_desc->wb.qword1.status_error_len = 0;
-		i40e_release_rx_desc(rx_ring, ntu);
+	ntu += nb_buffs;
+	if (ntu == rx_ring->count) {
+		rx_desc = I40E_RX_DESC(rx_ring, 0);
+		xdp = i40e_rx_bi(rx_ring, 0);
+		ntu = 0;
 	}
 
-	return ok;
+	/* clear the status bits for the next_to_use descriptor */
+	rx_desc->wb.qword1.status_error_len = 0;
+	i40e_release_rx_desc(rx_ring, ntu);
+
+	return count == nb_buffs;
 }
 
 /**
@@ -365,7 +363,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 			break;
 
 		bi = *i40e_rx_bi(rx_ring, next_to_clean);
-		bi->data_end = bi->data + size;
+		xsk_buff_set_size(bi, size);
 		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
 
 		xdp_res = i40e_run_xdp_zc(rx_ring, bi);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index b0bbbec4e3a3c7525ba8bd9247b1b622bd258c28..cce348c83da8f2f0a72a7b4c0fd51a928f49ab38 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -164,17 +164,10 @@ struct ice_tx_offload_params {
 };
 
 struct ice_rx_buf {
-	union {
-		struct {
-			dma_addr_t dma;
-			struct page *page;
-			unsigned int page_offset;
-			u16 pagecnt_bias;
-		};
-		struct {
-			struct xdp_buff *xdp;
-		};
-	};
+	dma_addr_t dma;
+	struct page *page;
+	unsigned int page_offset;
+	u16 pagecnt_bias;
 };
 
 struct ice_q_stats {
@@ -270,6 +263,7 @@ struct ice_ring {
 	union {
 		struct ice_tx_buf *tx_buf;
 		struct ice_rx_buf *rx_buf;
+		struct xdp_buff **xdp_buf;
 	};
 	/* CL2 - 2nd cacheline starts here */
 	u16 q_index;			/* Queue number of ring */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 5a9f61deeb38da9d31d134440d13a0e6a53d9c1c..7682eaa9a9ecea7649dd90c436f21bc2982e882c 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -364,45 +364,39 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
 {
 	union ice_32b_rx_flex_desc *rx_desc;
 	u16 ntu = rx_ring->next_to_use;
-	struct ice_rx_buf *rx_buf;
-	bool ok = true;
+	struct xdp_buff **xdp;
+	u32 nb_buffs, i;
 	dma_addr_t dma;
 
-	if (!count)
-		return true;
-
 	rx_desc = ICE_RX_DESC(rx_ring, ntu);
-	rx_buf = &rx_ring->rx_buf[ntu];
+	xdp = &rx_ring->xdp_buf[ntu];
 
-	do {
-		rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
-		if (!rx_buf->xdp) {
-			ok = false;
-			break;
-		}
+	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
+	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
+	if (!nb_buffs)
+		return false;
 
-		dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
+	i = nb_buffs;
+	while (i--) {
+		dma = xsk_buff_xdp_get_dma(*xdp);
 		rx_desc->read.pkt_addr = cpu_to_le64(dma);
-		rx_desc->wb.status_error0 = 0;
 
 		rx_desc++;
-		rx_buf++;
-		ntu++;
-
-		if (unlikely(ntu == rx_ring->count)) {
-			rx_desc = ICE_RX_DESC(rx_ring, 0);
-			rx_buf = rx_ring->rx_buf;
-			ntu = 0;
-		}
-	} while (--count);
+		xdp++;
+	}
 
-	if (rx_ring->next_to_use != ntu) {
-		/* clear the status bits for the next_to_use descriptor */
-		rx_desc->wb.status_error0 = 0;
-		ice_release_rx_desc(rx_ring, ntu);
+	ntu += nb_buffs;
+	if (ntu == rx_ring->count) {
+		rx_desc = ICE_RX_DESC(rx_ring, 0);
+		xdp = rx_ring->xdp_buf;
+		ntu = 0;
 	}
 
-	return ok;
+	/* clear the status bits for the next_to_use descriptor */
+	rx_desc->wb.status_error0 = 0;
+	ice_release_rx_desc(rx_ring, ntu);
+
+	return count == nb_buffs;
 }
 
 /**
@@ -421,19 +415,19 @@ static void ice_bump_ntc(struct ice_ring *rx_ring)
 /**
  * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
  * @rx_ring: Rx ring
- * @rx_buf: zero-copy Rx buffer
+ * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
  *
  * This function allocates a new skb from a zero-copy Rx buffer.
  *
  * Returns the skb on success, NULL on failure.
  */
 static struct sk_buff *
-ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+ice_construct_skb_zc(struct ice_ring *rx_ring, struct xdp_buff **xdp_arr)
 {
-	unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
-	unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
-	unsigned int datasize_hard = rx_buf->xdp->data_end -
-				     rx_buf->xdp->data_hard_start;
+	struct xdp_buff *xdp = *xdp_arr;
+	unsigned int metasize = xdp->data - xdp->data_meta;
+	unsigned int datasize = xdp->data_end - xdp->data;
+	unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
 	struct sk_buff *skb;
 
 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -441,13 +435,13 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
 	if (unlikely(!skb))
 		return NULL;
 
-	skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
-	memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
+	skb_reserve(skb, xdp->data - xdp->data_hard_start);
+	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
 	if (metasize)
 		skb_metadata_set(skb, metasize);
 
-	xsk_buff_free(rx_buf->xdp);
-	rx_buf->xdp = NULL;
+	xsk_buff_free(xdp);
+	*xdp_arr = NULL;
 	return skb;
 }
 
@@ -521,7 +515,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
 	while (likely(total_rx_packets < (unsigned int)budget)) {
 		union ice_32b_rx_flex_desc *rx_desc;
 		unsigned int size, xdp_res = 0;
-		struct ice_rx_buf *rx_buf;
+		struct xdp_buff **xdp;
 		struct sk_buff *skb;
 		u16 stat_err_bits;
 		u16 vlan_tag = 0;
@@ -544,18 +538,18 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
 		if (!size)
 			break;
 
-		rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
-		rx_buf->xdp->data_end = rx_buf->xdp->data + size;
-		xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);
+		xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
+		xsk_buff_set_size(*xdp, size);
+		xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
 
-		xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
+		xdp_res = ice_run_xdp_zc(rx_ring, *xdp);
 		if (xdp_res) {
 			if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
 				xdp_xmit |= xdp_res;
 			else
-				xsk_buff_free(rx_buf->xdp);
+				xsk_buff_free(*xdp);
 
-			rx_buf->xdp = NULL;
+			*xdp = NULL;
 			total_rx_bytes += size;
 			total_rx_packets++;
 			cleaned_count++;
@@ -565,7 +559,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
 		}
 
 		/* XDP_PASS path */
-		skb = ice_construct_skb_zc(rx_ring, rx_buf);
+		skb = ice_construct_skb_zc(rx_ring, xdp);
 		if (!skb) {
 			rx_ring->rx_stats.alloc_buf_failed++;
 			break;
@@ -813,12 +807,12 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
 	u16 i;
 
 	for (i = 0; i < rx_ring->count; i++) {
-		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+		struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
 
-		if (!rx_buf->xdp)
+		if (!*xdp)
 			continue;
 
-		rx_buf->xdp = NULL;
+		*xdp = NULL;
 	}
 }
 
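Both driver conversions above share one shape: rather than allocating a single
XSK buffer per descriptor inside the loop, the refill path asks for one
contiguous batch capped at the end of the ring and leaves any wrapped
remainder to the next call. A minimal sketch of the pattern, with the
hardware-specific descriptor writes stubbed out (refill_zc() and its
parameters are illustrative, not a real driver API):

	static u32 refill_zc(struct xsk_buff_pool *pool, struct xdp_buff **bufs,
			     u32 ntu, u32 ring_size, u32 count)
	{
		u32 nb, i;

		/* Cap the batch at the end of the ring so all buffers land
		 * in one contiguous stretch of the software ring.
		 */
		nb = min_t(u32, count, ring_size - ntu);
		nb = xsk_buff_alloc_batch(pool, &bufs[ntu], nb);

		for (i = 0; i < nb; i++) {
			dma_addr_t dma = xsk_buff_xdp_get_dma(bufs[ntu + i]);

			/* ...program dma into the (ntu + i)-th Rx descriptor... */
		}

		/* nb < count means the ring wrapped or the pool ran dry;
		 * the caller simply refills again later.
		 */
		return nb;
	}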
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 020a7d5bf4701afbf1e504476ecd41323bd1fa6a..1c7fd7c4c6d352b134f6294f8486957503359120 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -48,6 +48,7 @@ extern struct idr btf_idr;
 extern spinlock_t btf_idr_lock;
 extern struct kobject *btf_kobj;
 
+typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
 typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
 					struct bpf_iter_aux_info *aux);
 typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
@@ -142,7 +143,8 @@ struct bpf_map_ops {
 	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
 					      struct bpf_func_state *caller,
 					      struct bpf_func_state *callee);
-	int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
+	int (*map_for_each_callback)(struct bpf_map *map,
+				     bpf_callback_t callback_fn,
 				     void *callback_ctx, u64 flags);
 
 	/* BTF name and id of struct allocated by map_alloc */
@@ -1089,6 +1091,7 @@ bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *f
 int bpf_prog_calc_tag(struct bpf_prog *fp);
 
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
 
 typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
 					unsigned long off, unsigned long len);
@@ -2217,6 +2220,8 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 struct btf_id_set;
 bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
 
+#define MAX_BPRINTF_VARARGS		12
+
 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
 			u32 **bin_buf, u32 num_args);
 void bpf_bprintf_cleanup(void);
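The new bpf_callback_t typedef spells out the fixed BPF calling convention:
every program or callback takes exactly five u64 arguments, with pointers
squeezed through u64 and unused slots passed as zero. Giving
map_for_each_callback this type lets callers invoke the callback directly
instead of casting through BPF_CAST_CALL() at every call site (see the
kernel/bpf changes below). A sketch of an invocation under this convention,
mirroring bpf_for_each_array_elem():

	u64 ret;

	/* (map, &key, value, callback_ctx, 0): the (u64)(long) double cast
	 * moves pointers through u64; the fifth argument is unused here.
	 */
	ret = callback_fn((u64)(long)map, (u64)(long)&key,
			  (u64)(long)val, (u64)(long)callback_ctx, 0);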
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 4a93c12543ee282fce511d4bffe105ae2dd3e234..47f80adbe744769ad4372e50aa8d2b6a767deb12 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -360,10 +360,9 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
 		.off   = 0,					\
 		.imm   = TGT })
 
-/* Function call */
+/* Convert function address to BPF immediate */
 
-#define BPF_CAST_CALL(x)					\
-		((u64 (*)(u64, u64, u64, u64, u64))(x))
+#define BPF_CALL_IMM(x)	((void *)(x) - (void *)__bpf_call_base)
 
 #define BPF_EMIT_CALL(FUNC)					\
 	((struct bpf_insn) {					\
@@ -371,7 +370,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
 		.dst_reg = 0,					\
 		.src_reg = 0,					\
 		.off   = 0,					\
-		.imm   = ((FUNC) - __bpf_call_base) })
+		.imm   = BPF_CALL_IMM(FUNC) })
 
 /* Raw code statement block */
 
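BPF_CAST_CALL() existed only to cast a function address to the five-u64 helper
signature so that the subtraction against __bpf_call_base compiled without
warnings; BPF_CALL_IMM() instead names what every call site actually wanted:
the callee's offset from __bpf_call_base, which is what fits into the 32-bit
imm field of a BPF CALL instruction. A sketch of the round trip (the decode
side lives in the interpreter in kernel/bpf/core.c and in the JITs):

	/* Encode: what BPF_CALL_IMM(func) computes for insn->imm */
	insn->imm = (void *)func - (void *)__bpf_call_base;

	/* Decode at execution time: add the base back to recover the callee */
	func = (void *)__bpf_call_base + insn->imm;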
diff --git a/include/net/xdp.h b/include/net/xdp.h
index ad5b02dcb6f4c4fd049ff0434c8f9615f8f7d6da..447f9b1578f387bed610bc4812632e4af64f6739 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -15,13 +15,13 @@
  * level RX-ring queues.  It is information that is specific to how
  * the driver have configured a given RX-ring queue.
  *
- * Each xdp_buff frame received in the driver carry a (pointer)
+ * Each xdp_buff frame received in the driver carries a (pointer)
  * reference to this xdp_rxq_info structure.  This provides the XDP
  * data-path read-access to RX-info for both kernel and bpf-side
  * (limited subset).
  *
  * For now, direct access is only safe while running in NAPI/softirq
- * context.  Contents is read-mostly and must not be updated during
+ * context.  Contents are read-mostly and must not be updated during
  * driver NAPI/softirq poll.
  *
  * The driver usage API is a register and unregister API.
@@ -30,8 +30,8 @@
  * can be attached as long as it doesn't change the underlying
  * RX-ring.  If the RX-ring does change significantly, the NIC driver
  * naturally need to stop the RX-ring before purging and reallocating
- * memory.  In that process the driver MUST call unregistor (which
- * also apply for driver shutdown and unload).  The register API is
+ * memory.  In that process the driver MUST call unregister (which
+ * also applies for driver shutdown and unload).  The register API is
  * also mandatory during RX-ring setup.
  */
 
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 4e295541e396718d688264bb5ae7317f3c9d64d2..443d45951564894ef930b46984e112270482c8ea 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -77,6 +77,12 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
 	return xp_alloc(pool);
 }
 
+/* Returns as many entries as possible up to max. 0 <= N <= max. */
+static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+	return xp_alloc_batch(pool, xdp, max);
+}
+
 static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
 {
 	return xp_can_alloc(pool, count);
@@ -89,6 +95,13 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
 	xp_free(xskb);
 }
 
+static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+{
+	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
+	xdp->data_meta = xdp->data;
+	xdp->data_end = xdp->data + size;
+}
+
 static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
 					      u64 addr)
 {
@@ -212,6 +225,11 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
 	return NULL;
 }
 
+static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+	return 0;
+}
+
 static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
 {
 	return false;
@@ -221,6 +239,10 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
 {
 }
 
+static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+{
+}
+
 static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
 					      u64 addr)
 {
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 7a9a23e7a604a97ee63466197cefedebed82d64d..ddeefc4a10405c7d8b8c0cd7e48cda9e5ce38c91 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -7,6 +7,7 @@
 #include <linux/if_xdp.h>
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
+#include <linux/bpf.h>
 #include <net/xdp.h>
 
 struct xsk_buff_pool;
@@ -23,7 +24,6 @@ struct xdp_buff_xsk {
 	dma_addr_t dma;
 	dma_addr_t frame_dma;
 	struct xsk_buff_pool *pool;
-	bool unaligned;
 	u64 orig_addr;
 	struct list_head free_list_node;
 };
@@ -67,6 +67,7 @@ struct xsk_buff_pool {
 	u32 free_heads_cnt;
 	u32 headroom;
 	u32 chunk_size;
+	u32 chunk_shift;
 	u32 frame_len;
 	u8 cached_need_wakeup;
 	bool uses_need_wakeup;
@@ -81,6 +82,13 @@ struct xsk_buff_pool {
 	struct xdp_buff_xsk *free_heads[];
 };
 
+/* Masks for xdp_umem_page flags.
+ * The low 12 bits of the addr will be 0 since this is the page address, so we
+ * can use them for flags.
+ */
+#define XSK_NEXT_PG_CONTIG_SHIFT 0
+#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
+
 /* AF_XDP core. */
 struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 						struct xdp_umem *umem);
@@ -89,7 +97,6 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
 int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
 			 struct net_device *dev, u16 queue_id);
 void xp_destroy(struct xsk_buff_pool *pool);
-void xp_release(struct xdp_buff_xsk *xskb);
 void xp_get_pool(struct xsk_buff_pool *pool);
 bool xp_put_pool(struct xsk_buff_pool *pool);
 void xp_clear_dev(struct xsk_buff_pool *pool);
@@ -99,12 +106,28 @@ void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
 /* AF_XDP, and XDP core. */
 void xp_free(struct xdp_buff_xsk *xskb);
 
+static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
+				     u64 addr)
+{
+	xskb->orig_addr = addr;
+	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
+}
+
+static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
+				    dma_addr_t *dma_pages, u64 addr)
+{
+	xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
+		(addr & ~PAGE_MASK);
+	xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
+}
+
 /* AF_XDP ZC drivers, via xdp_sock_buff.h */
 void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
 int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 	       unsigned long attrs, struct page **pages, u32 nr_pages);
 void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
 struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
+u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
 bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
 void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
 dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
@@ -180,4 +203,25 @@ static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
 		xp_unaligned_extract_offset(addr);
 }
 
+static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
+{
+	return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
+}
+
+static inline void xp_release(struct xdp_buff_xsk *xskb)
+{
+	if (xskb->pool->unaligned)
+		xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
+}
+
+static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
+{
+	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+
+	offset += xskb->pool->headroom;
+	if (!xskb->pool->unaligned)
+		return xskb->orig_addr + offset;
+	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
+}
+
 #endif /* XSK_BUFF_POOL_H_ */
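The comment added above documents a trick this file now relies on twice:
page-aligned values have zero low bits, so those bits can carry flags.
xp_init_xskb_dma() masks the contiguity flag out of the per-page DMA address,
and xp_get_handle() packs the buffer offset into the upper bits of the handle
in unaligned mode. A sketch of the DMA-address side (the flag is set while the
umem is DMA-mapped; that setting site is not shown in this hunk):

	/* Page DMA addresses are page aligned, so bit 0 is free to mean
	 * "the next page is physically contiguous with this one".
	 */
	dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;

	/* Per-buffer address: strip the flag, then add the in-page offset,
	 * as xp_init_xskb_dma() does above.
	 */
	dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
	      (addr & ~PAGE_MASK);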
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 3e9785f1064a23f8df088d0818fd06af722911cd..6fc59d61937ada177f3044583995ab58fa5bccdc 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4046,7 +4046,7 @@ union bpf_attr {
  * 		arguments. The *data* are a **u64** array and corresponding format string
  * 		values are stored in the array. For strings and pointers where pointees
  * 		are accessed, only the pointer values are stored in the *data* array.
- * 		The *data_len* is the size of *data* in bytes.
+ * 		The *data_len* is the size of *data* in bytes - must be a multiple of 8.
  *
  *		Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory.
  *		Reading kernel memory may fail due to either invalid address or
@@ -4751,7 +4751,8 @@ union bpf_attr {
  *		Each format specifier in **fmt** corresponds to one u64 element
  *		in the **data** array. For strings and pointers where pointees
  *		are accessed, only the pointer values are stored in the *data*
- *		array. The *data_len* is the size of *data* in bytes.
+ *		array. The *data_len* is the size of *data* in bytes - must be
+ *		a multiple of 8.
  *
  *		Formats **%s** and **%p{i,I}{4,6}** require to read kernel
  *		memory. Reading kernel memory may fail due to either invalid
@@ -4898,6 +4899,16 @@ union bpf_attr {
  *		**-EINVAL** if *flags* is not zero.
  *
  *		**-ENOENT** if architecture does not support branch records.
+ *
+ * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
+ *	Description
+ *		Behaves like the **bpf_trace_printk**\ () helper, but takes an array of
+ *		u64 to format and can handle more format args as a result.
+ *
+ *		Arguments are to be used as in **bpf_seq_printf**\ () helper.
+ *	Return
+ *		The number of bytes written to the buffer, or a negative error
+ *		in case of failure.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5077,6 +5088,7 @@ union bpf_attr {
 	FN(get_attach_cookie),		\
 	FN(task_pt_regs),		\
 	FN(get_branch_snapshot),	\
+	FN(trace_vprintk),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
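bpf_trace_printk() can take at most three format arguments, because the BPF
calling convention provides five argument registers and two are consumed by
fmt and fmt_size. The new helper sidesteps that limit by passing all values
indirectly through a u64 array. A sketch of a call from a BPF program
(libbpf-style; the values are illustrative):

	static const char fmt[] = "a=%d b=%d c=%d d=%d e=%d\n";
	__u64 data[] = { 1, 2, 3, 4, 5 };	/* one u64 per format specifier */

	/* data_len is sizeof(data) in bytes - hence "multiple of 8" -
	 * not the number of arguments.
	 */
	bpf_trace_vprintk(fmt, sizeof(fmt), data, sizeof(data));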
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index cebd4fb06d190a561205b4c5ecf6aeeaffca70fd..5e1ccfae916b92abd945e16f1e5e2bfd7532d317 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -645,7 +645,7 @@ static const struct bpf_iter_seq_info iter_seq_info = {
 	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
 };
 
-static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
+static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
 				   void *callback_ctx, u64 flags)
 {
 	u32 i, key, num_elems = 0;
@@ -668,9 +668,8 @@ static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
 			val = array->value + array->elem_size * i;
 		num_elems++;
 		key = i;
-		ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
-					(u64)(long)&key, (u64)(long)val,
-					(u64)(long)callback_ctx, 0);
+		ret = callback_fn((u64)(long)map, (u64)(long)&key,
+				  (u64)(long)val, (u64)(long)callback_ctx, 0);
 		/* return value: 0 - continue, 1 - stop and return */
 		if (ret)
 			break;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index d6b7dfdd806610165a223a48b77cfc1abe6d156a..ea8a468dbdedf7a991b5db6a1ba98095d2a0feb5 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2357,6 +2357,11 @@ const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
 	return NULL;
 }
 
+const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
+{
+	return NULL;
+}
+
 u64 __weak
 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 32471ba027086f8255e04a542edacd787b94f097..d29af9988f37ab159ef0f169b2f40cec2d5f411a 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -668,7 +668,7 @@ static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 
 	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
 		     (void *(*)(struct bpf_map *map, void *key))NULL));
-	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
+	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
 	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
 	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
 				offsetof(struct htab_elem, key) +
@@ -709,7 +709,7 @@ static int htab_lru_map_gen_lookup(struct bpf_map *map,
 
 	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
 		     (void *(*)(struct bpf_map *map, void *key))NULL));
-	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
+	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
 	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
 	*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
 			      offsetof(struct htab_elem, lru_node) +
@@ -2049,7 +2049,7 @@ static const struct bpf_iter_seq_info iter_seq_info = {
 	.seq_priv_size		= sizeof(struct bpf_iter_seq_hash_map_info),
 };
 
-static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn,
+static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
 				  void *callback_ctx, u64 flags)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
@@ -2089,9 +2089,8 @@ static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn,
 				val = elem->key + roundup_key_size;
 			}
 			num_elems++;
-			ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
-					(u64)(long)key, (u64)(long)val,
-					(u64)(long)callback_ctx, 0);
+			ret = callback_fn((u64)(long)map, (u64)(long)key,
+					  (u64)(long)val, (u64)(long)callback_ctx, 0);
 			/* return value: 0 - continue, 1 - stop and return */
 			if (ret) {
 				rcu_read_unlock();
@@ -2397,7 +2396,7 @@ static int htab_of_map_gen_lookup(struct bpf_map *map,
 
 	BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
 		     (void *(*)(struct bpf_map *map, void *key))NULL));
-	*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
+	*insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem);
 	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
 	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
 				offsetof(struct htab_elem, key) +
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 9aabf84afd4b25d719c12a1df5308e4437cde17b..1ffd469c217fad50b302b56505060c6ca869851b 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -979,15 +979,13 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
 	return err;
 }
 
-#define MAX_SNPRINTF_VARARGS		12
-
 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
 	   const void *, data, u32, data_len)
 {
 	int err, num_args;
 	u32 *bin_args;
 
-	if (data_len % 8 || data_len > MAX_SNPRINTF_VARARGS * 8 ||
+	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
 	    (data_len && !data))
 		return -EINVAL;
 	num_args = data_len / 8;
@@ -1058,7 +1056,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
 	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
 	struct bpf_map *map = t->map;
 	void *value = t->value;
-	void *callback_fn;
+	bpf_callback_t callback_fn;
 	void *key;
 	u32 idx;
 
@@ -1083,8 +1081,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
 		key = value - round_up(map->key_size, 8);
 	}
 
-	BPF_CAST_CALL(callback_fn)((u64)(long)map, (u64)(long)key,
-				   (u64)(long)value, 0, 0);
+	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
 	/* The verifier checked that return value is zero. */
 
 	this_cpu_write(hrtimer_running, NULL);
@@ -1437,6 +1434,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 		return &bpf_snprintf_proto;
 	case BPF_FUNC_task_pt_regs:
 		return &bpf_task_pt_regs_proto;
+	case BPF_FUNC_trace_vprintk:
+		return bpf_get_trace_vprintk_proto();
 	default:
 		return NULL;
 	}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e76b559179054b93191b8959ddcad9ad01535e8c..1433752db7403f68acd7a707a25bd053549f78dc 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -612,6 +612,20 @@ static const char *kernel_type_name(const struct btf* btf, u32 id)
 	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
 }
 
+/* The reg state of a pointer or a bounded scalar was saved when
+ * it was spilled to the stack.
+ */
+static bool is_spilled_reg(const struct bpf_stack_state *stack)
+{
+	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
+}
+
+static void scrub_spilled_slot(u8 *stype)
+{
+	if (*stype != STACK_INVALID)
+		*stype = STACK_MISC;
+}
+
 static void print_verifier_state(struct bpf_verifier_env *env,
 				 const struct bpf_func_state *state)
 {
@@ -717,7 +731,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
 			continue;
 		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
 		print_liveness(env, state->stack[i].spilled_ptr.live);
-		if (state->stack[i].slot_type[0] == STACK_SPILL) {
+		if (is_spilled_reg(&state->stack[i])) {
 			reg = &state->stack[i].spilled_ptr;
 			t = reg->type;
 			verbose(env, "=%s", reg_type_str[t]);
@@ -1730,7 +1744,7 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id)
 
 	desc = &tab->descs[tab->nr_descs++];
 	desc->func_id = func_id;
-	desc->imm = BPF_CAST_CALL(addr) - __bpf_call_base;
+	desc->imm = BPF_CALL_IMM(addr);
 	err = btf_distill_func_proto(&env->log, btf_vmlinux,
 				     func_proto, func_name,
 				     &desc->func_model);
@@ -2373,7 +2387,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
 				reg->precise = true;
 			}
 			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
-				if (func->stack[j].slot_type[0] != STACK_SPILL)
+				if (!is_spilled_reg(&func->stack[j]))
 					continue;
 				reg = &func->stack[j].spilled_ptr;
 				if (reg->type != SCALAR_VALUE)
@@ -2415,7 +2429,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
 	}
 
 	while (spi >= 0) {
-		if (func->stack[spi].slot_type[0] != STACK_SPILL) {
+		if (!is_spilled_reg(&func->stack[spi])) {
 			stack_mask = 0;
 			break;
 		}
@@ -2514,7 +2528,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
 				return 0;
 			}
 
-			if (func->stack[i].slot_type[0] != STACK_SPILL) {
+			if (!is_spilled_reg(&func->stack[i])) {
 				stack_mask &= ~(1ull << i);
 				continue;
 			}
@@ -2626,15 +2640,21 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
 }
 
 static void save_register_state(struct bpf_func_state *state,
-				int spi, struct bpf_reg_state *reg)
+				int spi, struct bpf_reg_state *reg,
+				int size)
 {
 	int i;
 
 	state->stack[spi].spilled_ptr = *reg;
-	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+	if (size == BPF_REG_SIZE)
+		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
+	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
+		state->stack[spi].slot_type[i - 1] = STACK_SPILL;
 
-	for (i = 0; i < BPF_REG_SIZE; i++)
-		state->stack[spi].slot_type[i] = STACK_SPILL;
+	/* size < 8 bytes spill */
+	for (; i; i--)
+		scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
 }
 
 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
@@ -2681,7 +2701,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
 	}
 
-	if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) &&
+	if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
 	    !register_is_null(reg) && env->bpf_capable) {
 		if (dst_reg != BPF_REG_FP) {
 			/* The backtracking logic can only recognize explicit
@@ -2694,7 +2714,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 			if (err)
 				return err;
 		}
-		save_register_state(state, spi, reg);
+		save_register_state(state, spi, reg, size);
 	} else if (reg && is_spillable_regtype(reg->type)) {
 		/* register containing pointer is being spilled into stack */
 		if (size != BPF_REG_SIZE) {
@@ -2706,16 +2726,16 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
 			return -EINVAL;
 		}
-		save_register_state(state, spi, reg);
+		save_register_state(state, spi, reg, size);
 	} else {
 		u8 type = STACK_MISC;
 
 		/* regular write of data into stack destroys any spilled ptr */
 		state->stack[spi].spilled_ptr.type = NOT_INIT;
 		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
-		if (state->stack[spi].slot_type[0] == STACK_SPILL)
+		if (is_spilled_reg(&state->stack[spi]))
 			for (i = 0; i < BPF_REG_SIZE; i++)
-				state->stack[spi].slot_type[i] = STACK_MISC;
+				scrub_spilled_slot(&state->stack[spi].slot_type[i]);
 
 		/* only mark the slot as written if all 8 bytes were written
 		 * otherwise read propagation may incorrectly stop too soon
@@ -2918,23 +2938,50 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
 	struct bpf_reg_state *reg;
-	u8 *stype;
+	u8 *stype, type;
 
 	stype = reg_state->stack[spi].slot_type;
 	reg = &reg_state->stack[spi].spilled_ptr;
 
-	if (stype[0] == STACK_SPILL) {
+	if (is_spilled_reg(&reg_state->stack[spi])) {
 		if (size != BPF_REG_SIZE) {
+			u8 scalar_size = 0;
+
 			if (reg->type != SCALAR_VALUE) {
 				verbose_linfo(env, env->insn_idx, "; ");
 				verbose(env, "invalid size of register fill\n");
 				return -EACCES;
 			}
-			if (dst_regno >= 0) {
+
+			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+			if (dst_regno < 0)
+				return 0;
+
+			for (i = BPF_REG_SIZE; i > 0 && stype[i - 1] == STACK_SPILL; i--)
+				scalar_size++;
+
+			if (!(off % BPF_REG_SIZE) && size == scalar_size) {
+				/* The earlier check_reg_arg() has decided the
+				 * subreg_def for this insn.  Save it first.
+				 */
+				s32 subreg_def = state->regs[dst_regno].subreg_def;
+
+				state->regs[dst_regno] = *reg;
+				state->regs[dst_regno].subreg_def = subreg_def;
+			} else {
+				for (i = 0; i < size; i++) {
+					type = stype[(slot - i) % BPF_REG_SIZE];
+					if (type == STACK_SPILL)
+						continue;
+					if (type == STACK_MISC)
+						continue;
+					verbose(env, "invalid read from stack off %d+%d size %d\n",
+						off, i, size);
+					return -EACCES;
+				}
 				mark_reg_unknown(env, state->regs, dst_regno);
-				state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
 			}
-			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
 			return 0;
 		}
 		for (i = 1; i < BPF_REG_SIZE; i++) {
@@ -2965,8 +3012,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 		}
 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
 	} else {
-		u8 type;
-
 		for (i = 0; i < size; i++) {
 			type = stype[(slot - i) % BPF_REG_SIZE];
 			if (type == STACK_MISC)
@@ -4514,17 +4559,17 @@ static int check_stack_range_initialized(
 			goto mark;
 		}
 
-		if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+		if (is_spilled_reg(&state->stack[spi]) &&
 		    state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID)
 			goto mark;
 
-		if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+		if (is_spilled_reg(&state->stack[spi]) &&
 		    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
 		     env->allow_ptr_leaks)) {
 			if (clobber) {
 				__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
 				for (j = 0; j < BPF_REG_SIZE; j++)
-					state->stack[spi].slot_type[j] = STACK_MISC;
+					scrub_spilled_slot(&state->stack[spi].slot_type[j]);
 			}
 			goto mark;
 		}
@@ -10356,9 +10401,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
 			 * return false to continue verification of this path
 			 */
 			return false;
-		if (i % BPF_REG_SIZE)
+		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
 			continue;
-		if (old->stack[spi].slot_type[0] != STACK_SPILL)
+		if (!is_spilled_reg(&old->stack[spi]))
 			continue;
 		if (!regsafe(env, &old->stack[spi].spilled_ptr,
 			     &cur->stack[spi].spilled_ptr, idmap))
@@ -10565,7 +10610,7 @@ static int propagate_precision(struct bpf_verifier_env *env,
 	}
 
 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
-		if (state->stack[i].slot_type[0] != STACK_SPILL)
+		if (!is_spilled_reg(&state->stack[i]))
 			continue;
 		state_reg = &state->stack[i].spilled_ptr;
 		if (state_reg->type != SCALAR_VALUE ||
@@ -12469,8 +12514,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 			if (!bpf_pseudo_call(insn))
 				continue;
 			subprog = insn->off;
-			insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
-				    __bpf_call_base;
+			insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
 		}
 
 		/* we use the aux data to keep a list of the start addresses
@@ -12950,32 +12994,25 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 patch_map_ops_generic:
 			switch (insn->imm) {
 			case BPF_FUNC_map_lookup_elem:
-				insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
-					    __bpf_call_base;
+				insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
 				continue;
 			case BPF_FUNC_map_update_elem:
-				insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
-					    __bpf_call_base;
+				insn->imm = BPF_CALL_IMM(ops->map_update_elem);
 				continue;
 			case BPF_FUNC_map_delete_elem:
-				insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
-					    __bpf_call_base;
+				insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
 				continue;
 			case BPF_FUNC_map_push_elem:
-				insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
-					    __bpf_call_base;
+				insn->imm = BPF_CALL_IMM(ops->map_push_elem);
 				continue;
 			case BPF_FUNC_map_pop_elem:
-				insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
-					    __bpf_call_base;
+				insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
 				continue;
 			case BPF_FUNC_map_peek_elem:
-				insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
-					    __bpf_call_base;
+				insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
 				continue;
 			case BPF_FUNC_redirect_map:
-				insn->imm = BPF_CAST_CALL(ops->map_redirect) -
-					    __bpf_call_base;
+				insn->imm = BPF_CALL_IMM(ops->map_redirect);
 				continue;
 			}
 
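The verifier changes above relax the old rule that only full 8-byte spills of
bounded scalars keep their register state. save_register_state() now marks
just the top `size` bytes of the slot as STACK_SPILL - which is why
is_spilled_reg() tests the most significant slot byte rather than slot_type[0]
- and the fill path restores the saved state when the read width matches the
spilled width. A sketch of a sequence the verifier can now track precisely
(before this change, the fill produced an unknown scalar):

	BPF_MOV64_IMM(BPF_REG_1, 20),			/* r1 = 20, a bounded scalar */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -8),	/* 4-byte spill at fp-8 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -8),	/* fill: r2 known to be 20 */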
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 067e88c3d2ee595b478b6af588217fd31e3cef90..6b3153841a33fe6b11ee7b7385de54e016b434f1 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -398,7 +398,7 @@ static const struct bpf_func_proto bpf_trace_printk_proto = {
 	.arg2_type	= ARG_CONST_SIZE,
 };
 
-const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
+static void __set_printk_clr_event(void)
 {
 	/*
 	 * This program might be calling bpf_trace_printk,
@@ -410,11 +410,57 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 	 */
 	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
 		pr_warn_ratelimited("could not enable bpf_trace_printk events");
+}
 
+const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
+{
+	__set_printk_clr_event();
 	return &bpf_trace_printk_proto;
 }
 
-#define MAX_SEQ_PRINTF_VARARGS		12
+BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
+	   u32, data_len)
+{
+	static char buf[BPF_TRACE_PRINTK_SIZE];
+	unsigned long flags;
+	int ret, num_args;
+	u32 *bin_args;
+
+	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
+	    (data_len && !data))
+		return -EINVAL;
+	num_args = data_len / 8;
+
+	ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
+	if (ret < 0)
+		return ret;
+
+	raw_spin_lock_irqsave(&trace_printk_lock, flags);
+	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
+
+	trace_bpf_trace_printk(buf);
+	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
+
+	bpf_bprintf_cleanup();
+
+	return ret;
+}
+
+static const struct bpf_func_proto bpf_trace_vprintk_proto = {
+	.func		= bpf_trace_vprintk,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_MEM,
+	.arg2_type	= ARG_CONST_SIZE,
+	.arg3_type	= ARG_PTR_TO_MEM_OR_NULL,
+	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
+};
+
+const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
+{
+	__set_printk_clr_event();
+	return &bpf_trace_vprintk_proto;
+}
 
 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 	   const void *, data, u32, data_len)
@@ -422,7 +468,7 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 	int err, num_args;
 	u32 *bin_args;
 
-	if (data_len & 7 || data_len > MAX_SEQ_PRINTF_VARARGS * 8 ||
+	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
 	    (data_len && !data))
 		return -EINVAL;
 	num_args = data_len / 8;
@@ -1162,6 +1208,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_get_func_ip_proto_tracing;
 	case BPF_FUNC_get_branch_snapshot:
 		return &bpf_get_branch_snapshot_proto;
+	case BPF_FUNC_trace_vprintk:
+		return bpf_get_trace_vprintk_proto();
 	default:
 		return bpf_base_func_proto(func_id);
 	}
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 0018d51b93b0a3908cbb9c20241d9f3b9f450870..b9fc330fc83b0ac05ce4391eaf1a809e2461bc31 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -52,6 +52,7 @@
 #define FLAG_NO_DATA		BIT(0)
 #define FLAG_EXPECTED_FAIL	BIT(1)
 #define FLAG_SKB_FRAG		BIT(2)
+#define FLAG_VERIFIER_ZEXT	BIT(3)
 
 enum {
 	CLASSIC  = BIT(6),	/* Old BPF instructions only. */
@@ -80,6 +81,7 @@ struct bpf_test {
 	int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
 	__u8 frag_data[MAX_DATA];
 	int stack_depth; /* for eBPF only, since tests don't call verifier */
+	int nr_testruns; /* Custom run count, defaults to MAX_TESTRUNS if 0 */
 };
 
 /* Large test cases need separate allocation and fill handler. */
@@ -461,2087 +463,7692 @@ static int bpf_fill_stxdw(struct bpf_test *self)
 	return __bpf_fill_stxdw(self, BPF_DW);
 }
 
-static int bpf_fill_long_jmp(struct bpf_test *self)
+static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64)
 {
-	unsigned int len = BPF_MAXINSNS;
-	struct bpf_insn *insn;
+	struct bpf_insn tmp[] = {BPF_LD_IMM64(reg, imm64)};
+
+	memcpy(insns, tmp, sizeof(tmp));
+	return 2;
+}
+
+/*
+ * Branch conversion tests. Complex operations can expand to a lot
+ * of instructions when JITed. This in turn may cause jump offsets
+ * to overflow the field size of the native instruction, triggering
+ * a branch conversion mechanism in some JITs.
+ */
+static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
+{
+	struct bpf_insn *insns;
+	int len = S16_MAX + 5;
 	int i;
 
+	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+	if (!insns)
+		return -ENOMEM;
+
+	i = __bpf_ld_imm64(insns, R1, 0x0123456789abcdefULL);
+	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+	insns[i++] = BPF_JMP_IMM(jmp, R0, imm, S16_MAX);
+	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 2);
+	insns[i++] = BPF_EXIT_INSN();
+
+	while (i < len - 1) {
+		static const int ops[] = {
+			BPF_LSH, BPF_RSH, BPF_ARSH, BPF_ADD,
+			BPF_SUB, BPF_MUL, BPF_DIV, BPF_MOD,
+		};
+		int op = ops[(i >> 1) % ARRAY_SIZE(ops)];
+
+		if (i & 1)
+			insns[i++] = BPF_ALU32_REG(op, R0, R1);
+		else
+			insns[i++] = BPF_ALU64_REG(op, R0, R1);
+	}
+
+	insns[i++] = BPF_EXIT_INSN();
+	self->u.ptr.insns = insns;
+	self->u.ptr.len = len;
+	BUG_ON(i != len);
+
+	return 0;
+}
+
+/* Branch taken by runtime decision */
+static int bpf_fill_max_jmp_taken(struct bpf_test *self)
+{
+	return __bpf_fill_max_jmp(self, BPF_JEQ, 1);
+}
+
+/* Branch not taken by runtime decision */
+static int bpf_fill_max_jmp_not_taken(struct bpf_test *self)
+{
+	return __bpf_fill_max_jmp(self, BPF_JEQ, 0);
+}
+
+/* Branch always taken, known at JIT time */
+static int bpf_fill_max_jmp_always_taken(struct bpf_test *self)
+{
+	return __bpf_fill_max_jmp(self, BPF_JGE, 0);
+}
+
+/* Branch never taken, known at JIT time */
+static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
+{
+	return __bpf_fill_max_jmp(self, BPF_JLT, 0);
+}
+
+/* ALU result computation used in tests */
+static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
+{
+	*res = 0;
+	switch (op) {
+	case BPF_MOV:
+		*res = v2;
+		break;
+	case BPF_AND:
+		*res = v1 & v2;
+		break;
+	case BPF_OR:
+		*res = v1 | v2;
+		break;
+	case BPF_XOR:
+		*res = v1 ^ v2;
+		break;
+	case BPF_LSH:
+		*res = v1 << v2;
+		break;
+	case BPF_RSH:
+		*res = v1 >> v2;
+		break;
+	case BPF_ARSH:
+		*res = v1 >> v2;
+		if (v2 > 0 && v1 > S64_MAX)
+			*res |= ~0ULL << (64 - v2);
+		break;
+	case BPF_ADD:
+		*res = v1 + v2;
+		break;
+	case BPF_SUB:
+		*res = v1 - v2;
+		break;
+	case BPF_MUL:
+		*res = v1 * v2;
+		break;
+	case BPF_DIV:
+		if (v2 == 0)
+			return false;
+		*res = div64_u64(v1, v2);
+		break;
+	case BPF_MOD:
+		if (v2 == 0)
+			return false;
+		div64_u64_rem(v1, v2, res);
+		break;
+	}
+	return true;
+}
+
+/* Test an ALU shift operation for all valid shift values */
+static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
+				u8 mode, bool alu32)
+{
+	static const s64 regs[] = {
+		0x0123456789abcdefLL, /* dword > 0, word < 0 */
+		0xfedcba9876543210LL, /* dword < 0, word > 0 */
+		0xfedcba0198765432LL, /* dword < 0, word < 0 */
+		0x0123458967abcdefLL, /* dword > 0, word > 0 */
+	};
+	int bits = alu32 ? 32 : 64;
+	int len = (2 + 7 * bits) * ARRAY_SIZE(regs) + 3;
+	struct bpf_insn *insn;
+	int imm, k;
+	int i = 0;
+
 	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
 	if (!insn)
 		return -ENOMEM;
 
-	insn[0] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
-	insn[1] = BPF_JMP_IMM(BPF_JEQ, R0, 1, len - 2 - 1);
+	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
 
-	/*
-	 * Fill with a complex 64-bit operation that expands to a lot of
-	 * instructions on 32-bit JITs. The large jump offset can then
-	 * overflow the conditional branch field size, triggering a branch
-	 * conversion mechanism in some JITs.
-	 *
-	 * Note: BPF_MAXINSNS of ALU64 MUL is enough to trigger such branch
-	 * conversion on the 32-bit MIPS JIT. For other JITs, the instruction
-	 * count and/or operation may need to be modified to trigger the
-	 * branch conversion.
-	 */
-	for (i = 2; i < len - 1; i++)
-		insn[i] = BPF_ALU64_IMM(BPF_MUL, R0, (i << 16) + i);
+	for (k = 0; k < ARRAY_SIZE(regs); k++) {
+		s64 reg = regs[k];
 
-	insn[len - 1] = BPF_EXIT_INSN();
+		i += __bpf_ld_imm64(&insn[i], R3, reg);
+
+		for (imm = 0; imm < bits; imm++) {
+			u64 val;
+
+			/* Perform operation */
+			insn[i++] = BPF_ALU64_REG(BPF_MOV, R1, R3);
+			insn[i++] = BPF_ALU64_IMM(BPF_MOV, R2, imm);
+			if (alu32) {
+				if (mode == BPF_K)
+					insn[i++] = BPF_ALU32_IMM(op, R1, imm);
+				else
+					insn[i++] = BPF_ALU32_REG(op, R1, R2);
+
+				if (op == BPF_ARSH)
+					reg = (s32)reg;
+				else
+					reg = (u32)reg;
+				__bpf_alu_result(&val, reg, imm, op);
+				val = (u32)val;
+			} else {
+				if (mode == BPF_K)
+					insn[i++] = BPF_ALU64_IMM(op, R1, imm);
+				else
+					insn[i++] = BPF_ALU64_REG(op, R1, R2);
+				__bpf_alu_result(&val, reg, imm, op);
+			}
+
+			/*
+			 * When debugging a JIT that fails this test, one
+			 * can write the immediate value to R0 here to find
+			 * out which operand values fail.
+			 */
+
+			/* Load reference and check the result */
+			i += __bpf_ld_imm64(&insn[i], R4, val);
+			insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R4, 1);
+			insn[i++] = BPF_EXIT_INSN();
+		}
+	}
+
+	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+	insn[i++] = BPF_EXIT_INSN();
 
 	self->u.ptr.insns = insn;
 	self->u.ptr.len = len;
+	BUG_ON(i != len);
 
 	return 0;
 }
 
-static struct bpf_test tests[] = {
-	{
-		"TAX",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_IMM, 2),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_LEN, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
-			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
-			BPF_STMT(BPF_RET | BPF_A, 0)
-		},
-		CLASSIC,
-		{ 10, 20, 30, 40, 50 },
-		{ { 2, 10 }, { 3, 20 }, { 4, 30 } },
-	},
-	{
-		"TXA",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_LEN, 0),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
-		},
-		CLASSIC,
-		{ 10, 20, 30, 40, 50 },
-		{ { 1, 2 }, { 3, 6 }, { 4, 8 } },
-	},
-	{
-		"ADD_SUB_MUL_K",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 1),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
-			BPF_STMT(BPF_LDX | BPF_IMM, 3),
-			BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
-			BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
-			BPF_STMT(BPF_RET | BPF_A, 0)
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{ },
-		{ { 0, 0xfffffffd } }
-	},
-	{
-		"DIV_MOD_KX",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 8),
-			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
-			BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
-			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
-			BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
-			BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x70000000),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0)
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{ },
-		{ { 0, 0x20000000 } }
-	},
-	{
-		"AND_OR_LSH_K",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 0xff),
-			BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
-			BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_IMM, 0xf),
-			BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0)
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{ },
-		{ { 0, 0x800000ff }, { 1, 0x800000ff } },
-	},
-	{
-		"LD_IMM_0",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 0),
-			BPF_STMT(BPF_RET | BPF_K, 1),
-		},
-		CLASSIC,
-		{ },
-		{ { 1, 1 } },
-	},
-	{
-		"LD_IND",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_LEN, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
-			BPF_STMT(BPF_RET | BPF_K, 1)
-		},
-		CLASSIC,
-		{ },
-		{ { 1, 0 }, { 10, 0 }, { 60, 0 } },
-	},
-	{
-		"LD_ABS",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
-			BPF_STMT(BPF_RET | BPF_K, 1)
-		},
-		CLASSIC,
-		{ },
-		{ { 1, 0 }, { 10, 0 }, { 60, 0 } },
-	},
-	{
-		"LD_ABS_LL",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0)
-		},
-		CLASSIC,
-		{ 1, 2, 3 },
-		{ { 1, 0 }, { 2, 3 } },
-	},
-	{
-		"LD_IND_LL",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
-			BPF_STMT(BPF_LDX | BPF_LEN, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0)
-		},
-		CLASSIC,
-		{ 1, 2, 3, 0xff },
-		{ { 1, 1 }, { 3, 3 }, { 4, 0xff } },
-	},
-	{
-		"LD_ABS_NET",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0)
-		},
-		CLASSIC,
-		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
-		{ { 15, 0 }, { 16, 3 } },
-	},
-	{
-		"LD_IND_NET",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
-			BPF_STMT(BPF_LDX | BPF_LEN, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0)
-		},
-		CLASSIC,
-		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
-		{ { 14, 0 }, { 15, 1 }, { 17, 3 } },
-	},
-	{
-		"LD_PKTTYPE",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_PKTTYPE),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 1),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_PKTTYPE),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 1),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_PKTTYPE),
+static int bpf_fill_alu64_lsh_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, false);
+}
+
+static int bpf_fill_alu64_rsh_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, false);
+}
+
+static int bpf_fill_alu64_arsh_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, false);
+}
+
+static int bpf_fill_alu64_lsh_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, false);
+}
+
+static int bpf_fill_alu64_rsh_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, false);
+}
+
+static int bpf_fill_alu64_arsh_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, false);
+}
+
+static int bpf_fill_alu32_lsh_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_rsh_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_arsh_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_lsh_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, true);
+}
+
+static int bpf_fill_alu32_rsh_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, true);
+}
+
+static int bpf_fill_alu32_arsh_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, true);
+}
+
+/*
+ * Test an ALU register shift operation for all valid shift values
+ * for the case when the source and destination are the same.
+ */
+static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
+					 bool alu32)
+{
+	int bits = alu32 ? 32 : 64;
+	int len = 3 + 6 * bits;
+	struct bpf_insn *insn;
+	int i = 0;
+	u64 val;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+	for (val = 0; val < bits; val++) {
+		u64 res;
+
+		/* Perform operation */
+		insn[i++] = BPF_ALU64_IMM(BPF_MOV, R1, val);
+		if (alu32)
+			insn[i++] = BPF_ALU32_REG(op, R1, R1);
+		else
+			insn[i++] = BPF_ALU64_REG(op, R1, R1);
+
+		/* Compute the reference result */
+		__bpf_alu_result(&res, val, val, op);
+		if (alu32)
+			res = (u32)res;
+		i += __bpf_ld_imm64(&insn[i], R2, res);
+
+		/* Check the actual result */
+		insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
+		insn[i++] = BPF_EXIT_INSN();
+	}
+
+	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+	insn[i++] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+	BUG_ON(i != len);
+
+	return 0;
+}
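+
+/*
+ * For illustration, each loop iteration above emits a six-instruction
+ * check; a sketch for ALU64 BPF_LSH with val = 13:
+ *
+ *   r1 = 13
+ *   r1 <<= r1               // same register as source and destination
+ *   r2 = 13 << 13           // precomputed reference, via two-insn ld_imm64
+ *   if r1 == r2 goto next   // a mismatch falls through to exit with R0 = 0
+ *   exit
+ */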
+
+static int bpf_fill_alu64_lsh_same_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, false);
+}
+
+static int bpf_fill_alu64_rsh_same_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, false);
+}
+
+static int bpf_fill_alu64_arsh_same_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, false);
+}
+
+static int bpf_fill_alu32_lsh_same_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, true);
+}
+
+static int bpf_fill_alu32_rsh_same_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, true);
+}
+
+static int bpf_fill_alu32_arsh_same_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, true);
+}
+
+/*
+ * Common operand pattern generator for exhaustive power-of-two magnitude
+ * tests. The block size parameters can be adjusted to increase/reduce the
+ * number of combinations tested and thereby execution speed and memory
+ * footprint.
+ */
+
+static inline s64 value(int msb, int delta, int sign)
+{
+	return sign * (1LL << msb) + delta;
+}
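+
+/*
+ * For illustration: value(3, 1, -1) = -(1 << 3) + 1 = -7, i.e. a value
+ * one step above the negative power-of-two magnitude 2^3.
+ */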
+
+static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
+			      int dbits, int sbits, int block1, int block2,
+			      int (*emit)(struct bpf_test*, void*,
+					  struct bpf_insn*, s64, s64))
+{
+	static const int sgn[][2] = {{1, 1}, {1, -1}, {-1, 1}, {-1, -1}};
+	struct bpf_insn *insns;
+	int di, si, bt, db, sb;
+	int count, len, k;
+	int extra = 1 + 2;
+	int i = 0;
+
+	/* Total number of iterations for the two patterns */
+	count = (dbits - 1) * (sbits - 1) * block1 * block1 * ARRAY_SIZE(sgn);
+	count += (max(dbits, sbits) - 1) * block2 * block2 * ARRAY_SIZE(sgn);
+
+	/* Compute the maximum number of insns and allocate the buffer */
+	len = extra + count * (*emit)(self, arg, NULL, 0, 0);
+	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+	if (!insns)
+		return -ENOMEM;
+
+	/* Add head instruction(s) */
+	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+	/*
+	 * Pattern 1: all combinations of power-of-two magnitudes and sign,
+	 * with a block of contiguous values around each magnitude.
+	 */
+	for (di = 0; di < dbits - 1; di++)                 /* Dst magnitudes */
+		for (si = 0; si < sbits - 1; si++)         /* Src magnitudes */
+			for (k = 0; k < ARRAY_SIZE(sgn); k++) /* Sign combos */
+				for (db = -(block1 / 2);
+				     db < (block1 + 1) / 2; db++)
+					for (sb = -(block1 / 2);
+					     sb < (block1 + 1) / 2; sb++) {
+						s64 dst, src;
+
+						dst = value(di, db, sgn[k][0]);
+						src = value(si, sb, sgn[k][1]);
+						i += (*emit)(self, arg,
+							     &insns[i],
+							     dst, src);
+					}
+	/*
+	 * Pattern 2: all combinations for a larger block of values
+	 * for each power-of-two magnitude and sign, where the magnitude is
+	 * the same for both operands.
+	 */
+	for (bt = 0; bt < max(dbits, sbits) - 1; bt++)        /* Magnitude   */
+		for (k = 0; k < ARRAY_SIZE(sgn); k++)         /* Sign combos */
+			for (db = -(block2 / 2); db < (block2 + 1) / 2; db++)
+				for (sb = -(block2 / 2);
+				     sb < (block2 + 1) / 2; sb++) {
+					s64 dst, src;
+
+					dst = value(bt % dbits, db, sgn[k][0]);
+					src = value(bt % sbits, sb, sgn[k][1]);
+					i += (*emit)(self, arg, &insns[i],
+						     dst, src);
+				}
+
+	/* Append tail instructions */
+	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+	insns[i++] = BPF_EXIT_INSN();
+	BUG_ON(i > len);
+
+	self->u.ptr.insns = insns;
+	self->u.ptr.len = i;
+
+	return 0;
+}
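+
+/*
+ * For illustration, with dbits = 64, sbits = 32, block1 = 1 and block2 = 5
+ * as used by the ALU immediate tests below, the two patterns generate
+ * 63 * 31 * 1 * 1 * 4 + 63 * 5 * 5 * 4 = 14112 operand pairs. This is an
+ * upper bound on emitted blocks, since an emitter may skip a pair, e.g.
+ * for division by zero.
+ */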
+
+/*
+ * Block size parameters used in pattern tests below. Tune as needed to
+ * increase/reduce the number of combinations tested; see the following
+ * examples.
+ *        block   values per operand MSB
+ * ----------------------------------------
+ *           0     none
+ *           1     (1 << MSB)
+ *           2     (1 << MSB) + [-1, 0]
+ *           3     (1 << MSB) + [-1, 0, 1]
+ */
+#define PATTERN_BLOCK1 1
+#define PATTERN_BLOCK2 5
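+
+/*
+ * With the values above, for illustration: block1 = 1 tests only the
+ * exact power-of-two value (1 << MSB) of each operand, while block2 = 5
+ * tests (1 << MSB) + [-2, -1, 0, 1, 2], since __bpf_fill_pattern()
+ * iterates deltas from -(block / 2) up to (block + 1) / 2 - 1.
+ */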
+
+/* Number of test runs for a pattern test */
+#define NR_PATTERN_RUNS 1
+
+/*
+ * Exhaustive tests of ALU operations for all combinations of power-of-two
+ * magnitudes of the operands, both for positive and negative values. The
+ * test is designed to verify e.g. the ALU and ALU64 operations for JITs that
+ * emit different code depending on the magnitude of the immediate value.
+ */
+static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
+				struct bpf_insn *insns, s64 dst, s64 imm)
+{
+	int op = *(int *)arg;
+	int i = 0;
+	u64 res;
+
+	if (!insns)
+		return 7;
+
+	if (__bpf_alu_result(&res, dst, (s32)imm, op)) {
+		i += __bpf_ld_imm64(&insns[i], R1, dst);
+		i += __bpf_ld_imm64(&insns[i], R3, res);
+		insns[i++] = BPF_ALU64_IMM(op, R1, imm);
+		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+		insns[i++] = BPF_EXIT_INSN();
+	}
+
+	return i;
+}
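+
+/*
+ * Note: when called with insns == NULL, the emitters above only report
+ * their worst-case length, so that __bpf_fill_pattern() can size the
+ * allocation; 7 here is ld_imm64 (2) + ld_imm64 (2) + ALU + JEQ + EXIT.
+ */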
+
+static int __bpf_emit_alu32_imm(struct bpf_test *self, void *arg,
+				struct bpf_insn *insns, s64 dst, s64 imm)
+{
+	int op = *(int *)arg;
+	int i = 0;
+	u64 res;
+
+	if (!insns)
+		return 7;
+
+	if (__bpf_alu_result(&res, (u32)dst, (u32)imm, op)) {
+		i += __bpf_ld_imm64(&insns[i], R1, dst);
+		i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+		insns[i++] = BPF_ALU32_IMM(op, R1, imm);
+		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+		insns[i++] = BPF_EXIT_INSN();
+	}
+
+	return i;
+}
+
+static int __bpf_emit_alu64_reg(struct bpf_test *self, void *arg,
+				struct bpf_insn *insns, s64 dst, s64 src)
+{
+	int op = *(int *)arg;
+	int i = 0;
+	u64 res;
+
+	if (!insns)
+		return 9;
+
+	if (__bpf_alu_result(&res, dst, src, op)) {
+		i += __bpf_ld_imm64(&insns[i], R1, dst);
+		i += __bpf_ld_imm64(&insns[i], R2, src);
+		i += __bpf_ld_imm64(&insns[i], R3, res);
+		insns[i++] = BPF_ALU64_REG(op, R1, R2);
+		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+		insns[i++] = BPF_EXIT_INSN();
+	}
+
+	return i;
+}
+
+static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
+				struct bpf_insn *insns, s64 dst, s64 src)
+{
+	int op = *(int *)arg;
+	int i = 0;
+	u64 res;
+
+	if (!insns)
+		return 9;
+
+	if (__bpf_alu_result(&res, (u32)dst, (u32)src, op)) {
+		i += __bpf_ld_imm64(&insns[i], R1, dst);
+		i += __bpf_ld_imm64(&insns[i], R2, src);
+		i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+		insns[i++] = BPF_ALU32_REG(op, R1, R2);
+		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+		insns[i++] = BPF_EXIT_INSN();
+	}
+
+	return i;
+}
+
+static int __bpf_fill_alu64_imm(struct bpf_test *self, int op)
+{
+	return __bpf_fill_pattern(self, &op, 64, 32,
+				  PATTERN_BLOCK1, PATTERN_BLOCK2,
+				  &__bpf_emit_alu64_imm);
+}
+
+static int __bpf_fill_alu32_imm(struct bpf_test *self, int op)
+{
+	return __bpf_fill_pattern(self, &op, 64, 32,
+				  PATTERN_BLOCK1, PATTERN_BLOCK2,
+				  &__bpf_emit_alu32_imm);
+}
+
+static int __bpf_fill_alu64_reg(struct bpf_test *self, int op)
+{
+	return __bpf_fill_pattern(self, &op, 64, 64,
+				  PATTERN_BLOCK1, PATTERN_BLOCK2,
+				  &__bpf_emit_alu64_reg);
+}
+
+static int __bpf_fill_alu32_reg(struct bpf_test *self, int op)
+{
+	return __bpf_fill_pattern(self, &op, 64, 64,
+				  PATTERN_BLOCK1, PATTERN_BLOCK2,
+				  &__bpf_emit_alu32_reg);
+}
+
+/* ALU64 immediate operations */
+static int bpf_fill_alu64_mov_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_MOV);
+}
+
+static int bpf_fill_alu64_and_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_AND);
+}
+
+static int bpf_fill_alu64_or_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_OR);
+}
+
+static int bpf_fill_alu64_xor_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_XOR);
+}
+
+static int bpf_fill_alu64_add_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_ADD);
+}
+
+static int bpf_fill_alu64_sub_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_SUB);
+}
+
+static int bpf_fill_alu64_mul_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_MUL);
+}
+
+static int bpf_fill_alu64_div_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_DIV);
+}
+
+static int bpf_fill_alu64_mod_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_MOD);
+}
+
+/* ALU32 immediate operations */
+static int bpf_fill_alu32_mov_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_MOV);
+}
+
+static int bpf_fill_alu32_and_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_AND);
+}
+
+static int bpf_fill_alu32_or_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_OR);
+}
+
+static int bpf_fill_alu32_xor_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_XOR);
+}
+
+static int bpf_fill_alu32_add_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_ADD);
+}
+
+static int bpf_fill_alu32_sub_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_SUB);
+}
+
+static int bpf_fill_alu32_mul_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_MUL);
+}
+
+static int bpf_fill_alu32_div_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_DIV);
+}
+
+static int bpf_fill_alu32_mod_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_MOD);
+}
+
+/* ALU64 register operations */
+static int bpf_fill_alu64_mov_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_reg(self, BPF_MOV);
+}
+
+static int bpf_fill_alu64_and_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_reg(self, BPF_AND);
+}
+
+static int bpf_fill_alu64_or_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_reg(self, BPF_OR);
+}
+
+static int bpf_fill_alu64_xor_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_reg(self, BPF_XOR);
+}
+
+static int bpf_fill_alu64_add_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_reg(self, BPF_ADD);
+}
+
+static int bpf_fill_alu64_sub_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_reg(self, BPF_SUB);
+}
+
+static int bpf_fill_alu64_mul_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_reg(self, BPF_MUL);
+}
+
+static int bpf_fill_alu64_div_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_reg(self, BPF_DIV);
+}
+
+static int bpf_fill_alu64_mod_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_reg(self, BPF_MOD);
+}
+
+/* ALU32 register operations */
+static int bpf_fill_alu32_mov_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_reg(self, BPF_MOV);
+}
+
+static int bpf_fill_alu32_and_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_reg(self, BPF_AND);
+}
+
+static int bpf_fill_alu32_or_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_reg(self, BPF_OR);
+}
+
+static int bpf_fill_alu32_xor_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_reg(self, BPF_XOR);
+}
+
+static int bpf_fill_alu32_add_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_reg(self, BPF_ADD);
+}
+
+static int bpf_fill_alu32_sub_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_reg(self, BPF_SUB);
+}
+
+static int bpf_fill_alu32_mul_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_reg(self, BPF_MUL);
+}
+
+static int bpf_fill_alu32_div_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_reg(self, BPF_DIV);
+}
+
+static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_reg(self, BPF_MOD);
+}
+
+/*
+ * Test JITs that implement complex ALU operations as function
+ * calls, and must re-arrange operands for argument passing.
+ */
+static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
+{
+	int len = 2 + 10 * 10;
+	struct bpf_insn *insns;
+	u64 dst, res;
+	int i = 0;
+	u32 imm;
+	int rd;
+
+	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+	if (!insns)
+		return -ENOMEM;
+
+	/* Operand and result values according to operation */
+	if (alu32)
+		dst = 0x76543210U;
+	else
+		dst = 0x7edcba9876543210ULL;
+	imm = 0x01234567U;
+
+	if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
+		imm &= 31;
+
+	__bpf_alu_result(&res, dst, imm, op);
+
+	if (alu32)
+		res = (u32)res;
+
+	/* Check all operand registers */
+	for (rd = R0; rd <= R9; rd++) {
+		i += __bpf_ld_imm64(&insns[i], rd, dst);
+
+		if (alu32)
+			insns[i++] = BPF_ALU32_IMM(op, rd, imm);
+		else
+			insns[i++] = BPF_ALU64_IMM(op, rd, imm);
+
+		insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res, 2);
+		insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+		insns[i++] = BPF_EXIT_INSN();
+
+		insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32);
+		insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res >> 32, 2);
+		insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+		insns[i++] = BPF_EXIT_INSN();
+	}
+
+	insns[i++] = BPF_MOV64_IMM(R0, 1);
+	insns[i++] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insns;
+	self->u.ptr.len = len;
+	BUG_ON(i != len);
+
+	return 0;
+}
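+
+/*
+ * For illustration: each register rd above emits at most 10 instructions
+ * (ld_imm64 = 2, the ALU op, and two check-report-exit triplets), giving
+ * len = 2 + 10 * 10. A failing check exits with R0 = __LINE__, so the
+ * C source line of the failed comparison is reported as the return value.
+ */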
+
+/* ALU64 K registers */
+static int bpf_fill_alu64_mov_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_MOV, false);
+}
+
+static int bpf_fill_alu64_and_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_AND, false);
+}
+
+static int bpf_fill_alu64_or_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_OR, false);
+}
+
+static int bpf_fill_alu64_xor_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_XOR, false);
+}
+
+static int bpf_fill_alu64_lsh_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_LSH, false);
+}
+
+static int bpf_fill_alu64_rsh_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_RSH, false);
+}
+
+static int bpf_fill_alu64_arsh_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, false);
+}
+
+static int bpf_fill_alu64_add_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_ADD, false);
+}
+
+static int bpf_fill_alu64_sub_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_SUB, false);
+}
+
+static int bpf_fill_alu64_mul_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_MUL, false);
+}
+
+static int bpf_fill_alu64_div_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_DIV, false);
+}
+
+static int bpf_fill_alu64_mod_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_MOD, false);
+}
+
+/* ALU32 K registers */
+static int bpf_fill_alu32_mov_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_MOV, true);
+}
+
+static int bpf_fill_alu32_and_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_AND, true);
+}
+
+static int bpf_fill_alu32_or_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_OR, true);
+}
+
+static int bpf_fill_alu32_xor_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_XOR, true);
+}
+
+static int bpf_fill_alu32_lsh_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_LSH, true);
+}
+
+static int bpf_fill_alu32_rsh_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_RSH, true);
+}
+
+static int bpf_fill_alu32_arsh_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, true);
+}
+
+static int bpf_fill_alu32_add_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_ADD, true);
+}
+
+static int bpf_fill_alu32_sub_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_SUB, true);
+}
+
+static int bpf_fill_alu32_mul_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_MUL, true);
+}
+
+static int bpf_fill_alu32_div_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_DIV, true);
+}
+
+static int bpf_fill_alu32_mod_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_MOD, true);
+}
+
+/*
+ * Test JITs that implement complex ALU operations as function
+ * calls, and must re-arrange operands for argument passing, here for
+ * all combinations of source and destination registers.
+ */
+static int __bpf_fill_alu_reg_pairs(struct bpf_test *self, u8 op, bool alu32)
+{
+	int len = 2 + 10 * 10 * 12;
+	u64 dst, src, res, same;
+	struct bpf_insn *insns;
+	int rd, rs;
+	int i = 0;
+
+	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+	if (!insns)
+		return -ENOMEM;
+
+	/* Operand and result values according to operation */
+	if (alu32) {
+		dst = 0x76543210U;
+		src = 0x01234567U;
+	} else {
+		dst = 0x7edcba9876543210ULL;
+		src = 0x0123456789abcdefULL;
+	}
+
+	if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
+		src &= 31;
+
+	__bpf_alu_result(&res, dst, src, op);
+	__bpf_alu_result(&same, src, src, op);
+
+	if (alu32) {
+		res = (u32)res;
+		same = (u32)same;
+	}
+
+	/* Check all combinations of operand registers */
+	for (rd = R0; rd <= R9; rd++) {
+		for (rs = R0; rs <= R9; rs++) {
+			u64 val = rd == rs ? same : res;
+
+			i += __bpf_ld_imm64(&insns[i], rd, dst);
+			i += __bpf_ld_imm64(&insns[i], rs, src);
+
+			if (alu32)
+				insns[i++] = BPF_ALU32_REG(op, rd, rs);
+			else
+				insns[i++] = BPF_ALU64_REG(op, rd, rs);
+
+			insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val, 2);
+			insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+			insns[i++] = BPF_EXIT_INSN();
+
+			insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32);
+			insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val >> 32, 2);
+			insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+			insns[i++] = BPF_EXIT_INSN();
+		}
+	}
+
+	insns[i++] = BPF_MOV64_IMM(R0, 1);
+	insns[i++] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insns;
+	self->u.ptr.len = len;
+	BUG_ON(i != len);
+
+	return 0;
+}
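+
+/*
+ * Note: when rd == rs above, loading dst and then src into the same
+ * register leaves src in it, so the expected value is op(src, src)
+ * ("same") rather than op(dst, src) ("res").
+ */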
+
+/* ALU64 X register combinations */
+static int bpf_fill_alu64_mov_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_MOV, false);
+}
+
+static int bpf_fill_alu64_and_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_AND, false);
+}
+
+static int bpf_fill_alu64_or_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_OR, false);
+}
+
+static int bpf_fill_alu64_xor_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_XOR, false);
+}
+
+static int bpf_fill_alu64_lsh_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_LSH, false);
+}
+
+static int bpf_fill_alu64_rsh_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_RSH, false);
+}
+
+static int bpf_fill_alu64_arsh_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, false);
+}
+
+static int bpf_fill_alu64_add_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_ADD, false);
+}
+
+static int bpf_fill_alu64_sub_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_SUB, false);
+}
+
+static int bpf_fill_alu64_mul_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_MUL, false);
+}
+
+static int bpf_fill_alu64_div_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_DIV, false);
+}
+
+static int bpf_fill_alu64_mod_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_MOD, false);
+}
+
+/* ALU32 X register combinations */
+static int bpf_fill_alu32_mov_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_MOV, true);
+}
+
+static int bpf_fill_alu32_and_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_AND, true);
+}
+
+static int bpf_fill_alu32_or_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_OR, true);
+}
+
+static int bpf_fill_alu32_xor_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_XOR, true);
+}
+
+static int bpf_fill_alu32_lsh_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_LSH, true);
+}
+
+static int bpf_fill_alu32_rsh_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_RSH, true);
+}
+
+static int bpf_fill_alu32_arsh_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, true);
+}
+
+static int bpf_fill_alu32_add_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_ADD, true);
+}
+
+static int bpf_fill_alu32_sub_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_SUB, true);
+}
+
+static int bpf_fill_alu32_mul_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_MUL, true);
+}
+
+static int bpf_fill_alu32_div_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_DIV, true);
+}
+
+static int bpf_fill_alu32_mod_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_reg_pairs(self, BPF_MOD, true);
+}
+
+/*
+ * Exhaustive tests of atomic operations for all power-of-two operand
+ * magnitudes, both for positive and negative values.
+ */
+
+static int __bpf_emit_atomic64(struct bpf_test *self, void *arg,
+			       struct bpf_insn *insns, s64 dst, s64 src)
+{
+	int op = *(int *)arg;
+	u64 keep, fetch, res;
+	int i = 0;
+
+	if (!insns)
+		return 21;
+
+	switch (op) {
+	case BPF_XCHG:
+		res = src;
+		break;
+	default:
+		__bpf_alu_result(&res, dst, src, BPF_OP(op));
+	}
+
+	keep = 0x0123456789abcdefULL;
+	if (op & BPF_FETCH)
+		fetch = dst;
+	else
+		fetch = src;
+
+	i += __bpf_ld_imm64(&insns[i], R0, keep);
+	i += __bpf_ld_imm64(&insns[i], R1, dst);
+	i += __bpf_ld_imm64(&insns[i], R2, src);
+	i += __bpf_ld_imm64(&insns[i], R3, res);
+	i += __bpf_ld_imm64(&insns[i], R4, fetch);
+	i += __bpf_ld_imm64(&insns[i], R5, keep);
+
+	insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
+	insns[i++] = BPF_ATOMIC_OP(BPF_DW, op, R10, R2, -8);
+	insns[i++] = BPF_LDX_MEM(BPF_DW, R1, R10, -8);
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
+	insns[i++] = BPF_EXIT_INSN();
+
+	return i;
+}
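+
+/*
+ * The sequence above checks three things after the atomic operation:
+ * the memory word (R1 reloaded vs. the expected R3), the source register
+ * (R2 vs. R4, i.e. fetched old value or preserved operand), and that
+ * R0 was left untouched (R0 vs. the "keep" sentinel in R5).
+ */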
+
+static int __bpf_emit_atomic32(struct bpf_test *self, void *arg,
+			       struct bpf_insn *insns, s64 dst, s64 src)
+{
+	int op = *(int *)arg;
+	u64 keep, fetch, res;
+	int i = 0;
+
+	if (!insns)
+		return 21;
+
+	switch (op) {
+	case BPF_XCHG:
+		res = src;
+		break;
+	default:
+		__bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op));
+	}
+
+	keep = 0x0123456789abcdefULL;
+	if (op & BPF_FETCH)
+		fetch = (u32)dst;
+	else
+		fetch = src;
+
+	i += __bpf_ld_imm64(&insns[i], R0, keep);
+	i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
+	i += __bpf_ld_imm64(&insns[i], R2, src);
+	i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+	i += __bpf_ld_imm64(&insns[i], R4, fetch);
+	i += __bpf_ld_imm64(&insns[i], R5, keep);
+
+	insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
+	insns[i++] = BPF_ATOMIC_OP(BPF_W, op, R10, R2, -4);
+	insns[i++] = BPF_LDX_MEM(BPF_W, R1, R10, -4);
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
+	insns[i++] = BPF_EXIT_INSN();
+
+	return i;
+}
+
+static int __bpf_emit_cmpxchg64(struct bpf_test *self, void *arg,
+				struct bpf_insn *insns, s64 dst, s64 src)
+{
+	int i = 0;
+
+	if (!insns)
+		return 23;
+
+	i += __bpf_ld_imm64(&insns[i], R0, ~dst);
+	i += __bpf_ld_imm64(&insns[i], R1, dst);
+	i += __bpf_ld_imm64(&insns[i], R2, src);
+
+	/* Result unsuccessful */
+	insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
+	insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
+	insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 2);
+	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
+	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	/* Result successful */
+	insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
+	insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R3, 2);
+	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
+	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	return i;
+}
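+
+/*
+ * For illustration of the two attempts above: BPF_CMPXCHG compares R0
+ * with the memory word. The first attempt runs with R0 = ~dst, so the
+ * compare fails, memory keeps dst and R0 receives the old value (dst).
+ * The second attempt then runs with R0 == dst, so src is stored and R0
+ * again reads back dst.
+ */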
+
+static int __bpf_emit_cmpxchg32(struct bpf_test *self, void *arg,
+				struct bpf_insn *insns, s64 dst, s64 src)
+{
+	int i = 0;
+
+	if (!insns)
+		return 27;
+
+	i += __bpf_ld_imm64(&insns[i], R0, ~dst);
+	i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
+	i += __bpf_ld_imm64(&insns[i], R2, src);
+
+	/* Result unsuccessful */
+	insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
+	insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
+	insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
+	insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);
+
+	insns[i++] = BPF_JMP32_REG(BPF_JEQ, R1, R3, 2);
+	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
+	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	/* Result successful */
+	i += __bpf_ld_imm64(&insns[i], R0, dst);
+	insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
+	insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
+	insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);
+
+	insns[i++] = BPF_JMP32_REG(BPF_JEQ, R2, R3, 2);
+	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
+	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	return i;
+}
+
+static int __bpf_fill_atomic64(struct bpf_test *self, int op)
+{
+	return __bpf_fill_pattern(self, &op, 64, 64,
+				  0, PATTERN_BLOCK2,
+				  &__bpf_emit_atomic64);
+}
+
+static int __bpf_fill_atomic32(struct bpf_test *self, int op)
+{
+	return __bpf_fill_pattern(self, &op, 64, 64,
+				  0, PATTERN_BLOCK2,
+				  &__bpf_emit_atomic32);
+}
+
+/* 64-bit atomic operations */
+static int bpf_fill_atomic64_add(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_ADD);
+}
+
+static int bpf_fill_atomic64_and(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_AND);
+}
+
+static int bpf_fill_atomic64_or(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_OR);
+}
+
+static int bpf_fill_atomic64_xor(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_XOR);
+}
+
+static int bpf_fill_atomic64_add_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_and_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_or_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xor_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xchg(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_XCHG);
+}
+
+static int bpf_fill_cmpxchg64(struct bpf_test *self)
+{
+	return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
+				  &__bpf_emit_cmpxchg64);
+}
+
+/* 32-bit atomic operations */
+static int bpf_fill_atomic32_add(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_ADD);
+}
+
+static int bpf_fill_atomic32_and(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_AND);
+}
+
+static int bpf_fill_atomic32_or(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_OR);
+}
+
+static int bpf_fill_atomic32_xor(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_XOR);
+}
+
+static int bpf_fill_atomic32_add_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_and_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_or_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xor_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xchg(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_XCHG);
+}
+
+static int bpf_fill_cmpxchg32(struct bpf_test *self)
+{
+	return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
+				  &__bpf_emit_cmpxchg32);
+}
+
+/*
+ * Test JITs that implement ATOMIC operations as function calls or
+ * other primitives, and must re-arrange operands for argument passing.
+ */
+static int __bpf_fill_atomic_reg_pairs(struct bpf_test *self, u8 width, u8 op)
+{
+	struct bpf_insn *insn;
+	int len = 2 + 34 * 10 * 10;
+	u64 mem, upd, res;
+	int rd, rs, i = 0;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	/* Operand and memory values */
+	if (width == BPF_DW) {
+		mem = 0x0123456789abcdefULL;
+		upd = 0xfedcba9876543210ULL;
+	} else { /* BPF_W */
+		mem = 0x01234567U;
+		upd = 0x76543210U;
+	}
+
+	/* Memory updated according to operation */
+	switch (op) {
+	case BPF_XCHG:
+		res = upd;
+		break;
+	case BPF_CMPXCHG:
+		res = mem;
+		break;
+	default:
+		__bpf_alu_result(&res, mem, upd, BPF_OP(op));
+	}
+
+	/* Test all operand registers */
+	for (rd = R0; rd <= R9; rd++) {
+		for (rs = R0; rs <= R9; rs++) {
+			u64 cmp, src;
+
+			/* Initialize value in memory */
+			i += __bpf_ld_imm64(&insn[i], R0, mem);
+			insn[i++] = BPF_STX_MEM(width, R10, R0, -8);
+
+			/* Initialize registers in order */
+			i += __bpf_ld_imm64(&insn[i], R0, ~mem);
+			i += __bpf_ld_imm64(&insn[i], rs, upd);
+			insn[i++] = BPF_MOV64_REG(rd, R10);
+
+			/* Perform atomic operation */
+			insn[i++] = BPF_ATOMIC_OP(width, op, rd, rs, -8);
+			if (op == BPF_CMPXCHG && width == BPF_W)
+				insn[i++] = BPF_ZEXT_REG(R0);
+
+			/* Check R0 register value */
+			if (op == BPF_CMPXCHG)
+				cmp = mem;  /* Expect value from memory */
+			else if (R0 == rd || R0 == rs)
+				cmp = 0;    /* Aliased, checked below */
+			else
+				cmp = ~mem; /* Expect value to be preserved */
+			if (cmp) {
+				insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0,
+							   (u32)cmp, 2);
+				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+				insn[i++] = BPF_EXIT_INSN();
+				insn[i++] = BPF_ALU64_IMM(BPF_RSH, R0, 32);
+				insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0,
+							   cmp >> 32, 2);
+				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+				insn[i++] = BPF_EXIT_INSN();
+			}
+
+			/* Check source register value */
+			if (rs == R0 && op == BPF_CMPXCHG)
+				src = 0;   /* Aliased with R0, checked above */
+			else if (rs == rd && (op == BPF_CMPXCHG ||
+					      !(op & BPF_FETCH)))
+				src = 0;   /* Aliased with rd, checked below */
+			else if (op == BPF_CMPXCHG)
+				src = upd; /* Expect value to be preserved */
+			else if (op & BPF_FETCH)
+				src = mem; /* Expect fetched value from mem */
+			else /* no fetch */
+				src = upd; /* Expect value to be preserved */
+			if (src) {
+				insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs,
+							   (u32)src, 2);
+				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+				insn[i++] = BPF_EXIT_INSN();
+				insn[i++] = BPF_ALU64_IMM(BPF_RSH, rs, 32);
+				insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs,
+							   src >> 32, 2);
+				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+				insn[i++] = BPF_EXIT_INSN();
+			}
+
+			/* Check destination register value */
+			if (!(rd == R0 && op == BPF_CMPXCHG) &&
+			    !(rd == rs && (op & BPF_FETCH))) {
+				insn[i++] = BPF_JMP_REG(BPF_JEQ, rd, R10, 2);
+				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+				insn[i++] = BPF_EXIT_INSN();
+			}
+
+			/* Check value in memory */
+			if (rs != rd) {                  /* No aliasing */
+				i += __bpf_ld_imm64(&insn[i], R1, res);
+			} else if (op == BPF_XCHG) {     /* Aliased, XCHG */
+				insn[i++] = BPF_MOV64_REG(R1, R10);
+			} else if (op == BPF_CMPXCHG) {  /* Aliased, CMPXCHG */
+				i += __bpf_ld_imm64(&insn[i], R1, mem);
+			} else {                        /* Aliased, ALU oper */
+				i += __bpf_ld_imm64(&insn[i], R1, mem);
+				insn[i++] = BPF_ALU64_REG(BPF_OP(op), R1, R10);
+			}
+
+			insn[i++] = BPF_LDX_MEM(width, R0, R10, -8);
+			if (width == BPF_DW)
+				insn[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
+			else /* width == BPF_W */
+				insn[i++] = BPF_JMP32_REG(BPF_JEQ, R0, R1, 2);
+			insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+			insn[i++] = BPF_EXIT_INSN();
+		}
+	}
+
+	insn[i++] = BPF_MOV64_IMM(R0, 1);
+	insn[i++] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = i;
+	BUG_ON(i > len);
+
+	return 0;
+}
+
+/* 64-bit atomic register tests */
+static int bpf_fill_atomic64_add_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD);
+}
+
+static int bpf_fill_atomic64_and_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND);
+}
+
+static int bpf_fill_atomic64_or_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR);
+}
+
+static int bpf_fill_atomic64_xor_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR);
+}
+
+static int bpf_fill_atomic64_add_fetch_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_and_fetch_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_or_fetch_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xor_fetch_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xchg_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XCHG);
+}
+
+static int bpf_fill_atomic64_cmpxchg_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_CMPXCHG);
+}
+
+/* 32-bit atomic register tests */
+static int bpf_fill_atomic32_add_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD);
+}
+
+static int bpf_fill_atomic32_and_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND);
+}
+
+static int bpf_fill_atomic32_or_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR);
+}
+
+static int bpf_fill_atomic32_xor_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR);
+}
+
+static int bpf_fill_atomic32_add_fetch_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_and_fetch_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_or_fetch_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xor_fetch_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xchg_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XCHG);
+}
+
+static int bpf_fill_atomic32_cmpxchg_reg_pairs(struct bpf_test *self)
+{
+	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_CMPXCHG);
+}
+
+/*
+ * Test the two-instruction 64-bit immediate load operation for all
+ * power-of-two magnitudes of the immediate operand. For each MSB, a block
+ * of immediate values centered around the power-of-two MSB are tested,
+ * both for positive and negative values. The test is designed to verify
+ * the operation for JITs that emit different code depending on the magnitude
+ * of the immediate value. This is often the case if the native instruction
+ * immediate field width is narrower than 32 bits.
+ */
+static int bpf_fill_ld_imm64(struct bpf_test *self)
+{
+	int block = 64; /* Increase for more tests per MSB position */
+	int len = 3 + 8 * 63 * block * 2;
+	struct bpf_insn *insn;
+	int bit, adj, sign;
+	int i = 0;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+	for (bit = 0; bit <= 62; bit++) {
+		for (adj = -block / 2; adj < block / 2; adj++) {
+			for (sign = -1; sign <= 1; sign += 2) {
+				s64 imm = sign * ((1LL << bit) + adj);
+
+				/* Perform operation */
+				i += __bpf_ld_imm64(&insn[i], R1, imm);
+
+				/* Load reference */
+				insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm);
+				insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3,
+							  (u32)(imm >> 32));
+				insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32);
+				insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3);
+
+				/* Check result */
+				insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
+				insn[i++] = BPF_EXIT_INSN();
+			}
+		}
+	}
+
+	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+	insn[i++] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+	BUG_ON(i != len);
+
+	return 0;
+}
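+
+/*
+ * Note: the reference value above is deliberately assembled from two
+ * 32-bit moves, a shift and an or, so the two-insn ld_imm64 operation
+ * under test is not used to verify its own result.
+ */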
+
+/*
+ * Exhaustive tests of JMP operations for all combinations of power-of-two
+ * magnitudes of the operands, both for positive and negative values. The
+ * test is designed to verify e.g. the JMP and JMP32 operations for JITs that
+ * emit different code depending on the magnitude of the immediate value.
+ */
+
+static bool __bpf_match_jmp_cond(s64 v1, s64 v2, u8 op)
+{
+	switch (op) {
+	case BPF_JSET:
+		return !!(v1 & v2);
+	case BPF_JEQ:
+		return v1 == v2;
+	case BPF_JNE:
+		return v1 != v2;
+	case BPF_JGT:
+		return (u64)v1 > (u64)v2;
+	case BPF_JGE:
+		return (u64)v1 >= (u64)v2;
+	case BPF_JLT:
+		return (u64)v1 < (u64)v2;
+	case BPF_JLE:
+		return (u64)v1 <= (u64)v2;
+	case BPF_JSGT:
+		return v1 > v2;
+	case BPF_JSGE:
+		return v1 >= v2;
+	case BPF_JSLT:
+		return v1 < v2;
+	case BPF_JSLE:
+		return v1 <= v2;
+	}
+	return false;
+}
+
+static int __bpf_emit_jmp_imm(struct bpf_test *self, void *arg,
+			      struct bpf_insn *insns, s64 dst, s64 imm)
+{
+	int op = *(int *)arg;
+
+	if (insns) {
+		bool match = __bpf_match_jmp_cond(dst, (s32)imm, op);
+		int i = 0;
+
+		insns[i++] = BPF_ALU32_IMM(BPF_MOV, R0, match);
+
+		i += __bpf_ld_imm64(&insns[i], R1, dst);
+		insns[i++] = BPF_JMP_IMM(op, R1, imm, 1);
+		if (!match)
+			insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+		insns[i++] = BPF_EXIT_INSN();
+
+		return i;
+	}
+
+	return 5 + 1;
+}
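+
+/*
+ * Branch layout above, for illustration: R0 is preset to the expected
+ * match result. If the condition should match, the tested branch must
+ * skip the EXIT insn; if it should not, an unconditional JA skips the
+ * EXIT instead, so a wrongly taken branch exits the program early.
+ */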
+
+static int __bpf_emit_jmp32_imm(struct bpf_test *self, void *arg,
+				struct bpf_insn *insns, s64 dst, s64 imm)
+{
+	int op = *(int *)arg;
+
+	if (insns) {
+		bool match = __bpf_match_jmp_cond((s32)dst, (s32)imm, op);
+		int i = 0;
+
+		i += __bpf_ld_imm64(&insns[i], R1, dst);
+		insns[i++] = BPF_JMP32_IMM(op, R1, imm, 1);
+		if (!match)
+			insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+		insns[i++] = BPF_EXIT_INSN();
+
+		return i;
+	}
+
+	return 5;
+}
+
+static int __bpf_emit_jmp_reg(struct bpf_test *self, void *arg,
+			      struct bpf_insn *insns, s64 dst, s64 src)
+{
+	int op = *(int *)arg;
+
+	if (insns) {
+		bool match = __bpf_match_jmp_cond(dst, src, op);
+		int i = 0;
+
+		i += __bpf_ld_imm64(&insns[i], R1, dst);
+		i += __bpf_ld_imm64(&insns[i], R2, src);
+		insns[i++] = BPF_JMP_REG(op, R1, R2, 1);
+		if (!match)
+			insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+		insns[i++] = BPF_EXIT_INSN();
+
+		return i;
+	}
+
+	return 7;
+}
+
+static int __bpf_emit_jmp32_reg(struct bpf_test *self, void *arg,
+				struct bpf_insn *insns, s64 dst, s64 src)
+{
+	int op = *(int *)arg;
+
+	if (insns) {
+		bool match = __bpf_match_jmp_cond((s32)dst, (s32)src, op);
+		int i = 0;
+
+		i += __bpf_ld_imm64(&insns[i], R1, dst);
+		i += __bpf_ld_imm64(&insns[i], R2, src);
+		insns[i++] = BPF_JMP32_REG(op, R1, R2, 1);
+		if (!match)
+			insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+		insns[i++] = BPF_EXIT_INSN();
+
+		return i;
+	}
+
+	return 7;
+}
+
+static int __bpf_fill_jmp_imm(struct bpf_test *self, int op)
+{
+	return __bpf_fill_pattern(self, &op, 64, 32,
+				  PATTERN_BLOCK1, PATTERN_BLOCK2,
+				  &__bpf_emit_jmp_imm);
+}
+
+static int __bpf_fill_jmp32_imm(struct bpf_test *self, int op)
+{
+	return __bpf_fill_pattern(self, &op, 64, 32,
+				  PATTERN_BLOCK1, PATTERN_BLOCK2,
+				  &__bpf_emit_jmp32_imm);
+}
+
+static int __bpf_fill_jmp_reg(struct bpf_test *self, int op)
+{
+	return __bpf_fill_pattern(self, &op, 64, 64,
+				  PATTERN_BLOCK1, PATTERN_BLOCK2,
+				  &__bpf_emit_jmp_reg);
+}
+
+static int __bpf_fill_jmp32_reg(struct bpf_test *self, int op)
+{
+	return __bpf_fill_pattern(self, &op, 64, 64,
+				  PATTERN_BLOCK1, PATTERN_BLOCK2,
+				  &__bpf_emit_jmp32_reg);
+}
+
+/* JMP immediate tests */
+static int bpf_fill_jmp_jset_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_imm(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp_jeq_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_imm(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp_jne_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_imm(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp_jgt_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_imm(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp_jge_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_imm(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp_jlt_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_imm(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp_jle_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_imm(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp_jsgt_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_imm(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp_jsge_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_imm(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp_jslt_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_imm(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp_jsle_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_imm(self, BPF_JSLE);
+}
+
+/* JMP32 immediate tests */
+static int bpf_fill_jmp32_jset_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_imm(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp32_jeq_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_imm(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp32_jne_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_imm(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp32_jgt_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_imm(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp32_jge_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_imm(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp32_jlt_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_imm(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp32_jle_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_imm(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp32_jsgt_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_imm(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp32_jsge_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_imm(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp32_jslt_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_imm(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp32_jsle_imm(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_imm(self, BPF_JSLE);
+}
+
+/* JMP register tests */
+static int bpf_fill_jmp_jset_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_reg(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp_jeq_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_reg(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp_jne_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_reg(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp_jgt_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_reg(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp_jge_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_reg(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp_jlt_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_reg(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp_jle_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_reg(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp_jsgt_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_reg(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp_jsge_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_reg(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp_jslt_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_reg(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp_jsle_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp_reg(self, BPF_JSLE);
+}
+
+/* JMP32 register tests */
+static int bpf_fill_jmp32_jset_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_reg(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp32_jeq_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_reg(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp32_jne_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_reg(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp32_jgt_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_reg(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp32_jge_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_reg(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp32_jlt_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_reg(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp32_jle_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_reg(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp32_jsgt_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_reg(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp32_jsge_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_reg(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp32_jslt_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_reg(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp32_jsle_reg(struct bpf_test *self)
+{
+	return __bpf_fill_jmp32_reg(self, BPF_JSLE);
+}
+
+/*
+ * Set up a sequence of staggered jumps, forwards and backwards with
+ * increasing offset. This tests the conversion of relative jumps to
+ * JITed native jumps. On some architectures, for example MIPS, a large
+ * PC-relative jump offset may overflow the immediate field of the native
+ * conditional branch instruction, triggering a conversion to use an
+ * absolute jump instead. Since this changes the jump offsets, another
+ * offset computation pass is necessary, and that may in turn trigger
+ * another branch conversion. This jump sequence is particularly nasty
+ * in that regard.
+ *
+ * The sequence generation is parameterized by size and jump type.
+ * The size must be even, and the expected result is always size + 1.
+ * Below is an example with size=8 and result=9.
+ *
+ *                     ________________________Start
+ *                     R0 = 0
+ *                     R1 = r1
+ *                     R2 = r2
+ *            ,------- JMP +4 * 3______________Preamble: 4 insns
+ * ,----------|-ind 0- if R0 != 7 JMP 8 * 3 + 1 <--------------------.
+ * |          |        R0 = 8                                        |
+ * |          |        JMP +7 * 3               ------------------------.
+ * | ,--------|-----1- if R0 != 5 JMP 7 * 3 + 1 <--------------.     |  |
+ * | |        |        R0 = 6                                  |     |  |
+ * | |        |        JMP +5 * 3               ------------------.  |  |
+ * | | ,------|-----2- if R0 != 3 JMP 6 * 3 + 1 <--------.     |  |  |  |
+ * | | |      |        R0 = 4                            |     |  |  |  |
+ * | | |      |        JMP +3 * 3               ------------.  |  |  |  |
+ * | | | ,----|-----3- if R0 != 1 JMP 5 * 3 + 1 <--.     |  |  |  |  |  |
+ * | | | |    |        R0 = 2                      |     |  |  |  |  |  |
+ * | | | |    |        JMP +1 * 3               ------.  |  |  |  |  |  |
+ * | | | | ,--t=====4> if R0 != 0 JMP 4 * 3 + 1    1  2  3  4  5  6  7  8 loc
+ * | | | | |           R0 = 1                     -1 +2 -3 +4 -5 +6 -7 +8 off
+ * | | | | |           JMP -2 * 3               ---'  |  |  |  |  |  |  |
+ * | | | | | ,------5- if R0 != 2 JMP 3 * 3 + 1 <-----'  |  |  |  |  |  |
+ * | | | | | |         R0 = 3                            |  |  |  |  |  |
+ * | | | | | |         JMP -4 * 3               ---------'  |  |  |  |  |
+ * | | | | | | ,----6- if R0 != 4 JMP 2 * 3 + 1 <-----------'  |  |  |  |
+ * | | | | | | |       R0 = 5                                  |  |  |  |
+ * | | | | | | |       JMP -6 * 3               ---------------'  |  |  |
+ * | | | | | | | ,--7- if R0 != 6 JMP 1 * 3 + 1 <-----------------'  |  |
+ * | | | | | | | |     R0 = 7                                        |  |
+ * | | Error | | |     JMP -8 * 3               ---------------------'  |
+ * | | paths | | | ,8- if R0 != 8 JMP 0 * 3 + 1 <-----------------------'
+ * | | | | | | | | |   R0 = 9__________________Sequence: 3 * size - 1 insns
+ * `-+-+-+-+-+-+-+-+-> EXIT____________________Return: 1 insn
+ *
+ */
+
+/* The maximum size parameter */
+#define MAX_STAGGERED_JMP_SIZE ((0x7fff / 3) & ~1)
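+
+/*
+ * The off field of a BPF jump insn is a signed 16-bit value, and each
+ * step of the sequence is 3 insns, hence the 0x7fff / 3 bound; the & ~1
+ * rounds down to an even size as the generator requires.
+ */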
+
+/* We use a reduced number of iterations to get a reasonable execution time */
+#define NR_STAGGERED_JMP_RUNS 10
+
+static int __bpf_fill_staggered_jumps(struct bpf_test *self,
+				      const struct bpf_insn *jmp,
+				      u64 r1, u64 r2)
+{
+	int size = self->test[0].result - 1;
+	int len = 4 + 3 * (size + 1);
+	struct bpf_insn *insns;
+	int off, ind;
+
+	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+	if (!insns)
+		return -ENOMEM;
+
+	/* Preamble */
+	insns[0] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+	insns[1] = BPF_ALU64_IMM(BPF_MOV, R1, r1);
+	insns[2] = BPF_ALU64_IMM(BPF_MOV, R2, r2);
+	insns[3] = BPF_JMP_IMM(BPF_JA, 0, 0, 3 * size / 2);
+
+	/* Sequence */
+	for (ind = 0, off = size; ind <= size; ind++, off -= 2) {
+		struct bpf_insn *ins = &insns[4 + 3 * ind];
+		int loc;
+
+		if (off == 0)
+			off--;
+
+		loc = abs(off);
+		ins[0] = BPF_JMP_IMM(BPF_JNE, R0, loc - 1,
+				     3 * (size - ind) + 1);
+		ins[1] = BPF_ALU64_IMM(BPF_MOV, R0, loc);
+		ins[2] = *jmp;
+		ins[2].off = 3 * (off - 1);
+	}
+
+	/* Return */
+	insns[len - 1] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insns;
+	self->u.ptr.len = len;
+
+	return 0;
+}
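+
+/*
+ * For illustration, with size = 8 the loop above generates off values
+ * 8, 6, 4, 2, -1, -3, -5, -7, -9; the off-- fixup skips zero so the
+ * magnitudes cover each location id 1..size + 1 exactly once. Each step
+ * verifies the id stored by its predecessor, stores its own, and jumps
+ * 3 * (off - 1) insns to the next step.
+ */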
+
+/* 64-bit unconditional jump */
+static int bpf_fill_staggered_ja(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 0, 0);
+}
+
+/* 64-bit immediate jumps */
+static int bpf_fill_staggered_jeq_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JEQ, R1, 1234, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jne_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JNE, R1, 1234, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
+}
+
+static int bpf_fill_staggered_jset_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSET, R1, 0x82, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
+}
+
+static int bpf_fill_staggered_jgt_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGT, R1, 1234, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
+}
+
+static int bpf_fill_staggered_jge_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGE, R1, 1234, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jlt_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLT, R1, 0x80000000, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jle_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLE, R1, 1234, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jsgt_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGT, R1, -2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+static int bpf_fill_staggered_jsge_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGE, R1, -2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jslt_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLT, R1, -1, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jsle_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLE, R1, -1, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+/* 64-bit register jumps */
+static int bpf_fill_staggered_jeq_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_REG(BPF_JEQ, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jne_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_REG(BPF_JNE, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
+}
+
+static int bpf_fill_staggered_jset_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSET, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
+}
+
+static int bpf_fill_staggered_jgt_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_REG(BPF_JGT, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
+}
+
+static int bpf_fill_staggered_jge_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_REG(BPF_JGE, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jlt_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_REG(BPF_JLT, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
+}
+
+static int bpf_fill_staggered_jle_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_REG(BPF_JLE, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jsgt_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGT, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
+}
+
+static int bpf_fill_staggered_jsge_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGE, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
+}
+
+static int bpf_fill_staggered_jslt_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLT, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
+}
+
+static int bpf_fill_staggered_jsle_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLE, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
+}
+
+/* 32-bit immediate jumps */
+static int bpf_fill_staggered_jeq32_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JEQ, R1, 1234, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jne32_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JNE, R1, 1234, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
+}
+
+static int bpf_fill_staggered_jset32_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSET, R1, 0x82, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
+}
+
+static int bpf_fill_staggered_jgt32_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGT, R1, 1234, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
+}
+
+static int bpf_fill_staggered_jge32_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGE, R1, 1234, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jlt32_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLT, R1, 0x80000000, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jle32_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLE, R1, 1234, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jsgt32_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGT, R1, -2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+static int bpf_fill_staggered_jsge32_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGE, R1, -2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jslt32_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLT, R1, -1, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jsle32_imm(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLE, R1, -1, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+/* 32-bit register jumps */
+static int bpf_fill_staggered_jeq32_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JEQ, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jne32_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JNE, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
+}
+
+static int bpf_fill_staggered_jset32_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSET, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
+}
+
+static int bpf_fill_staggered_jgt32_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGT, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
+}
+
+static int bpf_fill_staggered_jge32_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGE, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jlt32_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLT, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
+}
+
+static int bpf_fill_staggered_jle32_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLE, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jsgt32_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGT, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
+}
+
+static int bpf_fill_staggered_jsge32_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGE, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
+}
+
+static int bpf_fill_staggered_jslt32_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLT, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
+}
+
+static int bpf_fill_staggered_jsle32_reg(struct bpf_test *self)
+{
+	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLE, R1, R2, 0);
+
+	return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
+}
+
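+/* Test table: each entry provides a description, a classic (.u.insns)
+ * or internal eBPF (.u.insns_int) program, flags, optional packet data,
+ * and { data length, expected return value } pairs to run against it.
+ */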
+static struct bpf_test tests[] = {
+	{
+		"TAX",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_LD | BPF_IMM, 2),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_LD | BPF_LEN, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
+			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ 10, 20, 30, 40, 50 },
+		{ { 2, 10 }, { 3, 20 }, { 4, 30 } },
+	},
+	{
+		"TXA",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_LEN, 0),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
+		},
+		CLASSIC,
+		{ 10, 20, 30, 40, 50 },
+		{ { 1, 2 }, { 3, 6 }, { 4, 8 } },
+	},
+	{
+		"ADD_SUB_MUL_K",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 1),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
+			BPF_STMT(BPF_LDX | BPF_IMM, 3),
+			BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
+			BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xfffffffd } }
+	},
+	{
+		"DIV_MOD_KX",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 8),
+			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X = 8 / 2 = 4 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+			BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X = 0x3fffffff */
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X = 2 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+			BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X = 0xffffffff % 2 = 1 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+			BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x70000000),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0), /* A = 0x1fffffff + 1 */
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0x20000000 } }
+	},
+	{
+		"AND_OR_LSH_K",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xff),
+			BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
+			BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_LD | BPF_IMM, 0xf),
+			BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0x800000ff }, { 1, 0x800000ff } },
+	},
+	{
+		"LD_IMM_0",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 0),
+			BPF_STMT(BPF_RET | BPF_K, 1),
+		},
+		CLASSIC,
+		{ },
+		{ { 1, 1 } },
+	},
+	{
+		"LD_IND",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_LEN, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
+			BPF_STMT(BPF_RET | BPF_K, 1)
+		},
+		CLASSIC,
+		{ },
+		{ { 1, 0 }, { 10, 0 }, { 60, 0 } },
+	},
+	{
+		"LD_ABS",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
+			BPF_STMT(BPF_RET | BPF_K, 1)
+		},
+		CLASSIC,
+		{ },
+		{ { 1, 0 }, { 10, 0 }, { 60, 0 } },
+	},
+	{
+		"LD_ABS_LL",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ 1, 2, 3 },
+		{ { 1, 0 }, { 2, 3 } },
+	},
+	{
+		"LD_IND_LL",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
+			BPF_STMT(BPF_LDX | BPF_LEN, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ 1, 2, 3, 0xff },
+		{ { 1, 1 }, { 3, 3 }, { 4, 0xff } },
+	},
+	{
+		"LD_ABS_NET",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
+		{ { 15, 0 }, { 16, 3 } },
+	},
+	{
+		"LD_IND_NET",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
+			BPF_STMT(BPF_LDX | BPF_LEN, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
+		{ { 14, 0 }, { 15, 1 }, { 17, 3 } },
+	},
+	{
+		"LD_PKTTYPE",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_PKTTYPE),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 1),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_PKTTYPE),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 1),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_PKTTYPE),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 1),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ },
+		{ { 1, 3 }, { 10, 3 } },
+	},
+	{
+		"LD_MARK",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_MARK),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ },
+		{ { 1, SKB_MARK}, { 10, SKB_MARK} },
+	},
+	{
+		"LD_RXHASH",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_RXHASH),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ },
+		{ { 1, SKB_HASH}, { 10, SKB_HASH} },
+	},
+	{
+		"LD_QUEUE",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_QUEUE),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ },
+		{ { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
+	},
+	{
+		"LD_PROTOCOL",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 0),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_PROTOCOL),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 0),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ 10, 20, 30 },
+		{ { 10, ETH_P_IP }, { 100, ETH_P_IP } },
+	},
+	{
+		"LD_VLAN_TAG",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_VLAN_TAG),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ },
+		{
+			{ 1, SKB_VLAN_TCI },
+			{ 10, SKB_VLAN_TCI }
+		},
+	},
+	{
+		"LD_VLAN_TAG_PRESENT",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ },
+		{
+			{ 1, SKB_VLAN_PRESENT },
+			{ 10, SKB_VLAN_PRESENT }
+		},
+	},
+	{
+		"LD_IFINDEX",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_IFINDEX),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ },
+		{ { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
+	},
+	{
+		"LD_HATYPE",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_HATYPE),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ },
+		{ { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
+	},
+	{
+		"LD_CPU",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_CPU),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_CPU),
+			BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ },
+		{ { 1, 0 }, { 10, 0 } },
+	},
+	{
+		"LD_NLATTR",
+		.u.insns = {
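+			/* For SKF_AD_NLATTR: A = offset to start the
+			 * search at, X = nlattr type to find; the load
+			 * yields the attribute's offset, or 0 if absent.
+			 */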
+			BPF_STMT(BPF_LDX | BPF_IMM, 2),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_LDX | BPF_IMM, 3),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_NLATTR),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+#ifdef __BIG_ENDIAN
+		{ 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 },
+#else
+		{ 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
+#endif
+		{ { 4, 0 }, { 20, 6 } },
+	},
+	{
+		"LD_NLATTR_NEST",
+		.u.insns = {
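+			/* For SKF_AD_NLATTR_NEST: A = offset of the
+			 * enclosing nest attribute, X = nested nlattr
+			 * type to find inside it.
+			 */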
+			BPF_STMT(BPF_LD | BPF_IMM, 2),
+			BPF_STMT(BPF_LDX | BPF_IMM, 3),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+			BPF_STMT(BPF_LD | BPF_IMM, 2),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+			BPF_STMT(BPF_LD | BPF_IMM, 2),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+			BPF_STMT(BPF_LD | BPF_IMM, 2),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+			BPF_STMT(BPF_LD | BPF_IMM, 2),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+			BPF_STMT(BPF_LD | BPF_IMM, 2),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+			BPF_STMT(BPF_LD | BPF_IMM, 2),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+			BPF_STMT(BPF_LD | BPF_IMM, 2),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+#ifdef __BIG_ENDIAN
+		{ 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 },
+#else
+		{ 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
+#endif
+		{ { 4, 0 }, { 20, 10 } },
+	},
+	{
+		"LD_PAYLOAD_OFF",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		/* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethtype IPv4 (0x0800),
+		 * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
+		 * id 9737, seq 1, length 64
+		 */
+		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		  0x08, 0x00,
+		  0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
+		  0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
+		{ { 30, 0 }, { 100, 42 } },
+	},
+	{
+		"LD_ANC_XOR",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 10),
+			BPF_STMT(BPF_LDX | BPF_IMM, 300),
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_ALU_XOR_X),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ },
+		{ { 4, 0xA ^ 300 }, { 20, 0xA ^ 300 } },
+	},
+	{
+		"SPILL_FILL",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_LEN, 0),
+			BPF_STMT(BPF_LD | BPF_IMM, 2),
+			BPF_STMT(BPF_ALU | BPF_RSH, 1),
+			BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+			BPF_STMT(BPF_ST, 1), /* M[1] = 1 ^ len */
+			BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
+			BPF_STMT(BPF_ST, 2), /* M[2] = 1 ^ len ^ 0x80000000 */
+			BPF_STMT(BPF_STX, 15), /* M[15] = len */
+			BPF_STMT(BPF_LDX | BPF_MEM, 1),
+			BPF_STMT(BPF_LD | BPF_MEM, 2),
+			BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 15),
+			BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ },
+		{ { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
+	},
+	{
+		"JEQ",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_LEN, 0),
+			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
+			BPF_STMT(BPF_RET | BPF_K, 1),
+			BPF_STMT(BPF_RET | BPF_K, MAX_K)
+		},
+		CLASSIC,
+		{ 3, 3, 3, 3, 3 },
+		{ { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
+	},
+	{
+		"JGT",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_LEN, 0),
+			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+			BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
+			BPF_STMT(BPF_RET | BPF_K, 1),
+			BPF_STMT(BPF_RET | BPF_K, MAX_K)
+		},
+		CLASSIC,
+		{ 4, 4, 4, 3, 3 },
+		{ { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
+	},
+	{
+		"JGE (jt 0), test 1",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_LEN, 0),
+			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
+			BPF_STMT(BPF_RET | BPF_K, 1),
+			BPF_STMT(BPF_RET | BPF_K, MAX_K)
+		},
+		CLASSIC,
+		{ 4, 4, 4, 3, 3 },
+		{ { 2, 0 }, { 3, 1 }, { 4, 1 } },
+	},
+	{
+		"JGE (jt 0), test 2",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_LEN, 0),
+			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
+			BPF_STMT(BPF_RET | BPF_K, 1),
+			BPF_STMT(BPF_RET | BPF_K, MAX_K)
+		},
+		CLASSIC,
+		{ 4, 4, 5, 3, 3 },
+		{ { 4, 1 }, { 5, 1 }, { 6, MAX_K } },
+	},
+	{
+		"JGE",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_LEN, 0),
+			BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
+			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 10),
+			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 20),
+			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 30),
+			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 40),
+			BPF_STMT(BPF_RET | BPF_K, MAX_K)
+		},
+		CLASSIC,
+		{ 1, 2, 3, 4, 5 },
+		{ { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
+	},
+	{
+		"JSET",
+		.u.insns = {
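+			/* Leading unconditional BPF_JA jumps (the second
+			 * one skips the third) exercise jump handling
+			 * before the JSET checks below.
+			 */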
+			BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+			BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
+			BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+			BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+			BPF_STMT(BPF_LDX | BPF_LEN, 0),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
+			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
+			BPF_STMT(BPF_RET | BPF_K, 10),
+			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
+			BPF_STMT(BPF_RET | BPF_K, 20),
+			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 30),
+			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 30),
+			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 30),
+			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 30),
+			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 30),
+			BPF_STMT(BPF_RET | BPF_K, MAX_K)
+		},
+		CLASSIC,
+		{ 0, 0xAA, 0x55, 1 },
+		{ { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
+	},
+	{
+		"tcpdump port 22",
+		.u.insns = {
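+			/* Matches the filter 'tcpdump port 22' would
+			 * generate: TCP/UDP/SCTP to or from port 22,
+			 * over IPv4 or IPv6.
+			 */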
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */
+			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */
+			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
+			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+			BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1),
+			BPF_STMT(BPF_RET | BPF_K, 0xffff),
+			BPF_STMT(BPF_RET | BPF_K, 0),
+		},
+		CLASSIC,
+		/* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4 (0x0800)
+		 * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
+		 * seq 1305692979:1305693027, ack 3650467037, win 65535,
+		 * options [nop,nop,TS val 2502645400 ecr 3971138], length 48
+		 */
+		{ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+		  0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
+		  0x08, 0x00,
+		  0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
+		  0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
+		  0x0a, 0x01, 0x01, 0x95, /* ip src */
+		  0x0a, 0x01, 0x02, 0x0a, /* ip dst */
+		  0xc2, 0x24,
+		  0x00, 0x16 /* dst port */ },
+		{ { 10, 0 }, { 30, 0 }, { 100, 65535 } },
+	},
+	{
+		"tcpdump complex",
+		.u.insns = {
+			/* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
+			 * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
+			 * (len > 115 or len < 30000000000)' -d
+			 */
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29),
+			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
+			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+			BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16),
+			BPF_STMT(BPF_ST, 1),
+			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14),
+			BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf),
+			BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */
+			BPF_STMT(BPF_LD | BPF_MEM, 1),
+			BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+			BPF_STMT(BPF_ST, 5),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26),
+			BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
+			BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */
+			BPF_STMT(BPF_LD | BPF_MEM, 5),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0),
+			BPF_STMT(BPF_LD | BPF_LEN, 0),
+			BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0),
+			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 0xffff),
+			BPF_STMT(BPF_RET | BPF_K, 0),
+		},
+		CLASSIC,
+		{ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+		  0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
+		  0x08, 0x00,
+		  0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
+		  0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
+		  0x0a, 0x01, 0x01, 0x95, /* ip src */
+		  0x0a, 0x01, 0x02, 0x0a, /* ip dst */
+		  0xc2, 0x24,
+		  0x00, 0x16 /* dst port */ },
+		{ { 10, 0 }, { 30, 0 }, { 100, 65535 } },
+	},
+	{
+		"RET_A",
+		.u.insns = {
+			/* check that uninitialized X and A contain zeros */
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0)
+		},
+		CLASSIC,
+		{ },
+		{ {1, 0}, {2, 0} },
+	},
+	{
+		"INT: ADD trivial",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R1, 1),
+			BPF_ALU64_IMM(BPF_ADD, R1, 2),
+			BPF_ALU64_IMM(BPF_MOV, R2, 3),
+			BPF_ALU64_REG(BPF_SUB, R1, R2),
+			BPF_ALU64_IMM(BPF_ADD, R1, -1),
+			BPF_ALU64_IMM(BPF_MUL, R1, 3),
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfffffffd } }
+	},
+	{
+		"INT: MUL_X",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			BPF_ALU64_IMM(BPF_MOV, R1, -1),
+			BPF_ALU64_IMM(BPF_MOV, R2, 3),
+			BPF_ALU64_REG(BPF_MUL, R1, R2),
+			BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+	{
+		"INT: MUL_X2",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, -1),
+			BPF_ALU32_IMM(BPF_MOV, R1, -1),
+			BPF_ALU32_IMM(BPF_MOV, R2, 3),
+			BPF_ALU64_REG(BPF_MUL, R1, R2),
+			BPF_ALU64_IMM(BPF_RSH, R1, 8),
+			BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+	{
+		"INT: MUL32_X",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, -1),
+			BPF_ALU64_IMM(BPF_MOV, R1, -1),
+			BPF_ALU32_IMM(BPF_MOV, R2, 3),
+			BPF_ALU32_REG(BPF_MUL, R1, R2),
+			BPF_ALU64_IMM(BPF_RSH, R1, 8),
+			BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+	{
+		/* Have to test all register combinations, since
+		 * JITing of different registers will produce
+		 * different asm code.
+		 */
+		"INT: ADD 64-bit",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_ALU64_IMM(BPF_MOV, R1, 1),
+			BPF_ALU64_IMM(BPF_MOV, R2, 2),
+			BPF_ALU64_IMM(BPF_MOV, R3, 3),
+			BPF_ALU64_IMM(BPF_MOV, R4, 4),
+			BPF_ALU64_IMM(BPF_MOV, R5, 5),
+			BPF_ALU64_IMM(BPF_MOV, R6, 6),
+			BPF_ALU64_IMM(BPF_MOV, R7, 7),
+			BPF_ALU64_IMM(BPF_MOV, R8, 8),
+			BPF_ALU64_IMM(BPF_MOV, R9, 9),
+			BPF_ALU64_IMM(BPF_ADD, R0, 20),
+			BPF_ALU64_IMM(BPF_ADD, R1, 20),
+			BPF_ALU64_IMM(BPF_ADD, R2, 20),
+			BPF_ALU64_IMM(BPF_ADD, R3, 20),
+			BPF_ALU64_IMM(BPF_ADD, R4, 20),
+			BPF_ALU64_IMM(BPF_ADD, R5, 20),
+			BPF_ALU64_IMM(BPF_ADD, R6, 20),
+			BPF_ALU64_IMM(BPF_ADD, R7, 20),
+			BPF_ALU64_IMM(BPF_ADD, R8, 20),
+			BPF_ALU64_IMM(BPF_ADD, R9, 20),
+			BPF_ALU64_IMM(BPF_SUB, R0, 10),
+			BPF_ALU64_IMM(BPF_SUB, R1, 10),
+			BPF_ALU64_IMM(BPF_SUB, R2, 10),
+			BPF_ALU64_IMM(BPF_SUB, R3, 10),
+			BPF_ALU64_IMM(BPF_SUB, R4, 10),
+			BPF_ALU64_IMM(BPF_SUB, R5, 10),
+			BPF_ALU64_IMM(BPF_SUB, R6, 10),
+			BPF_ALU64_IMM(BPF_SUB, R7, 10),
+			BPF_ALU64_IMM(BPF_SUB, R8, 10),
+			BPF_ALU64_IMM(BPF_SUB, R9, 10),
+			BPF_ALU64_REG(BPF_ADD, R0, R0),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_ALU64_REG(BPF_ADD, R0, R2),
+			BPF_ALU64_REG(BPF_ADD, R0, R3),
+			BPF_ALU64_REG(BPF_ADD, R0, R4),
+			BPF_ALU64_REG(BPF_ADD, R0, R5),
+			BPF_ALU64_REG(BPF_ADD, R0, R6),
+			BPF_ALU64_REG(BPF_ADD, R0, R7),
+			BPF_ALU64_REG(BPF_ADD, R0, R8),
+			BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
+			BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_ADD, R1, R0),
+			BPF_ALU64_REG(BPF_ADD, R1, R1),
+			BPF_ALU64_REG(BPF_ADD, R1, R2),
+			BPF_ALU64_REG(BPF_ADD, R1, R3),
+			BPF_ALU64_REG(BPF_ADD, R1, R4),
+			BPF_ALU64_REG(BPF_ADD, R1, R5),
+			BPF_ALU64_REG(BPF_ADD, R1, R6),
+			BPF_ALU64_REG(BPF_ADD, R1, R7),
+			BPF_ALU64_REG(BPF_ADD, R1, R8),
+			BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
+			BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_ADD, R2, R0),
+			BPF_ALU64_REG(BPF_ADD, R2, R1),
+			BPF_ALU64_REG(BPF_ADD, R2, R2),
+			BPF_ALU64_REG(BPF_ADD, R2, R3),
+			BPF_ALU64_REG(BPF_ADD, R2, R4),
+			BPF_ALU64_REG(BPF_ADD, R2, R5),
+			BPF_ALU64_REG(BPF_ADD, R2, R6),
+			BPF_ALU64_REG(BPF_ADD, R2, R7),
+			BPF_ALU64_REG(BPF_ADD, R2, R8),
+			BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
+			BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_ADD, R3, R0),
+			BPF_ALU64_REG(BPF_ADD, R3, R1),
+			BPF_ALU64_REG(BPF_ADD, R3, R2),
+			BPF_ALU64_REG(BPF_ADD, R3, R3),
+			BPF_ALU64_REG(BPF_ADD, R3, R4),
+			BPF_ALU64_REG(BPF_ADD, R3, R5),
+			BPF_ALU64_REG(BPF_ADD, R3, R6),
+			BPF_ALU64_REG(BPF_ADD, R3, R7),
+			BPF_ALU64_REG(BPF_ADD, R3, R8),
+			BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
+			BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_ADD, R4, R0),
+			BPF_ALU64_REG(BPF_ADD, R4, R1),
+			BPF_ALU64_REG(BPF_ADD, R4, R2),
+			BPF_ALU64_REG(BPF_ADD, R4, R3),
+			BPF_ALU64_REG(BPF_ADD, R4, R4),
+			BPF_ALU64_REG(BPF_ADD, R4, R5),
+			BPF_ALU64_REG(BPF_ADD, R4, R6),
+			BPF_ALU64_REG(BPF_ADD, R4, R7),
+			BPF_ALU64_REG(BPF_ADD, R4, R8),
+			BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
+			BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_ADD, R5, R0),
+			BPF_ALU64_REG(BPF_ADD, R5, R1),
+			BPF_ALU64_REG(BPF_ADD, R5, R2),
+			BPF_ALU64_REG(BPF_ADD, R5, R3),
+			BPF_ALU64_REG(BPF_ADD, R5, R4),
+			BPF_ALU64_REG(BPF_ADD, R5, R5),
+			BPF_ALU64_REG(BPF_ADD, R5, R6),
+			BPF_ALU64_REG(BPF_ADD, R5, R7),
+			BPF_ALU64_REG(BPF_ADD, R5, R8),
+			BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
+			BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_ADD, R6, R0),
+			BPF_ALU64_REG(BPF_ADD, R6, R1),
+			BPF_ALU64_REG(BPF_ADD, R6, R2),
+			BPF_ALU64_REG(BPF_ADD, R6, R3),
+			BPF_ALU64_REG(BPF_ADD, R6, R4),
+			BPF_ALU64_REG(BPF_ADD, R6, R5),
+			BPF_ALU64_REG(BPF_ADD, R6, R6),
+			BPF_ALU64_REG(BPF_ADD, R6, R7),
+			BPF_ALU64_REG(BPF_ADD, R6, R8),
+			BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
+			BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_ADD, R7, R0),
+			BPF_ALU64_REG(BPF_ADD, R7, R1),
+			BPF_ALU64_REG(BPF_ADD, R7, R2),
+			BPF_ALU64_REG(BPF_ADD, R7, R3),
+			BPF_ALU64_REG(BPF_ADD, R7, R4),
+			BPF_ALU64_REG(BPF_ADD, R7, R5),
+			BPF_ALU64_REG(BPF_ADD, R7, R6),
+			BPF_ALU64_REG(BPF_ADD, R7, R7),
+			BPF_ALU64_REG(BPF_ADD, R7, R8),
+			BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
+			BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_ADD, R8, R0),
+			BPF_ALU64_REG(BPF_ADD, R8, R1),
+			BPF_ALU64_REG(BPF_ADD, R8, R2),
+			BPF_ALU64_REG(BPF_ADD, R8, R3),
+			BPF_ALU64_REG(BPF_ADD, R8, R4),
+			BPF_ALU64_REG(BPF_ADD, R8, R5),
+			BPF_ALU64_REG(BPF_ADD, R8, R6),
+			BPF_ALU64_REG(BPF_ADD, R8, R7),
+			BPF_ALU64_REG(BPF_ADD, R8, R8),
+			BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
+			BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_ADD, R9, R0),
+			BPF_ALU64_REG(BPF_ADD, R9, R1),
+			BPF_ALU64_REG(BPF_ADD, R9, R2),
+			BPF_ALU64_REG(BPF_ADD, R9, R3),
+			BPF_ALU64_REG(BPF_ADD, R9, R4),
+			BPF_ALU64_REG(BPF_ADD, R9, R5),
+			BPF_ALU64_REG(BPF_ADD, R9, R6),
+			BPF_ALU64_REG(BPF_ADD, R9, R7),
+			BPF_ALU64_REG(BPF_ADD, R9, R8),
+			BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
+			BPF_ALU64_REG(BPF_MOV, R0, R9),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2957380 } }
+	},
+	{
+		"INT: ADD 32-bit",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 20),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1),
+			BPF_ALU32_IMM(BPF_MOV, R2, 2),
+			BPF_ALU32_IMM(BPF_MOV, R3, 3),
+			BPF_ALU32_IMM(BPF_MOV, R4, 4),
+			BPF_ALU32_IMM(BPF_MOV, R5, 5),
+			BPF_ALU32_IMM(BPF_MOV, R6, 6),
+			BPF_ALU32_IMM(BPF_MOV, R7, 7),
+			BPF_ALU32_IMM(BPF_MOV, R8, 8),
+			BPF_ALU32_IMM(BPF_MOV, R9, 9),
+			BPF_ALU64_IMM(BPF_ADD, R1, 10),
+			BPF_ALU64_IMM(BPF_ADD, R2, 10),
+			BPF_ALU64_IMM(BPF_ADD, R3, 10),
+			BPF_ALU64_IMM(BPF_ADD, R4, 10),
+			BPF_ALU64_IMM(BPF_ADD, R5, 10),
+			BPF_ALU64_IMM(BPF_ADD, R6, 10),
+			BPF_ALU64_IMM(BPF_ADD, R7, 10),
+			BPF_ALU64_IMM(BPF_ADD, R8, 10),
+			BPF_ALU64_IMM(BPF_ADD, R9, 10),
+			BPF_ALU32_REG(BPF_ADD, R0, R1),
+			BPF_ALU32_REG(BPF_ADD, R0, R2),
+			BPF_ALU32_REG(BPF_ADD, R0, R3),
+			BPF_ALU32_REG(BPF_ADD, R0, R4),
+			BPF_ALU32_REG(BPF_ADD, R0, R5),
+			BPF_ALU32_REG(BPF_ADD, R0, R6),
+			BPF_ALU32_REG(BPF_ADD, R0, R7),
+			BPF_ALU32_REG(BPF_ADD, R0, R8),
+			BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
+			BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_REG(BPF_ADD, R1, R0),
+			BPF_ALU32_REG(BPF_ADD, R1, R1),
+			BPF_ALU32_REG(BPF_ADD, R1, R2),
+			BPF_ALU32_REG(BPF_ADD, R1, R3),
+			BPF_ALU32_REG(BPF_ADD, R1, R4),
+			BPF_ALU32_REG(BPF_ADD, R1, R5),
+			BPF_ALU32_REG(BPF_ADD, R1, R6),
+			BPF_ALU32_REG(BPF_ADD, R1, R7),
+			BPF_ALU32_REG(BPF_ADD, R1, R8),
+			BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
+			BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_REG(BPF_ADD, R2, R0),
+			BPF_ALU32_REG(BPF_ADD, R2, R1),
+			BPF_ALU32_REG(BPF_ADD, R2, R2),
+			BPF_ALU32_REG(BPF_ADD, R2, R3),
+			BPF_ALU32_REG(BPF_ADD, R2, R4),
+			BPF_ALU32_REG(BPF_ADD, R2, R5),
+			BPF_ALU32_REG(BPF_ADD, R2, R6),
+			BPF_ALU32_REG(BPF_ADD, R2, R7),
+			BPF_ALU32_REG(BPF_ADD, R2, R8),
+			BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
+			BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_REG(BPF_ADD, R3, R0),
+			BPF_ALU32_REG(BPF_ADD, R3, R1),
+			BPF_ALU32_REG(BPF_ADD, R3, R2),
+			BPF_ALU32_REG(BPF_ADD, R3, R3),
+			BPF_ALU32_REG(BPF_ADD, R3, R4),
+			BPF_ALU32_REG(BPF_ADD, R3, R5),
+			BPF_ALU32_REG(BPF_ADD, R3, R6),
+			BPF_ALU32_REG(BPF_ADD, R3, R7),
+			BPF_ALU32_REG(BPF_ADD, R3, R8),
+			BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
+			BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_REG(BPF_ADD, R4, R0),
+			BPF_ALU32_REG(BPF_ADD, R4, R1),
+			BPF_ALU32_REG(BPF_ADD, R4, R2),
+			BPF_ALU32_REG(BPF_ADD, R4, R3),
+			BPF_ALU32_REG(BPF_ADD, R4, R4),
+			BPF_ALU32_REG(BPF_ADD, R4, R5),
+			BPF_ALU32_REG(BPF_ADD, R4, R6),
+			BPF_ALU32_REG(BPF_ADD, R4, R7),
+			BPF_ALU32_REG(BPF_ADD, R4, R8),
+			BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
+			BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_REG(BPF_ADD, R5, R0),
+			BPF_ALU32_REG(BPF_ADD, R5, R1),
+			BPF_ALU32_REG(BPF_ADD, R5, R2),
+			BPF_ALU32_REG(BPF_ADD, R5, R3),
+			BPF_ALU32_REG(BPF_ADD, R5, R4),
+			BPF_ALU32_REG(BPF_ADD, R5, R5),
+			BPF_ALU32_REG(BPF_ADD, R5, R6),
+			BPF_ALU32_REG(BPF_ADD, R5, R7),
+			BPF_ALU32_REG(BPF_ADD, R5, R8),
+			BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
+			BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_REG(BPF_ADD, R6, R0),
+			BPF_ALU32_REG(BPF_ADD, R6, R1),
+			BPF_ALU32_REG(BPF_ADD, R6, R2),
+			BPF_ALU32_REG(BPF_ADD, R6, R3),
+			BPF_ALU32_REG(BPF_ADD, R6, R4),
+			BPF_ALU32_REG(BPF_ADD, R6, R5),
+			BPF_ALU32_REG(BPF_ADD, R6, R6),
+			BPF_ALU32_REG(BPF_ADD, R6, R7),
+			BPF_ALU32_REG(BPF_ADD, R6, R8),
+			BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
+			BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_REG(BPF_ADD, R7, R0),
+			BPF_ALU32_REG(BPF_ADD, R7, R1),
+			BPF_ALU32_REG(BPF_ADD, R7, R2),
+			BPF_ALU32_REG(BPF_ADD, R7, R3),
+			BPF_ALU32_REG(BPF_ADD, R7, R4),
+			BPF_ALU32_REG(BPF_ADD, R7, R5),
+			BPF_ALU32_REG(BPF_ADD, R7, R6),
+			BPF_ALU32_REG(BPF_ADD, R7, R7),
+			BPF_ALU32_REG(BPF_ADD, R7, R8),
+			BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
+			BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_REG(BPF_ADD, R8, R0),
+			BPF_ALU32_REG(BPF_ADD, R8, R1),
+			BPF_ALU32_REG(BPF_ADD, R8, R2),
+			BPF_ALU32_REG(BPF_ADD, R8, R3),
+			BPF_ALU32_REG(BPF_ADD, R8, R4),
+			BPF_ALU32_REG(BPF_ADD, R8, R5),
+			BPF_ALU32_REG(BPF_ADD, R8, R6),
+			BPF_ALU32_REG(BPF_ADD, R8, R7),
+			BPF_ALU32_REG(BPF_ADD, R8, R8),
+			BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
+			BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_REG(BPF_ADD, R9, R0),
+			BPF_ALU32_REG(BPF_ADD, R9, R1),
+			BPF_ALU32_REG(BPF_ADD, R9, R2),
+			BPF_ALU32_REG(BPF_ADD, R9, R3),
+			BPF_ALU32_REG(BPF_ADD, R9, R4),
+			BPF_ALU32_REG(BPF_ADD, R9, R5),
+			BPF_ALU32_REG(BPF_ADD, R9, R6),
+			BPF_ALU32_REG(BPF_ADD, R9, R7),
+			BPF_ALU32_REG(BPF_ADD, R9, R8),
+			BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
+			BPF_ALU32_REG(BPF_MOV, R0, R9),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2957380 } }
+	},
+	{	/* Mainly checking JIT here. */
+		"INT: SUB",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_ALU64_IMM(BPF_MOV, R1, 1),
+			BPF_ALU64_IMM(BPF_MOV, R2, 2),
+			BPF_ALU64_IMM(BPF_MOV, R3, 3),
+			BPF_ALU64_IMM(BPF_MOV, R4, 4),
+			BPF_ALU64_IMM(BPF_MOV, R5, 5),
+			BPF_ALU64_IMM(BPF_MOV, R6, 6),
+			BPF_ALU64_IMM(BPF_MOV, R7, 7),
+			BPF_ALU64_IMM(BPF_MOV, R8, 8),
+			BPF_ALU64_IMM(BPF_MOV, R9, 9),
+			BPF_ALU64_REG(BPF_SUB, R0, R0),
+			BPF_ALU64_REG(BPF_SUB, R0, R1),
+			BPF_ALU64_REG(BPF_SUB, R0, R2),
+			BPF_ALU64_REG(BPF_SUB, R0, R3),
+			BPF_ALU64_REG(BPF_SUB, R0, R4),
+			BPF_ALU64_REG(BPF_SUB, R0, R5),
+			BPF_ALU64_REG(BPF_SUB, R0, R6),
+			BPF_ALU64_REG(BPF_SUB, R0, R7),
+			BPF_ALU64_REG(BPF_SUB, R0, R8),
+			BPF_ALU64_REG(BPF_SUB, R0, R9),
+			BPF_ALU64_IMM(BPF_SUB, R0, 10),
+			BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_SUB, R1, R0),
+			BPF_ALU64_REG(BPF_SUB, R1, R2),
+			BPF_ALU64_REG(BPF_SUB, R1, R3),
+			BPF_ALU64_REG(BPF_SUB, R1, R4),
+			BPF_ALU64_REG(BPF_SUB, R1, R5),
+			BPF_ALU64_REG(BPF_SUB, R1, R6),
+			BPF_ALU64_REG(BPF_SUB, R1, R7),
+			BPF_ALU64_REG(BPF_SUB, R1, R8),
+			BPF_ALU64_REG(BPF_SUB, R1, R9),
+			BPF_ALU64_IMM(BPF_SUB, R1, 10),
+			BPF_ALU64_REG(BPF_SUB, R2, R0),
+			BPF_ALU64_REG(BPF_SUB, R2, R1),
+			BPF_ALU64_REG(BPF_SUB, R2, R3),
+			BPF_ALU64_REG(BPF_SUB, R2, R4),
+			BPF_ALU64_REG(BPF_SUB, R2, R5),
+			BPF_ALU64_REG(BPF_SUB, R2, R6),
+			BPF_ALU64_REG(BPF_SUB, R2, R7),
+			BPF_ALU64_REG(BPF_SUB, R2, R8),
+			BPF_ALU64_REG(BPF_SUB, R2, R9),
+			BPF_ALU64_IMM(BPF_SUB, R2, 10),
+			BPF_ALU64_REG(BPF_SUB, R3, R0),
+			BPF_ALU64_REG(BPF_SUB, R3, R1),
+			BPF_ALU64_REG(BPF_SUB, R3, R2),
+			BPF_ALU64_REG(BPF_SUB, R3, R4),
+			BPF_ALU64_REG(BPF_SUB, R3, R5),
+			BPF_ALU64_REG(BPF_SUB, R3, R6),
+			BPF_ALU64_REG(BPF_SUB, R3, R7),
+			BPF_ALU64_REG(BPF_SUB, R3, R8),
+			BPF_ALU64_REG(BPF_SUB, R3, R9),
+			BPF_ALU64_IMM(BPF_SUB, R3, 10),
+			BPF_ALU64_REG(BPF_SUB, R4, R0),
+			BPF_ALU64_REG(BPF_SUB, R4, R1),
+			BPF_ALU64_REG(BPF_SUB, R4, R2),
+			BPF_ALU64_REG(BPF_SUB, R4, R3),
+			BPF_ALU64_REG(BPF_SUB, R4, R5),
+			BPF_ALU64_REG(BPF_SUB, R4, R6),
+			BPF_ALU64_REG(BPF_SUB, R4, R7),
+			BPF_ALU64_REG(BPF_SUB, R4, R8),
+			BPF_ALU64_REG(BPF_SUB, R4, R9),
+			BPF_ALU64_IMM(BPF_SUB, R4, 10),
+			BPF_ALU64_REG(BPF_SUB, R5, R0),
+			BPF_ALU64_REG(BPF_SUB, R5, R1),
+			BPF_ALU64_REG(BPF_SUB, R5, R2),
+			BPF_ALU64_REG(BPF_SUB, R5, R3),
+			BPF_ALU64_REG(BPF_SUB, R5, R4),
+			BPF_ALU64_REG(BPF_SUB, R5, R6),
+			BPF_ALU64_REG(BPF_SUB, R5, R7),
+			BPF_ALU64_REG(BPF_SUB, R5, R8),
+			BPF_ALU64_REG(BPF_SUB, R5, R9),
+			BPF_ALU64_IMM(BPF_SUB, R5, 10),
+			BPF_ALU64_REG(BPF_SUB, R6, R0),
+			BPF_ALU64_REG(BPF_SUB, R6, R1),
+			BPF_ALU64_REG(BPF_SUB, R6, R2),
+			BPF_ALU64_REG(BPF_SUB, R6, R3),
+			BPF_ALU64_REG(BPF_SUB, R6, R4),
+			BPF_ALU64_REG(BPF_SUB, R6, R5),
+			BPF_ALU64_REG(BPF_SUB, R6, R7),
+			BPF_ALU64_REG(BPF_SUB, R6, R8),
+			BPF_ALU64_REG(BPF_SUB, R6, R9),
+			BPF_ALU64_IMM(BPF_SUB, R6, 10),
+			BPF_ALU64_REG(BPF_SUB, R7, R0),
+			BPF_ALU64_REG(BPF_SUB, R7, R1),
+			BPF_ALU64_REG(BPF_SUB, R7, R2),
+			BPF_ALU64_REG(BPF_SUB, R7, R3),
+			BPF_ALU64_REG(BPF_SUB, R7, R4),
+			BPF_ALU64_REG(BPF_SUB, R7, R5),
+			BPF_ALU64_REG(BPF_SUB, R7, R6),
+			BPF_ALU64_REG(BPF_SUB, R7, R8),
+			BPF_ALU64_REG(BPF_SUB, R7, R9),
+			BPF_ALU64_IMM(BPF_SUB, R7, 10),
+			BPF_ALU64_REG(BPF_SUB, R8, R0),
+			BPF_ALU64_REG(BPF_SUB, R8, R1),
+			BPF_ALU64_REG(BPF_SUB, R8, R2),
+			BPF_ALU64_REG(BPF_SUB, R8, R3),
+			BPF_ALU64_REG(BPF_SUB, R8, R4),
+			BPF_ALU64_REG(BPF_SUB, R8, R5),
+			BPF_ALU64_REG(BPF_SUB, R8, R6),
+			BPF_ALU64_REG(BPF_SUB, R8, R7),
+			BPF_ALU64_REG(BPF_SUB, R8, R9),
+			BPF_ALU64_IMM(BPF_SUB, R8, 10),
+			BPF_ALU64_REG(BPF_SUB, R9, R0),
+			BPF_ALU64_REG(BPF_SUB, R9, R1),
+			BPF_ALU64_REG(BPF_SUB, R9, R2),
+			BPF_ALU64_REG(BPF_SUB, R9, R3),
+			BPF_ALU64_REG(BPF_SUB, R9, R4),
+			BPF_ALU64_REG(BPF_SUB, R9, R5),
+			BPF_ALU64_REG(BPF_SUB, R9, R6),
+			BPF_ALU64_REG(BPF_SUB, R9, R7),
+			BPF_ALU64_REG(BPF_SUB, R9, R8),
+			BPF_ALU64_IMM(BPF_SUB, R9, 10),
+			BPF_ALU64_IMM(BPF_SUB, R0, 10),
+			BPF_ALU64_IMM(BPF_NEG, R0, 0),
+			BPF_ALU64_REG(BPF_SUB, R0, R1),
+			BPF_ALU64_REG(BPF_SUB, R0, R2),
+			BPF_ALU64_REG(BPF_SUB, R0, R3),
+			BPF_ALU64_REG(BPF_SUB, R0, R4),
+			BPF_ALU64_REG(BPF_SUB, R0, R5),
+			BPF_ALU64_REG(BPF_SUB, R0, R6),
+			BPF_ALU64_REG(BPF_SUB, R0, R7),
+			BPF_ALU64_REG(BPF_SUB, R0, R8),
+			BPF_ALU64_REG(BPF_SUB, R0, R9),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 11 } }
+	},
+	{	/* Mainly checking JIT here. */
+		"INT: XOR",
+		.u.insns_int = {
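+			/* Both SUB Rn,Rn and XOR Rn,Rn must leave the
+			 * register cleared to zero.
+			 */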
+			BPF_ALU64_REG(BPF_SUB, R0, R0),
+			BPF_ALU64_REG(BPF_XOR, R1, R1),
+			BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_MOV, R0, 10),
+			BPF_ALU64_IMM(BPF_MOV, R1, -1),
+			BPF_ALU64_REG(BPF_SUB, R1, R1),
+			BPF_ALU64_REG(BPF_XOR, R2, R2),
+			BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_SUB, R2, R2),
+			BPF_ALU64_REG(BPF_XOR, R3, R3),
+			BPF_ALU64_IMM(BPF_MOV, R0, 10),
+			BPF_ALU64_IMM(BPF_MOV, R1, -1),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_SUB, R3, R3),
+			BPF_ALU64_REG(BPF_XOR, R4, R4),
+			BPF_ALU64_IMM(BPF_MOV, R2, 1),
+			BPF_ALU64_IMM(BPF_MOV, R5, -1),
+			BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_SUB, R4, R4),
+			BPF_ALU64_REG(BPF_XOR, R5, R5),
+			BPF_ALU64_IMM(BPF_MOV, R3, 1),
+			BPF_ALU64_IMM(BPF_MOV, R7, -1),
+			BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_MOV, R5, 1),
+			BPF_ALU64_REG(BPF_SUB, R5, R5),
+			BPF_ALU64_REG(BPF_XOR, R6, R6),
+			BPF_ALU64_IMM(BPF_MOV, R1, 1),
+			BPF_ALU64_IMM(BPF_MOV, R8, -1),
+			BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_SUB, R6, R6),
+			BPF_ALU64_REG(BPF_XOR, R7, R7),
+			BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_SUB, R7, R7),
+			BPF_ALU64_REG(BPF_XOR, R8, R8),
+			BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_SUB, R8, R8),
+			BPF_ALU64_REG(BPF_XOR, R9, R9),
+			BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_SUB, R9, R9),
+			BPF_ALU64_REG(BPF_XOR, R0, R0),
+			BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_SUB, R1, R1),
+			BPF_ALU64_REG(BPF_XOR, R0, R0),
+			BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+	{	/* Mainly checking JIT here. */
+		"INT: MUL",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 11),
+			BPF_ALU64_IMM(BPF_MOV, R1, 1),
+			BPF_ALU64_IMM(BPF_MOV, R2, 2),
+			BPF_ALU64_IMM(BPF_MOV, R3, 3),
+			BPF_ALU64_IMM(BPF_MOV, R4, 4),
+			BPF_ALU64_IMM(BPF_MOV, R5, 5),
+			BPF_ALU64_IMM(BPF_MOV, R6, 6),
+			BPF_ALU64_IMM(BPF_MOV, R7, 7),
+			BPF_ALU64_IMM(BPF_MOV, R8, 8),
+			BPF_ALU64_IMM(BPF_MOV, R9, 9),
+			BPF_ALU64_REG(BPF_MUL, R0, R0),
+			BPF_ALU64_REG(BPF_MUL, R0, R1),
+			BPF_ALU64_REG(BPF_MUL, R0, R2),
+			BPF_ALU64_REG(BPF_MUL, R0, R3),
+			BPF_ALU64_REG(BPF_MUL, R0, R4),
+			BPF_ALU64_REG(BPF_MUL, R0, R5),
+			BPF_ALU64_REG(BPF_MUL, R0, R6),
+			BPF_ALU64_REG(BPF_MUL, R0, R7),
+			BPF_ALU64_REG(BPF_MUL, R0, R8),
+			BPF_ALU64_REG(BPF_MUL, R0, R9),
+			BPF_ALU64_IMM(BPF_MUL, R0, 10),
+			BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_MUL, R1, R0),
+			BPF_ALU64_REG(BPF_MUL, R1, R2),
+			BPF_ALU64_REG(BPF_MUL, R1, R3),
+			BPF_ALU64_REG(BPF_MUL, R1, R4),
+			BPF_ALU64_REG(BPF_MUL, R1, R5),
+			BPF_ALU64_REG(BPF_MUL, R1, R6),
+			BPF_ALU64_REG(BPF_MUL, R1, R7),
+			BPF_ALU64_REG(BPF_MUL, R1, R8),
+			BPF_ALU64_REG(BPF_MUL, R1, R9),
+			BPF_ALU64_IMM(BPF_MUL, R1, 10),
+			BPF_ALU64_REG(BPF_MOV, R2, R1),
+			BPF_ALU64_IMM(BPF_RSH, R2, 32),
+			BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_LSH, R1, 32),
+			BPF_ALU64_IMM(BPF_ARSH, R1, 32),
+			BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_MUL, R2, R0),
+			BPF_ALU64_REG(BPF_MUL, R2, R1),
+			BPF_ALU64_REG(BPF_MUL, R2, R3),
+			BPF_ALU64_REG(BPF_MUL, R2, R4),
+			BPF_ALU64_REG(BPF_MUL, R2, R5),
+			BPF_ALU64_REG(BPF_MUL, R2, R6),
+			BPF_ALU64_REG(BPF_MUL, R2, R7),
+			BPF_ALU64_REG(BPF_MUL, R2, R8),
+			BPF_ALU64_REG(BPF_MUL, R2, R9),
+			BPF_ALU64_IMM(BPF_MUL, R2, 10),
+			BPF_ALU64_IMM(BPF_RSH, R2, 32),
+			BPF_ALU64_REG(BPF_MOV, R0, R2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x35d97ef2 } }
+	},
+	{	/* Mainly checking JIT here. */
+		"MOV REG64",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+			BPF_MOV64_REG(R1, R0),
+			BPF_MOV64_REG(R2, R1),
+			BPF_MOV64_REG(R3, R2),
+			BPF_MOV64_REG(R4, R3),
+			BPF_MOV64_REG(R5, R4),
+			BPF_MOV64_REG(R6, R5),
+			BPF_MOV64_REG(R7, R6),
+			BPF_MOV64_REG(R8, R7),
+			BPF_MOV64_REG(R9, R8),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_ALU64_IMM(BPF_MOV, R1, 0),
+			BPF_ALU64_IMM(BPF_MOV, R2, 0),
+			BPF_ALU64_IMM(BPF_MOV, R3, 0),
+			BPF_ALU64_IMM(BPF_MOV, R4, 0),
+			BPF_ALU64_IMM(BPF_MOV, R5, 0),
+			BPF_ALU64_IMM(BPF_MOV, R6, 0),
+			BPF_ALU64_IMM(BPF_MOV, R7, 0),
+			BPF_ALU64_IMM(BPF_MOV, R8, 0),
+			BPF_ALU64_IMM(BPF_MOV, R9, 0),
+			BPF_ALU64_REG(BPF_ADD, R0, R0),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_ALU64_REG(BPF_ADD, R0, R2),
+			BPF_ALU64_REG(BPF_ADD, R0, R3),
+			BPF_ALU64_REG(BPF_ADD, R0, R4),
+			BPF_ALU64_REG(BPF_ADD, R0, R5),
+			BPF_ALU64_REG(BPF_ADD, R0, R6),
+			BPF_ALU64_REG(BPF_ADD, R0, R7),
+			BPF_ALU64_REG(BPF_ADD, R0, R8),
+			BPF_ALU64_REG(BPF_ADD, R0, R9),
+			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfefe } }
+	},
+	{	/* Mainly checking JIT here. */
+		"MOV REG32",
+		.u.insns_int = {
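+			/* A 32-bit MOV must zero-extend, clearing the
+			 * upper half of each destination register.
+			 */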
+			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+			BPF_MOV64_REG(R1, R0),
+			BPF_MOV64_REG(R2, R1),
+			BPF_MOV64_REG(R3, R2),
+			BPF_MOV64_REG(R4, R3),
+			BPF_MOV64_REG(R5, R4),
+			BPF_MOV64_REG(R6, R5),
+			BPF_MOV64_REG(R7, R6),
+			BPF_MOV64_REG(R8, R7),
+			BPF_MOV64_REG(R9, R8),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0),
+			BPF_ALU32_IMM(BPF_MOV, R2, 0),
+			BPF_ALU32_IMM(BPF_MOV, R3, 0),
+			BPF_ALU32_IMM(BPF_MOV, R4, 0),
+			BPF_ALU32_IMM(BPF_MOV, R5, 0),
+			BPF_ALU32_IMM(BPF_MOV, R6, 0),
+			BPF_ALU32_IMM(BPF_MOV, R7, 0),
+			BPF_ALU32_IMM(BPF_MOV, R8, 0),
+			BPF_ALU32_IMM(BPF_MOV, R9, 0),
+			BPF_ALU64_REG(BPF_ADD, R0, R0),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_ALU64_REG(BPF_ADD, R0, R2),
+			BPF_ALU64_REG(BPF_ADD, R0, R3),
+			BPF_ALU64_REG(BPF_ADD, R0, R4),
+			BPF_ALU64_REG(BPF_ADD, R0, R5),
+			BPF_ALU64_REG(BPF_ADD, R0, R6),
+			BPF_ALU64_REG(BPF_ADD, R0, R7),
+			BPF_ALU64_REG(BPF_ADD, R0, R8),
+			BPF_ALU64_REG(BPF_ADD, R0, R9),
+			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfefe } }
+	},
+	{	/* Mainly checking JIT here. */
+		"LD IMM64",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+			BPF_MOV64_REG(R1, R0),
+			BPF_MOV64_REG(R2, R1),
+			BPF_MOV64_REG(R3, R2),
+			BPF_MOV64_REG(R4, R3),
+			BPF_MOV64_REG(R5, R4),
+			BPF_MOV64_REG(R6, R5),
+			BPF_MOV64_REG(R7, R6),
+			BPF_MOV64_REG(R8, R7),
+			BPF_MOV64_REG(R9, R8),
+			BPF_LD_IMM64(R0, 0x0LL),
+			BPF_LD_IMM64(R1, 0x0LL),
+			BPF_LD_IMM64(R2, 0x0LL),
+			BPF_LD_IMM64(R3, 0x0LL),
+			BPF_LD_IMM64(R4, 0x0LL),
+			BPF_LD_IMM64(R5, 0x0LL),
+			BPF_LD_IMM64(R6, 0x0LL),
+			BPF_LD_IMM64(R7, 0x0LL),
+			BPF_LD_IMM64(R8, 0x0LL),
+			BPF_LD_IMM64(R9, 0x0LL),
+			BPF_ALU64_REG(BPF_ADD, R0, R0),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_ALU64_REG(BPF_ADD, R0, R2),
+			BPF_ALU64_REG(BPF_ADD, R0, R3),
+			BPF_ALU64_REG(BPF_ADD, R0, R4),
+			BPF_ALU64_REG(BPF_ADD, R0, R5),
+			BPF_ALU64_REG(BPF_ADD, R0, R6),
+			BPF_ALU64_REG(BPF_ADD, R0, R7),
+			BPF_ALU64_REG(BPF_ADD, R0, R8),
+			BPF_ALU64_REG(BPF_ADD, R0, R9),
+			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfefe } }
+	},
+	{
+		"INT: ALU MIX",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 11),
+			BPF_ALU64_IMM(BPF_ADD, R0, -1),
+			BPF_ALU64_IMM(BPF_MOV, R2, 2),
+			BPF_ALU64_IMM(BPF_XOR, R2, 3),
+			BPF_ALU64_REG(BPF_DIV, R0, R2),
+			BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_MOD, R0, 3),
+			BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -1 } }
+	},
+	{
+		"INT: shifts by register",
+		.u.insns_int = {
+			BPF_MOV64_IMM(R0, -1234),
+			BPF_MOV64_IMM(R1, 1),
+			BPF_ALU32_REG(BPF_RSH, R0, R1),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(R2, 1),
+			BPF_ALU64_REG(BPF_LSH, R0, R2),
+			BPF_MOV32_IMM(R4, -1234),
+			BPF_JMP_REG(BPF_JEQ, R0, R4, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_AND, R4, 63),
+			BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <<= 46 */
+			BPF_MOV64_IMM(R3, 47),
+			BPF_ALU64_REG(BPF_ARSH, R0, R3),
+			BPF_JMP_IMM(BPF_JEQ, R0, -617, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(R2, 1),
+			BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */
+			BPF_JMP_IMM(BPF_JEQ, R4, 92, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(R4, 4),
+			BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */
+			BPF_JMP_IMM(BPF_JEQ, R4, 64, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(R4, 5),
+			BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */
+			BPF_JMP_IMM(BPF_JEQ, R4, 160, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(R0, -1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -1 } }
+	},
+#ifdef CONFIG_32BIT
+	{
+		"INT: 32-bit context pointer word order and zero-extension",
+		.u.insns_int = {
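+			/* R1 holds the context pointer: pass (return 1)
+			 * only if its low 32 bits are non-zero and its
+			 * upper 32 bits are zero.
+			 */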
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_JMP32_IMM(BPF_JEQ, R1, 0, 3),
+			BPF_ALU64_IMM(BPF_RSH, R1, 32),
+			BPF_JMP32_IMM(BPF_JNE, R1, 0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+#endif
+	{
+		"check: missing ret",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 1),
+		},
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+		{ },
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
+	},
+	{
+		"check: div_k_0",
+		.u.insns = {
+			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
+			BPF_STMT(BPF_RET | BPF_K, 0)
+		},
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+		{ },
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
+	},
+	{
+		"check: unknown insn",
+		.u.insns = {
+			/* seccomp insn, rejected in socket filter */
+			BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
+			BPF_STMT(BPF_RET | BPF_K, 0)
+		},
+		CLASSIC | FLAG_EXPECTED_FAIL,
+		{ },
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
+	},
+	{
+		"check: out of range spill/fill",
+		.u.insns = {
+			BPF_STMT(BPF_STX, 16), /* only M[0..15] exist */
+			BPF_STMT(BPF_RET | BPF_K, 0)
+		},
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+		{ },
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
+	},
+	{
+		"JUMPS + HOLES",
+		.u.insns = {
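+			/* Stress branch offset handling: conditional
+			 * jumps spanning long runs of load instructions
+			 * ("holes").
+			 */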
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
+			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
+			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
+			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
+			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
+			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0),
+		},
+		CLASSIC,
+		{ 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8,
+		  0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4,
+		  0x08, 0x00,
+		  0x45, 0x00, 0x00, 0x28, 0x00, 0x00,
+		  0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */
+		  0xc0, 0xa8, 0x33, 0x01,
+		  0xc0, 0xa8, 0x33, 0x02,
+		  0xbb, 0xb6,
+		  0xa9, 0xfa,
+		  0x00, 0x14, 0x00, 0x00,
+		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+		  0xcc, 0xcc, 0xcc, 0xcc },
+		{ { 88, 0x001b } }
+	},
+	{
+		"check: RET X",
+		.u.insns = {
+			BPF_STMT(BPF_RET | BPF_X, 0),
+		},
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+		{ },
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
+	},
+	{
+		"check: LDX + RET X",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 42),
+			BPF_STMT(BPF_RET | BPF_X, 0),
+		},
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+		{ },
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
+	},
+	{	/* Mainly checking JIT here. */
+		"M[]: alt STX + LDX",
+		.u.insns = {
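+			/* Round-trip X through every scratch slot,
+			 * alternating STX/LDX and incrementing by one
+			 * each time: 100 + 16 == 116.
+			 */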
+			BPF_STMT(BPF_LDX | BPF_IMM, 100),
+			BPF_STMT(BPF_STX, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 0),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 1),
+			BPF_STMT(BPF_LDX | BPF_MEM, 1),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 2),
+			BPF_STMT(BPF_LDX | BPF_MEM, 2),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 3),
+			BPF_STMT(BPF_LDX | BPF_MEM, 3),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 4),
+			BPF_STMT(BPF_LDX | BPF_MEM, 4),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 5),
+			BPF_STMT(BPF_LDX | BPF_MEM, 5),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 6),
+			BPF_STMT(BPF_LDX | BPF_MEM, 6),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 7),
+			BPF_STMT(BPF_LDX | BPF_MEM, 7),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 8),
+			BPF_STMT(BPF_LDX | BPF_MEM, 8),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 9),
+			BPF_STMT(BPF_LDX | BPF_MEM, 9),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 10),
+			BPF_STMT(BPF_LDX | BPF_MEM, 10),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 11),
+			BPF_STMT(BPF_LDX | BPF_MEM, 11),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 12),
+			BPF_STMT(BPF_LDX | BPF_MEM, 12),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 13),
+			BPF_STMT(BPF_LDX | BPF_MEM, 13),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 14),
+			BPF_STMT(BPF_LDX | BPF_MEM, 14),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_STX, 15),
+			BPF_STMT(BPF_LDX | BPF_MEM, 15),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0), /* A == 116 */
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 116 } },
+	},
+	{	/* Mainly checking JIT here. */
+		"M[]: full STX + full LDX",
+		.u.insns = {
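+			/* Store a distinct constant in each of the 16
+			 * scratch slots, then load them all back and
+			 * sum them into A.
+			 */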
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
+			BPF_STMT(BPF_STX, 0),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
+			BPF_STMT(BPF_STX, 1),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
+			BPF_STMT(BPF_STX, 2),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
+			BPF_STMT(BPF_STX, 3),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
+			BPF_STMT(BPF_STX, 4),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
+			BPF_STMT(BPF_STX, 5),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
+			BPF_STMT(BPF_STX, 6),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
+			BPF_STMT(BPF_STX, 7),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
+			BPF_STMT(BPF_STX, 8),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
+			BPF_STMT(BPF_STX, 9),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
+			BPF_STMT(BPF_STX, 10),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
+			BPF_STMT(BPF_STX, 11),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
+			BPF_STMT(BPF_STX, 12),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
+			BPF_STMT(BPF_STX, 13),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
+			BPF_STMT(BPF_STX, 14),
+			BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
+			BPF_STMT(BPF_STX, 15),
+			BPF_STMT(BPF_LDX | BPF_MEM, 0),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 1),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 2),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 3),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 4),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 5),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 6),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 7),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 8),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 9),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 10),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 11),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 12),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 13),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 14),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_LDX | BPF_MEM, 15),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0x2a5a5e5 } },
+	},
+	{
+		"check: SKF_AD_MAX",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF + SKF_AD_MAX),
+			BPF_STMT(BPF_RET | BPF_A, 0),
+		},
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+		{ },
+		{ },
+		.fill_helper = NULL,
+		.expected_errcode = -EINVAL,
+	},
+	{	/* Passes checker but fails during runtime. */
+		"LD [SKF_AD_OFF-1]",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+				 SKF_AD_OFF - 1),
+			BPF_STMT(BPF_RET | BPF_K, 1),
+		},
+		CLASSIC,
+		{ },
+		{ { 1, 0 } },
+	},
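+	/* BPF_LD | BPF_IMM | BPF_DW: check both halves of the loaded value. */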
+	{
+		"load 64-bit immediate",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x567800001234LL),
+			BPF_MOV64_REG(R2, R1),
+			BPF_MOV64_REG(R3, R2),
+			BPF_ALU64_IMM(BPF_RSH, R2, 32),
+			BPF_ALU64_IMM(BPF_LSH, R3, 32),
+			BPF_ALU64_IMM(BPF_RSH, R3, 32),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
+			BPF_EXIT_INSN(),
+			BPF_LD_IMM64(R0, 0x1ffffffffLL),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32), /* R0 = 1 */
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+	/* BPF_ALU | BPF_MOV | BPF_X */
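+	/*
+	 * 32-bit (BPF_ALU) operations write the low word of the destination
+	 * and zero the upper 32 bits; BPF_ALU64 preserves all 64 bits.
+	 */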
+	{
+		"ALU_MOV_X: dst = 2",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU32_REG(BPF_MOV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_MOV_X: dst = 4294967295",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+			BPF_ALU32_REG(BPF_MOV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 4294967295U } },
+	},
+	{
+		"ALU64_MOV_X: dst = 2",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_MOV_X: dst = 4294967295",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 4294967295U } },
+	},
+	/* BPF_ALU | BPF_MOV | BPF_K */
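+	/*
+	 * The immediate K is 32 bits wide: BPF_ALU64 sign-extends it to
+	 * 64 bits, which the "sign extension" tests below rely on.
+	 */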
+	{
+		"ALU_MOV_K: dst = 2",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_MOV_K: dst = 4294967295",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 4294967295U),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 4294967295U } },
+	},
+	{
+		"ALU_MOV_K: 0x0000ffffffff0000 = 0x00000000ffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0x00000000ffffffffLL),
+			BPF_ALU32_IMM(BPF_MOV, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_MOV_K: small negative",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, -123),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -123 } }
+	},
+	{
+		"ALU_MOV_K: small negative zero extension",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, -123),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0 } }
+	},
+	{
+		"ALU_MOV_K: large negative",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, -123456789),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -123456789 } }
+	},
+	{
+		"ALU_MOV_K: large negative zero extension",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, -123456789),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0 } }
+	},
+	{
+		"ALU64_MOV_K: dst = 2",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_MOV_K: dst = 2147483647",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 2147483647),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2147483647 } },
+	},
+	{
+		"ALU64_OR_K: dst = 0x0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0x0),
+			BPF_ALU64_IMM(BPF_MOV, R2, 0x0),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_MOV_K: dst = -1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ALU64_IMM(BPF_MOV, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_MOV_K: small negative",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, -123),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -123 } }
+	},
+	{
+		"ALU64_MOV_K: small negative sign extension",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, -123),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } }
+	},
+	{
+		"ALU64_MOV_K: large negative",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, -123456789),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -123456789 } }
+	},
+	{
+		"ALU64_MOV_K: large negative sign extension",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, -123456789),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } }
+	},
+	/* BPF_ALU | BPF_ADD | BPF_X */
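+	/* 32-bit additions wrap modulo 2^32; no carry reaches the upper word. */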
+	{
+		"ALU_ADD_X: 1 + 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU32_REG(BPF_ADD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_ADD_X: 1 + 4294967294 = 4294967295",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+			BPF_ALU32_REG(BPF_ADD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 4294967295U } },
+	},
+	{
+		"ALU_ADD_X: 2 + 4294967294 = 0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_LD_IMM64(R1, 4294967294U),
+			BPF_ALU32_REG(BPF_ADD, R0, R1),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_ADD_X: 1 + 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_ADD_X: 1 + 4294967294 = 4294967295",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 4294967295U } },
+	},
+	{
+		"ALU64_ADD_X: 2 + 4294967294 = 4294967296",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_LD_IMM64(R1, 4294967294U),
+			BPF_LD_IMM64(R2, 4294967296ULL),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_JMP_REG(BPF_JEQ, R0, R2, 2),
+			BPF_MOV32_IMM(R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_ALU | BPF_ADD | BPF_K */
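+	/*
+	 * Immediates with bit 31 set, such as 0x80000000, are negative once
+	 * sign-extended, hence the 0xffffffff80000000 results further down.
+	 */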
+	{
+		"ALU_ADD_K: 1 + 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_ADD, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_ADD_K: 3 + 0 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_ADD, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_ADD_K: 1 + 4294967294 = 4294967295",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_ADD, R0, 4294967294U),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 4294967295U } },
+	},
+	{
+		"ALU_ADD_K: 4294967294 + 2 = 0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967294U),
+			BPF_ALU32_IMM(BPF_ADD, R0, 2),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x00000000ffffffff),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0xffff = 0xffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffff),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0xffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x7fffffff),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x80000000 = 0x80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x80000000),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x80008000 = 0x80008000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x80008000),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_ADD_K: 1 + 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_ADD, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_ADD_K: 3 + 0 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_ADD, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_ADD_K: 1 + 2147483646 = 2147483647",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_ADD, R0, 2147483646),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2147483647 } },
+	},
+	{
+		"ALU64_ADD_K: 4294967294 + 2 = 4294967296",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967294U),
+			BPF_LD_IMM64(R1, 4294967296ULL),
+			BPF_ALU64_IMM(BPF_ADD, R0, 2),
+			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_ADD_K: 2147483646 + -2147483647 = -1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2147483646),
+			BPF_ALU64_IMM(BPF_ADD, R0, -2147483647),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -1 } },
+	},
+	{
+		"ALU64_ADD_K: 1 + 0 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x1),
+			BPF_LD_IMM64(R3, 0x1),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0x0),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_ADD_K: 0 + (-1) = 0xffffffffffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_ADD_K: 0 + 0xffff = 0xffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffff),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0xffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x7fffffff),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffffffff80000000LL),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x80008000 = 0xffffffff80008000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffffffff80008000LL),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	/* BPF_ALU | BPF_SUB | BPF_X */
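+	/* Subtraction wraps modulo 2^32 in the 32-bit variants, like addition. */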
+	{
+		"ALU_SUB_X: 3 - 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1),
+			BPF_ALU32_REG(BPF_SUB, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_SUB_X: 4294967295 - 4294967294 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+			BPF_ALU32_REG(BPF_SUB, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_SUB_X: 3 - 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1),
+			BPF_ALU64_REG(BPF_SUB, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_SUB_X: 4294967295 - 4294967294 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+			BPF_ALU64_REG(BPF_SUB, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	/* BPF_ALU | BPF_SUB | BPF_K */
+	{
+		"ALU_SUB_K: 3 - 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_SUB, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_SUB_K: 3 - 0 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_SUB, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_SUB_K: 4294967295 - 4294967294 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_SUB, R0, 4294967294U),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_SUB_K: 3 - 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_SUB, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_SUB_K: 3 - 0 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_SUB, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_SUB_K: 4294967294 - 4294967295 = -1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967294U),
+			BPF_ALU64_IMM(BPF_SUB, R0, 4294967295U),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -1 } },
+	},
+	{
+		"ALU64_ADD_K: 2147483646 - 2147483647 = -1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2147483646),
+			BPF_ALU64_IMM(BPF_SUB, R0, 2147483647),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -1 } },
+	},
+	/* BPF_ALU | BPF_MUL | BPF_X */
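+	/*
+	 * 32-bit multiplies keep only the low 32 bits of the product; the
+	 * low/high word tests catch JITs that widen operands incorrectly.
+	 */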
+	{
+		"ALU_MUL_X: 2 * 3 = 6",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 3),
+			BPF_ALU32_REG(BPF_MUL, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 6 } },
+	},
+	{
+		"ALU_MUL_X: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0x7FFFFFF8),
+			BPF_ALU32_REG(BPF_MUL, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xFFFFFFF0 } },
+	},
+	{
+		"ALU_MUL_X: -1 * -1 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, -1),
+			BPF_ALU32_IMM(BPF_MOV, R1, -1),
+			BPF_ALU32_REG(BPF_MUL, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_MUL_X: 2 * 3 = 6",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 3),
+			BPF_ALU64_REG(BPF_MUL, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 6 } },
+	},
+	{
+		"ALU64_MUL_X: 1 * 2147483647 = 2147483647",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
+			BPF_ALU64_REG(BPF_MUL, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2147483647 } },
+	},
+	{
+		"ALU64_MUL_X: 64x64 multiply, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0fedcba987654321LL),
+			BPF_LD_IMM64(R1, 0x123456789abcdef0LL),
+			BPF_ALU64_REG(BPF_MUL, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xe5618cf0 } }
+	},
+	{
+		"ALU64_MUL_X: 64x64 multiply, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0fedcba987654321LL),
+			BPF_LD_IMM64(R1, 0x123456789abcdef0LL),
+			BPF_ALU64_REG(BPF_MUL, R0, R1),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x2236d88f } }
+	},
+	/* BPF_ALU | BPF_MUL | BPF_K */
+	{
+		"ALU_MUL_K: 2 * 3 = 6",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_MUL, R0, 3),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 6 } },
+	},
+	{
+		"ALU_MUL_K: 3 * 1 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MUL, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_MUL_K: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_MUL, R0, 0x7FFFFFF8),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xFFFFFFF0 } },
+	},
+	{
+		"ALU_MUL_K: 1 * (-1) = 0x00000000ffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x1),
+			BPF_LD_IMM64(R3, 0x00000000ffffffff),
+			BPF_ALU32_IMM(BPF_MUL, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_MUL_K: 2 * 3 = 6",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU64_IMM(BPF_MUL, R0, 3),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 6 } },
+	},
+	{
+		"ALU64_MUL_K: 3 * 1 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_MUL, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_MUL_K: 1 * 2147483647 = 2147483647",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_MUL, R0, 2147483647),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2147483647 } },
+	},
+	{
+		"ALU64_MUL_K: 1 * -2147483647 = -2147483647",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_MUL, R0, -2147483647),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -2147483647 } },
+	},
+	{
+		"ALU64_MUL_K: 1 * (-1) = 0xffffffffffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x1),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ALU64_IMM(BPF_MUL, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_MUL_K: 64x32 multiply, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xe242d208 } }
+	},
+	{
+		"ALU64_MUL_K: 64x32 multiply, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xc28f5c28 } }
+	},
+	/* BPF_ALU | BPF_DIV | BPF_X */
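+	/*
+	 * BPF division is unsigned: an all-ones divisor is the maximum
+	 * unsigned value, not -1.
+	 */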
+	{
+		"ALU_DIV_X: 6 / 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 6),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU32_REG(BPF_DIV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_DIV_X: 4294967295 / 4294967295 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+			BPF_ALU32_REG(BPF_DIV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_DIV_X: 6 / 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 6),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU64_REG(BPF_DIV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_DIV_X: 2147483647 / 2147483647 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2147483647),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
+			BPF_ALU64_REG(BPF_DIV, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_DIV_X: 0xffffffffffffffff / (-1) = 0x0000000000000001",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+			BPF_LD_IMM64(R4, 0xffffffffffffffffLL),
+			BPF_LD_IMM64(R3, 0x0000000000000001LL),
+			BPF_ALU64_REG(BPF_DIV, R2, R4),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	/* BPF_ALU | BPF_DIV | BPF_K */
+	{
+		"ALU_DIV_K: 6 / 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 6),
+			BPF_ALU32_IMM(BPF_DIV, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_DIV_K: 3 / 1 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_DIV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_DIV_K: 4294967295 / 4294967295 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_DIV, R0, 4294967295U),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU_DIV_K: 0xffffffffffffffff / (-1) = 0x1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+			BPF_LD_IMM64(R3, 0x1UL),
+			BPF_ALU32_IMM(BPF_DIV, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_DIV_K: 6 / 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 6),
+			BPF_ALU64_IMM(BPF_DIV, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_DIV_K: 3 / 1 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_DIV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_DIV_K: 2147483647 / 2147483647 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2147483647),
+			BPF_ALU64_IMM(BPF_DIV, R0, 2147483647),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_DIV_K: 0xffffffffffffffff / (-1) = 0x0000000000000001",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+			BPF_LD_IMM64(R3, 0x0000000000000001LL),
+			BPF_ALU64_IMM(BPF_DIV, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	/* BPF_ALU | BPF_MOD | BPF_X */
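+	/* BPF modulo is unsigned, matching the division semantics above. */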
+	{
+		"ALU_MOD_X: 3 % 2 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU32_REG(BPF_MOD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU_MOD_X: 4294967295 % 4294967293 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_MOV, R1, 4294967293U),
+			BPF_ALU32_REG(BPF_MOD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_MOD_X: 3 % 2 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU64_REG(BPF_MOD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_MOD_X: 2147483647 % 2147483645 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2147483647),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2147483645),
+			BPF_ALU64_REG(BPF_MOD, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	/* BPF_ALU | BPF_MOD | BPF_K */
+	{
+		"ALU_MOD_K: 3 % 2 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOD, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU_MOD_K: 3 % 1 = 0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOD, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0 } },
+	},
+	{
+		"ALU_MOD_K: 4294967295 % 4294967293 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_MOD, R0, 4294967293U),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_MOD_K: 3 % 2 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_MOD, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_MOD_K: 3 % 1 = 0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_MOD, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0 } },
+	},
+	{
+		"ALU64_MOD_K: 2147483647 % 2147483645 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2147483647),
+			BPF_ALU64_IMM(BPF_MOD, R0, 2147483645),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	/* BPF_ALU | BPF_AND | BPF_X */
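+	/*
+	 * The AND/OR/XOR groups share a pattern: "Zero extension" verifies
+	 * that the 32-bit result clears the upper word, and "Sign extension"
+	 * verifies that the immediate is sign-extended for the 64-bit ops.
+	 */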
+	{
+		"ALU_AND_X: 3 & 2 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU32_REG(BPF_AND, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffff),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_ALU32_REG(BPF_AND, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ALU64_AND_X: 3 & 2 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU64_REG(BPF_AND, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffff),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_ALU64_REG(BPF_AND, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	/* BPF_ALU | BPF_AND | BPF_K */
+	{
+		"ALU_AND_K: 3 & 2 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU32_IMM(BPF_AND, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffff),
+			BPF_ALU32_IMM(BPF_AND, R0, 0xffffffff),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ALU_AND_K: Small immediate",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+			BPF_ALU32_IMM(BPF_AND, R0, 15),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 4 } }
+	},
+	{
+		"ALU_AND_K: Large immediate",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4),
+			BPF_ALU32_IMM(BPF_AND, R0, 0xafbfcfdf),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xa1b2c3d4 } }
+	},
+	{
+		"ALU_AND_K: Zero extension",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_LD_IMM64(R1, 0x0000000080a0c0e0LL),
+			BPF_ALU32_IMM(BPF_AND, R0, 0xf0f0f0f0),
+			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+	{
+		"ALU64_AND_K: 3 & 2 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_AND, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xffffffff),
+			BPF_ALU64_IMM(BPF_AND, R0, 0xffffffff),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000000000000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0x0000000000000000LL),
+			BPF_ALU64_IMM(BPF_AND, R2, 0x0),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffff0000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+			BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_AND_K: 0xffffffffffffffff & -1 = 0xffffffffffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_AND_K: Sign extension 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_LD_IMM64(R1, 0x00000000090b0d0fLL),
+			BPF_ALU64_IMM(BPF_AND, R0, 0x0f0f0f0f),
+			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+	{
+		"ALU64_AND_K: Sign extension 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_LD_IMM64(R1, 0x0123456780a0c0e0LL),
+			BPF_ALU64_IMM(BPF_AND, R0, 0xf0f0f0f0),
+			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+	/* BPF_ALU | BPF_OR | BPF_X */
+	{
+		"ALU_OR_X: 1 | 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU32_REG(BPF_OR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_OR_X: 0x0 | 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_ALU32_REG(BPF_OR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ALU64_OR_X: 1 | 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 2),
+			BPF_ALU64_REG(BPF_OR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_OR_X: 0 | 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_ALU64_REG(BPF_OR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	/* BPF_ALU | BPF_OR | BPF_K */
+	{
+		"ALU_OR_K: 1 | 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_OR, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_OR_K: 0 & 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0),
+			BPF_ALU32_IMM(BPF_OR, R0, 0xffffffff),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ALU_OR_K: Small immediate",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+			BPF_ALU32_IMM(BPF_OR, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x01020305 } }
+	},
+	{
+		"ALU_OR_K: Large immediate",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+			BPF_ALU32_IMM(BPF_OR, R0, 0xa0b0c0d0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xa1b2c3d4 } }
+	},
+	{
+		"ALU_OR_K: Zero extension",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_LD_IMM64(R1, 0x00000000f9fbfdffLL),
+			BPF_ALU32_IMM(BPF_OR, R0, 0xf0f0f0f0),
+			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+	{
+		"ALU64_OR_K: 1 | 2 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_OR, R0, 2),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_OR_K: 0 & 0xffffffff = 0xffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0),
+			BPF_ALU64_IMM(BPF_OR, R0, 0xffffffff),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffff } },
+	},
+	{
+		"ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffffffff0000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+			BPF_ALU64_IMM(BPF_OR, R2, 0x0),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_OR_K: 0x0000ffffffff0000 | -1 = 0xffffffffffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_OR_K: 0x000000000000000 | -1 = 0xffffffffffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000000000000000LL),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_OR_K: Sign extension 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_LD_IMM64(R1, 0x012345678fafcfefLL),
+			BPF_ALU64_IMM(BPF_OR, R0, 0x0f0f0f0f),
+			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+	{
+		"ALU64_OR_K: Sign extension 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_LD_IMM64(R1, 0xfffffffff9fbfdffLL),
+			BPF_ALU64_IMM(BPF_OR, R0, 0xf0f0f0f0),
+			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+	/* BPF_ALU | BPF_XOR | BPF_X */
+	{
+		"ALU_XOR_X: 5 ^ 6 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 5),
+			BPF_ALU32_IMM(BPF_MOV, R1, 6),
+			BPF_ALU32_REG(BPF_XOR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_XOR_X: 0x1 ^ 0xffffffff = 0xfffffffe",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_ALU32_REG(BPF_XOR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfffffffe } },
+	},
+	{
+		"ALU64_XOR_X: 5 ^ 6 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 5),
+			BPF_ALU32_IMM(BPF_MOV, R1, 6),
+			BPF_ALU64_REG(BPF_XOR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_XOR_X: 1 ^ 0xffffffff = 0xfffffffe",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_ALU64_REG(BPF_XOR, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfffffffe } },
+	},
+	/* BPF_ALU | BPF_XOR | BPF_K */
+	{
+		"ALU_XOR_K: 5 ^ 6 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 5),
+			BPF_ALU32_IMM(BPF_XOR, R0, 6),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU_XOR_K: 1 ^ 0xffffffff = 0xfffffffe",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_XOR, R0, 0xffffffff),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfffffffe } },
+	},
+	{
+		"ALU_XOR_K: Small immediate",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+			BPF_ALU32_IMM(BPF_XOR, R0, 15),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x0102030b } }
+	},
+	{
+		"ALU_XOR_K: Large immediate",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4),
+			BPF_ALU32_IMM(BPF_XOR, R0, 0xafbfcfdf),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x5e4d3c2b } }
+	},
+	{
+		"ALU_XOR_K: Zero extension",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_LD_IMM64(R1, 0x00000000795b3d1fLL),
+			BPF_ALU32_IMM(BPF_XOR, R0, 0xf0f0f0f0),
+			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+	{
+		"ALU64_XOR_K: 5 ^ 6 = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 5),
+			BPF_ALU64_IMM(BPF_XOR, R0, 6),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_XOR_K: 1 ^ 0xffffffff = 0xfffffffe",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xfffffffe } },
+	},
+	{
+		"ALU64_XOR_K: 0x0000ffffffff0000 ^ 0x0 = 0x0000ffffffff0000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+			BPF_ALU64_IMM(BPF_XOR, R2, 0x0),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_XOR_K: 0x0000ffffffff0000 ^ -1 = 0xffff00000000ffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+			BPF_LD_IMM64(R3, 0xffff00000000ffffLL),
+			BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_XOR_K: 0x000000000000000 ^ -1 = 0xffffffffffffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0000000000000000LL),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_XOR_K: Sign extension 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_LD_IMM64(R1, 0x0123456786a4c2e0LL),
+			BPF_ALU64_IMM(BPF_XOR, R0, 0x0f0f0f0f),
+			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+	{
+		"ALU64_XOR_K: Sign extension 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_LD_IMM64(R1, 0xfedcba98795b3d1fLL),
+			BPF_ALU64_IMM(BPF_XOR, R0, 0xf0f0f0f0),
+			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
+	/* BPF_ALU | BPF_LSH | BPF_X */
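+	/*
+	 * Shift counts below, at and above 32 are tested separately to catch
+	 * JITs that assemble 64-bit shifts from 32-bit operations.
+	 */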
+	{
+		"ALU_LSH_X: 1 << 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1),
+			BPF_ALU32_REG(BPF_LSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_LSH_X: 1 << 31 = 0x80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 31),
+			BPF_ALU32_REG(BPF_LSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x80000000 } },
+	},
+	{
+		"ALU_LSH_X: 0x12345678 << 12 = 0x45678000",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+			BPF_ALU32_IMM(BPF_MOV, R1, 12),
+			BPF_ALU32_REG(BPF_LSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x45678000 } }
+	},
+	{
+		"ALU64_LSH_X: 1 << 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1),
+			BPF_ALU64_REG(BPF_LSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_LSH_X: 1 << 31 = 0x80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 31),
+			BPF_ALU64_REG(BPF_LSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x80000000 } },
+	},
+	{
+		"ALU64_LSH_X: Shift < 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 12),
+			BPF_ALU64_REG(BPF_LSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xbcdef000 } }
+	},
+	{
+		"ALU64_LSH_X: Shift < 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 12),
+			BPF_ALU64_REG(BPF_LSH, R0, R1),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x3456789a } }
+	},
+	{
+		"ALU64_LSH_X: Shift > 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 36),
+			BPF_ALU64_REG(BPF_LSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0 } }
+	},
+	{
+		"ALU64_LSH_X: Shift > 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 36),
+			BPF_ALU64_REG(BPF_LSH, R0, R1),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x9abcdef0 } }
+	},
+	{
+		"ALU64_LSH_X: Shift == 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 32),
+			BPF_ALU64_REG(BPF_LSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0 } }
+	},
+	{
+		"ALU64_LSH_X: Shift == 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 32),
+			BPF_ALU64_REG(BPF_LSH, R0, R1),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x89abcdef } }
+	},
+	{
+		"ALU64_LSH_X: Zero shift, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0),
+			BPF_ALU64_REG(BPF_LSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x89abcdef } }
+	},
+	{
+		"ALU64_LSH_X: Zero shift, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0),
+			BPF_ALU64_REG(BPF_LSH, R0, R1),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x01234567 } }
+	},
+	/* BPF_ALU | BPF_LSH | BPF_K */
+	{
+		"ALU_LSH_K: 1 << 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_LSH, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU_LSH_K: 1 << 31 = 0x80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU32_IMM(BPF_LSH, R0, 31),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x80000000 } },
+	},
+	{
+		"ALU_LSH_K: 0x12345678 << 12 = 0x45678000",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+			BPF_ALU32_IMM(BPF_LSH, R0, 12),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x45678000 } }
+	},
+	{
+		"ALU_LSH_K: 0x12345678 << 0 = 0x12345678",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+			BPF_ALU32_IMM(BPF_LSH, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x12345678 } }
+	},
+	{
+		"ALU64_LSH_K: 1 << 1 = 2",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_LSH, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 2 } },
+	},
+	{
+		"ALU64_LSH_K: 1 << 31 = 0x80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 1),
+			BPF_ALU64_IMM(BPF_LSH, R0, 31),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x80000000 } },
+	},
+	{
+		"ALU64_LSH_K: Shift < 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_LSH, R0, 12),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xbcdef000 } }
+	},
+	{
+		"ALU64_LSH_K: Shift < 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_LSH, R0, 12),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x3456789a } }
+	},
+	{
+		"ALU64_LSH_K: Shift > 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_LSH, R0, 36),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0 } }
+	},
+	{
+		"ALU64_LSH_K: Shift > 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_LSH, R0, 36),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x9abcdef0 } }
+	},
+	{
+		"ALU64_LSH_K: Shift == 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_LSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0 } }
+	},
+	{
+		"ALU64_LSH_K: Shift == 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_LSH, R0, 32),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x89abcdef } }
+	},
+	{
+		"ALU64_LSH_K: Zero shift",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_LSH, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x89abcdef } }
+	},
+	/* BPF_ALU | BPF_RSH | BPF_X */
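+	/* BPF_RSH is a logical shift: vacated bits are filled with zeroes. */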
+	{
+		"ALU_RSH_X: 2 >> 1 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1),
+			BPF_ALU32_REG(BPF_RSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU_RSH_X: 0x80000000 >> 31 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x80000000),
+			BPF_ALU32_IMM(BPF_MOV, R1, 31),
+			BPF_ALU32_REG(BPF_RSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU_RSH_X: 0x12345678 >> 20 = 0x123",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+			BPF_ALU32_IMM(BPF_MOV, R1, 20),
+			BPF_ALU32_REG(BPF_RSH, R0, R1),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
+		INTERNAL,
 		{ },
-		{ { 1, 3 }, { 10, 3 } },
+		{ { 0, 0x123 } }
 	},
 	{
-		"LD_MARK",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_MARK),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU64_RSH_X: 2 >> 1 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1),
+			BPF_ALU64_REG(BPF_RSH, R0, R1),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
+		INTERNAL,
 		{ },
-		{ { 1, SKB_MARK}, { 10, SKB_MARK} },
+		{ { 0, 1 } },
 	},
 	{
-		"LD_RXHASH",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_RXHASH),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU64_RSH_X: 0x80000000 >> 31 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x80000000),
+			BPF_ALU32_IMM(BPF_MOV, R1, 31),
+			BPF_ALU64_REG(BPF_RSH, R0, R1),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
+		INTERNAL,
 		{ },
-		{ { 1, SKB_HASH}, { 10, SKB_HASH} },
+		{ { 0, 1 } },
 	},
 	{
-		"LD_QUEUE",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_QUEUE),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU64_RSH_X: Shift < 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 12),
+			BPF_ALU64_REG(BPF_RSH, R0, R1),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
+		INTERNAL,
 		{ },
-		{ { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
+		{ { 0, 0x56789abc } }
 	},
 	{
-		"LD_PROTOCOL",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 0),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_PROTOCOL),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 0),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU64_RSH_X: Shift < 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 12),
+			BPF_ALU64_REG(BPF_RSH, R0, R1),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ 10, 20, 30 },
-		{ { 10, ETH_P_IP }, { 100, ETH_P_IP } },
+		INTERNAL,
+		{ },
+		{ { 0, 0x00081234 } }
 	},
 	{
-		"LD_VLAN_TAG",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_VLAN_TAG),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU64_RSH_X: Shift > 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 36),
+			BPF_ALU64_REG(BPF_RSH, R0, R1),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
+		INTERNAL,
 		{ },
-		{
-			{ 1, SKB_VLAN_TCI },
-			{ 10, SKB_VLAN_TCI }
+		{ { 0, 0x08123456 } }
+	},
+	{
+		"ALU64_RSH_X: Shift > 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 36),
+			BPF_ALU64_REG(BPF_RSH, R0, R1),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
 		},
+		INTERNAL,
+		{ },
+		{ { 0, 0 } }
 	},
 	{
-		"LD_VLAN_TAG_PRESENT",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU64_RSH_X: Shift == 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 32),
+			BPF_ALU64_REG(BPF_RSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x81234567 } }
+	},
+	{
+		"ALU64_RSH_X: Shift == 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 32),
+			BPF_ALU64_REG(BPF_RSH, R0, R1),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0 } }
+	},
+	{
+		"ALU64_RSH_X: Zero shift, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0),
+			BPF_ALU64_REG(BPF_RSH, R0, R1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x89abcdef } }
+	},
+	{
+		"ALU64_RSH_X: Zero shift, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0),
+			BPF_ALU64_REG(BPF_RSH, R0, R1),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x81234567 } }
+	},
+	/* BPF_ALU | BPF_RSH | BPF_K */
+	{
+		"ALU_RSH_K: 2 >> 1 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU32_IMM(BPF_RSH, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU_RSH_K: 0x80000000 >> 31 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x80000000),
+			BPF_ALU32_IMM(BPF_RSH, R0, 31),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU_RSH_K: 0x12345678 >> 20 = 0x123",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+			BPF_ALU32_IMM(BPF_RSH, R0, 20),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x123 } }
+	},
+	{
+		"ALU_RSH_K: 0x12345678 >> 0 = 0x12345678",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+			BPF_ALU32_IMM(BPF_RSH, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x12345678 } }
+	},
+	{
+		"ALU64_RSH_K: 2 >> 1 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_ALU64_IMM(BPF_RSH, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_RSH_K: 0x80000000 >> 31 = 1",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x80000000),
+			BPF_ALU64_IMM(BPF_RSH, R0, 31),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"ALU64_RSH_K: Shift < 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_RSH, R0, 12),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x56789abc } }
+	},
+	{
+		"ALU64_RSH_K: Shift < 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_RSH, R0, 12),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x00081234 } }
+	},
+	{
+		"ALU64_RSH_K: Shift > 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_RSH, R0, 36),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
+		INTERNAL,
 		{ },
-		{
-			{ 1, SKB_VLAN_PRESENT },
-			{ 10, SKB_VLAN_PRESENT }
-		},
+		{ { 0, 0x08123456 } }
 	},
 	{
-		"LD_IFINDEX",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_IFINDEX),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU64_RSH_K: Shift > 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_RSH, R0, 36),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
+		INTERNAL,
 		{ },
-		{ { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
+		{ { 0, 0 } }
 	},
 	{
-		"LD_HATYPE",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_HATYPE),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU64_RSH_K: Shift == 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
+		INTERNAL,
 		{ },
-		{ { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
+		{ { 0, 0x81234567 } }
 	},
 	{
-		"LD_CPU",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_CPU),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_CPU),
-			BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU64_RSH_K: Shift == 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
+		INTERNAL,
 		{ },
-		{ { 1, 0 }, { 10, 0 } },
+		{ { 0, 0 } }
 	},
 	{
-		"LD_NLATTR",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 2),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_LDX | BPF_IMM, 3),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_NLATTR),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU64_RSH_K: Zero shift",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_RSH, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-#ifdef __BIG_ENDIAN
-		{ 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 },
-#else
-		{ 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
-#endif
-		{ { 4, 0 }, { 20, 6 } },
+		INTERNAL,
+		{ },
+		{ { 0, 0x89abcdef } }
 	},
+	/* BPF_ALU | BPF_ARSH | BPF_X */
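+	/* BPF_ARSH is arithmetic: vacated bits are copies of the sign bit. */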
 	{
-		"LD_NLATTR_NEST",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 2),
-			BPF_STMT(BPF_LDX | BPF_IMM, 3),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
-			BPF_STMT(BPF_LD | BPF_IMM, 2),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
-			BPF_STMT(BPF_LD | BPF_IMM, 2),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
-			BPF_STMT(BPF_LD | BPF_IMM, 2),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
-			BPF_STMT(BPF_LD | BPF_IMM, 2),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
-			BPF_STMT(BPF_LD | BPF_IMM, 2),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
-			BPF_STMT(BPF_LD | BPF_IMM, 2),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
-			BPF_STMT(BPF_LD | BPF_IMM, 2),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU32_ARSH_X: -1234 >> 7 = -10",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, -1234),
+			BPF_ALU32_IMM(BPF_MOV, R1, 7),
+			BPF_ALU32_REG(BPF_ARSH, R0, R1),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-#ifdef __BIG_ENDIAN
-		{ 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 },
-#else
-		{ 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
-#endif
-		{ { 4, 0 }, { 20, 10 } },
+		INTERNAL,
+		{ },
+		{ { 0, -10 } }
 	},
 	{
-		"LD_PAYLOAD_OFF",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU64_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 40),
+			BPF_ALU64_REG(BPF_ARSH, R0, R1),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		/* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethtype IPv4 (0x0800),
-		 * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
-		 * id 9737, seq 1, length 64
-		 */
-		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		  0x08, 0x00,
-		  0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
-		  0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
-		{ { 30, 0 }, { 100, 42 } },
+		INTERNAL,
+		{ },
+		{ { 0, 0xffff00ff } },
 	},
 	{
-		"LD_ANC_XOR",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 10),
-			BPF_STMT(BPF_LDX | BPF_IMM, 300),
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_ALU_XOR_X),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU64_ARSH_X: Shift < 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 12),
+			BPF_ALU64_REG(BPF_ARSH, R0, R1),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
+		INTERNAL,
 		{ },
-		{ { 4, 0xA ^ 300 }, { 20, 0xA ^ 300 } },
+		{ { 0, 0x56789abc } }
 	},
 	{
-		"SPILL_FILL",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_LEN, 0),
-			BPF_STMT(BPF_LD | BPF_IMM, 2),
-			BPF_STMT(BPF_ALU | BPF_RSH, 1),
-			BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
-			BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */
-			BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
-			BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */
-			BPF_STMT(BPF_STX, 15), /* M3 = len */
-			BPF_STMT(BPF_LDX | BPF_MEM, 1),
-			BPF_STMT(BPF_LD | BPF_MEM, 2),
-			BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 15),
-			BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU64_ARSH_X: Shift < 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 12),
+			BPF_ALU64_REG(BPF_ARSH, R0, R1),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
+		INTERNAL,
 		{ },
-		{ { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
+		{ { 0, 0xfff81234 } }
 	},
 	{
-		"JEQ",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_LEN, 0),
-			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
-			BPF_STMT(BPF_RET | BPF_K, 1),
-			BPF_STMT(BPF_RET | BPF_K, MAX_K)
+		"ALU64_ARSH_X: Shift > 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 36),
+			BPF_ALU64_REG(BPF_ARSH, R0, R1),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ 3, 3, 3, 3, 3 },
-		{ { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
+		INTERNAL,
+		{ },
+		{ { 0, 0xf8123456 } }
 	},
 	{
-		"JGT",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_LEN, 0),
-			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
-			BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
-			BPF_STMT(BPF_RET | BPF_K, 1),
-			BPF_STMT(BPF_RET | BPF_K, MAX_K)
+		"ALU64_ARSH_X: Shift > 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 36),
+			BPF_ALU64_REG(BPF_ARSH, R0, R1),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ 4, 4, 4, 3, 3 },
-		{ { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
+		INTERNAL,
+		{ },
+		{ { 0, -1 } }
 	},
 	{
-		"JGE (jt 0), test 1",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_LEN, 0),
-			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
-			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
-			BPF_STMT(BPF_RET | BPF_K, 1),
-			BPF_STMT(BPF_RET | BPF_K, MAX_K)
+		"ALU64_ARSH_X: Shift == 32, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 32),
+			BPF_ALU64_REG(BPF_ARSH, R0, R1),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ 4, 4, 4, 3, 3 },
-		{ { 2, 0 }, { 3, 1 }, { 4, 1 } },
+		INTERNAL,
+		{ },
+		{ { 0, 0x81234567 } }
 	},
 	{
-		"JGE (jt 0), test 2",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_LEN, 0),
-			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
-			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
-			BPF_STMT(BPF_RET | BPF_K, 1),
-			BPF_STMT(BPF_RET | BPF_K, MAX_K)
+		"ALU64_ARSH_X: Shift == 32, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 32),
+			BPF_ALU64_REG(BPF_ARSH, R0, R1),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ 4, 4, 5, 3, 3 },
-		{ { 4, 1 }, { 5, 1 }, { 6, MAX_K } },
+		INTERNAL,
+		{ },
+		{ { 0, -1 } }
 	},
 	{
-		"JGE",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_LEN, 0),
-			BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
-			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 10),
-			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 20),
-			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 30),
-			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 40),
-			BPF_STMT(BPF_RET | BPF_K, MAX_K)
+		"ALU64_ARSH_X: Zero shift, low word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0),
+			BPF_ALU64_REG(BPF_ARSH, R0, R1),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ 1, 2, 3, 4, 5 },
-		{ { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
+		INTERNAL,
+		{ },
+		{ { 0, 0x89abcdef } }
 	},
 	{
-		"JSET",
-		.u.insns = {
-			BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
-			BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
-			BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
-			BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
-			BPF_STMT(BPF_LDX | BPF_LEN, 0),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
-			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
-			BPF_STMT(BPF_RET | BPF_K, 10),
-			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
-			BPF_STMT(BPF_RET | BPF_K, 20),
-			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 30),
-			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 30),
-			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 30),
-			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 30),
-			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 30),
-			BPF_STMT(BPF_RET | BPF_K, MAX_K)
+		"ALU64_ARSH_X: Zero shift, high word",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0),
+			BPF_ALU64_REG(BPF_ARSH, R0, R1),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ 0, 0xAA, 0x55, 1 },
-		{ { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
+		INTERNAL,
+		{ },
+		{ { 0, 0x81234567 } }
 	},
+	/* BPF_ALU | BPF_ARSH | BPF_K */
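+	/*
+	 * Same coverage as the BPF_X variants above, but with the shift
+	 * amount taken from the immediate field.
+	 */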
 	{
-		"tcpdump port 22",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */
-			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */
-			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
-			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0),
-			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
-			BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1),
-			BPF_STMT(BPF_RET | BPF_K, 0xffff),
-			BPF_STMT(BPF_RET | BPF_K, 0),
+		"ALU32_ARSH_K: -1234 >> 7 = -10",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, -1234),
+			BPF_ALU32_IMM(BPF_ARSH, R0, 7),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		/* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800)
-		 * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
-		 * seq 1305692979:1305693027, ack 3650467037, win 65535,
-		 * options [nop,nop,TS val 2502645400 ecr 3971138], length 48
-		 */
-		{ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
-		  0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
-		  0x08, 0x00,
-		  0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
-		  0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
-		  0x0a, 0x01, 0x01, 0x95, /* ip src */
-		  0x0a, 0x01, 0x02, 0x0a, /* ip dst */
-		  0xc2, 0x24,
-		  0x00, 0x16 /* dst port */ },
-		{ { 10, 0 }, { 30, 0 }, { 100, 65535 } },
+		INTERNAL,
+		{ },
+		{ { 0, -10 } }
 	},
 	{
-		"tcpdump complex",
-		.u.insns = {
-			/* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
-			 * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
-			 * (len > 115 or len < 30000000000)' -d
-			 */
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29),
-			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
-			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0),
-			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
-			BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16),
-			BPF_STMT(BPF_ST, 1),
-			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14),
-			BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf),
-			BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */
-			BPF_STMT(BPF_LD | BPF_MEM, 1),
-			BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
-			BPF_STMT(BPF_ST, 5),
-			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
-			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26),
-			BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
-			BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */
-			BPF_STMT(BPF_LD | BPF_MEM, 5),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0),
-			BPF_STMT(BPF_LD | BPF_LEN, 0),
-			BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0),
-			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 0xffff),
-			BPF_STMT(BPF_RET | BPF_K, 0),
+		"ALU32_ARSH_K: -1234 >> 0 = -1234",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, -1234),
+			BPF_ALU32_IMM(BPF_ARSH, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
-		  0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
-		  0x08, 0x00,
-		  0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
-		  0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
-		  0x0a, 0x01, 0x01, 0x95, /* ip src */
-		  0x0a, 0x01, 0x02, 0x0a, /* ip dst */
-		  0xc2, 0x24,
-		  0x00, 0x16 /* dst port */ },
-		{ { 10, 0 }, { 30, 0 }, { 100, 65535 } },
+		INTERNAL,
+		{ },
+		{ { 0, -1234 } }
 	},
 	{
-		"RET_A",
-		.u.insns = {
-			/* check that uninitialized X and A contain zeros */
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0)
+		"ALU64_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
+			BPF_ALU64_IMM(BPF_ARSH, R0, 40),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
+		INTERNAL,
 		{ },
-		{ {1, 0}, {2, 0} },
+		{ { 0, 0xffff00ff } },
 	},
 	{
-		"INT: ADD trivial",
+		"ALU64_ARSH_K: Shift < 32, low word",
 		.u.insns_int = {
-			BPF_ALU64_IMM(BPF_MOV, R1, 1),
-			BPF_ALU64_IMM(BPF_ADD, R1, 2),
-			BPF_ALU64_IMM(BPF_MOV, R2, 3),
-			BPF_ALU64_REG(BPF_SUB, R1, R2),
-			BPF_ALU64_IMM(BPF_ADD, R1, -1),
-			BPF_ALU64_IMM(BPF_MUL, R1, 3),
-			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_ARSH, R0, 12),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xfffffffd } }
+		{ { 0, 0x56789abc } }
 	},
 	{
-		"INT: MUL_X",
+		"ALU64_ARSH_K: Shift < 32, high word",
 		.u.insns_int = {
-			BPF_ALU64_IMM(BPF_MOV, R0, -1),
-			BPF_ALU64_IMM(BPF_MOV, R1, -1),
-			BPF_ALU64_IMM(BPF_MOV, R2, 3),
-			BPF_ALU64_REG(BPF_MUL, R1, R2),
-			BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_ARSH, R0, 12),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } }
+		{ { 0, 0xfff81234 } }
 	},
 	{
-		"INT: MUL_X2",
+		"ALU64_ARSH_K: Shift > 32, low word",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -1),
-			BPF_ALU32_IMM(BPF_MOV, R1, -1),
-			BPF_ALU32_IMM(BPF_MOV, R2, 3),
-			BPF_ALU64_REG(BPF_MUL, R1, R2),
-			BPF_ALU64_IMM(BPF_RSH, R1, 8),
-			BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_ARSH, R0, 36),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } }
+		{ { 0, 0xf8123456 } }
 	},
 	{
-		"INT: MUL32_X",
+		"ALU64_ARSH_K: Shift > 32, high word",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -1),
-			BPF_ALU64_IMM(BPF_MOV, R1, -1),
-			BPF_ALU32_IMM(BPF_MOV, R2, 3),
-			BPF_ALU32_REG(BPF_MUL, R1, R2),
-			BPF_ALU64_IMM(BPF_RSH, R1, 8),
-			BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R0, 0xf123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_ARSH, R0, 36),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } }
+		{ { 0, -1 } }
 	},
 	{
-		/* Have to test all register combinations, since
-		 * JITing of different registers will produce
-		 * different asm code.
-		 */
-		"INT: ADD 64-bit",
+		"ALU64_ARSH_K: Shift == 32, low word",
 		.u.insns_int = {
-			BPF_ALU64_IMM(BPF_MOV, R0, 0),
-			BPF_ALU64_IMM(BPF_MOV, R1, 1),
-			BPF_ALU64_IMM(BPF_MOV, R2, 2),
-			BPF_ALU64_IMM(BPF_MOV, R3, 3),
-			BPF_ALU64_IMM(BPF_MOV, R4, 4),
-			BPF_ALU64_IMM(BPF_MOV, R5, 5),
-			BPF_ALU64_IMM(BPF_MOV, R6, 6),
-			BPF_ALU64_IMM(BPF_MOV, R7, 7),
-			BPF_ALU64_IMM(BPF_MOV, R8, 8),
-			BPF_ALU64_IMM(BPF_MOV, R9, 9),
-			BPF_ALU64_IMM(BPF_ADD, R0, 20),
-			BPF_ALU64_IMM(BPF_ADD, R1, 20),
-			BPF_ALU64_IMM(BPF_ADD, R2, 20),
-			BPF_ALU64_IMM(BPF_ADD, R3, 20),
-			BPF_ALU64_IMM(BPF_ADD, R4, 20),
-			BPF_ALU64_IMM(BPF_ADD, R5, 20),
-			BPF_ALU64_IMM(BPF_ADD, R6, 20),
-			BPF_ALU64_IMM(BPF_ADD, R7, 20),
-			BPF_ALU64_IMM(BPF_ADD, R8, 20),
-			BPF_ALU64_IMM(BPF_ADD, R9, 20),
-			BPF_ALU64_IMM(BPF_SUB, R0, 10),
-			BPF_ALU64_IMM(BPF_SUB, R1, 10),
-			BPF_ALU64_IMM(BPF_SUB, R2, 10),
-			BPF_ALU64_IMM(BPF_SUB, R3, 10),
-			BPF_ALU64_IMM(BPF_SUB, R4, 10),
-			BPF_ALU64_IMM(BPF_SUB, R5, 10),
-			BPF_ALU64_IMM(BPF_SUB, R6, 10),
-			BPF_ALU64_IMM(BPF_SUB, R7, 10),
-			BPF_ALU64_IMM(BPF_SUB, R8, 10),
-			BPF_ALU64_IMM(BPF_SUB, R9, 10),
-			BPF_ALU64_REG(BPF_ADD, R0, R0),
-			BPF_ALU64_REG(BPF_ADD, R0, R1),
-			BPF_ALU64_REG(BPF_ADD, R0, R2),
-			BPF_ALU64_REG(BPF_ADD, R0, R3),
-			BPF_ALU64_REG(BPF_ADD, R0, R4),
-			BPF_ALU64_REG(BPF_ADD, R0, R5),
-			BPF_ALU64_REG(BPF_ADD, R0, R6),
-			BPF_ALU64_REG(BPF_ADD, R0, R7),
-			BPF_ALU64_REG(BPF_ADD, R0, R8),
-			BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
-			BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_ADD, R1, R0),
-			BPF_ALU64_REG(BPF_ADD, R1, R1),
-			BPF_ALU64_REG(BPF_ADD, R1, R2),
-			BPF_ALU64_REG(BPF_ADD, R1, R3),
-			BPF_ALU64_REG(BPF_ADD, R1, R4),
-			BPF_ALU64_REG(BPF_ADD, R1, R5),
-			BPF_ALU64_REG(BPF_ADD, R1, R6),
-			BPF_ALU64_REG(BPF_ADD, R1, R7),
-			BPF_ALU64_REG(BPF_ADD, R1, R8),
-			BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
-			BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_ADD, R2, R0),
-			BPF_ALU64_REG(BPF_ADD, R2, R1),
-			BPF_ALU64_REG(BPF_ADD, R2, R2),
-			BPF_ALU64_REG(BPF_ADD, R2, R3),
-			BPF_ALU64_REG(BPF_ADD, R2, R4),
-			BPF_ALU64_REG(BPF_ADD, R2, R5),
-			BPF_ALU64_REG(BPF_ADD, R2, R6),
-			BPF_ALU64_REG(BPF_ADD, R2, R7),
-			BPF_ALU64_REG(BPF_ADD, R2, R8),
-			BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
-			BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_ADD, R3, R0),
-			BPF_ALU64_REG(BPF_ADD, R3, R1),
-			BPF_ALU64_REG(BPF_ADD, R3, R2),
-			BPF_ALU64_REG(BPF_ADD, R3, R3),
-			BPF_ALU64_REG(BPF_ADD, R3, R4),
-			BPF_ALU64_REG(BPF_ADD, R3, R5),
-			BPF_ALU64_REG(BPF_ADD, R3, R6),
-			BPF_ALU64_REG(BPF_ADD, R3, R7),
-			BPF_ALU64_REG(BPF_ADD, R3, R8),
-			BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
-			BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_ADD, R4, R0),
-			BPF_ALU64_REG(BPF_ADD, R4, R1),
-			BPF_ALU64_REG(BPF_ADD, R4, R2),
-			BPF_ALU64_REG(BPF_ADD, R4, R3),
-			BPF_ALU64_REG(BPF_ADD, R4, R4),
-			BPF_ALU64_REG(BPF_ADD, R4, R5),
-			BPF_ALU64_REG(BPF_ADD, R4, R6),
-			BPF_ALU64_REG(BPF_ADD, R4, R7),
-			BPF_ALU64_REG(BPF_ADD, R4, R8),
-			BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
-			BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_ADD, R5, R0),
-			BPF_ALU64_REG(BPF_ADD, R5, R1),
-			BPF_ALU64_REG(BPF_ADD, R5, R2),
-			BPF_ALU64_REG(BPF_ADD, R5, R3),
-			BPF_ALU64_REG(BPF_ADD, R5, R4),
-			BPF_ALU64_REG(BPF_ADD, R5, R5),
-			BPF_ALU64_REG(BPF_ADD, R5, R6),
-			BPF_ALU64_REG(BPF_ADD, R5, R7),
-			BPF_ALU64_REG(BPF_ADD, R5, R8),
-			BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
-			BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_ADD, R6, R0),
-			BPF_ALU64_REG(BPF_ADD, R6, R1),
-			BPF_ALU64_REG(BPF_ADD, R6, R2),
-			BPF_ALU64_REG(BPF_ADD, R6, R3),
-			BPF_ALU64_REG(BPF_ADD, R6, R4),
-			BPF_ALU64_REG(BPF_ADD, R6, R5),
-			BPF_ALU64_REG(BPF_ADD, R6, R6),
-			BPF_ALU64_REG(BPF_ADD, R6, R7),
-			BPF_ALU64_REG(BPF_ADD, R6, R8),
-			BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
-			BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_ADD, R7, R0),
-			BPF_ALU64_REG(BPF_ADD, R7, R1),
-			BPF_ALU64_REG(BPF_ADD, R7, R2),
-			BPF_ALU64_REG(BPF_ADD, R7, R3),
-			BPF_ALU64_REG(BPF_ADD, R7, R4),
-			BPF_ALU64_REG(BPF_ADD, R7, R5),
-			BPF_ALU64_REG(BPF_ADD, R7, R6),
-			BPF_ALU64_REG(BPF_ADD, R7, R7),
-			BPF_ALU64_REG(BPF_ADD, R7, R8),
-			BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
-			BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_ADD, R8, R0),
-			BPF_ALU64_REG(BPF_ADD, R8, R1),
-			BPF_ALU64_REG(BPF_ADD, R8, R2),
-			BPF_ALU64_REG(BPF_ADD, R8, R3),
-			BPF_ALU64_REG(BPF_ADD, R8, R4),
-			BPF_ALU64_REG(BPF_ADD, R8, R5),
-			BPF_ALU64_REG(BPF_ADD, R8, R6),
-			BPF_ALU64_REG(BPF_ADD, R8, R7),
-			BPF_ALU64_REG(BPF_ADD, R8, R8),
-			BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
-			BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_ADD, R9, R0),
-			BPF_ALU64_REG(BPF_ADD, R9, R1),
-			BPF_ALU64_REG(BPF_ADD, R9, R2),
-			BPF_ALU64_REG(BPF_ADD, R9, R3),
-			BPF_ALU64_REG(BPF_ADD, R9, R4),
-			BPF_ALU64_REG(BPF_ADD, R9, R5),
-			BPF_ALU64_REG(BPF_ADD, R9, R6),
-			BPF_ALU64_REG(BPF_ADD, R9, R7),
-			BPF_ALU64_REG(BPF_ADD, R9, R8),
-			BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
-			BPF_ALU64_REG(BPF_MOV, R0, R9),
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_ARSH, R0, 32),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2957380 } }
+		{ { 0, 0x81234567 } }
 	},
 	{
-		"INT: ADD 32-bit",
+		"ALU64_ARSH_K: Shift == 32, high word",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 20),
-			BPF_ALU32_IMM(BPF_MOV, R1, 1),
-			BPF_ALU32_IMM(BPF_MOV, R2, 2),
-			BPF_ALU32_IMM(BPF_MOV, R3, 3),
-			BPF_ALU32_IMM(BPF_MOV, R4, 4),
-			BPF_ALU32_IMM(BPF_MOV, R5, 5),
-			BPF_ALU32_IMM(BPF_MOV, R6, 6),
-			BPF_ALU32_IMM(BPF_MOV, R7, 7),
-			BPF_ALU32_IMM(BPF_MOV, R8, 8),
-			BPF_ALU32_IMM(BPF_MOV, R9, 9),
-			BPF_ALU64_IMM(BPF_ADD, R1, 10),
-			BPF_ALU64_IMM(BPF_ADD, R2, 10),
-			BPF_ALU64_IMM(BPF_ADD, R3, 10),
-			BPF_ALU64_IMM(BPF_ADD, R4, 10),
-			BPF_ALU64_IMM(BPF_ADD, R5, 10),
-			BPF_ALU64_IMM(BPF_ADD, R6, 10),
-			BPF_ALU64_IMM(BPF_ADD, R7, 10),
-			BPF_ALU64_IMM(BPF_ADD, R8, 10),
-			BPF_ALU64_IMM(BPF_ADD, R9, 10),
-			BPF_ALU32_REG(BPF_ADD, R0, R1),
-			BPF_ALU32_REG(BPF_ADD, R0, R2),
-			BPF_ALU32_REG(BPF_ADD, R0, R3),
-			BPF_ALU32_REG(BPF_ADD, R0, R4),
-			BPF_ALU32_REG(BPF_ADD, R0, R5),
-			BPF_ALU32_REG(BPF_ADD, R0, R6),
-			BPF_ALU32_REG(BPF_ADD, R0, R7),
-			BPF_ALU32_REG(BPF_ADD, R0, R8),
-			BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
-			BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_REG(BPF_ADD, R1, R0),
-			BPF_ALU32_REG(BPF_ADD, R1, R1),
-			BPF_ALU32_REG(BPF_ADD, R1, R2),
-			BPF_ALU32_REG(BPF_ADD, R1, R3),
-			BPF_ALU32_REG(BPF_ADD, R1, R4),
-			BPF_ALU32_REG(BPF_ADD, R1, R5),
-			BPF_ALU32_REG(BPF_ADD, R1, R6),
-			BPF_ALU32_REG(BPF_ADD, R1, R7),
-			BPF_ALU32_REG(BPF_ADD, R1, R8),
-			BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
-			BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_REG(BPF_ADD, R2, R0),
-			BPF_ALU32_REG(BPF_ADD, R2, R1),
-			BPF_ALU32_REG(BPF_ADD, R2, R2),
-			BPF_ALU32_REG(BPF_ADD, R2, R3),
-			BPF_ALU32_REG(BPF_ADD, R2, R4),
-			BPF_ALU32_REG(BPF_ADD, R2, R5),
-			BPF_ALU32_REG(BPF_ADD, R2, R6),
-			BPF_ALU32_REG(BPF_ADD, R2, R7),
-			BPF_ALU32_REG(BPF_ADD, R2, R8),
-			BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
-			BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_REG(BPF_ADD, R3, R0),
-			BPF_ALU32_REG(BPF_ADD, R3, R1),
-			BPF_ALU32_REG(BPF_ADD, R3, R2),
-			BPF_ALU32_REG(BPF_ADD, R3, R3),
-			BPF_ALU32_REG(BPF_ADD, R3, R4),
-			BPF_ALU32_REG(BPF_ADD, R3, R5),
-			BPF_ALU32_REG(BPF_ADD, R3, R6),
-			BPF_ALU32_REG(BPF_ADD, R3, R7),
-			BPF_ALU32_REG(BPF_ADD, R3, R8),
-			BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
-			BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_ARSH, R0, 32),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
 			BPF_EXIT_INSN(),
-			BPF_ALU32_REG(BPF_ADD, R4, R0),
-			BPF_ALU32_REG(BPF_ADD, R4, R1),
-			BPF_ALU32_REG(BPF_ADD, R4, R2),
-			BPF_ALU32_REG(BPF_ADD, R4, R3),
-			BPF_ALU32_REG(BPF_ADD, R4, R4),
-			BPF_ALU32_REG(BPF_ADD, R4, R5),
-			BPF_ALU32_REG(BPF_ADD, R4, R6),
-			BPF_ALU32_REG(BPF_ADD, R4, R7),
-			BPF_ALU32_REG(BPF_ADD, R4, R8),
-			BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
-			BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -1 } }
+	},
+	{
+		"ALU64_ARSH_K: Zero shift",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_ARSH, R0, 0),
 			BPF_EXIT_INSN(),
-			BPF_ALU32_REG(BPF_ADD, R5, R0),
-			BPF_ALU32_REG(BPF_ADD, R5, R1),
-			BPF_ALU32_REG(BPF_ADD, R5, R2),
-			BPF_ALU32_REG(BPF_ADD, R5, R3),
-			BPF_ALU32_REG(BPF_ADD, R5, R4),
-			BPF_ALU32_REG(BPF_ADD, R5, R5),
-			BPF_ALU32_REG(BPF_ADD, R5, R6),
-			BPF_ALU32_REG(BPF_ADD, R5, R7),
-			BPF_ALU32_REG(BPF_ADD, R5, R8),
-			BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
-			BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x89abcdef } }
+	},
+	/* BPF_ALU | BPF_NEG */
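+	/*
+	 * BPF_NEG has no source operand: it negates the destination
+	 * register in place and ignores the immediate, which is 0 here.
+	 */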
+	{
+		"ALU_NEG: -(3) = -3",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 3),
+			BPF_ALU32_IMM(BPF_NEG, R0, 0),
 			BPF_EXIT_INSN(),
-			BPF_ALU32_REG(BPF_ADD, R6, R0),
-			BPF_ALU32_REG(BPF_ADD, R6, R1),
-			BPF_ALU32_REG(BPF_ADD, R6, R2),
-			BPF_ALU32_REG(BPF_ADD, R6, R3),
-			BPF_ALU32_REG(BPF_ADD, R6, R4),
-			BPF_ALU32_REG(BPF_ADD, R6, R5),
-			BPF_ALU32_REG(BPF_ADD, R6, R6),
-			BPF_ALU32_REG(BPF_ADD, R6, R7),
-			BPF_ALU32_REG(BPF_ADD, R6, R8),
-			BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
-			BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -3 } },
+	},
+	{
+		"ALU_NEG: -(-3) = 3",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, -3),
+			BPF_ALU32_IMM(BPF_NEG, R0, 0),
 			BPF_EXIT_INSN(),
-			BPF_ALU32_REG(BPF_ADD, R7, R0),
-			BPF_ALU32_REG(BPF_ADD, R7, R1),
-			BPF_ALU32_REG(BPF_ADD, R7, R2),
-			BPF_ALU32_REG(BPF_ADD, R7, R3),
-			BPF_ALU32_REG(BPF_ADD, R7, R4),
-			BPF_ALU32_REG(BPF_ADD, R7, R5),
-			BPF_ALU32_REG(BPF_ADD, R7, R6),
-			BPF_ALU32_REG(BPF_ADD, R7, R7),
-			BPF_ALU32_REG(BPF_ADD, R7, R8),
-			BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
-			BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	{
+		"ALU64_NEG: -(3) = -3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 3),
+			BPF_ALU64_IMM(BPF_NEG, R0, 0),
 			BPF_EXIT_INSN(),
-			BPF_ALU32_REG(BPF_ADD, R8, R0),
-			BPF_ALU32_REG(BPF_ADD, R8, R1),
-			BPF_ALU32_REG(BPF_ADD, R8, R2),
-			BPF_ALU32_REG(BPF_ADD, R8, R3),
-			BPF_ALU32_REG(BPF_ADD, R8, R4),
-			BPF_ALU32_REG(BPF_ADD, R8, R5),
-			BPF_ALU32_REG(BPF_ADD, R8, R6),
-			BPF_ALU32_REG(BPF_ADD, R8, R7),
-			BPF_ALU32_REG(BPF_ADD, R8, R8),
-			BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
-			BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -3 } },
+	},
+	{
+		"ALU64_NEG: -(-3) = 3",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, -3),
+			BPF_ALU64_IMM(BPF_NEG, R0, 0),
 			BPF_EXIT_INSN(),
-			BPF_ALU32_REG(BPF_ADD, R9, R0),
-			BPF_ALU32_REG(BPF_ADD, R9, R1),
-			BPF_ALU32_REG(BPF_ADD, R9, R2),
-			BPF_ALU32_REG(BPF_ADD, R9, R3),
-			BPF_ALU32_REG(BPF_ADD, R9, R4),
-			BPF_ALU32_REG(BPF_ADD, R9, R5),
-			BPF_ALU32_REG(BPF_ADD, R9, R6),
-			BPF_ALU32_REG(BPF_ADD, R9, R7),
-			BPF_ALU32_REG(BPF_ADD, R9, R8),
-			BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
-			BPF_ALU32_REG(BPF_MOV, R0, R9),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 3 } },
+	},
+	/* BPF_ALU | BPF_END | BPF_FROM_BE */
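+	/*
+	 * The expected values are wrapped in cpu_to_be*() so the tests
+	 * hold on both byte orders. The 32-bit conversions must also
+	 * zero-extend into the upper half of the register: R1 receives
+	 * the high word, which must be zero for the final ALU32 add to
+	 * leave R0 unchanged.
+	 */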
+	{
+		"ALU_END_FROM_BE 16: 0x0123456789abcdef -> 0xcdef",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ENDIAN(BPF_FROM_BE, R0, 16),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2957380 } }
+		{ { 0, cpu_to_be16(0xcdef) } },
 	},
-	{	/* Mainly checking JIT here. */
-		"INT: SUB",
+	{
+		"ALU_END_FROM_BE 32: 0x0123456789abcdef -> 0x89abcdef",
 		.u.insns_int = {
-			BPF_ALU64_IMM(BPF_MOV, R0, 0),
-			BPF_ALU64_IMM(BPF_MOV, R1, 1),
-			BPF_ALU64_IMM(BPF_MOV, R2, 2),
-			BPF_ALU64_IMM(BPF_MOV, R3, 3),
-			BPF_ALU64_IMM(BPF_MOV, R4, 4),
-			BPF_ALU64_IMM(BPF_MOV, R5, 5),
-			BPF_ALU64_IMM(BPF_MOV, R6, 6),
-			BPF_ALU64_IMM(BPF_MOV, R7, 7),
-			BPF_ALU64_IMM(BPF_MOV, R8, 8),
-			BPF_ALU64_IMM(BPF_MOV, R9, 9),
-			BPF_ALU64_REG(BPF_SUB, R0, R0),
-			BPF_ALU64_REG(BPF_SUB, R0, R1),
-			BPF_ALU64_REG(BPF_SUB, R0, R2),
-			BPF_ALU64_REG(BPF_SUB, R0, R3),
-			BPF_ALU64_REG(BPF_SUB, R0, R4),
-			BPF_ALU64_REG(BPF_SUB, R0, R5),
-			BPF_ALU64_REG(BPF_SUB, R0, R6),
-			BPF_ALU64_REG(BPF_SUB, R0, R7),
-			BPF_ALU64_REG(BPF_SUB, R0, R8),
-			BPF_ALU64_REG(BPF_SUB, R0, R9),
-			BPF_ALU64_IMM(BPF_SUB, R0, 10),
-			BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+			BPF_ALU64_REG(BPF_MOV, R1, R0),
+			BPF_ALU64_IMM(BPF_RSH, R1, 32),
+			BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
 			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_SUB, R1, R0),
-			BPF_ALU64_REG(BPF_SUB, R1, R2),
-			BPF_ALU64_REG(BPF_SUB, R1, R3),
-			BPF_ALU64_REG(BPF_SUB, R1, R4),
-			BPF_ALU64_REG(BPF_SUB, R1, R5),
-			BPF_ALU64_REG(BPF_SUB, R1, R6),
-			BPF_ALU64_REG(BPF_SUB, R1, R7),
-			BPF_ALU64_REG(BPF_SUB, R1, R8),
-			BPF_ALU64_REG(BPF_SUB, R1, R9),
-			BPF_ALU64_IMM(BPF_SUB, R1, 10),
-			BPF_ALU64_REG(BPF_SUB, R2, R0),
-			BPF_ALU64_REG(BPF_SUB, R2, R1),
-			BPF_ALU64_REG(BPF_SUB, R2, R3),
-			BPF_ALU64_REG(BPF_SUB, R2, R4),
-			BPF_ALU64_REG(BPF_SUB, R2, R5),
-			BPF_ALU64_REG(BPF_SUB, R2, R6),
-			BPF_ALU64_REG(BPF_SUB, R2, R7),
-			BPF_ALU64_REG(BPF_SUB, R2, R8),
-			BPF_ALU64_REG(BPF_SUB, R2, R9),
-			BPF_ALU64_IMM(BPF_SUB, R2, 10),
-			BPF_ALU64_REG(BPF_SUB, R3, R0),
-			BPF_ALU64_REG(BPF_SUB, R3, R1),
-			BPF_ALU64_REG(BPF_SUB, R3, R2),
-			BPF_ALU64_REG(BPF_SUB, R3, R4),
-			BPF_ALU64_REG(BPF_SUB, R3, R5),
-			BPF_ALU64_REG(BPF_SUB, R3, R6),
-			BPF_ALU64_REG(BPF_SUB, R3, R7),
-			BPF_ALU64_REG(BPF_SUB, R3, R8),
-			BPF_ALU64_REG(BPF_SUB, R3, R9),
-			BPF_ALU64_IMM(BPF_SUB, R3, 10),
-			BPF_ALU64_REG(BPF_SUB, R4, R0),
-			BPF_ALU64_REG(BPF_SUB, R4, R1),
-			BPF_ALU64_REG(BPF_SUB, R4, R2),
-			BPF_ALU64_REG(BPF_SUB, R4, R3),
-			BPF_ALU64_REG(BPF_SUB, R4, R5),
-			BPF_ALU64_REG(BPF_SUB, R4, R6),
-			BPF_ALU64_REG(BPF_SUB, R4, R7),
-			BPF_ALU64_REG(BPF_SUB, R4, R8),
-			BPF_ALU64_REG(BPF_SUB, R4, R9),
-			BPF_ALU64_IMM(BPF_SUB, R4, 10),
-			BPF_ALU64_REG(BPF_SUB, R5, R0),
-			BPF_ALU64_REG(BPF_SUB, R5, R1),
-			BPF_ALU64_REG(BPF_SUB, R5, R2),
-			BPF_ALU64_REG(BPF_SUB, R5, R3),
-			BPF_ALU64_REG(BPF_SUB, R5, R4),
-			BPF_ALU64_REG(BPF_SUB, R5, R6),
-			BPF_ALU64_REG(BPF_SUB, R5, R7),
-			BPF_ALU64_REG(BPF_SUB, R5, R8),
-			BPF_ALU64_REG(BPF_SUB, R5, R9),
-			BPF_ALU64_IMM(BPF_SUB, R5, 10),
-			BPF_ALU64_REG(BPF_SUB, R6, R0),
-			BPF_ALU64_REG(BPF_SUB, R6, R1),
-			BPF_ALU64_REG(BPF_SUB, R6, R2),
-			BPF_ALU64_REG(BPF_SUB, R6, R3),
-			BPF_ALU64_REG(BPF_SUB, R6, R4),
-			BPF_ALU64_REG(BPF_SUB, R6, R5),
-			BPF_ALU64_REG(BPF_SUB, R6, R7),
-			BPF_ALU64_REG(BPF_SUB, R6, R8),
-			BPF_ALU64_REG(BPF_SUB, R6, R9),
-			BPF_ALU64_IMM(BPF_SUB, R6, 10),
-			BPF_ALU64_REG(BPF_SUB, R7, R0),
-			BPF_ALU64_REG(BPF_SUB, R7, R1),
-			BPF_ALU64_REG(BPF_SUB, R7, R2),
-			BPF_ALU64_REG(BPF_SUB, R7, R3),
-			BPF_ALU64_REG(BPF_SUB, R7, R4),
-			BPF_ALU64_REG(BPF_SUB, R7, R5),
-			BPF_ALU64_REG(BPF_SUB, R7, R6),
-			BPF_ALU64_REG(BPF_SUB, R7, R8),
-			BPF_ALU64_REG(BPF_SUB, R7, R9),
-			BPF_ALU64_IMM(BPF_SUB, R7, 10),
-			BPF_ALU64_REG(BPF_SUB, R8, R0),
-			BPF_ALU64_REG(BPF_SUB, R8, R1),
-			BPF_ALU64_REG(BPF_SUB, R8, R2),
-			BPF_ALU64_REG(BPF_SUB, R8, R3),
-			BPF_ALU64_REG(BPF_SUB, R8, R4),
-			BPF_ALU64_REG(BPF_SUB, R8, R5),
-			BPF_ALU64_REG(BPF_SUB, R8, R6),
-			BPF_ALU64_REG(BPF_SUB, R8, R7),
-			BPF_ALU64_REG(BPF_SUB, R8, R9),
-			BPF_ALU64_IMM(BPF_SUB, R8, 10),
-			BPF_ALU64_REG(BPF_SUB, R9, R0),
-			BPF_ALU64_REG(BPF_SUB, R9, R1),
-			BPF_ALU64_REG(BPF_SUB, R9, R2),
-			BPF_ALU64_REG(BPF_SUB, R9, R3),
-			BPF_ALU64_REG(BPF_SUB, R9, R4),
-			BPF_ALU64_REG(BPF_SUB, R9, R5),
-			BPF_ALU64_REG(BPF_SUB, R9, R6),
-			BPF_ALU64_REG(BPF_SUB, R9, R7),
-			BPF_ALU64_REG(BPF_SUB, R9, R8),
-			BPF_ALU64_IMM(BPF_SUB, R9, 10),
-			BPF_ALU64_IMM(BPF_SUB, R0, 10),
-			BPF_ALU64_IMM(BPF_NEG, R0, 0),
-			BPF_ALU64_REG(BPF_SUB, R0, R1),
-			BPF_ALU64_REG(BPF_SUB, R0, R2),
-			BPF_ALU64_REG(BPF_SUB, R0, R3),
-			BPF_ALU64_REG(BPF_SUB, R0, R4),
-			BPF_ALU64_REG(BPF_SUB, R0, R5),
-			BPF_ALU64_REG(BPF_SUB, R0, R6),
-			BPF_ALU64_REG(BPF_SUB, R0, R7),
-			BPF_ALU64_REG(BPF_SUB, R0, R8),
-			BPF_ALU64_REG(BPF_SUB, R0, R9),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, cpu_to_be32(0x89abcdef) } },
+	},
+	{
+		"ALU_END_FROM_BE 64: 0x0123456789abcdef -> 0x89abcdef",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ENDIAN(BPF_FROM_BE, R0, 64),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 11 } }
+		{ { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } },
 	},
-	{	/* Mainly checking JIT here. */
-		"INT: XOR",
+	{
+		"ALU_END_FROM_BE 64: 0x0123456789abcdef >> 32 -> 0x01234567",
 		.u.insns_int = {
-			BPF_ALU64_REG(BPF_SUB, R0, R0),
-			BPF_ALU64_REG(BPF_XOR, R1, R1),
-			BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_IMM(BPF_MOV, R0, 10),
-			BPF_ALU64_IMM(BPF_MOV, R1, -1),
-			BPF_ALU64_REG(BPF_SUB, R1, R1),
-			BPF_ALU64_REG(BPF_XOR, R2, R2),
-			BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_SUB, R2, R2),
-			BPF_ALU64_REG(BPF_XOR, R3, R3),
-			BPF_ALU64_IMM(BPF_MOV, R0, 10),
-			BPF_ALU64_IMM(BPF_MOV, R1, -1),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_SUB, R3, R3),
-			BPF_ALU64_REG(BPF_XOR, R4, R4),
-			BPF_ALU64_IMM(BPF_MOV, R2, 1),
-			BPF_ALU64_IMM(BPF_MOV, R5, -1),
-			BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_SUB, R4, R4),
-			BPF_ALU64_REG(BPF_XOR, R5, R5),
-			BPF_ALU64_IMM(BPF_MOV, R3, 1),
-			BPF_ALU64_IMM(BPF_MOV, R7, -1),
-			BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_IMM(BPF_MOV, R5, 1),
-			BPF_ALU64_REG(BPF_SUB, R5, R5),
-			BPF_ALU64_REG(BPF_XOR, R6, R6),
-			BPF_ALU64_IMM(BPF_MOV, R1, 1),
-			BPF_ALU64_IMM(BPF_MOV, R8, -1),
-			BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_SUB, R6, R6),
-			BPF_ALU64_REG(BPF_XOR, R7, R7),
-			BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_SUB, R7, R7),
-			BPF_ALU64_REG(BPF_XOR, R8, R8),
-			BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_SUB, R8, R8),
-			BPF_ALU64_REG(BPF_XOR, R9, R9),
-			BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_SUB, R9, R9),
-			BPF_ALU64_REG(BPF_XOR, R0, R0),
-			BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_SUB, R1, R1),
-			BPF_ALU64_REG(BPF_XOR, R0, R0),
-			BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
-			BPF_ALU64_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } }
+		{ { 0, (u32) (cpu_to_be64(0x0123456789abcdefLL) >> 32) } },
 	},
-	{	/* Mainly checking JIT here. */
-		"INT: MUL",
+	/* BPF_ALU | BPF_END | BPF_FROM_BE, reversed */
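+	/*
+	 * Repeat the BPF_FROM_BE conversions with the complementary bit
+	 * pattern to catch byte lanes that are dropped or duplicated.
+	 */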
+	{
+		"ALU_END_FROM_BE 16: 0xfedcba9876543210 -> 0x3210",
 		.u.insns_int = {
-			BPF_ALU64_IMM(BPF_MOV, R0, 11),
-			BPF_ALU64_IMM(BPF_MOV, R1, 1),
-			BPF_ALU64_IMM(BPF_MOV, R2, 2),
-			BPF_ALU64_IMM(BPF_MOV, R3, 3),
-			BPF_ALU64_IMM(BPF_MOV, R4, 4),
-			BPF_ALU64_IMM(BPF_MOV, R5, 5),
-			BPF_ALU64_IMM(BPF_MOV, R6, 6),
-			BPF_ALU64_IMM(BPF_MOV, R7, 7),
-			BPF_ALU64_IMM(BPF_MOV, R8, 8),
-			BPF_ALU64_IMM(BPF_MOV, R9, 9),
-			BPF_ALU64_REG(BPF_MUL, R0, R0),
-			BPF_ALU64_REG(BPF_MUL, R0, R1),
-			BPF_ALU64_REG(BPF_MUL, R0, R2),
-			BPF_ALU64_REG(BPF_MUL, R0, R3),
-			BPF_ALU64_REG(BPF_MUL, R0, R4),
-			BPF_ALU64_REG(BPF_MUL, R0, R5),
-			BPF_ALU64_REG(BPF_MUL, R0, R6),
-			BPF_ALU64_REG(BPF_MUL, R0, R7),
-			BPF_ALU64_REG(BPF_MUL, R0, R8),
-			BPF_ALU64_REG(BPF_MUL, R0, R9),
-			BPF_ALU64_IMM(BPF_MUL, R0, 10),
-			BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_MUL, R1, R0),
-			BPF_ALU64_REG(BPF_MUL, R1, R2),
-			BPF_ALU64_REG(BPF_MUL, R1, R3),
-			BPF_ALU64_REG(BPF_MUL, R1, R4),
-			BPF_ALU64_REG(BPF_MUL, R1, R5),
-			BPF_ALU64_REG(BPF_MUL, R1, R6),
-			BPF_ALU64_REG(BPF_MUL, R1, R7),
-			BPF_ALU64_REG(BPF_MUL, R1, R8),
-			BPF_ALU64_REG(BPF_MUL, R1, R9),
-			BPF_ALU64_IMM(BPF_MUL, R1, 10),
-			BPF_ALU64_REG(BPF_MOV, R2, R1),
-			BPF_ALU64_IMM(BPF_RSH, R2, 32),
-			BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
+			BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+			BPF_ENDIAN(BPF_FROM_BE, R0, 16),
 			BPF_EXIT_INSN(),
-			BPF_ALU64_IMM(BPF_LSH, R1, 32),
-			BPF_ALU64_IMM(BPF_ARSH, R1, 32),
-			BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, cpu_to_be16(0x3210) } },
+	},
+	{
+		"ALU_END_FROM_BE 32: 0xfedcba9876543210 -> 0x76543210",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+			BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+			BPF_ALU64_REG(BPF_MOV, R1, R0),
+			BPF_ALU64_IMM(BPF_RSH, R1, 32),
+			BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
 			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_MUL, R2, R0),
-			BPF_ALU64_REG(BPF_MUL, R2, R1),
-			BPF_ALU64_REG(BPF_MUL, R2, R3),
-			BPF_ALU64_REG(BPF_MUL, R2, R4),
-			BPF_ALU64_REG(BPF_MUL, R2, R5),
-			BPF_ALU64_REG(BPF_MUL, R2, R6),
-			BPF_ALU64_REG(BPF_MUL, R2, R7),
-			BPF_ALU64_REG(BPF_MUL, R2, R8),
-			BPF_ALU64_REG(BPF_MUL, R2, R9),
-			BPF_ALU64_IMM(BPF_MUL, R2, 10),
-			BPF_ALU64_IMM(BPF_RSH, R2, 32),
-			BPF_ALU64_REG(BPF_MOV, R0, R2),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, cpu_to_be32(0x76543210) } },
+	},
+	{
+		"ALU_END_FROM_BE 64: 0xfedcba9876543210 -> 0x76543210",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+			BPF_ENDIAN(BPF_FROM_BE, R0, 64),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x35d97ef2 } }
+		{ { 0, (u32) cpu_to_be64(0xfedcba9876543210ULL) } },
 	},
-	{	/* Mainly checking JIT here. */
-		"MOV REG64",
+	{
+		"ALU_END_FROM_BE 64: 0xfedcba9876543210 >> 32 -> 0xfedcba98",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
-			BPF_MOV64_REG(R1, R0),
-			BPF_MOV64_REG(R2, R1),
-			BPF_MOV64_REG(R3, R2),
-			BPF_MOV64_REG(R4, R3),
-			BPF_MOV64_REG(R5, R4),
-			BPF_MOV64_REG(R6, R5),
-			BPF_MOV64_REG(R7, R6),
-			BPF_MOV64_REG(R8, R7),
-			BPF_MOV64_REG(R9, R8),
-			BPF_ALU64_IMM(BPF_MOV, R0, 0),
-			BPF_ALU64_IMM(BPF_MOV, R1, 0),
-			BPF_ALU64_IMM(BPF_MOV, R2, 0),
-			BPF_ALU64_IMM(BPF_MOV, R3, 0),
-			BPF_ALU64_IMM(BPF_MOV, R4, 0),
-			BPF_ALU64_IMM(BPF_MOV, R5, 0),
-			BPF_ALU64_IMM(BPF_MOV, R6, 0),
-			BPF_ALU64_IMM(BPF_MOV, R7, 0),
-			BPF_ALU64_IMM(BPF_MOV, R8, 0),
-			BPF_ALU64_IMM(BPF_MOV, R9, 0),
-			BPF_ALU64_REG(BPF_ADD, R0, R0),
-			BPF_ALU64_REG(BPF_ADD, R0, R1),
-			BPF_ALU64_REG(BPF_ADD, R0, R2),
-			BPF_ALU64_REG(BPF_ADD, R0, R3),
-			BPF_ALU64_REG(BPF_ADD, R0, R4),
-			BPF_ALU64_REG(BPF_ADD, R0, R5),
-			BPF_ALU64_REG(BPF_ADD, R0, R6),
-			BPF_ALU64_REG(BPF_ADD, R0, R7),
-			BPF_ALU64_REG(BPF_ADD, R0, R8),
-			BPF_ALU64_REG(BPF_ADD, R0, R9),
-			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+			BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+			BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xfefe } }
+		{ { 0, (u32) (cpu_to_be64(0xfedcba9876543210ULL) >> 32) } },
 	},
-	{	/* Mainly checking JIT here. */
-		"MOV REG32",
+	/* BPF_ALU | BPF_END | BPF_FROM_LE */
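+	/*
+	 * As above, but converting from little-endian; expected values
+	 * use cpu_to_le*() so the tests are byte-order independent.
+	 */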
+	{
+		"ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
-			BPF_MOV64_REG(R1, R0),
-			BPF_MOV64_REG(R2, R1),
-			BPF_MOV64_REG(R3, R2),
-			BPF_MOV64_REG(R4, R3),
-			BPF_MOV64_REG(R5, R4),
-			BPF_MOV64_REG(R6, R5),
-			BPF_MOV64_REG(R7, R6),
-			BPF_MOV64_REG(R8, R7),
-			BPF_MOV64_REG(R9, R8),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0),
-			BPF_ALU32_IMM(BPF_MOV, R2, 0),
-			BPF_ALU32_IMM(BPF_MOV, R3, 0),
-			BPF_ALU32_IMM(BPF_MOV, R4, 0),
-			BPF_ALU32_IMM(BPF_MOV, R5, 0),
-			BPF_ALU32_IMM(BPF_MOV, R6, 0),
-			BPF_ALU32_IMM(BPF_MOV, R7, 0),
-			BPF_ALU32_IMM(BPF_MOV, R8, 0),
-			BPF_ALU32_IMM(BPF_MOV, R9, 0),
-			BPF_ALU64_REG(BPF_ADD, R0, R0),
-			BPF_ALU64_REG(BPF_ADD, R0, R1),
-			BPF_ALU64_REG(BPF_ADD, R0, R2),
-			BPF_ALU64_REG(BPF_ADD, R0, R3),
-			BPF_ALU64_REG(BPF_ADD, R0, R4),
-			BPF_ALU64_REG(BPF_ADD, R0, R5),
-			BPF_ALU64_REG(BPF_ADD, R0, R6),
-			BPF_ALU64_REG(BPF_ADD, R0, R7),
-			BPF_ALU64_REG(BPF_ADD, R0, R8),
-			BPF_ALU64_REG(BPF_ADD, R0, R9),
-			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ENDIAN(BPF_FROM_LE, R0, 16),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xfefe } }
+		{ { 0, cpu_to_le16(0xcdef) } },
 	},
-	{	/* Mainly checking JIT here. */
-		"LD IMM64",
+	{
+		"ALU_END_FROM_LE 32: 0x0123456789abcdef -> 0xefcdab89",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
-			BPF_MOV64_REG(R1, R0),
-			BPF_MOV64_REG(R2, R1),
-			BPF_MOV64_REG(R3, R2),
-			BPF_MOV64_REG(R4, R3),
-			BPF_MOV64_REG(R5, R4),
-			BPF_MOV64_REG(R6, R5),
-			BPF_MOV64_REG(R7, R6),
-			BPF_MOV64_REG(R8, R7),
-			BPF_MOV64_REG(R9, R8),
-			BPF_LD_IMM64(R0, 0x0LL),
-			BPF_LD_IMM64(R1, 0x0LL),
-			BPF_LD_IMM64(R2, 0x0LL),
-			BPF_LD_IMM64(R3, 0x0LL),
-			BPF_LD_IMM64(R4, 0x0LL),
-			BPF_LD_IMM64(R5, 0x0LL),
-			BPF_LD_IMM64(R6, 0x0LL),
-			BPF_LD_IMM64(R7, 0x0LL),
-			BPF_LD_IMM64(R8, 0x0LL),
-			BPF_LD_IMM64(R9, 0x0LL),
-			BPF_ALU64_REG(BPF_ADD, R0, R0),
-			BPF_ALU64_REG(BPF_ADD, R0, R1),
-			BPF_ALU64_REG(BPF_ADD, R0, R2),
-			BPF_ALU64_REG(BPF_ADD, R0, R3),
-			BPF_ALU64_REG(BPF_ADD, R0, R4),
-			BPF_ALU64_REG(BPF_ADD, R0, R5),
-			BPF_ALU64_REG(BPF_ADD, R0, R6),
-			BPF_ALU64_REG(BPF_ADD, R0, R7),
-			BPF_ALU64_REG(BPF_ADD, R0, R8),
-			BPF_ALU64_REG(BPF_ADD, R0, R9),
-			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+			BPF_ALU64_REG(BPF_MOV, R1, R0),
+			BPF_ALU64_IMM(BPF_RSH, R1, 32),
+			BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xfefe } }
+		{ { 0, cpu_to_le32(0x89abcdef) } },
 	},
 	{
-		"INT: ALU MIX",
+		"ALU_END_FROM_LE 64: 0x0123456789abcdef -> 0x67452301",
 		.u.insns_int = {
-			BPF_ALU64_IMM(BPF_MOV, R0, 11),
-			BPF_ALU64_IMM(BPF_ADD, R0, -1),
-			BPF_ALU64_IMM(BPF_MOV, R2, 2),
-			BPF_ALU64_IMM(BPF_XOR, R2, 3),
-			BPF_ALU64_REG(BPF_DIV, R0, R2),
-			BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_IMM(BPF_MOD, R0, 3),
-			BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ENDIAN(BPF_FROM_LE, R0, 64),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, -1 } }
+		{ { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } },
 	},
 	{
-		"INT: shifts by register",
+		"ALU_END_FROM_LE 64: 0x0123456789abcdef >> 32 -> 0xefcdab89",
 		.u.insns_int = {
-			BPF_MOV64_IMM(R0, -1234),
-			BPF_MOV64_IMM(R1, 1),
-			BPF_ALU32_REG(BPF_RSH, R0, R1),
-			BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1),
-			BPF_EXIT_INSN(),
-			BPF_MOV64_IMM(R2, 1),
-			BPF_ALU64_REG(BPF_LSH, R0, R2),
-			BPF_MOV32_IMM(R4, -1234),
-			BPF_JMP_REG(BPF_JEQ, R0, R4, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU64_IMM(BPF_AND, R4, 63),
-			BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <= 46 */
-			BPF_MOV64_IMM(R3, 47),
-			BPF_ALU64_REG(BPF_ARSH, R0, R3),
-			BPF_JMP_IMM(BPF_JEQ, R0, -617, 1),
-			BPF_EXIT_INSN(),
-			BPF_MOV64_IMM(R2, 1),
-			BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */
-			BPF_JMP_IMM(BPF_JEQ, R4, 92, 1),
-			BPF_EXIT_INSN(),
-			BPF_MOV64_IMM(R4, 4),
-			BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */
-			BPF_JMP_IMM(BPF_JEQ, R4, 64, 1),
-			BPF_EXIT_INSN(),
-			BPF_MOV64_IMM(R4, 5),
-			BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */
-			BPF_JMP_IMM(BPF_JEQ, R4, 160, 1),
-			BPF_EXIT_INSN(),
-			BPF_MOV64_IMM(R0, -1),
+			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+			BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, -1 } }
+		{ { 0, (u32) (cpu_to_le64(0x0123456789abcdefLL) >> 32) } },
 	},
+	/* BPF_ALU | BPF_END | BPF_FROM_LE, reversed */
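+	/*
+	 * Repeat the BPF_FROM_LE conversions with the complementary bit
+	 * pattern.
+	 */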
 	{
-		/*
-		 * Register (non-)clobbering test, in the case where a 32-bit
-		 * JIT implements complex ALU64 operations via function calls.
-		 * If so, the function call must be invisible in the eBPF
-		 * registers. The JIT must then save and restore relevant
-		 * registers during the call. The following tests check that
-		 * the eBPF registers retain their values after such a call.
-		 */
-		"INT: Register clobbering, R1 updated",
+		"ALU_END_FROM_LE 16: 0xfedcba9876543210 -> 0x1032",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_ALU32_IMM(BPF_MOV, R1, 123456789),
-			BPF_ALU32_IMM(BPF_MOV, R2, 2),
-			BPF_ALU32_IMM(BPF_MOV, R3, 3),
-			BPF_ALU32_IMM(BPF_MOV, R4, 4),
-			BPF_ALU32_IMM(BPF_MOV, R5, 5),
-			BPF_ALU32_IMM(BPF_MOV, R6, 6),
-			BPF_ALU32_IMM(BPF_MOV, R7, 7),
-			BPF_ALU32_IMM(BPF_MOV, R8, 8),
-			BPF_ALU32_IMM(BPF_MOV, R9, 9),
-			BPF_ALU64_IMM(BPF_DIV, R1, 123456789),
-			BPF_JMP_IMM(BPF_JNE, R0, 0, 10),
-			BPF_JMP_IMM(BPF_JNE, R1, 1, 9),
-			BPF_JMP_IMM(BPF_JNE, R2, 2, 8),
-			BPF_JMP_IMM(BPF_JNE, R3, 3, 7),
-			BPF_JMP_IMM(BPF_JNE, R4, 4, 6),
-			BPF_JMP_IMM(BPF_JNE, R5, 5, 5),
-			BPF_JMP_IMM(BPF_JNE, R6, 6, 4),
-			BPF_JMP_IMM(BPF_JNE, R7, 7, 3),
-			BPF_JMP_IMM(BPF_JNE, R8, 8, 2),
-			BPF_JMP_IMM(BPF_JNE, R9, 9, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+			BPF_ENDIAN(BPF_FROM_LE, R0, 16),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } }
+		{ { 0, cpu_to_le16(0x3210) } },
 	},
 	{
-		"INT: Register clobbering, R2 updated",
+		"ALU_END_FROM_LE 32: 0xfedcba9876543210 -> 0x10325476",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_ALU32_IMM(BPF_MOV, R1, 1),
-			BPF_ALU32_IMM(BPF_MOV, R2, 2 * 123456789),
-			BPF_ALU32_IMM(BPF_MOV, R3, 3),
-			BPF_ALU32_IMM(BPF_MOV, R4, 4),
-			BPF_ALU32_IMM(BPF_MOV, R5, 5),
-			BPF_ALU32_IMM(BPF_MOV, R6, 6),
-			BPF_ALU32_IMM(BPF_MOV, R7, 7),
-			BPF_ALU32_IMM(BPF_MOV, R8, 8),
-			BPF_ALU32_IMM(BPF_MOV, R9, 9),
-			BPF_ALU64_IMM(BPF_DIV, R2, 123456789),
-			BPF_JMP_IMM(BPF_JNE, R0, 0, 10),
-			BPF_JMP_IMM(BPF_JNE, R1, 1, 9),
-			BPF_JMP_IMM(BPF_JNE, R2, 2, 8),
-			BPF_JMP_IMM(BPF_JNE, R3, 3, 7),
-			BPF_JMP_IMM(BPF_JNE, R4, 4, 6),
-			BPF_JMP_IMM(BPF_JNE, R5, 5, 5),
-			BPF_JMP_IMM(BPF_JNE, R6, 6, 4),
-			BPF_JMP_IMM(BPF_JNE, R7, 7, 3),
-			BPF_JMP_IMM(BPF_JNE, R8, 8, 2),
-			BPF_JMP_IMM(BPF_JNE, R9, 9, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+			BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+			BPF_ALU64_REG(BPF_MOV, R1, R0),
+			BPF_ALU64_IMM(BPF_RSH, R1, 32),
+			BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } }
+		{ { 0, cpu_to_le32(0x76543210) } },
 	},
 	{
-		/*
-		 * Test 32-bit JITs that implement complex ALU64 operations as
-		 * function calls R0 = f(R1, R2), and must re-arrange operands.
-		 */
-#define NUMER 0xfedcba9876543210ULL
-#define DENOM 0x0123456789abcdefULL
-		"ALU64_DIV X: Operand register permutations",
+		"ALU_END_FROM_LE 64: 0xfedcba9876543210 -> 0x10325476",
 		.u.insns_int = {
-			/* R0 / R2 */
-			BPF_LD_IMM64(R0, NUMER),
-			BPF_LD_IMM64(R2, DENOM),
-			BPF_ALU64_REG(BPF_DIV, R0, R2),
-			BPF_JMP_IMM(BPF_JEQ, R0, NUMER / DENOM, 1),
-			BPF_EXIT_INSN(),
-			/* R1 / R0 */
-			BPF_LD_IMM64(R1, NUMER),
-			BPF_LD_IMM64(R0, DENOM),
-			BPF_ALU64_REG(BPF_DIV, R1, R0),
-			BPF_JMP_IMM(BPF_JEQ, R1, NUMER / DENOM, 1),
-			BPF_EXIT_INSN(),
-			/* R0 / R1 */
-			BPF_LD_IMM64(R0, NUMER),
-			BPF_LD_IMM64(R1, DENOM),
-			BPF_ALU64_REG(BPF_DIV, R0, R1),
-			BPF_JMP_IMM(BPF_JEQ, R0, NUMER / DENOM, 1),
-			BPF_EXIT_INSN(),
-			/* R2 / R0 */
-			BPF_LD_IMM64(R2, NUMER),
-			BPF_LD_IMM64(R0, DENOM),
-			BPF_ALU64_REG(BPF_DIV, R2, R0),
-			BPF_JMP_IMM(BPF_JEQ, R2, NUMER / DENOM, 1),
-			BPF_EXIT_INSN(),
-			/* R2 / R1 */
-			BPF_LD_IMM64(R2, NUMER),
-			BPF_LD_IMM64(R1, DENOM),
-			BPF_ALU64_REG(BPF_DIV, R2, R1),
-			BPF_JMP_IMM(BPF_JEQ, R2, NUMER / DENOM, 1),
-			BPF_EXIT_INSN(),
-			/* R1 / R2 */
-			BPF_LD_IMM64(R1, NUMER),
-			BPF_LD_IMM64(R2, DENOM),
-			BPF_ALU64_REG(BPF_DIV, R1, R2),
-			BPF_JMP_IMM(BPF_JEQ, R1, NUMER / DENOM, 1),
-			BPF_EXIT_INSN(),
-			/* R1 / R1 */
-			BPF_LD_IMM64(R1, NUMER),
-			BPF_ALU64_REG(BPF_DIV, R1, R1),
-			BPF_JMP_IMM(BPF_JEQ, R1, 1, 1),
-			BPF_EXIT_INSN(),
-			/* R2 / R2 */
-			BPF_LD_IMM64(R2, DENOM),
-			BPF_ALU64_REG(BPF_DIV, R2, R2),
-			BPF_JMP_IMM(BPF_JEQ, R2, 1, 1),
-			BPF_EXIT_INSN(),
-			/* R3 / R4 */
-			BPF_LD_IMM64(R3, NUMER),
-			BPF_LD_IMM64(R4, DENOM),
-			BPF_ALU64_REG(BPF_DIV, R3, R4),
-			BPF_JMP_IMM(BPF_JEQ, R3, NUMER / DENOM, 1),
-			BPF_EXIT_INSN(),
-			/* Successful return */
-			BPF_LD_IMM64(R0, 1),
+			BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+			BPF_ENDIAN(BPF_FROM_LE, R0, 64),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } },
-#undef NUMER
-#undef DENOM
+		{ { 0, (u32) cpu_to_le64(0xfedcba9876543210ULL) } },
 	},
-#ifdef CONFIG_32BIT
 	{
-		"INT: 32-bit context pointer word order and zero-extension",
+		"ALU_END_FROM_LE 64: 0xfedcba9876543210 >> 32 -> 0x98badcfe",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_JMP32_IMM(BPF_JEQ, R1, 0, 3),
-			BPF_ALU64_IMM(BPF_RSH, R1, 32),
-			BPF_JMP32_IMM(BPF_JNE, R1, 0, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+			BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+			BPF_ALU64_IMM(BPF_RSH, R0, 32),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } }
+		{ { 0, (u32) (cpu_to_le64(0xfedcba9876543210ULL) >> 32) } },
 	},
-#endif
+	/* BPF_LDX_MEM B/H/W/DW */
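+	/*
+	 * Store a doubleword on the stack, then load a narrower value
+	 * back from it. The #ifdef selects the offset of the least
+	 * significant bytes on either byte order, so the loaded value is
+	 * the same on big- and little-endian hosts. The "MSB set"
+	 * variants load values with the top bit set in each byte to
+	 * verify that narrow loads zero-extend rather than sign-extend.
+	 */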
 	{
-		"check: missing ret",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 1),
+		"BPF_LDX_MEM | BPF_B",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+			BPF_LD_IMM64(R2, 0x0000000000000008ULL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+			BPF_LDX_MEM(BPF_B, R0, R10, -1),
+#else
+			BPF_LDX_MEM(BPF_B, R0, R10, -8),
+#endif
+			BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
-		{ },
+		INTERNAL,
 		{ },
-		.fill_helper = NULL,
-		.expected_errcode = -EINVAL,
+		{ { 0, 0 } },
+		.stack_depth = 8,
 	},
 	{
-		"check: div_k_0",
-		.u.insns = {
-			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
-			BPF_STMT(BPF_RET | BPF_K, 0)
+		"BPF_LDX_MEM | BPF_B, MSB set",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+			BPF_LD_IMM64(R2, 0x0000000000000088ULL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+			BPF_LDX_MEM(BPF_B, R0, R10, -1),
+#else
+			BPF_LDX_MEM(BPF_B, R0, R10, -8),
+#endif
+			BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
-		{ },
+		INTERNAL,
 		{ },
-		.fill_helper = NULL,
-		.expected_errcode = -EINVAL,
+		{ { 0, 0 } },
+		.stack_depth = 8,
 	},
 	{
-		"check: unknown insn",
-		.u.insns = {
-			/* seccomp insn, rejected in socket filter */
-			BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
-			BPF_STMT(BPF_RET | BPF_K, 0)
+		"BPF_LDX_MEM | BPF_H",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+			BPF_LD_IMM64(R2, 0x0000000000000708ULL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+			BPF_LDX_MEM(BPF_H, R0, R10, -2),
+#else
+			BPF_LDX_MEM(BPF_H, R0, R10, -8),
+#endif
+			BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC | FLAG_EXPECTED_FAIL,
-		{ },
+		INTERNAL,
 		{ },
-		.fill_helper = NULL,
-		.expected_errcode = -EINVAL,
+		{ { 0, 0 } },
+		.stack_depth = 8,
 	},
 	{
-		"check: out of range spill/fill",
-		.u.insns = {
-			BPF_STMT(BPF_STX, 16),
-			BPF_STMT(BPF_RET | BPF_K, 0)
+		"BPF_LDX_MEM | BPF_H, MSB set",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+			BPF_LD_IMM64(R2, 0x0000000000008788ULL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+			BPF_LDX_MEM(BPF_H, R0, R10, -2),
+#else
+			BPF_LDX_MEM(BPF_H, R0, R10, -8),
+#endif
+			BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+		INTERNAL,
 		{ },
+		{ { 0, 0 } },
+		.stack_depth = 8,
+	},
+	{
+		"BPF_LDX_MEM | BPF_W",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+			BPF_LD_IMM64(R2, 0x0000000005060708ULL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+			BPF_LDX_MEM(BPF_W, R0, R10, -4),
+#else
+			BPF_LDX_MEM(BPF_W, R0, R10, -8),
+#endif
+			BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
 		{ },
-		.fill_helper = NULL,
-		.expected_errcode = -EINVAL,
+		{ { 0, 0 } },
+		.stack_depth = 8,
 	},
 	{
-		"JUMPS + HOLES",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
-			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
-			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
-			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
-			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
-			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0),
+		"BPF_LDX_MEM | BPF_W, MSB set",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+			BPF_LD_IMM64(R2, 0x0000000085868788ULL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+			BPF_LDX_MEM(BPF_W, R0, R10, -4),
+#else
+			BPF_LDX_MEM(BPF_W, R0, R10, -8),
+#endif
+			BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0 } },
+		.stack_depth = 8,
+	},
+	/* BPF_STX_MEM B/H/W/DW */
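+	/*
+	 * Mirror image of the BPF_LDX_MEM tests above: store a narrow
+	 * value into a doubleword on the stack and check that only the
+	 * addressed bytes were modified.
+	 */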
+	{
+		"BPF_STX_MEM | BPF_B",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+			BPF_LD_IMM64(R2, 0x0102030405060708ULL),
+			BPF_LD_IMM64(R3, 0x8090a0b0c0d0e008ULL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+			BPF_STX_MEM(BPF_B, R10, R2, -1),
+#else
+			BPF_STX_MEM(BPF_B, R10, R2, -8),
+#endif
+			BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+			BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8,
-		  0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4,
-		  0x08, 0x00,
-		  0x45, 0x00, 0x00, 0x28, 0x00, 0x00,
-		  0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */
-		  0xc0, 0xa8, 0x33, 0x01,
-		  0xc0, 0xa8, 0x33, 0x02,
-		  0xbb, 0xb6,
-		  0xa9, 0xfa,
-		  0x00, 0x14, 0x00, 0x00,
-		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
-		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
-		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
-		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
-		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
-		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
-		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
-		  0xcc, 0xcc, 0xcc, 0xcc },
-		{ { 88, 0x001b } }
+		INTERNAL,
+		{ },
+		{ { 0, 0 } },
+		.stack_depth = 8,
 	},
 	{
-		"check: RET X",
-		.u.insns = {
-			BPF_STMT(BPF_RET | BPF_X, 0),
+		"BPF_STX_MEM | BPF_B, MSB set",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+			BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+			BPF_LD_IMM64(R3, 0x8090a0b0c0d0e088ULL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+			BPF_STX_MEM(BPF_B, R10, R2, -1),
+#else
+			BPF_STX_MEM(BPF_B, R10, R2, -8),
+#endif
+			BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+			BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+		INTERNAL,
 		{ },
+		{ { 0, 0 } },
+		.stack_depth = 8,
+	},
+	{
+		"BPF_STX_MEM | BPF_H",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+			BPF_LD_IMM64(R2, 0x0102030405060708ULL),
+			BPF_LD_IMM64(R3, 0x8090a0b0c0d00708ULL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+			BPF_STX_MEM(BPF_H, R10, R2, -2),
+#else
+			BPF_STX_MEM(BPF_H, R10, R2, -8),
+#endif
+			BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+			BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
 		{ },
-		.fill_helper = NULL,
-		.expected_errcode = -EINVAL,
+		{ { 0, 0 } },
+		.stack_depth = 8,
 	},
 	{
-		"check: LDX + RET X",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 42),
-			BPF_STMT(BPF_RET | BPF_X, 0),
+		"BPF_STX_MEM | BPF_H, MSB set",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+			BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+			BPF_LD_IMM64(R3, 0x8090a0b0c0d08788ULL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+			BPF_STX_MEM(BPF_H, R10, R2, -2),
+#else
+			BPF_STX_MEM(BPF_H, R10, R2, -8),
+#endif
+			BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+			BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+		INTERNAL,
 		{ },
+		{ { 0, 0 } },
+		.stack_depth = 8,
+	},
+	{
+		"BPF_STX_MEM | BPF_W",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+			BPF_LD_IMM64(R2, 0x0102030405060708ULL),
+			BPF_LD_IMM64(R3, 0x8090a0b005060708ULL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+			BPF_STX_MEM(BPF_W, R10, R2, -4),
+#else
+			BPF_STX_MEM(BPF_W, R10, R2, -8),
+#endif
+			BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+			BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
 		{ },
-		.fill_helper = NULL,
-		.expected_errcode = -EINVAL,
+		{ { 0, 0 } },
+		.stack_depth = 8,
 	},
-	{	/* Mainly checking JIT here. */
-		"M[]: alt STX + LDX",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 100),
-			BPF_STMT(BPF_STX, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 0),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 1),
-			BPF_STMT(BPF_LDX | BPF_MEM, 1),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 2),
-			BPF_STMT(BPF_LDX | BPF_MEM, 2),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 3),
-			BPF_STMT(BPF_LDX | BPF_MEM, 3),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 4),
-			BPF_STMT(BPF_LDX | BPF_MEM, 4),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 5),
-			BPF_STMT(BPF_LDX | BPF_MEM, 5),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 6),
-			BPF_STMT(BPF_LDX | BPF_MEM, 6),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 7),
-			BPF_STMT(BPF_LDX | BPF_MEM, 7),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 8),
-			BPF_STMT(BPF_LDX | BPF_MEM, 8),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 9),
-			BPF_STMT(BPF_LDX | BPF_MEM, 9),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 10),
-			BPF_STMT(BPF_LDX | BPF_MEM, 10),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 11),
-			BPF_STMT(BPF_LDX | BPF_MEM, 11),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 12),
-			BPF_STMT(BPF_LDX | BPF_MEM, 12),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 13),
-			BPF_STMT(BPF_LDX | BPF_MEM, 13),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 14),
-			BPF_STMT(BPF_LDX | BPF_MEM, 14),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_STX, 15),
-			BPF_STMT(BPF_LDX | BPF_MEM, 15),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0),
+	{
+		"BPF_STX_MEM | BPF_W, MSB set",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+			BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+			BPF_LD_IMM64(R3, 0x8090a0b085868788ULL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+			BPF_STX_MEM(BPF_W, R10, R2, -4),
+#else
+			BPF_STX_MEM(BPF_W, R10, R2, -8),
+#endif
+			BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+			BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC | FLAG_NO_DATA,
+		INTERNAL,
 		{ },
-		{ { 0, 116 } },
+		{ { 0, 0 } },
+		.stack_depth = 8,
 	},
-	{	/* Mainly checking JIT here. */
-		"M[]: full STX + full LDX",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
-			BPF_STMT(BPF_STX, 0),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
-			BPF_STMT(BPF_STX, 1),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
-			BPF_STMT(BPF_STX, 2),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
-			BPF_STMT(BPF_STX, 3),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
-			BPF_STMT(BPF_STX, 4),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
-			BPF_STMT(BPF_STX, 5),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
-			BPF_STMT(BPF_STX, 6),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
-			BPF_STMT(BPF_STX, 7),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
-			BPF_STMT(BPF_STX, 8),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
-			BPF_STMT(BPF_STX, 9),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
-			BPF_STMT(BPF_STX, 10),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
-			BPF_STMT(BPF_STX, 11),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
-			BPF_STMT(BPF_STX, 12),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
-			BPF_STMT(BPF_STX, 13),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
-			BPF_STMT(BPF_STX, 14),
-			BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
-			BPF_STMT(BPF_STX, 15),
-			BPF_STMT(BPF_LDX | BPF_MEM, 0),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 1),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 2),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 3),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 4),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 5),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 6),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 7),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 8),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 9),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 10),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 11),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 12),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 13),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 14),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_LDX | BPF_MEM, 15),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0),
+	/* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */
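+	/*
+	 * Note that BPF_ST_MEM takes a 32-bit immediate which is
+	 * sign-extended for BPF_DW stores; the "max negative 2" case
+	 * below relies on 0xffffffff reading back as all ones.
+	 */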
+	{
+		"ST_MEM_B: Store/Load byte: max negative",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_B, R10, -40, 0xff),
+			BPF_LDX_MEM(BPF_B, R0, R10, -40),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC | FLAG_NO_DATA,
+		INTERNAL,
 		{ },
-		{ { 0, 0x2a5a5e5 } },
+		{ { 0, 0xff } },
+		.stack_depth = 40,
 	},
 	{
-		"check: SKF_AD_MAX",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF + SKF_AD_MAX),
-			BPF_STMT(BPF_RET | BPF_A, 0),
+		"ST_MEM_B: Store/Load byte: max positive",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_B, R10, -40, 0x7f),
+			BPF_LDX_MEM(BPF_B, R0, R10, -40),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
-		{ },
+		INTERNAL,
 		{ },
-		.fill_helper = NULL,
-		.expected_errcode = -EINVAL,
+		{ { 0, 0x7f } },
+		.stack_depth = 40,
 	},
-	{	/* Passes checker but fails during runtime. */
-		"LD [SKF_AD_OFF-1]",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-				 SKF_AD_OFF - 1),
-			BPF_STMT(BPF_RET | BPF_K, 1),
+	{
+		"STX_MEM_B: Store/Load byte: max negative",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 0),
+			BPF_LD_IMM64(R1, 0xffLL),
+			BPF_STX_MEM(BPF_B, R10, R1, -40),
+			BPF_LDX_MEM(BPF_B, R0, R10, -40),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
+		INTERNAL,
 		{ },
-		{ { 1, 0 } },
+		{ { 0, 0xff } },
+		.stack_depth = 40,
 	},
 	{
-		"load 64-bit immediate",
+		"ST_MEM_H: Store/Load half word: max negative",
 		.u.insns_int = {
-			BPF_LD_IMM64(R1, 0x567800001234LL),
-			BPF_MOV64_REG(R2, R1),
-			BPF_MOV64_REG(R3, R2),
-			BPF_ALU64_IMM(BPF_RSH, R2, 32),
-			BPF_ALU64_IMM(BPF_LSH, R3, 32),
-			BPF_ALU64_IMM(BPF_RSH, R3, 32),
-			BPF_ALU64_IMM(BPF_MOV, R0, 0),
-			BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1),
-			BPF_EXIT_INSN(),
-			BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
-			BPF_EXIT_INSN(),
-			BPF_LD_IMM64(R0, 0x1ffffffffLL),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32), /* R0 = 1 */
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_H, R10, -40, 0xffff),
+			BPF_LDX_MEM(BPF_H, R0, R10, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } }
+		{ { 0, 0xffff } },
+		.stack_depth = 40,
 	},
-	/* BPF_ALU | BPF_MOV | BPF_X */
 	{
-		"ALU_MOV_X: dst = 2",
+		"ST_MEM_H: Store/Load half word: max positive",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R1, 2),
-			BPF_ALU32_REG(BPF_MOV, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_H, R10, -40, 0x7fff),
+			BPF_LDX_MEM(BPF_H, R0, R10, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, 0x7fff } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU_MOV_X: dst = 4294967295",
+		"STX_MEM_H: Store/Load half word: max negative",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
-			BPF_ALU32_REG(BPF_MOV, R0, R1),
+			BPF_LD_IMM64(R0, 0),
+			BPF_LD_IMM64(R1, 0xffffLL),
+			BPF_STX_MEM(BPF_H, R10, R1, -40),
+			BPF_LDX_MEM(BPF_H, R0, R10, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 4294967295U } },
+		{ { 0, 0xffff } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU64_MOV_X: dst = 2",
+		"ST_MEM_W: Store/Load word: max negative",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R1, 2),
-			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_W, R10, -40, 0xffffffff),
+			BPF_LDX_MEM(BPF_W, R0, R10, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU64_MOV_X: dst = 4294967295",
+		"ST_MEM_W: Store/Load word: max positive",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
-			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_W, R10, -40, 0x7fffffff),
+			BPF_LDX_MEM(BPF_W, R0, R10, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 4294967295U } },
+		{ { 0, 0x7fffffff } },
+		.stack_depth = 40,
 	},
-	/* BPF_ALU | BPF_MOV | BPF_K */
 	{
-		"ALU_MOV_K: dst = 2",
+		"STX_MEM_W: Store/Load word: max negative",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 2),
+			BPF_LD_IMM64(R0, 0),
+			BPF_LD_IMM64(R1, 0xffffffffLL),
+			BPF_STX_MEM(BPF_W, R10, R1, -40),
+			BPF_LDX_MEM(BPF_W, R0, R10, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU_MOV_K: dst = 4294967295",
+		"ST_MEM_DW: Store/Load double word: max negative",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
+			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 4294967295U } },
+		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU_MOV_K: 0x0000ffffffff0000 = 0x00000000ffffffff",
+		"ST_MEM_DW: Store/Load double word: max negative 2",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
-			BPF_LD_IMM64(R3, 0x00000000ffffffffLL),
-			BPF_ALU32_IMM(BPF_MOV, R2, 0xffffffff),
+			BPF_LD_IMM64(R2, 0xffff00000000ffffLL),
+			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+			BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
+			BPF_LDX_MEM(BPF_DW, R2, R10, -40),
 			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
 			BPF_MOV32_IMM(R0, 2),
 			BPF_EXIT_INSN(),
@@ -2551,899 +8158,1259 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x1 } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU_MOV_K: small negative",
+		"ST_MEM_DW: Store/Load double word: max positive",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -123),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ST_MEM(BPF_DW, R10, -40, 0x7fffffff),
+			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, -123 } }
+		{ { 0, 0x7fffffff } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU_MOV_K: small negative zero extension",
+		"STX_MEM_DW: Store/Load double word: max negative",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -123),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_LD_IMM64(R0, 0),
+			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -40),
+			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0 } }
+		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU_MOV_K: large negative",
+		"STX_MEM_DW: Store double word: first word in memory",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -123456789),
+			BPF_LD_IMM64(R0, 0),
+			BPF_LD_IMM64(R1, 0x0123456789abcdefLL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -40),
+			BPF_LDX_MEM(BPF_W, R0, R10, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, -123456789 } }
+#ifdef __BIG_ENDIAN
+		{ { 0, 0x01234567 } },
+#else
+		{ { 0, 0x89abcdef } },
+#endif
+		.stack_depth = 40,
 	},
 	{
-		"ALU_MOV_K: large negative zero extension",
+		"STX_MEM_DW: Store double word: second word in memory",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -123456789),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_LD_IMM64(R0, 0),
+			BPF_LD_IMM64(R1, 0x0123456789abcdefLL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -40),
+			BPF_LDX_MEM(BPF_W, R0, R10, -36),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0 } }
+#ifdef __BIG_ENDIAN
+		{ { 0, 0x89abcdef } },
+#else
+		{ { 0, 0x01234567 } },
+#endif
+		.stack_depth = 40,
+	},
+	/* BPF_STX | BPF_ATOMIC | BPF_W/DW */
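+	/*
+	 * These two programs are generated at run time by their
+	 * fill_helper callbacks rather than supplied as static
+	 * instruction arrays, hence the empty insns field.
+	 */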
+	{
+		"STX_XADD_W: X + 1 + 1 + 1 + ...",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 4134 } },
+		.fill_helper = bpf_fill_stxw,
+	},
+	{
+		"STX_XADD_DW: X + 1 + 1 + 1 + ...",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 4134 } },
+		.fill_helper = bpf_fill_stxdw,
 	},
+	/*
+	 * Exhaustive tests of atomic operation variants.
+	 * Individual tests are expanded from template macros for all
+	 * combinations of ALU operation, word size and fetching.
+	 */
+#define BPF_ATOMIC_POISON(width) ((width) == BPF_W ? (0xbaadf00dULL << 32) : 0)
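+/*
+ * For BPF_W operations, BPF_ATOMIC_POISON sets a pattern in the upper
+ * 32 bits of the source value; a correct implementation must operate
+ * on the low 32 bits only. The RSH 32 + OR folding in the tests makes
+ * any poison that leaks into the result visible in the low word.
+ */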
+
+#define BPF_ATOMIC_OP_TEST1(width, op, logic, old, update, result)	\
+{									\
+	"BPF_ATOMIC | " #width ", " #op ": Test: "			\
+		#old " " #logic " " #update " = " #result,		\
+	.u.insns_int = {						\
+		BPF_LD_IMM64(R5, (update) | BPF_ATOMIC_POISON(width)),	\
+		BPF_ST_MEM(width, R10, -40, old),			\
+		BPF_ATOMIC_OP(width, op, R10, R5, -40),			\
+		BPF_LDX_MEM(width, R0, R10, -40),			\
+		BPF_ALU64_REG(BPF_MOV, R1, R0),				\
+		BPF_ALU64_IMM(BPF_RSH, R1, 32),				\
+		BPF_ALU64_REG(BPF_OR, R0, R1),				\
+		BPF_EXIT_INSN(),					\
+	},								\
+	INTERNAL,							\
+	{ },								\
+	{ { 0, result } },						\
+	.stack_depth = 40,						\
+}
+#define BPF_ATOMIC_OP_TEST2(width, op, logic, old, update, result)	\
+{									\
+	"BPF_ATOMIC | " #width ", " #op ": Test side effects, r10: "	\
+		#old " " #logic " " #update " = " #result,		\
+	.u.insns_int = {						\
+		BPF_ALU64_REG(BPF_MOV, R1, R10),			\
+		BPF_LD_IMM64(R0, (update) | BPF_ATOMIC_POISON(width)),	\
+		BPF_ST_MEM(width, R10, -40, old),			\
+		BPF_ATOMIC_OP(width, op, R10, R0, -40),			\
+		BPF_ALU64_REG(BPF_MOV, R0, R10),			\
+		BPF_ALU64_REG(BPF_SUB, R0, R1),				\
+		BPF_ALU64_REG(BPF_MOV, R1, R0),				\
+		BPF_ALU64_IMM(BPF_RSH, R1, 32),				\
+		BPF_ALU64_REG(BPF_OR, R0, R1),				\
+		BPF_EXIT_INSN(),					\
+	},								\
+	INTERNAL,							\
+	{ },								\
+	{ { 0, 0 } },							\
+	.stack_depth = 40,						\
+}
+#define BPF_ATOMIC_OP_TEST3(width, op, logic, old, update, result)	\
+{									\
+	"BPF_ATOMIC | " #width ", " #op ": Test side effects, r0: "	\
+		#old " " #logic " " #update " = " #result,		\
+	.u.insns_int = {						\
+		BPF_ALU64_REG(BPF_MOV, R0, R10),			\
+		BPF_LD_IMM64(R1, (update) | BPF_ATOMIC_POISON(width)),	\
+		BPF_ST_MEM(width, R10, -40, old),			\
+		BPF_ATOMIC_OP(width, op, R10, R1, -40),			\
+		BPF_ALU64_REG(BPF_SUB, R0, R10),			\
+		BPF_ALU64_REG(BPF_MOV, R1, R0),				\
+		BPF_ALU64_IMM(BPF_RSH, R1, 32),				\
+		BPF_ALU64_REG(BPF_OR, R0, R1),				\
+		BPF_EXIT_INSN(),					\
+	},								\
+	INTERNAL,							\
+	{ },								\
+	{ { 0, 0 } },							\
+	.stack_depth = 40,						\
+}
+#define BPF_ATOMIC_OP_TEST4(width, op, logic, old, update, result)	\
+{									\
+	"BPF_ATOMIC | " #width ", " #op ": Test fetch: "		\
+		#old " " #logic " " #update " = " #result,		\
+	.u.insns_int = {						\
+		BPF_LD_IMM64(R3, (update) | BPF_ATOMIC_POISON(width)),	\
+		BPF_ST_MEM(width, R10, -40, old),			\
+		BPF_ATOMIC_OP(width, op, R10, R3, -40),			\
+		BPF_ALU32_REG(BPF_MOV, R0, R3),				\
+		BPF_EXIT_INSN(),					\
+	},								\
+	INTERNAL,							\
+	{ },								\
+	{ { 0, (op) & BPF_FETCH ? old : update } },			\
+	.stack_depth = 40,						\
+}
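+/*
+ * TEST1 checks the value left in memory, TEST2 that the address
+ * register (R10) is preserved, TEST3 that R0 is preserved, and TEST4
+ * the value fetched into the source register: the old memory value
+ * with BPF_FETCH, otherwise the register is left unchanged.
+ */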
+	/* BPF_ATOMIC | BPF_W: BPF_ADD */
+	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+	/* BPF_ATOMIC | BPF_W: BPF_ADD | BPF_FETCH */
+	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+	/* BPF_ATOMIC | BPF_DW: BPF_ADD */
+	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+	/* BPF_ATOMIC | BPF_DW: BPF_ADD | BPF_FETCH */
+	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+	/* BPF_ATOMIC | BPF_W: BPF_AND */
+	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+	/* BPF_ATOMIC | BPF_W: BPF_AND | BPF_FETCH */
+	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+	/* BPF_ATOMIC | BPF_DW: BPF_AND */
+	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+	/* BPF_ATOMIC | BPF_DW: BPF_AND | BPF_FETCH */
+	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+	/* BPF_ATOMIC | BPF_W: BPF_OR */
+	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+	/* BPF_ATOMIC | BPF_W: BPF_OR | BPF_FETCH */
+	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+	/* BPF_ATOMIC | BPF_DW: BPF_OR */
+	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+	/* BPF_ATOMIC | BPF_DW: BPF_OR | BPF_FETCH */
+	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+	/* BPF_ATOMIC | BPF_W: BPF_XOR */
+	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+	/* BPF_ATOMIC | BPF_W: BPF_XOR | BPF_FETCH */
+	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+	/* BPF_ATOMIC | BPF_DW: BPF_XOR */
+	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+	/* BPF_ATOMIC | BPF_DW: BPF_XOR | BPF_FETCH */
+	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+	/* BPF_ATOMIC | BPF_W: BPF_XCHG */
+	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+	/* BPF_ATOMIC | BPF_DW: BPF_XCHG */
+	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+#undef BPF_ATOMIC_POISON
+#undef BPF_ATOMIC_OP_TEST1
+#undef BPF_ATOMIC_OP_TEST2
+#undef BPF_ATOMIC_OP_TEST3
+#undef BPF_ATOMIC_OP_TEST4
+	/* BPF_ATOMIC | BPF_W, BPF_CMPXCHG */
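+	/*
+	 * BPF_CMPXCHG compares R0 with the memory location: on a match
+	 * the source register is stored, and R0 always receives the old
+	 * memory value. The tests below cover both outcomes for the
+	 * returned value and the stored value.
+	 */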
 	{
-		"ALU64_MOV_K: dst = 2",
+		"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful return",
 		.u.insns_int = {
-			BPF_ALU64_IMM(BPF_MOV, R0, 2),
+			BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
+			BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+			BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, 0x01234567 } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU64_MOV_K: dst = 2147483647",
+		"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful store",
 		.u.insns_int = {
-			BPF_ALU64_IMM(BPF_MOV, R0, 2147483647),
+			BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
+			BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+			BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+			BPF_LDX_MEM(BPF_W, R0, R10, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2147483647 } },
+		{ { 0, 0x89abcdef } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU64_OR_K: dst = 0x0",
+		"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure return",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
-			BPF_LD_IMM64(R3, 0x0),
-			BPF_ALU64_IMM(BPF_MOV, R2, 0x0),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210),
+			BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+			BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 0x01234567 } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU64_MOV_K: dst = -1",
+		"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure store",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
-			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
-			BPF_ALU64_IMM(BPF_MOV, R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210),
+			BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+			BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+			BPF_LDX_MEM(BPF_W, R0, R10, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 0x01234567 } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU64_MOV_K: small negative",
+		"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test side effects",
 		.u.insns_int = {
-			BPF_ALU64_IMM(BPF_MOV, R0, -123),
+			BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
+			BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+			BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+			BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+			BPF_ALU32_REG(BPF_MOV, R0, R3),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, -123 } }
+		{ { 0, 0x89abcdef } },
+		.stack_depth = 40,
 	},
+	/* BPF_ATOMIC | BPF_DW, BPF_CMPXCHG */
 	{
-		"ALU64_MOV_K: small negative sign extension",
+		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful return",
 		.u.insns_int = {
-			BPF_ALU64_IMM(BPF_MOV, R0, -123),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+			BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_STX_MEM(BPF_DW, R10, R1, -40),
+			BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+			BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+			BPF_ALU64_REG(BPF_SUB, R0, R1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xffffffff } }
+		{ { 0, 0 } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU64_MOV_K: large negative",
+		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful store",
 		.u.insns_int = {
-			BPF_ALU64_IMM(BPF_MOV, R0, -123456789),
+			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+			BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_STX_MEM(BPF_DW, R10, R0, -40),
+			BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+			BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+			BPF_ALU64_REG(BPF_SUB, R0, R2),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, -123456789 } }
+		{ { 0, 0 } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU64_MOV_K: large negative sign extension",
+		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure return",
 		.u.insns_int = {
-			BPF_ALU64_IMM(BPF_MOV, R0, -123456789),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+			BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_ALU64_IMM(BPF_ADD, R0, 1),
+			BPF_STX_MEM(BPF_DW, R10, R1, -40),
+			BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+			BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+			BPF_ALU64_REG(BPF_SUB, R0, R1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xffffffff } }
+		{ { 0, 0 } },
+		.stack_depth = 40,
 	},
-	/* BPF_ALU | BPF_ADD | BPF_X */
 	{
-		"ALU_ADD_X: 1 + 2 = 3",
+		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure store",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_MOV, R1, 2),
-			BPF_ALU32_REG(BPF_ADD, R0, R1),
+			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+			BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_ALU64_IMM(BPF_ADD, R0, 1),
+			BPF_STX_MEM(BPF_DW, R10, R1, -40),
+			BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+			BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+			BPF_ALU64_REG(BPF_SUB, R0, R1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 0 } },
+		.stack_depth = 40,
 	},
 	{
-		"ALU_ADD_X: 1 + 4294967294 = 4294967295",
+		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test side effects",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
-			BPF_ALU32_REG(BPF_ADD, R0, R1),
+			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+			BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_STX_MEM(BPF_DW, R10, R1, -40),
+			BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+			BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+			BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+			BPF_ALU64_REG(BPF_SUB, R0, R2),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 4294967295U } },
+		{ { 0, 0 } },
+		.stack_depth = 40,
 	},
+	/* BPF_JMP32 | BPF_JEQ | BPF_K */
 	{
-		"ALU_ADD_X: 2 + 4294967294 = 0",
+		"JMP32_JEQ_K: Small immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2),
-			BPF_LD_IMM64(R1, 4294967294U),
-			BPF_ALU32_REG(BPF_ADD, R0, R1),
-			BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 123),
+			BPF_JMP32_IMM(BPF_JEQ, R0, 321, 1),
+			BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } },
+		{ { 0, 123 } }
 	},
 	{
-		"ALU64_ADD_X: 1 + 2 = 3",
+		"JMP32_JEQ_K: Large immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_MOV, R1, 2),
-			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 12345678),
+			BPF_JMP32_IMM(BPF_JEQ, R0, 12345678 & 0xffff, 1),
+			BPF_JMP32_IMM(BPF_JEQ, R0, 12345678, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 12345678 } }
 	},
 	{
-		"ALU64_ADD_X: 1 + 4294967294 = 4294967295",
+		"JMP32_JEQ_K: negative immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
-			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, -123),
+			BPF_JMP32_IMM(BPF_JEQ, R0,  123, 1),
+			BPF_JMP32_IMM(BPF_JEQ, R0, -123, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 4294967295U } },
+		{ { 0, -123 } }
 	},
+	/* BPF_JMP32 | BPF_JEQ | BPF_X */
 	{
-		"ALU64_ADD_X: 2 + 4294967294 = 4294967296",
+		"JMP32_JEQ_X",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2),
-			BPF_LD_IMM64(R1, 4294967294U),
-			BPF_LD_IMM64(R2, 4294967296ULL),
-			BPF_ALU64_REG(BPF_ADD, R0, R1),
-			BPF_JMP_REG(BPF_JEQ, R0, R2, 2),
-			BPF_MOV32_IMM(R0, 0),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1234),
+			BPF_ALU32_IMM(BPF_MOV, R1, 4321),
+			BPF_JMP32_REG(BPF_JEQ, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1234),
+			BPF_JMP32_REG(BPF_JEQ, R0, R1, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } },
+		{ { 0, 1234 } }
 	},
-	/* BPF_ALU | BPF_ADD | BPF_K */
+	/* BPF_JMP32 | BPF_JNE | BPF_K */
 	{
-		"ALU_ADD_K: 1 + 2 = 3",
+		"JMP32_JNE_K: Small immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_ADD, R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 123),
+			BPF_JMP32_IMM(BPF_JNE, R0, 123, 1),
+			BPF_JMP32_IMM(BPF_JNE, R0, 321, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 123 } }
 	},
 	{
-		"ALU_ADD_K: 3 + 0 = 3",
+		"JMP32_JNE_K: Large immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU32_IMM(BPF_ADD, R0, 0),
+			BPF_ALU32_IMM(BPF_MOV, R0, 12345678),
+			BPF_JMP32_IMM(BPF_JNE, R0, 12345678, 1),
+			BPF_JMP32_IMM(BPF_JNE, R0, 12345678 & 0xffff, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 12345678 } }
 	},
 	{
-		"ALU_ADD_K: 1 + 4294967294 = 4294967295",
+		"JMP32_JNE_K: negative immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_ADD, R0, 4294967294U),
+			BPF_ALU32_IMM(BPF_MOV, R0, -123),
+			BPF_JMP32_IMM(BPF_JNE, R0, -123, 1),
+			BPF_JMP32_IMM(BPF_JNE, R0,  123, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 4294967295U } },
+		{ { 0, -123 } }
 	},
+	/* BPF_JMP32 | BPF_JNE | BPF_X */
 	{
-		"ALU_ADD_K: 4294967294 + 2 = 0",
+		"JMP32_JNE_X",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 4294967294U),
-			BPF_ALU32_IMM(BPF_ADD, R0, 2),
-			BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1234),
+			BPF_ALU32_IMM(BPF_MOV, R1, 1234),
+			BPF_JMP32_REG(BPF_JNE, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 4321),
+			BPF_JMP32_REG(BPF_JNE, R0, R1, 1),
 			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } },
+		{ { 0, 1234 } }
 	},
+	/* BPF_JMP32 | BPF_JSET | BPF_K */
 	{
-		"ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
+		"JMP32_JSET_K: Small immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0),
-			BPF_LD_IMM64(R3, 0x00000000ffffffff),
-			BPF_ALU32_IMM(BPF_ADD, R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_JMP32_IMM(BPF_JSET, R0, 2, 1),
+			BPF_JMP32_IMM(BPF_JSET, R0, 3, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 1 } }
 	},
 	{
-		"ALU_ADD_K: 0 + 0xffff = 0xffff",
+		"JMP32_JSET_K: Large immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0),
-			BPF_LD_IMM64(R3, 0xffff),
-			BPF_ALU32_IMM(BPF_ADD, R2, 0xffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x40000000),
+			BPF_JMP32_IMM(BPF_JSET, R0, 0x3fffffff, 1),
+			BPF_JMP32_IMM(BPF_JSET, R0, 0x60000000, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 0x40000000 } }
 	},
 	{
-		"ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+		"JMP32_JSET_K: negative immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0),
-			BPF_LD_IMM64(R3, 0x7fffffff),
-			BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, -123),
+			BPF_JMP32_IMM(BPF_JSET, R0, -1, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, -123 } }
 	},
+	/* BPF_JMP32 | BPF_JSET | BPF_X */
 	{
-		"ALU_ADD_K: 0 + 0x80000000 = 0x80000000",
+		"JMP32_JSET_X",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0),
-			BPF_LD_IMM64(R3, 0x80000000),
-			BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 8),
+			BPF_ALU32_IMM(BPF_MOV, R1, 7),
+			BPF_JMP32_REG(BPF_JSET, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 8 | 2),
+			BPF_JMP32_REG(BPF_JSET, R0, R1, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 8 } }
 	},
+	/* BPF_JMP32 | BPF_JGT | BPF_K */
 	{
-		"ALU_ADD_K: 0 + 0x80008000 = 0x80008000",
+		"JMP32_JGT_K: Small immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0),
-			BPF_LD_IMM64(R3, 0x80008000),
-			BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 123),
+			BPF_JMP32_IMM(BPF_JGT, R0, 123, 1),
+			BPF_JMP32_IMM(BPF_JGT, R0, 122, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 123 } }
 	},
 	{
-		"ALU64_ADD_K: 1 + 2 = 3",
+		"JMP32_JGT_K: Large immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU64_IMM(BPF_ADD, R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+			BPF_JMP32_IMM(BPF_JGT, R0, 0xffffffff, 1),
+			BPF_JMP32_IMM(BPF_JGT, R0, 0xfffffffd, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 0xfffffffe } }
 	},
+	/* BPF_JMP32 | BPF_JGT | BPF_X */
 	{
-		"ALU64_ADD_K: 3 + 0 = 3",
+		"JMP32_JGT_X",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU64_IMM(BPF_ADD, R0, 0),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_JMP32_REG(BPF_JGT, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
+			BPF_JMP32_REG(BPF_JGT, R0, R1, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 0xfffffffe } }
 	},
+	/* BPF_JMP32 | BPF_JGE | BPF_K */
 	{
-		"ALU64_ADD_K: 1 + 2147483646 = 2147483647",
+		"JMP32_JGE_K: Small immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU64_IMM(BPF_ADD, R0, 2147483646),
+			BPF_ALU32_IMM(BPF_MOV, R0, 123),
+			BPF_JMP32_IMM(BPF_JGE, R0, 124, 1),
+			BPF_JMP32_IMM(BPF_JGE, R0, 123, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2147483647 } },
+		{ { 0, 123 } }
 	},
 	{
-		"ALU64_ADD_K: 4294967294 + 2 = 4294967296",
+		"JMP32_JGE_K: Large immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 4294967294U),
-			BPF_LD_IMM64(R1, 4294967296ULL),
-			BPF_ALU64_IMM(BPF_ADD, R0, 2),
-			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+			BPF_JMP32_IMM(BPF_JGE, R0, 0xffffffff, 1),
+			BPF_JMP32_IMM(BPF_JGE, R0, 0xfffffffe, 1),
 			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } },
+		{ { 0, 0xfffffffe } }
 	},
+	/* BPF_JMP32 | BPF_JGE | BPF_X */
 	{
-		"ALU64_ADD_K: 2147483646 + -2147483647 = -1",
+		"JMP32_JGE_X",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2147483646),
-			BPF_ALU64_IMM(BPF_ADD, R0, -2147483647),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_JMP32_REG(BPF_JGE, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe),
+			BPF_JMP32_REG(BPF_JGE, R0, R1, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, -1 } },
+		{ { 0, 0xfffffffe } }
 	},
+	/* BPF_JMP32 | BPF_JLT | BPF_K */
 	{
-		"ALU64_ADD_K: 1 + 0 = 1",
+		"JMP32_JLT_K: Small immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x1),
-			BPF_LD_IMM64(R3, 0x1),
-			BPF_ALU64_IMM(BPF_ADD, R2, 0x0),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 123),
+			BPF_JMP32_IMM(BPF_JLT, R0, 123, 1),
+			BPF_JMP32_IMM(BPF_JLT, R0, 124, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 123 } }
 	},
 	{
-		"ALU64_ADD_K: 0 + (-1) = 0xffffffffffffffff",
+		"JMP32_JLT_K: Large immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0),
-			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
-			BPF_ALU64_IMM(BPF_ADD, R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+			BPF_JMP32_IMM(BPF_JLT, R0, 0xfffffffd, 1),
+			BPF_JMP32_IMM(BPF_JLT, R0, 0xffffffff, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 0xfffffffe } }
 	},
+	/* BPF_JMP32 | BPF_JLT | BPF_X */
 	{
-		"ALU64_ADD_K: 0 + 0xffff = 0xffff",
+		"JMP32_JLT_X",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0),
-			BPF_LD_IMM64(R3, 0xffff),
-			BPF_ALU64_IMM(BPF_ADD, R2, 0xffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
+			BPF_JMP32_REG(BPF_JLT, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+			BPF_JMP32_REG(BPF_JLT, R0, R1, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 0xfffffffe } }
 	},
+	/* BPF_JMP32 | BPF_JLE | BPF_K */
 	{
-		"ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+		"JMP32_JLE_K: Small immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0),
-			BPF_LD_IMM64(R3, 0x7fffffff),
-			BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 123),
+			BPF_JMP32_IMM(BPF_JLE, R0, 122, 1),
+			BPF_JMP32_IMM(BPF_JLE, R0, 123, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 123 } }
 	},
 	{
-		"ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000",
+		"JMP32_JLE_K: Large immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0),
-			BPF_LD_IMM64(R3, 0xffffffff80000000LL),
-			BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+			BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffd, 1),
+			BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffe, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 0xfffffffe } }
 	},
+	/* BPF_JMP32 | BPF_JLE | BPF_X */
 	{
-		"ALU_ADD_K: 0 + 0x80008000 = 0xffffffff80008000",
+		"JMP32_JLE_X",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0),
-			BPF_LD_IMM64(R3, 0xffffffff80008000LL),
-			BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
+			BPF_JMP32_REG(BPF_JLE, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe),
+			BPF_JMP32_REG(BPF_JLE, R0, R1, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 0xfffffffe } }
 	},
-	/* BPF_ALU | BPF_SUB | BPF_X */
+	/* BPF_JMP32 | BPF_JSGT | BPF_K */
 	{
-		"ALU_SUB_X: 3 - 1 = 2",
+		"JMP32_JSGT_K: Small immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU32_IMM(BPF_MOV, R1, 1),
-			BPF_ALU32_REG(BPF_SUB, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, -123),
+			BPF_JMP32_IMM(BPF_JSGT, R0, -123, 1),
+			BPF_JMP32_IMM(BPF_JSGT, R0, -124, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, -123 } }
 	},
 	{
-		"ALU_SUB_X: 4294967295 - 4294967294 = 1",
+		"JMP32_JSGT_K: Large immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 4294967295U),
-			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
-			BPF_ALU32_REG(BPF_SUB, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+			BPF_JMP32_IMM(BPF_JSGT, R0, -12345678, 1),
+			BPF_JMP32_IMM(BPF_JSGT, R0, -12345679, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } },
+		{ { 0, -12345678 } }
 	},
+	/* BPF_JMP32 | BPF_JSGT | BPF_X */
 	{
-		"ALU64_SUB_X: 3 - 1 = 2",
+		"JMP32_JSGT_X",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU32_IMM(BPF_MOV, R1, 1),
-			BPF_ALU64_REG(BPF_SUB, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+			BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+			BPF_JMP32_REG(BPF_JSGT, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, -12345679),
+			BPF_JMP32_REG(BPF_JSGT, R0, R1, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, -12345678 } }
 	},
+	/* BPF_JMP32 | BPF_JSGE | BPF_K */
 	{
-		"ALU64_SUB_X: 4294967295 - 4294967294 = 1",
+		"JMP32_JSGE_K: Small immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 4294967295U),
-			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
-			BPF_ALU64_REG(BPF_SUB, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, -123),
+			BPF_JMP32_IMM(BPF_JSGE, R0, -122, 1),
+			BPF_JMP32_IMM(BPF_JSGE, R0, -123, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } },
+		{ { 0, -123 } }
 	},
-	/* BPF_ALU | BPF_SUB | BPF_K */
 	{
-		"ALU_SUB_K: 3 - 1 = 2",
+		"JMP32_JSGE_K: Large immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU32_IMM(BPF_SUB, R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+			BPF_JMP32_IMM(BPF_JSGE, R0, -12345677, 1),
+			BPF_JMP32_IMM(BPF_JSGE, R0, -12345678, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, -12345678 } }
 	},
+	/* BPF_JMP32 | BPF_JSGE | BPF_X */
 	{
-		"ALU_SUB_K: 3 - 0 = 3",
+		"JMP32_JSGE_X",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU32_IMM(BPF_SUB, R0, 0),
+			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+			BPF_ALU32_IMM(BPF_MOV, R1, -12345677),
+			BPF_JMP32_REG(BPF_JSGE, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+			BPF_JMP32_REG(BPF_JSGE, R0, R1, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, -12345678 } }
 	},
+	/* BPF_JMP32 | BPF_JSLT | BPF_K */
 	{
-		"ALU_SUB_K: 4294967295 - 4294967294 = 1",
+		"JMP32_JSLT_K: Small immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 4294967295U),
-			BPF_ALU32_IMM(BPF_SUB, R0, 4294967294U),
+			BPF_ALU32_IMM(BPF_MOV, R0, -123),
+			BPF_JMP32_IMM(BPF_JSLT, R0, -123, 1),
+			BPF_JMP32_IMM(BPF_JSLT, R0, -122, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } },
+		{ { 0, -123 } }
 	},
 	{
-		"ALU64_SUB_K: 3 - 1 = 2",
+		"JMP32_JSLT_K: Large immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU64_IMM(BPF_SUB, R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+			BPF_JMP32_IMM(BPF_JSLT, R0, -12345678, 1),
+			BPF_JMP32_IMM(BPF_JSLT, R0, -12345677, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, -12345678 } }
 	},
+	/* BPF_JMP32 | BPF_JSLT | BPF_X */
 	{
-		"ALU64_SUB_K: 3 - 0 = 3",
+		"JMP32_JSLT_X",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU64_IMM(BPF_SUB, R0, 0),
+			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+			BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+			BPF_JMP32_REG(BPF_JSLT, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, -12345677),
+			BPF_JMP32_REG(BPF_JSLT, R0, R1, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, -12345678 } }
 	},
+	/* BPF_JMP32 | BPF_JSLE | BPF_K */
 	{
-		"ALU64_SUB_K: 4294967294 - 4294967295 = -1",
+		"JMP32_JSLE_K: Small immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 4294967294U),
-			BPF_ALU64_IMM(BPF_SUB, R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_MOV, R0, -123),
+			BPF_JMP32_IMM(BPF_JSLE, R0, -124, 1),
+			BPF_JMP32_IMM(BPF_JSLE, R0, -123, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, -1 } },
+		{ { 0, -123 } }
 	},
 	{
-		"ALU64_ADD_K: 2147483646 - 2147483647 = -1",
+		"JMP32_JSLE_K: Large immediate",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2147483646),
-			BPF_ALU64_IMM(BPF_SUB, R0, 2147483647),
+			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+			BPF_JMP32_IMM(BPF_JSLE, R0, -12345679, 1),
+			BPF_JMP32_IMM(BPF_JSLE, R0, -12345678, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, -1 } },
+		{ { 0, -12345678 } }
 	},
-	/* BPF_ALU | BPF_MUL | BPF_X */
+	/* BPF_JMP32 | BPF_JSLE | BPF_X */
 	{
-		"ALU_MUL_X: 2 * 3 = 6",
+		"JMP32_JSLE_X",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, 3),
-			BPF_ALU32_REG(BPF_MUL, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+			BPF_ALU32_IMM(BPF_MOV, R1, -12345679),
+			BPF_JMP32_REG(BPF_JSLE, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+			BPF_JMP32_REG(BPF_JSLE, R0, R1, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 6 } },
+		{ { 0, -12345678 } }
 	},
+	/* BPF_JMP | BPF_EXIT */
 	{
-		"ALU_MUL_X: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
+		"JMP_EXIT",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0x7FFFFFF8),
-			BPF_ALU32_REG(BPF_MUL, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x4711),
 			BPF_EXIT_INSN(),
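+			/* Must not be reached: EXIT above ends the program */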
+			BPF_ALU32_IMM(BPF_MOV, R0, 0x4712),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xFFFFFFF0 } },
+		{ { 0, 0x4711 } },
 	},
+	/* BPF_JMP | BPF_JA */
 	{
-		"ALU_MUL_X: -1 * -1 = 1",
+		"JMP_JA: Unconditional jump: if (true) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, -1),
-			BPF_ALU32_IMM(BPF_MOV, R1, -1),
-			BPF_ALU32_REG(BPF_MUL, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JSLT | BPF_K */
 	{
-		"ALU64_MUL_X: 2 * 3 = 6",
+		"JMP_JSLT_K: Signed jump: if (-2 < -1) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, 3),
-			BPF_ALU64_REG(BPF_MUL, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
+			BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 6 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU64_MUL_X: 1 * 2147483647 = 2147483647",
+		"JMP_JSLT_K: Signed jump: if (-1 < -1) return 0",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
-			BPF_ALU64_REG(BPF_MUL, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+			BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2147483647 } },
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JSGT | BPF_K */
 	{
-		"ALU64_MUL_X: 64x64 multiply, low word",
+		"JMP_JSGT_K: Signed jump: if (-1 > -2) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0fedcba987654321LL),
-			BPF_LD_IMM64(R1, 0x123456789abcdef0LL),
-			BPF_ALU64_REG(BPF_MUL, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+			BPF_JMP_IMM(BPF_JSGT, R1, -2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xe5618cf0 } }
+		{ { 0, 1 } },
 	},
 	{
-		"ALU64_MUL_X: 64x64 multiply, high word",
+		"JMP_JSGT_K: Signed jump: if (-1 > -1) return 0",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0fedcba987654321LL),
-			BPF_LD_IMM64(R1, 0x123456789abcdef0LL),
-			BPF_ALU64_REG(BPF_MUL, R0, R1),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+			BPF_JMP_IMM(BPF_JSGT, R1, -1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x2236d88f } }
+		{ { 0, 1 } },
 	},
-	/* BPF_ALU | BPF_MUL | BPF_K */
+	/* BPF_JMP | BPF_JSLE | BPF_K */
 	{
-		"ALU_MUL_K: 2 * 3 = 6",
+		"JMP_JSLE_K: Signed jump: if (-2 <= -1) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2),
-			BPF_ALU32_IMM(BPF_MUL, R0, 3),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
+			BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 6 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU_MUL_K: 3 * 1 = 3",
+		"JMP_JSLE_K: Signed jump: if (-1 <= -1) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU32_IMM(BPF_MUL, R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+			BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU_MUL_K: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
+		"JMP_JSLE_K: Signed jump: value walk 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2),
-			BPF_ALU32_IMM(BPF_MUL, R0, 0x7FFFFFF8),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JSLE, R1, 0, 6),
+			BPF_ALU64_IMM(BPF_SUB, R1, 1),
+			BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
+			BPF_ALU64_IMM(BPF_SUB, R1, 1),
+			BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
+			BPF_ALU64_IMM(BPF_SUB, R1, 1),
+			BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
+			BPF_EXIT_INSN(),		/* bad exit */
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),	/* good exit */
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xFFFFFFF0 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU_MUL_K: 1 * (-1) = 0x00000000ffffffff",
+		"JMP_JSLE_K: Signed jump: value walk 2",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x1),
-			BPF_LD_IMM64(R3, 0x00000000ffffffff),
-			BPF_ALU32_IMM(BPF_MUL, R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
+			BPF_ALU64_IMM(BPF_SUB, R1, 2),
+			BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
+			BPF_ALU64_IMM(BPF_SUB, R1, 2),
+			BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
+			BPF_EXIT_INSN(),		/* bad exit */
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),	/* good exit */
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JSGE | BPF_K */
 	{
-		"ALU64_MUL_K: 2 * 3 = 6",
+		"JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2),
-			BPF_ALU64_IMM(BPF_MUL, R0, 3),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+			BPF_JMP_IMM(BPF_JSGE, R1, -2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 6 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU64_MUL_K: 3 * 1 = 3",
+		"JMP_JSGE_K: Signed jump: if (-1 >= -1) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU64_IMM(BPF_MUL, R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+			BPF_JMP_IMM(BPF_JSGE, R1, -1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU64_MUL_K: 1 * 2147483647 = 2147483647",
+		"JMP_JSGE_K: Signed jump: value walk 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU64_IMM(BPF_MUL, R0, 2147483647),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -3),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 6),
+			BPF_ALU64_IMM(BPF_ADD, R1, 1),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
+			BPF_ALU64_IMM(BPF_ADD, R1, 1),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
+			BPF_ALU64_IMM(BPF_ADD, R1, 1),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
+			BPF_EXIT_INSN(),		/* bad exit */
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),	/* good exit */
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2147483647 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU64_MUL_K: 1 * -2147483647 = -2147483647",
+		"JMP_JSGE_K: Signed jump: value walk 2",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU64_IMM(BPF_MUL, R0, -2147483647),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -3),
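+			/* R1 steps up -3, -1, 1; only the final, positive value may branch. */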
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
+			BPF_ALU64_IMM(BPF_ADD, R1, 2),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
+			BPF_ALU64_IMM(BPF_ADD, R1, 2),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
+			BPF_EXIT_INSN(),		/* bad exit */
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),	/* good exit */
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, -2147483647 } },
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JGT | BPF_K */
 	{
-		"ALU64_MUL_K: 1 * (-1) = 0xffffffffffffffff",
+		"JMP_JGT_K: if (3 > 2) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x1),
-			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
-			BPF_ALU64_IMM(BPF_MUL, R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JGT, R1, 2, 1),
 			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU64_MUL_K: 64x32 multiply, low word",
+		"JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_JMP_IMM(BPF_JGT, R1, 1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xe242d208 } }
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JLT | BPF_K */
 	{
-		"ALU64_MUL_K: 64x32 multiply, high word",
+		"JMP_JLT_K: if (2 < 3) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 2),
+			BPF_JMP_IMM(BPF_JLT, R1, 3, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xc28f5c28 } }
+		{ { 0, 1 } },
 	},
-	/* BPF_ALU | BPF_DIV | BPF_X */
 	{
-		"ALU_DIV_X: 6 / 2 = 3",
+		"JMP_JGT_K: Unsigned jump: if (1 < -1) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 6),
-			BPF_ALU32_IMM(BPF_MOV, R1, 2),
-			BPF_ALU32_REG(BPF_DIV, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 1),
+			BPF_JMP_IMM(BPF_JLT, R1, -1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JGE | BPF_K */
 	{
-		"ALU_DIV_X: 4294967295 / 4294967295 = 1",
+		"JMP_JGE_K: if (3 >= 2) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 4294967295U),
-			BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
-			BPF_ALU32_REG(BPF_DIV, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JGE, R1, 2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JLE | BPF_K */
 	{
-		"ALU64_DIV_X: 6 / 2 = 3",
+		"JMP_JLE_K: if (2 <= 3) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 6),
-			BPF_ALU32_IMM(BPF_MOV, R1, 2),
-			BPF_ALU64_REG(BPF_DIV, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 2),
+			BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JGT | BPF_K jump backwards */
 	{
-		"ALU64_DIV_X: 2147483647 / 2147483647 = 1",
+		"JMP_JGT_K: if (3 > 2) return 1 (jump backwards)",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2147483647),
-			BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
-			BPF_ALU64_REG(BPF_DIV, R0, R1),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
+			BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
+			BPF_LD_IMM64(R1, 3), /* note: this takes 2 insns */
+			BPF_JMP_IMM(BPF_JGT, R1, 2, -6), /* goto out */
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
@@ -3451,99 +9418,118 @@ static struct bpf_test tests[] = {
 		{ { 0, 1 } },
 	},
 	{
-		"ALU64_DIV_X: 0xffffffffffffffff / (-1) = 0x0000000000000001",
+		"JMP_JGE_K: if (3 >= 3) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
-			BPF_LD_IMM64(R4, 0xffffffffffffffffLL),
-			BPF_LD_IMM64(R3, 0x0000000000000001LL),
-			BPF_ALU64_REG(BPF_DIV, R2, R4),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JGE, R1, 3, 1),
 			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 1 } },
 	},
-	/* BPF_ALU | BPF_DIV | BPF_K */
+	/* BPF_JMP | BPF_JLT | BPF_K jump backwards */
 	{
-		"ALU_DIV_K: 6 / 2 = 3",
+		"JMP_JGT_K: if (2 < 3) return 1 (jump backwards)",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 6),
-			BPF_ALU32_IMM(BPF_DIV, R0, 2),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
+			BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
+			BPF_LD_IMM64(R1, 2), /* note: this takes 2 insns */
+			BPF_JMP_IMM(BPF_JLT, R1, 3, -6), /* goto out */
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU_DIV_K: 3 / 1 = 3",
+		"JMP_JLE_K: if (3 <= 3) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU32_IMM(BPF_DIV, R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JNE | BPF_K */
 	{
-		"ALU_DIV_K: 4294967295 / 4294967295 = 1",
+		"JMP_JNE_K: if (3 != 2) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 4294967295U),
-			BPF_ALU32_IMM(BPF_DIV, R0, 4294967295U),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JNE, R1, 2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JEQ | BPF_K */
 	{
-		"ALU_DIV_K: 0xffffffffffffffff / (-1) = 0x1",
+		"JMP_JEQ_K: if (3 == 3) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
-			BPF_LD_IMM64(R3, 0x1UL),
-			BPF_ALU32_IMM(BPF_DIV, R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JEQ, R1, 3, 1),
 			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JSET | BPF_K */
 	{
-		"ALU64_DIV_K: 6 / 2 = 3",
+		"JMP_JSET_K: if (0x3 & 0x2) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 6),
-			BPF_ALU64_IMM(BPF_DIV, R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU64_DIV_K: 3 / 1 = 3",
+		"JMP_JSET_K: if (0x3 & 0xffffffff) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU64_IMM(BPF_DIV, R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JSGT | BPF_X */
 	{
-		"ALU64_DIV_K: 2147483647 / 2147483647 = 1",
+		"JMP_JSGT_X: Signed jump: if (-1 > -2) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2147483647),
-			BPF_ALU64_IMM(BPF_DIV, R0, 2147483647),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, -2),
+			BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
@@ -3551,28 +9537,30 @@ static struct bpf_test tests[] = {
 		{ { 0, 1 } },
 	},
 	{
-		"ALU64_DIV_K: 0xffffffffffffffff / (-1) = 0x0000000000000001",
+		"JMP_JSGT_X: Signed jump: if (-1 > -1) return 0",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
-			BPF_LD_IMM64(R3, 0x0000000000000001LL),
-			BPF_ALU64_IMM(BPF_DIV, R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, -1),
+			BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
 			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 1 } },
 	},
-	/* BPF_ALU | BPF_MOD | BPF_X */
+	/* BPF_JMP | BPF_JSLT | BPF_X */
 	{
-		"ALU_MOD_X: 3 % 2 = 1",
+		"JMP_JSLT_X: Signed jump: if (-2 < -1) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU32_IMM(BPF_MOV, R1, 2),
-			BPF_ALU32_REG(BPF_MOD, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, -2),
+			BPF_JMP_REG(BPF_JSLT, R2, R1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
@@ -3580,23 +9568,30 @@ static struct bpf_test tests[] = {
 		{ { 0, 1 } },
 	},
 	{
-		"ALU_MOD_X: 4294967295 % 4294967293 = 2",
+		"JMP_JSLT_X: Signed jump: if (-1 < -1) return 0",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 4294967295U),
-			BPF_ALU32_IMM(BPF_MOV, R1, 4294967293U),
-			BPF_ALU32_REG(BPF_MOD, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, -1),
+			BPF_JMP_REG(BPF_JSLT, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JSGE | BPF_X */
 	{
-		"ALU64_MOD_X: 3 % 2 = 1",
+		"JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU32_IMM(BPF_MOV, R1, 2),
-			BPF_ALU64_REG(BPF_MOD, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, -2),
+			BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
@@ -3604,23 +9599,30 @@ static struct bpf_test tests[] = {
 		{ { 0, 1 } },
 	},
 	{
-		"ALU64_MOD_X: 2147483647 % 2147483645 = 2",
+		"JMP_JSGE_X: Signed jump: if (-1 >= -1) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2147483647),
-			BPF_ALU32_IMM(BPF_MOV, R1, 2147483645),
-			BPF_ALU64_REG(BPF_MOD, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, -1),
+			BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, 1 } },
 	},
-	/* BPF_ALU | BPF_MOD | BPF_K */
+	/* BPF_JMP | BPF_JSLE | BPF_X */
 	{
-		"ALU_MOD_K: 3 % 2 = 1",
+		"JMP_JSLE_X: Signed jump: if (-2 <= -1) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU32_IMM(BPF_MOD, R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, -2),
+			BPF_JMP_REG(BPF_JSLE, R2, R1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
@@ -3628,1215 +9630,1432 @@ static struct bpf_test tests[] = {
 		{ { 0, 1 } },
 	},
 	{
-		"ALU_MOD_K: 3 % 1 = 0",
+		"JMP_JSLE_X: Signed jump: if (-1 <= -1) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU32_IMM(BPF_MOD, R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, -1),
+			BPF_JMP_REG(BPF_JSLE, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0 } },
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JGT | BPF_X */
 	{
-		"ALU_MOD_K: 4294967295 % 4294967293 = 2",
+		"JMP_JGT_X: if (3 > 2) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 4294967295U),
-			BPF_ALU32_IMM(BPF_MOD, R0, 4294967293U),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JGT, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU64_MOD_K: 3 % 2 = 1",
+		"JMP_JGT_X: Unsigned jump: if (-1 > 1) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU64_IMM(BPF_MOD, R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, 1),
+			BPF_JMP_REG(BPF_JGT, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JLT | BPF_X */
 	{
-		"ALU64_MOD_K: 3 % 1 = 0",
+		"JMP_JLT_X: if (2 < 3) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU64_IMM(BPF_MOD, R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JLT, R2, R1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU64_MOD_K: 2147483647 % 2147483645 = 2",
+		"JMP_JLT_X: Unsigned jump: if (1 < -1) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2147483647),
-			BPF_ALU64_IMM(BPF_MOD, R0, 2147483645),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, 1),
+			BPF_JMP_REG(BPF_JLT, R2, R1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, 1 } },
 	},
-	/* BPF_ALU | BPF_AND | BPF_X */
+	/* BPF_JMP | BPF_JGE | BPF_X */
 	{
-		"ALU_AND_X: 3 & 2 = 2",
+		"JMP_JGE_X: if (3 >= 2) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU32_IMM(BPF_MOV, R1, 2),
-			BPF_ALU32_REG(BPF_AND, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JGE, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
+		"JMP_JGE_X: if (3 >= 3) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0xffffffff),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
-			BPF_ALU32_REG(BPF_AND, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 3),
+			BPF_JMP_REG(BPF_JGE, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xffffffff } },
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JLE | BPF_X */
 	{
-		"ALU64_AND_X: 3 & 2 = 2",
+		"JMP_JLE_X: if (2 <= 3) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU32_IMM(BPF_MOV, R1, 2),
-			BPF_ALU64_REG(BPF_AND, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JLE, R2, R1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU64_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
+		"JMP_JLE_X: if (3 <= 3) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0xffffffff),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
-			BPF_ALU64_REG(BPF_AND, R0, R1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 3),
+			BPF_JMP_REG(BPF_JLE, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xffffffff } },
+		{ { 0, 1 } },
 	},
-	/* BPF_ALU | BPF_AND | BPF_K */
 	{
-		"ALU_AND_K: 3 & 2 = 2",
+		/* Mainly testing JIT + imm64 here. */
+		"JMP_JGE_X: ldimm64 test 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU32_IMM(BPF_AND, R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JGE, R1, R2, 2),
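+			/* The taken branch must skip one ldimm64, i.e. two insn slots. */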
+			BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+			BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, 0xeeeeeeeeU } },
 	},
 	{
-		"ALU_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
+		"JMP_JGE_X: ldimm64 test 2",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0xffffffff),
-			BPF_ALU32_IMM(BPF_AND, R0, 0xffffffff),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JGE, R1, R2, 0),
+			BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xffffffff } },
+		{ { 0, 0xffffffffU } },
 	},
 	{
-		"ALU_AND_K: Small immediate",
+		"JMP_JGE_X: ldimm64 test 3",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
-			BPF_ALU32_IMM(BPF_AND, R0, 15),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JGE, R1, R2, 4),
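+			/* Offset 4 skips both ldimm64s (four insn slots); R0 stays 1. */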
+			BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+			BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 4 } }
+		{ { 0, 1 } },
 	},
 	{
-		"ALU_AND_K: Large immediate",
+		"JMP_JLE_X: ldimm64 test 1",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4),
-			BPF_ALU32_IMM(BPF_AND, R0, 0xafbfcfdf),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JLE, R2, R1, 2),
+			BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+			BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xa1b2c3d4 } }
+		{ { 0, 0xeeeeeeeeU } },
 	},
 	{
-		"ALU_AND_K: Zero extension",
+		"JMP_JLE_X: ldimm64 test 2",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_LD_IMM64(R1, 0x0000000080a0c0e0LL),
-			BPF_ALU32_IMM(BPF_AND, R0, 0xf0f0f0f0),
-			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JLE, R2, R1, 0),
+			BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } }
+		{ { 0, 0xffffffffU } },
 	},
 	{
-		"ALU64_AND_K: 3 & 2 = 2",
+		"JMP_JLE_X: ldimm64 test 3",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU64_IMM(BPF_AND, R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JLE, R2, R1, 4),
+			BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+			BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 2 } },
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JNE | BPF_X */
 	{
-		"ALU64_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
+		"JMP_JNE_X: if (3 != 2) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0xffffffff),
-			BPF_ALU64_IMM(BPF_AND, R0, 0xffffffff),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0xffffffff } },
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JEQ | BPF_X */
 	{
-		"ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000000000000000",
+		"JMP_JEQ_X: if (3 == 3) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
-			BPF_LD_IMM64(R3, 0x0000000000000000LL),
-			BPF_ALU64_IMM(BPF_AND, R2, 0x0),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 3),
+			BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
 			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 1 } },
 	},
+	/* BPF_JMP | BPF_JSET | BPF_X */
 	{
-		"ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffff0000",
+		"JMP_JSET_X: if (0x3 & 0x2) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
-			BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
-			BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JSET, R1, R2, 1),
 			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU64_AND_K: 0xffffffffffffffff & -1 = 0xffffffffffffffff",
+		"JMP_JSET_X: if (0x3 & 0xffffffff) return 1",
 		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
-			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
-			BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 0xffffffff),
+			BPF_JMP_REG(BPF_JSET, R1, R2, 1),
 			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
+		{ { 0, 1 } },
 	},
 	{
-		"ALU64_AND_K: Sign extension 1",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_LD_IMM64(R1, 0x00000000090b0d0fLL),
-			BPF_ALU64_IMM(BPF_AND, R0, 0x0f0f0f0f),
-			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
+		"JMP_JA: Jump, gap, jump, ...",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xababcbac } },
+		.fill_helper = bpf_fill_ja,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Maximum possible literals",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xffffffff } },
+		.fill_helper = bpf_fill_maxinsns1,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Single literal",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xfefefefe } },
+		.fill_helper = bpf_fill_maxinsns2,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Run/add until end",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0x947bf368 } },
+		.fill_helper = bpf_fill_maxinsns3,
+	},
+	{
+		"BPF_MAXINSNS: Too many instructions",
+		{ },
+		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+		{ },
+		{ },
+		.fill_helper = bpf_fill_maxinsns4,
+		.expected_errcode = -EINVAL,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Very long jump",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xabababab } },
+		.fill_helper = bpf_fill_maxinsns5,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Ctx heavy transformations",
+		{ },
+		CLASSIC,
+		{ },
+		{
+			{  1, SKB_VLAN_PRESENT },
+			{ 10, SKB_VLAN_PRESENT }
 		},
-		INTERNAL,
+		.fill_helper = bpf_fill_maxinsns6,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Call heavy transformations",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 1, 0 }, { 10, 0 } },
+		.fill_helper = bpf_fill_maxinsns7,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Jump heavy test",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xffffffff } },
+		.fill_helper = bpf_fill_maxinsns8,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Very long jump backwards",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xcbababab } },
+		.fill_helper = bpf_fill_maxinsns9,
+	},
+	{	/* Mainly checking JIT here. */
+		"BPF_MAXINSNS: Edge hopping nuthouse",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0xabababac } },
+		.fill_helper = bpf_fill_maxinsns10,
+	},
+	{
+		"BPF_MAXINSNS: Jump, gap, jump, ...",
+		{ },
+		CLASSIC | FLAG_NO_DATA,
 		{ },
-		{ { 0, 1 } }
+		{ { 0, 0xababcbac } },
+		.fill_helper = bpf_fill_maxinsns11,
 	},
 	{
-		"ALU64_AND_K: Sign extension 2",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_LD_IMM64(R1, 0x0123456780a0c0e0LL),
-			BPF_ALU64_IMM(BPF_AND, R0, 0xf0f0f0f0),
-			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"BPF_MAXINSNS: jump over MSH",
 		{ },
-		{ { 0, 1 } }
+		CLASSIC | FLAG_EXPECTED_FAIL,
+		{ 0xfa, 0xfb, 0xfc, 0xfd, },
+		{ { 4, 0xabababab } },
+		.fill_helper = bpf_fill_maxinsns12,
+		.expected_errcode = -EINVAL,
 	},
-	/* BPF_ALU | BPF_OR | BPF_X */
 	{
-		"ALU_OR_X: 1 | 2 = 3",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_MOV, R1, 2),
-			BPF_ALU32_REG(BPF_OR, R0, R1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"BPF_MAXINSNS: exec all MSH",
 		{ },
-		{ { 0, 3 } },
+		CLASSIC,
+		{ 0xfa, 0xfb, 0xfc, 0xfd, },
+		{ { 4, 0xababab83 } },
+		.fill_helper = bpf_fill_maxinsns13,
 	},
 	{
-		"ALU_OR_X: 0x0 | 0xffffffff = 0xffffffff",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
-			BPF_ALU32_REG(BPF_OR, R0, R1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"BPF_MAXINSNS: ld_abs+get_processor_id",
 		{ },
-		{ { 0, 0xffffffff } },
+		CLASSIC,
+		{ },
+		{ { 1, 0xbee } },
+		.fill_helper = bpf_fill_ld_abs_get_processor_id,
 	},
+	/*
+	 * LD_IND / LD_ABS on fragmented SKBs
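+	 * (.frag_data is placed outside the linear head, in an skb page fragment)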
+	 */
 	{
-		"ALU64_OR_X: 1 | 2 = 3",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_MOV, R1, 2),
-			BPF_ALU64_REG(BPF_OR, R0, R1),
-			BPF_EXIT_INSN(),
+		"LD_IND byte frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
+		CLASSIC | FLAG_SKB_FRAG,
 		{ },
-		{ { 0, 3 } },
+		{ {0x40, 0x42} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
 	},
 	{
-		"ALU64_OR_X: 0 | 0xffffffff = 0xffffffff",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
-			BPF_ALU64_REG(BPF_OR, R0, R1),
-			BPF_EXIT_INSN(),
+		"LD_IND halfword frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x4),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
+		CLASSIC | FLAG_SKB_FRAG,
 		{ },
-		{ { 0, 0xffffffff } },
+		{ {0x40, 0x4344} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
 	},
-	/* BPF_ALU | BPF_OR | BPF_K */
 	{
-		"ALU_OR_K: 1 | 2 = 3",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_OR, R0, 2),
-			BPF_EXIT_INSN(),
+		"LD_IND word frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x8),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
+		CLASSIC | FLAG_SKB_FRAG,
 		{ },
-		{ { 0, 3 } },
+		{ {0x40, 0x21071983} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
 	},
 	{
-		"ALU_OR_K: 0 & 0xffffffff = 0xffffffff",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0),
-			BPF_ALU32_IMM(BPF_OR, R0, 0xffffffff),
-			BPF_EXIT_INSN(),
+		"LD_IND halfword mixed head/frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0xffffffff } },
+		CLASSIC | FLAG_SKB_FRAG,
+		{ [0x3e] = 0x25, [0x3f] = 0x05, },
+		{ {0x40, 0x0519} },
+		.frag_data = { 0x19, 0x82 },
 	},
 	{
-		"ALU_OR_K: Small immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
-			BPF_ALU32_IMM(BPF_OR, R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_IND word mixed head/frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x01020305 } }
+		CLASSIC | FLAG_SKB_FRAG,
+		{ [0x3e] = 0x25, [0x3f] = 0x05, },
+		{ {0x40, 0x25051982} },
+		.frag_data = { 0x19, 0x82 },
 	},
 	{
-		"ALU_OR_K: Large immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
-			BPF_ALU32_IMM(BPF_OR, R0, 0xa0b0c0d0),
-			BPF_EXIT_INSN(),
+		"LD_ABS byte frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x40),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
+		CLASSIC | FLAG_SKB_FRAG,
 		{ },
-		{ { 0, 0xa1b2c3d4 } }
+		{ {0x40, 0x42} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
 	},
 	{
-		"ALU_OR_K: Zero extension",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_LD_IMM64(R1, 0x00000000f9fbfdffLL),
-			BPF_ALU32_IMM(BPF_OR, R0, 0xf0f0f0f0),
-			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_ABS halfword frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x44),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
+		CLASSIC | FLAG_SKB_FRAG,
 		{ },
-		{ { 0, 1 } }
+		{ {0x40, 0x4344} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
 	},
 	{
-		"ALU64_OR_K: 1 | 2 = 3",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU64_IMM(BPF_OR, R0, 2),
-			BPF_EXIT_INSN(),
+		"LD_ABS word frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x48),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
+		CLASSIC | FLAG_SKB_FRAG,
 		{ },
-		{ { 0, 3 } },
+		{ {0x40, 0x21071983} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
 	},
 	{
-		"ALU64_OR_K: 0 & 0xffffffff = 0xffffffff",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0),
-			BPF_ALU64_IMM(BPF_OR, R0, 0xffffffff),
-			BPF_EXIT_INSN(),
+		"LD_ABS halfword mixed head/frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0xffffffff } },
+		CLASSIC | FLAG_SKB_FRAG,
+		{ [0x3e] = 0x25, [0x3f] = 0x05, },
+		{ {0x40, 0x0519} },
+		.frag_data = { 0x19, 0x82 },
 	},
 	{
-		"ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffffffff0000",
-		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
-			BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
-			BPF_ALU64_IMM(BPF_OR, R2, 0x0),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_ABS word mixed head/frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3e),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ [0x3e] = 0x25, [0x3f] = 0x05, },
+		{ {0x40, 0x25051982} },
+		.frag_data = { 0x19, 0x82 },
+	},
+	/*
+	 * LD_IND / LD_ABS on non-fragmented SKBs
+	 */
+	{
+		/*
+		 * this tests that the JIT/interpreter correctly resets X
+		 * before using it in an LD_IND instruction.
+		 */
+		"LD_IND byte default X",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x1 } },
+		CLASSIC,
+		{ [0x1] = 0x42 },
+		{ {0x40, 0x42 } },
 	},
 	{
-		"ALU64_OR_K: 0x0000ffffffff0000 | -1 = 0xffffffffffffffff",
-		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
-			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
-			BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_IND byte positive offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x1 } },
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x82 } },
 	},
 	{
-		"ALU64_OR_K: 0x000000000000000 | -1 = 0xffffffffffffffff",
-		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0000000000000000LL),
-			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
-			BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_IND byte negative offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x1 } },
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x05 } },
 	},
 	{
-		"ALU64_OR_K: Sign extension 1",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_LD_IMM64(R1, 0x012345678fafcfefLL),
-			BPF_ALU64_IMM(BPF_OR, R0, 0x0f0f0f0f),
-			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_IND byte positive offset, all ff",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 1 } }
+		CLASSIC,
+		{ [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
+		{ {0x40, 0xff } },
 	},
 	{
-		"ALU64_OR_K: Sign extension 2",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_LD_IMM64(R1, 0xfffffffff9fbfdffLL),
-			BPF_ALU64_IMM(BPF_OR, R0, 0xf0f0f0f0),
-			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_IND byte positive offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 1 } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
 	},
-	/* BPF_ALU | BPF_XOR | BPF_X */
 	{
-		"ALU_XOR_X: 5 ^ 6 = 3",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 5),
-			BPF_ALU32_IMM(BPF_MOV, R1, 6),
-			BPF_ALU32_REG(BPF_XOR, R0, R1),
-			BPF_EXIT_INSN(),
+		"LD_IND byte negative offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 3 } },
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 } },
 	},
 	{
-		"ALU_XOR_X: 0x1 ^ 0xffffffff = 0xfffffffe",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
-			BPF_ALU32_REG(BPF_XOR, R0, R1),
-			BPF_EXIT_INSN(),
+		"LD_IND byte negative offset, multiple calls",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
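+			/* SKF_LL_OFF rebases the loads to the link-layer (MAC) header. */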
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 1),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 2),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 3),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 4),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0xfffffffe } },
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x82 }, },
 	},
 	{
-		"ALU64_XOR_X: 5 ^ 6 = 3",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 5),
-			BPF_ALU32_IMM(BPF_MOV, R1, 6),
-			BPF_ALU64_REG(BPF_XOR, R0, R1),
-			BPF_EXIT_INSN(),
+		"LD_IND halfword positive offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x2),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 3 } },
-	},
-	{
-		"ALU64_XOR_X: 1 ^ 0xffffffff = 0xfffffffe",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
-			BPF_ALU64_REG(BPF_XOR, R0, R1),
-			BPF_EXIT_INSN(),
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0xfffffffe } },
+		{ {0x40, 0xdd88 } },
 	},
-	/* BPF_ALU | BPF_XOR | BPF_K */
 	{
-		"ALU_XOR_K: 5 ^ 6 = 3",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 5),
-			BPF_ALU32_IMM(BPF_XOR, R0, 6),
-			BPF_EXIT_INSN(),
+		"LD_IND halfword negative offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x2),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 3 } },
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+		},
+		{ {0x40, 0xbb66 } },
 	},
 	{
-		"ALU_XOR_K: 1 ^ 0xffffffff = 0xfffffffe",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_XOR, R0, 0xffffffff),
-			BPF_EXIT_INSN(),
+		"LD_IND halfword unaligned",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0xfffffffe } },
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+		},
+		{ {0x40, 0x66cc } },
 	},
 	{
-		"ALU_XOR_K: Small immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
-			BPF_ALU32_IMM(BPF_XOR, R0, 15),
-			BPF_EXIT_INSN(),
+		"LD_IND halfword positive offset, all ff",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3d),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x0102030b } }
+		CLASSIC,
+		{ [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
+		{ {0x40, 0xffff } },
 	},
 	{
-		"ALU_XOR_K: Large immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4),
-			BPF_ALU32_IMM(BPF_XOR, R0, 0xafbfcfdf),
-			BPF_EXIT_INSN(),
+		"LD_IND halfword positive offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x5e4d3c2b } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
 	},
 	{
-		"ALU_XOR_K: Zero extension",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_LD_IMM64(R1, 0x00000000795b3d1fLL),
-			BPF_ALU32_IMM(BPF_XOR, R0, 0xf0f0f0f0),
-			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_IND halfword negative offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 1 } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 } },
 	},
 	{
-		"ALU64_XOR_K: 5 ^ 6 = 3",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 5),
-			BPF_ALU64_IMM(BPF_XOR, R0, 6),
-			BPF_EXIT_INSN(),
+		"LD_IND word positive offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x4),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 3 } },
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xee99ffaa } },
 	},
 	{
-		"ALU64_XOR_K: 1 ^ 0xffffffff = 0xfffffffe",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff),
-			BPF_EXIT_INSN(),
+		"LD_IND word negative offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x4),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0xfffffffe } },
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xaa55bb66 } },
 	},
 	{
-		"ALU64_XOR_K: 0x0000ffffffff0000 ^ 0x0 = 0x0000ffffffff0000",
-		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
-			BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
-			BPF_ALU64_IMM(BPF_XOR, R2, 0x0),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_IND word unaligned (addr & 3 == 2)",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x1 } },
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xbb66cc77 } },
 	},
 	{
-		"ALU64_XOR_K: 0x0000ffffffff0000 ^ -1 = 0xffff00000000ffff",
-		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
-			BPF_LD_IMM64(R3, 0xffff00000000ffffLL),
-			BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_IND word unaligned (addr & 3 == 1)",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x1 } },
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0x55bb66cc } },
 	},
 	{
-		"ALU64_XOR_K: 0x000000000000000 ^ -1 = 0xffffffffffffffff",
-		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0x0000000000000000LL),
-			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
-			BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_IND word unaligned (addr & 3 == 3)",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x1 } },
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0x66cc77dd } },
 	},
 	{
-		"ALU64_XOR_K: Sign extension 1",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_LD_IMM64(R1, 0x0123456786a4c2e0LL),
-			BPF_ALU64_IMM(BPF_XOR, R0, 0x0f0f0f0f),
-			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_IND word positive offset, all ff",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 1 } }
+		CLASSIC,
+		{ [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
+		{ {0x40, 0xffffffff } },
 	},
 	{
-		"ALU64_XOR_K: Sign extension 2",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_LD_IMM64(R1, 0xfedcba98795b3d1fLL),
-			BPF_ALU64_IMM(BPF_XOR, R0, 0xf0f0f0f0),
-			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_IND word positive offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 1 } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
 	},
-	/* BPF_ALU | BPF_LSH | BPF_X */
 	{
-		"ALU_LSH_X: 1 << 1 = 2",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_MOV, R1, 1),
-			BPF_ALU32_REG(BPF_LSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"LD_IND word negative offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 2 } },
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 } },
 	},
 	{
-		"ALU_LSH_X: 1 << 31 = 0x80000000",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_MOV, R1, 31),
-			BPF_ALU32_REG(BPF_LSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"LD_ABS byte",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x80000000 } },
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xcc } },
 	},
 	{
-		"ALU_LSH_X: 0x12345678 << 12 = 0x45678000",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
-			BPF_ALU32_IMM(BPF_MOV, R1, 12),
-			BPF_ALU32_REG(BPF_LSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"LD_ABS byte positive offset, all ff",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x45678000 } }
+		CLASSIC,
+		{ [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
+		{ {0x40, 0xff } },
 	},
 	{
-		"ALU64_LSH_X: 1 << 1 = 2",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_MOV, R1, 1),
-			BPF_ALU64_REG(BPF_LSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"LD_ABS byte positive offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 2 } },
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
 	},
 	{
-		"ALU64_LSH_X: 1 << 31 = 0x80000000",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_MOV, R1, 31),
-			BPF_ALU64_REG(BPF_LSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"LD_ABS byte negative offset, out of bounds load",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, -1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_EXPECTED_FAIL,
+		.expected_errcode = -EINVAL,
+	},
+	{
+		"LD_ABS byte negative offset, in bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x80000000 } },
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x82 }, },
 	},
 	{
-		"ALU64_LSH_X: Shift < 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 12),
-			BPF_ALU64_REG(BPF_LSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"LD_ABS byte negative offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0xbcdef000 } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
 	},
 	{
-		"ALU64_LSH_X: Shift < 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 12),
-			BPF_ALU64_REG(BPF_LSH, R0, R1),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
+		"LD_ABS byte negative offset, multiple calls",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3c),
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3d),
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3e),
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x3456789a } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x82 }, },
 	},
 	{
-		"ALU64_LSH_X: Shift > 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 36),
-			BPF_ALU64_REG(BPF_LSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"LD_ABS halfword",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0 } }
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xdd88 } },
 	},
 	{
-		"ALU64_LSH_X: Shift > 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 36),
-			BPF_ALU64_REG(BPF_LSH, R0, R1),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
+		"LD_ABS halfword unaligned",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x25),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x9abcdef0 } }
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0x99ff } },
 	},
 	{
-		"ALU64_LSH_X: Shift == 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 32),
-			BPF_ALU64_REG(BPF_LSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"LD_ABS halfword positive offset, all ff",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3e),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0 } }
+		CLASSIC,
+		{ [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
+		{ {0x40, 0xffff } },
 	},
 	{
-		"ALU64_LSH_X: Shift == 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 32),
-			BPF_ALU64_REG(BPF_LSH, R0, R1),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
+		"LD_ABS halfword positive offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x89abcdef } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
 	},
 	{
-		"ALU64_LSH_X: Zero shift, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0),
-			BPF_ALU64_REG(BPF_LSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"LD_ABS halfword negative offset, out of bounds load",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, -1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x89abcdef } }
+		CLASSIC | FLAG_EXPECTED_FAIL,
+		.expected_errcode = -EINVAL,
 	},
 	{
-		"ALU64_LSH_X: Zero shift, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0),
-			BPF_ALU64_REG(BPF_LSH, R0, R1),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
+		"LD_ABS halfword negative offset, in bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x01234567 } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x1982 }, },
 	},
-	/* BPF_ALU | BPF_LSH | BPF_K */
 	{
-		"ALU_LSH_K: 1 << 1 = 2",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_LSH, R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_ABS halfword negative offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 2 } },
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
 	},
 	{
-		"ALU_LSH_K: 1 << 31 = 0x80000000",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU32_IMM(BPF_LSH, R0, 31),
-			BPF_EXIT_INSN(),
+		"LD_ABS word",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x80000000 } },
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xaa55bb66 } },
 	},
 	{
-		"ALU_LSH_K: 0x12345678 << 12 = 0x45678000",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
-			BPF_ALU32_IMM(BPF_LSH, R0, 12),
-			BPF_EXIT_INSN(),
+		"LD_ABS word unaligned (addr & 3 == 2)",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x22),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x45678000 } }
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xdd88ee99 } },
 	},
 	{
-		"ALU_LSH_K: 0x12345678 << 0 = 0x12345678",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
-			BPF_ALU32_IMM(BPF_LSH, R0, 0),
-			BPF_EXIT_INSN(),
+		"LD_ABS word unaligned (addr & 3 == 1)",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x21),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x12345678 } }
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0x77dd88ee } },
 	},
 	{
-		"ALU64_LSH_K: 1 << 1 = 2",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU64_IMM(BPF_LSH, R0, 1),
-			BPF_EXIT_INSN(),
+		"LD_ABS word unaligned (addr & 3 == 3)",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x23),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 2 } },
+		{ {0x40, 0x88ee99ff } },
 	},
 	{
-		"ALU64_LSH_K: 1 << 31 = 0x80000000",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 1),
-			BPF_ALU64_IMM(BPF_LSH, R0, 31),
-			BPF_EXIT_INSN(),
+		"LD_ABS word positive offset, all ff",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3c),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x80000000 } },
+		CLASSIC,
+		{ [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
+		{ {0x40, 0xffffffff } },
 	},
 	{
-		"ALU64_LSH_K: Shift < 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_LSH, R0, 12),
-			BPF_EXIT_INSN(),
+		"LD_ABS word positive offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0xbcdef000 } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
 	},
 	{
-		"ALU64_LSH_K: Shift < 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_LSH, R0, 12),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
+		"LD_ABS word negative offset, out of bounds load",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, -1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x3456789a } }
+		CLASSIC | FLAG_EXPECTED_FAIL,
+		.expected_errcode = -EINVAL,
 	},
 	{
-		"ALU64_LSH_K: Shift > 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_LSH, R0, 36),
-			BPF_EXIT_INSN(),
+		"LD_ABS word negative offset, in bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0 } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x25051982 }, },
 	},
 	{
-		"ALU64_LSH_K: Shift > 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_LSH, R0, 36),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
+		"LD_ABS word negative offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x9abcdef0 } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
 	},
 	{
-		"ALU64_LSH_K: Shift == 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_LSH, R0, 32),
-			BPF_EXIT_INSN(),
+		"LDX_MSH standalone, preserved A",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0 } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0xffeebbaa }, },
 	},
 	{
-		"ALU64_LSH_K: Shift == 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_LSH, R0, 32),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
+		"LDX_MSH standalone, preserved A 2",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0x175e9d63),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3d),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x89abcdef } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x175e9d63 }, },
 	},
 	{
-		"ALU64_LSH_K: Zero shift",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_LSH, R0, 0),
-			BPF_EXIT_INSN(),
+		"LDX_MSH standalone, test result 1",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x89abcdef } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x14 }, },
 	},
-	/* BPF_ALU | BPF_RSH | BPF_X */
 	{
-		"ALU_RSH_X: 2 >> 1 = 1",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, 1),
-			BPF_ALU32_REG(BPF_RSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"LDX_MSH standalone, test result 2",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 1 } },
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x24 }, },
 	},
 	{
-		"ALU_RSH_X: 0x80000000 >> 31 = 1",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x80000000),
-			BPF_ALU32_IMM(BPF_MOV, R1, 31),
-			BPF_ALU32_REG(BPF_RSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"LDX_MSH standalone, negative offset",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, -1),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 1 } },
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0 }, },
 	},
 	{
-		"ALU_RSH_X: 0x12345678 >> 20 = 0x123",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
-			BPF_ALU32_IMM(BPF_MOV, R1, 20),
-			BPF_ALU32_REG(BPF_RSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"LDX_MSH standalone, negative offset 2",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, SKF_LL_OFF + 0x3e),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x123 } }
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x24 }, },
 	},
 	{
-		"ALU64_RSH_X: 2 >> 1 = 1",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, 1),
-			BPF_ALU64_REG(BPF_RSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"LDX_MSH standalone, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x40),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 1 } },
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0 }, },
 	},
+	/*
+	 * Verify that the interpreter and any JIT correctly initialize
+	 * A and X to 0 before the first instruction.
+	 */
 	{
-		"ALU64_RSH_X: 0x80000000 >> 31 = 1",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x80000000),
-			BPF_ALU32_IMM(BPF_MOV, R1, 31),
-			BPF_ALU64_REG(BPF_RSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"ADD default X",
+		.u.insns = {
+			/*
+			 * A = 0x42
+			 * A = A + X
+			 * ret A
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 1 } },
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x42 } },
 	},
 	{
-		"ALU64_RSH_X: Shift < 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 12),
-			BPF_ALU64_REG(BPF_RSH, R0, R1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x56789abc } }
+		"ADD default A",
+		.u.insns = {
+			/*
+			 * A = A + 0x42
+			 * ret A
+			 */
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0x42),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x42 } },
 	},
 	{
-		"ALU64_RSH_X: Shift < 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 12),
-			BPF_ALU64_REG(BPF_RSH, R0, R1),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
+		"SUB default X",
+		.u.insns = {
+			/*
+			 * A = 0x66
+			 * A = A - X
+			 * ret A
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x66),
+			BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x00081234 } }
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x66 } },
 	},
 	{
-		"ALU64_RSH_X: Shift > 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 36),
-			BPF_ALU64_REG(BPF_RSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"SUB default A",
+		.u.insns = {
+			/*
+			 * A = A - -0x66
+			 * ret A
+			 */
+			BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, -0x66),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x08123456 } }
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x66 } },
 	},
 	{
-		"ALU64_RSH_X: Shift > 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 36),
-			BPF_ALU64_REG(BPF_RSH, R0, R1),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
+		"MUL default X",
+		.u.insns = {
+			/*
+			 * A = 0x42
+			 * A = A * X
+			 * ret A
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+			BPF_STMT(BPF_ALU | BPF_MUL | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0 } }
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x0 } },
 	},
 	{
-		"ALU64_RSH_X: Shift == 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 32),
-			BPF_ALU64_REG(BPF_RSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"MUL default A",
+		.u.insns = {
+			/*
+			 * A = A * 0x66
+			 * ret A
+			 */
+			BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 0x66),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x81234567 } }
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x0 } },
 	},
 	{
-		"ALU64_RSH_X: Shift == 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 32),
-			BPF_ALU64_REG(BPF_RSH, R0, R1),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
+		"DIV default X",
+		.u.insns = {
+			/*
+			 * A = 0x42
+			 * A = A / X ; this halts the filter execution if X is 0
+			 * ret 0x42
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+			BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_K, 0x42),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0 } }
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x0 } },
 	},
 	{
-		"ALU64_RSH_X: Zero shift, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0),
-			BPF_ALU64_REG(BPF_RSH, R0, R1),
-			BPF_EXIT_INSN(),
+		"DIV default A",
+		.u.insns = {
+			/*
+			 * A = A / 1
+			 * ret A
+			 */
+			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x89abcdef } }
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x0 } },
 	},
 	{
-		"ALU64_RSH_X: Zero shift, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0),
-			BPF_ALU64_REG(BPF_RSH, R0, R1),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
+		"MOD default X",
+		.u.insns = {
+			/*
+			 * A = 0x42
+			 * A = A mod X ; this halts the filter execution if X is 0
+			 * ret 0x42
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+			BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_K, 0x42),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x81234567 } }
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x0 } },
 	},
-	/* BPF_ALU | BPF_RSH | BPF_K */
 	{
-		"ALU_RSH_K: 2 >> 1 = 1",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2),
-			BPF_ALU32_IMM(BPF_RSH, R0, 1),
-			BPF_EXIT_INSN(),
+		"MOD default A",
+		.u.insns = {
+			/*
+			 * A = A mod 1
+			 * ret A
+			 */
+			BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 1 } },
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x0 } },
 	},
 	{
-		"ALU_RSH_K: 0x80000000 >> 31 = 1",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x80000000),
-			BPF_ALU32_IMM(BPF_RSH, R0, 31),
-			BPF_EXIT_INSN(),
+		"JMP EQ default A",
+		.u.insns = {
+			/*
+			 * cmp A, 0x0, 0, 1
+			 * ret 0x42
+			 * ret 0x66
+			 */
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0, 0, 1),
+			BPF_STMT(BPF_RET | BPF_K, 0x42),
+			BPF_STMT(BPF_RET | BPF_K, 0x66),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 1 } },
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x42 } },
 	},
 	{
-		"ALU_RSH_K: 0x12345678 >> 20 = 0x123",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
-			BPF_ALU32_IMM(BPF_RSH, R0, 20),
-			BPF_EXIT_INSN(),
+		"JMP EQ default X",
+		.u.insns = {
+			/*
+			 * A = 0x0
+			 * cmp A, X, 0, 1
+			 * ret 0x42
+			 * ret 0x66
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0x0, 0, 1),
+			BPF_STMT(BPF_RET | BPF_K, 0x42),
+			BPF_STMT(BPF_RET | BPF_K, 0x66),
 		},
-		INTERNAL,
-		{ },
-		{ { 0, 0x123 } }
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x42 } },
 	},
+	/* Checking interpreter vs JIT w.r.t. sign-extended immediates. */
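+	/*
+	 * A 32-bit immediate is sign extended to 64 bits before a JMP
+	 * comparison, so 0xfefb0000 and -17104896 denote the same
+	 * operand, whereas results of 32-bit ALU moves stay zero
+	 * extended. These tests catch JITs that mix up the two.
+	 */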
 	{
-		"ALU_RSH_K: 0x12345678 >> 0 = 0x12345678",
+		"JNE signed compare, test 1",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
-			BPF_ALU32_IMM(BPF_RSH, R0, 0),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+			BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+			BPF_MOV64_REG(R2, R1),
+			BPF_ALU64_REG(BPF_AND, R2, R3),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JNE, R2, -17104896, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 2),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x12345678 } }
+		{ { 0, 1 } },
 	},
 	{
-		"ALU64_RSH_K: 2 >> 1 = 1",
+		"JNE signed compare, test 2",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 2),
-			BPF_ALU64_IMM(BPF_RSH, R0, 1),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+			BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+			BPF_MOV64_REG(R2, R1),
+			BPF_ALU64_REG(BPF_AND, R2, R3),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JNE, R2, 0xfefb0000, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 2),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
@@ -4844,3574 +11063,2878 @@ static struct bpf_test tests[] = {
 		{ { 0, 1 } },
 	},
 	{
-		"ALU64_RSH_K: 0x80000000 >> 31 = 1",
+		"JNE signed compare, test 3",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x80000000),
-			BPF_ALU64_IMM(BPF_RSH, R0, 31),
+			BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+			BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+			BPF_ALU32_IMM(BPF_MOV, R4, 0xfefb0000),
+			BPF_MOV64_REG(R2, R1),
+			BPF_ALU64_REG(BPF_AND, R2, R3),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_REG(BPF_JNE, R2, R4, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 2),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 1 } },
+		{ { 0, 2 } },
 	},
 	{
-		"ALU64_RSH_K: Shift < 32, low word",
+		"JNE signed compare, test 4",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_RSH, R0, 12),
+			BPF_LD_IMM64(R1, -17104896),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JNE, R1, -17104896, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 2),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x56789abc } }
+		{ { 0, 2 } },
 	},
 	{
-		"ALU64_RSH_K: Shift < 32, high word",
+		"JNE signed compare, test 5",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_RSH, R0, 12),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_LD_IMM64(R1, 0xfefb0000),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JNE, R1, 0xfefb0000, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 2),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x00081234 } }
+		{ { 0, 1 } },
 	},
 	{
-		"ALU64_RSH_K: Shift > 32, low word",
+		"JNE signed compare, test 6",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_RSH, R0, 36),
+			BPF_LD_IMM64(R1, 0x7efb0000),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JNE, R1, 0x7efb0000, 1),
+			BPF_ALU32_IMM(BPF_MOV, R0, 2),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x08123456 } }
+		{ { 0, 2 } },
 	},
 	{
-		"ALU64_RSH_K: Shift > 32, high word",
+		"JNE signed compare, test 7",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffff0000),
+			BPF_STMT(BPF_MISC | BPF_TAX, 0),
+			BPF_STMT(BPF_LD | BPF_IMM, 0xfefbbc12),
+			BPF_STMT(BPF_ALU | BPF_AND | BPF_X, 0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0xfefb0000, 1, 0),
+			BPF_STMT(BPF_RET | BPF_K, 1),
+			BPF_STMT(BPF_RET | BPF_K, 2),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ { 0, 2 } },
+	},
+	/* BPF_LDX_MEM with operand aliasing */
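+	/*
+	 * The address register is also the destination, so the JIT must
+	 * consume the address before overwriting the register with the
+	 * loaded value.
+	 */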
+	{
+		"LDX_MEM_B: operand register aliasing",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_RSH, R0, 36),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_ST_MEM(BPF_B, R10, -8, 123),
+			BPF_MOV64_REG(R0, R10),
+			BPF_LDX_MEM(BPF_B, R0, R0, -8),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0 } }
+		{ { 0, 123 } },
+		.stack_depth = 8,
 	},
 	{
-		"ALU64_RSH_K: Shift == 32, low word",
+		"LDX_MEM_H: operand register aliasing",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_ST_MEM(BPF_H, R10, -8, 12345),
+			BPF_MOV64_REG(R0, R10),
+			BPF_LDX_MEM(BPF_H, R0, R0, -8),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x81234567 } }
+		{ { 0, 12345 } },
+		.stack_depth = 8,
 	},
 	{
-		"ALU64_RSH_K: Shift == 32, high word",
+		"LDX_MEM_W: operand register aliasing",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
+			BPF_ST_MEM(BPF_W, R10, -8, 123456789),
+			BPF_MOV64_REG(R0, R10),
+			BPF_LDX_MEM(BPF_W, R0, R0, -8),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0 } }
+		{ { 0, 123456789 } },
+		.stack_depth = 8,
 	},
 	{
-		"ALU64_RSH_K: Zero shift",
+		"LDX_MEM_DW: operand register aliasing",
 		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_RSH, R0, 0),
+			BPF_LD_IMM64(R1, 0x123456789abcdefULL),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+			BPF_MOV64_REG(R0, R10),
+			BPF_LDX_MEM(BPF_DW, R0, R0, -8),
+			BPF_ALU64_REG(BPF_SUB, R0, R1),
+			BPF_MOV64_REG(R1, R0),
+			BPF_ALU64_IMM(BPF_RSH, R1, 32),
+			BPF_ALU64_REG(BPF_OR, R0, R1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, 0x89abcdef } }
+		{ { 0, 0 } },
+		.stack_depth = 8,
 	},
-	/* BPF_ALU | BPF_ARSH | BPF_X */
+	/*
+	 * Register (non-)clobbering tests for the case where a JIT implements
+	 * complex ALU or ATOMIC operations via function calls. If so, the
+	 * function call must be transparent to the eBPF registers. The JIT
+	 * must therefore save and restore relevant registers across the call.
+	 * The following tests check that the eBPF registers retain their
+	 * values after such an operation. Mainly intended for complex ALU
+	 * and atomic operations, but we run them for all. You never know...
+	 *
+	 * Note that each operation should be tested twice with different
+	 * destinations, to check preservation for all registers.
+	 */
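+	/*
+	 * The register macros R0..R9 below also serve as immediates:
+	 * BPF_ALU64_IMM(BPF_MOV, Rn, Rn) loads the constant n, and the
+	 * trailing BPF_JMP_IMM checks compare each register against it.
+	 * The destination is re-initialized the same way after the
+	 * operation under test.
+	 */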
+#define BPF_TEST_CLOBBER_ALU(alu, op, dst, src)			\
+	{							\
+		#alu "_" #op " to " #dst ": no clobbering",	\
+		.u.insns_int = {				\
+			BPF_ALU64_IMM(BPF_MOV, R0, R0),		\
+			BPF_ALU64_IMM(BPF_MOV, R1, R1),		\
+			BPF_ALU64_IMM(BPF_MOV, R2, R2),		\
+			BPF_ALU64_IMM(BPF_MOV, R3, R3),		\
+			BPF_ALU64_IMM(BPF_MOV, R4, R4),		\
+			BPF_ALU64_IMM(BPF_MOV, R5, R5),		\
+			BPF_ALU64_IMM(BPF_MOV, R6, R6),		\
+			BPF_ALU64_IMM(BPF_MOV, R7, R7),		\
+			BPF_ALU64_IMM(BPF_MOV, R8, R8),		\
+			BPF_ALU64_IMM(BPF_MOV, R9, R9),		\
+			BPF_##alu(BPF_ ##op, dst, src),		\
+			BPF_ALU32_IMM(BPF_MOV, dst, dst),	\
+			BPF_JMP_IMM(BPF_JNE, R0, R0, 10),	\
+			BPF_JMP_IMM(BPF_JNE, R1, R1, 9),	\
+			BPF_JMP_IMM(BPF_JNE, R2, R2, 8),	\
+			BPF_JMP_IMM(BPF_JNE, R3, R3, 7),	\
+			BPF_JMP_IMM(BPF_JNE, R4, R4, 6),	\
+			BPF_JMP_IMM(BPF_JNE, R5, R5, 5),	\
+			BPF_JMP_IMM(BPF_JNE, R6, R6, 4),	\
+			BPF_JMP_IMM(BPF_JNE, R7, R7, 3),	\
+			BPF_JMP_IMM(BPF_JNE, R8, R8, 2),	\
+			BPF_JMP_IMM(BPF_JNE, R9, R9, 1),	\
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),		\
+			BPF_EXIT_INSN(),			\
+		},						\
+		INTERNAL,					\
+		{ },						\
+		{ { 0, 1 } }					\
+	}
+	/* ALU64 operations, register clobbering */
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, AND, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, AND, R9, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, OR, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, OR, R9, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, XOR, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, XOR, R9, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, LSH, R8, 12),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, LSH, R9, 12),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, RSH, R8, 12),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, RSH, R9, 12),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, ARSH, R8, 12),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, ARSH, R9, 12),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, ADD, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, ADD, R9, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, SUB, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, SUB, R9, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, MUL, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, MUL, R9, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, DIV, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, DIV, R9, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, MOD, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU64_IMM, MOD, R9, 123456789),
+	/* ALU32 immediate operations, register clobbering */
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, AND, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, AND, R9, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, OR, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, OR, R9, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, XOR, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, XOR, R9, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, LSH, R8, 12),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, LSH, R9, 12),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, RSH, R8, 12),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, RSH, R9, 12),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, ARSH, R8, 12),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, ARSH, R9, 12),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, ADD, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, ADD, R9, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, SUB, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, SUB, R9, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, MUL, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, MUL, R9, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, DIV, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, DIV, R9, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, MOD, R8, 123456789),
+	BPF_TEST_CLOBBER_ALU(ALU32_IMM, MOD, R9, 123456789),
+	/* ALU64 register operations, register clobbering */
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, AND, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, AND, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, OR, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, OR, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, XOR, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, XOR, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, LSH, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, LSH, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, RSH, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, RSH, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, ARSH, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, ARSH, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, ADD, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, ADD, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, SUB, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, SUB, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, MUL, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, MUL, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, DIV, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, DIV, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, MOD, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU64_REG, MOD, R9, R1),
+	/* ALU32 register operations, register clobbering */
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, AND, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, AND, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, OR, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, OR, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, XOR, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, XOR, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, LSH, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, LSH, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, RSH, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, RSH, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, ARSH, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, ARSH, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, ADD, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, ADD, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, SUB, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, SUB, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, MUL, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, MUL, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, DIV, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, DIV, R9, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, MOD, R8, R1),
+	BPF_TEST_CLOBBER_ALU(ALU32_REG, MOD, R9, R1),
+#undef BPF_TEST_CLOBBER_ALU
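+	/*
+	 * The initial memory value is chosen so that the source register
+	 * keeps its expected value even for operations that write back a
+	 * result: fetch and xchg variants read back 1 (matching R1), and
+	 * cmpxchg succeeds against 0, leaving R0 at 0.
+	 */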
+#define BPF_TEST_CLOBBER_ATOMIC(width, op)			\
+	{							\
+		"Atomic_" #width " " #op ": no clobbering",	\
+		.u.insns_int = {				\
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),		\
+			BPF_ALU64_IMM(BPF_MOV, R1, 1),		\
+			BPF_ALU64_IMM(BPF_MOV, R2, 2),		\
+			BPF_ALU64_IMM(BPF_MOV, R3, 3),		\
+			BPF_ALU64_IMM(BPF_MOV, R4, 4),		\
+			BPF_ALU64_IMM(BPF_MOV, R5, 5),		\
+			BPF_ALU64_IMM(BPF_MOV, R6, 6),		\
+			BPF_ALU64_IMM(BPF_MOV, R7, 7),		\
+			BPF_ALU64_IMM(BPF_MOV, R8, 8),		\
+			BPF_ALU64_IMM(BPF_MOV, R9, 9),		\
+			BPF_ST_MEM(width, R10, -8,		\
+				   (op) == BPF_CMPXCHG ? 0 :	\
+				   (op) & BPF_FETCH ? 1 : 0),	\
+			BPF_ATOMIC_OP(width, op, R10, R1, -8),	\
+			BPF_JMP_IMM(BPF_JNE, R0, 0, 10),	\
+			BPF_JMP_IMM(BPF_JNE, R1, 1, 9),		\
+			BPF_JMP_IMM(BPF_JNE, R2, 2, 8),		\
+			BPF_JMP_IMM(BPF_JNE, R3, 3, 7),		\
+			BPF_JMP_IMM(BPF_JNE, R4, 4, 6),		\
+			BPF_JMP_IMM(BPF_JNE, R5, 5, 5),		\
+			BPF_JMP_IMM(BPF_JNE, R6, 6, 4),		\
+			BPF_JMP_IMM(BPF_JNE, R7, 7, 3),		\
+			BPF_JMP_IMM(BPF_JNE, R8, 8, 2),		\
+			BPF_JMP_IMM(BPF_JNE, R9, 9, 1),		\
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),		\
+			BPF_EXIT_INSN(),			\
+		},						\
+		INTERNAL,					\
+		{ },						\
+		{ { 0, 1 } },					\
+		.stack_depth = 8,				\
+	}
+	/* 64-bit atomic operations, register clobbering */
+	BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_ADD),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_AND),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_OR),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XOR),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_ADD | BPF_FETCH),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_AND | BPF_FETCH),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_OR | BPF_FETCH),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XOR | BPF_FETCH),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XCHG),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_CMPXCHG),
+	/* 32-bit atomic operations, register clobbering */
+	BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_ADD),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_AND),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_OR),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XOR),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_ADD | BPF_FETCH),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_AND | BPF_FETCH),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_OR | BPF_FETCH),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XOR | BPF_FETCH),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XCHG),
+	BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_CMPXCHG),
+#undef BPF_TEST_CLOBBER_ATOMIC
+	/* Checking that ALU32 src is not zero extended in place */
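+	/*
+	 * Each test copies the 64-bit source register, runs the 32-bit
+	 * operation, and subtracts the source from the copy. Folding the
+	 * difference as R0 |= R0 >> 32 makes any corrupted upper bits
+	 * show up in the 32-bit return value.
+	 */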
+#define BPF_ALU32_SRC_ZEXT(op)					\
+	{							\
+		"ALU32_" #op "_X: src preserved in zext",	\
+		.u.insns_int = {				\
+			BPF_LD_IMM64(R1, 0x0123456789acbdefULL),\
+			BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),\
+			BPF_ALU64_REG(BPF_MOV, R0, R1),		\
+			BPF_ALU32_REG(BPF_##op, R2, R1),	\
+			BPF_ALU64_REG(BPF_SUB, R0, R1),		\
+			BPF_ALU64_REG(BPF_MOV, R1, R0),		\
+			BPF_ALU64_IMM(BPF_RSH, R1, 32),		\
+			BPF_ALU64_REG(BPF_OR, R0, R1),		\
+			BPF_EXIT_INSN(),			\
+		},						\
+		INTERNAL,					\
+		{ },						\
+		{ { 0, 0 } },					\
+	}
+	BPF_ALU32_SRC_ZEXT(MOV),
+	BPF_ALU32_SRC_ZEXT(AND),
+	BPF_ALU32_SRC_ZEXT(OR),
+	BPF_ALU32_SRC_ZEXT(XOR),
+	BPF_ALU32_SRC_ZEXT(ADD),
+	BPF_ALU32_SRC_ZEXT(SUB),
+	BPF_ALU32_SRC_ZEXT(MUL),
+	BPF_ALU32_SRC_ZEXT(DIV),
+	BPF_ALU32_SRC_ZEXT(MOD),
+#undef BPF_ALU32_SRC_ZEXT
+	/* Checking that ATOMIC32 src is not zero extended in place */
+#define BPF_ATOMIC32_SRC_ZEXT(op)					\
+	{								\
+		"ATOMIC_W_" #op ": src preserved in zext",		\
+		.u.insns_int = {					\
+			BPF_LD_IMM64(R0, 0x0123456789acbdefULL),	\
+			BPF_ALU64_REG(BPF_MOV, R1, R0),			\
+			BPF_ST_MEM(BPF_W, R10, -4, 0),			\
+			BPF_ATOMIC_OP(BPF_W, BPF_##op, R10, R1, -4),	\
+			BPF_ALU64_REG(BPF_SUB, R0, R1),			\
+			BPF_ALU64_REG(BPF_MOV, R1, R0),			\
+			BPF_ALU64_IMM(BPF_RSH, R1, 32),			\
+			BPF_ALU64_REG(BPF_OR, R0, R1),			\
+			BPF_EXIT_INSN(),				\
+		},							\
+		INTERNAL,						\
+		{ },							\
+		{ { 0, 0 } },						\
+		.stack_depth = 8,					\
+	}
+	BPF_ATOMIC32_SRC_ZEXT(ADD),
+	BPF_ATOMIC32_SRC_ZEXT(AND),
+	BPF_ATOMIC32_SRC_ZEXT(OR),
+	BPF_ATOMIC32_SRC_ZEXT(XOR),
+#undef BPF_ATOMIC32_SRC_ZEXT
+	/* Checking that CMPXCHG32 src is not zero extended in place */
 	{
-		"ALU32_ARSH_X: -1234 >> 7 = -10",
+		"ATOMIC_W_CMPXCHG: src preserved in zext",
 		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -1234),
-			BPF_ALU32_IMM(BPF_MOV, R1, 7),
-			BPF_ALU32_REG(BPF_ARSH, R0, R1),
+			BPF_LD_IMM64(R1, 0x0123456789acbdefULL),
+			BPF_ALU64_REG(BPF_MOV, R2, R1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_ST_MEM(BPF_W, R10, -4, 0),
+			BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R1, -4),
+			BPF_ALU64_REG(BPF_SUB, R1, R2),
+			BPF_ALU64_REG(BPF_MOV, R2, R1),
+			BPF_ALU64_IMM(BPF_RSH, R2, 32),
+			BPF_ALU64_REG(BPF_OR, R1, R2),
+			BPF_ALU64_REG(BPF_MOV, R0, R1),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
 		{ },
-		{ { 0, -10 } }
+		{ { 0, 0 } },
+		.stack_depth = 8,
+	},
+	/* Checking that JMP32 immediate src is not zero extended in place */
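+	/*
+	 * The branch target is a no-op, so both outcomes of the 32-bit
+	 * compare fall through to the same check that the full 64-bit
+	 * operand survived unchanged.
+	 */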
+#define BPF_JMP32_IMM_ZEXT(op)					\
+	{							\
+		"JMP32_" #op "_K: operand preserved in zext",	\
+		.u.insns_int = {				\
+			BPF_LD_IMM64(R0, 0x0123456789acbdefULL),\
+			BPF_ALU64_REG(BPF_MOV, R1, R0),		\
+			BPF_JMP32_IMM(BPF_##op, R0, 1234, 1),	\
+			BPF_JMP_A(0), /* Nop */			\
+			BPF_ALU64_REG(BPF_SUB, R0, R1),		\
+			BPF_ALU64_REG(BPF_MOV, R1, R0),		\
+			BPF_ALU64_IMM(BPF_RSH, R1, 32),		\
+			BPF_ALU64_REG(BPF_OR, R0, R1),		\
+			BPF_EXIT_INSN(),			\
+		},						\
+		INTERNAL,					\
+		{ },						\
+		{ { 0, 0 } },					\
+	}
+	BPF_JMP32_IMM_ZEXT(JEQ),
+	BPF_JMP32_IMM_ZEXT(JNE),
+	BPF_JMP32_IMM_ZEXT(JSET),
+	BPF_JMP32_IMM_ZEXT(JGT),
+	BPF_JMP32_IMM_ZEXT(JGE),
+	BPF_JMP32_IMM_ZEXT(JLT),
+	BPF_JMP32_IMM_ZEXT(JLE),
+	BPF_JMP32_IMM_ZEXT(JSGT),
+	BPF_JMP32_IMM_ZEXT(JSGE),
+	BPF_JMP32_IMM_ZEXT(JSLT),
+	BPF_JMP32_IMM_ZEXT(JSLE),
+#undef BPF_JMP32_IMM_ZEXT
+	/* Checking that JMP32 dst & src are not zero extended in place */
+#define BPF_JMP32_REG_ZEXT(op)					\
+	{							\
+		"JMP32_" #op "_X: operands preserved in zext",	\
+		.u.insns_int = {				\
+			BPF_LD_IMM64(R0, 0x0123456789acbdefULL),\
+			BPF_LD_IMM64(R1, 0xfedcba9876543210ULL),\
+			BPF_ALU64_REG(BPF_MOV, R2, R0),		\
+			BPF_ALU64_REG(BPF_MOV, R3, R1),		\
+			BPF_JMP32_REG(BPF_##op, R0, R1, 1),	\
+			BPF_JMP_A(0), /* Nop */			\
+			BPF_ALU64_REG(BPF_SUB, R0, R2),		\
+			BPF_ALU64_REG(BPF_SUB, R1, R3),		\
+			BPF_ALU64_REG(BPF_OR, R0, R1),		\
+			BPF_ALU64_REG(BPF_MOV, R1, R0),		\
+			BPF_ALU64_IMM(BPF_RSH, R1, 32),		\
+			BPF_ALU64_REG(BPF_OR, R0, R1),		\
+			BPF_EXIT_INSN(),			\
+		},						\
+		INTERNAL,					\
+		{ },						\
+		{ { 0, 0 } },					\
+	}
+	BPF_JMP32_REG_ZEXT(JEQ),
+	BPF_JMP32_REG_ZEXT(JNE),
+	BPF_JMP32_REG_ZEXT(JSET),
+	BPF_JMP32_REG_ZEXT(JGT),
+	BPF_JMP32_REG_ZEXT(JGE),
+	BPF_JMP32_REG_ZEXT(JLT),
+	BPF_JMP32_REG_ZEXT(JLE),
+	BPF_JMP32_REG_ZEXT(JSGT),
+	BPF_JMP32_REG_ZEXT(JSGE),
+	BPF_JMP32_REG_ZEXT(JSLT),
+	BPF_JMP32_REG_ZEXT(JSLE),
+#undef BPF_JMP32_REG_ZEXT
+	/* ALU64 K register combinations */
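+	/*
+	 * These tests have no static program body; the fill_helper
+	 * generates the instruction sequence at runtime, covering the
+	 * available destination registers (and source/destination pairs
+	 * for the X variants further below).
+	 */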
+	{
+		"ALU64_MOV_K: registers",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_mov_imm_regs,
 	},
 	{
-		"ALU64_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 40),
-			BPF_ALU64_REG(BPF_ARSH, R0, R1),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_AND_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0xffff00ff } },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_and_imm_regs,
 	},
 	{
-		"ALU64_ARSH_X: Shift < 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 12),
-			BPF_ALU64_REG(BPF_ARSH, R0, R1),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_OR_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0x56789abc } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_or_imm_regs,
 	},
 	{
-		"ALU64_ARSH_X: Shift < 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 12),
-			BPF_ALU64_REG(BPF_ARSH, R0, R1),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_XOR_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0xfff81234 } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_xor_imm_regs,
 	},
 	{
-		"ALU64_ARSH_X: Shift > 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 36),
-			BPF_ALU64_REG(BPF_ARSH, R0, R1),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_LSH_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0xf8123456 } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_lsh_imm_regs,
 	},
 	{
-		"ALU64_ARSH_X: Shift > 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 36),
-			BPF_ALU64_REG(BPF_ARSH, R0, R1),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_RSH_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, -1 } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_rsh_imm_regs,
 	},
 	{
-		"ALU64_ARSH_X: Shift == 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 32),
-			BPF_ALU64_REG(BPF_ARSH, R0, R1),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_ARSH_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0x81234567 } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_arsh_imm_regs,
 	},
 	{
-		"ALU64_ARSH_X: Shift == 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 32),
-			BPF_ALU64_REG(BPF_ARSH, R0, R1),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_ADD_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, -1 } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_add_imm_regs,
 	},
 	{
-		"ALU64_ARSH_X: Zero shift, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0),
-			BPF_ALU64_REG(BPF_ARSH, R0, R1),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_SUB_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0x89abcdef } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_sub_imm_regs,
 	},
 	{
-		"ALU64_ARSH_X: Zero shift, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0),
-			BPF_ALU64_REG(BPF_ARSH, R0, R1),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_MUL_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0x81234567 } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_mul_imm_regs,
 	},
-	/* BPF_ALU | BPF_ARSH | BPF_K */
 	{
-		"ALU32_ARSH_K: -1234 >> 7 = -10",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -1234),
-			BPF_ALU32_IMM(BPF_ARSH, R0, 7),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_DIV_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, -10 } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_div_imm_regs,
 	},
 	{
-		"ALU32_ARSH_K: -1234 >> 0 = -1234",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -1234),
-			BPF_ALU32_IMM(BPF_ARSH, R0, 0),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_MOD_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, -1234 } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_mod_imm_regs,
 	},
+	/* ALU32 K registers */
 	{
-		"ALU64_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
-			BPF_ALU64_IMM(BPF_ARSH, R0, 40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_MOV_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0xffff00ff } },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_mov_imm_regs,
 	},
 	{
-		"ALU64_ARSH_K: Shift < 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_RSH, R0, 12),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_AND_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0x56789abc } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_and_imm_regs,
 	},
 	{
-		"ALU64_ARSH_K: Shift < 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_ARSH, R0, 12),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_OR_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0xfff81234 } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_or_imm_regs,
 	},
 	{
-		"ALU64_ARSH_K: Shift > 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_ARSH, R0, 36),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_XOR_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0xf8123456 } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_xor_imm_regs,
 	},
 	{
-		"ALU64_ARSH_K: Shift > 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0xf123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_ARSH, R0, 36),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_LSH_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, -1 } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_lsh_imm_regs,
 	},
 	{
-		"ALU64_ARSH_K: Shift == 32, low word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_ARSH, R0, 32),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_RSH_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0x81234567 } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_rsh_imm_regs,
 	},
 	{
-		"ALU64_ARSH_K: Shift == 32, high word",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_ARSH, R0, 32),
-			BPF_ALU64_IMM(BPF_RSH, R0, 32),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_ARSH_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, -1 } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_arsh_imm_regs,
 	},
 	{
-		"ALU64_ARSH_K: Zero shift",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
-			BPF_ALU64_IMM(BPF_ARSH, R0, 0),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_ADD_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0x89abcdef } }
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_add_imm_regs,
 	},
-	/* BPF_ALU | BPF_NEG */
 	{
-		"ALU_NEG: -(3) = -3",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 3),
-			BPF_ALU32_IMM(BPF_NEG, R0, 0),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_SUB_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, -3 } },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_sub_imm_regs,
 	},
 	{
-		"ALU_NEG: -(-3) = 3",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -3),
-			BPF_ALU32_IMM(BPF_NEG, R0, 0),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_MUL_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_mul_imm_regs,
 	},
 	{
-		"ALU64_NEG: -(3) = -3",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 3),
-			BPF_ALU64_IMM(BPF_NEG, R0, 0),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_DIV_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, -3 } },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_div_imm_regs,
 	},
 	{
-		"ALU64_NEG: -(-3) = 3",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, -3),
-			BPF_ALU64_IMM(BPF_NEG, R0, 0),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_MOD_K: registers",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 3 } },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_mod_imm_regs,
 	},
-	/* BPF_ALU | BPF_END | BPF_FROM_BE */
+	/* ALU64 X register combinations */
 	{
-		"ALU_END_FROM_BE 16: 0x0123456789abcdef -> 0xcdef",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ENDIAN(BPF_FROM_BE, R0, 16),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_MOV_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0,  cpu_to_be16(0xcdef) } },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_mov_reg_pairs,
 	},
 	{
-		"ALU_END_FROM_BE 32: 0x0123456789abcdef -> 0x89abcdef",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ENDIAN(BPF_FROM_BE, R0, 32),
-			BPF_ALU64_REG(BPF_MOV, R1, R0),
-			BPF_ALU64_IMM(BPF_RSH, R1, 32),
-			BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_AND_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, cpu_to_be32(0x89abcdef) } },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_and_reg_pairs,
 	},
 	{
-		"ALU_END_FROM_BE 64: 0x0123456789abcdef -> 0x89abcdef",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ENDIAN(BPF_FROM_BE, R0, 64),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_OR_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_or_reg_pairs,
 	},
-	/* BPF_ALU | BPF_END | BPF_FROM_LE */
 	{
-		"ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ENDIAN(BPF_FROM_LE, R0, 16),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_XOR_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, cpu_to_le16(0xcdef) } },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_xor_reg_pairs,
 	},
 	{
-		"ALU_END_FROM_LE 32: 0x0123456789abcdef -> 0xefcdab89",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ENDIAN(BPF_FROM_LE, R0, 32),
-			BPF_ALU64_REG(BPF_MOV, R1, R0),
-			BPF_ALU64_IMM(BPF_RSH, R1, 32),
-			BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_LSH_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, cpu_to_le32(0x89abcdef) } },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_lsh_reg_pairs,
 	},
 	{
-		"ALU_END_FROM_LE 64: 0x0123456789abcdef -> 0x67452301",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
-			BPF_ENDIAN(BPF_FROM_LE, R0, 64),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_RSH_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_rsh_reg_pairs,
 	},
-	/* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */
 	{
-		"ST_MEM_B: Store/Load byte: max negative",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_ST_MEM(BPF_B, R10, -40, 0xff),
-			BPF_LDX_MEM(BPF_B, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_ARSH_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0xff } },
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_arsh_reg_pairs,
 	},
 	{
-		"ST_MEM_B: Store/Load byte: max positive",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_ST_MEM(BPF_H, R10, -40, 0x7f),
-			BPF_LDX_MEM(BPF_H, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_ADD_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0x7f } },
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_add_reg_pairs,
 	},
 	{
-		"STX_MEM_B: Store/Load byte: max negative",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0),
-			BPF_LD_IMM64(R1, 0xffLL),
-			BPF_STX_MEM(BPF_B, R10, R1, -40),
-			BPF_LDX_MEM(BPF_B, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_SUB_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0xff } },
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_sub_reg_pairs,
 	},
 	{
-		"ST_MEM_H: Store/Load half word: max negative",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_ST_MEM(BPF_H, R10, -40, 0xffff),
-			BPF_LDX_MEM(BPF_H, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_MUL_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0xffff } },
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_mul_reg_pairs,
 	},
 	{
-		"ST_MEM_H: Store/Load half word: max positive",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_ST_MEM(BPF_H, R10, -40, 0x7fff),
-			BPF_LDX_MEM(BPF_H, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_DIV_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0x7fff } },
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_div_reg_pairs,
 	},
 	{
-		"STX_MEM_H: Store/Load half word: max negative",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0),
-			BPF_LD_IMM64(R1, 0xffffLL),
-			BPF_STX_MEM(BPF_H, R10, R1, -40),
-			BPF_LDX_MEM(BPF_H, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU64_MOD_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0xffff } },
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_mod_reg_pairs,
 	},
+	/* ALU32 X register combinations */
 	{
-		"ST_MEM_W: Store/Load word: max negative",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_ST_MEM(BPF_W, R10, -40, 0xffffffff),
-			BPF_LDX_MEM(BPF_W, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_MOV_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0xffffffff } },
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_mov_reg_pairs,
 	},
 	{
-		"ST_MEM_W: Store/Load word: max positive",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_ST_MEM(BPF_W, R10, -40, 0x7fffffff),
-			BPF_LDX_MEM(BPF_W, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_AND_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0x7fffffff } },
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_and_reg_pairs,
 	},
 	{
-		"STX_MEM_W: Store/Load word: max negative",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0),
-			BPF_LD_IMM64(R1, 0xffffffffLL),
-			BPF_STX_MEM(BPF_W, R10, R1, -40),
-			BPF_LDX_MEM(BPF_W, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_OR_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0xffffffff } },
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_or_reg_pairs,
 	},
 	{
-		"ST_MEM_DW: Store/Load double word: max negative",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
-			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_XOR_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0xffffffff } },
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_xor_reg_pairs,
 	},
 	{
-		"ST_MEM_DW: Store/Load double word: max negative 2",
-		.u.insns_int = {
-			BPF_LD_IMM64(R2, 0xffff00000000ffffLL),
-			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
-			BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
-			BPF_LDX_MEM(BPF_DW, R2, R10, -40),
-			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
-			BPF_MOV32_IMM(R0, 2),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_LSH_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0x1 } },
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_lsh_reg_pairs,
 	},
 	{
-		"ST_MEM_DW: Store/Load double word: max positive",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_ST_MEM(BPF_DW, R10, -40, 0x7fffffff),
-			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_RSH_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0x7fffffff } },
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_rsh_reg_pairs,
 	},
 	{
-		"STX_MEM_DW: Store/Load double word: max negative",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0),
-			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
-			BPF_STX_MEM(BPF_DW, R10, R1, -40),
-			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_ARSH_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0xffffffff } },
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_arsh_reg_pairs,
 	},
 	{
-		"STX_MEM_DW: Store double word: first word in memory",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0),
-			BPF_LD_IMM64(R1, 0x0123456789abcdefLL),
-			BPF_STX_MEM(BPF_DW, R10, R1, -40),
-			BPF_LDX_MEM(BPF_W, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_ADD_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-#ifdef __BIG_ENDIAN
-		{ { 0, 0x01234567 } },
-#else
-		{ { 0, 0x89abcdef } },
-#endif
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_add_reg_pairs,
 	},
 	{
-		"STX_MEM_DW: Store double word: second word in memory",
-		.u.insns_int = {
-			BPF_LD_IMM64(R0, 0),
-			BPF_LD_IMM64(R1, 0x0123456789abcdefLL),
-			BPF_STX_MEM(BPF_DW, R10, R1, -40),
-			BPF_LDX_MEM(BPF_W, R0, R10, -36),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_SUB_X: register combinations",
+		{ },
 		INTERNAL,
 		{ },
-#ifdef __BIG_ENDIAN
-		{ { 0, 0x89abcdef } },
-#else
-		{ { 0, 0x01234567 } },
-#endif
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_sub_reg_pairs,
 	},
-	/* BPF_STX | BPF_ATOMIC | BPF_W/DW */
 	{
-		"STX_XADD_W: X + 1 + 1 + 1 + ...",
+		"ALU32_MUL_X: register combinations",
 		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 4134 } },
-		.fill_helper = bpf_fill_stxw,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_mul_reg_pairs,
 	},
 	{
-		"STX_XADD_DW: X + 1 + 1 + 1 + ...",
+		"ALU32_DIV_X: register combinations",
 		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 4134 } },
-		.fill_helper = bpf_fill_stxdw,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_div_reg_pairs,
 	},
-	/*
-	 * Exhaustive tests of atomic operation variants.
-	 * Individual tests are expanded from template macros for all
-	 * combinations of ALU operation, word size and fetching.
-	 */
-#define BPF_ATOMIC_OP_TEST1(width, op, logic, old, update, result)	\
-{									\
-	"BPF_ATOMIC | " #width ", " #op ": Test: "			\
-		#old " " #logic " " #update " = " #result,		\
-	.u.insns_int = {						\
-		BPF_ALU32_IMM(BPF_MOV, R5, update),			\
-		BPF_ST_MEM(width, R10, -40, old),			\
-		BPF_ATOMIC_OP(width, op, R10, R5, -40),			\
-		BPF_LDX_MEM(width, R0, R10, -40),			\
-		BPF_EXIT_INSN(),					\
-	},								\
-	INTERNAL,							\
-	{ },								\
-	{ { 0, result } },						\
-	.stack_depth = 40,						\
-}
-#define BPF_ATOMIC_OP_TEST2(width, op, logic, old, update, result)	\
-{									\
-	"BPF_ATOMIC | " #width ", " #op ": Test side effects, r10: "	\
-		#old " " #logic " " #update " = " #result,		\
-	.u.insns_int = {						\
-		BPF_ALU64_REG(BPF_MOV, R1, R10),			\
-		BPF_ALU32_IMM(BPF_MOV, R0, update),			\
-		BPF_ST_MEM(BPF_W, R10, -40, old),			\
-		BPF_ATOMIC_OP(width, op, R10, R0, -40),			\
-		BPF_ALU64_REG(BPF_MOV, R0, R10),			\
-		BPF_ALU64_REG(BPF_SUB, R0, R1),				\
-		BPF_EXIT_INSN(),					\
-	},								\
-	INTERNAL,							\
-	{ },								\
-	{ { 0, 0 } },							\
-	.stack_depth = 40,						\
-}
-#define BPF_ATOMIC_OP_TEST3(width, op, logic, old, update, result)	\
-{									\
-	"BPF_ATOMIC | " #width ", " #op ": Test side effects, r0: "	\
-		#old " " #logic " " #update " = " #result,		\
-	.u.insns_int = {						\
-		BPF_ALU64_REG(BPF_MOV, R0, R10),			\
-		BPF_ALU32_IMM(BPF_MOV, R1, update),			\
-		BPF_ST_MEM(width, R10, -40, old),			\
-		BPF_ATOMIC_OP(width, op, R10, R1, -40),			\
-		BPF_ALU64_REG(BPF_SUB, R0, R10),			\
-		BPF_EXIT_INSN(),					\
-	},								\
-	INTERNAL,                                                       \
-	{ },                                                            \
-	{ { 0, 0 } },                                                   \
-	.stack_depth = 40,                                              \
-}
-#define BPF_ATOMIC_OP_TEST4(width, op, logic, old, update, result)	\
-{									\
-	"BPF_ATOMIC | " #width ", " #op ": Test fetch: "		\
-		#old " " #logic " " #update " = " #result,		\
-	.u.insns_int = {						\
-		BPF_ALU32_IMM(BPF_MOV, R3, update),			\
-		BPF_ST_MEM(width, R10, -40, old),			\
-		BPF_ATOMIC_OP(width, op, R10, R3, -40),			\
-		BPF_ALU64_REG(BPF_MOV, R0, R3),                         \
-		BPF_EXIT_INSN(),					\
-	},								\
-	INTERNAL,                                                       \
-	{ },                                                            \
-	{ { 0, (op) & BPF_FETCH ? old : update } },			\
-	.stack_depth = 40,                                              \
-}
-	/* BPF_ATOMIC | BPF_W: BPF_ADD */
-	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
-	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
-	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
-	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
-	/* BPF_ATOMIC | BPF_W: BPF_ADD | BPF_FETCH */
-	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
-	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
-	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
-	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
-	/* BPF_ATOMIC | BPF_DW: BPF_ADD */
-	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
-	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
-	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
-	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
-	/* BPF_ATOMIC | BPF_DW: BPF_ADD | BPF_FETCH */
-	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
-	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
-	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
-	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
-	/* BPF_ATOMIC | BPF_W: BPF_AND */
-	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
-	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
-	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
-	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
-	/* BPF_ATOMIC | BPF_W: BPF_AND | BPF_FETCH */
-	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
-	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
-	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
-	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
-	/* BPF_ATOMIC | BPF_DW: BPF_AND */
-	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
-	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
-	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
-	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
-	/* BPF_ATOMIC | BPF_DW: BPF_AND | BPF_FETCH */
-	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
-	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
-	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
-	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
-	/* BPF_ATOMIC | BPF_W: BPF_OR */
-	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
-	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
-	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
-	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
-	/* BPF_ATOMIC | BPF_W: BPF_OR | BPF_FETCH */
-	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
-	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
-	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
-	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
-	/* BPF_ATOMIC | BPF_DW: BPF_OR */
-	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
-	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
-	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
-	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
-	/* BPF_ATOMIC | BPF_DW: BPF_OR | BPF_FETCH */
-	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
-	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
-	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
-	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
-	/* BPF_ATOMIC | BPF_W: BPF_XOR */
-	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
-	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
-	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
-	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
-	/* BPF_ATOMIC | BPF_W: BPF_XOR | BPF_FETCH */
-	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
-	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
-	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
-	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
-	/* BPF_ATOMIC | BPF_DW: BPF_XOR */
-	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
-	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
-	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
-	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
-	/* BPF_ATOMIC | BPF_DW: BPF_XOR | BPF_FETCH */
-	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
-	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
-	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
-	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
-	/* BPF_ATOMIC | BPF_W: BPF_XCHG */
-	BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
-	BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
-	BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
-	BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
-	/* BPF_ATOMIC | BPF_DW: BPF_XCHG */
-	BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
-	BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
-	BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
-	BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
-#undef BPF_ATOMIC_OP_TEST1
-#undef BPF_ATOMIC_OP_TEST2
-#undef BPF_ATOMIC_OP_TEST3
-#undef BPF_ATOMIC_OP_TEST4
-	/* BPF_ATOMIC | BPF_W, BPF_CMPXCHG */
 	{
-		"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful return",
-		.u.insns_int = {
-			BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
-			BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
-			BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
-			BPF_EXIT_INSN(),
-		},
+		"ALU32_MOD_X register combinations",
+		{ },
 		INTERNAL,
 		{ },
-		{ { 0, 0x01234567 } },
-		.stack_depth = 40,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_mod_reg_pairs,
 	},
+	/* Exhaustive test of ALU64 shift operations */
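+	/*
+	 * Also generated at runtime: the fill_helper emits one check per
+	 * possible shift amount and verifies the result against a value
+	 * computed by the generator.
+	 */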
 	{
-		"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful store",
-		.u.insns_int = {
-			BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
-			BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
-			BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
-			BPF_LDX_MEM(BPF_W, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_LSH_K: all shift values",
 		{ },
-		{ { 0, 0x89abcdef } },
-		.stack_depth = 40,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_lsh_imm,
 	},
 	{
-		"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure return",
-		.u.insns_int = {
-			BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210),
-			BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
-			BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_RSH_K: all shift values",
 		{ },
-		{ { 0, 0x01234567 } },
-		.stack_depth = 40,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_rsh_imm,
 	},
 	{
-		"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure store",
-		.u.insns_int = {
-			BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210),
-			BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
-			BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
-			BPF_LDX_MEM(BPF_W, R0, R10, -40),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_ARSH_K: all shift values",
 		{ },
-		{ { 0, 0x01234567 } },
-		.stack_depth = 40,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_arsh_imm,
 	},
 	{
-		"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test side effects",
-		.u.insns_int = {
-			BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
-			BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
-			BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
-			BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
-			BPF_ALU32_REG(BPF_MOV, R0, R3),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_LSH_X: all shift values",
 		{ },
-		{ { 0, 0x89abcdef } },
-		.stack_depth = 40,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_lsh_reg,
 	},
-	/* BPF_ATOMIC | BPF_DW, BPF_CMPXCHG */
 	{
-		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful return",
-		.u.insns_int = {
-			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
-			BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
-			BPF_ALU64_REG(BPF_MOV, R0, R1),
-			BPF_STX_MEM(BPF_DW, R10, R1, -40),
-			BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
-			BPF_JMP_REG(BPF_JNE, R0, R1, 1),
-			BPF_ALU64_REG(BPF_SUB, R0, R1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_RSH_X: all shift values",
 		{ },
-		{ { 0, 0 } },
-		.stack_depth = 40,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_rsh_reg,
 	},
 	{
-		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful store",
-		.u.insns_int = {
-			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
-			BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
-			BPF_ALU64_REG(BPF_MOV, R0, R1),
-			BPF_STX_MEM(BPF_DW, R10, R0, -40),
-			BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
-			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
-			BPF_JMP_REG(BPF_JNE, R0, R2, 1),
-			BPF_ALU64_REG(BPF_SUB, R0, R2),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_ARSH_X: all shift values",
 		{ },
-		{ { 0, 0 } },
-		.stack_depth = 40,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_arsh_reg,
 	},
+	/* Exhaustive test of ALU32 shift operations */
 	{
-		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure return",
-		.u.insns_int = {
-			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
-			BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
-			BPF_ALU64_REG(BPF_MOV, R0, R1),
-			BPF_ALU64_IMM(BPF_ADD, R0, 1),
-			BPF_STX_MEM(BPF_DW, R10, R1, -40),
-			BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
-			BPF_JMP_REG(BPF_JNE, R0, R1, 1),
-			BPF_ALU64_REG(BPF_SUB, R0, R1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_LSH_K: all shift values",
 		{ },
-		{ { 0, 0 } },
-		.stack_depth = 40,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_lsh_imm,
 	},
 	{
-		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure store",
-		.u.insns_int = {
-			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
-			BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
-			BPF_ALU64_REG(BPF_MOV, R0, R1),
-			BPF_ALU64_IMM(BPF_ADD, R0, 1),
-			BPF_STX_MEM(BPF_DW, R10, R1, -40),
-			BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
-			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
-			BPF_JMP_REG(BPF_JNE, R0, R1, 1),
-			BPF_ALU64_REG(BPF_SUB, R0, R1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_RSH_K: all shift values",
 		{ },
-		{ { 0, 0 } },
-		.stack_depth = 40,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_rsh_imm,
 	},
 	{
-		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test side effects",
-		.u.insns_int = {
-			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
-			BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
-			BPF_ALU64_REG(BPF_MOV, R0, R1),
-			BPF_STX_MEM(BPF_DW, R10, R1, -40),
-			BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
-			BPF_LD_IMM64(R0, 0xfecdba9876543210ULL),
-			BPF_JMP_REG(BPF_JNE, R0, R2, 1),
-			BPF_ALU64_REG(BPF_SUB, R0, R2),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_ARSH_K: all shift values",
 		{ },
-		{ { 0, 0 } },
-		.stack_depth = 40,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_arsh_imm,
 	},
-	/* BPF_JMP32 | BPF_JEQ | BPF_K */
 	{
-		"JMP32_JEQ_K: Small immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 123),
-			BPF_JMP32_IMM(BPF_JEQ, R0, 321, 1),
-			BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_LSH_X: all shift values",
 		{ },
-		{ { 0, 123 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_lsh_reg,
 	},
 	{
-		"JMP32_JEQ_K: Large immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 12345678),
-			BPF_JMP32_IMM(BPF_JEQ, R0, 12345678 & 0xffff, 1),
-			BPF_JMP32_IMM(BPF_JEQ, R0, 12345678, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_RSH_X: all shift values",
 		{ },
-		{ { 0, 12345678 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_rsh_reg,
 	},
 	{
-		"JMP32_JEQ_K: negative immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -123),
-			BPF_JMP32_IMM(BPF_JEQ, R0,  123, 1),
-			BPF_JMP32_IMM(BPF_JEQ, R0, -123, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_ARSH_X: all shift values",
 		{ },
-		{ { 0, -123 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_arsh_reg,
 	},
-	/* BPF_JMP32 | BPF_JEQ | BPF_X */
+	/*
+	 * Exhaustive test of ALU64 shift operations when
+	 * source and destination register are the same.
+	 */
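+	/*
+	 * dst == src is tested separately because a JIT may need a
+	 * temporary register to avoid clobbering an operand, a code path
+	 * that the distinct-register shift tests above do not exercise.
+	 */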
 	{
-		"JMP32_JEQ_X",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1234),
-			BPF_ALU32_IMM(BPF_MOV, R1, 4321),
-			BPF_JMP32_REG(BPF_JEQ, R0, R1, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, 1234),
-			BPF_JMP32_REG(BPF_JEQ, R0, R1, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_LSH_X: all shift values with the same register",
 		{ },
-		{ { 0, 1234 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_lsh_same_reg,
 	},
-	/* BPF_JMP32 | BPF_JNE | BPF_K */
 	{
-		"JMP32_JNE_K: Small immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 123),
-			BPF_JMP32_IMM(BPF_JNE, R0, 123, 1),
-			BPF_JMP32_IMM(BPF_JNE, R0, 321, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_RSH_X: all shift values with the same register",
 		{ },
-		{ { 0, 123 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_rsh_same_reg,
 	},
 	{
-		"JMP32_JNE_K: Large immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 12345678),
-			BPF_JMP32_IMM(BPF_JNE, R0, 12345678, 1),
-			BPF_JMP32_IMM(BPF_JNE, R0, 12345678 & 0xffff, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_ARSH_X: all shift values with the same register",
 		{ },
-		{ { 0, 12345678 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_arsh_same_reg,
 	},
+	/*
+	 * Exhaustive test of ALU32 shift operations when
+	 * source and destination register are the same.
+	 */
 	{
-		"JMP32_JNE_K: negative immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -123),
-			BPF_JMP32_IMM(BPF_JNE, R0, -123, 1),
-			BPF_JMP32_IMM(BPF_JNE, R0,  123, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_LSH_X: all shift values with the same register",
 		{ },
-		{ { 0, -123 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_lsh_same_reg,
 	},
-	/* BPF_JMP32 | BPF_JNE | BPF_X */
 	{
-		"JMP32_JNE_X",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1234),
-			BPF_ALU32_IMM(BPF_MOV, R1, 1234),
-			BPF_JMP32_REG(BPF_JNE, R0, R1, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, 4321),
-			BPF_JMP32_REG(BPF_JNE, R0, R1, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_RSH_X: all shift values with the same register",
 		{ },
-		{ { 0, 1234 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_rsh_same_reg,
 	},
-	/* BPF_JMP32 | BPF_JSET | BPF_K */
 	{
-		"JMP32_JSET_K: Small immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_JMP32_IMM(BPF_JSET, R0, 2, 1),
-			BPF_JMP32_IMM(BPF_JSET, R0, 3, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_ARSH_X: all shift values with the same register",
 		{ },
-		{ { 0, 1 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_arsh_same_reg,
 	},
+	/* ALU64 immediate magnitudes */
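+	/*
+	 * The magnitude tests sweep operand values across all bit widths,
+	 * so the generated programs are long; .nr_testruns caps execution
+	 * at NR_PATTERN_RUNS instead of the default number of runs.
+	 */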
 	{
-		"JMP32_JSET_K: Large immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x40000000),
-			BPF_JMP32_IMM(BPF_JSET, R0, 0x3fffffff, 1),
-			BPF_JMP32_IMM(BPF_JSET, R0, 0x60000000, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_MOV_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 0x40000000 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_mov_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP32_JSET_K: negative immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -123),
-			BPF_JMP32_IMM(BPF_JSET, R0, -1, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_AND_K: all immediate value magnitudes",
 		{ },
-		{ { 0, -123 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_and_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JSET | BPF_X */
 	{
-		"JMP32_JSET_X",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 8),
-			BPF_ALU32_IMM(BPF_MOV, R1, 7),
-			BPF_JMP32_REG(BPF_JSET, R0, R1, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, 8 | 2),
-			BPF_JMP32_REG(BPF_JNE, R0, R1, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_OR_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 8 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_or_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JGT | BPF_K */
 	{
-		"JMP32_JGT_K: Small immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 123),
-			BPF_JMP32_IMM(BPF_JGT, R0, 123, 1),
-			BPF_JMP32_IMM(BPF_JGT, R0, 122, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_XOR_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 123 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_xor_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP32_JGT_K: Large immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
-			BPF_JMP32_IMM(BPF_JGT, R0, 0xffffffff, 1),
-			BPF_JMP32_IMM(BPF_JGT, R0, 0xfffffffd, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_ADD_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 0xfffffffe } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_add_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JGT | BPF_X */
 	{
-		"JMP32_JGT_X",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
-			BPF_JMP32_REG(BPF_JGT, R0, R1, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
-			BPF_JMP32_REG(BPF_JGT, R0, R1, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_SUB_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 0xfffffffe } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_sub_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JGE | BPF_K */
 	{
-		"JMP32_JGE_K: Small immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 123),
-			BPF_JMP32_IMM(BPF_JGE, R0, 124, 1),
-			BPF_JMP32_IMM(BPF_JGE, R0, 123, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_MUL_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 123 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_mul_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP32_JGE_K: Large immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
-			BPF_JMP32_IMM(BPF_JGE, R0, 0xffffffff, 1),
-			BPF_JMP32_IMM(BPF_JGE, R0, 0xfffffffe, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_DIV_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 0xfffffffe } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_div_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JGE | BPF_X */
 	{
-		"JMP32_JGE_X",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
-			BPF_JMP32_REG(BPF_JGE, R0, R1, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe),
-			BPF_JMP32_REG(BPF_JGE, R0, R1, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_MOD_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 0xfffffffe } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_mod_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JLT | BPF_K */
+	/* ALU32 immediate magnitudes */
 	{
-		"JMP32_JLT_K: Small immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 123),
-			BPF_JMP32_IMM(BPF_JLT, R0, 123, 1),
-			BPF_JMP32_IMM(BPF_JLT, R0, 124, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_MOV_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 123 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_mov_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP32_JLT_K: Large immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
-			BPF_JMP32_IMM(BPF_JLT, R0, 0xfffffffd, 1),
-			BPF_JMP32_IMM(BPF_JLT, R0, 0xffffffff, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_AND_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 0xfffffffe } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_and_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JLT | BPF_X */
 	{
-		"JMP32_JLT_X",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
-			BPF_JMP32_REG(BPF_JLT, R0, R1, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
-			BPF_JMP32_REG(BPF_JLT, R0, R1, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_OR_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 0xfffffffe } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_or_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JLE | BPF_K */
 	{
-		"JMP32_JLE_K: Small immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 123),
-			BPF_JMP32_IMM(BPF_JLE, R0, 122, 1),
-			BPF_JMP32_IMM(BPF_JLE, R0, 123, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_XOR_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 123 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_xor_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP32_JLE_K: Large immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
-			BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffd, 1),
-			BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffe, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_ADD_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 0xfffffffe } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_add_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JLE | BPF_X */
 	{
-		"JMP32_JLE_X",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
-			BPF_JMP32_REG(BPF_JLE, R0, R1, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe),
-			BPF_JMP32_REG(BPF_JLE, R0, R1, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_SUB_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 0xfffffffe } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_sub_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JSGT | BPF_K */
 	{
-		"JMP32_JSGT_K: Small immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -123),
-			BPF_JMP32_IMM(BPF_JSGT, R0, -123, 1),
-			BPF_JMP32_IMM(BPF_JSGT, R0, -124, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_MUL_K: all immediate value magnitudes",
 		{ },
-		{ { 0, -123 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_mul_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP32_JSGT_K: Large immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
-			BPF_JMP32_IMM(BPF_JSGT, R0, -12345678, 1),
-			BPF_JMP32_IMM(BPF_JSGT, R0, -12345679, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_DIV_K: all immediate value magnitudes",
 		{ },
-		{ { 0, -12345678 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_div_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JSGT | BPF_X */
 	{
-		"JMP32_JSGT_X",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
-			BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
-			BPF_JMP32_REG(BPF_JSGT, R0, R1, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, -12345679),
-			BPF_JMP32_REG(BPF_JSGT, R0, R1, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_MOD_K: all immediate value magnitudes",
 		{ },
-		{ { 0, -12345678 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_mod_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JSGE | BPF_K */
+	/* ALU64 register magnitudes */
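+	/*
+	 * The register variants cover full 64-bit operand values, which
+	 * the 32-bit immediate field of the _K variants cannot encode.
+	 */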
 	{
-		"JMP32_JSGE_K: Small immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -123),
-			BPF_JMP32_IMM(BPF_JSGE, R0, -122, 1),
-			BPF_JMP32_IMM(BPF_JSGE, R0, -123, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_MOV_X: all register value magnitudes",
 		{ },
-		{ { 0, -123 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_mov_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP32_JSGE_K: Large immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
-			BPF_JMP32_IMM(BPF_JSGE, R0, -12345677, 1),
-			BPF_JMP32_IMM(BPF_JSGE, R0, -12345678, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_AND_X: all register value magnitudes",
 		{ },
-		{ { 0, -12345678 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_and_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JSGE | BPF_X */
 	{
-		"JMP32_JSGE_X",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
-			BPF_ALU32_IMM(BPF_MOV, R1, -12345677),
-			BPF_JMP32_REG(BPF_JSGE, R0, R1, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
-			BPF_JMP32_REG(BPF_JSGE, R0, R1, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_OR_X: all register value magnitudes",
 		{ },
-		{ { 0, -12345678 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_or_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JSLT | BPF_K */
 	{
-		"JMP32_JSLT_K: Small immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -123),
-			BPF_JMP32_IMM(BPF_JSLT, R0, -123, 1),
-			BPF_JMP32_IMM(BPF_JSLT, R0, -122, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_XOR_X: all register value magnitudes",
 		{ },
-		{ { 0, -123 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_xor_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP32_JSLT_K: Large immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
-			BPF_JMP32_IMM(BPF_JSLT, R0, -12345678, 1),
-			BPF_JMP32_IMM(BPF_JSLT, R0, -12345677, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_ADD_X: all register value magnitudes",
 		{ },
-		{ { 0, -12345678 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_add_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JSLT | BPF_X */
 	{
-		"JMP32_JSLT_X",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
-			BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
-			BPF_JMP32_REG(BPF_JSLT, R0, R1, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, -12345677),
-			BPF_JMP32_REG(BPF_JSLT, R0, R1, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_SUB_X: all register value magnitudes",
 		{ },
-		{ { 0, -12345678 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_sub_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JSLE | BPF_K */
 	{
-		"JMP32_JSLE_K: Small immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -123),
-			BPF_JMP32_IMM(BPF_JSLE, R0, -124, 1),
-			BPF_JMP32_IMM(BPF_JSLE, R0, -123, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_MUL_X: all register value magnitudes",
 		{ },
-		{ { 0, -123 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_mul_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP32_JSLE_K: Large immediate",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
-			BPF_JMP32_IMM(BPF_JSLE, R0, -12345679, 1),
-			BPF_JMP32_IMM(BPF_JSLE, R0, -12345678, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_DIV_X: all register value magnitudes",
 		{ },
-		{ { 0, -12345678 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_div_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP32 | BPF_JSLE | BPF_K */
 	{
-		"JMP32_JSLE_X",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
-			BPF_ALU32_IMM(BPF_MOV, R1, -12345679),
-			BPF_JMP32_REG(BPF_JSLE, R0, R1, 2),
-			BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
-			BPF_JMP32_REG(BPF_JSLE, R0, R1, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU64_MOD_X: all register value magnitudes",
 		{ },
-		{ { 0, -12345678 } }
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_mod_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_EXIT */
+	/* ALU32 register magnitudes */
 	{
-		"JMP_EXIT",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x4711),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0x4712),
-		},
-		INTERNAL,
+		"ALU32_MOV_X: all register value magnitudes",
 		{ },
-		{ { 0, 0x4711 } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_mov_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JA */
 	{
-		"JMP_JA: Unconditional jump: if (true) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_AND_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_and_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JSLT | BPF_K */
 	{
-		"JMP_JSLT_K: Signed jump: if (-2 < -1) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
-			BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_OR_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_or_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JSLT_K: Signed jump: if (-1 < -1) return 0",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
-			BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_XOR_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_xor_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JSGT | BPF_K */
 	{
-		"JMP_JSGT_K: Signed jump: if (-1 > -2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
-			BPF_JMP_IMM(BPF_JSGT, R1, -2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_ADD_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_add_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JSGT_K: Signed jump: if (-1 > -1) return 0",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
-			BPF_JMP_IMM(BPF_JSGT, R1, -1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_SUB_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_sub_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JSLE | BPF_K */
 	{
-		"JMP_JSLE_K: Signed jump: if (-2 <= -1) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
-			BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_MUL_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_mul_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JSLE_K: Signed jump: if (-1 <= -1) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
-			BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_DIV_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_div_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JSLE_K: Signed jump: value walk 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JSLE, R1, 0, 6),
-			BPF_ALU64_IMM(BPF_SUB, R1, 1),
-			BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
-			BPF_ALU64_IMM(BPF_SUB, R1, 1),
-			BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
-			BPF_ALU64_IMM(BPF_SUB, R1, 1),
-			BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
-			BPF_EXIT_INSN(),		/* bad exit */
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),	/* good exit */
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ALU32_MOD_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_mod_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	/* LD_IMM64 immediate magnitudes */
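+	/*
+	 * BPF_LD_IMM64 spans two instruction slots; this test mainly
+	 * exercises how a JIT materializes 64-bit constants of varying
+	 * magnitude.
+	 */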
+	{
+		"LD_IMM64: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_ld_imm64,
 	},
+	/* 64-bit ATOMIC register combinations */
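+	/*
+	 * The atomic tests operate on a scratch quadword on the stack,
+	 * hence .stack_depth = 8 on every entry below.
+	 */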
 	{
-		"JMP_JSLE_K: Signed jump: value walk 2",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
-			BPF_ALU64_IMM(BPF_SUB, R1, 2),
-			BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
-			BPF_ALU64_IMM(BPF_SUB, R1, 2),
-			BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
-			BPF_EXIT_INSN(),		/* bad exit */
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),	/* good exit */
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_DW_ADD: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_add_reg_pairs,
+		.stack_depth = 8,
 	},
-	/* BPF_JMP | BPF_JSGE | BPF_K */
 	{
-		"JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
-			BPF_JMP_IMM(BPF_JSGE, R1, -2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_DW_AND: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_and_reg_pairs,
+		.stack_depth = 8,
 	},
 	{
-		"JMP_JSGE_K: Signed jump: if (-1 >= -1) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
-			BPF_JMP_IMM(BPF_JSGE, R1, -1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_DW_OR: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_or_reg_pairs,
+		.stack_depth = 8,
 	},
 	{
-		"JMP_JSGE_K: Signed jump: value walk 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, -3),
-			BPF_JMP_IMM(BPF_JSGE, R1, 0, 6),
-			BPF_ALU64_IMM(BPF_ADD, R1, 1),
-			BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
-			BPF_ALU64_IMM(BPF_ADD, R1, 1),
-			BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
-			BPF_ALU64_IMM(BPF_ADD, R1, 1),
-			BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
-			BPF_EXIT_INSN(),		/* bad exit */
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),	/* good exit */
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_DW_XOR: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_xor_reg_pairs,
+		.stack_depth = 8,
 	},
 	{
-		"JMP_JSGE_K: Signed jump: value walk 2",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, -3),
-			BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
-			BPF_ALU64_IMM(BPF_ADD, R1, 2),
-			BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
-			BPF_ALU64_IMM(BPF_ADD, R1, 2),
-			BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
-			BPF_EXIT_INSN(),		/* bad exit */
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),	/* good exit */
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_DW_ADD_FETCH: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_add_fetch_reg_pairs,
+		.stack_depth = 8,
 	},
-	/* BPF_JMP | BPF_JGT | BPF_K */
 	{
-		"JMP_JGT_K: if (3 > 2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JGT, R1, 2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_DW_AND_FETCH: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_and_fetch_reg_pairs,
+		.stack_depth = 8,
 	},
 	{
-		"JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, -1),
-			BPF_JMP_IMM(BPF_JGT, R1, 1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_DW_OR_FETCH: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_or_fetch_reg_pairs,
+		.stack_depth = 8,
 	},
-	/* BPF_JMP | BPF_JLT | BPF_K */
 	{
-		"JMP_JLT_K: if (2 < 3) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 2),
-			BPF_JMP_IMM(BPF_JLT, R1, 3, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_DW_XOR_FETCH: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_xor_fetch_reg_pairs,
+		.stack_depth = 8,
 	},
 	{
-		"JMP_JGT_K: Unsigned jump: if (1 < -1) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 1),
-			BPF_JMP_IMM(BPF_JLT, R1, -1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_DW_XCHG: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_xchg_reg_pairs,
+		.stack_depth = 8,
 	},
-	/* BPF_JMP | BPF_JGE | BPF_K */
 	{
-		"JMP_JGE_K: if (3 >= 2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JGE, R1, 2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_DW_CMPXCHG: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_cmpxchg_reg_pairs,
+		.stack_depth = 8,
 	},
-	/* BPF_JMP | BPF_JLE | BPF_K */
+	/* 32-bit ATOMIC register combinations */
 	{
-		"JMP_JLE_K: if (2 <= 3) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 2),
-			BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_W_ADD: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_add_reg_pairs,
+		.stack_depth = 8,
 	},
-	/* BPF_JMP | BPF_JGT | BPF_K jump backwards */
 	{
-		"JMP_JGT_K: if (3 > 2) return 1 (jump backwards)",
-		.u.insns_int = {
-			BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
-			BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
-			BPF_LD_IMM64(R1, 3), /* note: this takes 2 insns */
-			BPF_JMP_IMM(BPF_JGT, R1, 2, -6), /* goto out */
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_W_AND: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_and_reg_pairs,
+		.stack_depth = 8,
 	},
 	{
-		"JMP_JGE_K: if (3 >= 3) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JGE, R1, 3, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_W_OR: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_or_reg_pairs,
+		.stack_depth = 8,
 	},
-	/* BPF_JMP | BPF_JLT | BPF_K jump backwards */
 	{
-		"JMP_JGT_K: if (2 < 3) return 1 (jump backwards)",
-		.u.insns_int = {
-			BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
-			BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
-			BPF_LD_IMM64(R1, 2), /* note: this takes 2 insns */
-			BPF_JMP_IMM(BPF_JLT, R1, 3, -6), /* goto out */
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_W_XOR: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_xor_reg_pairs,
+		.stack_depth = 8,
 	},
 	{
-		"JMP_JLE_K: if (3 <= 3) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_W_ADD_FETCH: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_add_fetch_reg_pairs,
+		.stack_depth = 8,
 	},
-	/* BPF_JMP | BPF_JNE | BPF_K */
 	{
-		"JMP_JNE_K: if (3 != 2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JNE, R1, 2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_W_AND_FETCH: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_and_fetch_reg_pairs,
+		.stack_depth = 8,
 	},
-	/* BPF_JMP | BPF_JEQ | BPF_K */
 	{
-		"JMP_JEQ_K: if (3 == 3) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JEQ, R1, 3, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_W_OR_FETCH: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_or_fetch_reg_pairs,
+		.stack_depth = 8,
 	},
-	/* BPF_JMP | BPF_JSET | BPF_K */
 	{
-		"JMP_JSET_K: if (0x3 & 0x2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_W_XOR_FETCH: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_xor_fetch_reg_pairs,
+		.stack_depth = 8,
 	},
 	{
-		"JMP_JSET_K: if (0x3 & 0xffffffff) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_W_XCHG: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_xchg_reg_pairs,
+		.stack_depth = 8,
 	},
-	/* BPF_JMP | BPF_JSGT | BPF_X */
 	{
-		"JMP_JSGT_X: Signed jump: if (-1 > -2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, -1),
-			BPF_LD_IMM64(R2, -2),
-			BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
+		"ATOMIC_W_CMPXCHG: register combinations",
+		{ },
 		INTERNAL,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_cmpxchg_reg_pairs,
+		.stack_depth = 8,
 	},
+	/* 64-bit ATOMIC magnitudes */
 	{
-		"JMP_JSGT_X: Signed jump: if (-1 > -1) return 0",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_LD_IMM64(R1, -1),
-			BPF_LD_IMM64(R2, -1),
-			BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_DW_ADD: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_add,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_DW_AND: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_and,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_DW_OR: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_or,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_DW_XOR: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_xor,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JSLT | BPF_X */
 	{
-		"JMP_JSLT_X: Signed jump: if (-2 < -1) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, -1),
-			BPF_LD_IMM64(R2, -2),
-			BPF_JMP_REG(BPF_JSLT, R2, R1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_DW_ADD_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_add_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JSLT_X: Signed jump: if (-1 < -1) return 0",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_LD_IMM64(R1, -1),
-			BPF_LD_IMM64(R2, -1),
-			BPF_JMP_REG(BPF_JSLT, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_DW_AND_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_and_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JSGE | BPF_X */
 	{
-		"JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, -1),
-			BPF_LD_IMM64(R2, -2),
-			BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_DW_OR_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_or_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JSGE_X: Signed jump: if (-1 >= -1) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, -1),
-			BPF_LD_IMM64(R2, -1),
-			BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_DW_XOR_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_xor_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JSLE | BPF_X */
 	{
-		"JMP_JSLE_X: Signed jump: if (-2 <= -1) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, -1),
-			BPF_LD_IMM64(R2, -2),
-			BPF_JMP_REG(BPF_JSLE, R2, R1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_DW_XCHG: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_xchg,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JSLE_X: Signed jump: if (-1 <= -1) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, -1),
-			BPF_LD_IMM64(R2, -1),
-			BPF_JMP_REG(BPF_JSLE, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_DW_CMPXCHG: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_cmpxchg64,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
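+	/*
+	 * Unlike the other atomic operations, BPF_CMPXCHG implicitly uses
+	 * R0 as the compare value and result, which is presumably why it
+	 * has the dedicated bpf_fill_cmpxchg64/32 helpers.
+	 */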
-	/* BPF_JMP | BPF_JGT | BPF_X */
+	/* 32-bit ATOMIC magnitudes */
 	{
-		"JMP_JGT_X: if (3 > 2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JGT, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_W_ADD: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_add,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JGT_X: Unsigned jump: if (-1 > 1) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, -1),
-			BPF_LD_IMM64(R2, 1),
-			BPF_JMP_REG(BPF_JGT, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_W_AND: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_and,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JLT | BPF_X */
 	{
-		"JMP_JLT_X: if (2 < 3) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JLT, R2, R1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_W_OR: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_or,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JLT_X: Unsigned jump: if (1 < -1) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, -1),
-			BPF_LD_IMM64(R2, 1),
-			BPF_JMP_REG(BPF_JLT, R2, R1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_W_XOR: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_xor,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JGE | BPF_X */
 	{
-		"JMP_JGE_X: if (3 >= 2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JGE, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_W_ADD_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_add_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JGE_X: if (3 >= 3) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 3),
-			BPF_JMP_REG(BPF_JGE, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_W_AND_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_and_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JLE | BPF_X */
 	{
-		"JMP_JLE_X: if (2 <= 3) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JLE, R2, R1, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_W_OR_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_or_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JLE_X: if (3 <= 3) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 3),
-			BPF_JMP_REG(BPF_JLE, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_W_XOR_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_xor_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		/* Mainly testing JIT + imm64 here. */
-		"JMP_JGE_X: ldimm64 test 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JGE, R1, R2, 2),
-			BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
-			BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_W_XCHG: all operand magnitudes",
 		{ },
-		{ { 0, 0xeeeeeeeeU } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_xchg,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JGE_X: ldimm64 test 2",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JGE, R1, R2, 0),
-			BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"ATOMIC_W_CMPXCHG: all operand magnitudes",
 		{ },
-		{ { 0, 0xffffffffU } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_cmpxchg32,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
+	/* JMP immediate magnitudes */
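+	/*
+	 * Rough shape of the generated jump tests (illustrative only): for
+	 * each operand magnitude, load the value, take the conditional
+	 * jump, and bail out with 0 if the branch decision differs from a
+	 * precomputed expectation; return 1 only if every branch agrees.
+	 */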
 	{
-		"JMP_JGE_X: ldimm64 test 3",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JGE, R1, R2, 4),
-			BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
-			BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"JMP_JSET_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jset_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JLE_X: ldimm64 test 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JLE, R2, R1, 2),
-			BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
-			BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"JMP_JEQ_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jeq_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"JMP_JNE_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jne_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"JMP_JGT_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 0xeeeeeeeeU } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jgt_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JLE_X: ldimm64 test 2",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JLE, R2, R1, 0),
-			BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"JMP_JGE_K: all immediate value magnitudes",
 		{ },
-		{ { 0, 0xffffffffU } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jge_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JLE_X: ldimm64 test 3",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JLE, R2, R1, 4),
-			BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
-			BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"JMP_JLT_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jlt_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JNE | BPF_X */
 	{
-		"JMP_JNE_X: if (3 != 2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JNE, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"JMP_JLE_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jle_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JEQ | BPF_X */
 	{
-		"JMP_JEQ_X: if (3 == 3) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 3),
-			BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"JMP_JSGT_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jsgt_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/* BPF_JMP | BPF_JSET | BPF_X */
 	{
-		"JMP_JSET_X: if (0x3 & 0x2) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JSET, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"JMP_JSGE_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jsge_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"JMP_JSET_X: if (0x3 & 0xffffffff) return 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R0, 0),
-			BPF_LD_IMM64(R1, 3),
-			BPF_LD_IMM64(R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JSET, R1, R2, 1),
-			BPF_EXIT_INSN(),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"JMP_JSLT_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jslt_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	{	/* Mainly checking JIT here. */
-		"BPF_MAXINSNS: Very long conditional jump",
+	{
+		"JMP_JSLE_K: all immediate value magnitudes",
 		{ },
 		INTERNAL | FLAG_NO_DATA,
 		{ },
 		{ { 0, 1 } },
-		.fill_helper = bpf_fill_long_jmp,
+		.fill_helper = bpf_fill_jmp_jsle_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
+	/* JMP register magnitudes */
 	{
-		"JMP_JA: Jump, gap, jump, ...",
+		"JMP_JSET_X: all register value magnitudes",
 		{ },
-		CLASSIC | FLAG_NO_DATA,
+		INTERNAL | FLAG_NO_DATA,
 		{ },
-		{ { 0, 0xababcbac } },
-		.fill_helper = bpf_fill_ja,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jset_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	{	/* Mainly checking JIT here. */
-		"BPF_MAXINSNS: Maximum possible literals",
+	{
+		"JMP_JEQ_X: all register value magnitudes",
 		{ },
-		CLASSIC | FLAG_NO_DATA,
+		INTERNAL | FLAG_NO_DATA,
 		{ },
-		{ { 0, 0xffffffff } },
-		.fill_helper = bpf_fill_maxinsns1,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jeq_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	{	/* Mainly checking JIT here. */
-		"BPF_MAXINSNS: Single literal",
+	{
+		"JMP_JNE_X: all register value magnitudes",
 		{ },
-		CLASSIC | FLAG_NO_DATA,
+		INTERNAL | FLAG_NO_DATA,
 		{ },
-		{ { 0, 0xfefefefe } },
-		.fill_helper = bpf_fill_maxinsns2,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jne_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	{	/* Mainly checking JIT here. */
-		"BPF_MAXINSNS: Run/add until end",
+	{
+		"JMP_JGT_X: all register value magnitudes",
 		{ },
-		CLASSIC | FLAG_NO_DATA,
+		INTERNAL | FLAG_NO_DATA,
 		{ },
-		{ { 0, 0x947bf368 } },
-		.fill_helper = bpf_fill_maxinsns3,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jgt_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"BPF_MAXINSNS: Too many instructions",
+		"JMP_JGE_X: all register value magnitudes",
 		{ },
-		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+		INTERNAL | FLAG_NO_DATA,
 		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jge_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"JMP_JLT_X: all register value magnitudes",
 		{ },
-		.fill_helper = bpf_fill_maxinsns4,
-		.expected_errcode = -EINVAL,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jlt_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	{	/* Mainly checking JIT here. */
-		"BPF_MAXINSNS: Very long jump",
+	{
+		"JMP_JLE_X: all register value magnitudes",
 		{ },
-		CLASSIC | FLAG_NO_DATA,
+		INTERNAL | FLAG_NO_DATA,
 		{ },
-		{ { 0, 0xabababab } },
-		.fill_helper = bpf_fill_maxinsns5,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jle_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	{	/* Mainly checking JIT here. */
-		"BPF_MAXINSNS: Ctx heavy transformations",
+	{
+		"JMP_JSGT_X: all register value magnitudes",
 		{ },
-		CLASSIC,
+		INTERNAL | FLAG_NO_DATA,
 		{ },
-		{
-			{  1, SKB_VLAN_PRESENT },
-			{ 10, SKB_VLAN_PRESENT }
-		},
-		.fill_helper = bpf_fill_maxinsns6,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jsgt_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	{	/* Mainly checking JIT here. */
-		"BPF_MAXINSNS: Call heavy transformations",
+	{
+		"JMP_JSGE_X: all register value magnitudes",
 		{ },
-		CLASSIC | FLAG_NO_DATA,
+		INTERNAL | FLAG_NO_DATA,
 		{ },
-		{ { 1, 0 }, { 10, 0 } },
-		.fill_helper = bpf_fill_maxinsns7,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jsge_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	{	/* Mainly checking JIT here. */
-		"BPF_MAXINSNS: Jump heavy test",
+	{
+		"JMP_JSLT_X: all register value magnitudes",
 		{ },
-		CLASSIC | FLAG_NO_DATA,
+		INTERNAL | FLAG_NO_DATA,
 		{ },
-		{ { 0, 0xffffffff } },
-		.fill_helper = bpf_fill_maxinsns8,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jslt_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	{	/* Mainly checking JIT here. */
-		"BPF_MAXINSNS: Very long jump backwards",
+	{
+		"JMP_JSLE_X: all register value magnitudes",
 		{ },
 		INTERNAL | FLAG_NO_DATA,
 		{ },
-		{ { 0, 0xcbababab } },
-		.fill_helper = bpf_fill_maxinsns9,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp_jsle_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	{	/* Mainly checking JIT here. */
-		"BPF_MAXINSNS: Edge hopping nuthouse",
+	/* JMP32 immediate magnitudes */
+	{
+		"JMP32_JSET_K: all immediate value magnitudes",
 		{ },
 		INTERNAL | FLAG_NO_DATA,
 		{ },
-		{ { 0, 0xabababac } },
-		.fill_helper = bpf_fill_maxinsns10,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jset_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"BPF_MAXINSNS: Jump, gap, jump, ...",
+		"JMP32_JEQ_K: all immediate value magnitudes",
 		{ },
-		CLASSIC | FLAG_NO_DATA,
+		INTERNAL | FLAG_NO_DATA,
 		{ },
-		{ { 0, 0xababcbac } },
-		.fill_helper = bpf_fill_maxinsns11,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jeq_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"BPF_MAXINSNS: jump over MSH",
+		"JMP32_JNE_K: all immediate value magnitudes",
 		{ },
-		CLASSIC | FLAG_EXPECTED_FAIL,
-		{ 0xfa, 0xfb, 0xfc, 0xfd, },
-		{ { 4, 0xabababab } },
-		.fill_helper = bpf_fill_maxinsns12,
-		.expected_errcode = -EINVAL,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jne_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"BPF_MAXINSNS: exec all MSH",
+		"JMP32_JGT_K: all immediate value magnitudes",
 		{ },
-		CLASSIC,
-		{ 0xfa, 0xfb, 0xfc, 0xfd, },
-		{ { 4, 0xababab83 } },
-		.fill_helper = bpf_fill_maxinsns13,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jgt_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"BPF_MAXINSNS: ld_abs+get_processor_id",
+		"JMP32_JGE_K: all immediate value magnitudes",
 		{ },
-		CLASSIC,
+		INTERNAL | FLAG_NO_DATA,
 		{ },
-		{ { 1, 0xbee } },
-		.fill_helper = bpf_fill_ld_abs_get_processor_id,
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jge_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/*
-	 * LD_IND / LD_ABS on fragmented SKBs
-	 */
 	{
-		"LD_IND byte frag",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x0),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_SKB_FRAG,
+		"JMP32_JLT_K: all immediate value magnitudes",
 		{ },
-		{ {0x40, 0x42} },
-		.frag_data = {
-			0x42, 0x00, 0x00, 0x00,
-			0x43, 0x44, 0x00, 0x00,
-			0x21, 0x07, 0x19, 0x83,
-		},
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jlt_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"LD_IND halfword frag",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x4),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_SKB_FRAG,
+		"JMP32_JLE_K: all immediate value magnitudes",
 		{ },
-		{ {0x40, 0x4344} },
-		.frag_data = {
-			0x42, 0x00, 0x00, 0x00,
-			0x43, 0x44, 0x00, 0x00,
-			0x21, 0x07, 0x19, 0x83,
-		},
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jle_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"LD_IND word frag",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x8),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_SKB_FRAG,
+		"JMP32_JSGT_K: all immediate value magnitudes",
 		{ },
-		{ {0x40, 0x21071983} },
-		.frag_data = {
-			0x42, 0x00, 0x00, 0x00,
-			0x43, 0x44, 0x00, 0x00,
-			0x21, 0x07, 0x19, 0x83,
-		},
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jsgt_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"LD_IND halfword mixed head/frag",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_SKB_FRAG,
-		{ [0x3e] = 0x25, [0x3f] = 0x05, },
-		{ {0x40, 0x0519} },
-		.frag_data = { 0x19, 0x82 },
+		"JMP32_JSGE_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jsge_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"LD_IND word mixed head/frag",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_SKB_FRAG,
-		{ [0x3e] = 0x25, [0x3f] = 0x05, },
-		{ {0x40, 0x25051982} },
-		.frag_data = { 0x19, 0x82 },
+		"JMP32_JSLT_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jslt_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"LD_ABS byte frag",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x40),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_SKB_FRAG,
+		"JMP32_JSLE_K: all immediate value magnitudes",
 		{ },
-		{ {0x40, 0x42} },
-		.frag_data = {
-			0x42, 0x00, 0x00, 0x00,
-			0x43, 0x44, 0x00, 0x00,
-			0x21, 0x07, 0x19, 0x83,
-		},
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jsle_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
+	/* JMP32 register magnitudes */
 	{
-		"LD_ABS halfword frag",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x44),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_SKB_FRAG,
+		"JMP32_JSET_X: all register value magnitudes",
 		{ },
-		{ {0x40, 0x4344} },
-		.frag_data = {
-			0x42, 0x00, 0x00, 0x00,
-			0x43, 0x44, 0x00, 0x00,
-			0x21, 0x07, 0x19, 0x83,
-		},
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jset_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"LD_ABS word frag",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x48),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_SKB_FRAG,
+		"JMP32_JEQ_X: all register value magnitudes",
 		{ },
-		{ {0x40, 0x21071983} },
-		.frag_data = {
-			0x42, 0x00, 0x00, 0x00,
-			0x43, 0x44, 0x00, 0x00,
-			0x21, 0x07, 0x19, 0x83,
-		},
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jeq_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"LD_ABS halfword mixed head/frag",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_SKB_FRAG,
-		{ [0x3e] = 0x25, [0x3f] = 0x05, },
-		{ {0x40, 0x0519} },
-		.frag_data = { 0x19, 0x82 },
+		"JMP32_JNE_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jne_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"LD_ABS word mixed head/frag",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3e),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_SKB_FRAG,
-		{ [0x3e] = 0x25, [0x3f] = 0x05, },
-		{ {0x40, 0x25051982} },
-		.frag_data = { 0x19, 0x82 },
+		"JMP32_JGT_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jgt_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
-	/*
-	 * LD_IND / LD_ABS on non fragmented SKBs
-	 */
 	{
-		/*
-		 * this tests that the JIT/interpreter correctly resets X
-		 * before using it in an LD_IND instruction.
-		 */
-		"LD_IND byte default X",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x1] = 0x42 },
-		{ {0x40, 0x42 } },
+		"JMP32_JGE_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jge_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"LD_IND byte positive offset",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x40, 0x82 } },
+		"JMP32_JLT_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jlt_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"LD_IND byte negative offset",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x40, 0x05 } },
+		"JMP32_JLE_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jle_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"LD_IND byte positive offset, all ff",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
-		{ {0x40, 0xff } },
+		"JMP32_JSGT_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jsgt_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"LD_IND byte positive offset, out of bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x3f, 0 }, },
+		"JMP32_JSGE_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jsge_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"LD_IND byte negative offset, out of bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x3f),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x3f, 0 } },
+		"JMP32_JSLT_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jslt_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
 	{
-		"LD_IND byte negative offset, multiple calls",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 1),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 2),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 3),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 4),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x40, 0x82 }, },
+		"JMP32_JSLE_X: all register value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_jmp32_jsle_reg,
+		.nr_testruns = NR_PATTERN_RUNS,
 	},
+	/* Conditional jumps with constant decision */
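+	/* Branch outcomes are statically known; a JIT may optimize these away */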
 	{
-		"LD_IND halfword positive offset",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x2),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
+		"JMP_JSET_K: imm = 0 -> never taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JSET, R1, 0, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		{ {0x40, 0xdd88 } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0 } },
 	},
 	{
-		"LD_IND halfword negative offset",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x2),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
+		"JMP_JLT_K: imm = 0 -> never taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JLT, R1, 0, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		{ {0x40, 0xbb66 } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0 } },
 	},
 	{
-		"LD_IND halfword unaligned",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
+		"JMP_JGE_K: imm = 0 -> always taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JGE, R1, 0, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		{ {0x40, 0x66cc } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
 	},
 	{
-		"LD_IND halfword positive offset, all ff",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x3d),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"JMP_JGT_K: imm = 0xffffffff -> never taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JGT, R1, U32_MAX, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
-		{ {0x40, 0xffff } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0 } },
 	},
 	{
-		"LD_IND halfword positive offset, out of bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"JMP_JLE_K: imm = 0xffffffff -> always taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_IMM(BPF_JLE, R1, U32_MAX, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x3f, 0 }, },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
 	},
 	{
-		"LD_IND halfword negative offset, out of bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x3f),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"JMP32_JSGT_K: imm = 0x7fffffff -> never taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP32_IMM(BPF_JSGT, R1, S32_MAX, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x3f, 0 } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0 } },
 	},
 	{
-		"LD_IND word positive offset",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x4),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
-			[0x24] = 0xee, [0x25] = 0x99,
-			[0x26] = 0xff, [0x27] = 0xaa,
+		"JMP32_JSGE_K: imm = -0x80000000 -> always taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP32_IMM(BPF_JSGE, R1, S32_MIN, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		{ {0x40, 0xee99ffaa } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
 	},
 	{
-		"LD_IND word negative offset",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x4),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
-			[0x24] = 0xee, [0x25] = 0x99,
-			[0x26] = 0xff, [0x27] = 0xaa,
+		"JMP32_JSLT_K: imm = -0x80000000 -> never taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP32_IMM(BPF_JSLT, R1, S32_MIN, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		{ {0x40, 0xaa55bb66 } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0 } },
 	},
 	{
-		"LD_IND word unaligned (addr & 3 == 2)",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"JMP32_JSLE_K: imm = 0x7fffffff -> always taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP32_IMM(BPF_JSLE, R1, S32_MAX, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
-			[0x24] = 0xee, [0x25] = 0x99,
-			[0x26] = 0xff, [0x27] = 0xaa,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JMP_JEQ_X: dst = src -> always taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_REG(BPF_JEQ, R1, R1, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		{ {0x40, 0xbb66cc77 } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
 	},
 	{
-		"LD_IND word unaligned (addr & 3 == 1)",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"JMP_JGE_X: dst = src -> always taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_REG(BPF_JGE, R1, R1, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
-			[0x24] = 0xee, [0x25] = 0x99,
-			[0x26] = 0xff, [0x27] = 0xaa,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JMP_JLE_X: dst = src -> always taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_REG(BPF_JLE, R1, R1, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		{ {0x40, 0x55bb66cc } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
 	},
 	{
-		"LD_IND word unaligned (addr & 3 == 3)",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"JMP_JSGE_X: dst = src -> always taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_REG(BPF_JSGE, R1, R1, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
-			[0x24] = 0xee, [0x25] = 0x99,
-			[0x26] = 0xff, [0x27] = 0xaa,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JMP_JSLE_X: dst = src -> always taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_REG(BPF_JSLE, R1, R1, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		{ {0x40, 0x66cc77dd } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
 	},
 	{
-		"LD_IND word positive offset, all ff",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"JMP_JNE_X: dst = src -> never taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_REG(BPF_JNE, R1, R1, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
-		{ {0x40, 0xffffffff } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0 } },
 	},
 	{
-		"LD_IND word positive offset, out of bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"JMP_JGT_X: dst = src -> never taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_REG(BPF_JGT, R1, R1, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x3f, 0 }, },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0 } },
 	},
 	{
-		"LD_IND word negative offset, out of bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
-			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3f),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"JMP_JLT_X: dst = src -> never taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_REG(BPF_JLT, R1, R1, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x3f, 0 } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0 } },
 	},
 	{
-		"LD_ABS byte",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"JMP_JSGT_X: dst = src -> never taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_REG(BPF_JSGT, R1, R1, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
-			[0x24] = 0xee, [0x25] = 0x99,
-			[0x26] = 0xff, [0x27] = 0xaa,
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0 } },
+	},
+	{
+		"JMP_JSLT_X: dst = src -> never taken",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_JMP_REG(BPF_JSLT, R1, R1, 1),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
 		},
-		{ {0x40, 0xcc } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 0 } },
 	},
+	/* Short relative jumps */
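+	/* Small forward offsets, including the offset=0 fall-through case */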
 	{
-		"LD_ABS byte positive offset, all ff",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"Short relative jump: offset=0",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, -1),
 		},
-		CLASSIC,
-		{ [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
-		{ {0x40, 0xff } },
+		INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+		{ },
+		{ { 0, 0 } },
 	},
 	{
-		"LD_ABS byte positive offset, out of bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"Short relative jump: offset=1",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0, 1),
+			BPF_ALU32_IMM(BPF_ADD, R0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, -1),
 		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x3f, 0 }, },
+		INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+		{ },
+		{ { 0, 0 } },
 	},
 	{
-		"LD_ABS byte negative offset, out of bounds load",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, -1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"Short relative jump: offset=2",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+			BPF_ALU32_IMM(BPF_ADD, R0, 1),
+			BPF_ALU32_IMM(BPF_ADD, R0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, -1),
 		},
-		CLASSIC | FLAG_EXPECTED_FAIL,
-		.expected_errcode = -EINVAL,
+		INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+		{ },
+		{ { 0, 0 } },
 	},
 	{
-		"LD_ABS byte negative offset, in bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"Short relative jump: offset=3",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0, 3),
+			BPF_ALU32_IMM(BPF_ADD, R0, 1),
+			BPF_ALU32_IMM(BPF_ADD, R0, 1),
+			BPF_ALU32_IMM(BPF_ADD, R0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, -1),
 		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x40, 0x82 }, },
+		INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+		{ },
+		{ { 0, 0 } },
 	},
 	{
-		"LD_ABS byte negative offset, out of bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		"Short relative jump: offset=4",
+		.u.insns_int = {
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0, 4),
+			BPF_ALU32_IMM(BPF_ADD, R0, 1),
+			BPF_ALU32_IMM(BPF_ADD, R0, 1),
+			BPF_ALU32_IMM(BPF_ADD, R0, 1),
+			BPF_ALU32_IMM(BPF_ADD, R0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, -1),
 		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x3f, 0 }, },
+		INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+		{ },
+		{ { 0, 0 } },
 	},
+	/* Conditional branch conversions */
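+	/* Long conditional jumps that may exceed the range of a short branch */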
 	{
-		"LD_ABS byte negative offset, multiple calls",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3c),
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3d),
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3e),
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x40, 0x82 }, },
+		"Long conditional jump: taken at runtime",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_max_jmp_taken,
 	},
 	{
-		"LD_ABS halfword",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
-			[0x24] = 0xee, [0x25] = 0x99,
-			[0x26] = 0xff, [0x27] = 0xaa,
-		},
-		{ {0x40, 0xdd88 } },
+		"Long conditional jump: not taken at runtime",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 2 } },
+		.fill_helper = bpf_fill_max_jmp_not_taken,
 	},
 	{
-		"LD_ABS halfword unaligned",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x25),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
-			[0x24] = 0xee, [0x25] = 0x99,
-			[0x26] = 0xff, [0x27] = 0xaa,
-		},
-		{ {0x40, 0x99ff } },
+		"Long conditional jump: always taken, known at JIT time",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_max_jmp_always_taken,
 	},
 	{
-		"LD_ABS halfword positive offset, all ff",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3e),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
-		{ {0x40, 0xffff } },
+		"Long conditional jump: never taken, known at JIT time",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 2 } },
+		.fill_helper = bpf_fill_max_jmp_never_taken,
 	},
+	/* Staggered jump sequences, immediate */
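+	/* Sequences of jumps with increasing offsets, forward and backward */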
 	{
-		"LD_ABS halfword positive offset, out of bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x3f, 0 }, },
+		"Staggered jumps: JMP_JA",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_ja,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LD_ABS halfword negative offset, out of bounds load",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, -1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_EXPECTED_FAIL,
-		.expected_errcode = -EINVAL,
+		"Staggered jumps: JMP_JEQ_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jeq_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LD_ABS halfword negative offset, in bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x40, 0x1982 }, },
+		"Staggered jumps: JMP_JNE_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jne_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LD_ABS halfword negative offset, out of bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x3f, 0 }, },
+		"Staggered jumps: JMP_JSET_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jset_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LD_ABS word",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
-			[0x24] = 0xee, [0x25] = 0x99,
-			[0x26] = 0xff, [0x27] = 0xaa,
-		},
-		{ {0x40, 0xaa55bb66 } },
+		"Staggered jumps: JMP_JGT_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jgt_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LD_ABS word unaligned (addr & 3 == 2)",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x22),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
-			[0x24] = 0xee, [0x25] = 0x99,
-			[0x26] = 0xff, [0x27] = 0xaa,
-		},
-		{ {0x40, 0xdd88ee99 } },
+		"Staggered jumps: JMP_JGE_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jge_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LD_ABS word unaligned (addr & 3 == 1)",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x21),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
-			[0x24] = 0xee, [0x25] = 0x99,
-			[0x26] = 0xff, [0x27] = 0xaa,
-		},
-		{ {0x40, 0x77dd88ee } },
+		"Staggered jumps: JMP_JLT_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jlt_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LD_ABS word unaligned (addr & 3 == 3)",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x23),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{
-			[0x1c] = 0xaa, [0x1d] = 0x55,
-			[0x1e] = 0xbb, [0x1f] = 0x66,
-			[0x20] = 0xcc, [0x21] = 0x77,
-			[0x22] = 0xdd, [0x23] = 0x88,
-			[0x24] = 0xee, [0x25] = 0x99,
-			[0x26] = 0xff, [0x27] = 0xaa,
-		},
-		{ {0x40, 0x88ee99ff } },
+		"Staggered jumps: JMP_JLE_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jle_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
+	},
+	{
+		"Staggered jumps: JMP_JSGT_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jsgt_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
+	},
+	{
+		"Staggered jumps: JMP_JSGE_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jsge_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LD_ABS word positive offset, all ff",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3c),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0xff, [0x3d] = 0xff,  [0x3e] = 0xff, [0x3f] = 0xff },
-		{ {0x40, 0xffffffff } },
+		"Staggered jumps: JMP_JSLT_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jslt_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LD_ABS word positive offset, out of bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3f),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x3f, 0 }, },
+		"Staggered jumps: JMP_JSLE_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jsle_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
+	/* Staggered jump sequences, register */
 	{
-		"LD_ABS word negative offset, out of bounds load",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, -1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_EXPECTED_FAIL,
-		.expected_errcode = -EINVAL,
+		"Staggered jumps: JMP_JEQ_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jeq_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LD_ABS word negative offset, in bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x40, 0x25051982 }, },
+		"Staggered jumps: JMP_JNE_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jne_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LD_ABS word negative offset, out of bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x3f, 0 }, },
+		"Staggered jumps: JMP_JSET_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jset_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LDX_MSH standalone, preserved A",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
-			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x40, 0xffeebbaa }, },
+		"Staggered jumps: JMP_JGT_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jgt_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LDX_MSH standalone, preserved A 2",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 0x175e9d63),
-			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
-			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3d),
-			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
-			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3f),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x40, 0x175e9d63 }, },
+		"Staggered jumps: JMP_JGE_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jge_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LDX_MSH standalone, test result 1",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
-			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x40, 0x14 }, },
+		"Staggered jumps: JMP_JLT_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jlt_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LDX_MSH standalone, test result 2",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
-			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x40, 0x24 }, },
+		"Staggered jumps: JMP_JLE_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jle_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LDX_MSH standalone, negative offset",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
-			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, -1),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x40, 0 }, },
+		"Staggered jumps: JMP_JSGT_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jsgt_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LDX_MSH standalone, negative offset 2",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
-			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, SKF_LL_OFF + 0x3e),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x40, 0x24 }, },
+		"Staggered jumps: JMP_JSGE_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jsge_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"LDX_MSH standalone, out of bounds",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
-			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x40),
-			BPF_STMT(BPF_MISC | BPF_TXA, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC,
-		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
-		{ {0x40, 0 }, },
+		"Staggered jumps: JMP_JSLT_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jslt_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
-	/*
-	 * verify that the interpreter or JIT correctly sets A and X
-	 * to 0.
-	 */
 	{
-		"ADD default X",
-		.u.insns = {
-			/*
-			 * A = 0x42
-			 * A = A + X
-			 * ret A
-			 */
-			BPF_STMT(BPF_LD | BPF_IMM, 0x42),
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{},
-		{ {0x1, 0x42 } },
+		"Staggered jumps: JMP_JSLE_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jsle_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
+	/* Staggered jump sequences, JMP32 immediate */
 	{
-		"ADD default A",
-		.u.insns = {
-			/*
-			 * A = A + 0x42
-			 * ret A
-			 */
-			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0x42),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{},
-		{ {0x1, 0x42 } },
+		"Staggered jumps: JMP32_JEQ_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jeq32_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"SUB default X",
-		.u.insns = {
-			/*
-			 * A = 0x66
-			 * A = A - X
-			 * ret A
-			 */
-			BPF_STMT(BPF_LD | BPF_IMM, 0x66),
-			BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{},
-		{ {0x1, 0x66 } },
+		"Staggered jumps: JMP32_JNE_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jne32_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"SUB default A",
-		.u.insns = {
-			/*
-			 * A = A - -0x66
-			 * ret A
-			 */
-			BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, -0x66),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{},
-		{ {0x1, 0x66 } },
+		"Staggered jumps: JMP32_JSET_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jset32_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"MUL default X",
-		.u.insns = {
-			/*
-			 * A = 0x42
-			 * A = A * X
-			 * ret A
-			 */
-			BPF_STMT(BPF_LD | BPF_IMM, 0x42),
-			BPF_STMT(BPF_ALU | BPF_MUL | BPF_X, 0),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{},
-		{ {0x1, 0x0 } },
+		"Staggered jumps: JMP32_JGT_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jgt32_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"MUL default A",
-		.u.insns = {
-			/*
-			 * A = A * 0x66
-			 * ret A
-			 */
-			BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 0x66),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{},
-		{ {0x1, 0x0 } },
+		"Staggered jumps: JMP32_JGE_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jge32_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
+	},
+	{
+		"Staggered jumps: JMP32_JLT_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jlt32_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
+	},
+	{
+		"Staggered jumps: JMP32_JLE_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jle32_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"DIV default X",
-		.u.insns = {
-			/*
-			 * A = 0x42
-			 * A = A / X ; this halt the filter execution if X is 0
-			 * ret 0x42
-			 */
-			BPF_STMT(BPF_LD | BPF_IMM, 0x42),
-			BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
-			BPF_STMT(BPF_RET | BPF_K, 0x42),
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{},
-		{ {0x1, 0x0 } },
+		"Staggered jumps: JMP32_JSGT_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jsgt32_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"DIV default A",
-		.u.insns = {
-			/*
-			 * A = A / 1
-			 * ret A
-			 */
-			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{},
-		{ {0x1, 0x0 } },
+		"Staggered jumps: JMP32_JSGE_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jsge32_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"MOD default X",
-		.u.insns = {
-			/*
-			 * A = 0x42
-			 * A = A mod X ; this halt the filter execution if X is 0
-			 * ret 0x42
-			 */
-			BPF_STMT(BPF_LD | BPF_IMM, 0x42),
-			BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0),
-			BPF_STMT(BPF_RET | BPF_K, 0x42),
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{},
-		{ {0x1, 0x0 } },
+		"Staggered jumps: JMP32_JSLT_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jslt32_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"MOD default A",
-		.u.insns = {
-			/*
-			 * A = A mod 1
-			 * ret A
-			 */
-			BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x1),
-			BPF_STMT(BPF_RET | BPF_A, 0x0),
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{},
-		{ {0x1, 0x0 } },
+		"Staggered jumps: JMP32_JSLE_K",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jsle32_imm,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
+	/* Staggered jump sequences, JMP32 register */
 	{
-		"JMP EQ default A",
-		.u.insns = {
-			/*
-			 * cmp A, 0x0, 0, 1
-			 * ret 0x42
-			 * ret 0x66
-			 */
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0, 0, 1),
-			BPF_STMT(BPF_RET | BPF_K, 0x42),
-			BPF_STMT(BPF_RET | BPF_K, 0x66),
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{},
-		{ {0x1, 0x42 } },
+		"Staggered jumps: JMP32_JEQ_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jeq32_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"JMP EQ default X",
-		.u.insns = {
-			/*
-			 * A = 0x0
-			 * cmp A, X, 0, 1
-			 * ret 0x42
-			 * ret 0x66
-			 */
-			BPF_STMT(BPF_LD | BPF_IMM, 0x0),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0x0, 0, 1),
-			BPF_STMT(BPF_RET | BPF_K, 0x42),
-			BPF_STMT(BPF_RET | BPF_K, 0x66),
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{},
-		{ {0x1, 0x42 } },
+		"Staggered jumps: JMP32_JNE_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jne32_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
-	/* Checking interpreter vs JIT wrt signed extended imms. */
 	{
-		"JNE signed compare, test 1",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
-			BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
-			BPF_MOV64_REG(R2, R1),
-			BPF_ALU64_REG(BPF_AND, R2, R3),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_JMP_IMM(BPF_JNE, R2, -17104896, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 2),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"Staggered jumps: JMP32_JSET_X",
 		{ },
-		{ { 0, 1 } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jset32_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"JNE signed compare, test 2",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
-			BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
-			BPF_MOV64_REG(R2, R1),
-			BPF_ALU64_REG(BPF_AND, R2, R3),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_JMP_IMM(BPF_JNE, R2, 0xfefb0000, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 2),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"Staggered jumps: JMP32_JGT_X",
 		{ },
-		{ { 0, 1 } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jgt32_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"JNE signed compare, test 3",
-		.u.insns_int = {
-			BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
-			BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
-			BPF_ALU32_IMM(BPF_MOV, R4, 0xfefb0000),
-			BPF_MOV64_REG(R2, R1),
-			BPF_ALU64_REG(BPF_AND, R2, R3),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_JMP_REG(BPF_JNE, R2, R4, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 2),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"Staggered jumps: JMP32_JGE_X",
 		{ },
-		{ { 0, 2 } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jge32_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"JNE signed compare, test 4",
-		.u.insns_int = {
-			BPF_LD_IMM64(R1, -17104896),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_JMP_IMM(BPF_JNE, R1, -17104896, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 2),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"Staggered jumps: JMP32_JLT_X",
 		{ },
-		{ { 0, 2 } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jlt32_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"JNE signed compare, test 5",
-		.u.insns_int = {
-			BPF_LD_IMM64(R1, 0xfefb0000),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_JMP_IMM(BPF_JNE, R1, 0xfefb0000, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 2),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"Staggered jumps: JMP32_JLE_X",
 		{ },
-		{ { 0, 1 } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jle32_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"JNE signed compare, test 6",
-		.u.insns_int = {
-			BPF_LD_IMM64(R1, 0x7efb0000),
-			BPF_ALU32_IMM(BPF_MOV, R0, 1),
-			BPF_JMP_IMM(BPF_JNE, R1, 0x7efb0000, 1),
-			BPF_ALU32_IMM(BPF_MOV, R0, 2),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
+		"Staggered jumps: JMP32_JSGT_X",
 		{ },
-		{ { 0, 2 } },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jsgt32_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 	{
-		"JNE signed compare, test 7",
-		.u.insns = {
-			BPF_STMT(BPF_LD | BPF_IMM, 0xffff0000),
-			BPF_STMT(BPF_MISC | BPF_TAX, 0),
-			BPF_STMT(BPF_LD | BPF_IMM, 0xfefbbc12),
-			BPF_STMT(BPF_ALU | BPF_AND | BPF_X, 0),
-			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0xfefb0000, 1, 0),
-			BPF_STMT(BPF_RET | BPF_K, 1),
-			BPF_STMT(BPF_RET | BPF_K, 2),
-		},
-		CLASSIC | FLAG_NO_DATA,
-		{},
-		{ { 0, 2 } },
+		"Staggered jumps: JMP32_JSGE_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jsge32_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
+	},
+	{
+		"Staggered jumps: JMP32_JSLT_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jslt32_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
+	},
+	{
+		"Staggered jumps: JMP32_JSLE_X",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+		.fill_helper = bpf_fill_staggered_jsle32_reg,
+		.nr_testruns = NR_STAGGERED_JMP_RUNS,
 	},
 };
 
@@ -8576,6 +14099,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
 		fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
 		memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
 		fp->aux->stack_depth = tests[which].stack_depth;
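+		/* Tell the JIT whether verifier-driven zero-extension applies */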
+		fp->aux->verifier_zext = !!(tests[which].aux &
+					    FLAG_VERIFIER_ZEXT);
 
 		/* We cannot error here as we don't need type compatibility
 		 * checks.
@@ -8631,6 +14156,9 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
 {
 	int err_cnt = 0, i, runs = MAX_TESTRUNS;
 
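+	/* A test case may request fewer runs, but never more than MAX_TESTRUNS */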
+	if (test->nr_testruns)
+		runs = min(test->nr_testruns, MAX_TESTRUNS);
+
 	for (i = 0; i < MAX_SUBTESTS; i++) {
 		void *data;
 		u64 duration;
@@ -8690,8 +14218,6 @@ static __init int find_test_index(const char *test_name)
 
 static __init int prepare_bpf_tests(void)
 {
-	int i;
-
 	if (test_id >= 0) {
 		/*
 		 * if a test_id was specified, use test_range to
@@ -8735,23 +14261,11 @@ static __init int prepare_bpf_tests(void)
 		}
 	}
 
-	for (i = 0; i < ARRAY_SIZE(tests); i++) {
-		if (tests[i].fill_helper &&
-		    tests[i].fill_helper(&tests[i]) < 0)
-			return -ENOMEM;
-	}
-
 	return 0;
 }
 
 static __init void destroy_bpf_tests(void)
 {
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(tests); i++) {
-		if (tests[i].fill_helper)
-			kfree(tests[i].u.ptr.insns);
-	}
 }
 
 static bool exclude_test(int test_id)
@@ -8956,7 +14470,19 @@ static __init int test_bpf(void)
 
 		pr_info("#%d %s ", i, tests[i].descr);
 
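+		/* Generate filler programs on demand and free them after use */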
+		if (tests[i].fill_helper &&
+		    tests[i].fill_helper(&tests[i]) < 0) {
+			pr_cont("FAIL to fill prog\n");
+			continue;
+		}
+
 		fp = generate_filter(i, &err);
+
+		if (tests[i].fill_helper) {
+			kfree(tests[i].u.ptr.insns);
+			tests[i].u.ptr.insns = NULL;
+		}
+
 		if (fp == NULL) {
 			if (err == 0) {
 				pass_cnt++;
@@ -8993,10 +14519,15 @@ static __init int test_bpf(void)
 struct tail_call_test {
 	const char *descr;
 	struct bpf_insn insns[MAX_INSNS];
+	int flags;
 	int result;
 	int stack_depth;
 };
 
+/* Flags that can be passed to tail call test cases */
+#define FLAG_NEED_STATE		BIT(0) /* Pass a writable state word as ctx */
+#define FLAG_RESULT_IN_STATE	BIT(1) /* Result is the state word, not R0 */
+
 /*
  * Magic marker used in test snippets for tail calls below.
  * BPF_LD/MOV to R2 and R2 with this immediate value is replaced
@@ -9016,6 +14547,30 @@ struct tail_call_test {
 		     offset, TAIL_CALL_MARKER),	       \
 	BPF_JMP_IMM(BPF_TAIL_CALL, 0, 0, 0)
 
+/*
+ * A test function to be called from a BPF program, clobbering a lot of
+ * CPU registers in the process. A JITed BPF program calling this function
+ * must save and restore any caller-saved registers it uses for internal
+ * state, for example the current tail call count.
+ */
+BPF_CALL_1(bpf_test_func, u64, arg)
+{
+	char buf[64];
+	long a = 0;
+	long b = 1;
+	long c = 2;
+	long d = 3;
+	long e = 4;
+	long f = 5;
+	long g = 6;
+	long h = 7;
+
+	return snprintf(buf, sizeof(buf),
+			"%ld %lu %lx %ld %lu %lx %ld %lu %x",
+			a, b, c, d, e, f, g, h, (int)arg);
+}
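+/* Expose the test function via an otherwise unused helper function ID */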
+#define BPF_FUNC_test_func __BPF_FUNC_MAX_ID
+
 /*
  * Tail call tests. Each test case may call any other test in the table,
  * including itself, specified as a relative index offset from the calling
@@ -9066,32 +14621,60 @@ static struct tail_call_test tail_call_tests[] = {
 	{
 		"Tail call error path, max count reached",
 		.insns = {
-			BPF_ALU64_IMM(BPF_ADD, R1, 1),
-			BPF_ALU64_REG(BPF_MOV, R0, R1),
+			BPF_LDX_MEM(BPF_W, R2, R1, 0),
+			BPF_ALU64_IMM(BPF_ADD, R2, 1),
+			BPF_STX_MEM(BPF_W, R1, R2, 0),
+			TAIL_CALL(0),
+			BPF_EXIT_INSN(),
+		},
+		.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+		.result = (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS,
+	},
+	{
+		"Tail call count preserved across function calls",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, R2, R1, 0),
+			BPF_ALU64_IMM(BPF_ADD, R2, 1),
+			BPF_STX_MEM(BPF_W, R1, R2, 0),
+			BPF_STX_MEM(BPF_DW, R10, R1, -8),
+			BPF_CALL_REL(BPF_FUNC_get_numa_node_id),
+			BPF_CALL_REL(BPF_FUNC_ktime_get_ns),
+			BPF_CALL_REL(BPF_FUNC_ktime_get_boot_ns),
+			BPF_CALL_REL(BPF_FUNC_ktime_get_coarse_ns),
+			BPF_CALL_REL(BPF_FUNC_jiffies64),
+			BPF_CALL_REL(BPF_FUNC_test_func),
+			BPF_LDX_MEM(BPF_DW, R1, R10, -8),
+			BPF_ALU32_REG(BPF_MOV, R0, R1),
 			TAIL_CALL(0),
 			BPF_EXIT_INSN(),
 		},
-		.result = MAX_TAIL_CALL_CNT + 1,
+		.stack_depth = 8,
+		.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+		.result = (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS,
 	},
 	{
 		"Tail call error path, NULL target",
 		.insns = {
-			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			BPF_LDX_MEM(BPF_W, R2, R1, 0),
+			BPF_ALU64_IMM(BPF_ADD, R2, 1),
+			BPF_STX_MEM(BPF_W, R1, R2, 0),
 			TAIL_CALL(TAIL_CALL_NULL),
-			BPF_ALU64_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
-		.result = 1,
+		.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+		.result = MAX_TESTRUNS,
 	},
 	{
 		"Tail call error path, index out of range",
 		.insns = {
-			BPF_ALU64_IMM(BPF_MOV, R0, -1),
+			BPF_LDX_MEM(BPF_W, R2, R1, 0),
+			BPF_ALU64_IMM(BPF_ADD, R2, 1),
+			BPF_STX_MEM(BPF_W, R1, R2, 0),
 			TAIL_CALL(TAIL_CALL_INVALID),
-			BPF_ALU64_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
 		},
-		.result = 1,
+		.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+		.result = MAX_TESTRUNS,
 	},
 };
 
@@ -9147,17 +14730,19 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
 		/* Relocate runtime tail call offsets and addresses */
 		for (i = 0; i < len; i++) {
 			struct bpf_insn *insn = &fp->insnsi[i];
-
-			if (insn->imm != TAIL_CALL_MARKER)
-				continue;
+			long addr = 0;
 
 			switch (insn->code) {
 			case BPF_LD | BPF_DW | BPF_IMM:
+				if (insn->imm != TAIL_CALL_MARKER)
+					break;
 				insn[0].imm = (u32)(long)progs;
 				insn[1].imm = ((u64)(long)progs) >> 32;
 				break;
 
 			case BPF_ALU | BPF_MOV | BPF_K:
+				if (insn->imm != TAIL_CALL_MARKER)
+					break;
 				if (insn->off == TAIL_CALL_NULL)
 					insn->imm = ntests;
 				else if (insn->off == TAIL_CALL_INVALID)
@@ -9165,6 +14750,38 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
 				else
 					insn->imm = which + insn->off;
 				insn->off = 0;
+				break;
+
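+			/* Convert BPF_PSEUDO_CALL markers into direct kernel calls */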
+			case BPF_JMP | BPF_CALL:
+				if (insn->src_reg != BPF_PSEUDO_CALL)
+					break;
+				switch (insn->imm) {
+				case BPF_FUNC_get_numa_node_id:
+					addr = (long)&numa_node_id;
+					break;
+				case BPF_FUNC_ktime_get_ns:
+					addr = (long)&ktime_get_ns;
+					break;
+				case BPF_FUNC_ktime_get_boot_ns:
+					addr = (long)&ktime_get_boot_fast_ns;
+					break;
+				case BPF_FUNC_ktime_get_coarse_ns:
+					addr = (long)&ktime_get_coarse_ns;
+					break;
+				case BPF_FUNC_jiffies64:
+					addr = (long)&get_jiffies_64;
+					break;
+				case BPF_FUNC_test_func:
+					addr = (long)&bpf_test_func;
+					break;
+				default:
+					err = -EFAULT;
+					goto out_err;
+				}
+				*insn = BPF_EMIT_CALL(addr);
+				if ((long)__bpf_call_base + insn->imm != addr)
+					*insn = BPF_JMP_A(0); /* Skip: NOP */
+				break;
 			}
 		}
 
@@ -9197,6 +14814,8 @@ static __init int test_tail_calls(struct bpf_array *progs)
 	for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) {
 		struct tail_call_test *test = &tail_call_tests[i];
 		struct bpf_prog *fp = progs->ptrs[i];
+		int *data = NULL;
+		int state = 0;
 		u64 duration;
 		int ret;
 
@@ -9213,7 +14832,11 @@ static __init int test_tail_calls(struct bpf_array *progs)
 		if (fp->jited)
 			jit_cnt++;
 
-		ret = __run_one(fp, NULL, MAX_TESTRUNS, &duration);
+		if (test->flags & FLAG_NEED_STATE)
+			data = &state;
+		ret = __run_one(fp, data, MAX_TESTRUNS, &duration);
+		if (test->flags & FLAG_RESULT_IN_STATE)
+			ret = state;
 		if (ret == test->result) {
 			pr_cont("%lld PASS", duration);
 			pass_cnt++;
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index bf1cb629f61891575235c9e860cf2a46245cb756..072f0c16c779b27c11a08f65e5735ef33bd5703d 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -807,7 +807,8 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 	if (ret)
 		goto free_data;
 
-	bpf_prog_change_xdp(NULL, prog);
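+	/* The XDP path only needs to be marked active for repeated runs */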
+	if (repeat > 1)
+		bpf_prog_change_xdp(NULL, prog);
 	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
 	/* We convert the xdp_buff back to an xdp_md before checking the return
 	 * code so the reference count of any held netdevice will be decremented
@@ -828,7 +829,8 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 				     sizeof(struct xdp_md));
 
 out:
-	bpf_prog_change_xdp(prog, NULL);
+	if (repeat > 1)
+		bpf_prog_change_xdp(prog, NULL);
 free_data:
 	kfree(data);
 free_ctx:
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index d6b500dc420847f107cbd967c15bc3b0107d0b53..f16074eb53c72a7040421d23028d5762e0c5658d 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -134,21 +134,6 @@ int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
 	return 0;
 }
 
-void xp_release(struct xdp_buff_xsk *xskb)
-{
-	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
-}
-
-static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
-{
-	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
-
-	offset += xskb->pool->headroom;
-	if (!xskb->pool->unaligned)
-		return xskb->orig_addr + offset;
-	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
-}
-
 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
 	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 8de01aaac4a084bde97bfa1ed6f82a6b043856e5..90c4e1e819d38b6bced115d1a2ccaef5c2b1530b 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -44,12 +44,13 @@ void xp_destroy(struct xsk_buff_pool *pool)
 struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 						struct xdp_umem *umem)
 {
+	bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
 	struct xsk_buff_pool *pool;
 	struct xdp_buff_xsk *xskb;
-	u32 i;
+	u32 i, entries;
 
-	pool = kvzalloc(struct_size(pool, free_heads, umem->chunks),
-			GFP_KERNEL);
+	entries = unaligned ? umem->chunks : 0;
+	pool = kvzalloc(struct_size(pool, free_heads, entries), GFP_KERNEL);
 	if (!pool)
 		goto out;
 
@@ -63,7 +64,8 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 	pool->free_heads_cnt = umem->chunks;
 	pool->headroom = umem->headroom;
 	pool->chunk_size = umem->chunk_size;
-	pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
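+	/* Only meaningful in aligned mode, where chunk_size is a power of two */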
+	pool->chunk_shift = ffs(umem->chunk_size) - 1;
+	pool->unaligned = unaligned;
 	pool->frame_len = umem->chunk_size - umem->headroom -
 		XDP_PACKET_HEADROOM;
 	pool->umem = umem;
@@ -81,7 +83,10 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 		xskb = &pool->heads[i];
 		xskb->pool = pool;
 		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
-		pool->free_heads[i] = xskb;
+		if (pool->unaligned)
+			pool->free_heads[i] = xskb;
+		else
+			xp_init_xskb_addr(xskb, pool, i * pool->chunk_size);
 	}
 
 	return pool;
@@ -406,6 +411,12 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 
 	if (pool->unaligned)
 		xp_check_dma_contiguity(dma_map);
+	else
+		for (i = 0; i < pool->heads_cnt; i++) {
+			struct xdp_buff_xsk *xskb = &pool->heads[i];
+
+			xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
+		}
 
 	err = xp_init_dma_info(pool, dma_map);
 	if (err) {
@@ -448,12 +459,9 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
 	if (pool->free_heads_cnt == 0)
 		return NULL;
 
-	xskb = pool->free_heads[--pool->free_heads_cnt];
-
 	for (;;) {
 		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
 			pool->fq->queue_empty_descs++;
-			xp_release(xskb);
 			return NULL;
 		}
 
@@ -466,17 +474,17 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
 		}
 		break;
 	}
-	xskq_cons_release(pool->fq);
 
-	xskb->orig_addr = addr;
-	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
-	if (pool->dma_pages_cnt) {
-		xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] &
-				   ~XSK_NEXT_PG_CONTIG_MASK) +
-				  (addr & ~PAGE_MASK);
-		xskb->dma = xskb->frame_dma + pool->headroom +
-			    XDP_PACKET_HEADROOM;
+	if (pool->unaligned) {
+		xskb = pool->free_heads[--pool->free_heads_cnt];
+		xp_init_xskb_addr(xskb, pool, addr);
+		if (pool->dma_pages_cnt)
+			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
+	} else {
+		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
 	}
+
+	xskq_cons_release(pool->fq);
 	return xskb;
 }
 
@@ -507,6 +515,96 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
 }
 EXPORT_SYMBOL(xp_alloc);
 
+static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+	u32 i, cached_cons, nb_entries;
+
+	if (max > pool->free_heads_cnt)
+		max = pool->free_heads_cnt;
+	max = xskq_cons_nb_entries(pool->fq, max);
+
+	cached_cons = pool->fq->cached_cons;
+	nb_entries = max;
+	i = max;
+	while (i--) {
+		struct xdp_buff_xsk *xskb;
+		u64 addr;
+		bool ok;
+
+		__xskq_cons_read_addr_unchecked(pool->fq, cached_cons++, &addr);
+
+		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
+			xp_check_aligned(pool, &addr);
+		if (unlikely(!ok)) {
+			pool->fq->invalid_descs++;
+			nb_entries--;
+			continue;
+		}
+
+		if (pool->unaligned) {
+			xskb = pool->free_heads[--pool->free_heads_cnt];
+			xp_init_xskb_addr(xskb, pool, addr);
+			if (pool->dma_pages_cnt)
+				xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
+		} else {
+			xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
+		}
+
+		*xdp = &xskb->xdp;
+		xdp++;
+	}
+
+	xskq_cons_release_n(pool->fq, max);
+	return nb_entries;
+}
+
+static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
+{
+	struct xdp_buff_xsk *xskb;
+	u32 i;
+
+	nb_entries = min_t(u32, nb_entries, pool->free_list_cnt);
+
+	i = nb_entries;
+	while (i--) {
+		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
+		list_del(&xskb->free_list_node);
+
+		*xdp = &xskb->xdp;
+		xdp++;
+	}
+	pool->free_list_cnt -= nb_entries;
+
+	return nb_entries;
+}
+
+u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+	u32 nb_entries1 = 0, nb_entries2;
+
+	if (unlikely(pool->dma_need_sync)) {
+		/* Slow path */
+		*xdp = xp_alloc(pool);
+		return !!*xdp;
+	}
+
+	if (unlikely(pool->free_list_cnt)) {
+		nb_entries1 = xp_alloc_reused(pool, xdp, max);
+		if (nb_entries1 == max)
+			return nb_entries1;
+
+		max -= nb_entries1;
+		xdp += nb_entries1;
+	}
+
+	nb_entries2 = xp_alloc_new_from_fq(pool, xdp, max);
+	if (!nb_entries2)
+		pool->fq->queue_empty_descs++;
+
+	return nb_entries1 + nb_entries2;
+}
+EXPORT_SYMBOL(xp_alloc_batch);
+
 bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
 {
 	if (pool->free_list_cnt >= count)
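The new xp_alloc_batch() gives zero-copy drivers a one-call way to grab many
buffers: it first drains recycled buffers from the free list, then pulls fresh
addresses from the fill queue, and falls back to single-buffer xp_alloc() when
DMA sync is required. A minimal sketch of a driver RX refill loop built on it;
xp_alloc_batch() and the xsk/xdp types are the real API, while RX_BATCH and
refill_hw_ring() are hypothetical driver pieces:

/* Hypothetical RX refill loop against the batch API added above. */
#define RX_BATCH 64

static void rx_refill(struct my_ring *ring, struct xsk_buff_pool *pool)
{
	struct xdp_buff *bufs[RX_BATCH];
	u32 i, n;

	/* may return fewer than RX_BATCH if the fill queue runs dry */
	n = xp_alloc_batch(pool, bufs, RX_BATCH);
	for (i = 0; i < n; i++)
		refill_hw_ring(ring, bufs[i]);	/* driver-specific */
}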
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 9ae13cccfb28d2172c599e28cde0fb79bf02cf30..e9aa2c23635618430267fdc4a3115a7ac845fcde 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -111,14 +111,18 @@ struct xsk_queue {
 
 /* Functions that read and validate content from consumer rings. */
 
-static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
+static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
 {
 	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+	u32 idx = cached_cons & q->ring_mask;
 
-	if (q->cached_cons != q->cached_prod) {
-		u32 idx = q->cached_cons & q->ring_mask;
+	*addr = ring->desc[idx];
+}
 
-		*addr = ring->desc[idx];
+static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
+{
+	if (q->cached_cons != q->cached_prod) {
+		__xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
 		return true;
 	}
 
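Factoring out __xskq_cons_read_addr_unchecked() lets callers peek the fill
ring at an arbitrary cached consumer position, not just the current one. The
idiom relies on the ring size being a power of two: the cursor grows without
wrapping and is masked into the array on each access. A standalone sketch of
that convention, with all names illustrative:

#include <stdint.h>

#define RING_SIZE 8			/* must be a power of two */

struct demo_ring {
	uint64_t desc[RING_SIZE];
	uint32_t ring_mask;		/* RING_SIZE - 1 */
};

/* cached_cons may exceed RING_SIZE; the mask wraps it into range */
static uint64_t read_at(const struct demo_ring *r, uint32_t cached_cons)
{
	return r->desc[cached_cons & r->ring_mask];
}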
diff --git a/samples/bpf/xdp_router_ipv4_user.c b/samples/bpf/xdp_router_ipv4_user.c
index b5f03cb17a3c043ff45d64e6a552a801028f51e5..cfaf7e50e4316981595a2b5333bbda04498c45ea 100644
--- a/samples/bpf/xdp_router_ipv4_user.c
+++ b/samples/bpf/xdp_router_ipv4_user.c
@@ -155,7 +155,7 @@ static void read_route(struct nlmsghdr *nh, int nll)
 		printf("%d\n", nh->nlmsg_type);
 
 	memset(&route, 0, sizeof(route));
-	printf("Destination\t\tGateway\t\tGenmask\t\tMetric\t\tIface\n");
+	printf("Destination     Gateway         Genmask         Metric Iface\n");
 	for (; NLMSG_OK(nh, nll); nh = NLMSG_NEXT(nh, nll)) {
 		rt_msg = (struct rtmsg *)NLMSG_DATA(nh);
 		rtm_family = rt_msg->rtm_family;
@@ -207,6 +207,7 @@ static void read_route(struct nlmsghdr *nh, int nll)
 				int metric;
 				__be32 gw;
 			} *prefix_value;
+			struct in_addr dst_addr, gw_addr, mask_addr;
 
 			prefix_key = alloca(sizeof(*prefix_key) + 3);
 			prefix_value = alloca(sizeof(*prefix_value));
@@ -234,14 +235,17 @@ static void read_route(struct nlmsghdr *nh, int nll)
 			for (i = 0; i < 4; i++)
 				prefix_key->data[i] = (route.dst >> i * 8) & 0xff;
 
-			printf("%3d.%d.%d.%d\t\t%3x\t\t%d\t\t%d\t\t%s\n",
-			       (int)prefix_key->data[0],
-			       (int)prefix_key->data[1],
-			       (int)prefix_key->data[2],
-			       (int)prefix_key->data[3],
-			       route.gw, route.dst_len,
+			dst_addr.s_addr = route.dst;
+			printf("%-16s", inet_ntoa(dst_addr));
+
+			gw_addr.s_addr = route.gw;
+			printf("%-16s", inet_ntoa(gw_addr));
+
+			/* dst_len == 0 is the default route; avoid a 32-bit shift */
+			mask_addr.s_addr = route.dst_len ?
+				htonl(~(0xffffffffU >> route.dst_len)) : 0;
+			printf("%-16s%-7d%s\n", inet_ntoa(mask_addr),
 			       route.metric,
 			       route.iface_name);
+
 			if (bpf_map_lookup_elem(lpm_map_fd, prefix_key,
 						prefix_value) < 0) {
 				for (i = 0; i < 4; i++)
@@ -393,8 +397,12 @@ static void read_arp(struct nlmsghdr *nh, int nll)
 
 	if (nh->nlmsg_type == RTM_GETNEIGH)
 		printf("READING arp entry\n");
-	printf("Address\tHwAddress\n");
+	printf("Address         HwAddress\n");
 	for (; NLMSG_OK(nh, nll); nh = NLMSG_NEXT(nh, nll)) {
+		struct in_addr dst_addr;
+		char mac_str[18];
+		int len = 0, i;
+
 		rt_msg = (struct ndmsg *)NLMSG_DATA(nh);
 		rt_attr = (struct rtattr *)RTM_RTA(rt_msg);
 		ndm_family = rt_msg->ndm_family;
@@ -415,7 +423,14 @@ static void read_arp(struct nlmsghdr *nh, int nll)
 		}
 		arp_entry.dst = atoi(dsts);
 		arp_entry.mac = atol(mac);
-		printf("%x\t\t%llx\n", arp_entry.dst, arp_entry.mac);
+
+		dst_addr.s_addr = arp_entry.dst;
+		for (i = 0; i < 6; i++)
+			len += snprintf(mac_str + len, 18 - len, "%02llx%s",
+					((arp_entry.mac >> i * 8) & 0xff),
+					i < 5 ? ":" : "");
+		printf("%-16s%s\n", inet_ntoa(dst_addr), mac_str);
+
 		if (ndm_family == AF_INET) {
 			if (bpf_map_lookup_elem(exact_match_map_fd,
 						&arp_entry.dst,
@@ -672,7 +687,7 @@ int main(int ac, char **argv)
 	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
 		return 1;
 
-	printf("\n**************loading bpf file*********************\n\n\n");
+	printf("\n******************loading bpf file*********************\n");
 	if (!prog_fd) {
 		printf("bpf_prog_load_xattr: %s\n", strerror(errno));
 		return 1;
@@ -722,9 +737,9 @@ int main(int ac, char **argv)
 	signal(SIGINT, int_exit);
 	signal(SIGTERM, int_exit);
 
-	printf("*******************ROUTE TABLE*************************\n\n\n");
+	printf("\n*******************ROUTE TABLE*************************\n");
 	get_route_table(AF_INET);
-	printf("*******************ARP TABLE***************************\n\n\n");
+	printf("\n*******************ARP TABLE***************************\n");
 	get_arp_table(AF_INET);
 	if (monitor_route() < 0) {
 		printf("Error in receiving route update");
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 7f36385aa9e2e3773b0c8c2bb092bdd9bb84d097..ade44577688edb25709bfc106f20d1d2d38d9f1a 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -624,6 +624,7 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
 		 */
 		switch (id) {
 		case BPF_FUNC_trace_printk:
+		case BPF_FUNC_trace_vprintk:
 		case BPF_FUNC_probe_write_user:
 			if (!full_mode)
 				continue;
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index e3ec47a6a6121647d18218fc02b389d158d0f937..cc835859465b6246ccdd3c3e9dc84cc74f654a9b 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -803,7 +803,10 @@ static int do_skeleton(int argc, char **argv)
 			}						    \n\
 									    \n\
 			err = %1$s__create_skeleton(obj);		    \n\
-			err = err ?: bpf_object__open_skeleton(obj->skeleton, opts);\n\
+			if (err)					    \n\
+				goto err_out;				    \n\
+									    \n\
+			err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
 			if (err)					    \n\
 				goto err_out;				    \n\
 									    \n\
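The generated skeleton now checks the create-skeleton result explicitly
instead of chaining into the next call with GNU C's `?:` extension. The two
forms short-circuit identically; the explicit check is simply easier to follow
in generated code. A plain-C illustration, with first_step()/second_step() as
stand-ins:

#include <stdio.h>

static int first_step(void)  { return -1; }	/* pretend it failed */
static int second_step(void) { return 0; }

int main(void)
{
	int err = first_step();

	/* GNU extension: second_step() runs only when err == 0, which is
	 * exactly what the skeleton now spells out as:
	 *	if (err)
	 *		goto err_out;
	 *	err = second_step();
	 */
	err = err ?: second_step();

	printf("err = %d\n", err);	/* prints -1 */
	return 0;
}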
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 3e9785f1064a23f8df088d0818fd06af722911cd..6fc59d61937ada177f3044583995ab58fa5bccdc 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4046,7 +4046,7 @@ union bpf_attr {
  * 		arguments. The *data* are a **u64** array and corresponding format string
  * 		values are stored in the array. For strings and pointers where pointees
  * 		are accessed, only the pointer values are stored in the *data* array.
- * 		The *data_len* is the size of *data* in bytes.
+ * 		The *data_len* is the size of *data* in bytes and must be a multiple of 8.
  *
  *		Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory.
  *		Reading kernel memory may fail due to either invalid address or
@@ -4751,7 +4751,8 @@ union bpf_attr {
  *		Each format specifier in **fmt** corresponds to one u64 element
  *		in the **data** array. For strings and pointers where pointees
  *		are accessed, only the pointer values are stored in the *data*
- *		array. The *data_len* is the size of *data* in bytes.
+ *		array. The *data_len* is the size of *data* in bytes and must
+ *		be a multiple of 8.
  *
  *		Formats **%s** and **%p{i,I}{4,6}** require to read kernel
  *		memory. Reading kernel memory may fail due to either invalid
@@ -4898,6 +4899,16 @@ union bpf_attr {
  *		**-EINVAL** if *flags* is not zero.
  *
  *		**-ENOENT** if architecture does not support branch records.
+ *
+ * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
+ *	Description
+ *		Behaves like the **bpf_trace_printk**\ () helper, but takes an
+ *		array of u64 to format, and can therefore handle more format
+ *		arguments.
+ *
+ *		Arguments are to be used as in the **bpf_seq_printf**\ () helper.
+ *	Return
+ *		The number of bytes written to the buffer, or a negative error
+ *		in case of failure.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5077,6 +5088,7 @@ union bpf_attr {
 	FN(get_attach_cookie),		\
 	FN(task_pt_regs),		\
 	FN(get_branch_snapshot),	\
+	FN(trace_vprintk),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
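Because bpf_trace_vprintk() takes its values as a u64 array rather than as
varargs, it can exceed bpf_trace_printk()'s three-argument ceiling. A minimal
BPF-side sketch of calling it by hand, assuming headers generated from a
kernel that provides the helper; section name and values are illustrative, and
the reworked bpf_printk() macro further down normally does this packing:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("tp/syscalls/sys_enter_write")
int trace_write(void *ctx)
{
	static const char fmt[] = "a=%d b=%d c=%d d=%d\n";
	__u64 args[] = { 1, 2, 3, 4 };	/* one u64 per format specifier */

	/* sizeof(args) is a multiple of 8, as the helper requires */
	bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
	return 0;
}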
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index b9987c3efa3c430c0bab72921c35ea5646a165fc..963b1060d9441f5600bbb13ceaccedaf49f18c8e 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -14,14 +14,6 @@
 #define __type(name, val) typeof(val) *name
 #define __array(name, val) typeof(val) *name[]
 
-/* Helper macro to print out debug messages */
-#define bpf_printk(fmt, ...)				\
-({							\
-	char ____fmt[] = fmt;				\
-	bpf_trace_printk(____fmt, sizeof(____fmt),	\
-			 ##__VA_ARGS__);		\
-})
-
 /*
  * Helper macro to place programs, maps, license in
  * different sections in elf_bpf file. Section names
@@ -224,4 +216,47 @@ enum libbpf_tristate {
 		     ___param, sizeof(___param));		\
 })
 
+#ifdef BPF_NO_GLOBAL_DATA
+#define BPF_PRINTK_FMT_MOD
+#else
+#define BPF_PRINTK_FMT_MOD static const
+#endif
+
+#define __bpf_printk(fmt, ...)				\
+({							\
+	BPF_PRINTK_FMT_MOD char ____fmt[] = fmt;	\
+	bpf_trace_printk(____fmt, sizeof(____fmt),	\
+			 ##__VA_ARGS__);		\
+})
+
+/*
+ * __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments
+ * instead of an array of u64.
+ */
+#define __bpf_vprintk(fmt, args...)				\
+({								\
+	static const char ___fmt[] = fmt;			\
+	unsigned long long ___param[___bpf_narg(args)];		\
+								\
+	_Pragma("GCC diagnostic push")				\
+	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")	\
+	___bpf_fill(___param, args);				\
+	_Pragma("GCC diagnostic pop")				\
+								\
+	bpf_trace_vprintk(___fmt, sizeof(___fmt),		\
+			  ___param, sizeof(___param));		\
+})
+
+/* Use __bpf_printk when the bpf_printk call has 3 or fewer fmt args,
+ * otherwise use __bpf_vprintk.
+ */
+#define ___bpf_pick_printk(...) \
+	___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk,	\
+		   __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk,		\
+		   __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\
+		   __bpf_printk /*1*/, __bpf_printk /*0*/)
+
+/* Helper macro to print out debug messages */
+#define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args)
+
 #endif
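With ___bpf_pick_printk() in place, bpf_printk() transparently routes to
__bpf_printk() for up to three format arguments and to __bpf_vprintk() beyond
that, so callers never pick a helper explicitly. For example (a sketch; the
four-argument call additionally requires a kernel that has bpf_trace_vprintk):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("tp/sched/sched_switch")
int handle_switch(void *ctx)
{
	int a = 1, b = 2, c = 3, d = 4;

	bpf_printk("small: %d %d", a, b);	     /* -> __bpf_printk() */
	bpf_printk("big: %d %d %d %d", a, b, c, d);  /* -> __bpf_vprintk() */
	return 0;
}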
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
index 8df718a6b142dfd8cda3909944aeab5436e60f8d..80087b13877fb084f136e202aa5654f97887fa4c 100644
--- a/tools/lib/bpf/gen_loader.c
+++ b/tools/lib/bpf/gen_loader.c
@@ -5,6 +5,7 @@
 #include <string.h>
 #include <errno.h>
 #include <linux/filter.h>
+#include <sys/param.h>
 #include "btf.h"
 #include "bpf.h"
 #include "libbpf.h"
@@ -135,13 +136,17 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level)
 
 static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
 {
+	__u32 size8 = roundup(size, 8);
+	__u64 zero = 0;
 	void *prev;
 
-	if (realloc_data_buf(gen, size))
+	if (realloc_data_buf(gen, size8))
 		return 0;
 	prev = gen->data_cur;
 	memcpy(gen->data_cur, data, size);
 	gen->data_cur += size;
+	memcpy(gen->data_cur, &zero, size8 - size);
+	gen->data_cur += size8 - size;
 	return prev - gen->data_start;
 }
 
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index da65a1666a5e97280bd727640cc5518638d103dc..e23f1b6b9402be3dfaafab357c73dbe1fefd21a8 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -220,17 +220,40 @@ struct reloc_desc {
 
 struct bpf_sec_def;
 
-typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog);
+typedef int (*init_fn_t)(struct bpf_program *prog, long cookie);
+typedef int (*preload_fn_t)(struct bpf_program *prog, struct bpf_prog_load_params *attr, long cookie);
+typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog, long cookie);
+
+/* stored as sec_def->cookie for all libbpf-supported SEC()s */
+enum sec_def_flags {
+	SEC_NONE = 0,
+	/* expected_attach_type is optional, if kernel doesn't support that */
+	SEC_EXP_ATTACH_OPT = 1,
+	/* legacy, only used by libbpf_get_type_names() and
+	 * libbpf_attach_type_by_name(), not used by libbpf itself at all.
+	 * This used to be associated with cgroup (and a few other) BPF
+	 * programs that were attachable through the BPF_PROG_ATTACH command.
+	 * Pretty meaningless nowadays, though.
+	 */
+	SEC_ATTACHABLE = 2,
+	SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
+	/* attachment target is specified through BTF ID in either the
+	 * kernel's or another BPF program's BTF object */
+	SEC_ATTACH_BTF = 4,
+	/* BPF program type allows sleeping/blocking in kernel */
+	SEC_SLEEPABLE = 8,
+	/* allow non-strict prefix matching */
+	SEC_SLOPPY_PFX = 16,
+};
 
 struct bpf_sec_def {
 	const char *sec;
-	size_t len;
 	enum bpf_prog_type prog_type;
 	enum bpf_attach_type expected_attach_type;
-	bool is_exp_attach_type_optional;
-	bool is_attachable;
-	bool is_attach_btf;
-	bool is_sleepable;
+	long cookie;
+
+	init_fn_t init_fn;
+	preload_fn_t preload_fn;
 	attach_fn_t attach_fn;
 };
 
@@ -1665,7 +1688,7 @@ static int bpf_object__process_kconfig_line(struct bpf_object *obj,
 	void *ext_val;
 	__u64 num;
 
-	if (strncmp(buf, "CONFIG_", 7))
+	if (!str_has_pfx(buf, "CONFIG_"))
 		return 0;
 
 	sep = strchr(buf, '=');
@@ -1845,6 +1868,8 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
 			continue;
 		if (sym.st_shndx != obj->efile.maps_shndx)
 			continue;
+		if (GELF_ST_TYPE(sym.st_info) == STT_SECTION)
+			continue;
 		nr_maps++;
 	}
 	/* Assume equally sized map definitions */
@@ -1869,6 +1894,8 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
 			continue;
 		if (sym.st_shndx != obj->efile.maps_shndx)
 			continue;
+		if (GELF_ST_TYPE(sym.st_info) == STT_SECTION)
+			continue;
 
 		map = bpf_object__add_map(obj);
 		if (IS_ERR(map))
@@ -1881,8 +1908,7 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
 			return -LIBBPF_ERRNO__FORMAT;
 		}
 
-		if (GELF_ST_TYPE(sym.st_info) == STT_SECTION
-		    || GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
+		if (GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
 			pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
 			return -ENOTSUP;
 		}
@@ -2913,7 +2939,7 @@ static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
 static bool is_sec_name_dwarf(const char *name)
 {
 	/* approximation, but the actual list is too long */
-	return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
+	return str_has_pfx(name, ".debug_");
 }
 
 static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
@@ -2935,7 +2961,7 @@ static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
 	if (is_sec_name_dwarf(name))
 		return true;
 
-	if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
+	if (str_has_pfx(name, ".rel")) {
 		name += sizeof(".rel") - 1;
 		/* DWARF section relocations */
 		if (is_sec_name_dwarf(name))
@@ -4643,6 +4669,30 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
 			create_attr.inner_map_fd = map->inner_map_fd;
 	}
 
+	switch (def->type) {
+	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+	case BPF_MAP_TYPE_CGROUP_ARRAY:
+	case BPF_MAP_TYPE_STACK_TRACE:
+	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+	case BPF_MAP_TYPE_HASH_OF_MAPS:
+	case BPF_MAP_TYPE_DEVMAP:
+	case BPF_MAP_TYPE_DEVMAP_HASH:
+	case BPF_MAP_TYPE_CPUMAP:
+	case BPF_MAP_TYPE_XSKMAP:
+	case BPF_MAP_TYPE_SOCKMAP:
+	case BPF_MAP_TYPE_SOCKHASH:
+	case BPF_MAP_TYPE_QUEUE:
+	case BPF_MAP_TYPE_STACK:
+	case BPF_MAP_TYPE_RINGBUF:
+		create_attr.btf_fd = 0;
+		create_attr.btf_key_type_id = 0;
+		create_attr.btf_value_type_id = 0;
+		map->btf_key_type_id = 0;
+		map->btf_value_type_id = 0;
+	default:
+		break;
+	}
+
 	if (obj->gen_loader) {
 		bpf_gen__map_create(obj->gen_loader, &create_attr, is_inner ? -1 : map - obj->maps);
 		/* Pretend to have valid FD to pass various fd >= 0 checks.
@@ -6094,6 +6144,48 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program
 	return 0;
 }
 
+static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
+				     int *btf_obj_fd, int *btf_type_id);
+
+/* this is called as prog->sec_def->preload_fn for libbpf-supported sec_defs */
+static int libbpf_preload_prog(struct bpf_program *prog,
+			       struct bpf_prog_load_params *attr, long cookie)
+{
+	enum sec_def_flags def = cookie;
+
+	/* old kernels might not support specifying expected_attach_type */
+	if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
+		attr->expected_attach_type = 0;
+
+	if (def & SEC_SLEEPABLE)
+		attr->prog_flags |= BPF_F_SLEEPABLE;
+
+	if ((prog->type == BPF_PROG_TYPE_TRACING ||
+	     prog->type == BPF_PROG_TYPE_LSM ||
+	     prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
+		int btf_obj_fd = 0, btf_type_id = 0, err;
+		const char *attach_name;
+
+		attach_name = strchr(prog->sec_name, '/') + 1;
+		err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
+		if (err)
+			return err;
+
+		/* cache resolved BTF FD and BTF type ID in the prog */
+		prog->attach_btf_obj_fd = btf_obj_fd;
+		prog->attach_btf_id = btf_type_id;
+
+		/* By now, though, libbpf's common logic no longer consults
+		 * prog->attach_btf_obj_fd/prog->attach_btf_id, because this
+		 * callback runs after the attrs were populated by libbpf, so
+		 * it has to update attr explicitly here.
+		 */
+		attr->attach_btf_obj_fd = btf_obj_fd;
+		attr->attach_btf_id = btf_type_id;
+	}
+	return 0;
+}
+
 static int
 load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 	     char *license, __u32 kern_version, int *pfd)
@@ -6102,7 +6194,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 	char *cp, errmsg[STRERR_BUFSIZE];
 	size_t log_buf_size = 0;
 	char *log_buf = NULL;
-	int btf_fd, ret;
+	int btf_fd, ret, err;
 
 	if (prog->type == BPF_PROG_TYPE_UNSPEC) {
 		/*
@@ -6118,22 +6210,15 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 		return -EINVAL;
 
 	load_attr.prog_type = prog->type;
-	/* old kernels might not support specifying expected_attach_type */
-	if (!kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
-	    prog->sec_def->is_exp_attach_type_optional)
-		load_attr.expected_attach_type = 0;
-	else
-		load_attr.expected_attach_type = prog->expected_attach_type;
+	load_attr.expected_attach_type = prog->expected_attach_type;
 	if (kernel_supports(prog->obj, FEAT_PROG_NAME))
 		load_attr.name = prog->name;
 	load_attr.insns = insns;
 	load_attr.insn_cnt = insns_cnt;
 	load_attr.license = license;
 	load_attr.attach_btf_id = prog->attach_btf_id;
-	if (prog->attach_prog_fd)
-		load_attr.attach_prog_fd = prog->attach_prog_fd;
-	else
-		load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
+	load_attr.attach_prog_fd = prog->attach_prog_fd;
+	load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
 	load_attr.attach_btf_id = prog->attach_btf_id;
 	load_attr.kern_version = kern_version;
 	load_attr.prog_ifindex = prog->prog_ifindex;
@@ -6152,6 +6237,16 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 	load_attr.log_level = prog->log_level;
 	load_attr.prog_flags = prog->prog_flags;
 
+	/* adjust load_attr if sec_def provides custom preload callback */
+	if (prog->sec_def && prog->sec_def->preload_fn) {
+		err = prog->sec_def->preload_fn(prog, &load_attr, prog->sec_def->cookie);
+		if (err < 0) {
+			pr_warn("prog '%s': failed to prepare load attributes: %d\n",
+				prog->name, err);
+			return err;
+		}
+	}
+
 	if (prog->obj->gen_loader) {
 		bpf_gen__prog_load(prog->obj->gen_loader, &load_attr,
 				   prog - prog->obj->programs);
@@ -6267,8 +6362,6 @@ static int bpf_program__record_externs(struct bpf_program *prog)
 	return 0;
 }
 
-static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
-
 int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
 {
 	int err = 0, fd, i;
@@ -6278,19 +6371,6 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
 		return libbpf_err(-EINVAL);
 	}
 
-	if ((prog->type == BPF_PROG_TYPE_TRACING ||
-	     prog->type == BPF_PROG_TYPE_LSM ||
-	     prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
-		int btf_obj_fd = 0, btf_type_id = 0;
-
-		err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id);
-		if (err)
-			return libbpf_err(err);
-
-		prog->attach_btf_obj_fd = btf_obj_fd;
-		prog->attach_btf_id = btf_type_id;
-	}
-
 	if (prog->instances.nr < 0 || !prog->instances.fds) {
 		if (prog->preprocessor) {
 			pr_warn("Internal error: can't load program '%s'\n",
@@ -6400,6 +6480,7 @@ static const struct bpf_sec_def *find_sec_def(const char *sec_name);
 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
 {
 	struct bpf_program *prog;
+	int err;
 
 	bpf_object__for_each_program(prog, obj) {
 		prog->sec_def = find_sec_def(prog->sec_name);
@@ -6410,8 +6491,6 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
 			continue;
 		}
 
-		if (prog->sec_def->is_sleepable)
-			prog->prog_flags |= BPF_F_SLEEPABLE;
 		bpf_program__set_type(prog, prog->sec_def->prog_type);
 		bpf_program__set_expected_attach_type(prog, prog->sec_def->expected_attach_type);
 
@@ -6421,6 +6500,18 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
 		    prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
 			prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
 #pragma GCC diagnostic pop
+
+		/* sec_def can have a custom callback which is called after
+		 * the bpf_program is initialized, to adjust its properties
+		 */
+		if (prog->sec_def->init_fn) {
+			err = prog->sec_def->init_fn(prog, prog->sec_def->cookie);
+			if (err < 0) {
+				pr_warn("prog '%s': failed to initialize: %d\n",
+					prog->name, err);
+				return err;
+			}
+		}
 	}
 
 	return 0;
@@ -6847,8 +6938,7 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
 			if (err)
 				return err;
 			pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
-		} else if (ext->type == EXT_KCFG &&
-			   strncmp(ext->name, "CONFIG_", 7) == 0) {
+		} else if (ext->type == EXT_KCFG && str_has_pfx(ext->name, "CONFIG_")) {
 			need_config = true;
 		} else if (ext->type == EXT_KSYM) {
 			if (ext->ksym.type_id)
@@ -7908,217 +7998,143 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
 	prog->expected_attach_type = type;
 }
 
-#define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional,	    \
-			  attachable, attach_btf)			    \
-	{								    \
-		.sec = string,						    \
-		.len = sizeof(string) - 1,				    \
-		.prog_type = ptype,					    \
-		.expected_attach_type = eatype,				    \
-		.is_exp_attach_type_optional = eatype_optional,		    \
-		.is_attachable = attachable,				    \
-		.is_attach_btf = attach_btf,				    \
-	}
-
-/* Programs that can NOT be attached. */
-#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
-
-/* Programs that can be attached. */
-#define BPF_APROG_SEC(string, ptype, atype) \
-	BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
-
-/* Programs that must specify expected attach type at load time. */
-#define BPF_EAPROG_SEC(string, ptype, eatype) \
-	BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
-
-/* Programs that use BTF to identify attach point */
-#define BPF_PROG_BTF(string, ptype, eatype) \
-	BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
-
-/* Programs that can be attached but attach type can't be identified by section
- * name. Kept for backward compatibility.
- */
-#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
-
-#define SEC_DEF(sec_pfx, ptype, ...) {					    \
+#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) {			    \
 	.sec = sec_pfx,							    \
-	.len = sizeof(sec_pfx) - 1,					    \
 	.prog_type = BPF_PROG_TYPE_##ptype,				    \
+	.expected_attach_type = atype,					    \
+	.cookie = (long)(flags),					    \
+	.preload_fn = libbpf_preload_prog,				    \
 	__VA_ARGS__							    \
 }
 
-static struct bpf_link *attach_kprobe(const struct bpf_program *prog);
-static struct bpf_link *attach_tp(const struct bpf_program *prog);
-static struct bpf_link *attach_raw_tp(const struct bpf_program *prog);
-static struct bpf_link *attach_trace(const struct bpf_program *prog);
-static struct bpf_link *attach_lsm(const struct bpf_program *prog);
-static struct bpf_link *attach_iter(const struct bpf_program *prog);
+static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie);
+static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie);
+static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie);
+static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie);
+static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie);
+static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie);
 
 static const struct bpf_sec_def section_defs[] = {
-	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
-	BPF_EAPROG_SEC("sk_reuseport/migrate",	BPF_PROG_TYPE_SK_REUSEPORT,
-						BPF_SK_REUSEPORT_SELECT_OR_MIGRATE),
-	BPF_EAPROG_SEC("sk_reuseport",		BPF_PROG_TYPE_SK_REUSEPORT,
-						BPF_SK_REUSEPORT_SELECT),
-	SEC_DEF("kprobe/", KPROBE,
-		.attach_fn = attach_kprobe),
-	BPF_PROG_SEC("uprobe/",			BPF_PROG_TYPE_KPROBE),
-	SEC_DEF("kretprobe/", KPROBE,
-		.attach_fn = attach_kprobe),
-	BPF_PROG_SEC("uretprobe/",		BPF_PROG_TYPE_KPROBE),
-	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
-	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
-	SEC_DEF("tracepoint/", TRACEPOINT,
-		.attach_fn = attach_tp),
-	SEC_DEF("tp/", TRACEPOINT,
-		.attach_fn = attach_tp),
-	SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
-		.attach_fn = attach_raw_tp),
-	SEC_DEF("raw_tp/", RAW_TRACEPOINT,
-		.attach_fn = attach_raw_tp),
-	SEC_DEF("tp_btf/", TRACING,
-		.expected_attach_type = BPF_TRACE_RAW_TP,
-		.is_attach_btf = true,
-		.attach_fn = attach_trace),
-	SEC_DEF("fentry/", TRACING,
-		.expected_attach_type = BPF_TRACE_FENTRY,
-		.is_attach_btf = true,
-		.attach_fn = attach_trace),
-	SEC_DEF("fmod_ret/", TRACING,
-		.expected_attach_type = BPF_MODIFY_RETURN,
-		.is_attach_btf = true,
-		.attach_fn = attach_trace),
-	SEC_DEF("fexit/", TRACING,
-		.expected_attach_type = BPF_TRACE_FEXIT,
-		.is_attach_btf = true,
-		.attach_fn = attach_trace),
-	SEC_DEF("fentry.s/", TRACING,
-		.expected_attach_type = BPF_TRACE_FENTRY,
-		.is_attach_btf = true,
-		.is_sleepable = true,
-		.attach_fn = attach_trace),
-	SEC_DEF("fmod_ret.s/", TRACING,
-		.expected_attach_type = BPF_MODIFY_RETURN,
-		.is_attach_btf = true,
-		.is_sleepable = true,
-		.attach_fn = attach_trace),
-	SEC_DEF("fexit.s/", TRACING,
-		.expected_attach_type = BPF_TRACE_FEXIT,
-		.is_attach_btf = true,
-		.is_sleepable = true,
-		.attach_fn = attach_trace),
-	SEC_DEF("freplace/", EXT,
-		.is_attach_btf = true,
-		.attach_fn = attach_trace),
-	SEC_DEF("lsm/", LSM,
-		.is_attach_btf = true,
-		.expected_attach_type = BPF_LSM_MAC,
-		.attach_fn = attach_lsm),
-	SEC_DEF("lsm.s/", LSM,
-		.is_attach_btf = true,
-		.is_sleepable = true,
-		.expected_attach_type = BPF_LSM_MAC,
-		.attach_fn = attach_lsm),
-	SEC_DEF("iter/", TRACING,
-		.expected_attach_type = BPF_TRACE_ITER,
-		.is_attach_btf = true,
-		.attach_fn = attach_iter),
-	SEC_DEF("syscall", SYSCALL,
-		.is_sleepable = true),
-	BPF_EAPROG_SEC("xdp_devmap/",		BPF_PROG_TYPE_XDP,
-						BPF_XDP_DEVMAP),
-	BPF_EAPROG_SEC("xdp_cpumap/",		BPF_PROG_TYPE_XDP,
-						BPF_XDP_CPUMAP),
-	BPF_APROG_SEC("xdp",			BPF_PROG_TYPE_XDP,
-						BPF_XDP),
-	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
-	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
-	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
-	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
-	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
-	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
-						BPF_CGROUP_INET_INGRESS),
-	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
-						BPF_CGROUP_INET_EGRESS),
-	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
-	BPF_EAPROG_SEC("cgroup/sock_create",	BPF_PROG_TYPE_CGROUP_SOCK,
-						BPF_CGROUP_INET_SOCK_CREATE),
-	BPF_EAPROG_SEC("cgroup/sock_release",	BPF_PROG_TYPE_CGROUP_SOCK,
-						BPF_CGROUP_INET_SOCK_RELEASE),
-	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
-						BPF_CGROUP_INET_SOCK_CREATE),
-	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
-						BPF_CGROUP_INET4_POST_BIND),
-	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
-						BPF_CGROUP_INET6_POST_BIND),
-	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
-						BPF_CGROUP_DEVICE),
-	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
-						BPF_CGROUP_SOCK_OPS),
-	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
-						BPF_SK_SKB_STREAM_PARSER),
-	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
-						BPF_SK_SKB_STREAM_VERDICT),
-	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
-	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
-						BPF_SK_MSG_VERDICT),
-	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
-						BPF_LIRC_MODE2),
-	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
-						BPF_FLOW_DISSECTOR),
-	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
-						BPF_CGROUP_INET4_BIND),
-	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
-						BPF_CGROUP_INET6_BIND),
-	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
-						BPF_CGROUP_INET4_CONNECT),
-	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
-						BPF_CGROUP_INET6_CONNECT),
-	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
-						BPF_CGROUP_UDP4_SENDMSG),
-	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
-						BPF_CGROUP_UDP6_SENDMSG),
-	BPF_EAPROG_SEC("cgroup/recvmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
-						BPF_CGROUP_UDP4_RECVMSG),
-	BPF_EAPROG_SEC("cgroup/recvmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
-						BPF_CGROUP_UDP6_RECVMSG),
-	BPF_EAPROG_SEC("cgroup/getpeername4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
-						BPF_CGROUP_INET4_GETPEERNAME),
-	BPF_EAPROG_SEC("cgroup/getpeername6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
-						BPF_CGROUP_INET6_GETPEERNAME),
-	BPF_EAPROG_SEC("cgroup/getsockname4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
-						BPF_CGROUP_INET4_GETSOCKNAME),
-	BPF_EAPROG_SEC("cgroup/getsockname6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
-						BPF_CGROUP_INET6_GETSOCKNAME),
-	BPF_EAPROG_SEC("cgroup/sysctl",		BPF_PROG_TYPE_CGROUP_SYSCTL,
-						BPF_CGROUP_SYSCTL),
-	BPF_EAPROG_SEC("cgroup/getsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
-						BPF_CGROUP_GETSOCKOPT),
-	BPF_EAPROG_SEC("cgroup/setsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
-						BPF_CGROUP_SETSOCKOPT),
-	BPF_PROG_SEC("struct_ops",		BPF_PROG_TYPE_STRUCT_OPS),
-	BPF_EAPROG_SEC("sk_lookup/",		BPF_PROG_TYPE_SK_LOOKUP,
-						BPF_SK_LOOKUP),
+	SEC_DEF("socket",		SOCKET_FILTER, 0, SEC_NONE | SEC_SLOPPY_PFX),
+	SEC_DEF("sk_reuseport/migrate",	SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("sk_reuseport",		SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("kprobe/",		KPROBE,	0, SEC_NONE, attach_kprobe),
+	SEC_DEF("uprobe/",		KPROBE,	0, SEC_NONE),
+	SEC_DEF("kretprobe/",		KPROBE, 0, SEC_NONE, attach_kprobe),
+	SEC_DEF("uretprobe/",		KPROBE, 0, SEC_NONE),
+	SEC_DEF("tc",			SCHED_CLS, 0, SEC_NONE),
+	SEC_DEF("classifier",		SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX),
+	SEC_DEF("action",		SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX),
+	SEC_DEF("tracepoint/",		TRACEPOINT, 0, SEC_NONE, attach_tp),
+	SEC_DEF("tp/",			TRACEPOINT, 0, SEC_NONE, attach_tp),
+	SEC_DEF("raw_tracepoint/",	RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
+	SEC_DEF("raw_tp/",		RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
+	SEC_DEF("tp_btf/",		TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
+	SEC_DEF("fentry/",		TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
+	SEC_DEF("fmod_ret/",		TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
+	SEC_DEF("fexit/",		TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
+	SEC_DEF("fentry.s/",		TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
+	SEC_DEF("fmod_ret.s/",		TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
+	SEC_DEF("fexit.s/",		TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
+	SEC_DEF("freplace/",		EXT, 0, SEC_ATTACH_BTF, attach_trace),
+	SEC_DEF("lsm/",			LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
+	SEC_DEF("lsm.s/",		LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
+	SEC_DEF("iter/",		TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
+	SEC_DEF("syscall",		SYSCALL, 0, SEC_SLEEPABLE),
+	SEC_DEF("xdp_devmap/",		XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
+	SEC_DEF("xdp_cpumap/",		XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
+	SEC_DEF("xdp",			XDP, BPF_XDP, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+	SEC_DEF("perf_event",		PERF_EVENT, 0, SEC_NONE | SEC_SLOPPY_PFX),
+	SEC_DEF("lwt_in",		LWT_IN, 0, SEC_NONE | SEC_SLOPPY_PFX),
+	SEC_DEF("lwt_out",		LWT_OUT, 0, SEC_NONE | SEC_SLOPPY_PFX),
+	SEC_DEF("lwt_xmit",		LWT_XMIT, 0, SEC_NONE | SEC_SLOPPY_PFX),
+	SEC_DEF("lwt_seg6local",	LWT_SEG6LOCAL, 0, SEC_NONE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup_skb/ingress",	CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup_skb/egress",	CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/skb",		CGROUP_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/sock_create",	CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/sock_release",	CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/sock",		CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/post_bind4",	CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/post_bind6",	CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/dev",		CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+	SEC_DEF("sockops",		SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+	SEC_DEF("sk_skb/stream_parser",	SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+	SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+	SEC_DEF("sk_skb",		SK_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX),
+	SEC_DEF("sk_msg",		SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+	SEC_DEF("lirc_mode2",		LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+	SEC_DEF("flow_dissector",	FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/bind4",		CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/bind6",		CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/connect4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/connect6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/sendmsg4",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/sendmsg6",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/recvmsg4",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/recvmsg6",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/getpeername4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/getpeername6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/getsockname4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/getsockname6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/sysctl",	CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/getsockopt",	CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("cgroup/setsockopt",	CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+	SEC_DEF("struct_ops+",		STRUCT_OPS, 0, SEC_NONE),
+	SEC_DEF("sk_lookup",		SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
 };
 
-#undef BPF_PROG_SEC_IMPL
-#undef BPF_PROG_SEC
-#undef BPF_APROG_SEC
-#undef BPF_EAPROG_SEC
-#undef BPF_APROG_COMPAT
-#undef SEC_DEF
-
 #define MAX_TYPE_NAME_SIZE 32
 
 static const struct bpf_sec_def *find_sec_def(const char *sec_name)
 {
-	int i, n = ARRAY_SIZE(section_defs);
+	const struct bpf_sec_def *sec_def;
+	enum sec_def_flags sec_flags;
+	int i, n = ARRAY_SIZE(section_defs), len;
+	bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME;
 
 	for (i = 0; i < n; i++) {
-		if (strncmp(sec_name,
-			    section_defs[i].sec, section_defs[i].len))
+		sec_def = &section_defs[i];
+		sec_flags = sec_def->cookie;
+		len = strlen(sec_def->sec);
+
+		/* "type/" always has to have proper SEC("type/extras") form */
+		if (sec_def->sec[len - 1] == '/') {
+			if (str_has_pfx(sec_name, sec_def->sec))
+				return sec_def;
+			continue;
+		}
+
+		/* "type+" means it can be either exact SEC("type") or
+		 * well-formed SEC("type/extras") with proper '/' separator
+		 */
+		if (sec_def->sec[len - 1] == '+') {
+			len--;
+			/* not even a prefix */
+			if (strncmp(sec_name, sec_def->sec, len) != 0)
+				continue;
+			/* exact match or has '/' separator */
+			if (sec_name[len] == '\0' || sec_name[len] == '/')
+				return sec_def;
+			continue;
+		}
+
+		/* SEC_SLOPPY_PFX definitions are allowed to be just prefix
+		 * matches, unless strict section name mode
+		 * (LIBBPF_STRICT_SEC_NAME) is enabled, in which case the
+		 * match has to be exact.
+		 */
+		if ((sec_flags & SEC_SLOPPY_PFX) && !strict)  {
+			if (str_has_pfx(sec_name, sec_def->sec))
+				return sec_def;
 			continue;
-		return &section_defs[i];
+		}
+
+		/* Definitions not marked SEC_SLOPPY_PFX (e.g.,
+		 * SEC("syscall")) are exact matches in both modes.
+		 */
+		if (strcmp(sec_name, sec_def->sec) == 0)
+			return sec_def;
 	}
 	return NULL;
 }
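Concretely, the new matcher gives three behaviors: "type/" definitions require
the SEC("type/extras") form, "type+" definitions accept either the bare type
or a '/'-separated variant, and SEC_SLOPPY_PFX definitions tolerate arbitrary
suffixes only in legacy mode. A small user-space sketch of opting into strict
matching; libbpf_set_strict_mode() and LIBBPF_STRICT_SEC_NAME are the real
libbpf API, and the commented matches are illustrative:

#include <bpf/libbpf_legacy.h>

int main(void)
{
	/* Before this call (legacy mode):
	 *   SEC("xdp_foo")        -> matches "xdp" via SEC_SLOPPY_PFX
	 *   SEC("struct_ops/ops") -> matches "struct_ops+"
	 * After it, sloppy prefixes are rejected:
	 *   SEC("xdp_foo")        -> no sec_def found
	 *   SEC("xdp")            -> still matches exactly
	 */
	libbpf_set_strict_mode(LIBBPF_STRICT_SEC_NAME);
	return 0;
}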
@@ -8135,8 +8151,15 @@ static char *libbpf_get_type_names(bool attach_type)
 	buf[0] = '\0';
 	/* Forge string buf with all available names */
 	for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
-		if (attach_type && !section_defs[i].is_attachable)
-			continue;
+		const struct bpf_sec_def *sec_def = &section_defs[i];
+
+		if (attach_type) {
+			if (sec_def->preload_fn != libbpf_preload_prog)
+				continue;
+
+			if (!(sec_def->cookie & SEC_ATTACHABLE))
+				continue;
+		}
 
 		if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
 			free(buf);
@@ -8460,20 +8483,13 @@ static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
 	return -ESRCH;
 }
 
-static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id)
+static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
+				     int *btf_obj_fd, int *btf_type_id)
 {
 	enum bpf_attach_type attach_type = prog->expected_attach_type;
 	__u32 attach_prog_fd = prog->attach_prog_fd;
-	const char *attach_name;
 	int err = 0;
 
-	if (!prog->sec_def || !prog->sec_def->is_attach_btf) {
-		pr_warn("failed to identify BTF ID based on ELF section name '%s'\n",
-			prog->sec_name);
-		return -ESRCH;
-	}
-	attach_name = prog->sec_name + prog->sec_def->len;
-
 	/* BPF program's BTF ID */
 	if (attach_prog_fd) {
 		err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
@@ -8523,7 +8539,9 @@ int libbpf_attach_type_by_name(const char *name,
 		return libbpf_err(-EINVAL);
 	}
 
-	if (!sec_def->is_attachable)
+	if (sec_def->preload_fn != libbpf_preload_prog)
+		return libbpf_err(-EINVAL);
+	if (!(sec_def->cookie & SEC_ATTACHABLE))
 		return libbpf_err(-EINVAL);
 
 	*attach_type = sec_def->expected_attach_type;
@@ -9011,59 +9029,18 @@ int bpf_link__unpin(struct bpf_link *link)
 	return 0;
 }
 
-static int poke_kprobe_events(bool add, const char *name, bool retprobe, uint64_t offset)
-{
-	int fd, ret = 0;
-	pid_t p = getpid();
-	char cmd[260], probename[128], probefunc[128];
-	const char *file = "/sys/kernel/debug/tracing/kprobe_events";
-
-	if (retprobe)
-		snprintf(probename, sizeof(probename), "kretprobes/%s_libbpf_%u", name, p);
-	else
-		snprintf(probename, sizeof(probename), "kprobes/%s_libbpf_%u", name, p);
-
-	if (offset)
-		snprintf(probefunc, sizeof(probefunc), "%s+%zu", name, (size_t)offset);
-
-	if (add) {
-		snprintf(cmd, sizeof(cmd), "%c:%s %s",
-			 retprobe ? 'r' : 'p',
-			 probename,
-			 offset ? probefunc : name);
-	} else {
-		snprintf(cmd, sizeof(cmd), "-:%s", probename);
-	}
-
-	fd = open(file, O_WRONLY | O_APPEND, 0);
-	if (!fd)
-		return -errno;
-	ret = write(fd, cmd, strlen(cmd));
-	if (ret < 0)
-		ret = -errno;
-	close(fd);
-
-	return ret;
-}
-
-static inline int add_kprobe_event_legacy(const char *name, bool retprobe, uint64_t offset)
-{
-	return poke_kprobe_events(true, name, retprobe, offset);
-}
-
-static inline int remove_kprobe_event_legacy(const char *name, bool retprobe)
-{
-	return poke_kprobe_events(false, name, retprobe, 0);
-}
-
 struct bpf_link_perf {
 	struct bpf_link link;
 	int perf_event_fd;
 	/* legacy kprobe support: keep track of probe identifier and type */
 	char *legacy_probe_name;
+	bool legacy_is_kprobe;
 	bool legacy_is_retprobe;
 };
 
+static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
+static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
+
 static int bpf_link_perf_detach(struct bpf_link *link)
 {
 	struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
@@ -9076,10 +9053,16 @@ static int bpf_link_perf_detach(struct bpf_link *link)
 		close(perf_link->perf_event_fd);
 	close(link->fd);
 
-	/* legacy kprobe needs to be removed after perf event fd closure */
-	if (perf_link->legacy_probe_name)
-		err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
-						 perf_link->legacy_is_retprobe);
+	/* legacy uprobe/kprobe needs to be removed after perf event fd closure */
+	if (perf_link->legacy_probe_name) {
+		if (perf_link->legacy_is_kprobe) {
+			err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
+							 perf_link->legacy_is_retprobe);
+		} else {
+			err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
+							 perf_link->legacy_is_retprobe);
+		}
+	}
 
 	return err;
 }
@@ -9202,18 +9185,6 @@ static int parse_uint_from_file(const char *file, const char *fmt)
 	return ret;
 }
 
-static int determine_kprobe_perf_type_legacy(const char *func_name, bool is_retprobe)
-{
-	char file[192];
-
-	snprintf(file, sizeof(file),
-		 "/sys/kernel/debug/tracing/events/%s/%s_libbpf_%d/id",
-		 is_retprobe ? "kretprobes" : "kprobes",
-		 func_name, getpid());
-
-	return parse_uint_from_file(file, "%d\n");
-}
-
 static int determine_kprobe_perf_type(void)
 {
 	const char *file = "/sys/bus/event_source/devices/kprobe/type";
@@ -9296,21 +9267,79 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
 	return pfd;
 }
 
-static int perf_event_kprobe_open_legacy(bool retprobe, const char *name, uint64_t offset, int pid)
+static int append_to_file(const char *file, const char *fmt, ...)
+{
+	int fd, n, err = 0;
+	va_list ap;
+
+	fd = open(file, O_WRONLY | O_APPEND, 0);
+	if (fd < 0)
+		return -errno;
+
+	va_start(ap, fmt);
+	n = vdprintf(fd, fmt, ap);
+	va_end(ap);
+
+	if (n < 0)
+		err = -errno;
+
+	close(fd);
+	return err;
+}
+
+static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
+					 const char *kfunc_name, size_t offset)
+{
+	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), kfunc_name, offset);
+}
+
+static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
+				   const char *kfunc_name, size_t offset)
+{
+	const char *file = "/sys/kernel/debug/tracing/kprobe_events";
+
+	return append_to_file(file, "%c:%s/%s %s+0x%zx",
+			      retprobe ? 'r' : 'p',
+			      retprobe ? "kretprobes" : "kprobes",
+			      probe_name, kfunc_name, offset);
+}
+
+static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
+{
+	const char *file = "/sys/kernel/debug/tracing/kprobe_events";
+
+	return append_to_file(file, "-:%s/%s", retprobe ? "kretprobes" : "kprobes", probe_name);
+}
+
+static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
+{
+	char file[256];
+
+	snprintf(file, sizeof(file),
+		 "/sys/kernel/debug/tracing/events/%s/%s/id",
+		 retprobe ? "kretprobes" : "kprobes", probe_name);
+
+	return parse_uint_from_file(file, "%d\n");
+}
+
+static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
+					 const char *kfunc_name, size_t offset, int pid)
 {
 	struct perf_event_attr attr = {};
 	char errmsg[STRERR_BUFSIZE];
 	int type, pfd, err;
 
-	err = add_kprobe_event_legacy(name, retprobe, offset);
+	err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
 	if (err < 0) {
-		pr_warn("failed to add legacy kprobe event: %s\n",
+		pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
+			kfunc_name, offset,
 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
 		return err;
 	}
-	type = determine_kprobe_perf_type_legacy(name, retprobe);
+	type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
 	if (type < 0) {
-		pr_warn("failed to determine legacy kprobe event id: %s\n",
+		pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
+			kfunc_name, offset,
 			libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
 		return type;
 	}
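On kernels without the perf "kprobe" PMU, the path above drives the tracefs
text interface instead: append a probe definition to kprobe_events, read the
synthesized event's id, and open it as an ordinary PERF_TYPE_TRACEPOINT event.
A standalone sketch of that protocol; the probe name mirrors what
gen_kprobe_legacy_event_name() would produce for pid 1234, and error handling
is trimmed:

#include <stdio.h>

int main(void)
{
	const char *ctl = "/sys/kernel/debug/tracing/kprobe_events";
	const char *name = "libbpf_1234_do_sys_open_0x0";	/* illustrative */
	char path[256];
	int id = -1;
	FILE *f;

	/* 1) create: same format as add_kprobe_event_legacy() */
	f = fopen(ctl, "a");
	if (!f)
		return 1;
	fprintf(f, "p:kprobes/%s do_sys_open+0x0", name);
	fclose(f);

	/* 2) resolve the tracepoint id to feed perf_event_open() */
	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/events/kprobes/%s/id", name);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%d", &id) != 1)
			id = -1;
		fclose(f);
	}
	printf("event id: %d\n", id);

	/* 3) remove on detach, as remove_kprobe_event_legacy() does */
	f = fopen(ctl, "a");
	if (f) {
		fprintf(f, "-:kprobes/%s", name);
		fclose(f);
	}
	return 0;
}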
@@ -9340,7 +9369,7 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
 	char errmsg[STRERR_BUFSIZE];
 	char *legacy_probe = NULL;
 	struct bpf_link *link;
-	unsigned long offset;
+	size_t offset;
 	bool retprobe, legacy;
 	int pfd, err;
 
@@ -9357,36 +9386,48 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
 					    func_name, offset,
 					    -1 /* pid */, 0 /* ref_ctr_off */);
 	} else {
+		char probe_name[256];
+
+		gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
+					     func_name, offset);
+
 		legacy_probe = strdup(func_name);
 		if (!legacy_probe)
 			return libbpf_err_ptr(-ENOMEM);
 
-		pfd = perf_event_kprobe_open_legacy(retprobe, func_name,
+		pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
 						    offset, -1 /* pid */);
 	}
 	if (pfd < 0) {
-		pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
-			prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
-			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
-		return libbpf_err_ptr(pfd);
+		err = pfd;	/* both open paths return a negative error code */
+		pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
+			prog->name, retprobe ? "kretprobe" : "kprobe",
+			func_name, offset,
+			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+		goto err_out;
 	}
 	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
 	err = libbpf_get_error(link);
 	if (err) {
 		close(pfd);
-		pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
-			prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
+		pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
+			prog->name, retprobe ? "kretprobe" : "kprobe",
+			func_name, offset,
 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
-		return libbpf_err_ptr(err);
+		goto err_out;
 	}
 	if (legacy) {
 		struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
 
 		perf_link->legacy_probe_name = legacy_probe;
+		perf_link->legacy_is_kprobe = true;
 		perf_link->legacy_is_retprobe = retprobe;
 	}
 
 	return link;
+err_out:
+	free(legacy_probe);
+	return libbpf_err_ptr(err);
 }
 
 struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
@@ -9400,7 +9441,7 @@ struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
 	return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
 }
 
-static struct bpf_link *attach_kprobe(const struct bpf_program *prog)
+static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie)
 {
 	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
 	unsigned long offset = 0;
@@ -9409,8 +9450,11 @@ static struct bpf_link *attach_kprobe(const struct bpf_program *prog)
 	char *func;
 	int n, err;
 
-	func_name = prog->sec_name + prog->sec_def->len;
-	opts.retprobe = strcmp(prog->sec_def->sec, "kretprobe/") == 0;
+	opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
+	if (opts.retprobe)
+		func_name = prog->sec_name + sizeof("kretprobe/") - 1;
+	else
+		func_name = prog->sec_name + sizeof("kprobe/") - 1;
 
 	n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
 	if (n < 1) {
@@ -9431,17 +9475,96 @@ static struct bpf_link *attach_kprobe(const struct bpf_program *prog)
 	return link;
 }
 
+static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
+					 const char *binary_path, uint64_t offset)
+{
+	int i;
+
+	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
+
+	/* sanitize binary_path in the probe name */
+	for (i = 0; buf[i]; i++) {
+		if (!isalnum(buf[i]))
+			buf[i] = '_';
+	}
+}
+
+static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
+					  const char *binary_path, size_t offset)
+{
+	const char *file = "/sys/kernel/debug/tracing/uprobe_events";
+
+	return append_to_file(file, "%c:%s/%s %s:0x%zx",
+			      retprobe ? 'r' : 'p',
+			      retprobe ? "uretprobes" : "uprobes",
+			      probe_name, binary_path, offset);
+}
+
+static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
+{
+	const char *file = "/sys/kernel/debug/tracing/uprobe_events";
+
+	return append_to_file(file, "-:%s/%s", retprobe ? "uretprobes" : "uprobes", probe_name);
+}
+
+static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
+{
+	char file[512];
+
+	snprintf(file, sizeof(file),
+		 "/sys/kernel/debug/tracing/events/%s/%s/id",
+		 retprobe ? "uretprobes" : "uprobes", probe_name);
+
+	return parse_uint_from_file(file, "%d\n");
+}
+
+static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
+					 const char *binary_path, size_t offset, int pid)
+{
+	struct perf_event_attr attr;
+	int type, pfd, err;
+
+	err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
+	if (err < 0) {
+		pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
+			binary_path, offset, err);
+		return err;
+	}
+	type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
+	if (type < 0) {
+		pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
+			binary_path, offset, type);
+		return type;
+	}
+
+	memset(&attr, 0, sizeof(attr));
+	attr.size = sizeof(attr);
+	attr.config = type;
+	attr.type = PERF_TYPE_TRACEPOINT;
+
+	pfd = syscall(__NR_perf_event_open, &attr,
+		      pid < 0 ? -1 : pid, /* pid */
+		      pid == -1 ? 0 : -1, /* cpu */
+		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
+	if (pfd < 0) {
+		err = -errno;
+		pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
+		return err;
+	}
+	return pfd;
+}
+
 LIBBPF_API struct bpf_link *
 bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
 				const char *binary_path, size_t func_offset,
 				const struct bpf_uprobe_opts *opts)
 {
 	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
-	char errmsg[STRERR_BUFSIZE];
+	char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
 	struct bpf_link *link;
 	size_t ref_ctr_off;
 	int pfd, err;
-	bool retprobe;
+	bool retprobe, legacy;
 
 	if (!OPTS_VALID(opts, bpf_uprobe_opts))
 		return libbpf_err_ptr(-EINVAL);
@@ -9450,15 +9573,35 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
 	ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
 	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
 
-	pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
-				    func_offset, pid, ref_ctr_off);
+	legacy = determine_uprobe_perf_type() < 0;
+	if (!legacy) {
+		pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
+					    func_offset, pid, ref_ctr_off);
+	} else {
+		char probe_name[512];
+
+		if (ref_ctr_off)
+			return libbpf_err_ptr(-EINVAL);
+
+		gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
+					     binary_path, func_offset);
+
+		legacy_probe = strdup(probe_name);
+		if (!legacy_probe)
+			return libbpf_err_ptr(-ENOMEM);
+
+		pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
+						    binary_path, func_offset, pid);
+	}
 	if (pfd < 0) {
+		err = pfd;
 		pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
 			prog->name, retprobe ? "uretprobe" : "uprobe",
 			binary_path, func_offset,
-			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
-		return libbpf_err_ptr(pfd);
+			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+		goto err_out;
 	}
+
 	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
 	err = libbpf_get_error(link);
 	if (err) {
@@ -9467,9 +9610,20 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
 			prog->name, retprobe ? "uretprobe" : "uprobe",
 			binary_path, func_offset,
 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
-		return libbpf_err_ptr(err);
+		goto err_out;
+	}
+	if (legacy) {
+		struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
+
+		perf_link->legacy_probe_name = legacy_probe;
+		perf_link->legacy_is_kprobe = false;
+		perf_link->legacy_is_retprobe = retprobe;
 	}
 	return link;
+err_out:
+	free(legacy_probe);
+	return libbpf_err_ptr(err);
 }
 
 struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
@@ -9573,7 +9727,7 @@ struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
 	return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
 }
 
-static struct bpf_link *attach_tp(const struct bpf_program *prog)
+static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie)
 {
 	char *sec_name, *tp_cat, *tp_name;
 	struct bpf_link *link;
@@ -9582,8 +9736,11 @@ static struct bpf_link *attach_tp(const struct bpf_program *prog)
 	if (!sec_name)
 		return libbpf_err_ptr(-ENOMEM);
 
-	/* extract "tp/<category>/<name>" */
-	tp_cat = sec_name + prog->sec_def->len;
+	/* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
+	if (str_has_pfx(prog->sec_name, "tp/"))
+		tp_cat = sec_name + sizeof("tp/") - 1;
+	else
+		tp_cat = sec_name + sizeof("tracepoint/") - 1;
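+	/* e.g. SEC("tp/syscalls/sys_enter_write") gives tp_cat "syscalls" and
+	 * tp_name "sys_enter_write" after the '/' split below
+	 */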
 	tp_name = strchr(tp_cat, '/');
 	if (!tp_name) {
 		free(sec_name);
@@ -9627,9 +9784,14 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *pr
 	return link;
 }
 
-static struct bpf_link *attach_raw_tp(const struct bpf_program *prog)
+static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie)
 {
-	const char *tp_name = prog->sec_name + prog->sec_def->len;
+	const char *tp_name;
+
+	if (str_has_pfx(prog->sec_name, "raw_tp/"))
+		tp_name = prog->sec_name + sizeof("raw_tp/") - 1;
+	else
+		tp_name = prog->sec_name + sizeof("raw_tracepoint/") - 1;
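+	/* e.g. SEC("raw_tp/sched_switch") yields tp_name "sched_switch" */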
 
 	return bpf_program__attach_raw_tracepoint(prog, tp_name);
 }
@@ -9674,12 +9836,12 @@ struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
 	return bpf_program__attach_btf_id(prog);
 }
 
-static struct bpf_link *attach_trace(const struct bpf_program *prog)
+static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie)
 {
 	return bpf_program__attach_trace(prog);
 }
 
-static struct bpf_link *attach_lsm(const struct bpf_program *prog)
+static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie)
 {
 	return bpf_program__attach_lsm(prog);
 }
@@ -9810,7 +9972,7 @@ bpf_program__attach_iter(const struct bpf_program *prog,
 	return link;
 }
 
-static struct bpf_link *attach_iter(const struct bpf_program *prog)
+static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie)
 {
 	return bpf_program__attach_iter(prog, NULL);
 }
@@ -9820,7 +9982,7 @@ struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
 	if (!prog->sec_def || !prog->sec_def->attach_fn)
 		return libbpf_err_ptr(-ESRCH);
 
-	return prog->sec_def->attach_fn(prog);
+	return prog->sec_def->attach_fn(prog, prog->sec_def->cookie);
 }
 
 static int bpf_link__detach_struct_ops(struct bpf_link *link)
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index c90e3d79e72cfee464b3a5769263f2d0c9110c2c..e35490c54eb3ac7847970d3a070425e9c3abf321 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -269,7 +269,7 @@ struct bpf_kprobe_opts {
 	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
 	__u64 bpf_cookie;
 	/* function's offset to install kprobe to */
-	unsigned long offset;
+	size_t offset;
 	/* kprobe is return probe */
 	bool retprobe;
 	size_t :0;
@@ -481,9 +481,13 @@ struct bpf_map_def {
 	unsigned int map_flags;
 };
 
-/*
- * The 'struct bpf_map' in include/linux/bpf.h is internal to the kernel,
- * so no need to worry about a name clash.
+/**
+ * @brief **bpf_object__find_map_by_name()** returns the BPF map of
+ * the given name, if it exists within the passed BPF object
+ * @param obj BPF object
+ * @param name name of the BPF map
+ * @return BPF map instance, if such map exists within the BPF object;
+ * or NULL otherwise.
  */
 LIBBPF_API struct bpf_map *
 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);
@@ -509,7 +513,12 @@ bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
 LIBBPF_API struct bpf_map *
 bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);
 
-/* get/set map FD */
+/**
+ * @brief **bpf_map__fd()** gets the file descriptor of the passed
+ * BPF map
+ * @param map the BPF map instance
+ * @return the file descriptor; or -EINVAL in case of an error
+ */
 LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
 LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
 /* get map definition */
@@ -550,6 +559,14 @@ LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
 					  const void *data, size_t size);
 LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
 LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
+
+/**
+ * @brief **bpf_map__is_internal()** tells the caller whether or not the
+ * passed map is a special map created by libbpf automatically for things like
+ * global variables, __ksym externs, Kconfig values, etc.
+ * @param map the bpf_map
+ * @return true, if the map is an internal map; false, otherwise
+ */
 LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
 LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
 LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);
@@ -561,6 +578,38 @@ LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
 LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
 LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);
 
+/**
+ * @brief **libbpf_get_error()** extracts the error code from the passed
+ * pointer
+ * @param ptr pointer returned from libbpf API function
+ * @return error code; or 0 if no error occurred
+ *
+ * Many libbpf API functions which return pointers encode error codes inside
+ * the pointer itself rather than returning NULL. This means
+ * **libbpf_get_error()** should be used on the return value immediately
+ * after calling the API function, with no intervening calls that could
+ * clobber the `errno` variable. Consult the individual function's
+ * documentation to verify whether this logic applies.
+ *
+ * For these API functions, if `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)`
+ * is enabled, NULL is returned on error instead.
+ *
+ * If ptr is NULL, then errno should be already set by the failing
+ * API, because libbpf never returns NULL on success and it now always
+ * sets errno on error.
+ *
+ * Example usage:
+ *
+ *   struct perf_buffer *pb;
+ *   int err;
+ *
+ *   pb = perf_buffer__new(bpf_map__fd(obj->maps.events), PERF_BUFFER_PAGES, &opts);
+ *   err = libbpf_get_error(pb);
+ *   if (err) {
+ *	  pb = NULL;
+ *	  fprintf(stderr, "failed to open perf buffer: %d\n", err);
+ *	  goto cleanup;
+ *   }
+ */
 LIBBPF_API long libbpf_get_error(const void *ptr);
 
 struct bpf_prog_load_attr {
@@ -825,9 +874,10 @@ bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);
 LIBBPF_API void
 bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
 
-/*
- * A helper function to get the number of possible CPUs before looking up
- * per-CPU maps. Negative errno is returned on failure.
+/**
+ * @brief **libbpf_num_possible_cpus()** is a helper function to get the
+ * number of possible CPUs that the host kernel supports and expects.
+ * @return number of possible CPUs; or error code on failure
  *
  * Example usage:
  *
@@ -837,7 +887,6 @@ bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
  *     }
  *     long values[ncpus];
  *     bpf_map_lookup_elem(per_cpu_map_fd, key, values);
- *
  */
 LIBBPF_API int libbpf_num_possible_cpus(void);
 
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index ceb0c98979bc6b532e66905f7f21762526761bd2..ec79400517d422d965aa4a87b84f6c92a2a221f3 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -89,6 +89,13 @@
 	(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
 #endif
 
+/* Check whether a string `str` has prefix `pfx`, regardless of whether `pfx`
+ * is a string literal known at compile time or a char * pointer known only
+ * at runtime.
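+ * For example, str_has_pfx("raw_tp/sched_switch", "raw_tp/") evaluates to true.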
+ */
+#define str_has_pfx(str, pfx) \
+	(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)
+
 /* Symbol versioning is different between static and shared library.
  * Properly versioned symbols are needed for shared library, but
  * only the symbol of the new version is needed for static library.
diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h
index df0d03dcffabc9c7f2c0c4ace81f16eedc57c02a..74e6f860f703efe5658c6c4d74a3a4c35f231930 100644
--- a/tools/lib/bpf/libbpf_legacy.h
+++ b/tools/lib/bpf/libbpf_legacy.h
@@ -46,6 +46,15 @@ enum libbpf_strict_mode {
 	 */
 	LIBBPF_STRICT_DIRECT_ERRS = 0x02,
 
+	/*
+	 * Enforce strict BPF program section (SEC()) names.
+	 * E.g., while previously SEC("xdp_whatever") or SEC("perf_event_blah") were
+	 * allowed, with LIBBPF_STRICT_SEC_NAME they become unrecognized by
+	 * libbpf and would have to be just SEC("xdp") and SEC("perf_event"),
+	 * respectively.
+	 */
+	LIBBPF_STRICT_SEC_NAME = 0x04,
+
 	__LIBBPF_STRICT_LAST,
 };
 
diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h
index b22b50c1b173e8bf251ac5391ecf70d7346cd842..9cf66702fa8ddb015e87b4965d2b09644fa96532 100644
--- a/tools/lib/bpf/skel_internal.h
+++ b/tools/lib/bpf/skel_internal.h
@@ -105,10 +105,12 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
 	err = skel_sys_bpf(BPF_PROG_RUN, &attr, sizeof(attr));
 	if (err < 0 || (int)attr.test.retval < 0) {
 		opts->errstr = "failed to execute loader prog";
-		if (err < 0)
+		if (err < 0) {
 			err = -errno;
-		else
+		} else {
 			err = (int)attr.test.retval;
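+			/* mirror the loader program's error into errno for
+			 * callers that check errno rather than the return
+			 * value
+			 */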
+			errno = -err;
+		}
 		goto out;
 	}
 	err = 0;
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 5f058058aeed69298b09a4d91562495aab58c374..aa94739a18357a98362d86fe222c7fc8f1375f19 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -315,7 +315,8 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h		\
 		linked_vars.skel.h linked_maps.skel.h
 
 LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
-	test_ksyms_module.c test_ringbuf.c atomics.c trace_printk.c
+	test_ksyms_module.c test_ringbuf.c atomics.c trace_printk.c \
+	trace_vprintk.c
 SKEL_BLACKLIST += $$(LSKELS)
 
 test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o
diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst
index 8200c0da2769aba704f1bada7c01c99fce087084..554553acc6d940adac1e7ae2aca9c1fdf9f47ab2 100644
--- a/tools/testing/selftests/bpf/README.rst
+++ b/tools/testing/selftests/bpf/README.rst
@@ -242,3 +242,16 @@ To fix this issue, user newer libbpf.
 .. Links
 .. _clang reloc patch: https://reviews.llvm.org/D102712
 .. _kernel llvm reloc: /Documentation/bpf/llvm_reloc.rst
+
+Clang dependencies for the u32 spill test (xdpwall)
+===================================================
+The xdpwall selftest requires a change in `Clang 14`__.
+
+Without it, the xdpwall selftest will fail and the error message
+from running test_progs will look like:
+
+.. code-block:: console
+
+  test_xdpwall:FAIL:Does LLVM have https://reviews.llvm.org/D109073? unexpected error: -4007
+
+__ https://reviews.llvm.org/D109073
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index bf307bb9e446d0d592d1f138011f304fa285f686..6c511dcd1465db2ab7fdbce32ad7f2baec973088 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -14,6 +14,20 @@ void test_attach_probe(void)
 	struct test_attach_probe* skel;
 	size_t uprobe_offset;
 	ssize_t base_addr, ref_ctr_offset;
+	bool legacy;
+
+	/* Check if new-style kprobe/uprobe API is supported.
+	 * Kernels that support new FD-based kprobe and uprobe BPF attachment
+	 * through perf_event_open() syscall expose
+	 * /sys/bus/event_source/devices/kprobe/type and
+	 * /sys/bus/event_source/devices/uprobe/type files, respectively. They
+	 * contain magic numbers that are passed as the "type" field of
+	 * perf_event_attr. The lack of such files indicates a legacy kernel
+	 * with the old-style kprobe/uprobe attach interface that creates
+	 * per-probe events through tracefs. In that case the ref_ctr_offset
+	 * feature is not supported, so we don't test it.
+	 */
+	legacy = access("/sys/bus/event_source/devices/kprobe/type", F_OK) != 0;
 
 	base_addr = get_base_addr();
 	if (CHECK(base_addr < 0, "get_base_addr",
@@ -45,10 +59,11 @@ void test_attach_probe(void)
 		goto cleanup;
 	skel->links.handle_kretprobe = kretprobe_link;
 
-	ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");
+	if (!legacy)
+		ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");
 
 	uprobe_opts.retprobe = false;
-	uprobe_opts.ref_ctr_offset = ref_ctr_offset;
+	uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset;
 	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
 						      0 /* self pid */,
 						      "/proc/self/exe",
@@ -58,11 +73,12 @@ void test_attach_probe(void)
 		goto cleanup;
 	skel->links.handle_uprobe = uprobe_link;
 
-	ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");
+	if (!legacy)
+		ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");
 
 	/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
 	uprobe_opts.retprobe = true;
-	uprobe_opts.ref_ctr_offset = ref_ctr_offset;
+	uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset;
 	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
 							 -1 /* any pid */,
 							 "/proc/self/exe",
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index 52ccf0cf35e1ce01d0a20a70939d474656aaa008..87f9df653e4e23d289f562b3ff71a02c24976dec 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -358,12 +358,27 @@ static void test_btf_dump_int_data(struct btf *btf, struct btf_dump *d,
 	TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, int, sizeof(int)-1, "", 1);
 
 #ifdef __SIZEOF_INT128__
-	TEST_BTF_DUMP_DATA(btf, d, NULL, str, __int128, BTF_F_COMPACT,
-			   "(__int128)0xffffffffffffffff",
-			   0xffffffffffffffff);
-	ASSERT_OK(btf_dump_data(btf, d, "__int128", NULL, 0, &i, 16, str,
-				"(__int128)0xfffffffffffffffffffffffffffffffe"),
-		  "dump __int128");
+	/* GCC encodes the unsigned __int128 type as "__int128 unsigned" in DWARF,
+	 * while clang encodes it as "unsigned __int128".
+	 * Check which variant is available before running the actual test.
+	 */
+	if (btf__find_by_name(btf, "unsigned __int128") > 0) {
+		TEST_BTF_DUMP_DATA(btf, d, NULL, str, unsigned __int128, BTF_F_COMPACT,
+				   "(unsigned __int128)0xffffffffffffffff",
+				   0xffffffffffffffff);
+		ASSERT_OK(btf_dump_data(btf, d, "unsigned __int128", NULL, 0, &i, 16, str,
+					"(unsigned __int128)0xfffffffffffffffffffffffffffffffe"),
+			  "dump unsigned __int128");
+	} else if (btf__find_by_name(btf, "__int128 unsigned") > 0) {
+		TEST_BTF_DUMP_DATA(btf, d, NULL, str, __int128 unsigned, BTF_F_COMPACT,
+				   "(__int128 unsigned)0xffffffffffffffff",
+				   0xffffffffffffffff);
+		ASSERT_OK(btf_dump_data(btf, d, "__int128 unsigned", NULL, 0, &i, 16, str,
+					"(__int128 unsigned)0xfffffffffffffffffffffffffffffffe"),
+			  "dump unsigned __int128");
+	} else {
+		ASSERT_TRUE(false, "unsigned_int128_not_found");
+	}
 #endif
 }
 
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index 225714f71ac6e19a5ac718be7c2cf4bc04201a89..ac54e3f91d42fd22ffbe16189f7b6cef96830678 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -458,9 +458,9 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
 		return -1;
 
 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
-		snprintf(prog_name, sizeof(prog_name), "flow_dissector/%i", i);
+		snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);
 
-		prog = bpf_object__find_program_by_title(obj, prog_name);
+		prog = bpf_object__find_program_by_name(obj, prog_name);
 		if (!prog)
 			return -1;
 
diff --git a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
index f81db9135ae42f4463fc20a203c06168c1542685..67e86f8d8677565c9675d9d2c2cd089df8d7b4a6 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
@@ -38,10 +38,9 @@ static int create_perf_events(void)
 
 static void close_perf_events(void)
 {
-	int cpu = 0;
-	int fd;
+	int cpu, fd;
 
-	while (cpu++ < cpu_cnt) {
+	for (cpu = 0; cpu < cpu_cnt; cpu++) {
 		fd = pfd_array[cpu];
 		if (fd < 0)
 			break;
diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c
index 95bd1209735878e6968b55db6988ad0fbb5c14c1..52fe157e2a90266afbf4ddfe9896bdf13014d52c 100644
--- a/tools/testing/selftests/bpf/prog_tests/probe_user.c
+++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c
@@ -3,7 +3,7 @@
 
 void test_probe_user(void)
 {
-	const char *prog_name = "kprobe/__sys_connect";
+	const char *prog_name = "handle_sys_connect";
 	const char *obj_file = "./test_probe_user.o";
 	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, );
 	int err, results_map_fd, sock_fd, duration = 0;
@@ -18,7 +18,7 @@ void test_probe_user(void)
 	if (!ASSERT_OK_PTR(obj, "obj_open_file"))
 		return;
 
-	kprobe_prog = bpf_object__find_program_by_title(obj, prog_name);
+	kprobe_prog = bpf_object__find_program_by_name(obj, prog_name);
 	if (CHECK(!kprobe_prog, "find_probe",
 		  "prog '%s' not found\n", prog_name))
 		goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
index 4e91f4d6466c687179cddeb72dc4dac1be7c3ef8..873323fb18ba973c000c26ee352455cb3d9c9c69 100644
--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -1,6 +1,21 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
 
+static void toggle_object_autoload_progs(const struct bpf_object *obj,
+					 const char *name_load)
+{
+	struct bpf_program *prog;
+
+	bpf_object__for_each_program(prog, obj) {
+		const char *name = bpf_program__name(prog);
+
+		if (!strcmp(name_load, name))
+			bpf_program__set_autoload(prog, true);
+		else
+			bpf_program__set_autoload(prog, false);
+	}
+}
+
 void test_reference_tracking(void)
 {
 	const char *file = "test_sk_lookup_kern.o";
@@ -9,44 +24,49 @@ void test_reference_tracking(void)
 		.object_name = obj_name,
 		.relaxed_maps = true,
 	);
-	struct bpf_object *obj;
+	struct bpf_object *obj_iter, *obj = NULL;
 	struct bpf_program *prog;
 	__u32 duration = 0;
 	int err = 0;
 
-	obj = bpf_object__open_file(file, &open_opts);
-	if (!ASSERT_OK_PTR(obj, "obj_open_file"))
+	obj_iter = bpf_object__open_file(file, &open_opts);
+	if (!ASSERT_OK_PTR(obj_iter, "obj_iter_open_file"))
 		return;
 
-	if (CHECK(strcmp(bpf_object__name(obj), obj_name), "obj_name",
+	if (CHECK(strcmp(bpf_object__name(obj_iter), obj_name), "obj_name",
 		  "wrong obj name '%s', expected '%s'\n",
-		  bpf_object__name(obj), obj_name))
+		  bpf_object__name(obj_iter), obj_name))
 		goto cleanup;
 
-	bpf_object__for_each_program(prog, obj) {
-		const char *title;
+	bpf_object__for_each_program(prog, obj_iter) {
+		const char *name;
 
-		/* Ignore .text sections */
-		title = bpf_program__section_name(prog);
-		if (strstr(title, ".text") != NULL)
+		name = bpf_program__name(prog);
+		if (!test__start_subtest(name))
 			continue;
 
-		if (!test__start_subtest(title))
-			continue;
+		obj = bpf_object__open_file(file, &open_opts);
+		if (!ASSERT_OK_PTR(obj, "obj_open_file"))
+			goto cleanup;
 
+		toggle_object_autoload_progs(obj, name);
 		/* Expect verifier failure if test name has 'err' */
-		if (strstr(title, "err_") != NULL) {
+		if (strncmp(name, "err_", sizeof("err_") - 1) == 0) {
 			libbpf_print_fn_t old_print_fn;
 
 			old_print_fn = libbpf_set_print(NULL);
-			err = !bpf_program__load(prog, "GPL", 0);
+			err = !bpf_object__load(obj);
 			libbpf_set_print(old_print_fn);
 		} else {
-			err = bpf_program__load(prog, "GPL", 0);
+			err = bpf_object__load(obj);
 		}
-		CHECK(err, title, "\n");
+		ASSERT_OK(err, name);
+
+		bpf_object__close(obj);
+		obj = NULL;
 	}
 
 cleanup:
 	bpf_object__close(obj);
+	bpf_object__close(obj_iter);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
index 3a469099f30d89334ac21cf3e4b53cef798ddeda..1d272e05188eb32dd16cff4633a258b81bccaf46 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
@@ -48,7 +48,7 @@ configure_stack(void)
 		return false;
 	sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf",
 		       "direct-action object-file ./test_sk_assign.o",
-		       "section classifier/sk_assign_test",
+		       "section tc",
 		       (env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "verbose");
 	if (CHECK(system(tc_cmd), "BPF load failed;",
 		  "run with -vv for more info\n"))
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
index 51fac975b3163fd9491fd66210229cea15432f1d..bc34f7773444f9df62a96c90ac4d10ccc7f2d950 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
@@ -2,7 +2,7 @@
 #include <test_progs.h>
 #include "cgroup_helpers.h"
 
-static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title)
+static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name)
 {
 	enum bpf_attach_type attach_type;
 	enum bpf_prog_type prog_type;
@@ -15,23 +15,23 @@ static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title)
 		return -1;
 	}
 
-	prog = bpf_object__find_program_by_title(obj, title);
+	prog = bpf_object__find_program_by_name(obj, name);
 	if (!prog) {
-		log_err("Failed to find %s BPF program", title);
+		log_err("Failed to find %s BPF program", name);
 		return -1;
 	}
 
 	err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
 			      attach_type, BPF_F_ALLOW_MULTI);
 	if (err) {
-		log_err("Failed to attach %s BPF program", title);
+		log_err("Failed to attach %s BPF program", name);
 		return -1;
 	}
 
 	return 0;
 }
 
-static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title)
+static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name)
 {
 	enum bpf_attach_type attach_type;
 	enum bpf_prog_type prog_type;
@@ -42,7 +42,7 @@ static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title)
 	if (err)
 		return -1;
 
-	prog = bpf_object__find_program_by_title(obj, title);
+	prog = bpf_object__find_program_by_name(obj, name);
 	if (!prog)
 		return -1;
 
@@ -89,7 +89,7 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
 	 * - child:  0x80 -> 0x90
 	 */
 
-	err = prog_attach(obj, cg_child, "cgroup/getsockopt/child");
+	err = prog_attach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
 	if (err)
 		goto detach;
 
@@ -113,7 +113,7 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
 	 * - parent: 0x90 -> 0xA0
 	 */
 
-	err = prog_attach(obj, cg_parent, "cgroup/getsockopt/parent");
+	err = prog_attach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent");
 	if (err)
 		goto detach;
 
@@ -157,7 +157,7 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
 	 * - parent: unexpected 0x40, EPERM
 	 */
 
-	err = prog_detach(obj, cg_child, "cgroup/getsockopt/child");
+	err = prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
 	if (err) {
 		log_err("Failed to detach child program");
 		goto detach;
@@ -198,8 +198,8 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
 	}
 
 detach:
-	prog_detach(obj, cg_child, "cgroup/getsockopt/child");
-	prog_detach(obj, cg_parent, "cgroup/getsockopt/parent");
+	prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
+	prog_detach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent");
 
 	return err;
 }
@@ -236,7 +236,7 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
 
 	/* Attach child program and make sure it adds 0x10. */
 
-	err = prog_attach(obj, cg_child, "cgroup/setsockopt");
+	err = prog_attach(obj, cg_child, "cgroup/setsockopt", "_setsockopt");
 	if (err)
 		goto detach;
 
@@ -263,7 +263,7 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
 
 	/* Attach parent program and make sure it adds another 0x10. */
 
-	err = prog_attach(obj, cg_parent, "cgroup/setsockopt");
+	err = prog_attach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt");
 	if (err)
 		goto detach;
 
@@ -289,8 +289,8 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
 	}
 
 detach:
-	prog_detach(obj, cg_child, "cgroup/setsockopt");
-	prog_detach(obj, cg_parent, "cgroup/setsockopt");
+	prog_detach(obj, cg_child, "cgroup/setsockopt", "_setsockopt");
+	prog_detach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt");
 
 	return err;
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index 7bf3a7a97d7bd50737397b00db73d13db0c5d426..9825f1f7bfcc20218320686f54bfd0609976949d 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -21,7 +21,7 @@ static void test_tailcall_1(void)
 	if (CHECK_FAIL(err))
 		return;
 
-	prog = bpf_object__find_program_by_title(obj, "classifier");
+	prog = bpf_object__find_program_by_name(obj, "entry");
 	if (CHECK_FAIL(!prog))
 		goto out;
 
@@ -38,9 +38,9 @@ static void test_tailcall_1(void)
 		goto out;
 
 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
-		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
-		prog = bpf_object__find_program_by_title(obj, prog_name);
+		prog = bpf_object__find_program_by_name(obj, prog_name);
 		if (CHECK_FAIL(!prog))
 			goto out;
 
@@ -70,9 +70,9 @@ static void test_tailcall_1(void)
 	      err, errno, retval);
 
 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
-		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
-		prog = bpf_object__find_program_by_title(obj, prog_name);
+		prog = bpf_object__find_program_by_name(obj, prog_name);
 		if (CHECK_FAIL(!prog))
 			goto out;
 
@@ -92,9 +92,9 @@ static void test_tailcall_1(void)
 
 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
 		j = bpf_map__def(prog_array)->max_entries - 1 - i;
-		snprintf(prog_name, sizeof(prog_name), "classifier/%i", j);
+		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);
 
-		prog = bpf_object__find_program_by_title(obj, prog_name);
+		prog = bpf_object__find_program_by_name(obj, prog_name);
 		if (CHECK_FAIL(!prog))
 			goto out;
 
@@ -159,7 +159,7 @@ static void test_tailcall_2(void)
 	if (CHECK_FAIL(err))
 		return;
 
-	prog = bpf_object__find_program_by_title(obj, "classifier");
+	prog = bpf_object__find_program_by_name(obj, "entry");
 	if (CHECK_FAIL(!prog))
 		goto out;
 
@@ -176,9 +176,9 @@ static void test_tailcall_2(void)
 		goto out;
 
 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
-		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
-		prog = bpf_object__find_program_by_title(obj, prog_name);
+		prog = bpf_object__find_program_by_name(obj, prog_name);
 		if (CHECK_FAIL(!prog))
 			goto out;
 
@@ -233,7 +233,7 @@ static void test_tailcall_count(const char *which)
 	if (CHECK_FAIL(err))
 		return;
 
-	prog = bpf_object__find_program_by_title(obj, "classifier");
+	prog = bpf_object__find_program_by_name(obj, "entry");
 	if (CHECK_FAIL(!prog))
 		goto out;
 
@@ -249,7 +249,7 @@ static void test_tailcall_count(const char *which)
 	if (CHECK_FAIL(map_fd < 0))
 		goto out;
 
-	prog = bpf_object__find_program_by_title(obj, "classifier/0");
+	prog = bpf_object__find_program_by_name(obj, "classifier_0");
 	if (CHECK_FAIL(!prog))
 		goto out;
 
@@ -329,7 +329,7 @@ static void test_tailcall_4(void)
 	if (CHECK_FAIL(err))
 		return;
 
-	prog = bpf_object__find_program_by_title(obj, "classifier");
+	prog = bpf_object__find_program_by_name(obj, "entry");
 	if (CHECK_FAIL(!prog))
 		goto out;
 
@@ -354,9 +354,9 @@ static void test_tailcall_4(void)
 		return;
 
 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
-		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
-		prog = bpf_object__find_program_by_title(obj, prog_name);
+		prog = bpf_object__find_program_by_name(obj, prog_name);
 		if (CHECK_FAIL(!prog))
 			goto out;
 
@@ -417,7 +417,7 @@ static void test_tailcall_5(void)
 	if (CHECK_FAIL(err))
 		return;
 
-	prog = bpf_object__find_program_by_title(obj, "classifier");
+	prog = bpf_object__find_program_by_name(obj, "entry");
 	if (CHECK_FAIL(!prog))
 		goto out;
 
@@ -442,9 +442,9 @@ static void test_tailcall_5(void)
 		return;
 
 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
-		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
-		prog = bpf_object__find_program_by_title(obj, prog_name);
+		prog = bpf_object__find_program_by_name(obj, prog_name);
 		if (CHECK_FAIL(!prog))
 			goto out;
 
@@ -503,7 +503,7 @@ static void test_tailcall_bpf2bpf_1(void)
 	if (CHECK_FAIL(err))
 		return;
 
-	prog = bpf_object__find_program_by_title(obj, "classifier");
+	prog = bpf_object__find_program_by_name(obj, "entry");
 	if (CHECK_FAIL(!prog))
 		goto out;
 
@@ -521,9 +521,9 @@ static void test_tailcall_bpf2bpf_1(void)
 
 	/* nop -> jmp */
 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
-		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
-		prog = bpf_object__find_program_by_title(obj, prog_name);
+		prog = bpf_object__find_program_by_name(obj, prog_name);
 		if (CHECK_FAIL(!prog))
 			goto out;
 
@@ -587,7 +587,7 @@ static void test_tailcall_bpf2bpf_2(void)
 	if (CHECK_FAIL(err))
 		return;
 
-	prog = bpf_object__find_program_by_title(obj, "classifier");
+	prog = bpf_object__find_program_by_name(obj, "entry");
 	if (CHECK_FAIL(!prog))
 		goto out;
 
@@ -603,7 +603,7 @@ static void test_tailcall_bpf2bpf_2(void)
 	if (CHECK_FAIL(map_fd < 0))
 		goto out;
 
-	prog = bpf_object__find_program_by_title(obj, "classifier/0");
+	prog = bpf_object__find_program_by_name(obj, "classifier_0");
 	if (CHECK_FAIL(!prog))
 		goto out;
 
@@ -665,7 +665,7 @@ static void test_tailcall_bpf2bpf_3(void)
 	if (CHECK_FAIL(err))
 		return;
 
-	prog = bpf_object__find_program_by_title(obj, "classifier");
+	prog = bpf_object__find_program_by_name(obj, "entry");
 	if (CHECK_FAIL(!prog))
 		goto out;
 
@@ -682,9 +682,9 @@ static void test_tailcall_bpf2bpf_3(void)
 		goto out;
 
 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
-		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
-		prog = bpf_object__find_program_by_title(obj, prog_name);
+		prog = bpf_object__find_program_by_name(obj, prog_name);
 		if (CHECK_FAIL(!prog))
 			goto out;
 
@@ -762,7 +762,7 @@ static void test_tailcall_bpf2bpf_4(bool noise)
 	if (CHECK_FAIL(err))
 		return;
 
-	prog = bpf_object__find_program_by_title(obj, "classifier");
+	prog = bpf_object__find_program_by_name(obj, "entry");
 	if (CHECK_FAIL(!prog))
 		goto out;
 
@@ -779,9 +779,9 @@ static void test_tailcall_bpf2bpf_4(bool noise)
 		goto out;
 
 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
-		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
-		prog = bpf_object__find_program_by_title(obj, prog_name);
+		prog = bpf_object__find_program_by_name(obj, prog_name);
 		if (CHECK_FAIL(!prog))
 			goto out;
 
diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c
index d39bc00feb45e672f5d23858eac487c653c74e7f..e47835f0a6748f9a61c260359818a6e419ff5708 100644
--- a/tools/testing/selftests/bpf/prog_tests/trace_printk.c
+++ b/tools/testing/selftests/bpf/prog_tests/trace_printk.c
@@ -10,7 +10,7 @@
 
 void test_trace_printk(void)
 {
-	int err, iter = 0, duration = 0, found = 0;
+	int err = 0, iter = 0, found = 0;
 	struct trace_printk__bss *bss;
 	struct trace_printk *skel;
 	char *buf = NULL;
@@ -18,25 +18,24 @@ void test_trace_printk(void)
 	size_t buflen;
 
 	skel = trace_printk__open();
-	if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+	if (!ASSERT_OK_PTR(skel, "trace_printk__open"))
 		return;
 
-	ASSERT_EQ(skel->rodata->fmt[0], 'T', "invalid printk fmt string");
+	ASSERT_EQ(skel->rodata->fmt[0], 'T', "skel->rodata->fmt[0]");
 	skel->rodata->fmt[0] = 't';
 
 	err = trace_printk__load(skel);
-	if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
+	if (!ASSERT_OK(err, "trace_printk__load"))
 		goto cleanup;
 
 	bss = skel->bss;
 
 	err = trace_printk__attach(skel);
-	if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+	if (!ASSERT_OK(err, "trace_printk__attach"))
 		goto cleanup;
 
 	fp = fopen(TRACEBUF, "r");
-	if (CHECK(fp == NULL, "could not open trace buffer",
-		  "error %d opening %s", errno, TRACEBUF))
+	if (!ASSERT_OK_PTR(fp, "fopen(TRACEBUF)"))
 		goto cleanup;
 
 	/* We do not want to wait forever if this test fails... */
@@ -46,14 +45,10 @@ void test_trace_printk(void)
 	usleep(1);
 	trace_printk__detach(skel);
 
-	if (CHECK(bss->trace_printk_ran == 0,
-		  "bpf_trace_printk never ran",
-		  "ran == %d", bss->trace_printk_ran))
+	if (!ASSERT_GT(bss->trace_printk_ran, 0, "bss->trace_printk_ran"))
 		goto cleanup;
 
-	if (CHECK(bss->trace_printk_ret <= 0,
-		  "bpf_trace_printk returned <= 0 value",
-		  "got %d", bss->trace_printk_ret))
+	if (!ASSERT_GT(bss->trace_printk_ret, 0, "bss->trace_printk_ret"))
 		goto cleanup;
 
 	/* verify our search string is in the trace buffer */
@@ -66,8 +61,7 @@ void test_trace_printk(void)
 			break;
 	}
 
-	if (CHECK(!found, "message from bpf_trace_printk not found",
-		  "no instance of %s in %s", SEARCHMSG, TRACEBUF))
+	if (!ASSERT_EQ(found, bss->trace_printk_ran, "found"))
 		goto cleanup;
 
 cleanup:
diff --git a/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c
new file mode 100644
index 0000000000000000000000000000000000000000..61a24e62e1a0fa5b30b7ee8c23b9fb0958b14eca
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+
+#include "trace_vprintk.lskel.h"
+
+#define TRACEBUF	"/sys/kernel/debug/tracing/trace_pipe"
+#define SEARCHMSG	"1,2,3,4,5,6,7,8,9,10"
+
+void test_trace_vprintk(void)
+{
+	int err = 0, iter = 0, found = 0;
+	struct trace_vprintk__bss *bss;
+	struct trace_vprintk *skel;
+	char *buf = NULL;
+	FILE *fp = NULL;
+	size_t buflen;
+
+	skel = trace_vprintk__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load"))
+		goto cleanup;
+
+	bss = skel->bss;
+
+	err = trace_vprintk__attach(skel);
+	if (!ASSERT_OK(err, "trace_vprintk__attach"))
+		goto cleanup;
+
+	fp = fopen(TRACEBUF, "r");
+	if (!ASSERT_OK_PTR(fp, "fopen(TRACEBUF)"))
+		goto cleanup;
+
+	/* We do not want to wait forever if this test fails... */
+	fcntl(fileno(fp), F_SETFL, O_NONBLOCK);
+
+	/* wait for tracepoint to trigger */
+	usleep(1);
+	trace_vprintk__detach(skel);
+
+	if (!ASSERT_GT(bss->trace_vprintk_ran, 0, "bss->trace_vprintk_ran"))
+		goto cleanup;
+
+	if (!ASSERT_GT(bss->trace_vprintk_ret, 0, "bss->trace_vprintk_ret"))
+		goto cleanup;
+
+	/* verify our search string is in the trace buffer */
+	while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) {
+		if (strstr(buf, SEARCHMSG) != NULL)
+			found++;
+		if (found == bss->trace_vprintk_ran)
+			break;
+		if (++iter > 1000)
+			break;
+	}
+
+	if (!ASSERT_EQ(found, bss->trace_vprintk_ran, "found"))
+		goto cleanup;
+
+	if (!ASSERT_LT(bss->null_data_vprintk_ret, 0, "bss->null_data_vprintk_ret"))
+		goto cleanup;
+
+cleanup:
+	trace_vprintk__destroy(skel);
+	free(buf);
+	if (fp)
+		fclose(fp);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdpwall.c b/tools/testing/selftests/bpf/prog_tests/xdpwall.c
new file mode 100644
index 0000000000000000000000000000000000000000..f3927829a55a6cb71a6a3e180fc16bdb9044b403
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdpwall.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "test_progs.h"
+#include "xdpwall.skel.h"
+
+void test_xdpwall(void)
+{
+	struct xdpwall *skel;
+
+	skel = xdpwall__open_and_load();
+	ASSERT_OK_PTR(skel, "Does LLVM have https://reviews.llvm.org/D109073?");
+
+	xdpwall__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 95a5a0778ed719cd0e365c8baaa3670ece674304..f266c757b3df3368f4fad8ed8ba175fe875721c4 100644
--- a/tools/testing/selftests/bpf/progs/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
@@ -19,9 +19,8 @@
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_endian.h>
 
-int _version SEC("version") = 1;
 #define PROG(F) PROG_(F, _##F)
-#define PROG_(NUM, NAME) SEC("flow_dissector/"#NUM) int bpf_func##NAME
+#define PROG_(NUM, NAME) SEC("flow_dissector") int flow_dissector_##NUM
 
 /* These are the identifiers of the BPF programs that will be used in tail
  * calls. Name is limited to 16 characters, with the terminating character and
diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c
index a25373002055e7475eb05e0e646f23bdffcb4c32..3f81ff92184cf170f7ca86044ba809c5f8cadc2a 100644
--- a/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c
+++ b/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c
@@ -20,7 +20,7 @@ struct {
 
 __u32 invocations = 0;
 
-SEC("cgroup_skb/egress/1")
+SEC("cgroup_skb/egress")
 int egress1(struct __sk_buff *skb)
 {
 	struct cgroup_value *ptr_cg_storage =
@@ -32,7 +32,7 @@ int egress1(struct __sk_buff *skb)
 	return 1;
 }
 
-SEC("cgroup_skb/egress/2")
+SEC("cgroup_skb/egress")
 int egress2(struct __sk_buff *skb)
 {
 	struct cgroup_value *ptr_cg_storage =
diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c
index a149f33bc533797e459a370b1ebf23f8b6479e1c..d662db27fe4a1e7a81043ba347adb7bf0e3e94a9 100644
--- a/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c
+++ b/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c
@@ -20,7 +20,7 @@ struct {
 
 __u32 invocations = 0;
 
-SEC("cgroup_skb/egress/1")
+SEC("cgroup_skb/egress")
 int egress1(struct __sk_buff *skb)
 {
 	struct cgroup_value *ptr_cg_storage =
@@ -32,7 +32,7 @@ int egress1(struct __sk_buff *skb)
 	return 1;
 }
 
-SEC("cgroup_skb/egress/2")
+SEC("cgroup_skb/egress")
 int egress2(struct __sk_buff *skb)
 {
 	struct cgroup_value *ptr_cg_storage =
diff --git a/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
index 75e8e1069fe73a1d7fb621e445e97c9294c54867..df918b2469da04b6acc45624e9ac063f1923d039 100644
--- a/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
+++ b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
@@ -47,7 +47,7 @@ check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
 
 u32 arraymap_output = 0;
 
-SEC("classifier")
+SEC("tc")
 int test_pkt_access(struct __sk_buff *skb)
 {
 	struct callback_ctx data;
diff --git a/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
index 913dd91aafffc23ae6cb49ce3833722fe55ab05a..276994d5c0c71076fbbd5efafc25036e76105919 100644
--- a/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
+++ b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
@@ -78,7 +78,7 @@ int hashmap_output = 0;
 int hashmap_elems = 0;
 int percpu_map_elems = 0;
 
-SEC("classifier")
+SEC("tc")
 int test_pkt_access(struct __sk_buff *skb)
 {
 	struct callback_ctx data;
diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c
index 55e283050cab429521027949b5969e5263f00cd8..7236da72ce8055f9a0165d3b4334a04d17d110b8 100644
--- a/tools/testing/selftests/bpf/progs/kfree_skb.c
+++ b/tools/testing/selftests/bpf/progs/kfree_skb.c
@@ -9,8 +9,8 @@
 char _license[] SEC("license") = "GPL";
 struct {
 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
-	__uint(key_size, sizeof(int));
-	__uint(value_size, sizeof(int));
+	__type(key, int);
+	__type(value, int);
 } perf_buf_map SEC(".maps");
 
 #define _(P) (__builtin_preserve_access_index(P))
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
index 470f8723e463d979875810103044fab69d616613..8a8cf59017aac01662c260d2cfe40fbb9f3d1fda 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
@@ -8,7 +8,7 @@ extern int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym;
 extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
 				  __u32 c, __u64 d) __ksym;
 
-SEC("classifier")
+SEC("tc")
 int kfunc_call_test2(struct __sk_buff *skb)
 {
 	struct bpf_sock *sk = skb->sk;
@@ -23,7 +23,7 @@ int kfunc_call_test2(struct __sk_buff *skb)
 	return bpf_kfunc_call_test2((struct sock *)sk, 1, 2);
 }
 
-SEC("classifier")
+SEC("tc")
 int kfunc_call_test1(struct __sk_buff *skb)
 {
 	struct bpf_sock *sk = skb->sk;
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
index 5fbd9e232d44b0d95bb8a80b02800838a3ef85e2..c1fdecabeabfcb742e41b2f968d3b8a0d615a311 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
@@ -33,7 +33,7 @@ int __noinline f1(struct __sk_buff *skb)
 	return (__u32)bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4);
 }
 
-SEC("classifier")
+SEC("tc")
 int kfunc_call_test1(struct __sk_buff *skb)
 {
 	return f1(skb);
diff --git a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
index 25467d13c356a610b26cb694f9a96f8cc43ee637..b3fcb5274ee090eebae4e8b7292480106c69bb09 100644
--- a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
+++ b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
@@ -11,8 +11,8 @@ typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
 struct {
 	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
 	__uint(max_entries, 16384);
-	__uint(key_size, sizeof(__u32));
-	__uint(value_size, sizeof(stack_trace_t));
+	__type(key, __u32);
+	__type(value, stack_trace_t);
 } stackmap SEC(".maps");
 
 struct {
diff --git a/tools/testing/selftests/bpf/progs/skb_pkt_end.c b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
index 7f2eaa2f89f810801c63bd3d2af5f47c40306552..992b7861003a210e94ab572e1720f32d11190c90 100644
--- a/tools/testing/selftests/bpf/progs/skb_pkt_end.c
+++ b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
@@ -25,7 +25,7 @@ static INLINE struct iphdr *get_iphdr(struct __sk_buff *skb)
 	return ip;
 }
 
-SEC("classifier/cls")
+SEC("tc")
 int main_prog(struct __sk_buff *skb)
 {
 	struct iphdr *ip = NULL;
diff --git a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
index 4797dc9850645f5b01895f82cc184b99c30a6c20..73872c535cbb0c850c9563c34d6b2297b497262b 100644
--- a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
@@ -7,22 +7,22 @@ int _version SEC("version") = 1;
 struct {
 	__uint(type, BPF_MAP_TYPE_SOCKMAP);
 	__uint(max_entries, 20);
-	__uint(key_size, sizeof(int));
-	__uint(value_size, sizeof(int));
+	__type(key, int);
+	__type(value, int);
 } sock_map_rx SEC(".maps");
 
 struct {
 	__uint(type, BPF_MAP_TYPE_SOCKMAP);
 	__uint(max_entries, 20);
-	__uint(key_size, sizeof(int));
-	__uint(value_size, sizeof(int));
+	__type(key, int);
+	__type(value, int);
 } sock_map_tx SEC(".maps");
 
 struct {
 	__uint(type, BPF_MAP_TYPE_SOCKMAP);
 	__uint(max_entries, 20);
-	__uint(key_size, sizeof(int));
-	__uint(value_size, sizeof(int));
+	__type(key, int);
+	__type(value, int);
 } sock_map_msg SEC(".maps");
 
 struct {
diff --git a/tools/testing/selftests/bpf/progs/sockopt_multi.c b/tools/testing/selftests/bpf/progs/sockopt_multi.c
index 9d8c212dde9f5f8bf60af4fee88361ef413d9986..177a59069dae6f740c16a13c060e28990d1bdbc1 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_multi.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_multi.c
@@ -4,9 +4,8 @@
 #include <bpf/bpf_helpers.h>
 
 char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;
 
-SEC("cgroup/getsockopt/child")
+SEC("cgroup/getsockopt")
 int _getsockopt_child(struct bpf_sockopt *ctx)
 {
 	__u8 *optval_end = ctx->optval_end;
@@ -29,7 +28,7 @@ int _getsockopt_child(struct bpf_sockopt *ctx)
 	return 1;
 }
 
-SEC("cgroup/getsockopt/parent")
+SEC("cgroup/getsockopt")
 int _getsockopt_parent(struct bpf_sockopt *ctx)
 {
 	__u8 *optval_end = ctx->optval_end;
diff --git a/tools/testing/selftests/bpf/progs/tailcall1.c b/tools/testing/selftests/bpf/progs/tailcall1.c
index 7115bcefbe8ab61648dec48a586b54926f4b27e0..8159a0b4a69a7c87d680ddb4ad63109b064a8f9b 100644
--- a/tools/testing/selftests/bpf/progs/tailcall1.c
+++ b/tools/testing/selftests/bpf/progs/tailcall1.c
@@ -11,8 +11,8 @@ struct {
 } jmp_table SEC(".maps");
 
 #define TAIL_FUNC(x) 				\
-	SEC("classifier/" #x)			\
-	int bpf_func_##x(struct __sk_buff *skb)	\
+	SEC("tc")				\
+	int classifier_##x(struct __sk_buff *skb)	\
 	{					\
 		return x;			\
 	}
@@ -20,7 +20,7 @@ TAIL_FUNC(0)
 TAIL_FUNC(1)
 TAIL_FUNC(2)
 
-SEC("classifier")
+SEC("tc")
 int entry(struct __sk_buff *skb)
 {
 	/* Multiple locations to make sure we patch
@@ -45,4 +45,3 @@ int entry(struct __sk_buff *skb)
 }
 
 char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall2.c b/tools/testing/selftests/bpf/progs/tailcall2.c
index 0431e4fe7efd7e703c3775d17b737f69fbaad858..a5ff53e6170202f9cfd0c04b3953f2a5b48eaa30 100644
--- a/tools/testing/selftests/bpf/progs/tailcall2.c
+++ b/tools/testing/selftests/bpf/progs/tailcall2.c
@@ -10,41 +10,41 @@ struct {
 	__uint(value_size, sizeof(__u32));
 } jmp_table SEC(".maps");
 
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
 {
 	bpf_tail_call_static(skb, &jmp_table, 1);
 	return 0;
 }
 
-SEC("classifier/1")
-int bpf_func_1(struct __sk_buff *skb)
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
 {
 	bpf_tail_call_static(skb, &jmp_table, 2);
 	return 1;
 }
 
-SEC("classifier/2")
-int bpf_func_2(struct __sk_buff *skb)
+SEC("tc")
+int classifier_2(struct __sk_buff *skb)
 {
 	return 2;
 }
 
-SEC("classifier/3")
-int bpf_func_3(struct __sk_buff *skb)
+SEC("tc")
+int classifier_3(struct __sk_buff *skb)
 {
 	bpf_tail_call_static(skb, &jmp_table, 4);
 	return 3;
 }
 
-SEC("classifier/4")
-int bpf_func_4(struct __sk_buff *skb)
+SEC("tc")
+int classifier_4(struct __sk_buff *skb)
 {
 	bpf_tail_call_static(skb, &jmp_table, 3);
 	return 4;
 }
 
-SEC("classifier")
+SEC("tc")
 int entry(struct __sk_buff *skb)
 {
 	bpf_tail_call_static(skb, &jmp_table, 0);
@@ -56,4 +56,3 @@ int entry(struct __sk_buff *skb)
 }
 
 char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall3.c b/tools/testing/selftests/bpf/progs/tailcall3.c
index 910858fe078a8ce3b96dfc41d3ae2c73e83cb08d..f60bcd7b8d4b5bfd0840a03393ce9d351824925e 100644
--- a/tools/testing/selftests/bpf/progs/tailcall3.c
+++ b/tools/testing/selftests/bpf/progs/tailcall3.c
@@ -12,15 +12,15 @@ struct {
 
 int count = 0;
 
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
 {
 	count++;
 	bpf_tail_call_static(skb, &jmp_table, 0);
 	return 1;
 }
 
-SEC("classifier")
+SEC("tc")
 int entry(struct __sk_buff *skb)
 {
 	bpf_tail_call_static(skb, &jmp_table, 0);
@@ -28,4 +28,3 @@ int entry(struct __sk_buff *skb)
 }
 
 char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall4.c b/tools/testing/selftests/bpf/progs/tailcall4.c
index bd4be135c39d9314330bc80758d77f77b795cf55..a56bbc2313ca10257d2bd78689d6907364ec7bad 100644
--- a/tools/testing/selftests/bpf/progs/tailcall4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall4.c
@@ -13,8 +13,8 @@ struct {
 int selector = 0;
 
 #define TAIL_FUNC(x)				\
-	SEC("classifier/" #x)			\
-	int bpf_func_##x(struct __sk_buff *skb)	\
+	SEC("tc")				\
+	int classifier_##x(struct __sk_buff *skb)	\
 	{					\
 		return x;			\
 	}
@@ -22,7 +22,7 @@ TAIL_FUNC(0)
 TAIL_FUNC(1)
 TAIL_FUNC(2)
 
-SEC("classifier")
+SEC("tc")
 int entry(struct __sk_buff *skb)
 {
 	bpf_tail_call(skb, &jmp_table, selector);
@@ -30,4 +30,3 @@ int entry(struct __sk_buff *skb)
 }
 
 char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall5.c b/tools/testing/selftests/bpf/progs/tailcall5.c
index adf30a33064ef5c0aee71c533adf53f198c2b2e4..8d03496eb6ca0bff5f2b7facce7a8ce145c24c68 100644
--- a/tools/testing/selftests/bpf/progs/tailcall5.c
+++ b/tools/testing/selftests/bpf/progs/tailcall5.c
@@ -13,8 +13,8 @@ struct {
 int selector = 0;
 
 #define TAIL_FUNC(x)				\
-	SEC("classifier/" #x)			\
-	int bpf_func_##x(struct __sk_buff *skb)	\
+	SEC("tc")				\
+	int classifier_##x(struct __sk_buff *skb)	\
 	{					\
 		return x;			\
 	}
@@ -22,7 +22,7 @@ TAIL_FUNC(0)
 TAIL_FUNC(1)
 TAIL_FUNC(2)
 
-SEC("classifier")
+SEC("tc")
 int entry(struct __sk_buff *skb)
 {
 	int idx = 0;
@@ -37,4 +37,3 @@ int entry(struct __sk_buff *skb)
 }
 
 char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall6.c b/tools/testing/selftests/bpf/progs/tailcall6.c
index 0f4a811cc0288651bab77763371f008ecf5ca821..d77b8abd62f3bf8ebd25f196d875f1bbf2b3b149 100644
--- a/tools/testing/selftests/bpf/progs/tailcall6.c
+++ b/tools/testing/selftests/bpf/progs/tailcall6.c
@@ -12,8 +12,8 @@ struct {
 
 int count, which;
 
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
 {
 	count++;
 	if (__builtin_constant_p(which))
@@ -22,7 +22,7 @@ int bpf_func_0(struct __sk_buff *skb)
 	return 1;
 }
 
-SEC("classifier")
+SEC("tc")
 int entry(struct __sk_buff *skb)
 {
 	if (__builtin_constant_p(which))
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
index 0103f3dd9f02b368b680e0890c6a3efa3cd68a0c..8c91428deb90d7fbb7ab1606fc7fc861ce901c49 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
@@ -10,8 +10,8 @@ struct {
 } jmp_table SEC(".maps");
 
 #define TAIL_FUNC(x) 				\
-	SEC("classifier/" #x)			\
-	int bpf_func_##x(struct __sk_buff *skb)	\
+	SEC("tc")				\
+	int classifier_##x(struct __sk_buff *skb)	\
 	{					\
 		return x;			\
 	}
@@ -26,7 +26,7 @@ int subprog_tail(struct __sk_buff *skb)
 	return skb->len * 2;
 }
 
-SEC("classifier")
+SEC("tc")
 int entry(struct __sk_buff *skb)
 {
 	bpf_tail_call_static(skb, &jmp_table, 1);
@@ -35,4 +35,3 @@ int entry(struct __sk_buff *skb)
 }
 
 char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
index 3cc4c12817b505998ce34009a4e867c70f3fd2f8..ce97d141daee15e7625d4ef2b48ec1f2469cd105 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
@@ -22,14 +22,14 @@ int subprog_tail(struct __sk_buff *skb)
 
 int count = 0;
 
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
 {
 	count++;
 	return subprog_tail(skb);
 }
 
-SEC("classifier")
+SEC("tc")
 int entry(struct __sk_buff *skb)
 {
 	bpf_tail_call_static(skb, &jmp_table, 0);
@@ -38,4 +38,3 @@ int entry(struct __sk_buff *skb)
 }
 
 char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
index 0d5482bea6c9b9c65c1775a9e12fe6e83626da42..7fab39a3bb12aa3d4927691bddd5f1547880c22e 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
@@ -33,23 +33,23 @@ int subprog_tail(struct __sk_buff *skb)
 	return skb->len * 2;
 }
 
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
 {
 	volatile char arr[128] = {};
 
 	return subprog_tail2(skb);
 }
 
-SEC("classifier/1")
-int bpf_func_1(struct __sk_buff *skb)
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
 {
 	volatile char arr[128] = {};
 
 	return skb->len * 3;
 }
 
-SEC("classifier")
+SEC("tc")
 int entry(struct __sk_buff *skb)
 {
 	volatile char arr[128] = {};
@@ -58,4 +58,3 @@ int entry(struct __sk_buff *skb)
 }
 
 char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
index e89368a50b97959e095eec658e8e06fe4b2273b6..b67e8022d500920071693d3b511a6f92f7d85b06 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
@@ -50,30 +50,29 @@ int subprog_tail(struct __sk_buff *skb)
 	return skb->len;
 }
 
-SEC("classifier/1")
-int bpf_func_1(struct __sk_buff *skb)
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
 {
 	return subprog_tail_2(skb);
 }
 
-SEC("classifier/2")
-int bpf_func_2(struct __sk_buff *skb)
+SEC("tc")
+int classifier_2(struct __sk_buff *skb)
 {
 	count++;
 	return subprog_tail_2(skb);
 }
 
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
 {
 	return subprog_tail_1(skb);
 }
 
-SEC("classifier")
+SEC("tc")
 int entry(struct __sk_buff *skb)
 {
 	return subprog_tail(skb);
 }
 
 char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c b/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
index c1e0c8c7c55ff4b46a307aef4f893f308c66747a..c218cf8989a9e406a62a2ab6a4bdb9791e97034d 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
@@ -21,8 +21,8 @@ struct inner_map_sz2 {
 struct outer_arr {
 	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 	__uint(max_entries, 3);
-	__uint(key_size, sizeof(int));
-	__uint(value_size, sizeof(int));
+	__type(key, int);
+	__type(value, int);
 	/* it's possible to use an anonymous struct as the inner map definition here */
 	__array(values, struct {
 		__uint(type, BPF_MAP_TYPE_ARRAY);
@@ -61,8 +61,8 @@ struct inner_map_sz4 {
 struct outer_arr_dyn {
 	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 	__uint(max_entries, 3);
-	__uint(key_size, sizeof(int));
-	__uint(value_size, sizeof(int));
+	__type(key, int);
+	__type(value, int);
 	__array(values, struct {
 		__uint(type, BPF_MAP_TYPE_ARRAY);
 		__uint(map_flags, BPF_F_INNER_MAP);
@@ -81,7 +81,7 @@ struct outer_arr_dyn {
 struct outer_hash {
 	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
 	__uint(max_entries, 5);
-	__uint(key_size, sizeof(int));
+	__type(key, int);
 	/* Here everything works flawlessly due to reuse of struct inner_map
 	 * and the compiler will complain at any attempt to use non-inner_map
 	 * references below. This is a great experience.
@@ -111,8 +111,8 @@ struct sockarr_sz2 {
 struct outer_sockarr_sz1 {
 	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 	__uint(max_entries, 1);
-	__uint(key_size, sizeof(int));
-	__uint(value_size, sizeof(int));
+	__type(key, int);
+	__type(value, int);
 	__array(values, struct sockarr_sz1);
 } outer_sockarr SEC(".maps") = {
 	.values = { (void *)&sockarr_sz1 },
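
The map hunks in this file trade byte sizes for BTF-typed declarations: __uint(key_size, sizeof(int)) records only a size, while __type(key, int) embeds the actual type in the object's BTF, letting the verifier type-check accesses and letting bpftool pretty-print map entries. A side-by-side sketch of the two styles (map names are illustrative, not part of the selftest):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Legacy style: only the sizes are known; no BTF type info. */
    struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(long));
    } legacy_map SEC(".maps");

    /* BTF-defined style: key/value types live in the object's BTF. */
    struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, long);
    } btf_map SEC(".maps");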
diff --git a/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c b/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c
index 9a6b85dd52d29f82cb7c56c55fcb6924da0490d6..e2bea4da194bd6cfba3fe816b3a21a970ee3379f 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c
@@ -145,7 +145,7 @@ static int handle_ip6_tcp(struct ipv6hdr *ip6h, struct __sk_buff *skb)
 	return TC_ACT_OK;
 }
 
-SEC("classifier/ingress")
+SEC("tc")
 int cls_ingress(struct __sk_buff *skb)
 {
 	struct ipv6hdr *ip6h;
diff --git a/tools/testing/selftests/bpf/progs/test_cgroup_link.c b/tools/testing/selftests/bpf/progs/test_cgroup_link.c
index 77e47b9e44469b99fc1fa5435e92529a25baaac6..4faba88e45a5a2121991a9c793bf0a862311d178 100644
--- a/tools/testing/selftests/bpf/progs/test_cgroup_link.c
+++ b/tools/testing/selftests/bpf/progs/test_cgroup_link.c
@@ -6,14 +6,14 @@
 int calls = 0;
 int alt_calls = 0;
 
-SEC("cgroup_skb/egress1")
+SEC("cgroup_skb/egress")
 int egress(struct __sk_buff *skb)
 {
 	__sync_fetch_and_add(&calls, 1);
 	return 1;
 }
 
-SEC("cgroup_skb/egress2")
+SEC("cgroup_skb/egress")
 int egress_alt(struct __sk_buff *skb)
 {
 	__sync_fetch_and_add(&alt_calls, 1);
diff --git a/tools/testing/selftests/bpf/progs/test_check_mtu.c b/tools/testing/selftests/bpf/progs/test_check_mtu.c
index 71184af57749a2aef5a1c72cc9179d86c5c8f61c..2ec1de11a3ae4aae5b349b3fada2140468d9efe5 100644
--- a/tools/testing/selftests/bpf/progs/test_check_mtu.c
+++ b/tools/testing/selftests/bpf/progs/test_check_mtu.c
@@ -153,7 +153,7 @@ int xdp_input_len_exceed(struct xdp_md *ctx)
 	return retval;
 }
 
-SEC("classifier")
+SEC("tc")
 int tc_use_helper(struct __sk_buff *ctx)
 {
 	int retval = BPF_OK; /* Expected retval on successful test */
@@ -172,7 +172,7 @@ int tc_use_helper(struct __sk_buff *ctx)
 	return retval;
 }
 
-SEC("classifier")
+SEC("tc")
 int tc_exceed_mtu(struct __sk_buff *ctx)
 {
 	__u32 ifindex = GLOBAL_USER_IFINDEX;
@@ -196,7 +196,7 @@ int tc_exceed_mtu(struct __sk_buff *ctx)
 	return retval;
 }
 
-SEC("classifier")
+SEC("tc")
 int tc_exceed_mtu_da(struct __sk_buff *ctx)
 {
 	/* SKB Direct-Access variant */
@@ -223,7 +223,7 @@ int tc_exceed_mtu_da(struct __sk_buff *ctx)
 	return retval;
 }
 
-SEC("classifier")
+SEC("tc")
 int tc_minus_delta(struct __sk_buff *ctx)
 {
 	int retval = BPF_OK; /* Expected retval on successful test */
@@ -245,7 +245,7 @@ int tc_minus_delta(struct __sk_buff *ctx)
 	return retval;
 }
 
-SEC("classifier")
+SEC("tc")
 int tc_input_len(struct __sk_buff *ctx)
 {
 	int retval = BPF_OK; /* Expected retval on successful test */
@@ -265,7 +265,7 @@ int tc_input_len(struct __sk_buff *ctx)
 	return retval;
 }
 
-SEC("classifier")
+SEC("tc")
 int tc_input_len_exceed(struct __sk_buff *ctx)
 {
 	int retval = BPF_DROP; /* Fail */
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
index e2a5acc4785c2941214c7f1c4f3c836515335d9d..2833ad722cb75232b877a34c289e09608f75f1fc 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
@@ -928,7 +928,7 @@ static INLINING verdict_t process_ipv6(buf_t *pkt, metrics_t *metrics)
 	}
 }
 
-SEC("classifier/cls_redirect")
+SEC("tc")
 int cls_redirect(struct __sk_buff *skb)
 {
 	metrics_t *metrics = get_global_metrics();
diff --git a/tools/testing/selftests/bpf/progs/test_global_data.c b/tools/testing/selftests/bpf/progs/test_global_data.c
index 1319be1c54ba418582e3894c89619123c82edd1d..719e314ef3e42e2f09ebdd0fbbfc3606fa595b61 100644
--- a/tools/testing/selftests/bpf/progs/test_global_data.c
+++ b/tools/testing/selftests/bpf/progs/test_global_data.c
@@ -68,7 +68,7 @@ static struct foo struct3 = {
 		bpf_map_update_elem(&result_##map, &key, var, 0);	\
 	} while (0)
 
-SEC("classifier/static_data_load")
+SEC("tc")
 int load_static_data(struct __sk_buff *skb)
 {
 	static const __u64 bar = ~0;
diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c
index 880260f6d536615da57432fec171912cfd9e228b..7b42dad187b8941436ad14cdc03909f5f156974d 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func1.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func1.c
@@ -38,7 +38,7 @@ int f3(int val, struct __sk_buff *skb, int var)
 	return skb->ifindex * val * var;
 }
 
-SEC("classifier/test")
+SEC("tc")
 int test_cls(struct __sk_buff *skb)
 {
 	return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4);
diff --git a/tools/testing/selftests/bpf/progs/test_global_func3.c b/tools/testing/selftests/bpf/progs/test_global_func3.c
index 86f0ecb304fcfe907b844f6be44db827d287bb5d..01bf8275dfd6401137ae9edba6c9991c9a73a3c3 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func3.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func3.c
@@ -54,7 +54,7 @@ int f8(struct __sk_buff *skb)
 }
 #endif
 
-SEC("classifier/test")
+SEC("tc")
 int test_cls(struct __sk_buff *skb)
 {
 #ifndef NO_FN8
diff --git a/tools/testing/selftests/bpf/progs/test_global_func5.c b/tools/testing/selftests/bpf/progs/test_global_func5.c
index 260c25b827ef949d40c80c0e3b79186ed10454ea..9248d03e0d06fb22f192993e4dd29abcce0d10d7 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func5.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func5.c
@@ -24,7 +24,7 @@ int f3(int val, struct __sk_buff *skb)
 	return skb->ifindex * val;
 }
 
-SEC("classifier/test")
+SEC("tc")
 int test_cls(struct __sk_buff *skb)
 {
 	return f1(skb) + f2(2, skb) + f3(3, skb);
diff --git a/tools/testing/selftests/bpf/progs/test_global_func6.c b/tools/testing/selftests/bpf/progs/test_global_func6.c
index 69e19c64e10b6b9d07c3675c5969a010ff31ffcf..af8c78bdfb257536c212531a9610d8e3faaa1d04 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func6.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func6.c
@@ -24,7 +24,7 @@ int f3(int val, struct __sk_buff *skb)
 	return skb->ifindex * val;
 }
 
-SEC("classifier/test")
+SEC("tc")
 int test_cls(struct __sk_buff *skb)
 {
 	return f1(skb) + f2(2, skb) + f3(3, skb);
diff --git a/tools/testing/selftests/bpf/progs/test_global_func7.c b/tools/testing/selftests/bpf/progs/test_global_func7.c
index 309b3f6136bde6d7a84b73bac16f0abeec997fbb..6cb8e2f5254cf1f545a5bd815edb4d9df70f1ab3 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func7.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func7.c
@@ -10,7 +10,7 @@ void foo(struct __sk_buff *skb)
 	skb->tc_index = 0;
 }
 
-SEC("classifier/test")
+SEC("tc")
 int test_cls(struct __sk_buff *skb)
 {
 	foo(skb);
diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c
index 1cfeb940cf9fb259da3eb9fa864465b21e72794d..a6d91932dcd51aa50eb58acf73d933be86b729d3 100644
--- a/tools/testing/selftests/bpf/progs/test_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c
@@ -9,21 +9,19 @@ struct {
 	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 	__uint(max_entries, 1);
 	__uint(map_flags, 0);
-	__uint(key_size, sizeof(__u32));
-	/* must be sizeof(__u32) for map in map */
-	__uint(value_size, sizeof(__u32));
+	__type(key, __u32);
+	__type(value, __u32);
 } mim_array SEC(".maps");
 
 struct {
 	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
 	__uint(max_entries, 1);
 	__uint(map_flags, 0);
-	__uint(key_size, sizeof(int));
-	/* must be sizeof(__u32) for map in map */
-	__uint(value_size, sizeof(__u32));
+	__type(key, int);
+	__type(value, __u32);
 } mim_hash SEC(".maps");
 
-SEC("xdp_mimtest")
+SEC("xdp")
 int xdp_mimtest0(struct xdp_md *ctx)
 {
 	int value = 123;
diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c b/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c
index 703c08e064423a1e6c015841e83ab84b214b06d6..9c7d75cf0bd6b710eed5d2b774d5adbe583607ef 100644
--- a/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c
@@ -13,7 +13,7 @@ struct inner {
 struct {
 	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 	__uint(max_entries, 0); /* This will make map creation fail */
-	__uint(key_size, sizeof(__u32));
+	__type(key, __u32);
 	__array(values, struct inner);
 } mim SEC(".maps");
 
diff --git a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
index 6077a025092cb3c3edcfedc0da6b02687d86dd00..2c121c5d66a7faeedaad032ab66cce8fcde89bee 100644
--- a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
@@ -293,7 +293,7 @@ static int handle_passive_estab(struct bpf_sock_ops *skops)
 	return check_active_hdr_in(skops);
 }
 
-SEC("sockops/misc_estab")
+SEC("sockops")
 int misc_estab(struct bpf_sock_ops *skops)
 {
 	int true_val = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c b/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c
index fb22de7c365d4c1fea8ae9cc58e2d740a7d02eaf..1249a945699fdbeb9221eae62b4a43ff2e0a4c6d 100644
--- a/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c
+++ b/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c
@@ -7,15 +7,15 @@
 struct {
 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 	__uint(max_entries, 1);
-	__uint(key_size, sizeof(int));
-	__uint(value_size, sizeof(int));
+	__type(key, int);
+	__type(value, int);
 } array_1 SEC(".maps");
 
 struct {
 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 	__uint(max_entries, 1);
-	__uint(key_size, sizeof(int));
-	__uint(value_size, sizeof(int));
+	__type(key, int);
+	__type(value, int);
 	__uint(map_flags, BPF_F_PRESERVE_ELEMS);
 } array_2 SEC(".maps");
 
diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
index 8207a2dc2f9da03858ad8929097e0d6d9b0c478a..d37ce29fd393fc0e9854d1fb4f925fef6ac3cbcc 100644
--- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c
+++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
@@ -8,8 +8,8 @@
 
 struct {
 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
-	__uint(key_size, sizeof(int));
-	__uint(value_size, sizeof(int));
+	__type(key, int);
+	__type(value, int);
 } perf_buf_map SEC(".maps");
 
 SEC("tp/raw_syscalls/sys_enter")
diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c
index 852051064507d02ace5403b55ded28851161303e..3cfd88141ddc1823ef8f6013fbd2b18bfb50ff07 100644
--- a/tools/testing/selftests/bpf/progs/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c
@@ -97,7 +97,7 @@ int test_pkt_write_access_subprog(struct __sk_buff *skb, __u32 off)
 	return 0;
 }
 
-SEC("classifier/test_pkt_access")
+SEC("tc")
 int test_pkt_access(struct __sk_buff *skb)
 {
 	void *data_end = (void *)(long)skb->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_pkt_md_access.c b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
index 610c74ea9f648a30209897e8195eac542b5ab075..d1839366f3e1e6e332c3aba9e6c0248687ffb561 100644
--- a/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
@@ -7,8 +7,6 @@
 #include <linux/pkt_cls.h>
 #include <bpf/bpf_helpers.h>
 
-int _version SEC("version") = 1;
-
 #if  __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define TEST_FIELD(TYPE, FIELD, MASK)					\
 	{								\
@@ -27,7 +25,7 @@ int _version SEC("version") = 1;
 	}
 #endif
 
-SEC("classifier/test_pkt_md_access")
+SEC("tc")
 int test_pkt_md_access(struct __sk_buff *skb)
 {
 	TEST_FIELD(__u8,  len, 0xFF);
diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c
index 89b3532ccc75e3f71a8b4a2516b044269e7248c9..8812a90da4eb87f6472fda3e82c068ab6f7eee10 100644
--- a/tools/testing/selftests/bpf/progs/test_probe_user.c
+++ b/tools/testing/selftests/bpf/progs/test_probe_user.c
@@ -8,13 +8,37 @@
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
+#if defined(__TARGET_ARCH_x86)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__x64_"
+#elif defined(__TARGET_ARCH_s390)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__s390x_"
+#elif defined(__TARGET_ARCH_arm64)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__arm64_"
+#else
+#define SYSCALL_WRAPPER 0
+#define SYS_PREFIX ""
+#endif
+
 static struct sockaddr_in old;
 
-SEC("kprobe/__sys_connect")
+SEC("kprobe/" SYS_PREFIX "sys_connect")
 int BPF_KPROBE(handle_sys_connect)
 {
-	void *ptr = (void *)PT_REGS_PARM2(ctx);
+#if SYSCALL_WRAPPER == 1
+	struct pt_regs *real_regs;
+#endif
 	struct sockaddr_in new;
+	void *ptr;
+
+#if SYSCALL_WRAPPER == 0
+	ptr = (void *)PT_REGS_PARM2(ctx);
+#else
+	real_regs = (struct pt_regs *)PT_REGS_PARM1(ctx);
+	bpf_probe_read_kernel(&ptr, sizeof(ptr), &PT_REGS_PARM2(real_regs));
+#endif
 
 	bpf_probe_read_user(&old, sizeof(old), ptr);
 	__builtin_memset(&new, 0xab, sizeof(new));
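
The rewritten kprobe handles kernels built with syscall wrappers (CONFIG_ARCH_HAS_SYSCALL_WRAPPER): there, the probed __x64_sys_connect() receives a single struct pt_regs * carrying the user-visible arguments, so the second syscall argument has to be read out of those inner registers with bpf_probe_read_kernel() instead of being taken from the probe context directly. A helper sketching the same dual-path fetch, reusing the SYSCALL_WRAPPER macro defined in the hunk above:

    /* Illustrative only: fetch syscall argument 2 on both wrapper and
     * non-wrapper kernels, mirroring the pattern above.
     */
    static __always_inline void *sys_arg2(struct pt_regs *ctx)
    {
    #if SYSCALL_WRAPPER == 1
        /* ctx itself holds a pointer to the real argument registers */
        struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1(ctx);
        void *arg;

        bpf_probe_read_kernel(&arg, sizeof(arg), &PT_REGS_PARM2(real_regs));
        return arg;
    #else
        /* arguments are passed to the probed function directly */
        return (void *)PT_REGS_PARM2(ctx);
    #endif
    }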
diff --git a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
index 26e77dcc7e91da2127b67f56c36fdf612ac9c924..0f9bc258225eced3b2fdc88334cec8bca482a735 100644
--- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
@@ -24,8 +24,8 @@ int _version SEC("version") = 1;
 struct {
 	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 	__uint(max_entries, 1);
-	__uint(key_size, sizeof(__u32));
-	__uint(value_size, sizeof(__u32));
+	__type(key, __u32);
+	__type(value, __u32);
 } outer_map SEC(".maps");
 
 struct {
diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c
index 1ecd987005d2c8d0a3841c05e3de631f4e7bf0a5..02f79356d5eb7ccadf7679d7df96d4bc79518e17 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_assign.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c
@@ -36,7 +36,6 @@ struct {
 	.pinning = PIN_GLOBAL_NS,
 };
 
-int _version SEC("version") = 1;
 char _license[] SEC("license") = "GPL";
 
 /* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
@@ -159,7 +158,7 @@ handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
 	return ret;
 }
 
-SEC("classifier/sk_assign_test")
+SEC("tc")
 int bpf_sk_assign_test(struct __sk_buff *skb)
 {
 	struct bpf_sock_tuple *tuple, ln = {0};
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
index ac6f7f205e25d2f3cc6199a56ef0666a7cff6ea5..48534d81039181d7bae06271336a4714f78b20bf 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
@@ -72,32 +72,32 @@ static const __u16 DST_PORT = 7007; /* Host byte order */
 static const __u32 DST_IP4 = IP4(127, 0, 0, 1);
 static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001);
 
-SEC("sk_lookup/lookup_pass")
+SEC("sk_lookup")
 int lookup_pass(struct bpf_sk_lookup *ctx)
 {
 	return SK_PASS;
 }
 
-SEC("sk_lookup/lookup_drop")
+SEC("sk_lookup")
 int lookup_drop(struct bpf_sk_lookup *ctx)
 {
 	return SK_DROP;
 }
 
-SEC("sk_reuseport/reuse_pass")
+SEC("sk_reuseport")
 int reuseport_pass(struct sk_reuseport_md *ctx)
 {
 	return SK_PASS;
 }
 
-SEC("sk_reuseport/reuse_drop")
+SEC("sk_reuseport")
 int reuseport_drop(struct sk_reuseport_md *ctx)
 {
 	return SK_DROP;
 }
 
 /* Redirect packets destined for port DST_PORT to socket at redir_map[0]. */
-SEC("sk_lookup/redir_port")
+SEC("sk_lookup")
 int redir_port(struct bpf_sk_lookup *ctx)
 {
 	struct bpf_sock *sk;
@@ -116,7 +116,7 @@ int redir_port(struct bpf_sk_lookup *ctx)
 }
 
 /* Redirect packets destined for DST_IP4 address to socket at redir_map[0]. */
-SEC("sk_lookup/redir_ip4")
+SEC("sk_lookup")
 int redir_ip4(struct bpf_sk_lookup *ctx)
 {
 	struct bpf_sock *sk;
@@ -139,7 +139,7 @@ int redir_ip4(struct bpf_sk_lookup *ctx)
 }
 
 /* Redirect packets destined for DST_IP6 address to socket at redir_map[0]. */
-SEC("sk_lookup/redir_ip6")
+SEC("sk_lookup")
 int redir_ip6(struct bpf_sk_lookup *ctx)
 {
 	struct bpf_sock *sk;
@@ -164,7 +164,7 @@ int redir_ip6(struct bpf_sk_lookup *ctx)
 	return err ? SK_DROP : SK_PASS;
 }
 
-SEC("sk_lookup/select_sock_a")
+SEC("sk_lookup")
 int select_sock_a(struct bpf_sk_lookup *ctx)
 {
 	struct bpf_sock *sk;
@@ -179,7 +179,7 @@ int select_sock_a(struct bpf_sk_lookup *ctx)
 	return err ? SK_DROP : SK_PASS;
 }
 
-SEC("sk_lookup/select_sock_a_no_reuseport")
+SEC("sk_lookup")
 int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx)
 {
 	struct bpf_sock *sk;
@@ -194,7 +194,7 @@ int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx)
 	return err ? SK_DROP : SK_PASS;
 }
 
-SEC("sk_reuseport/select_sock_b")
+SEC("sk_reuseport")
 int select_sock_b(struct sk_reuseport_md *ctx)
 {
 	__u32 key = KEY_SERVER_B;
@@ -205,7 +205,7 @@ int select_sock_b(struct sk_reuseport_md *ctx)
 }
 
 /* Check that bpf_sk_assign() returns -EEXIST if socket already selected. */
-SEC("sk_lookup/sk_assign_eexist")
+SEC("sk_lookup")
 int sk_assign_eexist(struct bpf_sk_lookup *ctx)
 {
 	struct bpf_sock *sk;
@@ -238,7 +238,7 @@ int sk_assign_eexist(struct bpf_sk_lookup *ctx)
 }
 
 /* Check that bpf_sk_assign(BPF_SK_LOOKUP_F_REPLACE) can override selection. */
-SEC("sk_lookup/sk_assign_replace_flag")
+SEC("sk_lookup")
 int sk_assign_replace_flag(struct bpf_sk_lookup *ctx)
 {
 	struct bpf_sock *sk;
@@ -270,7 +270,7 @@ int sk_assign_replace_flag(struct bpf_sk_lookup *ctx)
 }
 
 /* Check that bpf_sk_assign(sk=NULL) is accepted. */
-SEC("sk_lookup/sk_assign_null")
+SEC("sk_lookup")
 int sk_assign_null(struct bpf_sk_lookup *ctx)
 {
 	struct bpf_sock *sk = NULL;
@@ -313,7 +313,7 @@ int sk_assign_null(struct bpf_sk_lookup *ctx)
 }
 
 /* Check that selected sk is accessible through context. */
-SEC("sk_lookup/access_ctx_sk")
+SEC("sk_lookup")
 int access_ctx_sk(struct bpf_sk_lookup *ctx)
 {
 	struct bpf_sock *sk1 = NULL, *sk2 = NULL;
@@ -379,7 +379,7 @@ int access_ctx_sk(struct bpf_sk_lookup *ctx)
  * are not covered because they give bogus results; that is, the
  * verifier ignores the offset.
  */
-SEC("sk_lookup/ctx_narrow_access")
+SEC("sk_lookup")
 int ctx_narrow_access(struct bpf_sk_lookup *ctx)
 {
 	struct bpf_sock *sk;
@@ -553,7 +553,7 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
 }
 
 /* Check that sk_assign rejects SERVER_A socket with -ESOCKNOSUPPORT */
-SEC("sk_lookup/sk_assign_esocknosupport")
+SEC("sk_lookup")
 int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx)
 {
 	struct bpf_sock *sk;
@@ -578,28 +578,28 @@ int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx)
 	return ret;
 }
 
-SEC("sk_lookup/multi_prog_pass1")
+SEC("sk_lookup")
 int multi_prog_pass1(struct bpf_sk_lookup *ctx)
 {
 	bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
 	return SK_PASS;
 }
 
-SEC("sk_lookup/multi_prog_pass2")
+SEC("sk_lookup")
 int multi_prog_pass2(struct bpf_sk_lookup *ctx)
 {
 	bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
 	return SK_PASS;
 }
 
-SEC("sk_lookup/multi_prog_drop1")
+SEC("sk_lookup")
 int multi_prog_drop1(struct bpf_sk_lookup *ctx)
 {
 	bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
 	return SK_DROP;
 }
 
-SEC("sk_lookup/multi_prog_drop2")
+SEC("sk_lookup")
 int multi_prog_drop2(struct bpf_sk_lookup *ctx)
 {
 	bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
@@ -623,7 +623,7 @@ static __always_inline int select_server_a(struct bpf_sk_lookup *ctx)
 	return SK_PASS;
 }
 
-SEC("sk_lookup/multi_prog_redir1")
+SEC("sk_lookup")
 int multi_prog_redir1(struct bpf_sk_lookup *ctx)
 {
 	int ret;
@@ -633,7 +633,7 @@ int multi_prog_redir1(struct bpf_sk_lookup *ctx)
 	return SK_PASS;
 }
 
-SEC("sk_lookup/multi_prog_redir2")
+SEC("sk_lookup")
 int multi_prog_redir2(struct bpf_sk_lookup *ctx)
 {
 	int ret;
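
With every program here now living in a bare SEC("sk_lookup") section, the individual test programs are no longer distinguished by section name; userspace picks them by C function name before attaching. A hedged libbpf sketch, assuming libbpf 1.0 error conventions (the program name and netns path are illustrative):

    #include <fcntl.h>
    #include <unistd.h>
    #include <bpf/libbpf.h>

    /* Attach the "lookup_pass" program from an already-loaded object
     * to the current network namespace.
     */
    int attach_lookup_pass(struct bpf_object *obj)
    {
        struct bpf_program *prog;
        struct bpf_link *link;
        int netns_fd;

        netns_fd = open("/proc/self/ns/net", O_RDONLY);
        if (netns_fd < 0)
            return -1;

        prog = bpf_object__find_program_by_name(obj, "lookup_pass");
        link = prog ? bpf_program__attach_netns(prog, netns_fd) : NULL;
        close(netns_fd);
        return link ? 0 : -1;
    }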
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
index 8249075f088f24a2483f1be0b42e28f8e263cbd5..40f161480a2f257653d73b1efac8a1e80a1df41e 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
@@ -15,7 +15,6 @@
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_endian.h>
 
-int _version SEC("version") = 1;
 char _license[] SEC("license") = "GPL";
 
 /* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
@@ -53,8 +52,8 @@ static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
 	return result;
 }
 
-SEC("classifier/sk_lookup_success")
-int bpf_sk_lookup_test0(struct __sk_buff *skb)
+SEC("tc")
+int sk_lookup_success(struct __sk_buff *skb)
 {
 	void *data_end = (void *)(long)skb->data_end;
 	void *data = (void *)(long)skb->data;
@@ -79,8 +78,8 @@ int bpf_sk_lookup_test0(struct __sk_buff *skb)
 	return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
 }
 
-SEC("classifier/sk_lookup_success_simple")
-int bpf_sk_lookup_test1(struct __sk_buff *skb)
+SEC("tc")
+int sk_lookup_success_simple(struct __sk_buff *skb)
 {
 	struct bpf_sock_tuple tuple = {};
 	struct bpf_sock *sk;
@@ -91,8 +90,8 @@ int bpf_sk_lookup_test1(struct __sk_buff *skb)
 	return 0;
 }
 
-SEC("classifier/err_use_after_free")
-int bpf_sk_lookup_uaf(struct __sk_buff *skb)
+SEC("tc")
+int err_use_after_free(struct __sk_buff *skb)
 {
 	struct bpf_sock_tuple tuple = {};
 	struct bpf_sock *sk;
@@ -106,8 +105,8 @@ int bpf_sk_lookup_uaf(struct __sk_buff *skb)
 	return family;
 }
 
-SEC("classifier/err_modify_sk_pointer")
-int bpf_sk_lookup_modptr(struct __sk_buff *skb)
+SEC("tc")
+int err_modify_sk_pointer(struct __sk_buff *skb)
 {
 	struct bpf_sock_tuple tuple = {};
 	struct bpf_sock *sk;
@@ -121,8 +120,8 @@ int bpf_sk_lookup_modptr(struct __sk_buff *skb)
 	return 0;
 }
 
-SEC("classifier/err_modify_sk_or_null_pointer")
-int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
+SEC("tc")
+int err_modify_sk_or_null_pointer(struct __sk_buff *skb)
 {
 	struct bpf_sock_tuple tuple = {};
 	struct bpf_sock *sk;
@@ -135,8 +134,8 @@ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
 	return 0;
 }
 
-SEC("classifier/err_no_release")
-int bpf_sk_lookup_test2(struct __sk_buff *skb)
+SEC("tc")
+int err_no_release(struct __sk_buff *skb)
 {
 	struct bpf_sock_tuple tuple = {};
 
@@ -144,8 +143,8 @@ int bpf_sk_lookup_test2(struct __sk_buff *skb)
 	return 0;
 }
 
-SEC("classifier/err_release_twice")
-int bpf_sk_lookup_test3(struct __sk_buff *skb)
+SEC("tc")
+int err_release_twice(struct __sk_buff *skb)
 {
 	struct bpf_sock_tuple tuple = {};
 	struct bpf_sock *sk;
@@ -156,8 +155,8 @@ int bpf_sk_lookup_test3(struct __sk_buff *skb)
 	return 0;
 }
 
-SEC("classifier/err_release_unchecked")
-int bpf_sk_lookup_test4(struct __sk_buff *skb)
+SEC("tc")
+int err_release_unchecked(struct __sk_buff *skb)
 {
 	struct bpf_sock_tuple tuple = {};
 	struct bpf_sock *sk;
@@ -173,8 +172,8 @@ void lookup_no_release(struct __sk_buff *skb)
 	bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
 }
 
-SEC("classifier/err_no_release_subcall")
-int bpf_sk_lookup_test5(struct __sk_buff *skb)
+SEC("tc")
+int err_no_release_subcall(struct __sk_buff *skb)
 {
 	lookup_no_release(skb);
 	return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_skb_helpers.c b/tools/testing/selftests/bpf/progs/test_skb_helpers.c
index bb3fbf1a29e34fed32d306b469f88b0e5890f7bd..507215791c5bbea8107a55c2ae70933289d37a4e 100644
--- a/tools/testing/selftests/bpf/progs/test_skb_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_helpers.c
@@ -14,7 +14,7 @@ struct {
 
 char _license[] SEC("license") = "GPL";
 
-SEC("classifier/test_skb_helpers")
+SEC("tc")
 int test_skb_helpers(struct __sk_buff *skb)
 {
 	struct task_struct *task;
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
index a1cc58b10c7c212cb7bd93cc5ed0a0930f804099..00f1456aaeda2514278d6fe6ca494bbae8e380ec 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
@@ -56,7 +56,7 @@ int prog_stream_verdict(struct __sk_buff *skb)
 	return verdict;
 }
 
-SEC("sk_skb/skb_verdict")
+SEC("sk_skb")
 int prog_skb_verdict(struct __sk_buff *skb)
 {
 	unsigned int *count;
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c b/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c
index 2d31f66e4f2361a7f3e255ee452390294864571d..3c69aa971738e67fa8d261dfa2c718f356e047cf 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c
@@ -9,7 +9,7 @@ struct {
 	__type(value, __u64);
 } sock_map SEC(".maps");
 
-SEC("sk_skb/skb_verdict")
+SEC("sk_skb")
 int prog_skb_verdict(struct __sk_buff *skb)
 {
 	return SK_DROP;
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_update.c b/tools/testing/selftests/bpf/progs/test_sockmap_update.c
index 9d0c9f28cab2ed6cc0587f8eca4ad1ba1d31ee53..6d64ea536e3d8163c77b612f749b62d3771ad5cb 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_update.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_update.c
@@ -24,7 +24,7 @@ struct {
 	__type(value, __u64);
 } dst_sock_hash SEC(".maps");
 
-SEC("classifier/copy_sock_map")
+SEC("tc")
 int copy_sock_map(void *ctx)
 {
 	struct bpf_sock *sk;
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
index 0cf0134631b4ba6ed2a1a475f9d97b817616a24b..7449fdb1763b14bb89e26acb168c128c1745abd2 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
@@ -28,8 +28,8 @@ struct {
 	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
 	__uint(max_entries, 128);
 	__uint(map_flags, BPF_F_STACK_BUILD_ID);
-	__uint(key_size, sizeof(__u32));
-	__uint(value_size, sizeof(stack_trace_t));
+	__type(key, __u32);
+	__type(value, stack_trace_t);
 } stackmap SEC(".maps");
 
 struct {
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
index 00ed486726204b8d45bb1d0bc4d8a1d7eab4649f..a8233e7f173bccbe1278c1711a4e21370af46d29 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
@@ -27,8 +27,8 @@ typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
 struct {
 	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
 	__uint(max_entries, 16384);
-	__uint(key_size, sizeof(__u32));
-	__uint(value_size, sizeof(stack_trace_t));
+	__type(key, __u32);
+	__type(value, stack_trace_t);
 } stackmap SEC(".maps");
 
 struct {
diff --git a/tools/testing/selftests/bpf/progs/test_tc_bpf.c b/tools/testing/selftests/bpf/progs/test_tc_bpf.c
index 18a3a7ed924a894d7d38f20400af2ce5e5278129..d28ca8d1f3d0700155a37884ee7e27da8660850b 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_bpf.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_bpf.c
@@ -5,7 +5,7 @@
 
 /* Dummy prog to test TC-BPF API */
 
-SEC("classifier")
+SEC("tc")
 int cls(struct __sk_buff *skb)
 {
 	return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh.c b/tools/testing/selftests/bpf/progs/test_tc_neigh.c
index 0c93d326a663f90a1f95432ec89f71b115e0213d..3e32ea375ab4edf3e2b6a484e810a057210ff971 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_neigh.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_neigh.c
@@ -70,7 +70,7 @@ static __always_inline bool is_remote_ep_v6(struct __sk_buff *skb,
 	return v6_equal(ip6h->daddr, addr);
 }
 
-SEC("classifier/chk_egress")
+SEC("tc")
 int tc_chk(struct __sk_buff *skb)
 {
 	void *data_end = ctx_ptr(skb->data_end);
@@ -83,7 +83,7 @@ int tc_chk(struct __sk_buff *skb)
 	return !raw[0] && !raw[1] && !raw[2] ? TC_ACT_SHOT : TC_ACT_OK;
 }
 
-SEC("classifier/dst_ingress")
+SEC("tc")
 int tc_dst(struct __sk_buff *skb)
 {
 	__u8 zero[ETH_ALEN * 2];
@@ -108,7 +108,7 @@ int tc_dst(struct __sk_buff *skb)
 	return bpf_redirect_neigh(IFINDEX_SRC, NULL, 0, 0);
 }
 
-SEC("classifier/src_ingress")
+SEC("tc")
 int tc_src(struct __sk_buff *skb)
 {
 	__u8 zero[ETH_ALEN * 2];
diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
index f7ab69cf018e5a81705a1219b87ab903774bd3a5..ec4cce19362de4b914f0dd6b927a4b1b5a0fbf39 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
@@ -75,7 +75,7 @@ static __always_inline int fill_fib_params_v6(struct __sk_buff *skb,
 	return 0;
 }
 
-SEC("classifier/chk_egress")
+SEC("tc")
 int tc_chk(struct __sk_buff *skb)
 {
 	void *data_end = ctx_ptr(skb->data_end);
@@ -143,13 +143,13 @@ static __always_inline int tc_redir(struct __sk_buff *skb)
 /* these are identical, but keep them separate for compatibility with the
  * section names expected by test_tc_redirect.sh
  */
-SEC("classifier/dst_ingress")
+SEC("tc")
 int tc_dst(struct __sk_buff *skb)
 {
 	return tc_redir(skb);
 }
 
-SEC("classifier/src_ingress")
+SEC("tc")
 int tc_src(struct __sk_buff *skb)
 {
 	return tc_redir(skb);
diff --git a/tools/testing/selftests/bpf/progs/test_tc_peer.c b/tools/testing/selftests/bpf/progs/test_tc_peer.c
index fe818cd5f0109f440d702922e2055a6c875cbe34..365eacb5dc34125943f54c3a7d789d9a01c1c637 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_peer.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_peer.c
@@ -16,31 +16,31 @@ volatile const __u32 IFINDEX_DST;
 static const __u8 src_mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 static const __u8 dst_mac[] = {0x00, 0x22, 0x33, 0x44, 0x55, 0x66};
 
-SEC("classifier/chk_egress")
+SEC("tc")
 int tc_chk(struct __sk_buff *skb)
 {
 	return TC_ACT_SHOT;
 }
 
-SEC("classifier/dst_ingress")
+SEC("tc")
 int tc_dst(struct __sk_buff *skb)
 {
 	return bpf_redirect_peer(IFINDEX_SRC, 0);
 }
 
-SEC("classifier/src_ingress")
+SEC("tc")
 int tc_src(struct __sk_buff *skb)
 {
 	return bpf_redirect_peer(IFINDEX_DST, 0);
 }
 
-SEC("classifier/dst_ingress_l3")
+SEC("tc")
 int tc_dst_l3(struct __sk_buff *skb)
 {
 	return bpf_redirect(IFINDEX_SRC, 0);
 }
 
-SEC("classifier/src_ingress_l3")
+SEC("tc")
 int tc_src_l3(struct __sk_buff *skb)
 {
 	__u16 proto = skb->protocol;
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c b/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c
index 47cbe2eeae431b85d407dd2e11e21e0e8316b58a..cd747cd93dbe81382ee29c68a73d3cdc41107b0a 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c
@@ -148,7 +148,7 @@ static __always_inline void check_syncookie(void *ctx, void *data,
 	bpf_sk_release(sk);
 }
 
-SEC("clsact/check_syncookie")
+SEC("tc")
 int check_syncookie_clsact(struct __sk_buff *skb)
 {
 	check_syncookie(skb, (void *)(long)skb->data,
@@ -156,7 +156,7 @@ int check_syncookie_clsact(struct __sk_buff *skb)
 	return TC_ACT_OK;
 }
 
-SEC("xdp/check_syncookie")
+SEC("xdp")
 int check_syncookie_xdp(struct xdp_md *ctx)
 {
 	check_syncookie(ctx, (void *)(long)ctx->data,
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
index 678bd0fad29edd3994429e6823886381084ac670..5f4e87ee949ad1d03f02e221dc0e45496b85e19d 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
@@ -594,7 +594,7 @@ static int handle_parse_hdr(struct bpf_sock_ops *skops)
 	return CG_OK;
 }
 
-SEC("sockops/estab")
+SEC("sockops")
 int estab(struct bpf_sock_ops *skops)
 {
 	int true_val = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
index ac63410bb541369b209921a3a50be09e5640b28b..24e9344994ef9f2dcb33086f1ad7492c59b1bb94 100644
--- a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
@@ -24,8 +24,8 @@ struct {
 struct {
 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 	__uint(max_entries, 2);
-	__uint(key_size, sizeof(int));
-	__uint(value_size, sizeof(__u32));
+	__type(key, int);
+	__type(value, __u32);
 } perf_event_map SEC(".maps");
 
 int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c
index 31f9bce37491a6ca842f0ac3eca3c8ad49759e6b..e6aa2fc6ce6bd4594d95540dca96672e53368753 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp.c
@@ -210,7 +210,7 @@ static __always_inline int handle_ipv6(struct xdp_md *xdp)
 	return XDP_TX;
 }
 
-SEC("xdp_tx_iptunnel")
+SEC("xdp")
 int _xdp_tx_iptunnel(struct xdp_md *xdp)
 {
 	void *data_end = (void *)(long)xdp->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
index 3d66599eee2ec9e0332e3270722f4581d2c80963..199c61b7d062863389c2b4371f7c8e6171c3d323 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
@@ -2,7 +2,7 @@
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
 
-SEC("xdp_adjust_tail_grow")
+SEC("xdp")
 int _xdp_adjust_tail_grow(struct xdp_md *xdp)
 {
 	void *data_end = (void *)(long)xdp->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c
index 22065a9cfb254ed659890ead1578fc577d412895..b7448253d1359b7939370bba04525b20ffc2c367 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c
@@ -9,9 +9,7 @@
 #include <linux/if_ether.h>
 #include <bpf/bpf_helpers.h>
 
-int _version SEC("version") = 1;
-
-SEC("xdp_adjust_tail_shrink")
+SEC("xdp")
 int _xdp_adjust_tail_shrink(struct xdp_md *xdp)
 {
 	void *data_end = (void *)(long)xdp->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
index a038e827f850a077cf8ab616c6496c70fe829c37..58cf4345f5cc9c34df8460ebfef07e0a2623cfc6 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
@@ -36,8 +36,8 @@ struct meta {
 
 struct {
 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
-	__uint(key_size, sizeof(int));
-	__uint(value_size, sizeof(int));
+	__type(key, int);
+	__type(value, int);
 } perf_buf_map SEC(".maps");
 
 __u64 test_result_fentry = 0;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c
index b360ba2bd441148e10da51c5b96b8696b6f91442..807bf895f42ca84ffc77838464a2ff144f8876d7 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c
@@ -5,7 +5,7 @@
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
 
-SEC("xdp_dm_log")
+SEC("xdp")
 int xdpdm_devlog(struct xdp_md *ctx)
 {
 	char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_link.c b/tools/testing/selftests/bpf/progs/test_xdp_link.c
index eb93ea95d1d8f14ffd52e3934fc7b753d69e07fe..ee7d6ac0f61513120edfefefdcf42b554a131dcc 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_link.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_link.c
@@ -5,7 +5,7 @@
 
 char LICENSE[] SEC("license") = "GPL";
 
-SEC("xdp/handler")
+SEC("xdp")
 int xdp_handler(struct xdp_md *xdp)
 {
 	return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_loop.c b/tools/testing/selftests/bpf/progs/test_xdp_loop.c
index fcabcda30ba32086a1594ed21f5fbb77443ae475..27eb52dda92c21f23605c0f23a54f6b3f38f99c9 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_loop.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_loop.c
@@ -206,7 +206,7 @@ static __always_inline int handle_ipv6(struct xdp_md *xdp)
 	return XDP_TX;
 }
 
-SEC("xdp_tx_iptunnel")
+SEC("xdp")
 int _xdp_tx_iptunnel(struct xdp_md *xdp)
 {
 	void *data_end = (void *)(long)xdp->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
index 3a67921f62b52c3d9143ddc6bf78e43b62abb663..596c4e71bf3ac21b056ef7002934db0cbe499c13 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
@@ -797,7 +797,7 @@ static int process_packet(void *data, __u64 off, void *data_end,
 	return XDP_DROP;
 }
 
-SEC("xdp-test-v4")
+SEC("xdp")
 int balancer_ingress_v4(struct xdp_md *ctx)
 {
 	void *data = (void *)(long)ctx->data;
@@ -816,7 +816,7 @@ int balancer_ingress_v4(struct xdp_md *ctx)
 		return XDP_DROP;
 }
 
-SEC("xdp-test-v6")
+SEC("xdp")
 int balancer_ingress_v6(struct xdp_md *ctx)
 {
 	void *data = (void *)(long)ctx->data;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
index 59ee4f182ff800d7517fe4daeb681fbd59660c1e..532025057711e27aa3cb3cdc80332725f9b40147 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
@@ -12,13 +12,13 @@ struct {
 	__uint(max_entries, 4);
 } cpu_map SEC(".maps");
 
-SEC("xdp_redir")
+SEC("xdp")
 int xdp_redir_prog(struct xdp_md *ctx)
 {
 	return bpf_redirect_map(&cpu_map, 1, 0);
 }
 
-SEC("xdp_dummy")
+SEC("xdp")
 int xdp_dummy_prog(struct xdp_md *ctx)
 {
 	return XDP_PASS;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
index 0ac08649772228ebd7846e6a543ce78bbc25168f..1e6b9c38ea6d9231ec8c7a860fa9e176472f5b01 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
@@ -9,7 +9,7 @@ struct {
 	__uint(max_entries, 4);
 } dm_ports SEC(".maps");
 
-SEC("xdp_redir")
+SEC("xdp")
 int xdp_redir_prog(struct xdp_md *ctx)
 {
 	return bpf_redirect_map(&dm_ports, 1, 0);
@@ -18,7 +18,7 @@ int xdp_redir_prog(struct xdp_md *ctx)
 /* invalid program on DEVMAP entry;
  * the SEC name means the expected attach type is not set
  */
-SEC("xdp_dummy")
+SEC("xdp")
 int xdp_dummy_prog(struct xdp_md *ctx)
 {
 	return XDP_PASS;
diff --git a/tools/testing/selftests/bpf/progs/trace_vprintk.c b/tools/testing/selftests/bpf/progs/trace_vprintk.c
new file mode 100644
index 0000000000000000000000000000000000000000..d327241ba04754e75fc2041203a2f0c3fac39327
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/trace_vprintk.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int null_data_vprintk_ret = 0;
+int trace_vprintk_ret = 0;
+int trace_vprintk_ran = 0;
+
+SEC("fentry/__x64_sys_nanosleep")
+int sys_enter(void *ctx)
+{
+	static const char one[] = "1";
+	static const char three[] = "3";
+	static const char five[] = "5";
+	static const char seven[] = "7";
+	static const char nine[] = "9";
+	static const char f[] = "%pS\n";
+
+	/* runner doesn't search for \t, just ensure it compiles */
+	bpf_printk("\t");
+
+	trace_vprintk_ret = __bpf_vprintk("%s,%d,%s,%d,%s,%d,%s,%d,%s,%d %d\n",
+		one, 2, three, 4, five, 6, seven, 8, nine, 10, ++trace_vprintk_ran);
+
+	/* non-NULL fmt w/ NULL data should result in error */
+	null_data_vprintk_ret = bpf_trace_vprintk(f, sizeof(f), NULL, 0);
+	return 0;
+}
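
trace_vprintk.c exercises the bpf_trace_vprintk() helper: the __bpf_vprintk() macro from bpf_helpers.h packs its variadic arguments (up to 12) into a u64 array and hands that array plus its size to the helper, which is how it gets past the three-argument limit of bpf_trace_printk(). A hedged sketch of a matching userspace runner, assuming a skeleton generated with "bpftool gen skeleton" (the x86-64 fentry target means the program fires only on that architecture):

    #include <time.h>
    #include "trace_vprintk.skel.h"

    int main(void)
    {
        struct trace_vprintk *skel;
        int err;

        skel = trace_vprintk__open_and_load();
        if (!skel)
            return 1;

        err = trace_vprintk__attach(skel);
        if (!err)
            /* triggers the fentry/__x64_sys_nanosleep program above */
            nanosleep(&(struct timespec){ .tv_nsec = 1 }, NULL);

        trace_vprintk__destroy(skel);
        return err != 0;
    }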
diff --git a/tools/testing/selftests/bpf/progs/xdp_dummy.c b/tools/testing/selftests/bpf/progs/xdp_dummy.c
index ea25e88819928dda30a8a81b21adaba1d07b3919..d988b2e0cee840c578373ca34ec0df99483b0f85 100644
--- a/tools/testing/selftests/bpf/progs/xdp_dummy.c
+++ b/tools/testing/selftests/bpf/progs/xdp_dummy.c
@@ -4,7 +4,7 @@
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
 
-SEC("xdp_dummy")
+SEC("xdp")
 int xdp_dummy_prog(struct xdp_md *ctx)
 {
 	return XDP_PASS;
diff --git a/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c b/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c
index 880debcbcd65deb32107e1646af170ccb389bc76..8395782b6e0a3c2d82f52298b80cf3d3b941969f 100644
--- a/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c
@@ -34,7 +34,7 @@ struct {
 	__uint(max_entries, 128);
 } mac_map SEC(".maps");
 
-SEC("xdp_redirect_map_multi")
+SEC("xdp")
 int xdp_redirect_map_multi_prog(struct xdp_md *ctx)
 {
 	void *data_end = (void *)(long)ctx->data_end;
@@ -63,7 +63,7 @@ int xdp_redirect_map_multi_prog(struct xdp_md *ctx)
 }
 
 /* The following 2 progs are for 2nd devmap prog testing */
-SEC("xdp_redirect_map_ingress")
+SEC("xdp")
 int xdp_redirect_map_all_prog(struct xdp_md *ctx)
 {
 	return bpf_redirect_map(&map_egress, 0,
diff --git a/tools/testing/selftests/bpf/progs/xdping_kern.c b/tools/testing/selftests/bpf/progs/xdping_kern.c
index 6b9ca40bd1f4f8a80c9c8b93f6f59dfc8f76d5a8..4ad73847b8a5de6227d48d5a5cb091ca2d5b88e7 100644
--- a/tools/testing/selftests/bpf/progs/xdping_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdping_kern.c
@@ -86,7 +86,7 @@ static __always_inline int icmp_check(struct xdp_md *ctx, int type)
 	return XDP_TX;
 }
 
-SEC("xdpclient")
+SEC("xdp")
 int xdping_client(struct xdp_md *ctx)
 {
 	void *data_end = (void *)(long)ctx->data_end;
@@ -150,7 +150,7 @@ int xdping_client(struct xdp_md *ctx)
 	return XDP_TX;
 }
 
-SEC("xdpserver")
+SEC("xdp")
 int xdping_server(struct xdp_md *ctx)
 {
 	void *data_end = (void *)(long)ctx->data_end;
diff --git a/tools/testing/selftests/bpf/progs/xdpwall.c b/tools/testing/selftests/bpf/progs/xdpwall.c
new file mode 100644
index 0000000000000000000000000000000000000000..7a891a0c3a39cfb74dbb45e6cd3076744e09a507
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdpwall.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <stdbool.h>
+#include <stdint.h>
+#include <linux/stddef.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/bpf.h>
+#include <linux/types.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+
+enum pkt_parse_err {
+	NO_ERR,
+	BAD_IP6_HDR,
+	BAD_IP4GUE_HDR,
+	BAD_IP6GUE_HDR,
+};
+
+enum pkt_flag {
+	TUNNEL = 0x1,
+	TCP_SYN = 0x2,
+	QUIC_INITIAL_FLAG = 0x4,
+	TCP_ACK = 0x8,
+	TCP_RST = 0x10
+};
+
+struct v4_lpm_key {
+	__u32 prefixlen;
+	__u32 src;
+};
+
+struct v4_lpm_val {
+	struct v4_lpm_key key;
+	__u8 val;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 16);
+	__type(key, struct in6_addr);
+	__type(value, bool);
+} v6_addr_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 16);
+	__type(key, __u32);
+	__type(value, bool);
+} v4_addr_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
+	__uint(max_entries, 16);
+	__uint(key_size, sizeof(struct v4_lpm_key));
+	__uint(value_size, sizeof(struct v4_lpm_val));
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+} v4_lpm_val_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 16);
+	__type(key, int);
+	__type(value, __u8);
+} tcp_port_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 16);
+	__type(key, int);
+	__type(value, __u16);
+} udp_port_map SEC(".maps");
+
+enum ip_type { V4 = 1, V6 = 2 };
+
+struct fw_match_info {
+	__u8 v4_src_ip_match;
+	__u8 v6_src_ip_match;
+	__u8 v4_src_prefix_match;
+	__u8 v4_dst_prefix_match;
+	__u8 tcp_dp_match;
+	__u16 udp_sp_match;
+	__u16 udp_dp_match;
+	bool is_tcp;
+	bool is_tcp_syn;
+};
+
+struct pkt_info {
+	enum ip_type type;
+	union {
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} ip;
+	int sport;
+	int dport;
+	__u16 trans_hdr_offset;
+	__u8 proto;
+	__u8 flags;
+};
+
+static __always_inline struct ethhdr *parse_ethhdr(void *data, void *data_end)
+{
+	struct ethhdr *eth = data;
+
+	if (eth + 1 > data_end)
+		return NULL;
+
+	return eth;
+}
+
+static __always_inline __u8 filter_ipv6_addr(const struct in6_addr *ipv6addr)
+{
+	__u8 *leaf;
+
+	leaf = bpf_map_lookup_elem(&v6_addr_map, ipv6addr);
+
+	return leaf ? *leaf : 0;
+}
+
+static __always_inline __u8 filter_ipv4_addr(const __u32 ipaddr)
+{
+	__u8 *leaf;
+
+	leaf = bpf_map_lookup_elem(&v4_addr_map, &ipaddr);
+
+	return leaf ? *leaf : 0;
+}
+
+static __always_inline __u8 filter_ipv4_lpm(const __u32 ipaddr)
+{
+	struct v4_lpm_key v4_key = {};
+	struct v4_lpm_val *lpm_val;
+
+	v4_key.src = ipaddr;
+	v4_key.prefixlen = 32;
+
+	lpm_val = bpf_map_lookup_elem(&v4_lpm_val_map, &v4_key);
+
+	return lpm_val ? lpm_val->val : 0;
+}
+
+
+static __always_inline void
+filter_src_dst_ip(struct pkt_info* info, struct fw_match_info* match_info)
+{
+	if (info->type == V6) {
+		match_info->v6_src_ip_match =
+			filter_ipv6_addr(&info->ip.ipv6->saddr);
+	} else if (info->type == V4) {
+		match_info->v4_src_ip_match =
+			filter_ipv4_addr(info->ip.ipv4->saddr);
+		match_info->v4_src_prefix_match =
+			filter_ipv4_lpm(info->ip.ipv4->saddr);
+		match_info->v4_dst_prefix_match =
+			filter_ipv4_lpm(info->ip.ipv4->daddr);
+	}
+}
+
+static __always_inline void *
+get_transport_hdr(__u16 offset, void *data, void *data_end)
+{
+	if (offset > 255 || data + offset > data_end)
+		return NULL;
+
+	return data + offset;
+}
+
+static __always_inline bool tcphdr_only_contains_flag(struct tcphdr *tcp,
+						      __u32 FLAG)
+{
+	return (tcp_flag_word(tcp) &
+		(TCP_FLAG_ACK | TCP_FLAG_RST | TCP_FLAG_SYN | TCP_FLAG_FIN)) == FLAG;
+}
+
+static __always_inline void set_tcp_flags(struct pkt_info *info,
+					  struct tcphdr *tcp) {
+	if (tcphdr_only_contains_flag(tcp, TCP_FLAG_SYN))
+		info->flags |= TCP_SYN;
+	else if (tcphdr_only_contains_flag(tcp, TCP_FLAG_ACK))
+		info->flags |= TCP_ACK;
+	else if (tcphdr_only_contains_flag(tcp, TCP_FLAG_RST))
+		info->flags |= TCP_RST;
+}
+
+static __always_inline bool
+parse_tcp(struct pkt_info *info, void *transport_hdr, void *data_end)
+{
+	struct tcphdr *tcp = transport_hdr;
+
+	if (tcp + 1 > data_end)
+		return false;
+
+	info->sport = bpf_ntohs(tcp->source);
+	info->dport = bpf_ntohs(tcp->dest);
+	set_tcp_flags(info, tcp);
+
+	return true;
+}
+
+static __always_inline bool
+parse_udp(struct pkt_info *info, void *transport_hdr, void *data_end)
+{
+	struct udphdr *udp = transport_hdr;
+
+	if (udp + 1 > data_end)
+		return false;
+
+	info->sport = bpf_ntohs(udp->source);
+	info->dport = bpf_ntohs(udp->dest);
+
+	return true;
+}
+
+static __always_inline __u8 filter_tcp_port(int port)
+{
+	__u8 *leaf = bpf_map_lookup_elem(&tcp_port_map, &port);
+
+	return leaf ? *leaf : 0;
+}
+
+static __always_inline __u16 filter_udp_port(int port)
+{
+	__u16 *leaf = bpf_map_lookup_elem(&udp_port_map, &port);
+
+	return leaf ? *leaf : 0;
+}
+
+static __always_inline bool
+filter_transport_hdr(void *transport_hdr, void *data_end,
+		     struct pkt_info *info, struct fw_match_info *match_info)
+{
+	if (info->proto == IPPROTO_TCP) {
+		if (!parse_tcp(info, transport_hdr, data_end))
+			return false;
+
+		match_info->is_tcp = true;
+		match_info->is_tcp_syn = (info->flags & TCP_SYN) > 0;
+
+		match_info->tcp_dp_match = filter_tcp_port(info->dport);
+	} else if (info->proto == IPPROTO_UDP) {
+		if (!parse_udp(info, transport_hdr, data_end))
+			return false;
+
+		match_info->udp_dp_match = filter_udp_port(info->dport);
+		match_info->udp_sp_match = filter_udp_port(info->sport);
+	}
+
+	return true;
+}
+
+static __always_inline __u8
+parse_gue_v6(struct pkt_info *info, struct ipv6hdr *ip6h, void *data_end)
+{
+	struct udphdr *udp = (struct udphdr *)(ip6h + 1);
+	void *encap_data = udp + 1;
+
+	if (udp + 1 > data_end)
+		return BAD_IP6_HDR;
+
+	if (udp->dest != bpf_htons(6666))
+		return NO_ERR;
+
+	info->flags |= TUNNEL;
+
+	if (encap_data + 1 > data_end)
+		return BAD_IP6GUE_HDR;
+
+	if (*(__u8 *)encap_data & 0x30) {
+		struct ipv6hdr *inner_ip6h = encap_data;
+
+		if (inner_ip6h + 1 > data_end)
+			return BAD_IP6GUE_HDR;
+
+		info->type = V6;
+		info->proto = inner_ip6h->nexthdr;
+		info->ip.ipv6 = inner_ip6h;
+		info->trans_hdr_offset += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
+	} else {
+		struct iphdr *inner_ip4h = encap_data;
+
+		if (inner_ip4h + 1 > data_end)
+			return BAD_IP6GUE_HDR;
+
+		info->type = V4;
+		info->proto = inner_ip4h->protocol;
+		info->ip.ipv4 = inner_ip4h;
+		info->trans_hdr_offset += sizeof(struct iphdr) + sizeof(struct udphdr);
+	}
+
+	return NO_ERR;
+}
+
+static __always_inline __u8 parse_ipv6_gue(struct pkt_info *info,
+					   void *data, void *data_end)
+{
+	struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
+
+	if (ip6h + 1 > data_end)
+		return BAD_IP6_HDR;
+
+	info->proto = ip6h->nexthdr;
+	info->ip.ipv6 = ip6h;
+	info->type = V6;
+	info->trans_hdr_offset = sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
+
+	if (info->proto == IPPROTO_UDP)
+		return parse_gue_v6(info, ip6h, data_end);
+
+	return NO_ERR;
+}
+
+SEC("xdp")
+int edgewall(struct xdp_md *ctx)
+{
+	void *data_end = (void *)(long)(ctx->data_end);
+	void *data = (void *)(long)(ctx->data);
+	struct fw_match_info match_info = {};
+	struct pkt_info info = {};
+	__u8 parse_err = NO_ERR;
+	void *transport_hdr;
+	struct ethhdr *eth;
+	bool filter_res;
+	__u32 proto;
+
+	eth = parse_ethhdr(data, data_end);
+	if (!eth)
+		return XDP_DROP;
+
+	proto = eth->h_proto;
+	if (proto != bpf_htons(ETH_P_IPV6))
+		return XDP_DROP;
+
+	if (parse_ipv6_gue(&info, data, data_end))
+		return XDP_DROP;
+
+	if (info.proto == IPPROTO_ICMPV6)
+		return XDP_PASS;
+
+	if (info.proto != IPPROTO_TCP && info.proto != IPPROTO_UDP)
+		return XDP_DROP;
+
+	filter_src_dst_ip(&info, &match_info);
+
+	transport_hdr = get_transport_hdr(info.trans_hdr_offset, data,
+					  data_end);
+	if (!transport_hdr)
+		return XDP_DROP;
+
+	filter_res = filter_transport_hdr(transport_hdr, data_end,
+					  &info, &match_info);
+	if (!filter_res)
+		return XDP_DROP;
+
+	if (match_info.is_tcp && !match_info.is_tcp_syn)
+		return XDP_PASS;
+
+	return XDP_DROP;
+}
+
+char LICENSE[] SEC("license") = "GPL";
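
xdpwall.c is a self-contained XDP firewall: it accepts only IPv6 frames, optionally unwraps a GUE tunnel on UDP port 6666, records per-rule matches in fw_match_info, and passes just ICMPv6 and established (non-SYN) TCP while dropping everything else. A hedged loader sketch with libbpf (the object path, program-name lookup, and interface are assumptions; bpf_xdp_attach() requires libbpf v0.8 or newer):

    #include <net/if.h>
    #include <linux/if_link.h>
    #include <bpf/libbpf.h>

    int attach_edgewall(const char *ifname)
    {
        struct bpf_object *obj;
        struct bpf_program *prog;
        int ifindex = if_nametoindex(ifname);

        obj = bpf_object__open_file("xdpwall.o", NULL);
        if (!obj || bpf_object__load(obj))
            return -1;

        prog = bpf_object__find_program_by_name(obj, "edgewall");
        if (!prog)
            return -1;

        /* generic (skb) mode so the sketch works on any driver */
        return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
                              XDP_FLAGS_SKB_MODE, NULL);
    }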
diff --git a/tools/testing/selftests/bpf/test_bpftool.py b/tools/testing/selftests/bpf/test_bpftool.py
index 4fed2dc25c0aa1b99f83f357369ef9043ec50ac0..1c2408ee1f5d0023e7ec7c126cd014c3556de21b 100644
--- a/tools/testing/selftests/bpf/test_bpftool.py
+++ b/tools/testing/selftests/bpf/test_bpftool.py
@@ -57,6 +57,11 @@ def default_iface(f):
         return f(*args, iface, **kwargs)
     return wrapper
 
+DMESG_EMITTING_HELPERS = [
+        "bpf_probe_write_user",
+        "bpf_trace_printk",
+        "bpf_trace_vprintk",
+    ]
 
 class TestBpftool(unittest.TestCase):
     @classmethod
@@ -67,10 +72,7 @@ class TestBpftool(unittest.TestCase):
 
     @default_iface
     def test_feature_dev_json(self, iface):
-        unexpected_helpers = [
-            "bpf_probe_write_user",
-            "bpf_trace_printk",
-        ]
+        unexpected_helpers = DMESG_EMITTING_HELPERS
         expected_keys = [
             "syscall_config",
             "program_types",
@@ -94,10 +96,7 @@ class TestBpftool(unittest.TestCase):
             bpftool_json(["feature", "probe"]),
             bpftool_json(["feature"]),
         ]
-        unexpected_helpers = [
-            "bpf_probe_write_user",
-            "bpf_trace_printk",
-        ]
+        unexpected_helpers = DMESG_EMITTING_HELPERS
         expected_keys = [
             "syscall_config",
             "system_config",
@@ -121,10 +120,7 @@ class TestBpftool(unittest.TestCase):
             bpftool_json(["feature", "probe", "kernel", "full"]),
             bpftool_json(["feature", "probe", "full"]),
         ]
-        expected_helpers = [
-            "bpf_probe_write_user",
-            "bpf_trace_printk",
-        ]
+        expected_helpers = DMESG_EMITTING_HELPERS
 
         for tc in test_cases:
             # Check if expected helpers are included at least once in any
@@ -157,7 +153,7 @@ class TestBpftool(unittest.TestCase):
                 not_full_set.add(helper)
 
         self.assertCountEqual(full_set - not_full_set,
-                                {"bpf_probe_write_user", "bpf_trace_printk"})
+                              set(DMESG_EMITTING_HELPERS))
         self.assertCountEqual(not_full_set - full_set, set())
 
     def test_feature_macros(self):
diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh b/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh
index 9b3617d770a525875bf6a0d6d6d2f27553267029..6413c14725546067e8fd252e493d7f282384bb1a 100755
--- a/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh
+++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh
@@ -76,8 +76,8 @@ DIR=$(dirname $0)
 TEST_IF=lo
 MAX_PING_TRIES=5
 BPF_PROG_OBJ="${DIR}/test_tcp_check_syncookie_kern.o"
-CLSACT_SECTION="clsact/check_syncookie"
-XDP_SECTION="xdp/check_syncookie"
+CLSACT_SECTION="tc"
+XDP_SECTION="xdp"
 BPF_PROG_ID=0
 PROG="${DIR}/test_tcp_check_syncookie_user"
 
diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh
index 1ccbe804e8e1cbd0db34b2a68fbd3b996cd98c2c..ca1372924023dfaf45bf7f995bc1587d271abc8c 100755
--- a/tools/testing/selftests/bpf/test_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tunnel.sh
@@ -168,14 +168,15 @@ add_vxlan_tunnel()
 	ip netns exec at_ns0 \
 		ip link set dev $DEV_NS address 52:54:00:d9:01:00 up
 	ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
-	ip netns exec at_ns0 arp -s 10.1.1.200 52:54:00:d9:02:00
+	ip netns exec at_ns0 \
+		ip neigh add 10.1.1.200 lladdr 52:54:00:d9:02:00 dev $DEV_NS
 	ip netns exec at_ns0 iptables -A OUTPUT -j MARK --set-mark 0x800FF
 
 	# root namespace
 	ip link add dev $DEV type $TYPE external gbp dstport 4789
 	ip link set dev $DEV address 52:54:00:d9:02:00 up
 	ip addr add dev $DEV 10.1.1.200/24
-	arp -s 10.1.1.100 52:54:00:d9:01:00
+	ip neigh add 10.1.1.100 lladdr 52:54:00:d9:01:00 dev $DEV
 }
 
 add_ip6vxlan_tunnel()
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh
index 637fcf4fe4e30ae94a0db979eda3b447f983abbd..d10cefd6eb09455b9a378f2b5ca68b5d5405eb26 100755
--- a/tools/testing/selftests/bpf/test_xdp_meta.sh
+++ b/tools/testing/selftests/bpf/test_xdp_meta.sh
@@ -1,5 +1,8 @@
 #!/bin/sh
 
+# Kselftest framework requirement - SKIP code is 4.
+readonly KSFT_SKIP=4
+
 cleanup()
 {
 	if [ "$?" = "0" ]; then
@@ -17,7 +20,7 @@ cleanup()
 ip link set dev lo xdp off 2>/dev/null > /dev/null
 if [ $? -ne 0 ];then
 	echo "selftests: [SKIP] Could not run test without the ip xdp support"
-	exit 0
+	exit $KSFT_SKIP
 fi
 set -e
 
diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.sh b/tools/testing/selftests/bpf/test_xdp_redirect.sh
index c033850886f441b014dbbac921a50814e68a1f54..57c8db9972a65fa3abc3bba6c9e21333f1ca2dcf 100755
--- a/tools/testing/selftests/bpf/test_xdp_redirect.sh
+++ b/tools/testing/selftests/bpf/test_xdp_redirect.sh
@@ -52,8 +52,8 @@ test_xdp_redirect()
 		return 0
 	fi
 
-	ip -n ns1 link set veth11 $xdpmode obj xdp_dummy.o sec xdp_dummy &> /dev/null
-	ip -n ns2 link set veth22 $xdpmode obj xdp_dummy.o sec xdp_dummy &> /dev/null
+	ip -n ns1 link set veth11 $xdpmode obj xdp_dummy.o sec xdp &> /dev/null
+	ip -n ns2 link set veth22 $xdpmode obj xdp_dummy.o sec xdp &> /dev/null
 	ip link set dev veth1 $xdpmode obj test_xdp_redirect.o sec redirect_to_222 &> /dev/null
 	ip link set dev veth2 $xdpmode obj test_xdp_redirect.o sec redirect_to_111 &> /dev/null
 
diff --git a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh
index 1538373157e3c51cfe997eb154000b3f4658d8f3..351955c2bdfd8048bc25e50e57b9c86b6d7c3be7 100755
--- a/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh
+++ b/tools/testing/selftests/bpf/test_xdp_redirect_multi.sh
@@ -88,7 +88,7 @@ setup_ns()
 		# Add a neigh entry for IPv4 ping test
 		ip -n ns$i neigh add 192.0.2.253 lladdr 00:00:00:00:00:01 dev veth0
 		ip -n ns$i link set veth0 $mode obj \
-			xdp_dummy.o sec xdp_dummy &> /dev/null || \
+			xdp_dummy.o sec xdp &> /dev/null || \
 			{ test_fail "Unable to load dummy xdp" && exit 1; }
 		IFACES="$IFACES veth$i"
 		veth_mac[$i]=$(ip link show veth$i | awk '/link\/ether/ {print $2}')
diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh
index 995278e684b6e43a99d815a6eac953c50bcf573d..a3a1eaee26ea60d563fff576de4cf00dc13d195b 100755
--- a/tools/testing/selftests/bpf/test_xdp_veth.sh
+++ b/tools/testing/selftests/bpf/test_xdp_veth.sh
@@ -107,9 +107,9 @@ ip link set dev veth1 xdp pinned $BPF_DIR/progs/redirect_map_0
 ip link set dev veth2 xdp pinned $BPF_DIR/progs/redirect_map_1
 ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2
 
-ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp_dummy
+ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp
 ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec xdp
-ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp_dummy
+ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp
 
 trap cleanup EXIT
 
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh
index bb8b0da91686f0e14105b7c22bb40a7e762f937f..0cbc7604a2f8147cc2ab6e6537ecd8a37df3d1be 100755
--- a/tools/testing/selftests/bpf/test_xdp_vlan.sh
+++ b/tools/testing/selftests/bpf/test_xdp_vlan.sh
@@ -2,6 +2,9 @@
 # SPDX-License-Identifier: GPL-2.0
 # Author: Jesper Dangaard Brouer <hawk@kernel.org>
 
+# Kselftest framework requirement - SKIP code is 4.
+readonly KSFT_SKIP=4
+
 # Allow wrapper scripts to name test
 if [ -z "$TESTNAME" ]; then
     TESTNAME=xdp_vlan
@@ -94,7 +97,7 @@ while true; do
 	    -h | --help )
 		usage;
 		echo "selftests: $TESTNAME [SKIP] usage help info requested"
-		exit 0
+		exit $KSFT_SKIP
 		;;
 	    * )
 		shift
@@ -117,7 +120,7 @@ fi
 ip link set dev lo xdpgeneric off 2>/dev/null > /dev/null
 if [ $? -ne 0 ]; then
 	echo "selftests: $TESTNAME [SKIP] need ip xdp support"
-	exit 0
+	exit $KSFT_SKIP
 fi
 
 # Interactive mode likely require us to cleanup netns
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
index 0b943897aaf6c136a8fea40a3804042b28c44e37..c9991c3f3bd274275fe3c524a55ba1cc471e04df 100644
--- a/tools/testing/selftests/bpf/verifier/spill_fill.c
+++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
@@ -104,3 +104,164 @@
 	.result = ACCEPT,
 	.retval = POINTER_VALUE,
 },
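+/*
+ * The tests below cover spilling and refilling of sub-register (32-bit)
+ * scalars.  Only an exact, same-size refill from the 8-byte aligned spill
+ * slot preserves the constant's precise bounds; partial or misaligned
+ * refills keep only conservative bounds, so the subsequent packet accesses
+ * are rejected.
+ */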
+{
+	"Spill and refill a u32 const scalar.  Offset to skb->data",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+		    offsetof(struct __sk_buff, data)),
+	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+		    offsetof(struct __sk_buff, data_end)),
+	/* r4 = 20 */
+	BPF_MOV32_IMM(BPF_REG_4, 20),
+	/* *(u32 *)(r10 -8) = r4 */
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
+	/* r4 = *(u32 *)(r10 -8) */
+	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
+	/* r0 = r2 */
+	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv20 */
+	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+	/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=inv20 */
+	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+	/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=inv20 */
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+	"Spill a u32 const, refill from another half of the uninit u32 from the stack",
+	.insns = {
+	/* r4 = 20 */
+	BPF_MOV32_IMM(BPF_REG_4, 20),
+	/* *(u32 *)(r10 -8) = r4 */
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
+	/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr */
+	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.errstr = "invalid read from stack off -4+0 size 4",
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+	"Spill a u32 const scalar.  Refill as u16.  Offset to skb->data",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+		    offsetof(struct __sk_buff, data)),
+	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+		    offsetof(struct __sk_buff, data_end)),
+	/* r4 = 20 */
+	BPF_MOV32_IMM(BPF_REG_4, 20),
+	/* *(u32 *)(r10 -8) = r4 */
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
+	/* r4 = *(u16 *)(r10 -8) */
+	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
+	/* r0 = r2 */
+	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.errstr = "invalid access to packet",
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+	"Spill a u32 const scalar.  Refill as u16 from fp-6.  Offset to skb->data",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+		    offsetof(struct __sk_buff, data)),
+	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+		    offsetof(struct __sk_buff, data_end)),
+	/* r4 = 20 */
+	BPF_MOV32_IMM(BPF_REG_4, 20),
+	/* *(u32 *)(r10 -8) = r4 */
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
+	/* r4 = *(u16 *)(r10 -6) */
+	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6),
+	/* r0 = r2 */
+	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.errstr = "invalid access to packet",
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+	"Spill and refill a u32 const scalar at non 8byte aligned stack addr.  Offset to skb->data",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+		    offsetof(struct __sk_buff, data)),
+	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+		    offsetof(struct __sk_buff, data_end)),
+	/* r4 = 20 */
+	BPF_MOV32_IMM(BPF_REG_4, 20),
+	/* *(u32 *)(r10 -8) = r4 */
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
+	/* *(u32 *)(r10 -4) = r4 */
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
+	/* r4 = *(u32 *)(r10 -4) */
+	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
+	/* r0 = r2 */
+	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=U32_MAX */
+	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+	/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
+	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+	/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.errstr = "invalid access to packet",
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+	"Spill and refill a umax=40 bounded scalar.  Offset to skb->data",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+		    offsetof(struct __sk_buff, data)),
+	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+		    offsetof(struct __sk_buff, data_end)),
+	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1,
+		    offsetof(struct __sk_buff, tstamp)),
+	BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	/* *(u32 *)(r10 -8) = r4 R4=inv,umax=40 */
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
+	/* r4 = *(u32 *)(r10 -8) */
+	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
+	/* r2 += r4 R2=pkt R4=inv,umax=40 */
+	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4),
+	/* r0 = r2 R2=pkt,umax=40 R4=inv,umax=40 */
+	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+	/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20),
+	/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */
+	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1),
+	/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c
index 842d9155d36c5b322ca71961df7fb6ef2df22f48..79a3453dab255e11915ec00c7d7e1bb5ca20e9f5 100644
--- a/tools/testing/selftests/bpf/xdping.c
+++ b/tools/testing/selftests/bpf/xdping.c
@@ -178,9 +178,8 @@ int main(int argc, char **argv)
 		return 1;
 	}
 
-	main_prog = bpf_object__find_program_by_title(obj,
-						      server ? "xdpserver" :
-							       "xdpclient");
+	main_prog = bpf_object__find_program_by_name(obj,
+						     server ? "xdping_server" : "xdping_client");
 	if (main_prog)
 		prog_fd = bpf_program__fd(main_prog);
 	if (!main_prog || prog_fd < 0) {
diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c
index 127bcde06c86e67f057d9c288b694fe11a2f5ab0..6c7cf8aadc7923fe5ff803cea67a810dc2ca31f5 100644
--- a/tools/testing/selftests/bpf/xdpxceiver.c
+++ b/tools/testing/selftests/bpf/xdpxceiver.c
@@ -384,6 +384,7 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
 		ifobj->umem = &ifobj->umem_arr[0];
 		ifobj->xsk = &ifobj->xsk_arr[0];
 		ifobj->use_poll = false;
+		ifobj->pacing_on = true;
 		ifobj->pkt_stream = test->pkt_stream_default;
 
 		if (i == 0) {
@@ -445,6 +446,12 @@ static void test_spec_set_name(struct test_spec *test, const char *name)
 	strncpy(test->name, name, MAX_TEST_NAME_SIZE);
 }
 
+static void pkt_stream_reset(struct pkt_stream *pkt_stream)
+{
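+	/* Rewind the RX packet counter so the stream can be replayed */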
+	if (pkt_stream)
+		pkt_stream->rx_pkt_nb = 0;
+}
+
 static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb)
 {
 	if (pkt_nb >= pkt_stream->nb_pkts)
@@ -507,8 +514,7 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb
 
 	pkt_stream->nb_pkts = nb_pkts;
 	for (i = 0; i < nb_pkts; i++) {
-		pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size +
-			DEFAULT_OFFSET;
+		pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size;
 		pkt_stream->pkts[i].len = pkt_len;
 		pkt_stream->pkts[i].payload = i;
 
@@ -536,14 +542,14 @@ static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
 	test->ifobj_rx->pkt_stream = pkt_stream;
 }
 
-static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, u32 offset)
+static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
 {
 	struct xsk_umem_info *umem = test->ifobj_tx->umem;
 	struct pkt_stream *pkt_stream;
 	u32 i;
 
 	pkt_stream = pkt_stream_clone(umem, test->pkt_stream_default);
-	for (i = 0; i < test->pkt_stream_default->nb_pkts; i += 2) {
+	for (i = 1; i < test->pkt_stream_default->nb_pkts; i += 2) {
 		pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size + offset;
 		pkt_stream->pkts[i].len = pkt_len;
 	}
@@ -635,6 +641,25 @@ static void pkt_dump(void *pkt, u32 len)
 	fprintf(stdout, "---------------------------------------\n");
 }
 
+static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr,
+			      u64 pkt_stream_addr)
+{
+	u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
+	u32 offset = addr % umem->frame_size, expected_offset = 0;
+
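+	/* The configured frame headroom is only reserved in aligned mode,
+	 * while XDP_PACKET_HEADROOM is reserved by the kernel in both modes.
+	 */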
+	if (!pkt_stream->use_addr_for_fill)
+		pkt_stream_addr = 0;
+
+	expected_offset += (pkt_stream_addr + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;
+
+	if (offset == expected_offset)
+		return true;
+
+	ksft_test_result_fail("ERROR: [%s] expected [%u], got [%u]\n", __func__, expected_offset,
+			      offset);
+	return false;
+}
+
 static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
 {
 	void *data = xsk_umem__get_data(buffer, addr);
@@ -717,13 +742,15 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
 			 struct pollfd *fds)
 {
 	struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream);
+	struct xsk_umem_info *umem = xsk->umem;
 	u32 idx_rx = 0, idx_fq = 0, rcvd, i;
+	u32 total = 0;
 	int ret;
 
 	while (pkt) {
 		rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
 		if (!rcvd) {
-			if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
+			if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
 				ret = poll(fds, 1, POLL_TMOUT);
 				if (ret < 0)
 					exit_with_error(-ret);
@@ -731,16 +758,16 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
 			continue;
 		}
 
-		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
+		ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
 		while (ret != rcvd) {
 			if (ret < 0)
 				exit_with_error(-ret);
-			if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
+			if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
 				ret = poll(fds, 1, POLL_TMOUT);
 				if (ret < 0)
 					exit_with_error(-ret);
 			}
-			ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
+			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
 		}
 
 		for (i = 0; i < rcvd; i++) {
@@ -757,15 +784,25 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
 
 			orig = xsk_umem__extract_addr(addr);
 			addr = xsk_umem__add_offset_to_addr(addr);
-			if (!is_pkt_valid(pkt, xsk->umem->buffer, addr, desc->len))
+
+			if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len))
+				return;
+			if (!is_offset_correct(umem, pkt_stream, addr, pkt->addr))
 				return;
 
-			*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;
+			*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
 			pkt = pkt_stream_get_next_rx_pkt(pkt_stream);
 		}
 
-		xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
+		xsk_ring_prod__submit(&umem->fq, rcvd);
 		xsk_ring_cons__release(&xsk->rx, rcvd);
+
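+		/* Received packets free up umem frames; wake the TX thread if
+		 * it is waiting for the number of packets in flight to drop
+		 * below the umem frame count.
+		 */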
+		pthread_mutex_lock(&pacing_mutex);
+		pkts_in_flight -= rcvd;
+		total += rcvd;
+		if (pkts_in_flight < umem->num_frames)
+			pthread_cond_signal(&pacing_cond);
+		pthread_mutex_unlock(&pacing_mutex);
 	}
 }
 
@@ -791,10 +828,19 @@ static u32 __send_pkts(struct ifobject *ifobject, u32 pkt_nb)
 			valid_pkts++;
 	}
 
+	pthread_mutex_lock(&pacing_mutex);
+	pkts_in_flight += valid_pkts;
+	if (ifobject->pacing_on && pkts_in_flight >= ifobject->umem->num_frames - BATCH_SIZE) {
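+		/* Too many packets outstanding: kick the kernel to complete
+		 * them and wait for the RX thread to signal progress.
+		 */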
+		kick_tx(xsk);
+		pthread_cond_wait(&pacing_cond, &pacing_mutex);
+	}
+	pthread_mutex_unlock(&pacing_mutex);
+
 	xsk_ring_prod__submit(&xsk->tx, i);
 	xsk->outstanding_tx += valid_pkts;
-	complete_pkts(xsk, BATCH_SIZE);
+	complete_pkts(xsk, i);
 
+	usleep(10);
 	return i;
 }
 
@@ -813,8 +859,6 @@ static void send_pkts(struct ifobject *ifobject)
 	fds.events = POLLOUT;
 
 	while (pkt_cnt < ifobject->pkt_stream->nb_pkts) {
-		u32 sent;
-
 		if (ifobject->use_poll) {
 			int ret;
 
@@ -826,9 +870,7 @@ static void send_pkts(struct ifobject *ifobject)
 				continue;
 		}
 
-		sent = __send_pkts(ifobject, pkt_cnt);
-		pkt_cnt += sent;
-		usleep(10);
+		pkt_cnt += __send_pkts(ifobject, pkt_cnt);
 	}
 
 	wait_for_tx_completion(ifobject->xsk);
@@ -913,18 +955,17 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
 		u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
 		u32 ctr = 0;
 		void *bufs;
+		int ret;
 
 		bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
 		if (bufs == MAP_FAILED)
 			exit_with_error(errno);
 
-		while (ctr++ < SOCK_RECONF_CTR) {
-			int ret;
-
-			ret = xsk_configure_umem(&ifobject->umem_arr[i], bufs, umem_sz);
-			if (ret)
-				exit_with_error(-ret);
+		ret = xsk_configure_umem(&ifobject->umem_arr[i], bufs, umem_sz);
+		if (ret)
+			exit_with_error(-ret);
 
+		while (ctr++ < SOCK_RECONF_CTR) {
 			ret = xsk_configure_socket(&ifobject->xsk_arr[i], &ifobject->umem_arr[i],
 						   ifobject, i);
 			if (!ret)
@@ -971,13 +1012,18 @@ static void *worker_testapp_validate_tx(void *arg)
 
 static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream)
 {
-	u32 idx = 0, i;
+	u32 idx = 0, i, buffers_to_fill;
 	int ret;
 
-	ret = xsk_ring_prod__reserve(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx);
-	if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS)
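+	/* A umem smaller than the default producer ring size cannot provide
+	 * that many buffers, so cap the fill count at the frame count.
+	 */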
+	if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
+		buffers_to_fill = umem->num_frames;
+	else
+		buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+
+	ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
+	if (ret != buffers_to_fill)
 		exit_with_error(ENOSPC);
-	for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++) {
+	for (i = 0; i < buffers_to_fill; i++) {
 		u64 addr;
 
 		if (pkt_stream->use_addr_for_fill) {
@@ -987,12 +1033,12 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream
 				break;
 			addr = pkt->addr;
 		} else {
-			addr = (i % umem->num_frames) * umem->frame_size + DEFAULT_OFFSET;
+			addr = i * umem->frame_size;
 		}
 
 		*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
 	}
-	xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS);
+	xsk_ring_prod__submit(&umem->fq, buffers_to_fill);
 }
 
 static void *worker_testapp_validate_rx(void *arg)
@@ -1032,6 +1078,8 @@ static void testapp_validate_traffic(struct test_spec *test)
 		exit_with_error(errno);
 
 	test->current_step++;
+	pkt_stream_reset(ifobj_rx->pkt_stream);
+	pkts_in_flight = 0;
 
 	/* Spawn RX thread */
 	pthread_create(&t0, NULL, ifobj_rx->func_ptr, test);
@@ -1108,6 +1156,13 @@ static void testapp_bpf_res(struct test_spec *test)
 	testapp_validate_traffic(test);
 }
 
+static void testapp_headroom(struct test_spec *test)
+{
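+	/* Reserve headroom in each RX frame; is_offset_correct() verifies
+	 * that received packets start after it.
+	 */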
+	test_spec_set_name(test, "UMEM_HEADROOM");
+	test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
+	testapp_validate_traffic(test);
+}
+
 static void testapp_stats(struct test_spec *test)
 {
 	int i;
@@ -1115,6 +1170,8 @@ static void testapp_stats(struct test_spec *test)
 	for (i = 0; i < STAT_TEST_TYPE_MAX; i++) {
 		test_spec_reset(test);
 		stat_test_type = i;
+		/* No or few packets will be received, so we cannot pace packets */
+		test->ifobj_tx->pacing_on = false;
 
 		switch (stat_test_type) {
 		case STAT_TEST_RX_DROPPED:
@@ -1181,7 +1238,7 @@ static bool testapp_unaligned(struct test_spec *test)
 	test->ifobj_tx->umem->unaligned_mode = true;
 	test->ifobj_rx->umem->unaligned_mode = true;
 	/* Let half of the packets straddle a buffer boundary */
-	pkt_stream_replace_half(test, PKT_SIZE, test->ifobj_tx->umem->frame_size - 32);
+	pkt_stream_replace_half(test, PKT_SIZE, -PKT_SIZE / 2);
 	test->ifobj_rx->pkt_stream->use_addr_for_fill = true;
 	testapp_validate_traffic(test);
 
@@ -1189,6 +1246,15 @@ static bool testapp_unaligned(struct test_spec *test)
 	return true;
 }
 
+static void testapp_single_pkt(struct test_spec *test)
+{
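+	/* A single valid packet at a fixed umem address exercises a batch
+	 * that is only partially filled.
+	 */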
+	struct pkt pkts[] = {{0x1000, PKT_SIZE, 0, true}};
+
+	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
+	testapp_validate_traffic(test);
+	pkt_stream_restore_default(test);
+}
+
 static void testapp_invalid_desc(struct test_spec *test)
 {
 	struct pkt pkts[] = {
@@ -1270,6 +1336,10 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
 		test_spec_set_name(test, "RUN_TO_COMPLETION");
 		testapp_validate_traffic(test);
 		break;
+	case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT:
+		test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT");
+		testapp_single_pkt(test);
+		break;
 	case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME:
 		test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
 		test->ifobj_tx->umem->frame_size = 2048;
@@ -1305,6 +1375,9 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
 		if (!testapp_unaligned(test))
 			return;
 		break;
+	case TEST_TYPE_HEADROOM:
+		testapp_headroom(test);
+		break;
 	default:
 		break;
 	}
diff --git a/tools/testing/selftests/bpf/xdpxceiver.h b/tools/testing/selftests/bpf/xdpxceiver.h
index 5ac4a5e647445136bf4c724a1c40a6260e671494..2f705f44b7483c92d1db08f65ae72cdcd65c0d98 100644
--- a/tools/testing/selftests/bpf/xdpxceiver.h
+++ b/tools/testing/selftests/bpf/xdpxceiver.h
@@ -35,13 +35,13 @@
 #define UDP_PKT_DATA_SIZE (UDP_PKT_SIZE - sizeof(struct udphdr))
 #define USLEEP_MAX 10000
 #define SOCK_RECONF_CTR 10
-#define BATCH_SIZE 8
+#define BATCH_SIZE 64
 #define POLL_TMOUT 1000
 #define DEFAULT_PKT_CNT (4 * 1024)
 #define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4)
 #define UMEM_SIZE (DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE)
 #define RX_FULL_RXQSIZE 32
-#define DEFAULT_OFFSET 256
+#define UMEM_HEADROOM_TEST_SIZE 128
 #define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1)
 
 #define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
@@ -55,11 +55,13 @@ enum test_mode {
 enum test_type {
 	TEST_TYPE_RUN_TO_COMPLETION,
 	TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME,
+	TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT,
 	TEST_TYPE_POLL,
 	TEST_TYPE_UNALIGNED,
 	TEST_TYPE_ALIGNED_INV_DESC,
 	TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME,
 	TEST_TYPE_UNALIGNED_INV_DESC,
+	TEST_TYPE_HEADROOM,
 	TEST_TYPE_TEARDOWN,
 	TEST_TYPE_BIDI,
 	TEST_TYPE_STATS,
@@ -136,6 +138,7 @@ struct ifobject {
 	bool tx_on;
 	bool rx_on;
 	bool use_poll;
+	bool pacing_on;
 	u8 dst_mac[ETH_ALEN];
 	u8 src_mac[ETH_ALEN];
 };
@@ -151,5 +154,9 @@ struct test_spec {
 };
 
 pthread_barrier_t barr;
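+/* Pacing state shared between the TX and RX worker threads */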
+pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t pacing_cond = PTHREAD_COND_INITIALIZER;
+
+u32 pkts_in_flight;
 
 #endif				/* XDPXCEIVER_H */