333 files changed, 10738 insertions, 7488 deletions
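The arm64 and riscv64 BPF JIT hunks below add support for the BPF "cpu v4" instructions: BPF_MOV with a non-zero off field becomes a sign-extending register move, BPF_MEMSX/BPF_PROBE_MEMSX loads sign-extend from the access width, BPF_DIV/BPF_MOD with off set select the signed forms (A64_SDIV on arm64, rv_div/rv_rem on riscv64), and BPF_JMP32|BPF_JA takes its target from imm rather than off. As a minimal sketch of the sign-extension semantics being emitted (illustrative only, not code from the patch; the helper name is hypothetical), the shift-left/arithmetic-shift-right trick the riscv64 JIT uses for the 8- and 16-bit widths can be written in C as:

#include <stdint.h>

/* Sign-extend the low 'width' bits of val to 64 bits -- the effect the
 * riscv64 JIT gets from slli/srai and the arm64 JIT from SXTB/SXTH/SXTW.
 * Relies on arithmetic right shift of signed values (gcc/clang behaviour).
 */
static int64_t sign_extend(uint64_t val, unsigned int width)
{
	unsigned int shift = 64 - width;

	return (int64_t)(val << shift) >> shift;
}

/* e.g. sign_extend(0xff, 8) == -1, sign_extend(0x7fff, 16) == 32767 */
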
diff --git a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml index 2735c6a4f336..eba2f3026ab0 100644 --- a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml +++ b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml @@ -26,6 +26,7 @@ properties: - qcom,qca6390-bt - qcom,wcn6750-bt - qcom,wcn6855-bt + - qcom,wcn7850-bt enable-gpios: maxItems: 1 @@ -58,6 +59,9 @@ properties: vddaon-supply: description: VDD_AON supply regulator handle + vdddig-supply: + description: VDD_DIG supply regulator handle + vddbtcxmx-supply: description: VDD_BT_CXMX supply regulator handle @@ -73,6 +77,9 @@ properties: vddrfa1p2-supply: description: VDD_RFA_1P2 supply regulator handle + vddrfa1p9-supply: + description: VDD_RFA_1P9 supply regulator handle + vddrfa2p2-supply: description: VDD_RFA_2P2 supply regulator handle @@ -157,6 +164,22 @@ allOf: - vddrfa0p8-supply - vddrfa1p2-supply - vddrfa1p7-supply + - if: + properties: + compatible: + contains: + enum: + - qcom,wcn7850-bt + then: + required: + - enable-gpios + - swctrl-gpios + - vddio-supply + - vddaon-supply + - vdddig-supply + - vddrfa0p8-supply + - vddrfa1p2-supply + - vddrfa1p9-supply examples: - | diff --git a/Documentation/netlink/specs/fou.yaml b/Documentation/netlink/specs/fou.yaml index 3e13826a3fdf..0af5ab842c04 100644 --- a/Documentation/netlink/specs/fou.yaml +++ b/Documentation/netlink/specs/fou.yaml @@ -107,16 +107,16 @@ operations: flags: [ admin-perm ] do: - request: &select_attrs + request: &select_attrs attributes: - - af - - ifindex - - port - - peer_port - - local_v4 - - peer_v4 - - local_v6 - - peer_v6 + - af + - ifindex + - port + - peer_port + - local_v4 + - peer_v4 + - local_v6 + - peer_v6 - name: get diff --git a/Documentation/netlink/specs/ovs_vport.yaml b/Documentation/netlink/specs/ovs_vport.yaml index ef298b001445..f65ce62cd60d 100644 --- a/Documentation/netlink/specs/ovs_vport.yaml +++ b/Documentation/netlink/specs/ovs_vport.yaml @@ -139,9 +139,20 @@ operations: - ifindex - options - + name: del + doc: Delete existing OVS vport from a data path + attribute-set: vport + fixed-header: ovs-header + do: + request: + attributes: + - dp-ifindex + - port-no + - type + - name + - name: get doc: Get / dump OVS vport configuration and state - value: 3 attribute-set: vport fixed-header: ovs-header do: &vport-get-op diff --git a/MAINTAINERS b/MAINTAINERS index dfc2ba0c20f3..5f527a9ae4b2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -22925,6 +22925,7 @@ S: Maintained W: https://www.net-swift.com F: Documentation/networking/device_drivers/ethernet/wangxun/* F: drivers/net/ethernet/wangxun/ +F: drivers/net/pcs/pcs-xpcs-wx.c WATCHDOG DEVICE DRIVERS M: Wim Van Sebroeck <[email protected]> diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 139a88e4e852..db1aeacd4cd9 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -186,6 +186,8 @@ enum aarch64_insn_ldst_type { AARCH64_INSN_LDST_LOAD_ACQ_EX, AARCH64_INSN_LDST_STORE_EX, AARCH64_INSN_LDST_STORE_REL_EX, + AARCH64_INSN_LDST_SIGNED_LOAD_IMM_OFFSET, + AARCH64_INSN_LDST_SIGNED_LOAD_REG_OFFSET, }; enum aarch64_insn_adsb_type { @@ -324,6 +326,7 @@ __AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000) __AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000) __AARCH64_INSN_FUNCS(store_imm, 0x3FC00000, 0x39000000) __AARCH64_INSN_FUNCS(load_imm, 0x3FC00000, 0x39400000) +__AARCH64_INSN_FUNCS(signed_load_imm, 
0X3FC00000, 0x39800000) __AARCH64_INSN_FUNCS(store_pre, 0x3FE00C00, 0x38000C00) __AARCH64_INSN_FUNCS(load_pre, 0x3FE00C00, 0x38400C00) __AARCH64_INSN_FUNCS(store_post, 0x3FE00C00, 0x38000400) @@ -337,6 +340,7 @@ __AARCH64_INSN_FUNCS(ldset, 0x3F20FC00, 0x38203000) __AARCH64_INSN_FUNCS(swp, 0x3F20FC00, 0x38208000) __AARCH64_INSN_FUNCS(cas, 0x3FA07C00, 0x08A07C00) __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800) +__AARCH64_INSN_FUNCS(signed_ldr_reg, 0X3FE0FC00, 0x38A0E800) __AARCH64_INSN_FUNCS(ldr_imm, 0x3FC00000, 0x39400000) __AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000) __AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000) diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c index 924934cb85ee..a635ab83fee3 100644 --- a/arch/arm64/lib/insn.c +++ b/arch/arm64/lib/insn.c @@ -385,6 +385,9 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, case AARCH64_INSN_LDST_LOAD_REG_OFFSET: insn = aarch64_insn_get_ldr_reg_value(); break; + case AARCH64_INSN_LDST_SIGNED_LOAD_REG_OFFSET: + insn = aarch64_insn_get_signed_ldr_reg_value(); + break; case AARCH64_INSN_LDST_STORE_REG_OFFSET: insn = aarch64_insn_get_str_reg_value(); break; @@ -430,6 +433,9 @@ u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg, case AARCH64_INSN_LDST_LOAD_IMM_OFFSET: insn = aarch64_insn_get_ldr_imm_value(); break; + case AARCH64_INSN_LDST_SIGNED_LOAD_IMM_OFFSET: + insn = aarch64_insn_get_signed_load_imm_value(); + break; case AARCH64_INSN_LDST_STORE_IMM_OFFSET: insn = aarch64_insn_get_str_imm_value(); break; diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h index c2edadb8ec6a..23b1b34db088 100644 --- a/arch/arm64/net/bpf_jit.h +++ b/arch/arm64/net/bpf_jit.h @@ -59,10 +59,13 @@ AARCH64_INSN_LDST_##type##_REG_OFFSET) #define A64_STRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, STORE) #define A64_LDRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, LOAD) +#define A64_LDRSB(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 8, SIGNED_LOAD) #define A64_STRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, STORE) #define A64_LDRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, LOAD) +#define A64_LDRSH(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 16, SIGNED_LOAD) #define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE) #define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD) +#define A64_LDRSW(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 32, SIGNED_LOAD) #define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE) #define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD) @@ -73,10 +76,13 @@ AARCH64_INSN_LDST_##type##_IMM_OFFSET) #define A64_STRBI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 8, STORE) #define A64_LDRBI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 8, LOAD) +#define A64_LDRSBI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 8, SIGNED_LOAD) #define A64_STRHI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 16, STORE) #define A64_LDRHI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 16, LOAD) +#define A64_LDRSHI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 16, SIGNED_LOAD) #define A64_STR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, STORE) #define A64_LDR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, LOAD) +#define A64_LDRSWI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 32, SIGNED_LOAD) #define A64_STR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, STORE) #define A64_LDR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, LOAD) @@ -186,6 +192,11 @@ #define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15) #define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31) +/* Sign extend */ +#define A64_SXTB(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 7) +#define A64_SXTH(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 
0, 15) +#define A64_SXTW(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 31) + /* Move wide (immediate) */ #define A64_MOVEW(sf, Rd, imm16, shift, type) \ aarch64_insn_gen_movewide(Rd, imm16, shift, \ @@ -223,6 +234,7 @@ #define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \ A64_VARIANT(sf), AARCH64_INSN_DATA2_##type) #define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV) +#define A64_SDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, SDIV) #define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV) #define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV) #define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index ec2174838f2a..150d1c6543f7 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -715,7 +715,8 @@ static int add_exception_handler(const struct bpf_insn *insn, /* First pass */ return 0; - if (BPF_MODE(insn->code) != BPF_PROBE_MEM) + if (BPF_MODE(insn->code) != BPF_PROBE_MEM && + BPF_MODE(insn->code) != BPF_PROBE_MEMSX) return 0; if (!ctx->prog->aux->extable || @@ -779,12 +780,26 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, u8 dst_adj; int off_adj; int ret; + bool sign_extend; switch (code) { /* dst = src */ case BPF_ALU | BPF_MOV | BPF_X: case BPF_ALU64 | BPF_MOV | BPF_X: - emit(A64_MOV(is64, dst, src), ctx); + switch (insn->off) { + case 0: + emit(A64_MOV(is64, dst, src), ctx); + break; + case 8: + emit(A64_SXTB(is64, dst, src), ctx); + break; + case 16: + emit(A64_SXTH(is64, dst, src), ctx); + break; + case 32: + emit(A64_SXTW(is64, dst, src), ctx); + break; + } break; /* dst = dst OP src */ case BPF_ALU | BPF_ADD | BPF_X: @@ -813,11 +828,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, break; case BPF_ALU | BPF_DIV | BPF_X: case BPF_ALU64 | BPF_DIV | BPF_X: - emit(A64_UDIV(is64, dst, dst, src), ctx); + if (!off) + emit(A64_UDIV(is64, dst, dst, src), ctx); + else + emit(A64_SDIV(is64, dst, dst, src), ctx); break; case BPF_ALU | BPF_MOD | BPF_X: case BPF_ALU64 | BPF_MOD | BPF_X: - emit(A64_UDIV(is64, tmp, dst, src), ctx); + if (!off) + emit(A64_UDIV(is64, tmp, dst, src), ctx); + else + emit(A64_SDIV(is64, tmp, dst, src), ctx); emit(A64_MSUB(is64, dst, dst, tmp, src), ctx); break; case BPF_ALU | BPF_LSH | BPF_X: @@ -840,11 +861,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, /* dst = BSWAP##imm(dst) */ case BPF_ALU | BPF_END | BPF_FROM_LE: case BPF_ALU | BPF_END | BPF_FROM_BE: + case BPF_ALU64 | BPF_END | BPF_FROM_LE: #ifdef CONFIG_CPU_BIG_ENDIAN - if (BPF_SRC(code) == BPF_FROM_BE) + if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_BE) goto emit_bswap_uxt; #else /* !CONFIG_CPU_BIG_ENDIAN */ - if (BPF_SRC(code) == BPF_FROM_LE) + if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE) goto emit_bswap_uxt; #endif switch (imm) { @@ -943,12 +965,18 @@ emit_bswap_uxt: case BPF_ALU | BPF_DIV | BPF_K: case BPF_ALU64 | BPF_DIV | BPF_K: emit_a64_mov_i(is64, tmp, imm, ctx); - emit(A64_UDIV(is64, dst, dst, tmp), ctx); + if (!off) + emit(A64_UDIV(is64, dst, dst, tmp), ctx); + else + emit(A64_SDIV(is64, dst, dst, tmp), ctx); break; case BPF_ALU | BPF_MOD | BPF_K: case BPF_ALU64 | BPF_MOD | BPF_K: emit_a64_mov_i(is64, tmp2, imm, ctx); - emit(A64_UDIV(is64, tmp, dst, tmp2), ctx); + if (!off) + emit(A64_UDIV(is64, tmp, dst, tmp2), ctx); + else + emit(A64_SDIV(is64, tmp, dst, tmp2), ctx); emit(A64_MSUB(is64, dst, dst, tmp, tmp2), ctx); break; case 
BPF_ALU | BPF_LSH | BPF_K: @@ -966,7 +994,11 @@ emit_bswap_uxt: /* JUMP off */ case BPF_JMP | BPF_JA: - jmp_offset = bpf2a64_offset(i, off, ctx); + case BPF_JMP32 | BPF_JA: + if (BPF_CLASS(code) == BPF_JMP) + jmp_offset = bpf2a64_offset(i, off, ctx); + else + jmp_offset = bpf2a64_offset(i, imm, ctx); check_imm26(jmp_offset); emit(A64_B(jmp_offset), ctx); break; @@ -1122,7 +1154,7 @@ emit_cond_jmp: return 1; } - /* LDX: dst = *(size *)(src + off) */ + /* LDX: dst = (u64)*(unsigned size *)(src + off) */ case BPF_LDX | BPF_MEM | BPF_W: case BPF_LDX | BPF_MEM | BPF_H: case BPF_LDX | BPF_MEM | BPF_B: @@ -1131,6 +1163,13 @@ emit_cond_jmp: case BPF_LDX | BPF_PROBE_MEM | BPF_W: case BPF_LDX | BPF_PROBE_MEM | BPF_H: case BPF_LDX | BPF_PROBE_MEM | BPF_B: + /* LDXS: dst_reg = (s64)*(signed size *)(src_reg + off) */ + case BPF_LDX | BPF_MEMSX | BPF_B: + case BPF_LDX | BPF_MEMSX | BPF_H: + case BPF_LDX | BPF_MEMSX | BPF_W: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: if (ctx->fpb_offset > 0 && src == fp) { src_adj = fpb; off_adj = off + ctx->fpb_offset; @@ -1138,29 +1177,49 @@ emit_cond_jmp: src_adj = src; off_adj = off; } + sign_extend = (BPF_MODE(insn->code) == BPF_MEMSX || + BPF_MODE(insn->code) == BPF_PROBE_MEMSX); switch (BPF_SIZE(code)) { case BPF_W: if (is_lsi_offset(off_adj, 2)) { - emit(A64_LDR32I(dst, src_adj, off_adj), ctx); + if (sign_extend) + emit(A64_LDRSWI(dst, src_adj, off_adj), ctx); + else + emit(A64_LDR32I(dst, src_adj, off_adj), ctx); } else { emit_a64_mov_i(1, tmp, off, ctx); - emit(A64_LDR32(dst, src, tmp), ctx); + if (sign_extend) + emit(A64_LDRSW(dst, src_adj, off_adj), ctx); + else + emit(A64_LDR32(dst, src, tmp), ctx); } break; case BPF_H: if (is_lsi_offset(off_adj, 1)) { - emit(A64_LDRHI(dst, src_adj, off_adj), ctx); + if (sign_extend) + emit(A64_LDRSHI(dst, src_adj, off_adj), ctx); + else + emit(A64_LDRHI(dst, src_adj, off_adj), ctx); } else { emit_a64_mov_i(1, tmp, off, ctx); - emit(A64_LDRH(dst, src, tmp), ctx); + if (sign_extend) + emit(A64_LDRSH(dst, src, tmp), ctx); + else + emit(A64_LDRH(dst, src, tmp), ctx); } break; case BPF_B: if (is_lsi_offset(off_adj, 0)) { - emit(A64_LDRBI(dst, src_adj, off_adj), ctx); + if (sign_extend) + emit(A64_LDRSBI(dst, src_adj, off_adj), ctx); + else + emit(A64_LDRBI(dst, src_adj, off_adj), ctx); } else { emit_a64_mov_i(1, tmp, off, ctx); - emit(A64_LDRB(dst, src, tmp), ctx); + if (sign_extend) + emit(A64_LDRSB(dst, src, tmp), ctx); + else + emit(A64_LDRB(dst, src, tmp), ctx); } break; case BPF_DW: diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h index 2717f5490428..d21c6c92a683 100644 --- a/arch/riscv/net/bpf_jit.h +++ b/arch/riscv/net/bpf_jit.h @@ -431,11 +431,21 @@ static inline u32 rv_mulhu(u8 rd, u8 rs1, u8 rs2) return rv_r_insn(1, rs2, rs1, 3, rd, 0x33); } +static inline u32 rv_div(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 4, rd, 0x33); +} + static inline u32 rv_divu(u8 rd, u8 rs1, u8 rs2) { return rv_r_insn(1, rs2, rs1, 5, rd, 0x33); } +static inline u32 rv_rem(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 6, rd, 0x33); +} + static inline u32 rv_remu(u8 rd, u8 rs1, u8 rs2) { return rv_r_insn(1, rs2, rs1, 7, rd, 0x33); @@ -501,6 +511,16 @@ static inline u32 rv_ble(u8 rs1, u8 rs2, u16 imm12_1) return rv_bge(rs2, rs1, imm12_1); } +static inline u32 rv_lb(u8 rd, u16 imm11_0, u8 rs1) +{ + return rv_i_insn(imm11_0, rs1, 0, rd, 0x03); +} + +static inline u32 rv_lh(u8 rd, u16 imm11_0, u8 rs1) +{ + return 
rv_i_insn(imm11_0, rs1, 1, rd, 0x03); +} + static inline u32 rv_lw(u8 rd, u16 imm11_0, u8 rs1) { return rv_i_insn(imm11_0, rs1, 2, rd, 0x03); @@ -766,11 +786,21 @@ static inline u32 rv_mulw(u8 rd, u8 rs1, u8 rs2) return rv_r_insn(1, rs2, rs1, 0, rd, 0x3b); } +static inline u32 rv_divw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 4, rd, 0x3b); +} + static inline u32 rv_divuw(u8 rd, u8 rs1, u8 rs2) { return rv_r_insn(1, rs2, rs1, 5, rd, 0x3b); } +static inline u32 rv_remw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 6, rd, 0x3b); +} + static inline u32 rv_remuw(u8 rd, u8 rs1, u8 rs2) { return rv_r_insn(1, rs2, rs1, 7, rd, 0x3b); diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 0ca4f5c0097c..8423f4ddf8f5 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -580,7 +580,8 @@ static int add_exception_handler(const struct bpf_insn *insn, unsigned long pc; off_t offset; - if (!ctx->insns || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM) + if (!ctx->insns || !ctx->prog->aux->extable || + (BPF_MODE(insn->code) != BPF_PROBE_MEM && BPF_MODE(insn->code) != BPF_PROBE_MEMSX)) return 0; if (WARN_ON_ONCE(ctx->nexentries >= ctx->prog->aux->num_exentries)) @@ -1046,7 +1047,19 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, emit_zext_32(rd, ctx); break; } - emit_mv(rd, rs, ctx); + switch (insn->off) { + case 0: + emit_mv(rd, rs, ctx); + break; + case 8: + case 16: + emit_slli(RV_REG_T1, rs, 64 - insn->off, ctx); + emit_srai(rd, RV_REG_T1, 64 - insn->off, ctx); + break; + case 32: + emit_addiw(rd, rs, 0, ctx); + break; + } if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; @@ -1094,13 +1107,19 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, break; case BPF_ALU | BPF_DIV | BPF_X: case BPF_ALU64 | BPF_DIV | BPF_X: - emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx); + if (off) + emit(is64 ? rv_div(rd, rd, rs) : rv_divw(rd, rd, rs), ctx); + else + emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx); if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_MOD | BPF_X: case BPF_ALU64 | BPF_MOD | BPF_X: - emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx); + if (off) + emit(is64 ? rv_rem(rd, rd, rs) : rv_remw(rd, rd, rs), ctx); + else + emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx); if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; @@ -1149,6 +1168,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, break; case BPF_ALU | BPF_END | BPF_FROM_BE: + case BPF_ALU64 | BPF_END | BPF_FROM_LE: emit_li(RV_REG_T2, 0, ctx); emit_andi(RV_REG_T1, rd, 0xff, ctx); @@ -1271,16 +1291,24 @@ out_be: case BPF_ALU | BPF_DIV | BPF_K: case BPF_ALU64 | BPF_DIV | BPF_K: emit_imm(RV_REG_T1, imm, ctx); - emit(is64 ? rv_divu(rd, rd, RV_REG_T1) : - rv_divuw(rd, rd, RV_REG_T1), ctx); + if (off) + emit(is64 ? rv_div(rd, rd, RV_REG_T1) : + rv_divw(rd, rd, RV_REG_T1), ctx); + else + emit(is64 ? rv_divu(rd, rd, RV_REG_T1) : + rv_divuw(rd, rd, RV_REG_T1), ctx); if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_MOD | BPF_K: case BPF_ALU64 | BPF_MOD | BPF_K: emit_imm(RV_REG_T1, imm, ctx); - emit(is64 ? rv_remu(rd, rd, RV_REG_T1) : - rv_remuw(rd, rd, RV_REG_T1), ctx); + if (off) + emit(is64 ? rv_rem(rd, rd, RV_REG_T1) : + rv_remw(rd, rd, RV_REG_T1), ctx); + else + emit(is64 ? 
rv_remu(rd, rd, RV_REG_T1) : + rv_remuw(rd, rd, RV_REG_T1), ctx); if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; @@ -1314,7 +1342,11 @@ out_be: /* JUMP off */ case BPF_JMP | BPF_JA: - rvoff = rv_offset(i, off, ctx); + case BPF_JMP32 | BPF_JA: + if (BPF_CLASS(code) == BPF_JMP) + rvoff = rv_offset(i, off, ctx); + else + rvoff = rv_offset(i, imm, ctx); ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx); if (ret) return ret; @@ -1486,7 +1518,7 @@ out_be: return 1; } - /* LDX: dst = *(size *)(src + off) */ + /* LDX: dst = *(unsigned size *)(src + off) */ case BPF_LDX | BPF_MEM | BPF_B: case BPF_LDX | BPF_MEM | BPF_H: case BPF_LDX | BPF_MEM | BPF_W: @@ -1495,14 +1527,28 @@ out_be: case BPF_LDX | BPF_PROBE_MEM | BPF_H: case BPF_LDX | BPF_PROBE_MEM | BPF_W: case BPF_LDX | BPF_PROBE_MEM | BPF_DW: + /* LDSX: dst = *(signed size *)(src + off) */ + case BPF_LDX | BPF_MEMSX | BPF_B: + case BPF_LDX | BPF_MEMSX | BPF_H: + case BPF_LDX | BPF_MEMSX | BPF_W: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: { int insn_len, insns_start; + bool sign_ext; + + sign_ext = BPF_MODE(insn->code) == BPF_MEMSX || + BPF_MODE(insn->code) == BPF_PROBE_MEMSX; switch (BPF_SIZE(code)) { case BPF_B: if (is_12b_int(off)) { insns_start = ctx->ninsns; - emit(rv_lbu(rd, off, rs), ctx); + if (sign_ext) + emit(rv_lb(rd, off, rs), ctx); + else + emit(rv_lbu(rd, off, rs), ctx); insn_len = ctx->ninsns - insns_start; break; } @@ -1510,15 +1556,19 @@ out_be: emit_imm(RV_REG_T1, off, ctx); emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); insns_start = ctx->ninsns; - emit(rv_lbu(rd, 0, RV_REG_T1), ctx); + if (sign_ext) + emit(rv_lb(rd, 0, RV_REG_T1), ctx); + else + emit(rv_lbu(rd, 0, RV_REG_T1), ctx); insn_len = ctx->ninsns - insns_start; - if (insn_is_zext(&insn[1])) - return 1; break; case BPF_H: if (is_12b_int(off)) { insns_start = ctx->ninsns; - emit(rv_lhu(rd, off, rs), ctx); + if (sign_ext) + emit(rv_lh(rd, off, rs), ctx); + else + emit(rv_lhu(rd, off, rs), ctx); insn_len = ctx->ninsns - insns_start; break; } @@ -1526,15 +1576,19 @@ out_be: emit_imm(RV_REG_T1, off, ctx); emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); insns_start = ctx->ninsns; - emit(rv_lhu(rd, 0, RV_REG_T1), ctx); + if (sign_ext) + emit(rv_lh(rd, 0, RV_REG_T1), ctx); + else + emit(rv_lhu(rd, 0, RV_REG_T1), ctx); insn_len = ctx->ninsns - insns_start; - if (insn_is_zext(&insn[1])) - return 1; break; case BPF_W: if (is_12b_int(off)) { insns_start = ctx->ninsns; - emit(rv_lwu(rd, off, rs), ctx); + if (sign_ext) + emit(rv_lw(rd, off, rs), ctx); + else + emit(rv_lwu(rd, off, rs), ctx); insn_len = ctx->ninsns - insns_start; break; } @@ -1542,10 +1596,11 @@ out_be: emit_imm(RV_REG_T1, off, ctx); emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); insns_start = ctx->ninsns; - emit(rv_lwu(rd, 0, RV_REG_T1), ctx); + if (sign_ext) + emit(rv_lw(rd, 0, RV_REG_T1), ctx); + else + emit(rv_lwu(rd, 0, RV_REG_T1), ctx); insn_len = ctx->ninsns - insns_start; - if (insn_is_zext(&insn[1])) - return 1; break; case BPF_DW: if (is_12b_int(off)) { @@ -1566,6 +1621,9 @@ out_be: ret = add_exception_handler(insn, ctx, rd, insn_len); if (ret) return ret; + + if (BPF_SIZE(code) != BPF_DW && insn_is_zext(&insn[1])) + return 1; break; } /* speculation barrier */ diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 633e8d9bf58f..2462796a512a 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -2401,7 +2401,7 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct 
intel_version_tlv *ver { struct btintel_ppag ppag; struct sk_buff *skb; - struct btintel_loc_aware_reg ppag_cmd; + struct hci_ppag_enable_cmd ppag_cmd; acpi_handle handle; /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */ @@ -2409,6 +2409,8 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver case 0x504: /* Hrp2 */ case 0x202: /* Jfp2 */ case 0x201: /* Jfp1 */ + bt_dev_dbg(hdev, "PPAG not supported for Intel CNVr (0x%3x)", + ver->cnvr_top & 0xFFF); return; } @@ -2434,24 +2436,29 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver } if (ppag.domain != 0x12) { - bt_dev_warn(hdev, "PPAG-BT: domain is not bluetooth"); + bt_dev_dbg(hdev, "PPAG-BT: Bluetooth domain is disabled in ACPI firmware"); return; } - /* PPAG mode, BIT0 = 0 Disabled, BIT0 = 1 Enabled */ - if (!(ppag.mode & BIT(0))) { - bt_dev_dbg(hdev, "PPAG-BT: disabled"); + /* PPAG mode + * BIT 0 : 0 Disabled in EU + * 1 Enabled in EU + * BIT 1 : 0 Disabled in China + * 1 Enabled in China + */ + if ((ppag.mode & 0x01) != BIT(0) && (ppag.mode & 0x02) != BIT(1)) { + bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in CB/BIOS"); return; } - ppag_cmd.mcc = cpu_to_le32(0); - ppag_cmd.sel = cpu_to_le32(0); /* 0 - Enable , 1 - Disable, 2 - Testing mode */ - ppag_cmd.delta = cpu_to_le32(0); - skb = __hci_cmd_sync(hdev, 0xfe19, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT); + ppag_cmd.ppag_enable_flags = cpu_to_le32(ppag.mode); + + skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_warn(hdev, "Failed to send PPAG Enable (%ld)", PTR_ERR(skb)); return; } + bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", ppag.mode); kfree_skb(skb); } @@ -2780,6 +2787,9 @@ static int btintel_setup_combined(struct hci_dev *hdev) set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + /* These variants don't seem to support LE Coded PHY */ + set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks); + /* Setup MSFT Extension support */ btintel_set_msft_opcode(hdev, ver.hw_variant); @@ -2851,6 +2861,9 @@ static int btintel_setup_combined(struct hci_dev *hdev) */ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + /* These variants don't seem to support LE Coded PHY */ + set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks); + /* Set Valid LE States quirk */ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index 2ed646609dee..3a2d5b4219dd 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -137,10 +137,9 @@ struct intel_offload_use_cases { __u8 preset[8]; } __packed; -struct btintel_loc_aware_reg { - __le32 mcc; - __le32 sel; - __le32 delta; +#define INTEL_OP_PPAG_CMD 0xFE0B +struct hci_ppag_enable_cmd { + __le32 ppag_enable_flags; } __packed; #define INTEL_TLV_TYPE_ID 0x01 diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index ee6f6c872a34..b7e66b7ac570 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -28,12 +28,18 @@ #define BTNXPUART_FW_DOWNLOADING 2 #define BTNXPUART_CHECK_BOOT_SIGNATURE 3 #define BTNXPUART_SERDEV_OPEN 4 +#define BTNXPUART_IR_IN_PROGRESS 5 + +/* NXP HW err codes */ +#define BTNXPUART_IR_HW_ERR 0xb0 #define FIRMWARE_W8987 "nxp/uartuart8987_bt.bin" #define FIRMWARE_W8997 "nxp/uartuart8997_bt_v4.bin" #define FIRMWARE_W9098 "nxp/uartuart9098_bt_v1.bin" #define FIRMWARE_IW416 "nxp/uartiw416_bt_v0.bin" #define FIRMWARE_IW612 "nxp/uartspi_n61x_v1.bin.se" 
+#define FIRMWARE_IW624 "nxp/uartiw624_bt.bin" +#define FIRMWARE_SECURE_IW624 "nxp/uartiw624_bt.bin.se" #define FIRMWARE_AW693 "nxp/uartaw693_bt.bin" #define FIRMWARE_SECURE_AW693 "nxp/uartaw693_bt.bin.se" #define FIRMWARE_HELPER "nxp/helper_uart_3000000.bin" @@ -41,6 +47,8 @@ #define CHIP_ID_W9098 0x5c03 #define CHIP_ID_IW416 0x7201 #define CHIP_ID_IW612 0x7601 +#define CHIP_ID_IW624a 0x8000 +#define CHIP_ID_IW624c 0x8001 #define CHIP_ID_AW693 0x8200 #define FW_SECURE_MASK 0xc0 @@ -152,6 +160,7 @@ struct btnxpuart_dev { u32 fw_v1_sent_bytes; u32 fw_v3_offset_correction; u32 fw_v1_expected_len; + u32 boot_reg_offset; wait_queue_head_t fw_dnld_done_wait_q; wait_queue_head_t check_boot_sign_wait_q; @@ -375,39 +384,13 @@ static void ps_timeout_func(struct timer_list *t) } } -static int ps_init_work(struct hci_dev *hdev) +static void ps_setup(struct hci_dev *hdev) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct ps_data *psdata = &nxpdev->psdata; - psdata->h2c_ps_interval = PS_DEFAULT_TIMEOUT_PERIOD_MS; - psdata->ps_state = PS_STATE_AWAKE; - psdata->target_ps_mode = DEFAULT_PS_MODE; psdata->hdev = hdev; - psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE; - psdata->c2h_wakeup_gpio = 0xff; - - switch (DEFAULT_H2C_WAKEUP_MODE) { - case WAKEUP_METHOD_DTR: - psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR; - break; - case WAKEUP_METHOD_BREAK: - default: - psdata->h2c_wakeupmode = WAKEUP_METHOD_BREAK; - break; - } - psdata->cur_psmode = PS_MODE_DISABLE; - psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID; INIT_WORK(&psdata->work, ps_work_func); - - return 0; -} - -static void ps_init_timer(struct hci_dev *hdev) -{ - struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); - struct ps_data *psdata = &nxpdev->psdata; - timer_setup(&psdata->ps_timer, ps_timeout_func, 0); } @@ -510,19 +493,31 @@ static void ps_init(struct hci_dev *hdev) serdev_device_set_tiocm(nxpdev->serdev, TIOCM_RTS, 0); usleep_range(5000, 10000); - switch (psdata->h2c_wakeupmode) { + psdata->ps_state = PS_STATE_AWAKE; + psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE; + psdata->c2h_wakeup_gpio = 0xff; + + psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID; + psdata->h2c_ps_interval = PS_DEFAULT_TIMEOUT_PERIOD_MS; + switch (DEFAULT_H2C_WAKEUP_MODE) { case WAKEUP_METHOD_DTR: + psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR; serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_DTR); serdev_device_set_tiocm(nxpdev->serdev, TIOCM_DTR, 0); break; case WAKEUP_METHOD_BREAK: default: + psdata->h2c_wakeupmode = WAKEUP_METHOD_BREAK; serdev_device_break_ctl(nxpdev->serdev, -1); usleep_range(5000, 10000); serdev_device_break_ctl(nxpdev->serdev, 0); usleep_range(5000, 10000); break; } + + psdata->cur_psmode = PS_MODE_DISABLE; + psdata->target_ps_mode = DEFAULT_PS_MODE; + if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode) hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL); if (psdata->cur_psmode != psdata->target_ps_mode) @@ -538,6 +533,7 @@ static int nxp_download_firmware(struct hci_dev *hdev) nxpdev->fw_dnld_v1_offset = 0; nxpdev->fw_v1_sent_bytes = 0; nxpdev->fw_v1_expected_len = HDR_LEN; + nxpdev->boot_reg_offset = 0; nxpdev->fw_v3_offset_correction = 0; nxpdev->baudrate_changed = false; nxpdev->timeout_changed = false; @@ -547,7 +543,7 @@ static int nxp_download_firmware(struct hci_dev *hdev) serdev_device_set_flow_control(nxpdev->serdev, false); nxpdev->current_baudrate = HCI_NXP_PRI_BAUDRATE; - /* Wait till FW is downloaded and CTS becomes low */ + /* Wait till FW is downloaded */ err = 
wait_event_interruptible_timeout(nxpdev->fw_dnld_done_wait_q, !test_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state), @@ -558,16 +554,11 @@ static int nxp_download_firmware(struct hci_dev *hdev) } serdev_device_set_flow_control(nxpdev->serdev, true); - err = serdev_device_wait_for_cts(nxpdev->serdev, 1, 60000); - if (err < 0) { - bt_dev_err(hdev, "CTS is still high. FW Download failed."); - return err; - } release_firmware(nxpdev->fw); memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); /* Allow the downloaded FW to initialize */ - usleep_range(800 * USEC_PER_MSEC, 1 * USEC_PER_SEC); + msleep(1200); return 0; } @@ -591,6 +582,12 @@ static bool nxp_fw_change_baudrate(struct hci_dev *hdev, u16 req_len) struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct nxp_bootloader_cmd nxp_cmd5; struct uart_config uart_config; + u32 clkdivaddr = CLKDIVADDR - nxpdev->boot_reg_offset; + u32 uartdivaddr = UARTDIVADDR - nxpdev->boot_reg_offset; + u32 uartmcraddr = UARTMCRADDR - nxpdev->boot_reg_offset; + u32 uartreinitaddr = UARTREINITADDR - nxpdev->boot_reg_offset; + u32 uarticraddr = UARTICRADDR - nxpdev->boot_reg_offset; + u32 uartfcraddr = UARTFCRADDR - nxpdev->boot_reg_offset; if (req_len == sizeof(nxp_cmd5)) { nxp_cmd5.header = __cpu_to_le32(5); @@ -603,17 +600,17 @@ static bool nxp_fw_change_baudrate(struct hci_dev *hdev, u16 req_len) serdev_device_write_buf(nxpdev->serdev, (u8 *)&nxp_cmd5, sizeof(nxp_cmd5)); nxpdev->fw_v3_offset_correction += req_len; } else if (req_len == sizeof(uart_config)) { - uart_config.clkdiv.address = __cpu_to_le32(CLKDIVADDR); + uart_config.clkdiv.address = __cpu_to_le32(clkdivaddr); uart_config.clkdiv.value = __cpu_to_le32(0x00c00000); - uart_config.uartdiv.address = __cpu_to_le32(UARTDIVADDR); + uart_config.uartdiv.address = __cpu_to_le32(uartdivaddr); uart_config.uartdiv.value = __cpu_to_le32(1); - uart_config.mcr.address = __cpu_to_le32(UARTMCRADDR); + uart_config.mcr.address = __cpu_to_le32(uartmcraddr); uart_config.mcr.value = __cpu_to_le32(MCR); - uart_config.re_init.address = __cpu_to_le32(UARTREINITADDR); + uart_config.re_init.address = __cpu_to_le32(uartreinitaddr); uart_config.re_init.value = __cpu_to_le32(INIT); - uart_config.icr.address = __cpu_to_le32(UARTICRADDR); + uart_config.icr.address = __cpu_to_le32(uarticraddr); uart_config.icr.value = __cpu_to_le32(ICR); - uart_config.fcr.address = __cpu_to_le32(UARTFCRADDR); + uart_config.fcr.address = __cpu_to_le32(uartfcraddr); uart_config.fcr.value = __cpu_to_le32(FCR); /* FW expects swapped CRC bytes */ uart_config.crc = __cpu_to_be32(crc32_be(0UL, (char *)&uart_config, @@ -702,7 +699,7 @@ static int nxp_recv_chip_ver_v1(struct hci_dev *hdev, struct sk_buff *skb) goto free_skb; chip_id = le16_to_cpu(req->chip_id ^ req->chip_id_comp); - if (chip_id == 0xffff) { + if (chip_id == 0xffff && nxpdev->fw_dnld_v1_offset) { nxpdev->fw_dnld_v1_offset = 0; nxpdev->fw_v1_sent_bytes = 0; nxpdev->fw_v1_expected_len = HDR_LEN; @@ -827,6 +824,7 @@ free_skb: static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid, u8 loader_ver) { + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); char *fw_name = NULL; switch (chipid) { @@ -839,6 +837,16 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid, case CHIP_ID_IW612: fw_name = FIRMWARE_IW612; break; + case CHIP_ID_IW624a: + case CHIP_ID_IW624c: + nxpdev->boot_reg_offset = 1; + if ((loader_ver & FW_SECURE_MASK) == FW_OPEN) + fw_name = FIRMWARE_IW624; + else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL) + fw_name = 
FIRMWARE_SECURE_IW624; + else + bt_dev_err(hdev, "Illegal loader version %02x", loader_ver); + break; case CHIP_ID_AW693: if ((loader_ver & FW_SECURE_MASK) == FW_OPEN) fw_name = FIRMWARE_AW693; @@ -969,45 +977,13 @@ static int nxp_set_baudrate_cmd(struct hci_dev *hdev, void *data) return 0; } -static int nxp_set_ind_reset(struct hci_dev *hdev, void *data) -{ - struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); - struct sk_buff *skb; - u8 *status; - u8 pcmd = 0; - int err = 0; - - skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1, &pcmd); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - status = skb_pull_data(skb, 1); - if (!status || *status) - goto free_skb; - - set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); - err = nxp_download_firmware(hdev); - if (err < 0) - goto free_skb; - serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate); - nxpdev->current_baudrate = nxpdev->fw_init_baudrate; - if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) { - nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE; - nxp_set_baudrate_cmd(hdev, NULL); - } - hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL); - hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL); - -free_skb: - kfree_skb(skb); - return err; -} - -/* NXP protocol */ static int nxp_check_boot_sign(struct btnxpuart_dev *nxpdev) { serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE); - serdev_device_set_flow_control(nxpdev->serdev, true); + if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) + serdev_device_set_flow_control(nxpdev->serdev, false); + else + serdev_device_set_flow_control(nxpdev->serdev, true); set_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state); return wait_event_interruptible_timeout(nxpdev->check_boot_sign_wait_q, @@ -1016,15 +992,29 @@ static int nxp_check_boot_sign(struct btnxpuart_dev *nxpdev) msecs_to_jiffies(1000)); } +static int nxp_set_ind_reset(struct hci_dev *hdev, void *data) +{ + static const u8 ir_hw_err[] = { HCI_EV_HARDWARE_ERROR, + 0x01, BTNXPUART_IR_HW_ERR }; + struct sk_buff *skb; + + skb = bt_skb_alloc(3, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + skb_put_data(skb, ir_hw_err, 3); + + /* Inject Hardware Error to upper stack */ + return hci_recv_frame(hdev, skb); +} + +/* NXP protocol */ static int nxp_setup(struct hci_dev *hdev) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); int err = 0; - set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); - init_waitqueue_head(&nxpdev->fw_dnld_done_wait_q); - init_waitqueue_head(&nxpdev->check_boot_sign_wait_q); - if (nxp_check_boot_sign(nxpdev)) { bt_dev_dbg(hdev, "Need FW Download."); err = nxp_download_firmware(hdev); @@ -1035,10 +1025,6 @@ static int nxp_setup(struct hci_dev *hdev) clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); } - device_property_read_u32(&nxpdev->serdev->dev, "fw-init-baudrate", - &nxpdev->fw_init_baudrate); - if (!nxpdev->fw_init_baudrate) - nxpdev->fw_init_baudrate = FW_INIT_BAUDRATE; serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate); nxpdev->current_baudrate = nxpdev->fw_init_baudrate; @@ -1049,6 +1035,46 @@ static int nxp_setup(struct hci_dev *hdev) ps_init(hdev); + if (test_and_clear_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) + hci_dev_clear_flag(hdev, HCI_SETUP); + + return 0; +} + +static void nxp_hw_err(struct hci_dev *hdev, u8 code) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + + switch (code) { + case BTNXPUART_IR_HW_ERR: + set_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state); + 
hci_dev_set_flag(hdev, HCI_SETUP); + break; + default: + break; + } +} + +static int nxp_shutdown(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct sk_buff *skb; + u8 *status; + u8 pcmd = 0; + + if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) { + skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1, &pcmd); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + status = skb_pull_data(skb, 1); + if (status) { + serdev_device_set_flow_control(nxpdev->serdev, false); + set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + } + kfree_skb(skb); + } + return 0; } @@ -1256,7 +1282,8 @@ static int btnxpuart_receive_buf(struct serdev_device *serdev, const u8 *data, nxpdev->rx_skb = NULL; return err; } - nxpdev->hdev->stat.byte_rx += count; + if (!is_fw_downloading(nxpdev)) + nxpdev->hdev->stat.byte_rx += count; return count; } @@ -1289,6 +1316,16 @@ static int nxp_serdev_probe(struct serdev_device *serdev) INIT_WORK(&nxpdev->tx_work, btnxpuart_tx_work); skb_queue_head_init(&nxpdev->txq); + init_waitqueue_head(&nxpdev->fw_dnld_done_wait_q); + init_waitqueue_head(&nxpdev->check_boot_sign_wait_q); + + device_property_read_u32(&nxpdev->serdev->dev, "fw-init-baudrate", + &nxpdev->fw_init_baudrate); + if (!nxpdev->fw_init_baudrate) + nxpdev->fw_init_baudrate = FW_INIT_BAUDRATE; + + set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + crc8_populate_msb(crc8_table, POLYNOMIAL8); /* Initialize and register HCI device */ @@ -1309,6 +1346,8 @@ static int nxp_serdev_probe(struct serdev_device *serdev) hdev->flush = btnxpuart_flush; hdev->setup = nxp_setup; hdev->send = nxp_enqueue; + hdev->hw_error = nxp_hw_err; + hdev->shutdown = nxp_shutdown; SET_HCIDEV_DEV(hdev, &serdev->dev); if (hci_register_dev(hdev) < 0) { @@ -1317,8 +1356,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev) return -ENODEV; } - ps_init_work(hdev); - ps_init_timer(hdev); + ps_setup(hdev); return 0; } diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 6f2187fab55f..5a35ac4138c6 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -604,26 +604,38 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, /* Download rampatch file */ config.type = TLV_TYPE_PATCH; - if (soc_type == QCA_WCN3988) { - snprintf(config.fwname, sizeof(config.fwname), - "qca/apbtfw%02x.tlv", rom_ver); - } else if (qca_is_wcn399x(soc_type)) { + switch (soc_type) { + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: snprintf(config.fwname, sizeof(config.fwname), "qca/crbtfw%02x.tlv", rom_ver); - } else if (soc_type == QCA_QCA6390) { + break; + case QCA_WCN3988: + snprintf(config.fwname, sizeof(config.fwname), + "qca/apbtfw%02x.tlv", rom_ver); + break; + case QCA_QCA6390: snprintf(config.fwname, sizeof(config.fwname), "qca/htbtfw%02x.tlv", rom_ver); - } else if (soc_type == QCA_WCN6750) { + break; + case QCA_WCN6750: /* Choose mbn file by default.If mbn file is not found * then choose tlv file */ config.type = ELF_TYPE_PATCH; snprintf(config.fwname, sizeof(config.fwname), "qca/msbtfw%02x.mbn", rom_ver); - } else if (soc_type == QCA_WCN6855) { + break; + case QCA_WCN6855: snprintf(config.fwname, sizeof(config.fwname), "qca/hpbtfw%02x.tlv", rom_ver); - } else { + break; + case QCA_WCN7850: + snprintf(config.fwname, sizeof(config.fwname), + "qca/hmtbtfw%02x.tlv", rom_ver); + break; + default: snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin", soc_ver); } @@ -639,33 +651,48 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, /* Download 
NVM configuration */ config.type = TLV_TYPE_NVM; - if (firmware_name) + if (firmware_name) { snprintf(config.fwname, sizeof(config.fwname), "qca/%s", firmware_name); - else if (soc_type == QCA_WCN3988) - snprintf(config.fwname, sizeof(config.fwname), - "qca/apnv%02x.bin", rom_ver); - else if (qca_is_wcn399x(soc_type)) { - if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) { + } else { + switch (soc_type) { + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) { + snprintf(config.fwname, sizeof(config.fwname), + "qca/crnv%02xu.bin", rom_ver); + } else { + snprintf(config.fwname, sizeof(config.fwname), + "qca/crnv%02x.bin", rom_ver); + } + break; + case QCA_WCN3988: snprintf(config.fwname, sizeof(config.fwname), - "qca/crnv%02xu.bin", rom_ver); - } else { + "qca/apnv%02x.bin", rom_ver); + break; + case QCA_QCA6390: + snprintf(config.fwname, sizeof(config.fwname), + "qca/htnv%02x.bin", rom_ver); + break; + case QCA_WCN6750: + snprintf(config.fwname, sizeof(config.fwname), + "qca/msnv%02x.bin", rom_ver); + break; + case QCA_WCN6855: + snprintf(config.fwname, sizeof(config.fwname), + "qca/hpnv%02x.bin", rom_ver); + break; + case QCA_WCN7850: snprintf(config.fwname, sizeof(config.fwname), - "qca/crnv%02x.bin", rom_ver); + "qca/hmtnv%02x.bin", rom_ver); + break; + + default: + snprintf(config.fwname, sizeof(config.fwname), + "qca/nvm_%08x.bin", soc_ver); } } - else if (soc_type == QCA_QCA6390) - snprintf(config.fwname, sizeof(config.fwname), - "qca/htnv%02x.bin", rom_ver); - else if (soc_type == QCA_WCN6750) - snprintf(config.fwname, sizeof(config.fwname), - "qca/msnv%02x.bin", rom_ver); - else if (soc_type == QCA_WCN6855) - snprintf(config.fwname, sizeof(config.fwname), - "qca/hpnv%02x.bin", rom_ver); - else - snprintf(config.fwname, sizeof(config.fwname), - "qca/nvm_%08x.bin", soc_ver); err = qca_download_firmware(hdev, &config, soc_type, rom_ver); if (err < 0) { @@ -673,16 +700,25 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, return err; } - if (soc_type >= QCA_WCN3991) { + switch (soc_type) { + case QCA_WCN3991: + case QCA_QCA6390: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: err = qca_disable_soc_logging(hdev); if (err < 0) return err; + break; + default: + break; } /* WCN399x and WCN6750 supports the Microsoft vendor extension with 0xFD70 as the * VsMsftOpCode. 
*/ switch (soc_type) { + case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: @@ -704,6 +740,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, case QCA_WCN3991: case QCA_WCN6750: case QCA_WCN6855: + case QCA_WCN7850: /* get fw build info */ err = qca_read_fw_build_info(hdev); if (err < 0) diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index fc6cf314eb0e..03bff5c0059d 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -149,6 +149,7 @@ enum qca_btsoc_type { QCA_QCA6390, QCA_WCN6750, QCA_WCN6855, + QCA_WCN7850, }; #if IS_ENABLED(CONFIG_BT_QCA) @@ -161,27 +162,6 @@ int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver, enum qca_btsoc_type); int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); int qca_send_pre_shutdown_cmd(struct hci_dev *hdev); -static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type) -{ - switch (soc_type) { - case QCA_WCN3988: - case QCA_WCN3990: - case QCA_WCN3991: - case QCA_WCN3998: - return true; - default: - return false; - } -} -static inline bool qca_is_wcn6750(enum qca_btsoc_type soc_type) -{ - return soc_type == QCA_WCN6750; -} -static inline bool qca_is_wcn6855(enum qca_btsoc_type soc_type) -{ - return soc_type == QCA_WCN6855; -} - #else static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) @@ -209,21 +189,6 @@ static inline int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) return -EOPNOTSUPP; } -static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type) -{ - return false; -} - -static inline bool qca_is_wcn6750(enum qca_btsoc_type soc_type) -{ - return false; -} - -static inline bool qca_is_wcn6855(enum qca_btsoc_type soc_type) -{ - return false; -} - static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev) { return -EOPNOTSUPP; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 1bb3b09013b0..82597ab4f747 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -175,7 +175,7 @@ static const struct usb_device_id btusb_table[] = { MODULE_DEVICE_TABLE(usb, btusb_table); -static const struct usb_device_id blacklist_table[] = { +static const struct usb_device_id quirks_table[] = { /* CSR BlueCore devices */ { USB_DEVICE(0x0a12, 0x0001), .driver_info = BTUSB_CSR }, @@ -2162,7 +2162,7 @@ static int btusb_switch_alt_setting(struct hci_dev *hdev, int new_alts) * alternate setting. 
*/ spin_lock_irqsave(&data->rxlock, flags); - kfree_skb(data->sco_skb); + dev_kfree_skb_irq(data->sco_skb); data->sco_skb = NULL; spin_unlock_irqrestore(&data->rxlock, flags); @@ -4194,7 +4194,7 @@ static int btusb_probe(struct usb_interface *intf, if (!id->driver_info) { const struct usb_device_id *match; - match = usb_match_id(intf, blacklist_table); + match = usb_match_id(intf, quirks_table); if (match) id = match; } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 011822519602..4b57e15f9c7a 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -607,9 +607,18 @@ static int qca_open(struct hci_uart *hu) if (hu->serdev) { qcadev = serdev_device_get_drvdata(hu->serdev); - if (qca_is_wcn399x(qcadev->btsoc_type) || - qca_is_wcn6750(qcadev->btsoc_type)) + switch (qcadev->btsoc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: hu->init_speed = qcadev->init_speed; + break; + + default: + break; + } if (qcadev->oper_speed) hu->oper_speed = qcadev->oper_speed; @@ -1341,12 +1350,20 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS)); /* Give the controller time to process the request */ - if (qca_is_wcn399x(qca_soc_type(hu)) || - qca_is_wcn6750(qca_soc_type(hu)) || - qca_is_wcn6855(qca_soc_type(hu))) + switch (qca_soc_type(hu)) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: usleep_range(1000, 10000); - else + break; + + default: msleep(300); + } return 0; } @@ -1419,13 +1436,20 @@ static unsigned int qca_get_speed(struct hci_uart *hu, static int qca_check_speeds(struct hci_uart *hu) { - if (qca_is_wcn399x(qca_soc_type(hu)) || - qca_is_wcn6750(qca_soc_type(hu)) || - qca_is_wcn6855(qca_soc_type(hu))) { + switch (qca_soc_type(hu)) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: if (!qca_get_speed(hu, QCA_INIT_SPEED) && !qca_get_speed(hu, QCA_OPER_SPEED)) return -EINVAL; - } else { + break; + + default: if (!qca_get_speed(hu, QCA_INIT_SPEED) || !qca_get_speed(hu, QCA_OPER_SPEED)) return -EINVAL; @@ -1454,14 +1478,29 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) /* Disable flow control for wcn3990 to deassert RTS while * changing the baudrate of chip and host. 
*/ - if (qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type) || - qca_is_wcn6855(soc_type)) + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: hci_uart_set_flow_control(hu, true); + break; + + default: + break; + } - if (soc_type == QCA_WCN3990) { + switch (soc_type) { + case QCA_WCN3990: reinit_completion(&qca->drop_ev_comp); set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); + break; + + default: + break; } qca_baudrate = qca_get_baudrate_value(speed); @@ -1473,12 +1512,23 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) host_set_baudrate(hu, speed); error: - if (qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type) || - qca_is_wcn6855(soc_type)) + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: hci_uart_set_flow_control(hu, false); + break; - if (soc_type == QCA_WCN3990) { + default: + break; + } + + switch (soc_type) { + case QCA_WCN3990: /* Wait for the controller to send the vendor event * for the baudrate change command. */ @@ -1490,6 +1540,10 @@ error: } clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); + break; + + default: + break; } } @@ -1651,12 +1705,20 @@ static int qca_regulator_init(struct hci_uart *hu) } } - if (qca_is_wcn399x(soc_type)) { + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: /* Forcefully enable wcn399x to enter in to boot mode. */ host_set_baudrate(hu, 2400); ret = qca_send_power_pulse(hu, false); if (ret) return ret; + break; + + default: + break; } /* For wcn6750 need to enable gpio bt_en */ @@ -1673,10 +1735,18 @@ static int qca_regulator_init(struct hci_uart *hu) qca_set_speed(hu, QCA_INIT_SPEED); - if (qca_is_wcn399x(soc_type)) { + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: ret = qca_send_power_pulse(hu, true); if (ret) return ret; + break; + + default: + break; } /* Now the device is in ready state to communicate with host. @@ -1710,11 +1780,18 @@ static int qca_power_on(struct hci_dev *hdev) if (!hu->serdev) return 0; - if (qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type) || - qca_is_wcn6855(soc_type)) { + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: ret = qca_regulator_init(hu); - } else { + break; + + default: qcadev = serdev_device_get_drvdata(hu->serdev); if (qcadev->bt_en) { gpiod_set_value_cansleep(qcadev->bt_en, 1); @@ -1748,6 +1825,7 @@ static int qca_setup(struct hci_uart *hu) const char *firmware_name = qca_get_firmware_name(hu); int ret; struct qca_btsoc_version ver; + const char *soc_name; ret = qca_check_speeds(hu); if (ret) @@ -1762,10 +1840,30 @@ static int qca_setup(struct hci_uart *hu) */ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); - bt_dev_info(hdev, "setting up %s", - qca_is_wcn399x(soc_type) ? "wcn399x" : - (soc_type == QCA_WCN6750) ? "wcn6750" : - (soc_type == QCA_WCN6855) ? 
"wcn6855" : "ROME/QCA6390"); + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + soc_name = "wcn399x"; + break; + + case QCA_WCN6750: + soc_name = "wcn6750"; + break; + + case QCA_WCN6855: + soc_name = "wcn6855"; + break; + + case QCA_WCN7850: + soc_name = "wcn7850"; + break; + + default: + soc_name = "ROME/QCA6390"; + } + bt_dev_info(hdev, "setting up %s", soc_name); qca->memdump_state = QCA_MEMDUMP_IDLE; @@ -1776,16 +1874,23 @@ retry: clear_bit(QCA_SSR_TRIGGERED, &qca->flags); - if (qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type) || - qca_is_wcn6855(soc_type)) { + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); hci_set_aosp_capable(hdev); ret = qca_read_soc_version(hdev, &ver, soc_type); if (ret) goto out; - } else { + break; + + default: qca_set_speed(hu, QCA_INIT_SPEED); } @@ -1799,9 +1904,17 @@ retry: qca_baudrate = qca_get_baudrate_value(speed); } - if (!(qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type) || - qca_is_wcn6855(soc_type))) { + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: + break; + + default: /* Get QCA version information */ ret = qca_read_soc_version(hdev, &ver, soc_type); if (ret) @@ -1955,6 +2068,20 @@ static const struct qca_device_data qca_soc_data_wcn6855 __maybe_unused = { .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, }; +static const struct qca_device_data qca_soc_data_wcn7850 __maybe_unused = { + .soc_type = QCA_WCN7850, + .vregs = (struct qca_vreg []) { + { "vddio", 5000 }, + { "vddaon", 26000 }, + { "vdddig", 126000 }, + { "vddrfa0p8", 102000 }, + { "vddrfa1p2", 257000 }, + { "vddrfa1p9", 302000 }, + }, + .num_vregs = 6, + .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, +}; + static void qca_power_shutdown(struct hci_uart *hu) { struct qca_serdev *qcadev; @@ -1980,11 +2107,18 @@ static void qca_power_shutdown(struct hci_uart *hu) qcadev = serdev_device_get_drvdata(hu->serdev); - if (qca_is_wcn399x(soc_type)) { + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: host_set_baudrate(hu, 2400); qca_send_power_pulse(hu, false); qca_regulator_disable(qcadev); - } else if (soc_type == QCA_WCN6750 || soc_type == QCA_WCN6855) { + break; + + case QCA_WCN6750: + case QCA_WCN6855: gpiod_set_value_cansleep(qcadev->bt_en, 0); msleep(100); qca_regulator_disable(qcadev); @@ -1992,7 +2126,9 @@ static void qca_power_shutdown(struct hci_uart *hu) sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl); bt_dev_dbg(hu->hdev, "SW_CTRL is %d", sw_ctrl_state); } - } else if (qcadev->bt_en) { + break; + + default: gpiod_set_value_cansleep(qcadev->bt_en, 0); } @@ -2117,11 +2253,19 @@ static int qca_serdev_probe(struct serdev_device *serdev) if (!qcadev->oper_speed) BT_DBG("UART will pick default operating speed"); - if (data && - (qca_is_wcn399x(data->soc_type) || - qca_is_wcn6750(data->soc_type) || - qca_is_wcn6855(data->soc_type))) { + if (data) qcadev->btsoc_type = data->soc_type; + else + qcadev->btsoc_type = QCA_ROME; + + switch (qcadev->btsoc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: qcadev->bt_power = devm_kzalloc(&serdev->dev, sizeof(struct 
qca_power), GFP_KERNEL); @@ -2151,7 +2295,8 @@ static int qca_serdev_probe(struct serdev_device *serdev) GPIOD_IN); if (IS_ERR_OR_NULL(qcadev->sw_ctrl) && (data->soc_type == QCA_WCN6750 || - data->soc_type == QCA_WCN6855)) + data->soc_type == QCA_WCN6855 || + data->soc_type == QCA_WCN7850)) dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n"); qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL); @@ -2165,12 +2310,9 @@ static int qca_serdev_probe(struct serdev_device *serdev) BT_ERR("wcn3990 serdev registration failed"); return err; } - } else { - if (data) - qcadev->btsoc_type = data->soc_type; - else - qcadev->btsoc_type = QCA_ROME; + break; + default: qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", GPIOD_OUT_LOW); if (IS_ERR_OR_NULL(qcadev->bt_en)) { @@ -2226,13 +2368,24 @@ static void qca_serdev_remove(struct serdev_device *serdev) struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); struct qca_power *power = qcadev->bt_power; - if ((qca_is_wcn399x(qcadev->btsoc_type) || - qca_is_wcn6750(qcadev->btsoc_type) || - qca_is_wcn6855(qcadev->btsoc_type)) && - power->vregs_on) - qca_power_shutdown(&qcadev->serdev_hu); - else if (qcadev->susclk) - clk_disable_unprepare(qcadev->susclk); + switch (qcadev->btsoc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: + if (power->vregs_on) { + qca_power_shutdown(&qcadev->serdev_hu); + break; + } + fallthrough; + + default: + if (qcadev->susclk) + clk_disable_unprepare(qcadev->susclk); + } hci_uart_unregister_device(&qcadev->serdev_hu); } @@ -2415,6 +2568,7 @@ static const struct of_device_id qca_bluetooth_of_match[] = { { .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998}, { .compatible = "qcom,wcn6750-bt", .data = &qca_soc_data_wcn6750}, { .compatible = "qcom,wcn6855-bt", .data = &qca_soc_data_wcn6855}, + { .compatible = "qcom,wcn7850-bt", .data = &qca_soc_data_wcn7850}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match); diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 2e91d8879326..73f913cbd146 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -400,6 +400,9 @@ static void del_gid(struct ib_device *ib_dev, u32 port, table->data_vec[ix] = NULL; write_unlock_irq(&table->rwlock); + if (rdma_cap_roce_gid_table(ib_dev, port)) + ib_dev->ops.del_gid(&entry->attr, &entry->context); + ndev_storage = entry->ndev_storage; if (ndev_storage) { entry->ndev_storage = NULL; @@ -407,9 +410,6 @@ static void del_gid(struct ib_device *ib_dev, u32 port, call_rcu(&ndev_storage->rcu_head, put_gid_ndev); } - if (rdma_cap_roce_gid_table(ib_dev, port)) - ib_dev->ops.del_gid(&entry->attr, &entry->context); - put_gid_entry_locked(entry); } diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile index 612ee8190a2d..72a526236c2e 100644 --- a/drivers/infiniband/hw/mlx5/Makefile +++ b/drivers/infiniband/hw/mlx5/Makefile @@ -28,3 +28,4 @@ mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o \ fs.o \ qos.o \ std_types.o +mlx5_ib-$(CONFIG_MLX5_MACSEC) += macsec.o diff --git a/drivers/infiniband/hw/mlx5/macsec.c b/drivers/infiniband/hw/mlx5/macsec.c new file mode 100644 index 000000000000..3c56eb5eddf3 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/macsec.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. 
*/ + +#include "macsec.h" +#include <linux/mlx5/macsec.h> + +struct mlx5_reserved_gids { + int macsec_index; + const struct ib_gid_attr *physical_gid; +}; + +struct mlx5_roce_gids { + struct list_head roce_gid_list_entry; + u16 gid_idx; + union { + struct sockaddr_in sockaddr_in; + struct sockaddr_in6 sockaddr_in6; + } addr; +}; + +struct mlx5_macsec_device { + struct list_head macsec_devices_list_entry; + void *macdev; + struct list_head macsec_roce_gids; + struct list_head tx_rules_list; + struct list_head rx_rules_list; +}; + +static void cleanup_macsec_device(struct mlx5_macsec_device *macsec_device) +{ + if (!list_empty(&macsec_device->tx_rules_list) || + !list_empty(&macsec_device->rx_rules_list) || + !list_empty(&macsec_device->macsec_roce_gids)) + return; + + list_del(&macsec_device->macsec_devices_list_entry); + kfree(macsec_device); +} + +static struct mlx5_macsec_device *get_macsec_device(void *macdev, + struct list_head *macsec_devices_list) +{ + struct mlx5_macsec_device *iter, *macsec_device = NULL; + + list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) { + if (iter->macdev == macdev) { + macsec_device = iter; + break; + } + } + + if (macsec_device) + return macsec_device; + + macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL); + if (!macsec_device) + return NULL; + + macsec_device->macdev = macdev; + INIT_LIST_HEAD(&macsec_device->tx_rules_list); + INIT_LIST_HEAD(&macsec_device->rx_rules_list); + INIT_LIST_HEAD(&macsec_device->macsec_roce_gids); + list_add(&macsec_device->macsec_devices_list_entry, macsec_devices_list); + + return macsec_device; +} + +static void mlx5_macsec_del_roce_gid(struct mlx5_macsec_device *macsec_device, u16 gid_idx) +{ + struct mlx5_roce_gids *current_gid, *next_gid; + + list_for_each_entry_safe(current_gid, next_gid, &macsec_device->macsec_roce_gids, + roce_gid_list_entry) + if (current_gid->gid_idx == gid_idx) { + list_del(¤t_gid->roce_gid_list_entry); + kfree(current_gid); + } +} + +static void mlx5_macsec_save_roce_gid(struct mlx5_macsec_device *macsec_device, + const struct sockaddr *addr, u16 gid_idx) +{ + struct mlx5_roce_gids *roce_gids; + + roce_gids = kzalloc(sizeof(*roce_gids), GFP_KERNEL); + if (!roce_gids) + return; + + roce_gids->gid_idx = gid_idx; + if (addr->sa_family == AF_INET) + memcpy(&roce_gids->addr.sockaddr_in, addr, sizeof(roce_gids->addr.sockaddr_in)); + else + memcpy(&roce_gids->addr.sockaddr_in6, addr, sizeof(roce_gids->addr.sockaddr_in6)); + + list_add_tail(&roce_gids->roce_gid_list_entry, &macsec_device->macsec_roce_gids); +} + +static void handle_macsec_gids(struct list_head *macsec_devices_list, + struct mlx5_macsec_event_data *data) +{ + struct mlx5_macsec_device *macsec_device; + struct mlx5_roce_gids *gid; + + macsec_device = get_macsec_device(data->macdev, macsec_devices_list); + if (!macsec_device) + return; + + list_for_each_entry(gid, &macsec_device->macsec_roce_gids, roce_gid_list_entry) { + mlx5_macsec_add_roce_sa_rules(data->fs_id, (struct sockaddr *)&gid->addr, + gid->gid_idx, &macsec_device->tx_rules_list, + &macsec_device->rx_rules_list, data->macsec_fs, + data->is_tx); + } +} + +static void del_sa_roce_rule(struct list_head *macsec_devices_list, + struct mlx5_macsec_event_data *data) +{ + struct mlx5_macsec_device *macsec_device; + + macsec_device = get_macsec_device(data->macdev, macsec_devices_list); + WARN_ON(!macsec_device); + + mlx5_macsec_del_roce_sa_rules(data->fs_id, data->macsec_fs, + &macsec_device->tx_rules_list, + &macsec_device->rx_rules_list, data->is_tx); +} 
+ +static int macsec_event(struct notifier_block *nb, unsigned long event, void *data) +{ + struct mlx5_macsec *macsec = container_of(nb, struct mlx5_macsec, blocking_events_nb); + + mutex_lock(&macsec->lock); + switch (event) { + case MLX5_DRIVER_EVENT_MACSEC_SA_ADDED: + handle_macsec_gids(&macsec->macsec_devices_list, data); + break; + case MLX5_DRIVER_EVENT_MACSEC_SA_DELETED: + del_sa_roce_rule(&macsec->macsec_devices_list, data); + break; + default: + mutex_unlock(&macsec->lock); + return NOTIFY_DONE; + } + mutex_unlock(&macsec->lock); + return NOTIFY_OK; +} + +void mlx5r_macsec_event_register(struct mlx5_ib_dev *dev) +{ + if (!mlx5_is_macsec_roce_supported(dev->mdev)) { + mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n"); + return; + } + + dev->macsec.blocking_events_nb.notifier_call = macsec_event; + blocking_notifier_chain_register(&dev->mdev->macsec_nh, + &dev->macsec.blocking_events_nb); +} + +void mlx5r_macsec_event_unregister(struct mlx5_ib_dev *dev) +{ + if (!mlx5_is_macsec_roce_supported(dev->mdev)) { + mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n"); + return; + } + + blocking_notifier_chain_unregister(&dev->mdev->macsec_nh, + &dev->macsec.blocking_events_nb); +} + +int mlx5r_macsec_init_gids_and_devlist(struct mlx5_ib_dev *dev) +{ + int i, j, max_gids; + + if (!mlx5_is_macsec_roce_supported(dev->mdev)) { + mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n"); + return 0; + } + + max_gids = MLX5_CAP_ROCE(dev->mdev, roce_address_table_size); + for (i = 0; i < dev->num_ports; i++) { + dev->port[i].reserved_gids = kcalloc(max_gids, + sizeof(*dev->port[i].reserved_gids), + GFP_KERNEL); + if (!dev->port[i].reserved_gids) + goto err; + + for (j = 0; j < max_gids; j++) + dev->port[i].reserved_gids[j].macsec_index = -1; + } + + INIT_LIST_HEAD(&dev->macsec.macsec_devices_list); + mutex_init(&dev->macsec.lock); + + return 0; +err: + while (i >= 0) { + kfree(dev->port[i].reserved_gids); + i--; + } + return -ENOMEM; +} + +void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev *dev) +{ + int i; + + if (!mlx5_is_macsec_roce_supported(dev->mdev)) + mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n"); + + for (i = 0; i < dev->num_ports; i++) + kfree(dev->port[i].reserved_gids); + + mutex_destroy(&dev->macsec.lock); +} + +int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr *attr) +{ + struct mlx5_ib_dev *dev = to_mdev(attr->device); + struct mlx5_macsec_device *macsec_device; + const struct ib_gid_attr *physical_gid; + struct mlx5_reserved_gids *mgids; + struct net_device *ndev; + int ret = 0; + union { + struct sockaddr_in sockaddr_in; + struct sockaddr_in6 sockaddr_in6; + } addr; + + if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) + return 0; + + if (!mlx5_is_macsec_roce_supported(dev->mdev)) { + mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n"); + return 0; + } + + rcu_read_lock(); + ndev = rcu_dereference(attr->ndev); + if (!ndev) { + rcu_read_unlock(); + return -ENODEV; + } + + if (!netif_is_macsec(ndev) || !macsec_netdev_is_offloaded(ndev)) { + rcu_read_unlock(); + return 0; + } + dev_hold(ndev); + rcu_read_unlock(); + + mutex_lock(&dev->macsec.lock); + macsec_device = get_macsec_device(ndev, &dev->macsec.macsec_devices_list); + if (!macsec_device) { + ret = -ENOMEM; + goto dev_err; + } + + physical_gid = rdma_find_gid(attr->device, &attr->gid, + attr->gid_type, NULL); + if (!IS_ERR(physical_gid)) { + ret = set_roce_addr(to_mdev(physical_gid->device), + physical_gid->port_num, + 
physical_gid->index, NULL, + physical_gid); + if (ret) + goto gid_err; + + mgids = &dev->port[attr->port_num - 1].reserved_gids[physical_gid->index]; + mgids->macsec_index = attr->index; + mgids->physical_gid = physical_gid; + } + + /* Proceed with adding steering rules, regardless if there was gid ambiguity or not.*/ + rdma_gid2ip((struct sockaddr *)&addr, &attr->gid); + ret = mlx5_macsec_add_roce_rule(ndev, (struct sockaddr *)&addr, attr->index, + &macsec_device->tx_rules_list, + &macsec_device->rx_rules_list, dev->mdev->macsec_fs); + if (ret && !IS_ERR(physical_gid)) + goto rule_err; + + mlx5_macsec_save_roce_gid(macsec_device, (struct sockaddr *)&addr, attr->index); + + dev_put(ndev); + mutex_unlock(&dev->macsec.lock); + return ret; + +rule_err: + set_roce_addr(to_mdev(physical_gid->device), physical_gid->port_num, + physical_gid->index, &physical_gid->gid, physical_gid); + mgids->macsec_index = -1; +gid_err: + rdma_put_gid_attr(physical_gid); + cleanup_macsec_device(macsec_device); +dev_err: + dev_put(ndev); + mutex_unlock(&dev->macsec.lock); + return ret; +} + +void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr *attr) +{ + struct mlx5_ib_dev *dev = to_mdev(attr->device); + struct mlx5_macsec_device *macsec_device; + struct mlx5_reserved_gids *mgids; + struct net_device *ndev; + int i, max_gids; + + if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) + return; + + if (!mlx5_is_macsec_roce_supported(dev->mdev)) { + mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n"); + return; + } + + mgids = &dev->port[attr->port_num - 1].reserved_gids[attr->index]; + if (mgids->macsec_index != -1) { /* Checking if physical gid has ambiguous IP */ + rdma_put_gid_attr(mgids->physical_gid); + mgids->macsec_index = -1; + return; + } + + rcu_read_lock(); + ndev = rcu_dereference(attr->ndev); + if (!ndev) { + rcu_read_unlock(); + return; + } + + if (!netif_is_macsec(ndev) || !macsec_netdev_is_offloaded(ndev)) { + rcu_read_unlock(); + return; + } + dev_hold(ndev); + rcu_read_unlock(); + + mutex_lock(&dev->macsec.lock); + max_gids = MLX5_CAP_ROCE(dev->mdev, roce_address_table_size); + for (i = 0; i < max_gids; i++) { /* Checking if macsec gid has ambiguous IP */ + mgids = &dev->port[attr->port_num - 1].reserved_gids[i]; + if (mgids->macsec_index == attr->index) { + const struct ib_gid_attr *physical_gid = mgids->physical_gid; + + set_roce_addr(to_mdev(physical_gid->device), + physical_gid->port_num, + physical_gid->index, + &physical_gid->gid, physical_gid); + + rdma_put_gid_attr(physical_gid); + mgids->macsec_index = -1; + break; + } + } + macsec_device = get_macsec_device(ndev, &dev->macsec.macsec_devices_list); + mlx5_macsec_del_roce_rule(attr->index, dev->mdev->macsec_fs, + &macsec_device->tx_rules_list, &macsec_device->rx_rules_list); + mlx5_macsec_del_roce_gid(macsec_device, attr->index); + cleanup_macsec_device(macsec_device); + + dev_put(ndev); + mutex_unlock(&dev->macsec.lock); +} diff --git a/drivers/infiniband/hw/mlx5/macsec.h b/drivers/infiniband/hw/mlx5/macsec.h new file mode 100644 index 000000000000..9b77ba90f0f4 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/macsec.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. 
*/ + +#ifndef __MLX5_MACSEC_H__ +#define __MLX5_MACSEC_H__ + +#include <net/macsec.h> +#include <rdma/ib_cache.h> +#include <rdma/ib_addr.h> +#include "mlx5_ib.h" + +#ifdef CONFIG_MLX5_MACSEC +struct mlx5_reserved_gids; + +int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr *attr); +void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr *attr); +int mlx5r_macsec_init_gids_and_devlist(struct mlx5_ib_dev *dev); +void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev *dev); +void mlx5r_macsec_event_register(struct mlx5_ib_dev *dev); +void mlx5r_macsec_event_unregister(struct mlx5_ib_dev *dev); +#else +static inline int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr *attr) { return 0; } +static inline void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr *attr) {} +static inline int mlx5r_macsec_init_gids_and_devlist(struct mlx5_ib_dev *dev) { return 0; } +static inline void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev *dev) {} +static inline void mlx5r_macsec_event_register(struct mlx5_ib_dev *dev) {} +static inline void mlx5r_macsec_event_unregister(struct mlx5_ib_dev *dev) {} +#endif +#endif /* __MLX5_MACSEC_H__ */ diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 3c25b9045f9d..aed5cdea50e6 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -46,6 +46,7 @@ #include <rdma/uverbs_ioctl.h> #include <rdma/mlx5_user_ioctl_verbs.h> #include <rdma/mlx5_user_ioctl_cmds.h> +#include "macsec.h" #define UVERBS_MODULE_NAME mlx5_ib #include <rdma/uverbs_named_ioctl.h> @@ -564,9 +565,9 @@ out: return err; } -static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num, - unsigned int index, const union ib_gid *gid, - const struct ib_gid_attr *attr) +int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num, + unsigned int index, const union ib_gid *gid, + const struct ib_gid_attr *attr) { enum ib_gid_type gid_type; u16 vlan_id = 0xffff; @@ -607,6 +608,12 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num, static int mlx5_ib_add_gid(const struct ib_gid_attr *attr, __always_unused void **context) { + int ret; + + ret = mlx5r_add_gid_macsec_operations(attr); + if (ret) + return ret; + return set_roce_addr(to_mdev(attr->device), attr->port_num, attr->index, &attr->gid, attr); } @@ -614,8 +621,15 @@ static int mlx5_ib_add_gid(const struct ib_gid_attr *attr, static int mlx5_ib_del_gid(const struct ib_gid_attr *attr, __always_unused void **context) { - return set_roce_addr(to_mdev(attr->device), attr->port_num, - attr->index, NULL, attr); + int ret; + + ret = set_roce_addr(to_mdev(attr->device), attr->port_num, + attr->index, NULL, attr); + if (ret) + return ret; + + mlx5r_del_gid_macsec_operations(attr); + return 0; } __be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev, @@ -3644,13 +3658,13 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) mutex_destroy(&dev->cap_mask_mutex); WARN_ON(!xa_empty(&dev->sig_mrs)); WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES)); + mlx5r_macsec_dealloc_gids(dev); } static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) { struct mlx5_core_dev *mdev = dev->mdev; - int err; - int i; + int err, i; dev->ib_dev.node_type = RDMA_NODE_IB_CA; dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; @@ -3670,10 +3684,14 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) if (err) return err; - err = mlx5_ib_init_multiport_master(dev); + err = mlx5r_macsec_init_gids_and_devlist(dev); if (err) return 
err; + err = mlx5_ib_init_multiport_master(dev); + if (err) + goto err; + err = set_has_smi_cap(dev); if (err) goto err_mp; @@ -3697,7 +3715,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) spin_lock_init(&dev->dm.lock); dev->dm.dev = mdev; return 0; - +err: + mlx5r_macsec_dealloc_gids(dev); err_mp: mlx5_ib_cleanup_multiport_master(dev); return err; @@ -4106,11 +4125,15 @@ static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev) { dev->mdev_events.notifier_call = mlx5_ib_event; mlx5_notifier_register(dev->mdev, &dev->mdev_events); + + mlx5r_macsec_event_register(dev); + return 0; } static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev) { + mlx5r_macsec_event_unregister(dev); mlx5_notifier_unregister(dev->mdev, &dev->mdev_events); } diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 9c33d960af3c..16713baf0d06 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -26,6 +26,7 @@ #include "srq.h" #include "qp.h" +#include "macsec.h" #define mlx5_ib_dbg(_dev, format, arg...) \ dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \ @@ -870,6 +871,9 @@ struct mlx5_ib_port { struct mlx5_ib_dbg_cc_params *dbg_cc_params; struct mlx5_roce roce; struct mlx5_eswitch_rep *rep; +#ifdef CONFIG_MLX5_MACSEC + struct mlx5_reserved_gids *reserved_gids; +#endif }; struct mlx5_ib_dbg_param { @@ -1086,6 +1090,12 @@ struct mlx5_special_mkeys { __be32 terminate_scatter_list_mkey; }; +struct mlx5_macsec { + struct mutex lock; /* Protects mlx5_macsec internal contexts */ + struct list_head macsec_devices_list; + struct notifier_block blocking_events_nb; +}; + struct mlx5_ib_dev { struct ib_device ib_dev; struct mlx5_core_dev *mdev; @@ -1145,6 +1155,10 @@ struct mlx5_ib_dev { u16 pkey_table_len; u8 lag_ports; struct mlx5_special_mkeys mkeys; + +#ifdef CONFIG_MLX5_MACSEC + struct mlx5_macsec macsec; +#endif }; static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) @@ -1648,4 +1662,7 @@ static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev, return access_flags & IB_ACCESS_RELAXED_ORDERING; } +int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num, + unsigned int index, const union ib_gid *gid, + const struct ib_gid_attr *attr); #endif /* MLX5_IB_H */ diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c index b0ccebcd3ffa..3f8a491ce885 100644 --- a/drivers/net/dsa/b53/b53_serdes.c +++ b/drivers/net/dsa/b53/b53_serdes.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Northstar Plus switch SerDes/SGMII PHY main logic * diff --git a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h index ef81f5da5f81..3d367c4df4d9 100644 --- a/drivers/net/dsa/b53/b53_serdes.h +++ b/drivers/net/dsa/b53/b53_serdes.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Northstar Plus switch SerDes/SGMII PHY definitions * diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index 720f4e4ed0b0..11ef1d7ea229 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: (GPL-2.0 or MIT) +// SPDX-License-Identifier: (GPL-2.0 OR MIT) /* * DSA driver for: * Hirschmann Hellcreek TSN switch. 
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h index 4a678f7d61ae..6874cb9dc361 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.h +++ b/drivers/net/dsa/hirschmann/hellcreek.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 or MIT) */ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ /* * DSA driver for: * Hirschmann Hellcreek TSN switch. diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 8909899e9a31..77c8e9cfb445 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1590,9 +1590,12 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog, break; case XDP_TX: + rxq->stats[RX_XDP_TX]++; err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync); - if (unlikely(err)) + if (unlikely(err)) { + rxq->stats[RX_XDP_TX_ERRORS]++; goto xdp_err; + } ret = FEC_ENET_XDP_TX; break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index d537d517cabe..c4f4de82e29e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -140,7 +140,7 @@ config MLX5_CORE_IPOIB help MLX5 IPoIB offloads & acceleration support. -config MLX5_EN_MACSEC +config MLX5_MACSEC bool "Connect-X support for MACSec offload" depends on MLX5_CORE_EN depends on MACSEC diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 946310390659..b56b187a9097 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -98,7 +98,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib # mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o -mlx5_core-$(CONFIG_MLX5_EN_MACSEC) += en_accel/macsec.o en_accel/macsec_fs.o \ +mlx5_core-$(CONFIG_MLX5_MACSEC) += en_accel/macsec.o lib/macsec_fs.o \ en_accel/macsec_stats.o mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index c1deb04ba7e8..86f2690c5e01 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -917,7 +917,7 @@ struct mlx5e_priv { const struct mlx5e_profile *profile; void *ppriv; -#ifdef CONFIG_MLX5_EN_MACSEC +#ifdef CONFIG_MLX5_MACSEC struct mlx5e_macsec *macsec; #endif #ifdef CONFIG_MLX5_EN_IPSEC diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index bac4717548c6..caa34b9c161e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -138,7 +138,7 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev, } #endif -#ifdef CONFIG_MLX5_EN_MACSEC +#ifdef CONFIG_MLX5_MACSEC if (unlikely(mlx5e_macsec_skb_is_offload(skb))) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -173,7 +173,7 @@ static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv, mlx5e_ipsec_tx_build_eseg(priv, skb, eseg); #endif -#ifdef CONFIG_MLX5_EN_MACSEC +#ifdef CONFIG_MLX5_MACSEC if (unlikely(mlx5e_macsec_skb_is_offload(skb))) mlx5e_macsec_tx_build_eseg(priv->macsec, skb, eseg); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c index 592b165530ff..c9c1db971652 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c @@ -10,7 +10,6 @@ #include "lib/aso.h" #include "lib/crypto.h" #include "en_accel/macsec.h" -#include "en_accel/macsec_fs.h" #define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L #define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso) @@ -66,9 +65,7 @@ struct mlx5e_macsec_sa { ssci_t ssci; salt_t salt; - struct rhash_head hash; - u32 fs_id; - union mlx5e_macsec_rule *macsec_rule; + union mlx5_macsec_rule *macsec_rule; struct rcu_head rcu_head; struct mlx5e_macsec_epn_state epn_state; }; @@ -106,14 +103,6 @@ struct mlx5e_macsec_aso { u32 pdn; }; -static const struct rhashtable_params rhash_sci = { - .key_len = sizeof_field(struct mlx5e_macsec_sa, sci), - .key_offset = offsetof(struct mlx5e_macsec_sa, sci), - .head_offset = offsetof(struct mlx5e_macsec_sa, hash), - .automatic_shrinking = true, - .min_size = 1, -}; - struct mlx5e_macsec_device { const struct net_device *netdev; struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN]; @@ -125,20 +114,13 @@ struct mlx5e_macsec_device { struct mlx5e_macsec { struct list_head macsec_device_list_head; int num_of_devices; - struct mlx5e_macsec_fs *macsec_fs; struct mutex lock; /* Protects mlx5e_macsec internal contexts */ - /* Tx sci -> fs id mapping handling */ - struct rhashtable sci_hash; /* sci -> mlx5e_macsec_sa */ - /* Rx fs_id -> rx_sc mapping */ struct xarray sc_xarray; struct mlx5_core_dev *mdev; - /* Stats manage */ - struct mlx5e_macsec_stats stats; - /* ASO */ struct mlx5e_macsec_aso aso; @@ -330,36 +312,30 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec, struct mlx5e_macsec_sa *sa, - bool is_tx) + bool is_tx, struct net_device *netdev, u32 fs_id) { int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT : MLX5_ACCEL_MACSEC_ACTION_DECRYPT; - if ((is_tx) && sa->fs_id) { - /* Make sure ongoing datapath readers sees a valid SA */ - rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci); - sa->fs_id = 0; - } - if (!sa->macsec_rule) return; - mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action); + mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev, + fs_id); mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id); sa->macsec_rule = NULL; } static int mlx5e_macsec_init_sa(struct macsec_context *ctx, struct mlx5e_macsec_sa *sa, - bool encrypt, - bool is_tx) + bool encrypt, bool is_tx, u32 *fs_id) { struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev); struct mlx5e_macsec *macsec = priv->macsec; struct mlx5_macsec_rule_attrs rule_attrs; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_macsec_obj_attrs obj_attrs; - union mlx5e_macsec_rule *macsec_rule; + union mlx5_macsec_rule *macsec_rule; int err; obj_attrs.next_pn = sa->next_pn; @@ -387,7 +363,7 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx, rule_attrs.action = (is_tx) ? 
MLX5_ACCEL_MACSEC_ACTION_ENCRYPT : MLX5_ACCEL_MACSEC_ACTION_DECRYPT; - macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id); + macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id); if (!macsec_rule) { err = -ENOMEM; goto destroy_macsec_object; @@ -395,16 +371,8 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx, sa->macsec_rule = macsec_rule; - if (is_tx) { - err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci); - if (err) - goto destroy_macsec_object_and_rule; - } - return 0; -destroy_macsec_object_and_rule: - mlx5e_macsec_cleanup_sa(macsec, sa, is_tx); destroy_macsec_object: mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id); @@ -426,7 +394,7 @@ mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci) static int macsec_rx_sa_active_update(struct macsec_context *ctx, struct mlx5e_macsec_sa *rx_sa, - bool active) + bool active, u32 *fs_id) { struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev); struct mlx5e_macsec *macsec = priv->macsec; @@ -437,11 +405,11 @@ static int macsec_rx_sa_active_update(struct macsec_context *ctx, rx_sa->active = active; if (!active) { - mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, *fs_id); return 0; } - err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false); + err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, fs_id); if (err) rx_sa->active = false; @@ -563,7 +531,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx) !tx_sa->active) goto out; - err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true); + err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL); if (err) goto destroy_encryption_key; @@ -627,7 +595,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) goto out; if (ctx_tx_sa->active) { - err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true); + err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL); if (err) goto out; } else { @@ -636,7 +604,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) goto out; } - mlx5e_macsec_cleanup_sa(macsec, tx_sa, true); + mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0); } out: mutex_unlock(&macsec->lock); @@ -669,7 +637,7 @@ static int mlx5e_macsec_del_txsa(struct macsec_context *ctx) goto out; } - mlx5e_macsec_cleanup_sa(macsec, tx_sa, true); + mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0); mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id); kfree_rcu_mightsleep(tx_sa); macsec_device->tx_sa[assoc_num] = NULL; @@ -680,20 +648,6 @@ out: return err; } -static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci) -{ - struct mlx5e_macsec_sa *macsec_sa; - u32 fs_id = 0; - - rcu_read_lock(); - macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci); - if (macsec_sa) - fs_id = macsec_sa->fs_id; - rcu_read_unlock(); - - return fs_id; -} - static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx) { struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element; @@ -813,7 +767,8 @@ static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx) if (!rx_sa) continue; - err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active); + err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active, + &rx_sc->sc_xarray_element->fs_id); if (err) goto out; } @@ -824,7 +779,8 @@ out: return err; } -static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct 
mlx5e_macsec_rx_sc *rx_sc) +static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc, + struct net_device *netdev) { struct mlx5e_macsec_sa *rx_sa; int i; @@ -834,7 +790,8 @@ static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec if (!rx_sa) continue; - mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, netdev, + rx_sc->sc_xarray_element->fs_id); mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id); kfree(rx_sa); @@ -882,7 +839,7 @@ static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx) goto out; } - macsec_del_rxsc_ctx(macsec, rx_sc); + macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev); out: mutex_unlock(&macsec->lock); @@ -941,7 +898,6 @@ static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx) rx_sa->next_pn = ctx_rx_sa->next_pn; rx_sa->sci = sci; rx_sa->assoc_num = assoc_num; - rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id; if (ctx->secy->xpn) update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves, @@ -958,7 +914,7 @@ static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx) goto out; //TODO - add support for both authentication and encryption flows - err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false); + err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, &rx_sc->sc_xarray_element->fs_id); if (err) goto destroy_encryption_key; @@ -1025,7 +981,8 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx) goto out; } - err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active); + err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active, + &rx_sc->sc_xarray_element->fs_id); out: mutex_unlock(&macsec->lock); @@ -1073,7 +1030,8 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx) goto out; } - mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, + rx_sc->sc_xarray_element->fs_id); mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id); kfree(rx_sa); rx_sc->rx_sa[assoc_num] = NULL; @@ -1154,7 +1112,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx, if (!rx_sa || !rx_sa->macsec_rule) continue; - mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, + rx_sc->sc_xarray_element->fs_id); } } @@ -1165,7 +1124,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx, continue; if (rx_sa->active) { - err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false); + err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, + &rx_sc->sc_xarray_element->fs_id); if (err) goto out; } @@ -1218,7 +1178,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx) if (!tx_sa) continue; - mlx5e_macsec_cleanup_sa(macsec, tx_sa, true); + mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0); } for (i = 0; i < MACSEC_NUM_AN; ++i) { @@ -1227,7 +1187,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx) continue; if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) { - err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true); + err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL); if (err) goto out; } @@ -1265,7 +1225,7 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx) if (!tx_sa) continue; - mlx5e_macsec_cleanup_sa(macsec, tx_sa, true); + mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0); mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id); kfree(tx_sa); macsec_device->tx_sa[i] = NULL; @@ -1273,7 +1233,7 @@ 
static int mlx5e_macsec_del_secy(struct macsec_context *ctx) list = &macsec_device->macsec_rx_sc_list_head; list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) - macsec_del_rxsc_ctx(macsec, rx_sc); + macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev); kfree(macsec_device->dev_addr); macsec_device->dev_addr = NULL; @@ -1647,50 +1607,6 @@ static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_c mlx5_core_dealloc_pd(mdev, aso->pdn); } -bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) -{ - if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & - MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD)) - return false; - - if (!MLX5_CAP_GEN(mdev, log_max_dek)) - return false; - - if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload)) - return false; - - if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) || - !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec)) - return false; - - if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) || - !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec)) - return false; - - if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) && - !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt)) - return false; - - if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) && - !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt)) - return false; - - return true; -} - -void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats) -{ - mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats); -} - -struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec) -{ - if (!macsec) - return NULL; - - return &macsec->stats; -} - static const struct macsec_ops macsec_offload_ops = { .mdo_add_txsa = mlx5e_macsec_add_txsa, .mdo_upd_txsa = mlx5e_macsec_upd_txsa, @@ -1711,7 +1627,8 @@ bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb struct metadata_dst *md_dst = skb_metadata_dst(skb); u32 fs_id; - fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci); + fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs, + &md_dst->u.macsec_info.sci); if (!fs_id) goto err_out; @@ -1729,7 +1646,8 @@ void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec, struct metadata_dst *md_dst = skb_metadata_dst(skb); u32 fs_id; - fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci); + fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs, + &md_dst->u.macsec_info.sci); if (!fs_id) return; @@ -1782,7 +1700,7 @@ int mlx5e_macsec_init(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_macsec *macsec = NULL; - struct mlx5e_macsec_fs *macsec_fs; + struct mlx5_macsec_fs *macsec_fs; int err; if (!mlx5e_is_macsec_device(priv->mdev)) { @@ -1797,13 +1715,6 @@ int mlx5e_macsec_init(struct mlx5e_priv *priv) INIT_LIST_HEAD(&macsec->macsec_device_list_head); mutex_init(&macsec->lock); - err = rhashtable_init(&macsec->sci_hash, &rhash_sci); - if (err) { - mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n", - err); - goto err_hash; - } - err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev); if (err) { mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err); @@ -1822,13 +1733,13 @@ int mlx5e_macsec_init(struct mlx5e_priv *priv) macsec->mdev = mdev; - macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev); + macsec_fs = mlx5_macsec_fs_init(mdev); if (!macsec_fs) { err = -ENOMEM; goto err_out; } 
- macsec->macsec_fs = macsec_fs; + mdev->macsec_fs = macsec_fs; macsec->nb.notifier_call = macsec_obj_change_event; mlx5_notifier_register(mdev, &macsec->nb); @@ -1842,8 +1753,6 @@ err_out: err_wq: mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev); err_aso: - rhashtable_destroy(&macsec->sci_hash); -err_hash: kfree(macsec); priv->macsec = NULL; return err; @@ -1858,10 +1767,9 @@ void mlx5e_macsec_cleanup(struct mlx5e_priv *priv) return; mlx5_notifier_unregister(mdev, &macsec->nb); - mlx5e_macsec_fs_cleanup(macsec->macsec_fs); + mlx5_macsec_fs_cleanup(mdev->macsec_fs); destroy_workqueue(macsec->wq); mlx5e_macsec_aso_cleanup(&macsec->aso, mdev); - rhashtable_destroy(&macsec->sci_hash); mutex_destroy(&macsec->lock); kfree(macsec); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h index 347380a2cd9c..27df72e23106 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h @@ -4,32 +4,16 @@ #ifndef __MLX5_EN_ACCEL_MACSEC_H__ #define __MLX5_EN_ACCEL_MACSEC_H__ -#ifdef CONFIG_MLX5_EN_MACSEC +#ifdef CONFIG_MLX5_MACSEC #include <linux/mlx5/driver.h> #include <net/macsec.h> #include <net/dst_metadata.h> - -/* Bit31 - 30: MACsec marker, Bit15-0: MACsec id */ -#define MLX5_MACEC_RX_FS_ID_MAX USHRT_MAX /* Must be power of two */ -#define MLX5_MACSEC_RX_FS_ID_MASK MLX5_MACEC_RX_FS_ID_MAX -#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1) -#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK) +#include "lib/macsec_fs.h" struct mlx5e_priv; struct mlx5e_macsec; -struct mlx5e_macsec_stats { - u64 macsec_rx_pkts; - u64 macsec_rx_bytes; - u64 macsec_rx_pkts_drop; - u64 macsec_rx_bytes_drop; - u64 macsec_tx_pkts; - u64 macsec_tx_bytes; - u64 macsec_tx_pkts_drop; - u64 macsec_tx_bytes_drop; -}; - void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv); int mlx5e_macsec_init(struct mlx5e_priv *priv); void mlx5e_macsec_cleanup(struct mlx5e_priv *priv); @@ -52,9 +36,6 @@ static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, struct mlx5_cqe64 *cqe); -bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev); -void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats); -struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec); #else @@ -67,7 +48,6 @@ static inline void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, struct mlx5_cqe64 *cqe) {} -static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) { return false; } -#endif /* CONFIG_MLX5_EN_MACSEC */ +#endif /* CONFIG_MLX5_MACSEC */ #endif /* __MLX5_ACCEL_EN_MACSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c deleted file mode 100644 index 414e28584881..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c +++ /dev/null @@ -1,1394 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB -/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ - -#include <net/macsec.h> -#include <linux/netdevice.h> -#include <linux/mlx5/qp.h> -#include <linux/if_vlan.h> -#include "fs_core.h" -#include "en/fs.h" -#include "en_accel/macsec_fs.h" -#include "mlx5_core.h" - -/* MACsec TX flow steering */ -#define CRYPTO_NUM_MAXSEC_FTE BIT(15) -#define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1 - -#define TX_CRYPTO_TABLE_LEVEL 0 -#define TX_CRYPTO_TABLE_NUM_GROUPS 3 -#define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1 -#define TX_CRYPTO_TABLE_SA_GROUP_SIZE \ - (CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \ - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE)) -#define TX_CHECK_TABLE_LEVEL 1 -#define TX_CHECK_TABLE_NUM_FTE 2 -#define RX_CRYPTO_TABLE_LEVEL 0 -#define RX_CHECK_TABLE_LEVEL 1 -#define RX_CHECK_TABLE_NUM_FTE 3 -#define RX_CRYPTO_TABLE_NUM_GROUPS 3 -#define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \ - ((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2) -#define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \ - (CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE) -#define RX_NUM_OF_RULES_PER_SA 2 - -#define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */ -#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23 -#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8 -#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5 -#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) -#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8 -#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN) - -/* MACsec RX flow steering */ -#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E - -struct mlx5_sectag_header { - __be16 ethertype; - u8 tci_an; - u8 sl; - u32 pn; - u8 sci[MACSEC_SCI_LEN]; /* optional */ -} __packed; - -struct mlx5e_macsec_tx_rule { - struct mlx5_flow_handle *rule; - struct mlx5_pkt_reformat *pkt_reformat; - u32 fs_id; -}; - -struct mlx5e_macsec_tables { - struct mlx5e_flow_table ft_crypto; - struct mlx5_flow_handle *crypto_miss_rule; - - struct mlx5_flow_table *ft_check; - struct mlx5_flow_group *ft_check_group; - struct mlx5_fc *check_miss_rule_counter; - struct mlx5_flow_handle *check_miss_rule; - struct mlx5_fc *check_rule_counter; - - u32 refcnt; -}; - -struct mlx5e_macsec_tx { - struct mlx5_flow_handle *crypto_mke_rule; - struct mlx5_flow_handle *check_rule; - - struct ida tx_halloc; - - struct mlx5e_macsec_tables tables; -}; - -struct mlx5e_macsec_rx_rule { - struct mlx5_flow_handle *rule[RX_NUM_OF_RULES_PER_SA]; - struct mlx5_modify_hdr *meta_modhdr; -}; - -struct mlx5e_macsec_rx { - struct mlx5_flow_handle *check_rule[2]; - struct mlx5_pkt_reformat *check_rule_pkt_reformat[2]; - - struct mlx5e_macsec_tables tables; -}; - -union mlx5e_macsec_rule { - struct mlx5e_macsec_tx_rule tx_rule; - struct mlx5e_macsec_rx_rule rx_rule; -}; - -struct mlx5e_macsec_fs { - struct mlx5_core_dev *mdev; - struct net_device *netdev; - struct mlx5e_macsec_tx *tx_fs; - struct mlx5e_macsec_rx *rx_fs; -}; - -static void macsec_fs_tx_destroy(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; - struct mlx5e_macsec_tables *tx_tables; - - tx_tables = &tx_fs->tables; - - /* Tx check table */ - if (tx_fs->check_rule) { - mlx5_del_flow_rules(tx_fs->check_rule); - tx_fs->check_rule = NULL; - } - - if (tx_tables->check_miss_rule) { - mlx5_del_flow_rules(tx_tables->check_miss_rule); - tx_tables->check_miss_rule = NULL; - } - - if (tx_tables->ft_check_group) { - 
mlx5_destroy_flow_group(tx_tables->ft_check_group); - tx_tables->ft_check_group = NULL; - } - - if (tx_tables->ft_check) { - mlx5_destroy_flow_table(tx_tables->ft_check); - tx_tables->ft_check = NULL; - } - - /* Tx crypto table */ - if (tx_fs->crypto_mke_rule) { - mlx5_del_flow_rules(tx_fs->crypto_mke_rule); - tx_fs->crypto_mke_rule = NULL; - } - - if (tx_tables->crypto_miss_rule) { - mlx5_del_flow_rules(tx_tables->crypto_miss_rule); - tx_tables->crypto_miss_rule = NULL; - } - - mlx5e_destroy_flow_table(&tx_tables->ft_crypto); -} - -static int macsec_fs_tx_create_crypto_table_groups(struct mlx5e_flow_table *ft) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - int mclen = MLX5_ST_SZ_BYTES(fte_match_param); - int ix = 0; - u32 *in; - int err; - u8 *mc; - - ft->g = kcalloc(TX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); - if (!ft->g) - return -ENOMEM; - in = kvzalloc(inlen, GFP_KERNEL); - - if (!in) { - kfree(ft->g); - ft->g = NULL; - return -ENOMEM; - } - - mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - - /* Flow Group for MKE match */ - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - - MLX5_SET_CFG(in, start_flow_index, ix); - ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* Flow Group for SA rules */ - memset(in, 0, inlen); - memset(mc, 0, mclen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2); - MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a, - MLX5_ETH_WQE_FT_META_MACSEC_MASK); - - MLX5_SET_CFG(in, start_flow_index, ix); - ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* Flow Group for l2 traps */ - memset(in, 0, inlen); - memset(mc, 0, mclen); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - kvfree(in); - return 0; - -err: - err = PTR_ERR(ft->g[ft->num_groups]); - ft->g[ft->num_groups] = NULL; - kvfree(in); - - return err; -} - -static struct mlx5_flow_table - *macsec_fs_auto_group_table_create(struct mlx5_flow_namespace *ns, int flags, - int level, int max_fte) -{ - struct mlx5_flow_table_attr ft_attr = {}; - struct mlx5_flow_table *fdb = NULL; - - /* reserve entry for the match all miss group and rule */ - ft_attr.autogroup.num_reserved_entries = 1; - ft_attr.autogroup.max_num_groups = 1; - ft_attr.prio = 0; - ft_attr.flags = flags; - ft_attr.level = level; - ft_attr.max_fte = max_fte; - - fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); - - return fdb; -} - -static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; - struct net_device *netdev = macsec_fs->netdev; - struct mlx5_flow_table_attr ft_attr = {}; - struct mlx5_flow_destination dest = {}; - struct mlx5e_macsec_tables *tx_tables; - struct mlx5_flow_act flow_act = {}; - struct mlx5e_flow_table *ft_crypto; - struct mlx5_flow_table *flow_table; - struct mlx5_flow_group *flow_group; - struct 
mlx5_flow_namespace *ns; - struct mlx5_flow_handle *rule; - struct mlx5_flow_spec *spec; - u32 *flow_group_in; - int err; - - ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC); - if (!ns) - return -ENOMEM; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return -ENOMEM; - - flow_group_in = kvzalloc(inlen, GFP_KERNEL); - if (!flow_group_in) { - err = -ENOMEM; - goto out_spec; - } - - tx_tables = &tx_fs->tables; - ft_crypto = &tx_tables->ft_crypto; - - /* Tx crypto table */ - ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; - ft_attr.level = TX_CRYPTO_TABLE_LEVEL; - ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE; - - flow_table = mlx5_create_flow_table(ns, &ft_attr); - if (IS_ERR(flow_table)) { - err = PTR_ERR(flow_table); - netdev_err(netdev, "Failed to create MACsec Tx crypto table err(%d)\n", err); - goto out_flow_group; - } - ft_crypto->t = flow_table; - - /* Tx crypto table groups */ - err = macsec_fs_tx_create_crypto_table_groups(ft_crypto); - if (err) { - netdev_err(netdev, - "Failed to create default flow group for MACsec Tx crypto table err(%d)\n", - err); - goto err; - } - - /* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */ - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE); - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; - - rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, "Failed to add MACsec TX MKE rule, err=%d\n", err); - goto err; - } - tx_fs->crypto_mke_rule = rule; - - /* Tx crypto table Default miss rule */ - memset(&flow_act, 0, sizeof(flow_act)); - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; - rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, "Failed to add MACsec Tx table default miss rule %d\n", err); - goto err; - } - tx_tables->crypto_miss_rule = rule; - - /* Tx check table */ - flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL, - TX_CHECK_TABLE_NUM_FTE); - if (IS_ERR(flow_table)) { - err = PTR_ERR(flow_table); - netdev_err(netdev, "fail to create MACsec TX check table, err(%d)\n", err); - goto err; - } - tx_tables->ft_check = flow_table; - - /* Tx check table Default miss group/rule */ - memset(flow_group_in, 0, inlen); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1); - flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in); - if (IS_ERR(flow_group)) { - err = PTR_ERR(flow_group); - netdev_err(netdev, - "Failed to create default flow group for MACsec Tx crypto table err(%d)\n", - err); - goto err; - } - tx_tables->ft_check_group = flow_group; - - /* Tx check table default drop rule */ - memset(&dest, 0, sizeof(struct mlx5_flow_destination)); - memset(&flow_act, 0, sizeof(flow_act)); - dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter); - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; - rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, "Failed to added MACsec tx check drop rule, err(%d)\n", err); - 
goto err; - } - tx_tables->check_miss_rule = rule; - - /* Tx check table rule */ - memset(spec, 0, sizeof(struct mlx5_flow_spec)); - memset(&dest, 0, sizeof(struct mlx5_flow_destination)); - memset(&flow_act, 0, sizeof(flow_act)); - - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0); - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; - - flow_act.flags = FLOW_ACT_NO_APPEND; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT; - dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter); - rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, "Failed to add MACsec check rule, err=%d\n", err); - goto err; - } - tx_fs->check_rule = rule; - - goto out_flow_group; - -err: - macsec_fs_tx_destroy(macsec_fs); -out_flow_group: - kvfree(flow_group_in); -out_spec: - kvfree(spec); - return err; -} - -static int macsec_fs_tx_ft_get(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; - struct mlx5e_macsec_tables *tx_tables; - int err = 0; - - tx_tables = &tx_fs->tables; - if (tx_tables->refcnt) - goto out; - - err = macsec_fs_tx_create(macsec_fs); - if (err) - return err; - -out: - tx_tables->refcnt++; - return err; -} - -static void macsec_fs_tx_ft_put(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables; - - if (--tx_tables->refcnt) - return; - - macsec_fs_tx_destroy(macsec_fs); -} - -static int macsec_fs_tx_setup_fte(struct mlx5e_macsec_fs *macsec_fs, - struct mlx5_flow_spec *spec, - struct mlx5_flow_act *flow_act, - u32 macsec_obj_id, - u32 *fs_id) -{ - struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; - int err = 0; - u32 id; - - err = ida_alloc_range(&tx_fs->tx_halloc, 1, - MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES, - GFP_KERNEL); - if (err < 0) - return err; - - id = err; - spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; - - /* Metadata match */ - MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a, - MLX5_ETH_WQE_FT_META_MACSEC_MASK); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a, - MLX5_ETH_WQE_FT_META_MACSEC | id << 2); - - *fs_id = id; - flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC; - flow_act->crypto.obj_id = macsec_obj_id; - - mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id); - return 0; -} - -static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx, - char *reformatbf, - size_t *reformat_size) -{ - const struct macsec_secy *secy = ctx->secy; - bool sci_present = macsec_send_sci(secy); - struct mlx5_sectag_header sectag = {}; - const struct macsec_tx_sc *tx_sc; - - tx_sc = &secy->tx_sc; - sectag.ethertype = htons(ETH_P_MACSEC); - - if (sci_present) { - sectag.tci_an |= MACSEC_TCI_SC; - memcpy(&sectag.sci, &secy->sci, - sizeof(sectag.sci)); - } else { - if (tx_sc->end_station) - sectag.tci_an |= MACSEC_TCI_ES; - if (tx_sc->scb) - sectag.tci_an |= MACSEC_TCI_SCB; - } - - /* With GCM, C/E clear for !encrypt, both set for encrypt */ - if (tx_sc->encrypt) - sectag.tci_an |= MACSEC_TCI_CONFID; - else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) - sectag.tci_an |= MACSEC_TCI_C; - - sectag.tci_an |= tx_sc->encoding_sa; - - *reformat_size = 
MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0); - - memcpy(reformatbf, &sectag, *reformat_size); -} - -static void macsec_fs_tx_del_rule(struct mlx5e_macsec_fs *macsec_fs, - struct mlx5e_macsec_tx_rule *tx_rule) -{ - if (tx_rule->rule) { - mlx5_del_flow_rules(tx_rule->rule); - tx_rule->rule = NULL; - } - - if (tx_rule->pkt_reformat) { - mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat); - tx_rule->pkt_reformat = NULL; - } - - if (tx_rule->fs_id) { - ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id); - tx_rule->fs_id = 0; - } - - kfree(tx_rule); - - macsec_fs_tx_ft_put(macsec_fs); -} - -#define MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES 1 - -static union mlx5e_macsec_rule * -macsec_fs_tx_add_rule(struct mlx5e_macsec_fs *macsec_fs, - const struct macsec_context *macsec_ctx, - struct mlx5_macsec_rule_attrs *attrs, - u32 *sa_fs_id) -{ - char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN]; - struct mlx5_pkt_reformat_params reformat_params = {}; - struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; - struct net_device *netdev = macsec_fs->netdev; - union mlx5e_macsec_rule *macsec_rule = NULL; - struct mlx5_flow_destination dest = {}; - struct mlx5e_macsec_tables *tx_tables; - struct mlx5e_macsec_tx_rule *tx_rule; - struct mlx5_flow_act flow_act = {}; - struct mlx5_flow_handle *rule; - struct mlx5_flow_spec *spec; - size_t reformat_size; - int err = 0; - u32 fs_id; - - tx_tables = &tx_fs->tables; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return NULL; - - err = macsec_fs_tx_ft_get(macsec_fs); - if (err) - goto out_spec; - - macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL); - if (!macsec_rule) { - macsec_fs_tx_ft_put(macsec_fs); - goto out_spec; - } - - tx_rule = &macsec_rule->tx_rule; - - /* Tx crypto table crypto rule */ - macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size); - - reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC; - reformat_params.size = reformat_size; - reformat_params.data = reformatbf; - - if (is_vlan_dev(macsec_ctx->netdev)) - reformat_params.param_0 = MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES; - - flow_act.pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev, - &reformat_params, - MLX5_FLOW_NAMESPACE_EGRESS_MACSEC); - if (IS_ERR(flow_act.pkt_reformat)) { - err = PTR_ERR(flow_act.pkt_reformat); - netdev_err(netdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err); - goto err; - } - tx_rule->pkt_reformat = flow_act.pkt_reformat; - - err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, &fs_id); - if (err) { - netdev_err(netdev, - "Failed to add packet reformat for MACsec TX crypto rule, err=%d\n", - err); - goto err; - } - - tx_rule->fs_id = fs_id; - *sa_fs_id = fs_id; - - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT | - MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; - dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = tx_tables->ft_check; - rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, "Failed to add MACsec TX crypto rule, err=%d\n", err); - goto err; - } - tx_rule->rule = rule; - - goto out_spec; - -err: - macsec_fs_tx_del_rule(macsec_fs, tx_rule); - macsec_rule = NULL; -out_spec: - kvfree(spec); - - return macsec_rule; -} - -static void macsec_fs_tx_cleanup(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; - struct mlx5_core_dev *mdev = 
macsec_fs->mdev; - struct mlx5e_macsec_tables *tx_tables; - - if (!tx_fs) - return; - - tx_tables = &tx_fs->tables; - if (tx_tables->refcnt) { - netdev_err(macsec_fs->netdev, - "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n", - tx_tables->refcnt); - return; - } - - ida_destroy(&tx_fs->tx_halloc); - - if (tx_tables->check_miss_rule_counter) { - mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter); - tx_tables->check_miss_rule_counter = NULL; - } - - if (tx_tables->check_rule_counter) { - mlx5_fc_destroy(mdev, tx_tables->check_rule_counter); - tx_tables->check_rule_counter = NULL; - } - - kfree(tx_fs); - macsec_fs->tx_fs = NULL; -} - -static int macsec_fs_tx_init(struct mlx5e_macsec_fs *macsec_fs) -{ - struct net_device *netdev = macsec_fs->netdev; - struct mlx5_core_dev *mdev = macsec_fs->mdev; - struct mlx5e_macsec_tables *tx_tables; - struct mlx5e_macsec_tx *tx_fs; - struct mlx5_fc *flow_counter; - int err; - - tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL); - if (!tx_fs) - return -ENOMEM; - - tx_tables = &tx_fs->tables; - - flow_counter = mlx5_fc_create(mdev, false); - if (IS_ERR(flow_counter)) { - err = PTR_ERR(flow_counter); - netdev_err(netdev, - "Failed to create MACsec Tx encrypt flow counter, err(%d)\n", - err); - goto err_encrypt_counter; - } - tx_tables->check_rule_counter = flow_counter; - - flow_counter = mlx5_fc_create(mdev, false); - if (IS_ERR(flow_counter)) { - err = PTR_ERR(flow_counter); - netdev_err(netdev, - "Failed to create MACsec Tx drop flow counter, err(%d)\n", - err); - goto err_drop_counter; - } - tx_tables->check_miss_rule_counter = flow_counter; - - ida_init(&tx_fs->tx_halloc); - - macsec_fs->tx_fs = tx_fs; - - return 0; - -err_drop_counter: - mlx5_fc_destroy(mdev, tx_tables->check_rule_counter); - tx_tables->check_rule_counter = NULL; - -err_encrypt_counter: - kfree(tx_fs); - macsec_fs->tx_fs = NULL; - - return err; -} - -static void macsec_fs_rx_destroy(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs; - struct mlx5e_macsec_tables *rx_tables; - int i; - - /* Rx check table */ - for (i = 1; i >= 0; --i) { - if (rx_fs->check_rule[i]) { - mlx5_del_flow_rules(rx_fs->check_rule[i]); - rx_fs->check_rule[i] = NULL; - } - - if (rx_fs->check_rule_pkt_reformat[i]) { - mlx5_packet_reformat_dealloc(macsec_fs->mdev, - rx_fs->check_rule_pkt_reformat[i]); - rx_fs->check_rule_pkt_reformat[i] = NULL; - } - } - - rx_tables = &rx_fs->tables; - - if (rx_tables->check_miss_rule) { - mlx5_del_flow_rules(rx_tables->check_miss_rule); - rx_tables->check_miss_rule = NULL; - } - - if (rx_tables->ft_check_group) { - mlx5_destroy_flow_group(rx_tables->ft_check_group); - rx_tables->ft_check_group = NULL; - } - - if (rx_tables->ft_check) { - mlx5_destroy_flow_table(rx_tables->ft_check); - rx_tables->ft_check = NULL; - } - - /* Rx crypto table */ - if (rx_tables->crypto_miss_rule) { - mlx5_del_flow_rules(rx_tables->crypto_miss_rule); - rx_tables->crypto_miss_rule = NULL; - } - - mlx5e_destroy_flow_table(&rx_tables->ft_crypto); -} - -static int macsec_fs_rx_create_crypto_table_groups(struct mlx5e_flow_table *ft) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - int mclen = MLX5_ST_SZ_BYTES(fte_match_param); - int ix = 0; - u32 *in; - int err; - u8 *mc; - - ft->g = kcalloc(RX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); - if (!ft->g) - return -ENOMEM; - - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) { - kfree(ft->g); - return -ENOMEM; - } - - mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - - /* Flow group 
for SA rule with SCI */ - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS | - MLX5_MATCH_MISC_PARAMETERS_5); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - - MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0, - MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << - MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); - MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2); - MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3); - - MLX5_SET_CFG(in, start_flow_index, ix); - ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* Flow group for SA rule without SCI */ - memset(in, 0, inlen); - memset(mc, 0, mclen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS | - MLX5_MATCH_MISC_PARAMETERS_5); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - - MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0, - MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); - - MLX5_SET_CFG(in, start_flow_index, ix); - ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* Flow Group for l2 traps */ - memset(in, 0, inlen); - memset(mc, 0, mclen); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - kvfree(in); - return 0; - -err: - err = PTR_ERR(ft->g[ft->num_groups]); - ft->g[ft->num_groups] = NULL; - kvfree(in); - - return err; -} - -static int macsec_fs_rx_create_check_decap_rule(struct mlx5e_macsec_fs *macsec_fs, - struct mlx5_flow_destination *dest, - struct mlx5_flow_act *flow_act, - struct mlx5_flow_spec *spec, - int reformat_param_size) -{ - int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 
0 : 1; - u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI]; - struct mlx5_pkt_reformat_params reformat_params = {}; - struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs; - struct net_device *netdev = macsec_fs->netdev; - struct mlx5e_macsec_tables *rx_tables; - struct mlx5_flow_handle *rule; - int err = 0; - - rx_tables = &rx_fs->tables; - - /* Rx check table decap 16B rule */ - memset(dest, 0, sizeof(*dest)); - memset(flow_act, 0, sizeof(*flow_act)); - memset(spec, 0, sizeof(*spec)); - - reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC; - reformat_params.size = reformat_param_size; - reformat_params.data = mlx5_reformat_buf; - flow_act->pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev, - &reformat_params, - MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC); - if (IS_ERR(flow_act->pkt_reformat)) { - err = PTR_ERR(flow_act->pkt_reformat); - netdev_err(netdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err); - return err; - } - rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat; - - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; - /* MACsec syndrome match */ - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0); - /* ASO return reg syndrome match */ - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0); - - spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5; - /* Sectag TCI SC present bit*/ - MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0, - MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); - - if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0, - MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << - MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); - - flow_act->flags = FLOW_ACT_NO_APPEND; - flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO | - MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - dest->type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest->counter_id = mlx5_fc_id(rx_tables->check_rule_counter); - rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, "Failed to add MACsec Rx check rule, err=%d\n", err); - return err; - } - - rx_fs->check_rule[rule_index] = rule; - - return 0; -} - -static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs; - struct net_device *netdev = macsec_fs->netdev; - struct mlx5_flow_table_attr ft_attr = {}; - struct mlx5_flow_destination dest = {}; - struct mlx5e_macsec_tables *rx_tables; - struct mlx5e_flow_table *ft_crypto; - struct mlx5_flow_table *flow_table; - struct mlx5_flow_group *flow_group; - struct mlx5_flow_act flow_act = {}; - struct mlx5_flow_namespace *ns; - struct mlx5_flow_handle *rule; - struct mlx5_flow_spec *spec; - u32 *flow_group_in; - int err; - - ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC); - if (!ns) - return -ENOMEM; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return -ENOMEM; - - flow_group_in = kvzalloc(inlen, GFP_KERNEL); - if (!flow_group_in) { - err = -ENOMEM; - goto free_spec; - } - - rx_tables = 
&rx_fs->tables; - ft_crypto = &rx_tables->ft_crypto; - - /* Rx crypto table */ - ft_attr.level = RX_CRYPTO_TABLE_LEVEL; - ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE; - - flow_table = mlx5_create_flow_table(ns, &ft_attr); - if (IS_ERR(flow_table)) { - err = PTR_ERR(flow_table); - netdev_err(netdev, "Failed to create MACsec Rx crypto table err(%d)\n", err); - goto out_flow_group; - } - ft_crypto->t = flow_table; - - /* Rx crypto table groups */ - err = macsec_fs_rx_create_crypto_table_groups(ft_crypto); - if (err) { - netdev_err(netdev, - "Failed to create default flow group for MACsec Tx crypto table err(%d)\n", - err); - goto err; - } - - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; - rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, - "Failed to add MACsec Rx crypto table default miss rule %d\n", - err); - goto err; - } - rx_tables->crypto_miss_rule = rule; - - /* Rx check table */ - flow_table = macsec_fs_auto_group_table_create(ns, - MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT, - RX_CHECK_TABLE_LEVEL, - RX_CHECK_TABLE_NUM_FTE); - if (IS_ERR(flow_table)) { - err = PTR_ERR(flow_table); - netdev_err(netdev, "fail to create MACsec RX check table, err(%d)\n", err); - goto err; - } - rx_tables->ft_check = flow_table; - - /* Rx check table Default miss group/rule */ - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1); - flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in); - if (IS_ERR(flow_group)) { - err = PTR_ERR(flow_group); - netdev_err(netdev, - "Failed to create default flow group for MACsec Rx check table err(%d)\n", - err); - goto err; - } - rx_tables->ft_check_group = flow_group; - - /* Rx check table default drop rule */ - memset(&flow_act, 0, sizeof(flow_act)); - - dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter); - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; - rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, "Failed to added MACsec Rx check drop rule, err(%d)\n", err); - goto err; - } - rx_tables->check_miss_rule = rule; - - /* Rx check table decap rules */ - err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec, - MLX5_SECTAG_HEADER_SIZE_WITH_SCI); - if (err) - goto err; - - err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec, - MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI); - if (err) - goto err; - - goto out_flow_group; - -err: - macsec_fs_rx_destroy(macsec_fs); -out_flow_group: - kvfree(flow_group_in); -free_spec: - kvfree(spec); - return err; -} - -static int macsec_fs_rx_ft_get(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables; - int err = 0; - - if (rx_tables->refcnt) - goto out; - - err = macsec_fs_rx_create(macsec_fs); - if (err) - return err; - -out: - rx_tables->refcnt++; - return err; -} - -static void macsec_fs_rx_ft_put(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables; - - if (--rx_tables->refcnt) - return; - - macsec_fs_rx_destroy(macsec_fs); -} - -static void macsec_fs_rx_del_rule(struct mlx5e_macsec_fs *macsec_fs, - struct mlx5e_macsec_rx_rule *rx_rule) -{ - int i; - - for (i = 0; i < 
RX_NUM_OF_RULES_PER_SA; ++i) { - if (rx_rule->rule[i]) { - mlx5_del_flow_rules(rx_rule->rule[i]); - rx_rule->rule[i] = NULL; - } - } - - if (rx_rule->meta_modhdr) { - mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr); - rx_rule->meta_modhdr = NULL; - } - - kfree(rx_rule); - - macsec_fs_rx_ft_put(macsec_fs); -} - -static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec, - struct mlx5_flow_act *flow_act, - struct mlx5_macsec_rule_attrs *attrs, - bool sci_present) -{ - u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num; - struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto; - __be32 *sci_p = (__be32 *)(&attrs->sci); - - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - - /* MACsec ethertype */ - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC); - - spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5; - - /* Sectag AN + TCI SC present bit*/ - MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0, - MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0, - tci_an << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); - - if (sci_present) { - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, - misc_parameters_5.macsec_tag_2); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2, - be32_to_cpu(sci_p[0])); - - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, - misc_parameters_5.macsec_tag_3); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3, - be32_to_cpu(sci_p[1])); - } else { - /* When SCI isn't present in the Sectag, need to match the source */ - /* MAC address only if the SCI contains the default MACsec PORT */ - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16); - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0); - memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16), - sci_p, ETH_ALEN); - } - - crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC; - crypto_params->obj_id = attrs->macsec_obj_id; -} - -static union mlx5e_macsec_rule * -macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs, - struct mlx5_macsec_rule_attrs *attrs, - u32 fs_id) -{ - u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; - struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs; - struct net_device *netdev = macsec_fs->netdev; - union mlx5e_macsec_rule *macsec_rule = NULL; - struct mlx5_modify_hdr *modify_hdr = NULL; - struct mlx5_flow_destination dest = {}; - struct mlx5e_macsec_tables *rx_tables; - struct mlx5e_macsec_rx_rule *rx_rule; - struct mlx5_flow_act flow_act = {}; - struct mlx5e_flow_table *ft_crypto; - struct mlx5_flow_handle *rule; - struct mlx5_flow_spec *spec; - int err = 0; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return NULL; - - err = macsec_fs_rx_ft_get(macsec_fs); - if (err) - goto out_spec; - - macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL); - if (!macsec_rule) { - macsec_fs_rx_ft_put(macsec_fs); - goto out_spec; - } - - rx_rule = &macsec_rule->rx_rule; - rx_tables = &rx_fs->tables; - ft_crypto = &rx_tables->ft_crypto; - - /* Set bit[31 - 30] macsec marker - 0x01 */ - /* Set bit[15-0] fs id */ - MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); 
- MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); - MLX5_SET(set_action_in, action, data, MLX5_MACSEC_RX_METADAT_HANDLE(fs_id) | BIT(30)); - MLX5_SET(set_action_in, action, offset, 0); - MLX5_SET(set_action_in, action, length, 32); - - modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC, - 1, action); - if (IS_ERR(modify_hdr)) { - err = PTR_ERR(modify_hdr); - netdev_err(netdev, "fail to alloc MACsec set modify_header_id err=%d\n", err); - modify_hdr = NULL; - goto err; - } - rx_rule->meta_modhdr = modify_hdr; - - /* Rx crypto table with SCI rule */ - macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true); - - flow_act.modify_hdr = modify_hdr; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT | - MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - - dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = rx_tables->ft_check; - rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, - "Failed to add SA with SCI rule to Rx crypto rule, err=%d\n", - err); - goto err; - } - rx_rule->rule[0] = rule; - - /* Rx crypto table without SCI rule */ - if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == ntohs(MACSEC_PORT_ES)) { - memset(spec, 0, sizeof(struct mlx5_flow_spec)); - memset(&dest, 0, sizeof(struct mlx5_flow_destination)); - memset(&flow_act, 0, sizeof(flow_act)); - - macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false); - - flow_act.modify_hdr = modify_hdr; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT | - MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - - dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = rx_tables->ft_check; - rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, - "Failed to add SA without SCI rule to Rx crypto rule, err=%d\n", - err); - goto err; - } - rx_rule->rule[1] = rule; - } - - kvfree(spec); - return macsec_rule; - -err: - macsec_fs_rx_del_rule(macsec_fs, rx_rule); - macsec_rule = NULL; -out_spec: - kvfree(spec); - return macsec_rule; -} - -static int macsec_fs_rx_init(struct mlx5e_macsec_fs *macsec_fs) -{ - struct net_device *netdev = macsec_fs->netdev; - struct mlx5_core_dev *mdev = macsec_fs->mdev; - struct mlx5e_macsec_tables *rx_tables; - struct mlx5e_macsec_rx *rx_fs; - struct mlx5_fc *flow_counter; - int err; - - rx_fs = kzalloc(sizeof(*rx_fs), GFP_KERNEL); - if (!rx_fs) - return -ENOMEM; - - flow_counter = mlx5_fc_create(mdev, false); - if (IS_ERR(flow_counter)) { - err = PTR_ERR(flow_counter); - netdev_err(netdev, - "Failed to create MACsec Rx encrypt flow counter, err(%d)\n", - err); - goto err_encrypt_counter; - } - - rx_tables = &rx_fs->tables; - rx_tables->check_rule_counter = flow_counter; - - flow_counter = mlx5_fc_create(mdev, false); - if (IS_ERR(flow_counter)) { - err = PTR_ERR(flow_counter); - netdev_err(netdev, - "Failed to create MACsec Rx drop flow counter, err(%d)\n", - err); - goto err_drop_counter; - } - rx_tables->check_miss_rule_counter = flow_counter; - - macsec_fs->rx_fs = rx_fs; - - return 0; - -err_drop_counter: - mlx5_fc_destroy(mdev, rx_tables->check_rule_counter); - rx_tables->check_rule_counter = NULL; - -err_encrypt_counter: - kfree(rx_fs); - macsec_fs->rx_fs = NULL; - - return err; -} - -static void macsec_fs_rx_cleanup(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs; - 
struct mlx5_core_dev *mdev = macsec_fs->mdev; - struct mlx5e_macsec_tables *rx_tables; - - if (!rx_fs) - return; - - rx_tables = &rx_fs->tables; - - if (rx_tables->refcnt) { - netdev_err(macsec_fs->netdev, - "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n", - rx_tables->refcnt); - return; - } - - if (rx_tables->check_miss_rule_counter) { - mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter); - rx_tables->check_miss_rule_counter = NULL; - } - - if (rx_tables->check_rule_counter) { - mlx5_fc_destroy(mdev, rx_tables->check_rule_counter); - rx_tables->check_rule_counter = NULL; - } - - kfree(rx_fs); - macsec_fs->rx_fs = NULL; -} - -void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats) -{ - struct mlx5e_macsec_stats *stats = (struct mlx5e_macsec_stats *)macsec_stats; - struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables; - struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables; - struct mlx5_core_dev *mdev = macsec_fs->mdev; - - if (tx_tables->check_rule_counter) - mlx5_fc_query(mdev, tx_tables->check_rule_counter, - &stats->macsec_tx_pkts, &stats->macsec_tx_bytes); - - if (tx_tables->check_miss_rule_counter) - mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter, - &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop); - - if (rx_tables->check_rule_counter) - mlx5_fc_query(mdev, rx_tables->check_rule_counter, - &stats->macsec_rx_pkts, &stats->macsec_rx_bytes); - - if (rx_tables->check_miss_rule_counter) - mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter, - &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop); -} - -union mlx5e_macsec_rule * -mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs, - const struct macsec_context *macsec_ctx, - struct mlx5_macsec_rule_attrs *attrs, - u32 *sa_fs_id) -{ - return (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ? - macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, sa_fs_id) : - macsec_fs_rx_add_rule(macsec_fs, attrs, *sa_fs_id); -} - -void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs, - union mlx5e_macsec_rule *macsec_rule, - int action) -{ - (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ? 
- macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule) : - macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule); -} - -void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs) -{ - macsec_fs_rx_cleanup(macsec_fs); - macsec_fs_tx_cleanup(macsec_fs); - kfree(macsec_fs); -} - -struct mlx5e_macsec_fs * -mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev, - struct net_device *netdev) -{ - struct mlx5e_macsec_fs *macsec_fs; - int err; - - macsec_fs = kzalloc(sizeof(*macsec_fs), GFP_KERNEL); - if (!macsec_fs) - return NULL; - - macsec_fs->mdev = mdev; - macsec_fs->netdev = netdev; - - err = macsec_fs_tx_init(macsec_fs); - if (err) { - netdev_err(netdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err); - goto err; - } - - err = macsec_fs_rx_init(macsec_fs); - if (err) { - netdev_err(netdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err); - goto tx_cleanup; - } - - return macsec_fs; - -tx_cleanup: - macsec_fs_tx_cleanup(macsec_fs); -err: - kfree(macsec_fs); - return NULL; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h deleted file mode 100644 index b429648d4ee7..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h +++ /dev/null @@ -1,47 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ -/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ - -#ifndef __MLX5_MACSEC_STEERING_H__ -#define __MLX5_MACSEC_STEERING_H__ - -#ifdef CONFIG_MLX5_EN_MACSEC - -#include "en_accel/macsec.h" - -#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16 - -struct mlx5e_macsec_fs; -union mlx5e_macsec_rule; - -struct mlx5_macsec_rule_attrs { - sci_t sci; - u32 macsec_obj_id; - u8 assoc_num; - int action; -}; - -enum mlx5_macsec_action { - MLX5_ACCEL_MACSEC_ACTION_ENCRYPT, - MLX5_ACCEL_MACSEC_ACTION_DECRYPT, -}; - -void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs); - -struct mlx5e_macsec_fs * -mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev, struct net_device *netdev); - -union mlx5e_macsec_rule * -mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs, - const struct macsec_context *ctx, - struct mlx5_macsec_rule_attrs *attrs, - u32 *sa_fs_id); - -void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs, - union mlx5e_macsec_rule *macsec_rule, - int action); - -void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats); - -#endif - -#endif /* __MLX5_MACSEC_STEERING_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c index e50a2e3f3d18..4559ee16a11a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c @@ -8,14 +8,14 @@ #include "en_accel/macsec.h" static const struct counter_desc mlx5e_macsec_hw_stats_desc[] = { - { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts) }, - { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts_drop) }, - { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes_drop) }, - { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts) }, - { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts_drop) }, - { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes_drop) }, + { 
MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_pkts) }, + { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_pkts_drop) }, + { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_bytes_drop) }, + { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_pkts) }, + { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_pkts_drop) }, + { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_bytes_drop) }, }; #define NUM_MACSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_macsec_hw_stats_desc) @@ -52,6 +52,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(macsec_hw) static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw) { + struct mlx5_macsec_fs *macsec_fs; int i; if (!priv->macsec) @@ -60,9 +61,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw) if (!mlx5e_is_macsec_device(priv->mdev)) return idx; - mlx5e_macsec_get_stats_fill(priv->macsec, mlx5e_macsec_get_stats(priv->macsec)); + macsec_fs = priv->mdev->macsec_fs; + mlx5_macsec_fs_get_stats_fill(macsec_fs, mlx5_macsec_fs_get_stats(macsec_fs)); for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(mlx5e_macsec_get_stats(priv->macsec), + data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_macsec_fs_get_stats(macsec_fs), mlx5e_macsec_hw_stats_desc, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 348fb140858c..4b96ad657145 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -2502,7 +2502,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { &MLX5E_STATS_GRP(per_port_buff_congest), &MLX5E_STATS_GRP(ptp), &MLX5E_STATS_GRP(qos), -#ifdef CONFIG_MLX5_EN_MACSEC +#ifdef CONFIG_MLX5_MACSEC &MLX5E_STATS_GRP(macsec_hw), #endif }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 244cfd470903..a4b925331661 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -975,6 +975,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions); table_type = FS_FT_ESW_INGRESS_ACL; break; + case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC: case MLX5_FLOW_NAMESPACE_RDMA_TX: max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions); table_type = FS_FT_RDMA_TX; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index a3228502f866..a13b9c2bd144 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -107,7 +107,7 @@ LEFTOVERS_NUM_PRIOS) #define KERNEL_RX_MACSEC_NUM_PRIOS 1 -#define KERNEL_RX_MACSEC_NUM_LEVELS 2 +#define KERNEL_RX_MACSEC_NUM_LEVELS 3 #define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS) #define ETHTOOL_PRIO_NUM_LEVELS 1 @@ -224,6 +224,7 @@ static struct init_tree_node egress_root_fs = { enum { RDMA_RX_IPSEC_PRIO, + RDMA_RX_MACSEC_PRIO, RDMA_RX_COUNTERS_PRIO, RDMA_RX_BYPASS_PRIO, RDMA_RX_KERNEL_PRIO, @@ -237,9 +238,13 @@ enum { #define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1) #define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2) +#define RDMA_RX_MACSEC_NUM_PRIOS 1 +#define RDMA_RX_MACSEC_PRIO_NUM_LEVELS 2 +#define RDMA_RX_MACSEC_MIN_LEVEL 
(RDMA_RX_COUNTERS_MIN_LEVEL + RDMA_RX_MACSEC_NUM_PRIOS) + static struct init_tree_node rdma_rx_root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 4, + .ar_size = 5, .children = (struct init_tree_node[]) { [RDMA_RX_IPSEC_PRIO] = ADD_PRIO(0, RDMA_RX_IPSEC_MIN_LEVEL, 0, @@ -247,6 +252,12 @@ static struct init_tree_node rdma_rx_root_fs = { ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(RDMA_RX_IPSEC_NUM_PRIOS, RDMA_RX_IPSEC_NUM_LEVELS))), + [RDMA_RX_MACSEC_PRIO] = + ADD_PRIO(0, RDMA_RX_MACSEC_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(RDMA_RX_MACSEC_NUM_PRIOS, + RDMA_RX_MACSEC_PRIO_NUM_LEVELS))), [RDMA_RX_COUNTERS_PRIO] = ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0, FS_CHAINING_CAPS, @@ -270,6 +281,7 @@ static struct init_tree_node rdma_rx_root_fs = { enum { RDMA_TX_COUNTERS_PRIO, RDMA_TX_IPSEC_PRIO, + RDMA_TX_MACSEC_PRIO, RDMA_TX_BYPASS_PRIO, }; @@ -280,9 +292,13 @@ enum { #define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1 #define RDMA_TX_IPSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS) +#define RDMA_TX_MACSEC_NUM_PRIOS 1 +#define RDMA_TX_MACESC_PRIO_NUM_LEVELS 1 +#define RDMA_TX_MACSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_MACSEC_NUM_PRIOS) + static struct init_tree_node rdma_tx_root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 3, + .ar_size = 4, .children = (struct init_tree_node[]) { [RDMA_TX_COUNTERS_PRIO] = ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0, @@ -296,7 +312,12 @@ static struct init_tree_node rdma_tx_root_fs = { ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(RDMA_TX_IPSEC_NUM_PRIOS, RDMA_TX_IPSEC_PRIO_NUM_LEVELS))), - + [RDMA_TX_MACSEC_PRIO] = + ADD_PRIO(0, RDMA_TX_MACSEC_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(RDMA_TX_MACSEC_NUM_PRIOS, + RDMA_TX_MACESC_PRIO_NUM_LEVELS))), [RDMA_TX_BYPASS_PRIO] = ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0, FS_CHAINING_CAPS_RDMA_TX, @@ -2466,6 +2487,14 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, root_ns = steering->rdma_tx_root_ns; prio = RDMA_TX_IPSEC_PRIO; break; + case MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC: + root_ns = steering->rdma_rx_root_ns; + prio = RDMA_RX_MACSEC_PRIO; + break; + case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC: + root_ns = steering->rdma_tx_root_ns; + prio = RDMA_TX_MACSEC_PRIO; + break; default: /* Must be NIC RX */ WARN_ON(!is_nic_rx_ns(type)); root_ns = steering->root_ns; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c new file mode 100644 index 000000000000..4a078113e292 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c @@ -0,0 +1,2411 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#include <net/macsec.h> +#include <linux/mlx5/qp.h> +#include <linux/if_vlan.h> +#include <linux/mlx5/fs_helpers.h> +#include <linux/mlx5/macsec.h> +#include "fs_core.h" +#include "lib/macsec_fs.h" +#include "mlx5_core.h" + +/* MACsec TX flow steering */ +#define CRYPTO_NUM_MAXSEC_FTE BIT(15) +#define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1 + +#define TX_CRYPTO_TABLE_LEVEL 0 +#define TX_CRYPTO_TABLE_NUM_GROUPS 3 +#define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1 +#define TX_CRYPTO_TABLE_SA_GROUP_SIZE \ + (CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \ + CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE)) +#define TX_CHECK_TABLE_LEVEL 1 +#define TX_CHECK_TABLE_NUM_FTE 2 +#define RX_CRYPTO_TABLE_LEVEL 0 +#define RX_CHECK_TABLE_LEVEL 1 +#define RX_ROCE_TABLE_LEVEL 2 +#define RX_CHECK_TABLE_NUM_FTE 3 +#define RX_ROCE_TABLE_NUM_FTE 2 +#define RX_CRYPTO_TABLE_NUM_GROUPS 3 +#define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \ + ((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2) +#define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \ + (CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE) +#define RX_NUM_OF_RULES_PER_SA 2 + +#define RDMA_RX_ROCE_IP_TABLE_LEVEL 0 +#define RDMA_RX_ROCE_MACSEC_OP_TABLE_LEVEL 1 + +#define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */ +#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23 +#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8 +#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5 +#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) +#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8 +#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN) + +/* MACsec RX flow steering */ +#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E + +/* MACsec fs_id handling for steering */ +#define macsec_fs_set_tx_fs_id(fs_id) (MLX5_ETH_WQE_FT_META_MACSEC | (fs_id) << 2) +#define macsec_fs_set_rx_fs_id(fs_id) ((fs_id) | BIT(30)) + +struct mlx5_sectag_header { + __be16 ethertype; + u8 tci_an; + u8 sl; + u32 pn; + u8 sci[MACSEC_SCI_LEN]; /* optional */ +} __packed; + +struct mlx5_roce_macsec_tx_rule { + u32 fs_id; + u16 gid_idx; + struct list_head entry; + struct mlx5_flow_handle *rule; + struct mlx5_modify_hdr *meta_modhdr; +}; + +struct mlx5_macsec_tx_rule { + struct mlx5_flow_handle *rule; + struct mlx5_pkt_reformat *pkt_reformat; + u32 fs_id; +}; + +struct mlx5_macsec_flow_table { + int num_groups; + struct mlx5_flow_table *t; + struct mlx5_flow_group **g; +}; + +struct mlx5_macsec_tables { + struct mlx5_macsec_flow_table ft_crypto; + struct mlx5_flow_handle *crypto_miss_rule; + + struct mlx5_flow_table *ft_check; + struct mlx5_flow_group *ft_check_group; + struct mlx5_fc *check_miss_rule_counter; + struct mlx5_flow_handle *check_miss_rule; + struct mlx5_fc *check_rule_counter; + + u32 refcnt; +}; + +struct mlx5_fs_id { + u32 id; + refcount_t refcnt; + sci_t sci; + struct rhash_head hash; +}; + +struct mlx5_macsec_device { + struct list_head macsec_devices_list_entry; + void *macdev; + struct xarray tx_id_xa; + struct xarray rx_id_xa; +}; + +struct mlx5_macsec_tx { + struct mlx5_flow_handle *crypto_mke_rule; + struct mlx5_flow_handle *check_rule; + + struct ida tx_halloc; + + struct mlx5_macsec_tables tables; + + struct mlx5_flow_table *ft_rdma_tx; +}; + +struct mlx5_roce_macsec_rx_rule { + u32 fs_id; + u16 gid_idx; + struct mlx5_flow_handle *op; + struct mlx5_flow_handle *ip; + struct list_head entry; +}; + +struct mlx5_macsec_rx_rule { 
+ struct mlx5_flow_handle *rule[RX_NUM_OF_RULES_PER_SA]; + struct mlx5_modify_hdr *meta_modhdr; +}; + +struct mlx5_macsec_miss { + struct mlx5_flow_group *g; + struct mlx5_flow_handle *rule; +}; + +struct mlx5_macsec_rx_roce { + /* Flow table/rules in NIC domain, to check if it's a RoCE packet */ + struct mlx5_flow_group *g; + struct mlx5_flow_table *ft; + struct mlx5_flow_handle *rule; + struct mlx5_modify_hdr *copy_modify_hdr; + struct mlx5_macsec_miss nic_miss; + + /* Flow table/rule in RDMA domain, to check dgid */ + struct mlx5_flow_table *ft_ip_check; + struct mlx5_flow_table *ft_macsec_op_check; + struct mlx5_macsec_miss miss; +}; + +struct mlx5_macsec_rx { + struct mlx5_flow_handle *check_rule[2]; + struct mlx5_pkt_reformat *check_rule_pkt_reformat[2]; + + struct mlx5_macsec_tables tables; + struct mlx5_macsec_rx_roce roce; +}; + +union mlx5_macsec_rule { + struct mlx5_macsec_tx_rule tx_rule; + struct mlx5_macsec_rx_rule rx_rule; +}; + +static const struct rhashtable_params rhash_sci = { + .key_len = sizeof_field(struct mlx5_fs_id, sci), + .key_offset = offsetof(struct mlx5_fs_id, sci), + .head_offset = offsetof(struct mlx5_fs_id, hash), + .automatic_shrinking = true, + .min_size = 1, +}; + +static const struct rhashtable_params rhash_fs_id = { + .key_len = sizeof_field(struct mlx5_fs_id, id), + .key_offset = offsetof(struct mlx5_fs_id, id), + .head_offset = offsetof(struct mlx5_fs_id, hash), + .automatic_shrinking = true, + .min_size = 1, +}; + +struct mlx5_macsec_fs { + struct mlx5_core_dev *mdev; + struct mlx5_macsec_tx *tx_fs; + struct mlx5_macsec_rx *rx_fs; + + /* Stats manage */ + struct mlx5_macsec_stats stats; + + /* Tx sci -> fs id mapping handling */ + struct rhashtable sci_hash; /* sci -> mlx5_fs_id */ + + /* RX fs_id -> mlx5_fs_id mapping handling */ + struct rhashtable fs_id_hash; /* fs_id -> mlx5_fs_id */ + + /* TX & RX fs_id lists per macsec device */ + struct list_head macsec_devices_list; +}; + +static void macsec_fs_destroy_groups(struct mlx5_macsec_flow_table *ft) +{ + int i; + + for (i = ft->num_groups - 1; i >= 0; i--) { + if (!IS_ERR_OR_NULL(ft->g[i])) + mlx5_destroy_flow_group(ft->g[i]); + ft->g[i] = NULL; + } + ft->num_groups = 0; +} + +static void macsec_fs_destroy_flow_table(struct mlx5_macsec_flow_table *ft) +{ + macsec_fs_destroy_groups(ft); + kfree(ft->g); + mlx5_destroy_flow_table(ft->t); + ft->t = NULL; +} + +static void macsec_fs_tx_destroy(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_macsec_tables *tx_tables; + + if (mlx5_is_macsec_roce_supported(macsec_fs->mdev)) + mlx5_destroy_flow_table(tx_fs->ft_rdma_tx); + + tx_tables = &tx_fs->tables; + + /* Tx check table */ + if (tx_fs->check_rule) { + mlx5_del_flow_rules(tx_fs->check_rule); + tx_fs->check_rule = NULL; + } + + if (tx_tables->check_miss_rule) { + mlx5_del_flow_rules(tx_tables->check_miss_rule); + tx_tables->check_miss_rule = NULL; + } + + if (tx_tables->ft_check_group) { + mlx5_destroy_flow_group(tx_tables->ft_check_group); + tx_tables->ft_check_group = NULL; + } + + if (tx_tables->ft_check) { + mlx5_destroy_flow_table(tx_tables->ft_check); + tx_tables->ft_check = NULL; + } + + /* Tx crypto table */ + if (tx_fs->crypto_mke_rule) { + mlx5_del_flow_rules(tx_fs->crypto_mke_rule); + tx_fs->crypto_mke_rule = NULL; + } + + if (tx_tables->crypto_miss_rule) { + mlx5_del_flow_rules(tx_tables->crypto_miss_rule); + tx_tables->crypto_miss_rule = NULL; + } + + macsec_fs_destroy_flow_table(&tx_tables->ft_crypto); +} + +static int 
macsec_fs_tx_create_crypto_table_groups(struct mlx5_macsec_flow_table *ft) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int mclen = MLX5_ST_SZ_BYTES(fte_match_param); + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ft->g = kcalloc(TX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) + return -ENOMEM; + in = kvzalloc(inlen, GFP_KERNEL); + + if (!in) { + kfree(ft->g); + ft->g = NULL; + return -ENOMEM; + } + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + + /* Flow Group for MKE match */ + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Flow Group for SA rules */ + memset(in, 0, inlen); + memset(mc, 0, mclen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2); + MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a, + MLX5_ETH_WQE_FT_META_MACSEC_MASK); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Flow Group for l2 traps */ + memset(in, 0, inlen); + memset(mc, 0, mclen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + kvfree(in); + + return err; +} + +static struct mlx5_flow_table + *macsec_fs_auto_group_table_create(struct mlx5_flow_namespace *ns, int flags, + int level, int max_fte) +{ + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_table *fdb = NULL; + + /* reserve entry for the match all miss group and rule */ + ft_attr.autogroup.num_reserved_entries = 1; + ft_attr.autogroup.max_num_groups = 1; + ft_attr.prio = 0; + ft_attr.flags = flags; + ft_attr.level = level; + ft_attr.max_fte = max_fte; + + fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); + + return fdb; +} + +enum { + RDMA_TX_MACSEC_LEVEL = 0, +}; + +static int macsec_fs_tx_roce_create(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_table *ft; + int err; + + if (!mlx5_is_macsec_roce_supported(mdev)) { + mlx5_core_dbg(mdev, "Failed to init RoCE MACsec, capabilities not supported\n"); + return 0; + } + + ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC); + if (!ns) + return -ENOMEM; + + /* Tx RoCE crypto table */ + ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_TX_MACSEC_LEVEL, CRYPTO_NUM_MAXSEC_FTE); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Failed to create MACsec RoCE Tx crypto table err(%d)\n", err); + return err; + } + tx_fs->ft_rdma_tx = ft; + + return 0; +} + +static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_core_dev *mdev = 
macsec_fs->mdev; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_destination dest = {}; + struct mlx5_macsec_tables *tx_tables; + struct mlx5_flow_act flow_act = {}; + struct mlx5_macsec_flow_table *ft_crypto; + struct mlx5_flow_table *flow_table; + struct mlx5_flow_group *flow_group; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + u32 *flow_group_in; + int err; + + ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC); + if (!ns) + return -ENOMEM; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) { + err = -ENOMEM; + goto out_spec; + } + + tx_tables = &tx_fs->tables; + ft_crypto = &tx_tables->ft_crypto; + + /* Tx crypto table */ + ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; + ft_attr.level = TX_CRYPTO_TABLE_LEVEL; + ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE; + + flow_table = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(flow_table)) { + err = PTR_ERR(flow_table); + mlx5_core_err(mdev, "Failed to create MACsec Tx crypto table err(%d)\n", err); + goto out_flow_group; + } + ft_crypto->t = flow_table; + + /* Tx crypto table groups */ + err = macsec_fs_tx_create_crypto_table_groups(ft_crypto); + if (err) { + mlx5_core_err(mdev, + "Failed to create default flow group for MACsec Tx crypto table err(%d)\n", + err); + goto err; + } + + /* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */ + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; + + rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add MACsec TX MKE rule, err=%d\n", err); + goto err; + } + tx_fs->crypto_mke_rule = rule; + + /* Tx crypto table Default miss rule */ + memset(&flow_act, 0, sizeof(flow_act)); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; + rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add MACsec Tx table default miss rule %d\n", err); + goto err; + } + tx_tables->crypto_miss_rule = rule; + + /* Tx check table */ + flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL, + TX_CHECK_TABLE_NUM_FTE); + if (IS_ERR(flow_table)) { + err = PTR_ERR(flow_table); + mlx5_core_err(mdev, "Fail to create MACsec TX check table, err(%d)\n", err); + goto err; + } + tx_tables->ft_check = flow_table; + + /* Tx check table Default miss group/rule */ + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1); + flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in); + if (IS_ERR(flow_group)) { + err = PTR_ERR(flow_group); + mlx5_core_err(mdev, + "Failed to create default flow group for MACsec Tx crypto table err(%d)\n", + err); + goto err; + } + tx_tables->ft_check_group = flow_group; + + /* Tx check table default drop rule */ + memset(&dest, 0, sizeof(struct mlx5_flow_destination)); + memset(&flow_act, 0, sizeof(flow_act)); + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = 
mlx5_fc_id(tx_tables->check_miss_rule_counter); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to added MACsec tx check drop rule, err(%d)\n", err); + goto err; + } + tx_tables->check_miss_rule = rule; + + /* Tx check table rule */ + memset(spec, 0, sizeof(struct mlx5_flow_spec)); + memset(&dest, 0, sizeof(struct mlx5_flow_destination)); + memset(&flow_act, 0, sizeof(flow_act)); + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0); + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + + flow_act.flags = FLOW_ACT_NO_APPEND; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter); + rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add MACsec check rule, err=%d\n", err); + goto err; + } + tx_fs->check_rule = rule; + + err = macsec_fs_tx_roce_create(macsec_fs); + if (err) + goto err; + + kvfree(flow_group_in); + kvfree(spec); + return 0; + +err: + macsec_fs_tx_destroy(macsec_fs); +out_flow_group: + kvfree(flow_group_in); +out_spec: + kvfree(spec); + return err; +} + +static int macsec_fs_tx_ft_get(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_macsec_tables *tx_tables; + int err = 0; + + tx_tables = &tx_fs->tables; + if (tx_tables->refcnt) + goto out; + + err = macsec_fs_tx_create(macsec_fs); + if (err) + return err; + +out: + tx_tables->refcnt++; + return err; +} + +static void macsec_fs_tx_ft_put(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables; + + if (--tx_tables->refcnt) + return; + + macsec_fs_tx_destroy(macsec_fs); +} + +static int macsec_fs_tx_setup_fte(struct mlx5_macsec_fs *macsec_fs, + struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + u32 macsec_obj_id, + u32 *fs_id) +{ + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + int err = 0; + u32 id; + + err = ida_alloc_range(&tx_fs->tx_halloc, 1, + MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES, + GFP_KERNEL); + if (err < 0) + return err; + + id = err; + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; + + /* Metadata match */ + MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a, + MLX5_ETH_WQE_FT_META_MACSEC_MASK); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a, + macsec_fs_set_tx_fs_id(id)); + + *fs_id = id; + flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC; + flow_act->crypto.obj_id = macsec_obj_id; + + mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id); + return 0; +} + +static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx, + char *reformatbf, + size_t *reformat_size) +{ + const struct macsec_secy *secy = ctx->secy; + bool sci_present = macsec_send_sci(secy); + struct mlx5_sectag_header sectag = {}; + const struct macsec_tx_sc *tx_sc; + + tx_sc = &secy->tx_sc; + sectag.ethertype = htons(ETH_P_MACSEC); + + if (sci_present) { + sectag.tci_an |= MACSEC_TCI_SC; + memcpy(§ag.sci, &secy->sci, + 
sizeof(sectag.sci)); + } else { + if (tx_sc->end_station) + sectag.tci_an |= MACSEC_TCI_ES; + if (tx_sc->scb) + sectag.tci_an |= MACSEC_TCI_SCB; + } + + /* With GCM, C/E clear for !encrypt, both set for encrypt */ + if (tx_sc->encrypt) + sectag.tci_an |= MACSEC_TCI_CONFID; + else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) + sectag.tci_an |= MACSEC_TCI_C; + + sectag.tci_an |= tx_sc->encoding_sa; + + *reformat_size = MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0); + + memcpy(reformatbf, §ag, *reformat_size); +} + +static bool macsec_fs_is_macsec_device_empty(struct mlx5_macsec_device *macsec_device) +{ + if (xa_empty(&macsec_device->tx_id_xa) && + xa_empty(&macsec_device->rx_id_xa)) + return true; + + return false; +} + +static void macsec_fs_id_del(struct list_head *macsec_devices_list, u32 fs_id, + void *macdev, struct rhashtable *hash_table, bool is_tx) +{ + const struct rhashtable_params *rhash = (is_tx) ? &rhash_sci : &rhash_fs_id; + struct mlx5_macsec_device *iter, *macsec_device = NULL; + struct mlx5_fs_id *fs_id_found; + struct xarray *fs_id_xa; + + list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) { + if (iter->macdev == macdev) { + macsec_device = iter; + break; + } + } + WARN_ON(!macsec_device); + + fs_id_xa = (is_tx) ? &macsec_device->tx_id_xa : + &macsec_device->rx_id_xa; + xa_lock(fs_id_xa); + fs_id_found = xa_load(fs_id_xa, fs_id); + WARN_ON(!fs_id_found); + + if (!refcount_dec_and_test(&fs_id_found->refcnt)) { + xa_unlock(fs_id_xa); + return; + } + + if (fs_id_found->id) { + /* Make sure ongoing datapath readers sees a valid SA */ + rhashtable_remove_fast(hash_table, &fs_id_found->hash, *rhash); + fs_id_found->id = 0; + } + xa_unlock(fs_id_xa); + + xa_erase(fs_id_xa, fs_id); + + kfree(fs_id_found); + + if (macsec_fs_is_macsec_device_empty(macsec_device)) { + list_del(&macsec_device->macsec_devices_list_entry); + kfree(macsec_device); + } +} + +static int macsec_fs_id_add(struct list_head *macsec_devices_list, u32 fs_id, + void *macdev, struct rhashtable *hash_table, sci_t sci, + bool is_tx) +{ + const struct rhashtable_params *rhash = (is_tx) ? &rhash_sci : &rhash_fs_id; + struct mlx5_macsec_device *iter, *macsec_device = NULL; + struct mlx5_fs_id *fs_id_iter; + struct xarray *fs_id_xa; + int err; + + if (!is_tx) { + rcu_read_lock(); + fs_id_iter = rhashtable_lookup(hash_table, &fs_id, rhash_fs_id); + if (fs_id_iter) { + refcount_inc(&fs_id_iter->refcnt); + rcu_read_unlock(); + return 0; + } + rcu_read_unlock(); + } + + fs_id_iter = kzalloc(sizeof(*fs_id_iter), GFP_KERNEL); + if (!fs_id_iter) + return -ENOMEM; + + list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) { + if (iter->macdev == macdev) { + macsec_device = iter; + break; + } + } + + if (!macsec_device) { /* first time adding a SA to that device */ + macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL); + if (!macsec_device) { + err = -ENOMEM; + goto err_alloc_dev; + } + macsec_device->macdev = macdev; + xa_init(&macsec_device->tx_id_xa); + xa_init(&macsec_device->rx_id_xa); + list_add(&macsec_device->macsec_devices_list_entry, macsec_devices_list); + } + + fs_id_xa = (is_tx) ? 
&macsec_device->tx_id_xa : + &macsec_device->rx_id_xa; + fs_id_iter->id = fs_id; + refcount_set(&fs_id_iter->refcnt, 1); + fs_id_iter->sci = sci; + err = xa_err(xa_store(fs_id_xa, fs_id, fs_id_iter, GFP_KERNEL)); + if (err) + goto err_store_id; + + err = rhashtable_insert_fast(hash_table, &fs_id_iter->hash, *rhash); + if (err) + goto err_hash_insert; + + return 0; + +err_hash_insert: + xa_erase(fs_id_xa, fs_id); +err_store_id: + if (macsec_fs_is_macsec_device_empty(macsec_device)) { + list_del(&macsec_device->macsec_devices_list_entry); + kfree(macsec_device); + } +err_alloc_dev: + kfree(fs_id_iter); + return err; +} + +static void macsec_fs_tx_del_rule(struct mlx5_macsec_fs *macsec_fs, + struct mlx5_macsec_tx_rule *tx_rule, + void *macdev) +{ + macsec_fs_id_del(&macsec_fs->macsec_devices_list, tx_rule->fs_id, macdev, + &macsec_fs->sci_hash, true); + + if (tx_rule->rule) { + mlx5_del_flow_rules(tx_rule->rule); + tx_rule->rule = NULL; + } + + if (tx_rule->pkt_reformat) { + mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat); + tx_rule->pkt_reformat = NULL; + } + + if (tx_rule->fs_id) { + ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id); + tx_rule->fs_id = 0; + } + + kfree(tx_rule); + + macsec_fs_tx_ft_put(macsec_fs); +} + +#define MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES 1 + +static union mlx5_macsec_rule * +macsec_fs_tx_add_rule(struct mlx5_macsec_fs *macsec_fs, + const struct macsec_context *macsec_ctx, + struct mlx5_macsec_rule_attrs *attrs, u32 *fs_id) +{ + char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN]; + struct mlx5_pkt_reformat_params reformat_params = {}; + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + union mlx5_macsec_rule *macsec_rule = NULL; + struct mlx5_flow_destination dest = {}; + struct mlx5_macsec_tables *tx_tables; + struct mlx5_macsec_tx_rule *tx_rule; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + size_t reformat_size; + int err = 0; + + tx_tables = &tx_fs->tables; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return NULL; + + err = macsec_fs_tx_ft_get(macsec_fs); + if (err) + goto out_spec; + + macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL); + if (!macsec_rule) { + macsec_fs_tx_ft_put(macsec_fs); + goto out_spec; + } + + tx_rule = &macsec_rule->tx_rule; + + /* Tx crypto table crypto rule */ + macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size); + + reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC; + reformat_params.size = reformat_size; + reformat_params.data = reformatbf; + + if (is_vlan_dev(macsec_ctx->netdev)) + reformat_params.param_0 = MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES; + + flow_act.pkt_reformat = mlx5_packet_reformat_alloc(mdev, + &reformat_params, + MLX5_FLOW_NAMESPACE_EGRESS_MACSEC); + if (IS_ERR(flow_act.pkt_reformat)) { + err = PTR_ERR(flow_act.pkt_reformat); + mlx5_core_err(mdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err); + goto err; + } + tx_rule->pkt_reformat = flow_act.pkt_reformat; + + err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, fs_id); + if (err) { + mlx5_core_err(mdev, + "Failed to add packet reformat for MACsec TX crypto rule, err=%d\n", + err); + goto err; + } + + tx_rule->fs_id = *fs_id; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + 
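
As a rough, self-contained illustration (not part of the patch): the new lib/macsec_fs.c code above builds the SecTAG that the ADD_MACSEC packet reformat prepends on Tx, via struct mlx5_sectag_header and macsec_fs_tx_create_sectag_header(). The sketch below lays out the same 8-byte tag (16 bytes when the SCI is carried). The TCI bit values and the ETH_P_MACSEC constant are assumptions taken from the IEEE 802.1AE SecTAG format, not copied from mlx5 headers.

```c
/*
 * Illustrative sketch only; mirrors struct mlx5_sectag_header and the
 * reformat-size logic of macsec_fs_tx_create_sectag_header().
 * TCI bit values below are the IEEE 802.1AE layout (assumption).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_P_MACSEC   0x88E5   /* assumption: MACsec ethertype */
#define MACSEC_SCI_LEN 8
#define MACSEC_TCI_SC  0x20     /* SCI present in the SecTAG */
#define MACSEC_TCI_E   0x08     /* encryption (confidentiality) */
#define MACSEC_TCI_C   0x04     /* changed text */

struct sectag_header {          /* same field order as mlx5_sectag_header */
	uint16_t ethertype;     /* big endian on the wire */
	uint8_t  tci_an;        /* TCI flags + 2-bit association number */
	uint8_t  sl;            /* short length, 0 for offloaded frames */
	uint32_t pn;            /* packet number, filled by hardware */
	uint8_t  sci[MACSEC_SCI_LEN]; /* optional, only when TCI.SC is set */
} __attribute__((packed));

int main(void)
{
	struct sectag_header tag = { 0 };
	uint8_t encoding_sa = 1;  /* the secy's encoding SA number */
	int sci_present = 1;      /* real code asks macsec_send_sci(secy) */
	size_t reformat_size;

	/* byte-swap the ethertype for a little-endian host (htons) */
	tag.ethertype = (uint16_t)((ETH_P_MACSEC << 8) | (ETH_P_MACSEC >> 8));
	tag.tci_an |= MACSEC_TCI_E | MACSEC_TCI_C;   /* encrypting SC */
	if (sci_present) {
		tag.tci_an |= MACSEC_TCI_SC;
		memset(tag.sci, 0xAB, MACSEC_SCI_LEN);   /* placeholder SCI */
	}
	tag.tci_an |= (uint8_t)(encoding_sa & 0x3);

	/* 8 bytes without SCI (MLX5_MACSEC_TAG_LEN), 16 bytes with it */
	reformat_size = 8 + (sci_present ? MACSEC_SCI_LEN : 0);
	printf("reformat buffer: %zu bytes, tci_an=0x%02x\n",
	       reformat_size, tag.tci_an);
	return 0;
}
```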
dest.ft = tx_tables->ft_check; + rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add MACsec TX crypto rule, err=%d\n", err); + goto err; + } + tx_rule->rule = rule; + + err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, *fs_id, macsec_ctx->secy->netdev, + &macsec_fs->sci_hash, attrs->sci, true); + if (err) { + mlx5_core_err(mdev, "Failed to save fs_id, err=%d\n", err); + goto err; + } + + goto out_spec; + +err: + macsec_fs_tx_del_rule(macsec_fs, tx_rule, macsec_ctx->secy->netdev); + macsec_rule = NULL; +out_spec: + kvfree(spec); + + return macsec_rule; +} + +static void macsec_fs_tx_cleanup(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_macsec_tables *tx_tables; + + if (!tx_fs) + return; + + tx_tables = &tx_fs->tables; + if (tx_tables->refcnt) { + mlx5_core_err(mdev, + "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n", + tx_tables->refcnt); + return; + } + + ida_destroy(&tx_fs->tx_halloc); + + if (tx_tables->check_miss_rule_counter) { + mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter); + tx_tables->check_miss_rule_counter = NULL; + } + + if (tx_tables->check_rule_counter) { + mlx5_fc_destroy(mdev, tx_tables->check_rule_counter); + tx_tables->check_rule_counter = NULL; + } + + kfree(tx_fs); + macsec_fs->tx_fs = NULL; +} + +static int macsec_fs_tx_init(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_macsec_tables *tx_tables; + struct mlx5_macsec_tx *tx_fs; + struct mlx5_fc *flow_counter; + int err; + + tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL); + if (!tx_fs) + return -ENOMEM; + + tx_tables = &tx_fs->tables; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to create MACsec Tx encrypt flow counter, err(%d)\n", + err); + goto err_encrypt_counter; + } + tx_tables->check_rule_counter = flow_counter; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to create MACsec Tx drop flow counter, err(%d)\n", + err); + goto err_drop_counter; + } + tx_tables->check_miss_rule_counter = flow_counter; + + ida_init(&tx_fs->tx_halloc); + INIT_LIST_HEAD(&macsec_fs->macsec_devices_list); + + macsec_fs->tx_fs = tx_fs; + + return 0; + +err_drop_counter: + mlx5_fc_destroy(mdev, tx_tables->check_rule_counter); + tx_tables->check_rule_counter = NULL; + +err_encrypt_counter: + kfree(tx_fs); + macsec_fs->tx_fs = NULL; + + return err; +} + +static void macsec_fs_rx_roce_miss_destroy(struct mlx5_macsec_miss *miss) +{ + mlx5_del_flow_rules(miss->rule); + mlx5_destroy_flow_group(miss->g); +} + +static void macsec_fs_rdma_rx_destroy(struct mlx5_macsec_rx_roce *roce, struct mlx5_core_dev *mdev) +{ + if (!mlx5_is_macsec_roce_supported(mdev)) + return; + + mlx5_del_flow_rules(roce->nic_miss.rule); + mlx5_del_flow_rules(roce->rule); + mlx5_modify_header_dealloc(mdev, roce->copy_modify_hdr); + mlx5_destroy_flow_group(roce->nic_miss.g); + mlx5_destroy_flow_group(roce->g); + mlx5_destroy_flow_table(roce->ft); + + macsec_fs_rx_roce_miss_destroy(&roce->miss); + mlx5_destroy_flow_table(roce->ft_macsec_op_check); + mlx5_destroy_flow_table(roce->ft_ip_check); +} + +static void macsec_fs_rx_destroy(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_rx *rx_fs = 
macsec_fs->rx_fs; + struct mlx5_macsec_tables *rx_tables; + int i; + + /* Rx check table */ + for (i = 1; i >= 0; --i) { + if (rx_fs->check_rule[i]) { + mlx5_del_flow_rules(rx_fs->check_rule[i]); + rx_fs->check_rule[i] = NULL; + } + + if (rx_fs->check_rule_pkt_reformat[i]) { + mlx5_packet_reformat_dealloc(macsec_fs->mdev, + rx_fs->check_rule_pkt_reformat[i]); + rx_fs->check_rule_pkt_reformat[i] = NULL; + } + } + + rx_tables = &rx_fs->tables; + + if (rx_tables->check_miss_rule) { + mlx5_del_flow_rules(rx_tables->check_miss_rule); + rx_tables->check_miss_rule = NULL; + } + + if (rx_tables->ft_check_group) { + mlx5_destroy_flow_group(rx_tables->ft_check_group); + rx_tables->ft_check_group = NULL; + } + + if (rx_tables->ft_check) { + mlx5_destroy_flow_table(rx_tables->ft_check); + rx_tables->ft_check = NULL; + } + + /* Rx crypto table */ + if (rx_tables->crypto_miss_rule) { + mlx5_del_flow_rules(rx_tables->crypto_miss_rule); + rx_tables->crypto_miss_rule = NULL; + } + + macsec_fs_destroy_flow_table(&rx_tables->ft_crypto); + + macsec_fs_rdma_rx_destroy(&macsec_fs->rx_fs->roce, macsec_fs->mdev); +} + +static int macsec_fs_rx_create_crypto_table_groups(struct mlx5_macsec_flow_table *ft) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int mclen = MLX5_ST_SZ_BYTES(fte_match_param); + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ft->g = kcalloc(RX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) + return -ENOMEM; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + kfree(ft->g); + return -ENOMEM; + } + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + + /* Flow group for SA rule with SCI */ + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS | + MLX5_MATCH_MISC_PARAMETERS_5); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + + MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2); + MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Flow group for SA rule without SCI */ + memset(in, 0, inlen); + memset(mc, 0, mclen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS | + MLX5_MATCH_MISC_PARAMETERS_5); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + + MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Flow Group for l2 traps */ + memset(in, 0, inlen); + memset(mc, 0, mclen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + 
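
As a minimal stand-alone sketch (not from the patch): macsec_fs_tx_ft_get()/macsec_fs_tx_ft_put() shown earlier, and their Rx counterparts, build the steering tables only when the first SA needs them and tear them down when the last reference drops. The names below are made up purely for illustration of that get/put pattern.

```c
/* Sketch of the create-on-first-get / destroy-on-last-put pattern. */
#include <stdio.h>

struct demo_tables {
	unsigned int refcnt;
	int created;             /* stands in for the flow tables/groups/rules */
};

static int demo_tables_create(struct demo_tables *t)
{
	t->created = 1;          /* real code builds crypto + check tables here */
	return 0;
}

static void demo_tables_destroy(struct demo_tables *t)
{
	t->created = 0;          /* real code deletes rules, groups and tables */
}

/* First caller pays for table creation; later callers only take a reference. */
static int demo_tables_get(struct demo_tables *t)
{
	int err;

	if (t->refcnt)
		goto out;

	err = demo_tables_create(t);
	if (err)
		return err;
out:
	t->refcnt++;
	return 0;
}

/* Last put tears everything down again. */
static void demo_tables_put(struct demo_tables *t)
{
	if (--t->refcnt)
		return;

	demo_tables_destroy(t);
}

int main(void)
{
	struct demo_tables t = { 0 };

	demo_tables_get(&t);      /* first SA: tables are created */
	demo_tables_get(&t);      /* second SA: refcount only */
	demo_tables_put(&t);
	demo_tables_put(&t);      /* last SA gone: tables destroyed */
	printf("created=%d refcnt=%u\n", t.created, t.refcnt);
	return 0;
}
```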
ft->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + kvfree(in); + + return err; +} + +static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs, + struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_flow_spec *spec, + int reformat_param_size) +{ + int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 0 : 1; + u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI]; + struct mlx5_pkt_reformat_params reformat_params = {}; + struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_flow_destination roce_dest[2]; + struct mlx5_macsec_tables *rx_tables; + struct mlx5_flow_handle *rule; + int err = 0, dstn = 0; + + rx_tables = &rx_fs->tables; + + /* Rx check table decap 16B rule */ + memset(dest, 0, sizeof(*dest)); + memset(flow_act, 0, sizeof(*flow_act)); + memset(spec, 0, sizeof(*spec)); + + reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC; + reformat_params.size = reformat_param_size; + reformat_params.data = mlx5_reformat_buf; + flow_act->pkt_reformat = mlx5_packet_reformat_alloc(mdev, + &reformat_params, + MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC); + if (IS_ERR(flow_act->pkt_reformat)) { + err = PTR_ERR(flow_act->pkt_reformat); + mlx5_core_err(mdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err); + return err; + } + rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat; + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + /* MACsec syndrome match */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0); + /* ASO return reg syndrome match */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5; + /* Sectag TCI SC present bit*/ + MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + + if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + + flow_act->flags = FLOW_ACT_NO_APPEND; + + if (rx_fs->roce.ft) { + flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + roce_dest[dstn].ft = rx_fs->roce.ft; + dstn++; + } else { + flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; + } + + flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + roce_dest[dstn].counter_id = mlx5_fc_id(rx_tables->check_rule_counter); + rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1); + + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add MACsec Rx check rule, err=%d\n", err); + return err; + } + + rx_fs->check_rule[rule_index] = rule; + + return 0; +} + +static int macsec_fs_rx_roce_miss_create(struct mlx5_core_dev *mdev, + struct mlx5_macsec_rx_roce *roce) +{ + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_group *flow_group; + struct 
mlx5_flow_handle *rule; + u32 *flow_group_in; + int err; + + flow_group_in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + /* IP check ft has no miss rule since we use default miss action which is go to next PRIO */ + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, + roce->ft_macsec_op_check->max_fte - 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, + roce->ft_macsec_op_check->max_fte - 1); + flow_group = mlx5_create_flow_group(roce->ft_macsec_op_check, flow_group_in); + if (IS_ERR(flow_group)) { + err = PTR_ERR(flow_group); + mlx5_core_err(mdev, + "Failed to create miss flow group for MACsec RoCE operation check table err(%d)\n", + err); + goto err_macsec_op_miss_group; + } + roce->miss.g = flow_group; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + rule = mlx5_add_flow_rules(roce->ft_macsec_op_check, NULL, &flow_act, NULL, 0); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add miss rule to MACsec RoCE operation check table err(%d)\n", + err); + goto err_macsec_op_rule; + } + roce->miss.rule = rule; + + kvfree(flow_group_in); + return 0; + +err_macsec_op_rule: + mlx5_destroy_flow_group(roce->miss.g); +err_macsec_op_miss_group: + kvfree(flow_group_in); + return err; +} + +#define MLX5_RX_ROCE_GROUP_SIZE BIT(0) + +static int macsec_fs_rx_roce_jump_to_rdma_groups_create(struct mlx5_core_dev *mdev, + struct mlx5_macsec_rx_roce *roce) +{ + struct mlx5_flow_group *g; + void *outer_headers_c; + int ix = 0; + u32 *in; + int err; + u8 *mc; + + in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport); + + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_RX_ROCE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + g = mlx5_create_flow_group(roce->ft, in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + mlx5_core_err(mdev, "Failed to create main flow group for MACsec RoCE NIC UDP table err(%d)\n", + err); + goto err_udp_group; + } + roce->g = g; + + memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in)); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_RX_ROCE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + g = mlx5_create_flow_group(roce->ft, in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + mlx5_core_err(mdev, "Failed to create miss flow group for MACsec RoCE NIC UDP table err(%d)\n", + err); + goto err_udp_miss_group; + } + roce->nic_miss.g = g; + + kvfree(in); + return 0; + +err_udp_miss_group: + mlx5_destroy_flow_group(roce->g); +err_udp_group: + kvfree(in); + return err; +} + +static int macsec_fs_rx_roce_jump_to_rdma_rules_create(struct mlx5_macsec_fs *macsec_fs, + struct mlx5_macsec_rx_roce *roce) +{ + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_flow_destination dst = {}; + struct mlx5_modify_hdr *modify_hdr; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, 
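/*
 * The copy action built a few lines below is what carries the per-SA fs_id
 * across steering domains: the Rx crypto rule writes
 * macsec_fs_set_rx_fs_id(fs_id) into metadata REG_B, this ROCE_V2_UDP_DPORT
 * rule copies all 32 bits of REG_B into REG_C_5, and the RDMA-side op-check
 * rule later matches misc_parameters_2.metadata_reg_c_5 against the same
 * encoded id. A minimal sketch of that final match, assuming a zeroed spec:
 *
 *	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 *			 misc_parameters_2.metadata_reg_c_5);
 *	MLX5_SET(fte_match_param, spec->match_value,
 *		 misc_parameters_2.metadata_reg_c_5,
 *		 macsec_fs_set_rx_fs_id(fs_id));
 */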
spec->match_criteria, outer_headers.ip_protocol); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, ROCE_V2_UDP_DPORT); + + MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY); + MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); + MLX5_SET(copy_action_in, action, src_offset, 0); + MLX5_SET(copy_action_in, action, length, 32); + MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_5); + MLX5_SET(copy_action_in, action, dst_offset, 0); + + modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC, + 1, action); + + if (IS_ERR(modify_hdr)) { + err = PTR_ERR(modify_hdr); + mlx5_core_err(mdev, + "Failed to alloc macsec copy modify_header_id err(%d)\n", err); + goto err_alloc_hdr; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_act.modify_hdr = modify_hdr; + dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; + dst.ft = roce->ft_ip_check; + rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add rule to MACsec RoCE NIC UDP table err(%d)\n", + err); + goto err_add_rule; + } + roce->rule = rule; + roce->copy_modify_hdr = modify_hdr; + + memset(&flow_act, 0, sizeof(flow_act)); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; + rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, NULL, 0); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add miss rule to MACsec RoCE NIC UDP table err(%d)\n", + err); + goto err_add_rule2; + } + roce->nic_miss.rule = rule; + + kvfree(spec); + return 0; + +err_add_rule2: + mlx5_del_flow_rules(roce->rule); +err_add_rule: + mlx5_modify_header_dealloc(macsec_fs->mdev, modify_hdr); +err_alloc_hdr: + kvfree(spec); + return err; +} + +static int macsec_fs_rx_roce_jump_to_rdma_create(struct mlx5_macsec_fs *macsec_fs, + struct mlx5_macsec_rx_roce *roce) +{ + int err; + + err = macsec_fs_rx_roce_jump_to_rdma_groups_create(macsec_fs->mdev, roce); + if (err) + return err; + + err = macsec_fs_rx_roce_jump_to_rdma_rules_create(macsec_fs, roce); + if (err) + goto err; + + return 0; +err: + mlx5_destroy_flow_group(roce->nic_miss.g); + mlx5_destroy_flow_group(roce->g); + return err; +} + +static int macsec_fs_rx_roce_create(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_table *ft; + int err = 0; + + if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev)) { + mlx5_core_dbg(mdev, "Failed to init RoCE MACsec, capabilities not supported\n"); + return 0; + } + + ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC); + if (!ns) + return -ENOMEM; + + ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_RX_ROCE_IP_TABLE_LEVEL, + CRYPTO_NUM_MAXSEC_FTE); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, + "Failed to create MACsec IP check RoCE table err(%d)\n", err); + return err; + } + rx_fs->roce.ft_ip_check = ft; + + ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_RX_ROCE_MACSEC_OP_TABLE_LEVEL, + CRYPTO_NUM_MAXSEC_FTE); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, + "Failed 
to create MACsec operation check RoCE table err(%d)\n", + err); + goto err_macsec_op; + } + rx_fs->roce.ft_macsec_op_check = ft; + + err = macsec_fs_rx_roce_miss_create(mdev, &rx_fs->roce); + if (err) + goto err_miss_create; + + ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC); + if (!ns) { + err = -EOPNOTSUPP; + goto err_ns; + } + + ft_attr.level = RX_ROCE_TABLE_LEVEL; + ft_attr.max_fte = RX_ROCE_TABLE_NUM_FTE; + ft = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, + "Failed to create MACsec jump to RX RoCE, NIC table err(%d)\n", err); + goto err_ns; + } + rx_fs->roce.ft = ft; + + err = macsec_fs_rx_roce_jump_to_rdma_create(macsec_fs, &rx_fs->roce); + if (err) + goto err_udp_ft; + + return 0; + +err_udp_ft: + mlx5_destroy_flow_table(rx_fs->roce.ft); +err_ns: + macsec_fs_rx_roce_miss_destroy(&rx_fs->roce.miss); +err_miss_create: + mlx5_destroy_flow_table(rx_fs->roce.ft_macsec_op_check); +err_macsec_op: + mlx5_destroy_flow_table(rx_fs->roce.ft_ip_check); + return err; +} + +static int macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_macsec_flow_table *ft_crypto; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_destination dest = {}; + struct mlx5_macsec_tables *rx_tables; + struct mlx5_flow_table *flow_table; + struct mlx5_flow_group *flow_group; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + u32 *flow_group_in; + int err; + + ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC); + if (!ns) + return -ENOMEM; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) { + err = -ENOMEM; + goto free_spec; + } + + rx_tables = &rx_fs->tables; + ft_crypto = &rx_tables->ft_crypto; + + err = macsec_fs_rx_roce_create(macsec_fs); + if (err) + goto out_flow_group; + + /* Rx crypto table */ + ft_attr.level = RX_CRYPTO_TABLE_LEVEL; + ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE; + + flow_table = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(flow_table)) { + err = PTR_ERR(flow_table); + mlx5_core_err(mdev, "Failed to create MACsec Rx crypto table err(%d)\n", err); + goto err; + } + ft_crypto->t = flow_table; + + /* Rx crypto table groups */ + err = macsec_fs_rx_create_crypto_table_groups(ft_crypto); + if (err) { + mlx5_core_err(mdev, + "Failed to create default flow group for MACsec Tx crypto table err(%d)\n", + err); + goto err; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; + rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add MACsec Rx crypto table default miss rule %d\n", + err); + goto err; + } + rx_tables->crypto_miss_rule = rule; + + /* Rx check table */ + flow_table = macsec_fs_auto_group_table_create(ns, + MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT, + RX_CHECK_TABLE_LEVEL, + RX_CHECK_TABLE_NUM_FTE); + if (IS_ERR(flow_table)) { + err = PTR_ERR(flow_table); + mlx5_core_err(mdev, "Fail to create MACsec RX check table, err(%d)\n", err); + goto err; + } + rx_tables->ft_check = flow_table; + + /* Rx check table Default miss group/rule */ + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte 
- 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1); + flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in); + if (IS_ERR(flow_group)) { + err = PTR_ERR(flow_group); + mlx5_core_err(mdev, + "Failed to create default flow group for MACsec Rx check table err(%d)\n", + err); + goto err; + } + rx_tables->ft_check_group = flow_group; + + /* Rx check table default drop rule */ + memset(&flow_act, 0, sizeof(flow_act)); + + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to added MACsec Rx check drop rule, err(%d)\n", err); + goto err; + } + rx_tables->check_miss_rule = rule; + + /* Rx check table decap rules */ + err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec, + MLX5_SECTAG_HEADER_SIZE_WITH_SCI); + if (err) + goto err; + + err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec, + MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI); + if (err) + goto err; + + goto out_flow_group; + +err: + macsec_fs_rx_destroy(macsec_fs); +out_flow_group: + kvfree(flow_group_in); +free_spec: + kvfree(spec); + return err; +} + +static int macsec_fs_rx_ft_get(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables; + int err = 0; + + if (rx_tables->refcnt) + goto out; + + err = macsec_fs_rx_create(macsec_fs); + if (err) + return err; + +out: + rx_tables->refcnt++; + return err; +} + +static void macsec_fs_rx_ft_put(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables; + + if (--rx_tables->refcnt) + return; + + macsec_fs_rx_destroy(macsec_fs); +} + +static void macsec_fs_rx_del_rule(struct mlx5_macsec_fs *macsec_fs, + struct mlx5_macsec_rx_rule *rx_rule, + void *macdev, u32 fs_id) +{ + int i; + + macsec_fs_id_del(&macsec_fs->macsec_devices_list, fs_id, macdev, + &macsec_fs->fs_id_hash, false); + + for (i = 0; i < RX_NUM_OF_RULES_PER_SA; ++i) { + if (rx_rule->rule[i]) { + mlx5_del_flow_rules(rx_rule->rule[i]); + rx_rule->rule[i] = NULL; + } + } + + if (rx_rule->meta_modhdr) { + mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr); + rx_rule->meta_modhdr = NULL; + } + + kfree(rx_rule); + + macsec_fs_rx_ft_put(macsec_fs); +} + +static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + struct mlx5_macsec_rule_attrs *attrs, + bool sci_present) +{ + u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num; + struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto; + __be32 *sci_p = (__be32 *)(&attrs->sci); + + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + + /* MACsec ethertype */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5; + + /* Sectag AN + TCI SC present bit*/ + MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0, + tci_an << 
MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + + if (sci_present) { + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_5.macsec_tag_2); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2, + be32_to_cpu(sci_p[0])); + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_5.macsec_tag_3); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3, + be32_to_cpu(sci_p[1])); + } else { + /* When SCI isn't present in the Sectag, need to match the source */ + /* MAC address only if the SCI contains the default MACsec PORT */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16), + sci_p, ETH_ALEN); + } + + crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC; + crypto_params->obj_id = attrs->macsec_obj_id; +} + +static union mlx5_macsec_rule * +macsec_fs_rx_add_rule(struct mlx5_macsec_fs *macsec_fs, + const struct macsec_context *macsec_ctx, + struct mlx5_macsec_rule_attrs *attrs, + u32 fs_id) +{ + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + union mlx5_macsec_rule *macsec_rule = NULL; + struct mlx5_modify_hdr *modify_hdr = NULL; + struct mlx5_macsec_flow_table *ft_crypto; + struct mlx5_flow_destination dest = {}; + struct mlx5_macsec_tables *rx_tables; + struct mlx5_macsec_rx_rule *rx_rule; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err = 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return NULL; + + err = macsec_fs_rx_ft_get(macsec_fs); + if (err) + goto out_spec; + + macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL); + if (!macsec_rule) { + macsec_fs_rx_ft_put(macsec_fs); + goto out_spec; + } + + rx_rule = &macsec_rule->rx_rule; + rx_tables = &rx_fs->tables; + ft_crypto = &rx_tables->ft_crypto; + + /* Set bit[31 - 30] macsec marker - 0x01 */ + /* Set bit[15-0] fs id */ + MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); + MLX5_SET(set_action_in, action, data, macsec_fs_set_rx_fs_id(fs_id)); + MLX5_SET(set_action_in, action, offset, 0); + MLX5_SET(set_action_in, action, length, 32); + + modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC, + 1, action); + if (IS_ERR(modify_hdr)) { + err = PTR_ERR(modify_hdr); + mlx5_core_err(mdev, "Fail to alloc MACsec set modify_header_id err=%d\n", err); + modify_hdr = NULL; + goto err; + } + rx_rule->meta_modhdr = modify_hdr; + + /* Rx crypto table with SCI rule */ + macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true); + + flow_act.modify_hdr = modify_hdr; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT | + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = rx_tables->ft_check; + rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add SA with SCI rule to Rx crypto rule, err=%d\n", + err); + goto err; + } + rx_rule->rule[0] = rule; + + /* Rx crypto table without SCI rule */ + if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == 
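/*
 * macsec_fs_rx_setup_fte() above encodes the SecTAG match as
 * tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | assoc_num,
 * so the first crypto rule only hits frames that carry an explicit SCI in
 * macsec_tag_2/3. The rule added under the condition below handles the
 * SCI-less SecTAG format: as the comment in setup_fte notes, that form is
 * only used when the SCI is built from the source MAC plus the default
 * MACsec port, hence the check against MACSEC_PORT_ES here and the
 * smac-based match instead of macsec_tag_2/3.
 */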
ntohs(MACSEC_PORT_ES)) { + memset(spec, 0, sizeof(struct mlx5_flow_spec)); + memset(&dest, 0, sizeof(struct mlx5_flow_destination)); + memset(&flow_act, 0, sizeof(flow_act)); + + macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false); + + flow_act.modify_hdr = modify_hdr; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT | + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = rx_tables->ft_check; + rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add SA without SCI rule to Rx crypto rule, err=%d\n", + err); + goto err; + } + rx_rule->rule[1] = rule; + } + + err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, fs_id, macsec_ctx->secy->netdev, + &macsec_fs->fs_id_hash, attrs->sci, false); + if (err) { + mlx5_core_err(mdev, "Failed to save fs_id, err=%d\n", err); + goto err; + } + + kvfree(spec); + return macsec_rule; + +err: + macsec_fs_rx_del_rule(macsec_fs, rx_rule, macsec_ctx->secy->netdev, fs_id); + macsec_rule = NULL; +out_spec: + kvfree(spec); + return macsec_rule; +} + +static int macsec_fs_rx_init(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_macsec_tables *rx_tables; + struct mlx5_macsec_rx *rx_fs; + struct mlx5_fc *flow_counter; + int err; + + rx_fs = kzalloc(sizeof(*rx_fs), GFP_KERNEL); + if (!rx_fs) + return -ENOMEM; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to create MACsec Rx encrypt flow counter, err(%d)\n", + err); + goto err_encrypt_counter; + } + + rx_tables = &rx_fs->tables; + rx_tables->check_rule_counter = flow_counter; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to create MACsec Rx drop flow counter, err(%d)\n", + err); + goto err_drop_counter; + } + rx_tables->check_miss_rule_counter = flow_counter; + + macsec_fs->rx_fs = rx_fs; + + return 0; + +err_drop_counter: + mlx5_fc_destroy(mdev, rx_tables->check_rule_counter); + rx_tables->check_rule_counter = NULL; + +err_encrypt_counter: + kfree(rx_fs); + macsec_fs->rx_fs = NULL; + + return err; +} + +static void macsec_fs_rx_cleanup(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_macsec_tables *rx_tables; + + if (!rx_fs) + return; + + rx_tables = &rx_fs->tables; + + if (rx_tables->refcnt) { + mlx5_core_err(mdev, + "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n", + rx_tables->refcnt); + return; + } + + if (rx_tables->check_miss_rule_counter) { + mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter); + rx_tables->check_miss_rule_counter = NULL; + } + + if (rx_tables->check_rule_counter) { + mlx5_fc_destroy(mdev, rx_tables->check_rule_counter); + rx_tables->check_rule_counter = NULL; + } + + kfree(rx_fs); + macsec_fs->rx_fs = NULL; +} + +static void set_ipaddr_spec_v4(struct sockaddr_in *in, struct mlx5_flow_spec *spec, bool is_dst_ip) +{ + MLX5_SET(fte_match_param, spec->match_value, + outer_headers.ip_version, MLX5_FS_IPV4_VERSION); + + if (is_dst_ip) { + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 
+ &in->sin_addr.s_addr, 4); + } else { + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), + &in->sin_addr.s_addr, 4); + } +} + +static void set_ipaddr_spec_v6(struct sockaddr_in6 *in6, struct mlx5_flow_spec *spec, + bool is_dst_ip) +{ + MLX5_SET(fte_match_param, spec->match_value, + outer_headers.ip_version, MLX5_FS_IPV6_VERSION); + + if (is_dst_ip) { + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + 0xff, 16); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &in6->sin6_addr, 16); + } else { + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), + 0xff, 16); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), + &in6->sin6_addr, 16); + } +} + +static void set_ipaddr_spec(const struct sockaddr *addr, + struct mlx5_flow_spec *spec, bool is_dst_ip) +{ + struct sockaddr_in6 *in6; + + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.ip_version); + + if (addr->sa_family == AF_INET) { + struct sockaddr_in *in = (struct sockaddr_in *)addr; + + set_ipaddr_spec_v4(in, spec, is_dst_ip); + return; + } + + in6 = (struct sockaddr_in6 *)addr; + set_ipaddr_spec_v6(in6, spec, is_dst_ip); +} + +static void macsec_fs_del_roce_rule_rx(struct mlx5_roce_macsec_rx_rule *rx_rule) +{ + mlx5_del_flow_rules(rx_rule->op); + mlx5_del_flow_rules(rx_rule->ip); + list_del(&rx_rule->entry); + kfree(rx_rule); +} + +static void macsec_fs_del_roce_rules_rx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, + struct list_head *rx_rules_list) +{ + struct mlx5_roce_macsec_rx_rule *rx_rule, *next; + + if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev)) + return; + + list_for_each_entry_safe(rx_rule, next, rx_rules_list, entry) { + if (rx_rule->fs_id == fs_id) + macsec_fs_del_roce_rule_rx(rx_rule); + } +} + +static void macsec_fs_del_roce_rule_tx(struct mlx5_core_dev *mdev, + struct mlx5_roce_macsec_tx_rule *tx_rule) +{ + mlx5_del_flow_rules(tx_rule->rule); + mlx5_modify_header_dealloc(mdev, tx_rule->meta_modhdr); + list_del(&tx_rule->entry); + kfree(tx_rule); +} + +static void macsec_fs_del_roce_rules_tx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, + struct list_head *tx_rules_list) +{ + struct mlx5_roce_macsec_tx_rule *tx_rule, *next; + + if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev)) + return; + + list_for_each_entry_safe(tx_rule, next, tx_rules_list, entry) { + if (tx_rule->fs_id == fs_id) + macsec_fs_del_roce_rule_tx(macsec_fs->mdev, tx_rule); + } +} + +void mlx5_macsec_fs_get_stats_fill(struct mlx5_macsec_fs *macsec_fs, void *macsec_stats) +{ + struct mlx5_macsec_stats *stats = (struct mlx5_macsec_stats *)macsec_stats; + struct mlx5_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables; + struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + + if (tx_tables->check_rule_counter) + mlx5_fc_query(mdev, tx_tables->check_rule_counter, + &stats->macsec_tx_pkts, &stats->macsec_tx_bytes); + + if (tx_tables->check_miss_rule_counter) + mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter, + &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop); + + if 
(rx_tables->check_rule_counter) + mlx5_fc_query(mdev, rx_tables->check_rule_counter, + &stats->macsec_rx_pkts, &stats->macsec_rx_bytes); + + if (rx_tables->check_miss_rule_counter) + mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter, + &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop); +} + +struct mlx5_macsec_stats *mlx5_macsec_fs_get_stats(struct mlx5_macsec_fs *macsec_fs) +{ + if (!macsec_fs) + return NULL; + + return &macsec_fs->stats; +} + +u32 mlx5_macsec_fs_get_fs_id_from_hashtable(struct mlx5_macsec_fs *macsec_fs, sci_t *sci) +{ + struct mlx5_fs_id *mlx5_fs_id; + u32 fs_id = 0; + + rcu_read_lock(); + mlx5_fs_id = rhashtable_lookup(&macsec_fs->sci_hash, sci, rhash_sci); + if (mlx5_fs_id) + fs_id = mlx5_fs_id->id; + rcu_read_unlock(); + + return fs_id; +} + +union mlx5_macsec_rule * +mlx5_macsec_fs_add_rule(struct mlx5_macsec_fs *macsec_fs, + const struct macsec_context *macsec_ctx, + struct mlx5_macsec_rule_attrs *attrs, + u32 *sa_fs_id) +{ + struct mlx5_macsec_event_data data = {.macsec_fs = macsec_fs, + .macdev = macsec_ctx->secy->netdev, + .is_tx = + (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) + }; + union mlx5_macsec_rule *macsec_rule; + u32 tx_new_fs_id; + + macsec_rule = (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ? + macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, &tx_new_fs_id) : + macsec_fs_rx_add_rule(macsec_fs, macsec_ctx, attrs, *sa_fs_id); + + data.fs_id = (data.is_tx) ? tx_new_fs_id : *sa_fs_id; + if (macsec_rule) + blocking_notifier_call_chain(&macsec_fs->mdev->macsec_nh, + MLX5_DRIVER_EVENT_MACSEC_SA_ADDED, + &data); + + return macsec_rule; +} + +void mlx5_macsec_fs_del_rule(struct mlx5_macsec_fs *macsec_fs, + union mlx5_macsec_rule *macsec_rule, + int action, void *macdev, u32 sa_fs_id) +{ + struct mlx5_macsec_event_data data = {.macsec_fs = macsec_fs, + .macdev = macdev, + .is_tx = (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) + }; + + data.fs_id = (data.is_tx) ? macsec_rule->tx_rule.fs_id : sa_fs_id; + blocking_notifier_call_chain(&macsec_fs->mdev->macsec_nh, + MLX5_DRIVER_EVENT_MACSEC_SA_DELETED, + &data); + + (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ? 
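/*
 * Both directions raise a blocking notifier on mdev->macsec_nh
 * (MLX5_DRIVER_EVENT_MACSEC_SA_ADDED after a rule is installed,
 * MLX5_DRIVER_EVENT_MACSEC_SA_DELETED before one is torn down) carrying a
 * struct mlx5_macsec_event_data payload, so RDMA consumers can mirror the
 * SA state. A hypothetical listener sketch, with names assumed purely for
 * illustration:
 *
 *	static int macsec_sa_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct mlx5_macsec_event_data *data = ptr;
 *
 *		handle_sa(data->macdev, data->fs_id, data->is_tx);
 *		return NOTIFY_OK;
 *	}
 */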
+ macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule, macdev) : + macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule, macdev, sa_fs_id); +} + +static int mlx5_macsec_fs_add_roce_rule_rx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, u16 gid_idx, + const struct sockaddr *addr, + struct list_head *rx_rules_list) +{ + struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct mlx5_roce_macsec_rx_rule *rx_rule; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *new_rule; + struct mlx5_flow_spec *spec; + int err = 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + rx_rule = kzalloc(sizeof(*rx_rule), GFP_KERNEL); + if (!rx_rule) { + err = -ENOMEM; + goto out; + } + + set_ipaddr_spec(addr, spec, true); + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dest.ft = rx_fs->roce.ft_macsec_op_check; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + new_rule = mlx5_add_flow_rules(rx_fs->roce.ft_ip_check, spec, &flow_act, + &dest, 1); + if (IS_ERR(new_rule)) { + err = PTR_ERR(new_rule); + goto ip_rule_err; + } + rx_rule->ip = new_rule; + + memset(&flow_act, 0, sizeof(flow_act)); + memset(spec, 0, sizeof(*spec)); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_5); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_5, + macsec_fs_set_rx_fs_id(fs_id)); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; + new_rule = mlx5_add_flow_rules(rx_fs->roce.ft_macsec_op_check, spec, &flow_act, + NULL, 0); + if (IS_ERR(new_rule)) { + err = PTR_ERR(new_rule); + goto op_rule_err; + } + rx_rule->op = new_rule; + rx_rule->gid_idx = gid_idx; + rx_rule->fs_id = fs_id; + list_add_tail(&rx_rule->entry, rx_rules_list); + + goto out; + +op_rule_err: + mlx5_del_flow_rules(rx_rule->ip); + rx_rule->ip = NULL; +ip_rule_err: + kfree(rx_rule); +out: + kvfree(spec); + return err; +} + +static int mlx5_macsec_fs_add_roce_rule_tx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, u16 gid_idx, + const struct sockaddr *addr, + struct list_head *tx_rules_list) +{ + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_modify_hdr *modify_hdr = NULL; + struct mlx5_roce_macsec_tx_rule *tx_rule; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *new_rule; + struct mlx5_flow_spec *spec; + int err = 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + tx_rule = kzalloc(sizeof(*tx_rule), GFP_KERNEL); + if (!tx_rule) { + err = -ENOMEM; + goto out; + } + + set_ipaddr_spec(addr, spec, false); + + MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_A); + MLX5_SET(set_action_in, action, data, macsec_fs_set_tx_fs_id(fs_id)); + MLX5_SET(set_action_in, action, offset, 0); + MLX5_SET(set_action_in, action, length, 32); + + modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC, + 1, action); + if (IS_ERR(modify_hdr)) { + err = PTR_ERR(modify_hdr); + mlx5_core_err(mdev, "Fail to alloc ROCE MACsec set modify_header_id err=%d\n", + err); + modify_hdr = NULL; + goto modify_hdr_err; + } + tx_rule->meta_modhdr = modify_hdr; + + flow_act.modify_hdr = modify_hdr; + flow_act.action = 
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dest.ft = tx_fs->tables.ft_crypto.t;
+ new_rule = mlx5_add_flow_rules(tx_fs->ft_rdma_tx, spec, &flow_act, &dest, 1);
+ if (IS_ERR(new_rule)) {
+ err = PTR_ERR(new_rule);
+ mlx5_core_err(mdev, "Failed to add ROCE TX rule, err=%d\n", err);
+ goto rule_err;
+ }
+ tx_rule->rule = new_rule;
+ tx_rule->gid_idx = gid_idx;
+ tx_rule->fs_id = fs_id;
+ list_add_tail(&tx_rule->entry, tx_rules_list);
+
+ goto out;
+
+rule_err:
+ mlx5_modify_header_dealloc(mdev, tx_rule->meta_modhdr);
+modify_hdr_err:
+ kfree(tx_rule);
+out:
+ kvfree(spec);
+ return err;
+}
+
+void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list)
+{
+ struct mlx5_roce_macsec_rx_rule *rx_rule, *next_rx;
+ struct mlx5_roce_macsec_tx_rule *tx_rule, *next_tx;
+
+ list_for_each_entry_safe(tx_rule, next_tx, tx_rules_list, entry) {
+ if (tx_rule->gid_idx == gid_idx)
+ macsec_fs_del_roce_rule_tx(macsec_fs->mdev, tx_rule);
+ }
+
+ list_for_each_entry_safe(rx_rule, next_rx, rx_rules_list, entry) {
+ if (rx_rule->gid_idx == gid_idx)
+ macsec_fs_del_roce_rule_rx(rx_rule);
+ }
+}
+EXPORT_SYMBOL_GPL(mlx5_macsec_del_roce_rule);
+
+int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_device *iter, *macsec_device = NULL;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_fs_id *fs_id_iter;
+ unsigned long index = 0;
+ int err;
+
+ list_for_each_entry(iter, &macsec_fs->macsec_devices_list, macsec_devices_list_entry) {
+ if (iter->macdev == macdev) {
+ macsec_device = iter;
+ break;
+ }
+ }
+
+ if (!macsec_device)
+ return 0;
+
+ xa_for_each(&macsec_device->tx_id_xa, index, fs_id_iter) {
+ err = mlx5_macsec_fs_add_roce_rule_tx(macsec_fs, fs_id_iter->id, gid_idx, addr,
+ tx_rules_list);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to add roce TX rule\n");
+ goto out;
+ }
+ }
+
+ index = 0;
+ xa_for_each(&macsec_device->rx_id_xa, index, fs_id_iter) {
+ err = mlx5_macsec_fs_add_roce_rule_rx(macsec_fs, fs_id_iter->id, gid_idx, addr,
+ rx_rules_list);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to add roce RX rule\n");
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ mlx5_macsec_del_roce_rule(gid_idx, macsec_fs, tx_rules_list, rx_rules_list);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_macsec_add_roce_rule);
+
+void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs, bool is_tx)
+{
+ (is_tx) ?
+ mlx5_macsec_fs_add_roce_rule_tx(macsec_fs, fs_id, gid_idx, addr,
+ tx_rules_list) :
+ mlx5_macsec_fs_add_roce_rule_rx(macsec_fs, fs_id, gid_idx, addr,
+ rx_rules_list);
+}
+EXPORT_SYMBOL_GPL(mlx5_macsec_add_roce_sa_rules);
+
+void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list, bool is_tx)
+{
+ (is_tx) ?
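/*
 * mlx5_macsec_add_roce_rule() above replays every fs_id stored in the
 * device's tx_id_xa/rx_id_xa xarrays when a new GID shows up, and
 * mlx5_macsec_del_roce_rule() walks the per-GID lists and drops whatever
 * matches gid_idx; the *_sa_rules() helpers below cover the opposite case,
 * where a single SA is added or removed while GIDs already exist. A rough
 * caller sketch from the RDMA side, with the surrounding names assumed
 * rather than taken from this patch:
 *
 *	err = mlx5_macsec_add_roce_rule(ndev, (struct sockaddr *)&gid_addr,
 *					gid_index, &port->tx_rules,
 *					&port->rx_rules, macsec_fs);
 */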
+ macsec_fs_del_roce_rules_tx(macsec_fs, fs_id, tx_rules_list) :
+ macsec_fs_del_roce_rules_rx(macsec_fs, fs_id, rx_rules_list);
+}
+EXPORT_SYMBOL_GPL(mlx5_macsec_del_roce_sa_rules);
+
+void mlx5_macsec_fs_cleanup(struct mlx5_macsec_fs *macsec_fs)
+{
+ macsec_fs_rx_cleanup(macsec_fs);
+ macsec_fs_tx_cleanup(macsec_fs);
+ rhashtable_destroy(&macsec_fs->fs_id_hash);
+ rhashtable_destroy(&macsec_fs->sci_hash);
+ kfree(macsec_fs);
+}
+
+struct mlx5_macsec_fs *
+mlx5_macsec_fs_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_macsec_fs *macsec_fs;
+ int err;
+
+ macsec_fs = kzalloc(sizeof(*macsec_fs), GFP_KERNEL);
+ if (!macsec_fs)
+ return NULL;
+
+ macsec_fs->mdev = mdev;
+
+ err = rhashtable_init(&macsec_fs->sci_hash, &rhash_sci);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
+ err);
+ goto err_hash;
+ }
+
+ err = rhashtable_init(&macsec_fs->fs_id_hash, &rhash_fs_id);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init FS_ID hash table, err=%d\n",
+ err);
+ goto sci_hash_cleanup;
+ }
+
+ err = macsec_fs_tx_init(macsec_fs);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
+ goto fs_id_hash_cleanup;
+ }
+
+ err = macsec_fs_rx_init(macsec_fs);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init rx_fs, err=%d\n", err);
+ goto tx_cleanup;
+ }
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&mdev->macsec_nh);
+
+ return macsec_fs;
+
+tx_cleanup:
+ macsec_fs_tx_cleanup(macsec_fs);
+fs_id_hash_cleanup:
+ rhashtable_destroy(&macsec_fs->fs_id_hash);
+sci_hash_cleanup:
+ rhashtable_destroy(&macsec_fs->sci_hash);
+err_hash:
+ kfree(macsec_fs);
+ return NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
new file mode 100644
index 000000000000..34b80c3ef6a5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/ + +#ifndef __MLX5_MACSEC_STEERING_H__ +#define __MLX5_MACSEC_STEERING_H__ + +#ifdef CONFIG_MLX5_MACSEC + +/* Bit31 - 30: MACsec marker, Bit15-0: MACsec id */ +#define MLX5_MACEC_RX_FS_ID_MAX USHRT_MAX /* Must be power of two */ +#define MLX5_MACSEC_RX_FS_ID_MASK MLX5_MACEC_RX_FS_ID_MAX +#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1) +#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK) + +#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16 + +struct mlx5_macsec_fs; +union mlx5_macsec_rule; + +struct mlx5_macsec_rule_attrs { + sci_t sci; + u32 macsec_obj_id; + u8 assoc_num; + int action; +}; + +struct mlx5_macsec_stats { + u64 macsec_rx_pkts; + u64 macsec_rx_bytes; + u64 macsec_rx_pkts_drop; + u64 macsec_rx_bytes_drop; + u64 macsec_tx_pkts; + u64 macsec_tx_bytes; + u64 macsec_tx_pkts_drop; + u64 macsec_tx_bytes_drop; +}; + +enum mlx5_macsec_action { + MLX5_ACCEL_MACSEC_ACTION_ENCRYPT, + MLX5_ACCEL_MACSEC_ACTION_DECRYPT, +}; + +void mlx5_macsec_fs_cleanup(struct mlx5_macsec_fs *macsec_fs); + +struct mlx5_macsec_fs * +mlx5_macsec_fs_init(struct mlx5_core_dev *mdev); + +union mlx5_macsec_rule * +mlx5_macsec_fs_add_rule(struct mlx5_macsec_fs *macsec_fs, + const struct macsec_context *ctx, + struct mlx5_macsec_rule_attrs *attrs, + u32 *sa_fs_id); + +void mlx5_macsec_fs_del_rule(struct mlx5_macsec_fs *macsec_fs, + union mlx5_macsec_rule *macsec_rule, + int action, void *macdev, u32 sa_fs_id); + +void mlx5_macsec_fs_get_stats_fill(struct mlx5_macsec_fs *macsec_fs, void *macsec_stats); +struct mlx5_macsec_stats *mlx5_macsec_fs_get_stats(struct mlx5_macsec_fs *macsec_fs); +u32 mlx5_macsec_fs_get_fs_id_from_hashtable(struct mlx5_macsec_fs *macsec_fs, sci_t *sci); + +#endif + +#endif /* __MLX5_MACSEC_STEERING_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 03b1c5a97826..c6ff1fa0e04d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -68,6 +68,11 @@ static void dwmac4_core_init(struct mac_device_info *hw, init_waitqueue_head(&priv->tstamp_busy_wait); } +static void dwmac4_phylink_get_caps(struct stmmac_priv *priv) +{ + priv->phylink_config.mac_capabilities |= MAC_2500FD; +} + static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u8 mode, u32 queue) { @@ -1131,6 +1136,7 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no, const struct stmmac_ops dwmac4_ops = { .core_init = dwmac4_core_init, + .phylink_get_caps = dwmac4_phylink_get_caps, .set_mac = stmmac_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, @@ -1173,6 +1179,7 @@ const struct stmmac_ops dwmac4_ops = { const struct stmmac_ops dwmac410_ops = { .core_init = dwmac4_core_init, + .phylink_get_caps = dwmac4_phylink_get_caps, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, @@ -1221,6 +1228,7 @@ const struct stmmac_ops dwmac410_ops = { const struct stmmac_ops dwmac510_ops = { .core_init = dwmac4_core_init, + .phylink_get_caps = dwmac4_phylink_get_caps, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index 34e1b0c3f346..f352be269deb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -47,6 +47,14 @@ static void dwxgmac2_core_init(struct mac_device_info *hw, writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN); } +static void xgmac_phylink_get_caps(struct stmmac_priv *priv) +{ + priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD | + MAC_10000FD | MAC_25000FD | + MAC_40000FD | MAC_50000FD | + MAC_100000FD; +} + static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable) { u32 tx = readl(ioaddr + XGMAC_TX_CONFIG); @@ -1490,6 +1498,7 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq, const struct stmmac_ops dwxgmac210_ops = { .core_init = dwxgmac2_core_init, + .phylink_get_caps = xgmac_phylink_get_caps, .set_mac = dwxgmac2_set_mac, .rx_ipc = dwxgmac2_rx_ipc, .rx_queue_enable = dwxgmac2_rx_queue_enable, @@ -1551,6 +1560,7 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode, const struct stmmac_ops dwxlgmac2_ops = { .core_init = dwxgmac2_core_init, + .phylink_get_caps = xgmac_phylink_get_caps, .set_mac = dwxgmac2_set_mac, .rx_ipc = dwxgmac2_rx_ipc, .rx_queue_enable = dwxlgmac2_rx_queue_enable, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 238f17c50a1e..b95d3e137813 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -300,6 +300,8 @@ struct stmmac_est; struct stmmac_ops { /* MAC core initialization */ void (*core_init)(struct mac_device_info *hw, struct net_device *dev); + /* Get phylink capabilities */ + void (*phylink_get_caps)(struct stmmac_priv *priv); /* Enable the MAC RX/TX */ void (*set_mac)(void __iomem *ioaddr, bool enable); /* Enable and verify that the IPC module is supported */ @@ -419,6 +421,8 @@ struct stmmac_ops { #define stmmac_core_init(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, core_init, __args) +#define stmmac_mac_phylink_get_caps(__priv) \ + stmmac_do_void_callback(__priv, mac, phylink_get_caps, __priv) #define stmmac_mac_set(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, set_mac, __args) #define stmmac_rx_ipc(__priv, __args...) 
\ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7a9bbcf03ea5..33ca5c50bdcd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1153,7 +1153,7 @@ static int stmmac_init_phy(struct net_device *dev) if (!phylink_expects_phy(priv->phylink)) return 0; - fwnode = of_fwnode_handle(priv->plat->phylink_node); + fwnode = priv->plat->port_node; if (!fwnode) fwnode = dev_fwnode(priv->device); @@ -1199,21 +1199,21 @@ static int stmmac_init_phy(struct net_device *dev) static int stmmac_phy_setup(struct stmmac_priv *priv) { - struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; - struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); - int max_speed = priv->plat->max_speed; + struct stmmac_mdio_bus_data *mdio_bus_data; int mode = priv->plat->phy_interface; + struct fwnode_handle *fwnode; struct phylink *phylink; + int max_speed; priv->phylink_config.dev = &priv->dev->dev; priv->phylink_config.type = PHYLINK_NETDEV; - if (priv->plat->mdio_bus_data) + priv->phylink_config.mac_managed_pm = true; + + mdio_bus_data = priv->plat->mdio_bus_data; + if (mdio_bus_data) priv->phylink_config.ovr_an_inband = mdio_bus_data->xpcs_an_inband; - if (!fwnode) - fwnode = dev_fwnode(priv->device); - /* Set the platform/firmware specified interface mode */ __set_bit(mode, priv->phylink_config.supported_interfaces); @@ -1223,36 +1223,24 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) priv->phylink_config.supported_interfaces); priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | - MAC_10 | MAC_100; - - if (!max_speed || max_speed >= 1000) - priv->phylink_config.mac_capabilities |= MAC_1000; - - if (priv->plat->has_gmac4) { - if (!max_speed || max_speed >= 2500) - priv->phylink_config.mac_capabilities |= MAC_2500FD; - } else if (priv->plat->has_xgmac) { - if (!max_speed || max_speed >= 2500) - priv->phylink_config.mac_capabilities |= MAC_2500FD; - if (!max_speed || max_speed >= 5000) - priv->phylink_config.mac_capabilities |= MAC_5000FD; - if (!max_speed || max_speed >= 10000) - priv->phylink_config.mac_capabilities |= MAC_10000FD; - if (!max_speed || max_speed >= 25000) - priv->phylink_config.mac_capabilities |= MAC_25000FD; - if (!max_speed || max_speed >= 40000) - priv->phylink_config.mac_capabilities |= MAC_40000FD; - if (!max_speed || max_speed >= 50000) - priv->phylink_config.mac_capabilities |= MAC_50000FD; - if (!max_speed || max_speed >= 100000) - priv->phylink_config.mac_capabilities |= MAC_100000FD; - } + MAC_10FD | MAC_100FD | + MAC_1000FD; /* Half-Duplex can only work with single queue */ - if (priv->plat->tx_queues_to_use > 1) - priv->phylink_config.mac_capabilities &= - ~(MAC_10HD | MAC_100HD | MAC_1000HD); - priv->phylink_config.mac_managed_pm = true; + if (priv->plat->tx_queues_to_use <= 1) + priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD | + MAC_1000HD; + + /* Get the MAC specific capabilities */ + stmmac_mac_phylink_get_caps(priv); + + max_speed = priv->plat->max_speed; + if (max_speed) + phylink_limit_mac_speed(&priv->phylink_config, max_speed); + + fwnode = priv->plat->port_node; + if (!fwnode) + fwnode = dev_fwnode(priv->device); phylink = phylink_create(&priv->phylink_config, fwnode, mode, &stmmac_phylink_mac_ops); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index dd9e2fec5328..fa9e7e7040b9 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -533,11 +533,11 @@ int stmmac_mdio_register(struct net_device *ndev) int err = 0; struct mii_bus *new_bus; struct stmmac_priv *priv = netdev_priv(ndev); - struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; struct device_node *mdio_node = priv->plat->mdio_node; struct device *dev = ndev->dev.parent; struct fwnode_handle *fixed_node; + struct fwnode_handle *fwnode; int addr, found, max_addr; if (!mdio_bus_data) @@ -601,6 +601,7 @@ int stmmac_mdio_register(struct net_device *ndev) stmmac_xgmac2_mdio_read_c45(new_bus, 0, 0, 0); /* If fixed-link is set, skip PHY scanning */ + fwnode = priv->plat->port_node; if (!fwnode) fwnode = dev_fwnode(priv->device); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index be8e79c7aa34..ff330423ee66 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -428,7 +428,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) plat->phy_node = of_parse_phandle(np, "phy-handle", 0); /* PHYLINK automatically parses the phy-handle property */ - plat->phylink_node = np; + plat->port_node = of_fwnode_handle(np); /* Get max speed of operation from device tree */ of_property_read_u32(np, "max-speed", &plat->max_speed); diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig index 39596cd13539..23cd610bd376 100644 --- a/drivers/net/ethernet/wangxun/Kconfig +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -41,6 +41,7 @@ config TXGBE tristate "Wangxun(R) 10GbE PCI Express adapters support" depends on PCI depends on COMMON_CLK + select MARVELL_10G_PHY select REGMAP select I2C select I2C_DESIGNWARE_PLATFORM diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 1de88a33a698..c5cbd177ef62 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -205,6 +205,8 @@ #define WX_TSC_CTL 0x1D000 #define WX_TSC_CTL_TX_DIS BIT(1) #define WX_TSC_CTL_TSEC_DIS BIT(0) +#define WX_TSC_ST 0x1D004 +#define WX_TSC_ST_SECTX_RDY BIT(0) #define WX_TSC_BUF_AE 0x1D00C #define WX_TSC_BUF_AE_THR GENMASK(9, 0) @@ -231,6 +233,24 @@ #define WX_MAC_WDG_TIMEOUT 0x1100C #define WX_MAC_RX_FLOW_CTRL 0x11090 #define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */ +/* MDIO Registers */ +#define WX_MSCA 0x11200 +#define WX_MSCA_RA(v) FIELD_PREP(U16_MAX, v) +#define WX_MSCA_PA(v) FIELD_PREP(GENMASK(20, 16), v) +#define WX_MSCA_DA(v) FIELD_PREP(GENMASK(25, 21), v) +#define WX_MSCC 0x11204 +#define WX_MSCC_CMD(v) FIELD_PREP(GENMASK(17, 16), v) + +enum WX_MSCA_CMD_value { + WX_MSCA_CMD_RSV = 0, + WX_MSCA_CMD_WRITE, + WX_MSCA_CMD_POST_READ, + WX_MSCA_CMD_READ, +}; + +#define WX_MSCC_SADDR BIT(18) +#define WX_MSCC_BUSY BIT(22) +#define WX_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v) #define WX_MMC_CONTROL 0x11800 #define WX_MMC_CONTROL_RSTONRD BIT(2) /* reset on read */ @@ -580,6 +600,13 @@ enum wx_mac_type { wx_mac_em }; +enum sp_media_type { + sp_media_unknown = 0, + sp_media_fiber, + sp_media_copper, + sp_media_backplane +}; + enum em_mac_type { em_mac_type_unknown = 0, em_mac_type_mdi, @@ -827,6 +854,7 @@ struct wx { struct wx_bus_info bus; struct wx_mac_info mac; enum em_mac_type mac_type; + enum sp_media_type 
media_type; struct wx_eeprom_info eeprom; struct wx_addr_filter_info addr_ctrl; struct wx_mac_addr *mac_table; diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c index fe20f02ecb3a..591f5b7b6da6 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -37,24 +37,24 @@ static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regn wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); /* setup and write the address cycle command */ - command = NGBE_MSCA_RA(regnum) | - NGBE_MSCA_PA(phy_addr) | - NGBE_MSCA_DA(device_type); - wr32(wx, NGBE_MSCA, command); - command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | - NGBE_MSCC_BUSY | - NGBE_MDIO_CLK(6); - wr32(wx, NGBE_MSCC, command); + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(device_type); + wr32(wx, WX_MSCA, command); + command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | + WX_MSCC_BUSY | + WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, - 100000, false, wx, NGBE_MSCC); + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); if (ret) { wx_err(wx, "Mdio read c22 command did not complete.\n"); return ret; } - return (u16)rd32(wx, NGBE_MSCC); + return (u16)rd32(wx, WX_MSCC); } static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) @@ -65,19 +65,19 @@ static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int reg wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); /* setup and write the address cycle command */ - command = NGBE_MSCA_RA(regnum) | - NGBE_MSCA_PA(phy_addr) | - NGBE_MSCA_DA(device_type); - wr32(wx, NGBE_MSCA, command); + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(device_type); + wr32(wx, WX_MSCA, command); command = value | - NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) | - NGBE_MSCC_BUSY | - NGBE_MDIO_CLK(6); - wr32(wx, NGBE_MSCC, command); + WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | + WX_MSCC_BUSY | + WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, - 100000, false, wx, NGBE_MSCC); + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); if (ret) wx_err(wx, "Mdio write c22 command did not complete.\n"); @@ -92,24 +92,24 @@ static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devn wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); /* setup and write the address cycle command */ - command = NGBE_MSCA_RA(regnum) | - NGBE_MSCA_PA(phy_addr) | - NGBE_MSCA_DA(devnum); - wr32(wx, NGBE_MSCA, command); - command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | - NGBE_MSCC_BUSY | - NGBE_MDIO_CLK(6); - wr32(wx, NGBE_MSCC, command); + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | + WX_MSCC_BUSY | + WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, - 100000, false, wx, NGBE_MSCC); + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); if (ret) { wx_err(wx, "Mdio read c45 command did not complete.\n"); return ret; } - return (u16)rd32(wx, NGBE_MSCC); + return (u16)rd32(wx, WX_MSCC); } static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, @@ -121,19 +121,19 @@ static int 
ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); /* setup and write the address cycle command */ - command = NGBE_MSCA_RA(regnum) | - NGBE_MSCA_PA(phy_addr) | - NGBE_MSCA_DA(devnum); - wr32(wx, NGBE_MSCA, command); + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); command = value | - NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) | - NGBE_MSCC_BUSY | - NGBE_MDIO_CLK(6); - wr32(wx, NGBE_MSCC, command); + WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | + WX_MSCC_BUSY | + WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, - 100000, false, wx, NGBE_MSCC); + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); if (ret) wx_err(wx, "Mdio write c45 command did not complete.\n"); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index b70eca397b67..72c8cd2d5575 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -59,25 +59,6 @@ #define NGBE_EEPROM_VERSION_L 0x1D #define NGBE_EEPROM_VERSION_H 0x1E -/* mdio access */ -#define NGBE_MSCA 0x11200 -#define NGBE_MSCA_RA(v) FIELD_PREP(U16_MAX, v) -#define NGBE_MSCA_PA(v) FIELD_PREP(GENMASK(20, 16), v) -#define NGBE_MSCA_DA(v) FIELD_PREP(GENMASK(25, 21), v) -#define NGBE_MSCC 0x11204 -#define NGBE_MSCC_CMD(v) FIELD_PREP(GENMASK(17, 16), v) - -enum NGBE_MSCA_CMD_value { - NGBE_MSCA_CMD_RSV = 0, - NGBE_MSCA_CMD_WRITE, - NGBE_MSCA_CMD_POST_READ, - NGBE_MSCA_CMD_READ, -}; - -#define NGBE_MSCC_SADDR BIT(18) -#define NGBE_MSCC_BUSY BIT(22) -#define NGBE_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v) - /* Media-dependent registers. */ #define NGBE_MDIO_CLAUSE_SELECT 0x11220 diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 6e130d1f7a7b..372745250270 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -14,6 +14,34 @@ #include "txgbe_hw.h" /** + * txgbe_disable_sec_tx_path - Stops the transmit data path + * @wx: pointer to hardware structure + * + * Stops the transmit data path and waits for the HW to internally empty + * the tx security block + **/ +int txgbe_disable_sec_tx_path(struct wx *wx) +{ + int val; + + wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS, WX_TSC_CTL_TX_DIS); + return read_poll_timeout(rd32, val, val & WX_TSC_ST_SECTX_RDY, + 1000, 20000, false, wx, WX_TSC_ST); +} + +/** + * txgbe_enable_sec_tx_path - Enables the transmit data path + * @wx: pointer to hardware structure + * + * Enables the transmit data path. 
+ **/ +void txgbe_enable_sec_tx_path(struct wx *wx) +{ + wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS, 0); + WX_WRITE_FLUSH(wx); +} + +/** * txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds * @wx: pointer to hardware structure * @@ -257,17 +285,20 @@ static void txgbe_reset_misc(struct wx *wx) int txgbe_reset_hw(struct wx *wx) { int status; - u32 val; /* Call adapter stop to disable tx/rx and clear interrupts */ status = wx_stop_adapter(wx); if (status != 0) return status; - val = WX_MIS_RST_LAN_RST(wx->bus.func); - wr32(wx, WX_MIS_RST, val | rd32(wx, WX_MIS_RST)); - WX_WRITE_FLUSH(wx); - usleep_range(10, 100); + if (wx->media_type != sp_media_copper) { + u32 val; + + val = WX_MIS_RST_LAN_RST(wx->bus.func); + wr32(wx, WX_MIS_RST, val | rd32(wx, WX_MIS_RST)); + WX_WRITE_FLUSH(wx); + usleep_range(10, 100); + } status = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wx->bus.func)); if (status != 0) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h index e82f65dff8a6..abc729eb187a 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -4,6 +4,8 @@ #ifndef _TXGBE_HW_H_ #define _TXGBE_HW_H_ +int txgbe_disable_sec_tx_path(struct wx *wx); +void txgbe_enable_sec_tx_path(struct wx *wx); int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size); int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val); int txgbe_reset_hw(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 46eba6d6188b..5c3aed516ac2 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -301,6 +301,49 @@ static void txgbe_down(struct wx *wx) } /** + * txgbe_init_type_code - Initialize the shared code + * @wx: pointer to hardware structure + **/ +static void txgbe_init_type_code(struct wx *wx) +{ + u8 device_type = wx->subsystem_device_id & 0xF0; + + switch (wx->device_id) { + case TXGBE_DEV_ID_SP1000: + case TXGBE_DEV_ID_WX1820: + wx->mac.type = wx_mac_sp; + break; + default: + wx->mac.type = wx_mac_unknown; + break; + } + + switch (device_type) { + case TXGBE_ID_SFP: + wx->media_type = sp_media_fiber; + break; + case TXGBE_ID_XAUI: + case TXGBE_ID_SGMII: + wx->media_type = sp_media_copper; + break; + case TXGBE_ID_KR_KX_KX4: + case TXGBE_ID_MAC_XAUI: + case TXGBE_ID_MAC_SGMII: + wx->media_type = sp_media_backplane; + break; + case TXGBE_ID_SFI_XAUI: + if (wx->bus.func == 0) + wx->media_type = sp_media_fiber; + else + wx->media_type = sp_media_copper; + break; + default: + wx->media_type = sp_media_unknown; + break; + } +} + +/** * txgbe_sw_init - Initialize general software structures (struct wx) * @wx: board private structure to initialize **/ @@ -324,15 +367,7 @@ static int txgbe_sw_init(struct wx *wx) return err; } - switch (wx->device_id) { - case TXGBE_DEV_ID_SP1000: - case TXGBE_DEV_ID_WX1820: - wx->mac.type = wx_mac_sp; - break; - default: - wx->mac.type = wx_mac_unknown; - break; - } + txgbe_init_type_code(wx); /* Set common capability flags and settings */ wx->max_q_vectors = TXGBE_MAX_MSIX_VECTORS; @@ -663,6 +698,9 @@ static int txgbe_probe(struct pci_dev *pdev, "0x%08x", etrack_id); } + if (etrack_id < 0x20010) + dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n"); + txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL); if (!txgbe) { err = -ENOMEM; diff --git 
a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 819d1db34643..4159c84035fd 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -18,6 +18,7 @@ #include "../libwx/wx_hw.h" #include "txgbe_type.h" #include "txgbe_phy.h" +#include "txgbe_hw.h" static int txgbe_swnodes_register(struct txgbe *txgbe) { @@ -160,7 +161,10 @@ static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *confi { struct txgbe *txgbe = netdev_to_txgbe(to_net_dev(config->dev)); - return &txgbe->xpcs->pcs; + if (interface == PHY_INTERFACE_MODE_10GBASER) + return &txgbe->xpcs->pcs; + + return NULL; } static void txgbe_mac_config(struct phylink_config *config, unsigned int mode, @@ -210,8 +214,32 @@ static void txgbe_mac_link_up(struct phylink_config *config, wr32(wx, WX_MAC_WDG_TIMEOUT, wdg); } +static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct wx *wx = netdev_priv(to_net_dev(config->dev)); + + wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); + + return txgbe_disable_sec_tx_path(wx); +} + +static int txgbe_mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct wx *wx = netdev_priv(to_net_dev(config->dev)); + + txgbe_enable_sec_tx_path(wx); + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); + + return 0; +} + static const struct phylink_mac_ops txgbe_mac_ops = { .mac_select_pcs = txgbe_phylink_mac_select, + .mac_prepare = txgbe_mac_prepare, + .mac_finish = txgbe_mac_finish, .mac_config = txgbe_mac_config, .mac_link_down = txgbe_mac_link_down, .mac_link_up = txgbe_mac_link_up, @@ -219,8 +247,8 @@ static const struct phylink_mac_ops txgbe_mac_ops = { static int txgbe_phylink_init(struct txgbe *txgbe) { + struct fwnode_handle *fwnode = NULL; struct phylink_config *config; - struct fwnode_handle *fwnode; struct wx *wx = txgbe->wx; phy_interface_t phy_mode; struct phylink *phylink; @@ -231,14 +259,34 @@ static int txgbe_phylink_init(struct txgbe *txgbe) config->dev = &wx->netdev->dev; config->type = PHYLINK_NETDEV; - config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_SYM_PAUSE | MAC_ASYM_PAUSE; - phy_mode = PHY_INTERFACE_MODE_10GBASER; - __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces); - fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_PHYLINK]); + config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD | + MAC_SYM_PAUSE | MAC_ASYM_PAUSE; + + if (wx->media_type == sp_media_copper) { + phy_mode = PHY_INTERFACE_MODE_XAUI; + __set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces); + } else { + phy_mode = PHY_INTERFACE_MODE_10GBASER; + fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_PHYLINK]); + __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces); + } + phylink = phylink_create(config, fwnode, phy_mode, &txgbe_mac_ops); if (IS_ERR(phylink)) return PTR_ERR(phylink); + if (wx->phydev) { + int ret; + + ret = phylink_connect_phy(phylink, wx->phydev); + if (ret) { + phylink_destroy(phylink); + return ret; + } + } + txgbe->phylink = phylink; return 0; @@ -431,7 +479,8 @@ static void txgbe_irq_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); - if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN)) { + 
if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN | + TXGBE_PX_MISC_ETH_AN)) { u32 reg = rd32(wx, TXGBE_CFG_PORT_ST); phylink_mac_change(txgbe->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP)); @@ -598,10 +647,117 @@ static int txgbe_sfp_register(struct txgbe *txgbe) return 0; } +static int txgbe_phy_read(struct mii_bus *bus, int phy_addr, + int devnum, int regnum) +{ + struct wx *wx = bus->priv; + u32 val, command; + int ret; + + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY; + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) { + wx_err(wx, "Mdio read c45 command did not complete.\n"); + return ret; + } + + return (u16)rd32(wx, WX_MSCC); +} + +static int txgbe_phy_write(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + int ret, command; + u16 val; + + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY; + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) + wx_err(wx, "Mdio write c45 command did not complete.\n"); + + return ret; +} + +static int txgbe_ext_phy_init(struct txgbe *txgbe) +{ + struct phy_device *phydev; + struct mii_bus *mii_bus; + struct pci_dev *pdev; + struct wx *wx; + int ret = 0; + + wx = txgbe->wx; + pdev = wx->pdev; + + mii_bus = devm_mdiobus_alloc(&pdev->dev); + if (!mii_bus) + return -ENOMEM; + + mii_bus->name = "txgbe_mii_bus"; + mii_bus->read_c45 = &txgbe_phy_read; + mii_bus->write_c45 = &txgbe_phy_write; + mii_bus->parent = &pdev->dev; + mii_bus->phy_mask = GENMASK(31, 1); + mii_bus->priv = wx; + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x", + (pdev->bus->number << 8) | pdev->devfn); + + ret = devm_mdiobus_register(&pdev->dev, mii_bus); + if (ret) { + wx_err(wx, "failed to register MDIO bus: %d\n", ret); + return ret; + } + + phydev = phy_find_first(mii_bus); + if (!phydev) { + wx_err(wx, "no PHY found\n"); + return -ENODEV; + } + + phy_attached_info(phydev); + + wx->link = 0; + wx->speed = 0; + wx->duplex = 0; + wx->phydev = phydev; + + ret = txgbe_phylink_init(txgbe); + if (ret) { + wx_err(wx, "failed to init phylink: %d\n", ret); + return ret; + } + + return 0; +} + int txgbe_init_phy(struct txgbe *txgbe) { int ret; + if (txgbe->wx->media_type == sp_media_copper) + return txgbe_ext_phy_init(txgbe); + ret = txgbe_swnodes_register(txgbe); if (ret) { wx_err(txgbe->wx, "failed to register software nodes\n"); @@ -663,6 +819,12 @@ err_unregister_swnode: void txgbe_remove_phy(struct txgbe *txgbe) { + if (txgbe->wx->media_type == sp_media_copper) { + phylink_disconnect_phy(txgbe->phylink); + phylink_destroy(txgbe->phylink); + return; + } + platform_device_unregister(txgbe->sfp_dev); platform_device_unregister(txgbe->i2c_dev); clkdev_drop(txgbe->clock); diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index ae60817ec5c2..c3f30663070f 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -4229,6 +4229,18 @@ static struct net *macsec_get_link_net(const struct net_device *dev) return dev_net(macsec_priv(dev)->real_dev); } +struct 
net_device *macsec_get_real_dev(const struct net_device *dev) +{ + return macsec_priv(dev)->real_dev; +} +EXPORT_SYMBOL_GPL(macsec_get_real_dev); + +bool macsec_netdev_is_offloaded(struct net_device *dev) +{ + return macsec_is_offloaded(macsec_priv(dev)); +} +EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded); + static size_t macsec_get_size(const struct net_device *dev) { return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */ diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile index ea662a7989b2..fb1694192ae6 100644 --- a/drivers/net/pcs/Makefile +++ b/drivers/net/pcs/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for Linux PCS drivers -pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o +pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o pcs-xpcs-wx.o obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o diff --git a/drivers/net/pcs/pcs-xpcs-wx.c b/drivers/net/pcs/pcs-xpcs-wx.c new file mode 100644 index 000000000000..19c75886f070 --- /dev/null +++ b/drivers/net/pcs/pcs-xpcs-wx.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/pcs/pcs-xpcs.h> +#include <linux/mdio.h> +#include "pcs-xpcs.h" + +/* VR_XS_PMA_MMD */ +#define TXGBE_PMA_MMD 0x8020 +#define TXGBE_TX_GENCTL1 0x11 +#define TXGBE_TX_GENCTL1_VBOOST_LVL GENMASK(10, 8) +#define TXGBE_TX_GENCTL1_VBOOST_EN0 BIT(4) +#define TXGBE_TX_GEN_CTL2 0x12 +#define TXGBE_TX_GEN_CTL2_TX0_WIDTH(v) FIELD_PREP(GENMASK(9, 8), v) +#define TXGBE_TX_RATE_CTL 0x14 +#define TXGBE_TX_RATE_CTL_TX0_RATE(v) FIELD_PREP(GENMASK(2, 0), v) +#define TXGBE_RX_GEN_CTL2 0x32 +#define TXGBE_RX_GEN_CTL2_RX0_WIDTH(v) FIELD_PREP(GENMASK(9, 8), v) +#define TXGBE_RX_GEN_CTL3 0x33 +#define TXGBE_RX_GEN_CTL3_LOS_TRSHLD0 GENMASK(2, 0) +#define TXGBE_RX_RATE_CTL 0x34 +#define TXGBE_RX_RATE_CTL_RX0_RATE(v) FIELD_PREP(GENMASK(1, 0), v) +#define TXGBE_RX_EQ_ATTN_CTL 0x37 +#define TXGBE_RX_EQ_ATTN_LVL0 GENMASK(2, 0) +#define TXGBE_RX_EQ_CTL0 0x38 +#define TXGBE_RX_EQ_CTL0_VGA1_GAIN(v) FIELD_PREP(GENMASK(15, 12), v) +#define TXGBE_RX_EQ_CTL0_VGA2_GAIN(v) FIELD_PREP(GENMASK(11, 8), v) +#define TXGBE_RX_EQ_CTL0_CTLE_POLE(v) FIELD_PREP(GENMASK(7, 5), v) +#define TXGBE_RX_EQ_CTL0_CTLE_BOOST(v) FIELD_PREP(GENMASK(4, 0), v) +#define TXGBE_RX_EQ_CTL4 0x3C +#define TXGBE_RX_EQ_CTL4_CONT_OFF_CAN0 BIT(4) +#define TXGBE_RX_EQ_CTL4_CONT_ADAPT0 BIT(0) +#define TXGBE_AFE_DFE_ENABLE 0x3D +#define TXGBE_DFE_EN_0 BIT(4) +#define TXGBE_AFE_EN_0 BIT(0) +#define TXGBE_DFE_TAP_CTL0 0x3E +#define TXGBE_MPLLA_CTL0 0x51 +#define TXGBE_MPLLA_CTL2 0x53 +#define TXGBE_MPLLA_CTL2_DIV16P5_CLK_EN BIT(10) +#define TXGBE_MPLLA_CTL2_DIV10_CLK_EN BIT(9) +#define TXGBE_MPLLA_CTL3 0x57 +#define TXGBE_MISC_CTL0 0x70 +#define TXGBE_MISC_CTL0_PLL BIT(15) +#define TXGBE_MISC_CTL0_CR_PARA_SEL BIT(14) +#define TXGBE_MISC_CTL0_RX_VREF(v) FIELD_PREP(GENMASK(12, 8), v) +#define TXGBE_VCO_CAL_LD0 0x72 +#define TXGBE_VCO_CAL_REF0 0x76 + +static int txgbe_read_pma(struct dw_xpcs *xpcs, int reg) +{ + return xpcs_read(xpcs, MDIO_MMD_PMAPMD, TXGBE_PMA_MMD + reg); +} + +static int txgbe_write_pma(struct dw_xpcs *xpcs, int reg, u16 val) +{ + return xpcs_write(xpcs, MDIO_MMD_PMAPMD, TXGBE_PMA_MMD + reg, val); +} + +static void txgbe_pma_config_10gbaser(struct dw_xpcs *xpcs) +{ + int val; + + txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL0, 0x21); + txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL3, 0); + val = txgbe_read_pma(xpcs, TXGBE_TX_GENCTL1); + val = u16_replace_bits(val, 0x5, 
TXGBE_TX_GENCTL1_VBOOST_LVL); + txgbe_write_pma(xpcs, TXGBE_TX_GENCTL1, val); + txgbe_write_pma(xpcs, TXGBE_MISC_CTL0, TXGBE_MISC_CTL0_PLL | + TXGBE_MISC_CTL0_CR_PARA_SEL | TXGBE_MISC_CTL0_RX_VREF(0xF)); + txgbe_write_pma(xpcs, TXGBE_VCO_CAL_LD0, 0x549); + txgbe_write_pma(xpcs, TXGBE_VCO_CAL_REF0, 0x29); + txgbe_write_pma(xpcs, TXGBE_TX_RATE_CTL, 0); + txgbe_write_pma(xpcs, TXGBE_RX_RATE_CTL, 0); + txgbe_write_pma(xpcs, TXGBE_TX_GEN_CTL2, TXGBE_TX_GEN_CTL2_TX0_WIDTH(3)); + txgbe_write_pma(xpcs, TXGBE_RX_GEN_CTL2, TXGBE_RX_GEN_CTL2_RX0_WIDTH(3)); + txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL2, TXGBE_MPLLA_CTL2_DIV16P5_CLK_EN | + TXGBE_MPLLA_CTL2_DIV10_CLK_EN); + + txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL0, TXGBE_RX_EQ_CTL0_CTLE_POLE(2) | + TXGBE_RX_EQ_CTL0_CTLE_BOOST(5)); + val = txgbe_read_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL); + val &= ~TXGBE_RX_EQ_ATTN_LVL0; + txgbe_write_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, val); + txgbe_write_pma(xpcs, TXGBE_DFE_TAP_CTL0, 0xBE); + val = txgbe_read_pma(xpcs, TXGBE_AFE_DFE_ENABLE); + val &= ~(TXGBE_DFE_EN_0 | TXGBE_AFE_EN_0); + txgbe_write_pma(xpcs, TXGBE_AFE_DFE_ENABLE, val); + val = txgbe_read_pma(xpcs, TXGBE_RX_EQ_CTL4); + val &= ~TXGBE_RX_EQ_CTL4_CONT_ADAPT0; + txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL4, val); +} + +static void txgbe_pma_config_1g(struct dw_xpcs *xpcs) +{ + int val; + + val = txgbe_read_pma(xpcs, TXGBE_TX_GENCTL1); + val = u16_replace_bits(val, 0x5, TXGBE_TX_GENCTL1_VBOOST_LVL); + val &= ~TXGBE_TX_GENCTL1_VBOOST_EN0; + txgbe_write_pma(xpcs, TXGBE_TX_GENCTL1, val); + txgbe_write_pma(xpcs, TXGBE_MISC_CTL0, TXGBE_MISC_CTL0_PLL | + TXGBE_MISC_CTL0_CR_PARA_SEL | TXGBE_MISC_CTL0_RX_VREF(0xF)); + + txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL0, TXGBE_RX_EQ_CTL0_VGA1_GAIN(7) | + TXGBE_RX_EQ_CTL0_VGA2_GAIN(7) | TXGBE_RX_EQ_CTL0_CTLE_BOOST(6)); + val = txgbe_read_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL); + val &= ~TXGBE_RX_EQ_ATTN_LVL0; + txgbe_write_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, val); + txgbe_write_pma(xpcs, TXGBE_DFE_TAP_CTL0, 0); + val = txgbe_read_pma(xpcs, TXGBE_RX_GEN_CTL3); + val = u16_replace_bits(val, 0x4, TXGBE_RX_GEN_CTL3_LOS_TRSHLD0); + txgbe_write_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, val); + + txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL0, 0x20); + txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL3, 0x46); + txgbe_write_pma(xpcs, TXGBE_VCO_CAL_LD0, 0x540); + txgbe_write_pma(xpcs, TXGBE_VCO_CAL_REF0, 0x2A); + txgbe_write_pma(xpcs, TXGBE_AFE_DFE_ENABLE, 0); + txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL4, TXGBE_RX_EQ_CTL4_CONT_OFF_CAN0); + txgbe_write_pma(xpcs, TXGBE_TX_RATE_CTL, TXGBE_TX_RATE_CTL_TX0_RATE(3)); + txgbe_write_pma(xpcs, TXGBE_RX_RATE_CTL, TXGBE_RX_RATE_CTL_RX0_RATE(3)); + txgbe_write_pma(xpcs, TXGBE_TX_GEN_CTL2, TXGBE_TX_GEN_CTL2_TX0_WIDTH(1)); + txgbe_write_pma(xpcs, TXGBE_RX_GEN_CTL2, TXGBE_RX_GEN_CTL2_RX0_WIDTH(1)); + txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL2, TXGBE_MPLLA_CTL2_DIV10_CLK_EN); +} + +static int txgbe_pcs_poll_power_up(struct dw_xpcs *xpcs) +{ + int val, ret; + + /* Wait xpcs power-up good */ + ret = read_poll_timeout(xpcs_read_vpcs, val, + (val & DW_PSEQ_ST) == DW_PSEQ_ST_GOOD, + 10000, 1000000, false, + xpcs, DW_VR_XS_PCS_DIG_STS); + if (ret < 0) + dev_err(&xpcs->mdiodev->dev, "xpcs power-up timeout\n"); + + return ret; +} + +static int txgbe_pma_init_done(struct dw_xpcs *xpcs) +{ + int val, ret; + + xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_VR_RST | DW_EN_VSMMD1); + + /* wait pma initialization done */ + ret = read_poll_timeout(xpcs_read_vpcs, val, !(val & DW_VR_RST), + 100000, 10000000, false, + xpcs, DW_VR_XS_PCS_DIG_CTRL1); + if (ret < 0) + 
dev_err(&xpcs->mdiodev->dev, "xpcs pma initialization timeout\n"); + + return ret; +} + +static bool txgbe_xpcs_mode_quirk(struct dw_xpcs *xpcs) +{ + int ret; + + /* When txgbe do LAN reset, PCS will change to default 10GBASE-R mode */ + ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_CTRL2); + ret &= MDIO_PCS_CTRL2_TYPE; + if ((ret == MDIO_PCS_CTRL2_10GBR && + xpcs->interface != PHY_INTERFACE_MODE_10GBASER) || + xpcs->interface == PHY_INTERFACE_MODE_SGMII) + return true; + + return false; +} + +int txgbe_xpcs_switch_mode(struct dw_xpcs *xpcs, phy_interface_t interface) +{ + int val, ret; + + switch (interface) { + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + break; + default: + return 0; + } + + if (xpcs->interface == interface && !txgbe_xpcs_mode_quirk(xpcs)) + return 0; + + xpcs->interface = interface; + + ret = txgbe_pcs_poll_power_up(xpcs); + if (ret < 0) + return ret; + + if (interface == PHY_INTERFACE_MODE_10GBASER) { + xpcs_write(xpcs, MDIO_MMD_PCS, MDIO_CTRL2, MDIO_PCS_CTRL2_10GBR); + val = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_CTRL1); + val |= MDIO_CTRL1_SPEED10G; + xpcs_write(xpcs, MDIO_MMD_PMAPMD, MDIO_CTRL1, val); + txgbe_pma_config_10gbaser(xpcs); + } else { + xpcs_write(xpcs, MDIO_MMD_PCS, MDIO_CTRL2, MDIO_PCS_CTRL2_10GBX); + xpcs_write(xpcs, MDIO_MMD_PMAPMD, MDIO_CTRL1, 0); + xpcs_write(xpcs, MDIO_MMD_PCS, MDIO_CTRL1, 0); + txgbe_pma_config_1g(xpcs); + } + + return txgbe_pma_init_done(xpcs); +} diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 44b037646865..4dbc21f604f2 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -228,16 +228,39 @@ static int xpcs_write_vendor(struct dw_xpcs *xpcs, int dev, int reg, return xpcs_write(xpcs, dev, DW_VENDOR | reg, val); } -static int xpcs_read_vpcs(struct dw_xpcs *xpcs, int reg) +int xpcs_read_vpcs(struct dw_xpcs *xpcs, int reg) { return xpcs_read_vendor(xpcs, MDIO_MMD_PCS, reg); } -static int xpcs_write_vpcs(struct dw_xpcs *xpcs, int reg, u16 val) +int xpcs_write_vpcs(struct dw_xpcs *xpcs, int reg, u16 val) { return xpcs_write_vendor(xpcs, MDIO_MMD_PCS, reg, val); } +static int xpcs_dev_flag(struct dw_xpcs *xpcs) +{ + int ret, oui; + + ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID1); + if (ret < 0) + return ret; + + oui = ret; + + ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID2); + if (ret < 0) + return ret; + + ret = (ret >> 10) & 0x3F; + oui |= ret << 16; + + if (oui == DW_OUI_WX) + xpcs->dev_flag = DW_DEV_TXGBE; + + return 0; +} + static int xpcs_poll_reset(struct dw_xpcs *xpcs, int dev) { /* Poll until the reset bit clears (50ms per retry == 0.6 sec) */ @@ -660,7 +683,10 @@ EXPORT_SYMBOL_GPL(xpcs_config_eee); static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int neg_mode) { - int ret, mdio_ctrl; + int ret, mdio_ctrl, tx_conf; + + if (xpcs->dev_flag == DW_DEV_TXGBE) + xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_CL37_BP | DW_EN_VSMMD1); /* For AN for C37 SGMII mode, the settings are :- * 1) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 0b (Disable SGMII AN in case @@ -697,9 +723,15 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, ret |= (DW_VR_MII_PCS_MODE_C37_SGMII << DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT & DW_VR_MII_PCS_MODE_MASK); - ret |= (DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII << - DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT & - DW_VR_MII_TX_CONFIG_MASK); + if (xpcs->dev_flag == DW_DEV_TXGBE) { + ret |= DW_VR_MII_AN_CTRL_8BIT; + /* Hardware requires it to be PHY side SGMII */ + tx_conf = 
DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII; + } else { + tx_conf = DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII; + } + ret |= tx_conf << DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT & + DW_VR_MII_TX_CONFIG_MASK; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, ret); if (ret < 0) return ret; @@ -713,6 +745,9 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, else ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW; + if (xpcs->dev_flag == DW_DEV_TXGBE) + ret |= DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL; + ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret); if (ret < 0) return ret; @@ -732,6 +767,9 @@ static int xpcs_config_aneg_c37_1000basex(struct dw_xpcs *xpcs, int ret, mdio_ctrl, adv; bool changed = 0; + if (xpcs->dev_flag == DW_DEV_TXGBE) + xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_CL37_BP | DW_EN_VSMMD1); + /* According to Chap 7.12, to set 1000BASE-X C37 AN, AN must * be disabled first:- * 1) VR_MII_MMD_CTRL Bit(12)[AN_ENABLE] = 0b @@ -753,6 +791,8 @@ static int xpcs_config_aneg_c37_1000basex(struct dw_xpcs *xpcs, return ret; ret &= ~DW_VR_MII_PCS_MODE_MASK; + if (!xpcs->pcs.poll) + ret |= DW_VR_MII_AN_INTR_EN; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, ret); if (ret < 0) return ret; @@ -818,6 +858,12 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, if (!compat) return -ENODEV; + if (xpcs->dev_flag == DW_DEV_TXGBE) { + ret = txgbe_xpcs_switch_mode(xpcs, interface); + if (ret) + return ret; + } + switch (compat->an_mode) { case DW_10GBASER: break; @@ -977,6 +1023,33 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs, state->duplex = DUPLEX_FULL; else state->duplex = DUPLEX_HALF; + } else if (ret == DW_VR_MII_AN_STS_C37_ANCMPLT_INTR) { + int speed, duplex; + + state->link = true; + + speed = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1); + if (speed < 0) + return speed; + + speed &= SGMII_SPEED_SS13 | SGMII_SPEED_SS6; + if (speed == SGMII_SPEED_SS6) + state->speed = SPEED_1000; + else if (speed == SGMII_SPEED_SS13) + state->speed = SPEED_100; + else if (speed == 0) + state->speed = SPEED_10; + + duplex = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_ADVERTISE); + if (duplex < 0) + return duplex; + + if (duplex & DW_FULL_DUPLEX) + state->duplex = DUPLEX_FULL; + else if (duplex & DW_HALF_DUPLEX) + state->duplex = DUPLEX_HALF; + + xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS, 0); } return 0; @@ -1000,6 +1073,17 @@ static int xpcs_get_state_c37_1000basex(struct dw_xpcs *xpcs, if (bmsr < 0) return bmsr; + /* Clear AN complete interrupt */ + if (!xpcs->pcs.poll) { + int an_intr; + + an_intr = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS); + if (an_intr & DW_VR_MII_AN_STS_C37_ANCMPLT_INTR) { + an_intr &= ~DW_VR_MII_AN_STS_C37_ANCMPLT_INTR; + xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS, an_intr); + } + } + phylink_mii_c22_pcs_decode_state(state, bmsr, lpa); } @@ -1284,16 +1368,20 @@ static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, goto out; } + ret = xpcs_dev_flag(xpcs); + if (ret) + goto out; + xpcs->pcs.ops = &xpcs_phylink_ops; xpcs->pcs.neg_mode = true; - if (compat->an_mode == DW_10GBASER) - return xpcs; - xpcs->pcs.poll = true; + if (xpcs->dev_flag != DW_DEV_TXGBE) { + xpcs->pcs.poll = true; - ret = xpcs_soft_reset(xpcs, compat); - if (ret) - goto out; + ret = xpcs_soft_reset(xpcs, compat); + if (ret) + goto out; + } return xpcs; } diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h index 68c6b5a62088..39a90417e535 100644 --- a/drivers/net/pcs/pcs-xpcs.h +++ b/drivers/net/pcs/pcs-xpcs.h @@ -15,8 +15,14 
@@ /* VR_XS_PCS */ #define DW_USXGMII_RST BIT(10) #define DW_USXGMII_EN BIT(9) +#define DW_VR_XS_PCS_DIG_CTRL1 0x0000 +#define DW_VR_RST BIT(15) +#define DW_EN_VSMMD1 BIT(13) +#define DW_CL37_BP BIT(12) #define DW_VR_XS_PCS_DIG_STS 0x0010 #define DW_RXFIFO_ERR GENMASK(6, 5) +#define DW_PSEQ_ST GENMASK(4, 2) +#define DW_PSEQ_ST_GOOD FIELD_PREP(GENMASK(4, 2), 0x4) /* SR_MII */ #define DW_USXGMII_FULL BIT(8) @@ -61,12 +67,14 @@ /* VR_MII_DIG_CTRL1 */ #define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9) +#define DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL BIT(0) /* VR_MII_DIG_CTRL2 */ #define DW_VR_MII_DIG_CTRL2_TX_POL_INV BIT(4) #define DW_VR_MII_DIG_CTRL2_RX_POL_INV BIT(0) /* VR_MII_AN_CTRL */ +#define DW_VR_MII_AN_CTRL_8BIT BIT(8) #define DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT 3 #define DW_VR_MII_TX_CONFIG_MASK BIT(3) #define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1 @@ -75,8 +83,10 @@ #define DW_VR_MII_PCS_MODE_MASK GENMASK(2, 1) #define DW_VR_MII_PCS_MODE_C37_1000BASEX 0x0 #define DW_VR_MII_PCS_MODE_C37_SGMII 0x2 +#define DW_VR_MII_AN_INTR_EN BIT(0) /* VR_MII_AN_INTR_STS */ +#define DW_VR_MII_AN_STS_C37_ANCMPLT_INTR BIT(0) #define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1) #define DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT 2 #define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2) @@ -90,6 +100,10 @@ #define SGMII_SPEED_SS13 BIT(13) /* SGMII speed along with SS6 */ #define SGMII_SPEED_SS6 BIT(6) /* SGMII speed along with SS13 */ +/* SR MII MMD AN Advertisement defines */ +#define DW_HALF_DUPLEX BIT(6) +#define DW_FULL_DUPLEX BIT(5) + /* VR MII EEE Control 0 defines */ #define DW_VR_MII_EEE_LTX_EN BIT(0) /* LPI Tx Enable */ #define DW_VR_MII_EEE_LRX_EN BIT(1) /* LPI Rx Enable */ @@ -106,6 +120,9 @@ int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg); int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val); +int xpcs_read_vpcs(struct dw_xpcs *xpcs, int reg); +int xpcs_write_vpcs(struct dw_xpcs *xpcs, int reg, u16 val); int nxp_sja1105_sgmii_pma_config(struct dw_xpcs *xpcs); int nxp_sja1110_sgmii_pma_config(struct dw_xpcs *xpcs); int nxp_sja1110_2500basex_pma_config(struct dw_xpcs *xpcs); +int txgbe_xpcs_switch_mode(struct dw_xpcs *xpcs, phy_interface_t interface); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 160bce608c34..0d7354955d62 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -427,6 +427,24 @@ }; /** + * phylink_limit_mac_speed - limit the phylink_config to a maximum speed + * @config: pointer to a &struct phylink_config + * @max_speed: maximum speed + * + * Mask off MAC capabilities for speeds higher than the @max_speed parameter. + * Any further modifications of config.mac_capabilities will override this.
+ */ +void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(phylink_caps_params) && + phylink_caps_params[i].speed > max_speed; i++) + config->mac_capabilities &= ~phylink_caps_params[i].mask; +} +EXPORT_SYMBOL_GPL(phylink_limit_mac_speed); + +/** * phylink_cap_from_speed_duplex - Get mac capability from speed/duplex * @speed: the speed to search for * @duplex: the duplex to search for diff --git a/drivers/net/veth.c b/drivers/net/veth.c index da44bb1331ff..d43e62ebc2fc 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -373,14 +373,13 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) { if (!use_napi) dev_lstats_add(dev, length); + else + __veth_xdp_flush(rq); } else { drop: atomic64_inc(&priv->dropped); } - if (use_napi) - __veth_xdp_flush(rq); - rcu_read_unlock(); return NETDEV_TX_OK; diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index 76efea2f1138..a378bc48b1d2 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -733,7 +733,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev) int ret; struct ath10k_bus_params bus_params = {}; - hw_rev = (enum ath10k_hw_rev)of_device_get_match_data(&pdev->dev); + hw_rev = (uintptr_t)of_device_get_match_data(&pdev->dev); if (!hw_rev) { dev_err(&pdev->dev, "OF data missing\n"); return -EINVAL; diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index e0c9f45e7476..7b24297146e7 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -69,7 +69,7 @@ struct htt_ver_req { * The HTT tx descriptor is defined in two manners: by a struct with * bitfields, and by a series of [dword offset, bit mask, bit shift] * definitions. - * The target should use the struct def, for simplicitly and clarity, + * The target should use the struct def, for simplicity and clarity, * but the host shall use the bit-mast + bit-shift defs, to be endian- * neutral. Specifically, the host shall use the get/set macros built * around the mask + shift defs. @@ -2086,7 +2086,7 @@ static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt, * for correctly accessing rx descriptor data. */ -/* base struct used for abstracting the rx descritor representation */ +/* base struct used for abstracting the rx descriptor representation */ struct htt_rx_desc { union { /* This field is filled on the host using the msdu buffer diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index a7f44f6335fb..7535524bb85e 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1636,7 +1636,7 @@ static int ath10k_pci_dump_memory_generic(struct ath10k *ar, buf, current_region->len); - /* No individiual memory sections defined so we can + /* No individual memory sections defined so we can * copy the entire memory region. 
*/ ret = ath10k_pci_diag_read_mem(ar, @@ -3816,7 +3816,7 @@ static void __exit ath10k_pci_exit(void) module_exit(ath10k_pci_exit); MODULE_AUTHOR("Qualcomm Atheros"); -MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices"); +MODULE_DESCRIPTION("Driver support for Qualcomm Atheros PCIe/AHB 802.11ac WLAN devices"); MODULE_LICENSE("Dual BSD/GPL"); /* QCA988x 2.0 firmware files */ diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 79e09c7a82b3..56fbcfb80bf8 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -2389,7 +2389,7 @@ static int ath10k_sdio_dump_memory_generic(struct ath10k *ar, buf, current_region->len); - /* No individiual memory sections defined so we can + /* No individual memory sections defined so we can * copy the entire memory region. */ if (fast_dump) diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c index b0067af685b1..3c482baacec1 100644 --- a/drivers/net/wireless/ath/ath10k/usb.c +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -1126,5 +1126,5 @@ static struct usb_driver ath10k_usb_driver = { module_usb_driver(ath10k_usb_driver); MODULE_AUTHOR("Atheros Communications, Inc."); -MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN USB devices"); +MODULE_DESCRIPTION("Driver support for Qualcomm Atheros USB 802.11ac WLAN devices"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 6d04a66fe5e0..b112e8826093 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -3854,9 +3854,9 @@ enum wmi_pdev_param { * retransmitting frames. */ WMI_PDEV_PARAM_DYNAMIC_BW, - /* Non aggregrate/ 11g sw retry threshold.0-disable */ + /* Non aggregate/ 11g sw retry threshold.0-disable */ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH, - /* aggregrate sw retry threshold. 0-disable*/ + /* aggregate sw retry threshold. 0-disable*/ WMI_PDEV_PARAM_AGG_SW_RETRY_TH, /* Station kickout threshold (non of consecutive failures).0-disable */ WMI_PDEV_PARAM_STA_KICKOUT_TH, @@ -3953,9 +3953,9 @@ enum wmi_10x_pdev_param { WMI_10X_PDEV_PARAM_PROTECTION_MODE, /* Dynamic bandwidth 0: disable 1: enable */ WMI_10X_PDEV_PARAM_DYNAMIC_BW, - /* Non aggregrate/ 11g sw retry threshold.0-disable */ + /* Non aggregate/ 11g sw retry threshold.0-disable */ WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH, - /* aggregrate sw retry threshold. 0-disable*/ + /* aggregate sw retry threshold. 
0-disable*/ WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH, /* Station kickout threshold (non of consecutive failures).0-disable */ WMI_10X_PDEV_PARAM_STA_KICKOUT_TH, diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index ceb3ccbb1827..1215ebdf173a 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -1096,7 +1096,7 @@ static int ath11k_ahb_probe(struct platform_device *pdev) return -EINVAL; } - hw_rev = (enum ath11k_hw_rev)of_id->data; + hw_rev = (uintptr_t)of_id->data; switch (hw_rev) { case ATH11K_HW_IPQ8074: @@ -1306,17 +1306,7 @@ static struct platform_driver ath11k_ahb_driver = { .shutdown = ath11k_ahb_shutdown, }; -static int ath11k_ahb_init(void) -{ - return platform_driver_register(&ath11k_ahb_driver); -} -module_init(ath11k_ahb_init); - -static void ath11k_ahb_exit(void) -{ - platform_driver_unregister(&ath11k_ahb_driver); -} -module_exit(ath11k_ahb_exit); +module_platform_driver(ath11k_ahb_driver); MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h index 1fc6360e7f01..c0f6a0ba86df 100644 --- a/drivers/net/wireless/ath/ath11k/ce.h +++ b/drivers/net/wireless/ath/ath11k/ce.h @@ -203,9 +203,6 @@ int ath11k_ce_alloc_pipes(struct ath11k_base *ab); void ath11k_ce_free_pipes(struct ath11k_base *ab); int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id); void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id); -int ath11k_ce_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe); -int ath11k_ce_attr_attach(struct ath11k_base *ab); void ath11k_ce_get_shadow_config(struct ath11k_base *ab, u32 **shadow_cfg, u32 *shadow_cfg_len); void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab); diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index d04f78ab6b37..15815af453b2 100644 --- a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -635,7 +635,7 @@ enum htt_ppdu_stats_tag_type { * b'24 - status_swap: 1 is to swap status TLV * b'25 - pkt_swap: 1 is to swap packet TLV * b'26:31 - rsvd1: reserved for future use - * dword1 - b'0:16 - ring_buffer_size: size of bufferes referenced by rx ring, + * dword1 - b'0:16 - ring_buffer_size: size of buffers referenced by rx ring, * in byte units. 
* Valid only for HW_TO_SW_RING and SW_TO_HW_RING * - b'16:31 - rsvd2: Reserved for future use diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 1e488eed282b..62bc98852f0f 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -3423,7 +3423,7 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, ab->hw_params.hal_params->rx_buf_rbm); - /* Fill mpdu details into reo entrace ring */ + /* Fill mpdu details into reo entrance ring */ srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id]; spin_lock_bh(&srng->lock); diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index a34833de7c67..0dda76f7a4b5 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -238,7 +238,7 @@ tcl_ring_sel: spin_unlock_bh(&tcl_ring->lock); ret = -ENOMEM; - /* Checking for available tcl descritors in another ring in + /* Checking for available tcl descriptors in another ring in * case of failure due to full tcl ring now, is better than * checking this ring earlier for each pkt tx. * Restart ring selection if some rings are not checked yet. @@ -344,7 +344,7 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); if (!skb_cb->vif) { - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ar->hw, msdu); return; } @@ -369,7 +369,7 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, "dp_tx: failed to find the peer with peer_id %d\n", ts->peer_id); spin_unlock_bh(&ab->base_lock); - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ar->hw, msdu); return; } spin_unlock_bh(&ab->base_lock); @@ -566,12 +566,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) { - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ar->hw, msdu); return; } if (unlikely(!skb_cb->vif)) { - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ar->hw, msdu); return; } @@ -624,7 +624,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, "dp_tx: failed to find the peer with peer_id %d\n", ts->peer_id); spin_unlock_bh(&ab->base_lock); - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ar->hw, msdu); return; } arsta = (struct ath11k_sta *)peer->sta->drv_priv; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 2aadf2c387b6..c071bf5841af 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -566,7 +566,7 @@ static void ath11k_get_arvif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath11k_vif_iter *arvif_iter = data; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); if (arvif->vdev_id == arvif_iter->vdev_id) arvif_iter->arvif = arvif; @@ -1464,7 +1464,7 @@ static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif) u32 params = 0; u8 i = 0; - tx_arvif = (void *)arvif->vif->mbssid_tx_vif->drv_priv; + tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif); beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->hw, tx_arvif->vif, 0); @@ -1520,8 +1520,8 @@ static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif) struct sk_buff *bcn; int ret; - if (arvif->vif->mbssid_tx_vif) { - tx_arvif = 
(void *)arvif->vif->mbssid_tx_vif->drv_priv; + if (vif->mbssid_tx_vif) { + tx_arvif = ath11k_vif_to_arvif(vif->mbssid_tx_vif); if (tx_arvif != arvif) { ar = tx_arvif->ar; ab = ar->ab; @@ -1562,7 +1562,7 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) * non-transmitting interfaces, and results in a crash if sent. */ if (vif->mbssid_tx_vif && - arvif != (void *)vif->mbssid_tx_vif->drv_priv && arvif->is_up) + arvif != ath11k_vif_to_arvif(vif->mbssid_tx_vif) && arvif->is_up) return 0; if (vif->bss_conf.ema_ap && vif->mbssid_tx_vif) @@ -1626,7 +1626,7 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif, ether_addr_copy(arvif->bssid, info->bssid); if (arvif->vif->mbssid_tx_vif) - tx_arvif = (struct ath11k_vif *)arvif->vif->mbssid_tx_vif->drv_priv; + tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif); ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, arvif->bssid, @@ -1649,7 +1649,7 @@ static void ath11k_mac_handle_beacon_iter(void *data, u8 *mac, { struct sk_buff *skb = data; struct ieee80211_mgmt *mgmt = (void *)skb->data; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); if (vif->type != NL80211_IFTYPE_STATION) return; @@ -1672,7 +1672,7 @@ static void ath11k_mac_handle_beacon_miss_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { u32 *vdev_id = data; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k *ar = arvif->ar; struct ieee80211_hw *hw = ar->hw; @@ -1718,7 +1718,7 @@ static void ath11k_peer_assoc_h_basic(struct ath11k *ar, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); u32 aid; lockdep_assert_held(&ar->conf_mutex); @@ -1746,7 +1746,7 @@ static void ath11k_peer_assoc_h_crypto(struct ath11k *ar, struct ieee80211_bss_conf *info = &vif->bss_conf; struct cfg80211_chan_def def; struct cfg80211_bss *bss; - struct ath11k_vif *arvif = (struct ath11k_vif *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); const u8 *rsnie = NULL; const u8 *wpaie = NULL; @@ -1804,7 +1804,7 @@ static void ath11k_peer_assoc_h_rates(struct ath11k *ar, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; struct cfg80211_chan_def def; const struct ieee80211_supported_band *sband; @@ -1867,7 +1867,7 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar, struct peer_assoc_params *arg) { const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; @@ -2064,7 +2064,7 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar, struct peer_assoc_params *arg) { const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; u16 *vht_mcs_mask; @@ -2261,7 +2261,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct 
cfg80211_chan_def def; const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; enum nl80211_band band; @@ -2584,7 +2584,7 @@ static void ath11k_peer_assoc_h_qos(struct ath11k *ar, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: @@ -2747,7 +2747,7 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; @@ -2933,7 +2933,7 @@ static bool ath11k_mac_vif_recalc_sta_he_txbf(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta_he_cap *he_cap) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ieee80211_he_cap_elem he_cap_elem = {0}; struct ieee80211_sta_he_cap *cap_band = NULL; struct cfg80211_chan_def def; @@ -2995,7 +2995,7 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw, struct ieee80211_bss_conf *bss_conf) { struct ath11k *ar = hw->priv; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct peer_assoc_params peer_arg; struct ieee80211_sta *ap_sta; struct ath11k_peer *peer; @@ -3111,7 +3111,7 @@ static void ath11k_bss_disassoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; lockdep_assert_held(&ar->conf_mutex); @@ -3160,7 +3160,7 @@ static void ath11k_recalculate_mgmt_rate(struct ath11k *ar, struct ieee80211_vif *vif, struct cfg80211_chan_def *def) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); const struct ieee80211_supported_band *sband; u8 basic_rate_idx; int hw_rate_code; @@ -4632,7 +4632,7 @@ static int ath11k_station_disassoc(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret = 0; lockdep_assert_held(&ar->conf_mutex); @@ -5160,7 +5160,7 @@ static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, struct ieee80211_sta *sta) { struct ath11k *ar = hw->priv; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret = 0; s16 txpwr; @@ -5210,7 +5210,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw, { struct ath11k *ar = hw->priv; struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_peer *peer; u32 bw, smps; @@ -5337,7 +5337,7 @@ static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw, const struct ieee80211_tx_queue_params *params) { struct ath11k *ar = hw->priv; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct wmi_wmm_params_arg *p = NULL; int ret; @@ -6455,7 +6455,7 @@ static int ath11k_mac_setup_vdev_params_mbssid(struct ath11k_vif *arvif, return 0; } - tx_arvif = (void *)tx_vif->drv_priv; + tx_arvif = ath11k_vif_to_arvif(tx_vif); if (arvif->vif->bss_conf.nontransmitted) { if (ar->hw->wiphy != 
ieee80211_vif_to_wdev(tx_vif)->wiphy) @@ -7408,7 +7408,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar, /* TODO: Update ar->rx_channel */ for (i = 0; i < n_vifs; i++) { - arvif = (void *)vifs[i].vif->drv_priv; + arvif = ath11k_vif_to_arvif(vifs[i].vif); if (WARN_ON(!arvif->is_started)) continue; @@ -7450,7 +7450,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar, mbssid_tx_vif = arvif->vif->mbssid_tx_vif; if (mbssid_tx_vif) - tx_arvif = (struct ath11k_vif *)mbssid_tx_vif->drv_priv; + tx_arvif = ath11k_vif_to_arvif(mbssid_tx_vif); ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, arvif->bssid, @@ -7546,7 +7546,7 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw, { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; if (WARN_ON(arvif->is_started)) @@ -7596,7 +7596,7 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; struct peer_create_params param; @@ -7686,7 +7686,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_peer *peer; int ret; @@ -8307,7 +8307,7 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; struct ath11k_pdev_cap *cap; struct ath11k *ar = arvif->ar; @@ -8904,7 +8904,7 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw, enum ieee80211_roc_type type) { struct ath11k *ar = hw->priv; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct scan_req_params arg; int ret; u32 scan_time_msec; diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index 5fd08ffc2a9f..9573bd959cac 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -1036,7 +1036,7 @@ static void ath11k_pci_exit(void) module_exit(ath11k_pci_exit); -MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN PCIe devices"); +MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11ax WLAN devices"); MODULE_LICENSE("Dual BSD/GPL"); /* firmware files */ diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h index b0407abf90cd..d477e2be814b 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.h +++ b/drivers/net/wireless/ath/ath11k/qmi.h @@ -514,8 +514,6 @@ struct qmi_wlanfw_wlan_ini_resp_msg_v01 { int ath11k_qmi_firmware_start(struct ath11k_base *ab, u32 mode); void ath11k_qmi_firmware_stop(struct ath11k_base *ab); -void ath11k_qmi_event_work(struct work_struct *work); -void ath11k_qmi_msg_recv_work(struct work_struct *work); void ath11k_qmi_deinit_service(struct ath11k_base *ab); int ath11k_qmi_init_service(struct ath11k_base *ab); void ath11k_qmi_free_resource(struct ath11k_base *ab); diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c index 8fc5cddb28bd..43bb23265d34 100644 --- 
a/drivers/net/wireless/ath/ath11k/testmode.c +++ b/drivers/net/wireless/ath/ath11k/testmode.c @@ -350,7 +350,7 @@ static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[], if (ar->ab->fw_mode != ATH11K_FIRMWARE_MODE_FTM && (tag == WMI_TAG_VDEV_SET_PARAM_CMD || tag == WMI_TAG_UNIT_TEST_CMD)) { if (vif) { - arvif = (struct ath11k_vif *)vif->drv_priv; + arvif = ath11k_vif_to_arvif(vif); *ptr = arvif->vdev_id; } else { ret = -EINVAL; diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h index 17cf16235e0b..79af3b6159f1 100644 --- a/drivers/net/wireless/ath/ath12k/ce.h +++ b/drivers/net/wireless/ath/ath12k/ce.h @@ -176,9 +176,6 @@ int ath12k_ce_alloc_pipes(struct ath12k_base *ab); void ath12k_ce_free_pipes(struct ath12k_base *ab); int ath12k_ce_get_attr_flags(struct ath12k_base *ab, int ce_id); void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id); -int ath12k_ce_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe); -int ath12k_ce_attr_attach(struct ath12k_base *ab); void ath12k_ce_get_shadow_config(struct ath12k_base *ab, u32 **shadow_cfg, u32 *shadow_cfg_len); #endif diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h index 4389ff40b49d..d873b573dac6 100644 --- a/drivers/net/wireless/ath/ath12k/core.h +++ b/drivers/net/wireless/ath/ath12k/core.h @@ -788,7 +788,6 @@ int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab, int ath12k_core_fetch_bdf(struct ath12k_base *ath12k, struct ath12k_board_data *bd); void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd); -int ath12k_core_check_dt(struct ath12k_base *ath12k); void ath12k_core_halt(struct ath12k *ar); int ath12k_core_resume(struct ath12k_base *ab); diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c index ae1645d0f42a..f933896f2a68 100644 --- a/drivers/net/wireless/ath/ath12k/dp.c +++ b/drivers/net/wireless/ath/ath12k/dp.c @@ -1129,6 +1129,7 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab) struct ath12k_dp *dp = &ab->dp; struct sk_buff *skb; int i; + u32 pool_id, tx_spt_page; if (!dp->spt_info) return; @@ -1148,6 +1149,14 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab) dev_kfree_skb_any(skb); } + for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) { + if (!dp->spt_info->rxbaddr[i]) + continue; + + kfree(dp->spt_info->rxbaddr[i]); + dp->spt_info->rxbaddr[i] = NULL; + } + spin_unlock_bh(&dp->rx_desc_lock); /* TX Descriptor cleanup */ @@ -1170,6 +1179,21 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab) spin_unlock_bh(&dp->tx_desc_lock[i]); } + for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) { + spin_lock_bh(&dp->tx_desc_lock[pool_id]); + + for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) { + tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL; + if (!dp->spt_info->txbaddr[tx_spt_page]) + continue; + + kfree(dp->spt_info->txbaddr[tx_spt_page]); + dp->spt_info->txbaddr[tx_spt_page] = NULL; + } + + spin_unlock_bh(&dp->tx_desc_lock[pool_id]); + } + /* unmap SPT pages */ for (i = 0; i < dp->num_spt_pages; i++) { if (!dp->spt_info[i].vaddr) @@ -1343,6 +1367,8 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab) return -ENOMEM; } + dp->spt_info->rxbaddr[i] = &rx_descs[0]; + for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) { rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j); rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC; @@ -1368,8 +1394,10 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base 
*ab) return -ENOMEM; } + tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL; + dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0]; + for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) { - tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL; ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page; tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j); tx_descs[j].pool_id = pool_id; diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h index 7c5dafce5a68..61f765432516 100644 --- a/drivers/net/wireless/ath/ath12k/dp.h +++ b/drivers/net/wireless/ath/ath12k/dp.h @@ -289,6 +289,8 @@ struct ath12k_tx_desc_info { struct ath12k_spt_info { dma_addr_t paddr; u64 *vaddr; + struct ath12k_rx_desc_info *rxbaddr[ATH12K_NUM_RX_SPT_PAGES]; + struct ath12k_tx_desc_info *txbaddr[ATH12K_NUM_TX_SPT_PAGES]; }; struct ath12k_reo_queue_ref { @@ -712,7 +714,7 @@ enum htt_stats_internal_ppdu_frametype { * b'24 - status_swap: 1 is to swap status TLV * b'25 - pkt_swap: 1 is to swap packet TLV * b'26:31 - rsvd1: reserved for future use - * dword1 - b'0:16 - ring_buffer_size: size of bufferes referenced by rx ring, + * dword1 - b'0:16 - ring_buffer_size: size of buffers referenced by rx ring, * in byte units. * Valid only for HW_TO_SW_RING and SW_TO_HW_RING * - b'16:31 - rsvd2: Reserved for future use diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index 67f8c140840f..e6e64d437c47 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -3027,7 +3027,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, desc_info->cookie, HAL_RX_BUF_RBM_SW3_BM); - /* Fill mpdu details into reo entrace ring */ + /* Fill mpdu details into reo entrance ring */ srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id]; spin_lock_bh(&srng->lock); diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index d661fe586651..8874c815d7fa 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -301,7 +301,7 @@ tcl_ring_sel: spin_unlock_bh(&tcl_ring->lock); ret = -ENOMEM; - /* Checking for available tcl descritors in another ring in + /* Checking for available tcl descriptors in another ring in * case of failure due to full tcl ring now, is better than * checking this ring earlier for each pkt tx. * Restart ring selection if some rings are not checked yet. 
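
A minimal sketch of the bookkeeping pattern used in the ath12k_dp_cc_desc_init/ath12k_dp_cc_cleanup hunks above: descriptors are allocated in page-sized batches and threaded onto free lists, so cleanup can only free them if the base pointer of every batch is recorded separately (as the new rxbaddr[]/txbaddr[] arrays do). This is not the driver code; all names here (desc_pool, pool_init, pool_destroy, NUM_PAGES, DESCS_PER_PAGE) are hypothetical, and userspace calloc/free stands in for the kernel allocators.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_PAGES      4
#define DESCS_PER_PAGE 8

struct desc {
	unsigned int cookie;
	struct desc *next;	/* free-list linkage, like the driver's list_head */
};

struct desc_pool {
	struct desc *free_list;		/* individual descriptors, in any order */
	struct desc *baddr[NUM_PAGES];	/* base address of each batch, kept for freeing */
};

static int pool_init(struct desc_pool *pool)
{
	memset(pool, 0, sizeof(*pool));

	for (int i = 0; i < NUM_PAGES; i++) {
		struct desc *batch = calloc(DESCS_PER_PAGE, sizeof(*batch));

		if (!batch)
			return -1;	/* caller unwinds via pool_destroy() */

		pool->baddr[i] = batch;	/* record the base pointer, as rxbaddr[]/txbaddr[] do */

		for (int j = 0; j < DESCS_PER_PAGE; j++) {
			batch[j].cookie = (unsigned int)(i * DESCS_PER_PAGE + j);
			batch[j].next = pool->free_list;
			pool->free_list = &batch[j];
		}
	}
	return 0;
}

static void pool_destroy(struct desc_pool *pool)
{
	/* Freeing must use the recorded base pointers, not the free-list nodes. */
	for (int i = 0; i < NUM_PAGES; i++) {
		free(pool->baddr[i]);
		pool->baddr[i] = NULL;
	}
	pool->free_list = NULL;
}

int main(void)
{
	struct desc_pool pool;

	if (pool_init(&pool)) {
		pool_destroy(&pool);
		return 1;
	}
	/* descriptors would be handed out from pool.free_list here */
	pool_destroy(&pool);
	printf("allocated and freed %d descriptors\n", NUM_PAGES * DESCS_PER_PAGE);
	return 0;
}

Without the recorded base pointers, only the descriptors still sitting on the free list are reachable at teardown, and none of them can be passed to free() anyway because they are interior pointers into the batch allocations; that is the leak the hunks above close.
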
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 0f2af2f14ef7..88346e66bb75 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -5660,7 +5660,6 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); - changed_flags &= SUPPORTED_FILTERS; *total_flags &= SUPPORTED_FILTERS; ar->filter_flags = *total_flags; @@ -5678,8 +5677,8 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, "fail to set monitor filter: %d\n", ret); } ath12k_dbg(ar->ab, ATH12K_DBG_MAC, - "changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n", - changed_flags, *total_flags, reset_flag); + "total_flags:0x%x, reset_flag:%d\n", + *total_flags, reset_flag); mutex_unlock(&ar->conf_mutex); } @@ -6771,7 +6770,7 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, /* After trigger disconnect, then upper layer will * trigger connect again, then the PN number of * upper layer will be reset to keep up with AP - * side, hence PN number mis-match will not happened. + * side, hence PN number mismatch will not happen. */ if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA && diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index 5990a55801f0..bd689efa7daa 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -1409,5 +1409,5 @@ static void ath12k_pci_exit(void) module_exit(ath12k_pci_exit); -MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN PCIe devices"); +MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11be WLAN devices"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h index df76149c49f5..15944f5f33ab 100644 --- a/drivers/net/wireless/ath/ath12k/qmi.h +++ b/drivers/net/wireless/ath/ath12k/qmi.h @@ -562,8 +562,6 @@ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 { int ath12k_qmi_firmware_start(struct ath12k_base *ab, u32 mode); void ath12k_qmi_firmware_stop(struct ath12k_base *ab); -void ath12k_qmi_event_work(struct work_struct *work); -void ath12k_qmi_msg_recv_work(struct work_struct *work); void ath12k_qmi_deinit_service(struct ath12k_base *ab); int ath12k_qmi_init_service(struct ath12k_base *ab); diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h index f99556a253e5..bfa87cb8d021 100644 --- a/drivers/net/wireless/ath/ath12k/rx_desc.h +++ b/drivers/net/wireless/ath/ath12k/rx_desc.h @@ -221,7 +221,7 @@ struct rx_mpdu_start_qcn9274 { * PPE routing even if RXOLE CCE or flow search indicate 'Use_PPE' * This is set by SW for peers which are being handled by a * host SW/accelerator subsystem that also handles packet - * uffer management for WiFi-to-PPE routing. + * buffer management for WiFi-to-PPE routing. 
* * This is cleared by SW for peers which are being handled * by a different subsystem, completely disabling WiFi-to-PPE diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index cc9a377c06fd..ef0f3cf35cfd 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -2239,12 +2239,6 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar, if (arg->num_bssid) len += sizeof(*bssid) * arg->num_bssid; - len += TLV_HDR_SIZE; - if (arg->extraie.len) - extraie_len_with_pad = - roundup(arg->extraie.len, sizeof(u32)); - len += extraie_len_with_pad; - if (arg->num_hint_bssid) len += TLV_HDR_SIZE + arg->num_hint_bssid * sizeof(*hint_bssid); @@ -2253,6 +2247,18 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar, len += TLV_HDR_SIZE + arg->num_hint_s_ssid * sizeof(*s_ssid); + len += TLV_HDR_SIZE; + if (arg->extraie.len) + extraie_len_with_pad = + roundup(arg->extraie.len, sizeof(u32)); + if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) { + len += extraie_len_with_pad; + } else { + ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n", + arg->extraie.len); + extraie_len_with_pad = 0; + } + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; @@ -2342,7 +2348,7 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar, tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len); ptr += TLV_HDR_SIZE; - if (arg->extraie.len) + if (extraie_len_with_pad) memcpy(ptr, arg->extraie.ptr, arg->extraie.len); diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index 8c047a9623f9..c75a6fa1f7e0 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -4855,8 +4855,6 @@ int ath12k_wmi_vdev_install_key(struct ath12k *ar, struct wmi_vdev_install_key_arg *arg); int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar, enum wmi_bss_chan_info_req_type type); -int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id, - u32 vdev_id, u32 pdev_id); int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar); int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar, u8 peer_addr[ETH_ALEN], diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c index 28a1e5eff204..08bd5d3b00f1 100644 --- a/drivers/net/wireless/ath/ath5k/ahb.c +++ b/drivers/net/wireless/ath/ath5k/ahb.c @@ -115,7 +115,6 @@ static int ath_ahb_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(&pdev->dev, "no IRQ resource found: %d\n", irq); ret = irq; goto err_iounmap; } diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 11ed30d6b595..c630343ca4f9 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -382,7 +382,6 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, mfilt[1] = multicast >> 32; /* Only deal with supported flags */ - changed_flags &= SUPPORTED_FIF_FLAGS; *new_flags &= SUPPORTED_FIF_FLAGS; /* If HW detects any phy or radar errors, leave those filters on. 
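Aside (illustration, not part of the diff): the ath12k wmi.c hunk above moves the extra-IE accounting to the end of the length calculation and only adds the padded IE TLV when it still fits within the WMI maximum message length, dropping oversized IEs with a warning instead of overflowing the command buffer. A minimal, generic sketch of that clamping pattern follows; the helper name, parameters and caller are assumptions for illustration, and only the roundup-to-u32 padding mirrors the hunk.

#include <linux/kernel.h>	/* roundup() */
#include <linux/types.h>

/* Account for an optional, padded IE blob only if it fits in the remaining
 * message budget; *padded_out is set to 0 when the blob must be skipped.
 */
static size_t example_account_extra_ies(size_t cur_len, size_t max_msg_len,
					size_t ie_len, size_t *padded_out)
{
	size_t padded = ie_len ? roundup(ie_len, sizeof(u32)) : 0;

	if (cur_len + padded > max_msg_len) {
		*padded_out = 0;	/* too large: send the command without IEs */
		return cur_len;
	}

	*padded_out = padded;
	return cur_len + padded;
}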
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 5797ef9c73d7..7ee4e1616f45 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -26,6 +26,7 @@ #include <linux/delay.h> #include <linux/slab.h> +#include <linux/sort.h> #include <asm/unaligned.h> #include "ath5k.h" @@ -1554,6 +1555,11 @@ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor) hist->nfval[hist->index] = noise_floor; } +static int cmps16(const void *a, const void *b) +{ + return *(s16 *)a - *(s16 *)b; +} + /** * ath5k_hw_get_median_noise_floor() - Get median NF from history buffer * @ah: The &struct ath5k_hw @@ -1561,25 +1567,16 @@ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor) static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah) { - s16 sort[ATH5K_NF_CAL_HIST_MAX]; - s16 tmp; - int i, j; - - memcpy(sort, ah->ah_nfcal_hist.nfval, sizeof(sort)); - for (i = 0; i < ATH5K_NF_CAL_HIST_MAX - 1; i++) { - for (j = 1; j < ATH5K_NF_CAL_HIST_MAX - i; j++) { - if (sort[j] > sort[j - 1]) { - tmp = sort[j]; - sort[j] = sort[j - 1]; - sort[j - 1] = tmp; - } - } - } + s16 sorted_nfval[ATH5K_NF_CAL_HIST_MAX]; + int i; + + memcpy(sorted_nfval, ah->ah_nfcal_hist.nfval, sizeof(sorted_nfval)); + sort(sorted_nfval, ATH5K_NF_CAL_HIST_MAX, sizeof(s16), cmps16, NULL); for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++) { ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, - "cal %d:%d\n", i, sort[i]); + "cal %d:%d\n", i, sorted_nfval[i]); } - return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2]; + return sorted_nfval[(ATH5K_NF_CAL_HIST_MAX - 1) / 2]; } /** diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 2cc23605c9fc..668fc07b3073 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -1129,7 +1129,6 @@ void ath_restart_work(struct ath_softc *sc); int ath9k_init_device(u16 devid, struct ath_softc *sc, const struct ath_bus_ops *bus_ops); void ath9k_deinit_device(struct ath_softc *sc); -void ath9k_reload_chainmask_settings(struct ath_softc *sc); u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate); void ath_start_rfkill_poll(struct ath_softc *sc); void ath9k_rfkill_poll_state(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index e055adfb5361..a5349c72c332 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -855,16 +855,11 @@ static ssize_t write_file_spectral_short_repeat(struct file *file, { struct ath_spec_scan_priv *spec_priv = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val > 1) return -EINVAL; @@ -903,17 +898,11 @@ static ssize_t write_file_spectral_count(struct file *file, { struct ath_spec_scan_priv *spec_priv = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ssize_t ret; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val > 255) return -EINVAL; @@ 
-951,16 +940,11 @@ static ssize_t write_file_spectral_period(struct file *file, { struct ath_spec_scan_priv *spec_priv = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val > 255) return -EINVAL; @@ -999,16 +983,11 @@ static ssize_t write_file_spectral_fft_period(struct file *file, { struct ath_spec_scan_priv *spec_priv = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val > 15) return -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h index f0ab6f9955e4..12204cf86fcf 100644 --- a/drivers/net/wireless/ath/ath9k/common.h +++ b/drivers/net/wireless/ath/ath9k/common.h @@ -85,8 +85,6 @@ struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw, struct ath_hw *ah, struct cfg80211_chan_def *chandef); int ath9k_cmn_count_streams(unsigned int chainmask, int max); -void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common, - enum ath_stomp_type stomp_type); void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow, u16 new_txpow, u16 *txpower); void ath9k_cmn_init_crypto(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index fb7a2952d0ce..9bc57c5a89bf 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -96,21 +96,16 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf, } static ssize_t write_file_debug(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); unsigned long mask; - char buf[32]; - ssize_t len; + ssize_t ret; - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoul(buf, 0, &mask)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &mask); + if (ret) + return ret; common->debug_mask = mask; return count; @@ -191,16 +186,11 @@ static ssize_t write_file_ani(struct file *file, struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); unsigned long ani; - char buf[32]; - ssize_t len; + ssize_t ret; - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoul(buf, 0, &ani)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &ani); + if (ret) + return ret; if (ani > 1) return -EINVAL; @@ -248,20 +238,15 @@ static ssize_t write_file_bt_ant_diversity(struct file *file, struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath9k_hw_capabilities *pCap = &sc->sc_ah->caps; unsigned long bt_ant_diversity; - char buf[32]; - ssize_t len; + ssize_t ret; - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ret = kstrtoul_from_user(user_buf, count, 0, &bt_ant_diversity); + if (ret) + return ret; if (!(pCap->hw_caps & 
ATH9K_HW_CAP_BT_ANT_DIV)) goto exit; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &bt_ant_diversity)) - return -EINVAL; - common->bt_ant_diversity = !!bt_ant_diversity; ath9k_ps_wakeup(sc); ath9k_hw_set_bt_ant_diversity(sc->sc_ah, common->bt_ant_diversity); @@ -792,16 +777,11 @@ static ssize_t write_file_reset(struct file *file, struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val != 1) return -EINVAL; @@ -886,16 +866,11 @@ static ssize_t write_file_regidx(struct file *file, const char __user *user_buf, { struct ath_softc *sc = file->private_data; unsigned long regidx; - char buf[32]; - ssize_t len; + ssize_t ret; - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoul(buf, 0, &regidx)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &regidx); + if (ret) + return ret; sc->debug.regidx = regidx; return count; @@ -931,16 +906,11 @@ static ssize_t write_file_regval(struct file *file, const char __user *user_buf, struct ath_softc *sc = file->private_data; struct ath_hw *ah = sc->sc_ah; unsigned long regval; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &regval)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &regval); + if (ret) + return ret; ath9k_ps_wakeup(sc); REG_WRITE_D(ah, sc->debug.regidx, regval); @@ -1128,16 +1098,11 @@ static ssize_t write_file_wow(struct file *file, const char __user *user_buf, { struct ath_softc *sc = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val != 1) return -EINVAL; @@ -1191,17 +1156,12 @@ static ssize_t write_file_tpc(struct file *file, const char __user *user_buf, struct ath_softc *sc = file->private_data; struct ath_hw *ah = sc->sc_ah; unsigned long val; - char buf[32]; - ssize_t len; + ssize_t ret; bool tpc_enabled; - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val > 1) return -EINVAL; @@ -1420,7 +1380,7 @@ int ath9k_init_debug(struct ath_hw *ah) sc->debug.debugfs_phy = debugfs_create_dir("ath9k", sc->hw->wiphy->debugfsdir); - if (!sc->debug.debugfs_phy) + if (IS_ERR(sc->debug.debugfs_phy)) return -ENOMEM; #ifdef CONFIG_ATH_DEBUG diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c index 2a79c2fa8415..8e18e9b4ef48 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_debug.c +++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c @@ -99,17 +99,11 @@ static ssize_t write_file_dfs(struct file *file, const char __user *user_buf, { struct ath_softc *sc = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if
(copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ssize_t ret; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val == DFS_STATS_RESET_MAGIC) memset(&sc->debug.stats.dfs_stats, 0, sizeof(sc->debug.stats.dfs_stats)); diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 3caa149b1013..fd5312c2a7e3 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -572,8 +572,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah, } for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { - bool isHt40CtlMode = - (pCtlMode[ctlMode] == CTL_2GHT40) ? true : false; + bool isHt40CtlMode = pCtlMode[ctlMode] == CTL_2GHT40; if (isHt40CtlMode) freq = centers.synth_center; diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 27ff1ca2631f..e5414435b141 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -1432,7 +1432,7 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface) { struct usb_device *udev = interface_to_usbdev(interface); struct hif_device_usb *hif_dev = usb_get_intfdata(interface); - bool unplugged = (udev->state == USB_STATE_NOTATTACHED) ? true : false; + bool unplugged = udev->state == USB_STATE_NOTATTACHED; if (!hif_dev) return; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c index b3ed65e5c4da..c549ff3abcdc 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c @@ -375,16 +375,11 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf, struct ath9k_htc_priv *priv = file->private_data; struct ath_common *common = ath9k_hw_common(priv->ah); unsigned long mask; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &mask)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &mask); + if (ret) + return ret; common->debug_mask = mask; return count; @@ -491,7 +486,7 @@ int ath9k_htc_init_debug(struct ath_hw *ah) priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME, priv->hw->wiphy->debugfsdir); - if (!priv->debug.debugfs_phy) + if (IS_ERR(priv->debug.debugfs_phy)) return -ENOMEM; ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 51766de5ec3b..9a9b5212051a 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -719,7 +719,7 @@ static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv, aggr.sta_index = ista->index; aggr.tidno = tid & 0xf; - aggr.aggr_enable = (action == IEEE80211_AMPDU_TX_START) ? 
true : false; + aggr.aggr_enable = action == IEEE80211_AMPDU_TX_START; WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr); if (ret) @@ -1264,7 +1264,6 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw, u32 rfilt; mutex_lock(&priv->mutex); - changed_flags &= SUPPORTED_FILTERS; *total_flags &= SUPPORTED_FILTERS; if (test_bit(ATH_OP_INVALID, &common->op_flags)) { diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6360d3356e25..1494feedb27d 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1571,7 +1571,6 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw, struct ath_chanctx *ctx; u32 rfilt; - changed_flags &= SUPPORTED_FILTERS; *total_flags &= SUPPORTED_FILTERS; spin_lock_bh(&sc->chan_lock); diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index 8a996ed9a3be..f2144fd39093 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ b/drivers/net/wireless/ath/ath9k/tx99.c @@ -172,9 +172,8 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); - char buf[32]; bool start; - ssize_t len; + ssize_t ret; int r; if (count < 1) @@ -183,14 +182,9 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, if (sc->cur_chan->nvifs > 1) return -EOPNOTSUPP; - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - - if (kstrtobool(buf, &start)) - return -EINVAL; + ret = kstrtobool_from_user(user_buf, count, &start); + if (ret) + return ret; mutex_lock(&sc->mutex); diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index d652c647d56b..1476b42b52a9 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -242,10 +242,10 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb, spin_unlock_irqrestore(&wmi->wmi_lock, flags); goto free_skb; } - spin_unlock_irqrestore(&wmi->wmi_lock, flags); /* WMI command response */ ath9k_wmi_rsp_callback(wmi, skb); + spin_unlock_irqrestore(&wmi->wmi_lock, flags); free_skb: kfree_skb(skb); @@ -283,7 +283,8 @@ int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi, static int ath9k_wmi_cmd_issue(struct wmi *wmi, struct sk_buff *skb, - enum wmi_cmd_id cmd, u16 len) + enum wmi_cmd_id cmd, u16 len, + u8 *rsp_buf, u32 rsp_len) { struct wmi_cmd_hdr *hdr; unsigned long flags; @@ -293,6 +294,11 @@ static int ath9k_wmi_cmd_issue(struct wmi *wmi, hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id); spin_lock_irqsave(&wmi->wmi_lock, flags); + + /* record the rsp buffer and length */ + wmi->cmd_rsp_buf = rsp_buf; + wmi->cmd_rsp_len = rsp_len; + wmi->last_seq_id = wmi->tx_seq_id; spin_unlock_irqrestore(&wmi->wmi_lock, flags); @@ -308,8 +314,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, struct ath_common *common = ath9k_hw_common(ah); u16 headroom = sizeof(struct htc_frame_hdr) + sizeof(struct wmi_cmd_hdr); + unsigned long time_left, flags; struct sk_buff *skb; - unsigned long time_left; int ret = 0; if (ah->ah_flags & AH_UNPLUGGED) @@ -333,11 +339,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, goto out; } - /* record the rsp buffer and length */ - wmi->cmd_rsp_buf = rsp_buf; - wmi->cmd_rsp_len = rsp_len; - - ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len); + ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len, 
rsp_buf, rsp_len); if (ret) goto out; @@ -345,7 +347,9 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, if (!time_left) { ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); + spin_lock_irqsave(&wmi->wmi_lock, flags); wmi->last_seq_id = 0; + spin_unlock_irqrestore(&wmi->wmi_lock, flags); mutex_unlock(&wmi->op_mutex); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c index b7b61d4f02ba..21a93fec284d 100644 --- a/drivers/net/wireless/ath/key.c +++ b/drivers/net/wireless/ath/key.c @@ -104,7 +104,7 @@ bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac) * Not setting this bit allows the hardware to use the key * for multicast frame decryption. */ - if (mac[0] & 0x01) + if (is_multicast_ether_addr(mac)) unicast_flag = 0; macLo = get_unaligned_le32(mac); diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index 009bca34ece3..447b51cff8f9 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -10,7 +10,7 @@ * Copyright (c) 2007 Kalle Valo <[email protected]> * Copyright (c) 2010 Sebastian Smolorz <[email protected]> * - * This file is part of the Berlios driver for WLAN USB devices based on the + * This file is part of the Berlios driver for USB WLAN devices based on the * Atmel AT76C503A/505/505A. * * Some iw_handler code was taken from airo.c, (C) 1999 Benjamin Reed @@ -143,7 +143,7 @@ static const struct usb_device_id dev_table[] = { { USB_DEVICE(0x0cde, 0x0001), USB_DEVICE_DATA(BOARD_503_ISL3861) }, /* Dynalink/Askey WLL013 (intersil) */ { USB_DEVICE(0x069a, 0x0320), USB_DEVICE_DATA(BOARD_503_ISL3861) }, - /* EZ connect 11Mpbs Wireless USB Adapter SMC2662W v1 */ + /* EZ connect 11Mpbs USB Wireless Adapter SMC2662W v1 */ { USB_DEVICE(0x0d5c, 0xa001), USB_DEVICE_DATA(BOARD_503_ISL3861) }, /* BenQ AWL300 */ { USB_DEVICE(0x04a5, 0x9000), USB_DEVICE_DATA(BOARD_503_ISL3861) }, @@ -195,7 +195,7 @@ static const struct usb_device_id dev_table[] = { { USB_DEVICE(0x04a5, 0x9001), USB_DEVICE_DATA(BOARD_503) }, /* 3Com 3CRSHEW696 */ { USB_DEVICE(0x0506, 0x0a01), USB_DEVICE_DATA(BOARD_503) }, - /* Siemens Santis ADSL WLAN USB adapter WLL 013 */ + /* Siemens Santis ADSL USB WLAN adapter WLL 013 */ { USB_DEVICE(0x0681, 0x001b), USB_DEVICE_DATA(BOARD_503) }, /* Belkin F5D6050, version 2 */ { USB_DEVICE(0x050d, 0x0050), USB_DEVICE_DATA(BOARD_503) }, @@ -238,7 +238,7 @@ static const struct usb_device_id dev_table[] = { { USB_DEVICE(0x1915, 0x2233), USB_DEVICE_DATA(BOARD_505_2958) }, /* Xterasys XN-2122B, IBlitzz BWU613B/BWU613SB */ { USB_DEVICE(0x12fd, 0x1001), USB_DEVICE_DATA(BOARD_505_2958) }, - /* Corega WLAN USB Stick 11 */ + /* Corega USB WLAN Stick 11 */ { USB_DEVICE(0x07aa, 0x7613), USB_DEVICE_DATA(BOARD_505_2958) }, /* Microstar MSI Box MS6978 */ { USB_DEVICE(0x0db0, 0x1020), USB_DEVICE_DATA(BOARD_505_2958) }, diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index dfe0f74369e6..820100cac491 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -1176,23 +1176,20 @@ static ssize_t debug_level_show(struct device_driver *d, char *buf) static ssize_t debug_level_store(struct device_driver *d, const char *buf, size_t count) { - char *p = (char *)buf; - u32 val; + unsigned long val; - if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { - p++; - if (p[0] == 'x' || p[0] == 
'X') - p++; - val = simple_strtoul(p, &p, 16); - } else - val = simple_strtoul(p, &p, 10); - if (p == buf) + int result = kstrtoul(buf, 0, &val); + + if (result == -EINVAL) printk(KERN_INFO DRV_NAME ": %s is not in hex or decimal form.\n", buf); + else if (result == -ERANGE) + printk(KERN_INFO DRV_NAME + ": %s has overflowed.\n", buf); else ipw_debug_level = val; - return strnlen(buf, count); + return count; } static DRIVER_ATTR_RW(debug_level); @@ -1461,25 +1458,13 @@ static ssize_t scan_age_store(struct device *d, struct device_attribute *attr, { struct ipw_priv *priv = dev_get_drvdata(d); struct net_device *dev = priv->net_dev; - char buffer[] = "00000000"; - unsigned long len = - (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1; - unsigned long val; - char *p = buffer; IPW_DEBUG_INFO("enter\n"); - strncpy(buffer, buf, len); - buffer[len] = 0; + unsigned long val; + int result = kstrtoul(buf, 0, &val); - if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { - p++; - if (p[0] == 'x' || p[0] == 'X') - p++; - val = simple_strtoul(p, &p, 16); - } else - val = simple_strtoul(p, &p, 10); - if (p == buffer) { + if (result == -EINVAL || result == -ERANGE) { IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name); } else { priv->ieee->scan_age = val; @@ -1487,7 +1472,7 @@ static ssize_t scan_age_store(struct device *d, struct device_attribute *attr, } IPW_DEBUG_INFO("exit\n"); - return len; + return count; } static DEVICE_ATTR_RW(scan_age); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index dfe8357036eb..b26f90e52256 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -69,6 +69,11 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = { DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), }, }, + { .ident = "RAZER", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Razer"), + }, + }, {} }; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index 8fef38139bf6..90ce8d9b6ad3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2022 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -30,6 +30,11 @@ enum iwl_debug_cmds { */ HOST_EVENT_CFG = 0x3, /** + * @INVALID_WR_PTR_CMD: invalid write pointer, set in the TFD + * when it's not in use + */ + INVALID_WR_PTR_CMD = 0x6, + /** * @DBGC_SUSPEND_RESUME: * DBGC suspend/resume commad. 
Uses a single dword as data: * 0 - resume DBGC recording @@ -377,7 +382,7 @@ struct iwl_buf_alloc_cmd { #define DRAM_INFO_SECOND_MAGIC_WORD 0x89ABCDEF /** - * struct iwL_dram_info - DRAM fragments allocation struct + * struct iwl_dram_info - DRAM fragments allocation struct * * Driver will fill in the first 1K(+) of the pointed DRAM fragment * diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c index 5876f917e536..8f107ceec407 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c @@ -182,8 +182,7 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu base = fwrt->fw->inst_errlog_ptr; } - if ((fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ && !base) || - (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ && base < 0x400000)) { + if (!base) { IWL_ERR(fwrt, "Not valid error log pointer 0x%08X for %s uCode\n", base, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 4e4a60ddf9b2..41ab5a6e2dd3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -565,6 +565,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define RX_QUEUE_MASK 255 #define RX_QUEUE_SIZE_LOG 8 +#define IWL_DEFAULT_RX_QUEUE 0 + /** * struct iwl_rb_status - reserve buffer status * host memory mapped FH registers diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 8c23f57f5c89..31176897b746 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -990,6 +990,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, case IWL_CFG_RF_TYPE_GF: case IWL_CFG_RF_TYPE_MR: case IWL_CFG_RF_TYPE_MS: + case IWL_CFG_RF_TYPE_FM: + case IWL_CFG_RF_TYPE_WH: iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; if (!is_ap) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index d02943d0ea62..3b6b0e03037f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2022 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1069,6 +1069,7 @@ struct iwl_trans_txqs { * @mbx_addr_1_step: step address data 1 * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*), * only valid for discrete (not integrated) NICs + * @invalid_tx_cmd: invalid TX command buffer */ struct iwl_trans { bool csme_own; @@ -1133,6 +1134,8 @@ struct iwl_trans { u8 pcie_link_speed; + struct iwl_dma_ptr invalid_tx_cmd; + /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ char trans_specific[] __aligned(sizeof(void *)); @@ -1490,7 +1493,7 @@ static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) { u32 value; - if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1))) + if (iwl_trans_read_mem(trans, addr, &value, 1)) return 0xa5a5a5a5; return value; diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c index 
54445f39fd55..1dd9106c6513 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/main.c +++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation */ #include <linux/etherdevice.h> @@ -774,9 +774,13 @@ static void iwl_mei_set_init_conf(struct iwl_mei *mei) iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr); } - ether_addr_copy(nic_info_msg.mac_address, iwl_mei_cache.mac_address); - ether_addr_copy(nic_info_msg.nvm_address, iwl_mei_cache.nvm_address); - iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr); + if (is_valid_ether_addr(iwl_mei_cache.mac_address)) { + ether_addr_copy(nic_info_msg.mac_address, + iwl_mei_cache.mac_address); + ether_addr_copy(nic_info_msg.nvm_address, + iwl_mei_cache.nvm_address); + iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr); + } iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr); } @@ -1532,7 +1536,7 @@ void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info, mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); - if (!mei && !mei->amt_enabled) + if (!mei || !mei->amt_enabled) goto out; iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); @@ -1561,7 +1565,7 @@ void iwl_mei_host_disassociated(void) mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); - if (!mei && !mei->amt_enabled) + if (!mei || !mei->amt_enabled) goto out; iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); @@ -1597,7 +1601,7 @@ void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill) mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); - if (!mei && !mei->amt_enabled) + if (!mei || !mei->amt_enabled) goto out; iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); @@ -1626,7 +1630,7 @@ void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address) mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); - if (!mei && !mei->amt_enabled) + if (!mei || !mei->amt_enabled) goto out; iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); @@ -1654,7 +1658,7 @@ void iwl_mei_set_country_code(u16 mcc) mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); - if (!mei && !mei->amt_enabled) + if (!mei || !mei->amt_enabled) goto out; iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); @@ -1680,7 +1684,7 @@ void iwl_mei_set_power_limit(const __le16 *power_limit) mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); - if (!mei && !mei->amt_enabled) + if (!mei || !mei->amt_enabled) goto out; memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table)); @@ -1832,7 +1836,9 @@ void iwl_mei_unregister_complete(void) struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); - iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_DOWN); + if (mei->amt_enabled) + iwl_mei_send_sap_msg(mei->cldev, + SAP_MSG_NOTIF_WIFIDR_DOWN); mei->got_ownership = false; } @@ -2070,33 +2076,29 @@ static void iwl_mei_remove(struct mei_cl_device *cldev) mutex_lock(&iwl_mei_mutex); - if (mei->amt_enabled) { - /* - * Tell CSME that we are going down so that it won't access the - * memory anymore, make sure this message goes through immediately. - */ - mei->csa_throttled = false; - iwl_mei_send_sap_msg(mei->cldev, - SAP_MSG_NOTIF_HOST_GOES_DOWN); - - for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) { - if (!iwl_mei_host_to_me_data_pending(mei)) - break; + /* Tell CSME that we are going down so that it won't access the + * memory anymore, make sure this message goes through immediately. 
+ */ + mei->csa_throttled = false; + iwl_mei_send_sap_msg(mei->cldev, + SAP_MSG_NOTIF_HOST_GOES_DOWN); - msleep(20); - } + for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) { + if (!iwl_mei_host_to_me_data_pending(mei)) + break; - /* - * If we couldn't make sure that CSME saw the HOST_GOES_DOWN - * message, it means that it will probably keep reading memory - * that we are going to unmap and free, expect IOMMU error - * messages. - */ - if (i == SEND_SAP_MAX_WAIT_ITERATION) - dev_err(&mei->cldev->dev, - "Couldn't get ACK from CSME on HOST_GOES_DOWN message\n"); + msleep(20); } + /* If we couldn't make sure that CSME saw the HOST_GOES_DOWN + * message, it means that it will probably keep reading memory + * that we are going to unmap and free, expect IOMMU error + * messages. + */ + if (i == SEND_SAP_MAX_WAIT_ITERATION) + dev_err(&mei->cldev->dev, + "Couldn't get ACK from CSME on HOST_GOES_DOWN message\n"); + mutex_unlock(&iwl_mei_mutex); /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index ce7905faa08f..5918c1f2b10c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -315,8 +315,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, STA_MMPDU_TXQ); /* Set this early since we need to have it for the check below */ - if (mvm->mld_api_is_used && - mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable && + !iwlwifi_mod_params.disable_11ax && + !iwlwifi_mod_params.disable_11be) hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; /* With MLD FW API, it tracks timing by itself, @@ -5604,9 +5605,6 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return; } - if (vif->type != NL80211_IFTYPE_STATION) - return; - /* Make sure we're done with the deferred traffic before flushing */ flush_work(&mvm->add_stream_wk); @@ -5630,9 +5628,6 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ap_sta_done = true; } - /* make sure only TDLS peers or the AP are flushed */ - WARN_ON_ONCE(sta != mvmvif->ap_sta && !sta->tdls); - if (drop) { if (iwl_mvm_flush_sta(mvm, mvmsta, false)) IWL_ERR(mvm, "flush request fail\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 73c1fb3c0c5e..bc83d2ba55c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1132,12 +1132,6 @@ static int get_crf_id(struct iwl_trans *iwl_trans) else sd_reg_ver_addr = SD_REG_VER; - if (!iwl_trans_grab_nic_access(iwl_trans)) { - IWL_ERR(iwl_trans, "Failed to grab nic access before reading crf id\n"); - ret = -EIO; - goto out; - } - /* Enable access to peripheral registers */ val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG); val |= ENABLE_WFPM; @@ -1157,9 +1151,6 @@ static int get_crf_id(struct iwl_trans *iwl_trans) iwl_trans->hw_crf_id, iwl_trans->hw_cnv_id, iwl_trans->hw_wfpm_id); - iwl_trans_release_nic_access(iwl_trans); - -out: return ret; } @@ -1351,6 +1342,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_free_trans; if (iwl_trans_grab_nic_access(iwl_trans)) { + get_crf_id(iwl_trans); /* all good */ iwl_trans_release_nic_access(iwl_trans); } else { @@ -1360,7 +1352,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } iwl_trans->hw_rf_id = 
iwl_read32(iwl_trans, CSR_HW_RF_ID); - get_crf_id(iwl_trans); /* * The RF_ID is set to zero in blank OTP so read version to diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 0adcf0e13e85..0f6493dab8cb 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2003-2015, 2018-2022 Intel Corporation + * Copyright (C) 2003-2015, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -315,7 +315,6 @@ enum iwl_pcie_imr_status { * @ucode_write_complete: indicates that the ucode has been copied. * @ucode_write_waitq: wait queue for uCode load * @cmd_queue - command queue number - * @def_rx_queue - default rx queue number * @rx_buf_size: Rx buffer size * @scd_set_active: should the transport configure the SCD for HCMD queue * @rx_page_order: page order for receive buffer size @@ -398,7 +397,6 @@ struct iwl_trans_pcie { wait_queue_head_t ucode_write_waitq; wait_queue_head_t sx_waitq; - u8 def_rx_queue; u8 n_no_reclaim_cmds; u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; u16 num_rx_bufs; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index f87b28edc267..4614acee9f7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1373,7 +1373,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, } } - if (rxq->id == trans_pcie->def_rx_queue) + if (rxq->id == IWL_DEFAULT_RX_QUEUE) iwl_op_mode_rx(trans->op_mode, &rxq->napi, &rxcb); else diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 3e988da44973..198933f853c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2018,6 +2018,30 @@ void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions memset(desc_dram, 0, sizeof(*desc_dram)); } +static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans) +{ + iwl_pcie_free_dma_ptr(trans, &trans->invalid_tx_cmd); +} + +static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans) +{ + struct iwl_cmd_header_wide bad_cmd = { + .cmd = INVALID_WR_PTR_CMD, + .group_id = DEBUG_GROUP, + .sequence = cpu_to_le16(0xffff), + .length = cpu_to_le16(0), + .version = 0, + }; + int ret; + + ret = iwl_pcie_alloc_dma_ptr(trans, &trans->invalid_tx_cmd, + sizeof(bad_cmd)); + if (ret) + return ret; + memcpy(trans->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd)); + return 0; +} + void iwl_trans_pcie_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2048,6 +2072,8 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iwl_pcie_free_ict(trans); } + iwl_pcie_free_invalid_tx_cmd(trans); + iwl_pcie_free_fw_monitor(trans); iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data, @@ -3617,8 +3643,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, PCIE_LINK_STATE_CLKPM); } - trans_pcie->def_rx_queue = 0; - pci_set_master(pdev); addr_size = trans->txqs.tfd.addr_size; @@ -3686,6 +3710,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, init_waitqueue_head(&trans_pcie->sx_waitq); + ret = iwl_pcie_alloc_invalid_tx_cmd(trans); + if (ret) + goto out_no_pci; if 
(trans_pcie->msix_enabled) { ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 790e5b124740..2f39b639c43f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -132,22 +132,6 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) } } -static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, - u8 idx, dma_addr_t addr, u16 len) -{ - struct iwl_tfd *tfd_fh = (void *)tfd; - struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; - - u16 hi_n_len = len << 4; - - put_unaligned_le32(addr, &tb->lo); - hi_n_len |= iwl_get_dma_hi_addr(addr); - - tb->hi_n_len = cpu_to_le16(hi_n_len); - - tfd_fh->num_tbs = idx + 1; -} - static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, dma_addr_t addr, u16 len, bool reset) { @@ -172,7 +156,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, "Unaligned address = %llx\n", (unsigned long long)addr)) return -EINVAL; - iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len); + iwl_pcie_gen1_tfd_set_tb(trans, tfd, num_tbs, addr, len); return num_tbs; } @@ -1203,7 +1187,11 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, group_id = cmd->hdr.group_id; cmd_id = WIDE_ID(group_id, cmd->hdr.cmd); - iwl_txq_gen1_tfd_unmap(trans, meta, txq, index); + if (trans->trans_cfg->gen2) + iwl_txq_gen2_tfd_unmap(trans, meta, + iwl_txq_get_tfd(trans, txq, index)); + else + iwl_txq_gen1_tfd_unmap(trans, meta, txq, index); /* Input error checking is done when commands are added to queue. */ if (meta->flags & CMD_WANT_SKB) { diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c index 5bb3cc3367c9..340240b8954f 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c @@ -10,6 +10,7 @@ #include "fw/api/commands.h" #include "fw/api/tx.h" #include "fw/api/datapath.h" +#include "fw/api/debug.h" #include "queue/tx.h" #include "iwl-fh.h" #include "iwl-scd.h" @@ -84,6 +85,50 @@ static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans, return le16_to_cpu(tfd->num_tbs) & 0x1f; } +int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, + dma_addr_t addr, u16 len) +{ + int idx = iwl_txq_gen2_get_num_tbs(trans, tfd); + struct iwl_tfh_tb *tb; + + /* Only WARN here so we know about the issue, but we mess up our + * unmap path because not every place currently checks for errors + * returned from this function - it can only return an error if + * there's no more space, and so when we know there is enough we + * don't always check ... 
+ */ + WARN(iwl_txq_crosses_4g_boundary(addr, len), + "possible DMA problem with iova:0x%llx, len:%d\n", + (unsigned long long)addr, len); + + if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) + return -EINVAL; + tb = &tfd->tbs[idx]; + + /* Each TFD can point to a maximum max_tbs Tx buffers */ + if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { + IWL_ERR(trans, "Error can not send more than %d chunks\n", + trans->txqs.tfd.max_tbs); + return -EINVAL; + } + + put_unaligned_le64(addr, &tb->addr); + tb->tb_len = cpu_to_le16(len); + + tfd->num_tbs = cpu_to_le16(idx + 1); + + return idx; +} + +static void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans, + struct iwl_tfh_tfd *tfd) +{ + tfd->num_tbs = 0; + + iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma, + trans->invalid_tx_cmd.size); +} + void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, struct iwl_tfh_tfd *tfd) { @@ -111,7 +156,7 @@ void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, DMA_TO_DEVICE); } - tfd->num_tbs = 0; + iwl_txq_set_tfd_invalid_gen2(trans, tfd); } void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) @@ -142,42 +187,6 @@ void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) } } -int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, - dma_addr_t addr, u16 len) -{ - int idx = iwl_txq_gen2_get_num_tbs(trans, tfd); - struct iwl_tfh_tb *tb; - - /* - * Only WARN here so we know about the issue, but we mess up our - * unmap path because not every place currently checks for errors - * returned from this function - it can only return an error if - * there's no more space, and so when we know there is enough we - * don't always check ... - */ - WARN(iwl_txq_crosses_4g_boundary(addr, len), - "possible DMA problem with iova:0x%llx, len:%d\n", - (unsigned long long)addr, len); - - if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) - return -EINVAL; - tb = &tfd->tbs[idx]; - - /* Each TFD can point to a maximum max_tbs Tx buffers */ - if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { - IWL_ERR(trans, "Error can not send more than %d chunks\n", - trans->txqs.tfd.max_tbs); - return -EINVAL; - } - - put_unaligned_le64(addr, &tb->addr); - tb->tb_len = cpu_to_le16(len); - - tfd->num_tbs = cpu_to_le16(idx + 1); - - return idx; -} - static struct page *get_workaround_page(struct iwl_trans *trans, struct sk_buff *skb) { @@ -1026,11 +1035,21 @@ static void iwl_txq_stuck_timer(struct timer_list *t) iwl_force_nmi(trans); } +static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans, + struct iwl_tfd *tfd) +{ + tfd->num_tbs = 0; + + iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma, + trans->invalid_tx_cmd.size); +} + int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { - size_t tfd_sz = trans->txqs.tfd.size * - trans->trans_cfg->base_params->max_tfd_queue_size; + size_t num_entries = trans->trans_cfg->gen2 ? 
+ slots_num : trans->trans_cfg->base_params->max_tfd_queue_size; + size_t tfd_sz; size_t tb0_buf_sz; int i; @@ -1040,8 +1059,7 @@ int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, if (WARN_ON(txq->entries || txq->tfds)) return -EINVAL; - if (trans->trans_cfg->gen2) - tfd_sz = trans->txqs.tfd.size * slots_num; + tfd_sz = trans->txqs.tfd.size * num_entries; timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); txq->trans = trans; @@ -1081,6 +1099,15 @@ int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, if (!txq->first_tb_bufs) goto err_free_tfds; + for (i = 0; i < num_entries; i++) { + void *tfd = iwl_txq_get_tfd(trans, txq, i); + + if (trans->trans_cfg->gen2) + iwl_txq_set_tfd_invalid_gen2(trans, tfd); + else + iwl_txq_set_tfd_invalid_gen1(trans, tfd); + } + return 0; err_free_tfds: dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); @@ -1340,22 +1367,12 @@ error: } static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans, - void *_tfd, u8 idx) + struct iwl_tfd *tfd, u8 idx) { - struct iwl_tfd *tfd; - struct iwl_tfd_tb *tb; + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; dma_addr_t addr; dma_addr_t hi_len; - if (trans->trans_cfg->gen2) { - struct iwl_tfh_tfd *tfh_tfd = _tfd; - struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx]; - - return (dma_addr_t)(le64_to_cpu(tfh_tb->addr)); - } - - tfd = _tfd; - tb = &tfd->tbs[idx]; addr = get_unaligned_le32(&tb->lo); if (sizeof(dma_addr_t) <= sizeof(u32)) @@ -1376,7 +1393,7 @@ void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, struct iwl_txq *txq, int index) { int i, num_tbs; - void *tfd = iwl_txq_get_tfd(trans, txq, index); + struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index); /* Sanity check on number of chunks */ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); @@ -1408,15 +1425,7 @@ void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, meta->tbs = 0; - if (trans->trans_cfg->gen2) { - struct iwl_tfh_tfd *tfd_fh = (void *)tfd; - - tfd_fh->num_tbs = 0; - } else { - struct iwl_tfd *tfd_fh = (void *)tfd; - - tfd_fh->num_tbs = 0; - } + iwl_txq_set_tfd_invalid_gen1(trans, tfd); } #define IWL_TX_CRC_SIZE 4 @@ -1520,7 +1529,12 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) /* We have only q->n_window txq->entries, but we use * TFD_QUEUE_SIZE_MAX tfds */ - iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); + if (trans->trans_cfg->gen2) + iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, + iwl_txq_get_tfd(trans, txq, rd_ptr)); + else + iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, + txq, rd_ptr); /* free SKB */ skb = txq->entries[idx].skb; diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h index 1e4a24ab9bab..b7d3808588bf 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h @@ -131,17 +131,8 @@ struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, struct sk_buff *skb); #endif static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans, - void *_tfd) + struct iwl_tfd *tfd) { - struct iwl_tfd *tfd; - - if (trans->trans_cfg->gen2) { - struct iwl_tfh_tfd *tfh_tfd = _tfd; - - return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f; - } - - tfd = (struct iwl_tfd *)_tfd; return tfd->num_tbs & 0x1f; } @@ -164,6 +155,21 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans, return le16_to_cpu(tb->hi_n_len) >> 4; } +static inline void iwl_pcie_gen1_tfd_set_tb(struct 
iwl_trans *trans, + struct iwl_tfd *tfd, + u8 idx, dma_addr_t addr, u16 len) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + u16 hi_n_len = len << 4; + + put_unaligned_le32(addr, &tb->lo); + hi_n_len |= iwl_get_dma_hi_addr(addr); + + tb->hi_n_len = cpu_to_le16(hi_n_len); + + tfd->num_tbs = idx + 1; +} + void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, struct iwl_txq *txq, int index); diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index dd31929261ab..866e0230df25 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -129,18 +129,18 @@ MODULE_FIRMWARE("orinoco_ezusb_fw"); #define USB_AVAYA8_VENDOR_ID 0x0D98 #define USB_AVAYAE_VENDOR_ID 0x0D9E -#define USB_AVAYA_WIRELESS_ID 0x0300 /* Avaya Wireless USB Card */ +#define USB_AVAYA_WIRELESS_ID 0x0300 /* Avaya USB Wireless Card */ #define USB_AGERE_VENDOR_ID 0x0D4E /* Agere Systems */ -#define USB_AGERE_MODEL0801_ID 0x1000 /* Wireless USB Card Model 0801 */ -#define USB_AGERE_MODEL0802_ID 0x1001 /* Wireless USB Card Model 0802 */ -#define USB_AGERE_REBRANDED_ID 0x047A /* WLAN USB Card */ +#define USB_AGERE_MODEL0801_ID 0x1000 /* USB Wireless Card Model 0801 */ +#define USB_AGERE_MODEL0802_ID 0x1001 /* USB Wireless Card Model 0802 */ +#define USB_AGERE_REBRANDED_ID 0x047A /* USB WLAN Card */ #define USB_ELSA_VENDOR_ID 0x05CC #define USB_ELSA_AIRLANCER_ID 0x3100 /* ELSA AirLancer USB-11 */ #define USB_LEGEND_VENDOR_ID 0x0E7C -#define USB_LEGEND_JOYNET_ID 0x0300 /* Joynet WLAN USB Card */ +#define USB_LEGEND_JOYNET_ID 0x0300 /* Joynet USB WLAN Card */ #define USB_SAMSUNG_VENDOR_ID 0x04E8 #define USB_SAMSUNG_SEW2001U1_ID 0x5002 /* Samsung SEW-2001u Card */ @@ -154,7 +154,7 @@ MODULE_FIRMWARE("orinoco_ezusb_fw"); #define USB_FUJITSU_E1100_ID 0x1002 /* connect2AIR WLAN E-1100 USB */ #define USB_2WIRE_VENDOR_ID 0x1630 -#define USB_2WIRE_WIRELESS_ID 0xff81 /* 2Wire Wireless USB adapter */ +#define USB_2WIRE_WIRELESS_ID 0xff81 /* 2Wire USB Wireless adapter */ #define EZUSB_REQUEST_FW_TRANS 0xA0 diff --git a/drivers/net/wireless/legacy/rndis_wlan.c b/drivers/net/wireless/legacy/rndis_wlan.c index 712038d46bdb..e7fea7ded6d5 100644 --- a/drivers/net/wireless/legacy/rndis_wlan.c +++ b/drivers/net/wireless/legacy/rndis_wlan.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Driver for RNDIS based wireless USB devices. + * Driver for RNDIS based USB wireless devices. 
 * * Copyright (C) 2007 by Bjorge Dijkstra <[email protected]> * Copyright (C) 2008-2009 by Jussi Kivilinna <[email protected]> diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index 52b18f4a774b..f9c9fec7c792 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -253,8 +253,11 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf, if (!p) return -ENOMEM; - if (!priv || !priv->hist_data) - return -EFAULT; + if (!priv || !priv->hist_data) { + ret = -EFAULT; + goto free_and_exit; + } + phist_data = priv->hist_data; p += sprintf(p, "\n" @@ -309,6 +312,8 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf, ret = simple_read_from_buffer(ubuf, count, ppos, (char *)page, (unsigned long)p - page); +free_and_exit: + free_page(page); return ret; } @@ -420,7 +425,10 @@ mwifiex_regrdwr_write(struct file *file, if (IS_ERR(buf)) return PTR_ERR(buf); - sscanf(buf, "%u %x %x", &reg_type, &reg_offset, &reg_value); + if (sscanf(buf, "%u %x %x", &reg_type, &reg_offset, &reg_value) != 3) { + ret = -EINVAL; + goto done; + } if (reg_type == 0 || reg_offset == 0) { ret = -EINVAL; @@ -686,7 +694,10 @@ mwifiex_rdeeprom_write(struct file *file, if (IS_ERR(buf)) return PTR_ERR(buf); - sscanf(buf, "%d %d", &offset, &bytes); + if (sscanf(buf, "%d %d", &offset, &bytes) != 2) { + ret = -EINVAL; + goto done; + } if (offset == -1 || bytes == -1) { ret = -EINVAL; diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h index 88648c062713..326ffb05d791 100644 --- a/drivers/net/wireless/marvell/mwifiex/decl.h +++ b/drivers/net/wireless/marvell/mwifiex/decl.h @@ -180,7 +180,6 @@ struct mwifiex_rxinfo { }; struct mwifiex_txinfo { - u32 status_code; u8 flags; u8 bss_num; u8 bss_type; diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index 7dddb4b5dea1..c9c58419c37b 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -282,14 +282,12 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) sleep_cfm_buf->action = cpu_to_le16(SLEEP_CONFIRM); sleep_cfm_buf->resp_ctrl = cpu_to_le16(RESP_NEEDED); - memset(&adapter->sleep_params, 0, sizeof(adapter->sleep_params)); memset(&adapter->sleep_period, 0, sizeof(adapter->sleep_period)); adapter->tx_lock_flag = false; adapter->null_pkt_interval = 0; adapter->fw_bands = 0; adapter->config_bands = 0; adapter->adhoc_start_band = 0; - adapter->scan_channels = NULL; adapter->fw_release_number = 0; adapter->fw_cap_info = 0; memset(&adapter->upld_buf, 0, sizeof(adapter->upld_buf)); diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index b95886e1413e..7bdec6c62248 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -444,15 +444,6 @@ struct mwifiex_current_bss_params { u8 data_rates[MWIFIEX_SUPPORTED_RATES]; }; -struct mwifiex_sleep_params { - u16 sp_error; - u16 sp_offset; - u16 sp_stable_time; - u8 sp_cal_control; - u8 sp_ext_sleep_clk; - u16 sp_reserved; -}; - struct mwifiex_sleep_period { u16 period; u16 reserved; @@ -681,7 +672,6 @@ struct mwifiex_private { struct cfg80211_chan_def dfs_chandef; struct workqueue_struct *dfs_cac_workqueue; struct delayed_work dfs_cac_work; - struct timer_list dfs_chan_switch_timer; struct workqueue_struct *dfs_chan_sw_workqueue; struct delayed_work dfs_chan_sw_work;
struct cfg80211_beacon_data beacon_after; @@ -888,8 +878,6 @@ struct mwifiex_adapter { struct work_struct main_work; struct workqueue_struct *rx_workqueue; struct work_struct rx_work; - struct workqueue_struct *dfs_workqueue; - struct work_struct dfs_work; bool rx_work_enabled; bool rx_processing; bool delay_main_work; @@ -953,9 +941,7 @@ struct mwifiex_adapter { u8 fw_bands; u8 adhoc_start_band; u8 config_bands; - struct mwifiex_chan_scan_param_set *scan_channels; u8 tx_lock_flag; - struct mwifiex_sleep_params sleep_params; struct mwifiex_sleep_period sleep_period; u16 ps_mode; u32 ps_state; @@ -1155,8 +1141,10 @@ int mwifiex_process_uap_event(struct mwifiex_private *); void mwifiex_delete_all_station_list(struct mwifiex_private *priv); void mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr); -void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb); -void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb); +void mwifiex_process_sta_txpd(struct mwifiex_private *priv, + struct sk_buff *skb); +void mwifiex_process_uap_txpd(struct mwifiex_private *priv, + struct sk_buff *skb); int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta, bool init); int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd, struct mwifiex_scan_cmd_config *scan_cfg); diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 644b1e134b01..72904c275461 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -612,7 +612,6 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, struct mwifiex_adapter *adapter = priv->adapter; int ret = 0; struct mwifiex_chan_scan_param_set *tmp_chan_list; - struct mwifiex_chan_scan_param_set *start_chan; u32 tlv_idx, rates_size, cmd_no; u32 total_scan_time; u32 done_early; @@ -643,7 +642,6 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, total_scan_time = 0; radio_type = 0; chan_tlv_out->header.len = 0; - start_chan = tmp_chan_list; done_early = false; /* @@ -750,8 +748,6 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, rates_size = mwifiex_append_rate_tlv(priv, scan_cfg_out, radio_type); - priv->adapter->scan_channels = start_chan; - /* Send the scan command to the firmware with the specified cfg */ if (priv->adapter->ext_scan) @@ -828,7 +824,6 @@ mwifiex_config_scan(struct mwifiex_private *priv, u8 ssid_filter; struct mwifiex_ie_types_htcap *ht_cap; struct mwifiex_ie_types_bss_mode *bss_mode; - const u8 zero_mac[6] = {0, 0, 0, 0, 0, 0}; /* The tlv_buf_len is calculated for each scan command. 
The TLVs added in this routine will be preserved since the routine that sends the @@ -966,7 +961,7 @@ mwifiex_config_scan(struct mwifiex_private *priv, sizeof(struct mwifiex_ie_types_scan_chan_gap); } - if (!ether_addr_equal(user_scan_in->random_mac, zero_mac)) { + if (!is_zero_ether_addr(user_scan_in->random_mac)) { random_mac_tlv = (void *)tlv_pos; random_mac_tlv->header.type = cpu_to_le16(TLV_TYPE_RANDOM_MAC); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index a24bd40dd41a..774858cfe86f 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -1083,17 +1083,17 @@ cont: "info: SDIO FUNC1 IO port: %#x\n", adapter->ioport); /* Set Host interrupt reset to read to clear */ - if (!mwifiex_read_reg(adapter, card->reg->host_int_rsr_reg, ®)) - mwifiex_write_reg(adapter, card->reg->host_int_rsr_reg, - reg | card->reg->sdio_int_mask); - else + if (mwifiex_read_reg(adapter, card->reg->host_int_rsr_reg, ®)) + return -1; + if (mwifiex_write_reg(adapter, card->reg->host_int_rsr_reg, + reg | card->reg->sdio_int_mask)) return -1; /* Dnld/Upld ready set to auto reset */ - if (!mwifiex_read_reg(adapter, card->reg->card_misc_cfg_reg, ®)) - mwifiex_write_reg(adapter, card->reg->card_misc_cfg_reg, - reg | AUTO_RE_ENABLE_INT); - else + if (mwifiex_read_reg(adapter, card->reg->card_misc_cfg_reg, ®)) + return -1; + if (mwifiex_write_reg(adapter, card->reg->card_misc_cfg_reg, + reg | AUTO_RE_ENABLE_INT)) return -1; return 0; @@ -1556,7 +1556,7 @@ done: } /* - * This function decode sdio aggreation pkt. + * This function decodes sdio aggregation pkt. * * Based on the data block size and pkt_len, * skb data will be decoded to few packets. @@ -2266,7 +2266,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf, card->mpa_tx.buf_len, mport); - /* Save the last multi port tx aggreagation info to debug log */ + /* Save the last multi port tx aggregation info to debug log. 
*/ index = adapter->dbg.last_sdio_mp_index; index = (index + 1) % MWIFIEX_DBG_SDIO_MP_NUM; adapter->dbg.last_sdio_mp_index = index; @@ -2525,7 +2525,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) mwifiex_read_reg(adapter, card->reg->host_int_status_reg, &sdio_ireg); /* Get SDIO ioport */ - mwifiex_init_sdio_ioport(adapter); + if (mwifiex_init_sdio_ioport(adapter)) + return -EIO; /* Initialize SDIO variables in card */ card->mp_rd_bitmap = 0; @@ -3141,7 +3142,8 @@ static void mwifiex_sdio_up_dev(struct mwifiex_adapter *adapter) */ mwifiex_read_reg(adapter, card->reg->host_int_status_reg, &sdio_ireg); - mwifiex_init_sdio_ioport(adapter); + if (mwifiex_init_sdio_ioport(adapter)) + dev_err(&card->func->dev, "error enabling SDIO port\n"); } static struct mwifiex_if_ops sdio_ops = { diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c index f2899d53a43f..65420ad67416 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c @@ -92,6 +92,7 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, skb->len, rx_pkt_off); priv->stats.rx_dropped++; dev_kfree_skb_any(skb); + return -1; } if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c index 13c0e67ededf..70c2790b8e35 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c @@ -29,8 +29,8 @@ * - Priority specific Tx control * - Flags */ -void *mwifiex_process_sta_txpd(struct mwifiex_private *priv, - struct sk_buff *skb) +void mwifiex_process_sta_txpd(struct mwifiex_private *priv, + struct sk_buff *skb) { struct mwifiex_adapter *adapter = priv->adapter; struct txpd *local_tx_pd; @@ -39,15 +39,6 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv, u16 pkt_type, pkt_offset; int hroom = adapter->intf_hdr_len; - if (!skb->len) { - mwifiex_dbg(adapter, ERROR, - "Tx: bad packet length: %d\n", skb->len); - tx_info->status_code = -1; - return skb->data; - } - - BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN); - pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? 
PKT_TYPE_MGMT : 0; pad = ((uintptr_t)skb->data - (sizeof(*local_tx_pd) + hroom)) & @@ -109,8 +100,6 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv, if (!local_tx_pd->tx_control) /* TxCtrl set by user or default */ local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl); - - return skb->data; } /* diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c index 54c204608dab..bd91678d26b4 100644 --- a/drivers/net/wireless/marvell/mwifiex/txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/txrx.c @@ -72,13 +72,18 @@ EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet); int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, struct mwifiex_tx_param *tx_param) { - int hroom, ret = -1; + int hroom, ret; struct mwifiex_adapter *adapter = priv->adapter; - u8 *head_ptr; struct txpd *local_tx_pd = NULL; struct mwifiex_sta_node *dest_node; struct ethhdr *hdr = (void *)skb->data; + if (unlikely(!skb->len || + skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)) { + ret = -EINVAL; + goto out; + } + hroom = adapter->intf_hdr_len; if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) { @@ -88,33 +93,31 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, dest_node->stats.tx_packets++; } - head_ptr = mwifiex_process_uap_txpd(priv, skb); + mwifiex_process_uap_txpd(priv, skb); } else { - head_ptr = mwifiex_process_sta_txpd(priv, skb); + mwifiex_process_sta_txpd(priv, skb); } - if ((adapter->data_sent || adapter->tx_lock_flag) && head_ptr) { + if (adapter->data_sent || adapter->tx_lock_flag) { skb_queue_tail(&adapter->tx_data_q, skb); atomic_inc(&adapter->tx_queued); return 0; } - if (head_ptr) { - if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) - local_tx_pd = (struct txpd *)(head_ptr + hroom); - if (adapter->iface_type == MWIFIEX_USB) { - ret = adapter->if_ops.host_to_card(adapter, - priv->usb_port, - skb, tx_param); - } else { - ret = adapter->if_ops.host_to_card(adapter, - MWIFIEX_TYPE_DATA, - skb, tx_param); - } + if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) + local_tx_pd = (struct txpd *)(skb->data + hroom); + if (adapter->iface_type == MWIFIEX_USB) { + ret = adapter->if_ops.host_to_card(adapter, + priv->usb_port, + skb, tx_param); + } else { + ret = adapter->if_ops.host_to_card(adapter, + MWIFIEX_TYPE_DATA, + skb, tx_param); } mwifiex_dbg_dump(adapter, DAT_D, "tx pkt:", skb->data, min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN)); - +out: switch (ret) { case -ENOSR: mwifiex_dbg(adapter, DATA, "data: -ENOSR is returned\n"); @@ -137,6 +140,11 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, break; case -EINPROGRESS: break; + case -EINVAL: + mwifiex_dbg(adapter, ERROR, + "malformed skb (length: %u, headroom: %u)\n", + skb->len, skb_headroom(skb)); + fallthrough; case 0: mwifiex_write_data_complete(adapter, skb, 0, ret); break; diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index 04ff051f5d18..318bd4ed8399 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -110,6 +110,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset)); priv->stats.rx_dropped++; dev_kfree_skb_any(skb); + return; } if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, @@ -252,7 +253,15 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv, if (is_multicast_ether_addr(ra)) { skb_uap = skb_copy(skb, GFP_ATOMIC); - 
mwifiex_uap_queue_bridged_pkt(priv, skb_uap); + if (likely(skb_uap)) { + mwifiex_uap_queue_bridged_pkt(priv, skb_uap); + } else { + mwifiex_dbg(adapter, ERROR, + "failed to copy skb for uAP\n"); + priv->stats.rx_dropped++; + dev_kfree_skb_any(skb); + return -1; + } } else { if (mwifiex_get_sta_entry(priv, ra)) { /* Requeue Intra-BSS packet */ @@ -461,8 +470,8 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, * - Priority specific Tx control * - Flags */ -void *mwifiex_process_uap_txpd(struct mwifiex_private *priv, - struct sk_buff *skb) +void mwifiex_process_uap_txpd(struct mwifiex_private *priv, + struct sk_buff *skb) { struct mwifiex_adapter *adapter = priv->adapter; struct uap_txpd *txpd; @@ -471,15 +480,6 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv, u16 pkt_type, pkt_offset; int hroom = adapter->intf_hdr_len; - if (!skb->len) { - mwifiex_dbg(adapter, ERROR, - "Tx: bad packet length: %d\n", skb->len); - tx_info->status_code = -1; - return skb->data; - } - - BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN); - pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0; pad = ((uintptr_t)skb->data - (sizeof(*txpd) + hroom)) & @@ -527,6 +527,4 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv, if (!txpd->tx_control) /* TxCtrl set by user or default */ txpd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl); - - return skb->data; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig index 6a0080f1d91c..dd16acfd9735 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig @@ -5,7 +5,7 @@ config MT7603E depends on MAC80211 depends on PCI help - This adds support for MT7603E wireless PCIe devices and the WLAN core + This adds support for MT7603E PCIe wireless devices and the WLAN core on MT7628/MT7688 SoC devices. This family supports IEEE 802.11n 2x2 to 300Mbps PHY rate diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig index 30fba36ff46b..1ab1439143f4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig @@ -11,7 +11,7 @@ config MT7615E depends on MAC80211 depends on PCI help - This adds support for MT7615-based wireless PCIe devices, + This adds support for MT7615-based PCIe wireless devices, which support concurrent dual-band operation at both 5GHz and 2.4GHz, IEEE 802.11ac 4x4:4SS 1733Mbps PHY rate, wave2 MU-MIMO up to 4 users/group and 160MHz channels. diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig index 7c88ed8b8f1e..3ed888782a70 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig @@ -10,7 +10,7 @@ config MT76x0U depends on MAC80211 depends on USB help - This adds support for MT7610U-based wireless USB 2.0 dongles, + This adds support for MT7610U-based USB 2.0 wireless dongles, which comply with IEEE 802.11ac standards and support 1x1 433Mbps PHY rate. @@ -22,7 +22,7 @@ config MT76x0E depends on MAC80211 depends on PCI help - This adds support for MT7610/MT7630-based wireless PCIe devices, + This adds support for MT7610/MT7630-based PCIe wireless devices, which comply with IEEE 802.11ac standards and support 1x1 433Mbps PHY rate. 
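Looking back at the mwifiex txrx rework a few hunks above: mwifiex_process_tx() now rejects zero-length or headroom-starved skbs itself and completes them with -EINVAL, so the per-role txpd builders no longer return a sentinel pointer and the BUG_ON() on missing headroom is gone. A rough sketch of that shape, using hypothetical names rather than the real driver API:

#include <linux/errno.h>
#include <linux/skbuff.h>

#define DEMO_MIN_HEADROOM 36	/* assumed minimum for descriptor + bus header */

struct demo_dev;
static void demo_build_txpd(struct demo_dev *dev, struct sk_buff *skb);
static int demo_host_to_card(struct demo_dev *dev, struct sk_buff *skb);
static void demo_tx_complete(struct demo_dev *dev, struct sk_buff *skb, int err);

static int demo_process_tx(struct demo_dev *dev, struct sk_buff *skb)
{
	int ret;

	/* One up-front sanity check instead of checks in every txpd helper. */
	if (unlikely(!skb->len ||
		     skb_headroom(skb) < DEMO_MIN_HEADROOM)) {
		ret = -EINVAL;
		goto out;
	}

	demo_build_txpd(dev, skb);	/* void now, cannot fail */
	ret = demo_host_to_card(dev, skb);
out:
	if (ret == -EINVAL || ret == 0)
		demo_tx_complete(dev, skb, ret);	/* frees the skb */
	return ret;
}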
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig index 5fd4973e32df..482a32b70ddf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig @@ -9,7 +9,7 @@ config MT76x2E depends on MAC80211 depends on PCI help - This adds support for MT7612/MT7602/MT7662-based wireless PCIe + This adds support for MT7612/MT7602/MT7662-based PCIe wireless devices, which comply with IEEE 802.11ac standards and support 2SS to 866Mbit/s PHY rate. @@ -22,7 +22,7 @@ config MT76x2U depends on MAC80211 depends on USB help - This adds support for MT7612U-based wireless USB 3.0 dongles, + This adds support for MT7612U-based USB 3.0 wireless dongles, which comply with IEEE 802.11ac standards and support 2SS to 866Mbit/s PHY rate. diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig index 896ec38c23d9..193112c49bd1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig @@ -7,7 +7,7 @@ config MT7915E depends on PCI select RELAY help - This adds support for MT7915-based wireless PCIe devices, + This adds support for MT7915-based PCIe wireless devices, which support concurrent dual-band operation at both 5GHz and 2.4GHz IEEE 802.11ax 4x4:4SS 1024-QAM, 160MHz channels, OFDMA, spatial reuse and dual carrier modulation. diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig index 1afa2f662e47..bb44d4a5e2dc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig @@ -7,7 +7,7 @@ config MT7996E depends on MAC80211 depends on PCI help - This adds support for MT7996-based wireless PCIe devices, + This adds support for MT7996-based PCIe wireless devices, which support concurrent tri-band operation at 6GHz, 5GHz, and 2.4GHz IEEE 802.11be 4x4:4SS 4096-QAM, 320MHz channels. diff --git a/drivers/net/wireless/mediatek/mt7601u/Kconfig b/drivers/net/wireless/mediatek/mt7601u/Kconfig index 4a8b96280670..4880fc053d9d 100644 --- a/drivers/net/wireless/mediatek/mt7601u/Kconfig +++ b/drivers/net/wireless/mediatek/mt7601u/Kconfig @@ -4,4 +4,4 @@ config MT7601U depends on MAC80211 depends on USB help - This adds support for MT7601U-based wireless USB dongles. + This adds support for MT7601U-based USB wireless dongles. 
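One small idiom worth noting from the earlier mwifiex scan.c hunk (and the rtlwifi core.c hunk further down): open-coded MAC-address tests and copies are replaced with the <linux/etherdevice.h> helpers, so no local zero_mac[] array or memcpy() of a broadcast address is needed. A tiny illustration, as a hypothetical function rather than driver code:

#include <linux/etherdevice.h>

static void demo_pick_addr(u8 *dst, const u8 *requested)
{
	/* An all-zero request means "no specific address was supplied". */
	if (is_zero_ether_addr(requested))
		eth_broadcast_addr(dst);		/* dst = ff:ff:ff:ff:ff:ff */
	else
		ether_addr_copy(dst, requested);	/* both buffers 2-byte aligned */
}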
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.h b/drivers/net/wireless/microchip/wilc1000/cfg80211.h index 37b294cb3b37..8c65951cfaf9 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.h +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.h @@ -8,15 +8,12 @@ #define WILC_CFG80211_H #include "netdev.h" -struct wiphy *wilc_cfg_alloc(void); int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type, const struct wilc_hif_func *ops); struct wilc *wilc_create_wiphy(struct device *dev); void wilc_deinit_host_int(struct net_device *net); int wilc_init_host_int(struct net_device *net); void wilc_wfi_monitor_rx(struct net_device *mon_dev, u8 *buff, u32 size); -struct wilc_vif *wilc_netdev_interface(struct wilc *wl, const char *name, - enum nl80211_iftype type); void wilc_wfi_deinit_mon_interface(struct wilc *wl, bool rtnl_locked); struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl, const char *name, @@ -24,7 +21,6 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl, void wilc_update_mgmt_frame_registrations(struct wiphy *wiphy, struct wireless_dev *wdev, struct mgmt_frame_regs *upd); -struct wilc_vif *wilc_get_interface(struct wilc *wl); struct wilc_vif *wilc_get_wl_to_vif(struct wilc *wl); void wlan_deinit_locks(struct wilc *wilc); #endif diff --git a/drivers/net/wireless/purelifi/plfxlc/Kconfig b/drivers/net/wireless/purelifi/plfxlc/Kconfig index 4e0be27a5e0e..dd5fca480d7e 100644 --- a/drivers/net/wireless/purelifi/plfxlc/Kconfig +++ b/drivers/net/wireless/purelifi/plfxlc/Kconfig @@ -3,7 +3,7 @@ config PLFXLC tristate "pureLiFi X, XL, XC device support" depends on CFG80211 && MAC80211 && USB help - This option adds support for pureLiFi LiFi wireless USB + This option adds support for pureLiFi LiFi USB wireless adapters. The pureLiFi X, XL, XC USB devices are based on 802.11 OFDM PHY but uses light as the transmission medium. The driver supports common 802.11 encryption/authentication diff --git a/drivers/net/wireless/ralink/rt2x00/Kconfig b/drivers/net/wireless/ralink/rt2x00/Kconfig index dcccc290a7f5..d1fd66d44a7e 100644 --- a/drivers/net/wireless/ralink/rt2x00/Kconfig +++ b/drivers/net/wireless/ralink/rt2x00/Kconfig @@ -170,7 +170,7 @@ config RT2800USB_RT35XX config RT2800USB_RT3573 bool "rt2800usb - Include support for rt3573 devices (EXPERIMENTAL)" help - This enables support for RT3573 chipset based wireless USB devices + This enables support for RT3573 chipset based USB wireless devices in the rt2800usb driver. 
config RT2800USB_RT53XX diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 1226a883cd67..e65cc00fa17c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -3865,28 +3865,51 @@ static void rt2800_config_channel_rf7620(struct rt2x00_dev *rt2x00dev, } } -static void rt2800_config_alc(struct rt2x00_dev *rt2x00dev, - struct ieee80211_channel *chan, - int power_level) { - u16 eeprom, target_power, max_power; +static void rt2800_config_alc_rt6352(struct rt2x00_dev *rt2x00dev, + struct ieee80211_channel *chan, + int power_level) +{ + int cur_channel = rt2x00dev->rf_channel; + u16 eeprom, chan_power, rate_power, target_power; + u16 tx_power[2]; + s8 *power_group[2]; u32 mac_sys_ctrl; - u32 reg; + u32 cnt, reg; u8 bbp; - /* hardware unit is 0.5dBm, limited to 23.5dBm */ - power_level *= 2; - if (power_level > 0x2f) - power_level = 0x2f; + if (WARN_ON(cur_channel < 1 || cur_channel > 14)) + return; + + /* get per chain power, 2 chains in total, unit is 0.5dBm */ + power_level = (power_level - 3) * 2; - max_power = chan->max_power * 2; - if (max_power > 0x2f) - max_power = 0x2f; + /* We can't get the accurate TX power. Based on some tests, the real + * TX power is approximately equal to channel_power + (max)rate_power. + * Usually max rate_power is the gain of the OFDM 6M rate. The antenna + * gain and externel PA gain are not included as we are unable to + * obtain these values. + */ + rate_power = rt2800_eeprom_read_from_array(rt2x00dev, + EEPROM_TXPOWER_BYRATE, 1); + rate_power &= 0x3f; + power_level -= rate_power; + if (power_level < 1) + power_level = 1; + + power_group[0] = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); + power_group[1] = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); + for (cnt = 0; cnt < 2; cnt++) { + chan_power = power_group[cnt][cur_channel - 1]; + if (chan_power >= 0x20 || chan_power == 0) + chan_power = 0x10; + tx_power[cnt] = power_level < chan_power ? power_level : chan_power; + } reg = rt2800_register_read(rt2x00dev, TX_ALC_CFG_0); - rt2x00_set_field32(®, TX_ALC_CFG_0_CH_INIT_0, power_level); - rt2x00_set_field32(®, TX_ALC_CFG_0_CH_INIT_1, power_level); - rt2x00_set_field32(®, TX_ALC_CFG_0_LIMIT_0, max_power); - rt2x00_set_field32(®, TX_ALC_CFG_0_LIMIT_1, max_power); + rt2x00_set_field32(®, TX_ALC_CFG_0_CH_INIT_0, tx_power[0]); + rt2x00_set_field32(®, TX_ALC_CFG_0_CH_INIT_1, tx_power[1]); + rt2x00_set_field32(®, TX_ALC_CFG_0_LIMIT_0, 0x2f); + rt2x00_set_field32(®, TX_ALC_CFG_0_LIMIT_1, 0x2f); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_INTERNAL_TX_ALC)) { @@ -5268,7 +5291,7 @@ static void rt2800_config_txpower_rt6352(struct rt2x00_dev *rt2x00dev, rt2x00_set_field32(&pwreg, TX_PWR_CFG_9B_STBC_MCS7, t); rt2800_register_write(rt2x00dev, TX_PWR_CFG_9, pwreg); - rt2800_config_alc(rt2x00dev, chan, power_level); + rt2800_config_alc_rt6352(rt2x00dev, chan, power_level); /* TODO: temperature compensation code! 
*/ } @@ -8561,7 +8584,7 @@ static void rt2800_r_calibration(struct rt2x00_dev *rt2x00dev) rt2x00_warn(rt2x00dev, "Wait MAC Tx Status to MAX !!!\n"); maccfg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); - maccfg &= (~0x04); + maccfg &= (~0x08); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, maccfg); if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_RX))) diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 4fb16f5f6f83..3835b639d453 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -1656,7 +1656,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, memcpy(rtlpriv->sec.key_buf[key_idx], key->key, key->keylen); rtlpriv->sec.key_len[key_idx] = key->keylen; - memcpy(mac_addr, bcast_addr, ETH_ALEN); + eth_broadcast_addr(mac_addr); } else { /* pairwise key */ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "set pairwise key\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c index 36c00b89ccae..50b79cf8fb3c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c @@ -215,31 +215,3 @@ int rtl8723_download_fw(struct ieee80211_hw *hw, } EXPORT_SYMBOL_GPL(rtl8723_download_fw); -bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw, - struct sk_buff *skb) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - struct rtl8192_tx_ring *ring; - struct rtl_tx_desc *pdesc; - struct sk_buff *pskb = NULL; - unsigned long flags; - - ring = &rtlpci->tx_ring[BEACON_QUEUE]; - - pskb = __skb_dequeue(&ring->queue); - kfree_skb(pskb); - spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); - - pdesc = &ring->desc[0]; - rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb); - - __skb_queue_tail(&ring->queue, skb); - - spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); - - rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); - - return true; -} -EXPORT_SYMBOL_GPL(rtl8723_cmd_send_packet); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h index b527fcbbdf08..c8e04f9722ae 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h @@ -66,7 +66,5 @@ void rtl8723_write_fw(struct ieee80211_hw *hw, u8 *buffer, u32 size, u8 max_page); int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be, int count); int rtl8723_download_fw(struct ieee80211_hw *hw, bool is_8723be, int count); -bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw, - struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 44a8fff34cdd..2bfc0e822b8d 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -1828,5 +1828,5 @@ void rtw_pci_shutdown(struct pci_dev *pdev) EXPORT_SYMBOL(rtw_pci_shutdown); MODULE_AUTHOR("Realtek Corporation"); -MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver"); +MODULE_DESCRIPTION("Realtek PCI 802.11ac wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c index 91ed6d10ba8a..d879d7e3dc81 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.c +++ 
b/drivers/net/wireless/realtek/rtw88/usb.c @@ -826,7 +826,7 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) ret = rtw_core_init(rtwdev); if (ret) - goto err_release_hw; + goto err_free_rx_bufs; ret = rtw_usb_intf_init(rtwdev, intf); if (ret) { @@ -872,6 +872,9 @@ err_destroy_usb: err_deinit_core: rtw_core_deinit(rtwdev); +err_free_rx_bufs: + rtw_usb_free_rx_bufs(rtwusb); + err_release_hw: ieee80211_free_hw(hw); @@ -909,5 +912,5 @@ void rtw_usb_disconnect(struct usb_interface *intf) EXPORT_SYMBOL(rtw_usb_disconnect); MODULE_AUTHOR("Realtek Corporation"); -MODULE_DESCRIPTION("Realtek 802.11ac wireless USB driver"); +MODULE_DESCRIPTION("Realtek USB 802.11ac wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c index 4663db4ce2f6..e1bc3606f9ae 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.c +++ b/drivers/net/wireless/realtek/rtw89/chan.c @@ -4,6 +4,8 @@ #include "chan.h" #include "debug.h" +#include "fw.h" +#include "ps.h" #include "util.h" static enum rtw89_subband rtw89_get_subband_type(enum rtw89_band band, @@ -116,6 +118,7 @@ bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev, rcd->prev_primary_channel = chan->primary_channel; rcd->prev_band_type = chan->band_type; band_changed = new->band_type != chan->band_type; + rcd->band_changed = band_changed; *chan = *new; return band_changed; @@ -193,8 +196,12 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev) enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev) { struct rtw89_hal *hal = &rtwdev->hal; + const struct cfg80211_chan_def *chandef; enum rtw89_entity_mode mode; + struct rtw89_chan chan; u8 weight; + u8 last; + u8 idx; weight = bitmap_weight(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY); switch (weight) { @@ -206,14 +213,121 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev) rtw89_config_default_chandef(rtwdev); fallthrough; case 1: + last = RTW89_SUB_ENTITY_0; mode = RTW89_ENTITY_MODE_SCC; break; + case 2: + last = RTW89_SUB_ENTITY_1; + mode = rtw89_get_entity_mode(rtwdev); + if (mode == RTW89_ENTITY_MODE_MCC) + break; + + mode = RTW89_ENTITY_MODE_MCC_PREPARE; + break; + } + + for (idx = 0; idx <= last; idx++) { + chandef = rtw89_chandef_get(rtwdev, idx); + rtw89_get_channel_params(chandef, &chan); + if (chan.channel == 0) { + WARN(1, "Invalid channel on chanctx %d\n", idx); + return RTW89_ENTITY_MODE_INVALID; + } + + rtw89_assign_entity_chan(rtwdev, idx, &chan); } rtw89_set_entity_mode(rtwdev, mode); return mode; } +static void rtw89_chanctx_notify(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_state state) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_chanctx_listener *listener = chip->chanctx_listener; + int i; + + if (!listener) + return; + + for (i = 0; i < NUM_OF_RTW89_CHANCTX_CALLBACKS; i++) { + if (!listener->callbacks[i]) + continue; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "chanctx notify listener: cb %d, state %d\n", + i, state); + + listener->callbacks[i](rtwdev, state); + } +} + +static int rtw89_mcc_start(struct rtw89_dev *rtwdev) +{ + if (rtwdev->scanning) + rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif); + + rtw89_leave_lps(rtwdev); + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC start\n"); + rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_START); + return 0; +} + +static void rtw89_mcc_stop(struct rtw89_dev *rtwdev) +{ + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop\n"); + rtw89_chanctx_notify(rtwdev, 
RTW89_CHANCTX_STATE_MCC_STOP); +} + +void rtw89_chanctx_work(struct work_struct *work) +{ + struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, + chanctx_work.work); + enum rtw89_entity_mode mode; + int ret; + + mutex_lock(&rtwdev->mutex); + + mode = rtw89_get_entity_mode(rtwdev); + switch (mode) { + case RTW89_ENTITY_MODE_MCC_PREPARE: + rtw89_set_entity_mode(rtwdev, RTW89_ENTITY_MODE_MCC); + rtw89_set_channel(rtwdev); + + ret = rtw89_mcc_start(rtwdev); + if (ret) + rtw89_warn(rtwdev, "failed to start MCC: %d\n", ret); + break; + default: + break; + } + + mutex_unlock(&rtwdev->mutex); +} + +void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev) +{ + enum rtw89_entity_mode mode; + u32 delay; + + mode = rtw89_get_entity_mode(rtwdev); + switch (mode) { + default: + return; + case RTW89_ENTITY_MODE_MCC_PREPARE: + delay = ieee80211_tu_to_usec(RTW89_CHANCTX_TIME_MCC_PREPARE); + break; + } + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "queue chanctx work for mode %d with delay %d us\n", + mode, delay); + ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->chanctx_work, + usecs_to_jiffies(delay)); +} + int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev, struct ieee80211_chanctx_conf *ctx) { @@ -238,6 +352,7 @@ void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev, { struct rtw89_hal *hal = &rtwdev->hal; struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv; + enum rtw89_entity_mode mode; struct rtw89_vif *rtwvif; u8 drop, roll; @@ -267,6 +382,15 @@ void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev, drop = roll; out: + mode = rtw89_get_entity_mode(rtwdev); + switch (mode) { + case RTW89_ENTITY_MODE_MCC: + rtw89_mcc_stop(rtwdev); + break; + default: + break; + } + clear_bit(drop, hal->entity_map); rtw89_set_channel(rtwdev); } diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h index bdf369db5041..448e6c5df9f1 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.h +++ b/drivers/net/wireless/realtek/rtw89/chan.h @@ -7,6 +7,9 @@ #include "core.h" +/* The dwell time in TU before doing rtw89_chanctx_work(). 
*/ +#define RTW89_CHANCTX_TIME_MCC_PREPARE 100 + static inline bool rtw89_get_entity_state(struct rtw89_dev *rtwdev) { struct rtw89_hal *hal = &rtwdev->hal; @@ -50,6 +53,8 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev, const struct cfg80211_chan_def *chandef); void rtw89_entity_init(struct rtw89_dev *rtwdev); enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev); +void rtw89_chanctx_work(struct work_struct *work); +void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev); int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev, struct ieee80211_chanctx_conf *ctx); void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c index bda0e1e99a8c..4ba8b3df70ae 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.c +++ b/drivers/net/wireless/realtek/rtw89/coex.c @@ -5666,7 +5666,8 @@ enum btc_wl_mode { void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, struct rtw89_sta *rtwsta, enum btc_role_state state) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta); struct rtw89_btc *btc = &rtwdev->btc; diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h index f16421cb30ef..e76153709793 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.h +++ b/drivers/net/wireless/realtek/rtw89/coex.h @@ -193,4 +193,13 @@ static inline u8 rtw89_btc_path_phymap(struct rtw89_dev *rtwdev, return rtw89_btc_phymap(rtwdev, phy_idx, BIT(path)); } +/* return bt req len in TU */ +static inline u16 rtw89_coex_query_bt_req_len(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + return btc->bt_req_len; +} + #endif diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index 62c21c09cf92..133bf289bacb 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -256,8 +256,8 @@ void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef) NL80211_CHAN_NO_HT); } -static void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef, - struct rtw89_chan *chan) +void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef, + struct rtw89_chan *chan) { struct ieee80211_channel *channel = chandef->chan; enum nl80211_chan_width width = chandef->width; @@ -318,9 +318,11 @@ static void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef, void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev) { + struct rtw89_hal *hal = &rtwdev->hal; const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_chan *chan; enum rtw89_sub_entity_idx sub_entity_idx; + enum rtw89_sub_entity_idx roc_idx; enum rtw89_phy_idx phy_idx; enum rtw89_entity_mode mode; bool entity_active; @@ -330,10 +332,23 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev) return; mode = rtw89_get_entity_mode(rtwdev); - if (WARN(mode != RTW89_ENTITY_MODE_SCC, "Invalid ent mode: %d\n", mode)) + switch (mode) { + case RTW89_ENTITY_MODE_SCC: + case RTW89_ENTITY_MODE_MCC: + sub_entity_idx = RTW89_SUB_ENTITY_0; + break; + case RTW89_ENTITY_MODE_MCC_PREPARE: + sub_entity_idx = RTW89_SUB_ENTITY_1; + break; + default: + WARN(1, "Invalid ent mode: %d\n", mode); return; + } + + roc_idx = 
atomic_read(&hal->roc_entity_idx); + if (roc_idx != RTW89_SUB_ENTITY_IDLE) + sub_entity_idx = roc_idx; - sub_entity_idx = RTW89_SUB_ENTITY_0; phy_idx = RTW89_PHY_0; chan = rtw89_chan_get(rtwdev, sub_entity_idx); chip->ops->set_txpwr(rtwdev, chan, phy_idx); @@ -341,43 +356,54 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev) void rtw89_set_channel(struct rtw89_dev *rtwdev) { + struct rtw89_hal *hal = &rtwdev->hal; const struct rtw89_chip_info *chip = rtwdev->chip; - const struct cfg80211_chan_def *chandef; + const struct rtw89_chan_rcd *chan_rcd; + const struct rtw89_chan *chan; enum rtw89_sub_entity_idx sub_entity_idx; + enum rtw89_sub_entity_idx roc_idx; enum rtw89_mac_idx mac_idx; enum rtw89_phy_idx phy_idx; - struct rtw89_chan chan; struct rtw89_channel_help_params bak; enum rtw89_entity_mode mode; - bool band_changed; bool entity_active; entity_active = rtw89_get_entity_state(rtwdev); mode = rtw89_entity_recalc(rtwdev); - if (WARN(mode != RTW89_ENTITY_MODE_SCC, "Invalid ent mode: %d\n", mode)) + switch (mode) { + case RTW89_ENTITY_MODE_SCC: + case RTW89_ENTITY_MODE_MCC: + sub_entity_idx = RTW89_SUB_ENTITY_0; + break; + case RTW89_ENTITY_MODE_MCC_PREPARE: + sub_entity_idx = RTW89_SUB_ENTITY_1; + break; + default: + WARN(1, "Invalid ent mode: %d\n", mode); return; + } + + roc_idx = atomic_read(&hal->roc_entity_idx); + if (roc_idx != RTW89_SUB_ENTITY_IDLE) + sub_entity_idx = roc_idx; - sub_entity_idx = RTW89_SUB_ENTITY_0; mac_idx = RTW89_MAC_0; phy_idx = RTW89_PHY_0; - chandef = rtw89_chandef_get(rtwdev, sub_entity_idx); - rtw89_get_channel_params(chandef, &chan); - if (WARN(chan.channel == 0, "Invalid channel\n")) - return; - band_changed = rtw89_assign_entity_chan(rtwdev, sub_entity_idx, &chan); + chan = rtw89_chan_get(rtwdev, sub_entity_idx); + chan_rcd = rtw89_chan_rcd_get(rtwdev, sub_entity_idx); - rtw89_chip_set_channel_prepare(rtwdev, &bak, &chan, mac_idx, phy_idx); + rtw89_chip_set_channel_prepare(rtwdev, &bak, chan, mac_idx, phy_idx); - chip->ops->set_channel(rtwdev, &chan, mac_idx, phy_idx); + chip->ops->set_channel(rtwdev, chan, mac_idx, phy_idx); - chip->ops->set_txpwr(rtwdev, &chan, phy_idx); + chip->ops->set_txpwr(rtwdev, chan, phy_idx); - rtw89_chip_set_channel_done(rtwdev, &bak, &chan, mac_idx, phy_idx); + rtw89_chip_set_channel_done(rtwdev, &bak, chan, mac_idx, phy_idx); - if (!entity_active || band_changed) { - rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan.band_type); + if (!entity_active || chan_rcd->band_changed) { + rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan->band_type); rtw89_chip_rfk_band_changed(rtwdev, phy_idx); } @@ -523,12 +549,12 @@ rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev, } static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev, - struct rtw89_core_tx_request *tx_req) + struct rtw89_core_tx_request *tx_req, + const struct rtw89_chan *chan) { struct sk_buff *skb = tx_req->skb; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = tx_info->control.vif; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u16 lowest_rate; if (tx_info->flags & IEEE80211_TX_CTL_NO_CCK_RATE || @@ -567,7 +593,8 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif = tx_req->vif; struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); 
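/*
 * The rtw89 core.c hunks above share one theme: instead of always assuming a
 * single channel context (RTW89_SUB_ENTITY_0), the active sub-entity is now
 * derived from the entity mode (SCC/MCC/MCC_PREPARE), overridden by an active
 * remain-on-channel index, or taken from the vif itself.  A schematic of that
 * selection, with hypothetical names standing in for the driver's types:
 */
#include <linux/errno.h>

enum demo_sub_entity { DEMO_SUB_ENTITY_0, DEMO_SUB_ENTITY_1, DEMO_SUB_ENTITY_IDLE };
enum demo_entity_mode { DEMO_MODE_SCC, DEMO_MODE_MCC_PREPARE, DEMO_MODE_MCC };

struct demo_dev;
static enum demo_entity_mode demo_get_entity_mode(struct demo_dev *dev);
static enum demo_sub_entity demo_get_roc_idx(struct demo_dev *dev);

static int demo_pick_sub_entity(struct demo_dev *dev)
{
	enum demo_sub_entity idx, roc_idx;

	switch (demo_get_entity_mode(dev)) {
	case DEMO_MODE_SCC:
	case DEMO_MODE_MCC:
		idx = DEMO_SUB_ENTITY_0;
		break;
	case DEMO_MODE_MCC_PREPARE:
		idx = DEMO_SUB_ENTITY_1;	/* prepare on the second context */
		break;
	default:
		return -EINVAL;
	}

	roc_idx = demo_get_roc_idx(dev);	/* DEMO_SUB_ENTITY_IDLE if none */
	if (roc_idx != DEMO_SUB_ENTITY_IDLE)
		idx = roc_idx;			/* an active ROC takes precedence */

	return idx;
}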
u8 qsel, ch_dma; qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT; @@ -584,7 +611,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev, desc_info->en_wd_info = true; desc_info->use_rate = true; desc_info->dis_data_fb = true; - desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req); + desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req, chan); rtw89_debug(rtwdev, RTW89_DBG_TXRX, "tx mgmt frame with rate 0x%x on channel %d (band %d, bw %d)\n", @@ -603,7 +630,8 @@ rtw89_core_tx_update_h2c_info(struct rtw89_dev *rtwdev, desc_info->ch_dma = RTW89_DMA_H2C; } -static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc) +static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc, + const struct rtw89_chan *chan) { static const u8 rtw89_bandwidth_to_om[] = { [RTW89_CHANNEL_WIDTH_20] = HTC_OM_CHANNEL_WIDTH_20, @@ -614,7 +642,6 @@ static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc }; const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_hal *hal = &rtwdev->hal; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 om_bandwidth; if (!chip->dis_2g_40m_ul_ofdma || @@ -1659,8 +1686,7 @@ static void rtw89_correct_cck_chan(struct rtw89_dev *rtwdev, const struct rtw89_chan_rcd *rcd = rtw89_chan_rcd_get(rtwdev, RTW89_SUB_ENTITY_0); u16 chan = rcd->prev_primary_channel; - u8 band = rcd->prev_band_type == RTW89_BAND_2G ? - NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; + u8 band = rtw89_hw_to_nl80211_band(rcd->prev_band_type); if (status->band != NL80211_BAND_2GHZ && status->encoding == RX_ENC_LEGACY && @@ -1900,7 +1926,6 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev, { const struct cfg80211_chan_def *chandef = rtw89_chandef_get(rtwdev, RTW89_SUB_ENTITY_0); - const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u16 data_rate; u8 data_rate_mode; @@ -1910,6 +1935,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev, if (rtwdev->scanning && RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) { + const struct rtw89_chan *cur = rtw89_scan_chan_get(rtwdev); u8 chan = cur->primary_channel; u8 band = cur->band_type; enum nl80211_band nl_band; @@ -2451,6 +2477,7 @@ out: void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; struct ieee80211_hw *hw = rtwdev->hw; struct rtw89_roc *roc = &rtwvif->roc; struct cfg80211_chan_def roc_chan; @@ -2478,7 +2505,7 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtw89_config_roc_chandef(rtwdev, rtwvif->sub_entity_idx, &roc_chan); rtw89_set_channel(rtwdev); rtw89_write32_clr(rtwdev, - rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), + rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), B_AX_A_UC_CAM_MATCH | B_AX_A_BC_CAM_MATCH); ieee80211_ready_on_channel(hw); @@ -2486,6 +2513,7 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; struct ieee80211_hw *hw = rtwdev->hw; struct rtw89_roc *roc = &rtwvif->roc; struct rtw89_vif *tmp; @@ -2499,7 +2527,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtw89_leave_lps(rtwdev); rtw89_write32_mask(rtwdev, - rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), + rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), B_AX_RX_FLTR_CFG_MASK, 
rtwdev->hal.rx_fltr); @@ -2682,6 +2710,7 @@ static void rtw89_track_work(struct work_struct *work) rtw89_phy_tx_path_div_track(rtwdev); rtw89_phy_antdiv_track(rtwdev); rtw89_phy_ul_tb_ctrl_track(rtwdev); + rtw89_tas_track(rtwdev); if (rtwdev->lps_enabled && !rtwdev->btc.lps) rtw89_enter_lps_track(rtwdev); @@ -2970,6 +2999,8 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif, rtwsta); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); int ret; if (vif->type == NL80211_IFTYPE_AP || sta->tdls) { @@ -3023,7 +3054,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev, rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta, BTC_ROLE_MSTS_STA_CONN_END); - rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta->htc_template); + rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta->htc_template, chan); rtw89_phy_ul_tb_assoc(rtwdev, rtwvif); ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif, rtwsta->mac_id); @@ -3463,6 +3494,27 @@ void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond, complete(&wait->completion); } +void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event) +{ + u16 bt_req_len; + + switch (event) { + case RTW89_BTC_HMSG_SET_BT_REQ_SLOT: + bt_req_len = rtw89_coex_query_bt_req_len(rtwdev, RTW89_PHY_0); + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "coex updates BT req len to %d TU\n", bt_req_len); + break; + default: + if (event < NUM_OF_RTW89_BTC_HMSG) + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "unhandled BTC HMSG event: %d\n", event); + else + rtw89_warn(rtwdev, + "unrecognized BTC HMSG event: %d\n", event); + break; + } +} + int rtw89_core_start(struct rtw89_dev *rtwdev) { int ret; @@ -3496,6 +3548,8 @@ int rtw89_core_start(struct rtw89_dev *rtwdev) rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true); rtw89_mac_update_rts_threshold(rtwdev, RTW89_MAC_0); + rtw89_tas_reset(rtwdev); + ret = rtw89_hci_start(rtwdev); if (ret) { rtw89_err(rtwdev, "failed to start hci\n"); @@ -3536,6 +3590,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev) cancel_work_sync(&btc->icmp_notify_work); cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work); cancel_delayed_work_sync(&rtwdev->track_work); + cancel_delayed_work_sync(&rtwdev->chanctx_work); cancel_delayed_work_sync(&rtwdev->coex_act1_work); cancel_delayed_work_sync(&rtwdev->coex_bt_devinfo_work); cancel_delayed_work_sync(&rtwdev->coex_rfk_chk_work); @@ -3572,6 +3627,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev) INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work); INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work); INIT_DELAYED_WORK(&rtwdev->track_work, rtw89_track_work); + INIT_DELAYED_WORK(&rtwdev->chanctx_work, rtw89_chanctx_work); INIT_DELAYED_WORK(&rtwdev->coex_act1_work, rtw89_coex_act1_work); INIT_DELAYED_WORK(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work); INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work); @@ -3612,6 +3668,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev) rtw89_ser_init(rtwdev); rtw89_entity_init(rtwdev); + rtw89_tas_init(rtwdev); return 0; } @@ -3632,7 +3689,8 @@ EXPORT_SYMBOL(rtw89_core_deinit); void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, const u8 *mac_addr, bool hw_scan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = 
rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); rtwdev->scanning = true; rtw89_leave_lps(rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index fa4bbc4095ab..04ce221730f9 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -14,6 +14,8 @@ struct rtw89_dev; struct rtw89_pci_info; +struct rtw89_mac_gen_def; +struct rtw89_phy_gen_def; extern const struct ieee80211_ops rtw89_ops; @@ -789,6 +791,7 @@ enum rtw89_phy_idx { enum rtw89_sub_entity_idx { RTW89_SUB_ENTITY_0 = 0, + RTW89_SUB_ENTITY_1 = 1, NUM_OF_RTW89_SUB_ENTITY, RTW89_SUB_ENTITY_IDLE = NUM_OF_RTW89_SUB_ENTITY, @@ -900,6 +903,7 @@ struct rtw89_chan { struct rtw89_chan_rcd { u8 prev_primary_channel; enum rtw89_band prev_band_type; + bool band_changed; }; struct rtw89_channel_help_params { @@ -2656,6 +2660,17 @@ struct rtw89_btc { bool lps; }; +enum rtw89_btc_hmsg { + RTW89_BTC_HMSG_TMR_EN = 0x0, + RTW89_BTC_HMSG_BT_REG_READBACK = 0x1, + RTW89_BTC_HMSG_SET_BT_REQ_SLOT = 0x2, + RTW89_BTC_HMSG_FW_EV = 0x3, + RTW89_BTC_HMSG_BT_LINK_CHG = 0x4, + RTW89_BTC_HMSG_SET_BT_REQ_STBC = 0x5, + + NUM_OF_RTW89_BTC_HMSG, +}; + enum rtw89_ra_mode { RTW89_RA_MODE_CCK = BIT(0), RTW89_RA_MODE_OFDM = BIT(1), @@ -2885,6 +2900,32 @@ struct rtw89_roc { #define RTW89_P2P_MAX_NOA_NUM 2 +struct rtw89_p2p_ie_head { + u8 eid; + u8 ie_len; + u8 oui[3]; + u8 oui_type; +} __packed; + +struct rtw89_noa_attr_head { + u8 attr_type; + __le16 attr_len; + u8 index; + u8 oppps_ctwindow; +} __packed; + +struct rtw89_p2p_noa_ie { + struct rtw89_p2p_ie_head p2p_head; + struct rtw89_noa_attr_head noa_head; + struct ieee80211_p2p_noa_desc noa_desc[RTW89_P2P_MAX_NOA_NUM]; +} __packed; + +struct rtw89_p2p_noa_setter { + struct rtw89_p2p_noa_ie ie; + u8 noa_count; + u8 noa_index; +}; + struct rtw89_vif { struct list_head list; struct rtw89_dev *rtwdev; @@ -2927,6 +2968,7 @@ struct rtw89_vif { struct cfg80211_scan_request *scan_req; struct ieee80211_scan_ies *scan_ies; struct list_head general_pkt_list; + struct rtw89_p2p_noa_setter p2p_noa; }; enum rtw89_lv1_rcvy_step { @@ -3339,6 +3381,10 @@ struct rtw89_dig_regs { u32 seg0_pd_reg; u32 pd_lower_bound_mask; u32 pd_spatial_reuse_en; + u32 bmode_pd_reg; + u32 bmode_cca_rssi_limit_en; + u32 bmode_pd_lower_bound_reg; + u32 bmode_rssi_nocca_low_th_mask; struct rtw89_reg_def p0_lna_init; struct rtw89_reg_def p1_lna_init; struct rtw89_reg_def p0_tia_init; @@ -3375,10 +3421,28 @@ struct rtw89_antdiv_info { bool get_stats; }; +enum rtw89_chanctx_state { + RTW89_CHANCTX_STATE_MCC_START, + RTW89_CHANCTX_STATE_MCC_STOP, +}; + +enum rtw89_chanctx_callbacks { + RTW89_CHANCTX_CALLBACK_PLACEHOLDER, + + NUM_OF_RTW89_CHANCTX_CALLBACKS, +}; + +struct rtw89_chanctx_listener { + void (*callbacks[NUM_OF_RTW89_CHANCTX_CALLBACKS]) + (struct rtw89_dev *rtwdev, enum rtw89_chanctx_state state); +}; + struct rtw89_chip_info { enum rtw89_core_chip_id chip_id; enum rtw89_chip_gen chip_gen; const struct rtw89_chip_ops *ops; + const struct rtw89_mac_gen_def *mac_def; + const struct rtw89_phy_gen_def *phy_def; const char *fw_basename; u8 fw_format_max; bool try_ce_fw; @@ -3434,6 +3498,7 @@ struct rtw89_chip_info { /* NULL if no rfe-specific, or a null-terminated array by rfe_parms */ const struct rtw89_rfe_parms_conf *rfe_parms_conf; const struct rtw89_rfe_parms *dflt_parms; + const struct rtw89_chanctx_listener *chanctx_listener; u8 txpwr_factor_rf; u8 txpwr_factor_mac; @@ -3690,12 +3755,34 @@ struct rtw89_sar_info { }; }; +enum rtw89_tas_state { + 
RTW89_TAS_STATE_DPR_OFF, + RTW89_TAS_STATE_DPR_ON, + RTW89_TAS_STATE_DPR_FORBID, +}; + +#define RTW89_TAS_MAX_WINDOW 50 +struct rtw89_tas_info { + s16 txpwr_history[RTW89_TAS_MAX_WINDOW]; + s32 total_txpwr; + u8 cur_idx; + s8 dpr_gap; + s8 delta; + enum rtw89_tas_state state; + bool enable; +}; + struct rtw89_chanctx_cfg { enum rtw89_sub_entity_idx idx; }; enum rtw89_entity_mode { RTW89_ENTITY_MODE_SCC, + RTW89_ENTITY_MODE_MCC_PREPARE, + RTW89_ENTITY_MODE_MCC, + + NUM_OF_RTW89_ENTITY_MODE, + RTW89_ENTITY_MODE_INVALID = NUM_OF_RTW89_ENTITY_MODE, }; struct rtw89_sub_entity { @@ -4352,6 +4439,7 @@ struct rtw89_dev { struct rtw89_antdiv_info antdiv; struct delayed_work track_work; + struct delayed_work chanctx_work; struct delayed_work coex_act1_work; struct delayed_work coex_bt_devinfo_work; struct delayed_work coex_rfk_chk_work; @@ -4365,6 +4453,7 @@ struct rtw89_dev { struct rtw89_regulatory_info regulatory; struct rtw89_sar_info sar; + struct rtw89_tas_info tas; struct rtw89_btc btc; enum rtw89_ps_mode ps_mode; @@ -4900,6 +4989,18 @@ const struct rtw89_chan_rcd *rtw89_chan_rcd_get(struct rtw89_dev *rtwdev, return &hal->sub[idx].rcd; } +static inline +const struct rtw89_chan *rtw89_scan_chan_get(struct rtw89_dev *rtwdev) +{ + struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; + struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); + + if (rtwvif) + return rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx); + else + return rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); +} + static inline void rtw89_chip_fem_setup(struct rtw89_dev *rtwdev) { const struct rtw89_chip_info *chip = rtwdev->chip; @@ -5273,6 +5374,8 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device, void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev); void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev); void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef); +void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef, + struct rtw89_chan *chan); void rtw89_set_channel(struct rtw89_dev *rtwdev); void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, struct rtw89_chan *chan); @@ -5307,5 +5410,6 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, bool hw_scan); void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool active); +void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event); #endif diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c index f1e5ac4186c2..d162e64f6064 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.c +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -572,9 +572,9 @@ static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev, seq_puts(m, #_regd "\n"); \ break -static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev) +static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 band = chan->band_type; u8 regd = rtw89_regd_get(rtwdev, band); @@ -604,16 +604,21 @@ static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v) { struct rtw89_debugfs_priv *debugfs_priv = m->private; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + const struct rtw89_chan *chan; int ret = 0; mutex_lock(&rtwdev->mutex); rtw89_leave_ps_mode(rtwdev); + chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); seq_puts(m, "[Regulatory] "); - __print_regd(m, rtwdev); + 
__print_regd(m, rtwdev, chan); seq_puts(m, "[SAR]\n"); - rtw89_print_sar(m, rtwdev); + rtw89_print_sar(m, rtwdev, chan->freq); + + seq_puts(m, "[TAS]\n"); + rtw89_print_tas(m, rtwdev); seq_puts(m, "\n[TX power byrate]\n"); ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_byr); @@ -790,6 +795,9 @@ static void rtw89_debug_dump_mac_mem(struct seq_file *m, struct rtw89_dev *rtwdev, u8 sel, u32 start_addr, u32 len) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + u32 filter_model_addr = mac->filter_model_addr; + u32 indir_access_addr = mac->indir_access_addr; u32 base_addr, start_page, residue; u32 i, j, p, pages; u32 dump_len, remain; @@ -799,17 +807,17 @@ static void rtw89_debug_dump_mac_mem(struct seq_file *m, pages = len / MAC_MEM_DUMP_PAGE_SIZE + 1; start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE; residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE; - base_addr = rtw89_mac_mem_base_addrs[sel]; + base_addr = mac->mem_base_addrs[sel]; base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE; for (p = 0; p < pages; p++) { dump_len = min_t(u32, remain, MAC_MEM_DUMP_PAGE_SIZE); - rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr); - for (i = R_AX_INDIR_ACCESS_ENTRY + residue; - i < R_AX_INDIR_ACCESS_ENTRY + dump_len;) { + rtw89_write32(rtwdev, filter_model_addr, base_addr); + for (i = indir_access_addr + residue; + i < indir_access_addr + dump_len;) { seq_printf(m, "%08xh:", i); for (j = 0; - j < 4 && i < R_AX_INDIR_ACCESS_ENTRY + dump_len; + j < 4 && i < indir_access_addr + dump_len; j++, i += 4) { val = rtw89_read32(rtwdev, i); seq_printf(m, " %08x", val); diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 2811a94b5f69..df1dc2f43c86 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -9,6 +9,7 @@ #include "fw.h" #include "mac.h" #include "phy.h" +#include "ps.h" #include "reg.h" #include "util.h" @@ -1165,7 +1166,7 @@ void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len) return; plain_log: - rtw89_info(rtwdev, "C2H log: %*s", len, buf); + rtw89_info(rtwdev, "C2H log: %.*s", len, buf); } @@ -1758,7 +1759,8 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); struct sk_buff *skb; u8 pads[RTW89_PPE_BW_NUM]; u8 mac_id = rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id; @@ -1915,12 +1917,15 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); struct sk_buff *skb; struct sk_buff *skb_beacon; u16 tim_offset; int bcn_total_len; u16 beacon_rate; + void *noa_data; + u8 noa_len; int ret; if (vif->p2p) @@ -1937,6 +1942,13 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, return -ENOMEM; } + noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data); + if (noa_len && + (noa_len <= skb_tailroom(skb_beacon) || + pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { + skb_put_data(skb_beacon, noa_data, noa_len); + } + bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len; skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); if (!skb) { @@ -3851,6 +3863,7 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, struct ieee80211_scan_request *scan_req) { struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; struct cfg80211_scan_request *req = &scan_req->req; u32 rx_fltr = rtwdev->hal.rx_fltr; u8 mac_addr[ETH_ALEN]; @@ -3873,7 +3886,7 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, rx_fltr &= ~B_AX_A_BC; rx_fltr &= ~B_AX_A_A1_MATCH; rtw89_write32_mask(rtwdev, - rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), + rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), B_AX_RX_FLTR_CFG_MASK, rx_fltr); } @@ -3881,6 +3894,7 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, bool aborted) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; struct cfg80211_scan_info info = { .aborted = aborted, @@ -3891,7 +3905,7 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, return; rtw89_write32_mask(rtwdev, - rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), + rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr); diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index cebefa3b1db3..fab9f5004a75 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -12,7 +12,7 @@ #include "reg.h" #include "util.h" -const u32 rtw89_mac_mem_base_addrs[RTW89_MAC_MEM_NUM] = { +static const u32 rtw89_mac_mem_base_addrs_ax[RTW89_MAC_MEM_NUM] = { [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR, [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR, [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR, @@ -39,19 +39,21 @@ const u32 rtw89_mac_mem_base_addrs[RTW89_MAC_MEM_NUM] = { static void rtw89_mac_mem_write(struct rtw89_dev *rtwdev, u32 offset, u32 val, enum rtw89_mac_mem_sel sel) { - u32 addr = rtw89_mac_mem_base_addrs[sel] + offset; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + u32 addr = mac->mem_base_addrs[sel] + offset; - rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, addr); - rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, val); + rtw89_write32(rtwdev, mac->filter_model_addr, addr); + rtw89_write32(rtwdev, mac->indir_access_addr, val); } static u32 rtw89_mac_mem_read(struct rtw89_dev *rtwdev, u32 offset, enum 
rtw89_mac_mem_sel sel) { - u32 addr = rtw89_mac_mem_base_addrs[sel] + offset; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + u32 addr = mac->mem_base_addrs[sel] + offset; - rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, addr); - return rtw89_read32(rtwdev, R_AX_INDIR_ACCESS_ENTRY); + rtw89_write32(rtwdev, mac->filter_model_addr, addr); + return rtw89_read32(rtwdev, mac->indir_access_addr); } int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 mac_idx, @@ -2082,7 +2084,7 @@ static int addr_cam_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_ADDR_CAM_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_ADDR_CAM_CTRL, mac_idx); val = rtw89_read32(rtwdev, reg); val |= u32_encode_bits(0x7f, B_AX_ADDR_CAM_RANGE_MASK) | @@ -2109,7 +2111,7 @@ static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_1, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PREBKF_CFG_1, mac_idx); if (rtwdev->chip->chip_id == RTL8852C) rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK, SIFS_MACTXEN_T1_V1); @@ -2118,14 +2120,14 @@ static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx) SIFS_MACTXEN_T1); if (rtwdev->chip->chip_id == RTL8852B || rtwdev->chip->chip_id == RTL8851B) { - reg = rtw89_mac_reg_by_idx(R_AX_SCH_EXT_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SCH_EXT_CTRL, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_PORT_RST_TSF_ADV); } - reg = rtw89_mac_reg_by_idx(R_AX_CCA_CFG_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CCA_CFG_0, mac_idx); rtw89_write32_clr(rtwdev, reg, B_AX_BTCCA_EN); - reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PREBKF_CFG_0, mac_idx); if (rtwdev->chip->chip_id == RTL8852C) { val = rtw89_read32_mask(rtwdev, R_AX_SEC_ENG_CTRL, B_AX_TX_PARTIAL_MODE); @@ -2165,13 +2167,13 @@ int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev, switch (type) { case RTW89_MGNT: - reg = rtw89_mac_reg_by_idx(R_AX_MGNT_FLTR, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_MGNT_FLTR, mac_idx); break; case RTW89_CTRL: - reg = rtw89_mac_reg_by_idx(R_AX_CTRL_FLTR, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CTRL_FLTR, mac_idx); break; case RTW89_DATA: - reg = rtw89_mac_reg_by_idx(R_AX_DATA_FLTR, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_DATA_FLTR, mac_idx); break; default: rtw89_err(rtwdev, "[ERR]set rx filter type err\n"); @@ -2202,9 +2204,9 @@ static int rx_fltr_init(struct rtw89_dev *rtwdev, u8 mac_idx) B_AX_LSIG_PARITY_CHK_EN | B_AX_SIGA_CRC_CHK | B_AX_VHT_SU_SIGB_CRC_CHK | B_AX_VHT_MU_SIGB_CRC_CHK | B_AX_HE_SIGB_CRC_CHK; - rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx), + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_FLTR_OPT, mac_idx), mac_ftlr); - rtw89_write16(rtwdev, rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx), + rtw89_write16(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_PLCP_HDR_FLTR, mac_idx), plcp_ftlr); return 0; @@ -2224,20 +2226,20 @@ static void _patch_dis_resp_chk(struct rtw89_dev *rtwdev, u8 mac_idx) switch (rtwdev->chip->chip_id) { case RTL8852A: case RTL8852B: - reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RSP_CHK_SIG, mac_idx); val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_nav; rtw89_write32(rtwdev, reg, val32); - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_0, mac_idx); val32 = 
rtw89_read32(rtwdev, reg) & ~b_rsp_chk_cca; rtw89_write32(rtwdev, reg, val32); break; default: - reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RSP_CHK_SIG, mac_idx); val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_nav; rtw89_write32(rtwdev, reg, val32); - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_0, mac_idx); val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_cca; rtw89_write32(rtwdev, reg, val32); break; @@ -2253,7 +2255,7 @@ static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_CCA_CONTROL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CCA_CONTROL, mac_idx); val = rtw89_read32(rtwdev, reg); val |= (B_AX_TB_CHK_BASIC_NAV | B_AX_TB_CHK_BTCCA | B_AX_TB_CHK_EDCCA | B_AX_TB_CHK_CCA_P20 | @@ -2294,7 +2296,7 @@ static int spatial_reuse_init(struct rtw89_dev *rtwdev, u8 mac_idx) ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_RX_SR_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_SR_CTRL, mac_idx); rtw89_write8_clr(rtwdev, reg, B_AX_SR_EN); return 0; @@ -2309,13 +2311,13 @@ static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_MAC_LOOPBACK, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_MAC_LOOPBACK, mac_idx); rtw89_write32_clr(rtwdev, reg, B_AX_MACLBK_EN); - reg = rtw89_mac_reg_by_idx(R_AX_TCR0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TCR0, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_TCR_UDF_THSD_MASK, TCR_UDF_THSD); - reg = rtw89_mac_reg_by_idx(R_AX_TXD_FIFO_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXD_FIFO_CTRL, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_HIGH_MCS_THRE_MASK, TXDFIFO_HIGH_MCS_THRE); rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_LOW_MCS_THRE_MASK, TXDFIFO_LOW_MCS_THRE); @@ -2333,7 +2335,7 @@ static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_0, mac_idx); val = rtw89_read32(rtwdev, reg); val &= ~B_AX_WMAC_SPEC_SIFS_CCK_MASK; val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_CCK_MASK, WMAC_SPEC_SIFS_CCK); @@ -2353,12 +2355,12 @@ static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_OFDM_MASK, sifs); rtw89_write32(rtwdev, reg, val); - reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RXTRIG_TEST_USER_2, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_FCSCHK_EN); - reg = rtw89_mac_reg_by_idx(rrsr->ref_rate.addr, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, rrsr->ref_rate.addr, mac_idx); rtw89_write32_mask(rtwdev, reg, rrsr->ref_rate.mask, rrsr->ref_rate.data); - reg = rtw89_mac_reg_by_idx(rrsr->rsc.addr, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, rrsr->rsc.addr, mac_idx); rtw89_write32_mask(rtwdev, reg, rrsr->rsc.mask, rrsr->rsc.data); return 0; @@ -2397,10 +2399,10 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (mac_idx == RTW89_MAC_0) rst_bacam(rtwdev); - reg = rtw89_mac_reg_by_idx(R_AX_RESPBA_CAM_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RESPBA_CAM_CTRL, mac_idx); rtw89_write8_set(rtwdev, reg, B_AX_SSN_SEL); - reg = rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_DLK_PROTECT_CTL, mac_idx); 
val = rtw89_read16(rtwdev, reg); val = u16_replace_bits(val, TRXCFG_RMAC_DATA_TO, B_AX_RX_DLK_DATA_TIME_MASK); @@ -2408,10 +2410,10 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) B_AX_RX_DLK_CCA_TIME_MASK); rtw89_write16(rtwdev, reg, val); - reg = rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RCR, mac_idx); rtw89_write8_mask(rtwdev, reg, B_AX_CH_EN_MASK, 0x1); - reg = rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_FLTR_OPT, mac_idx); if (mac_idx == RTW89_MAC_0) rx_qta = rtwdev->mac.dle_info.c0_rx_qta; else @@ -2425,13 +2427,13 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) { rtw89_write16_mask(rtwdev, - rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx), + rtw89_mac_reg_by_idx(rtwdev, R_AX_DLK_PROTECT_CTL, mac_idx), B_AX_RX_DLK_CCA_TIME_MASK, 0); - rtw89_write16_set(rtwdev, rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx), + rtw89_write16_set(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_RCR, mac_idx), BIT(12)); } - reg = rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PLCP_HDR_FLTR, mac_idx); rtw89_write8_clr(rtwdev, reg, B_AX_VHT_SU_SIGB_CRC_CHK); return ret; @@ -2447,7 +2449,7 @@ static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); val = rtw89_read32(rtwdev, reg); val = u32_replace_bits(val, 0, B_AX_TXSC_20M_MASK); val = u32_replace_bits(val, 0, B_AX_TXSC_40M_MASK); @@ -2455,7 +2457,7 @@ static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx) rtw89_write32(rtwdev, reg, val); if (chip_id == RTL8852A || chip_id == RTL8852B) { - reg = rtw89_mac_reg_by_idx(R_AX_PTCL_RRSR1, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_RRSR1, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_RRSR_RATE_EN_MASK, RRSR_OFDM_CCK_EN); } @@ -2485,7 +2487,7 @@ static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) return ret; if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) { - reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SIFS_SETTING, mac_idx); val = rtw89_read32(rtwdev, reg); val = u32_replace_bits(val, S_AX_CTS2S_TH_1K, B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK); @@ -2494,7 +2496,7 @@ static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) val |= B_AX_HW_CTS2SELF_EN; rtw89_write32(rtwdev, reg, val); - reg = rtw89_mac_reg_by_idx(R_AX_PTCL_FSM_MON, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_FSM_MON, mac_idx); val = rtw89_read32(rtwdev, reg); val = u32_replace_bits(val, S_AX_PTCL_TO_2MS, B_AX_PTCL_TX_ARB_TO_THR_MASK); val &= ~B_AX_PTCL_TX_ARB_TO_MODE; @@ -2531,7 +2533,7 @@ static int cmac_dma_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_RXDMA_CTRL_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RXDMA_CTRL_0, mac_idx); rtw89_write8_clr(rtwdev, reg, RX_FULL_MODE); return 0; @@ -2725,7 +2727,7 @@ static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band, static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en, u16 tx_en_mask) { - u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx); + u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_TXEN, mac_idx); u16 val; int ret; @@ -2747,7 +2749,7 @@ static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx, static int 
rtw89_set_hw_sch_tx_en_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en, u32 tx_en_mask) { - u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_DRV_TXEN, mac_idx); + u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_DRV_TXEN, mac_idx); u32 val; int ret; @@ -2768,7 +2770,7 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, int ret; *tx_en = rtw89_read16(rtwdev, - rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx)); + rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_TXEN, mac_idx)); switch (sel) { case RTW89_SCH_TX_SEL_ALL: @@ -2809,7 +2811,7 @@ int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, int ret; *tx_en = rtw89_read32(rtwdev, - rtw89_mac_reg_by_idx(R_AX_CTN_DRV_TXEN, mac_idx)); + rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_DRV_TXEN, mac_idx)); switch (sel) { case RTW89_SCH_TX_SEL_ALL: @@ -3016,7 +3018,7 @@ static int band_idle_ck_b(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_PTCL_TX_CTN_SEL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_TX_CTN_SEL, mac_idx); ret = read_poll_timeout(rtw89_read8, val, (val & B_AX_PTCL_TX_ON_STAT) == 0, @@ -3224,7 +3226,7 @@ static void rtw89_scheduler_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) { u32 reg; - reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SCHEDULE_ERR_IMR, mac_idx); rtw89_write32_clr(rtwdev, reg, B_AX_SORT_NON_IDLE_ERR_INT_EN | B_AX_FSM_TIMEOUT_ERR_INT_EN); rtw89_write32_set(rtwdev, reg, B_AX_FSM_TIMEOUT_ERR_INT_EN); @@ -3235,7 +3237,7 @@ static void rtw89_ptcl_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; u32 reg; - reg = rtw89_mac_reg_by_idx(R_AX_PTCL_IMR0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_IMR0, mac_idx); rtw89_write32_clr(rtwdev, reg, imr->ptcl_imr_clr); rtw89_write32_set(rtwdev, reg, imr->ptcl_imr_set); } @@ -3246,12 +3248,12 @@ static void rtw89_cdma_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; u32 reg; - reg = rtw89_mac_reg_by_idx(imr->cdma_imr_0_reg, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, imr->cdma_imr_0_reg, mac_idx); rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_0_clr); rtw89_write32_set(rtwdev, reg, imr->cdma_imr_0_set); if (chip_id == RTL8852C) { - reg = rtw89_mac_reg_by_idx(imr->cdma_imr_1_reg, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, imr->cdma_imr_1_reg, mac_idx); rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_1_clr); rtw89_write32_set(rtwdev, reg, imr->cdma_imr_1_set); } @@ -3262,7 +3264,7 @@ static void rtw89_phy_intf_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; u32 reg; - reg = rtw89_mac_reg_by_idx(imr->phy_intf_imr_reg, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, imr->phy_intf_imr_reg, mac_idx); rtw89_write32_clr(rtwdev, reg, imr->phy_intf_imr_clr); rtw89_write32_set(rtwdev, reg, imr->phy_intf_imr_set); } @@ -3272,7 +3274,7 @@ static void rtw89_rmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; u32 reg; - reg = rtw89_mac_reg_by_idx(imr->rmac_imr_reg, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, imr->rmac_imr_reg, mac_idx); rtw89_write32_clr(rtwdev, reg, imr->rmac_imr_clr); rtw89_write32_set(rtwdev, reg, imr->rmac_imr_set); } @@ -3282,7 +3284,7 @@ static void rtw89_tmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; u32 reg; - reg = 
rtw89_mac_reg_by_idx(imr->tmac_imr_reg, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, imr->tmac_imr_reg, mac_idx); rtw89_write32_clr(rtwdev, reg, imr->tmac_imr_clr); rtw89_write32_set(rtwdev, reg, imr->tmac_imr_set); } @@ -3661,6 +3663,9 @@ static void rtw89_mac_dmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) { u8 i; + if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) + return; + for (i = 0; i < 4; i++) { rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, DMAC_TBL_BASE_ADDR + (macid << 4) + (i << 2)); @@ -3670,6 +3675,9 @@ static void rtw89_mac_dmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) static void rtw89_mac_cmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) { + if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) + return; + rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, CMAC_TBL_BASE_ADDR + macid * CCTL_INFO_SIZE); rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0x4); @@ -3860,7 +3868,7 @@ static void rtw89_mac_port_cfg_hiq_win(struct rtw89_dev *rtwdev, u8 port = rtwvif->port; u32 reg; - reg = rtw89_mac_reg_by_idx(hiq_win_addr[port], rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, hiq_win_addr[port], rtwvif->mac_idx); rtw89_write8(rtwdev, reg, win); } @@ -3871,7 +3879,7 @@ static void rtw89_mac_port_cfg_hiq_dtim(struct rtw89_dev *rtwdev, const struct rtw89_port_reg *p = &rtw_port_base; u32 addr; - addr = rtw89_mac_reg_by_idx(R_AX_MD_TSFT_STMP_CTL, rtwvif->mac_idx); + addr = rtw89_mac_reg_by_idx(rtwdev, R_AX_MD_TSFT_STMP_CTL, rtwvif->mac_idx); rtw89_write8_set(rtwdev, addr, B_AX_UPD_HGQMD | B_AX_UPD_TIMIE); rtw89_write16_port_mask(rtwdev, rtwvif, p->dtim_ctrl, B_AX_DTIM_NUM_MASK, @@ -3930,7 +3938,7 @@ static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev, bss_color = vif->bss_conf.he_bss_color.color; reg_base = port >= 4 ? R_AX_PTCL_BSS_COLOR_1 : R_AX_PTCL_BSS_COLOR_0; - reg = rtw89_mac_reg_by_idx(reg_base, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, reg_base, rtwvif->mac_idx); rtw89_write32_mask(rtwdev, reg, masks[port], bss_color); } @@ -3944,7 +3952,7 @@ static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev, return; if (port == 0) { - reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_CTRL, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_MBSSID_CTRL, rtwvif->mac_idx); rtw89_write32_clr(rtwdev, reg, B_AX_P0MB_ALL_MASK); } } @@ -3956,7 +3964,7 @@ static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev, u32 reg; u32 val; - reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_DROP_0, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_MBSSID_DROP_0, rtwvif->mac_idx); val = rtw89_read32(rtwdev, reg); val &= ~FIELD_PREP(B_AX_PORT_DROP_4_0_MASK, BIT(port)); if (port == 0) @@ -4014,7 +4022,7 @@ void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev, u32 val, reg; val = RTW89_PORT_OFFSET_TU_TO_32US(offset_tu); - reg = rtw89_mac_reg_by_idx(R_AX_PORT0_TSF_SYNC + rtwvif->port * 4, + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PORT0_TSF_SYNC + rtwvif->port * 4, rtwvif->mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_SYNC_PORT_SRC, rtwvif_src->port); @@ -4204,7 +4212,7 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev, rtw89_mac_check_he_obss_narrow_bw_ru_iter, &tolerated); - reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RXTRIG_TEST_USER_2, rtwvif->mac_idx); if (tolerated) rtw89_write32_clr(rtwdev, reg, B_AX_RXTRIG_RU26_DIS); else @@ -4731,7 +4739,7 @@ bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev, { const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem; enum rtw89_qta_mode mode 
= dle_mem->mode; - u32 addr = rtw89_mac_reg_by_idx(reg_base, phy_idx); + u32 addr = rtw89_mac_reg_by_idx(rtwdev, reg_base, phy_idx); if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR) { rtw89_err(rtwdev, "[TXPWR] addr=0x%x exceed txpwr cr\n", @@ -4760,7 +4768,7 @@ EXPORT_SYMBOL(rtw89_mac_get_txpwr_cr); int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable) { - u32 reg = rtw89_mac_reg_by_idx(R_AX_PPDU_STAT, mac_idx); + u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PPDU_STAT, mac_idx); int ret; ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); @@ -4807,7 +4815,7 @@ void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx) time_th = min_t(u32, time_th >> MAC_AX_TIME_TH_SH, MAC_AX_TIME_TH_MAX); len_th = min_t(u32, len_th >> MAC_AX_LEN_TH_SH, MAC_AX_LEN_TH_MAX); - reg = rtw89_mac_reg_by_idx(R_AX_AGG_LEN_HT_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AGG_LEN_HT_0, mac_idx); rtw89_write16_mask(rtwdev, reg, B_AX_RTS_TXTIME_TH_MASK, time_th); rtw89_write16_mask(rtwdev, reg, B_AX_RTS_LEN_TH_MASK, len_th); } @@ -5043,7 +5051,7 @@ int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, plt->band); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BT_PLT, plt->band); val = (plt->tx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_TX_PLT_GNT_LTE_RX : 0) | (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_TX_PLT_GNT_BT_TX : 0) | (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_TX_PLT_GNT_BT_RX : 0) | @@ -5133,7 +5141,7 @@ u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band) u32 reg; u16 cnt; - reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, band); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BT_PLT, band); cnt = rtw89_read32_mask(rtwdev, reg, B_AX_BT_PLT_PKT_CNT_MASK); rtw89_write16_set(rtwdev, reg, B_AX_BT_PLT_RST); @@ -5146,7 +5154,7 @@ static void rtw89_mac_bfee_standby_timer(struct rtw89_dev *rtwdev, u8 mac_idx, u32 reg; rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee standby_timer to %d\n", keep); - reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMEE_RESP_OPTION, mac_idx); if (keep) { set_bit(RTW89_FLAG_BFEE_TIMER_KEEP, rtwdev->flags); rtw89_write32_mask(rtwdev, reg, B_AX_BFMEE_BFRP_RX_STANDBY_TIMER_MASK, @@ -5165,7 +5173,7 @@ static void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) B_AX_BFMEE_HE_NDPA_EN; rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee ndpa_en to %d\n", en); - reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMEE_RESP_OPTION, mac_idx); if (en) { set_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); rtw89_write32_set(rtwdev, reg, mask); @@ -5187,30 +5195,30 @@ static int rtw89_mac_init_bfee(struct rtw89_dev *rtwdev, u8 mac_idx) /* AP mode set tx gid to 63 */ /* STA mode set tx gid to 0(default) */ - reg = rtw89_mac_reg_by_idx(R_AX_BFMER_CTRL_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMER_CTRL_0, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_BFMER_NDP_BFEN); - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx); rtw89_write32(rtwdev, reg, CSI_RRSC_BMAP); - reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMEE_RESP_OPTION, mac_idx); val32 = FIELD_PREP(B_AX_BFMEE_NDP_RX_STANDBY_TIMER_MASK, NDP_RX_STANDBY_TIMER); rtw89_write32(rtwdev, reg, val32); rtw89_mac_bfee_standby_timer(rtwdev, mac_idx, true); 
rtw89_mac_bfee_ctrl(rtwdev, mac_idx, true); - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL | B_AX_BFMEE_USE_NSTS | B_AX_BFMEE_CSI_GID_SEL | B_AX_BFMEE_CSI_FORCE_RETE_EN); - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RATE, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_RATE, mac_idx); rtw89_write32(rtwdev, reg, u32_encode_bits(CSI_INIT_RATE_HT, B_AX_BFMEE_HT_CSI_RATE_MASK) | u32_encode_bits(CSI_INIT_RATE_VHT, B_AX_BFMEE_VHT_CSI_RATE_MASK) | u32_encode_bits(CSI_INIT_RATE_HE, B_AX_BFMEE_HE_CSI_RATE_MASK)); - reg = rtw89_mac_reg_by_idx(R_AX_CSIRPT_OPTION, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CSIRPT_OPTION, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_CSIPRT_VHTSU_AID_EN | B_AX_CSIPRT_HESU_AID_EN); @@ -5254,7 +5262,7 @@ static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev, nc = min(nc, sound_dim); nr = min(nr, sound_dim); - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL); val = FIELD_PREP(B_AX_BFMEE_CSIINFO0_NC_MASK, nc) | @@ -5266,9 +5274,9 @@ static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev, FIELD_PREP(B_AX_BFMEE_CSIINFO0_STBC_EN, stbc_en); if (port_sel == 0) - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); else - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_1, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_1, mac_idx); rtw89_write16(rtwdev, reg, val); @@ -5304,11 +5312,11 @@ static int rtw89_mac_csi_rrsc(struct rtw89_dev *rtwdev, BIT(RTW89_MAC_BF_RRSC_HT_MSC3) | BIT(RTW89_MAC_BF_RRSC_HT_MSC5)); } - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL); rtw89_write32_clr(rtwdev, reg, B_AX_BFMEE_CSI_FORCE_RETE_EN); rtw89_write32(rtwdev, - rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx), + rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx), rrsc); return 0; @@ -5346,19 +5354,21 @@ void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif * rtw89_debug(rtwdev, RTW89_DBG_BF, "update bf GID table\n"); p = (__le32 *)conf->mu_group.membership; - rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN0, mac_idx), + rtw89_write32(rtwdev, + rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION_EN0, mac_idx), le32_to_cpu(p[0])); - rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN1, mac_idx), + rtw89_write32(rtwdev, + rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION_EN1, mac_idx), le32_to_cpu(p[1])); p = (__le32 *)conf->mu_group.position; - rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION0, mac_idx), + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION0, mac_idx), le32_to_cpu(p[0])); - rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION1, mac_idx), + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION1, mac_idx), le32_to_cpu(p[1])); - rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION2, mac_idx), + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION2, mac_idx), le32_to_cpu(p[2])); - rtw89_write32(rtwdev, 
rtw89_mac_reg_by_idx(R_AX_GID_POSITION3, mac_idx), + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION3, mac_idx), le32_to_cpu(p[3])); } @@ -5449,7 +5459,7 @@ __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, return ret; } - reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AMPDU_AGG_LIMIT, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK, max_tx_time >> 5); } @@ -5489,7 +5499,7 @@ int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, return ret; } - reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AMPDU_AGG_LIMIT, mac_idx); *tx_time = rtw89_read32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK) << 5; } @@ -5531,7 +5541,7 @@ int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev, return ret; } - reg = rtw89_mac_reg_by_idx(R_AX_TXCNT, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXCNT, mac_idx); *tx_retry = rtw89_read32_mask(rtwdev, reg, B_AX_L_TXCNT_LMT_MASK); } @@ -5550,7 +5560,7 @@ int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev, if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_MUEDCA_EN, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_MUEDCA_EN, mac_idx); if (en) rtw89_write16_set(rtwdev, reg, set); else @@ -5673,3 +5683,12 @@ int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev, } return ret; } + +const struct rtw89_mac_gen_def rtw89_mac_gen_ax = { + .band1_offset = RTW89_MAC_AX_BAND_REG_OFFSET, + .filter_model_addr = R_AX_FILTER_MODEL_ADDR, + .indir_access_addr = R_AX_INDIR_ACCESS_ENTRY, + .mem_base_addrs = rtw89_mac_mem_base_addrs_ax, + .rx_fltr = R_AX_RX_FLTR_OPT, +}; +EXPORT_SYMBOL(rtw89_mac_gen_ax); diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index 0e1570451c2c..7cf34137c0bc 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -275,6 +275,7 @@ enum rtw89_mac_dbg_port_sel { /* SRAM mem dump */ #define R_AX_INDIR_ACCESS_ENTRY 0x40000 +#define R_BE_INDIR_ACCESS_ENTRY 0x80000 #define AXIDMA_BASE_ADDR 0x18006000 #define STA_SCHED_BASE_ADDR 0x18808000 @@ -298,6 +299,31 @@ enum rtw89_mac_dbg_port_sel { #define TXDATA_FIFO_1_BASE_ADDR 0x188A1000 #define CPU_LOCAL_BASE_ADDR 0x18003000 +#define WD_PAGE_BASE_ADDR_BE 0x0 +#define CPU_LOCAL_BASE_ADDR_BE 0x18003000 +#define AXIDMA_BASE_ADDR_BE 0x18006000 +#define SHARED_BUF_BASE_ADDR_BE 0x18700000 +#define DMAC_TBL_BASE_ADDR_BE 0x18800000 +#define SHCUT_MACHDR_BASE_ADDR_BE 0x18800800 +#define STA_SCHED_BASE_ADDR_BE 0x18818000 +#define NAT25_CAM_BASE_ADDR_BE 0x18820000 +#define RXPLD_FLTR_CAM_BASE_ADDR_BE 0x18823000 +#define SEC_CAM_BASE_ADDR_BE 0x18824000 +#define WOW_CAM_BASE_ADDR_BE 0x18828000 +#define MLD_TBL_BASE_ADDR_BE 0x18829000 +#define RX_CLSF_CAM_BASE_ADDR_BE 0x1882A000 +#define CMAC_TBL_BASE_ADDR_BE 0x18840000 +#define ADDR_CAM_BASE_ADDR_BE 0x18850000 +#define BSSID_CAM_BASE_ADDR_BE 0x18858000 +#define BA_CAM_BASE_ADDR_BE 0x18859000 +#define BCN_IE_CAM0_BASE_ADDR_BE 0x18860000 +#define TXDATA_FIFO_0_BASE_ADDR_BE 0x18861000 +#define TXD_FIFO_0_BASE_ADDR_BE 0x18862000 +#define BCN_IE_CAM1_BASE_ADDR_BE 0x18880000 +#define TXDATA_FIFO_1_BASE_ADDR_BE 0x18881000 +#define TXD_FIFO_1_BASE_ADDR_BE 0x18881800 +#define DCPU_LOCAL_BASE_ADDR_BE 0x19C02000 + #define CCTL_INFO_SIZE 32 enum rtw89_mac_mem_sel { @@ -322,13 +348,12 @@ enum rtw89_mac_mem_sel { RTW89_MAC_MEM_BSSID_CAM, RTW89_MAC_MEM_TXD_FIFO_0_V1, 
RTW89_MAC_MEM_TXD_FIFO_1_V1, + RTW89_MAC_MEM_WD_PAGE, /* keep last */ RTW89_MAC_MEM_NUM, }; -extern const u32 rtw89_mac_mem_base_addrs[]; - enum rtw89_rpwm_req_pwr_state { RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE = 0, RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFON = 1, @@ -478,6 +503,7 @@ enum rtw89_mac_bf_rrsc_rate { ({typeof(_addr) __addr = (_addr); \ __addr >= R_AX_CMAC_REG_START && __addr <= R_AX_CMAC_REG_END; }) #define RTW89_MAC_AX_BAND_REG_OFFSET 0x2000 +#define RTW89_MAC_BE_BAND_REG_OFFSET 0x4000 #define PTCL_IDLE_POLL_CNT 10000 #define SW_CVR_DUR_US 8 @@ -826,14 +852,29 @@ struct rtw89_mac_size_set { extern const struct rtw89_mac_size_set rtw89_mac_size; -static inline u32 rtw89_mac_reg_by_idx(u32 reg_base, u8 band) +struct rtw89_mac_gen_def { + u32 band1_offset; + u32 filter_model_addr; + u32 indir_access_addr; + const u32 *mem_base_addrs; + u32 rx_fltr; +}; + +extern const struct rtw89_mac_gen_def rtw89_mac_gen_ax; +extern const struct rtw89_mac_gen_def rtw89_mac_gen_be; + +static inline +u32 rtw89_mac_reg_by_idx(struct rtw89_dev *rtwdev, u32 reg_base, u8 band) { - return band == 0 ? reg_base : (reg_base + 0x2000); + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + + return band == 0 ? reg_base : (reg_base + mac->band1_offset); } -static inline u32 rtw89_mac_reg_by_port(u32 base, u8 port, u8 mac_idx) +static inline +u32 rtw89_mac_reg_by_port(struct rtw89_dev *rtwdev, u32 base, u8 port, u8 mac_idx) { - return rtw89_mac_reg_by_idx(base + port * 0x40, mac_idx); + return rtw89_mac_reg_by_idx(rtwdev, base + port * 0x40, mac_idx); } static inline u32 @@ -841,7 +882,7 @@ rtw89_read32_port(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, u32 base) { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); return rtw89_read32(rtwdev, reg); } @@ -851,7 +892,7 @@ rtw89_read32_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); return rtw89_read32_mask(rtwdev, reg, mask); } @@ -861,7 +902,7 @@ rtw89_write32_port(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, u32 base, { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); rtw89_write32(rtwdev, reg, data); } @@ -871,7 +912,7 @@ rtw89_write32_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); rtw89_write32_mask(rtwdev, reg, mask, data); } @@ -881,7 +922,7 @@ rtw89_write16_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); rtw89_write16_mask(rtwdev, reg, mask, data); } @@ -891,7 +932,7 @@ rtw89_write32_port_clr(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); rtw89_write32_clr(rtwdev, reg, bit); } @@ -901,7 +942,7 @@ rtw89_write16_port_clr(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = 
rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); rtw89_write16_clr(rtwdev, reg, bit); } @@ -911,7 +952,7 @@ rtw89_write32_port_set(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); rtw89_write32_set(rtwdev, reg, bit); } diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index a66503eb35b8..5e48618706d9 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -224,6 +224,7 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw, u64 multicast) { struct rtw89_dev *rtwdev = hw->priv; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; mutex_lock(&rtwdev->mutex); rtw89_leave_ps_mode(rtwdev); @@ -271,13 +272,13 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw, } rtw89_write32_mask(rtwdev, - rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), + rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr); if (!rtwdev->dbcc_en) goto out; rtw89_write32_mask(rtwdev, - rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_1), + rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_1), B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr); @@ -296,7 +297,8 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, u8 aifsn) { struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); u8 slot_time; u8 sifs; @@ -353,7 +355,7 @@ static void ____rtw89_conf_tx_mu_edca(struct rtw89_dev *rtwdev, val = FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_TIMER_MASK, timer_32us) | FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_CW_MASK, mu_edca->ecw_min_max) | FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_AIFS_MASK, aifs); - reg = rtw89_mac_reg_by_idx(ac_to_mu_edca_param[ac], rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, ac_to_mu_edca_param[ac], rtwvif->mac_idx); rtw89_write32(rtwdev, reg, val); rtw89_mac_set_hw_muedca_ctrl(rtwdev, rtwvif, true); @@ -413,6 +415,8 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw, rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif); rtw89_mac_port_update(rtwdev, rtwvif); rtw89_mac_set_he_obss_narrow_bw_ru(rtwdev, vif); + + rtw89_queue_chanctx_work(rtwdev); } else { /* Abort ongoing scan if cancel_scan isn't issued * when disconnected by peer @@ -476,6 +480,8 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw, rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true); rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL); rtw89_chip_rfk_channel(rtwdev); + + rtw89_queue_chanctx_work(rtwdev); mutex_unlock(&rtwdev->mutex); return 0; diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c new file mode 100644 index 000000000000..9a63fb35e867 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/mac_be.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "mac.h" +#include "reg.h" + +static const u32 rtw89_mac_mem_base_addrs_be[RTW89_MAC_MEM_NUM] = { + [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR_BE, + [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR_BE, + [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR_BE, + [RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR_BE, + 
[RTW89_MAC_MEM_STA_SCHED] = STA_SCHED_BASE_ADDR_BE, + [RTW89_MAC_MEM_RXPLD_FLTR_CAM] = RXPLD_FLTR_CAM_BASE_ADDR_BE, + [RTW89_MAC_MEM_SECURITY_CAM] = SEC_CAM_BASE_ADDR_BE, + [RTW89_MAC_MEM_WOW_CAM] = WOW_CAM_BASE_ADDR_BE, + [RTW89_MAC_MEM_CMAC_TBL] = CMAC_TBL_BASE_ADDR_BE, + [RTW89_MAC_MEM_ADDR_CAM] = ADDR_CAM_BASE_ADDR_BE, + [RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR_BE, + [RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR_BE, + [RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR_BE, + [RTW89_MAC_MEM_TXD_FIFO_0] = TXD_FIFO_0_BASE_ADDR_BE, + [RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR_BE, + [RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR_BE, + [RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR_BE, + [RTW89_MAC_MEM_CPU_LOCAL] = CPU_LOCAL_BASE_ADDR_BE, + [RTW89_MAC_MEM_BSSID_CAM] = BSSID_CAM_BASE_ADDR_BE, + [RTW89_MAC_MEM_WD_PAGE] = WD_PAGE_BASE_ADDR_BE, +}; + +const struct rtw89_mac_gen_def rtw89_mac_gen_be = { + .band1_offset = RTW89_MAC_BE_BAND_REG_OFFSET, + .filter_model_addr = R_BE_FILTER_MODEL_ADDR, + .indir_access_addr = R_BE_INDIR_ACCESS_ENTRY, + .mem_base_addrs = rtw89_mac_mem_base_addrs_be, + .rx_fltr = R_BE_RX_FLTR_OPT, +}; +EXPORT_SYMBOL(rtw89_mac_gen_be); diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 9402f1a0caea..3a4bfc44142b 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -3939,5 +3939,5 @@ void rtw89_pci_remove(struct pci_dev *pdev) EXPORT_SYMBOL(rtw89_pci_remove); MODULE_AUTHOR("Realtek Corporation"); -MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver"); +MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index c40c4f8c1271..7139146cb3fa 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -133,10 +133,10 @@ static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak) return ra_mask; } -static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta) +static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, + const struct rtw89_chan *chan) { struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta); - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); struct cfg80211_bitrate_mask *mask = &rtwsta->mask; enum nl80211_band band; u64 cfg_mask; @@ -197,9 +197,9 @@ rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES, static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, + const struct rtw89_chan *chan, bool *fix_giltf_en, u8 *fix_giltf) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); struct cfg80211_bitrate_mask *mask = &rtwsta->mask; u8 band = chan->band_type; enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); @@ -236,7 +236,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif = rtwsta->rtwvif; struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern; struct rtw89_ra_info *ra = &rtwsta->ra; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif); const u64 *high_rate_masks = rtw89_ra_mask_ht_rates; u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi); @@ -265,7 +266,7 @@ static void 
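[Illustration, not part of the patch] The point of exporting rtw89_mac_gen_ax and rtw89_mac_gen_be is that generic code reaches chip-specific register layout through rtwdev->chip->mac_def, for example band-1 registers sit at +0x2000 on AX chips but +0x4000 on the BE generation. A compilable sketch of that dispatch follows; only the band offsets mirror the patch, the rx_fltr register values are made up.

```c
#include <stdio.h>
#include <stdint.h>

struct mac_gen_def {
	uint32_t band1_offset;
	uint32_t rx_fltr;
};

static const struct mac_gen_def mac_gen_ax = {
	.band1_offset = 0x2000,
	.rx_fltr      = 0xce28,   /* stand-in for R_AX_RX_FLTR_OPT */
};

static const struct mac_gen_def mac_gen_be = {
	.band1_offset = 0x4000,
	.rx_fltr      = 0x11420,  /* stand-in for R_BE_RX_FLTR_OPT */
};

struct chip_info {
	const char *name;
	const struct mac_gen_def *mac_def;
};

/* Equivalent of the reworked rtw89_mac_reg_by_idx(): band-1 registers sit at
 * a generation-specific offset instead of a hard-coded +0x2000. */
static uint32_t mac_reg_by_idx(const struct chip_info *chip, uint32_t base, uint8_t band)
{
	return band == 0 ? base : base + chip->mac_def->band1_offset;
}

int main(void)
{
	const struct chip_info ax = { "AX chip", &mac_gen_ax };
	const struct chip_info be = { "BE chip", &mac_gen_be };

	printf("%s band1 RX filter reg: 0x%x\n", ax.name,
	       (unsigned)mac_reg_by_idx(&ax, ax.mac_def->rx_fltr, 1));
	printf("%s band1 RX filter reg: 0x%x\n", be.name,
	       (unsigned)mac_reg_by_idx(&be, be.mac_def->rx_fltr, 1));
	return 0;
}
```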
rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD) ldpc_en = 1; - rtw89_phy_ra_gi_ltf(rtwdev, rtwsta, &fix_giltf_en, &fix_giltf); + rtw89_phy_ra_gi_ltf(rtwdev, rtwsta, chan, &fix_giltf_en, &fix_giltf); } else if (sta->deflink.vht_cap.vht_supported) { u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map); @@ -332,7 +333,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0); ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak); - ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta); + ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta, chan); switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_160: @@ -362,7 +363,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, ra->dcm_cap = 1; if (rate_pattern->enable && !vif->p2p) { - ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta); + ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta, chan); ra_mask &= rate_pattern->ra_mask; mode = rate_pattern->ra_mode; } @@ -457,7 +458,8 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev, struct ieee80211_supported_band *sband; struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; struct rtw89_phy_rate_pattern next_pattern = {0}; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = { RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0), RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0), @@ -1446,6 +1448,9 @@ static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr) u32 phy_page = addr >> 8; u32 ofst = 0; + if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) + return addr < 0x10000 ? 
0x20000 : 0; + switch (phy_page) { case 0x6: case 0x7: @@ -1647,6 +1652,8 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz; const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz; struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); + u32 freq = ieee80211_channel_to_frequency(ch, nl_band); u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch); u8 regd = rtw89_regd_get(rtwdev, band); u8 reg6 = regulatory->reg_6ghz_power; @@ -1682,7 +1689,7 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, } lmt = _phy_txpwr_rf_to_mac(rtwdev, lmt); - sar = rtw89_query_sar(rtwdev); + sar = rtw89_query_sar(rtwdev, freq); return min(lmt, sar); } @@ -1902,6 +1909,8 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz; const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz; struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); + u32 freq = ieee80211_channel_to_frequency(ch, nl_band); u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch); u8 regd = rtw89_regd_get(rtwdev, band); u8 reg6 = regulatory->reg_6ghz_power; @@ -1937,7 +1946,7 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, } lmt_ru = _phy_txpwr_rf_to_mac(rtwdev, lmt_ru); - sar = rtw89_query_sar(rtwdev); + sar = rtw89_query_sar(rtwdev, freq); return min(lmt_ru, sar); } @@ -2885,7 +2894,8 @@ void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val, void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { const struct rtw89_chip_info *chip = rtwdev->chip; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info; if (!chip->support_ul_tb_ctrl) @@ -3227,7 +3237,9 @@ static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx) static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + const struct rtw89_ccx_regs *ccx = phy->ccx; env->ccx_manual_ctrl = false; env->ccx_ongoing = false; @@ -3235,10 +3247,10 @@ static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev) env->ccx_period = 0; env->ccx_unit_idx = RTW89_CCX_32_US; - rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_EN_MSK, 1); - rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_TRIG_OPT_MSK, 1); - rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 1); - rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_EDCCA_OPT_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->en_mask, 1); + rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1); + rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1); + rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask, RTW89_CCX_EDCCA_BW20_0); } @@ -3353,25 +3365,27 @@ ifs_update_finished: static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + const struct rtw89_ccx_regs *ccx = phy->ccx; u8 i = 0; - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, 
B_IFS_T1_TH_LOW_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask, env->ifs_clm_th_l[0]); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_TH_LOW_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask, env->ifs_clm_th_l[1]); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_TH_LOW_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask, env->ifs_clm_th_l[2]); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_TH_LOW_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask, env->ifs_clm_th_l[3]); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, B_IFS_T1_TH_HIGH_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask, env->ifs_clm_th_h[0]); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_TH_HIGH_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask, env->ifs_clm_th_h[1]); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_TH_HIGH_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask, env->ifs_clm_th_h[2]); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_TH_HIGH_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask, env->ifs_clm_th_h[3]); for (i = 0; i < RTW89_IFS_CLM_NUM; i++) @@ -3382,7 +3396,9 @@ static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev) static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + const struct rtw89_ccx_regs *ccx = phy->ccx; struct rtw89_ccx_para_info para = {0}; env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND; @@ -3392,12 +3408,11 @@ static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev) if (rtw89_phy_ifs_clm_th_update_check(rtwdev, ¶)) rtw89_phy_ifs_clm_set_th_reg(rtwdev); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COLLECT_EN, - true); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, B_IFS_T1_EN_MSK, true); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_EN_MSK, true); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_EN_MSK, true); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_EN_MSK, true); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true); } static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev, @@ -3434,12 +3449,14 @@ static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev, static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + const struct rtw89_ccx_regs *ccx = phy->ccx; - rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COUNTER_CLR_MSK, 0); - rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 0); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COUNTER_CLR_MSK, 1); - rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 1); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0); + rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1); + 
rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1); env->ccx_ongoing = true; } @@ -3511,63 +3528,79 @@ static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev) static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + const struct rtw89_ccx_regs *ccx = phy->ccx; u8 i = 0; - if (rtw89_phy_read32_mask(rtwdev, R_IFSCNT, B_IFSCNT_DONE_MSK) == 0) { + if (rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr, + ccx->ifs_cnt_done_mask) == 0) { rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Get IFS_CLM report Fail\n"); return false; } env->ifs_clm_tx = - rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_TX_CNT, - B_IFS_CLM_TX_CNT_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr, + ccx->ifs_clm_tx_cnt_msk); env->ifs_clm_edcca_excl_cca = - rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_TX_CNT, - B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr, + ccx->ifs_clm_edcca_excl_cca_fa_mask); env->ifs_clm_cckcca_excl_fa = - rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_CCA, - B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr, + ccx->ifs_clm_cckcca_excl_fa_mask); env->ifs_clm_ofdmcca_excl_fa = - rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_CCA, - B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr, + ccx->ifs_clm_ofdmcca_excl_fa_mask); env->ifs_clm_cckfa = - rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_FA, - B_IFS_CLM_CCK_FA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr, + ccx->ifs_clm_cck_fa_mask); env->ifs_clm_ofdmfa = - rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_FA, - B_IFS_CLM_OFDM_FA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr, + ccx->ifs_clm_ofdm_fa_mask); env->ifs_clm_his[0] = - rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T1_HIS_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, + ccx->ifs_t1_his_mask); env->ifs_clm_his[1] = - rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T2_HIS_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, + ccx->ifs_t2_his_mask); env->ifs_clm_his[2] = - rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T3_HIS_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, + ccx->ifs_t3_his_mask); env->ifs_clm_his[3] = - rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T4_HIS_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, + ccx->ifs_t4_his_mask); env->ifs_clm_avg[0] = - rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_L, B_IFS_T1_AVG_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr, + ccx->ifs_t1_avg_mask); env->ifs_clm_avg[1] = - rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_L, B_IFS_T2_AVG_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr, + ccx->ifs_t2_avg_mask); env->ifs_clm_avg[2] = - rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_H, B_IFS_T3_AVG_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr, + ccx->ifs_t3_avg_mask); env->ifs_clm_avg[3] = - rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_H, B_IFS_T4_AVG_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr, + ccx->ifs_t4_avg_mask); env->ifs_clm_cca[0] = - rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_L, B_IFS_T1_CCA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr, + ccx->ifs_t1_cca_mask); env->ifs_clm_cca[1] = - rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_L, B_IFS_T2_CCA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr, + ccx->ifs_t2_cca_mask); env->ifs_clm_cca[2] = - rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_H, 
B_IFS_T3_CCA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr, + ccx->ifs_t3_cca_mask); env->ifs_clm_cca[3] = - rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_H, B_IFS_T4_CCA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr, + ccx->ifs_t4_cca_mask); env->ifs_clm_total_ifs = - rtw89_phy_read32_mask(rtwdev, R_IFSCNT, B_IFSCNT_TOTAL_CNT_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr, + ccx->ifs_total_mask); rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n", env->ifs_clm_total_ifs); @@ -3595,7 +3628,9 @@ static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev) static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev, struct rtw89_ccx_para_info *para) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + const struct rtw89_ccx_regs *ccx = phy->ccx; u32 period = 0; u32 unit_idx = 0; @@ -3611,10 +3646,11 @@ static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev, if (para->mntr_time != env->ifs_clm_mntr_time) { rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time, &period, &unit_idx); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, - B_IFS_CLM_PERIOD_MSK, period); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, - B_IFS_CLM_COUNTER_UNIT_MSK, unit_idx); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, + ccx->ifs_clm_period_mask, period); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, + ccx->ifs_clm_cnt_unit_mask, + unit_idx); rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Update IFS-CLM time ((%d)) -> ((%d))\n", @@ -3732,16 +3768,19 @@ static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev, bool enable, enum rtw89_phy_idx phy_idx) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + const struct rtw89_physts_regs *physts = phy->physts; + if (enable) { - rtw89_phy_write32_clr(rtwdev, R_PLCP_HISTOGRAM, - B_STS_DIS_TRIG_BY_FAIL); - rtw89_phy_write32_clr(rtwdev, R_PLCP_HISTOGRAM, - B_STS_DIS_TRIG_BY_BRK); + rtw89_phy_write32_clr(rtwdev, physts->setting_addr, + physts->dis_trigger_fail_mask); + rtw89_phy_write32_clr(rtwdev, physts->setting_addr, + physts->dis_trigger_brk_mask); } else { - rtw89_phy_write32_set(rtwdev, R_PLCP_HISTOGRAM, - B_STS_DIS_TRIG_BY_FAIL); - rtw89_phy_write32_set(rtwdev, R_PLCP_HISTOGRAM, - B_STS_DIS_TRIG_BY_BRK); + rtw89_phy_write32_set(rtwdev, physts->setting_addr, + physts->dis_trigger_fail_mask); + rtw89_phy_write32_set(rtwdev, physts->setting_addr, + physts->dis_trigger_brk_mask); } } @@ -4169,10 +4208,10 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi, "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n", final_rssi, cck_cca_th, under_region, pd_val); - rtw89_phy_write32_mask(rtwdev, R_BMODE_PDTH_EN_V1, - B_BMODE_PDTH_LIMIT_EN_MSK_V1, enable); - rtw89_phy_write32_mask(rtwdev, R_BMODE_PDTH_V1, - B_BMODE_PDTH_LOWER_BOUND_MSK_V1, pd_val); + rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_reg, + dig_regs->bmode_cca_rssi_limit_en, enable); + rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_lower_bound_reg, + dig_regs->bmode_rssi_nocca_low_th_mask, pd_val); } void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev) @@ -4561,7 +4600,7 @@ void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev, regs = rtw89_tssi_fastmode_regs_level; for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) { - reg = rtw89_mac_reg_by_idx(regs[i].addr, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx); rtw89_write32_mask(rtwdev, reg, regs[i].mask, val); } } @@ -4623,11 +4662,11 @@ void 
rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev, data = chip->tssi_dbw_table->data[bandedge_cfg]; for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) { - reg = rtw89_mac_reg_by_idx(regs[i].addr, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx); rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]); } - reg = rtw89_mac_reg_by_idx(R_AX_BANDEDGE_CFG, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BANDEDGE_CFG, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg); rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg, @@ -4725,3 +4764,74 @@ void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan) rtw89_phy_write32(rtwdev, reg, hal->edcca_bak); } } + +static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = { + .setting_addr = R_CCX, + .edcca_opt_mask = B_CCX_EDCCA_OPT_MSK, + .measurement_trig_mask = B_MEASUREMENT_TRIG_MSK, + .trig_opt_mask = B_CCX_TRIG_OPT_MSK, + .en_mask = B_CCX_EN_MSK, + .ifs_cnt_addr = R_IFS_COUNTER, + .ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK, + .ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK, + .ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK, + .ifs_collect_en_mask = B_IFS_COLLECT_EN, + .ifs_t1_addr = R_IFS_T1, + .ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK, + .ifs_t1_en_mask = B_IFS_T1_EN_MSK, + .ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK, + .ifs_t2_addr = R_IFS_T2, + .ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK, + .ifs_t2_en_mask = B_IFS_T2_EN_MSK, + .ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK, + .ifs_t3_addr = R_IFS_T3, + .ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK, + .ifs_t3_en_mask = B_IFS_T3_EN_MSK, + .ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK, + .ifs_t4_addr = R_IFS_T4, + .ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK, + .ifs_t4_en_mask = B_IFS_T4_EN_MSK, + .ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK, + .ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT, + .ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK, + .ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK, + .ifs_clm_cca_addr = R_IFS_CLM_CCA, + .ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK, + .ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK, + .ifs_clm_fa_addr = R_IFS_CLM_FA, + .ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK, + .ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK, + .ifs_his_addr = R_IFS_HIS, + .ifs_t4_his_mask = B_IFS_T4_HIS_MSK, + .ifs_t3_his_mask = B_IFS_T3_HIS_MSK, + .ifs_t2_his_mask = B_IFS_T2_HIS_MSK, + .ifs_t1_his_mask = B_IFS_T1_HIS_MSK, + .ifs_avg_l_addr = R_IFS_AVG_L, + .ifs_t2_avg_mask = B_IFS_T2_AVG_MSK, + .ifs_t1_avg_mask = B_IFS_T1_AVG_MSK, + .ifs_avg_h_addr = R_IFS_AVG_H, + .ifs_t4_avg_mask = B_IFS_T4_AVG_MSK, + .ifs_t3_avg_mask = B_IFS_T3_AVG_MSK, + .ifs_cca_l_addr = R_IFS_CCA_L, + .ifs_t2_cca_mask = B_IFS_T2_CCA_MSK, + .ifs_t1_cca_mask = B_IFS_T1_CCA_MSK, + .ifs_cca_h_addr = R_IFS_CCA_H, + .ifs_t4_cca_mask = B_IFS_T4_CCA_MSK, + .ifs_t3_cca_mask = B_IFS_T3_CCA_MSK, + .ifs_total_addr = R_IFSCNT, + .ifs_cnt_done_mask = B_IFSCNT_DONE_MSK, + .ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK, +}; + +static const struct rtw89_physts_regs rtw89_physts_regs_ax = { + .setting_addr = R_PLCP_HISTOGRAM, + .dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL, + .dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK, +}; + +const struct rtw89_phy_gen_def rtw89_phy_gen_ax = { + .cr_base = 0x10000, + .ccx = &rtw89_ccx_regs_ax, + .physts = &rtw89_physts_regs_ax, +}; +EXPORT_SYMBOL(rtw89_phy_gen_ax); diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h index ab174a0ba488..d6dc0cbbae43 100644 --- 
a/drivers/net/wireless/realtek/rtw89/phy.h +++ b/drivers/net/wireless/realtek/rtw89/phy.h @@ -7,7 +7,6 @@ #include "core.h" -#define RTW89_PHY_ADDR_OFFSET 0x10000 #define RTW89_RF_ADDR_ADSEL_MASK BIT(16) #define get_phy_headline(addr) FIELD_GET(GENMASK(31, 28), addr) @@ -337,61 +336,154 @@ struct rtw89_nbi_reg_def { struct rtw89_reg_def notch2_en; }; +struct rtw89_ccx_regs { + u32 setting_addr; + u32 edcca_opt_mask; + u32 measurement_trig_mask; + u32 trig_opt_mask; + u32 en_mask; + u32 ifs_cnt_addr; + u32 ifs_clm_period_mask; + u32 ifs_clm_cnt_unit_mask; + u32 ifs_clm_cnt_clear_mask; + u32 ifs_collect_en_mask; + u32 ifs_t1_addr; + u32 ifs_t1_th_h_mask; + u32 ifs_t1_en_mask; + u32 ifs_t1_th_l_mask; + u32 ifs_t2_addr; + u32 ifs_t2_th_h_mask; + u32 ifs_t2_en_mask; + u32 ifs_t2_th_l_mask; + u32 ifs_t3_addr; + u32 ifs_t3_th_h_mask; + u32 ifs_t3_en_mask; + u32 ifs_t3_th_l_mask; + u32 ifs_t4_addr; + u32 ifs_t4_th_h_mask; + u32 ifs_t4_en_mask; + u32 ifs_t4_th_l_mask; + u32 ifs_clm_tx_cnt_addr; + u32 ifs_clm_edcca_excl_cca_fa_mask; + u32 ifs_clm_tx_cnt_msk; + u32 ifs_clm_cca_addr; + u32 ifs_clm_ofdmcca_excl_fa_mask; + u32 ifs_clm_cckcca_excl_fa_mask; + u32 ifs_clm_fa_addr; + u32 ifs_clm_ofdm_fa_mask; + u32 ifs_clm_cck_fa_mask; + u32 ifs_his_addr; + u32 ifs_t4_his_mask; + u32 ifs_t3_his_mask; + u32 ifs_t2_his_mask; + u32 ifs_t1_his_mask; + u32 ifs_avg_l_addr; + u32 ifs_t2_avg_mask; + u32 ifs_t1_avg_mask; + u32 ifs_avg_h_addr; + u32 ifs_t4_avg_mask; + u32 ifs_t3_avg_mask; + u32 ifs_cca_l_addr; + u32 ifs_t2_cca_mask; + u32 ifs_t1_cca_mask; + u32 ifs_cca_h_addr; + u32 ifs_t4_cca_mask; + u32 ifs_t3_cca_mask; + u32 ifs_total_addr; + u32 ifs_cnt_done_mask; + u32 ifs_total_mask; +}; + +struct rtw89_physts_regs { + u32 setting_addr; + u32 dis_trigger_fail_mask; + u32 dis_trigger_brk_mask; +}; + +struct rtw89_phy_gen_def { + u32 cr_base; + const struct rtw89_ccx_regs *ccx; + const struct rtw89_physts_regs *physts; +}; + +extern const struct rtw89_phy_gen_def rtw89_phy_gen_ax; +extern const struct rtw89_phy_gen_def rtw89_phy_gen_be; + static inline void rtw89_phy_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) { - rtw89_write8(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, data); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + rtw89_write8(rtwdev, addr + phy->cr_base, data); } static inline void rtw89_phy_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) { - rtw89_write16(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, data); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + rtw89_write16(rtwdev, addr + phy->cr_base, data); } static inline void rtw89_phy_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) { - rtw89_write32(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, data); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + rtw89_write32(rtwdev, addr + phy->cr_base, data); } static inline void rtw89_phy_write32_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits) { - rtw89_write32_set(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, bits); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + rtw89_write32_set(rtwdev, addr + phy->cr_base, bits); } static inline void rtw89_phy_write32_clr(struct rtw89_dev *rtwdev, u32 addr, u32 bits) { - rtw89_write32_clr(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, bits); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + rtw89_write32_clr(rtwdev, addr + phy->cr_base, bits); } static inline void rtw89_phy_write32_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask, u32 data) { - rtw89_write32_mask(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, 
mask, data); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + rtw89_write32_mask(rtwdev, addr + phy->cr_base, mask, data); } static inline u8 rtw89_phy_read8(struct rtw89_dev *rtwdev, u32 addr) { - return rtw89_read8(rtwdev, addr | RTW89_PHY_ADDR_OFFSET); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + return rtw89_read8(rtwdev, addr + phy->cr_base); } static inline u16 rtw89_phy_read16(struct rtw89_dev *rtwdev, u32 addr) { - return rtw89_read16(rtwdev, addr | RTW89_PHY_ADDR_OFFSET); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + return rtw89_read16(rtwdev, addr + phy->cr_base); } static inline u32 rtw89_phy_read32(struct rtw89_dev *rtwdev, u32 addr) { - return rtw89_read32(rtwdev, addr | RTW89_PHY_ADDR_OFFSET); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + return rtw89_read32(rtwdev, addr + phy->cr_base); } static inline u32 rtw89_phy_read32_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask) { - return rtw89_read32_mask(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, mask); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + return rtw89_read32_mask(rtwdev, addr + phy->cr_base, mask); } static inline diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c new file mode 100644 index 000000000000..778e4b0c8e87 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/phy_be.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2023 Realtek Corporation + */ + +#include "phy.h" +#include "reg.h" + +static const struct rtw89_ccx_regs rtw89_ccx_regs_be = { + .setting_addr = R_CCX, + .edcca_opt_mask = B_CCX_EDCCA_OPT_MSK_V1, + .measurement_trig_mask = B_MEASUREMENT_TRIG_MSK, + .trig_opt_mask = B_CCX_TRIG_OPT_MSK, + .en_mask = B_CCX_EN_MSK, + .ifs_cnt_addr = R_IFS_COUNTER, + .ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK, + .ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK, + .ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK, + .ifs_collect_en_mask = B_IFS_COLLECT_EN, + .ifs_t1_addr = R_IFS_T1, + .ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK, + .ifs_t1_en_mask = B_IFS_T1_EN_MSK, + .ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK, + .ifs_t2_addr = R_IFS_T2, + .ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK, + .ifs_t2_en_mask = B_IFS_T2_EN_MSK, + .ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK, + .ifs_t3_addr = R_IFS_T3, + .ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK, + .ifs_t3_en_mask = B_IFS_T3_EN_MSK, + .ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK, + .ifs_t4_addr = R_IFS_T4, + .ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK, + .ifs_t4_en_mask = B_IFS_T4_EN_MSK, + .ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK, + .ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT_V1, + .ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK, + .ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK, + .ifs_clm_cca_addr = R_IFS_CLM_CCA_V1, + .ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK, + .ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK, + .ifs_clm_fa_addr = R_IFS_CLM_FA_V1, + .ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK, + .ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK, + .ifs_his_addr = R_IFS_HIS_V1, + .ifs_t4_his_mask = B_IFS_T4_HIS_MSK, + .ifs_t3_his_mask = B_IFS_T3_HIS_MSK, + .ifs_t2_his_mask = B_IFS_T2_HIS_MSK, + .ifs_t1_his_mask = B_IFS_T1_HIS_MSK, + .ifs_avg_l_addr = R_IFS_AVG_L_V1, + .ifs_t2_avg_mask = B_IFS_T2_AVG_MSK, + .ifs_t1_avg_mask = B_IFS_T1_AVG_MSK, + .ifs_avg_h_addr = R_IFS_AVG_H_V1, + .ifs_t4_avg_mask = B_IFS_T4_AVG_MSK, + .ifs_t3_avg_mask = B_IFS_T3_AVG_MSK, + 
.ifs_cca_l_addr = R_IFS_CCA_L_V1, + .ifs_t2_cca_mask = B_IFS_T2_CCA_MSK, + .ifs_t1_cca_mask = B_IFS_T1_CCA_MSK, + .ifs_cca_h_addr = R_IFS_CCA_H_V1, + .ifs_t4_cca_mask = B_IFS_T4_CCA_MSK, + .ifs_t3_cca_mask = B_IFS_T3_CCA_MSK, + .ifs_total_addr = R_IFSCNT_V1, + .ifs_cnt_done_mask = B_IFSCNT_DONE_MSK, + .ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK, +}; + +static const struct rtw89_physts_regs rtw89_physts_regs_be = { + .setting_addr = R_PLCP_HISTOGRAM, + .dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL, + .dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK, +}; + +const struct rtw89_phy_gen_def rtw89_phy_gen_be = { + .cr_base = 0x20000, + .ccx = &rtw89_ccx_regs_be, + .physts = &rtw89_physts_regs_be, +}; +EXPORT_SYMBOL(rtw89_phy_gen_be); diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c index 84201ef19c17..917c01e5e9ed 100644 --- a/drivers/net/wireless/realtek/rtw89/ps.c +++ b/drivers/net/wireless/realtek/rtw89/ps.c @@ -2,6 +2,7 @@ /* Copyright(c) 2019-2020 Realtek Corporation */ +#include "chan.h" #include "coex.h" #include "core.h" #include "debug.h" @@ -257,8 +258,13 @@ void rtw89_recalc_lps(struct rtw89_dev *rtwdev) { struct ieee80211_vif *vif, *found_vif = NULL; struct rtw89_vif *rtwvif; + enum rtw89_entity_mode mode; int count = 0; + mode = rtw89_get_entity_mode(rtwdev); + if (mode == RTW89_ENTITY_MODE_MCC) + goto disable_lps; + rtw89_for_each_rtwvif(rtwdev, rtwvif) { vif = rtwvif_to_vif(rtwvif); @@ -273,8 +279,71 @@ void rtw89_recalc_lps(struct rtw89_dev *rtwdev) if (count == 1 && found_vif->cfg.ps) { rtwdev->lps_enabled = true; - } else { - rtw89_leave_lps(rtwdev); - rtwdev->lps_enabled = false; + return; } + +disable_lps: + rtw89_leave_lps(rtwdev); + rtwdev->lps_enabled = false; +} + +void rtw89_p2p_noa_renew(struct rtw89_vif *rtwvif) +{ + struct rtw89_p2p_noa_setter *setter = &rtwvif->p2p_noa; + struct rtw89_p2p_noa_ie *ie = &setter->ie; + struct rtw89_p2p_ie_head *p2p_head = &ie->p2p_head; + struct rtw89_noa_attr_head *noa_head = &ie->noa_head; + + if (setter->noa_count) { + setter->noa_index++; + setter->noa_count = 0; + } + + memset(ie, 0, sizeof(*ie)); + + p2p_head->eid = WLAN_EID_VENDOR_SPECIFIC; + p2p_head->ie_len = 4 + sizeof(*noa_head); + p2p_head->oui[0] = (WLAN_OUI_WFA >> 16) & 0xff; + p2p_head->oui[1] = (WLAN_OUI_WFA >> 8) & 0xff; + p2p_head->oui[2] = (WLAN_OUI_WFA >> 0) & 0xff; + p2p_head->oui_type = WLAN_OUI_TYPE_WFA_P2P; + + noa_head->attr_type = IEEE80211_P2P_ATTR_ABSENCE_NOTICE; + noa_head->attr_len = cpu_to_le16(2); + noa_head->index = setter->noa_index; + noa_head->oppps_ctwindow = 0; +} + +void rtw89_p2p_noa_append(struct rtw89_vif *rtwvif, + const struct ieee80211_p2p_noa_desc *desc) +{ + struct rtw89_p2p_noa_setter *setter = &rtwvif->p2p_noa; + struct rtw89_p2p_noa_ie *ie = &setter->ie; + struct rtw89_p2p_ie_head *p2p_head = &ie->p2p_head; + struct rtw89_noa_attr_head *noa_head = &ie->noa_head; + + if (!desc->count || !desc->duration) + return; + + if (setter->noa_count >= RTW89_P2P_MAX_NOA_NUM) + return; + + p2p_head->ie_len += sizeof(*desc); + le16_add_cpu(&noa_head->attr_len, sizeof(*desc)); + + ie->noa_desc[setter->noa_count++] = *desc; +} + +u8 rtw89_p2p_noa_fetch(struct rtw89_vif *rtwvif, void **data) +{ + struct rtw89_p2p_noa_setter *setter = &rtwvif->p2p_noa; + struct rtw89_p2p_noa_ie *ie = &setter->ie; + void *tail; + + if (!setter->noa_count) + return 0; + + *data = ie; + tail = ie->noa_desc + setter->noa_count; + return tail - *data; } diff --git a/drivers/net/wireless/realtek/rtw89/ps.h 
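The accessor rework above drops the fixed RTW89_PHY_ADDR_OFFSET and instead resolves every PHY control register against the cr_base carried by rtwdev->chip->phy_def, with the new WiFi 7 (BE) generation registering 0x20000. A minimal sketch of the idea, assuming the AX definition keeps the old 0x10000 window (rtw89_phy_gen_ax itself is not shown in this diff) and using demo_* names that are illustrative only:

#include <linux/types.h>

struct demo_phy_gen_def {
	u32 cr_base;	/* base of the PHY control-register window */
};

static const struct demo_phy_gen_def demo_gen_ax = { .cr_base = 0x10000 };
static const struct demo_phy_gen_def demo_gen_be = { .cr_base = 0x20000 };

/* Resolve a generation-relative PHY offset to an absolute address. */
static inline u32 demo_phy_addr(const struct demo_phy_gen_def *phy, u32 addr)
{
	/* e.g. R_CCX (0x0C00) lands at 0x10C00 on AX and 0x20C00 on BE */
	return addr + phy->cr_base;
}

Using addition rather than the old OR also stays correct should a future generation place its window at a base that is not a single power-of-two bit.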
b/drivers/net/wireless/realtek/rtw89/ps.h index 4c18f49204b2..aff0fba71cb0 100644 --- a/drivers/net/wireless/realtek/rtw89/ps.h +++ b/drivers/net/wireless/realtek/rtw89/ps.h @@ -16,6 +16,10 @@ void rtw89_leave_ips(struct rtw89_dev *rtwdev); void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl); void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif); void rtw89_recalc_lps(struct rtw89_dev *rtwdev); +void rtw89_p2p_noa_renew(struct rtw89_vif *rtwvif); +void rtw89_p2p_noa_append(struct rtw89_vif *rtwvif, + const struct ieee80211_p2p_noa_desc *desc); +u8 rtw89_p2p_noa_fetch(struct rtw89_vif *rtwvif, void **data); static inline void rtw89_leave_ips_by_hwflags(struct rtw89_dev *rtwdev) { diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 55595fde7494..c0aac4d3678a 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -3625,6 +3625,27 @@ #define B_AX_GNT_BT_TX_SW_VAL BIT(1) #define B_AX_GNT_BT_TX_SW_CTRL BIT(0) +#define R_BE_FILTER_MODEL_ADDR 0x0C04 + +#define R_BE_RX_FLTR_OPT 0x11420 +#define R_BE_RX_FLTR_OPT_C1 0x15420 +#define B_BE_UID_FILTER_MASK GENMASK(31, 24) +#define B_BE_UNSPT_TYPE BIT(22) +#define B_BE_RX_MPDU_MAX_LEN_MASK GENMASK(21, 16) +#define B_BE_A_FTM_REQ BIT(14) +#define B_BE_A_ERR_PKT BIT(13) +#define B_BE_A_UNSUP_PKT BIT(12) +#define B_BE_A_CRC32_ERR BIT(11) +#define B_BE_A_BCN_CHK_RULE_MASK GENMASK(9, 8) +#define B_BE_A_BCN_CHK_EN BIT(7) +#define B_BE_A_MC_LIST_CAM_MATCH BIT(6) +#define B_BE_A_BC_CAM_MATCH BIT(5) +#define B_BE_A_UC_CAM_MATCH BIT(4) +#define B_BE_A_MC BIT(3) +#define B_BE_A_BC BIT(2) +#define B_BE_A_A1_MATCH BIT(1) +#define B_BE_SNIFFER_MODE BIT(0) + #define RR_MOD 0x00 #define RR_MOD_V1 0x10000 #define RR_MOD_IQK GENMASK(19, 4) @@ -3977,6 +3998,7 @@ #define B_DBCC_80P80_SEL_EVM_RPT_EN BIT(0) #define R_CCX 0x0C00 #define B_CCX_EDCCA_OPT_MSK GENMASK(6, 4) +#define B_CCX_EDCCA_OPT_MSK_V1 GENMASK(7, 4) #define B_MEASUREMENT_TRIG_MSK BIT(2) #define B_CCX_TRIG_OPT_MSK BIT(1) #define B_CCX_EN_MSK BIT(0) @@ -4068,32 +4090,41 @@ #define B_SWSI_R_DATA_DONE_V1 BIT(26) #define R_TX_COUNTER 0x1A40 #define R_IFS_CLM_TX_CNT 0x1ACC +#define R_IFS_CLM_TX_CNT_V1 0x0ECC #define B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK GENMASK(31, 16) #define B_IFS_CLM_TX_CNT_MSK GENMASK(15, 0) #define R_IFS_CLM_CCA 0x1AD0 +#define R_IFS_CLM_CCA_V1 0x0ED0 #define B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK GENMASK(31, 16) #define B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK GENMASK(15, 0) #define R_IFS_CLM_FA 0x1AD4 +#define R_IFS_CLM_FA_V1 0x0ED4 #define B_IFS_CLM_OFDM_FA_MSK GENMASK(31, 16) #define B_IFS_CLM_CCK_FA_MSK GENMASK(15, 0) #define R_IFS_HIS 0x1AD8 +#define R_IFS_HIS_V1 0x0ED8 #define B_IFS_T4_HIS_MSK GENMASK(31, 24) #define B_IFS_T3_HIS_MSK GENMASK(23, 16) #define B_IFS_T2_HIS_MSK GENMASK(15, 8) #define B_IFS_T1_HIS_MSK GENMASK(7, 0) #define R_IFS_AVG_L 0x1ADC +#define R_IFS_AVG_L_V1 0x0EDC #define B_IFS_T2_AVG_MSK GENMASK(31, 16) #define B_IFS_T1_AVG_MSK GENMASK(15, 0) #define R_IFS_AVG_H 0x1AE0 +#define R_IFS_AVG_H_V1 0x0EE0 #define B_IFS_T4_AVG_MSK GENMASK(31, 16) #define B_IFS_T3_AVG_MSK GENMASK(15, 0) #define R_IFS_CCA_L 0x1AE4 +#define R_IFS_CCA_L_V1 0x0EE4 #define B_IFS_T2_CCA_MSK GENMASK(31, 16) #define B_IFS_T1_CCA_MSK GENMASK(15, 0) #define R_IFS_CCA_H 0x1AE8 +#define R_IFS_CCA_H_V1 0x0EE8 #define B_IFS_T4_CCA_MSK GENMASK(31, 16) #define B_IFS_T3_CCA_MSK GENMASK(15, 0) #define R_IFSCNT 0x1AEC +#define R_IFSCNT_V1 0x0EEC #define B_IFSCNT_DONE_MSK BIT(16) #define 
B_IFSCNT_TOTAL_CNT_MSK GENMASK(15, 0) #define R_TXAGC_TP 0x1C04 @@ -4109,6 +4140,8 @@ #define B_TXAGC_BB_OFT GENMASK(31, 16) #define B_TXAGC_BB GENMASK(31, 24) #define B_TXAGC_RF GENMASK(5, 0) +#define R_PATH0_TXPWR 0x1C78 +#define B_PATH0_TXPWR GENMASK(8, 0) #define R_S0_ADDCK 0x1E00 #define B_S0_ADDCK_I GENMASK(9, 0) #define B_S0_ADDCK_Q GENMASK(19, 10) @@ -4184,6 +4217,8 @@ #define R_TXAGC_BB_S1 0x3C60 #define B_TXAGC_BB_S1_OFT GENMASK(31, 16) #define B_TXAGC_BB_S1 GENMASK(31, 24) +#define R_PATH1_TXPWR 0x3C78 +#define B_PATH1_TXPWR GENMASK(8, 0) #define R_S1_ADDCK 0x3E00 #define B_S1_ADDCK_I GENMASK(9, 0) #define B_S1_ADDCK_Q GENMASK(19, 10) @@ -4360,6 +4395,7 @@ #define B_PKT_POP_EN BIT(8) #define R_SEG0R_PD 0x481C #define R_SEG0R_PD_V1 0x4860 +#define R_SEG0R_PD_V2 0x6A74 #define R_SEG0R_EDCCA_LVL 0x4840 #define R_SEG0R_EDCCA_LVL_V1 0x4884 #define B_SEG0R_PPDU_LVL_MSK GENMASK(31, 24) @@ -4478,8 +4514,10 @@ #define R_DCFO_COMP_S0_V1 0x4A40 #define B_DCFO_COMP_S0_V1_MSK GENMASK(13, 0) #define R_BMODE_PDTH_V1 0x4B64 +#define R_BMODE_PDTH_V2 0x6708 #define B_BMODE_PDTH_LOWER_BOUND_MSK_V1 GENMASK(31, 24) #define R_BMODE_PDTH_EN_V1 0x4B74 +#define R_BMODE_PDTH_EN_V2 0x6718 #define B_BMODE_PDTH_LIMIT_EN_MSK_V1 BIT(30) #define R_CFO_COMP_SEG1_L 0x5384 #define R_CFO_COMP_SEG1_H 0x5388 diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index 34c4d40cfa02..9e2328db1865 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -13,10 +13,10 @@ } static const struct rtw89_regd rtw89_ww_regd = - COUNTRY_REGD("00", RTW89_WW, RTW89_WW); + COUNTRY_REGD("00", RTW89_WW, RTW89_WW, RTW89_WW); static const struct rtw89_regd rtw89_regd_map[] = { - COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_NA), + COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC), COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_FCC), COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC), COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE), @@ -26,7 +26,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC, RTW89_FCC), COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC, RTW89_FCC), COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO, RTW89_NA), + COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC), COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC, RTW89_NA), COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC, RTW89_NA), COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC, RTW89_NA), @@ -81,7 +81,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), @@ -96,7 +96,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE), COUNTRY_REGD("AE", 
RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), @@ -115,7 +115,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("TH", RTW89_WW, RTW89_WW, RTW89_WW), COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA), COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA), @@ -148,7 +148,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC, RTW89_FCC), COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), @@ -164,7 +164,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC, RTW89_NA), COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI, RTW89_NA), @@ -179,20 +179,21 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_NA), COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_NA), COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("GY", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("GY", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC, RTW89_FCC), COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA, RTW89_NA), COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("XK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), @@ -207,7 +208,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_NA), COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI, RTW89_NA), diff --git 
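In the regd.c hunk just above, the worldwide default gains an additional regulatory argument and a number of countries move the last column from RTW89_NA to a concrete body (FCC, ETSI, or WW) — presumably the 6 GHz rule now being stated per country instead of left unset. A rough sketch of such a per-band lookup table; the struct layout, macro, and helper below are assumptions for illustration, not the driver's actual definitions:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

enum demo_band { DEMO_BAND_2G, DEMO_BAND_5G, DEMO_BAND_6G, DEMO_BAND_NUM };
enum { DEMO_REGD_WW, DEMO_REGD_NA, DEMO_REGD_FCC, DEMO_REGD_ETSI };

struct demo_regd {
	const char *alpha2;
	u8 txpwr_regd[DEMO_BAND_NUM];	/* one regulatory body per band */
};

#define DEMO_COUNTRY_REGD(_alpha2, _2g, _5g, _6g) \
	{ .alpha2 = (_alpha2), .txpwr_regd = { (_2g), (_5g), (_6g) } }

static const struct demo_regd demo_map[] = {
	DEMO_COUNTRY_REGD("MX", DEMO_REGD_FCC, DEMO_REGD_FCC, DEMO_REGD_FCC),
	DEMO_COUNTRY_REGD("TH", DEMO_REGD_WW, DEMO_REGD_WW, DEMO_REGD_WW),
};

static u8 demo_regd_for(const char *alpha2, enum demo_band band)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(demo_map); i++)
		if (!strcmp(demo_map[i].alpha2, alpha2))
			return demo_map[i].txpwr_regd[band];

	return DEMO_REGD_WW;	/* unknown country: fall back to worldwide */
}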
a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c index b5740639e267..103893f28b51 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c @@ -185,6 +185,10 @@ static const struct rtw89_dig_regs rtw8851b_dig_regs = { .seg0_pd_reg = R_SEG0R_PD_V1, .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK, .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1, + .bmode_pd_reg = R_BMODE_PDTH_EN_V1, + .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1, + .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V1, + .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1, .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK}, .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK}, .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1}, @@ -756,9 +760,9 @@ static void rtw8851b_set_channel_mac(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, u8 mac_idx) { - u32 sub_carr = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, mac_idx); - u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx); - u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx); + u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); + u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx); + u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx); u8 txsc20 = 0, txsc40 = 0; switch (chan->band_width) { @@ -1740,14 +1744,14 @@ void rtw8851b_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, return; } - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_CTRL, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN); - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst); pw_ofst = max_t(s8, pw_ofst - 3, -16); - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, pw_ofst); } @@ -2336,6 +2340,8 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .chip_id = RTL8851B, .chip_gen = RTW89_CHIP_AX, .ops = &rtw8851b_chip_ops, + .mac_def = &rtw89_mac_gen_ax, + .phy_def = &rtw89_phy_gen_ax, .fw_basename = RTW8851B_FW_BASENAME, .fw_format_max = RTW8851B_FW_FORMAT_MAX, .try_ce_fw = true, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 41f05276406d..d068eae6a2f0 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -478,6 +478,10 @@ static const struct rtw89_dig_regs rtw8852a_dig_regs = { .seg0_pd_reg = R_SEG0R_PD, .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK, .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK, + .bmode_pd_reg = R_BMODE_PDTH_EN_V1, + .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1, + .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V1, + .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1, .p0_lna_init = {R_PATH0_LNA_INIT, B_PATH0_LNA_INIT_IDX_MSK}, .p1_lna_init = {R_PATH1_LNA_INIT, B_PATH1_LNA_INIT_IDX_MSK}, .p0_tia_init = {R_PATH0_TIA_INIT, B_PATH0_TIA_INIT_IDX_MSK}, @@ -704,10 +708,9 @@ static void rtw8852a_set_channel_mac(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, u8 mac_idx) { - u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx); - u32 sub_carr = 
rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, - mac_idx); - u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx); + u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx); + u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); + u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx); u8 txsc20 = 0, txsc40 = 0; switch (chan->band_width) { @@ -1380,13 +1383,13 @@ void rtw8852a_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, pw_ofst); return; } - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_CTRL, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN); val_1t = pw_ofst; - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, val_1t); val_2t = max(val_1t - 3, -16); - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, val_2t); rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[ULTB] Set TB pwr_offset=(%d, %d)\n", val_1t, val_2t); @@ -2073,6 +2076,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .chip_id = RTL8852A, .chip_gen = RTW89_CHIP_AX, .ops = &rtw8852a_chip_ops, + .mac_def = &rtw89_mac_gen_ax, + .phy_def = &rtw89_phy_gen_ax, .fw_basename = RTW8852A_FW_BASENAME, .fw_format_max = RTW8852A_FW_FORMAT_MAX, .try_ce_fw = false, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c index eb2210cb7e09..0063301952b3 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c @@ -310,6 +310,10 @@ static const struct rtw89_dig_regs rtw8852b_dig_regs = { .seg0_pd_reg = R_SEG0R_PD_V1, .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK, .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1, + .bmode_pd_reg = R_BMODE_PDTH_EN_V1, + .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1, + .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V1, + .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1, .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK}, .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK}, .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1}, @@ -843,9 +847,9 @@ static void rtw8852b_set_channel_mac(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, u8 mac_idx) { - u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx); - u32 sub_carr = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, mac_idx); - u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx); + u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx); + u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); + u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx); u8 txsc20 = 0, txsc40 = 0; switch (chan->band_width) { @@ -1725,14 +1729,14 @@ void rtw8852b_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, return; } - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_CTRL, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN); - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst); pw_ofst = max_t(s8, pw_ofst - 3, -16); - reg = 
rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, pw_ofst); } @@ -2505,6 +2509,8 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .chip_id = RTL8852B, .chip_gen = RTW89_CHIP_AX, .ops = &rtw8852b_chip_ops, + .mac_def = &rtw89_mac_gen_ax, + .phy_def = &rtw89_phy_gen_ax, .fw_basename = RTW8852B_FW_BASENAME, .fw_format_max = RTW8852B_FW_FORMAT_MAX, .try_ce_fw = true, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c index fa018e1f499b..259df67836a0 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c @@ -846,7 +846,7 @@ static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, case ID_NBTXK: rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x011); - iqk_cmd = 0x308 | (1 << (4 + path)); + iqk_cmd = 0x408 | (1 << (4 + path)); break; case ID_NBRXK: rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); @@ -1078,7 +1078,7 @@ static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; bool kfail; - u8 gp = 0x3; + u8 gp = 0x2; switch (iqk_info->iqk_band[path]) { case RTW89_BAND_2G: diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index b1af72fbf085..1e16cc0a05dc 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -146,6 +146,10 @@ static const struct rtw89_dig_regs rtw8852c_dig_regs = { .seg0_pd_reg = R_SEG0R_PD, .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK, .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK, + .bmode_pd_reg = R_BMODE_PDTH_EN_V1, + .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1, + .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V1, + .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1, .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK}, .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK}, .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1}, @@ -606,10 +610,9 @@ static void rtw8852c_set_channel_mac(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, u8 mac_idx) { - u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx); - u32 sub_carr = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, - mac_idx); - u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx); + u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx); + u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); + u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx); u8 txsc20 = 0, txsc40 = 0, txsc80 = 0; u8 rf_mod_val = 0, chk_rate_mask = 0; u32 txsc; @@ -1654,8 +1657,7 @@ static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev, rtwdev->hal.cv != CHIP_CAV) { rtw89_phy_write32_idx(rtwdev, R_P80_AT_HIGH_FREQ, B_P80_AT_HIGH_FREQ, 0x0, phy_idx); - reg = rtw89_mac_reg_by_idx(R_P80_AT_HIGH_FREQ_BB_WRP, - phy_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_P80_AT_HIGH_FREQ_BB_WRP, phy_idx); if (chan->primary_channel > chan->channel) { rtw89_phy_write32_mask(rtwdev, R_P80_AT_HIGH_FREQ_RU_ALLOC, @@ -1859,12 +1861,12 @@ void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, for (i = 0; i < 4; i++) { /* 1TX */ - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, 
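Throughout the rtw8851b/8852a/8852b/8852c hunks every rtw89_mac_reg_by_idx() caller now passes rtwdev as the first argument, and each chip_info gains .mac_def/.phy_def pointers to the AX generation ops. The obvious reading is that the MAC band-1 register stride is no longer a hard-coded constant but comes from the chip's mac_def, so BE parts can use a different layout. A hedged sketch of what such a helper can look like; the field name and the 0x2000 stride are assumptions, not taken from this diff:

#include <linux/types.h>

struct demo_mac_gen_def {
	u32 band1_offset;	/* distance from MAC band 0 CRs to band 1 CRs */
};

struct demo_dev {
	const struct demo_mac_gen_def *mac_def;
};

static const struct demo_mac_gen_def demo_mac_ax = { .band1_offset = 0x2000 };

/* Band 0 keeps the base address; other MAC indices add the chip's stride. */
static inline u32 demo_mac_reg_by_idx(struct demo_dev *rtwdev, u32 addr, u8 mac_idx)
{
	return mac_idx == 0 ? addr : addr + rtwdev->mac_def->band1_offset;
}

With rtwdev in hand the call sites stay identical across generations, which is why the conversion in these files is purely mechanical.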
mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_V1_MASK << (8 * i), val_1t); /* 2TX */ - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_V1_MASK << (8 * i), val_2t); @@ -2181,7 +2183,7 @@ static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path, for (addr = R_AX_MACID_ANT_TABLE; addr <= R_AX_MACID_ANT_TABLE_LAST; addr += 4) { - reg = rtw89_mac_reg_by_idx(addr, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, addr, mac_idx); rtw89_write32(rtwdev, reg, 0); } @@ -2211,7 +2213,7 @@ static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path, for (i = 0; i < cr_size; i++) { rtw89_debug(rtwdev, RTW89_DBG_TSSI, "0x%x = 0x%x\n", path_com[i].addr, path_com[i].data); - reg = rtw89_mac_reg_by_idx(path_com[i].addr, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, path_com[i].addr, mac_idx); rtw89_write32(rtwdev, reg, path_com[i].data); } } @@ -2804,6 +2806,8 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .chip_id = RTL8852C, .chip_gen = RTW89_CHIP_AX, .ops = &rtw8852c_chip_ops, + .mac_def = &rtw89_mac_gen_ax, + .phy_def = &rtw89_phy_gen_ax, .fw_basename = RTW8852C_FW_BASENAME, .fw_format_max = RTW8852C_FW_FORMAT_MAX, .try_ce_fw = false, diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c index dfccae81c380..aed05b026c6c 100644 --- a/drivers/net/wireless/realtek/rtw89/sar.c +++ b/drivers/net/wireless/realtek/rtw89/sar.c @@ -2,9 +2,16 @@ /* Copyright(c) 2019-2020 Realtek Corporation */ +#include "acpi.h" #include "debug.h" +#include "phy.h" +#include "reg.h" #include "sar.h" +#define RTW89_TAS_FACTOR 2 /* unit: 0.25 dBm */ +#define RTW89_TAS_DPR_GAP (1 << RTW89_TAS_FACTOR) +#define RTW89_TAS_DELTA (2 << RTW89_TAS_FACTOR) + static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev, u32 center_freq) { @@ -78,17 +85,15 @@ static const struct rtw89_sar_span rtw89_sar_overlapping_6ghz[] = { RTW89_DECL_SAR_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8), }; -static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, s32 *cfg) +static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, + u32 center_freq, s32 *cfg) { struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); - enum rtw89_band band = chan->band_type; - u32 center_freq = chan->freq; const struct rtw89_sar_span *span = NULL; enum rtw89_sar_subband subband_l, subband_h; int idx; - if (band == RTW89_BAND_6G) { + if (center_freq >= RTW89_SAR_6GHZ_SPAN_HEAD) { idx = RTW89_SAR_6GHZ_SPAN_IDX(center_freq); /* To decrease size of rtw89_sar_overlapping_6ghz[], * RTW89_SAR_6GHZ_SPAN_IDX() truncates the leading NULLs @@ -108,8 +113,8 @@ static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, s32 *cfg) } rtw89_debug(rtwdev, RTW89_DBG_SAR, - "for {band %u, center_freq %u}, SAR subband: {%u, %u}\n", - band, center_freq, subband_l, subband_h); + "center_freq %u: SAR subband {%u, %u}\n", + center_freq, subband_l, subband_h); if (!rtwsar->set[subband_l] && !rtwsar->set[subband_h]) return -ENODATA; @@ -157,11 +162,35 @@ static s8 rtw89_txpwr_sar_to_mac(struct rtw89_dev *rtwdev, u8 fct, s32 cfg) RTW89_SAR_TXPWR_MAC_MAX); } -s8 rtw89_query_sar(struct rtw89_dev *rtwdev) +static s8 rtw89_txpwr_tas_to_sar(const struct rtw89_sar_handler *sar_hdl, + s8 cfg) +{ + const u8 
fct = sar_hdl->txpwr_factor_sar; + + if (fct > RTW89_TAS_FACTOR) + return cfg << (fct - RTW89_TAS_FACTOR); + else + return cfg >> (RTW89_TAS_FACTOR - fct); +} + +static s8 rtw89_txpwr_sar_to_tas(const struct rtw89_sar_handler *sar_hdl, + s8 cfg) +{ + const u8 fct = sar_hdl->txpwr_factor_sar; + + if (fct > RTW89_TAS_FACTOR) + return cfg >> (fct - RTW89_TAS_FACTOR); + else + return cfg << (RTW89_TAS_FACTOR - fct); +} + +s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq) { const enum rtw89_sar_sources src = rtwdev->sar.src; /* its members are protected by rtw89_sar_set_src() */ const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src]; + struct rtw89_tas_info *tas = &rtwdev->tas; + s8 delta; int ret; s32 cfg; u8 fct; @@ -171,16 +200,30 @@ s8 rtw89_query_sar(struct rtw89_dev *rtwdev) if (src == RTW89_SAR_SOURCE_NONE) return RTW89_SAR_TXPWR_MAC_MAX; - ret = sar_hdl->query_sar_config(rtwdev, &cfg); + ret = sar_hdl->query_sar_config(rtwdev, center_freq, &cfg); if (ret) return RTW89_SAR_TXPWR_MAC_MAX; + if (tas->enable) { + switch (tas->state) { + case RTW89_TAS_STATE_DPR_OFF: + return RTW89_SAR_TXPWR_MAC_MAX; + case RTW89_TAS_STATE_DPR_ON: + delta = rtw89_txpwr_tas_to_sar(sar_hdl, tas->delta); + cfg -= delta; + break; + case RTW89_TAS_STATE_DPR_FORBID: + default: + break; + } + } + fct = sar_hdl->txpwr_factor_sar; return rtw89_txpwr_sar_to_mac(rtwdev, fct, cfg); } -void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev) +void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev, u32 center_freq) { const enum rtw89_sar_sources src = rtwdev->sar.src; /* its members are protected by rtw89_sar_set_src() */ @@ -199,7 +242,7 @@ void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev) seq_printf(m, "source: %d (%s)\n", src, sar_hdl->descr_sar_source); - ret = sar_hdl->query_sar_config(rtwdev, &cfg); + ret = sar_hdl->query_sar_config(rtwdev, center_freq, &cfg); if (ret) { seq_printf(m, "config: return code: %d\n", ret); seq_printf(m, "assign: max setting: %d (unit: 1/%lu dBm)\n", @@ -212,6 +255,19 @@ void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev) seq_printf(m, "config: %d (unit: 1/%lu dBm)\n", cfg, BIT(fct)); } +void rtw89_print_tas(struct seq_file *m, struct rtw89_dev *rtwdev) +{ + struct rtw89_tas_info *tas = &rtwdev->tas; + + if (!tas->enable) { + seq_puts(m, "no TAS is applied\n"); + return; + } + + seq_printf(m, "DPR gap: %d\n", tas->dpr_gap); + seq_printf(m, "TAS delta: %d\n", tas->delta); +} + static int rtw89_apply_sar_common(struct rtw89_dev *rtwdev, const struct rtw89_sar_cfg_common *sar) { @@ -292,3 +348,145 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw, return rtw89_apply_sar_common(rtwdev, &sar_common); } + +static void rtw89_tas_state_update(struct rtw89_dev *rtwdev) +{ + const enum rtw89_sar_sources src = rtwdev->sar.src; + /* its members are protected by rtw89_sar_set_src() */ + const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src]; + struct rtw89_tas_info *tas = &rtwdev->tas; + s32 txpwr_avg = tas->total_txpwr / RTW89_TAS_MAX_WINDOW / PERCENT; + s32 dpr_on_threshold, dpr_off_threshold, cfg; + enum rtw89_tas_state state = tas->state; + const struct rtw89_chan *chan; + int ret; + + lockdep_assert_held(&rtwdev->mutex); + + if (src == RTW89_SAR_SOURCE_NONE) + return; + + chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + ret = sar_hdl->query_sar_config(rtwdev, chan->freq, &cfg); + if (ret) + return; + + cfg = rtw89_txpwr_sar_to_tas(sar_hdl, cfg); + + if (tas->delta >= cfg) { + rtw89_debug(rtwdev, 
RTW89_DBG_SAR, + "TAS delta exceed SAR limit\n"); + state = RTW89_TAS_STATE_DPR_FORBID; + goto out; + } + + dpr_on_threshold = cfg; + dpr_off_threshold = cfg - tas->dpr_gap; + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "DPR_ON thold: %d, DPR_OFF thold: %d, txpwr_avg: %d\n", + dpr_on_threshold, dpr_off_threshold, txpwr_avg); + + if (txpwr_avg >= dpr_on_threshold) + state = RTW89_TAS_STATE_DPR_ON; + else if (txpwr_avg < dpr_off_threshold) + state = RTW89_TAS_STATE_DPR_OFF; + +out: + if (tas->state == state) + return; + + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "TAS old state: %d, new state: %d\n", tas->state, state); + tas->state = state; + rtw89_core_set_chip_txpwr(rtwdev); +} + +void rtw89_tas_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_tas_info *tas = &rtwdev->tas; + int ret; + u8 val; + + ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_TAS_EN, &val); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "acpi: cannot get TAS: %d\n", ret); + return; + } + + switch (val) { + case 0: + tas->enable = false; + break; + case 1: + tas->enable = true; + break; + default: + break; + } + + if (!tas->enable) { + rtw89_debug(rtwdev, RTW89_DBG_SAR, "TAS not enable\n"); + return; + } + + tas->dpr_gap = RTW89_TAS_DPR_GAP; + tas->delta = RTW89_TAS_DELTA; +} + +void rtw89_tas_reset(struct rtw89_dev *rtwdev) +{ + struct rtw89_tas_info *tas = &rtwdev->tas; + + if (!tas->enable) + return; + + memset(&tas->txpwr_history, 0, sizeof(tas->txpwr_history)); + tas->total_txpwr = 0; + tas->cur_idx = 0; + tas->state = RTW89_TAS_STATE_DPR_OFF; +} + +static const struct rtw89_reg_def txpwr_regs[] = { + {R_PATH0_TXPWR, B_PATH0_TXPWR}, + {R_PATH1_TXPWR, B_PATH1_TXPWR}, +}; + +void rtw89_tas_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + const enum rtw89_sar_sources src = rtwdev->sar.src; + u8 max_nss_num = rtwdev->chip->rf_path_num; + struct rtw89_tas_info *tas = &rtwdev->tas; + s16 tmp, txpwr, instant_txpwr = 0; + u32 val; + int i; + + if (!tas->enable || src == RTW89_SAR_SOURCE_NONE) + return; + + if (env->ccx_watchdog_result != RTW89_PHY_ENV_MON_IFS_CLM) + return; + + for (i = 0; i < max_nss_num; i++) { + val = rtw89_phy_read32_mask(rtwdev, txpwr_regs[i].addr, + txpwr_regs[i].mask); + tmp = sign_extend32(val, 8); + if (tmp <= 0) + return; + instant_txpwr += tmp; + } + + instant_txpwr /= max_nss_num; + /* in unit of 0.25 dBm multiply by percentage */ + txpwr = instant_txpwr * env->ifs_clm_tx_ratio; + tas->total_txpwr += txpwr - tas->txpwr_history[tas->cur_idx]; + tas->txpwr_history[tas->cur_idx] = txpwr; + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "instant_txpwr: %d, tx_ratio: %d, txpwr: %d\n", + instant_txpwr, env->ifs_clm_tx_ratio, txpwr); + + tas->cur_idx = (tas->cur_idx + 1) % RTW89_TAS_MAX_WINDOW; + + rtw89_tas_state_update(rtwdev); +} diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h index 7b5484c84eb1..bd7a657188d9 100644 --- a/drivers/net/wireless/realtek/rtw89/sar.h +++ b/drivers/net/wireless/realtek/rtw89/sar.h @@ -13,14 +13,18 @@ struct rtw89_sar_handler { const char *descr_sar_source; u8 txpwr_factor_sar; - int (*query_sar_config)(struct rtw89_dev *rtwdev, s32 *cfg); + int (*query_sar_config)(struct rtw89_dev *rtwdev, u32 center_freq, s32 *cfg); }; extern const struct cfg80211_sar_capa rtw89_sar_capa; -s8 rtw89_query_sar(struct rtw89_dev *rtwdev); -void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev); +s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq); +void rtw89_print_sar(struct 
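rtw89_tas_track() above keeps a fixed-length ring of transmit-power samples: each watchdog pass reads the per-path instantaneous power, weights it by the IFS-CLM transmit airtime ratio, and replaces the oldest entry while total_txpwr tracks the running sum, so rtw89_tas_state_update() can compare the window average against the SAR limit and the DPR gap. A standalone sketch of that bookkeeping, assuming a window of 8 samples purely for illustration (RTW89_TAS_MAX_WINDOW's real value is not visible here):

#include <linux/types.h>

#define DEMO_TAS_WINDOW 8	/* assumed window length, for illustration */

struct demo_tas {
	s16 history[DEMO_TAS_WINDOW];
	s32 total;	/* running sum of every sample in the window */
	u8 cur;		/* slot that will be overwritten next */
};

/* O(1) sliding-window update: evict the oldest sample, add the newest. */
static void demo_tas_push(struct demo_tas *tas, s16 sample)
{
	tas->total += sample - tas->history[tas->cur];
	tas->history[tas->cur] = sample;
	tas->cur = (tas->cur + 1) % DEMO_TAS_WINDOW;
}

static s32 demo_tas_avg(const struct demo_tas *tas)
{
	return tas->total / DEMO_TAS_WINDOW;
}

The driver then switches to DPR_ON when the average reaches the SAR limit and back to DPR_OFF only once it falls below the limit minus the DPR gap, giving the state machine hysteresis.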
seq_file *m, struct rtw89_dev *rtwdev, u32 center_freq); +void rtw89_print_tas(struct seq_file *m, struct rtw89_dev *rtwdev); int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar); +void rtw89_tas_init(struct rtw89_dev *rtwdev); +void rtw89_tas_reset(struct rtw89_dev *rtwdev); +void rtw89_tas_track(struct rtw89_dev *rtwdev); #endif diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index 0462ba693f6f..c1644353053f 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -529,6 +529,9 @@ static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt) static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf, u8 sel, u32 start_addr, u32 len) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + u32 filter_model_addr = mac->filter_model_addr; + u32 indir_access_addr = mac->indir_access_addr; u32 *ptr = (u32 *)buf; u32 base_addr, start_page, residue; u32 cnt = 0; @@ -536,14 +539,14 @@ static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf, start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE; residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE; - base_addr = rtw89_mac_mem_base_addrs[sel]; + base_addr = mac->mem_base_addrs[sel]; base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE; while (cnt < len) { - rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr); + rtw89_write32(rtwdev, filter_model_addr, base_addr); - for (i = R_AX_INDIR_ACCESS_ENTRY + residue; - i < R_AX_INDIR_ACCESS_ENTRY + MAC_MEM_DUMP_PAGE_SIZE; + for (i = indir_access_addr + residue; + i < indir_access_addr + MAC_MEM_DUMP_PAGE_SIZE; i += 4, ptr++) { *ptr = rtw89_read32(rtwdev, i); cnt += 4; @@ -585,6 +588,9 @@ static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf, const struct __fw_backtrace_entry *ent) { struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + u32 filter_model_addr = mac->filter_model_addr; + u32 indir_access_addr = mac->indir_access_addr; u32 fwbt_addr = ent->wcpu_addr & RTW89_WCPU_BASE_MASK; u32 fwbt_size = ent->size; u32 fwbt_key = ent->key; @@ -610,10 +616,10 @@ static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf, } rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n"); - rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, fwbt_addr); + rtw89_write32(rtwdev, filter_model_addr, fwbt_addr); - for (i = R_AX_INDIR_ACCESS_ENTRY; - i < R_AX_INDIR_ACCESS_ENTRY + fwbt_size; + for (i = indir_access_addr; + i < indir_access_addr + fwbt_size; i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) { *ptr = (struct __fw_backtrace_info){ .ra = rtw89_read32(rtwdev, i), diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c index 364e54622150..aa9efca04025 100644 --- a/drivers/net/wireless/realtek/rtw89/wow.c +++ b/drivers/net/wireless/realtek/rtw89/wow.c @@ -40,6 +40,7 @@ static void rtw89_wow_leave_lps(struct rtw89_dev *rtwdev) static int rtw89_wow_config_mac(struct rtw89_dev *rtwdev, bool enable_wow) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; int ret; if (enable_wow) { @@ -49,7 +50,7 @@ static int rtw89_wow_config_mac(struct rtw89_dev *rtwdev, bool enable_wow) return ret; } rtw89_write32_set(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP); - rtw89_write32_clr(rtwdev, R_AX_RX_FLTR_OPT, B_AX_SNIFFER_MODE); + rtw89_write32_clr(rtwdev, mac->rx_fltr, B_AX_SNIFFER_MODE); rtw89_mac_cfg_ppdu_status(rtwdev, 
RTW89_MAC_0, false); rtw89_write32(rtwdev, R_AX_ACTION_FWD0, 0); rtw89_write32(rtwdev, R_AX_ACTION_FWD1, 0); diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c index 45ac9371f262..372eaaa2b9ef 100644 --- a/drivers/net/wireless/rsi/rsi_91x_coex.c +++ b/drivers/net/wireless/rsi/rsi_91x_coex.c @@ -52,8 +52,7 @@ static void rsi_coex_sched_tx_pkts(struct rsi_coex_ctrl_block *coex_cb) static void rsi_coex_scheduler_thread(struct rsi_common *common) { - struct rsi_coex_ctrl_block *coex_cb = - (struct rsi_coex_ctrl_block *)common->coex_cb; + struct rsi_coex_ctrl_block *coex_cb = common->coex_cb; u32 timeout = EVENT_WAIT_FOREVER; do { @@ -100,9 +99,8 @@ static inline int rsi_map_coex_q(u8 hal_queue) int rsi_coex_send_pkt(void *priv, struct sk_buff *skb, u8 hal_queue) { - struct rsi_common *common = (struct rsi_common *)priv; - struct rsi_coex_ctrl_block *coex_cb = - (struct rsi_coex_ctrl_block *)common->coex_cb; + struct rsi_common *common = priv; + struct rsi_coex_ctrl_block *coex_cb = common->coex_cb; struct skb_info *tx_params = NULL; enum rsi_coex_queues coex_q; int status; @@ -168,8 +166,7 @@ int rsi_coex_attach(struct rsi_common *common) void rsi_coex_detach(struct rsi_common *common) { - struct rsi_coex_ctrl_block *coex_cb = - (struct rsi_coex_ctrl_block *)common->coex_cb; + struct rsi_coex_ctrl_block *coex_cb = common->coex_cb; int cnt; rsi_kill_thread(&coex_cb->coex_tx_thread); diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c index bf22fd948276..c528e6ca2c8d 100644 --- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c +++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c @@ -28,8 +28,7 @@ static int rsi_sdio_stats_read(struct seq_file *seq, void *data) { struct rsi_common *common = seq->private; struct rsi_hw *adapter = common->priv; - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; seq_printf(seq, "total_sdio_interrupts: %d\n", dev->rx_info.sdio_int_counter); diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index d4489b943873..2cebe562a1f4 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -424,7 +424,7 @@ out: int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb) { - struct rsi_hw *adapter = (struct rsi_hw *)common->priv; + struct rsi_hw *adapter = common->priv; struct rsi_data_desc *bcn_frm; struct ieee80211_hw *hw = common->priv->hw; struct ieee80211_conf *conf = &hw->conf; diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index bc1f038d1655..05890536e353 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -1763,8 +1763,8 @@ static int rsi_mac80211_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *chan, int duration, enum ieee80211_roc_type type) { - struct rsi_hw *adapter = (struct rsi_hw *)hw->priv; - struct rsi_common *common = (struct rsi_common *)adapter->priv; + struct rsi_hw *adapter = hw->priv; + struct rsi_common *common = adapter->priv; int status = 0; rsi_dbg(INFO_ZONE, "***** Remain on channel *****\n"); diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index f9f004446b07..2112d8d277a9 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -270,14 +270,14 @@ static void 
rsi_tx_scheduler_thread(struct rsi_common *common) #ifdef CONFIG_RSI_COEX enum rsi_host_intf rsi_get_host_intf(void *priv) { - struct rsi_common *common = (struct rsi_common *)priv; + struct rsi_common *common = priv; return common->priv->rsi_host_intf; } void rsi_set_bt_context(void *priv, void *bt_context) { - struct rsi_common *common = (struct rsi_common *)priv; + struct rsi_common *common = priv; common->bt_adapter = bt_context; } diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 1911fef3bbad..8e7b757475d2 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -144,8 +144,7 @@ static int rsi_issue_sdiocommand(struct sdio_func *func, static void rsi_handle_interrupt(struct sdio_func *function) { struct rsi_hw *adapter = sdio_get_drvdata(function); - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; if (adapter->priv->fsm_state == FSM_FW_NOT_LOADED) return; @@ -337,8 +336,7 @@ static void rsi_reset_card(struct sdio_func *pfunction) */ static void rsi_setclock(struct rsi_hw *adapter, u32 freq) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; struct mmc_host *host = dev->pfunction->card->host; u32 clock; @@ -358,8 +356,7 @@ static void rsi_setclock(struct rsi_hw *adapter, u32 freq) */ static int rsi_setblocklength(struct rsi_hw *adapter, u32 length) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; int status; rsi_dbg(INIT_ZONE, "%s: Setting the block length\n", __func__); @@ -380,8 +377,7 @@ static int rsi_setblocklength(struct rsi_hw *adapter, u32 length) */ static int rsi_setupcard(struct rsi_hw *adapter) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; int status = 0; rsi_setclock(adapter, 50000); @@ -407,8 +403,7 @@ int rsi_sdio_read_register(struct rsi_hw *adapter, u32 addr, u8 *data) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; u8 fun_num = 0; int status; @@ -441,8 +436,7 @@ int rsi_sdio_write_register(struct rsi_hw *adapter, u32 addr, u8 *data) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; int status = 0; if (likely(dev->sdio_irq_task != current)) @@ -495,8 +489,7 @@ static int rsi_sdio_read_register_multiple(struct rsi_hw *adapter, u8 *data, u16 count) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; u32 status; if (likely(dev->sdio_irq_task != current)) @@ -527,8 +520,7 @@ int rsi_sdio_write_register_multiple(struct rsi_hw *adapter, u8 *data, u16 count) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; int status; if (dev->write_fail > 1) { @@ -762,8 +754,7 @@ static int rsi_sdio_host_intf_write_pkt(struct rsi_hw *adapter, u8 *pkt, u32 len) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; u32 block_size = dev->tx_blk_size; u32 num_blocks, address, length; u32 queueno; @@ -1045,7 +1036,7 @@ static int rsi_probe(struct sdio_func *pfunction, goto fail_free_adapter; } - 
sdev = (struct rsi_91x_sdiodev *)adapter->rsi_dev; + sdev = adapter->rsi_dev; rsi_init_event(&sdev->rx_thread.event); status = rsi_create_kthread(adapter->priv, &sdev->rx_thread, rsi_sdio_rx_thread, "SDIO-RX-Thread"); @@ -1221,7 +1212,7 @@ static void rsi_disconnect(struct sdio_func *pfunction) if (!adapter) return; - dev = (struct rsi_91x_sdiodev *)adapter->rsi_dev; + dev = adapter->rsi_dev; rsi_kill_thread(&dev->rx_thread); sdio_claim_host(pfunction); @@ -1255,8 +1246,7 @@ static void rsi_disconnect(struct sdio_func *pfunction) #ifdef CONFIG_PM static int rsi_set_sdio_pm_caps(struct rsi_hw *adapter) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; struct sdio_func *func = dev->pfunction; int ret; @@ -1407,7 +1397,7 @@ static int rsi_freeze(struct device *dev) return -ENODEV; } common = adapter->priv; - sdev = (struct rsi_91x_sdiodev *)adapter->rsi_dev; + sdev = adapter->rsi_dev; if ((common->wow_flags & RSI_WOW_ENABLED) && (common->wow_flags & RSI_WOW_NO_CONNECTION)) @@ -1457,8 +1447,7 @@ static void rsi_shutdown(struct device *dev) { struct sdio_func *pfunction = dev_to_sdio_func(dev); struct rsi_hw *adapter = sdio_get_drvdata(pfunction); - struct rsi_91x_sdiodev *sdev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *sdev = adapter->rsi_dev; struct ieee80211_hw *hw = adapter->hw; rsi_dbg(ERR_ZONE, "SDIO Bus shutdown =====>\n"); diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index b2b47a0abcbf..597b238e2294 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -88,8 +88,7 @@ void rsi_sdio_rx_thread(struct rsi_common *common) static int rsi_process_pkt(struct rsi_common *common) { struct rsi_hw *adapter = common->priv; - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; u8 num_blks = 0; u32 rcv_pkt_len = 0; int status = 0; @@ -147,8 +146,7 @@ static int rsi_process_pkt(struct rsi_common *common) */ int rsi_init_sdio_slave_regs(struct rsi_hw *adapter) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; u8 function = 0; u8 byte; int status = 0; @@ -233,8 +231,7 @@ int rsi_init_sdio_slave_regs(struct rsi_hw *adapter) static void rsi_rx_handler(struct rsi_hw *adapter) { struct rsi_common *common = adapter->priv; - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; int status; u8 isr_status = 0; u8 fw_status = 0; @@ -339,8 +336,7 @@ static void rsi_rx_handler(struct rsi_hw *adapter) int rsi_sdio_check_buffer_status(struct rsi_hw *adapter, u8 q_num) { struct rsi_common *common = adapter->priv; - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; u8 buf_status = 0; int status = 0; static int counter = 4; @@ -409,8 +405,7 @@ out: */ int rsi_sdio_determine_event_timeout(struct rsi_hw *adapter) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; /* Once buffer full is seen, event timeout to occur every 2 msecs */ if (dev->rx_info.buffer_full) diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 66fe386ec9cc..10a465686439 100644 --- 
a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -43,7 +43,7 @@ static int rsi_usb_card_write(struct rsi_hw *adapter, u16 len, u8 endpoint) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; int status; u8 *seg = dev->tx_buffer; int transfer; @@ -91,7 +91,7 @@ static int rsi_write_multiple(struct rsi_hw *adapter, if (endpoint == 0) return -EINVAL; - dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + dev = adapter->rsi_dev; if (dev->write_fail) return -ENETDOWN; @@ -109,7 +109,7 @@ static int rsi_write_multiple(struct rsi_hw *adapter, static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface, struct rsi_hw *adapter) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; __le16 buffer_size; @@ -306,7 +306,7 @@ out: static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; struct rx_usb_ctrl_block *rx_cb = &dev->rx_cb[ep_num - 1]; struct urb *urb = rx_cb->rx_urb; @@ -323,7 +323,7 @@ static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num) */ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t mem_flags) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; struct rx_usb_ctrl_block *rx_cb = &dev->rx_cb[ep_num - 1]; struct urb *urb = rx_cb->rx_urb; int status; @@ -362,7 +362,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t mem_flags) static int rsi_usb_read_register_multiple(struct rsi_hw *adapter, u32 addr, u8 *data, u16 count) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; u8 *buf; u16 transfer; int status; @@ -412,7 +412,7 @@ static int rsi_usb_read_register_multiple(struct rsi_hw *adapter, u32 addr, static int rsi_usb_write_register_multiple(struct rsi_hw *adapter, u32 addr, u8 *data, u16 count) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; u8 *buf; u16 transfer; int status = 0; @@ -559,7 +559,7 @@ static struct rsi_host_intf_ops usb_host_intf_ops = { */ static void rsi_deinit_usb_interface(struct rsi_hw *adapter) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; rsi_kill_thread(&dev->rx_thread); @@ -572,7 +572,7 @@ static void rsi_deinit_usb_interface(struct rsi_hw *adapter) static int rsi_usb_init_rx(struct rsi_hw *adapter) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; struct rx_usb_ctrl_block *rx_cb; u8 idx, num_rx_cb; @@ -822,7 +822,7 @@ static int rsi_probe(struct usb_interface *pfunction, goto err1; } - dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + dev = adapter->rsi_dev; status = rsi_usb_reg_read(dev->usbdev, FW_STATUS_REG, &fw_status, 2); if (status < 0) diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c index 5130b0e72adc..25c2b232394a 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c @@ -28,7 +28,7 @@ void rsi_usb_rx_thread(struct rsi_common 
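The long run of rsi changes above (continuing just below) is one mechanical cleanup: in C a void * converts implicitly to any object-pointer type, so the explicit (struct rsi_91x_sdiodev *) and (struct rsi_91x_usbdev *) casts on adapter->rsi_dev, and the casts on the priv cookies, added noise without adding type safety. A two-line illustration of the rule:

struct demo_priv {
	int id;
};

static struct demo_priv *demo_from_cookie(void *cookie)
{
	struct demo_priv *priv = cookie;	/* no cast needed in C (unlike C++) */

	return priv;
}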
*common) { struct rsi_hw *adapter = common->priv; - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; int status; struct sk_buff *skb; diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c index 0b50f7058bbb..ede822d771aa 100644 --- a/drivers/net/wireless/silabs/wfx/main.c +++ b/drivers/net/wireless/silabs/wfx/main.c @@ -293,13 +293,12 @@ struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_da hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; hw->wiphy->n_iface_combinations = ARRAY_SIZE(wfx_iface_combinations); hw->wiphy->iface_combinations = wfx_iface_combinations; - hw->wiphy->bands[NL80211_BAND_2GHZ] = devm_kmalloc(dev, sizeof(wfx_band_2ghz), GFP_KERNEL); + /* FIXME: also copy wfx_rates and wfx_2ghz_chantable */ + hw->wiphy->bands[NL80211_BAND_2GHZ] = devm_kmemdup(dev, &wfx_band_2ghz, + sizeof(wfx_band_2ghz), GFP_KERNEL); if (!hw->wiphy->bands[NL80211_BAND_2GHZ]) goto err; - /* FIXME: also copy wfx_rates and wfx_2ghz_chantable */ - memcpy(hw->wiphy->bands[NL80211_BAND_2GHZ], &wfx_band_2ghz, sizeof(wfx_band_2ghz)); - wdev = hw->priv; wdev->hw = hw; wdev->dev = dev; diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index cf8d909fa826..f0686635db46 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -442,18 +442,7 @@ static struct sdio_driver wl1271_sdio_driver = { #endif }; -static int __init wl1271_init(void) -{ - return sdio_register_driver(&wl1271_sdio_driver); -} - -static void __exit wl1271_exit(void) -{ - sdio_unregister_driver(&wl1271_sdio_driver); -} - -module_init(wl1271_init); -module_exit(wl1271_exit); +module_sdio_driver(wl1271_sdio_driver); module_param(dump, bool, 0600); MODULE_PARM_DESC(dump, "Enable sdio read/write dumps."); diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index f446fd0e8cd0..1f524030b186 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -582,9 +582,8 @@ static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy, */ /* Add vendor data */ - err = nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 1); - if (err) - return err; + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 1); + /* Send the event - this will call nla_nest_end() */ cfg80211_vendor_event(skb, GFP_KERNEL); } @@ -5626,14 +5625,15 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]); frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]); + if (frame_data_len < sizeof(struct ieee80211_hdr_3addr) || + frame_data_len > IEEE80211_MAX_DATA_LEN) + goto err; + /* Allocate new skb here */ skb = alloc_skb(frame_data_len, GFP_KERNEL); if (skb == NULL) goto err; - if (frame_data_len > IEEE80211_MAX_DATA_LEN) - goto err; - /* Copy the data */ skb_put_data(skb, frame_data, frame_data_len); @@ -6314,7 +6314,7 @@ static void hwsim_virtio_tx_done(struct virtqueue *vq) spin_lock_irqsave(&hwsim_virtio_lock, flags); while ((skb = virtqueue_get_buf(vq, &len))) - nlmsg_free(skb); + dev_kfree_skb_irq(skb); spin_unlock_irqrestore(&hwsim_virtio_lock, flags); } @@ -6383,14 +6383,14 @@ static void hwsim_virtio_rx_work(struct work_struct *work) spin_lock_irqsave(&hwsim_virtio_lock, flags); if (!hwsim_virtio_enabled) { - nlmsg_free(skb); + dev_kfree_skb_irq(skb); goto out_unlock; } vq = 
hwsim_vqs[HWSIM_VQ_RX]; sg_init_one(sg, skb->head, skb_end_offset(skb)); err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_ATOMIC); if (WARN(err, "virtqueue_add_inbuf returned %d\n", err)) - nlmsg_free(skb); + dev_kfree_skb_irq(skb); else virtqueue_kick(vq); schedule_work(&hwsim_virtio_rx); diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c index a85fe7e4c6d4..2814df1ecc78 100644 --- a/drivers/net/wireless/zydas/zd1201.c +++ b/drivers/net/wireless/zydas/zd1201.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Driver for ZyDAS zd1201 based wireless USB devices. + * Driver for ZyDAS zd1201 based USB wireless devices. * * Copyright (c) 2004, 2005 Jeroen Vreeken ([email protected]) * @@ -23,8 +23,8 @@ #include "zd1201.h" static const struct usb_device_id zd1201_table[] = { - {USB_DEVICE(0x0586, 0x3400)}, /* Peabird Wireless USB Adapter */ - {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */ + {USB_DEVICE(0x0586, 0x3400)}, /* Peabird USB Wireless Adapter */ + {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 USB Wireless Adapter */ {USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb adapter */ {USB_DEVICE(0x0db0, 0x6823)}, /* MSI UB11B usb adapter */ {USB_DEVICE(0x1044, 0x8004)}, /* Gigabyte GN-WLBZ101 */ diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 62b71e8e3567..ff1b70269ccb 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -1394,7 +1394,7 @@ static int eeepc_acpi_add(struct acpi_device *device) * and machine-specific scripts find the fixed name convenient. But * It's also good for us to exclude multiple instances because both * our hwmon and our wlan rfkill subdevice use global ACPI objects - * (the EC and the wlan PCI slot respectively). + * (the EC and the PCI wlan slot respectively). 
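[Editorial sketch, not part of the patch above] The wfx hunk earlier folds a devm_kmalloc() plus memcpy() pair into a single devm_kmemdup() call. A minimal sketch of that idiom, using a hypothetical driver structure and template table rather than the real wfx band data:

#include <linux/device.h>
#include <linux/slab.h>

struct example_band {
        int n_channels;
        int channels[14];
};

/* shared read-only template that each device instance needs a private,
 * writable copy of
 */
static const struct example_band example_band_template = {
        .n_channels = 14,
};

static int example_probe(struct device *dev)
{
        struct example_band *band;

        /* devm_kmemdup() allocates device-managed memory and copies the
         * template in one step; the copy is freed automatically when the
         * device is unbound and may be modified per device.
         */
        band = devm_kmemdup(dev, &example_band_template,
                            sizeof(example_band_template), GFP_KERNEL);
        if (!band)
                return -ENOMEM;

        dev_set_drvdata(dev, band);
        return 0;
}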
*/ result = eeepc_platform_init(eeepc); if (result) diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c index 80e36d03c4e2..0e0ccef4871e 100644 --- a/drivers/staging/wlan-ng/prism2usb.c +++ b/drivers/staging/wlan-ng/prism2usb.c @@ -11,45 +11,45 @@ static const struct usb_device_id usb_prism_tbl[] = { PRISM_DEV(0x04bb, 0x0922, "IOData AirPort WN-B11/USBS"), - PRISM_DEV(0x07aa, 0x0012, "Corega Wireless LAN USB Stick-11"), - PRISM_DEV(0x09aa, 0x3642, "Prism2.x 11Mbps WLAN USB Adapter"), - PRISM_DEV(0x1668, 0x0408, "Actiontec Prism2.5 11Mbps WLAN USB Adapter"), - PRISM_DEV(0x1668, 0x0421, "Actiontec Prism2.5 11Mbps WLAN USB Adapter"), - PRISM_DEV(0x1915, 0x2236, "Linksys WUSB11v3.0 11Mbps WLAN USB Adapter"), - PRISM_DEV(0x066b, 0x2212, "Linksys WUSB11v2.5 11Mbps WLAN USB Adapter"), - PRISM_DEV(0x066b, 0x2213, "Linksys WUSB12v1.1 11Mbps WLAN USB Adapter"), + PRISM_DEV(0x07aa, 0x0012, "Corega USB Wireless LAN Stick-11"), + PRISM_DEV(0x09aa, 0x3642, "Prism2.x 11Mbps USB WLAN Adapter"), + PRISM_DEV(0x1668, 0x0408, "Actiontec Prism2.5 11Mbps USB WLAN Adapter"), + PRISM_DEV(0x1668, 0x0421, "Actiontec Prism2.5 11Mbps USB WLAN Adapter"), + PRISM_DEV(0x1915, 0x2236, "Linksys WUSB11v3.0 11Mbps USB WLAN Adapter"), + PRISM_DEV(0x066b, 0x2212, "Linksys WUSB11v2.5 11Mbps USB WLAN Adapter"), + PRISM_DEV(0x066b, 0x2213, "Linksys WUSB12v1.1 11Mbps USB WLAN Adapter"), PRISM_DEV(0x0411, 0x0016, "Melco WLI-USB-S11 11Mbps WLAN Adapter"), - PRISM_DEV(0x08de, 0x7a01, "PRISM25 IEEE 802.11 Mini USB Adapter"), - PRISM_DEV(0x8086, 0x1111, "Intel PRO/Wireless 2011B LAN USB Adapter"), + PRISM_DEV(0x08de, 0x7a01, "PRISM25 USB IEEE 802.11 Mini Adapter"), + PRISM_DEV(0x8086, 0x1111, "Intel PRO/Wireless 2011B USB LAN Adapter"), PRISM_DEV(0x0d8e, 0x7a01, "PRISM25 IEEE 802.11 Mini USB Adapter"), - PRISM_DEV(0x045e, 0x006e, "Microsoft MN510 Wireless USB Adapter"), + PRISM_DEV(0x045e, 0x006e, "Microsoft MN510 USB Wireless Adapter"), PRISM_DEV(0x0967, 0x0204, "Acer Warplink USB Adapter"), PRISM_DEV(0x0cde, 0x0002, "Z-Com 725/726 Prism2.5 USB/USB Integrated"), - PRISM_DEV(0x0cde, 0x0005, "Z-Com Xl735 Wireless 802.11b USB Adapter"), - PRISM_DEV(0x413c, 0x8100, "Dell TrueMobile 1180 Wireless USB Adapter"), - PRISM_DEV(0x0b3b, 0x1601, "ALLNET 0193 11Mbps WLAN USB Adapter"), - PRISM_DEV(0x0b3b, 0x1602, "ZyXEL ZyAIR B200 Wireless USB Adapter"), - PRISM_DEV(0x0baf, 0x00eb, "USRobotics USR1120 Wireless USB Adapter"), + PRISM_DEV(0x0cde, 0x0005, "Z-Com Xl735 USB Wireless 802.11b Adapter"), + PRISM_DEV(0x413c, 0x8100, "Dell TrueMobile 1180 USB Wireless Adapter"), + PRISM_DEV(0x0b3b, 0x1601, "ALLNET 0193 11Mbps USB WLAN Adapter"), + PRISM_DEV(0x0b3b, 0x1602, "ZyXEL ZyAIR B200 USB Wireless Adapter"), + PRISM_DEV(0x0baf, 0x00eb, "USRobotics USR1120 USB Wireless Adapter"), PRISM_DEV(0x0411, 0x0027, "Melco WLI-USB-KS11G 11Mbps WLAN Adapter"), PRISM_DEV(0x04f1, 0x3009, "JVC MP-XP7250 Builtin USB WLAN Adapter"), PRISM_DEV(0x0846, 0x4110, "NetGear MA111"), PRISM_DEV(0x03f3, 0x0020, "Adaptec AWN-8020 USB WLAN Adapter"), - PRISM_DEV(0x2821, 0x3300, "ASUS-WL140 / Hawking HighDB Wireless USB Adapter"), - PRISM_DEV(0x2001, 0x3700, "DWL-122 Wireless USB Adapter"), - PRISM_DEV(0x2001, 0x3702, "DWL-120 Rev F Wireless USB Adapter"), + PRISM_DEV(0x2821, 0x3300, "ASUS-WL140 / Hawking HighDB USB Wireless Adapter"), + PRISM_DEV(0x2001, 0x3700, "DWL-122 USB Wireless Adapter"), + PRISM_DEV(0x2001, 0x3702, "DWL-120 Rev F USB Wireless Adapter"), PRISM_DEV(0x50c2, 0x4013, "Averatec USB WLAN Adapter"), - PRISM_DEV(0x2c02, 0x14ea, 
"Planex GW-US11H WLAN USB Adapter"), - PRISM_DEV(0x124a, 0x168b, "Airvast PRISM3 WLAN USB Adapter"), + PRISM_DEV(0x2c02, 0x14ea, "Planex GW-US11H USB WLAN Adapter"), + PRISM_DEV(0x124a, 0x168b, "Airvast PRISM3 USB WLAN Adapter"), PRISM_DEV(0x083a, 0x3503, "T-Sinus 111 USB WLAN Adapter"), PRISM_DEV(0x0411, 0x0044, "Melco WLI-USB-KB11 11Mbps WLAN Adapter"), - PRISM_DEV(0x1668, 0x6106, "ROPEX FreeLan 802.11b USB Adapter"), - PRISM_DEV(0x124a, 0x4017, "Pheenet WL-503IA 802.11b USB Adapter"), + PRISM_DEV(0x1668, 0x6106, "ROPEX FreeLan USB 802.11b Adapter"), + PRISM_DEV(0x124a, 0x4017, "Pheenet WL-503IA USB 802.11b Adapter"), PRISM_DEV(0x0bb2, 0x0302, "Ambit Microsystems Corp."), - PRISM_DEV(0x9016, 0x182d, "Sitecom WL-022 802.11b USB Adapter"), + PRISM_DEV(0x9016, 0x182d, "Sitecom WL-022 USB 802.11b Adapter"), PRISM_DEV(0x0543, 0x0f01, "ViewSonic Airsync USB Adapter 11Mbps (Prism2.5)"), PRISM_DEV(0x067c, 0x1022, - "Siemens SpeedStream 1022 11Mbps WLAN USB Adapter"), + "Siemens SpeedStream 1022 11Mbps USB WLAN Adapter"), PRISM_DEV(0x049f, 0x0033, "Compaq/Intel W100 PRO/Wireless 11Mbps multiport WLAN Adapter"), { } /* terminator */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index eced6400f778..12596af59c00 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -653,7 +653,8 @@ enum bpf_type_flag { MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS), /* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning. - * Currently only valid for linked-list and rbtree nodes. + * Currently only valid for linked-list and rbtree nodes. If the nodes + * have a bpf_refcount_field, they must be tagged MEM_RCU as well. */ NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS), diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index f70f9ac884d2..b6e58dab8e27 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -745,7 +745,7 @@ static inline bool bpf_prog_check_recur(const struct bpf_prog *prog) } } -#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED) +#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF) static inline bool bpf_type_has_unsafe_modifiers(u32 type) { diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 4b998090898e..bd2f6e19c357 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -836,9 +836,14 @@ enum ieee80211_preq_target_flags { }; /** - * struct ieee80211_quiet_ie + * struct ieee80211_quiet_ie - Quiet element + * @count: Quiet Count + * @period: Quiet Period + * @duration: Quiet Duration + * @offset: Quiet Offset * - * This structure refers to "Quiet information element" + * This structure represents the payload of the "Quiet element" as + * described in IEEE Std 802.11-2020 section 9.4.2.22. */ struct ieee80211_quiet_ie { u8 count; @@ -848,9 +853,15 @@ struct ieee80211_quiet_ie { } __packed; /** - * struct ieee80211_msrment_ie + * struct ieee80211_msrment_ie - Measurement element + * @token: Measurement Token + * @mode: Measurement Report Mode + * @type: Measurement Type + * @request: Measurement Request or Measurement Report * - * This structure refers to "Measurement Request/Report information element" + * This structure represents the payload of both the "Measurement + * Request element" and the "Measurement Report element" as described + * in IEEE Std 802.11-2020 sections 9.4.2.20 and 9.4.2.21. 
*/ struct ieee80211_msrment_ie { u8 token; @@ -860,9 +871,14 @@ struct ieee80211_msrment_ie { } __packed; /** - * struct ieee80211_channel_sw_ie + * struct ieee80211_channel_sw_ie - Channel Switch Announcement element + * @mode: Channel Switch Mode + * @new_ch_num: New Channel Number + * @count: Channel Switch Count * - * This structure refers to "Channel Switch Announcement information element" + * This structure represents the payload of the "Channel Switch + * Announcement element" as described in IEEE Std 802.11-2020 section + * 9.4.2.18. */ struct ieee80211_channel_sw_ie { u8 mode; @@ -871,9 +887,14 @@ struct ieee80211_channel_sw_ie { } __packed; /** - * struct ieee80211_ext_chansw_ie + * struct ieee80211_ext_chansw_ie - Extended Channel Switch Announcement element + * @mode: Channel Switch Mode + * @new_operating_class: New Operating Class + * @new_ch_num: New Channel Number + * @count: Channel Switch Count * - * This structure represents the "Extended Channel Switch Announcement element" + * This structure represents the "Extended Channel Switch Announcement + * element" as described in IEEE Std 802.11-2020 section 9.4.2.52. */ struct ieee80211_ext_chansw_ie { u8 mode; @@ -894,8 +915,14 @@ struct ieee80211_sec_chan_offs_ie { /** * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE + * @mesh_ttl: Time To Live + * @mesh_flags: Flags + * @mesh_reason: Reason Code + * @mesh_pre_value: Precedence Value * - * This structure represents the "Mesh Channel Switch Paramters element" + * This structure represents the payload of the "Mesh Channel Switch + * Parameters element" as described in IEEE Std 802.11-2020 section + * 9.4.2.102. */ struct ieee80211_mesh_chansw_params_ie { u8 mesh_ttl; @@ -906,6 +933,13 @@ struct ieee80211_mesh_chansw_params_ie { /** * struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE + * @new_channel_width: New Channel Width + * @new_center_freq_seg0: New Channel Center Frequency Segment 0 + * @new_center_freq_seg1: New Channel Center Frequency Segment 1 + * + * This structure represents the payload of the "Wide Bandwidth + * Channel Switch element" as described in IEEE Std 802.11-2020 + * section 9.4.2.160. */ struct ieee80211_wide_bw_chansw_ie { u8 new_channel_width; @@ -913,9 +947,14 @@ struct ieee80211_wide_bw_chansw_ie { } __packed; /** - * struct ieee80211_tim + * struct ieee80211_tim_ie - Traffic Indication Map information element + * @dtim_count: DTIM Count + * @dtim_period: DTIM Period + * @bitmap_ctrl: Bitmap Control + * @virtual_map: Partial Virtual Bitmap * - * This structure refers to "Traffic Indication Map information element" + * This structure represents the payload of the "TIM element" as + * described in IEEE Std 802.11-2020 section 9.4.2.5. 
*/ struct ieee80211_tim_ie { u8 dtim_count; @@ -926,9 +965,17 @@ struct ieee80211_tim_ie { } __packed; /** - * struct ieee80211_meshconf_ie + * struct ieee80211_meshconf_ie - Mesh Configuration element + * @meshconf_psel: Active Path Selection Protocol Identifier + * @meshconf_pmetric: Active Path Selection Metric Identifier + * @meshconf_congest: Congestion Control Mode Identifier + * @meshconf_synch: Synchronization Method Identifier + * @meshconf_auth: Authentication Protocol Identifier + * @meshconf_form: Mesh Formation Info + * @meshconf_cap: Mesh Capability (see &enum mesh_config_capab_flags) * - * This structure refers to "Mesh Configuration information element" + * This structure represents the payload of the "Mesh Configuration + * element" as described in IEEE Std 802.11-2020 section 9.4.2.97. */ struct ieee80211_meshconf_ie { u8 meshconf_psel; @@ -950,6 +997,9 @@ struct ieee80211_meshconf_ie { * is ongoing * @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has * neighbors in deep sleep mode + * + * Enumerates the "Mesh Capability" as described in IEEE Std + * 802.11-2020 section 9.4.2.97.7. */ enum mesh_config_capab_flags { IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS = 0x01, @@ -960,7 +1010,7 @@ enum mesh_config_capab_flags { #define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1 -/** +/* * mesh channel switch parameters element's flag indicator * */ @@ -969,9 +1019,17 @@ enum mesh_config_capab_flags { #define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2) /** - * struct ieee80211_rann_ie + * struct ieee80211_rann_ie - RANN (root announcement) element + * @rann_flags: Flags + * @rann_hopcount: Hop Count + * @rann_ttl: Element TTL + * @rann_addr: Root Mesh STA Address + * @rann_seq: HWMP Sequence Number + * @rann_interval: Interval + * @rann_metric: Metric * - * This structure refers to "Root Announcement information element" + * This structure represents the payload of the "RANN element" as + * described in IEEE Std 802.11-2020 section 9.4.2.111. */ struct ieee80211_rann_ie { u8 rann_flags; @@ -993,7 +1051,7 @@ enum ieee80211_ht_chanwidth_values { }; /** - * enum ieee80211_opmode_bits - VHT operating mode field bits + * enum ieee80211_vht_opmode_bits - VHT operating mode field bits * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width @@ -1042,9 +1100,12 @@ enum ieee80211_s1g_chanwidth { #define WLAN_USER_POSITION_LEN 16 /** - * struct ieee80211_tpc_report_ie + * struct ieee80211_tpc_report_ie - TPC Report element + * @tx_power: Transmit Power + * @link_margin: Link Margin * - * This structure refers to "TPC Report element" + * This structure represents the payload of the "TPC Report element" as + * described in IEEE Std 802.11-2020 section 9.4.2.16. */ struct ieee80211_tpc_report_ie { u8 tx_power; @@ -1062,9 +1123,14 @@ struct ieee80211_addba_ext_ie { } __packed; /** - * struct ieee80211_s1g_bcn_compat_ie + * struct ieee80211_s1g_bcn_compat_ie - S1G Beacon Compatibility element + * @compat_info: Compatibility Information + * @beacon_int: Beacon Interval + * @tsf_completion: TSF Completion * - * S1G Beacon Compatibility element + * This structure represents the payload of the "S1G Beacon + * Compatibility element" as described in IEEE Std 802.11-2020 section + * 9.4.2.196. 
*/ struct ieee80211_s1g_bcn_compat_ie { __le16 compat_info; @@ -1073,9 +1139,15 @@ struct ieee80211_s1g_bcn_compat_ie { } __packed; /** - * struct ieee80211_s1g_oper_ie + * struct ieee80211_s1g_oper_ie - S1G Operation element + * @ch_width: S1G Operation Information Channel Width + * @oper_class: S1G Operation Information Operating Class + * @primary_ch: S1G Operation Information Primary Channel Number + * @oper_ch: S1G Operation Information Channel Center Frequency + * @basic_mcs_nss: Basic S1G-MCS and NSS Set * - * S1G Operation element + * This structure represents the payload of the "S1G Operation + * element" as described in IEEE Std 802.11-2020 section 9.4.2.212. */ struct ieee80211_s1g_oper_ie { u8 ch_width; @@ -1086,9 +1158,13 @@ struct ieee80211_s1g_oper_ie { } __packed; /** - * struct ieee80211_aid_response_ie + * struct ieee80211_aid_response_ie - AID Response element + * @aid: AID/Group AID + * @switch_count: AID Switch Count + * @response_int: AID Response Interval * - * AID Response element + * This structure represents the payload of the "AID Response element" + * as described in IEEE Std 802.11-2020 section 9.4.2.194. */ struct ieee80211_aid_response_ie { __le16 aid; @@ -1489,7 +1565,7 @@ struct ieee80211_tdls_data { /* * Peer-to-Peer IE attribute related definitions. */ -/** +/* * enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute. */ enum ieee80211_p2p_attr_id { @@ -1539,11 +1615,17 @@ struct ieee80211_p2p_noa_attr { #define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7F /** - * struct ieee80211_bar - HT Block Ack Request + * struct ieee80211_bar - Block Ack Request frame format + * @frame_control: Frame Control + * @duration: Duration + * @ra: RA + * @ta: TA + * @control: BAR Control + * @start_seq_num: Starting Sequence Number (see Figure 9-37) * - * This structure refers to "HT BlockAckReq" as - * described in 802.11n draft section 7.2.1.7.1 - */ + * This structure represents the "BlockAckReq frame format" + * as described in IEEE Std 802.11-2020 section 9.3.1.7. +*/ struct ieee80211_bar { __le16 frame_control; __le16 duration; @@ -1563,13 +1645,17 @@ struct ieee80211_bar { #define IEEE80211_HT_MCS_MASK_LEN 10 /** - * struct ieee80211_mcs_info - MCS information + * struct ieee80211_mcs_info - Supported MCS Set field * @rx_mask: RX mask * @rx_highest: highest supported RX rate. If set represents * the highest supported RX data rate in units of 1 Mbps. * If this field is 0 this value should not be used to * consider the highest RX data rate supported. * @tx_params: TX parameters + * @reserved: Reserved bits + * + * This structure represents the "Supported MCS Set field" as + * described in IEEE Std 802.11-2020 section 9.4.2.55.4. */ struct ieee80211_mcs_info { u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN]; @@ -1600,10 +1686,16 @@ struct ieee80211_mcs_info { (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8) /** - * struct ieee80211_ht_cap - HT capabilities + * struct ieee80211_ht_cap - HT capabilities element + * @cap_info: HT Capability Information + * @ampdu_params_info: A-MPDU Parameters + * @mcs: Supported MCS Set + * @extended_ht_cap_info: HT Extended Capabilities + * @tx_BF_cap_info: Transmit Beamforming Capabilities + * @antenna_selection_info: ASEL Capability * - * This structure is the "HT capabilities element" as - * described in 802.11n D5.0 7.3.2.57 + * This structure represents the payload of the "HT Capabilities + * element" as described in IEEE Std 802.11-2020 section 9.4.2.55. 
*/ struct ieee80211_ht_cap { __le16 cap_info; @@ -1691,9 +1783,14 @@ enum ieee80211_min_mpdu_spacing { /** * struct ieee80211_ht_operation - HT operation IE + * @primary_chan: Primary Channel + * @ht_param: HT Operation Information parameters + * @operation_mode: HT Operation Information operation mode + * @stbc_param: HT Operation Information STBC params + * @basic_set: Basic HT-MCS Set * - * This structure is the "HT operation element" as - * described in 802.11n-2009 7.3.2.57 + * This structure represents the payload of the "HT Operation + * element" as described in IEEE Std 802.11-2020 section 9.4.2.56. */ struct ieee80211_ht_operation { u8 primary_chan; @@ -1862,9 +1959,12 @@ struct ieee80211_vht_operation { /** * struct ieee80211_he_cap_elem - HE capabilities element + * @mac_cap_info: HE MAC Capabilities Information + * @phy_cap_info: HE PHY Capabilities Information * - * This structure is the "HE capabilities element" fixed fields as - * described in P802.11ax_D4.0 section 9.4.2.242.2 and 9.4.2.242.3 + * This structure represents the fixed fields of the payload of the + * "HE capabilities element" as described in IEEE Std 802.11ax-2021 + * sections 9.4.2.248.2 and 9.4.2.248.3. */ struct ieee80211_he_cap_elem { u8 mac_cap_info[6]; @@ -1923,35 +2023,45 @@ struct ieee80211_he_mcs_nss_supp { } __packed; /** - * struct ieee80211_he_operation - HE capabilities element + * struct ieee80211_he_operation - HE Operation element + * @he_oper_params: HE Operation Parameters + BSS Color Information + * @he_mcs_nss_set: Basic HE-MCS And NSS Set + * @optional: Optional fields VHT Operation Information, Max Co-Hosted + * BSSID Indicator, and 6 GHz Operation Information * - * This structure is the "HE operation element" fields as - * described in P802.11ax_D4.0 section 9.4.2.243 + * This structure represents the payload of the "HE Operation + * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.249. */ struct ieee80211_he_operation { __le32 he_oper_params; __le16 he_mcs_nss_set; - /* Optional 0,1,3,4,5,7 or 8 bytes: depends on @he_oper_params */ u8 optional[]; } __packed; /** - * struct ieee80211_he_spr - HE spatial reuse element + * struct ieee80211_he_spr - Spatial Reuse Parameter Set element + * @he_sr_control: SR Control + * @optional: Optional fields Non-SRG OBSS PD Max Offset, SRG OBSS PD + * Min Offset, SRG OBSS PD Max Offset, SRG BSS Color + * Bitmap, and SRG Partial BSSID Bitmap * - * This structure is the "HE spatial reuse element" element as - * described in P802.11ax_D4.0 section 9.4.2.241 + * This structure represents the payload of the "Spatial Reuse + * Parameter Set element" as described in IEEE Std 802.11ax-2021 + * section 9.4.2.252. */ struct ieee80211_he_spr { u8 he_sr_control; - /* Optional 0 to 19 bytes: depends on @he_sr_control */ u8 optional[]; } __packed; /** * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field + * @aifsn: ACI/AIFSN + * @ecw_min_max: ECWmin/ECWmax + * @mu_edca_timer: MU EDCA Timer * - * This structure is the "MU AC Parameter Record" fields as - * described in P802.11ax_D4.0 section 9.4.2.245 + * This structure represents the "MU AC Parameter Record" as described + * in IEEE Std 802.11ax-2021 section 9.4.2.251, Figure 9-788p. 
*/ struct ieee80211_he_mu_edca_param_ac_rec { u8 aifsn; @@ -1961,9 +2071,14 @@ struct ieee80211_he_mu_edca_param_ac_rec { /** * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element + * @mu_qos_info: QoS Info + * @ac_be: MU AC_BE Parameter Record + * @ac_bk: MU AC_BK Parameter Record + * @ac_vi: MU AC_VI Parameter Record + * @ac_vo: MU AC_VO Parameter Record * - * This structure is the "MU EDCA Parameter Set element" fields as - * described in P802.11ax_D4.0 section 9.4.2.245 + * This structure represents the payload of the "MU EDCA Parameter Set + * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.251. */ struct ieee80211_mu_edca_param_set { u8 mu_qos_info; @@ -2177,9 +2292,9 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, * enum ieee80211_ap_reg_power - regulatory power for a Access Point * * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode - * @IEEE80211_REG_LPI: Indoor Access Point - * @IEEE80211_REG_SP: Standard power Access Point - * @IEEE80211_REG_VLP: Very low power Access Point + * @IEEE80211_REG_LPI_AP: Indoor Access Point + * @IEEE80211_REG_SP_AP: Standard power Access Point + * @IEEE80211_REG_VLP_AP: Very low power Access Point * @IEEE80211_REG_AP_POWER_AFTER_LAST: internal * @IEEE80211_REG_AP_POWER_MAX: maximum value */ @@ -2567,7 +2682,7 @@ static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len) #define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 /** - * ieee80211_he_6ghz_oper - HE 6 GHz operation Information field + * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field * @primary: primary channel * @control: control flags * @ccfs0: channel center frequency segment 0 @@ -2614,9 +2729,13 @@ enum ieee80211_tx_power_intrpt_type { }; /** - * struct ieee80211_tx_pwr_env + * struct ieee80211_tx_pwr_env - Transmit Power Envelope + * @tx_power_info: Transmit Power Information field + * @tx_power: Maximum Transmit Power field * - * This structure represents the "Transmit Power Envelope element" + * This structure represents the payload of the "Transmit Power + * Envelope element" as described in IEEE Std 802.11ax-2021 section + * 9.4.2.161 */ struct ieee80211_tx_pwr_env { u8 tx_power_info; @@ -4478,7 +4597,7 @@ static inline bool for_each_element_completed(const struct element *element, return (const u8 *)element == (const u8 *)data + datalen; } -/** +/* * RSNX Capabilities: * bits 0-3: Field length (n-1) */ @@ -4721,7 +4840,7 @@ ieee80211_mle_get_bss_param_ch_cnt(const struct ieee80211_multi_link_elem *mle) } /** - * ieee80211_mle_get_eml_sync_delay - returns the medium sync delay + * ieee80211_mle_get_eml_med_sync_delay - returns the medium sync delay * @data: pointer to the multi link EHT IE * * The element is assumed to be of the correct type (BASIC) and big enough, diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 93399802ba77..4d5be378fa8c 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -364,6 +364,8 @@ enum mlx5_event { enum mlx5_driver_event { MLX5_DRIVER_EVENT_TYPE_TRAP = 0, MLX5_DRIVER_EVENT_UPLINK_NETDEV, + MLX5_DRIVER_EVENT_MACSEC_SA_ADDED, + MLX5_DRIVER_EVENT_MACSEC_SA_DELETED, }; enum { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index c9d82e74daaa..e95f10066eac 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -727,7 +727,6 @@ struct mlx5_fw_tracer; struct mlx5_vxlan; struct mlx5_geneve; struct mlx5_hv_vhca; -struct mlx5_thermal; #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) 
(MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) @@ -809,6 +808,11 @@ struct mlx5_core_dev { struct mlx5_hwmon *hwmon; u64 num_block_tc; u64 num_block_ipsec; +#ifdef CONFIG_MLX5_MACSEC + struct mlx5_macsec_fs *macsec_fs; + /* MACsec notifier chain to sync MACsec core and IB database */ + struct blocking_notifier_head macsec_nh; +#endif }; struct mlx5_db { @@ -1322,6 +1326,52 @@ static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev) return mlx5_is_roce_on(dev); } +#ifdef CONFIG_MLX5_MACSEC +static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) +{ + if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & + MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD)) + return false; + + if (!MLX5_CAP_GEN(mdev, log_max_dek)) + return false; + + if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload)) + return false; + + if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) || + !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec)) + return false; + + if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) || + !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec)) + return false; + + if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) && + !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt)) + return false; + + if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) && + !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt)) + return false; + + return true; +} + +#define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX) + +static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev) +{ + if (((MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & + NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) || + !MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, max_modify_header_actions) || + !mlx5e_is_macsec_device(mdev) || !mdev->macsec_fs) + return false; + + return true; +} +#endif + enum { MLX5_OCTWORD = 16, }; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index c302ec34255b..1e00c2436377 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -105,6 +105,8 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC, + MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC, + MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC, }; enum { diff --git a/include/linux/mlx5/macsec.h b/include/linux/mlx5/macsec.h new file mode 100644 index 000000000000..f7ff4c2a95d0 --- /dev/null +++ b/include/linux/mlx5/macsec.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. 
*/ + +#ifndef MLX5_MACSEC_H +#define MLX5_MACSEC_H + +#ifdef CONFIG_MLX5_MACSEC +struct mlx5_macsec_event_data { + struct mlx5_macsec_fs *macsec_fs; + void *macdev; + u32 fs_id; + bool is_tx; +}; + +int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx, + struct list_head *tx_rules_list, struct list_head *rx_rules_list, + struct mlx5_macsec_fs *macsec_fs); + +void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs, + struct list_head *tx_rules_list, struct list_head *rx_rules_list); + +void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx, + struct list_head *tx_rules_list, + struct list_head *rx_rules_list, + struct mlx5_macsec_fs *macsec_fs, bool is_tx); + +void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs, + struct list_head *tx_rules_list, + struct list_head *rx_rules_list, bool is_tx); + +#endif +#endif /* MLX5_MACSEC_H */ diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index ff99cf7a5d0d..da3a6c30f6d2 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -20,12 +20,20 @@ #define DW_AN_C37_1000BASEX 4 #define DW_10GBASER 5 +/* device vendor OUI */ +#define DW_OUI_WX 0x0018fc80 + +/* dev_flag */ +#define DW_DEV_TXGBE BIT(0) + struct xpcs_id; struct dw_xpcs { struct mdio_device *mdiodev; const struct xpcs_id *id; struct phylink_pcs pcs; + phy_interface_t interface; + int dev_flag; }; int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface); diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 789c516c6b4a..7d07f8736431 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -223,6 +223,8 @@ struct phylink_config { unsigned long mac_capabilities; }; +void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed); + /** * struct phylink_mac_ops - MAC operations structure. * @validate: Validate and update the link configuration. diff --git a/include/linux/platform_data/hirschmann-hellcreek.h b/include/linux/platform_data/hirschmann-hellcreek.h index 6a000df5541f..8748680e9e3c 100644 --- a/include/linux/platform_data/hirschmann-hellcreek.h +++ b/include/linux/platform_data/hirschmann-hellcreek.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 or MIT) */ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ /* * Hirschmann Hellcreek TSN switch platform data. 
* diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 784277d666eb..b2ccd827bb80 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -227,7 +227,7 @@ struct plat_stmmacenet_data { phy_interface_t phy_interface; struct stmmac_mdio_bus_data *mdio_bus_data; struct device_node *phy_node; - struct device_node *phylink_node; + struct fwnode_handle *port_node; struct device_node *mdio_node; struct stmmac_dma_cfg *dma_cfg; struct stmmac_est *est; diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index e66d04dbe56a..5b85cf18c350 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -752,6 +752,7 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, u64 *probe_addr); int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); #else static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) { @@ -798,6 +799,11 @@ bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) { return -EOPNOTSUPP; } +static inline int +bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} #endif enum { diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 5723405b833e..87d92accc26e 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -319,6 +319,16 @@ enum { * This quirk must be set before hci_register_dev is called. */ HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, + + /* + * When this quirk is set, LE Coded PHY shall not be used. This is + * required for some Intel controllers which erroneously claim to + * support it but it causes problems with extended scanning. + * + * This quirk can be set before hci_register_dev is called or + * during the hdev->setup vendor callback. 
+ */ + HCI_QUIRK_BROKEN_LE_CODED, }; /* HCI device flags */ @@ -2771,6 +2781,17 @@ struct hci_ev_le_enh_conn_complete { __u8 clk_accurancy; } __packed; +#define HCI_EV_LE_PER_ADV_REPORT 0x0f +struct hci_ev_le_per_adv_report { + __le16 sync_handle; + __u8 tx_power; + __u8 rssi; + __u8 cte_type; + __u8 data_status; + __u8 length; + __u8 data[]; +} __packed; + #define HCI_EV_LE_EXT_ADV_SET_TERM 0x12 struct hci_evt_le_ext_adv_set_term { __u8 status; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index c53d74236e3a..e6359f7346f1 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -83,7 +83,7 @@ struct discovery_state { u8 last_adv_addr_type; s8 last_adv_rssi; u32 last_adv_flags; - u8 last_adv_data[HCI_MAX_AD_LENGTH]; + u8 last_adv_data[HCI_MAX_EXT_AD_LENGTH]; u8 last_adv_data_len; bool report_invalid_rssi; bool result_filtering; @@ -290,7 +290,7 @@ struct adv_pattern { __u8 ad_type; __u8 offset; __u8 length; - __u8 value[HCI_MAX_AD_LENGTH]; + __u8 value[HCI_MAX_EXT_AD_LENGTH]; }; struct adv_rssi_thresholds { @@ -726,7 +726,7 @@ struct hci_conn { __u16 le_conn_interval; __u16 le_conn_latency; __u16 le_supv_timeout; - __u8 le_adv_data[HCI_MAX_AD_LENGTH]; + __u8 le_adv_data[HCI_MAX_EXT_AD_LENGTH]; __u8 le_adv_data_len; __u8 le_per_adv_data[HCI_MAX_PER_AD_LENGTH]; __u8 le_per_adv_data_len; @@ -978,6 +978,8 @@ enum { HCI_CONN_CREATE_CIS, HCI_CONN_BIG_SYNC, HCI_CONN_BIG_SYNC_FAILED, + HCI_CONN_PA_SYNC, + HCI_CONN_PA_SYNC_FAILED, }; static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) @@ -1300,7 +1302,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev * if (c->type != ISO_LINK) continue; - if (handle == c->iso_qos.bcast.big) { + if (handle != BT_ISO_QOS_BIG_UNSET && handle == c->iso_qos.bcast.big) { rcu_read_unlock(); return c; } @@ -1311,6 +1313,29 @@ static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev * return NULL; } +static inline struct hci_conn * +hci_conn_hash_lookup_pa_sync(struct hci_dev *hdev, __u8 big) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type != ISO_LINK || + !test_bit(HCI_CONN_PA_SYNC, &c->flags)) + continue; + + if (c->iso_qos.bcast.big == big) { + rcu_read_unlock(); + return c; + } + } + rcu_read_unlock(); + + return NULL; +} + static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, __u8 type, __u16 state) { @@ -1435,7 +1460,8 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 data_len, __u8 *data); int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, __u8 sid, struct bt_iso_qos *qos); -int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos, +int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, + struct bt_iso_qos *qos, __u16 sync_handle, __u8 num_bis, __u8 bis[]); int hci_conn_check_link_mode(struct hci_conn *conn); int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); @@ -1791,7 +1817,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M)) -#define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED)) +#define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED) && \ + !test_bit(HCI_QUIRK_BROKEN_LE_CODED, \ + &(dev)->quirks)) #define scan_coded(dev) (((dev)->le_tx_def_phys & 
HCI_LE_SET_PHY_CODED) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index d6fa7c8767ad..3a4b684f89bf 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -263,7 +263,7 @@ enum ieee80211_privacy { * are only for driver use when pointers to this structure are * passed around. * - * @flags: rate-specific flags + * @flags: rate-specific flags from &enum ieee80211_rate_flags * @bitrate: bitrate in units of 100 Kbps * @hw_value: driver/hardware value for this rate * @hw_value_short: driver/hardware value for this rate when @@ -811,7 +811,7 @@ struct cfg80211_tid_cfg { struct cfg80211_tid_config { const u8 *peer; u32 n_tid_conf; - struct cfg80211_tid_cfg tid_conf[]; + struct cfg80211_tid_cfg tid_conf[] __counted_by(n_tid_conf); }; /** @@ -1187,7 +1187,7 @@ struct cfg80211_mbssid_elems { struct { const u8 *data; size_t len; - } elem[]; + } elem[] __counted_by(cnt); }; /** @@ -1204,7 +1204,7 @@ struct cfg80211_rnr_elems { struct { const u8 *data; size_t len; - } elem[]; + } elem[] __counted_by(cnt); }; /** @@ -1282,7 +1282,7 @@ struct cfg80211_acl_data { int n_acl_entries; /* Keep it last */ - struct mac_address mac_addrs[]; + struct mac_address mac_addrs[] __counted_by(n_acl_entries); }; /** @@ -1353,7 +1353,7 @@ struct cfg80211_unsol_bcast_probe_resp { * @twt_responder: Enable Target Wait Time * @he_required: stations must support HE * @sae_h2e_required: stations must support direct H2E technique in SAE - * @flags: flags, as defined in enum cfg80211_ap_settings_flags + * @flags: flags, as defined in &enum nl80211_ap_settings_flags * @he_obss_pd: OBSS Packet Detection settings * @he_oper: HE operation IE (or %NULL if HE isn't enabled) * @fils_discovery: FILS discovery transmission parameters @@ -1482,7 +1482,6 @@ struct iface_combination_params { * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp) * @STATION_PARAM_APPLY_CAPABILITY: apply new capability * @STATION_PARAM_APPLY_PLINK_STATE: apply new plink state - * @STATION_PARAM_APPLY_STA_TXPOWER: apply tx power for STA * * Not all station parameters have in-band "no change" signalling, * for those that don't these flags will are used. @@ -2156,7 +2155,7 @@ enum mpath_info_flags { * @sn: target sequence number * @metric: metric (cost) of this mesh path * @exptime: expiration time for the mesh path from now, in msecs - * @flags: mesh path flags + * @flags: mesh path flags from &enum mesh_path_flags * @discovery_timeout: total mesh path discovery timeout, in msecs * @discovery_retries: mesh path discovery retries * @generation: generation number for nl80211 dumps. @@ -2496,7 +2495,7 @@ struct cfg80211_scan_6ghz_params { * the actual dwell time may be shorter. * @duration_mandatory: if set, the scan duration must be as specified by the * %duration field. 
- * @flags: bit field of flags controlling operation + * @flags: control flags from &enum nl80211_scan_flags * @rates: bitmap of rates to advertise for each band * @wiphy: the wiphy this was for * @scan_start: time (in jiffies) when the scan started @@ -2544,7 +2543,7 @@ struct cfg80211_scan_request { struct cfg80211_scan_6ghz_params *scan_6ghz_params; /* keep last */ - struct ieee80211_channel *channels[]; + struct ieee80211_channel *channels[] __counted_by(n_channels); }; static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask) @@ -2616,7 +2615,7 @@ struct cfg80211_bss_select_adjust { * @scan_width: channel width for scanning * @ie: optional information element(s) to add into Probe Request or %NULL * @ie_len: length of ie in octets - * @flags: bit field of flags controlling operation + * @flags: control flags from &enum nl80211_scan_flags * @match_sets: sets of parameters to be matched for a scan result * entry to be considered valid and to be passed to the host * (others are filtered out). @@ -3948,7 +3947,7 @@ struct cfg80211_pmsr_request { struct list_head list; - struct cfg80211_pmsr_request_peer peers[]; + struct cfg80211_pmsr_request_peer peers[] __counted_by(n_peers); }; /** @@ -8118,7 +8117,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, * @link_id: the ID of the link the frame was received on * @buf: Management frame (header + body) * @len: length of the frame data - * @flags: flags, as defined in enum nl80211_rxmgmt_flags + * @flags: flags, as defined in &enum nl80211_rxmgmt_flags * @rx_tstamp: Hardware timestamp of frame RX in nanoseconds * @ack_tstamp: Hardware timestamp of ack TX in nanoseconds */ diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index c4722a9963de..2338f8d2a8b3 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -21,7 +21,7 @@ #include <asm/unaligned.h> /** - * struct ieee82011_radiotap_header - base radiotap header + * struct ieee80211_radiotap_header - base radiotap header */ struct ieee80211_radiotap_header { /** @@ -575,6 +575,7 @@ enum ieee80211_radiotap_eht_usig_tb { /** * ieee80211_get_radiotap_len - get radiotap header length + * @data: pointer to the header */ static inline u16 ieee80211_get_radiotap_len(const char *data) { diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index d2ea5863eedc..b2cf243ebe44 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -426,17 +426,10 @@ struct iw_public_data { /**************************** PROTOTYPES ****************************/ /* - * Functions part of the Wireless Extensions (defined in net/core/wireless.c). - * Those may be called only within the kernel. + * Functions part of the Wireless Extensions (defined in net/wireless/wext-core.c). + * Those may be called by driver modules. 
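[Editorial sketch, not part of the patch above] Several cfg80211 structures in the hunks above gain __counted_by() annotations on their flexible-array members. A rough illustration of how the annotation ties the array bound to the counter field, using hypothetical structure names and assuming a toolchain new enough to honour the attribute (older compilers treat it as a no-op):

#include <linux/slab.h>
#include <linux/overflow.h>

struct example_entry {
        u8 addr[6];
};

struct example_acl {
        int n_entries;
        /* bounds checking (FORTIFY, UBSAN) can now use n_entries */
        struct example_entry entries[] __counted_by(n_entries);
};

static struct example_acl *example_acl_alloc(int n)
{
        struct example_acl *acl;

        /* struct_size() computes header plus n trailing elements safely */
        acl = kzalloc(struct_size(acl, entries, n), GFP_KERNEL);
        if (!acl)
                return NULL;

        /* the counter must be valid before the annotated array is indexed */
        acl->n_entries = n;
        return acl;
}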
*/ -/* First : function strictly used inside the kernel */ - -/* Handle /proc/net/wireless, called in net/code/dev.c */ -int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length); - -/* Second : functions that may be called by driver modules */ - /* Send a single event to user space */ void wireless_send_event(struct net_device *dev, unsigned int cmd, union iwreq_data *wrqu, const char *extra); diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 6f15e6fa154e..53bd2d02a4f0 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -16,9 +16,12 @@ #define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1) #define LWTUNNEL_STATE_XMIT_REDIRECT BIT(2) +/* LWTUNNEL_XMIT_CONTINUE should be distinguishable from dst_output return + * values (NET_XMIT_xxx and NETDEV_TX_xxx in linux/netdevice.h) for safety. + */ enum { LWTUNNEL_XMIT_DONE, - LWTUNNEL_XMIT_CONTINUE, + LWTUNNEL_XMIT_CONTINUE = 0x100, }; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 2a55ae932c56..7c707358d15c 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1192,9 +1192,11 @@ struct ieee80211_tx_info { u8 ampdu_ack_len; u8 ampdu_len; u8 antenna; + u8 pad; u16 tx_time; u8 flags; - void *status_driver_data[18 / sizeof(void *)]; + u8 pad2; + void *status_driver_data[16 / sizeof(void *)]; } status; struct { struct ieee80211_tx_rate driver_rates[ @@ -2259,6 +2261,7 @@ struct ieee80211_sta_aggregates { * @he_cap: HE capabilities of this STA * @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities * @eht_cap: EHT capabilities of this STA + * @agg: per-link data for multi-link aggregation * @bandwidth: current bandwidth the station can receive with * @rx_nss: in HT/VHT, the maximum number of spatial streams the * station can receive at the moment, changed by operating mode diff --git a/include/net/macsec.h b/include/net/macsec.h index 441ed8fd4b5f..75a6f4863c83 100644 --- a/include/net/macsec.h +++ b/include/net/macsec.h @@ -312,6 +312,8 @@ static inline bool macsec_send_sci(const struct macsec_secy *secy) return tx_sc->send_sci || (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb); } +struct net_device *macsec_get_real_dev(const struct net_device *dev); +bool macsec_netdev_is_offloaded(struct net_device *dev); static inline void *macsec_netdev_priv(const struct net_device *dev) { diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d21deb46f49f..8790b3962e4b 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1039,6 +1039,7 @@ enum bpf_attach_type { BPF_NETFILTER, BPF_TCX_INGRESS, BPF_TCX_EGRESS, + BPF_TRACE_UPROBE_MULTI, __MAX_BPF_ATTACH_TYPE }; @@ -1057,6 +1058,7 @@ enum bpf_link_type { BPF_LINK_TYPE_STRUCT_OPS = 9, BPF_LINK_TYPE_NETFILTER = 10, BPF_LINK_TYPE_TCX = 11, + BPF_LINK_TYPE_UPROBE_MULTI = 12, MAX_BPF_LINK_TYPE, }; @@ -1186,7 +1188,16 @@ enum bpf_perf_event_type { /* link_create.kprobe_multi.flags used in LINK_CREATE command for * BPF_TRACE_KPROBE_MULTI attach type to create return probe. */ -#define BPF_F_KPROBE_MULTI_RETURN (1U << 0) +enum { + BPF_F_KPROBE_MULTI_RETURN = (1U << 0) +}; + +/* link_create.uprobe_multi.flags used in LINK_CREATE command for + * BPF_TRACE_UPROBE_MULTI attach type to create return probe. + */ +enum { + BPF_F_UPROBE_MULTI_RETURN = (1U << 0) +}; /* link_create.netfilter.flags used in LINK_CREATE command for * BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation. 
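[Editorial sketch, not part of the patch above] The lwtunnel hunk earlier moves LWTUNNEL_XMIT_CONTINUE from 1 to 0x100 precisely so it cannot collide with the NET_XMIT_xxx (for example NET_XMIT_DROP, 0x01) and NETDEV_TX_xxx (for example NETDEV_TX_BUSY, 0x10) codes that the encap xmit hook may propagate. A compressed sketch of the caller pattern that relies on the values being disjoint (hypothetical function, not the exact ip_finish_output2() path):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/lwtunnel.h>

static int example_finish_output(struct sk_buff *skb)
{
        int res = lwtunnel_xmit(skb);

        /* The encap hook may hand back NET_XMIT_DROP, NETDEV_TX_BUSY or a
         * negative errno; with CONTINUE now at 0x100 none of those can be
         * mistaken for "fall through to the normal transmit path".
         */
        if (res != LWTUNNEL_XMIT_CONTINUE)
                return res;

        /* ... normal neighbour output would follow here ... */
        return 0;
}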
@@ -1624,6 +1635,15 @@ union bpf_attr { }; __u64 expected_revision; } tcx; + struct { + __aligned_u64 path; + __aligned_u64 offsets; + __aligned_u64 ref_ctr_offsets; + __aligned_u64 cookies; + __u32 cnt; + __u32 flags; + __u32 pid; + } uprobe_multi; }; } link_create; diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index ef28c64f1eb1..e42a1bdb7f53 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -68,11 +68,8 @@ struct bpf_cpu_map_entry { struct bpf_cpumap_val value; struct bpf_prog *prog; - atomic_t refcnt; /* Control when this struct can be free'ed */ - struct rcu_head rcu; - - struct work_struct kthread_stop_wq; struct completion kthread_running; + struct rcu_work free_work; }; struct bpf_cpu_map { @@ -117,11 +114,6 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) return &cmap->map; } -static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) -{ - atomic_inc(&rcpu->refcnt); -} - static void __cpu_map_ring_cleanup(struct ptr_ring *ring) { /* The tear-down procedure should have made sure that queue is @@ -142,35 +134,6 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring) } } -static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) -{ - if (atomic_dec_and_test(&rcpu->refcnt)) { - if (rcpu->prog) - bpf_prog_put(rcpu->prog); - /* The queue should be empty at this point */ - __cpu_map_ring_cleanup(rcpu->queue); - ptr_ring_cleanup(rcpu->queue, NULL); - kfree(rcpu->queue); - kfree(rcpu); - } -} - -/* called from workqueue, to workaround syscall using preempt_disable */ -static void cpu_map_kthread_stop(struct work_struct *work) -{ - struct bpf_cpu_map_entry *rcpu; - - rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq); - - /* Wait for flush in __cpu_map_entry_free(), via full RCU barrier, - * as it waits until all in-flight call_rcu() callbacks complete. - */ - rcu_barrier(); - - /* kthread_stop will wake_up_process and wait for it to complete */ - kthread_stop(rcpu->kthread); -} - static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu, struct list_head *listp, struct xdp_cpumap_stats *stats) @@ -395,7 +358,6 @@ static int cpu_map_kthread_run(void *data) } __set_current_state(TASK_RUNNING); - put_cpu_map_entry(rcpu); return 0; } @@ -472,9 +434,6 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, if (IS_ERR(rcpu->kthread)) goto free_prog; - get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */ - get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */ - /* Make sure kthread runs on a single CPU */ kthread_bind(rcpu->kthread, cpu); wake_up_process(rcpu->kthread); @@ -501,40 +460,40 @@ free_rcu: return NULL; } -static void __cpu_map_entry_free(struct rcu_head *rcu) +static void __cpu_map_entry_free(struct work_struct *work) { struct bpf_cpu_map_entry *rcpu; /* This cpu_map_entry have been disconnected from map and one - * RCU grace-period have elapsed. Thus, XDP cannot queue any + * RCU grace-period have elapsed. Thus, XDP cannot queue any * new packets and cannot change/set flush_needed that can * find this entry. */ - rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu); + rcpu = container_of(to_rcu_work(work), struct bpf_cpu_map_entry, free_work); + /* kthread_stop will wake_up_process and wait for it to complete. + * cpu_map_kthread_run() makes sure the pointer ring is empty + * before exiting. 
+ */ + kthread_stop(rcpu->kthread); + + if (rcpu->prog) + bpf_prog_put(rcpu->prog); + /* The queue should be empty at this point */ + __cpu_map_ring_cleanup(rcpu->queue); + ptr_ring_cleanup(rcpu->queue, NULL); + kfree(rcpu->queue); free_percpu(rcpu->bulkq); - /* Cannot kthread_stop() here, last put free rcpu resources */ - put_cpu_map_entry(rcpu); + kfree(rcpu); } -/* After xchg pointer to bpf_cpu_map_entry, use the call_rcu() to - * ensure any driver rcu critical sections have completed, but this - * does not guarantee a flush has happened yet. Because driver side - * rcu_read_lock/unlock only protects the running XDP program. The - * atomic xchg and NULL-ptr check in __cpu_map_flush() makes sure a - * pending flush op doesn't fail. - * - * The bpf_cpu_map_entry is still used by the kthread, and there can - * still be pending packets (in queue and percpu bulkq). A refcnt - * makes sure to last user (kthread_stop vs. call_rcu) free memory - * resources. - * - * The rcu callback __cpu_map_entry_free flush remaining packets in - * percpu bulkq to queue. Due to caller map_delete_elem() disable - * preemption, cannot call kthread_stop() to make sure queue is empty. - * Instead a work_queue is started for stopping kthread, - * cpu_map_kthread_stop, which waits for an RCU grace period before - * stopping kthread, emptying the queue. +/* After the xchg of the bpf_cpu_map_entry pointer, we need to make sure the old + * entry is no longer in use before freeing. We use queue_rcu_work() to call + * __cpu_map_entry_free() in a separate workqueue after waiting for an RCU grace + * period. This means that (a) all pending enqueue and flush operations have + * completed (because of the RCU callback), and (b) we are in a workqueue + * context where we can stop the kthread and wait for it to exit before freeing + * everything. */ static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap, u32 key_cpu, struct bpf_cpu_map_entry *rcpu) @@ -543,9 +502,8 @@ static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap, old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu))); if (old_rcpu) { - call_rcu(&old_rcpu->rcu, __cpu_map_entry_free); - INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop); - schedule_work(&old_rcpu->kthread_stop_wq); + INIT_RCU_WORK(&old_rcpu->free_work, __cpu_map_entry_free); + queue_rcu_work(system_wq, &old_rcpu->free_work); } } @@ -557,7 +515,7 @@ static long cpu_map_delete_elem(struct bpf_map *map, void *key) if (key_cpu >= map->max_entries) return -EINVAL; - /* notice caller map_delete_elem() use preempt_disable() */ + /* notice caller map_delete_elem() uses rcu_read_lock() */ __cpu_map_entry_replace(cmap, key_cpu, NULL); return 0; } @@ -608,16 +566,15 @@ static void cpu_map_free(struct bpf_map *map) /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, * so the bpf programs (can be more than one that used this map) were * disconnected from events. Wait for outstanding critical sections in - * these programs to complete. The rcu critical section only guarantees - * no further "XDP/bpf-side" reads against bpf_cpu_map->cpu_map. - * It does __not__ ensure pending flush operations (if any) are - * complete. + * these programs to complete. synchronize_rcu() below not only + * guarantees no further "XDP/bpf-side" reads against + * bpf_cpu_map->cpu_map, but also ensure pending flush operations + * (if any) are completed. */ - synchronize_rcu(); - /* For cpu_map the remote CPUs can still be using the entries - * (struct bpf_cpu_map_entry). 
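[Editorial sketch, not part of the patch above] Stripped of the cpumap specifics, the rework above replaces the refcount plus call_rcu() dance with a single rcu_work item. The generic pattern it relies on looks roughly like this (hypothetical object type; the real code is in the hunk above):

#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct example_entry {
        struct task_struct *kthread;
        struct rcu_work free_work;
};

static void example_entry_free(struct work_struct *work)
{
        struct example_entry *e =
                container_of(to_rcu_work(work), struct example_entry, free_work);

        /* Runs in process context after an RCU grace period: safe both to
         * sleep in kthread_stop() and to assume no RCU readers still see
         * the entry.
         */
        kthread_stop(e->kthread);
        kfree(e);
}

static void example_entry_release(struct example_entry *e)
{
        /* queue_rcu_work() waits for a grace period, then runs the work
         * item on the given workqueue instead of in softirq context.
         */
        INIT_RCU_WORK(&e->free_work, example_entry_free);
        queue_rcu_work(system_wq, &e->free_work);
}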
+ /* The only possible user of bpf_cpu_map_entry is + * cpu_map_kthread_run(). */ for (i = 0; i < cmap->map.max_entries; i++) { struct bpf_cpu_map_entry *rcpu; @@ -626,8 +583,8 @@ static void cpu_map_free(struct bpf_map *map) if (!rcpu) continue; - /* bq flush and cleanup happens after RCU grace-period */ - __cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */ + /* Stop kthread and cleanup entry directly */ + __cpu_map_entry_free(&rcpu->free_work.work); } bpf_map_area_free(cmap->cpu_map); bpf_map_area_free(cmap); diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index eb91cae0612a..8bd3812fb8df 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -286,6 +286,7 @@ static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0"); BUILD_BUG_ON(sizeof(*l) != sizeof(__u32)); BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32)); + preempt_disable(); arch_spin_lock(l); } @@ -294,6 +295,7 @@ static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) arch_spinlock_t *l = (void *)lock; arch_spin_unlock(l); + preempt_enable(); } #else @@ -1913,7 +1915,11 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec) if (rec) bpf_obj_free_fields(rec, p); - bpf_mem_free(&bpf_global_ma, p); + + if (rec && rec->refcount_off >= 0) + bpf_mem_free_rcu(&bpf_global_ma, p); + else + bpf_mem_free(&bpf_global_ma, p); } __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index cb658543bdb4..ebeb0695305a 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -657,7 +657,6 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj) if (!btf_is_kernel(field->kptr.btf)) { pointee_struct_meta = btf_find_struct_meta(field->kptr.btf, field->kptr.btf_id); - WARN_ON_ONCE(!pointee_struct_meta); migrate_disable(); __bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ? pointee_struct_meta->record : @@ -2815,10 +2814,12 @@ static void bpf_link_free_id(int id) /* Clean up bpf_link and corresponding anon_inode file and FD. After * anon_inode is created, bpf_link can't be just kfree()'d due to deferred - * anon_inode's release() call. This helper marksbpf_link as + * anon_inode's release() call. This helper marks bpf_link as * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt * is not decremented, it's the responsibility of a calling code that failed * to complete bpf_link initialization. + * This helper eventually calls link's dealloc callback, but does not call + * link's release callback. */ void bpf_link_cleanup(struct bpf_link_primer *primer) { @@ -3655,34 +3656,6 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr) return fd; } -static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, - enum bpf_attach_type attach_type) -{ - switch (prog->type) { - case BPF_PROG_TYPE_CGROUP_SOCK: - case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: - case BPF_PROG_TYPE_CGROUP_SOCKOPT: - case BPF_PROG_TYPE_SK_LOOKUP: - return attach_type == prog->expected_attach_type ? 0 : -EINVAL; - case BPF_PROG_TYPE_CGROUP_SKB: - if (!capable(CAP_NET_ADMIN)) - /* cg-skb progs can be loaded by unpriv user. - * check permissions at attach time. - */ - return -EPERM; - return prog->enforce_expected_attach_type && - prog->expected_attach_type != attach_type ? 
- -EINVAL : 0; - case BPF_PROG_TYPE_KPROBE: - if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && - attach_type != BPF_TRACE_KPROBE_MULTI) - return -EINVAL; - return 0; - default: - return 0; - } -} - static enum bpf_prog_type attach_type_to_prog_type(enum bpf_attach_type attach_type) { @@ -3749,6 +3722,62 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type) } } +static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, + enum bpf_attach_type attach_type) +{ + enum bpf_prog_type ptype; + + switch (prog->type) { + case BPF_PROG_TYPE_CGROUP_SOCK: + case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: + case BPF_PROG_TYPE_CGROUP_SOCKOPT: + case BPF_PROG_TYPE_SK_LOOKUP: + return attach_type == prog->expected_attach_type ? 0 : -EINVAL; + case BPF_PROG_TYPE_CGROUP_SKB: + if (!capable(CAP_NET_ADMIN)) + /* cg-skb progs can be loaded by unpriv user. + * check permissions at attach time. + */ + return -EPERM; + return prog->enforce_expected_attach_type && + prog->expected_attach_type != attach_type ? + -EINVAL : 0; + case BPF_PROG_TYPE_EXT: + return 0; + case BPF_PROG_TYPE_NETFILTER: + if (attach_type != BPF_NETFILTER) + return -EINVAL; + return 0; + case BPF_PROG_TYPE_PERF_EVENT: + case BPF_PROG_TYPE_TRACEPOINT: + if (attach_type != BPF_PERF_EVENT) + return -EINVAL; + return 0; + case BPF_PROG_TYPE_KPROBE: + if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && + attach_type != BPF_TRACE_KPROBE_MULTI) + return -EINVAL; + if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI && + attach_type != BPF_TRACE_UPROBE_MULTI) + return -EINVAL; + if (attach_type != BPF_PERF_EVENT && + attach_type != BPF_TRACE_KPROBE_MULTI && + attach_type != BPF_TRACE_UPROBE_MULTI) + return -EINVAL; + return 0; + case BPF_PROG_TYPE_SCHED_CLS: + if (attach_type != BPF_TCX_INGRESS && + attach_type != BPF_TCX_EGRESS) + return -EINVAL; + return 0; + default: + ptype = attach_type_to_prog_type(attach_type); + if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) + return -EINVAL; + return 0; + } +} + #define BPF_PROG_ATTACH_LAST_FIELD expected_revision #define BPF_F_ATTACH_MASK_BASE \ @@ -4852,10 +4881,9 @@ err_put: return err; } -#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.cookies +#define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid static int link_create(union bpf_attr *attr, bpfptr_t uattr) { - enum bpf_prog_type ptype; struct bpf_prog *prog; int ret; @@ -4875,45 +4903,6 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) goto out; switch (prog->type) { - case BPF_PROG_TYPE_EXT: - break; - case BPF_PROG_TYPE_NETFILTER: - if (attr->link_create.attach_type != BPF_NETFILTER) { - ret = -EINVAL; - goto out; - } - break; - case BPF_PROG_TYPE_PERF_EVENT: - case BPF_PROG_TYPE_TRACEPOINT: - if (attr->link_create.attach_type != BPF_PERF_EVENT) { - ret = -EINVAL; - goto out; - } - break; - case BPF_PROG_TYPE_KPROBE: - if (attr->link_create.attach_type != BPF_PERF_EVENT && - attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI) { - ret = -EINVAL; - goto out; - } - break; - case BPF_PROG_TYPE_SCHED_CLS: - if (attr->link_create.attach_type != BPF_TCX_INGRESS && - attr->link_create.attach_type != BPF_TCX_EGRESS) { - ret = -EINVAL; - goto out; - } - break; - default: - ptype = attach_type_to_prog_type(attr->link_create.attach_type); - if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) { - ret = -EINVAL; - goto out; - } - break; - } - - switch (prog->type) { case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: @@ 
-4969,8 +4958,10 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) case BPF_PROG_TYPE_KPROBE: if (attr->link_create.attach_type == BPF_PERF_EVENT) ret = bpf_perf_link_attach(attr, prog); - else + else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI) ret = bpf_kprobe_multi_link_attach(attr, prog); + else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI) + ret = bpf_uprobe_multi_link_attach(attr, prog); break; default: ret = -EINVAL; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 4ccca1f6c998..bb78212fa5b2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4990,20 +4990,22 @@ static int map_kptr_match_type(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno) { const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id); - int perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU; + int perm_flags; const char *reg_name = ""; - /* Only unreferenced case accepts untrusted pointers */ - if (kptr_field->type == BPF_KPTR_UNREF) - perm_flags |= PTR_UNTRUSTED; + if (btf_is_kernel(reg->btf)) { + perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU; + + /* Only unreferenced case accepts untrusted pointers */ + if (kptr_field->type == BPF_KPTR_UNREF) + perm_flags |= PTR_UNTRUSTED; + } else { + perm_flags = PTR_MAYBE_NULL | MEM_ALLOC; + } if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags)) goto bad_type; - if (!btf_is_kernel(reg->btf)) { - verbose(env, "R%d must point to kernel BTF\n", regno); - return -EINVAL; - } /* We need to verify reg->type and reg->btf, before accessing reg->btf */ reg_name = btf_type_name(reg->btf, reg->btf_id); @@ -5016,7 +5018,7 @@ static int map_kptr_match_type(struct bpf_verifier_env *env, if (__check_ptr_off_reg(env, reg, regno, true)) return -EACCES; - /* A full type match is needed, as BTF can be vmlinux or module BTF, and + /* A full type match is needed, as BTF can be vmlinux, module or prog BTF, and * we also need to take into account the reg->off. 
* * We want to support cases like: @@ -5062,7 +5064,9 @@ bad_type: */ static bool in_rcu_cs(struct bpf_verifier_env *env) { - return env->cur_state->active_rcu_lock || !env->prog->aux->sleepable; + return env->cur_state->active_rcu_lock || + env->cur_state->active_lock.ptr || + !env->prog->aux->sleepable; } /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */ @@ -7916,7 +7920,10 @@ found: verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n"); return -EFAULT; } - /* Handled by helper specific checks */ + if (meta->func_id == BPF_FUNC_kptr_xchg) { + if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) + return -EACCES; + } break; case PTR_TO_BTF_ID | MEM_PERCPU: case PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED: @@ -7968,17 +7975,6 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env, if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK) return 0; - if ((type_is_ptr_alloc_obj(type) || type_is_non_owning_ref(type)) && reg->off) { - if (reg_find_field_offset(reg, reg->off, BPF_GRAPH_NODE_OR_ROOT)) - return __check_ptr_off_reg(env, reg, regno, true); - - verbose(env, "R%d must have zero offset when passed to release func\n", - regno); - verbose(env, "No graph node or root found at R%d type:%s off:%d\n", regno, - btf_type_name(reg->btf, reg->btf_id), reg->off); - return -EINVAL; - } - /* Doing check_ptr_off_reg check for the offset will catch this * because fixed_off_ok is false, but checking here allows us * to give the user a better error message. @@ -8013,6 +8009,7 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env, case PTR_TO_BTF_ID | PTR_TRUSTED: case PTR_TO_BTF_ID | MEM_RCU: case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF: + case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF | MEM_RCU: /* When referenced PTR_TO_BTF_ID is passed to release function, * its fixed offset must be 0. In the other cases, fixed offset * can be non-zero. This was already checked above. 
So pass @@ -10479,6 +10476,7 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env, static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg) { struct bpf_verifier_state *state = env->cur_state; + struct btf_record *rec = reg_btf_record(reg); if (!state->active_lock.ptr) { verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n"); @@ -10491,6 +10489,9 @@ static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state } reg->type |= NON_OWN_REF; + if (rec->refcount_off >= 0) + reg->type |= MEM_RCU; + return 0; } @@ -11223,10 +11224,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i); return -EINVAL; } - if (rec->refcount_off >= 0) { - verbose(env, "bpf_refcount_acquire calls are disabled for now\n"); - return -EINVAL; - } + meta->arg_btf = reg->btf; meta->arg_btf_id = reg->btf_id; break; @@ -11331,6 +11329,11 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_func_state *state; struct bpf_reg_state *reg; + if (in_rbtree_lock_required_cb(env) && (rcu_lock || rcu_unlock)) { + verbose(env, "Calling bpf_rcu_read_{lock,unlock} in unnecessary rbtree callback\n"); + return -EACCES; + } + if (rcu_lock) { verbose(env, "nested rcu read lock (kernel function %s)\n", func_name); return -EINVAL; @@ -14047,6 +14050,12 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, return -EINVAL; } + /* check src2 operand */ + err = check_reg_arg(env, insn->dst_reg, SRC_OP); + if (err) + return err; + + dst_reg = ®s[insn->dst_reg]; if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0) { verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); @@ -14058,12 +14067,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, if (err) return err; - if (is_pointer_value(env, insn->src_reg)) { + src_reg = ®s[insn->src_reg]; + if (!(reg_is_pkt_pointer_any(dst_reg) && reg_is_pkt_pointer_any(src_reg)) && + is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->src_reg); return -EACCES; } - src_reg = ®s[insn->src_reg]; } else { if (insn->src_reg != BPF_REG_0) { verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); @@ -14071,12 +14081,6 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, } } - /* check src2 operand */ - err = check_reg_arg(env, insn->dst_reg, SRC_OP); - if (err) - return err; - - dst_reg = ®s[insn->dst_reg]; is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; if (BPF_SRC(insn->code) == BPF_K) { @@ -16692,7 +16696,8 @@ static int do_check(struct bpf_verifier_env *env) return -EINVAL; } - if (env->cur_state->active_rcu_lock) { + if (env->cur_state->active_rcu_lock && + !in_rbtree_lock_required_cb(env)) { verbose(env, "bpf_rcu_read_unlock is missing\n"); return -EINVAL; } @@ -16972,11 +16977,6 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); return -EINVAL; } - - if (prog->aux->sleepable) { - verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n"); - return -EINVAL; - } } if (btf_record_has_field(map->record, BPF_TIMER)) { @@ -18281,6 +18281,13 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) }; + if (desc->func_id == 
special_kfunc_list[KF_bpf_refcount_acquire_impl] && + !kptr_struct_meta) { + verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n", + insn_idx); + return -EFAULT; + } + insn_buf[0] = addr[0]; insn_buf[1] = addr[1]; insn_buf[2] = *insn; @@ -18288,6 +18295,7 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { + struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; int struct_meta_reg = BPF_REG_3; int node_offset_reg = BPF_REG_4; @@ -18297,6 +18305,12 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, node_offset_reg = BPF_REG_5; } + if (!kptr_struct_meta) { + verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n", + insn_idx); + return -EFAULT; + } + __fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg, node_offset_reg, insn, insn_buf, cnt); } else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 792445e1f3f0..a7264b2c17ad 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -23,6 +23,7 @@ #include <linux/sort.h> #include <linux/key.h> #include <linux/verification.h> +#include <linux/namei.h> #include <net/bpf_sk_storage.h> @@ -86,6 +87,9 @@ static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx); static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx); +static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx); +static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx); + /** * trace_call_bpf - invoke BPF program * @call: tracepoint event @@ -1103,6 +1107,30 @@ static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = { .arg1_type = ARG_PTR_TO_CTX, }; +BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs) +{ + return bpf_uprobe_multi_entry_ip(current->bpf_ctx); +} + +static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = { + .func = bpf_get_func_ip_uprobe_multi, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + +BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs) +{ + return bpf_uprobe_multi_cookie(current->bpf_ctx); +} + +static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = { + .func = bpf_get_attach_cookie_uprobe_multi, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx) { struct bpf_trace_run_ctx *run_ctx; @@ -1545,13 +1573,17 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_override_return_proto; #endif case BPF_FUNC_get_func_ip: - return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? - &bpf_get_func_ip_proto_kprobe_multi : - &bpf_get_func_ip_proto_kprobe; + if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI) + return &bpf_get_func_ip_proto_kprobe_multi; + if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI) + return &bpf_get_func_ip_proto_uprobe_multi; + return &bpf_get_func_ip_proto_kprobe; case BPF_FUNC_get_attach_cookie: - return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? 
- &bpf_get_attach_cookie_proto_kmulti : - &bpf_get_attach_cookie_proto_trace; + if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI) + return &bpf_get_attach_cookie_proto_kmulti; + if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI) + return &bpf_get_attach_cookie_proto_umulti; + return &bpf_get_attach_cookie_proto_trace; default: return bpf_tracing_func_proto(func_id, prog); } @@ -2970,3 +3002,301 @@ static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) return 0; } #endif + +#ifdef CONFIG_UPROBES +struct bpf_uprobe_multi_link; + +struct bpf_uprobe { + struct bpf_uprobe_multi_link *link; + loff_t offset; + u64 cookie; + struct uprobe_consumer consumer; +}; + +struct bpf_uprobe_multi_link { + struct path path; + struct bpf_link link; + u32 cnt; + struct bpf_uprobe *uprobes; + struct task_struct *task; +}; + +struct bpf_uprobe_multi_run_ctx { + struct bpf_run_ctx run_ctx; + unsigned long entry_ip; + struct bpf_uprobe *uprobe; +}; + +static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes, + u32 cnt) +{ + u32 i; + + for (i = 0; i < cnt; i++) { + uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset, + &uprobes[i].consumer); + } +} + +static void bpf_uprobe_multi_link_release(struct bpf_link *link) +{ + struct bpf_uprobe_multi_link *umulti_link; + + umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); + bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt); +} + +static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link) +{ + struct bpf_uprobe_multi_link *umulti_link; + + umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); + if (umulti_link->task) + put_task_struct(umulti_link->task); + path_put(&umulti_link->path); + kvfree(umulti_link->uprobes); + kfree(umulti_link); +} + +static const struct bpf_link_ops bpf_uprobe_multi_link_lops = { + .release = bpf_uprobe_multi_link_release, + .dealloc = bpf_uprobe_multi_link_dealloc, +}; + +static int uprobe_prog_run(struct bpf_uprobe *uprobe, + unsigned long entry_ip, + struct pt_regs *regs) +{ + struct bpf_uprobe_multi_link *link = uprobe->link; + struct bpf_uprobe_multi_run_ctx run_ctx = { + .entry_ip = entry_ip, + .uprobe = uprobe, + }; + struct bpf_prog *prog = link->link.prog; + bool sleepable = prog->aux->sleepable; + struct bpf_run_ctx *old_run_ctx; + int err = 0; + + if (link->task && current != link->task) + return 0; + + if (sleepable) + rcu_read_lock_trace(); + else + rcu_read_lock(); + + migrate_disable(); + + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); + err = bpf_prog_run(link->link.prog, regs); + bpf_reset_run_ctx(old_run_ctx); + + migrate_enable(); + + if (sleepable) + rcu_read_unlock_trace(); + else + rcu_read_unlock(); + return err; +} + +static bool +uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx, + struct mm_struct *mm) +{ + struct bpf_uprobe *uprobe; + + uprobe = container_of(con, struct bpf_uprobe, consumer); + return uprobe->link->task->mm == mm; +} + +static int +uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs) +{ + struct bpf_uprobe *uprobe; + + uprobe = container_of(con, struct bpf_uprobe, consumer); + return uprobe_prog_run(uprobe, instruction_pointer(regs), regs); +} + +static int +uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs) +{ + struct bpf_uprobe *uprobe; + + uprobe = container_of(con, struct bpf_uprobe, consumer); + return uprobe_prog_run(uprobe, func, regs); +} + +static u64 
bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx) +{ + struct bpf_uprobe_multi_run_ctx *run_ctx; + + run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx); + return run_ctx->entry_ip; +} + +static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx) +{ + struct bpf_uprobe_multi_run_ctx *run_ctx; + + run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx); + return run_ctx->uprobe->cookie; +} + +int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_uprobe_multi_link *link = NULL; + unsigned long __user *uref_ctr_offsets; + unsigned long *ref_ctr_offsets = NULL; + struct bpf_link_primer link_primer; + struct bpf_uprobe *uprobes = NULL; + struct task_struct *task = NULL; + unsigned long __user *uoffsets; + u64 __user *ucookies; + void __user *upath; + u32 flags, cnt, i; + struct path path; + char *name; + pid_t pid; + int err; + + /* no support for 32bit archs yet */ + if (sizeof(u64) != sizeof(void *)) + return -EOPNOTSUPP; + + if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI) + return -EINVAL; + + flags = attr->link_create.uprobe_multi.flags; + if (flags & ~BPF_F_UPROBE_MULTI_RETURN) + return -EINVAL; + + /* + * path, offsets and cnt are mandatory, + * ref_ctr_offsets and cookies are optional + */ + upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path); + uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets); + cnt = attr->link_create.uprobe_multi.cnt; + + if (!upath || !uoffsets || !cnt) + return -EINVAL; + + uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets); + ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies); + + name = strndup_user(upath, PATH_MAX); + if (IS_ERR(name)) { + err = PTR_ERR(name); + return err; + } + + err = kern_path(name, LOOKUP_FOLLOW, &path); + kfree(name); + if (err) + return err; + + if (!d_is_reg(path.dentry)) { + err = -EBADF; + goto error_path_put; + } + + pid = attr->link_create.uprobe_multi.pid; + if (pid) { + rcu_read_lock(); + task = get_pid_task(find_vpid(pid), PIDTYPE_PID); + rcu_read_unlock(); + if (!task) + goto error_path_put; + } + + err = -ENOMEM; + + link = kzalloc(sizeof(*link), GFP_KERNEL); + uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL); + + if (!uprobes || !link) + goto error_free; + + if (uref_ctr_offsets) { + ref_ctr_offsets = kvcalloc(cnt, sizeof(*ref_ctr_offsets), GFP_KERNEL); + if (!ref_ctr_offsets) + goto error_free; + } + + for (i = 0; i < cnt; i++) { + if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) { + err = -EFAULT; + goto error_free; + } + if (uref_ctr_offsets && __get_user(ref_ctr_offsets[i], uref_ctr_offsets + i)) { + err = -EFAULT; + goto error_free; + } + if (__get_user(uprobes[i].offset, uoffsets + i)) { + err = -EFAULT; + goto error_free; + } + + uprobes[i].link = link; + + if (flags & BPF_F_UPROBE_MULTI_RETURN) + uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler; + else + uprobes[i].consumer.handler = uprobe_multi_link_handler; + + if (pid) + uprobes[i].consumer.filter = uprobe_multi_link_filter; + } + + link->cnt = cnt; + link->uprobes = uprobes; + link->path = path; + link->task = task; + + bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI, + &bpf_uprobe_multi_link_lops, prog); + + for (i = 0; i < cnt; i++) { + err = uprobe_register_refctr(d_real_inode(link->path.dentry), + uprobes[i].offset, + ref_ctr_offsets ? 
ref_ctr_offsets[i] : 0, + &uprobes[i].consumer); + if (err) { + bpf_uprobe_unregister(&path, uprobes, i); + goto error_free; + } + } + + err = bpf_link_prime(&link->link, &link_primer); + if (err) + goto error_free; + + kvfree(ref_ctr_offsets); + return bpf_link_settle(&link_primer); + +error_free: + kvfree(ref_ctr_offsets); + kvfree(uprobes); + kfree(link); + if (task) + put_task_struct(task); +error_path_put: + path_put(&path); + return err; +} +#else /* !CONFIG_UPROBES */ +int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} +static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx) +{ + return 0; +} +static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx) +{ + return 0; +} +#endif /* CONFIG_UPROBES */ diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c index ace3c4799fe1..0eed92b77ba3 100644 --- a/lib/checksum_kunit.c +++ b/lib/checksum_kunit.c @@ -10,7 +10,8 @@ #define MAX_ALIGN 64 #define TEST_BUFLEN (MAX_LEN + MAX_ALIGN) -static const __wsum random_init_sum = 0x2847aab; +/* Values for a little endian CPU. Byte swap each half on big endian CPU. */ +static const u32 random_init_sum = 0x2847aab; static const u8 random_buf[] = { 0xac, 0xd7, 0x76, 0x69, 0x6e, 0xf2, 0x93, 0x2c, 0x1f, 0xe0, 0xde, 0x86, 0x8f, 0x54, 0x33, 0x90, 0x95, 0xbf, 0xff, 0xb9, 0xea, 0x62, 0x6e, 0xb5, @@ -56,7 +57,9 @@ static const u8 random_buf[] = { 0xe1, 0xdf, 0x4b, 0xe1, 0x81, 0xe2, 0x17, 0x02, 0x7b, 0x58, 0x8b, 0x92, 0x1a, 0xac, 0x46, 0xdd, 0x2e, 0xce, 0x40, 0x09 }; -static const __sum16 expected_results[] = { + +/* Values for a little endian CPU. Byte swap on big endian CPU. */ +static const u16 expected_results[] = { 0x82d0, 0x8224, 0xab23, 0xaaad, 0x41ad, 0x413f, 0x4f3e, 0x4eab, 0x22ab, 0x228c, 0x428b, 0x41ad, 0xbbac, 0xbb1d, 0x671d, 0x66ea, 0xd6e9, 0xd654, 0x1754, 0x1655, 0x5d54, 0x5c6a, 0xfa69, 0xf9fb, 0x44fb, 0x4428, 0xf527, @@ -115,7 +118,9 @@ static const __sum16 expected_results[] = { 0x1d47, 0x3c46, 0x3bc5, 0x59c4, 0x59ad, 0x57ad, 0x5732, 0xff31, 0xfea6, 0x6ca6, 0x6c8c, 0xc08b, 0xc045, 0xe344, 0xe316, 0x1516, 0x14d6, }; -static const __wsum init_sums_no_overflow[] = { + +/* Values for a little endian CPU. Byte swap each half on big endian CPU. 
*/ +static const u32 init_sums_no_overflow[] = { 0xffffffff, 0xfffffffb, 0xfffffbfb, 0xfffffbf7, 0xfffff7f7, 0xfffff7f3, 0xfffff3f3, 0xfffff3ef, 0xffffefef, 0xffffefeb, 0xffffebeb, 0xffffebe7, 0xffffe7e7, 0xffffe7e3, 0xffffe3e3, 0xffffe3df, 0xffffdfdf, 0xffffdfdb, @@ -208,7 +213,21 @@ static u8 tmp_buf[TEST_BUFLEN]; #define full_csum(buff, len, sum) csum_fold(csum_partial(buff, len, sum)) -#define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, lhs, rhs) +#define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, (__force u64)lhs, (__force u64)rhs) + +static __sum16 to_sum16(u16 x) +{ + return (__force __sum16)le16_to_cpu((__force __le16)x); +} + +/* This function swaps the bytes inside each half of a __wsum */ +static __wsum to_wsum(u32 x) +{ + u16 hi = le16_to_cpu((__force __le16)(x >> 16)); + u16 lo = le16_to_cpu((__force __le16)x); + + return (__force __wsum)((hi << 16) | lo); +} static void assert_setup_correct(struct kunit *test) { @@ -226,7 +245,8 @@ static void assert_setup_correct(struct kunit *test) static void test_csum_fixed_random_inputs(struct kunit *test) { int len, align; - __wsum result, expec, sum; + __wsum sum; + __sum16 result, expec; assert_setup_correct(test); for (align = 0; align < TEST_BUFLEN; ++align) { @@ -237,9 +257,9 @@ static void test_csum_fixed_random_inputs(struct kunit *test) /* * Test the precomputed random input. */ - sum = random_init_sum; + sum = to_wsum(random_init_sum); result = full_csum(&tmp_buf[align], len, sum); - expec = expected_results[len]; + expec = to_sum16(expected_results[len]); CHECK_EQ(result, expec); } } @@ -251,7 +271,8 @@ static void test_csum_fixed_random_inputs(struct kunit *test) static void test_csum_all_carry_inputs(struct kunit *test) { int len, align; - __wsum result, expec, sum; + __wsum sum; + __sum16 result, expec; assert_setup_correct(test); memset(tmp_buf, 0xff, TEST_BUFLEN); @@ -261,9 +282,9 @@ static void test_csum_all_carry_inputs(struct kunit *test) /* * All carries from input and initial sum. */ - sum = 0xffffffff; + sum = to_wsum(0xffffffff); result = full_csum(&tmp_buf[align], len, sum); - expec = (len & 1) ? 0xff00 : 0; + expec = to_sum16((len & 1) ? 0xff00 : 0); CHECK_EQ(result, expec); /* @@ -272,11 +293,11 @@ static void test_csum_all_carry_inputs(struct kunit *test) sum = 0; result = full_csum(&tmp_buf[align], len, sum); if (len & 1) - expec = 0xff00; + expec = to_sum16(0xff00); else if (len) expec = 0; else - expec = 0xffff; + expec = to_sum16(0xffff); CHECK_EQ(result, expec); } } @@ -290,7 +311,8 @@ static void test_csum_all_carry_inputs(struct kunit *test) static void test_csum_no_carry_inputs(struct kunit *test) { int len, align; - __wsum result, expec, sum; + __wsum sum; + __sum16 result, expec; assert_setup_correct(test); memset(tmp_buf, 0x4, TEST_BUFLEN); @@ -300,7 +322,7 @@ static void test_csum_no_carry_inputs(struct kunit *test) /* * Expect no carries. */ - sum = init_sums_no_overflow[len]; + sum = to_wsum(init_sums_no_overflow[len]); result = full_csum(&tmp_buf[align], len, sum); expec = 0; CHECK_EQ(result, expec); @@ -308,9 +330,9 @@ static void test_csum_no_carry_inputs(struct kunit *test) /* * Expect one carry. */ - sum = init_sums_no_overflow[len] + 1; + sum = to_wsum(init_sums_no_overflow[len] + 1); result = full_csum(&tmp_buf[align], len, sum); - expec = len ? 0xfffe : 0xffff; + expec = to_sum16(len ? 
0xfffe : 0xffff); CHECK_EQ(result, expec); } } diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 913a7a079239..ecde4216201e 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -596,8 +596,8 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op, { static const s64 regs[] = { 0x0123456789abcdefLL, /* dword > 0, word < 0 */ - 0xfedcba9876543210LL, /* dowrd < 0, word > 0 */ - 0xfedcba0198765432LL, /* dowrd < 0, word < 0 */ + 0xfedcba9876543210LL, /* dword < 0, word > 0 */ + 0xfedcba0198765432LL, /* dword < 0, word < 0 */ 0x0123458967abcdefLL, /* dword > 0, word > 0 */ }; int bits = alu32 ? 32 : 64; @@ -14567,8 +14567,10 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test) if (ret == test->test[i].result) { pr_cont("%lld ", duration); } else { - pr_cont("ret %d != %d ", ret, - test->test[i].result); + s32 res = test->test[i].result; + + pr_cont("ret %d != %d (%#x != %#x)", + ret, res, ret, res); err_cnt++; } } @@ -15045,7 +15047,7 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs) struct bpf_array *progs; int which, err; - /* Allocate the table of programs to be used for tall calls */ + /* Allocate the table of programs to be used for tail calls */ progs = kzalloc(struct_size(progs, ptrs, ntests + 1), GFP_KERNEL); if (!progs) goto out_nomem; diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c index 8a85f6cdfbc1..9214189279e8 100644 --- a/net/bluetooth/eir.c +++ b/net/bluetooth/eir.c @@ -33,7 +33,7 @@ u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) size_t complete_len; /* no space left for name (+ NULL + type + len) */ - if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3) + if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3) return ad_len; /* use complete name if present and fits */ diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 234746721047..9d5057cef30a 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -734,6 +734,7 @@ struct iso_list_data { }; int count; bool big_term; + bool pa_sync_term; bool big_sync_term; }; @@ -807,7 +808,10 @@ static int big_terminate_sync(struct hci_dev *hdev, void *data) if (d->big_sync_term) hci_le_big_terminate_sync(hdev, d->big); - return hci_le_pa_terminate_sync(hdev, d->sync_handle); + if (d->pa_sync_term) + return hci_le_pa_terminate_sync(hdev, d->sync_handle); + + return 0; } static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn) @@ -823,6 +827,7 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *c d->big = big; d->sync_handle = conn->sync_handle; + d->pa_sync_term = test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags); d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags); ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d, @@ -1039,6 +1044,29 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, return conn; } +static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason) +{ + if (!reason) + reason = HCI_ERROR_REMOTE_USER_TERM; + + /* Due to race, SCO/ISO conn might be not established yet at this point, + * and nothing else will clean it up. In other cases it is done via HCI + * events. 
+ */ + switch (conn->type) { + case SCO_LINK: + case ESCO_LINK: + if (HCI_CONN_HANDLE_UNSET(conn->handle)) + hci_conn_failed(conn, reason); + break; + case ISO_LINK: + if (conn->state != BT_CONNECTED && + !test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) + hci_conn_failed(conn, reason); + break; + } +} + static void hci_conn_unlink(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; @@ -1061,14 +1089,7 @@ static void hci_conn_unlink(struct hci_conn *conn) if (!test_bit(HCI_UP, &hdev->flags)) continue; - /* Due to race, SCO connection might be not established - * yet at this point. Delete it now, otherwise it is - * possible for it to be stuck and can't be deleted. - */ - if ((child->type == SCO_LINK || - child->type == ESCO_LINK) && - HCI_CONN_HANDLE_UNSET(child->handle)) - hci_conn_del(child); + hci_conn_cleanup_child(child, conn->abort_reason); } return; @@ -1299,6 +1320,7 @@ static int hci_connect_le_sync(struct hci_dev *hdev, void *data) bt_dev_dbg(hdev, "conn %p", conn); + clear_bit(HCI_CONN_SCANNING, &conn->flags); conn->state = BT_CONNECT; return hci_le_create_conn_sync(hdev, conn); @@ -1370,8 +1392,6 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, conn->sec_level = BT_SECURITY_LOW; conn->conn_timeout = conn_timeout; - clear_bit(HCI_CONN_SCANNING, &conn->flags); - err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, UINT_PTR(conn->handle), create_le_conn_complete); @@ -2100,7 +2120,8 @@ int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete); } -int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos, +int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, + struct bt_iso_qos *qos, __u16 sync_handle, __u8 num_bis, __u8 bis[]) { struct _packed { @@ -2116,6 +2137,9 @@ int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos, if (err) return err; + if (hcon) + hcon->iso_qos.bcast.big = qos->bcast.big; + memset(&pdu, 0, sizeof(pdu)); pdu.cp.handle = qos->bcast.big; pdu.cp.sync_handle = cpu_to_le16(sync_handle); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 559b6080706c..35f251041eeb 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -6581,19 +6581,56 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data, struct hci_ev_le_pa_sync_established *ev = data; int mask = hdev->link_mode; __u8 flags = 0; + struct hci_conn *bis; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); - if (ev->status) - return; - hci_dev_lock(hdev); hci_dev_clear_flag(hdev, HCI_PA_SYNC); mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags); - if (!(mask & HCI_LM_ACCEPT)) + if (!(mask & HCI_LM_ACCEPT)) { hci_le_pa_term_sync(hdev, ev->handle); + goto unlock; + } + + if (!(flags & HCI_PROTO_DEFER)) + goto unlock; + + /* Add connection to indicate the PA sync event */ + bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY, + HCI_ROLE_SLAVE); + + if (!bis) + goto unlock; + + if (ev->status) + set_bit(HCI_CONN_PA_SYNC_FAILED, &bis->flags); + else + set_bit(HCI_CONN_PA_SYNC, &bis->flags); + + /* Notify connection to iso layer */ + hci_connect_cfm(bis, ev->status); + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_per_adv_report *ev = data; + int mask = hdev->link_mode; + __u8 flags = 0; + + bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle)); + + 
hci_dev_lock(hdev); + + mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags); + if (!(mask & HCI_LM_ACCEPT)) + hci_le_pa_term_sync(hdev, ev->sync_handle); hci_dev_unlock(hdev); } @@ -7045,6 +7082,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, { struct hci_evt_le_big_sync_estabilished *ev = data; struct hci_conn *bis; + struct hci_conn *pa_sync; int i; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); @@ -7055,6 +7093,15 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, hci_dev_lock(hdev); + if (!ev->status) { + pa_sync = hci_conn_hash_lookup_pa_sync(hdev, ev->handle); + if (pa_sync) + /* Also mark the BIG sync established event on the + * associated PA sync hcon + */ + set_bit(HCI_CONN_BIG_SYNC, &pa_sync->flags); + } + for (i = 0; i < ev->num_bis; i++) { u16 handle = le16_to_cpu(ev->bis[i]); __le32 interval; @@ -7068,6 +7115,10 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, bis->handle = handle; } + if (ev->status != 0x42) + /* Mark PA sync as established */ + set_bit(HCI_CONN_PA_SYNC, &bis->flags); + bis->iso_qos.bcast.big = ev->handle; memset(&interval, 0, sizeof(interval)); memcpy(&interval, ev->latency, sizeof(ev->latency)); @@ -7180,6 +7231,11 @@ static const struct hci_le_ev { HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED, hci_le_pa_sync_estabilished_evt, sizeof(struct hci_ev_le_pa_sync_established)), + /* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */ + HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT, + hci_le_per_adv_report_evt, + sizeof(struct hci_ev_le_per_adv_report), + HCI_MAX_EVENT_SIZE), /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */ HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt, sizeof(struct hci_evt_le_ext_adv_set_term)), diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 5eb30ba21370..9b93653c6197 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -4668,7 +4668,10 @@ static const struct { "advertised, but not supported."), HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT, "HCI LE Set Random Private Address Timeout command is " - "advertised, but not supported.") + "advertised, but not supported."), + HCI_QUIRK_BROKEN(LE_CODED, + "HCI LE Coded PHY feature bit is set, " + "but its usage is not supported.") }; /* This function handles hdev setup stage: @@ -5370,6 +5373,7 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { int err = 0; u16 handle = conn->handle; + struct hci_conn *c; switch (conn->state) { case BT_CONNECTED: @@ -5383,49 +5387,78 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) err = hci_reject_conn_sync(hdev, conn, reason); break; case BT_OPEN: + hci_dev_lock(hdev); + + /* Cleanup bis or pa sync connections */ + if (test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags) || + test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags)) { + hci_conn_failed(conn, reason); + } else if (test_bit(HCI_CONN_PA_SYNC, &conn->flags) || + test_bit(HCI_CONN_BIG_SYNC, &conn->flags)) { + conn->state = BT_CLOSED; + hci_disconn_cfm(conn, reason); + hci_conn_del(conn); + } + + hci_dev_unlock(hdev); + return 0; case BT_BOUND: hci_dev_lock(hdev); hci_conn_failed(conn, reason); hci_dev_unlock(hdev); return 0; default: + hci_dev_lock(hdev); conn->state = BT_CLOSED; + hci_disconn_cfm(conn, reason); + hci_conn_del(conn); + hci_dev_unlock(hdev); return 0; } + hci_dev_lock(hdev); + + /* Check if the connection hasn't been cleanup while waiting + * commands to complete. 
+ */ + c = hci_conn_hash_lookup_handle(hdev, handle); + if (!c || c != conn) { + err = 0; + goto unlock; + } + /* Cleanup hci_conn object if it cannot be cancelled as it * likelly means the controller and host stack are out of sync * or in case of LE it was still scanning so it can be cleanup * safely. */ - if (err) { - struct hci_conn *c; - - /* Check if the connection hasn't been cleanup while waiting - * commands to complete. - */ - c = hci_conn_hash_lookup_handle(hdev, handle); - if (!c || c != conn) - return 0; - - hci_dev_lock(hdev); - hci_conn_failed(conn, err); - hci_dev_unlock(hdev); - } + hci_conn_failed(conn, reason); +unlock: + hci_dev_unlock(hdev); return err; } static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason) { - struct hci_conn *conn, *tmp; - int err; + struct list_head *head = &hdev->conn_hash.list; + struct hci_conn *conn; - list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) { - err = hci_abort_conn_sync(hdev, conn, reason); - if (err) - return err; + rcu_read_lock(); + while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) { + /* Make sure the connection is not freed while unlocking */ + conn = hci_conn_get(conn); + rcu_read_unlock(); + /* Disregard possible errors since hci_conn_del shall have been + * called even in case of errors had occurred since it would + * then cause hci_conn_failed to be called which calls + * hci_conn_del internally. + */ + hci_abort_conn_sync(hdev, conn, reason); + hci_conn_put(conn); + rcu_read_lock(); } + rcu_read_unlock(); return 0; } diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index 3c03e49422c7..16da946f5881 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -51,6 +51,7 @@ static void iso_sock_kill(struct sock *sk); /* iso_pinfo flags values */ enum { BT_SK_BIG_SYNC, + BT_SK_PA_SYNC, }; struct iso_pinfo { @@ -75,6 +76,8 @@ static struct bt_iso_qos default_qos; static bool check_ucast_qos(struct bt_iso_qos *qos); static bool check_bcast_qos(struct bt_iso_qos *qos); +static bool iso_match_sid(struct sock *sk, void *data); +static void iso_sock_disconn(struct sock *sk); /* ---- ISO timers ---- */ #define ISO_CONN_TIMEOUT (HZ * 40) @@ -598,6 +601,15 @@ static void iso_sock_cleanup_listen(struct sock *parent) iso_sock_kill(sk); } + /* If listening socket stands for a PA sync connection, + * properly disconnect the hcon and socket. 
+ */ + if (iso_pi(parent)->conn && iso_pi(parent)->conn->hcon && + test_bit(HCI_CONN_PA_SYNC, &iso_pi(parent)->conn->hcon->flags)) { + iso_sock_disconn(parent); + return; + } + parent->sk_state = BT_CLOSED; sock_set_flag(parent, SOCK_ZAPPED); } @@ -619,6 +631,16 @@ static void iso_sock_kill(struct sock *sk) sock_put(sk); } +static void iso_sock_disconn(struct sock *sk) +{ + sk->sk_state = BT_DISCONN; + iso_sock_set_timer(sk, ISO_DISCONN_TIMEOUT); + iso_conn_lock(iso_pi(sk)->conn); + hci_conn_drop(iso_pi(sk)->conn->hcon); + iso_pi(sk)->conn->hcon = NULL; + iso_conn_unlock(iso_pi(sk)->conn); +} + static void __iso_sock_close(struct sock *sk) { BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); @@ -631,20 +653,19 @@ static void __iso_sock_close(struct sock *sk) case BT_CONNECT: case BT_CONNECTED: case BT_CONFIG: - if (iso_pi(sk)->conn->hcon) { - sk->sk_state = BT_DISCONN; - iso_sock_set_timer(sk, ISO_DISCONN_TIMEOUT); - iso_conn_lock(iso_pi(sk)->conn); - hci_conn_drop(iso_pi(sk)->conn->hcon); - iso_pi(sk)->conn->hcon = NULL; - iso_conn_unlock(iso_pi(sk)->conn); - } else { + if (iso_pi(sk)->conn->hcon) + iso_sock_disconn(sk); + else iso_chan_del(sk, ECONNRESET); - } break; case BT_CONNECT2: - iso_chan_del(sk, ECONNRESET); + if (iso_pi(sk)->conn->hcon && + (test_bit(HCI_CONN_PA_SYNC, &iso_pi(sk)->conn->hcon->flags) || + test_bit(HCI_CONN_PA_SYNC_FAILED, &iso_pi(sk)->conn->hcon->flags))) + iso_sock_disconn(sk); + else + iso_chan_del(sk, ECONNRESET); break; case BT_DISCONN: iso_chan_del(sk, ECONNRESET); @@ -1139,6 +1160,29 @@ static void iso_conn_defer_accept(struct hci_conn *conn) hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); } +static void iso_conn_big_sync(struct sock *sk) +{ + int err; + struct hci_dev *hdev; + + hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, + iso_pi(sk)->src_type); + + if (!hdev) + return; + + if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) { + err = hci_le_big_create_sync(hdev, iso_pi(sk)->conn->hcon, + &iso_pi(sk)->qos, + iso_pi(sk)->sync_handle, + iso_pi(sk)->bc_num_bis, + iso_pi(sk)->bc_bis); + if (err) + bt_dev_err(hdev, "hci_le_big_create_sync: %d", + err); + } +} + static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { @@ -1151,8 +1195,15 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg, lock_sock(sk); switch (sk->sk_state) { case BT_CONNECT2: - iso_conn_defer_accept(pi->conn->hcon); - sk->sk_state = BT_CONFIG; + if (pi->conn->hcon && + test_bit(HCI_CONN_PA_SYNC, &pi->conn->hcon->flags)) { + iso_conn_big_sync(sk); + sk->sk_state = BT_LISTEN; + set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags); + } else { + iso_conn_defer_accept(pi->conn->hcon); + sk->sk_state = BT_CONFIG; + } release_sock(sk); return 0; case BT_CONNECT: @@ -1395,7 +1446,8 @@ static int iso_sock_getsockopt(struct socket *sock, int level, int optname, break; case BT_ISO_BASE: - if (sk->sk_state == BT_CONNECTED) { + if (sk->sk_state == BT_CONNECTED && + !bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) { base_len = iso_pi(sk)->conn->hcon->le_per_adv_data_len; base = iso_pi(sk)->conn->hcon->le_per_adv_data; } else { @@ -1513,11 +1565,17 @@ static bool iso_match_big(struct sock *sk, void *data) return ev->handle == iso_pi(sk)->qos.bcast.big; } +static bool iso_match_pa_sync_flag(struct sock *sk, void *data) +{ + return test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags); +} + static void iso_conn_ready(struct iso_conn *conn) { - struct sock *parent; + struct sock *parent = NULL; struct sock *sk = conn->sk; - struct 
hci_ev_le_big_sync_estabilished *ev; + struct hci_ev_le_big_sync_estabilished *ev = NULL; + struct hci_ev_le_pa_sync_established *ev2 = NULL; struct hci_conn *hcon; BT_DBG("conn %p", conn); @@ -1529,15 +1587,32 @@ static void iso_conn_ready(struct iso_conn *conn) if (!hcon) return; - ev = hci_recv_event_data(hcon->hdev, - HCI_EVT_LE_BIG_SYNC_ESTABILISHED); - if (ev) + if (test_bit(HCI_CONN_BIG_SYNC, &hcon->flags) || + test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) { + ev = hci_recv_event_data(hcon->hdev, + HCI_EVT_LE_BIG_SYNC_ESTABILISHED); + + /* Get reference to PA sync parent socket, if it exists */ parent = iso_get_sock_listen(&hcon->src, &hcon->dst, - iso_match_big, ev); - else + iso_match_pa_sync_flag, NULL); + if (!parent && ev) + parent = iso_get_sock_listen(&hcon->src, + &hcon->dst, + iso_match_big, ev); + } else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags) || + test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) { + ev2 = hci_recv_event_data(hcon->hdev, + HCI_EV_LE_PA_SYNC_ESTABLISHED); + if (ev2) + parent = iso_get_sock_listen(&hcon->src, + &hcon->dst, + iso_match_sid, ev2); + } + + if (!parent) parent = iso_get_sock_listen(&hcon->src, - BDADDR_ANY, NULL, NULL); + BDADDR_ANY, NULL, NULL); if (!parent) return; @@ -1554,11 +1629,17 @@ static void iso_conn_ready(struct iso_conn *conn) iso_sock_init(sk, parent); bacpy(&iso_pi(sk)->src, &hcon->src); - iso_pi(sk)->src_type = hcon->src_type; + + /* Convert from HCI to three-value type */ + if (hcon->src_type == ADDR_LE_DEV_PUBLIC) + iso_pi(sk)->src_type = BDADDR_LE_PUBLIC; + else + iso_pi(sk)->src_type = BDADDR_LE_RANDOM; /* If hcon has no destination address (BDADDR_ANY) it means it - * was created by HCI_EV_LE_BIG_SYNC_ESTABILISHED so we need to - * initialize using the parent socket destination address. + * was created by HCI_EV_LE_BIG_SYNC_ESTABILISHED or + * HCI_EV_LE_PA_SYNC_ESTABLISHED so we need to initialize using + * the parent socket destination address. 
*/ if (!bacmp(&hcon->dst, BDADDR_ANY)) { bacpy(&hcon->dst, &iso_pi(parent)->dst); @@ -1566,13 +1647,24 @@ static void iso_conn_ready(struct iso_conn *conn) hcon->sync_handle = iso_pi(parent)->sync_handle; } + if (ev2 && !ev2->status) { + iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle; + iso_pi(sk)->qos = iso_pi(parent)->qos; + iso_pi(sk)->bc_num_bis = iso_pi(parent)->bc_num_bis; + memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis, ISO_MAX_NUM_BIS); + } + bacpy(&iso_pi(sk)->dst, &hcon->dst); iso_pi(sk)->dst_type = hcon->dst_type; + iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle; + memcpy(iso_pi(sk)->base, iso_pi(parent)->base, iso_pi(parent)->base_len); + iso_pi(sk)->base_len = iso_pi(parent)->base_len; hci_conn_hold(hcon); iso_chan_add(conn, sk, parent); - if (ev && ((struct hci_evt_le_big_sync_estabilished *)ev)->status) { + if ((ev && ((struct hci_evt_le_big_sync_estabilished *)ev)->status) || + (ev2 && ev2->status)) { /* Trigger error signal on child socket */ sk->sk_err = ECONNREFUSED; sk->sk_error_report(sk); @@ -1604,12 +1696,20 @@ static bool iso_match_sync_handle(struct sock *sk, void *data) return le16_to_cpu(ev->sync_handle) == iso_pi(sk)->sync_handle; } +static bool iso_match_sync_handle_pa_report(struct sock *sk, void *data) +{ + struct hci_ev_le_per_adv_report *ev = data; + + return le16_to_cpu(ev->sync_handle) == iso_pi(sk)->sync_handle; +} + /* ----- ISO interface with lower layer (HCI) ----- */ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) { struct hci_ev_le_pa_sync_established *ev1; struct hci_evt_le_big_info_adv_report *ev2; + struct hci_ev_le_per_adv_report *ev3; struct sock *sk; int lm = 0; @@ -1625,12 +1725,15 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) * 2. HCI_EVT_LE_BIG_INFO_ADV_REPORT: When connect_ind is triggered by a * a BIG Info it attempts to check if there any listening socket with * the same sync_handle and if it does then attempt to create a sync. + * 3. HCI_EV_LE_PER_ADV_REPORT: When a PA report is received, it is stored + * in iso_pi(sk)->base so it can be passed up to user, in the case of a + * broadcast sink. 
*/ ev1 = hci_recv_event_data(hdev, HCI_EV_LE_PA_SYNC_ESTABLISHED); if (ev1) { sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, iso_match_sid, ev1); - if (sk) + if (sk && !ev1->status) iso_pi(sk)->sync_handle = le16_to_cpu(ev1->handle); goto done; @@ -1638,16 +1741,21 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) ev2 = hci_recv_event_data(hdev, HCI_EVT_LE_BIG_INFO_ADV_REPORT); if (ev2) { + /* Try to get PA sync listening socket, if it exists */ sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, - iso_match_sync_handle, ev2); + iso_match_pa_sync_flag, NULL); + if (!sk) + sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, + iso_match_sync_handle, ev2); if (sk) { int err; if (ev2->num_bis < iso_pi(sk)->bc_num_bis) iso_pi(sk)->bc_num_bis = ev2->num_bis; - if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) { - err = hci_le_big_create_sync(hdev, + if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) && + !test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) { + err = hci_le_big_create_sync(hdev, NULL, &iso_pi(sk)->qos, iso_pi(sk)->sync_handle, iso_pi(sk)->bc_num_bis, @@ -1659,6 +1767,17 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) } } } + } + + ev3 = hci_recv_event_data(hdev, HCI_EV_LE_PER_ADV_REPORT); + if (ev3) { + sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, + iso_match_sync_handle_pa_report, ev3); + + if (sk) { + memcpy(iso_pi(sk)->base, ev3->data, ev3->length); + iso_pi(sk)->base_len = ev3->length; + } } else { sk = iso_get_sock_listen(&hdev->bdaddr, BDADDR_ANY, NULL, NULL); } @@ -1699,12 +1818,13 @@ static void iso_connect_cfm(struct hci_conn *hcon, __u8 status) BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); - /* Similar to the success case, if HCI_CONN_BIG_SYNC_FAILED is set, - * queue the failed bis connection into the accept queue of the - * listening socket and wake up userspace, to inform the user about - * the BIG sync failed event. + /* Similar to the success case, if HCI_CONN_BIG_SYNC_FAILED or + * HCI_CONN_PA_SYNC_FAILED is set, queue the failed connection + * into the accept queue of the listening socket and wake up + * userspace, to inform the user about the event. 
*/ - if (!status || test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) { + if (!status || test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags) || + test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) { struct iso_conn *conn; conn = iso_conn_add(hcon); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index d6c9b7bc8592..ba2e00646e8e 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -5381,9 +5381,9 @@ static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count, for (i = 0; i < pattern_count; i++) { offset = patterns[i].offset; length = patterns[i].length; - if (offset >= HCI_MAX_AD_LENGTH || - length > HCI_MAX_AD_LENGTH || - (offset + length) > HCI_MAX_AD_LENGTH) + if (offset >= HCI_MAX_EXT_AD_LENGTH || + length > HCI_MAX_EXT_AD_LENGTH || + (offset + length) > HCI_MAX_EXT_AD_LENGTH) return MGMT_STATUS_INVALID_PARAMS; p = kmalloc(sizeof(*p), GFP_KERNEL); diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 8b6b5e72b217..4a0797f0a154 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -60,9 +60,8 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, ret = BPF_OK; } else { skb_reset_mac_header(skb); - ret = skb_do_redirect(skb); - if (ret == 0) - ret = BPF_REDIRECT; + skb_do_redirect(skb); + ret = BPF_REDIRECT; } break; @@ -255,7 +254,7 @@ static int bpf_lwt_xmit_reroute(struct sk_buff *skb) err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb); if (unlikely(err)) - return err; + return net_xmit_errno(err); /* ip[6]_finish_output2 understand LWTUNNEL_XMIT_DONE */ return LWTUNNEL_XMIT_DONE; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index ce6257860a40..43ba4b77b248 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -216,7 +216,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s if (lwtunnel_xmit_redirect(dst->lwtstate)) { int res = lwtunnel_xmit(skb); - if (res < 0 || res == LWTUNNEL_XMIT_DONE) + if (res != LWTUNNEL_XMIT_CONTINUE) return res; } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index f8a1f6bb3f87..0665e8b09968 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -113,7 +113,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * if (lwtunnel_xmit_redirect(dst->lwtstate)) { int res = lwtunnel_xmit(skb); - if (res < 0 || res == LWTUNNEL_XMIT_DONE) + if (res != LWTUNNEL_XMIT_CONTINUE) return res; } diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index e7ac24603892..45e7a5d9c7d9 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -3648,12 +3648,6 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata) lockdep_assert_held(&local->mtx); lockdep_assert_held(&local->chanctx_mtx); - if (sdata->vif.bss_conf.eht_puncturing != sdata->vif.bss_conf.csa_punct_bitmap) { - sdata->vif.bss_conf.eht_puncturing = - sdata->vif.bss_conf.csa_punct_bitmap; - changed |= BSS_CHANGED_EHT_PUNCTURING; - } - /* * using reservation isn't immediate as it may be deferred until later * with multi-vif. 
once reservation is complete it will re-schedule the @@ -3683,6 +3677,12 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata) if (err) return err; + if (sdata->vif.bss_conf.eht_puncturing != sdata->vif.bss_conf.csa_punct_bitmap) { + sdata->vif.bss_conf.eht_puncturing = + sdata->vif.bss_conf.csa_punct_bitmap; + changed |= BSS_CHANGED_EHT_PUNCTURING; + } + ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed); if (sdata->deflink.csa_block_tx) { @@ -4133,19 +4133,20 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, mutex_lock(&local->mtx); rcu_read_lock(); + sta = sta_info_get_bss(sdata, peer); + if (!sta) { + ret = -ENOLINK; + goto unlock; + } + + qos = sta->sta.wme; + chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (WARN_ON(!chanctx_conf)) { ret = -EINVAL; goto unlock; } band = chanctx_conf->def.chan->band; - sta = sta_info_get_bss(sdata, peer); - if (sta) { - qos = sta->sta.wme; - } else { - ret = -ENOLINK; - goto unlock; - } if (qos) { fc = cpu_to_le16(IEEE80211_FTYPE_DATA | diff --git a/net/mac80211/fils_aead.c b/net/mac80211/fils_aead.c index e1d4cfd99128..912c46f74d24 100644 --- a/net/mac80211/fils_aead.c +++ b/net/mac80211/fils_aead.c @@ -5,9 +5,9 @@ */ #include <crypto/aes.h> -#include <crypto/algapi.h> #include <crypto/hash.h> #include <crypto/skcipher.h> +#include <crypto/utils.h> #include "ieee80211_i.h" #include "aes_cmac.h" diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 91633a0b723e..06bd406846d2 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1872,7 +1872,6 @@ void ieee80211_send_pspoll(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); void ieee80211_recalc_ps(struct ieee80211_local *local); void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata); -int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); @@ -2564,7 +2563,6 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, struct ieee80211_link_data *rsvd_for); bool ieee80211_is_radar_required(struct ieee80211_local *local); -void ieee80211_dfs_cac_timer(unsigned long data); void ieee80211_dfs_cac_timer_work(struct work_struct *work); void ieee80211_dfs_cac_cancel(struct ieee80211_local *local); void ieee80211_dfs_radar_detected_work(struct work_struct *work); diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 21cf5a208910..13050dc9321f 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -9,6 +9,7 @@ * Copyright 2018-2020, 2022-2023 Intel Corporation */ +#include <crypto/utils.h> #include <linux/if_ether.h> #include <linux/etherdevice.h> #include <linux/list.h> @@ -17,7 +18,6 @@ #include <linux/slab.h> #include <linux/export.h> #include <net/mac80211.h> -#include <crypto/algapi.h> #include <asm/unaligned.h> #include "ieee80211_i.h" #include "driver-ops.h" diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 6c94222a9df5..ad8469293d71 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -212,7 +212,6 @@ int mesh_rmc_check(struct ieee80211_sub_if_data *sdata, const u8 *addr, struct ieee80211s_hdr *mesh_hdr); bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *ie); -void mesh_ids_set_default(struct ieee80211_if_mesh *mesh); int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int 
mesh_add_meshid_ie(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 0af2599c17e8..e751cda5eef6 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3734,6 +3734,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) break; goto queue; case WLAN_CATEGORY_S1G: + if (len < offsetofend(typeof(*mgmt), + u.action.u.s1g.action_code)) + break; + switch (mgmt->u.action.u.s1g.action_code) { case WLAN_S1G_TWT_SETUP: case WLAN_S1G_TWT_TEARDOWN: diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 4133496da378..2d8e38b3bcb5 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -15,7 +15,7 @@ #include <asm/unaligned.h> #include <net/mac80211.h> #include <crypto/aes.h> -#include <crypto/algapi.h> +#include <crypto/utils.h> #include "ieee80211_i.h" #include "michael.h" diff --git a/net/wireless/core.h b/net/wireless/core.h index 8a807b609ef7..507d184b8b40 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -298,7 +298,7 @@ struct cfg80211_cqm_config { u32 rssi_hyst; s32 last_rssi_event_value; int n_rssi_thresholds; - s32 rssi_thresholds[]; + s32 rssi_thresholds[] __counted_by(n_rssi_thresholds); }; void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index ac059cefbeb3..775cac4d6100 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -281,6 +281,11 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, ether_addr_equal(req->bss->bssid, wdev->u.client.connected_addr)) return -EALREADY; + if (ether_addr_equal(req->bss->bssid, dev->dev_addr) || + (req->link_id >= 0 && + ether_addr_equal(req->ap_mld_addr, dev->dev_addr))) + return -EINVAL; + return rdev_auth(rdev, dev, req); } @@ -335,6 +340,9 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, if (req->links[i].bss == req->links[j].bss) return -EINVAL; } + + if (ether_addr_equal(req->links[i].bss->bssid, dev->dev_addr)) + return -EINVAL; } if (wdev->connected && @@ -342,6 +350,11 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, !ether_addr_equal(wdev->u.client.connected_addr, req->prev_bssid))) return -EALREADY; + if ((req->bss && ether_addr_equal(req->bss->bssid, dev->dev_addr)) || + (req->link_id >= 0 && + ether_addr_equal(req->ap_mld_addr, dev->dev_addr))) + return -EINVAL; + cfg80211_oper_and_ht_capa(&req->ht_capa_mask, rdev->wiphy.ht_capa_mod_mask); cfg80211_oper_and_vht_capa(&req->vht_capa_mask, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 8bcf8e293308..de47838aca4f 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -323,6 +323,7 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = { [NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK] = { .type = NLA_FLAG }, + [NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR] = { .type = NLA_U8 }, }; static const struct nla_policy @@ -4889,13 +4890,12 @@ static struct cfg80211_acl_data *parse_acl_data(struct wiphy *wiphy, acl = kzalloc(struct_size(acl, mac_addrs, n_entries), GFP_KERNEL); if (!acl) return ERR_PTR(-ENOMEM); + acl->n_acl_entries = n_entries; nla_for_each_nested(attr, info->attrs[NL80211_ATTR_MAC_ADDRS], tmp) { memcpy(acl->mac_addrs[i].addr, nla_data(attr), ETH_ALEN); i++; } - - acl->n_acl_entries = n_entries; acl->acl_policy = acl_policy; return acl; @@ -5439,13 +5439,13 @@ nl80211_parse_mbssid_elems(struct wiphy *wiphy, struct nlattr 
*attrs) elems = kzalloc(struct_size(elems, elem, num_elems), GFP_KERNEL); if (!elems) return ERR_PTR(-ENOMEM); + elems->cnt = num_elems; nla_for_each_nested(nl_elems, attrs, rem_elems) { elems->elem[i].data = nla_data(nl_elems); elems->elem[i].len = nla_len(nl_elems); i++; } - elems->cnt = num_elems; return elems; } @@ -5471,13 +5471,13 @@ nl80211_parse_rnr_elems(struct wiphy *wiphy, struct nlattr *attrs, elems = kzalloc(struct_size(elems, elem, num_elems), GFP_KERNEL); if (!elems) return ERR_PTR(-ENOMEM); + elems->cnt = num_elems; nla_for_each_nested(nl_elems, attrs, rem_elems) { elems->elem[i].data = nla_data(nl_elems); elems->elem[i].len = nla_len(nl_elems); i++; } - elems->cnt = num_elems; return elems; } diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index 0278d817bb02..b4af53f9b227 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -120,6 +120,5 @@ void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev); /* peer measurement */ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info); -int nl80211_pmsr_dump_results(struct sk_buff *skb, struct netlink_callback *cb); #endif /* __NET_WIRELESS_NL80211_H */ diff --git a/net/wireless/ocb.c b/net/wireless/ocb.c index 27a1732264f9..29afaf3da54f 100644 --- a/net/wireless/ocb.c +++ b/net/wireless/ocb.c @@ -68,6 +68,9 @@ int __cfg80211_leave_ocb(struct cfg80211_registered_device *rdev, if (!rdev->ops->leave_ocb) return -EOPNOTSUPP; + if (!wdev->u.ocb.chandef.chan) + return -ENOTCONN; + err = rdev_leave_ocb(rdev, dev); if (!err) memset(&wdev->u.ocb.chandef, 0, sizeof(wdev->u.ocb.chandef)); diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c index 77000a264855..9611aa0bd051 100644 --- a/net/wireless/pmsr.c +++ b/net/wireless/pmsr.c @@ -291,6 +291,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info) req = kzalloc(struct_size(req, peers, count), GFP_KERNEL); if (!req) return -ENOMEM; + req->n_peers = count; if (info->attrs[NL80211_ATTR_TIMEOUT]) req->timeout = nla_get_u32(info->attrs[NL80211_ATTR_TIMEOUT]); @@ -321,8 +322,6 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info) goto out_err; idx++; } - - req->n_peers = count; req->cookie = cfg80211_assign_cookie(rdev); req->nl_portid = info->snd_portid; diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore index 0e7bfdbff80a..0002cd359fb1 100644 --- a/samples/bpf/.gitignore +++ b/samples/bpf/.gitignore @@ -37,22 +37,10 @@ tracex4 tracex5 tracex6 tracex7 -xdp1 -xdp2 xdp_adjust_tail xdp_fwd -xdp_monitor -xdp_redirect -xdp_redirect_cpu -xdp_redirect_map -xdp_redirect_map_multi xdp_router_ipv4 -xdp_rxq_info -xdp_sample_pkts xdp_tx_iptunnel -xdpsock -xdpsock_ctrl_proc -xsk_fwd testfile.img hbm_out.log iperf.* diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 595b98d825ce..4ccf4236031c 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -30,8 +30,6 @@ tprogs-y += test_cgrp2_array_pin tprogs-y += test_cgrp2_attach tprogs-y += test_cgrp2_sock tprogs-y += test_cgrp2_sock2 -tprogs-y += xdp1 -tprogs-y += xdp2 tprogs-y += xdp_router_ipv4 tprogs-y += test_current_task_under_cgroup tprogs-y += trace_event @@ -41,22 +39,14 @@ tprogs-y += lwt_len_hist tprogs-y += xdp_tx_iptunnel tprogs-y += test_map_in_map tprogs-y += per_socket_stats_example -tprogs-y += xdp_rxq_info tprogs-y += syscall_tp tprogs-y += cpustat tprogs-y += xdp_adjust_tail tprogs-y += xdp_fwd tprogs-y += task_fd_query -tprogs-y += xdp_sample_pkts tprogs-y += ibumad tprogs-y += hbm -tprogs-y += xdp_redirect_cpu -tprogs-y 
+= xdp_redirect_map_multi -tprogs-y += xdp_redirect_map -tprogs-y += xdp_redirect -tprogs-y += xdp_monitor - # Libbpf dependencies LIBBPF_SRC = $(TOOLS_PATH)/lib/bpf LIBBPF_OUTPUT = $(abspath $(BPF_SAMPLES_PATH))/libbpf @@ -90,9 +80,6 @@ test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o test_cgrp2_attach-objs := test_cgrp2_attach.o test_cgrp2_sock-objs := test_cgrp2_sock.o test_cgrp2_sock2-objs := test_cgrp2_sock2.o -xdp1-objs := xdp1_user.o -# reuse xdp1 source intentionally -xdp2-objs := xdp1_user.o test_current_task_under_cgroup-objs := $(CGROUP_HELPERS) \ test_current_task_under_cgroup_user.o trace_event-objs := trace_event_user.o $(TRACE_HELPERS) @@ -102,21 +89,14 @@ lwt_len_hist-objs := lwt_len_hist_user.o xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o test_map_in_map-objs := test_map_in_map_user.o per_socket_stats_example-objs := cookie_uid_helper_example.o -xdp_rxq_info-objs := xdp_rxq_info_user.o syscall_tp-objs := syscall_tp_user.o cpustat-objs := cpustat_user.o xdp_adjust_tail-objs := xdp_adjust_tail_user.o xdp_fwd-objs := xdp_fwd_user.o task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS) -xdp_sample_pkts-objs := xdp_sample_pkts_user.o ibumad-objs := ibumad_user.o hbm-objs := hbm.o $(CGROUP_HELPERS) -xdp_redirect_map_multi-objs := xdp_redirect_map_multi_user.o $(XDP_SAMPLE) -xdp_redirect_cpu-objs := xdp_redirect_cpu_user.o $(XDP_SAMPLE) -xdp_redirect_map-objs := xdp_redirect_map_user.o $(XDP_SAMPLE) -xdp_redirect-objs := xdp_redirect_user.o $(XDP_SAMPLE) -xdp_monitor-objs := xdp_monitor_user.o $(XDP_SAMPLE) xdp_router_ipv4-objs := xdp_router_ipv4_user.o $(XDP_SAMPLE) # Tell kbuild to always build the programs @@ -124,29 +104,27 @@ always-y := $(tprogs-y) always-y += sockex1_kern.o always-y += sockex2_kern.o always-y += sockex3_kern.o -always-y += tracex1_kern.o +always-y += tracex1.bpf.o always-y += tracex2.bpf.o -always-y += tracex3_kern.o -always-y += tracex4_kern.o -always-y += tracex5_kern.o -always-y += tracex6_kern.o -always-y += tracex7_kern.o +always-y += tracex3.bpf.o +always-y += tracex4.bpf.o +always-y += tracex5.bpf.o +always-y += tracex6.bpf.o +always-y += tracex7.bpf.o always-y += sock_flags.bpf.o always-y += test_probe_write_user.bpf.o always-y += trace_output.bpf.o always-y += tcbpf1_kern.o always-y += tc_l2_redirect_kern.o always-y += lathist_kern.o -always-y += offwaketime_kern.o -always-y += spintest_kern.o +always-y += offwaketime.bpf.o +always-y += spintest.bpf.o always-y += map_perf_test.bpf.o always-y += test_overhead_tp.bpf.o always-y += test_overhead_raw_tp.bpf.o always-y += test_overhead_kprobe.bpf.o always-y += parse_varlen.o parse_simple.o parse_ldabs.o always-y += test_cgrp2_tc.bpf.o -always-y += xdp1_kern.o -always-y += xdp2_kern.o always-y += test_current_task_under_cgroup.bpf.o always-y += trace_event_kern.o always-y += sampleip_kern.o @@ -162,14 +140,12 @@ always-y += tcp_clamp_kern.o always-y += tcp_basertt_kern.o always-y += tcp_tos_reflect_kern.o always-y += tcp_dumpstats_kern.o -always-y += xdp_rxq_info_kern.o always-y += xdp2skb_meta_kern.o always-y += syscall_tp_kern.o always-y += cpustat_kern.o always-y += xdp_adjust_tail_kern.o always-y += xdp_fwd_kern.o always-y += task_fd_query_kern.o -always-y += xdp_sample_pkts_kern.o always-y += ibumad_kern.o always-y += hbm_out_kern.o always-y += hbm_edt_kern.o @@ -207,11 +183,6 @@ TPROGS_LDFLAGS := -L$(SYSROOT)/usr/lib endif TPROGS_LDLIBS += $(LIBBPF) -lelf -lz -TPROGLDLIBS_xdp_monitor += -lm -TPROGLDLIBS_xdp_redirect += -lm -TPROGLDLIBS_xdp_redirect_cpu += -lm 
-TPROGLDLIBS_xdp_redirect_map += -lm -TPROGLDLIBS_xdp_redirect_map_multi += -lm TPROGLDLIBS_xdp_router_ipv4 += -lm -pthread TPROGLDLIBS_tracex4 += -lrt TPROGLDLIBS_trace_output += -lrt @@ -326,14 +297,9 @@ $(obj)/$(TRACE_HELPERS) $(obj)/$(CGROUP_HELPERS) $(obj)/$(XDP_SAMPLE): | libbpf_ .PHONY: libbpf_hdrs -$(obj)/xdp_redirect_cpu_user.o: $(obj)/xdp_redirect_cpu.skel.h -$(obj)/xdp_redirect_map_multi_user.o: $(obj)/xdp_redirect_map_multi.skel.h -$(obj)/xdp_redirect_map_user.o: $(obj)/xdp_redirect_map.skel.h -$(obj)/xdp_redirect_user.o: $(obj)/xdp_redirect.skel.h -$(obj)/xdp_monitor_user.o: $(obj)/xdp_monitor.skel.h $(obj)/xdp_router_ipv4_user.o: $(obj)/xdp_router_ipv4.skel.h -$(obj)/tracex5_kern.o: $(obj)/syscall_nrs.h +$(obj)/tracex5.bpf.o: $(obj)/syscall_nrs.h $(obj)/hbm_out_kern.o: $(src)/hbm.h $(src)/hbm_kern.h $(obj)/hbm.o: $(src)/hbm.h $(obj)/hbm_edt_kern.o: $(src)/hbm.h $(src)/hbm_kern.h @@ -383,11 +349,6 @@ endef CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG)) -$(obj)/xdp_redirect_cpu.bpf.o: $(obj)/xdp_sample.bpf.o -$(obj)/xdp_redirect_map_multi.bpf.o: $(obj)/xdp_sample.bpf.o -$(obj)/xdp_redirect_map.bpf.o: $(obj)/xdp_sample.bpf.o -$(obj)/xdp_redirect.bpf.o: $(obj)/xdp_sample.bpf.o -$(obj)/xdp_monitor.bpf.o: $(obj)/xdp_sample.bpf.o $(obj)/xdp_router_ipv4.bpf.o: $(obj)/xdp_sample.bpf.o $(obj)/%.bpf.o: $(src)/%.bpf.c $(obj)/vmlinux.h $(src)/xdp_sample.bpf.h $(src)/xdp_sample_shared.h @@ -398,16 +359,9 @@ $(obj)/%.bpf.o: $(src)/%.bpf.c $(obj)/vmlinux.h $(src)/xdp_sample.bpf.h $(src)/x -I$(LIBBPF_INCLUDE) $(CLANG_SYS_INCLUDES) \ -c $(filter %.bpf.c,$^) -o $@ -LINKED_SKELS := xdp_redirect_cpu.skel.h xdp_redirect_map_multi.skel.h \ - xdp_redirect_map.skel.h xdp_redirect.skel.h xdp_monitor.skel.h \ - xdp_router_ipv4.skel.h +LINKED_SKELS := xdp_router_ipv4.skel.h clean-files += $(LINKED_SKELS) -xdp_redirect_cpu.skel.h-deps := xdp_redirect_cpu.bpf.o xdp_sample.bpf.o -xdp_redirect_map_multi.skel.h-deps := xdp_redirect_map_multi.bpf.o xdp_sample.bpf.o -xdp_redirect_map.skel.h-deps := xdp_redirect_map.bpf.o xdp_sample.bpf.o -xdp_redirect.skel.h-deps := xdp_redirect.bpf.o xdp_sample.bpf.o -xdp_monitor.skel.h-deps := xdp_monitor.bpf.o xdp_sample.bpf.o xdp_router_ipv4.skel.h-deps := xdp_router_ipv4.bpf.o xdp_sample.bpf.o LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.bpf.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps))) @@ -440,7 +394,7 @@ $(obj)/%.o: $(src)/%.c -Wno-gnu-variable-sized-type-not-at-end \ -Wno-address-of-packed-member -Wno-tautological-compare \ -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ - -fno-asynchronous-unwind-tables \ + -fno-asynchronous-unwind-tables -fcf-protection \ -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \ -O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \ $(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \ diff --git a/samples/bpf/README.rst b/samples/bpf/README.rst index f16fc48e55a5..cabe2d216997 100644 --- a/samples/bpf/README.rst +++ b/samples/bpf/README.rst @@ -4,6 +4,12 @@ eBPF sample programs This directory contains a test stubs, verifier test-suite and examples for using eBPF. The examples use libbpf from tools/lib/bpf. +Note that the XDP-specific samples have been removed from this directory and +moved to the xdp-tools repository: https://github.com/xdp-project/xdp-tools +See the commit messages removing each tool from this directory for how to +convert specific command invocations between the old samples and the utilities +in xdp-tools. 
+ Build dependencies ================== diff --git a/samples/bpf/net_shared.h b/samples/bpf/net_shared.h index e9429af9aa44..88cc52461c98 100644 --- a/samples/bpf/net_shared.h +++ b/samples/bpf/net_shared.h @@ -17,6 +17,8 @@ #define TC_ACT_OK 0 #define TC_ACT_SHOT 2 +#define IFNAMSIZ 16 + #if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \ __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define bpf_ntohs(x) __builtin_bswap16(x) diff --git a/samples/bpf/offwaketime_kern.c b/samples/bpf/offwaketime.bpf.c index 23f12b47e9e5..4a65ba76c1b1 100644 --- a/samples/bpf/offwaketime_kern.c +++ b/samples/bpf/offwaketime.bpf.c @@ -4,20 +4,15 @@ * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ -#include <uapi/linux/bpf.h> -#include <uapi/linux/ptrace.h> -#include <uapi/linux/perf_event.h> +#include "vmlinux.h" #include <linux/version.h> -#include <linux/sched.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> -#define _(P) \ - ({ \ - typeof(P) val; \ - bpf_probe_read_kernel(&val, sizeof(val), &(P)); \ - val; \ - }) +#ifndef PERF_MAX_STACK_DEPTH +#define PERF_MAX_STACK_DEPTH 127 +#endif #define MINBLOCK_US 1 #define MAX_ENTRIES 10000 @@ -67,11 +62,9 @@ struct { SEC("kprobe/try_to_wake_up") int waker(struct pt_regs *ctx) { - struct task_struct *p = (void *) PT_REGS_PARM1(ctx); + struct task_struct *p = (void *)PT_REGS_PARM1_CORE(ctx); + u32 pid = BPF_CORE_READ(p, pid); struct wokeby_t woke; - u32 pid; - - pid = _(p->pid); bpf_get_current_comm(&woke.name, sizeof(woke.name)); woke.ret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS); @@ -111,28 +104,18 @@ static inline int update_counts(void *ctx, u32 pid, u64 delta) #if 1 /* taken from /sys/kernel/tracing/events/sched/sched_switch/format */ -struct sched_switch_args { - unsigned long long pad; - char prev_comm[TASK_COMM_LEN]; - int prev_pid; - int prev_prio; - long long prev_state; - char next_comm[TASK_COMM_LEN]; - int next_pid; - int next_prio; -}; SEC("tracepoint/sched/sched_switch") -int oncpu(struct sched_switch_args *ctx) +int oncpu(struct trace_event_raw_sched_switch *ctx) { /* record previous thread sleep time */ u32 pid = ctx->prev_pid; #else -SEC("kprobe/finish_task_switch") +SEC("kprobe.multi/finish_task_switch*") int oncpu(struct pt_regs *ctx) { - struct task_struct *p = (void *) PT_REGS_PARM1(ctx); + struct task_struct *p = (void *)PT_REGS_PARM1_CORE(ctx); /* record previous thread sleep time */ - u32 pid = _(p->pid); + u32 pid = BPF_CORE_READ(p, pid); #endif u64 delta, ts, *tsp; diff --git a/samples/bpf/offwaketime_user.c b/samples/bpf/offwaketime_user.c index b6eedcb98fb9..5557b5393642 100644 --- a/samples/bpf/offwaketime_user.c +++ b/samples/bpf/offwaketime_user.c @@ -105,7 +105,7 @@ int main(int argc, char **argv) return 2; } - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); diff --git a/samples/bpf/spintest_kern.c b/samples/bpf/spintest.bpf.c index 455da77319d9..cba5a9d50783 100644 --- a/samples/bpf/spintest_kern.c +++ b/samples/bpf/spintest.bpf.c @@ -4,14 +4,15 @@ * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. 
*/ -#include <linux/skbuff.h> -#include <linux/netdevice.h> +#include "vmlinux.h" #include <linux/version.h> -#include <uapi/linux/bpf.h> -#include <uapi/linux/perf_event.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> +#ifndef PERF_MAX_STACK_DEPTH +#define PERF_MAX_STACK_DEPTH 127 +#endif + struct { __uint(type, BPF_MAP_TYPE_HASH); __type(key, long); @@ -46,20 +47,10 @@ int foo(struct pt_regs *ctx) \ } /* add kprobes to all possible *spin* functions */ -SEC("kprobe/spin_unlock")PROG(p1) -SEC("kprobe/spin_lock")PROG(p2) -SEC("kprobe/mutex_spin_on_owner")PROG(p3) -SEC("kprobe/rwsem_spin_on_owner")PROG(p4) -SEC("kprobe/spin_unlock_irqrestore")PROG(p5) -SEC("kprobe/_raw_spin_unlock_irqrestore")PROG(p6) -SEC("kprobe/_raw_spin_unlock_bh")PROG(p7) -SEC("kprobe/_raw_spin_unlock")PROG(p8) -SEC("kprobe/_raw_spin_lock_irqsave")PROG(p9) -SEC("kprobe/_raw_spin_trylock_bh")PROG(p10) -SEC("kprobe/_raw_spin_lock_irq")PROG(p11) -SEC("kprobe/_raw_spin_trylock")PROG(p12) -SEC("kprobe/_raw_spin_lock")PROG(p13) -SEC("kprobe/_raw_spin_lock_bh")PROG(p14) +SEC("kprobe.multi/spin_*lock*")PROG(spin_lock) +SEC("kprobe.multi/*_spin_on_owner")PROG(spin_on_owner) +SEC("kprobe.multi/_raw_spin_*lock*")PROG(raw_spin_lock) + /* and to inner bpf helpers */ SEC("kprobe/htab_map_update_elem")PROG(p15) SEC("kprobe/__htab_percpu_map_update_elem")PROG(p16) diff --git a/samples/bpf/spintest_user.c b/samples/bpf/spintest_user.c index aadac14f748a..55971edb1088 100644 --- a/samples/bpf/spintest_user.c +++ b/samples/bpf/spintest_user.c @@ -9,13 +9,12 @@ int main(int ac, char **argv) { - char filename[256], symbol[256]; struct bpf_object *obj = NULL; struct bpf_link *links[20]; long key, next_key, value; struct bpf_program *prog; int map_fd, i, j = 0; - const char *section; + char filename[256]; struct ksym *sym; if (load_kallsyms()) { @@ -23,7 +22,7 @@ int main(int ac, char **argv) return 2; } - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); @@ -44,20 +43,13 @@ int main(int ac, char **argv) } bpf_object__for_each_program(prog, obj) { - section = bpf_program__section_name(prog); - if (sscanf(section, "kprobe/%s", symbol) != 1) - continue; - - /* Attach prog only when symbol exists */ - if (ksym_get_addr(symbol)) { - links[j] = bpf_program__attach(prog); - if (libbpf_get_error(links[j])) { - fprintf(stderr, "bpf_program__attach failed\n"); - links[j] = NULL; - goto cleanup; - } - j++; + links[j] = bpf_program__attach(prog); + if (libbpf_get_error(links[j])) { + fprintf(stderr, "bpf_program__attach failed\n"); + links[j] = NULL; + goto cleanup; } + j++; } for (i = 0; i < 5; i++) { diff --git a/samples/bpf/test_map_in_map.bpf.c b/samples/bpf/test_map_in_map.bpf.c index 1883559e5977..9f030f9c4e1b 100644 --- a/samples/bpf/test_map_in_map.bpf.c +++ b/samples/bpf/test_map_in_map.bpf.c @@ -103,19 +103,15 @@ static __always_inline int do_inline_hash_lookup(void *inner_map, u32 port) return result ? 
*result : -ENOENT; } -SEC("kprobe/__sys_connect") -int trace_sys_connect(struct pt_regs *ctx) +SEC("ksyscall/connect") +int BPF_KSYSCALL(trace_sys_connect, unsigned int fd, struct sockaddr_in6 *in6, int addrlen) { - struct sockaddr_in6 *in6; u16 test_case, port, dst6[8]; - int addrlen, ret, inline_ret, ret_key = 0; + int ret, inline_ret, ret_key = 0; u32 port_key; void *outer_map, *inner_map; bool inline_hash = false; - in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(ctx); - addrlen = (int)PT_REGS_PARM3_CORE(ctx); - if (addrlen != sizeof(*in6)) return 0; diff --git a/samples/bpf/test_overhead_kprobe.bpf.c b/samples/bpf/test_overhead_kprobe.bpf.c index c3528731e0e1..668cf5259c60 100644 --- a/samples/bpf/test_overhead_kprobe.bpf.c +++ b/samples/bpf/test_overhead_kprobe.bpf.c @@ -8,13 +8,7 @@ #include <linux/version.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> - -#define _(P) \ - ({ \ - typeof(P) val = 0; \ - bpf_probe_read_kernel(&val, sizeof(val), &(P)); \ - val; \ - }) +#include <bpf/bpf_core_read.h> SEC("kprobe/__set_task_comm") int prog(struct pt_regs *ctx) @@ -26,14 +20,14 @@ int prog(struct pt_regs *ctx) u16 oom_score_adj; u32 pid; - tsk = (void *)PT_REGS_PARM1(ctx); + tsk = (void *)PT_REGS_PARM1_CORE(ctx); - pid = _(tsk->pid); - bpf_probe_read_kernel_str(oldcomm, sizeof(oldcomm), &tsk->comm); - bpf_probe_read_kernel_str(newcomm, sizeof(newcomm), + pid = BPF_CORE_READ(tsk, pid); + bpf_core_read_str(oldcomm, sizeof(oldcomm), &tsk->comm); + bpf_core_read_str(newcomm, sizeof(newcomm), (void *)PT_REGS_PARM2(ctx)); - signal = _(tsk->signal); - oom_score_adj = _(signal->oom_score_adj); + signal = BPF_CORE_READ(tsk, signal); + oom_score_adj = BPF_CORE_READ(signal, oom_score_adj); return 0; } diff --git a/samples/bpf/test_overhead_tp.bpf.c b/samples/bpf/test_overhead_tp.bpf.c index 8b498328e961..5dc08b587978 100644 --- a/samples/bpf/test_overhead_tp.bpf.c +++ b/samples/bpf/test_overhead_tp.bpf.c @@ -8,40 +8,15 @@ #include <bpf/bpf_helpers.h> /* from /sys/kernel/tracing/events/task/task_rename/format */ -struct task_rename { - __u64 pad; - __u32 pid; - char oldcomm[TASK_COMM_LEN]; - char newcomm[TASK_COMM_LEN]; - __u16 oom_score_adj; -}; SEC("tracepoint/task/task_rename") -int prog(struct task_rename *ctx) +int prog(struct trace_event_raw_task_rename *ctx) { return 0; } /* from /sys/kernel/tracing/events/fib/fib_table_lookup/format */ -struct fib_table_lookup { - __u64 pad; - __u32 tb_id; - int err; - int oif; - int iif; - __u8 proto; - __u8 tos; - __u8 scope; - __u8 flags; - __u8 src[4]; - __u8 dst[4]; - __u8 gw4[4]; - __u8 gw6[16]; - __u16 sport; - __u16 dport; - char name[16]; -}; SEC("tracepoint/fib/fib_table_lookup") -int prog2(struct fib_table_lookup *ctx) +int prog2(struct trace_event_raw_fib_table_lookup *ctx) { return 0; } diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1.bpf.c index ef30d2b353b0..0ab39d76ff8f 100644 --- a/samples/bpf/tracex1_kern.c +++ b/samples/bpf/tracex1.bpf.c @@ -4,42 +4,35 @@ * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. 
*/ -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <uapi/linux/bpf.h> +#include "vmlinux.h" +#include "net_shared.h" #include <linux/version.h> #include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> #include <bpf/bpf_tracing.h> -#define _(P) \ - ({ \ - typeof(P) val = 0; \ - bpf_probe_read_kernel(&val, sizeof(val), &(P)); \ - val; \ - }) - /* kprobe is NOT a stable ABI * kernel functions can be removed, renamed or completely change semantics. * Number of arguments and their positions can change, etc. * In such case this bpf+kprobe example will no longer be meaningful */ -SEC("kprobe/__netif_receive_skb_core") +SEC("kprobe.multi/__netif_receive_skb_core*") int bpf_prog1(struct pt_regs *ctx) { /* attaches to kprobe __netif_receive_skb_core, * looks for packets on loobpack device and prints them + * (wildcard is used for avoiding symbol mismatch due to optimization) */ char devname[IFNAMSIZ]; struct net_device *dev; struct sk_buff *skb; int len; - /* non-portable! works for the given kernel only */ - bpf_probe_read_kernel(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx)); - dev = _(skb->dev); - len = _(skb->len); + bpf_core_read(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx)); + dev = BPF_CORE_READ(skb, dev); + len = BPF_CORE_READ(skb, len); - bpf_probe_read_kernel(devname, sizeof(devname), dev->name); + BPF_CORE_READ_STR_INTO(&devname, dev, name); if (devname[0] == 'l' && devname[1] == 'o') { char fmt[] = "skb %p len %d\n"; diff --git a/samples/bpf/tracex1_user.c b/samples/bpf/tracex1_user.c index 9d4adb7fd834..8c3d9043a2b6 100644 --- a/samples/bpf/tracex1_user.c +++ b/samples/bpf/tracex1_user.c @@ -12,7 +12,7 @@ int main(int ac, char **argv) char filename[256]; FILE *f; - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); diff --git a/samples/bpf/tracex3_kern.c b/samples/bpf/tracex3.bpf.c index bde6591cb20c..41f37966f5f5 100644 --- a/samples/bpf/tracex3_kern.c +++ b/samples/bpf/tracex3.bpf.c @@ -4,13 +4,17 @@ * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ -#include <linux/skbuff.h> -#include <linux/netdevice.h> +#include "vmlinux.h" #include <linux/version.h> -#include <uapi/linux/bpf.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> +struct start_key { + dev_t dev; + u32 _pad; + sector_t sector; +}; + struct { __uint(type, BPF_MAP_TYPE_HASH); __type(key, long); @@ -18,16 +22,17 @@ struct { __uint(max_entries, 4096); } my_map SEC(".maps"); -/* kprobe is NOT a stable ABI. 
If kernel internals change this bpf+kprobe - * example will no longer be meaningful - */ -SEC("kprobe/blk_mq_start_request") -int bpf_prog1(struct pt_regs *ctx) +/* from /sys/kernel/tracing/events/block/block_io_start/format */ +SEC("tracepoint/block/block_io_start") +int bpf_prog1(struct trace_event_raw_block_rq *ctx) { - long rq = PT_REGS_PARM1(ctx); u64 val = bpf_ktime_get_ns(); + struct start_key key = { + .dev = ctx->dev, + .sector = ctx->sector + }; - bpf_map_update_elem(&my_map, &rq, &val, BPF_ANY); + bpf_map_update_elem(&my_map, &key, &val, BPF_ANY); return 0; } @@ -49,21 +54,26 @@ struct { __uint(max_entries, SLOTS); } lat_map SEC(".maps"); -SEC("kprobe/__blk_account_io_done") -int bpf_prog2(struct pt_regs *ctx) +/* from /sys/kernel/tracing/events/block/block_io_done/format */ +SEC("tracepoint/block/block_io_done") +int bpf_prog2(struct trace_event_raw_block_rq *ctx) { - long rq = PT_REGS_PARM1(ctx); + struct start_key key = { + .dev = ctx->dev, + .sector = ctx->sector + }; + u64 *value, l, base; u32 index; - value = bpf_map_lookup_elem(&my_map, &rq); + value = bpf_map_lookup_elem(&my_map, &key); if (!value) return 0; u64 cur_time = bpf_ktime_get_ns(); u64 delta = cur_time - *value; - bpf_map_delete_elem(&my_map, &rq); + bpf_map_delete_elem(&my_map, &key); /* the lines below are computing index = log10(delta)*10 * using integer arithmetic diff --git a/samples/bpf/tracex3_user.c b/samples/bpf/tracex3_user.c index d5eebace31e6..1002eb0323b4 100644 --- a/samples/bpf/tracex3_user.c +++ b/samples/bpf/tracex3_user.c @@ -125,7 +125,7 @@ int main(int ac, char **argv) } } - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); diff --git a/samples/bpf/tracex4_kern.c b/samples/bpf/tracex4.bpf.c index eb0f8fdd14bf..ca826750901a 100644 --- a/samples/bpf/tracex4_kern.c +++ b/samples/bpf/tracex4.bpf.c @@ -4,9 +4,8 @@ * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ -#include <linux/ptrace.h> +#include "vmlinux.h" #include <linux/version.h> -#include <uapi/linux/bpf.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> diff --git a/samples/bpf/tracex4_user.c b/samples/bpf/tracex4_user.c index dee8f0a091ba..a5145ad72cbf 100644 --- a/samples/bpf/tracex4_user.c +++ b/samples/bpf/tracex4_user.c @@ -53,7 +53,7 @@ int main(int ac, char **argv) char filename[256]; int map_fd, j = 0; - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5.bpf.c index 64a1f7550d7e..4d3d6c9b25fa 100644 --- a/samples/bpf/tracex5_kern.c +++ b/samples/bpf/tracex5.bpf.c @@ -4,15 +4,15 @@ * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. 
*/ -#include <linux/ptrace.h> +#include "vmlinux.h" +#include "syscall_nrs.h" #include <linux/version.h> -#include <uapi/linux/bpf.h> -#include <uapi/linux/seccomp.h> #include <uapi/linux/unistd.h> -#include "syscall_nrs.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> +#define __stringify(x) #x #define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F struct { @@ -47,7 +47,7 @@ PROG(SYS__NR_write)(struct pt_regs *ctx) { struct seccomp_data sd; - bpf_probe_read_kernel(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx)); + bpf_core_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx)); if (sd.args[2] == 512) { char fmt[] = "write(fd=%d, buf=%p, size=%d)\n"; bpf_trace_printk(fmt, sizeof(fmt), @@ -60,7 +60,7 @@ PROG(SYS__NR_read)(struct pt_regs *ctx) { struct seccomp_data sd; - bpf_probe_read_kernel(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx)); + bpf_core_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx)); if (sd.args[2] > 128 && sd.args[2] <= 1024) { char fmt[] = "read(fd=%d, buf=%p, size=%d)\n"; bpf_trace_printk(fmt, sizeof(fmt), diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c index 9d7d79f0d47d..7e2d8397fb98 100644 --- a/samples/bpf/tracex5_user.c +++ b/samples/bpf/tracex5_user.c @@ -42,7 +42,7 @@ int main(int ac, char **argv) char filename[256]; FILE *f; - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); diff --git a/samples/bpf/tracex6_kern.c b/samples/bpf/tracex6.bpf.c index acad5712d8b4..9b23b4737cfb 100644 --- a/samples/bpf/tracex6_kern.c +++ b/samples/bpf/tracex6.bpf.c @@ -1,7 +1,8 @@ -#include <linux/ptrace.h> +#include "vmlinux.h" #include <linux/version.h> -#include <uapi/linux/bpf.h> #include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); @@ -45,13 +46,24 @@ int bpf_prog1(struct pt_regs *ctx) return 0; } -SEC("kprobe/htab_map_lookup_elem") -int bpf_prog2(struct pt_regs *ctx) +/* + * Since *_map_lookup_elem can't be expected to trigger bpf programs + * due to potential deadlocks (bpf_disable_instrumentation), this bpf + * program will be attached to bpf_map_copy_value (which is called + * from map_lookup_elem) and will only filter the hashtable type. 
+ */ +SEC("kprobe/bpf_map_copy_value") +int BPF_KPROBE(bpf_prog2, struct bpf_map *map) { u32 key = bpf_get_smp_processor_id(); struct bpf_perf_event_value *val, buf; + enum bpf_map_type type; int error; + type = BPF_CORE_READ(map, map_type); + if (type != BPF_MAP_TYPE_HASH) + return 0; + error = bpf_perf_event_read_value(&counters, key, &buf, sizeof(buf)); if (error) return 0; diff --git a/samples/bpf/tracex6_user.c b/samples/bpf/tracex6_user.c index 8e83bf2a84a4..ae811ac83bc2 100644 --- a/samples/bpf/tracex6_user.c +++ b/samples/bpf/tracex6_user.c @@ -180,7 +180,7 @@ int main(int argc, char **argv) char filename[256]; int i = 0; - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); diff --git a/samples/bpf/tracex7_kern.c b/samples/bpf/tracex7.bpf.c index c5a92df8ac31..ab8d6704a5a4 100644 --- a/samples/bpf/tracex7_kern.c +++ b/samples/bpf/tracex7.bpf.c @@ -1,5 +1,4 @@ -#include <uapi/linux/ptrace.h> -#include <uapi/linux/bpf.h> +#include "vmlinux.h" #include <linux/version.h> #include <bpf/bpf_helpers.h> diff --git a/samples/bpf/tracex7_user.c b/samples/bpf/tracex7_user.c index 8be7ce18d3ba..b10b5e03a226 100644 --- a/samples/bpf/tracex7_user.c +++ b/samples/bpf/tracex7_user.c @@ -19,7 +19,7 @@ int main(int argc, char **argv) return 0; } - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); diff --git a/samples/bpf/xdp1_kern.c b/samples/bpf/xdp1_kern.c deleted file mode 100644 index d91f27cbcfa9..000000000000 --- a/samples/bpf/xdp1_kern.c +++ /dev/null @@ -1,100 +0,0 @@ -/* Copyright (c) 2016 PLUMgrid - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. 
- */ -#define KBUILD_MODNAME "foo" -#include <uapi/linux/bpf.h> -#include <linux/in.h> -#include <linux/if_ether.h> -#include <linux/if_packet.h> -#include <linux/if_vlan.h> -#include <linux/ip.h> -#include <linux/ipv6.h> -#include <bpf/bpf_helpers.h> - -struct { - __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); - __type(key, u32); - __type(value, long); - __uint(max_entries, 256); -} rxcnt SEC(".maps"); - -static int parse_ipv4(void *data, u64 nh_off, void *data_end) -{ - struct iphdr *iph = data + nh_off; - - if (iph + 1 > data_end) - return 0; - return iph->protocol; -} - -static int parse_ipv6(void *data, u64 nh_off, void *data_end) -{ - struct ipv6hdr *ip6h = data + nh_off; - - if (ip6h + 1 > data_end) - return 0; - return ip6h->nexthdr; -} - -#define XDPBUFSIZE 60 -SEC("xdp.frags") -int xdp_prog1(struct xdp_md *ctx) -{ - __u8 pkt[XDPBUFSIZE] = {}; - void *data_end = &pkt[XDPBUFSIZE-1]; - void *data = pkt; - struct ethhdr *eth = data; - int rc = XDP_DROP; - long *value; - u16 h_proto; - u64 nh_off; - u32 ipproto; - - if (bpf_xdp_load_bytes(ctx, 0, pkt, sizeof(pkt))) - return rc; - - nh_off = sizeof(*eth); - if (data + nh_off > data_end) - return rc; - - h_proto = eth->h_proto; - - /* Handle VLAN tagged packet */ - if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) { - struct vlan_hdr *vhdr; - - vhdr = data + nh_off; - nh_off += sizeof(struct vlan_hdr); - if (data + nh_off > data_end) - return rc; - h_proto = vhdr->h_vlan_encapsulated_proto; - } - /* Handle double VLAN tagged packet */ - if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) { - struct vlan_hdr *vhdr; - - vhdr = data + nh_off; - nh_off += sizeof(struct vlan_hdr); - if (data + nh_off > data_end) - return rc; - h_proto = vhdr->h_vlan_encapsulated_proto; - } - - if (h_proto == htons(ETH_P_IP)) - ipproto = parse_ipv4(data, nh_off, data_end); - else if (h_proto == htons(ETH_P_IPV6)) - ipproto = parse_ipv6(data, nh_off, data_end); - else - ipproto = 0; - - value = bpf_map_lookup_elem(&rxcnt, &ipproto); - if (value) - *value += 1; - - return rc; -} - -char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c deleted file mode 100644 index f05e797013e9..000000000000 --- a/samples/bpf/xdp1_user.c +++ /dev/null @@ -1,166 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2016 PLUMgrid - */ -#include <linux/bpf.h> -#include <linux/if_link.h> -#include <assert.h> -#include <errno.h> -#include <signal.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <unistd.h> -#include <libgen.h> -#include <net/if.h> - -#include "bpf_util.h" -#include <bpf/bpf.h> -#include <bpf/libbpf.h> - -static int ifindex; -static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; -static __u32 prog_id; - -static void int_exit(int sig) -{ - __u32 curr_prog_id = 0; - - if (bpf_xdp_query_id(ifindex, xdp_flags, &curr_prog_id)) { - printf("bpf_xdp_query_id failed\n"); - exit(1); - } - if (prog_id == curr_prog_id) - bpf_xdp_detach(ifindex, xdp_flags, NULL); - else if (!curr_prog_id) - printf("couldn't find a prog id on a given interface\n"); - else - printf("program on interface changed, not removing\n"); - exit(0); -} - -/* simple per-protocol drop counter - */ -static void poll_stats(int map_fd, int interval) -{ - unsigned int nr_cpus = bpf_num_possible_cpus(); - __u64 values[nr_cpus], prev[UINT8_MAX] = { 0 }; - int i; - - while (1) { - __u32 key = UINT32_MAX; - - sleep(interval); - - while (bpf_map_get_next_key(map_fd, &key, &key) == 0) { - __u64 sum = 0; 
- - assert(bpf_map_lookup_elem(map_fd, &key, values) == 0); - for (i = 0; i < nr_cpus; i++) - sum += values[i]; - if (sum > prev[key]) - printf("proto %u: %10llu pkt/s\n", - key, (sum - prev[key]) / interval); - prev[key] = sum; - } - } -} - -static void usage(const char *prog) -{ - fprintf(stderr, - "usage: %s [OPTS] IFACE\n\n" - "OPTS:\n" - " -S use skb-mode\n" - " -N enforce native mode\n" - " -F force loading prog\n", - prog); -} - -int main(int argc, char **argv) -{ - struct bpf_prog_info info = {}; - __u32 info_len = sizeof(info); - const char *optstr = "FSN"; - int prog_fd, map_fd, opt; - struct bpf_program *prog; - struct bpf_object *obj; - struct bpf_map *map; - char filename[256]; - int err; - - while ((opt = getopt(argc, argv, optstr)) != -1) { - switch (opt) { - case 'S': - xdp_flags |= XDP_FLAGS_SKB_MODE; - break; - case 'N': - /* default, set below */ - break; - case 'F': - xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST; - break; - default: - usage(basename(argv[0])); - return 1; - } - } - - if (!(xdp_flags & XDP_FLAGS_SKB_MODE)) - xdp_flags |= XDP_FLAGS_DRV_MODE; - - if (optind == argc) { - usage(basename(argv[0])); - return 1; - } - - ifindex = if_nametoindex(argv[optind]); - if (!ifindex) { - perror("if_nametoindex"); - return 1; - } - - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); - obj = bpf_object__open_file(filename, NULL); - if (libbpf_get_error(obj)) - return 1; - - prog = bpf_object__next_program(obj, NULL); - bpf_program__set_type(prog, BPF_PROG_TYPE_XDP); - - err = bpf_object__load(obj); - if (err) - return 1; - - prog_fd = bpf_program__fd(prog); - - map = bpf_object__next_map(obj, NULL); - if (!map) { - printf("finding a map in obj file failed\n"); - return 1; - } - map_fd = bpf_map__fd(map); - - if (!prog_fd) { - printf("bpf_prog_load_xattr: %s\n", strerror(errno)); - return 1; - } - - signal(SIGINT, int_exit); - signal(SIGTERM, int_exit); - - if (bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL) < 0) { - printf("link set xdp fd failed\n"); - return 1; - } - - err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); - if (err) { - printf("can't get prog info - %s\n", strerror(errno)); - return err; - } - prog_id = info.id; - - poll_stats(map_fd, 1); - - return 0; -} diff --git a/samples/bpf/xdp2_kern.c b/samples/bpf/xdp2_kern.c deleted file mode 100644 index 8bca674451ed..000000000000 --- a/samples/bpf/xdp2_kern.c +++ /dev/null @@ -1,125 +0,0 @@ -/* Copyright (c) 2016 PLUMgrid - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. 
- */ -#define KBUILD_MODNAME "foo" -#include <uapi/linux/bpf.h> -#include <linux/in.h> -#include <linux/if_ether.h> -#include <linux/if_packet.h> -#include <linux/if_vlan.h> -#include <linux/ip.h> -#include <linux/ipv6.h> -#include <bpf/bpf_helpers.h> - -struct { - __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); - __type(key, u32); - __type(value, long); - __uint(max_entries, 256); -} rxcnt SEC(".maps"); - -static void swap_src_dst_mac(void *data) -{ - unsigned short *p = data; - unsigned short dst[3]; - - dst[0] = p[0]; - dst[1] = p[1]; - dst[2] = p[2]; - p[0] = p[3]; - p[1] = p[4]; - p[2] = p[5]; - p[3] = dst[0]; - p[4] = dst[1]; - p[5] = dst[2]; -} - -static int parse_ipv4(void *data, u64 nh_off, void *data_end) -{ - struct iphdr *iph = data + nh_off; - - if (iph + 1 > data_end) - return 0; - return iph->protocol; -} - -static int parse_ipv6(void *data, u64 nh_off, void *data_end) -{ - struct ipv6hdr *ip6h = data + nh_off; - - if (ip6h + 1 > data_end) - return 0; - return ip6h->nexthdr; -} - -#define XDPBUFSIZE 60 -SEC("xdp.frags") -int xdp_prog1(struct xdp_md *ctx) -{ - __u8 pkt[XDPBUFSIZE] = {}; - void *data_end = &pkt[XDPBUFSIZE-1]; - void *data = pkt; - struct ethhdr *eth = data; - int rc = XDP_DROP; - long *value; - u16 h_proto; - u64 nh_off; - u32 ipproto; - - if (bpf_xdp_load_bytes(ctx, 0, pkt, sizeof(pkt))) - return rc; - - nh_off = sizeof(*eth); - if (data + nh_off > data_end) - return rc; - - h_proto = eth->h_proto; - - /* Handle VLAN tagged packet */ - if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) { - struct vlan_hdr *vhdr; - - vhdr = data + nh_off; - nh_off += sizeof(struct vlan_hdr); - if (data + nh_off > data_end) - return rc; - h_proto = vhdr->h_vlan_encapsulated_proto; - } - /* Handle double VLAN tagged packet */ - if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) { - struct vlan_hdr *vhdr; - - vhdr = data + nh_off; - nh_off += sizeof(struct vlan_hdr); - if (data + nh_off > data_end) - return rc; - h_proto = vhdr->h_vlan_encapsulated_proto; - } - - if (h_proto == htons(ETH_P_IP)) - ipproto = parse_ipv4(data, nh_off, data_end); - else if (h_proto == htons(ETH_P_IPV6)) - ipproto = parse_ipv6(data, nh_off, data_end); - else - ipproto = 0; - - value = bpf_map_lookup_elem(&rxcnt, &ipproto); - if (value) - *value += 1; - - if (ipproto == IPPROTO_UDP) { - swap_src_dst_mac(data); - - if (bpf_xdp_store_bytes(ctx, 0, pkt, sizeof(pkt))) - return rc; - - rc = XDP_TX; - } - - return rc; -} - -char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/xdp_monitor.bpf.c b/samples/bpf/xdp_monitor.bpf.c deleted file mode 100644 index cfb41e2205f4..000000000000 --- a/samples/bpf/xdp_monitor.bpf.c +++ /dev/null @@ -1,8 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc. - * - * XDP monitor tool, based on tracepoints - */ -#include "xdp_sample.bpf.h" - -char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c deleted file mode 100644 index 58015eb2ffae..000000000000 --- a/samples/bpf/xdp_monitor_user.c +++ /dev/null @@ -1,118 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc. 
*/ -static const char *__doc__= -"XDP monitor tool, based on tracepoints\n"; - -static const char *__doc_err_only__= -" NOTICE: Only tracking XDP redirect errors\n" -" Enable redirect success stats via '-s/--stats'\n" -" (which comes with a per packet processing overhead)\n"; - -#include <errno.h> -#include <stdio.h> -#include <stdlib.h> -#include <stdbool.h> -#include <stdint.h> -#include <string.h> -#include <ctype.h> -#include <unistd.h> -#include <locale.h> -#include <getopt.h> -#include <net/if.h> -#include <time.h> -#include <signal.h> -#include <bpf/bpf.h> -#include <bpf/libbpf.h> -#include "bpf_util.h" -#include "xdp_sample_user.h" -#include "xdp_monitor.skel.h" - -static int mask = SAMPLE_REDIRECT_ERR_CNT | SAMPLE_CPUMAP_ENQUEUE_CNT | - SAMPLE_CPUMAP_KTHREAD_CNT | SAMPLE_EXCEPTION_CNT | - SAMPLE_DEVMAP_XMIT_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI; - -DEFINE_SAMPLE_INIT(xdp_monitor); - -static const struct option long_options[] = { - { "help", no_argument, NULL, 'h' }, - { "stats", no_argument, NULL, 's' }, - { "interval", required_argument, NULL, 'i' }, - { "verbose", no_argument, NULL, 'v' }, - {} -}; - -int main(int argc, char **argv) -{ - unsigned long interval = 2; - int ret = EXIT_FAIL_OPTION; - struct xdp_monitor *skel; - bool errors_only = true; - int longindex = 0, opt; - bool error = true; - - /* Parse commands line args */ - while ((opt = getopt_long(argc, argv, "si:vh", - long_options, &longindex)) != -1) { - switch (opt) { - case 's': - errors_only = false; - mask |= SAMPLE_REDIRECT_CNT; - break; - case 'i': - interval = strtoul(optarg, NULL, 0); - break; - case 'v': - sample_switch_mode(); - break; - case 'h': - error = false; - default: - sample_usage(argv, long_options, __doc__, mask, error); - return ret; - } - } - - skel = xdp_monitor__open(); - if (!skel) { - fprintf(stderr, "Failed to xdp_monitor__open: %s\n", - strerror(errno)); - ret = EXIT_FAIL_BPF; - goto end; - } - - ret = sample_init_pre_load(skel); - if (ret < 0) { - fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret)); - ret = EXIT_FAIL_BPF; - goto end_destroy; - } - - ret = xdp_monitor__load(skel); - if (ret < 0) { - fprintf(stderr, "Failed to xdp_monitor__load: %s\n", strerror(errno)); - ret = EXIT_FAIL_BPF; - goto end_destroy; - } - - ret = sample_init(skel, mask); - if (ret < 0) { - fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret)); - ret = EXIT_FAIL_BPF; - goto end_destroy; - } - - if (errors_only) - printf("%s", __doc_err_only__); - - ret = sample_run(interval, NULL, NULL); - if (ret < 0) { - fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret)); - ret = EXIT_FAIL; - goto end_destroy; - } - ret = EXIT_OK; -end_destroy: - xdp_monitor__destroy(skel); -end: - sample_exit(ret); -} diff --git a/samples/bpf/xdp_redirect.bpf.c b/samples/bpf/xdp_redirect.bpf.c deleted file mode 100644 index 7c02bacfe96b..000000000000 --- a/samples/bpf/xdp_redirect.bpf.c +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2016 John Fastabend <[email protected]> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ -#include "vmlinux.h" -#include "xdp_sample.bpf.h" -#include "xdp_sample_shared.h" - -const volatile int ifindex_out; - -SEC("xdp") -int xdp_redirect_prog(struct xdp_md *ctx) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - u32 key = bpf_get_smp_processor_id(); - struct ethhdr *eth = data; - struct datarec *rec; - u64 nh_off; - - nh_off = sizeof(*eth); - if (data + nh_off > data_end) - return XDP_DROP; - - rec = bpf_map_lookup_elem(&rx_cnt, &key); - if (!rec) - return XDP_PASS; - NO_TEAR_INC(rec->processed); - - swap_src_dst_mac(data); - return bpf_redirect(ifindex_out, 0); -} - -/* Redirect require an XDP bpf_prog loaded on the TX device */ -SEC("xdp") -int xdp_redirect_dummy_prog(struct xdp_md *ctx) -{ - return XDP_PASS; -} - -char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/xdp_redirect_cpu.bpf.c b/samples/bpf/xdp_redirect_cpu.bpf.c deleted file mode 100644 index 87c54bfdbb70..000000000000 --- a/samples/bpf/xdp_redirect_cpu.bpf.c +++ /dev/null @@ -1,539 +0,0 @@ -/* XDP redirect to CPUs via cpumap (BPF_MAP_TYPE_CPUMAP) - * - * GPLv2, Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc. - */ -#include "vmlinux.h" -#include "xdp_sample.bpf.h" -#include "xdp_sample_shared.h" -#include "hash_func01.h" - -/* Special map type that can XDP_REDIRECT frames to another CPU */ -struct { - __uint(type, BPF_MAP_TYPE_CPUMAP); - __uint(key_size, sizeof(u32)); - __uint(value_size, sizeof(struct bpf_cpumap_val)); -} cpu_map SEC(".maps"); - -/* Set of maps controlling available CPU, and for iterating through - * selectable redirect CPUs. - */ -struct { - __uint(type, BPF_MAP_TYPE_ARRAY); - __type(key, u32); - __type(value, u32); -} cpus_available SEC(".maps"); - -struct { - __uint(type, BPF_MAP_TYPE_ARRAY); - __type(key, u32); - __type(value, u32); - __uint(max_entries, 1); -} cpus_count SEC(".maps"); - -struct { - __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); - __type(key, u32); - __type(value, u32); - __uint(max_entries, 1); -} cpus_iterator SEC(".maps"); - -struct { - __uint(type, BPF_MAP_TYPE_DEVMAP); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(struct bpf_devmap_val)); - __uint(max_entries, 1); -} tx_port SEC(".maps"); - -char tx_mac_addr[ETH_ALEN]; - -/* Helper parse functions */ - -static __always_inline -bool parse_eth(struct ethhdr *eth, void *data_end, - u16 *eth_proto, u64 *l3_offset) -{ - u16 eth_type; - u64 offset; - - offset = sizeof(*eth); - if ((void *)eth + offset > data_end) - return false; - - eth_type = eth->h_proto; - - /* Skip non 802.3 Ethertypes */ - if (__builtin_expect(bpf_ntohs(eth_type) < ETH_P_802_3_MIN, 0)) - return false; - - /* Handle VLAN tagged packet */ - if (eth_type == bpf_htons(ETH_P_8021Q) || - eth_type == bpf_htons(ETH_P_8021AD)) { - struct vlan_hdr *vlan_hdr; - - vlan_hdr = (void *)eth + offset; - offset += sizeof(*vlan_hdr); - if ((void *)eth + offset > data_end) - return false; - eth_type = vlan_hdr->h_vlan_encapsulated_proto; - } - /* Handle double VLAN tagged packet */ - if (eth_type == bpf_htons(ETH_P_8021Q) || - eth_type == bpf_htons(ETH_P_8021AD)) { - struct vlan_hdr *vlan_hdr; - - vlan_hdr = (void *)eth + offset; - offset += sizeof(*vlan_hdr); - if ((void *)eth + offset > data_end) - return false; - eth_type = vlan_hdr->h_vlan_encapsulated_proto; - } - - *eth_proto = bpf_ntohs(eth_type); - *l3_offset = offset; - return true; -} - -static __always_inline -u16 get_dest_port_ipv4_udp(struct xdp_md *ctx, u64 nh_off) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = 
(void *)(long)ctx->data; - struct iphdr *iph = data + nh_off; - struct udphdr *udph; - - if (iph + 1 > data_end) - return 0; - if (!(iph->protocol == IPPROTO_UDP)) - return 0; - - udph = (void *)(iph + 1); - if (udph + 1 > data_end) - return 0; - - return bpf_ntohs(udph->dest); -} - -static __always_inline -int get_proto_ipv4(struct xdp_md *ctx, u64 nh_off) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - struct iphdr *iph = data + nh_off; - - if (iph + 1 > data_end) - return 0; - return iph->protocol; -} - -static __always_inline -int get_proto_ipv6(struct xdp_md *ctx, u64 nh_off) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - struct ipv6hdr *ip6h = data + nh_off; - - if (ip6h + 1 > data_end) - return 0; - return ip6h->nexthdr; -} - -SEC("xdp") -int xdp_prognum0_no_touch(struct xdp_md *ctx) -{ - u32 key = bpf_get_smp_processor_id(); - struct datarec *rec; - u32 *cpu_selected; - u32 cpu_dest = 0; - u32 key0 = 0; - - /* Only use first entry in cpus_available */ - cpu_selected = bpf_map_lookup_elem(&cpus_available, &key0); - if (!cpu_selected) - return XDP_ABORTED; - cpu_dest = *cpu_selected; - - rec = bpf_map_lookup_elem(&rx_cnt, &key); - if (!rec) - return XDP_PASS; - NO_TEAR_INC(rec->processed); - - if (cpu_dest >= nr_cpus) { - NO_TEAR_INC(rec->issue); - return XDP_ABORTED; - } - return bpf_redirect_map(&cpu_map, cpu_dest, 0); -} - -SEC("xdp") -int xdp_prognum1_touch_data(struct xdp_md *ctx) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - u32 key = bpf_get_smp_processor_id(); - struct ethhdr *eth = data; - struct datarec *rec; - u32 *cpu_selected; - u32 cpu_dest = 0; - u32 key0 = 0; - u16 eth_type; - - /* Only use first entry in cpus_available */ - cpu_selected = bpf_map_lookup_elem(&cpus_available, &key0); - if (!cpu_selected) - return XDP_ABORTED; - cpu_dest = *cpu_selected; - - /* Validate packet length is minimum Eth header size */ - if (eth + 1 > data_end) - return XDP_ABORTED; - - rec = bpf_map_lookup_elem(&rx_cnt, &key); - if (!rec) - return XDP_PASS; - NO_TEAR_INC(rec->processed); - - /* Read packet data, and use it (drop non 802.3 Ethertypes) */ - eth_type = eth->h_proto; - if (bpf_ntohs(eth_type) < ETH_P_802_3_MIN) { - NO_TEAR_INC(rec->dropped); - return XDP_DROP; - } - - if (cpu_dest >= nr_cpus) { - NO_TEAR_INC(rec->issue); - return XDP_ABORTED; - } - return bpf_redirect_map(&cpu_map, cpu_dest, 0); -} - -SEC("xdp") -int xdp_prognum2_round_robin(struct xdp_md *ctx) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - u32 key = bpf_get_smp_processor_id(); - struct datarec *rec; - u32 cpu_dest = 0; - u32 key0 = 0; - - u32 *cpu_selected; - u32 *cpu_iterator; - u32 *cpu_max; - u32 cpu_idx; - - cpu_max = bpf_map_lookup_elem(&cpus_count, &key0); - if (!cpu_max) - return XDP_ABORTED; - - cpu_iterator = bpf_map_lookup_elem(&cpus_iterator, &key0); - if (!cpu_iterator) - return XDP_ABORTED; - cpu_idx = *cpu_iterator; - - *cpu_iterator += 1; - if (*cpu_iterator == *cpu_max) - *cpu_iterator = 0; - - cpu_selected = bpf_map_lookup_elem(&cpus_available, &cpu_idx); - if (!cpu_selected) - return XDP_ABORTED; - cpu_dest = *cpu_selected; - - rec = bpf_map_lookup_elem(&rx_cnt, &key); - if (!rec) - return XDP_PASS; - NO_TEAR_INC(rec->processed); - - if (cpu_dest >= nr_cpus) { - NO_TEAR_INC(rec->issue); - return XDP_ABORTED; - } - return bpf_redirect_map(&cpu_map, cpu_dest, 0); -} - -SEC("xdp") -int 
xdp_prognum3_proto_separate(struct xdp_md *ctx) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - u32 key = bpf_get_smp_processor_id(); - struct ethhdr *eth = data; - u8 ip_proto = IPPROTO_UDP; - struct datarec *rec; - u16 eth_proto = 0; - u64 l3_offset = 0; - u32 cpu_dest = 0; - u32 *cpu_lookup; - u32 cpu_idx = 0; - - rec = bpf_map_lookup_elem(&rx_cnt, &key); - if (!rec) - return XDP_PASS; - NO_TEAR_INC(rec->processed); - - if (!(parse_eth(eth, data_end, ð_proto, &l3_offset))) - return XDP_PASS; /* Just skip */ - - /* Extract L4 protocol */ - switch (eth_proto) { - case ETH_P_IP: - ip_proto = get_proto_ipv4(ctx, l3_offset); - break; - case ETH_P_IPV6: - ip_proto = get_proto_ipv6(ctx, l3_offset); - break; - case ETH_P_ARP: - cpu_idx = 0; /* ARP packet handled on separate CPU */ - break; - default: - cpu_idx = 0; - } - - /* Choose CPU based on L4 protocol */ - switch (ip_proto) { - case IPPROTO_ICMP: - case IPPROTO_ICMPV6: - cpu_idx = 2; - break; - case IPPROTO_TCP: - cpu_idx = 0; - break; - case IPPROTO_UDP: - cpu_idx = 1; - break; - default: - cpu_idx = 0; - } - - cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx); - if (!cpu_lookup) - return XDP_ABORTED; - cpu_dest = *cpu_lookup; - - if (cpu_dest >= nr_cpus) { - NO_TEAR_INC(rec->issue); - return XDP_ABORTED; - } - return bpf_redirect_map(&cpu_map, cpu_dest, 0); -} - -SEC("xdp") -int xdp_prognum4_ddos_filter_pktgen(struct xdp_md *ctx) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - u32 key = bpf_get_smp_processor_id(); - struct ethhdr *eth = data; - u8 ip_proto = IPPROTO_UDP; - struct datarec *rec; - u16 eth_proto = 0; - u64 l3_offset = 0; - u32 cpu_dest = 0; - u32 *cpu_lookup; - u32 cpu_idx = 0; - u16 dest_port; - - rec = bpf_map_lookup_elem(&rx_cnt, &key); - if (!rec) - return XDP_PASS; - NO_TEAR_INC(rec->processed); - - if (!(parse_eth(eth, data_end, ð_proto, &l3_offset))) - return XDP_PASS; /* Just skip */ - - /* Extract L4 protocol */ - switch (eth_proto) { - case ETH_P_IP: - ip_proto = get_proto_ipv4(ctx, l3_offset); - break; - case ETH_P_IPV6: - ip_proto = get_proto_ipv6(ctx, l3_offset); - break; - case ETH_P_ARP: - cpu_idx = 0; /* ARP packet handled on separate CPU */ - break; - default: - cpu_idx = 0; - } - - /* Choose CPU based on L4 protocol */ - switch (ip_proto) { - case IPPROTO_ICMP: - case IPPROTO_ICMPV6: - cpu_idx = 2; - break; - case IPPROTO_TCP: - cpu_idx = 0; - break; - case IPPROTO_UDP: - cpu_idx = 1; - /* DDoS filter UDP port 9 (pktgen) */ - dest_port = get_dest_port_ipv4_udp(ctx, l3_offset); - if (dest_port == 9) { - NO_TEAR_INC(rec->dropped); - return XDP_DROP; - } - break; - default: - cpu_idx = 0; - } - - cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx); - if (!cpu_lookup) - return XDP_ABORTED; - cpu_dest = *cpu_lookup; - - if (cpu_dest >= nr_cpus) { - NO_TEAR_INC(rec->issue); - return XDP_ABORTED; - } - return bpf_redirect_map(&cpu_map, cpu_dest, 0); -} - -/* Hashing initval */ -#define INITVAL 15485863 - -static __always_inline -u32 get_ipv4_hash_ip_pair(struct xdp_md *ctx, u64 nh_off) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - struct iphdr *iph = data + nh_off; - u32 cpu_hash; - - if (iph + 1 > data_end) - return 0; - - cpu_hash = iph->saddr + iph->daddr; - cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + iph->protocol); - - return cpu_hash; -} - -static __always_inline -u32 get_ipv6_hash_ip_pair(struct xdp_md *ctx, u64 nh_off) -{ - void *data_end 
= (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - struct ipv6hdr *ip6h = data + nh_off; - u32 cpu_hash; - - if (ip6h + 1 > data_end) - return 0; - - cpu_hash = ip6h->saddr.in6_u.u6_addr32[0] + ip6h->daddr.in6_u.u6_addr32[0]; - cpu_hash += ip6h->saddr.in6_u.u6_addr32[1] + ip6h->daddr.in6_u.u6_addr32[1]; - cpu_hash += ip6h->saddr.in6_u.u6_addr32[2] + ip6h->daddr.in6_u.u6_addr32[2]; - cpu_hash += ip6h->saddr.in6_u.u6_addr32[3] + ip6h->daddr.in6_u.u6_addr32[3]; - cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + ip6h->nexthdr); - - return cpu_hash; -} - -/* Load-Balance traffic based on hashing IP-addrs + L4-proto. The - * hashing scheme is symmetric, meaning swapping IP src/dest still hit - * same CPU. - */ -SEC("xdp") -int xdp_prognum5_lb_hash_ip_pairs(struct xdp_md *ctx) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - u32 key = bpf_get_smp_processor_id(); - struct ethhdr *eth = data; - struct datarec *rec; - u16 eth_proto = 0; - u64 l3_offset = 0; - u32 cpu_dest = 0; - u32 cpu_idx = 0; - u32 *cpu_lookup; - u32 key0 = 0; - u32 *cpu_max; - u32 cpu_hash; - - rec = bpf_map_lookup_elem(&rx_cnt, &key); - if (!rec) - return XDP_PASS; - NO_TEAR_INC(rec->processed); - - cpu_max = bpf_map_lookup_elem(&cpus_count, &key0); - if (!cpu_max) - return XDP_ABORTED; - - if (!(parse_eth(eth, data_end, ð_proto, &l3_offset))) - return XDP_PASS; /* Just skip */ - - /* Hash for IPv4 and IPv6 */ - switch (eth_proto) { - case ETH_P_IP: - cpu_hash = get_ipv4_hash_ip_pair(ctx, l3_offset); - break; - case ETH_P_IPV6: - cpu_hash = get_ipv6_hash_ip_pair(ctx, l3_offset); - break; - case ETH_P_ARP: /* ARP packet handled on CPU idx 0 */ - default: - cpu_hash = 0; - } - - /* Choose CPU based on hash */ - cpu_idx = cpu_hash % *cpu_max; - - cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx); - if (!cpu_lookup) - return XDP_ABORTED; - cpu_dest = *cpu_lookup; - - if (cpu_dest >= nr_cpus) { - NO_TEAR_INC(rec->issue); - return XDP_ABORTED; - } - return bpf_redirect_map(&cpu_map, cpu_dest, 0); -} - -SEC("xdp/cpumap") -int xdp_redirect_cpu_devmap(struct xdp_md *ctx) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - struct ethhdr *eth = data; - u64 nh_off; - - nh_off = sizeof(*eth); - if (data + nh_off > data_end) - return XDP_DROP; - - swap_src_dst_mac(data); - return bpf_redirect_map(&tx_port, 0, 0); -} - -SEC("xdp/cpumap") -int xdp_redirect_cpu_pass(struct xdp_md *ctx) -{ - return XDP_PASS; -} - -SEC("xdp/cpumap") -int xdp_redirect_cpu_drop(struct xdp_md *ctx) -{ - return XDP_DROP; -} - -SEC("xdp/devmap") -int xdp_redirect_egress_prog(struct xdp_md *ctx) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - struct ethhdr *eth = data; - u64 nh_off; - - nh_off = sizeof(*eth); - if (data + nh_off > data_end) - return XDP_DROP; - - __builtin_memcpy(eth->h_source, (const char *)tx_mac_addr, ETH_ALEN); - - return XDP_PASS; -} - -char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c deleted file mode 100644 index e1458405e2ba..000000000000 --- a/samples/bpf/xdp_redirect_cpu_user.c +++ /dev/null @@ -1,559 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc. - */ -static const char *__doc__ = -"XDP CPU redirect tool, using BPF_MAP_TYPE_CPUMAP\n" -"Usage: xdp_redirect_cpu -d <IFINDEX|IFNAME> -c 0 ... 
-c N\n" -"Valid specification for CPUMAP BPF program:\n" -" --mprog-name/-e pass (use built-in XDP_PASS program)\n" -" --mprog-name/-e drop (use built-in XDP_DROP program)\n" -" --redirect-device/-r <ifindex|ifname> (use built-in DEVMAP redirect program)\n" -" Custom CPUMAP BPF program:\n" -" --mprog-filename/-f <filename> --mprog-name/-e <program>\n" -" Optionally, also pass --redirect-map/-m and --redirect-device/-r together\n" -" to configure DEVMAP in BPF object <filename>\n"; - -#include <errno.h> -#include <signal.h> -#include <stdio.h> -#include <stdlib.h> -#include <stdbool.h> -#include <string.h> -#include <unistd.h> -#include <locale.h> -#include <sys/sysinfo.h> -#include <getopt.h> -#include <net/if.h> -#include <time.h> -#include <linux/limits.h> -#include <arpa/inet.h> -#include <linux/if_link.h> -#include <bpf/bpf.h> -#include <bpf/libbpf.h> -#include "bpf_util.h" -#include "xdp_sample_user.h" -#include "xdp_redirect_cpu.skel.h" - -static int map_fd; -static int avail_fd; -static int count_fd; - -static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT | - SAMPLE_CPUMAP_ENQUEUE_CNT | SAMPLE_CPUMAP_KTHREAD_CNT | - SAMPLE_EXCEPTION_CNT; - -DEFINE_SAMPLE_INIT(xdp_redirect_cpu); - -static const struct option long_options[] = { - { "help", no_argument, NULL, 'h' }, - { "dev", required_argument, NULL, 'd' }, - { "skb-mode", no_argument, NULL, 'S' }, - { "progname", required_argument, NULL, 'p' }, - { "qsize", required_argument, NULL, 'q' }, - { "cpu", required_argument, NULL, 'c' }, - { "stress-mode", no_argument, NULL, 'x' }, - { "force", no_argument, NULL, 'F' }, - { "interval", required_argument, NULL, 'i' }, - { "verbose", no_argument, NULL, 'v' }, - { "stats", no_argument, NULL, 's' }, - { "mprog-name", required_argument, NULL, 'e' }, - { "mprog-filename", required_argument, NULL, 'f' }, - { "redirect-device", required_argument, NULL, 'r' }, - { "redirect-map", required_argument, NULL, 'm' }, - {} -}; - -static void print_avail_progs(struct bpf_object *obj) -{ - struct bpf_program *pos; - - printf(" Programs to be used for -p/--progname:\n"); - bpf_object__for_each_program(pos, obj) { - if (bpf_program__type(pos) == BPF_PROG_TYPE_XDP) { - if (!strncmp(bpf_program__name(pos), "xdp_prognum", - sizeof("xdp_prognum") - 1)) - printf(" %s\n", bpf_program__name(pos)); - } - } -} - -static void usage(char *argv[], const struct option *long_options, - const char *doc, int mask, bool error, struct bpf_object *obj) -{ - sample_usage(argv, long_options, doc, mask, error); - print_avail_progs(obj); -} - -static int create_cpu_entry(__u32 cpu, struct bpf_cpumap_val *value, - __u32 avail_idx, bool new) -{ - __u32 curr_cpus_count = 0; - __u32 key = 0; - int ret; - - /* Add a CPU entry to cpumap, as this allocate a cpu entry in - * the kernel for the cpu. - */ - ret = bpf_map_update_elem(map_fd, &cpu, value, 0); - if (ret < 0) { - fprintf(stderr, "Create CPU entry failed: %s\n", strerror(errno)); - return ret; - } - - /* Inform bpf_prog's that a new CPU is available to select - * from via some control maps. 
- */ - ret = bpf_map_update_elem(avail_fd, &avail_idx, &cpu, 0); - if (ret < 0) { - fprintf(stderr, "Add to avail CPUs failed: %s\n", strerror(errno)); - return ret; - } - - /* When not replacing/updating existing entry, bump the count */ - ret = bpf_map_lookup_elem(count_fd, &key, &curr_cpus_count); - if (ret < 0) { - fprintf(stderr, "Failed reading curr cpus_count: %s\n", - strerror(errno)); - return ret; - } - if (new) { - curr_cpus_count++; - ret = bpf_map_update_elem(count_fd, &key, - &curr_cpus_count, 0); - if (ret < 0) { - fprintf(stderr, "Failed write curr cpus_count: %s\n", - strerror(errno)); - return ret; - } - } - - printf("%s CPU: %u as idx: %u qsize: %d cpumap_prog_fd: %d (cpus_count: %u)\n", - new ? "Add new" : "Replace", cpu, avail_idx, - value->qsize, value->bpf_prog.fd, curr_cpus_count); - - return 0; -} - -/* CPUs are zero-indexed. Thus, add a special sentinel default value - * in map cpus_available to mark CPU index'es not configured - */ -static int mark_cpus_unavailable(void) -{ - int ret, i, n_cpus = libbpf_num_possible_cpus(); - __u32 invalid_cpu = n_cpus; - - for (i = 0; i < n_cpus; i++) { - ret = bpf_map_update_elem(avail_fd, &i, - &invalid_cpu, 0); - if (ret < 0) { - fprintf(stderr, "Failed marking CPU unavailable: %s\n", - strerror(errno)); - return ret; - } - } - - return 0; -} - -/* Stress cpumap management code by concurrently changing underlying cpumap */ -static void stress_cpumap(void *ctx) -{ - struct bpf_cpumap_val *value = ctx; - - /* Changing qsize will cause kernel to free and alloc a new - * bpf_cpu_map_entry, with an associated/complicated tear-down - * procedure. - */ - value->qsize = 1024; - create_cpu_entry(1, value, 0, false); - value->qsize = 8; - create_cpu_entry(1, value, 0, false); - value->qsize = 16000; - create_cpu_entry(1, value, 0, false); -} - -static int set_cpumap_prog(struct xdp_redirect_cpu *skel, - const char *redir_interface, const char *redir_map, - const char *mprog_filename, const char *mprog_name) -{ - if (mprog_filename) { - struct bpf_program *prog; - struct bpf_object *obj; - int ret; - - if (!mprog_name) { - fprintf(stderr, "BPF program not specified for file %s\n", - mprog_filename); - goto end; - } - if ((redir_interface && !redir_map) || (!redir_interface && redir_map)) { - fprintf(stderr, "--redirect-%s specified but --redirect-%s not specified\n", - redir_interface ? "device" : "map", redir_interface ? 
"map" : "device"); - goto end; - } - - /* Custom BPF program */ - obj = bpf_object__open_file(mprog_filename, NULL); - if (!obj) { - ret = -errno; - fprintf(stderr, "Failed to bpf_prog_load_xattr: %s\n", - strerror(errno)); - return ret; - } - - ret = bpf_object__load(obj); - if (ret < 0) { - ret = -errno; - fprintf(stderr, "Failed to bpf_object__load: %s\n", - strerror(errno)); - return ret; - } - - if (redir_map) { - int err, redir_map_fd, ifindex_out, key = 0; - - redir_map_fd = bpf_object__find_map_fd_by_name(obj, redir_map); - if (redir_map_fd < 0) { - fprintf(stderr, "Failed to bpf_object__find_map_fd_by_name: %s\n", - strerror(errno)); - return redir_map_fd; - } - - ifindex_out = if_nametoindex(redir_interface); - if (!ifindex_out) - ifindex_out = strtoul(redir_interface, NULL, 0); - if (!ifindex_out) { - fprintf(stderr, "Bad interface name or index\n"); - return -EINVAL; - } - - err = bpf_map_update_elem(redir_map_fd, &key, &ifindex_out, 0); - if (err < 0) - return err; - } - - prog = bpf_object__find_program_by_name(obj, mprog_name); - if (!prog) { - ret = -errno; - fprintf(stderr, "Failed to bpf_object__find_program_by_name: %s\n", - strerror(errno)); - return ret; - } - - return bpf_program__fd(prog); - } else { - if (mprog_name) { - if (redir_interface || redir_map) { - fprintf(stderr, "Need to specify --mprog-filename/-f\n"); - goto end; - } - if (!strcmp(mprog_name, "pass") || !strcmp(mprog_name, "drop")) { - /* Use built-in pass/drop programs */ - return *mprog_name == 'p' ? bpf_program__fd(skel->progs.xdp_redirect_cpu_pass) - : bpf_program__fd(skel->progs.xdp_redirect_cpu_drop); - } else { - fprintf(stderr, "Unknown name \"%s\" for built-in BPF program\n", - mprog_name); - goto end; - } - } else { - if (redir_map) { - fprintf(stderr, "Need to specify --mprog-filename, --mprog-name and" - " --redirect-device with --redirect-map\n"); - goto end; - } - if (redir_interface) { - /* Use built-in devmap redirect */ - struct bpf_devmap_val val = {}; - int ifindex_out, err; - __u32 key = 0; - - if (!redir_interface) - return 0; - - ifindex_out = if_nametoindex(redir_interface); - if (!ifindex_out) - ifindex_out = strtoul(redir_interface, NULL, 0); - if (!ifindex_out) { - fprintf(stderr, "Bad interface name or index\n"); - return -EINVAL; - } - - if (get_mac_addr(ifindex_out, skel->bss->tx_mac_addr) < 0) { - printf("Get interface %d mac failed\n", ifindex_out); - return -EINVAL; - } - - val.ifindex = ifindex_out; - val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_redirect_egress_prog); - err = bpf_map_update_elem(bpf_map__fd(skel->maps.tx_port), &key, &val, 0); - if (err < 0) - return -errno; - - return bpf_program__fd(skel->progs.xdp_redirect_cpu_devmap); - } - } - } - - /* Disabled */ - return 0; -end: - fprintf(stderr, "Invalid options for CPUMAP BPF program\n"); - return -EINVAL; -} - -int main(int argc, char **argv) -{ - const char *redir_interface = NULL, *redir_map = NULL; - const char *mprog_filename = NULL, *mprog_name = NULL; - struct xdp_redirect_cpu *skel; - struct bpf_map_info info = {}; - struct bpf_cpumap_val value; - __u32 infosz = sizeof(info); - int ret = EXIT_FAIL_OPTION; - unsigned long interval = 2; - bool stress_mode = false; - struct bpf_program *prog; - const char *prog_name; - bool generic = false; - bool force = false; - int added_cpus = 0; - bool error = true; - int longindex = 0; - int add_cpu = -1; - int ifindex = -1; - int *cpu, i, opt; - __u32 qsize; - int n_cpus; - - n_cpus = libbpf_num_possible_cpus(); - - /* Notice: Choosing the queue size is very 
important when CPU is - * configured with power-saving states. - * - * If deepest state take 133 usec to wakeup from (133/10^6). When link - * speed is 10Gbit/s ((10*10^9/8) in bytes/sec). How many bytes can - * arrive with in 133 usec at this speed: (10*10^9/8)*(133/10^6) = - * 166250 bytes. With MTU size packets this is 110 packets, and with - * minimum Ethernet (MAC-preamble + intergap) 84 bytes is 1979 packets. - * - * Setting default cpumap queue to 2048 as worst-case (small packet) - * should be +64 packet due kthread wakeup call (due to xdp_do_flush) - * worst-case is 2043 packets. - * - * Sysadm can configured system to avoid deep-sleep via: - * tuned-adm profile network-latency - */ - qsize = 2048; - - skel = xdp_redirect_cpu__open(); - if (!skel) { - fprintf(stderr, "Failed to xdp_redirect_cpu__open: %s\n", - strerror(errno)); - ret = EXIT_FAIL_BPF; - goto end; - } - - ret = sample_init_pre_load(skel); - if (ret < 0) { - fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret)); - ret = EXIT_FAIL_BPF; - goto end_destroy; - } - - if (bpf_map__set_max_entries(skel->maps.cpu_map, n_cpus) < 0) { - fprintf(stderr, "Failed to set max entries for cpu_map map: %s", - strerror(errno)); - ret = EXIT_FAIL_BPF; - goto end_destroy; - } - - if (bpf_map__set_max_entries(skel->maps.cpus_available, n_cpus) < 0) { - fprintf(stderr, "Failed to set max entries for cpus_available map: %s", - strerror(errno)); - ret = EXIT_FAIL_BPF; - goto end_destroy; - } - - cpu = calloc(n_cpus, sizeof(int)); - if (!cpu) { - fprintf(stderr, "Failed to allocate cpu array\n"); - goto end_destroy; - } - - prog = skel->progs.xdp_prognum5_lb_hash_ip_pairs; - while ((opt = getopt_long(argc, argv, "d:si:Sxp:f:e:r:m:c:q:Fvh", - long_options, &longindex)) != -1) { - switch (opt) { - case 'd': - if (strlen(optarg) >= IF_NAMESIZE) { - fprintf(stderr, "-d/--dev name too long\n"); - usage(argv, long_options, __doc__, mask, true, skel->obj); - goto end_cpu; - } - ifindex = if_nametoindex(optarg); - if (!ifindex) - ifindex = strtoul(optarg, NULL, 0); - if (!ifindex) { - fprintf(stderr, "Bad interface index or name (%d): %s\n", - errno, strerror(errno)); - usage(argv, long_options, __doc__, mask, true, skel->obj); - goto end_cpu; - } - break; - case 's': - mask |= SAMPLE_REDIRECT_MAP_CNT; - break; - case 'i': - interval = strtoul(optarg, NULL, 0); - break; - case 'S': - generic = true; - break; - case 'x': - stress_mode = true; - break; - case 'p': - /* Selecting eBPF prog to load */ - prog_name = optarg; - prog = bpf_object__find_program_by_name(skel->obj, - prog_name); - if (!prog) { - fprintf(stderr, - "Failed to find program %s specified by" - " option -p/--progname\n", - prog_name); - print_avail_progs(skel->obj); - goto end_cpu; - } - break; - case 'f': - mprog_filename = optarg; - break; - case 'e': - mprog_name = optarg; - break; - case 'r': - redir_interface = optarg; - mask |= SAMPLE_DEVMAP_XMIT_CNT_MULTI; - break; - case 'm': - redir_map = optarg; - break; - case 'c': - /* Add multiple CPUs */ - add_cpu = strtoul(optarg, NULL, 0); - if (add_cpu >= n_cpus) { - fprintf(stderr, - "--cpu nr too large for cpumap err (%d):%s\n", - errno, strerror(errno)); - usage(argv, long_options, __doc__, mask, true, skel->obj); - goto end_cpu; - } - cpu[added_cpus++] = add_cpu; - break; - case 'q': - qsize = strtoul(optarg, NULL, 0); - break; - case 'F': - force = true; - break; - case 'v': - sample_switch_mode(); - break; - case 'h': - error = false; - default: - usage(argv, long_options, __doc__, mask, error, skel->obj); - 
goto end_cpu; - } - } - - ret = EXIT_FAIL_OPTION; - if (ifindex == -1) { - fprintf(stderr, "Required option --dev missing\n"); - usage(argv, long_options, __doc__, mask, true, skel->obj); - goto end_cpu; - } - - if (add_cpu == -1) { - fprintf(stderr, "Required option --cpu missing\n" - "Specify multiple --cpu option to add more\n"); - usage(argv, long_options, __doc__, mask, true, skel->obj); - goto end_cpu; - } - - skel->rodata->from_match[0] = ifindex; - if (redir_interface) - skel->rodata->to_match[0] = if_nametoindex(redir_interface); - - ret = xdp_redirect_cpu__load(skel); - if (ret < 0) { - fprintf(stderr, "Failed to xdp_redirect_cpu__load: %s\n", - strerror(errno)); - goto end_cpu; - } - - ret = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.cpu_map), &info, &infosz); - if (ret < 0) { - fprintf(stderr, "Failed bpf_map_get_info_by_fd for cpumap: %s\n", - strerror(errno)); - goto end_cpu; - } - - skel->bss->cpumap_map_id = info.id; - - map_fd = bpf_map__fd(skel->maps.cpu_map); - avail_fd = bpf_map__fd(skel->maps.cpus_available); - count_fd = bpf_map__fd(skel->maps.cpus_count); - - ret = mark_cpus_unavailable(); - if (ret < 0) { - fprintf(stderr, "Unable to mark CPUs as unavailable\n"); - goto end_cpu; - } - - ret = sample_init(skel, mask); - if (ret < 0) { - fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret)); - ret = EXIT_FAIL; - goto end_cpu; - } - - value.bpf_prog.fd = set_cpumap_prog(skel, redir_interface, redir_map, - mprog_filename, mprog_name); - if (value.bpf_prog.fd < 0) { - fprintf(stderr, "Failed to set CPUMAP BPF program: %s\n", - strerror(-value.bpf_prog.fd)); - usage(argv, long_options, __doc__, mask, true, skel->obj); - ret = EXIT_FAIL_BPF; - goto end_cpu; - } - value.qsize = qsize; - - for (i = 0; i < added_cpus; i++) { - if (create_cpu_entry(cpu[i], &value, i, true) < 0) { - fprintf(stderr, "Cannot proceed, exiting\n"); - usage(argv, long_options, __doc__, mask, true, skel->obj); - goto end_cpu; - } - } - - ret = EXIT_FAIL_XDP; - if (sample_install_xdp(prog, ifindex, generic, force) < 0) - goto end_cpu; - - ret = sample_run(interval, stress_mode ? stress_cpumap : NULL, &value); - if (ret < 0) { - fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret)); - ret = EXIT_FAIL; - goto end_cpu; - } - ret = EXIT_OK; -end_cpu: - free(cpu); -end_destroy: - xdp_redirect_cpu__destroy(skel); -end: - sample_exit(ret); -} diff --git a/samples/bpf/xdp_redirect_map.bpf.c b/samples/bpf/xdp_redirect_map.bpf.c deleted file mode 100644 index 8557c278df77..000000000000 --- a/samples/bpf/xdp_redirect_map.bpf.c +++ /dev/null @@ -1,97 +0,0 @@ -/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#define KBUILD_MODNAME "foo" - -#include "vmlinux.h" -#include "xdp_sample.bpf.h" -#include "xdp_sample_shared.h" - -/* The 2nd xdp prog on egress does not support skb mode, so we define two - * maps, tx_port_general and tx_port_native. 
- */ -struct { - __uint(type, BPF_MAP_TYPE_DEVMAP); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(int)); - __uint(max_entries, 1); -} tx_port_general SEC(".maps"); - -struct { - __uint(type, BPF_MAP_TYPE_DEVMAP); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(struct bpf_devmap_val)); - __uint(max_entries, 1); -} tx_port_native SEC(".maps"); - -/* store egress interface mac address */ -const volatile __u8 tx_mac_addr[ETH_ALEN]; - -static __always_inline int xdp_redirect_map(struct xdp_md *ctx, void *redirect_map) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - u32 key = bpf_get_smp_processor_id(); - struct ethhdr *eth = data; - struct datarec *rec; - u64 nh_off; - - nh_off = sizeof(*eth); - if (data + nh_off > data_end) - return XDP_DROP; - - rec = bpf_map_lookup_elem(&rx_cnt, &key); - if (!rec) - return XDP_PASS; - NO_TEAR_INC(rec->processed); - swap_src_dst_mac(data); - return bpf_redirect_map(redirect_map, 0, 0); -} - -SEC("xdp") -int xdp_redirect_map_general(struct xdp_md *ctx) -{ - return xdp_redirect_map(ctx, &tx_port_general); -} - -SEC("xdp") -int xdp_redirect_map_native(struct xdp_md *ctx) -{ - return xdp_redirect_map(ctx, &tx_port_native); -} - -SEC("xdp/devmap") -int xdp_redirect_map_egress(struct xdp_md *ctx) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - u8 *mac_addr = (u8 *) tx_mac_addr; - struct ethhdr *eth = data; - u64 nh_off; - - nh_off = sizeof(*eth); - if (data + nh_off > data_end) - return XDP_DROP; - - barrier_var(mac_addr); /* prevent optimizing out memcpy */ - __builtin_memcpy(eth->h_source, mac_addr, ETH_ALEN); - - return XDP_PASS; -} - -/* Redirect require an XDP bpf_prog loaded on the TX device */ -SEC("xdp") -int xdp_redirect_dummy_prog(struct xdp_md *ctx) -{ - return XDP_PASS; -} - -char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/xdp_redirect_map_multi.bpf.c b/samples/bpf/xdp_redirect_map_multi.bpf.c deleted file mode 100644 index 8b2fd4ec2c76..000000000000 --- a/samples/bpf/xdp_redirect_map_multi.bpf.c +++ /dev/null @@ -1,77 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#define KBUILD_MODNAME "foo" - -#include "vmlinux.h" -#include "xdp_sample.bpf.h" -#include "xdp_sample_shared.h" - -struct { - __uint(type, BPF_MAP_TYPE_DEVMAP_HASH); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(int)); - __uint(max_entries, 32); -} forward_map_general SEC(".maps"); - -struct { - __uint(type, BPF_MAP_TYPE_DEVMAP_HASH); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(struct bpf_devmap_val)); - __uint(max_entries, 32); -} forward_map_native SEC(".maps"); - -/* map to store egress interfaces mac addresses */ -struct { - __uint(type, BPF_MAP_TYPE_HASH); - __type(key, u32); - __type(value, __be64); - __uint(max_entries, 32); -} mac_map SEC(".maps"); - -static int xdp_redirect_map(struct xdp_md *ctx, void *forward_map) -{ - u32 key = bpf_get_smp_processor_id(); - struct datarec *rec; - - rec = bpf_map_lookup_elem(&rx_cnt, &key); - if (!rec) - return XDP_PASS; - NO_TEAR_INC(rec->processed); - - return bpf_redirect_map(forward_map, 0, - BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS); -} - -SEC("xdp") -int xdp_redirect_map_general(struct xdp_md *ctx) -{ - return xdp_redirect_map(ctx, &forward_map_general); -} - -SEC("xdp") -int xdp_redirect_map_native(struct xdp_md *ctx) -{ - return xdp_redirect_map(ctx, &forward_map_native); -} - -SEC("xdp/devmap") -int xdp_devmap_prog(struct xdp_md *ctx) -{ - void *data_end = (void 
*)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - u32 key = ctx->egress_ifindex; - struct ethhdr *eth = data; - __be64 *mac; - u64 nh_off; - - nh_off = sizeof(*eth); - if (data + nh_off > data_end) - return XDP_DROP; - - mac = bpf_map_lookup_elem(&mac_map, &key); - if (mac) - __builtin_memcpy(eth->h_source, mac, ETH_ALEN); - - return XDP_PASS; -} - -char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/xdp_redirect_map_multi_user.c b/samples/bpf/xdp_redirect_map_multi_user.c deleted file mode 100644 index 9e24f2705b67..000000000000 --- a/samples/bpf/xdp_redirect_map_multi_user.c +++ /dev/null @@ -1,232 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -static const char *__doc__ = -"XDP multi redirect tool, using BPF_MAP_TYPE_DEVMAP and BPF_F_BROADCAST flag for bpf_redirect_map\n" -"Usage: xdp_redirect_map_multi <IFINDEX|IFNAME> <IFINDEX|IFNAME> ... <IFINDEX|IFNAME>\n"; - -#include <linux/bpf.h> -#include <linux/if_link.h> -#include <assert.h> -#include <getopt.h> -#include <errno.h> -#include <signal.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <net/if.h> -#include <unistd.h> -#include <libgen.h> -#include <sys/ioctl.h> -#include <sys/types.h> -#include <sys/socket.h> -#include <netinet/in.h> -#include <linux/if_ether.h> -#include <bpf/bpf.h> -#include <bpf/libbpf.h> -#include "bpf_util.h" -#include "xdp_sample_user.h" -#include "xdp_redirect_map_multi.skel.h" - -#define MAX_IFACE_NUM 32 -static int ifaces[MAX_IFACE_NUM] = {}; - -static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT | - SAMPLE_EXCEPTION_CNT | SAMPLE_DEVMAP_XMIT_CNT | - SAMPLE_DEVMAP_XMIT_CNT_MULTI | SAMPLE_SKIP_HEADING; - -DEFINE_SAMPLE_INIT(xdp_redirect_map_multi); - -static const struct option long_options[] = { - { "help", no_argument, NULL, 'h' }, - { "skb-mode", no_argument, NULL, 'S' }, - { "force", no_argument, NULL, 'F' }, - { "load-egress", no_argument, NULL, 'X' }, - { "stats", no_argument, NULL, 's' }, - { "interval", required_argument, NULL, 'i' }, - { "verbose", no_argument, NULL, 'v' }, - {} -}; - -static int update_mac_map(struct bpf_map *map) -{ - int mac_map_fd = bpf_map__fd(map); - unsigned char mac_addr[6]; - unsigned int ifindex; - int i, ret = -1; - - for (i = 0; ifaces[i] > 0; i++) { - ifindex = ifaces[i]; - - ret = get_mac_addr(ifindex, mac_addr); - if (ret < 0) { - fprintf(stderr, "get interface %d mac failed\n", - ifindex); - return ret; - } - - ret = bpf_map_update_elem(mac_map_fd, &ifindex, mac_addr, 0); - if (ret < 0) { - fprintf(stderr, "Failed to update mac address for ifindex %d\n", - ifindex); - return ret; - } - } - - return 0; -} - -int main(int argc, char **argv) -{ - struct bpf_devmap_val devmap_val = {}; - struct xdp_redirect_map_multi *skel; - struct bpf_program *ingress_prog; - bool xdp_devmap_attached = false; - struct bpf_map *forward_map; - int ret = EXIT_FAIL_OPTION; - unsigned long interval = 2; - char ifname[IF_NAMESIZE]; - unsigned int ifindex; - bool generic = false; - bool force = false; - bool tried = false; - bool error = true; - int i, opt; - - while ((opt = getopt_long(argc, argv, "hSFXi:vs", - long_options, NULL)) != -1) { - switch (opt) { - case 'S': - generic = true; - /* devmap_xmit tracepoint not available */ - mask &= ~(SAMPLE_DEVMAP_XMIT_CNT | - SAMPLE_DEVMAP_XMIT_CNT_MULTI); - break; - case 'F': - force = true; - break; - case 'X': - xdp_devmap_attached = true; - break; - case 'i': - interval = strtoul(optarg, NULL, 0); - break; - case 'v': - sample_switch_mode(); - break; - case 's': - mask |= 
SAMPLE_REDIRECT_MAP_CNT; - break; - case 'h': - error = false; - default: - sample_usage(argv, long_options, __doc__, mask, error); - return ret; - } - } - - if (argc <= optind + 1) { - sample_usage(argv, long_options, __doc__, mask, error); - return ret; - } - - skel = xdp_redirect_map_multi__open(); - if (!skel) { - fprintf(stderr, "Failed to xdp_redirect_map_multi__open: %s\n", - strerror(errno)); - ret = EXIT_FAIL_BPF; - goto end; - } - - ret = sample_init_pre_load(skel); - if (ret < 0) { - fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret)); - ret = EXIT_FAIL_BPF; - goto end_destroy; - } - - ret = EXIT_FAIL_OPTION; - for (i = 0; i < MAX_IFACE_NUM && argv[optind + i]; i++) { - ifaces[i] = if_nametoindex(argv[optind + i]); - if (!ifaces[i]) - ifaces[i] = strtoul(argv[optind + i], NULL, 0); - if (!if_indextoname(ifaces[i], ifname)) { - fprintf(stderr, "Bad interface index or name\n"); - sample_usage(argv, long_options, __doc__, mask, true); - goto end_destroy; - } - - skel->rodata->from_match[i] = ifaces[i]; - skel->rodata->to_match[i] = ifaces[i]; - } - - ret = xdp_redirect_map_multi__load(skel); - if (ret < 0) { - fprintf(stderr, "Failed to xdp_redirect_map_multi__load: %s\n", - strerror(errno)); - ret = EXIT_FAIL_BPF; - goto end_destroy; - } - - if (xdp_devmap_attached) { - /* Update mac_map with all egress interfaces' mac addr */ - if (update_mac_map(skel->maps.mac_map) < 0) { - fprintf(stderr, "Updating mac address failed\n"); - ret = EXIT_FAIL; - goto end_destroy; - } - } - - ret = sample_init(skel, mask); - if (ret < 0) { - fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret)); - ret = EXIT_FAIL; - goto end_destroy; - } - - ingress_prog = skel->progs.xdp_redirect_map_native; - forward_map = skel->maps.forward_map_native; - - for (i = 0; ifaces[i] > 0; i++) { - ifindex = ifaces[i]; - - ret = EXIT_FAIL_XDP; -restart: - /* bind prog_fd to each interface */ - if (sample_install_xdp(ingress_prog, ifindex, generic, force) < 0) { - if (generic && !tried) { - fprintf(stderr, - "Trying fallback to sizeof(int) as value_size for devmap in generic mode\n"); - ingress_prog = skel->progs.xdp_redirect_map_general; - forward_map = skel->maps.forward_map_general; - tried = true; - goto restart; - } - goto end_destroy; - } - - /* Add all the interfaces to forward group and attach - * egress devmap program if exist - */ - devmap_val.ifindex = ifindex; - if (xdp_devmap_attached) - devmap_val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_devmap_prog); - ret = bpf_map_update_elem(bpf_map__fd(forward_map), &ifindex, &devmap_val, 0); - if (ret < 0) { - fprintf(stderr, "Failed to update devmap value: %s\n", - strerror(errno)); - ret = EXIT_FAIL_BPF; - goto end_destroy; - } - } - - ret = sample_run(interval, NULL, NULL); - if (ret < 0) { - fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret)); - ret = EXIT_FAIL; - goto end_destroy; - } - ret = EXIT_OK; -end_destroy: - xdp_redirect_map_multi__destroy(skel); -end: - sample_exit(ret); -} diff --git a/samples/bpf/xdp_redirect_map_user.c b/samples/bpf/xdp_redirect_map_user.c deleted file mode 100644 index c889a1394dc1..000000000000 --- a/samples/bpf/xdp_redirect_map_user.c +++ /dev/null @@ -1,228 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2017 Covalent IO, Inc. 
http://covalent.io - */ -static const char *__doc__ = -"XDP redirect tool, using BPF_MAP_TYPE_DEVMAP\n" -"Usage: xdp_redirect_map <IFINDEX|IFNAME>_IN <IFINDEX|IFNAME>_OUT\n"; - -#include <linux/bpf.h> -#include <linux/if_link.h> -#include <assert.h> -#include <errno.h> -#include <signal.h> -#include <stdio.h> -#include <stdlib.h> -#include <stdbool.h> -#include <string.h> -#include <net/if.h> -#include <unistd.h> -#include <libgen.h> -#include <getopt.h> -#include <bpf/bpf.h> -#include <bpf/libbpf.h> -#include "bpf_util.h" -#include "xdp_sample_user.h" -#include "xdp_redirect_map.skel.h" - -static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT | - SAMPLE_EXCEPTION_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI; - -DEFINE_SAMPLE_INIT(xdp_redirect_map); - -static const struct option long_options[] = { - { "help", no_argument, NULL, 'h' }, - { "skb-mode", no_argument, NULL, 'S' }, - { "force", no_argument, NULL, 'F' }, - { "load-egress", no_argument, NULL, 'X' }, - { "stats", no_argument, NULL, 's' }, - { "interval", required_argument, NULL, 'i' }, - { "verbose", no_argument, NULL, 'v' }, - {} -}; - -static int verbose = 0; - -int main(int argc, char **argv) -{ - struct bpf_devmap_val devmap_val = {}; - bool xdp_devmap_attached = false; - struct xdp_redirect_map *skel; - char str[2 * IF_NAMESIZE + 1]; - char ifname_out[IF_NAMESIZE]; - struct bpf_map *tx_port_map; - char ifname_in[IF_NAMESIZE]; - int ifindex_in, ifindex_out; - unsigned long interval = 2; - int ret = EXIT_FAIL_OPTION; - struct bpf_program *prog; - bool generic = false; - bool force = false; - bool tried = false; - bool error = true; - int opt, key = 0; - - while ((opt = getopt_long(argc, argv, "hSFXi:vs", - long_options, NULL)) != -1) { - switch (opt) { - case 'S': - generic = true; - /* devmap_xmit tracepoint not available */ - mask &= ~(SAMPLE_DEVMAP_XMIT_CNT | - SAMPLE_DEVMAP_XMIT_CNT_MULTI); - break; - case 'F': - force = true; - break; - case 'X': - xdp_devmap_attached = true; - break; - case 'i': - interval = strtoul(optarg, NULL, 0); - break; - case 'v': - sample_switch_mode(); - verbose = 1; - break; - case 's': - mask |= SAMPLE_REDIRECT_MAP_CNT; - break; - case 'h': - error = false; - default: - sample_usage(argv, long_options, __doc__, mask, error); - return ret; - } - } - - if (argc <= optind + 1) { - sample_usage(argv, long_options, __doc__, mask, true); - goto end; - } - - ifindex_in = if_nametoindex(argv[optind]); - if (!ifindex_in) - ifindex_in = strtoul(argv[optind], NULL, 0); - - ifindex_out = if_nametoindex(argv[optind + 1]); - if (!ifindex_out) - ifindex_out = strtoul(argv[optind + 1], NULL, 0); - - if (!ifindex_in || !ifindex_out) { - fprintf(stderr, "Bad interface index or name\n"); - sample_usage(argv, long_options, __doc__, mask, true); - goto end; - } - - skel = xdp_redirect_map__open(); - if (!skel) { - fprintf(stderr, "Failed to xdp_redirect_map__open: %s\n", - strerror(errno)); - ret = EXIT_FAIL_BPF; - goto end; - } - - ret = sample_init_pre_load(skel); - if (ret < 0) { - fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret)); - ret = EXIT_FAIL_BPF; - goto end_destroy; - } - - /* Load 2nd xdp prog on egress. 
*/ - if (xdp_devmap_attached) { - ret = get_mac_addr(ifindex_out, skel->rodata->tx_mac_addr); - if (ret < 0) { - fprintf(stderr, "Failed to get interface %d mac address: %s\n", - ifindex_out, strerror(-ret)); - ret = EXIT_FAIL; - goto end_destroy; - } - if (verbose) - printf("Egress ifindex:%d using src MAC %02x:%02x:%02x:%02x:%02x:%02x\n", - ifindex_out, - skel->rodata->tx_mac_addr[0], skel->rodata->tx_mac_addr[1], - skel->rodata->tx_mac_addr[2], skel->rodata->tx_mac_addr[3], - skel->rodata->tx_mac_addr[4], skel->rodata->tx_mac_addr[5]); - } - - skel->rodata->from_match[0] = ifindex_in; - skel->rodata->to_match[0] = ifindex_out; - - ret = xdp_redirect_map__load(skel); - if (ret < 0) { - fprintf(stderr, "Failed to xdp_redirect_map__load: %s\n", - strerror(errno)); - ret = EXIT_FAIL_BPF; - goto end_destroy; - } - - ret = sample_init(skel, mask); - if (ret < 0) { - fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret)); - ret = EXIT_FAIL; - goto end_destroy; - } - - prog = skel->progs.xdp_redirect_map_native; - tx_port_map = skel->maps.tx_port_native; -restart: - if (sample_install_xdp(prog, ifindex_in, generic, force) < 0) { - /* First try with struct bpf_devmap_val as value for generic - * mode, then fallback to sizeof(int) for older kernels. - */ - fprintf(stderr, - "Trying fallback to sizeof(int) as value_size for devmap in generic mode\n"); - if (generic && !tried) { - prog = skel->progs.xdp_redirect_map_general; - tx_port_map = skel->maps.tx_port_general; - tried = true; - goto restart; - } - ret = EXIT_FAIL_XDP; - goto end_destroy; - } - - /* Loading dummy XDP prog on out-device */ - sample_install_xdp(skel->progs.xdp_redirect_dummy_prog, ifindex_out, generic, force); - - devmap_val.ifindex = ifindex_out; - if (xdp_devmap_attached) - devmap_val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_redirect_map_egress); - ret = bpf_map_update_elem(bpf_map__fd(tx_port_map), &key, &devmap_val, 0); - if (ret < 0) { - fprintf(stderr, "Failed to update devmap value: %s\n", - strerror(errno)); - ret = EXIT_FAIL_BPF; - goto end_destroy; - } - - ret = EXIT_FAIL; - if (!if_indextoname(ifindex_in, ifname_in)) { - fprintf(stderr, "Failed to if_indextoname for %d: %s\n", ifindex_in, - strerror(errno)); - goto end_destroy; - } - - if (!if_indextoname(ifindex_out, ifname_out)) { - fprintf(stderr, "Failed to if_indextoname for %d: %s\n", ifindex_out, - strerror(errno)); - goto end_destroy; - } - - safe_strncpy(str, get_driver_name(ifindex_in), sizeof(str)); - printf("Redirecting from %s (ifindex %d; driver %s) to %s (ifindex %d; driver %s)\n", - ifname_in, ifindex_in, str, ifname_out, ifindex_out, get_driver_name(ifindex_out)); - snprintf(str, sizeof(str), "%s->%s", ifname_in, ifname_out); - - ret = sample_run(interval, NULL, NULL); - if (ret < 0) { - fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret)); - ret = EXIT_FAIL; - goto end_destroy; - } - ret = EXIT_OK; -end_destroy: - xdp_redirect_map__destroy(skel); -end: - sample_exit(ret); -} diff --git a/samples/bpf/xdp_redirect_user.c b/samples/bpf/xdp_redirect_user.c deleted file mode 100644 index 8663dd631b6e..000000000000 --- a/samples/bpf/xdp_redirect_user.c +++ /dev/null @@ -1,172 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2016 John Fastabend <[email protected]> - */ -static const char *__doc__ = -"XDP redirect tool, using bpf_redirect helper\n" -"Usage: xdp_redirect <IFINDEX|IFNAME>_IN <IFINDEX|IFNAME>_OUT\n"; - -#include <linux/bpf.h> -#include <linux/if_link.h> -#include <assert.h> -#include 
<errno.h> -#include <signal.h> -#include <stdio.h> -#include <stdlib.h> -#include <stdbool.h> -#include <string.h> -#include <net/if.h> -#include <unistd.h> -#include <libgen.h> -#include <getopt.h> -#include <bpf/bpf.h> -#include <bpf/libbpf.h> -#include "bpf_util.h" -#include "xdp_sample_user.h" -#include "xdp_redirect.skel.h" - -static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_CNT | - SAMPLE_EXCEPTION_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI; - -DEFINE_SAMPLE_INIT(xdp_redirect); - -static const struct option long_options[] = { - {"help", no_argument, NULL, 'h' }, - {"skb-mode", no_argument, NULL, 'S' }, - {"force", no_argument, NULL, 'F' }, - {"stats", no_argument, NULL, 's' }, - {"interval", required_argument, NULL, 'i' }, - {"verbose", no_argument, NULL, 'v' }, - {} -}; - -int main(int argc, char **argv) -{ - int ifindex_in, ifindex_out, opt; - char str[2 * IF_NAMESIZE + 1]; - char ifname_out[IF_NAMESIZE]; - char ifname_in[IF_NAMESIZE]; - int ret = EXIT_FAIL_OPTION; - unsigned long interval = 2; - struct xdp_redirect *skel; - bool generic = false; - bool force = false; - bool error = true; - - while ((opt = getopt_long(argc, argv, "hSFi:vs", - long_options, NULL)) != -1) { - switch (opt) { - case 'S': - generic = true; - mask &= ~(SAMPLE_DEVMAP_XMIT_CNT | - SAMPLE_DEVMAP_XMIT_CNT_MULTI); - break; - case 'F': - force = true; - break; - case 'i': - interval = strtoul(optarg, NULL, 0); - break; - case 'v': - sample_switch_mode(); - break; - case 's': - mask |= SAMPLE_REDIRECT_CNT; - break; - case 'h': - error = false; - default: - sample_usage(argv, long_options, __doc__, mask, error); - return ret; - } - } - - if (argc <= optind + 1) { - sample_usage(argv, long_options, __doc__, mask, true); - return ret; - } - - ifindex_in = if_nametoindex(argv[optind]); - if (!ifindex_in) - ifindex_in = strtoul(argv[optind], NULL, 0); - - ifindex_out = if_nametoindex(argv[optind + 1]); - if (!ifindex_out) - ifindex_out = strtoul(argv[optind + 1], NULL, 0); - - if (!ifindex_in || !ifindex_out) { - fprintf(stderr, "Bad interface index or name\n"); - sample_usage(argv, long_options, __doc__, mask, true); - goto end; - } - - skel = xdp_redirect__open(); - if (!skel) { - fprintf(stderr, "Failed to xdp_redirect__open: %s\n", strerror(errno)); - ret = EXIT_FAIL_BPF; - goto end; - } - - ret = sample_init_pre_load(skel); - if (ret < 0) { - fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret)); - ret = EXIT_FAIL_BPF; - goto end_destroy; - } - - skel->rodata->from_match[0] = ifindex_in; - skel->rodata->to_match[0] = ifindex_out; - skel->rodata->ifindex_out = ifindex_out; - - ret = xdp_redirect__load(skel); - if (ret < 0) { - fprintf(stderr, "Failed to xdp_redirect__load: %s\n", strerror(errno)); - ret = EXIT_FAIL_BPF; - goto end_destroy; - } - - ret = sample_init(skel, mask); - if (ret < 0) { - fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret)); - ret = EXIT_FAIL; - goto end_destroy; - } - - ret = EXIT_FAIL_XDP; - if (sample_install_xdp(skel->progs.xdp_redirect_prog, ifindex_in, - generic, force) < 0) - goto end_destroy; - - /* Loading dummy XDP prog on out-device */ - sample_install_xdp(skel->progs.xdp_redirect_dummy_prog, ifindex_out, - generic, force); - - ret = EXIT_FAIL; - if (!if_indextoname(ifindex_in, ifname_in)) { - fprintf(stderr, "Failed to if_indextoname for %d: %s\n", ifindex_in, - strerror(errno)); - goto end_destroy; - } - - if (!if_indextoname(ifindex_out, ifname_out)) { - fprintf(stderr, "Failed to if_indextoname for %d: %s\n", ifindex_out, - strerror(errno)); 
- goto end_destroy; - } - - safe_strncpy(str, get_driver_name(ifindex_in), sizeof(str)); - printf("Redirecting from %s (ifindex %d; driver %s) to %s (ifindex %d; driver %s)\n", - ifname_in, ifindex_in, str, ifname_out, ifindex_out, get_driver_name(ifindex_out)); - snprintf(str, sizeof(str), "%s->%s", ifname_in, ifname_out); - - ret = sample_run(interval, NULL, NULL); - if (ret < 0) { - fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret)); - ret = EXIT_FAIL; - goto end_destroy; - } - ret = EXIT_OK; -end_destroy: - xdp_redirect__destroy(skel); -end: - sample_exit(ret); -} diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c deleted file mode 100644 index 5e7459f9bf3e..000000000000 --- a/samples/bpf/xdp_rxq_info_kern.c +++ /dev/null @@ -1,140 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc. - * - * Example howto extract XDP RX-queue info - */ -#include <uapi/linux/bpf.h> -#include <uapi/linux/if_ether.h> -#include <uapi/linux/in.h> -#include <bpf/bpf_helpers.h> - -/* Config setup from with userspace - * - * User-side setup ifindex in config_map, to verify that - * ctx->ingress_ifindex is correct (against configured ifindex) - */ -struct config { - __u32 action; - int ifindex; - __u32 options; -}; -enum cfg_options_flags { - NO_TOUCH = 0x0U, - READ_MEM = 0x1U, - SWAP_MAC = 0x2U, -}; - -struct { - __uint(type, BPF_MAP_TYPE_ARRAY); - __type(key, int); - __type(value, struct config); - __uint(max_entries, 1); -} config_map SEC(".maps"); - -/* Common stats data record (shared with userspace) */ -struct datarec { - __u64 processed; - __u64 issue; -}; - -struct { - __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); - __type(key, u32); - __type(value, struct datarec); - __uint(max_entries, 1); -} stats_global_map SEC(".maps"); - -#define MAX_RXQs 64 - -/* Stats per rx_queue_index (per CPU) */ -struct { - __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); - __type(key, u32); - __type(value, struct datarec); - __uint(max_entries, MAX_RXQs + 1); -} rx_queue_index_map SEC(".maps"); - -static __always_inline -void swap_src_dst_mac(void *data) -{ - unsigned short *p = data; - unsigned short dst[3]; - - dst[0] = p[0]; - dst[1] = p[1]; - dst[2] = p[2]; - p[0] = p[3]; - p[1] = p[4]; - p[2] = p[5]; - p[3] = dst[0]; - p[4] = dst[1]; - p[5] = dst[2]; -} - -SEC("xdp_prog0") -int xdp_prognum0(struct xdp_md *ctx) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - struct datarec *rec, *rxq_rec; - int ingress_ifindex; - struct config *config; - u32 key = 0; - - /* Global stats record */ - rec = bpf_map_lookup_elem(&stats_global_map, &key); - if (!rec) - return XDP_ABORTED; - rec->processed++; - - /* Accessing ctx->ingress_ifindex, cause BPF to rewrite BPF - * instructions inside kernel to access xdp_rxq->dev->ifindex - */ - ingress_ifindex = ctx->ingress_ifindex; - - config = bpf_map_lookup_elem(&config_map, &key); - if (!config) - return XDP_ABORTED; - - /* Simple test: check ctx provided ifindex is as expected */ - if (ingress_ifindex != config->ifindex) { - /* count this error case */ - rec->issue++; - return XDP_ABORTED; - } - - /* Update stats per rx_queue_index. Handle if rx_queue_index - * is larger than stats map can contain info for. 
- */ - key = ctx->rx_queue_index; - if (key >= MAX_RXQs) - key = MAX_RXQs; - rxq_rec = bpf_map_lookup_elem(&rx_queue_index_map, &key); - if (!rxq_rec) - return XDP_ABORTED; - rxq_rec->processed++; - if (key == MAX_RXQs) - rxq_rec->issue++; - - /* Default: Don't touch packet data, only count packets */ - if (unlikely(config->options & (READ_MEM|SWAP_MAC))) { - struct ethhdr *eth = data; - - if (eth + 1 > data_end) - return XDP_ABORTED; - - /* Avoid compiler removing this: Drop non 802.3 Ethertypes */ - if (ntohs(eth->h_proto) < ETH_P_802_3_MIN) - return XDP_ABORTED; - - /* XDP_TX requires changing MAC-addrs, else HW may drop. - * Can also be enabled with --swapmac (for test purposes) - */ - if (unlikely(config->options & SWAP_MAC)) - swap_src_dst_mac(data); - } - - return config->action; -} - -char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c deleted file mode 100644 index b95e0ef61f06..000000000000 --- a/samples/bpf/xdp_rxq_info_user.c +++ /dev/null @@ -1,614 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc. - */ -static const char *__doc__ = " XDP RX-queue info extract example\n\n" - "Monitor how many packets per sec (pps) are received\n" - "per NIC RX queue index and which CPU processed the packet\n" - ; - -#include <errno.h> -#include <signal.h> -#include <stdio.h> -#include <stdlib.h> -#include <stdbool.h> -#include <string.h> -#include <unistd.h> -#include <locale.h> -#include <getopt.h> -#include <net/if.h> -#include <time.h> -#include <limits.h> -#include <arpa/inet.h> -#include <linux/if_link.h> - -#include <bpf/bpf.h> -#include <bpf/libbpf.h> -#include "bpf_util.h" - -static int ifindex = -1; -static char ifname_buf[IF_NAMESIZE]; -static char *ifname; -static __u32 prog_id; - -static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; - -static struct bpf_map *stats_global_map; -static struct bpf_map *rx_queue_index_map; - -/* Exit return codes */ -#define EXIT_OK 0 -#define EXIT_FAIL 1 -#define EXIT_FAIL_OPTION 2 -#define EXIT_FAIL_XDP 3 -#define EXIT_FAIL_BPF 4 -#define EXIT_FAIL_MEM 5 - -#define FAIL_MEM_SIG INT_MAX -#define FAIL_STAT_SIG (INT_MAX - 1) - -static const struct option long_options[] = { - {"help", no_argument, NULL, 'h' }, - {"dev", required_argument, NULL, 'd' }, - {"skb-mode", no_argument, NULL, 'S' }, - {"sec", required_argument, NULL, 's' }, - {"no-separators", no_argument, NULL, 'z' }, - {"action", required_argument, NULL, 'a' }, - {"readmem", no_argument, NULL, 'r' }, - {"swapmac", no_argument, NULL, 'm' }, - {"force", no_argument, NULL, 'F' }, - {0, 0, NULL, 0 } -}; - -static void int_exit(int sig) -{ - __u32 curr_prog_id = 0; - - if (ifindex > -1) { - if (bpf_xdp_query_id(ifindex, xdp_flags, &curr_prog_id)) { - printf("bpf_xdp_query_id failed\n"); - exit(EXIT_FAIL); - } - if (prog_id == curr_prog_id) { - fprintf(stderr, - "Interrupted: Removing XDP program on ifindex:%d device:%s\n", - ifindex, ifname); - bpf_xdp_detach(ifindex, xdp_flags, NULL); - } else if (!curr_prog_id) { - printf("couldn't find a prog id on a given iface\n"); - } else { - printf("program on interface changed, not removing\n"); - } - } - - if (sig == FAIL_MEM_SIG) - exit(EXIT_FAIL_MEM); - else if (sig == FAIL_STAT_SIG) - exit(EXIT_FAIL); - - exit(EXIT_OK); -} - -struct config { - __u32 action; - int ifindex; - __u32 options; -}; -enum cfg_options_flags { - NO_TOUCH = 0x0U, - READ_MEM = 0x1U, - SWAP_MAC = 0x2U, -}; -#define XDP_ACTION_MAX (XDP_TX + 1) -#define 
XDP_ACTION_MAX_STRLEN 11 -static const char *xdp_action_names[XDP_ACTION_MAX] = { - [XDP_ABORTED] = "XDP_ABORTED", - [XDP_DROP] = "XDP_DROP", - [XDP_PASS] = "XDP_PASS", - [XDP_TX] = "XDP_TX", -}; - -static const char *action2str(int action) -{ - if (action < XDP_ACTION_MAX) - return xdp_action_names[action]; - return NULL; -} - -static int parse_xdp_action(char *action_str) -{ - size_t maxlen; - __u64 action = -1; - int i; - - for (i = 0; i < XDP_ACTION_MAX; i++) { - maxlen = XDP_ACTION_MAX_STRLEN; - if (strncmp(xdp_action_names[i], action_str, maxlen) == 0) { - action = i; - break; - } - } - return action; -} - -static void list_xdp_actions(void) -{ - int i; - - printf("Available XDP --action <options>\n"); - for (i = 0; i < XDP_ACTION_MAX; i++) - printf("\t%s\n", xdp_action_names[i]); - printf("\n"); -} - -static char* options2str(enum cfg_options_flags flag) -{ - if (flag == NO_TOUCH) - return "no_touch"; - if (flag & SWAP_MAC) - return "swapmac"; - if (flag & READ_MEM) - return "read"; - fprintf(stderr, "ERR: Unknown config option flags"); - int_exit(FAIL_STAT_SIG); - return "unknown"; -} - -static void usage(char *argv[]) -{ - int i; - - printf("\nDOCUMENTATION:\n%s\n", __doc__); - printf(" Usage: %s (options-see-below)\n", argv[0]); - printf(" Listing options:\n"); - for (i = 0; long_options[i].name != 0; i++) { - printf(" --%-12s", long_options[i].name); - if (long_options[i].flag != NULL) - printf(" flag (internal value:%d)", - *long_options[i].flag); - else - printf(" short-option: -%c", - long_options[i].val); - printf("\n"); - } - printf("\n"); - list_xdp_actions(); -} - -#define NANOSEC_PER_SEC 1000000000 /* 10^9 */ -static __u64 gettime(void) -{ - struct timespec t; - int res; - - res = clock_gettime(CLOCK_MONOTONIC, &t); - if (res < 0) { - fprintf(stderr, "Error with gettimeofday! 
(%i)\n", res); - int_exit(FAIL_STAT_SIG); - } - return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec; -} - -/* Common stats data record shared with _kern.c */ -struct datarec { - __u64 processed; - __u64 issue; -}; -struct record { - __u64 timestamp; - struct datarec total; - struct datarec *cpu; -}; -struct stats_record { - struct record stats; - struct record *rxq; -}; - -static struct datarec *alloc_record_per_cpu(void) -{ - unsigned int nr_cpus = bpf_num_possible_cpus(); - struct datarec *array; - - array = calloc(nr_cpus, sizeof(struct datarec)); - if (!array) { - fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus); - int_exit(FAIL_MEM_SIG); - } - return array; -} - -static struct record *alloc_record_per_rxq(void) -{ - unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map); - struct record *array; - - array = calloc(nr_rxqs, sizeof(struct record)); - if (!array) { - fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs); - int_exit(FAIL_MEM_SIG); - } - return array; -} - -static struct stats_record *alloc_stats_record(void) -{ - unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map); - struct stats_record *rec; - int i; - - rec = calloc(1, sizeof(struct stats_record)); - if (!rec) { - fprintf(stderr, "Mem alloc error\n"); - int_exit(FAIL_MEM_SIG); - } - rec->rxq = alloc_record_per_rxq(); - for (i = 0; i < nr_rxqs; i++) - rec->rxq[i].cpu = alloc_record_per_cpu(); - - rec->stats.cpu = alloc_record_per_cpu(); - return rec; -} - -static void free_stats_record(struct stats_record *r) -{ - unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map); - int i; - - for (i = 0; i < nr_rxqs; i++) - free(r->rxq[i].cpu); - - free(r->rxq); - free(r->stats.cpu); - free(r); -} - -static bool map_collect_percpu(int fd, __u32 key, struct record *rec) -{ - /* For percpu maps, userspace gets a value per possible CPU */ - unsigned int nr_cpus = bpf_num_possible_cpus(); - struct datarec values[nr_cpus]; - __u64 sum_processed = 0; - __u64 sum_issue = 0; - int i; - - if ((bpf_map_lookup_elem(fd, &key, values)) != 0) { - fprintf(stderr, - "ERR: bpf_map_lookup_elem failed key:0x%X\n", key); - return false; - } - /* Get time as close as possible to reading map contents */ - rec->timestamp = gettime(); - - /* Record and sum values from each CPU */ - for (i = 0; i < nr_cpus; i++) { - rec->cpu[i].processed = values[i].processed; - sum_processed += values[i].processed; - rec->cpu[i].issue = values[i].issue; - sum_issue += values[i].issue; - } - rec->total.processed = sum_processed; - rec->total.issue = sum_issue; - return true; -} - -static void stats_collect(struct stats_record *rec) -{ - int fd, i, max_rxqs; - - fd = bpf_map__fd(stats_global_map); - map_collect_percpu(fd, 0, &rec->stats); - - fd = bpf_map__fd(rx_queue_index_map); - max_rxqs = bpf_map__max_entries(rx_queue_index_map); - for (i = 0; i < max_rxqs; i++) - map_collect_percpu(fd, i, &rec->rxq[i]); -} - -static double calc_period(struct record *r, struct record *p) -{ - double period_ = 0; - __u64 period = 0; - - period = r->timestamp - p->timestamp; - if (period > 0) - period_ = ((double) period / NANOSEC_PER_SEC); - - return period_; -} - -static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_) -{ - __u64 packets = 0; - __u64 pps = 0; - - if (period_ > 0) { - packets = r->processed - p->processed; - pps = packets / period_; - } - return pps; -} - -static __u64 calc_errs_pps(struct datarec *r, - struct datarec *p, double period_) -{ - __u64 packets = 0; - __u64 pps = 0; - - if (period_ > 0) { - 
packets = r->issue - p->issue; - pps = packets / period_; - } - return pps; -} - -static void stats_print(struct stats_record *stats_rec, - struct stats_record *stats_prev, - int action, __u32 cfg_opt) -{ - unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map); - unsigned int nr_cpus = bpf_num_possible_cpus(); - double pps = 0, err = 0; - struct record *rec, *prev; - double t; - int rxq; - int i; - - /* Header */ - printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s options:%s\n", - ifname, ifindex, action2str(action), options2str(cfg_opt)); - - /* stats_global_map */ - { - char *fmt_rx = "%-15s %-7d %'-11.0f %'-10.0f %s\n"; - char *fm2_rx = "%-15s %-7s %'-11.0f\n"; - char *errstr = ""; - - printf("%-15s %-7s %-11s %-11s\n", - "XDP stats", "CPU", "pps", "issue-pps"); - - rec = &stats_rec->stats; - prev = &stats_prev->stats; - t = calc_period(rec, prev); - for (i = 0; i < nr_cpus; i++) { - struct datarec *r = &rec->cpu[i]; - struct datarec *p = &prev->cpu[i]; - - pps = calc_pps (r, p, t); - err = calc_errs_pps(r, p, t); - if (err > 0) - errstr = "invalid-ifindex"; - if (pps > 0) - printf(fmt_rx, "XDP-RX CPU", - i, pps, err, errstr); - } - pps = calc_pps (&rec->total, &prev->total, t); - err = calc_errs_pps(&rec->total, &prev->total, t); - printf(fm2_rx, "XDP-RX CPU", "total", pps, err); - } - - /* rx_queue_index_map */ - printf("\n%-15s %-7s %-11s %-11s\n", - "RXQ stats", "RXQ:CPU", "pps", "issue-pps"); - - for (rxq = 0; rxq < nr_rxqs; rxq++) { - char *fmt_rx = "%-15s %3d:%-3d %'-11.0f %'-10.0f %s\n"; - char *fm2_rx = "%-15s %3d:%-3s %'-11.0f\n"; - char *errstr = ""; - int rxq_ = rxq; - - /* Last RXQ in map catch overflows */ - if (rxq_ == nr_rxqs - 1) - rxq_ = -1; - - rec = &stats_rec->rxq[rxq]; - prev = &stats_prev->rxq[rxq]; - t = calc_period(rec, prev); - for (i = 0; i < nr_cpus; i++) { - struct datarec *r = &rec->cpu[i]; - struct datarec *p = &prev->cpu[i]; - - pps = calc_pps (r, p, t); - err = calc_errs_pps(r, p, t); - if (err > 0) { - if (rxq_ == -1) - errstr = "map-overflow-RXQ"; - else - errstr = "err"; - } - if (pps > 0) - printf(fmt_rx, "rx_queue_index", - rxq_, i, pps, err, errstr); - } - pps = calc_pps (&rec->total, &prev->total, t); - err = calc_errs_pps(&rec->total, &prev->total, t); - if (pps || err) - printf(fm2_rx, "rx_queue_index", rxq_, "sum", pps, err); - } -} - - -/* Pointer swap trick */ -static inline void swap(struct stats_record **a, struct stats_record **b) -{ - struct stats_record *tmp; - - tmp = *a; - *a = *b; - *b = tmp; -} - -static void stats_poll(int interval, int action, __u32 cfg_opt) -{ - struct stats_record *record, *prev; - - record = alloc_stats_record(); - prev = alloc_stats_record(); - stats_collect(record); - - while (1) { - swap(&prev, &record); - stats_collect(record); - stats_print(record, prev, action, cfg_opt); - sleep(interval); - } - - free_stats_record(record); - free_stats_record(prev); -} - - -int main(int argc, char **argv) -{ - __u32 cfg_options= NO_TOUCH ; /* Default: Don't touch packet memory */ - struct bpf_prog_info info = {}; - __u32 info_len = sizeof(info); - int prog_fd, map_fd, opt, err; - bool use_separators = true; - struct config cfg = { 0 }; - struct bpf_program *prog; - struct bpf_object *obj; - struct bpf_map *map; - char filename[256]; - int longindex = 0; - int interval = 2; - __u32 key = 0; - - - char action_str_buf[XDP_ACTION_MAX_STRLEN + 1 /* for \0 */] = { 0 }; - int action = XDP_PASS; /* Default action */ - char *action_str = NULL; - - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); - - obj = 
bpf_object__open_file(filename, NULL); - if (libbpf_get_error(obj)) - return EXIT_FAIL; - - prog = bpf_object__next_program(obj, NULL); - bpf_program__set_type(prog, BPF_PROG_TYPE_XDP); - - err = bpf_object__load(obj); - if (err) - return EXIT_FAIL; - prog_fd = bpf_program__fd(prog); - - map = bpf_object__find_map_by_name(obj, "config_map"); - stats_global_map = bpf_object__find_map_by_name(obj, "stats_global_map"); - rx_queue_index_map = bpf_object__find_map_by_name(obj, "rx_queue_index_map"); - if (!map || !stats_global_map || !rx_queue_index_map) { - printf("finding a map in obj file failed\n"); - return EXIT_FAIL; - } - map_fd = bpf_map__fd(map); - - if (!prog_fd) { - fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n", strerror(errno)); - return EXIT_FAIL; - } - - /* Parse commands line args */ - while ((opt = getopt_long(argc, argv, "FhSrmzd:s:a:", - long_options, &longindex)) != -1) { - switch (opt) { - case 'd': - if (strlen(optarg) >= IF_NAMESIZE) { - fprintf(stderr, "ERR: --dev name too long\n"); - goto error; - } - ifname = (char *)&ifname_buf; - strncpy(ifname, optarg, IF_NAMESIZE); - ifindex = if_nametoindex(ifname); - if (ifindex == 0) { - fprintf(stderr, - "ERR: --dev name unknown err(%d):%s\n", - errno, strerror(errno)); - goto error; - } - break; - case 's': - interval = atoi(optarg); - break; - case 'S': - xdp_flags |= XDP_FLAGS_SKB_MODE; - break; - case 'z': - use_separators = false; - break; - case 'a': - action_str = (char *)&action_str_buf; - strncpy(action_str, optarg, XDP_ACTION_MAX_STRLEN); - break; - case 'r': - cfg_options |= READ_MEM; - break; - case 'm': - cfg_options |= SWAP_MAC; - break; - case 'F': - xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST; - break; - case 'h': - error: - default: - usage(argv); - return EXIT_FAIL_OPTION; - } - } - - if (!(xdp_flags & XDP_FLAGS_SKB_MODE)) - xdp_flags |= XDP_FLAGS_DRV_MODE; - - /* Required option */ - if (ifindex == -1) { - fprintf(stderr, "ERR: required option --dev missing\n"); - usage(argv); - return EXIT_FAIL_OPTION; - } - cfg.ifindex = ifindex; - - /* Parse action string */ - if (action_str) { - action = parse_xdp_action(action_str); - if (action < 0) { - fprintf(stderr, "ERR: Invalid XDP --action: %s\n", - action_str); - list_xdp_actions(); - return EXIT_FAIL_OPTION; - } - } - cfg.action = action; - - /* XDP_TX requires changing MAC-addrs, else HW may drop */ - if (action == XDP_TX) - cfg_options |= SWAP_MAC; - cfg.options = cfg_options; - - /* Trick to pretty printf with thousands separators use %' */ - if (use_separators) - setlocale(LC_NUMERIC, "en_US"); - - /* User-side setup ifindex in config_map */ - err = bpf_map_update_elem(map_fd, &key, &cfg, 0); - if (err) { - fprintf(stderr, "Store config failed (err:%d)\n", err); - exit(EXIT_FAIL_BPF); - } - - /* Remove XDP program when program is interrupted or killed */ - signal(SIGINT, int_exit); - signal(SIGTERM, int_exit); - - if (bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL) < 0) { - fprintf(stderr, "link set xdp fd failed\n"); - return EXIT_FAIL_XDP; - } - - err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); - if (err) { - printf("can't get prog info - %s\n", strerror(errno)); - return err; - } - prog_id = info.id; - - stats_poll(interval, action, cfg_options); - return EXIT_OK; -} diff --git a/samples/bpf/xdp_sample_pkts_kern.c b/samples/bpf/xdp_sample_pkts_kern.c deleted file mode 100644 index 9cf76b340dd7..000000000000 --- a/samples/bpf/xdp_sample_pkts_kern.c +++ /dev/null @@ -1,57 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/ptrace.h> 
-#include <linux/version.h> -#include <uapi/linux/bpf.h> -#include <bpf/bpf_helpers.h> - -#define SAMPLE_SIZE 64ul - -struct { - __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); - __uint(key_size, sizeof(int)); - __uint(value_size, sizeof(u32)); -} my_map SEC(".maps"); - -SEC("xdp_sample") -int xdp_sample_prog(struct xdp_md *ctx) -{ - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - - /* Metadata will be in the perf event before the packet data. */ - struct S { - u16 cookie; - u16 pkt_len; - } __packed metadata; - - if (data < data_end) { - /* The XDP perf_event_output handler will use the upper 32 bits - * of the flags argument as a number of bytes to include of the - * packet payload in the event data. If the size is too big, the - * call to bpf_perf_event_output will fail and return -EFAULT. - * - * See bpf_xdp_event_output in net/core/filter.c. - * - * The BPF_F_CURRENT_CPU flag means that the event output fd - * will be indexed by the CPU number in the event map. - */ - u64 flags = BPF_F_CURRENT_CPU; - u16 sample_size; - int ret; - - metadata.cookie = 0xdead; - metadata.pkt_len = (u16)(data_end - data); - sample_size = min(metadata.pkt_len, SAMPLE_SIZE); - flags |= (u64)sample_size << 32; - - ret = bpf_perf_event_output(ctx, &my_map, flags, - &metadata, sizeof(metadata)); - if (ret) - bpf_printk("perf_event_output failed: %d\n", ret); - } - - return XDP_PASS; -} - -char _license[] SEC("license") = "GPL"; -u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c deleted file mode 100644 index e39d7f654f30..000000000000 --- a/samples/bpf/xdp_sample_pkts_user.c +++ /dev/null @@ -1,196 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <linux/perf_event.h> -#include <linux/bpf.h> -#include <net/if.h> -#include <errno.h> -#include <assert.h> -#include <sys/sysinfo.h> -#include <sys/ioctl.h> -#include <signal.h> -#include <bpf/libbpf.h> -#include <bpf/bpf.h> -#include <libgen.h> -#include <linux/if_link.h> - -#include "perf-sys.h" - -static int if_idx; -static char *if_name; -static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; -static __u32 prog_id; -static struct perf_buffer *pb = NULL; - -static int do_attach(int idx, int fd, const char *name) -{ - struct bpf_prog_info info = {}; - __u32 info_len = sizeof(info); - int err; - - err = bpf_xdp_attach(idx, fd, xdp_flags, NULL); - if (err < 0) { - printf("ERROR: failed to attach program to %s\n", name); - return err; - } - - err = bpf_prog_get_info_by_fd(fd, &info, &info_len); - if (err) { - printf("can't get prog info - %s\n", strerror(errno)); - return err; - } - prog_id = info.id; - - return err; -} - -static int do_detach(int idx, const char *name) -{ - __u32 curr_prog_id = 0; - int err = 0; - - err = bpf_xdp_query_id(idx, xdp_flags, &curr_prog_id); - if (err) { - printf("bpf_xdp_query_id failed\n"); - return err; - } - if (prog_id == curr_prog_id) { - err = bpf_xdp_detach(idx, xdp_flags, NULL); - if (err < 0) - printf("ERROR: failed to detach prog from %s\n", name); - } else if (!curr_prog_id) { - printf("couldn't find a prog id on a %s\n", name); - } else { - printf("program on interface changed, not removing\n"); - } - - return err; -} - -#define SAMPLE_SIZE 64 - -static void print_bpf_output(void *ctx, int cpu, void *data, __u32 size) -{ - struct { - __u16 cookie; - __u16 pkt_len; - __u8 pkt_data[SAMPLE_SIZE]; - } __packed *e = data; - int i; - - if 
(e->cookie != 0xdead) { - printf("BUG cookie %x sized %d\n", e->cookie, size); - return; - } - - printf("Pkt len: %-5d bytes. Ethernet hdr: ", e->pkt_len); - for (i = 0; i < 14 && i < e->pkt_len; i++) - printf("%02x ", e->pkt_data[i]); - printf("\n"); -} - -static void sig_handler(int signo) -{ - do_detach(if_idx, if_name); - perf_buffer__free(pb); - exit(0); -} - -static void usage(const char *prog) -{ - fprintf(stderr, - "%s: %s [OPTS] <ifname|ifindex>\n\n" - "OPTS:\n" - " -F force loading prog\n" - " -S use skb-mode\n", - __func__, prog); -} - -int main(int argc, char **argv) -{ - const char *optstr = "FS"; - int prog_fd, map_fd, opt; - struct bpf_program *prog; - struct bpf_object *obj; - struct bpf_map *map; - char filename[256]; - int ret, err; - - while ((opt = getopt(argc, argv, optstr)) != -1) { - switch (opt) { - case 'F': - xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST; - break; - case 'S': - xdp_flags |= XDP_FLAGS_SKB_MODE; - break; - default: - usage(basename(argv[0])); - return 1; - } - } - - if (!(xdp_flags & XDP_FLAGS_SKB_MODE)) - xdp_flags |= XDP_FLAGS_DRV_MODE; - - if (optind == argc) { - usage(basename(argv[0])); - return 1; - } - - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); - - obj = bpf_object__open_file(filename, NULL); - if (libbpf_get_error(obj)) - return 1; - - prog = bpf_object__next_program(obj, NULL); - bpf_program__set_type(prog, BPF_PROG_TYPE_XDP); - - err = bpf_object__load(obj); - if (err) - return 1; - - prog_fd = bpf_program__fd(prog); - - map = bpf_object__next_map(obj, NULL); - if (!map) { - printf("finding a map in obj file failed\n"); - return 1; - } - map_fd = bpf_map__fd(map); - - if_idx = if_nametoindex(argv[optind]); - if (!if_idx) - if_idx = strtoul(argv[optind], NULL, 0); - - if (!if_idx) { - fprintf(stderr, "Invalid ifname\n"); - return 1; - } - if_name = argv[optind]; - err = do_attach(if_idx, prog_fd, if_name); - if (err) - return err; - - if (signal(SIGINT, sig_handler) || - signal(SIGHUP, sig_handler) || - signal(SIGTERM, sig_handler)) { - perror("signal"); - return 1; - } - - pb = perf_buffer__new(map_fd, 8, print_bpf_output, NULL, NULL, NULL); - err = libbpf_get_error(pb); - if (err) { - perror("perf_buffer setup failed"); - return 1; - } - - while ((ret = perf_buffer__poll(pb, 1000)) >= 0) { - } - - kill(0, SIGINT); - return ret; -} diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index d21deb46f49f..8790b3962e4b 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1039,6 +1039,7 @@ enum bpf_attach_type { BPF_NETFILTER, BPF_TCX_INGRESS, BPF_TCX_EGRESS, + BPF_TRACE_UPROBE_MULTI, __MAX_BPF_ATTACH_TYPE }; @@ -1057,6 +1058,7 @@ enum bpf_link_type { BPF_LINK_TYPE_STRUCT_OPS = 9, BPF_LINK_TYPE_NETFILTER = 10, BPF_LINK_TYPE_TCX = 11, + BPF_LINK_TYPE_UPROBE_MULTI = 12, MAX_BPF_LINK_TYPE, }; @@ -1186,7 +1188,16 @@ enum bpf_perf_event_type { /* link_create.kprobe_multi.flags used in LINK_CREATE command for * BPF_TRACE_KPROBE_MULTI attach type to create return probe. */ -#define BPF_F_KPROBE_MULTI_RETURN (1U << 0) +enum { + BPF_F_KPROBE_MULTI_RETURN = (1U << 0) +}; + +/* link_create.uprobe_multi.flags used in LINK_CREATE command for + * BPF_TRACE_UPROBE_MULTI attach type to create return probe. + */ +enum { + BPF_F_UPROBE_MULTI_RETURN = (1U << 0) +}; /* link_create.netfilter.flags used in LINK_CREATE command for * BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation. 
@@ -1624,6 +1635,15 @@ union bpf_attr { }; __u64 expected_revision; } tcx; + struct { + __aligned_u64 path; + __aligned_u64 offsets; + __aligned_u64 ref_ctr_offsets; + __aligned_u64 cookies; + __u32 cnt; + __u32 flags; + __u32 pid; + } uprobe_multi; }; } link_create; diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index b8b0a6369363..2d0c282c8588 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1,4 +1,4 @@ libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \ netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \ btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \ - usdt.o zip.o + usdt.o zip.o elf.o diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index c9b6b311a441..b0f1913763a3 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -767,6 +767,17 @@ int bpf_link_create(int prog_fd, int target_fd, if (!OPTS_ZEROED(opts, kprobe_multi)) return libbpf_err(-EINVAL); break; + case BPF_TRACE_UPROBE_MULTI: + attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0); + attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0); + attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0)); + attr.link_create.uprobe_multi.offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.offsets, 0)); + attr.link_create.uprobe_multi.ref_ctr_offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.ref_ctr_offsets, 0)); + attr.link_create.uprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, uprobe_multi.cookies, 0)); + attr.link_create.uprobe_multi.pid = OPTS_GET(opts, uprobe_multi.pid, 0); + if (!OPTS_ZEROED(opts, uprobe_multi)) + return libbpf_err(-EINVAL); + break; case BPF_TRACE_FENTRY: case BPF_TRACE_FEXIT: case BPF_MODIFY_RETURN: diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 044a74ffc38a..74c2887cfd24 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -393,6 +393,15 @@ struct bpf_link_create_opts { const __u64 *cookies; } kprobe_multi; struct { + __u32 flags; + __u32 cnt; + const char *path; + const unsigned long *offsets; + const unsigned long *ref_ctr_offsets; + const __u64 *cookies; + __u32 pid; + } uprobe_multi; + struct { __u64 cookie; } tracing; struct { @@ -409,7 +418,7 @@ struct bpf_link_create_opts { }; size_t :0; }; -#define bpf_link_create_opts__last_field kprobe_multi.cookies +#define bpf_link_create_opts__last_field uprobe_multi.pid LIBBPF_API int bpf_link_create(int prog_fd, int target_fd, enum bpf_attach_type attach_type, diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c new file mode 100644 index 000000000000..9d0296c1726a --- /dev/null +++ b/tools/lib/bpf/elf.c @@ -0,0 +1,440 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +#include <libelf.h> +#include <gelf.h> +#include <fcntl.h> +#include <linux/kernel.h> + +#include "libbpf_internal.h" +#include "str_error.h" + +#define STRERR_BUFSIZE 128 + +int elf_open(const char *binary_path, struct elf_fd *elf_fd) +{ + char errmsg[STRERR_BUFSIZE]; + int fd, ret; + Elf *elf; + + if (elf_version(EV_CURRENT) == EV_NONE) { + pr_warn("elf: failed to init libelf for %s\n", binary_path); + return -LIBBPF_ERRNO__LIBELF; + } + fd = open(binary_path, O_RDONLY | O_CLOEXEC); + if (fd < 0) { + ret = -errno; + pr_warn("elf: failed to open %s: %s\n", binary_path, + libbpf_strerror_r(ret, errmsg, sizeof(errmsg))); + return ret; + } + elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); + if (!elf) { + pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1)); + close(fd); + return -LIBBPF_ERRNO__FORMAT; + } 
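/* Success: hand the open descriptor and the Elf handle back to the caller
 * through struct elf_fd; elf_close() releases both together.
 */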
+ elf_fd->fd = fd; + elf_fd->elf = elf; + return 0; +} + +void elf_close(struct elf_fd *elf_fd) +{ + if (!elf_fd) + return; + elf_end(elf_fd->elf); + close(elf_fd->fd); +} + +/* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */ +static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn) +{ + while ((scn = elf_nextscn(elf, scn)) != NULL) { + GElf_Shdr sh; + + if (!gelf_getshdr(scn, &sh)) + continue; + if (sh.sh_type == sh_type) + return scn; + } + return NULL; +} + +struct elf_sym { + const char *name; + GElf_Sym sym; + GElf_Shdr sh; +}; + +struct elf_sym_iter { + Elf *elf; + Elf_Data *syms; + size_t nr_syms; + size_t strtabidx; + size_t next_sym_idx; + struct elf_sym sym; + int st_type; +}; + +static int elf_sym_iter_new(struct elf_sym_iter *iter, + Elf *elf, const char *binary_path, + int sh_type, int st_type) +{ + Elf_Scn *scn = NULL; + GElf_Ehdr ehdr; + GElf_Shdr sh; + + memset(iter, 0, sizeof(*iter)); + + if (!gelf_getehdr(elf, &ehdr)) { + pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1)); + return -EINVAL; + } + + scn = elf_find_next_scn_by_type(elf, sh_type, NULL); + if (!scn) { + pr_debug("elf: failed to find symbol table ELF sections in '%s'\n", + binary_path); + return -ENOENT; + } + + if (!gelf_getshdr(scn, &sh)) + return -EINVAL; + + iter->strtabidx = sh.sh_link; + iter->syms = elf_getdata(scn, 0); + if (!iter->syms) { + pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n", + binary_path, elf_errmsg(-1)); + return -EINVAL; + } + iter->nr_syms = iter->syms->d_size / sh.sh_entsize; + iter->elf = elf; + iter->st_type = st_type; + return 0; +} + +static struct elf_sym *elf_sym_iter_next(struct elf_sym_iter *iter) +{ + struct elf_sym *ret = &iter->sym; + GElf_Sym *sym = &ret->sym; + const char *name = NULL; + Elf_Scn *sym_scn; + size_t idx; + + for (idx = iter->next_sym_idx; idx < iter->nr_syms; idx++) { + if (!gelf_getsym(iter->syms, idx, sym)) + continue; + if (GELF_ST_TYPE(sym->st_info) != iter->st_type) + continue; + name = elf_strptr(iter->elf, iter->strtabidx, sym->st_name); + if (!name) + continue; + sym_scn = elf_getscn(iter->elf, sym->st_shndx); + if (!sym_scn) + continue; + if (!gelf_getshdr(sym_scn, &ret->sh)) + continue; + + iter->next_sym_idx = idx + 1; + ret->name = name; + return ret; + } + + return NULL; +} + + +/* Transform symbol's virtual address (absolute for binaries and relative + * for shared libs) into file offset, which is what kernel is expecting + * for uprobe/uretprobe attachment. + * See Documentation/trace/uprobetracer.rst for more details. This is done + * by looking up symbol's containing section's header and using iter's virtual + * address (sh_addr) and corresponding file offset (sh_offset) to transform + * sym.st_value (virtual address) into desired final file offset. + */ +static unsigned long elf_sym_offset(struct elf_sym *sym) +{ + return sym->sym.st_value - sym->sh.sh_addr + sym->sh.sh_offset; +} + +/* Find offset of function name in the provided ELF object. "binary_path" is + * the path to the ELF binary represented by "elf", and only used for error + * reporting matters. "name" matches symbol name or name@@LIB for library + * functions. 
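 * For example, asking for "foo" also accepts a versioned spelling such as
 * "foo@@SOME_VERSION" found in the symbol table, but not an unrelated "foo2";
 * passing the fully qualified "foo@@SOME_VERSION" restricts the match to that
 * spelling (the version name here is illustrative).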
+ */ +long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name) +{ + int i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB }; + bool is_shared_lib, is_name_qualified; + long ret = -ENOENT; + size_t name_len; + GElf_Ehdr ehdr; + + if (!gelf_getehdr(elf, &ehdr)) { + pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1)); + ret = -LIBBPF_ERRNO__FORMAT; + goto out; + } + /* for shared lib case, we do not need to calculate relative offset */ + is_shared_lib = ehdr.e_type == ET_DYN; + + name_len = strlen(name); + /* Does name specify "@@LIB"? */ + is_name_qualified = strstr(name, "@@") != NULL; + + /* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if + * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically + * linked binary may not have SHT_DYMSYM, so absence of a section should not be + * reported as a warning/error. + */ + for (i = 0; i < ARRAY_SIZE(sh_types); i++) { + struct elf_sym_iter iter; + struct elf_sym *sym; + int last_bind = -1; + int cur_bind; + + ret = elf_sym_iter_new(&iter, elf, binary_path, sh_types[i], STT_FUNC); + if (ret == -ENOENT) + continue; + if (ret) + goto out; + + while ((sym = elf_sym_iter_next(&iter))) { + /* User can specify func, func@@LIB or func@@LIB_VERSION. */ + if (strncmp(sym->name, name, name_len) != 0) + continue; + /* ...but we don't want a search for "foo" to match 'foo2" also, so any + * additional characters in sname should be of the form "@@LIB". + */ + if (!is_name_qualified && sym->name[name_len] != '\0' && sym->name[name_len] != '@') + continue; + + cur_bind = GELF_ST_BIND(sym->sym.st_info); + + if (ret > 0) { + /* handle multiple matches */ + if (last_bind != STB_WEAK && cur_bind != STB_WEAK) { + /* Only accept one non-weak bind. */ + pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n", + sym->name, name, binary_path); + ret = -LIBBPF_ERRNO__FORMAT; + goto out; + } else if (cur_bind == STB_WEAK) { + /* already have a non-weak bind, and + * this is a weak bind, so ignore. + */ + continue; + } + } + + ret = elf_sym_offset(sym); + last_bind = cur_bind; + } + if (ret > 0) + break; + } + + if (ret > 0) { + pr_debug("elf: symbol address match for '%s' in '%s': 0x%lx\n", name, binary_path, + ret); + } else { + if (ret == 0) { + pr_warn("elf: '%s' is 0 in symtab for '%s': %s\n", name, binary_path, + is_shared_lib ? "should not be 0 in a shared library" : + "try using shared library path instead"); + ret = -ENOENT; + } else { + pr_warn("elf: failed to find symbol '%s' in '%s'\n", name, binary_path); + } + } +out: + return ret; +} + +/* Find offset of function name in ELF object specified by path. "name" matches + * symbol name or name@@LIB for library functions. + */ +long elf_find_func_offset_from_file(const char *binary_path, const char *name) +{ + struct elf_fd elf_fd; + long ret = -ENOENT; + + ret = elf_open(binary_path, &elf_fd); + if (ret) + return ret; + ret = elf_find_func_offset(elf_fd.elf, binary_path, name); + elf_close(&elf_fd); + return ret; +} + +struct symbol { + const char *name; + int bind; + int idx; +}; + +static int symbol_cmp(const void *a, const void *b) +{ + const struct symbol *sym_a = a; + const struct symbol *sym_b = b; + + return strcmp(sym_a->name, sym_b->name); +} + +/* + * Return offsets in @poffsets for symbols specified in @syms array argument. + * On success returns 0 and offsets are returned in allocated array with @cnt + * size, that needs to be released by the caller. 
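 *
 * A minimal usage sketch (the library path below is hypothetical):
 *
 *   const char *syms[] = { "malloc", "free" };
 *   unsigned long *offsets = NULL;
 *
 *   if (!elf_resolve_syms_offsets("/usr/lib/libc.so.6", 2, syms, &offsets)) {
 *           /* offsets[0], offsets[1] are file offsets suitable for uprobes */
 *           free(offsets);
 *   }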
+ */ +int elf_resolve_syms_offsets(const char *binary_path, int cnt, + const char **syms, unsigned long **poffsets) +{ + int sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB }; + int err = 0, i, cnt_done = 0; + unsigned long *offsets; + struct symbol *symbols; + struct elf_fd elf_fd; + + err = elf_open(binary_path, &elf_fd); + if (err) + return err; + + offsets = calloc(cnt, sizeof(*offsets)); + symbols = calloc(cnt, sizeof(*symbols)); + + if (!offsets || !symbols) { + err = -ENOMEM; + goto out; + } + + for (i = 0; i < cnt; i++) { + symbols[i].name = syms[i]; + symbols[i].idx = i; + } + + qsort(symbols, cnt, sizeof(*symbols), symbol_cmp); + + for (i = 0; i < ARRAY_SIZE(sh_types); i++) { + struct elf_sym_iter iter; + struct elf_sym *sym; + + err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], STT_FUNC); + if (err == -ENOENT) + continue; + if (err) + goto out; + + while ((sym = elf_sym_iter_next(&iter))) { + unsigned long sym_offset = elf_sym_offset(sym); + int bind = GELF_ST_BIND(sym->sym.st_info); + struct symbol *found, tmp = { + .name = sym->name, + }; + unsigned long *offset; + + found = bsearch(&tmp, symbols, cnt, sizeof(*symbols), symbol_cmp); + if (!found) + continue; + + offset = &offsets[found->idx]; + if (*offset > 0) { + /* same offset, no problem */ + if (*offset == sym_offset) + continue; + /* handle multiple matches */ + if (found->bind != STB_WEAK && bind != STB_WEAK) { + /* Only accept one non-weak bind. */ + pr_warn("elf: ambiguous match found '%s@%lu' in '%s' previous offset %lu\n", + sym->name, sym_offset, binary_path, *offset); + err = -ESRCH; + goto out; + } else if (bind == STB_WEAK) { + /* already have a non-weak bind, and + * this is a weak bind, so ignore. + */ + continue; + } + } else { + cnt_done++; + } + *offset = sym_offset; + found->bind = bind; + } + } + + if (cnt != cnt_done) { + err = -ENOENT; + goto out; + } + + *poffsets = offsets; + +out: + free(symbols); + if (err) + free(offsets); + elf_close(&elf_fd); + return err; +} + +/* + * Return offsets in @poffsets for symbols specified by @pattern argument. + * On success returns 0 and offsets are returned in allocated @poffsets + * array with the @pctn size, that needs to be released by the caller. + */ +int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern, + unsigned long **poffsets, size_t *pcnt) +{ + int sh_types[2] = { SHT_SYMTAB, SHT_DYNSYM }; + unsigned long *offsets = NULL; + size_t cap = 0, cnt = 0; + struct elf_fd elf_fd; + int err = 0, i; + + err = elf_open(binary_path, &elf_fd); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(sh_types); i++) { + struct elf_sym_iter iter; + struct elf_sym *sym; + + err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], STT_FUNC); + if (err == -ENOENT) + continue; + if (err) + goto out; + + while ((sym = elf_sym_iter_next(&iter))) { + if (!glob_match(sym->name, pattern)) + continue; + + err = libbpf_ensure_mem((void **) &offsets, &cap, sizeof(*offsets), + cnt + 1); + if (err) + goto out; + + offsets[cnt++] = elf_sym_offset(sym); + } + + /* If we found anything in the first symbol section, + * do not search others to avoid duplicates. 
+ */ + if (cnt) + break; + } + + if (cnt) { + *poffsets = offsets; + *pcnt = cnt; + } else { + err = -ENOENT; + } + +out: + if (err) + free(offsets); + elf_close(&elf_fd); + return err; +} diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index b14a4376a86e..96ff1aa4bf6a 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -120,6 +120,7 @@ static const char * const attach_type_name[] = { [BPF_NETFILTER] = "netfilter", [BPF_TCX_INGRESS] = "tcx_ingress", [BPF_TCX_EGRESS] = "tcx_egress", + [BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi", }; static const char * const link_type_name[] = { @@ -135,6 +136,7 @@ static const char * const link_type_name[] = { [BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops", [BPF_LINK_TYPE_NETFILTER] = "netfilter", [BPF_LINK_TYPE_TCX] = "tcx", + [BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi", }; static const char * const map_type_name[] = { @@ -365,6 +367,8 @@ enum sec_def_flags { SEC_SLEEPABLE = 8, /* BPF program support non-linear XDP buffer */ SEC_XDP_FRAGS = 16, + /* Setup proper attach type for usdt probes. */ + SEC_USDT = 32, }; struct bpf_sec_def { @@ -550,6 +554,7 @@ struct extern_desc { int btf_id; int sec_btf_id; const char *name; + char *essent_name; bool is_set; bool is_weak; union { @@ -3770,6 +3775,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj) struct extern_desc *ext; int i, n, off, dummy_var_btf_id; const char *ext_name, *sec_name; + size_t ext_essent_len; Elf_Scn *scn; Elf64_Shdr *sh; @@ -3819,6 +3825,14 @@ static int bpf_object__collect_externs(struct bpf_object *obj) ext->sym_idx = i; ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK; + ext_essent_len = bpf_core_essential_name_len(ext->name); + ext->essent_name = NULL; + if (ext_essent_len != strlen(ext->name)) { + ext->essent_name = strndup(ext->name, ext_essent_len); + if (!ext->essent_name) + return -ENOMEM; + } + ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); if (ext->sec_btf_id <= 0) { pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n", @@ -4817,6 +4831,39 @@ static int probe_perf_link(void) return link_fd < 0 && err == -EBADF; } +static int probe_uprobe_multi_link(void) +{ + LIBBPF_OPTS(bpf_prog_load_opts, load_opts, + .expected_attach_type = BPF_TRACE_UPROBE_MULTI, + ); + LIBBPF_OPTS(bpf_link_create_opts, link_opts); + struct bpf_insn insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + int prog_fd, link_fd, err; + unsigned long offset = 0; + + prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", + insns, ARRAY_SIZE(insns), &load_opts); + if (prog_fd < 0) + return -errno; + + /* Creating uprobe in '/' binary should fail with -EBADF. 
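 * Kernels that do not know the BPF_TRACE_UPROBE_MULTI attach type fail
 * earlier, at program load or link creation (typically with -EINVAL), so
 * only a kernel that parsed the uprobe_multi attributes and tried to open
 * '/' (a directory rather than a regular file) gets as far as -EBADF.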
*/ + link_opts.uprobe_multi.path = "/"; + link_opts.uprobe_multi.offsets = &offset; + link_opts.uprobe_multi.cnt = 1; + + link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts); + err = -errno; /* close() can clobber errno */ + + if (link_fd >= 0) + close(link_fd); + close(prog_fd); + + return link_fd < 0 && err == -EBADF; +} + static int probe_kern_bpf_cookie(void) { struct bpf_insn insns[] = { @@ -4913,6 +4960,9 @@ static struct kern_feature_desc { [FEAT_SYSCALL_WRAPPER] = { "Kernel using syscall wrapper", probe_kern_syscall_wrapper, }, + [FEAT_UPROBE_MULTI_LINK] = { + "BPF multi-uprobe link support", probe_uprobe_multi_link, + }, }; bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id) @@ -6780,6 +6830,10 @@ static int libbpf_prepare_prog_load(struct bpf_program *prog, if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS)) opts->prog_flags |= BPF_F_XDP_HAS_FRAGS; + /* special check for usdt to use uprobe_multi link */ + if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK)) + prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI; + if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) { int btf_obj_fd = 0, btf_type_id = 0, err; const char *attach_name; @@ -6848,7 +6902,6 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog if (!insns || !insns_cnt) return -EINVAL; - load_attr.expected_attach_type = prog->expected_attach_type; if (kernel_supports(obj, FEAT_PROG_NAME)) prog_name = prog->name; load_attr.attach_prog_fd = prog->attach_prog_fd; @@ -6884,6 +6937,9 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog insns_cnt = prog->insns_cnt; } + /* allow prog_prepare_load_fn to change expected_attach_type */ + load_attr.expected_attach_type = prog->expected_attach_type; + if (obj->gen_loader) { bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name, license, insns, insns_cnt, &load_attr, @@ -7624,7 +7680,8 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, local_func_proto_id = ext->ksym.type_id; - kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf); + kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf, + &mod_btf); if (kfunc_id < 0) { if (kfunc_id == -ESRCH && ext->is_weak) return 0; @@ -7639,6 +7696,9 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id, kern_btf, kfunc_proto_id); if (ret <= 0) { + if (ext->is_weak) + return 0; + pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n", ext->name, local_func_proto_id, mod_btf ? 
mod_btf->name : "vmlinux", kfunc_proto_id); @@ -8316,6 +8376,21 @@ int bpf_object__pin(struct bpf_object *obj, const char *path) return 0; } +int bpf_object__unpin(struct bpf_object *obj, const char *path) +{ + int err; + + err = bpf_object__unpin_programs(obj, path); + if (err) + return libbpf_err(err); + + err = bpf_object__unpin_maps(obj, path); + if (err) + return libbpf_err(err); + + return 0; +} + static void bpf_map__destroy(struct bpf_map *map) { if (map->inner_map) { @@ -8363,6 +8438,7 @@ void bpf_object__close(struct bpf_object *obj) bpf_object__elf_finish(obj); bpf_object_unload(obj); btf__free(obj->btf); + btf__free(obj->btf_vmlinux); btf_ext__free(obj->btf_ext); for (i = 0; i < obj->nr_maps; i++) @@ -8370,6 +8446,10 @@ void bpf_object__close(struct bpf_object *obj) zfree(&obj->btf_custom_path); zfree(&obj->kconfig); + + for (i = 0; i < obj->nr_extern; i++) + zfree(&obj->externs[i].essent_name); + zfree(&obj->externs); obj->nr_extern = 0; @@ -8681,6 +8761,7 @@ static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_lin static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link); @@ -8696,9 +8777,14 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe), SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), + SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), + SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), + SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), + SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), - SEC_DEF("usdt+", KPROBE, 0, SEC_NONE, attach_usdt), + SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt), + SEC_DEF("usdt.s+", KPROBE, 0, SEC_USDT | SEC_SLEEPABLE, attach_usdt), SEC_DEF("tc/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */ SEC_DEF("tc/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), /* alias for tcx */ SEC_DEF("tcx/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), @@ -10549,7 +10635,7 @@ struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog, } /* Adapted from perf/util/string.c */ -static bool glob_match(const char *str, const char *pat) +bool glob_match(const char *str, const char *pat) { while (*str && *pat && *pat != '*') { if (*pat == '?') { /* Matches any single character */ @@ -10902,6 +10988,37 @@ static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, stru return libbpf_get_error(*link); } +static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) +{ + char *probe_type = NULL, *binary_path = NULL, *func_name = NULL; + 
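/* The parser below expects section names of the form
 * "uprobe.multi/<binary path>:<glob pattern>", plus the uretprobe.multi and
 * sleepable ".s" variants, e.g. (the libc path and pattern are illustrative):
 *
 *   SEC("uprobe.multi//usr/lib/libc.so.6:malloc*")
 *   int handle_malloc(struct pt_regs *ctx)
 *   {
 *           return 0;
 *   }
 *
 * A bare SEC("uprobe.multi") is also accepted, but then auto-attach is
 * skipped (the n == 1 case below).
 */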
LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); + int n, ret = -EINVAL; + + *link = NULL; + + n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%ms", + &probe_type, &binary_path, &func_name); + switch (n) { + case 1: + /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */ + ret = 0; + break; + case 3: + opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0; + *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts); + ret = libbpf_get_error(*link); + break; + default: + pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name, + prog->sec_name); + break; + } + free(probe_type); + free(binary_path); + free(func_name); + return ret; +} + static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz, const char *binary_path, uint64_t offset) { @@ -10984,191 +11101,6 @@ err_clean_legacy: return err; } -/* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */ -static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn) -{ - while ((scn = elf_nextscn(elf, scn)) != NULL) { - GElf_Shdr sh; - - if (!gelf_getshdr(scn, &sh)) - continue; - if (sh.sh_type == sh_type) - return scn; - } - return NULL; -} - -/* Find offset of function name in the provided ELF object. "binary_path" is - * the path to the ELF binary represented by "elf", and only used for error - * reporting matters. "name" matches symbol name or name@@LIB for library - * functions. - */ -static long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name) -{ - int i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB }; - bool is_shared_lib, is_name_qualified; - long ret = -ENOENT; - size_t name_len; - GElf_Ehdr ehdr; - - if (!gelf_getehdr(elf, &ehdr)) { - pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1)); - ret = -LIBBPF_ERRNO__FORMAT; - goto out; - } - /* for shared lib case, we do not need to calculate relative offset */ - is_shared_lib = ehdr.e_type == ET_DYN; - - name_len = strlen(name); - /* Does name specify "@@LIB"? */ - is_name_qualified = strstr(name, "@@") != NULL; - - /* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if - * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically - * linked binary may not have SHT_DYMSYM, so absence of a section should not be - * reported as a warning/error. - */ - for (i = 0; i < ARRAY_SIZE(sh_types); i++) { - size_t nr_syms, strtabidx, idx; - Elf_Data *symbols = NULL; - Elf_Scn *scn = NULL; - int last_bind = -1; - const char *sname; - GElf_Shdr sh; - - scn = elf_find_next_scn_by_type(elf, sh_types[i], NULL); - if (!scn) { - pr_debug("elf: failed to find symbol table ELF sections in '%s'\n", - binary_path); - continue; - } - if (!gelf_getshdr(scn, &sh)) - continue; - strtabidx = sh.sh_link; - symbols = elf_getdata(scn, 0); - if (!symbols) { - pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n", - binary_path, elf_errmsg(-1)); - ret = -LIBBPF_ERRNO__FORMAT; - goto out; - } - nr_syms = symbols->d_size / sh.sh_entsize; - - for (idx = 0; idx < nr_syms; idx++) { - int curr_bind; - GElf_Sym sym; - Elf_Scn *sym_scn; - GElf_Shdr sym_sh; - - if (!gelf_getsym(symbols, idx, &sym)) - continue; - - if (GELF_ST_TYPE(sym.st_info) != STT_FUNC) - continue; - - sname = elf_strptr(elf, strtabidx, sym.st_name); - if (!sname) - continue; - - curr_bind = GELF_ST_BIND(sym.st_info); - - /* User can specify func, func@@LIB or func@@LIB_VERSION. 
*/ - if (strncmp(sname, name, name_len) != 0) - continue; - /* ...but we don't want a search for "foo" to match 'foo2" also, so any - * additional characters in sname should be of the form "@@LIB". - */ - if (!is_name_qualified && sname[name_len] != '\0' && sname[name_len] != '@') - continue; - - if (ret >= 0) { - /* handle multiple matches */ - if (last_bind != STB_WEAK && curr_bind != STB_WEAK) { - /* Only accept one non-weak bind. */ - pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n", - sname, name, binary_path); - ret = -LIBBPF_ERRNO__FORMAT; - goto out; - } else if (curr_bind == STB_WEAK) { - /* already have a non-weak bind, and - * this is a weak bind, so ignore. - */ - continue; - } - } - - /* Transform symbol's virtual address (absolute for - * binaries and relative for shared libs) into file - * offset, which is what kernel is expecting for - * uprobe/uretprobe attachment. - * See Documentation/trace/uprobetracer.rst for more - * details. - * This is done by looking up symbol's containing - * section's header and using it's virtual address - * (sh_addr) and corresponding file offset (sh_offset) - * to transform sym.st_value (virtual address) into - * desired final file offset. - */ - sym_scn = elf_getscn(elf, sym.st_shndx); - if (!sym_scn) - continue; - if (!gelf_getshdr(sym_scn, &sym_sh)) - continue; - - ret = sym.st_value - sym_sh.sh_addr + sym_sh.sh_offset; - last_bind = curr_bind; - } - if (ret > 0) - break; - } - - if (ret > 0) { - pr_debug("elf: symbol address match for '%s' in '%s': 0x%lx\n", name, binary_path, - ret); - } else { - if (ret == 0) { - pr_warn("elf: '%s' is 0 in symtab for '%s': %s\n", name, binary_path, - is_shared_lib ? "should not be 0 in a shared library" : - "try using shared library path instead"); - ret = -ENOENT; - } else { - pr_warn("elf: failed to find symbol '%s' in '%s'\n", name, binary_path); - } - } -out: - return ret; -} - -/* Find offset of function name in ELF object specified by path. "name" matches - * symbol name or name@@LIB for library functions. - */ -static long elf_find_func_offset_from_file(const char *binary_path, const char *name) -{ - char errmsg[STRERR_BUFSIZE]; - long ret = -ENOENT; - Elf *elf; - int fd; - - fd = open(binary_path, O_RDONLY | O_CLOEXEC); - if (fd < 0) { - ret = -errno; - pr_warn("failed to open %s: %s\n", binary_path, - libbpf_strerror_r(ret, errmsg, sizeof(errmsg))); - return ret; - } - elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); - if (!elf) { - pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1)); - close(fd); - return -LIBBPF_ERRNO__FORMAT; - } - - ret = elf_find_func_offset(elf, binary_path, name); - elf_end(elf); - close(fd); - return ret; -} - /* Find offset of function name in archive specified by path. Currently * supported are .zip files that do not compress their contents, as used on * Android in the form of APKs, for example. 
"file_name" is the name of the ELF @@ -11311,6 +11243,120 @@ static int resolve_full_path(const char *file, char *result, size_t result_sz) return -ENOENT; } +struct bpf_link * +bpf_program__attach_uprobe_multi(const struct bpf_program *prog, + pid_t pid, + const char *path, + const char *func_pattern, + const struct bpf_uprobe_multi_opts *opts) +{ + const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL; + LIBBPF_OPTS(bpf_link_create_opts, lopts); + unsigned long *resolved_offsets = NULL; + int err = 0, link_fd, prog_fd; + struct bpf_link *link = NULL; + char errmsg[STRERR_BUFSIZE]; + char full_path[PATH_MAX]; + const __u64 *cookies; + const char **syms; + size_t cnt; + + if (!OPTS_VALID(opts, bpf_uprobe_multi_opts)) + return libbpf_err_ptr(-EINVAL); + + syms = OPTS_GET(opts, syms, NULL); + offsets = OPTS_GET(opts, offsets, NULL); + ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL); + cookies = OPTS_GET(opts, cookies, NULL); + cnt = OPTS_GET(opts, cnt, 0); + + /* + * User can specify 2 mutually exclusive set of inputs: + * + * 1) use only path/func_pattern/pid arguments + * + * 2) use path/pid with allowed combinations of: + * syms/offsets/ref_ctr_offsets/cookies/cnt + * + * - syms and offsets are mutually exclusive + * - ref_ctr_offsets and cookies are optional + * + * Any other usage results in error. + */ + + if (!path) + return libbpf_err_ptr(-EINVAL); + if (!func_pattern && cnt == 0) + return libbpf_err_ptr(-EINVAL); + + if (func_pattern) { + if (syms || offsets || ref_ctr_offsets || cookies || cnt) + return libbpf_err_ptr(-EINVAL); + } else { + if (!!syms == !!offsets) + return libbpf_err_ptr(-EINVAL); + } + + if (func_pattern) { + if (!strchr(path, '/')) { + err = resolve_full_path(path, full_path, sizeof(full_path)); + if (err) { + pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", + prog->name, path, err); + return libbpf_err_ptr(err); + } + path = full_path; + } + + err = elf_resolve_pattern_offsets(path, func_pattern, + &resolved_offsets, &cnt); + if (err < 0) + return libbpf_err_ptr(err); + offsets = resolved_offsets; + } else if (syms) { + err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets); + if (err < 0) + return libbpf_err_ptr(err); + offsets = resolved_offsets; + } + + lopts.uprobe_multi.path = path; + lopts.uprobe_multi.offsets = offsets; + lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets; + lopts.uprobe_multi.cookies = cookies; + lopts.uprobe_multi.cnt = cnt; + lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? 
BPF_F_UPROBE_MULTI_RETURN : 0; + + if (pid == 0) + pid = getpid(); + if (pid > 0) + lopts.uprobe_multi.pid = pid; + + link = calloc(1, sizeof(*link)); + if (!link) { + err = -ENOMEM; + goto error; + } + link->detach = &bpf_link__detach_fd; + + prog_fd = bpf_program__fd(prog); + link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts); + if (link_fd < 0) { + err = -errno; + pr_warn("prog '%s': failed to attach multi-uprobe: %s\n", + prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + goto error; + } + link->fd = link_fd; + free(resolved_offsets); + return link; + +error: + free(resolved_offsets); + free(link); + return libbpf_err_ptr(err); +} + LIBBPF_API struct bpf_link * bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, const char *binary_path, size_t func_offset, diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 55b97b208754..0e52621cba43 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -266,6 +266,7 @@ LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj, LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj, const char *path); LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path); +LIBBPF_API int bpf_object__unpin(struct bpf_object *object, const char *path); LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj); LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj); @@ -529,6 +530,57 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, const char *pattern, const struct bpf_kprobe_multi_opts *opts); +struct bpf_uprobe_multi_opts { + /* size of this struct, for forward/backward compatibility */ + size_t sz; + /* array of function symbols to attach to */ + const char **syms; + /* array of function addresses to attach to */ + const unsigned long *offsets; + /* optional, array of associated ref counter offsets */ + const unsigned long *ref_ctr_offsets; + /* optional, array of associated BPF cookies */ + const __u64 *cookies; + /* number of elements in syms/addrs/cookies arrays */ + size_t cnt; + /* create return uprobes */ + bool retprobe; + size_t :0; +}; + +#define bpf_uprobe_multi_opts__last_field retprobe + +/** + * @brief **bpf_program__attach_uprobe_multi()** attaches a BPF program + * to multiple uprobes with uprobe_multi link. 
+ * + * User can specify 2 mutually exclusive set of inputs: + * + * 1) use only path/func_pattern/pid arguments + * + * 2) use path/pid with allowed combinations of + * syms/offsets/ref_ctr_offsets/cookies/cnt + * + * - syms and offsets are mutually exclusive + * - ref_ctr_offsets and cookies are optional + * + * + * @param prog BPF program to attach + * @param pid Process ID to attach the uprobe to, 0 for self (own process), + * -1 for all processes + * @param binary_path Path to binary + * @param func_pattern Regular expression to specify functions to attach + * BPF program to + * @param opts Additional options (see **struct bpf_uprobe_multi_opts**) + * @return 0, on success; negative error code, otherwise + */ +LIBBPF_API struct bpf_link * +bpf_program__attach_uprobe_multi(const struct bpf_program *prog, + pid_t pid, + const char *binary_path, + const char *func_pattern, + const struct bpf_uprobe_multi_opts *opts); + struct bpf_ksyscall_opts { /* size of this struct, for forward/backward compatibility */ size_t sz; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 9c7538dd5835..57712321490f 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -395,7 +395,9 @@ LIBBPF_1.2.0 { LIBBPF_1.3.0 { global: bpf_obj_pin_opts; + bpf_object__unpin; bpf_prog_detach_opts; bpf_program__attach_netfilter; bpf_program__attach_tcx; + bpf_program__attach_uprobe_multi; } LIBBPF_1.2.0; diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index e4d05662a96c..f0f08635adb0 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -15,6 +15,7 @@ #include <linux/err.h> #include <fcntl.h> #include <unistd.h> +#include <libelf.h> #include "relo_core.h" /* make sure libbpf doesn't use kernel-only integer typedefs */ @@ -354,6 +355,8 @@ enum kern_feature_id { FEAT_BTF_ENUM64, /* Kernel uses syscall wrapper (CONFIG_ARCH_HAS_SYSCALL_WRAPPER) */ FEAT_SYSCALL_WRAPPER, + /* BPF multi-uprobe link support */ + FEAT_UPROBE_MULTI_LINK, __FEAT_CNT, }; @@ -577,4 +580,22 @@ static inline bool is_pow_of_2(size_t x) #define PROG_LOAD_ATTEMPTS 5 int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts); +bool glob_match(const char *str, const char *pat); + +long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name); +long elf_find_func_offset_from_file(const char *binary_path, const char *name); + +struct elf_fd { + Elf *elf; + int fd; +}; + +int elf_open(const char *binary_path, struct elf_fd *elf_fd); +void elf_close(struct elf_fd *elf_fd); + +int elf_resolve_syms_offsets(const char *binary_path, int cnt, + const char **syms, unsigned long **poffsets); +int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern, + unsigned long **poffsets, size_t *pcnt); + #endif /* __LIBBPF_LIBBPF_INTERNAL_H */ diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c index a26b2f5fa0fc..63a4d5ad12d1 100644 --- a/tools/lib/bpf/relo_core.c +++ b/tools/lib/bpf/relo_core.c @@ -776,7 +776,7 @@ static int bpf_core_calc_field_relo(const char *prog_name, break; case BPF_CORE_FIELD_SIGNED: *val = (btf_is_any_enum(mt) && BTF_INFO_KFLAG(mt->info)) || - (btf_int_encoding(mt) & BTF_INT_SIGNED); + (btf_is_int(mt) && (btf_int_encoding(mt) & BTF_INT_SIGNED)); if (validate) *validate = true; /* signedness is never ambiguous */ break; diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c index 37455d00b239..93794f01bb67 100644 --- a/tools/lib/bpf/usdt.c +++ b/tools/lib/bpf/usdt.c @@ -250,6 +250,7 @@ 
struct usdt_manager { bool has_bpf_cookie; bool has_sema_refcnt; + bool has_uprobe_multi; }; struct usdt_manager *usdt_manager_new(struct bpf_object *obj) @@ -284,6 +285,11 @@ struct usdt_manager *usdt_manager_new(struct bpf_object *obj) */ man->has_sema_refcnt = faccessat(AT_FDCWD, ref_ctr_sysfs_path, F_OK, AT_EACCESS) == 0; + /* + * Detect kernel support for uprobe multi link to be used for attaching + * usdt probes. + */ + man->has_uprobe_multi = kernel_supports(obj, FEAT_UPROBE_MULTI_LINK); return man; } @@ -808,6 +814,8 @@ struct bpf_link_usdt { long abs_ip; struct bpf_link *link; } *uprobes; + + struct bpf_link *multi_link; }; static int bpf_link_usdt_detach(struct bpf_link *link) @@ -816,6 +824,9 @@ static int bpf_link_usdt_detach(struct bpf_link *link) struct usdt_manager *man = usdt_link->usdt_man; int i; + bpf_link__destroy(usdt_link->multi_link); + + /* When having multi_link, uprobe_cnt is 0 */ for (i = 0; i < usdt_link->uprobe_cnt; i++) { /* detach underlying uprobe link */ bpf_link__destroy(usdt_link->uprobes[i].link); @@ -946,32 +957,24 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie) { - int i, fd, err, spec_map_fd, ip_map_fd; + unsigned long *offsets = NULL, *ref_ctr_offsets = NULL; + int i, err, spec_map_fd, ip_map_fd; LIBBPF_OPTS(bpf_uprobe_opts, opts); struct hashmap *specs_hash = NULL; struct bpf_link_usdt *link = NULL; struct usdt_target *targets = NULL; + __u64 *cookies = NULL; + struct elf_fd elf_fd; size_t target_cnt; - Elf *elf; spec_map_fd = bpf_map__fd(man->specs_map); ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map); - fd = open(path, O_RDONLY | O_CLOEXEC); - if (fd < 0) { - err = -errno; - pr_warn("usdt: failed to open ELF binary '%s': %d\n", path, err); + err = elf_open(path, &elf_fd); + if (err) return libbpf_err_ptr(err); - } - - elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); - if (!elf) { - err = -EBADF; - pr_warn("usdt: failed to parse ELF binary '%s': %s\n", path, elf_errmsg(-1)); - goto err_out; - } - err = sanity_check_usdt_elf(elf, path); + err = sanity_check_usdt_elf(elf_fd.elf, path); if (err) goto err_out; @@ -984,7 +987,7 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct /* discover USDT in given binary, optionally limiting * activations to a given PID, if pid > 0 */ - err = collect_usdt_targets(man, elf, path, pid, usdt_provider, usdt_name, + err = collect_usdt_targets(man, elf_fd.elf, path, pid, usdt_provider, usdt_name, usdt_cookie, &targets, &target_cnt); if (err <= 0) { err = (err == 0) ? 
-ENOENT : err; @@ -1007,10 +1010,21 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct link->link.detach = &bpf_link_usdt_detach; link->link.dealloc = &bpf_link_usdt_dealloc; - link->uprobes = calloc(target_cnt, sizeof(*link->uprobes)); - if (!link->uprobes) { - err = -ENOMEM; - goto err_out; + if (man->has_uprobe_multi) { + offsets = calloc(target_cnt, sizeof(*offsets)); + cookies = calloc(target_cnt, sizeof(*cookies)); + ref_ctr_offsets = calloc(target_cnt, sizeof(*ref_ctr_offsets)); + + if (!offsets || !ref_ctr_offsets || !cookies) { + err = -ENOMEM; + goto err_out; + } + } else { + link->uprobes = calloc(target_cnt, sizeof(*link->uprobes)); + if (!link->uprobes) { + err = -ENOMEM; + goto err_out; + } } for (i = 0; i < target_cnt; i++) { @@ -1051,37 +1065,65 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct goto err_out; } - opts.ref_ctr_offset = target->sema_off; - opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0; - uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path, - target->rel_ip, &opts); - err = libbpf_get_error(uprobe_link); - if (err) { - pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n", - i, usdt_provider, usdt_name, path, err); + if (man->has_uprobe_multi) { + offsets[i] = target->rel_ip; + ref_ctr_offsets[i] = target->sema_off; + cookies[i] = spec_id; + } else { + opts.ref_ctr_offset = target->sema_off; + opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0; + uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path, + target->rel_ip, &opts); + err = libbpf_get_error(uprobe_link); + if (err) { + pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n", + i, usdt_provider, usdt_name, path, err); + goto err_out; + } + + link->uprobes[i].link = uprobe_link; + link->uprobes[i].abs_ip = target->abs_ip; + link->uprobe_cnt++; + } + } + + if (man->has_uprobe_multi) { + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts_multi, + .ref_ctr_offsets = ref_ctr_offsets, + .offsets = offsets, + .cookies = cookies, + .cnt = target_cnt, + ); + + link->multi_link = bpf_program__attach_uprobe_multi(prog, pid, path, + NULL, &opts_multi); + if (!link->multi_link) { + err = -errno; + pr_warn("usdt: failed to attach uprobe multi for '%s:%s' in '%s': %d\n", + usdt_provider, usdt_name, path, err); goto err_out; } - link->uprobes[i].link = uprobe_link; - link->uprobes[i].abs_ip = target->abs_ip; - link->uprobe_cnt++; + free(offsets); + free(ref_ctr_offsets); + free(cookies); } free(targets); hashmap__free(specs_hash); - elf_end(elf); - close(fd); - + elf_close(&elf_fd); return &link->link; err_out: + free(offsets); + free(ref_ctr_offsets); + free(cookies); + if (link) bpf_link__destroy(&link->link); free(targets); hashmap__free(specs_hash); - if (elf) - elf_end(elf); - close(fd); + elf_close(&elf_fd); return libbpf_err_ptr(err); } diff --git a/tools/net/ynl/generated/ethtool-user.h b/tools/net/ynl/generated/ethtool-user.h index d7d4ba855f43..ddc1a5209992 100644 --- a/tools/net/ynl/generated/ethtool-user.h +++ b/tools/net/ynl/generated/ethtool-user.h @@ -1422,6 +1422,7 @@ ethtool_wol_set_req_set_sopass(struct ethtool_wol_set_req *req, const void *sopass, size_t len) { free(req->sopass); + req->_present.sopass_len = len; req->sopass = malloc(req->_present.sopass_len); memcpy(req->sopass, sopass, req->_present.sopass_len); } @@ -4071,6 +4072,7 @@ ethtool_fec_set_req_set_stats_corrected(struct ethtool_fec_set_req *req, const void *corrected, size_t len) { free(req->stats.corrected); + 
req->stats._present.corrected_len = len; req->stats.corrected = malloc(req->stats._present.corrected_len); memcpy(req->stats.corrected, corrected, req->stats._present.corrected_len); } @@ -4079,6 +4081,7 @@ ethtool_fec_set_req_set_stats_uncorr(struct ethtool_fec_set_req *req, const void *uncorr, size_t len) { free(req->stats.uncorr); + req->stats._present.uncorr_len = len; req->stats.uncorr = malloc(req->stats._present.uncorr_len); memcpy(req->stats.uncorr, uncorr, req->stats._present.uncorr_len); } @@ -4087,6 +4090,7 @@ ethtool_fec_set_req_set_stats_corr_bits(struct ethtool_fec_set_req *req, const void *corr_bits, size_t len) { free(req->stats.corr_bits); + req->stats._present.corr_bits_len = len; req->stats.corr_bits = malloc(req->stats._present.corr_bits_len); memcpy(req->stats.corr_bits, corr_bits, req->stats._present.corr_bits_len); } diff --git a/tools/net/ynl/generated/fou-user.h b/tools/net/ynl/generated/fou-user.h index d8ab50579cd1..a8f860892540 100644 --- a/tools/net/ynl/generated/fou-user.h +++ b/tools/net/ynl/generated/fou-user.h @@ -91,6 +91,7 @@ fou_add_req_set_local_v6(struct fou_add_req *req, const void *local_v6, size_t len) { free(req->local_v6); + req->_present.local_v6_len = len; req->local_v6 = malloc(req->_present.local_v6_len); memcpy(req->local_v6, local_v6, req->_present.local_v6_len); } @@ -99,6 +100,7 @@ fou_add_req_set_peer_v6(struct fou_add_req *req, const void *peer_v6, size_t len) { free(req->peer_v6); + req->_present.peer_v6_len = len; req->peer_v6 = malloc(req->_present.peer_v6_len); memcpy(req->peer_v6, peer_v6, req->_present.peer_v6_len); } @@ -192,6 +194,7 @@ fou_del_req_set_local_v6(struct fou_del_req *req, const void *local_v6, size_t len) { free(req->local_v6); + req->_present.local_v6_len = len; req->local_v6 = malloc(req->_present.local_v6_len); memcpy(req->local_v6, local_v6, req->_present.local_v6_len); } @@ -200,6 +203,7 @@ fou_del_req_set_peer_v6(struct fou_del_req *req, const void *peer_v6, size_t len) { free(req->peer_v6); + req->_present.peer_v6_len = len; req->peer_v6 = malloc(req->_present.peer_v6_len); memcpy(req->peer_v6, peer_v6, req->_present.peer_v6_len); } @@ -280,6 +284,7 @@ fou_get_req_set_local_v6(struct fou_get_req *req, const void *local_v6, size_t len) { free(req->local_v6); + req->_present.local_v6_len = len; req->local_v6 = malloc(req->_present.local_v6_len); memcpy(req->local_v6, local_v6, req->_present.local_v6_len); } @@ -288,6 +293,7 @@ fou_get_req_set_peer_v6(struct fou_get_req *req, const void *peer_v6, size_t len) { free(req->peer_v6); + req->_present.peer_v6_len = len; req->peer_v6 = malloc(req->_present.peer_v6_len); memcpy(req->peer_v6, peer_v6, req->_present.peer_v6_len); } diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py index 6951bcc7efdc..fa4f1c28efc5 100644 --- a/tools/net/ynl/lib/ynl.py +++ b/tools/net/ynl/lib/ynl.py @@ -410,7 +410,12 @@ class YnlFamily(SpecFamily): elif attr["type"] == 'string': attr_payload = str(value).encode('ascii') + b'\x00' elif attr["type"] == 'binary': - attr_payload = bytes.fromhex(value) + if isinstance(value, bytes): + attr_payload = value + elif isinstance(value, str): + attr_payload = bytes.fromhex(value) + else: + raise Exception(f'Unknown type for binary attribute, value: {value}') elif attr['type'] in NlAttr.type_formats: format = NlAttr.get_format(attr['type'], attr.byte_order) attr_payload = format.pack(int(value)) diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py index bdff8dfc29c9..897af958cee8 100755 --- a/tools/net/ynl/ynl-gen-c.py +++ 
b/tools/net/ynl/ynl-gen-c.py @@ -428,6 +428,7 @@ class TypeBinary(Type): def _setter_lines(self, ri, member, presence): return [f"free({member});", + f"{presence}_len = len;", f"{member} = malloc({presence}_len);", f'memcpy({member}, {self.c_name}, {presence}_len);'] @@ -614,7 +615,7 @@ class Struct: self.attr_list = [] self.attrs = dict() - if type_list: + if type_list is not None: for t in type_list: self.attr_list.append((t, self.attr_set[t]),) else: @@ -977,7 +978,9 @@ class Family(SpecFamily): for op_mode in ['do', 'dump']: if op_mode in op: - global_set.update(op[op_mode].get('request', [])) + req = op[op_mode].get('request') + if req: + global_set.update(req.get('attributes', [])) self.global_policy = [] self.global_policy_set = attr_set_name @@ -1042,14 +1045,30 @@ class RenderInfo: class CodeWriter: - def __init__(self, nlib, out_file): + def __init__(self, nlib, out_file=None): self.nlib = nlib self._nl = False self._block_end = False self._silent_block = False self._ind = 0 - self._out = out_file + if out_file is None: + self._out = os.sys.stdout + else: + self._out = tempfile.TemporaryFile('w+') + self._out_file = out_file + + def __del__(self): + self.close_out_file() + + def close_out_file(self): + if self._out == os.sys.stdout: + return + with open(self._out_file, 'w+') as out_file: + self._out.seek(0) + shutil.copyfileobj(self._out, out_file) + self._out.close() + self._out = os.sys.stdout @classmethod def _is_cond(cls, line): @@ -1540,7 +1559,14 @@ def parse_rsp_msg(ri, deref=False): ri.cw.write_func_prot('int', f'{op_prefix(ri, "reply", deref=deref)}_parse', func_args) - _multi_parse(ri, ri.struct["reply"], init_lines, local_vars) + if ri.struct["reply"].member_list(): + _multi_parse(ri, ri.struct["reply"], init_lines, local_vars) + else: + # Empty reply + ri.cw.block_start() + ri.cw.p('return MNL_CB_OK;') + ri.cw.block_end() + ri.cw.nl() def print_req(ri): @@ -2303,11 +2329,9 @@ def main(): parser.add_argument('--source', dest='header', action='store_false') parser.add_argument('--user-header', nargs='+', default=[]) parser.add_argument('--exclude-op', action='append', default=[]) - parser.add_argument('-o', dest='out_file', type=str) + parser.add_argument('-o', dest='out_file', type=str, default=None) args = parser.parse_args() - tmp_file = tempfile.TemporaryFile('w+') if args.out_file else os.sys.stdout - if args.header is None: parser.error("--header or --source is required") @@ -2331,7 +2355,7 @@ def main(): print(f'Message enum-model {parsed.msg_id_model} not supported for {args.mode} generation') os.sys.exit(1) - cw = CodeWriter(BaseNlLib(), tmp_file) + cw = CodeWriter(BaseNlLib(), args.out_file) _, spec_kernel = find_kernel_root(args.spec) if args.mode == 'uapi' or args.header: @@ -2580,10 +2604,6 @@ def main(): if args.header: cw.p(f'#endif /* {hdr_prot} */') - if args.out_file: - out_file = open(args.out_file, 'w+') - tmp_file.seek(0) - shutil.copyfileobj(tmp_file, out_file) if __name__ == "__main__": main() diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 110518ba4804..f1aebabfb017 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -44,6 +44,7 @@ test_cpp /bench /veristat /sign-file +/uprobe_multi *.ko *.tmp xskxceiver diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index e4e1e6492268..edef49fcd23e 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -585,6 +585,7 @@ 
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ $(OUTPUT)/liburandom_read.so \ $(OUTPUT)/xdp_synproxy \ $(OUTPUT)/sign-file \ + $(OUTPUT)/uprobe_multi \ ima_setup.sh \ verify_sig_setup.sh \ $(wildcard progs/btf_dump_test_case_*.c) \ @@ -698,6 +699,10 @@ $(OUTPUT)/veristat: $(OUTPUT)/veristat.o $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@ +$(OUTPUT)/uprobe_multi: uprobe_multi.c + $(call msg,BINARY,,$@) + $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@ + EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \ prog_tests/tests.h map_tests/tests.h verifier/tests.h \ feature bpftool \ diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h index 7ff32be3d730..68180d8f8558 100644 --- a/tools/testing/selftests/bpf/bench.h +++ b/tools/testing/selftests/bpf/bench.h @@ -81,15 +81,6 @@ void grace_period_latency_basic_stats(struct bench_res res[], int res_cnt, void grace_period_ticks_basic_stats(struct bench_res res[], int res_cnt, struct basic_stats *gp_stat); -static inline __u64 get_time_ns(void) -{ - struct timespec t; - - clock_gettime(CLOCK_MONOTONIC, &t); - - return (u64)t.tv_sec * 1000000000 + t.tv_nsec; -} - static inline void atomic_inc(long *value) { (void)__atomic_add_fetch(value, 1, __ATOMIC_RELAXED); diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 3b350bc31343..1c7584e8dd9e 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -16,6 +16,7 @@ CONFIG_CRYPTO_USER_API_HASH=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_BTF=y CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DUMMY=y CONFIG_DYNAMIC_FTRACE=y CONFIG_FPROBE=y CONFIG_FTRACE_SYSCALLS=y @@ -59,6 +60,7 @@ CONFIG_NET_IPGRE=y CONFIG_NET_IPGRE_DEMUX=y CONFIG_NET_IPIP=y CONFIG_NET_MPLS_GSO=y +CONFIG_NET_SCH_FQ=y CONFIG_NET_SCH_INGRESS=y CONFIG_NET_SCHED=y CONFIG_NETDEVSIM=y diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index 26b2d1bffdfd..1454cebc262b 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -11,6 +11,7 @@ #include <bpf/btf.h> #include "test_bpf_cookie.skel.h" #include "kprobe_multi.skel.h" +#include "uprobe_multi.skel.h" /* uprobe attach point */ static noinline void trigger_func(void) @@ -239,6 +240,81 @@ cleanup: bpf_link__destroy(link1); kprobe_multi__destroy(skel); } + +/* defined in prog_tests/uprobe_multi_test.c */ +void uprobe_multi_func_1(void); +void uprobe_multi_func_2(void); +void uprobe_multi_func_3(void); + +static void uprobe_multi_test_run(struct uprobe_multi *skel) +{ + skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1; + skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2; + skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3; + + skel->bss->pid = getpid(); + skel->bss->test_cookie = true; + + uprobe_multi_func_1(); + uprobe_multi_func_2(); + uprobe_multi_func_3(); + + ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 1, "uprobe_multi_func_1_result"); + ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 1, "uprobe_multi_func_2_result"); + ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 1, "uprobe_multi_func_3_result"); + + ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 1, "uretprobe_multi_func_1_result"); + ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 1, "uretprobe_multi_func_2_result"); + 
ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 1, "uretprobe_multi_func_3_result"); +} + +static void uprobe_multi_attach_api_subtest(void) +{ + struct bpf_link *link1 = NULL, *link2 = NULL; + struct uprobe_multi *skel = NULL; + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); + const char *syms[3] = { + "uprobe_multi_func_1", + "uprobe_multi_func_2", + "uprobe_multi_func_3", + }; + __u64 cookies[3]; + + cookies[0] = 3; /* uprobe_multi_func_1 */ + cookies[1] = 1; /* uprobe_multi_func_2 */ + cookies[2] = 2; /* uprobe_multi_func_3 */ + + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = &cookies[0]; + + skel = uprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi")) + goto cleanup; + + link1 = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1, + "/proc/self/exe", NULL, &opts); + if (!ASSERT_OK_PTR(link1, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + cookies[0] = 2; /* uprobe_multi_func_1 */ + cookies[1] = 3; /* uprobe_multi_func_2 */ + cookies[2] = 1; /* uprobe_multi_func_3 */ + + opts.retprobe = true; + link2 = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, -1, + "/proc/self/exe", NULL, &opts); + if (!ASSERT_OK_PTR(link2, "bpf_program__attach_uprobe_multi_retprobe")) + goto cleanup; + + uprobe_multi_test_run(skel); + +cleanup: + bpf_link__destroy(link2); + bpf_link__destroy(link1); + uprobe_multi__destroy(skel); +} + static void uprobe_subtest(struct test_bpf_cookie *skel) { DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts); @@ -515,6 +591,8 @@ void test_bpf_cookie(void) kprobe_multi_attach_api_subtest(); if (test__start_subtest("uprobe")) uprobe_subtest(skel); + if (test__start_subtest("multi_uprobe_attach_api")) + uprobe_multi_attach_api_subtest(); if (test__start_subtest("tracepoint")) tp_subtest(skel); if (test__start_subtest("perf_event")) diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c index 2173c4bb555e..179fe300534f 100644 --- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -304,14 +304,6 @@ cleanup: kprobe_multi__destroy(skel); } -static inline __u64 get_time_ns(void) -{ - struct timespec t; - - clock_gettime(CLOCK_MONOTONIC, &t); - return (__u64) t.tv_sec * 1000000000 + t.tv_nsec; -} - static size_t symbol_hash(long key, void *ctx __maybe_unused) { return str_hash((const char *) key); diff --git a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c index 76f1da877f81..b25b870f87ba 100644 --- a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c +++ b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c @@ -5,6 +5,7 @@ #include <network_helpers.h> #include "local_kptr_stash.skel.h" +#include "local_kptr_stash_fail.skel.h" static void test_local_kptr_stash_simple(void) { LIBBPF_OPTS(bpf_test_run_opts, opts, @@ -26,6 +27,27 @@ static void test_local_kptr_stash_simple(void) local_kptr_stash__destroy(skel); } +static void test_local_kptr_stash_plain(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct local_kptr_stash *skel; + int ret; + + skel = local_kptr_stash__open_and_load(); + if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_plain), &opts); + ASSERT_OK(ret, "local_kptr_stash_add_plain run"); + ASSERT_OK(opts.retval, 
"local_kptr_stash_add_plain retval"); + + local_kptr_stash__destroy(skel); +} + static void test_local_kptr_stash_unstash(void) { LIBBPF_OPTS(bpf_test_run_opts, opts, @@ -51,10 +73,19 @@ static void test_local_kptr_stash_unstash(void) local_kptr_stash__destroy(skel); } -void test_local_kptr_stash_success(void) +static void test_local_kptr_stash_fail(void) +{ + RUN_TESTS(local_kptr_stash_fail); +} + +void test_local_kptr_stash(void) { if (test__start_subtest("local_kptr_stash_simple")) test_local_kptr_stash_simple(); + if (test__start_subtest("local_kptr_stash_plain")) + test_local_kptr_stash_plain(); if (test__start_subtest("local_kptr_stash_unstash")) test_local_kptr_stash_unstash(); + if (test__start_subtest("local_kptr_stash_fail")) + test_local_kptr_stash_fail(); } diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h new file mode 100644 index 000000000000..61333f2a03f9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LWT_HELPERS_H +#define __LWT_HELPERS_H + +#include <time.h> +#include <net/if.h> +#include <linux/if_tun.h> +#include <linux/icmp.h> + +#include "test_progs.h" + +#define log_err(MSG, ...) \ + fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \ + __FILE__, __LINE__, strerror(errno), ##__VA_ARGS__) + +#define RUN_TEST(name) \ + ({ \ + if (test__start_subtest(#name)) \ + if (ASSERT_OK(netns_create(), "netns_create")) { \ + struct nstoken *token = open_netns(NETNS); \ + if (ASSERT_OK_PTR(token, "setns")) { \ + test_ ## name(); \ + close_netns(token); \ + } \ + netns_delete(); \ + } \ + }) + +#define NETNS "ns_lwt" + +static inline int netns_create(void) +{ + return system("ip netns add " NETNS); +} + +static inline int netns_delete(void) +{ + return system("ip netns del " NETNS ">/dev/null 2>&1"); +} + +static int open_tuntap(const char *dev_name, bool need_mac) +{ + int err = 0; + struct ifreq ifr; + int fd = open("/dev/net/tun", O_RDWR); + + if (!ASSERT_GT(fd, 0, "open(/dev/net/tun)")) + return -1; + + ifr.ifr_flags = IFF_NO_PI | (need_mac ? IFF_TAP : IFF_TUN); + memcpy(ifr.ifr_name, dev_name, IFNAMSIZ); + + err = ioctl(fd, TUNSETIFF, &ifr); + if (!ASSERT_OK(err, "ioctl(TUNSETIFF)")) { + close(fd); + return -1; + } + + err = fcntl(fd, F_SETFL, O_NONBLOCK); + if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) { + close(fd); + return -1; + } + + return fd; +} + +#define ICMP_PAYLOAD_SIZE 100 + +/* Match an ICMP packet with payload len ICMP_PAYLOAD_SIZE */ +static int __expect_icmp_ipv4(char *buf, ssize_t len) +{ + struct iphdr *ip = (struct iphdr *)buf; + struct icmphdr *icmp = (struct icmphdr *)(ip + 1); + ssize_t min_header_len = sizeof(*ip) + sizeof(*icmp); + + if (len < min_header_len) + return -1; + + if (ip->protocol != IPPROTO_ICMP) + return -1; + + if (icmp->type != ICMP_ECHO) + return -1; + + return len == ICMP_PAYLOAD_SIZE + min_header_len; +} + +typedef int (*filter_t) (char *, ssize_t); + +/* wait_for_packet - wait for a packet that matches the filter + * + * @fd: tun fd/packet socket to read packet + * @filter: filter function, returning 1 if matches + * @timeout: timeout to wait for the packet + * + * Returns 1 if a matching packet is read, 0 if timeout expired, -1 on error. 
+ */ +static int wait_for_packet(int fd, filter_t filter, struct timeval *timeout) +{ + char buf[4096]; + int max_retry = 5; /* in case we read some spurious packets */ + fd_set fds; + + FD_ZERO(&fds); + while (max_retry--) { + /* Linux modifies timeout arg... So make a copy */ + struct timeval copied_timeout = *timeout; + ssize_t ret = -1; + + FD_SET(fd, &fds); + + ret = select(1 + fd, &fds, NULL, NULL, &copied_timeout); + if (ret <= 0) { + if (errno == EINTR) + continue; + else if (errno == EAGAIN || ret == 0) + return 0; + + log_err("select failed"); + return -1; + } + + ret = read(fd, buf, sizeof(buf)); + + if (ret <= 0) { + log_err("read(dev): %ld", ret); + return -1; + } + + if (filter && filter(buf, ret) > 0) + return 1; + } + + return 0; +} + +#endif /* __LWT_HELPERS_H */ diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c new file mode 100644 index 000000000000..59b38569f310 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* + * Test suite of lwt_xmit BPF programs that redirect packets + * The file tests focus not only if these programs work as expected normally, + * but also if they can handle abnormal situations gracefully. + * + * WARNING + * ------- + * This test suite may crash the kernel, thus should be run in a VM. + * + * Setup: + * --------- + * All tests are performed in a single netns. Two lwt encap routes are setup for + * each subtest: + * + * ip route add 10.0.0.0/24 encap bpf xmit <obj> sec "<ingress_sec>" dev link_err + * ip route add 20.0.0.0/24 encap bpf xmit <obj> sec "<egress_sec>" dev link_err + * + * Here <obj> is statically defined to test_lwt_redirect.bpf.o, and each section + * of this object holds a program entry to test. The BPF object is built from + * progs/test_lwt_redirect.c. We didn't use generated BPF skeleton since the + * attachment for lwt programs are not supported by libbpf yet. + * + * For testing, ping commands are run in the test netns: + * + * ping 10.0.0.<ifindex> -c 1 -w 1 -s 100 + * ping 20.0.0.<ifindex> -c 1 -w 1 -s 100 + * + * Scenarios: + * -------------------------------- + * 1. Redirect to a running tap/tun device + * 2. Redirect to a down tap/tun device + * 3. Redirect to a vlan device with lower layer down + * + * Case 1, ping packets should be received by packet socket on target device + * when redirected to ingress, and by tun/tap fd when redirected to egress. + * + * Case 2,3 are considered successful as long as they do not crash the kernel + * as a regression. + * + * Case 1,2 use tap device to test redirect to device that requires MAC + * header, and tun device to test the case with no MAC header added. + */ +#include <sys/socket.h> +#include <net/if.h> +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <linux/if_tun.h> +#include <linux/icmp.h> +#include <arpa/inet.h> +#include <unistd.h> +#include <errno.h> +#include <stdbool.h> +#include <stdlib.h> + +#include "lwt_helpers.h" +#include "test_progs.h" +#include "network_helpers.h" + +#define BPF_OBJECT "test_lwt_redirect.bpf.o" +#define INGRESS_SEC(need_mac) ((need_mac) ? "redir_ingress" : "redir_ingress_nomac") +#define EGRESS_SEC(need_mac) ((need_mac) ? 
"redir_egress" : "redir_egress_nomac") +#define LOCAL_SRC "10.0.0.1" +#define CIDR_TO_INGRESS "10.0.0.0/24" +#define CIDR_TO_EGRESS "20.0.0.0/24" + +/* ping to redirect toward given dev, with last byte of dest IP being the target + * device index. + * + * Note: ping command inside BPF-CI is busybox version, so it does not have certain + * function, such like -m option to set packet mark. + */ +static void ping_dev(const char *dev, bool is_ingress) +{ + int link_index = if_nametoindex(dev); + char ip[256]; + + if (!ASSERT_GE(link_index, 0, "if_nametoindex")) + return; + + if (is_ingress) + snprintf(ip, sizeof(ip), "10.0.0.%d", link_index); + else + snprintf(ip, sizeof(ip), "20.0.0.%d", link_index); + + /* We won't get a reply. Don't fail here */ + SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1", + ip, ICMP_PAYLOAD_SIZE); +} + +static int new_packet_sock(const char *ifname) +{ + int err = 0; + int ignore_outgoing = 1; + int ifindex = -1; + int s = -1; + + s = socket(AF_PACKET, SOCK_RAW, 0); + if (!ASSERT_GE(s, 0, "socket(AF_PACKET)")) + return -1; + + ifindex = if_nametoindex(ifname); + if (!ASSERT_GE(ifindex, 0, "if_nametoindex")) { + close(s); + return -1; + } + + struct sockaddr_ll addr = { + .sll_family = AF_PACKET, + .sll_protocol = htons(ETH_P_IP), + .sll_ifindex = ifindex, + }; + + err = bind(s, (struct sockaddr *)&addr, sizeof(addr)); + if (!ASSERT_OK(err, "bind(AF_PACKET)")) { + close(s); + return -1; + } + + /* Use packet socket to capture only the ingress, so we can distinguish + * the case where a regression that actually redirects the packet to + * the egress. + */ + err = setsockopt(s, SOL_PACKET, PACKET_IGNORE_OUTGOING, + &ignore_outgoing, sizeof(ignore_outgoing)); + if (!ASSERT_OK(err, "setsockopt(PACKET_IGNORE_OUTGOING)")) { + close(s); + return -1; + } + + err = fcntl(s, F_SETFL, O_NONBLOCK); + if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) { + close(s); + return -1; + } + + return s; +} + +static int expect_icmp(char *buf, ssize_t len) +{ + struct ethhdr *eth = (struct ethhdr *)buf; + + if (len < (ssize_t)sizeof(*eth)) + return -1; + + if (eth->h_proto == htons(ETH_P_IP)) + return __expect_icmp_ipv4((char *)(eth + 1), len - sizeof(*eth)); + + return -1; +} + +static int expect_icmp_nomac(char *buf, ssize_t len) +{ + return __expect_icmp_ipv4(buf, len); +} + +static void send_and_capture_test_packets(const char *test_name, int tap_fd, + const char *target_dev, bool need_mac) +{ + int psock = -1; + struct timeval timeo = { + .tv_sec = 0, + .tv_usec = 250000, + }; + int ret = -1; + + filter_t filter = need_mac ? 
expect_icmp : expect_icmp_nomac; + + ping_dev(target_dev, false); + + ret = wait_for_packet(tap_fd, filter, &timeo); + if (!ASSERT_EQ(ret, 1, "wait_for_epacket")) { + log_err("%s egress test fails", test_name); + goto out; + } + + psock = new_packet_sock(target_dev); + ping_dev(target_dev, true); + + ret = wait_for_packet(psock, filter, &timeo); + if (!ASSERT_EQ(ret, 1, "wait_for_ipacket")) { + log_err("%s ingress test fails", test_name); + goto out; + } + +out: + if (psock >= 0) + close(psock); +} + +static int setup_redirect_target(const char *target_dev, bool need_mac) +{ + int target_index = -1; + int tap_fd = -1; + + tap_fd = open_tuntap(target_dev, need_mac); + if (!ASSERT_GE(tap_fd, 0, "open_tuntap")) + goto fail; + + target_index = if_nametoindex(target_dev); + if (!ASSERT_GE(target_index, 0, "if_nametoindex")) + goto fail; + + SYS(fail, "ip link add link_err type dummy"); + SYS(fail, "ip link set lo up"); + SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32"); + SYS(fail, "ip link set link_err up"); + SYS(fail, "ip link set %s up", target_dev); + + SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec %s", + CIDR_TO_INGRESS, BPF_OBJECT, INGRESS_SEC(need_mac)); + + SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec %s", + CIDR_TO_EGRESS, BPF_OBJECT, EGRESS_SEC(need_mac)); + + return tap_fd; + +fail: + if (tap_fd >= 0) + close(tap_fd); + return -1; +} + +static void test_lwt_redirect_normal(void) +{ + const char *target_dev = "tap0"; + int tap_fd = -1; + bool need_mac = true; + + tap_fd = setup_redirect_target(target_dev, need_mac); + if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target")) + return; + + send_and_capture_test_packets(__func__, tap_fd, target_dev, need_mac); + close(tap_fd); +} + +static void test_lwt_redirect_normal_nomac(void) +{ + const char *target_dev = "tun0"; + int tap_fd = -1; + bool need_mac = false; + + tap_fd = setup_redirect_target(target_dev, need_mac); + if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target")) + return; + + send_and_capture_test_packets(__func__, tap_fd, target_dev, need_mac); + close(tap_fd); +} + +/* This test aims to prevent regression of future. As long as the kernel does + * not panic, it is considered as success. + */ +static void __test_lwt_redirect_dev_down(bool need_mac) +{ + const char *target_dev = "tap0"; + int tap_fd = -1; + + tap_fd = setup_redirect_target(target_dev, need_mac); + if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target")) + return; + + SYS(out, "ip link set %s down", target_dev); + ping_dev(target_dev, true); + ping_dev(target_dev, false); + +out: + close(tap_fd); +} + +static void test_lwt_redirect_dev_down(void) +{ + __test_lwt_redirect_dev_down(true); +} + +static void test_lwt_redirect_dev_down_nomac(void) +{ + __test_lwt_redirect_dev_down(false); +} + +/* This test aims to prevent regression of future. As long as the kernel does + * not panic, it is considered as success. 
+ */ +static void test_lwt_redirect_dev_carrier_down(void) +{ + const char *lower_dev = "tap0"; + const char *vlan_dev = "vlan100"; + int tap_fd = -1; + + tap_fd = setup_redirect_target(lower_dev, true); + if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target")) + return; + + SYS(out, "ip link add vlan100 link %s type vlan id 100", lower_dev); + SYS(out, "ip link set %s up", vlan_dev); + SYS(out, "ip link set %s down", lower_dev); + ping_dev(vlan_dev, true); + ping_dev(vlan_dev, false); + +out: + close(tap_fd); +} + +static void *test_lwt_redirect_run(void *arg) +{ + netns_delete(); + RUN_TEST(lwt_redirect_normal); + RUN_TEST(lwt_redirect_normal_nomac); + RUN_TEST(lwt_redirect_dev_down); + RUN_TEST(lwt_redirect_dev_down_nomac); + RUN_TEST(lwt_redirect_dev_carrier_down); + return NULL; +} + +void test_lwt_redirect(void) +{ + pthread_t test_thread; + int err; + + /* Run the tests in their own thread to isolate the namespace changes + * so they do not affect the environment of other tests. + * (specifically needed because of unshare(CLONE_NEWNS) in open_netns()) + */ + err = pthread_create(&test_thread, NULL, &test_lwt_redirect_run, NULL); + if (ASSERT_OK(err, "pthread_create")) + ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c new file mode 100644 index 000000000000..f4bb2d5fcae0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* + * Test suite of lwt BPF programs that reroutes packets + * The file tests focus not only if these programs work as expected normally, + * but also if they can handle abnormal situations gracefully. This test + * suite currently only covers lwt_xmit hook. lwt_in tests have not been + * implemented. + * + * WARNING + * ------- + * This test suite can crash the kernel, thus should be run in a VM. + * + * Setup: + * --------- + * all tests are performed in a single netns. A lwt encap route is setup for + * each subtest: + * + * ip route add 10.0.0.0/24 encap bpf xmit <obj> sec "<section_N>" dev link_err + * + * Here <obj> is statically defined to test_lwt_reroute.bpf.o, and it contains + * a single test program entry. This program sets packet mark by last byte of + * the IPv4 daddr. For example, a packet going to 1.2.3.4 will receive a skb + * mark 4. A packet will only be marked once, and IP x.x.x.0 will be skipped + * to avoid route loop. We didn't use generated BPF skeleton since the + * attachment for lwt programs are not supported by libbpf yet. + * + * The test program will bring up a tun device, and sets up the following + * routes: + * + * ip rule add pref 100 from all fwmark <tun_index> lookup 100 + * ip route add table 100 default dev tun0 + * + * For normal testing, a ping command is running in the test netns: + * + * ping 10.0.0.<tun_index> -c 1 -w 1 -s 100 + * + * For abnormal testing, fq is used as the qdisc of the tun device. Then a UDP + * socket will try to overflow the fq queue and trigger qdisc drop error. + * + * Scenarios: + * -------------------------------- + * 1. Reroute to a running tun device + * 2. Reroute to a device where qdisc drop + * + * For case 1, ping packets should be received by the tun device. + * + * For case 2, force UDP packets to overflow fq limit. As long as kernel + * is not crashed, it is considered successful. 
+ */ +#include "lwt_helpers.h" +#include "network_helpers.h" +#include <linux/net_tstamp.h> + +#define BPF_OBJECT "test_lwt_reroute.bpf.o" +#define LOCAL_SRC "10.0.0.1" +#define TEST_CIDR "10.0.0.0/24" +#define XMIT_HOOK "xmit" +#define XMIT_SECTION "lwt_xmit" +#define NSEC_PER_SEC 1000000000ULL + +/* send a ping to be rerouted to the target device */ +static void ping_once(const char *ip) +{ + /* We won't get a reply. Don't fail here */ + SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1", + ip, ICMP_PAYLOAD_SIZE); +} + +/* Send snd_target UDP packets to overflow the fq queue and trigger qdisc drop + * error. This is done via TX tstamp to force buffering delayed packets. + */ +static int overflow_fq(int snd_target, const char *target_ip) +{ + struct sockaddr_in addr = { + .sin_family = AF_INET, + .sin_port = htons(1234), + }; + + char data_buf[8]; /* only #pkts matter, so use a random small buffer */ + char control_buf[CMSG_SPACE(sizeof(uint64_t))]; + struct iovec iov = { + .iov_base = data_buf, + .iov_len = sizeof(data_buf), + }; + int err = -1; + int s = -1; + struct sock_txtime txtime_on = { + .clockid = CLOCK_MONOTONIC, + .flags = 0, + }; + struct msghdr msg = { + .msg_name = &addr, + .msg_namelen = sizeof(addr), + .msg_control = control_buf, + .msg_controllen = sizeof(control_buf), + .msg_iovlen = 1, + .msg_iov = &iov, + }; + struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); + + memset(data_buf, 0, sizeof(data_buf)); + + s = socket(AF_INET, SOCK_DGRAM, 0); + if (!ASSERT_GE(s, 0, "socket")) + goto out; + + err = setsockopt(s, SOL_SOCKET, SO_TXTIME, &txtime_on, sizeof(txtime_on)); + if (!ASSERT_OK(err, "setsockopt(SO_TXTIME)")) + goto out; + + err = inet_pton(AF_INET, target_ip, &addr.sin_addr); + if (!ASSERT_EQ(err, 1, "inet_pton")) + goto out; + + while (snd_target > 0) { + struct timespec now; + + memset(control_buf, 0, sizeof(control_buf)); + cmsg->cmsg_type = SCM_TXTIME; + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_len = CMSG_LEN(sizeof(uint64_t)); + + err = clock_gettime(CLOCK_MONOTONIC, &now); + if (!ASSERT_OK(err, "clock_gettime(CLOCK_MONOTONIC)")) { + err = -1; + goto out; + } + + *(uint64_t *)CMSG_DATA(cmsg) = (now.tv_nsec + 1) * NSEC_PER_SEC + + now.tv_nsec; + + /* we will intentionally send more than fq limit, so ignore + * the error here. 
+ */ + sendmsg(s, &msg, MSG_NOSIGNAL); + snd_target--; + } + + /* no kernel crash so far is considered success */ + err = 0; + +out: + if (s >= 0) + close(s); + + return err; +} + +static int setup(const char *tun_dev) +{ + int target_index = -1; + int tap_fd = -1; + + tap_fd = open_tuntap(tun_dev, false); + if (!ASSERT_GE(tap_fd, 0, "open_tun")) + return -1; + + target_index = if_nametoindex(tun_dev); + if (!ASSERT_GE(target_index, 0, "if_nametoindex")) + return -1; + + SYS(fail, "ip link add link_err type dummy"); + SYS(fail, "ip link set lo up"); + SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32"); + SYS(fail, "ip link set link_err up"); + SYS(fail, "ip link set %s up", tun_dev); + + SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec lwt_xmit", + TEST_CIDR, BPF_OBJECT); + + SYS(fail, "ip rule add pref 100 from all fwmark %d lookup 100", + target_index); + SYS(fail, "ip route add t 100 default dev %s", tun_dev); + + return tap_fd; + +fail: + if (tap_fd >= 0) + close(tap_fd); + return -1; +} + +static void test_lwt_reroute_normal_xmit(void) +{ + const char *tun_dev = "tun0"; + int tun_fd = -1; + int ifindex = -1; + char ip[256]; + struct timeval timeo = { + .tv_sec = 0, + .tv_usec = 250000, + }; + + tun_fd = setup(tun_dev); + if (!ASSERT_GE(tun_fd, 0, "setup_reroute")) + return; + + ifindex = if_nametoindex(tun_dev); + if (!ASSERT_GE(ifindex, 0, "if_nametoindex")) + return; + + snprintf(ip, 256, "10.0.0.%d", ifindex); + + /* ping packets should be received by the tun device */ + ping_once(ip); + + if (!ASSERT_EQ(wait_for_packet(tun_fd, __expect_icmp_ipv4, &timeo), 1, + "wait_for_packet")) + log_err("%s xmit", __func__); +} + +/* + * Test the failure case when the skb is dropped at the qdisc. This is a + * regression prevention at the xmit hook only. + */ +static void test_lwt_reroute_qdisc_dropped(void) +{ + const char *tun_dev = "tun0"; + int tun_fd = -1; + int ifindex = -1; + char ip[256]; + + tun_fd = setup(tun_dev); + if (!ASSERT_GE(tun_fd, 0, "setup_reroute")) + goto fail; + + SYS(fail, "tc qdisc replace dev %s root fq limit 5 flow_limit 5", tun_dev); + + ifindex = if_nametoindex(tun_dev); + if (!ASSERT_GE(ifindex, 0, "if_nametoindex")) + return; + + snprintf(ip, 256, "10.0.0.%d", ifindex); + ASSERT_EQ(overflow_fq(10, ip), 0, "overflow_fq"); + +fail: + if (tun_fd >= 0) + close(tun_fd); +} + +static void *test_lwt_reroute_run(void *arg) +{ + netns_delete(); + RUN_TEST(lwt_reroute_normal_xmit); + RUN_TEST(lwt_reroute_qdisc_dropped); + return NULL; +} + +void test_lwt_reroute(void) +{ + pthread_t test_thread; + int err; + + /* Run the tests in their own thread to isolate the namespace changes + * so they do not affect the environment of other tests. 
+ * (specifically needed because of unshare(CLONE_NEWNS) in open_netns()) + */ + err = pthread_create(&test_thread, NULL, &test_lwt_reroute_run, NULL); + if (ASSERT_OK(err, "pthread_create")) + ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c index 7423983472c7..d6bd5e16e637 100644 --- a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c +++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c @@ -9,12 +9,38 @@ void test_refcounted_kptr(void) { + RUN_TESTS(refcounted_kptr); } void test_refcounted_kptr_fail(void) { + RUN_TESTS(refcounted_kptr_fail); } void test_refcounted_kptr_wrong_owner(void) { + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct refcounted_kptr *skel; + int ret; + + skel = refcounted_kptr__open_and_load(); + if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a1), &opts); + ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a1"); + ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a1 retval"); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_b), &opts); + ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_b"); + ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_b retval"); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a2), &opts); + ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a2"); + ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval"); + refcounted_kptr__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c index 740d5f644b40..d4579f735398 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c +++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c @@ -79,6 +79,8 @@ static const char * const success_tests[] = { "test_task_from_pid_current", "test_task_from_pid_invalid", "task_kfunc_acquire_trusted_walked", + "test_task_kfunc_flavor_relo", + "test_task_kfunc_flavor_relo_not_found", }; void test_task_kfunc(void) diff --git a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c index e873766276d1..48b55539331e 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c @@ -3,6 +3,7 @@ #include <test_progs.h> #include <linux/pkt_cls.h> +#include "cap_helpers.h" #include "test_tc_bpf.skel.h" #define LO_IFINDEX 1 @@ -327,7 +328,7 @@ static int test_tc_bpf_api(struct bpf_tc_hook *hook, int fd) return 0; } -void test_tc_bpf(void) +void tc_bpf_root(void) { DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX, .attach_point = BPF_TC_INGRESS); @@ -393,3 +394,36 @@ end: } test_tc_bpf__destroy(skel); } + +void tc_bpf_non_root(void) +{ + struct test_tc_bpf *skel = NULL; + __u64 caps = 0; + int ret; + + /* In case CAP_BPF and CAP_PERFMON is not set */ + ret = cap_enable_effective(1ULL << CAP_BPF | 1ULL << CAP_NET_ADMIN, &caps); + if (!ASSERT_OK(ret, "set_cap_bpf_cap_net_admin")) + return; + ret = cap_disable_effective(1ULL << CAP_SYS_ADMIN | 1ULL << CAP_PERFMON, NULL); + if (!ASSERT_OK(ret, "disable_cap_sys_admin")) + goto restore_cap; + + skel = test_tc_bpf__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tc_bpf__open_and_load")) + goto 
restore_cap; + + test_tc_bpf__destroy(skel); + +restore_cap: + if (caps) + cap_enable_effective(caps, NULL); +} + +void test_tc_bpf(void) +{ + if (test__start_subtest("tc_bpf_root")) + tc_bpf_root(); + if (test__start_subtest("tc_bpf_non_root")) + tc_bpf_non_root(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c new file mode 100644 index 000000000000..cd051d3901a9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -0,0 +1,415 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <unistd.h> +#include <test_progs.h> +#include "uprobe_multi.skel.h" +#include "uprobe_multi_bench.skel.h" +#include "uprobe_multi_usdt.skel.h" +#include "bpf/libbpf_internal.h" +#include "testing_helpers.h" + +static char test_data[] = "test_data"; + +noinline void uprobe_multi_func_1(void) +{ + asm volatile (""); +} + +noinline void uprobe_multi_func_2(void) +{ + asm volatile (""); +} + +noinline void uprobe_multi_func_3(void) +{ + asm volatile (""); +} + +struct child { + int go[2]; + int pid; +}; + +static void release_child(struct child *child) +{ + int child_status; + + if (!child) + return; + close(child->go[1]); + close(child->go[0]); + if (child->pid > 0) + waitpid(child->pid, &child_status, 0); +} + +static void kick_child(struct child *child) +{ + char c = 1; + + if (child) { + write(child->go[1], &c, 1); + release_child(child); + } + fflush(NULL); +} + +static struct child *spawn_child(void) +{ + static struct child child; + int err; + int c; + + /* pipe to notify child to execute the trigger functions */ + if (pipe(child.go)) + return NULL; + + child.pid = fork(); + if (child.pid < 0) { + release_child(&child); + errno = EINVAL; + return NULL; + } + + /* child */ + if (child.pid == 0) { + close(child.go[1]); + + /* wait for parent's kick */ + err = read(child.go[0], &c, 1); + if (err != 1) + exit(err); + + uprobe_multi_func_1(); + uprobe_multi_func_2(); + uprobe_multi_func_3(); + + exit(errno); + } + + return &child; +} + +static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child) +{ + skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1; + skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2; + skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3; + + skel->bss->user_ptr = test_data; + + /* + * Disable pid check in bpf program if we are pid filter test, + * because the probe should be executed only by child->pid + * passed at the probe attach. + */ + skel->bss->pid = child ? 0 : getpid(); + + if (child) + kick_child(child); + + /* trigger all probes */ + uprobe_multi_func_1(); + uprobe_multi_func_2(); + uprobe_multi_func_3(); + + /* + * There are 2 entry and 2 exit probe called for each uprobe_multi_func_[123] + * function and each slepable probe (6) increments uprobe_multi_sleep_result. 
+ */ + ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result"); + ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result"); + ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 2, "uprobe_multi_func_3_result"); + + ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 2, "uretprobe_multi_func_1_result"); + ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 2, "uretprobe_multi_func_2_result"); + ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 2, "uretprobe_multi_func_3_result"); + + ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result"); + + if (child) + ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid"); +} + +static void test_skel_api(void) +{ + struct uprobe_multi *skel = NULL; + int err; + + skel = uprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load")) + goto cleanup; + + err = uprobe_multi__attach(skel); + if (!ASSERT_OK(err, "uprobe_multi__attach")) + goto cleanup; + + uprobe_multi_test_run(skel, NULL); + +cleanup: + uprobe_multi__destroy(skel); +} + +static void +__test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts, + struct child *child) +{ + pid_t pid = child ? child->pid : -1; + struct uprobe_multi *skel = NULL; + + skel = uprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load")) + goto cleanup; + + opts->retprobe = false; + skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, pid, + binary, pattern, opts); + if (!ASSERT_OK_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + opts->retprobe = true; + skel->links.uretprobe = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, pid, + binary, pattern, opts); + if (!ASSERT_OK_PTR(skel->links.uretprobe, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + opts->retprobe = false; + skel->links.uprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uprobe_sleep, pid, + binary, pattern, opts); + if (!ASSERT_OK_PTR(skel->links.uprobe_sleep, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + opts->retprobe = true; + skel->links.uretprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uretprobe_sleep, + pid, binary, pattern, opts); + if (!ASSERT_OK_PTR(skel->links.uretprobe_sleep, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + opts->retprobe = false; + skel->links.uprobe_extra = bpf_program__attach_uprobe_multi(skel->progs.uprobe_extra, -1, + binary, pattern, opts); + if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + uprobe_multi_test_run(skel, child); + +cleanup: + uprobe_multi__destroy(skel); +} + +static void +test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts) +{ + struct child *child; + + /* no pid filter */ + __test_attach_api(binary, pattern, opts, NULL); + + /* pid filter */ + child = spawn_child(); + if (!ASSERT_OK_PTR(child, "spawn_child")) + return; + + __test_attach_api(binary, pattern, opts, child); +} + +static void test_attach_api_pattern(void) +{ + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); + + test_attach_api("/proc/self/exe", "uprobe_multi_func_*", &opts); + test_attach_api("/proc/self/exe", "uprobe_multi_func_?", &opts); +} + +static void test_attach_api_syms(void) +{ + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); + const char *syms[3] = { + "uprobe_multi_func_1", + "uprobe_multi_func_2", + "uprobe_multi_func_3", + }; + + opts.syms = syms; + opts.cnt = 
ARRAY_SIZE(syms); + test_attach_api("/proc/self/exe", NULL, &opts); +} + +static void __test_link_api(struct child *child) +{ + int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1; + LIBBPF_OPTS(bpf_link_create_opts, opts); + const char *path = "/proc/self/exe"; + struct uprobe_multi *skel = NULL; + unsigned long *offsets = NULL; + const char *syms[3] = { + "uprobe_multi_func_1", + "uprobe_multi_func_2", + "uprobe_multi_func_3", + }; + int link_extra_fd = -1; + int err; + + err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets); + if (!ASSERT_OK(err, "elf_resolve_syms_offsets")) + return; + + opts.uprobe_multi.path = path; + opts.uprobe_multi.offsets = offsets; + opts.uprobe_multi.cnt = ARRAY_SIZE(syms); + opts.uprobe_multi.pid = child ? child->pid : 0; + + skel = uprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load")) + goto cleanup; + + opts.kprobe_multi.flags = 0; + prog_fd = bpf_program__fd(skel->progs.uprobe); + link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link1_fd, 0, "link1_fd")) + goto cleanup; + + opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN; + prog_fd = bpf_program__fd(skel->progs.uretprobe); + link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link2_fd, 0, "link2_fd")) + goto cleanup; + + opts.kprobe_multi.flags = 0; + prog_fd = bpf_program__fd(skel->progs.uprobe_sleep); + link3_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link3_fd, 0, "link3_fd")) + goto cleanup; + + opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN; + prog_fd = bpf_program__fd(skel->progs.uretprobe_sleep); + link4_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link4_fd, 0, "link4_fd")) + goto cleanup; + + opts.kprobe_multi.flags = 0; + opts.uprobe_multi.pid = 0; + prog_fd = bpf_program__fd(skel->progs.uprobe_extra); + link_extra_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link_extra_fd, 0, "link_extra_fd")) + goto cleanup; + + uprobe_multi_test_run(skel, child); + +cleanup: + if (link1_fd >= 0) + close(link1_fd); + if (link2_fd >= 0) + close(link2_fd); + if (link3_fd >= 0) + close(link3_fd); + if (link4_fd >= 0) + close(link4_fd); + if (link_extra_fd >= 0) + close(link_extra_fd); + + uprobe_multi__destroy(skel); + free(offsets); +} + +void test_link_api(void) +{ + struct child *child; + + /* no pid filter */ + __test_link_api(NULL); + + /* pid filter */ + child = spawn_child(); + if (!ASSERT_OK_PTR(child, "spawn_child")) + return; + + __test_link_api(child); +} + +static void test_bench_attach_uprobe(void) +{ + long attach_start_ns = 0, attach_end_ns = 0; + struct uprobe_multi_bench *skel = NULL; + long detach_start_ns, detach_end_ns; + double attach_delta, detach_delta; + int err; + + skel = uprobe_multi_bench__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi_bench__open_and_load")) + goto cleanup; + + attach_start_ns = get_time_ns(); + + err = uprobe_multi_bench__attach(skel); + if (!ASSERT_OK(err, "uprobe_multi_bench__attach")) + goto cleanup; + + attach_end_ns = get_time_ns(); + + system("./uprobe_multi bench"); + + ASSERT_EQ(skel->bss->count, 50000, "uprobes_count"); + +cleanup: + detach_start_ns = get_time_ns(); + uprobe_multi_bench__destroy(skel); + detach_end_ns = get_time_ns(); + + attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0; + detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0; + + 
printf("%s: attached in %7.3lfs\n", __func__, attach_delta); + printf("%s: detached in %7.3lfs\n", __func__, detach_delta); +} + +static void test_bench_attach_usdt(void) +{ + long attach_start_ns = 0, attach_end_ns = 0; + struct uprobe_multi_usdt *skel = NULL; + long detach_start_ns, detach_end_ns; + double attach_delta, detach_delta; + + skel = uprobe_multi_usdt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi__open")) + goto cleanup; + + attach_start_ns = get_time_ns(); + + skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, -1, "./uprobe_multi", + "test", "usdt", NULL); + if (!ASSERT_OK_PTR(skel->links.usdt0, "bpf_program__attach_usdt")) + goto cleanup; + + attach_end_ns = get_time_ns(); + + system("./uprobe_multi usdt"); + + ASSERT_EQ(skel->bss->count, 50000, "usdt_count"); + +cleanup: + detach_start_ns = get_time_ns(); + uprobe_multi_usdt__destroy(skel); + detach_end_ns = get_time_ns(); + + attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0; + detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0; + + printf("%s: attached in %7.3lfs\n", __func__, attach_delta); + printf("%s: detached in %7.3lfs\n", __func__, detach_delta); +} + +void test_uprobe_multi_test(void) +{ + if (test__start_subtest("skel_api")) + test_skel_api(); + if (test__start_subtest("attach_api_pattern")) + test_attach_api_pattern(); + if (test__start_subtest("attach_api_syms")) + test_attach_api_syms(); + if (test__start_subtest("link_api")) + test_link_api(); + if (test__start_subtest("bench_uprobe")) + test_bench_attach_uprobe(); + if (test__start_subtest("bench_usdt")) + test_bench_attach_usdt(); +} diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c index 06838083079c..b567a666d2b8 100644 --- a/tools/testing/selftests/bpf/progs/local_kptr_stash.c +++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c @@ -14,10 +14,16 @@ struct node_data { struct bpf_rb_node node; }; +struct plain_local { + long key; + long data; +}; + struct map_value { struct prog_test_ref_kfunc *not_kptr; struct prog_test_ref_kfunc __kptr *val; struct node_data __kptr *node; + struct plain_local __kptr *plain; }; /* This is necessary so that LLVM generates BTF for node_data struct @@ -67,6 +73,28 @@ long stash_rb_nodes(void *ctx) } SEC("tc") +long stash_plain(void *ctx) +{ + struct map_value *mapval; + struct plain_local *res; + int idx = 0; + + mapval = bpf_map_lookup_elem(&some_nodes, &idx); + if (!mapval) + return 1; + + res = bpf_obj_new(typeof(*res)); + if (!res) + return 1; + res->key = 41; + + res = bpf_kptr_xchg(&mapval->plain, res); + if (res) + bpf_obj_drop(res); + return 0; +} + +SEC("tc") long unstash_rb_node(void *ctx) { struct map_value *mapval; diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c b/tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c new file mode 100644 index 000000000000..fcf7a7567da2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> +#include "../bpf_experimental.h" +#include "bpf_misc.h" + +struct node_data { + long key; + long data; + struct bpf_rb_node node; +}; + +struct map_value { + struct node_data __kptr *node; +}; + +struct node_data2 { + long key[4]; +}; + +/* This is necessary so that LLVM generates BTF for node_data struct + * If it's not included, a fwd reference for node_data will be generated but + * no struct. Example BTF of "node" field in map_value when not included: + * + * [10] PTR '(anon)' type_id=35 + * [34] FWD 'node_data' fwd_kind=struct + * [35] TYPE_TAG 'kptr_ref' type_id=34 + */ +struct node_data *just_here_because_btf_bug; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 2); +} some_nodes SEC(".maps"); + +SEC("tc") +__failure __msg("invalid kptr access, R2 type=ptr_node_data2 expected=ptr_node_data") +long stash_rb_nodes(void *ctx) +{ + struct map_value *mapval; + struct node_data2 *res; + int idx = 0; + + mapval = bpf_map_lookup_elem(&some_nodes, &idx); + if (!mapval) + return 1; + + res = bpf_obj_new(typeof(*res)); + if (!res) + return 1; + res->key[0] = 40; + + res = bpf_kptr_xchg(&mapval->node, res); + if (res) + bpf_obj_drop(res); + return 0; +} + +SEC("tc") +__failure __msg("R1 must have zero offset when passed to release func") +long drop_rb_node_off(void *ctx) +{ + struct map_value *mapval; + struct node_data *res; + int idx = 0; + + mapval = bpf_map_lookup_elem(&some_nodes, &idx); + if (!mapval) + return 1; + + res = bpf_obj_new(typeof(*res)); + if (!res) + return 1; + /* Try releasing with graph node offset */ + bpf_obj_drop(&res->node); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c index c55652fdc63a..893a4fdb4b6e 100644 --- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c @@ -8,6 +8,9 @@ #include "bpf_misc.h" #include "bpf_experimental.h" +extern void bpf_rcu_read_lock(void) __ksym; +extern void bpf_rcu_read_unlock(void) __ksym; + struct node_data { long key; long list_data; @@ -497,4 +500,72 @@ long rbtree_wrong_owner_remove_fail_a2(void *ctx) return 0; } +SEC("?fentry.s/bpf_testmod_test_read") +__success +int BPF_PROG(rbtree_sleepable_rcu, + struct file *file, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len) +{ + struct bpf_rb_node *rb; + struct node_data *n, *m = NULL; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 0; + + bpf_rcu_read_lock(); + bpf_spin_lock(&lock); + bpf_rbtree_add(&root, &n->r, less); + rb = bpf_rbtree_first(&root); + if (!rb) + goto err_out; + + rb = bpf_rbtree_remove(&root, rb); + if (!rb) + goto err_out; + + m = container_of(rb, struct node_data, r); + +err_out: + bpf_spin_unlock(&lock); + bpf_rcu_read_unlock(); + if (m) + bpf_obj_drop(m); + return 0; +} + +SEC("?fentry.s/bpf_testmod_test_read") +__success +int BPF_PROG(rbtree_sleepable_rcu_no_explicit_rcu_lock, + struct file *file, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len) +{ + struct bpf_rb_node *rb; + struct node_data *n, *m = NULL; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 0; + + /* No explicit bpf_rcu_read_lock */ + bpf_spin_lock(&lock); + bpf_rbtree_add(&root, &n->r, less); + rb = bpf_rbtree_first(&root); + if 
(!rb) + goto err_out; + + rb = bpf_rbtree_remove(&root, rb); + if (!rb) + goto err_out; + + m = container_of(rb, struct node_data, r); + +err_out: + bpf_spin_unlock(&lock); + /* No explicit bpf_rcu_read_unlock */ + if (m) + bpf_obj_drop(m); + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c index 0b09e5c915b1..1ef07f6ee580 100644 --- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c @@ -13,6 +13,9 @@ struct node_acquire { struct bpf_refcount refcount; }; +extern void bpf_rcu_read_lock(void) __ksym; +extern void bpf_rcu_read_unlock(void) __ksym; + #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8))) private(A) struct bpf_spin_lock glock; private(A) struct bpf_rb_root groot __contains(node_acquire, node); @@ -71,4 +74,29 @@ long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx) return 0; } +SEC("?fentry.s/bpf_testmod_test_read") +__failure __msg("function calls are not allowed while holding a lock") +int BPF_PROG(rbtree_fail_sleepable_lock_across_rcu, + struct file *file, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len) +{ + struct node_acquire *n; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 0; + + /* spin_{lock,unlock} are in different RCU CS */ + bpf_rcu_read_lock(); + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + bpf_rcu_read_unlock(); + + bpf_rcu_read_lock(); + bpf_spin_unlock(&glock); + bpf_rcu_read_unlock(); + + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c index b09371bba204..70df695312dc 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_success.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c @@ -18,6 +18,13 @@ int err, pid; */ struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak; + +struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak; +/* The two-param bpf_task_acquire doesn't exist */ +struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak; +/* Incorrect type for first param */ +struct task_struct *bpf_task_acquire___three(void *ctx) __ksym __weak; + void invalid_kfunc(void) __ksym __weak; void bpf_testmod_test_mod_kfunc(int i) __ksym __weak; @@ -56,6 +63,50 @@ static int test_acquire_release(struct task_struct *task) } SEC("tp_btf/task_newtask") +int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired = NULL; + int fake_ctx = 42; + + if (bpf_ksym_exists(bpf_task_acquire___one)) { + acquired = bpf_task_acquire___one(task); + } else if (bpf_ksym_exists(bpf_task_acquire___two)) { + /* Here, bpf_object__resolve_ksym_func_btf_id's find_ksym_btf_id + * call will find vmlinux's bpf_task_acquire, but subsequent + * bpf_core_types_are_compat will fail + */ + acquired = bpf_task_acquire___two(task, &fake_ctx); + err = 3; + return 0; + } else if (bpf_ksym_exists(bpf_task_acquire___three)) { + /* bpf_core_types_are_compat will fail similarly to above case */ + acquired = bpf_task_acquire___three(&fake_ctx); + err = 4; + return 0; + } + + if (acquired) + bpf_task_release(acquired); + else + err = 5; + return 0; +} + +SEC("tp_btf/task_newtask") +int 
BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags) +{ + /* Neither symbol should successfully resolve. + * Success or failure of one ___flavor should not affect others + */ + if (bpf_ksym_exists(bpf_task_acquire___two)) + err = 1; + else if (bpf_ksym_exists(bpf_task_acquire___three)) + err = 2; + + return 0; +} + +SEC("tp_btf/task_newtask") int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags) { if (!is_test_kfunc_task()) diff --git a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c index 321abf862801..67c14ba1e87b 100644 --- a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c +++ b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c @@ -5,7 +5,8 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18 +#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18 const volatile int skip = 0; #else const volatile int skip = 1; diff --git a/tools/testing/selftests/bpf/progs/test_lwt_redirect.c b/tools/testing/selftests/bpf/progs/test_lwt_redirect.c new file mode 100644 index 000000000000..8c895122f293 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_lwt_redirect.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_endian.h> +#include <bpf/bpf_helpers.h> +#include <linux/ip.h> +#include "bpf_tracing_net.h" + +/* We don't care about whether the packet can be received by network stack. + * Just care if the packet is sent to the correct device at correct direction + * and not panic the kernel. + */ +static int prepend_dummy_mac(struct __sk_buff *skb) +{ + char mac[] = {0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0xf, + 0xe, 0xd, 0xc, 0xb, 0xa, 0x08, 0x00}; + + if (bpf_skb_change_head(skb, ETH_HLEN, 0)) + return -1; + + if (bpf_skb_store_bytes(skb, 0, mac, sizeof(mac), 0)) + return -1; + + return 0; +} + +/* Use the last byte of IP address to redirect the packet */ +static int get_redirect_target(struct __sk_buff *skb) +{ + struct iphdr *iph = NULL; + void *start = (void *)(long)skb->data; + void *end = (void *)(long)skb->data_end; + + if (start + sizeof(*iph) > end) + return -1; + + iph = (struct iphdr *)start; + return bpf_ntohl(iph->daddr) & 0xff; +} + +SEC("redir_ingress") +int test_lwt_redirect_in(struct __sk_buff *skb) +{ + int target = get_redirect_target(skb); + + if (target < 0) + return BPF_OK; + + if (prepend_dummy_mac(skb)) + return BPF_DROP; + + return bpf_redirect(target, BPF_F_INGRESS); +} + +SEC("redir_egress") +int test_lwt_redirect_out(struct __sk_buff *skb) +{ + int target = get_redirect_target(skb); + + if (target < 0) + return BPF_OK; + + if (prepend_dummy_mac(skb)) + return BPF_DROP; + + return bpf_redirect(target, 0); +} + +SEC("redir_egress_nomac") +int test_lwt_redirect_out_nomac(struct __sk_buff *skb) +{ + int target = get_redirect_target(skb); + + if (target < 0) + return BPF_OK; + + return bpf_redirect(target, 0); +} + +SEC("redir_ingress_nomac") +int test_lwt_redirect_in_nomac(struct __sk_buff *skb) +{ + int target = get_redirect_target(skb); + + if (target < 0) + return BPF_OK; + + return bpf_redirect(target, BPF_F_INGRESS); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_lwt_reroute.c b/tools/testing/selftests/bpf/progs/test_lwt_reroute.c new file mode 100644 index 000000000000..1dc64351929c --- 
/dev/null +++ b/tools/testing/selftests/bpf/progs/test_lwt_reroute.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <inttypes.h> +#include <linux/bpf.h> +#include <bpf/bpf_endian.h> +#include <bpf/bpf_helpers.h> +#include <linux/if_ether.h> +#include <linux/ip.h> + +/* This function extracts the last byte of the daddr, and uses it + * as output dev index. + */ +SEC("lwt_xmit") +int test_lwt_reroute(struct __sk_buff *skb) +{ + struct iphdr *iph = NULL; + void *start = (void *)(long)skb->data; + void *end = (void *)(long)skb->data_end; + + /* set mark at most once */ + if (skb->mark != 0) + return BPF_OK; + + if (start + sizeof(*iph) > end) + return BPF_DROP; + + iph = (struct iphdr *)start; + skb->mark = bpf_ntohl(iph->daddr) & 0xff; + + /* do not reroute x.x.x.0 packets */ + if (skb->mark == 0) + return BPF_OK; + + return BPF_LWT_REROUTE; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_tc_bpf.c b/tools/testing/selftests/bpf/progs/test_tc_bpf.c index d28ca8d1f3d0..ef7da419632a 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_bpf.c +++ b/tools/testing/selftests/bpf/progs/test_tc_bpf.c @@ -2,6 +2,8 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include <linux/if_ether.h> +#include <linux/ip.h> /* Dummy prog to test TC-BPF API */ @@ -10,3 +12,14 @@ int cls(struct __sk_buff *skb) { return 0; } + +/* Prog to verify tc-bpf without cap_sys_admin and cap_perfmon */ +SEC("tcx/ingress") +int pkt_ptr(struct __sk_buff *skb) +{ + struct iphdr *iph = (void *)(long)skb->data + sizeof(struct ethhdr); + + if ((long)(iph + 1) > (long)skb->data_end) + return 1; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi.c b/tools/testing/selftests/bpf/progs/uprobe_multi.c new file mode 100644 index 000000000000..419d9aa28fce --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_multi.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <stdbool.h> + +char _license[] SEC("license") = "GPL"; + +__u64 uprobe_multi_func_1_addr = 0; +__u64 uprobe_multi_func_2_addr = 0; +__u64 uprobe_multi_func_3_addr = 0; + +__u64 uprobe_multi_func_1_result = 0; +__u64 uprobe_multi_func_2_result = 0; +__u64 uprobe_multi_func_3_result = 0; + +__u64 uretprobe_multi_func_1_result = 0; +__u64 uretprobe_multi_func_2_result = 0; +__u64 uretprobe_multi_func_3_result = 0; + +__u64 uprobe_multi_sleep_result = 0; + +int pid = 0; +int child_pid = 0; + +bool test_cookie = false; +void *user_ptr = 0; + +static __always_inline bool verify_sleepable_user_copy(void) +{ + char data[9]; + + bpf_copy_from_user(data, sizeof(data), user_ptr); + return bpf_strncmp(data, sizeof(data), "test_data") == 0; +} + +static void uprobe_multi_check(void *ctx, bool is_return, bool is_sleep) +{ + child_pid = bpf_get_current_pid_tgid() >> 32; + + if (pid && child_pid != pid) + return; + + __u64 cookie = test_cookie ? 
bpf_get_attach_cookie(ctx) : 0; + __u64 addr = bpf_get_func_ip(ctx); + +#define SET(__var, __addr, __cookie) ({ \ + if (addr == __addr && \ + (!test_cookie || (cookie == __cookie))) \ + __var += 1; \ +}) + + if (is_return) { + SET(uretprobe_multi_func_1_result, uprobe_multi_func_1_addr, 2); + SET(uretprobe_multi_func_2_result, uprobe_multi_func_2_addr, 3); + SET(uretprobe_multi_func_3_result, uprobe_multi_func_3_addr, 1); + } else { + SET(uprobe_multi_func_1_result, uprobe_multi_func_1_addr, 3); + SET(uprobe_multi_func_2_result, uprobe_multi_func_2_addr, 1); + SET(uprobe_multi_func_3_result, uprobe_multi_func_3_addr, 2); + } + +#undef SET + + if (is_sleep && verify_sleepable_user_copy()) + uprobe_multi_sleep_result += 1; +} + +SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*") +int uprobe(struct pt_regs *ctx) +{ + uprobe_multi_check(ctx, false, false); + return 0; +} + +SEC("uretprobe.multi//proc/self/exe:uprobe_multi_func_*") +int uretprobe(struct pt_regs *ctx) +{ + uprobe_multi_check(ctx, true, false); + return 0; +} + +SEC("uprobe.multi.s//proc/self/exe:uprobe_multi_func_*") +int uprobe_sleep(struct pt_regs *ctx) +{ + uprobe_multi_check(ctx, false, true); + return 0; +} + +SEC("uretprobe.multi.s//proc/self/exe:uprobe_multi_func_*") +int uretprobe_sleep(struct pt_regs *ctx) +{ + uprobe_multi_check(ctx, true, true); + return 0; +} + +SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*") +int uprobe_extra(struct pt_regs *ctx) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_bench.c b/tools/testing/selftests/bpf/progs/uprobe_multi_bench.c new file mode 100644 index 000000000000..5367f6105e30 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_multi_bench.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +int count; + +SEC("uprobe.multi/./uprobe_multi:uprobe_multi_func_*") +int uprobe_bench(struct pt_regs *ctx) +{ + count++; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c b/tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c new file mode 100644 index 000000000000..9e1c33d0bd2f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/usdt.bpf.h> + +char _license[] SEC("license") = "GPL"; + +int count; + +SEC("usdt") +int usdt0(struct pt_regs *ctx) +{ + count++; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/verifier_bswap.c b/tools/testing/selftests/bpf/progs/verifier_bswap.c index 724bb38988b5..8893094725f0 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bswap.c +++ b/tools/testing/selftests/bpf/progs/verifier_bswap.c @@ -4,7 +4,8 @@ #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18 +#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18 SEC("socket") __description("BSWAP, 16") diff --git a/tools/testing/selftests/bpf/progs/verifier_gotol.c b/tools/testing/selftests/bpf/progs/verifier_gotol.c index ce48f7757db2..2dae5322a18e 100644 --- a/tools/testing/selftests/bpf/progs/verifier_gotol.c +++ b/tools/testing/selftests/bpf/progs/verifier_gotol.c @@ -4,7 +4,8 @@ #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18 +#if 
(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18 SEC("socket") __description("gotol, small_imm") diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c index 3c3d1bddd67f..0c638f45aaf1 100644 --- a/tools/testing/selftests/bpf/progs/verifier_ldsx.c +++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c @@ -4,7 +4,8 @@ #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18 +#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18 SEC("socket") __description("LDSX, S8") diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c index be6f69a6b659..3c8ac2c57b1b 100644 --- a/tools/testing/selftests/bpf/progs/verifier_movsx.c +++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c @@ -4,7 +4,8 @@ #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18 +#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18 SEC("socket") __description("MOV32SX, S8") diff --git a/tools/testing/selftests/bpf/progs/verifier_sdiv.c b/tools/testing/selftests/bpf/progs/verifier_sdiv.c index f61a9a1058c8..0990f8825675 100644 --- a/tools/testing/selftests/bpf/progs/verifier_sdiv.c +++ b/tools/testing/selftests/bpf/progs/verifier_sdiv.c @@ -4,7 +4,8 @@ #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18 +#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18 SEC("socket") __description("SDIV32, non-zero imm divisor, check 1") diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h index 5312323881b6..5b7a55136741 100644 --- a/tools/testing/selftests/bpf/testing_helpers.h +++ b/tools/testing/selftests/bpf/testing_helpers.h @@ -7,6 +7,7 @@ #include <stdbool.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> +#include <time.h> int parse_num_list(const char *s, bool **set, int *set_len); __u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info); @@ -33,4 +34,13 @@ int load_bpf_testmod(bool verbose); int unload_bpf_testmod(bool verbose); int kern_sync_rcu(void); +static inline __u64 get_time_ns(void) +{ + struct timespec t; + + clock_gettime(CLOCK_MONOTONIC, &t); + + return (u64)t.tv_sec * 1000000000 + t.tv_nsec; +} + #endif /* __TESTING_HELPERS_H */ diff --git a/tools/testing/selftests/bpf/uprobe_multi.c b/tools/testing/selftests/bpf/uprobe_multi.c new file mode 100644 index 000000000000..a61ceab60b68 --- /dev/null +++ b/tools/testing/selftests/bpf/uprobe_multi.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <stdio.h> +#include <string.h> +#include <sdt.h> + +#define __PASTE(a, b) a##b +#define PASTE(a, b) __PASTE(a, b) + +#define NAME(name, idx) PASTE(name, idx) + +#define DEF(name, idx) int NAME(name, idx)(void) { return 0; } +#define CALL(name, idx) NAME(name, idx)(); + +#define F(body, name, idx) body(name, idx) + +#define F10(body, name, idx) \ + F(body, PASTE(name, idx), 0) F(body, PASTE(name, idx), 1) F(body, PASTE(name, idx), 2) \ + F(body, PASTE(name, idx), 3) F(body, PASTE(name, 
idx), 4) F(body, PASTE(name, idx), 5) \ + F(body, PASTE(name, idx), 6) F(body, PASTE(name, idx), 7) F(body, PASTE(name, idx), 8) \ + F(body, PASTE(name, idx), 9) + +#define F100(body, name, idx) \ + F10(body, PASTE(name, idx), 0) F10(body, PASTE(name, idx), 1) F10(body, PASTE(name, idx), 2) \ + F10(body, PASTE(name, idx), 3) F10(body, PASTE(name, idx), 4) F10(body, PASTE(name, idx), 5) \ + F10(body, PASTE(name, idx), 6) F10(body, PASTE(name, idx), 7) F10(body, PASTE(name, idx), 8) \ + F10(body, PASTE(name, idx), 9) + +#define F1000(body, name, idx) \ + F100(body, PASTE(name, idx), 0) F100(body, PASTE(name, idx), 1) F100(body, PASTE(name, idx), 2) \ + F100(body, PASTE(name, idx), 3) F100(body, PASTE(name, idx), 4) F100(body, PASTE(name, idx), 5) \ + F100(body, PASTE(name, idx), 6) F100(body, PASTE(name, idx), 7) F100(body, PASTE(name, idx), 8) \ + F100(body, PASTE(name, idx), 9) + +#define F10000(body, name, idx) \ + F1000(body, PASTE(name, idx), 0) F1000(body, PASTE(name, idx), 1) F1000(body, PASTE(name, idx), 2) \ + F1000(body, PASTE(name, idx), 3) F1000(body, PASTE(name, idx), 4) F1000(body, PASTE(name, idx), 5) \ + F1000(body, PASTE(name, idx), 6) F1000(body, PASTE(name, idx), 7) F1000(body, PASTE(name, idx), 8) \ + F1000(body, PASTE(name, idx), 9) + +F10000(DEF, uprobe_multi_func_, 0) +F10000(DEF, uprobe_multi_func_, 1) +F10000(DEF, uprobe_multi_func_, 2) +F10000(DEF, uprobe_multi_func_, 3) +F10000(DEF, uprobe_multi_func_, 4) + +static int bench(void) +{ + F10000(CALL, uprobe_multi_func_, 0) + F10000(CALL, uprobe_multi_func_, 1) + F10000(CALL, uprobe_multi_func_, 2) + F10000(CALL, uprobe_multi_func_, 3) + F10000(CALL, uprobe_multi_func_, 4) + return 0; +} + +#define PROBE STAP_PROBE(test, usdt); + +#define PROBE10 PROBE PROBE PROBE PROBE PROBE \ + PROBE PROBE PROBE PROBE PROBE +#define PROBE100 PROBE10 PROBE10 PROBE10 PROBE10 PROBE10 \ + PROBE10 PROBE10 PROBE10 PROBE10 PROBE10 +#define PROBE1000 PROBE100 PROBE100 PROBE100 PROBE100 PROBE100 \ + PROBE100 PROBE100 PROBE100 PROBE100 PROBE100 +#define PROBE10000 PROBE1000 PROBE1000 PROBE1000 PROBE1000 PROBE1000 \ + PROBE1000 PROBE1000 PROBE1000 PROBE1000 PROBE1000 + +static int usdt(void) +{ + PROBE10000 + PROBE10000 + PROBE10000 + PROBE10000 + PROBE10000 + return 0; +} + +int main(int argc, char **argv) +{ + if (argc != 2) + goto error; + + if (!strcmp("bench", argv[1])) + return bench(); + if (!strcmp("usdt", argv[1])) + return usdt(); + +error: + fprintf(stderr, "usage: %s <bench|usdt>\n", argv[0]); + return -1; +} |
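
Note on the generated trigger functions above (a sketch, not part of the patch): uprobe_multi.c uses nested token-pasting macros so that the five F10000(DEF, uprobe_multi_func_, N) invocations (N = 0..4) define 50000 functions named uprobe_multi_func_00000 through uprobe_multi_func_49999, which the "uprobe_multi_func_*" glob in uprobe_multi_bench.c then attaches to (hence the ASSERT_EQ(skel->bss->count, 50000) check in the bench subtest). The standalone C sketch below reproduces the same fan-out at a scale of 10 so the expansion is easy to inspect with `gcc -E`; the demo_func_ name is made up for illustration and does not appear in the patch.

/* Minimal sketch of the token-pasting fan-out used by uprobe_multi.c,
 * scaled down to 10 functions. F10(DEF, demo_func_, 0) defines
 * demo_func_00 .. demo_func_09; F10(CALL, demo_func_, 0) calls them.
 */
#include <stdio.h>

#define __PASTE(a, b) a##b
#define PASTE(a, b) __PASTE(a, b)
#define NAME(name, idx) PASTE(name, idx)

#define DEF(name, idx) int NAME(name, idx)(void) { return 0; }
#define CALL(name, idx) NAME(name, idx)();

#define F(body, name, idx) body(name, idx)
#define F10(body, name, idx) \
	F(body, PASTE(name, idx), 0) F(body, PASTE(name, idx), 1) \
	F(body, PASTE(name, idx), 2) F(body, PASTE(name, idx), 3) \
	F(body, PASTE(name, idx), 4) F(body, PASTE(name, idx), 5) \
	F(body, PASTE(name, idx), 6) F(body, PASTE(name, idx), 7) \
	F(body, PASTE(name, idx), 8) F(body, PASTE(name, idx), 9)

F10(DEF, demo_func_, 0)		/* defines demo_func_00 .. demo_func_09 */

int main(void)
{
	F10(CALL, demo_func_, 0)	/* calls all ten generated functions */
	printf("called 10 generated functions\n");
	return 0;
}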