diff --git a/.clippy.toml b/.clippy.toml new file mode 100644 index 0000000000000..e4c4eef10b28c --- /dev/null +++ b/.clippy.toml @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 + +check-private-items = true + +disallowed-macros = [ + # The `clippy::dbg_macro` lint only works with `std::dbg!`, thus we simulate + # it here, see: https://github.com/rust-lang/rust-clippy/issues/11303. + { path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool" }, +] diff --git a/.gitignore b/.gitignore index 56972adb5031a..a61e4778d011c 100644 --- a/.gitignore +++ b/.gitignore @@ -103,6 +103,7 @@ modules.order # We don't want to ignore the following even if they are dot-files # !.clang-format +!.clippy.toml !.cocciconfig !.editorconfig !.get_maintainer.ignore diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index d401577b5a6ac..607a8937f1754 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3028,6 +3028,8 @@ * max_sec_lba48: Set or clear transfer size limit to 65535 sectors. + * external: Mark port as external (hotplug-capable). + * [no]lpm: Enable or disable link power management. * [no]setxfer: Indicate if transfer speed mode setting diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index f8bc1630eba05..fa21cdd610b21 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -212,6 +212,17 @@ pid>/``). This value defaults to 0. +core_sort_vma +============= + +The default coredump writes VMAs in address order. By setting +``core_sort_vma`` to 1, VMAs will be written from smallest size +to largest size. This is known to break at least elfutils, but +can be handy when dealing with very large (and truncated) +coredumps where the more useful debugging details are included +in the smaller VMAs. + + core_uses_pid ============= diff --git a/Documentation/arch/arm64/booting.rst b/Documentation/arch/arm64/booting.rst index b57776a68f156..15bcd1b4003a7 100644 --- a/Documentation/arch/arm64/booting.rst +++ b/Documentation/arch/arm64/booting.rst @@ -285,6 +285,12 @@ Before jumping into the kernel, the following conditions must be met: - SCR_EL3.FGTEn (bit 27) must be initialised to 0b1. + For CPUs with the Fine Grained Traps 2 (FEAT_FGT2) extension present: + + - If EL3 is present and the kernel is entered at EL2: + + - SCR_EL3.FGTEn2 (bit 59) must be initialised to 0b1. + For CPUs with support for HCRX_EL2 (FEAT_HCX) present: - If EL3 is present and the kernel is entered at EL2: @@ -379,6 +385,22 @@ Before jumping into the kernel, the following conditions must be met: - SMCR_EL2.EZT0 (bit 30) must be initialised to 0b1. + For CPUs with the Performance Monitors Extension (FEAT_PMUv3p9): + + - If EL3 is present: + + - MDCR_EL3.EnPM2 (bit 7) must be initialised to 0b1. + + - If the kernel is entered at EL1 and EL2 is present: + + - HDFGRTR2_EL2.nPMICNTR_EL0 (bit 2) must be initialised to 0b1. + - HDFGRTR2_EL2.nPMICFILTR_EL0 (bit 3) must be initialised to 0b1. + - HDFGRTR2_EL2.nPMUACR_EL1 (bit 4) must be initialised to 0b1. + + - HDFGWTR2_EL2.nPMICNTR_EL0 (bit 2) must be initialised to 0b1. + - HDFGWTR2_EL2.nPMICFILTR_EL0 (bit 3) must be initialised to 0b1. + - HDFGWTR2_EL2.nPMUACR_EL1 (bit 4) must be initialised to 0b1. 
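For firmware or hypervisor authors, the FEAT_PMUv3p9 requirement listed above amounts to clearing the new fine-grained traps before dropping into the kernel. Below is a minimal, illustrative C sketch of that EL2-side programming (not part of the patch itself), assuming kernel-style ``read_sysreg()``/``write_sysreg_s()`` accessors and the ``SYS_HDFGRTR2_EL2``/``HDFGRTR2_EL2_nPM*`` and ID-register field macros that this series uses in ``el2_setup.h``:

.. code-block:: c

	#include <linux/types.h>
	#include <asm/barrier.h>	/* isb() */
	#include <asm/sysreg.h>		/* read_sysreg(), write_sysreg_s(), SYS_* */

	/* Mirrors the logic of __init_el2_fgt2 added later in this series. */
	static void el2_disable_pmuv3p9_fgt2_traps(void)
	{
		u64 mmfr0 = read_sysreg(id_aa64mmfr0_el1);
		u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
		u64 clear = 0;

		/* The FGT2 registers only exist when FEAT_FGT2 is implemented. */
		if (((mmfr0 >> ID_AA64MMFR0_EL1_FGT_SHIFT) & 0xf) <
		    ID_AA64MMFR0_EL1_FGT_FGT2)
			return;

		/* Only untrap the PMUv3p9 registers when the PMU is new enough. */
		if (((dfr0 >> ID_AA64DFR0_EL1_PMUVer_SHIFT) & 0xf) >=
		    ID_AA64DFR0_EL1_PMUVer_V3P9)
			clear = HDFGRTR2_EL2_nPMICNTR_EL0 |
				HDFGRTR2_EL2_nPMICFILTR_EL0 |
				HDFGRTR2_EL2_nPMUACR_EL1;

		/* A 1 in an nXXX bit means "do not trap"; all else stays trapped. */
		write_sysreg_s(clear, SYS_HDFGRTR2_EL2);
		write_sysreg_s(clear, SYS_HDFGWTR2_EL2);
		isb();
	}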
+ For CPUs with Memory Copy and Memory Set instructions (FEAT_MOPS): - If the kernel is entered at EL1 and EL2 is present: diff --git a/Documentation/devicetree/bindings/arm/qcom,coresight-tpda.yaml b/Documentation/devicetree/bindings/arm/qcom,coresight-tpda.yaml index 76163abed655a..5ed40f21b8eb5 100644 --- a/Documentation/devicetree/bindings/arm/qcom,coresight-tpda.yaml +++ b/Documentation/devicetree/bindings/arm/qcom,coresight-tpda.yaml @@ -55,8 +55,7 @@ properties: - const: arm,primecell reg: - minItems: 1 - maxItems: 2 + maxItems: 1 clocks: maxItems: 1 diff --git a/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml b/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml index 8eec07d9d4542..07d21a3617f5b 100644 --- a/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml +++ b/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml @@ -41,8 +41,7 @@ properties: - const: arm,primecell reg: - minItems: 1 - maxItems: 2 + maxItems: 1 qcom,dsb-element-bits: description: diff --git a/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml b/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml index b68141264c0e9..4d40e75b4e1ef 100644 --- a/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml +++ b/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml @@ -71,7 +71,7 @@ properties: description: Any lane can be inverted or not. minItems: 1 - maxItems: 2 + maxItems: 3 required: - data-lanes diff --git a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml index 7c5ac5d2e880b..f6884f6e59e74 100644 --- a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml +++ b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml @@ -170,7 +170,7 @@ allOf: const: renesas,r8a779h0-canfd then: patternProperties: - "^channel[5-7]$": false + "^channel[4-7]$": false else: if: not: diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml index 31295be910130..234089b5954dd 100644 --- a/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml +++ b/Documentation/devicetree/bindings/soc/fsl/fsl,ls1028a-reset.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Freescale Layerscape Reset Registers Module maintainers: - - Frank Li + - Frank Li description: Reset Module includes chip reset, service processor control and Reset Control diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index fbfce9b4ae6b8..71a1a399e1e1f 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -581,6 +581,8 @@ patternProperties: description: GlobalTop Technology, Inc. "^gmt,.*": description: Global Mixed-mode Technology, Inc. + "^gocontroll,.*": + description: GOcontroll Modular Embedded Electronics B.V. "^goldelico,.*": description: Golden Delicious Computers GmbH & Co. 
KG "^goodix,.*": diff --git a/Documentation/netlink/specs/ovs_vport.yaml b/Documentation/netlink/specs/ovs_vport.yaml index 86ba9ac2a5210..b538bb99ee9b5 100644 --- a/Documentation/netlink/specs/ovs_vport.yaml +++ b/Documentation/netlink/specs/ovs_vport.yaml @@ -123,12 +123,12 @@ attribute-sets: operations: name-prefix: ovs-vport-cmd- + fixed-header: ovs-header list: - name: new doc: Create a new OVS vport attribute-set: vport - fixed-header: ovs-header do: request: attributes: @@ -141,7 +141,6 @@ operations: name: del doc: Delete existing OVS vport from a data path attribute-set: vport - fixed-header: ovs-header do: request: attributes: @@ -152,7 +151,6 @@ operations: name: get doc: Get / dump OVS vport configuration and state attribute-set: vport - fixed-header: ovs-header do: &vport-get-op request: attributes: diff --git a/Documentation/netlink/specs/rt_link.yaml b/Documentation/netlink/specs/rt_link.yaml index 0c4d5d40cae90..a048fc30389d6 100644 --- a/Documentation/netlink/specs/rt_link.yaml +++ b/Documentation/netlink/specs/rt_link.yaml @@ -1094,11 +1094,10 @@ attribute-sets: - name: prop-list type: nest - nested-attributes: link-attrs + nested-attributes: prop-list-link-attrs - name: alt-ifname type: string - multi-attr: true - name: perm-address type: binary @@ -1137,6 +1136,13 @@ attribute-sets: name: dpll-pin type: nest nested-attributes: link-dpll-pin-attrs + - + name: prop-list-link-attrs + subset-of: link-attrs + attributes: + - + name: alt-ifname + multi-attr: true - name: af-spec-attrs attributes: @@ -2071,9 +2077,10 @@ attribute-sets: type: u32 - name: mctp-attrs + name-prefix: ifla-mctp- attributes: - - name: mctp-net + name: net type: u32 - name: stats-attrs @@ -2319,7 +2326,6 @@ operations: - min-mtu - max-mtu - prop-list - - alt-ifname - perm-address - proto-down-reason - parent-dev-name diff --git a/Documentation/networking/strparser.rst b/Documentation/networking/strparser.rst index 6cab1f74ae05a..7f623d1db72aa 100644 --- a/Documentation/networking/strparser.rst +++ b/Documentation/networking/strparser.rst @@ -112,7 +112,7 @@ Functions Callbacks ========= -There are six callbacks: +There are seven callbacks: :: @@ -182,6 +182,13 @@ There are six callbacks: the length of the message. skb->len - offset may be greater then full_len since strparser does not trim the skb. + :: + + int (*read_sock)(struct strparser *strp, read_descriptor_t *desc, + sk_read_actor_t recv_actor); + + The read_sock callback is used by strparser instead of + sock->ops->read_sock, if provided. :: int (*read_sock_done)(struct strparser *strp, int err); diff --git a/Documentation/rust/coding-guidelines.rst b/Documentation/rust/coding-guidelines.rst index 329b070a1d473..a2e326b42410f 100644 --- a/Documentation/rust/coding-guidelines.rst +++ b/Documentation/rust/coding-guidelines.rst @@ -227,3 +227,149 @@ The equivalent in Rust may look like (ignoring documentation): That is, the equivalent of ``GPIO_LINE_DIRECTION_IN`` would be referred to as ``gpio::LineDirection::In``. In particular, it should not be named ``gpio::gpio_line_direction::GPIO_LINE_DIRECTION_IN``. + + +Lints +----- + +In Rust, it is possible to ``allow`` particular warnings (diagnostics, lints) +locally, making the compiler ignore instances of a given warning within a given +function, module, block, etc. + +It is similar to ``#pragma GCC diagnostic push`` + ``ignored`` + ``pop`` in C +[#]_: + +.. 
code-block:: c + + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wunused-function" + static void f(void) {} + #pragma GCC diagnostic pop + +.. [#] In this particular case, the kernel's ``__{always,maybe}_unused`` + attributes (C23's ``[[maybe_unused]]``) may be used; however, the example + is meant to reflect the equivalent lint in Rust discussed afterwards. + +But way less verbose: + +.. code-block:: rust + + #[allow(dead_code)] + fn f() {} + +By that virtue, it makes it possible to comfortably enable more diagnostics by +default (i.e. outside ``W=`` levels). In particular, those that may have some +false positives but that are otherwise quite useful to keep enabled to catch +potential mistakes. + +On top of that, Rust provides the ``expect`` attribute which takes this further. +It makes the compiler warn if the warning was not produced. For instance, the +following will ensure that, when ``f()`` is called somewhere, we will have to +remove the attribute: + +.. code-block:: rust + + #[expect(dead_code)] + fn f() {} + +If we do not, we get a warning from the compiler:: + + warning: this lint expectation is unfulfilled + --> x.rs:3:10 + | + 3 | #[expect(dead_code)] + | ^^^^^^^^^ + | + = note: `#[warn(unfulfilled_lint_expectations)]` on by default + +This means that ``expect``\ s do not get forgotten when they are not needed, which +may happen in several situations, e.g.: + +- Temporary attributes added while developing. + +- Improvements in lints in the compiler, Clippy or custom tools which may + remove a false positive. + +- When the lint is not needed anymore because it was expected that it would be + removed at some point, such as the ``dead_code`` example above. + +It also increases the visibility of the remaining ``allow``\ s and reduces the +chance of misapplying one. + +Thus prefer ``expect`` over ``allow`` unless: + +- Conditional compilation triggers the warning in some cases but not others. + + If there are only a few cases where the warning triggers (or does not + trigger) compared to the total number of cases, then one may consider using + a conditional ``expect`` (i.e. ``cfg_attr(..., expect(...))``). Otherwise, + it is likely simpler to just use ``allow``. + +- Inside macros, when the different invocations may create expanded code that + triggers the warning in some cases but not in others. + +- When code may trigger a warning for some architectures but not others, such + as an ``as`` cast to a C FFI type. + +As a more developed example, consider for instance this program: + +.. code-block:: rust + + fn g() {} + + fn main() { + #[cfg(CONFIG_X)] + g(); + } + +Here, function ``g()`` is dead code if ``CONFIG_X`` is not set. Can we use +``expect`` here? + +.. code-block:: rust + + #[expect(dead_code)] + fn g() {} + + fn main() { + #[cfg(CONFIG_X)] + g(); + } + +This would emit a lint if ``CONFIG_X`` is set, since it is not dead code in that +configuration. Therefore, in cases like this, we cannot use ``expect`` as-is. + +A simple possibility is using ``allow``: + +.. code-block:: rust + + #[allow(dead_code)] + fn g() {} + + fn main() { + #[cfg(CONFIG_X)] + g(); + } + +An alternative would be using a conditional ``expect``: + +.. code-block:: rust + + #[cfg_attr(not(CONFIG_X), expect(dead_code))] + fn g() {} + + fn main() { + #[cfg(CONFIG_X)] + g(); + } + +This would ensure that, if someone introduces another call to ``g()`` somewhere +(e.g. unconditionally), then it would be spotted that it is not dead code +anymore. 
However, the ``cfg_attr`` is more complex than a simple ``allow``. + +Therefore, it is likely that it is not worth using conditional ``expect``\ s when +more than one or two configurations are involved or when the lint may be +triggered due to non-local changes (such as ``dead_code``). + +For more information about diagnostics in Rust, please see: + + https://doc.rust-lang.org/stable/reference/attributes/diagnostics.html diff --git a/Documentation/rust/quick-start.rst b/Documentation/rust/quick-start.rst index 2d107982c87bb..ded0d0836aee0 100644 --- a/Documentation/rust/quick-start.rst +++ b/Documentation/rust/quick-start.rst @@ -128,7 +128,7 @@ Rust standard library source **************************** The Rust standard library source is required because the build system will -cross-compile ``core`` and ``alloc``. +cross-compile ``core``. If ``rustup`` is being used, run:: diff --git a/Documentation/wmi/devices/msi-wmi-platform.rst b/Documentation/wmi/devices/msi-wmi-platform.rst index 31a1369428923..73197b31926a5 100644 --- a/Documentation/wmi/devices/msi-wmi-platform.rst +++ b/Documentation/wmi/devices/msi-wmi-platform.rst @@ -138,6 +138,10 @@ input data, the meaning of which depends on the subfeature being accessed. The output buffer contains a single byte which signals success or failure (``0x00`` on failure) and 31 bytes of output data, the meaning if which depends on the subfeature being accessed. +.. note:: + The ACPI control method responsible for handling the WMI method calls is not thread-safe. + This is a firmware bug that needs to be handled inside the driver itself. + WMI method Get_EC() ------------------- diff --git a/MAINTAINERS b/MAINTAINERS index 888727d778f75..d9b6927a23d64 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -20181,6 +20181,7 @@ B: https://github.com/Rust-for-Linux/linux/issues C: zulip://rust-for-linux.zulipchat.com P: https://rust-for-linux.com/contributing T: git https://github.com/Rust-for-Linux/linux.git rust-next +F: .clippy.toml F: Documentation/rust/ F: rust/ F: samples/rust/ @@ -20188,6 +20189,13 @@ F: scripts/*rust* F: tools/testing/selftests/rust/ K: \b(?i:rust)\b +RUST [ALLOC] +M: Danilo Krummrich +L: rust-for-linux@vger.kernel.org +S: Maintained +F: rust/kernel/alloc.rs +F: rust/kernel/alloc/ + RXRPC SOCKETS (AF_RXRPC) M: David Howells M: Marc Dionne diff --git a/Makefile b/Makefile index 340da922fa4f2..93f4ba25a4533 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 12 -SUBLEVEL = 16 +SUBLEVEL = 25 EXTRAVERSION = NAME = Baby Opossum Posse @@ -446,19 +446,22 @@ KBUILD_USERLDFLAGS := $(USERLDFLAGS) export rust_common_flags := --edition=2021 \ -Zbinary_dep_depinfo=y \ -Astable_features \ - -Dunsafe_op_in_unsafe_fn \ -Dnon_ascii_idents \ + -Dunsafe_op_in_unsafe_fn \ + -Wmissing_docs \ -Wrust_2018_idioms \ -Wunreachable_pub \ - -Wmissing_docs \ - -Wrustdoc::missing_crate_level_docs \ -Wclippy::all \ + -Wclippy::ignored_unit_patterns \ -Wclippy::mut_mut \ -Wclippy::needless_bitwise_bool \ - -Wclippy::needless_continue \ -Aclippy::needless_lifetimes \ -Wclippy::no_mangle_with_rust_abi \ - -Wclippy::dbg_macro + -Wclippy::undocumented_unsafe_blocks \ + -Wclippy::unnecessary_safety_comment \ + -Wclippy::unnecessary_safety_doc \ + -Wrustdoc::missing_crate_level_docs \ + -Wrustdoc::unescaped_backticks KBUILD_HOSTCFLAGS := $(KBUILD_USERHOSTCFLAGS) $(HOST_LFS_CFLAGS) \ $(HOSTCFLAGS) -I $(srctree)/scripts/include @@ -583,6 +586,9 @@ endif # Allows the usage of unstable features in stable compilers. 
export RUSTC_BOOTSTRAP := 1 +# Allows finding `.clippy.toml` in out-of-srctree builds. +export CLIPPY_CONF_DIR := $(srctree) + export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC HOSTPKG_CONFIG export RUSTC RUSTDOC RUSTFMT RUSTC_OR_CLIPPY_QUIET RUSTC_OR_CLIPPY BINDGEN export HOSTRUSTC KBUILD_HOSTRUSTFLAGS @@ -1006,6 +1012,12 @@ ifdef CONFIG_CC_IS_GCC KBUILD_CFLAGS += -fconserve-stack endif +# Ensure compilers do not transform certain loops into calls to wcslen() +KBUILD_CFLAGS += -fno-builtin-wcslen + +# Ensure compilers do not transform certain loops into calls to wcslen() +KBUILD_CFLAGS += -fno-builtin-wcslen + # change __FILE__ to the relative path from the srctree KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) @@ -1060,6 +1072,11 @@ endif KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)) KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)) +# userspace programs are linked via the compiler, use the correct linker +ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_LD_IS_LLD),yy) +KBUILD_USERLDFLAGS += --ld-path=$(LD) +endif + # make the checker run with the right architecture CHECKFLAGS += --arch=$(ARCH) diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h index 4d7c46f50382e..50c82187e60ec 100644 --- a/arch/alpha/include/asm/elf.h +++ b/arch/alpha/include/asm/elf.h @@ -74,7 +74,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; /* * This is used to ensure we don't load something for the wrong architecture. */ -#define elf_check_arch(x) ((x)->e_machine == EM_ALPHA) +#define elf_check_arch(x) (((x)->e_machine == EM_ALPHA) && !((x)->e_flags & EF_ALPHA_32BIT)) /* * These are used to set parameters in the core dumps. @@ -137,10 +137,6 @@ extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task); : amask (AMASK_CIX) ? "ev6" : "ev67"); \ }) -#define SET_PERSONALITY(EX) \ - set_personality(((EX).e_flags & EF_ALPHA_32BIT) \ - ? PER_LINUX_32BIT : PER_LINUX) - extern int alpha_l1i_cacheshape; extern int alpha_l1d_cacheshape; extern int alpha_l2_cacheshape; diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h index 635f0a5f5bbde..02e8817a89212 100644 --- a/arch/alpha/include/asm/pgtable.h +++ b/arch/alpha/include/asm/pgtable.h @@ -360,7 +360,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) extern void paging_init(void); -/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */ +/* We have our own get_unmapped_area */ #define HAVE_ARCH_UNMAPPED_AREA #endif /* _ALPHA_PGTABLE_H */ diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h index 55bb1c09fd39d..5dce5518a2111 100644 --- a/arch/alpha/include/asm/processor.h +++ b/arch/alpha/include/asm/processor.h @@ -8,23 +8,19 @@ #ifndef __ASM_ALPHA_PROCESSOR_H #define __ASM_ALPHA_PROCESSOR_H -#include /* for ADDR_LIMIT_32BIT */ - /* * We have a 42-bit user address space: 4TB user VM... */ #define TASK_SIZE (0x40000000000UL) -#define STACK_TOP \ - (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL) +#define STACK_TOP (0x00120000000UL) #define STACK_TOP_MAX 0x00120000000UL /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ -#define TASK_UNMAPPED_BASE \ - ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2) +#define TASK_UNMAPPED_BASE (TASK_SIZE / 2) /* This is dead. Everything has been moved to thread_info. 
*/ struct thread_struct { }; diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index c0424de9e7cda..077a1407be6d7 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1211,8 +1211,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) return ret; } -/* Get an address range which is currently unmapped. Similar to the - generic version except that we know how to honor ADDR_LIMIT_32BIT. */ +/* Get an address range which is currently unmapped. */ static unsigned long arch_get_unmapped_area_1(unsigned long addr, unsigned long len, @@ -1231,13 +1230,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { - unsigned long limit; - - /* "32 bit" actually means 31 bit, since pointers sign extend. */ - if (current->personality & ADDR_LIMIT_32BIT) - limit = 0x80000000; - else - limit = TASK_SIZE; + unsigned long limit = TASK_SIZE; if (len > limit) return -ENOMEM; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index d70e3c69f5ef7..344c6db6db213 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -119,7 +119,7 @@ config ARM select HAVE_KERNEL_XZ select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M select HAVE_KRETPROBES if HAVE_KPROBES - select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_IS_LLD) + select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_CAN_USE_KEEP_IN_OVERLAY) select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI select HAVE_OPTPROBES if !THUMB2_KERNEL diff --git a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi index 6bf4241fe3b73..c78ed064d1667 100644 --- a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi +++ b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include "bcm2835-rpi.dtsi" -#include #include / { @@ -101,7 +100,3 @@ &vchiq { interrupts = ; }; - -&xhci { - power-domains = <&power RPI_POWER_DOMAIN_USB>; -}; diff --git a/arch/arm/boot/dts/broadcom/bcm2711.dtsi b/arch/arm/boot/dts/broadcom/bcm2711.dtsi index e4e42af21ef3a..c06d9f5e53c80 100644 --- a/arch/arm/boot/dts/broadcom/bcm2711.dtsi +++ b/arch/arm/boot/dts/broadcom/bcm2711.dtsi @@ -134,7 +134,7 @@ clocks = <&clocks BCM2835_CLOCK_UART>, <&clocks BCM2835_CLOCK_VPU>; clock-names = "uartclk", "apb_pclk"; - arm,primecell-periphid = <0x00241011>; + arm,primecell-periphid = <0x00341011>; status = "disabled"; }; @@ -145,7 +145,7 @@ clocks = <&clocks BCM2835_CLOCK_UART>, <&clocks BCM2835_CLOCK_VPU>; clock-names = "uartclk", "apb_pclk"; - arm,primecell-periphid = <0x00241011>; + arm,primecell-periphid = <0x00341011>; status = "disabled"; }; @@ -156,7 +156,7 @@ clocks = <&clocks BCM2835_CLOCK_UART>, <&clocks BCM2835_CLOCK_VPU>; clock-names = "uartclk", "apb_pclk"; - arm,primecell-periphid = <0x00241011>; + arm,primecell-periphid = <0x00341011>; status = "disabled"; }; @@ -167,7 +167,7 @@ clocks = <&clocks BCM2835_CLOCK_UART>, <&clocks BCM2835_CLOCK_VPU>; clock-names = "uartclk", "apb_pclk"; - arm,primecell-periphid = <0x00241011>; + arm,primecell-periphid = <0x00341011>; status = "disabled"; }; @@ -451,8 +451,6 @@ IRQ_TYPE_LEVEL_LOW)>, ; - /* This only applies to the ARMv7 stub */ - arm,cpu-registers-not-fw-configured; }; cpus: cpus { @@ -610,6 +608,7 @@ #address-cells = <1>; #size-cells = <0>; interrupts = ; + power-domains = <&pm BCM2835_POWER_DOMAIN_USB>; /* DWC2 and this IP block share the same USB PHY, * enabling both at the same time results 
in lockups. * So keep this node disabled and let the bootloader @@ -1177,6 +1176,7 @@ }; &uart0 { + arm,primecell-periphid = <0x00341011>; interrupts = ; }; diff --git a/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts b/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts index 53cb0c58f6d05..3da2daee0c849 100644 --- a/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts +++ b/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts @@ -124,19 +124,19 @@ }; port@1 { - label = "lan1"; + label = "lan4"; }; port@2 { - label = "lan2"; + label = "lan3"; }; port@3 { - label = "lan3"; + label = "lan2"; }; port@4 { - label = "lan4"; + label = "lan1"; }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts index 6c666dc7ad23e..01ec8c03686a6 100644 --- a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts +++ b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts @@ -126,11 +126,11 @@ ports { port@0 { - label = "lan4"; + label = "wan"; }; port@1 { - label = "lan3"; + label = "lan1"; }; port@2 { @@ -138,11 +138,11 @@ }; port@3 { - label = "lan1"; + label = "lan3"; }; port@4 { - label = "wan"; + label = "lan4"; }; }; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi index edf55760a5c1a..1a63a6add4398 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi @@ -108,6 +108,11 @@ }; }; + poweroff { + compatible = "regulator-poweroff"; + cpu-supply = <&vgen2_reg>; + }; + reg_module_3v3: regulator-module-3v3 { compatible = "regulator-fixed"; regulator-always-on; @@ -236,10 +241,6 @@ status = "disabled"; }; -&clks { - fsl,pmic-stby-poweroff; -}; - /* Apalis SPI1 */ &ecspi1 { cs-gpios = <&gpio5 25 GPIO_ACTIVE_LOW>; @@ -527,7 +528,6 @@ pmic: pmic@8 { compatible = "fsl,pfuze100"; - fsl,pmic-stby-poweroff; reg = <0x08>; regulators { diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h index d60f6e83a9f70..14811b4f48ec8 100644 --- a/arch/arm/include/asm/vmlinux.lds.h +++ b/arch/arm/include/asm/vmlinux.lds.h @@ -34,6 +34,12 @@ #define NOCROSSREFS #endif +#ifdef CONFIG_LD_CAN_USE_KEEP_IN_OVERLAY +#define OVERLAY_KEEP(x) KEEP(x) +#else +#define OVERLAY_KEEP(x) x +#endif + /* Set start/end symbol names to the LMA for the section */ #define ARM_LMA(sym, section) \ sym##_start = LOADADDR(section); \ @@ -125,13 +131,13 @@ __vectors_lma = .; \ OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \ .vectors { \ - *(.vectors) \ + OVERLAY_KEEP(*(.vectors)) \ } \ .vectors.bhb.loop8 { \ - *(.vectors.bhb.loop8) \ + OVERLAY_KEEP(*(.vectors.bhb.loop8)) \ } \ .vectors.bhb.bpiall { \ - *(.vectors.bhb.bpiall) \ + OVERLAY_KEEP(*(.vectors.bhb.bpiall)) \ } \ } \ ARM_LMA(__vectors, .vectors); \ diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index 2a8a9fe46586d..3fa15f3422409 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig @@ -27,6 +27,7 @@ config ARCH_DAVINCI_DA830 config ARCH_DAVINCI_DA850 bool "DA850/OMAP-L138/AM18x based system" + select ARCH_DAVINCI_DA8XX select DAVINCI_CP_INTC config ARCH_DAVINCI_DA8XX diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig index a643b71e30a35..08ec6bd84ada5 100644 --- a/arch/arm/mach-omap1/Kconfig +++ b/arch/arm/mach-omap1/Kconfig @@ -8,6 +8,7 @@ menuconfig ARCH_OMAP1 select ARCH_OMAP select CLKSRC_MMIO select FORCE_PCI if PCCARD + select GENERIC_IRQ_CHIP select GPIOLIB help Support for older 
TI OMAP1 (omap7xx, omap15xx or omap16xx) diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S index a956b489b6ea1..2bc7e73a8582d 100644 --- a/arch/arm/mach-shmobile/headsmp.S +++ b/arch/arm/mach-shmobile/headsmp.S @@ -136,6 +136,7 @@ ENDPROC(shmobile_smp_sleep) .long shmobile_smp_arg - 1b .bss + .align 2 .globl shmobile_smp_mpidr shmobile_smp_mpidr: .space NR_CPUS * 4 diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi index 26a29e5e5078d..447bfa060918c 100644 --- a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi +++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi @@ -232,7 +232,7 @@ interrupts = ; clocks = <&clk_uart>, <&clk_vpu>; clock-names = "uartclk", "apb_pclk"; - arm,primecell-periphid = <0x00241011>; + arm,primecell-periphid = <0x00341011>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/exynos/google/gs101.dtsi b/arch/arm64/boot/dts/exynos/google/gs101.dtsi index 302c5beb224aa..b8f8255f840b1 100644 --- a/arch/arm64/boot/dts/exynos/google/gs101.dtsi +++ b/arch/arm64/boot/dts/exynos/google/gs101.dtsi @@ -1451,6 +1451,7 @@ /* TODO: update once support for this CMU exists */ clocks = <0>; clock-names = "pclk"; + status = "disabled"; }; cmu_top: clock-controller@1e080000 { diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi index ce20de2598054..3d0b149681310 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi @@ -16,10 +16,10 @@ "Headphone Jack", "HPOUTR", "IN2L", "Line In Jack", "IN2R", "Line In Jack", - "Headphone Jack", "MICBIAS", - "IN1L", "Headphone Jack"; + "Microphone Jack", "MICBIAS", + "IN1L", "Microphone Jack"; simple-audio-card,widgets = - "Microphone", "Headphone Jack", + "Microphone", "Microphone Jack", "Headphone", "Headphone Jack", "Line", "Line In Jack"; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi index 336785a9fba89..3ddc5aaa7c5f0 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-or-later OR MIT /* - * Copyright 2021-2022 TQ-Systems GmbH - * Author: Alexander Stein + * Copyright 2021-2025 TQ-Systems GmbH , + * D-82229 Seefeld, Germany. 
+ * Author: Alexander Stein */ #include "imx8mp.dtsi" @@ -23,15 +24,6 @@ regulator-max-microvolt = <3300000>; regulator-always-on; }; - - /* e-MMC IO, needed for HS modes */ - reg_vcc1v8: regulator-vcc1v8 { - compatible = "regulator-fixed"; - regulator-name = "VCC1V8"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-always-on; - }; }; &A53_0 { @@ -197,7 +189,7 @@ no-sd; no-sdio; vmmc-supply = <®_vcc3v3>; - vqmmc-supply = <®_vcc1v8>; + vqmmc-supply = <&buck5_reg>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi index da8902c5f7e5b..1493319aa748d 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi @@ -28,10 +28,10 @@ "Headphone Jack", "HPOUTR", "IN2L", "Line In Jack", "IN2R", "Line In Jack", - "Headphone Jack", "MICBIAS", - "IN1L", "Headphone Jack"; + "Microphone Jack", "MICBIAS", + "IN1L", "Microphone Jack"; simple-audio-card,widgets = - "Microphone", "Headphone Jack", + "Microphone", "Microphone Jack", "Headphone", "Headphone Jack", "Line", "Line In Jack"; diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi index 3458be7f7f611..f49ec74959060 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi @@ -1255,8 +1255,7 @@ }; pwm0: pwm@1401e000 { - compatible = "mediatek,mt8173-disp-pwm", - "mediatek,mt6595-disp-pwm"; + compatible = "mediatek,mt8173-disp-pwm"; reg = <0 0x1401e000 0 0x1000>; #pwm-cells = <2>; clocks = <&mmsys CLK_MM_DISP_PWM026M>, @@ -1266,8 +1265,7 @@ }; pwm1: pwm@1401f000 { - compatible = "mediatek,mt8173-disp-pwm", - "mediatek,mt6595-disp-pwm"; + compatible = "mediatek,mt8173-disp-pwm"; reg = <0 0x1401f000 0 0x1000>; #pwm-cells = <2>; clocks = <&mmsys CLK_MM_DISP_PWM126M>, diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts index 1aa668c3ccf92..dbdee604edab4 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts +++ b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts @@ -63,6 +63,18 @@ pulldown-ohm = <0>; io-channels = <&auxadc 0>; }; + + connector { + compatible = "hdmi-connector"; + label = "hdmi"; + type = "d"; + + port { + hdmi_connector_in: endpoint { + remote-endpoint = <&hdmi_connector_out>; + }; + }; + }; }; &auxadc { @@ -120,6 +132,43 @@ pinctrl-0 = <&i2c6_pins>; status = "okay"; clock-frequency = <100000>; + #address-cells = <1>; + #size-cells = <0>; + + it66121hdmitx: hdmitx@4c { + compatible = "ite,it66121"; + reg = <0x4c>; + pinctrl-names = "default"; + pinctrl-0 = <&ite_pins>; + reset-gpios = <&pio 160 GPIO_ACTIVE_LOW>; + interrupt-parent = <&pio>; + interrupts = <4 IRQ_TYPE_LEVEL_LOW>; + vcn33-supply = <&mt6358_vcn33_reg>; + vcn18-supply = <&mt6358_vcn18_reg>; + vrf12-supply = <&mt6358_vrf12_reg>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + it66121_in: endpoint { + bus-width = <12>; + remote-endpoint = <&dpi_out>; + }; + }; + + port@1 { + reg = <1>; + + hdmi_connector_out: endpoint { + remote-endpoint = <&hdmi_connector_in>; + }; + }; + }; + }; }; &keyboard { @@ -362,6 +411,67 @@ input-enable; }; }; + + ite_pins: ite-pins { + pins-irq { + pinmux = ; + input-enable; + bias-pull-up; + }; + + pins-rst { + pinmux = ; + output-high; + }; + }; + + dpi_func_pins: dpi-func-pins { + pins-dpi { + pinmux = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + ; + }; + }; + + dpi_idle_pins: dpi-idle-pins { + pins-idle { + pinmux = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + }; + }; }; &pmic { @@ -412,6 +522,15 @@ status = "okay"; }; -&dsi0 { - status = "disabled"; +&dpi0 { + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&dpi_func_pins>; + pinctrl-1 = <&dpi_idle_pins>; + status = "okay"; + + port { + dpi_out: endpoint { + remote-endpoint = <&it66121_in>; + }; + }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi index 5cb6bd3c5acbb..92c41463d10e3 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi @@ -1835,6 +1835,7 @@ resets = <&mmsys MT8183_MMSYS_SW0_RST_B_DISP_DSI0>; phys = <&mipi_tx0>; phy-names = "dphy"; + status = "disabled"; }; dpi0: dpi@14015000 { diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi index 19340d13f789f..41821354bbdae 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi @@ -227,13 +227,6 @@ wakeup-event-action = ; wakeup-source; }; - - key-suspend { - label = "Suspend"; - gpios = <&gpio TEGRA234_MAIN_GPIO(G, 2) GPIO_ACTIVE_LOW>; - linux,input-type = ; - linux,code = ; - }; }; fan: pwm-fan { diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts index ae398acdcf45e..3d5e81a0afdc5 100644 --- a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts +++ b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts @@ -194,6 +194,13 @@ <3 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>; }; }; + + uart { + uart5_rts_pin: uart5-rts-pin { + rockchip,pins = + <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; }; &pwm0 { @@ -222,11 +229,15 @@ }; &uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_xfer>; status = "okay"; }; &uart5 { - pinctrl-0 = <&uart5_xfer>; + /* Add pinmux for rts-gpios (uart5_rts_pin) */ + pinctrl-names = "default"; + pinctrl-0 = <&uart5_xfer &uart5_rts_pin>; rts-gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi index b7163ed74232d..f743aaf78359d 100644 --- a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi +++ b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi @@ -373,6 +373,12 @@ status = "okay"; }; +&uart5 { + /delete-property/ dmas; + /delete-property/ dma-names; + pinctrl-0 = <&uart5_xfer>; +}; + /* Mule UCAN */ &usb_host0_ehci { status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts index 4237f2ee8fee3..f57d4acd9807c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts @@ -15,9 +15,11 @@ }; &gmac2io { + /delete-property/ tx_delay; + /delete-property/ rx_delay; + phy-handle = <&yt8531c>; - tx_delay = <0x19>; - rx_delay = <0x05>; + phy-mode = "rgmii-id"; mdio { /delete-node/ ethernet-phy@1; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts index fe5b526100107..6a6b36c36ce21 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts @@ -117,7 +117,7 @@ }; &u2phy1_host { - status = "disabled"; + phy-supply = <&vdd_5v>; 
}; &uart0 { diff --git a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts index 9a2f59a351dee..48ccdd6b47118 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts @@ -512,7 +512,6 @@ &sdmmc0 { max-frequency = <150000000>; - supports-sd; bus-width = <4>; cap-mmc-highspeed; cap-sd-highspeed; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi index fc67585b64b7b..83e7e0fbe7839 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi @@ -549,10 +549,10 @@ mmu600_pcie: iommu@fc900000 { compatible = "arm,smmu-v3"; reg = <0x0 0xfc900000 0x0 0x200000>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; interrupt-names = "eventq", "gerror", "priq", "cmdq-sync"; #iommu-cells = <1>; status = "disabled"; @@ -561,10 +561,10 @@ mmu600_php: iommu@fcb00000 { compatible = "arm,smmu-v3"; reg = <0x0 0xfcb00000 0x0 0x200000>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; interrupt-names = "eventq", "gerror", "priq", "cmdq-sync"; #iommu-cells = <1>; status = "disabled"; @@ -2626,9 +2626,9 @@ rockchip,hw-tshut-temp = <120000>; rockchip,hw-tshut-mode = <0>; /* tshut mode 0:CRU 1:GPIO */ rockchip,hw-tshut-polarity = <0>; /* tshut polarity 0:LOW 1:HIGH */ - pinctrl-0 = <&tsadc_gpio_func>; - pinctrl-1 = <&tsadc_shut>; - pinctrl-names = "gpio", "otpout"; + pinctrl-0 = <&tsadc_shut_org>; + pinctrl-1 = <&tsadc_gpio_func>; + pinctrl-names = "default", "sleep"; #thermal-sensor-cells = <1>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts index 6418286efe40d..762d36ad733ab 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts @@ -101,7 +101,7 @@ compatible = "regulator-fixed"; regulator-name = "vcc3v3_lcd"; enable-active-high; - gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_HIGH>; + gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&lcdpwr_en>; vin-supply = <&vcc3v3_sys>; @@ -207,7 +207,7 @@ &pinctrl { lcd { lcdpwr_en: lcdpwr-en { - rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>; + rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>; }; bl_en: bl-en { diff --git a/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts b/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts index 31d2f8994f851..e61c5731fb99f 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts @@ -455,7 +455,6 @@ non-removable; pinctrl-names = "default"; pinctrl-0 = <&emmc_bus8 &emmc_cmd &emmc_clk &emmc_data_strobe>; - supports-cqe; vmmc-supply = <&vcc_3v3_s3>; vqmmc-supply = <&vcc_1v8_s3>; status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi index 615094bb8ba38..a82fe75bda55c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi @@ -367,7 +367,6 @@ non-removable; pinctrl-names = "default"; pinctrl-0 = <&emmc_bus8 &emmc_cmd &emmc_clk &emmc_data_strobe>; - supports-cqe; vmmc-supply = <&vcc_3v3_s3>; vqmmc-supply = <&vcc_1v8_s3>; status = "okay"; diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 488f8e7513495..2a4e686e633c6 100644 --- a/arch/arm64/include/asm/cputype.h +++ 
b/arch/arm64/include/asm/cputype.h @@ -75,6 +75,7 @@ #define ARM_CPU_PART_CORTEX_A76 0xD0B #define ARM_CPU_PART_NEOVERSE_N1 0xD0C #define ARM_CPU_PART_CORTEX_A77 0xD0D +#define ARM_CPU_PART_CORTEX_A76AE 0xD0E #define ARM_CPU_PART_NEOVERSE_V1 0xD40 #define ARM_CPU_PART_CORTEX_A78 0xD41 #define ARM_CPU_PART_CORTEX_A78AE 0xD42 @@ -119,6 +120,7 @@ #define QCOM_CPU_PART_KRYO 0x200 #define QCOM_CPU_PART_KRYO_2XX_GOLD 0x800 #define QCOM_CPU_PART_KRYO_2XX_SILVER 0x801 +#define QCOM_CPU_PART_KRYO_3XX_GOLD 0x802 #define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803 #define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804 #define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805 @@ -158,6 +160,7 @@ #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1) #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77) +#define MIDR_CORTEX_A76AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76AE) #define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1) #define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78) #define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE) @@ -195,6 +198,7 @@ #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) #define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD) #define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER) +#define MIDR_QCOM_KRYO_3XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_GOLD) #define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER) #define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD) #define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index e0ffdf13a18b3..bdbe9e08664a6 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -215,6 +215,30 @@ .Lskip_fgt_\@: .endm +.macro __init_el2_fgt2 + mrs x1, id_aa64mmfr0_el1 + ubfx x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4 + cmp x1, #ID_AA64MMFR0_EL1_FGT_FGT2 + b.lt .Lskip_fgt2_\@ + + mov x0, xzr + mrs x1, id_aa64dfr0_el1 + ubfx x1, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4 + cmp x1, #ID_AA64DFR0_EL1_PMUVer_V3P9 + b.lt .Lskip_pmuv3p9_\@ + + orr x0, x0, #HDFGRTR2_EL2_nPMICNTR_EL0 + orr x0, x0, #HDFGRTR2_EL2_nPMICFILTR_EL0 + orr x0, x0, #HDFGRTR2_EL2_nPMUACR_EL1 +.Lskip_pmuv3p9_\@: + msr_s SYS_HDFGRTR2_EL2, x0 + msr_s SYS_HDFGWTR2_EL2, x0 + msr_s SYS_HFGRTR2_EL2, xzr + msr_s SYS_HFGWTR2_EL2, xzr + msr_s SYS_HFGITR2_EL2, xzr +.Lskip_fgt2_\@: +.endm + .macro __init_el2_nvhe_prepare_eret mov x0, #INIT_PSTATE_EL1 msr spsr_el2, x0 @@ -240,6 +264,7 @@ __init_el2_nvhe_idregs __init_el2_cptr __init_el2_fgt + __init_el2_fgt2 .endm #ifndef __KVM_NVHE_HYPERVISOR__ diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h index 293f880865e8d..f0304273eb351 100644 --- a/arch/arm64/include/asm/hugetlb.h +++ b/arch/arm64/include/asm/hugetlb.h @@ -34,8 +34,8 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty); #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR -extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep); +extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long 
addr, + pte_t *ptep, unsigned long sz); #define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT extern void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep); diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index c315bc1a4e9ad..122a1e12582c0 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -602,23 +602,13 @@ struct kvm_host_data { struct kvm_cpu_context host_ctxt; /* - * All pointers in this union are hyp VA. + * Hyp VA. * sve_state is only used in pKVM and if system_supports_sve(). */ - union { - struct user_fpsimd_state *fpsimd_state; - struct cpu_sve_state *sve_state; - }; - - union { - /* HYP VA pointer to the host storage for FPMR */ - u64 *fpmr_ptr; - /* - * Used by pKVM only, as it needs to provide storage - * for the host - */ - u64 fpmr; - }; + struct cpu_sve_state *sve_state; + + /* Used by pKVM only. */ + u64 fpmr; /* Ownership of the FP regs */ enum { @@ -697,7 +687,6 @@ struct kvm_vcpu_arch { u64 hcr_el2; u64 hcrx_el2; u64 mdcr_el2; - u64 cptr_el2; /* Exception Information */ struct kvm_vcpu_fault_info fault; @@ -902,10 +891,6 @@ struct kvm_vcpu_arch { /* Save TRBE context if active */ #define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6)) -/* SVE enabled for host EL0 */ -#define HOST_SVE_ENABLED __vcpu_single_flag(sflags, BIT(0)) -/* SME enabled for EL0 */ -#define HOST_SME_ENABLED __vcpu_single_flag(sflags, BIT(1)) /* Physical CPU not in supported_cpus */ #define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(2)) /* WFIT instruction trapped */ @@ -1243,7 +1228,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu, extern unsigned int __ro_after_init kvm_arm_vmid_bits; int __init kvm_arm_vmid_alloc_init(void); void __init kvm_arm_vmid_alloc_free(void); -bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid); +void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid); void kvm_arm_vmid_clear_active(void); static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch) diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h index 798d965760d43..5a280ac7570cd 100644 --- a/arch/arm64/include/asm/mman.h +++ b/arch/arm64/include/asm/mman.h @@ -41,9 +41,12 @@ static inline unsigned long arch_calc_vm_flag_bits(struct file *file, * backed by tags-capable memory. The vm_flags may be overridden by a * filesystem supporting MTE (RAM-based). 
*/ - if (system_supports_mte() && - ((flags & MAP_ANONYMOUS) || shmem_file(file))) - return VM_MTE_ALLOWED; + if (system_supports_mte()) { + if ((flags & MAP_ANONYMOUS) && !(flags & MAP_HUGETLB)) + return VM_MTE_ALLOWED; + if (shmem_file(file)) + return VM_MTE_ALLOWED; + } return 0; } diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h index 0c4d9045c31f4..f1524cdeacf1c 100644 --- a/arch/arm64/include/asm/spectre.h +++ b/arch/arm64/include/asm/spectre.h @@ -97,7 +97,6 @@ enum mitigation_state arm64_get_meltdown_state(void); enum mitigation_state arm64_get_spectre_bhb_state(void); bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope); -u8 spectre_bhb_loop_affected(int scope); void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr); diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 95fbc8c056079..9edbd871c31bf 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -396,33 +396,35 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) #define __flush_tlb_range_op(op, start, pages, stride, \ asid, tlb_level, tlbi_user, lpa2) \ do { \ + typeof(start) __flush_start = start; \ + typeof(pages) __flush_pages = pages; \ int num = 0; \ int scale = 3; \ int shift = lpa2 ? 16 : PAGE_SHIFT; \ unsigned long addr; \ \ - while (pages > 0) { \ + while (__flush_pages > 0) { \ if (!system_supports_tlb_range() || \ - pages == 1 || \ - (lpa2 && start != ALIGN(start, SZ_64K))) { \ - addr = __TLBI_VADDR(start, asid); \ + __flush_pages == 1 || \ + (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \ + addr = __TLBI_VADDR(__flush_start, asid); \ __tlbi_level(op, addr, tlb_level); \ if (tlbi_user) \ __tlbi_user_level(op, addr, tlb_level); \ - start += stride; \ - pages -= stride >> PAGE_SHIFT; \ + __flush_start += stride; \ + __flush_pages -= stride >> PAGE_SHIFT; \ continue; \ } \ \ - num = __TLBI_RANGE_NUM(pages, scale); \ + num = __TLBI_RANGE_NUM(__flush_pages, scale); \ if (num >= 0) { \ - addr = __TLBI_VADDR_RANGE(start >> shift, asid, \ + addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \ scale, num, tlb_level); \ __tlbi(r##op, addr); \ if (tlbi_user) \ __tlbi_user(r##op, addr); \ - start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \ - pages -= __TLBI_RANGE_PAGES(num, scale); \ + __flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \ + __flush_pages -= __TLBI_RANGE_PAGES(num, scale);\ } \ scale--; \ } \ diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index d780d1bd2eacb..82cf1f879c61d 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -109,10 +109,9 @@ static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned lon int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr); int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr); int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr); - unsigned long dst, src, size; + unsigned long dst, size; dst = regs->regs[dstreg]; - src = regs->regs[srcreg]; size = regs->regs[sizereg]; /* @@ -129,6 +128,7 @@ static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned lon } } else { /* CPY* instruction */ + unsigned long src = regs->regs[srcreg]; if (!(option_a ^ wrong_option)) { /* Format is from Option B */ if (regs->pstate & PSR_N_BIT) { diff --git a/arch/arm64/kernel/compat_alignment.c b/arch/arm64/kernel/compat_alignment.c index 
deff21bfa6800..b68e1d328d4cb 100644 --- a/arch/arm64/kernel/compat_alignment.c +++ b/arch/arm64/kernel/compat_alignment.c @@ -368,6 +368,8 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs) return 1; } + if (!handler) + return 1; type = handler(addr, instr, regs); if (type == TYPE_ERROR || type == TYPE_FAULT) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 6d21971ae5594..f38d22dac140f 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1694,31 +1694,6 @@ void fpsimd_signal_preserve_current_state(void) sve_to_fpsimd(current); } -/* - * Called by KVM when entering the guest. - */ -void fpsimd_kvm_prepare(void) -{ - if (!system_supports_sve()) - return; - - /* - * KVM does not save host SVE state since we can only enter - * the guest from a syscall so the ABI means that only the - * non-saved SVE state needs to be saved. If we have left - * SVE enabled for performance reasons then update the task - * state to be FPSIMD only. - */ - get_cpu_fpsimd_context(); - - if (test_and_clear_thread_flag(TIF_SVE)) { - sve_to_fpsimd(current); - current->thread.fp_type = FP_STATE_FPSIMD; - } - - put_cpu_fpsimd_context(); -} - /* * Associate current's FPSIMD context with this cpu * The caller must have ownership of the cpu FPSIMD context before calling diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index da53722f95d41..0f51fd10b4b06 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -845,52 +845,86 @@ static unsigned long system_bhb_mitigations; * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any * SCOPE_SYSTEM call will give the right answer. */ -u8 spectre_bhb_loop_affected(int scope) +static bool is_spectre_bhb_safe(int scope) +{ + static const struct midr_range spectre_bhb_safe_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A510), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A520), + MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), + {}, + }; + static bool all_safe = true; + + if (scope != SCOPE_LOCAL_CPU) + return all_safe; + + if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_safe_list)) + return true; + + all_safe = false; + + return false; +} + +static u8 spectre_bhb_loop_affected(void) { u8 k = 0; - static u8 max_bhb_k; - - if (scope == SCOPE_LOCAL_CPU) { - static const struct midr_range spectre_bhb_k32_list[] = { - MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), - MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), - MIDR_ALL_VERSIONS(MIDR_CORTEX_X2), - MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), - MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), - {}, - }; - static const struct midr_range spectre_bhb_k24_list[] = { - MIDR_ALL_VERSIONS(MIDR_CORTEX_A76), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), - MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), - {}, - }; - static const struct midr_range spectre_bhb_k11_list[] = { - MIDR_ALL_VERSIONS(MIDR_AMPERE1), - {}, - }; - static const struct midr_range spectre_bhb_k8_list[] = { - MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), - {}, - }; - - if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list)) - k = 32; - else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list)) 
- k = 24; - else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list)) - k = 11; - else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list)) - k = 8; - - max_bhb_k = max(max_bhb_k, k); - } else { - k = max_bhb_k; - } + + static const struct midr_range spectre_bhb_k132_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_X3), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2), + }; + static const struct midr_range spectre_bhb_k38_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A715), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A720), + }; + static const struct midr_range spectre_bhb_k32_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X2), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), + {}, + }; + static const struct midr_range spectre_bhb_k24_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A76), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A76AE), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD), + {}, + }; + static const struct midr_range spectre_bhb_k11_list[] = { + MIDR_ALL_VERSIONS(MIDR_AMPERE1), + {}, + }; + static const struct midr_range spectre_bhb_k8_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + {}, + }; + + if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k132_list)) + k = 132; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k38_list)) + k = 38; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list)) + k = 32; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list)) + k = 24; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list)) + k = 11; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list)) + k = 8; return k; } @@ -916,29 +950,13 @@ static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void) } } -static bool is_spectre_bhb_fw_affected(int scope) +static bool has_spectre_bhb_fw_mitigation(void) { - static bool system_affected; enum mitigation_state fw_state; bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE; - static const struct midr_range spectre_bhb_firmware_mitigated_list[] = { - MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), - {}, - }; - bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(), - spectre_bhb_firmware_mitigated_list); - - if (scope != SCOPE_LOCAL_CPU) - return system_affected; fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); - if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) { - system_affected = true; - return true; - } - - return false; + return has_smccc && fw_state == SPECTRE_MITIGATED; } static bool supports_ecbhb(int scope) @@ -954,6 +972,8 @@ static bool supports_ecbhb(int scope) ID_AA64MMFR1_EL1_ECBHB_SHIFT); } +static u8 max_bhb_k; + bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope) { @@ -962,16 +982,18 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, if (supports_csv2p3(scope)) return false; - if (supports_clearbhb(scope)) - return true; - - if (spectre_bhb_loop_affected(scope)) - return true; + if (is_spectre_bhb_safe(scope)) + return false; - if (is_spectre_bhb_fw_affected(scope)) - return true; + /* + * At this point the core isn't known to be "safe" so we're going to + * assume it's vulnerable. 
We still need to update `max_bhb_k` though, + * but only if we aren't mitigating with clearbhb. + */ + if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU)) + max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected()); - return false; + return true; } static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot) @@ -1002,7 +1024,7 @@ early_param("nospectre_bhb", parse_spectre_bhb_param); void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry) { bp_hardening_cb_t cpu_cb; - enum mitigation_state fw_state, state = SPECTRE_VULNERABLE; + enum mitigation_state state = SPECTRE_VULNERABLE; struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data); if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU)) @@ -1028,7 +1050,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry) this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN); state = SPECTRE_MITIGATED; set_bit(BHB_INSN, &system_bhb_mitigations); - } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) { + } else if (spectre_bhb_loop_affected()) { /* * Ensure KVM uses the indirect vector which will have the * branchy-loop added. A57/A72-r0 will already have selected * the spectre-indirect vector, which is sufficient for BHB * too. */ if (!data->fn) this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP); state = SPECTRE_MITIGATED; set_bit(BHB_LOOP, &system_bhb_mitigations); - } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) { - fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); - if (fw_state == SPECTRE_MITIGATED) { - /* - * Ensure KVM uses one of the spectre bp_hardening - * vectors. The indirect vector doesn't include the EL3 - * call, so needs upgrading to - * HYP_VECTOR_SPECTRE_INDIRECT. - */ - if (!data->slot || data->slot == HYP_VECTOR_INDIRECT) - data->slot += 1; - - this_cpu_set_vectors(EL1_VECTOR_BHB_FW); - - /* - * The WA3 call in the vectors supersedes the WA1 call - * made during context-switch. Uninstall any firmware - * bp_hardening callback. - */ - cpu_cb = spectre_v2_get_sw_mitigation_cb(); - if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb) - __this_cpu_write(bp_hardening_data.fn, NULL); - - state = SPECTRE_MITIGATED; - set_bit(BHB_FW, &system_bhb_mitigations); - } + } else if (has_spectre_bhb_fw_mitigation()) { + /* + * Ensure KVM uses one of the spectre bp_hardening + * vectors. The indirect vector doesn't include the EL3 + * call, so needs upgrading to + * HYP_VECTOR_SPECTRE_INDIRECT. + */ + if (!data->slot || data->slot == HYP_VECTOR_INDIRECT) + data->slot += 1; + + this_cpu_set_vectors(EL1_VECTOR_BHB_FW); + + /* + * The WA3 call in the vectors supersedes the WA1 call + * made during context-switch. Uninstall any firmware + * bp_hardening callback.
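The proton-pack.c rework above splits Spectre-BHB detection in two: spectre_bhb_loop_affected() answers only for the local CPU, and is_spectre_bhb_affected() folds each CPU's loop count into the system-wide max_bhb_k that is later patched into the mitigation loop. A self-contained sketch of that aggregation pattern, with invented MIDR values and counts (not kernel code):

#include <stdio.h>

static unsigned char max_k;	/* system-wide value, like max_bhb_k */

/* hypothetical per-core loop counts keyed by a fake MIDR */
static unsigned char loop_count_for(unsigned int midr)
{
	switch (midr) {
	case 0x100: return 32;	/* stand-in for a k32-list core */
	case 0x200: return 24;	/* stand-in for a k24-list core */
	default:    return 0;	/* not in any loop list */
	}
}

static void probe_cpu(unsigned int midr, int has_clearbhb)
{
	/* cores mitigated by the CLRBHB instruction don't raise the max */
	if (!has_clearbhb && loop_count_for(midr) > max_k)
		max_k = loop_count_for(midr);
}

int main(void)
{
	probe_cpu(0x200, 0);	/* contributes 24 */
	probe_cpu(0x100, 0);	/* contributes 32 */
	printf("loop iterations patched in: %u\n", max_k);	/* 32 */
	return 0;
}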
+ */ + cpu_cb = spectre_v2_get_sw_mitigation_cb(); + if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb) + __this_cpu_write(bp_hardening_data.fn, NULL); + + state = SPECTRE_MITIGATED; + set_bit(BHB_FW, &system_bhb_mitigations); } update_mitigation_state(&spectre_bhb_state, state); @@ -1100,7 +1119,6 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt, { u8 rd; u32 insn; - u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM); BUG_ON(nr_inst != 1); /* MOV -> MOV */ @@ -1109,7 +1127,7 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt, insn = le32_to_cpu(*origptr); rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn); - insn = aarch64_insn_gen_movewide(rd, loop_count, 0, + insn = aarch64_insn_gen_movewide(rd, max_bhb_k, 0, AARCH64_INSN_VARIANT_64BIT, AARCH64_INSN_MOVEWIDE_ZERO); *updptr++ = cpu_to_le32(insn); diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 1a2c72f3e7f80..cb180684d10d5 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -194,12 +194,19 @@ static void amu_fie_setup(const struct cpumask *cpus) int cpu; /* We are already set since the last insmod of cpufreq driver */ - if (unlikely(cpumask_subset(cpus, amu_fie_cpus))) + if (cpumask_available(amu_fie_cpus) && + unlikely(cpumask_subset(cpus, amu_fie_cpus))) return; - for_each_cpu(cpu, cpus) { + for_each_cpu(cpu, cpus) if (!freq_counters_valid(cpu)) return; + + if (!cpumask_available(amu_fie_cpus) && + !zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) { + WARN_ONCE(1, "Failed to allocate FIE cpumask for CPUs[%*pbl]\n", + cpumask_pr_args(cpus)); + return; } cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus); @@ -237,17 +244,8 @@ static struct notifier_block init_amu_fie_notifier = { static int __init init_amu_fie(void) { - int ret; - - if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) - return -ENOMEM; - - ret = cpufreq_register_notifier(&init_amu_fie_notifier, + return cpufreq_register_notifier(&init_amu_fie_notifier, CPUFREQ_POLICY_NOTIFIER); - if (ret) - free_cpumask_var(amu_fie_cpus); - - return ret; } core_initcall(init_amu_fie); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 117702f033218..7d301da8ff289 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -493,7 +493,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (err) return err; - return kvm_share_hyp(vcpu, vcpu + 1); + err = kvm_share_hyp(vcpu, vcpu + 1); + if (err) + kvm_vgic_vcpu_destroy(vcpu); + + return err; } void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) @@ -580,6 +584,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) mmu = vcpu->arch.hw_mmu; last_ran = this_cpu_ptr(mmu->last_vcpu_ran); + /* + * Ensure a VMID is allocated for the MMU before programming VTTBR_EL2, + * which happens eagerly in VHE. + * + * Also, the VMID allocator only preserves VMIDs that are active at the + * time of rollover, so KVM might need to grab a new VMID for the MMU if + * this is called from kvm_sched_in(). + */ + kvm_arm_vmid_update(&mmu->vmid); + /* * We guarantee that both TLBs and I-cache are private to each * vcpu. If detecting that a vcpu from the same VM has @@ -1155,18 +1169,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) */ preempt_disable(); - /* - * The VMID allocator only tracks active VMIDs per - * physical CPU, and therefore the VMID allocated may not be - * preserved on VMID roll-over if the task was preempted, - * making a thread's VMID inactive. So we need to call - * kvm_arm_vmid_update() in non-premptible context. 
- */ - if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) && - has_vhe()) - __load_stage2(vcpu->arch.hw_mmu, - vcpu->arch.hw_mmu->arch); - kvm_pmu_flush_hwstate(vcpu); local_irq_disable(); @@ -1579,7 +1581,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, } vcpu_reset_hcr(vcpu); - vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu); /* * Handle the "start in power-off" case. @@ -2479,14 +2480,6 @@ static void finalize_init_hyp_mode(void) per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = kern_hyp_va(sve_state); } - } else { - for_each_possible_cpu(cpu) { - struct user_fpsimd_state *fpsimd_state; - - fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs; - per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state = - kern_hyp_va(fpsimd_state); - } } } diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index ea5484ce1f3ba..3cbb999419af7 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -54,43 +54,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) if (!system_supports_fpsimd()) return; - fpsimd_kvm_prepare(); - /* - * We will check TIF_FOREIGN_FPSTATE just before entering the - * guest in kvm_arch_vcpu_ctxflush_fp() and override this to - * FP_STATE_FREE if the flag set. + * Ensure that any host FPSIMD/SVE/SME state is saved and unbound such + * that the host kernel is responsible for restoring this state upon + * return to userspace, and the hyp code doesn't need to save anything. + * + * When the host may use SME, fpsimd_save_and_flush_cpu_state() ensures + * that PSTATE.{SM,ZA} == {0,0}. */ - *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED; - *host_data_ptr(fpsimd_state) = kern_hyp_va(¤t->thread.uw.fpsimd_state); - *host_data_ptr(fpmr_ptr) = kern_hyp_va(¤t->thread.uw.fpmr); - - vcpu_clear_flag(vcpu, HOST_SVE_ENABLED); - if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN) - vcpu_set_flag(vcpu, HOST_SVE_ENABLED); - - if (system_supports_sme()) { - vcpu_clear_flag(vcpu, HOST_SME_ENABLED); - if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN) - vcpu_set_flag(vcpu, HOST_SME_ENABLED); - - /* - * If PSTATE.SM is enabled then save any pending FP - * state and disable PSTATE.SM. If we leave PSTATE.SM - * enabled and the guest does not enable SME via - * CPACR_EL1.SMEN then operations that should be valid - * may generate SME traps from EL1 to EL1 which we - * can't intercept and which would confuse the guest. - * - * Do the same for PSTATE.ZA in the case where there - * is state in the registers which has not already - * been saved, this is very unlikely to happen. - */ - if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) { - *host_data_ptr(fp_owner) = FP_STATE_FREE; - fpsimd_save_and_flush_cpu_state(); - } - } + fpsimd_save_and_flush_cpu_state(); + *host_data_ptr(fp_owner) = FP_STATE_FREE; /* * If normal guests gain SME support, maintain this behavior for pKVM @@ -162,52 +135,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) local_irq_save(flags); - /* - * If we have VHE then the Hyp code will reset CPACR_EL1 to - * the default value and we need to reenable SME. 
- */ - if (has_vhe() && system_supports_sme()) { - /* Also restore EL0 state seen on entry */ - if (vcpu_get_flag(vcpu, HOST_SME_ENABLED)) - sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN); - else - sysreg_clear_set(CPACR_EL1, - CPACR_EL1_SMEN_EL0EN, - CPACR_EL1_SMEN_EL1EN); - isb(); - } - if (guest_owns_fp_regs()) { - if (vcpu_has_sve(vcpu)) { - u64 zcr = read_sysreg_el1(SYS_ZCR); - - /* - * If the vCPU is in the hyp context then ZCR_EL1 is - * loaded with its vEL2 counterpart. - */ - __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr; - - /* - * Restore the VL that was saved when bound to the CPU, - * which is the maximum VL for the guest. Because the - * layout of the data when saving the sve state depends - * on the VL, we need to use a consistent (i.e., the - * maximum) VL. - * Note that this means that at guest exit ZCR_EL1 is - * not necessarily the same as on guest entry. - * - * ZCR_EL2 holds the guest hypervisor's VL when running - * a nested guest, which could be smaller than the - * max for the vCPU. Similar to above, we first need to - * switch to a VL consistent with the layout of the - * vCPU's SVE state. KVM support for NV implies VHE, so - * using the ZCR_EL1 alias is safe. - */ - if (!has_vhe() || (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) - sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, - SYS_ZCR_EL1); - } - /* * Flush (save and invalidate) the fpsimd/sve state so that if * the host tries to use fpsimd/sve, it's not using stale data @@ -219,18 +147,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) * when needed. */ fpsimd_save_and_flush_cpu_state(); - } else if (has_vhe() && system_supports_sve()) { - /* - * The FPSIMD/SVE state in the CPU has not been touched, and we - * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been - * reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE - * for EL0. To avoid spurious traps, restore the trap state - * seen by kvm_arch_vcpu_load_fp(): - */ - if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED)) - sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN); - else - sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0); } local_irq_restore(flags); diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 4433a234aa9ba..9f4e8d68ab505 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN alternative_else_nop_endif mrs x1, isr_el1 cbz x1, 1f + + // Ensure that __guest_enter() always provides a context + // synchronization event so that callers don't need ISBs for anything + // that would usually be synchronized by the ERET.
+ isb mov x0, #ARM_EXCEPTION_IRQ ret diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 5310fe1da6165..cc9cb63959463 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -295,7 +295,7 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault); } -static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code) { *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR); arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2); @@ -344,7 +344,87 @@ static inline void __hyp_sve_save_host(void) true); } -static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu); +static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu) +{ + u64 zcr_el1, zcr_el2; + + if (!guest_owns_fp_regs()) + return; + + if (vcpu_has_sve(vcpu)) { + /* A guest hypervisor may restrict the effective max VL. */ + if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) + zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2); + else + zcr_el2 = vcpu_sve_max_vq(vcpu) - 1; + + write_sysreg_el2(zcr_el2, SYS_ZCR); + + zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)); + write_sysreg_el1(zcr_el1, SYS_ZCR); + } +} + +static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu) +{ + u64 zcr_el1, zcr_el2; + + if (!guest_owns_fp_regs()) + return; + + /* + * When the guest owns the FP regs, we know that guest+hyp traps for + * any FPSIMD/SVE/SME features exposed to the guest have been disabled + * by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd() + * prior to __guest_entry(). As __guest_entry() guarantees a context + * synchronization event, we don't need an ISB here to avoid taking + * traps for anything that was exposed to the guest. + */ + if (vcpu_has_sve(vcpu)) { + zcr_el1 = read_sysreg_el1(SYS_ZCR); + __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1; + + /* + * The guest's state is always saved using the guest's max VL. + * Ensure that the host has the guest's max VL active such that + * the host can save the guest's state lazily, but don't + * artificially restrict the host to the guest's max VL. + */ + if (has_vhe()) { + zcr_el2 = vcpu_sve_max_vq(vcpu) - 1; + write_sysreg_el2(zcr_el2, SYS_ZCR); + } else { + zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1; + write_sysreg_el2(zcr_el2, SYS_ZCR); + + zcr_el1 = vcpu_sve_max_vq(vcpu) - 1; + write_sysreg_el1(zcr_el1, SYS_ZCR); + } + } +} + +static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu) +{ + /* + * Non-protected kvm relies on the host restoring its sve state. + * Protected kvm restores the host's sve state as not to reveal that + * fpsimd was used by a guest nor leak upper sve bits. + */ + if (system_supports_sve()) { + __hyp_sve_save_host(); + + /* Re-enable SVE traps if not supported for the guest vcpu. */ + if (!vcpu_has_sve(vcpu)) + cpacr_clear_set(CPACR_ELx_ZEN, 0); + + } else { + __fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs)); + } + + if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) + *host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR); +} + /* * We trap the first access to the FP/SIMD to save the host context and @@ -352,7 +432,7 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu); * If FP/SIMD is not implemented, handle the trap and inject an undefined * instruction exception to the guest. Similarly for trapped SVE accesses. 
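Taken together, the fpsimd.c and switch.h hunks in this region move KVM to a simpler lazy-FP model: the host's FPSIMD/SVE/SME state is saved and unbound eagerly on vcpu load, ownership is tracked in a single flag, and the hyp trap handler only ever has to load guest state on the guest's first FP access. A minimal model of that ownership flow, with invented types rather than the kernel's:

enum toy_fp_owner { TOY_FP_FREE, TOY_FP_GUEST_OWNED };

struct toy_vcpu {
	enum toy_fp_owner owner;
	int fp_trap_enabled;
};

/* vcpu load: host state already saved and unbound, nothing left to spill */
static void toy_vcpu_load(struct toy_vcpu *v)
{
	v->owner = TOY_FP_FREE;		/* the registers hold nothing live */
	v->fp_trap_enabled = 1;		/* trap the guest's first FP use */
}

/* hyp trap handler: first guest FP access since entry */
static void toy_handle_fp_trap(struct toy_vcpu *v)
{
	/* the real handler restores the guest's FP registers here */
	v->owner = TOY_FP_GUEST_OWNED;
	v->fp_trap_enabled = 0;		/* run untrapped from now on */
}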
*/ -static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) { bool sve_guest; u8 esr_ec; @@ -394,7 +474,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) isb(); /* Write out the host state if it's in the registers */ - if (host_owns_fp_regs()) + if (is_protected_kvm_enabled() && host_owns_fp_regs()) kvm_hyp_save_fpsimd_host(vcpu); /* Restore the guest state */ @@ -543,7 +623,7 @@ static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu) return true; } -static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code) { if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) && handle_tx2_tvm(vcpu)) @@ -563,7 +643,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code) return false; } -static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code) { if (static_branch_unlikely(&vgic_v3_cpuif_trap) && __vgic_v3_perform_cpuif_access(vcpu) == 1) @@ -572,19 +652,18 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code) return false; } -static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, + u64 *exit_code) { if (!__populate_fault_info(vcpu)) return true; return false; } -static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) - __alias(kvm_hyp_handle_memory_fault); -static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code) - __alias(kvm_hyp_handle_memory_fault); +#define kvm_hyp_handle_iabt_low kvm_hyp_handle_memory_fault +#define kvm_hyp_handle_watchpt_low kvm_hyp_handle_memory_fault -static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) { if (kvm_hyp_handle_memory_fault(vcpu, exit_code)) return true; @@ -614,23 +693,16 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *); -static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu); - -static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code); - /* * Allow the hypervisor to handle the exit with an exit handler if it has one. * * Returns true if the hypervisor handled the exit, and control should go back * to the guest, or false if it hasn't. */ -static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code, + const exit_handler_fn *handlers) { - const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu); - exit_handler_fn fn; - - fn = handlers[kvm_vcpu_trap_get_class(vcpu)]; - + exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)]; if (fn) return fn(vcpu, exit_code); @@ -660,20 +732,9 @@ static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code * the guest, false when we should restore the host state and return to the * main run loop. */ -static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code, + const exit_handler_fn *handlers) { - /* - * Save PSTATE early so that we can evaluate the vcpu mode - * early on. 
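The refactor above stops looking the handler table up inside the shared fixup path and instead has each world (VHE, nVHE) pass its own table into __fixup_guest_exit(). The dispatch shape itself is unchanged: an array of function pointers indexed by exception class, where a NULL slot means "no hypervisor fixup". A sketch with invented names:

typedef int (*toy_exit_fn)(void *vcpu, unsigned long *exit_code);

static int toy_handle_sysreg(void *vcpu, unsigned long *exit_code)
{
	return 1;	/* handled: resume the guest */
}

/* one table per world; the index is the exception class */
static const toy_exit_fn toy_handlers[64] = {
	[0x18] = toy_handle_sysreg,	/* stand-in for ESR_ELx_EC_SYS64 */
};

static int toy_fixup_exit(unsigned int ec, void *vcpu,
			  unsigned long *exit_code,
			  const toy_exit_fn *handlers)
{
	toy_exit_fn fn = handlers[ec & 63];

	return fn ? fn(vcpu, exit_code) : 0;	/* 0: bounce to the host */
}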
- */ - synchronize_vcpu_pstate(vcpu, exit_code); - - /* - * Check whether we want to repaint the state one way or - * another. - */ - early_exit_filter(vcpu, exit_code); - if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR); @@ -703,7 +764,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) goto exit; /* Check if there's an exit handler and allow it to handle the exit. */ - if (kvm_hyp_handle_exit(vcpu, exit_code)) + if (kvm_hyp_handle_exit(vcpu, exit_code, handlers)) goto guest; exit: /* Return to the host kernel and handle the exit */ diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index fefc89209f9e4..75f7e386de75b 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -5,6 +5,7 @@ */ #include +#include #include #include @@ -83,7 +84,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu) if (system_supports_sve()) __hyp_sve_restore_host(); else - __fpsimd_restore_state(*host_data_ptr(fpsimd_state)); + __fpsimd_restore_state(host_data_ptr(host_ctxt.fp_regs)); if (has_fpmr) write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR); @@ -177,7 +178,9 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt) pkvm_put_hyp_vcpu(hyp_vcpu); } else { /* The host is fully trusted, run its vCPU directly. */ + fpsimd_lazy_switch_to_guest(host_vcpu); ret = __kvm_vcpu_run(host_vcpu); + fpsimd_lazy_switch_to_host(host_vcpu); } out: @@ -486,12 +489,6 @@ void handle_trap(struct kvm_cpu_context *host_ctxt) case ESR_ELx_EC_SMC64: handle_host_smc(host_ctxt); break; - case ESR_ELx_EC_SVE: - cpacr_clear_set(0, CPACR_ELx_ZEN); - isb(); - sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, - SYS_ZCR_EL2); - break; case ESR_ELx_EC_IABT_LOW: case ESR_ELx_EC_DABT_LOW: handle_host_mem_abort(host_ctxt); diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 077d4098548d2..7c464340bcd07 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -28,8 +28,6 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1); u64 hcr_set = HCR_RW; u64 hcr_clear = 0; - u64 cptr_set = 0; - u64 cptr_clear = 0; /* Protected KVM does not support AArch32 guests. 
*/ BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), @@ -59,21 +57,10 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) /* Trap AMU */ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) { hcr_clear |= HCR_AMVOFFEN; - cptr_set |= CPTR_EL2_TAM; - } - - /* Trap SVE */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) { - if (has_hvhe()) - cptr_clear |= CPACR_ELx_ZEN; - else - cptr_set |= CPTR_EL2_TZ; } vcpu->arch.hcr_el2 |= hcr_set; vcpu->arch.hcr_el2 &= ~hcr_clear; - vcpu->arch.cptr_el2 |= cptr_set; - vcpu->arch.cptr_el2 &= ~cptr_clear; } /* @@ -103,7 +90,6 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1); u64 mdcr_set = 0; u64 mdcr_clear = 0; - u64 cptr_set = 0; /* Trap/constrain PMU */ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) { @@ -130,21 +116,12 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids)) mdcr_set |= MDCR_EL2_TTRF; - /* Trap Trace */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) { - if (has_hvhe()) - cptr_set |= CPACR_EL1_TTA; - else - cptr_set |= CPTR_EL2_TTA; - } - /* Trap External Trace */ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids)) mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT; vcpu->arch.mdcr_el2 |= mdcr_set; vcpu->arch.mdcr_el2 &= ~mdcr_clear; - vcpu->arch.cptr_el2 |= cptr_set; } /* @@ -195,10 +172,6 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu) /* Clear res0 and set res1 bits to trap potential new features. */ vcpu->arch.hcr_el2 &= ~(HCR_RES0); vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0); - if (!has_hvhe()) { - vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1; - vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0); - } } /* @@ -579,8 +552,6 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu, return ret; } - hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu); - return 0; } diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index cc69106734ca7..a1245fa838319 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -36,33 +36,71 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector); extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc); -static void __activate_traps(struct kvm_vcpu *vcpu) +static void __activate_cptr_traps(struct kvm_vcpu *vcpu) { - u64 val; + u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */ - ___activate_traps(vcpu, vcpu->arch.hcr_el2); - __activate_traps_common(vcpu); + if (!guest_owns_fp_regs()) + __activate_traps_fpsimd32(vcpu); - val = vcpu->arch.cptr_el2; - val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */ - val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA; - if (cpus_have_final_cap(ARM64_SME)) { - if (has_hvhe()) - val &= ~CPACR_ELx_SMEN; - else - val |= CPTR_EL2_TSM; + if (has_hvhe()) { + val |= CPACR_ELx_TTA; + + if (guest_owns_fp_regs()) { + val |= CPACR_ELx_FPEN; + if (vcpu_has_sve(vcpu)) + val |= CPACR_ELx_ZEN; + } + + write_sysreg(val, cpacr_el1); + } else { + val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1; + + /* + * Always trap SME since it's not supported in KVM. + * TSM is RES1 if SME isn't implemented. 
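A detail worth keeping in mind while reading __activate_cptr_traps() here and __deactivate_cptr_traps() just below: the two register formats have opposite polarity. In the hVHE (CPACR_EL1) layout a set bit grants access, while in the nVHE (CPTR_EL2) layout a set bit installs a trap. A toy illustration with made-up masks, not the architectural bit positions:

#define TOY_EN_FP	(1u << 20)	/* CPACR-style: set => access allowed */
#define TOY_TRAP_FP	(1u << 10)	/* CPTR-style:  set => access traps   */

static unsigned int toy_cpacr(int guest_owns_fp)
{
	return guest_owns_fp ? TOY_EN_FP : 0;
}

static unsigned int toy_cptr(int guest_owns_fp)
{
	return guest_owns_fp ? 0 : TOY_TRAP_FP;
}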
+ */ + val |= CPTR_EL2_TSM; + + if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs()) + val |= CPTR_EL2_TZ; + + if (!guest_owns_fp_regs()) + val |= CPTR_EL2_TFP; + + write_sysreg(val, cptr_el2); + } +} - if (!guest_owns_fp_regs()) { - if (has_hvhe()) - val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN); - else - val |= CPTR_EL2_TFP | CPTR_EL2_TZ; +static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu) +{ + if (has_hvhe()) { + u64 val = CPACR_ELx_FPEN; - __activate_traps_fpsimd32(vcpu); + if (cpus_have_final_cap(ARM64_SVE)) + val |= CPACR_ELx_ZEN; + if (cpus_have_final_cap(ARM64_SME)) + val |= CPACR_ELx_SMEN; + + write_sysreg(val, cpacr_el1); + } else { + u64 val = CPTR_NVHE_EL2_RES1; + + if (!cpus_have_final_cap(ARM64_SVE)) + val |= CPTR_EL2_TZ; + if (!cpus_have_final_cap(ARM64_SME)) + val |= CPTR_EL2_TSM; + + write_sysreg(val, cptr_el2); } +} + +static void __activate_traps(struct kvm_vcpu *vcpu) +{ + ___activate_traps(vcpu, vcpu->arch.hcr_el2); + __activate_traps_common(vcpu); + __activate_cptr_traps(vcpu); - kvm_write_cptr_el2(val); write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2); if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { @@ -107,7 +145,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2); - kvm_reset_cptr_el2(vcpu); + __deactivate_cptr_traps(vcpu); write_sysreg(__kvm_hyp_host_vector, vbar_el2); } @@ -180,34 +218,6 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code) kvm_handle_pvm_sysreg(vcpu, exit_code)); } -static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu) -{ - /* - * Non-protected kvm relies on the host restoring its sve state. - * Protected kvm restores the host's sve state as not to reveal that - * fpsimd was used by a guest nor leak upper sve bits. - */ - if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) { - __hyp_sve_save_host(); - - /* Re-enable SVE traps if not supported for the guest vcpu. */ - if (!vcpu_has_sve(vcpu)) - cpacr_clear_set(CPACR_ELx_ZEN, 0); - - } else { - __fpsimd_save_state(*host_data_ptr(fpsimd_state)); - } - - if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) { - u64 val = read_sysreg_s(SYS_FPMR); - - if (unlikely(is_protected_kvm_enabled())) - *host_data_ptr(fpmr) = val; - else - **host_data_ptr(fpmr_ptr) = val; - } -} - static const exit_handler_fn hyp_exit_handlers[] = { [0 ... ESR_ELx_EC_MAX] = NULL, [ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32, @@ -239,19 +249,21 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu) return hyp_exit_handlers; } -/* - * Some guests (e.g., protected VMs) are not be allowed to run in AArch32. - * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a - * guest from dropping to AArch32 EL0 if implemented by the CPU. If the - * hypervisor spots a guest in such a state ensure it is handled, and don't - * trust the host to spot or fix it. The check below is based on the one in - * kvm_arch_vcpu_ioctl_run(). - * - * Returns false if the guest ran in AArch32 when it shouldn't have, and - * thus should exit to the host, or true if a the guest run loop can continue. - */ -static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) { + const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu); + + synchronize_vcpu_pstate(vcpu, exit_code); + + /* + * Some guests (e.g., protected VMs) are not allowed to run in + * AArch32.
The ARMv8 architecture does not give the hypervisor a + * mechanism to prevent a guest from dropping to AArch32 EL0 if + * implemented by the CPU. If the hypervisor spots a guest in such a + * state ensure it is handled, and don't trust the host to spot or fix + * it. The check below is based on the one in + * kvm_arch_vcpu_ioctl_run(). + */ if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) { /* * As we have caught the guest red-handed, decide that it isn't @@ -264,6 +276,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT); *exit_code |= ARM_EXCEPTION_IL; } + + return __fixup_guest_exit(vcpu, exit_code, handlers); } /* Switch to the guest for legacy non-VHE systems */ diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 80581b1c39959..496abfd3646b9 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -309,14 +309,6 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code) return true; } -static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu) -{ - __fpsimd_save_state(*host_data_ptr(fpsimd_state)); - - if (kvm_has_fpmr(vcpu->kvm)) - **host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR); -} - static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code) { int ret = -EINVAL; @@ -431,13 +423,10 @@ static const exit_handler_fn hyp_exit_handlers[] = { [ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops, }; -static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu) +static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) { - return hyp_exit_handlers; -} + synchronize_vcpu_pstate(vcpu, exit_code); -static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) -{ /* * If we were in HYP context on entry, adjust the PSTATE view * so that the usual helpers work correctly. @@ -457,6 +446,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) *vcpu_cpsr(vcpu) &= ~(PSR_MODE_MASK | PSR_MODE32_BIT); *vcpu_cpsr(vcpu) |= mode; } + + return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers); } /* Switch to the guest for VHE systems running in EL2 */ @@ -471,6 +462,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) sysreg_save_host_state_vhe(host_ctxt); + fpsimd_lazy_switch_to_guest(vcpu); + /* * Note that ARM erratum 1165522 requires us to configure both stage 1 * and stage 2 translation for the guest context before we clear @@ -495,6 +488,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) __deactivate_traps(vcpu); + fpsimd_lazy_switch_to_host(vcpu); + sysreg_restore_host_state_vhe(host_ctxt); if (guest_owns_fp_regs()) diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c index 806223b7022af..7fe8ba1a2851c 100644 --- a/arch/arm64/kvm/vmid.c +++ b/arch/arm64/kvm/vmid.c @@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void) atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID); } -bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid) +void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid) { unsigned long flags; u64 vmid, old_active_vmid; - bool updated = false; vmid = atomic64_read(&kvm_vmid->id); @@ -157,21 +156,17 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid) if (old_active_vmid != 0 && vmid_gen_match(vmid) && 0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids), old_active_vmid, vmid)) - return false; + return; raw_spin_lock_irqsave(&cpu_vmid_lock, flags); /* Check that our VMID belongs to the current generation. 
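The vmid.c simplification above leans on the allocator's generation scheme: the 64-bit VMID word keeps a generation counter in its upper bits, and a rollover bumps the generation so that every previously handed-out VMID stops matching at once. A sketch of that scheme with an illustrative 8-bit ID field (field widths are invented):

#include <stdint.h>

#define TOY_ID_BITS	8
#define TOY_ID_MASK	((1ull << TOY_ID_BITS) - 1)

static uint64_t toy_current_gen = 1ull << TOY_ID_BITS;

static int toy_gen_match(uint64_t vmid)
{
	/* stale if it was allocated under an older generation */
	return (vmid & ~TOY_ID_MASK) == toy_current_gen;
}

static void toy_rollover(void)
{
	/* one increment invalidates every outstanding VMID */
	toy_current_gen += 1ull << TOY_ID_BITS;
}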
*/ vmid = atomic64_read(&kvm_vmid->id); - if (!vmid_gen_match(vmid)) { + if (!vmid_gen_match(vmid)) vmid = new_vmid(kvm_vmid); - updated = true; - } atomic64_set(this_cpu_ptr(&active_vmids), vmid); raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags); - - return updated; } /* diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 0a6956bbfb326..fe167ce297a16 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -100,20 +100,11 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr, static inline int num_contig_ptes(unsigned long size, size_t *pgsize) { - int contig_ptes = 0; + int contig_ptes = 1; *pgsize = size; switch (size) { -#ifndef __PAGETABLE_PMD_FOLDED - case PUD_SIZE: - if (pud_sect_supported()) - contig_ptes = 1; - break; -#endif - case PMD_SIZE: - contig_ptes = 1; - break; case CONT_PMD_SIZE: *pgsize = PMD_SIZE; contig_ptes = CONT_PMDS; @@ -122,6 +113,8 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize) *pgsize = PAGE_SIZE; contig_ptes = CONT_PTES; break; + default: + WARN_ON(!__hugetlb_valid_size(size)); } return contig_ptes; @@ -163,24 +156,23 @@ static pte_t get_clear_contig(struct mm_struct *mm, unsigned long pgsize, unsigned long ncontig) { - pte_t orig_pte = __ptep_get(ptep); - unsigned long i; - - for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) { - pte_t pte = __ptep_get_and_clear(mm, addr, ptep); - - /* - * If HW_AFDBM is enabled, then the HW could turn on - * the dirty or accessed bit for any page in the set, - * so check them all. - */ - if (pte_dirty(pte)) - orig_pte = pte_mkdirty(orig_pte); - - if (pte_young(pte)) - orig_pte = pte_mkyoung(orig_pte); + pte_t pte, tmp_pte; + bool present; + + pte = __ptep_get_and_clear(mm, addr, ptep); + present = pte_present(pte); + while (--ncontig) { + ptep++; + addr += pgsize; + tmp_pte = __ptep_get_and_clear(mm, addr, ptep); + if (present) { + if (pte_dirty(tmp_pte)) + pte = pte_mkdirty(pte); + if (pte_young(tmp_pte)) + pte = pte_mkyoung(pte); + } } - return orig_pte; + return pte; } static pte_t get_clear_contig_flush(struct mm_struct *mm, @@ -385,18 +377,13 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr, __pte_clear(mm, addr, ptep); } -pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) +pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz) { int ncontig; size_t pgsize; - pte_t orig_pte = __ptep_get(ptep); - - if (!pte_cont(orig_pte)) - return __ptep_get_and_clear(mm, addr, ptep); - - ncontig = find_num_contig(mm, addr, ptep, &pgsize); + ncontig = num_contig_ptes(sz, &pgsize); return get_clear_contig(mm, addr, ptep, pgsize, ncontig); } @@ -538,6 +525,8 @@ bool __init arch_hugetlb_valid_size(unsigned long size) pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { + unsigned long psize = huge_page_size(hstate_vma(vma)); + if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) { /* * Break-before-make (BBM) is required for all user space mappings @@ -547,7 +536,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr if (pte_user_exec(__ptep_get(ptep))) return huge_ptep_clear_flush(vma, addr, ptep); } - return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize); } void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, diff --git a/arch/arm64/mm/init.c 
b/arch/arm64/mm/init.c index ea71ef2e343c2..93ba66de160ce 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -278,12 +278,7 @@ void __init arm64_memblock_init(void) if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { extern u16 memstart_offset_seed; - - /* - * Use the sanitised version of id_aa64mmfr0_el1 so that linear - * map randomization can be enabled by shrinking the IPA space. - */ - u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); + u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); int parange = cpuid_feature_extract_unsigned_field( mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT); s64 range = linear_region_size - diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index e55b02fbddc8f..9bcd51fd67d4e 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1176,8 +1176,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); + /* [start, end] should be within one section */ + WARN_ON_ONCE(end - start > PAGES_PER_SECTION * sizeof(struct page)); - if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES)) + if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES) || + (end - start < PAGES_PER_SECTION * sizeof(struct page))) return vmemmap_populate_basepages(start, end, node, altmap); else return vmemmap_populate_hugepages(start, end, node, altmap); @@ -1357,7 +1360,8 @@ int arch_add_memory(int nid, u64 start, u64 size, __remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size); else { - max_pfn = PFN_UP(start + size); + /* Address of hotplugged memory can be smaller */ + max_pfn = max(max_pfn, PFN_UP(start + size)); max_low_pfn = max_pfn; } diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 8d637ac4b7c6b..362bcfa0aed18 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1238,6 +1238,7 @@ UnsignedEnum 11:8 PMUVer 0b0110 V3P5 0b0111 V3P7 0b1000 V3P8 + 0b1001 V3P9 0b1111 IMP_DEF EndEnum UnsignedEnum 7:4 TraceVer @@ -1556,6 +1557,7 @@ EndEnum UnsignedEnum 59:56 FGT 0b0000 NI 0b0001 IMP + 0b0010 FGT2 EndEnum Res0 55:48 UnsignedEnum 47:44 EXS @@ -1617,6 +1619,7 @@ Enum 3:0 PARANGE 0b0100 44 0b0101 48 0b0110 52 + 0b0111 56 EndEnum EndSysreg @@ -2463,6 +2466,101 @@ Field 1 ICIALLU Field 0 ICIALLUIS EndSysreg +Sysreg HDFGRTR2_EL2 3 4 3 1 0 +Res0 63:25 +Field 24 nPMBMAR_EL1 +Field 23 nMDSTEPOP_EL1 +Field 22 nTRBMPAM_EL1 +Res0 21 +Field 20 nTRCITECR_EL1 +Field 19 nPMSDSFR_EL1 +Field 18 nSPMDEVAFF_EL1 +Field 17 nSPMID +Field 16 nSPMSCR_EL1 +Field 15 nSPMACCESSR_EL1 +Field 14 nSPMCR_EL0 +Field 13 nSPMOVS +Field 12 nSPMINTEN +Field 11 nSPMCNTEN +Field 10 nSPMSELR_EL0 +Field 9 nSPMEVTYPERn_EL0 +Field 8 nSPMEVCNTRn_EL0 +Field 7 nPMSSCR_EL1 +Field 6 nPMSSDATA +Field 5 nMDSELR_EL1 +Field 4 nPMUACR_EL1 +Field 3 nPMICFILTR_EL0 +Field 2 nPMICNTR_EL0 +Field 1 nPMIAR_EL1 +Field 0 nPMECR_EL1 +EndSysreg + +Sysreg HDFGWTR2_EL2 3 4 3 1 1 +Res0 63:25 +Field 24 nPMBMAR_EL1 +Field 23 nMDSTEPOP_EL1 +Field 22 nTRBMPAM_EL1 +Field 21 nPMZR_EL0 +Field 20 nTRCITECR_EL1 +Field 19 nPMSDSFR_EL1 +Res0 18:17 +Field 16 nSPMSCR_EL1 +Field 15 nSPMACCESSR_EL1 +Field 14 nSPMCR_EL0 +Field 13 nSPMOVS +Field 12 nSPMINTEN +Field 11 nSPMCNTEN +Field 10 nSPMSELR_EL0 +Field 9 nSPMEVTYPERn_EL0 +Field 8 nSPMEVCNTRn_EL0 +Field 7 nPMSSCR_EL1 +Res0 6 +Field 5 nMDSELR_EL1 +Field 4 nPMUACR_EL1 +Field 3 nPMICFILTR_EL0 +Field 2 nPMICNTR_EL0 +Field 1 nPMIAR_EL1 +Field 0 nPMECR_EL1 +EndSysreg + +Sysreg HFGRTR2_EL2 3 4 3 1 2 +Res0 63:15 +Field 14 nACTLRALIAS_EL1 +Field 13 nACTLRMASK_EL1 +Field 12 nTCR2ALIAS_EL1 +Field 11 
nTCRALIAS_EL1 +Field 10 nSCTLRALIAS2_EL1 +Field 9 nSCTLRALIAS_EL1 +Field 8 nCPACRALIAS_EL1 +Field 7 nTCR2MASK_EL1 +Field 6 nTCRMASK_EL1 +Field 5 nSCTLR2MASK_EL1 +Field 4 nSCTLRMASK_EL1 +Field 3 nCPACRMASK_EL1 +Field 2 nRCWSMASK_EL1 +Field 1 nERXGSR_EL1 +Field 0 nPFAR_EL1 +EndSysreg + +Sysreg HFGWTR2_EL2 3 4 3 1 3 +Res0 63:15 +Field 14 nACTLRALIAS_EL1 +Field 13 nACTLRMASK_EL1 +Field 12 nTCR2ALIAS_EL1 +Field 11 nTCRALIAS_EL1 +Field 10 nSCTLRALIAS2_EL1 +Field 9 nSCTLRALIAS_EL1 +Field 8 nCPACRALIAS_EL1 +Field 7 nTCR2MASK_EL1 +Field 6 nTCRMASK_EL1 +Field 5 nSCTLR2MASK_EL1 +Field 4 nSCTLRMASK_EL1 +Field 3 nCPACRMASK_EL1 +Field 2 nRCWSMASK_EL1 +Res0 1 +Field 0 nPFAR_EL1 +EndSysreg + Sysreg HDFGRTR_EL2 3 4 3 1 4 Field 63 PMBIDR_EL1 Field 62 nPMSNEVFR_EL1 @@ -2635,6 +2733,12 @@ Field 1 AMEVCNTR00_EL0 Field 0 AMCNTEN0 EndSysreg +Sysreg HFGITR2_EL2 3 4 3 1 7 +Res0 63:2 +Field 1 nDCCIVAPS +Field 0 TSBCSYNC +EndSysreg + Sysreg ZCR_EL2 3 4 1 2 0 Fields ZCR_ELx EndSysreg diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index d9fce0fd475a0..fe9f895138dba 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -375,8 +375,8 @@ config CMDLINE_BOOTLOADER config CMDLINE_EXTEND bool "Use built-in to extend bootloader kernel arguments" help - The command-line arguments provided during boot will be - appended to the built-in command line. This is useful in + The built-in command line will be appended to the command- + line arguments provided during boot. This is useful in cases where the provided arguments are insufficient and you don't want to or cannot modify them. diff --git a/arch/loongarch/include/asm/bug.h b/arch/loongarch/include/asm/bug.h index 08388876ade4c..561ac1bf79e26 100644 --- a/arch/loongarch/include/asm/bug.h +++ b/arch/loongarch/include/asm/bug.h @@ -4,6 +4,7 @@ #include #include +#include #ifndef CONFIG_DEBUG_BUGVERBOSE #define _BUGVERBOSE_LOCATION(file, line) @@ -33,25 +34,25 @@ #define ASM_BUG_FLAGS(flags) \ __BUG_ENTRY(flags) \ - break BRK_BUG + break BRK_BUG; #define ASM_BUG() ASM_BUG_FLAGS(0) -#define __BUG_FLAGS(flags) \ - asm_inline volatile (__stringify(ASM_BUG_FLAGS(flags))); +#define __BUG_FLAGS(flags, extra) \ + asm_inline volatile (__stringify(ASM_BUG_FLAGS(flags)) \ + extra); #define __WARN_FLAGS(flags) \ do { \ instrumentation_begin(); \ - __BUG_FLAGS(BUGFLAG_WARNING|(flags)); \ - annotate_reachable(); \ + __BUG_FLAGS(BUGFLAG_WARNING|(flags), ASM_REACHABLE); \ instrumentation_end(); \ } while (0) #define BUG() \ do { \ instrumentation_begin(); \ - __BUG_FLAGS(0); \ + __BUG_FLAGS(0, ""); \ unreachable(); \ } while (0) diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h index 1b6d096171998..aa622c7544144 100644 --- a/arch/loongarch/include/asm/cache.h +++ b/arch/loongarch/include/asm/cache.h @@ -8,6 +8,8 @@ #define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define ARCH_DMA_MINALIGN (16) + #define __read_mostly __section(".data..read_mostly") #endif /* _ASM_CACHE_H */ diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h index 376c0708e2979..6302e60fbaee1 100644 --- a/arch/loongarch/include/asm/hugetlb.h +++ b/arch/loongarch/include/asm/hugetlb.h @@ -41,7 +41,8 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) + unsigned long addr, pte_t *ptep, + unsigned long sz) { pte_t clear; 
pte_t pte = ptep_get(ptep); @@ -56,8 +57,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte; + unsigned long sz = huge_page_size(hstate_vma(vma)); - pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz); flush_tlb_page(vma, addr); return pte; } diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index 9c2ca785faa9b..b2915fd538620 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -53,7 +53,7 @@ void spurious_interrupt(void); #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu); -#define MAX_IO_PICS 2 +#define MAX_IO_PICS 8 #define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS)) struct acpi_vector_group { diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h index f23adb15f418f..fc8b64773794a 100644 --- a/arch/loongarch/include/asm/stacktrace.h +++ b/arch/loongarch/include/asm/stacktrace.h @@ -8,6 +8,7 @@ #include #include #include +#include #include enum stack_type { @@ -43,6 +44,7 @@ int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_i static __always_inline void prepare_frametrace(struct pt_regs *regs) { __asm__ __volatile__( + UNWIND_HINT_SAVE /* Save $ra */ STORE_ONE_REG(1) /* Use $ra to save PC */ @@ -80,6 +82,7 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs) STORE_ONE_REG(29) STORE_ONE_REG(30) STORE_ONE_REG(31) + UNWIND_HINT_RESTORE : "=m" (regs->csr_era) : "r" (regs->regs) : "memory"); diff --git a/arch/loongarch/include/asm/unwind_hints.h b/arch/loongarch/include/asm/unwind_hints.h index a01086ad9ddea..2c68bc72736c9 100644 --- a/arch/loongarch/include/asm/unwind_hints.h +++ b/arch/loongarch/include/asm/unwind_hints.h @@ -23,6 +23,14 @@ UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL .endm -#endif /* __ASSEMBLY__ */ +#else /* !__ASSEMBLY__ */ + +#define UNWIND_HINT_SAVE \ + UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0) + +#define UNWIND_HINT_RESTORE \ + UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0) + +#endif /* !__ASSEMBLY__ */ #endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */ diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 382a09a7152c3..1120ac2824f6e 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -249,18 +249,6 @@ static __init int setup_node(int pxm) return acpi_map_pxm_to_node(pxm); } -/* - * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for - * I/O localities since SRAT does not list them. I/O localities are - * not supported at this point. 
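A recurring interface change in the hugetlb hunks of this patch (arm64 earlier, loongarch here, and mips/parisc/powerpc further down) is that huge_ptep_get_and_clear() now receives the mapping size from its caller instead of re-deriving it from a PTE that may already have been cleared. The shape of the new contract, sketched with invented types:

typedef unsigned long toy_pte;

static toy_pte toy_get_and_clear(toy_pte *ptep, unsigned long sz,
				 unsigned long pgsize)
{
	unsigned long i, n = sz / pgsize;	/* contiguous entries to clear */
	toy_pte first = ptep[0];

	for (i = 0; i < n; i++)
		ptep[i] = 0;	/* the real code also folds dirty/young bits */

	return first;
}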
- */ -unsigned int numa_distance_cnt; - -static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit) -{ - return slit->locality_count; -} - void __init numa_set_distance(int from, int to, int distance) { if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) { diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 2f1f5b08638f8..27144de5c5fe4 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -68,6 +68,8 @@ static int __init fdt_cpu_clk_init(void) return -ENODEV; clk = of_clk_get(np, 0); + of_node_put(np); + if (IS_ERR(clk)) return -ENODEV; diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c index 445c452d72a79..7be5b4c0c9002 100644 --- a/arch/loongarch/kernel/kgdb.c +++ b/arch/loongarch/kernel/kgdb.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -224,13 +225,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) regs->csr_era = pc; } -void arch_kgdb_breakpoint(void) +noinline void arch_kgdb_breakpoint(void) { __asm__ __volatile__ ( \ ".globl kgdb_breakinst\n\t" \ - "nop\n" \ "kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */ } +STACK_FRAME_NON_STANDARD(arch_kgdb_breakpoint); /* * Calls linux_debug_hook before the kernel dies. If KGDB is enabled, diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c index 8ae641dc53bb7..f9381800e291c 100644 --- a/arch/loongarch/kernel/machine_kexec.c +++ b/arch/loongarch/kernel/machine_kexec.c @@ -126,14 +126,14 @@ void kexec_reboot(void) /* All secondary cpus go to kexec_smp_wait */ if (smp_processor_id() > 0) { relocated_kexec_smp_wait(NULL); - unreachable(); + BUG(); } #endif do_kexec = (void *)reboot_code_buffer; do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry); - unreachable(); + BUG(); } diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 56934fe58170e..1fa6a604734ef 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -387,6 +387,9 @@ static void __init check_kernel_sections_mem(void) */ static void __init arch_mem_init(char **cmdline_p) { + /* Recalculate max_low_pfn for "mem=xxx" */ + max_pfn = max_low_pfn = PHYS_PFN(memblock_end_of_DRAM()); + if (usermem) pr_info("User-defined physical RAM map overwrite\n"); diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 5d59e9ce2772d..d96065dbe779b 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -423,7 +424,7 @@ void loongson_cpu_die(unsigned int cpu) mb(); } -void __noreturn arch_cpu_idle_dead(void) +static void __noreturn idle_play_dead(void) { register uint64_t addr; register void (*init_fn)(void); @@ -447,6 +448,50 @@ void __noreturn arch_cpu_idle_dead(void) BUG(); } +#ifdef CONFIG_HIBERNATION +static void __noreturn poll_play_dead(void) +{ + register uint64_t addr; + register void (*init_fn)(void); + + idle_task_exit(); + __this_cpu_write(cpu_state, CPU_DEAD); + + __smp_mb(); + do { + __asm__ __volatile__("nop\n\t"); + addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0); + } while (addr == 0); + + init_fn = (void *)TO_CACHE(addr); + iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR); + + init_fn(); + BUG(); +} +#endif + +static void (*play_dead)(void) = idle_play_dead; + +void __noreturn arch_cpu_idle_dead(void) +{ + play_dead(); + BUG(); /* play_dead() doesn't return */ +} + +#ifdef 
CONFIG_HIBERNATION +int hibernate_resume_nonboot_cpu_disable(void) +{ + int ret; + + play_dead = poll_play_dead; + ret = suspend_disable_secondary_cpus(); + play_dead = idle_play_dead; + + return ret; +} +#endif + #endif /* diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 90894f70ff4a5..add52e927f153 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -624,6 +624,12 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) struct kvm_run *run = vcpu->run; unsigned long badv = vcpu->arch.badv; + /* Inject an ADE exception if the address exceeds the max GPA size */ + if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) { + kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM); + return RESUME_GUEST; + } + ret = kvm_handle_mm_fault(vcpu, badv, write); if (ret) { /* Treat as MMIO */ diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index 7e8f5d6829ef0..34fad2c29ee69 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -297,6 +297,13 @@ int kvm_arch_enable_virtualization_cpu(void) kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx", read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc()); + /* + * HW Guest CSR registers are lost after CPU suspend and resume. + * Clear last_vcpu so that Guest CSR registers are forced to reload + * from vCPU SW state. + */ + this_cpu_ptr(vmcs)->last_vcpu = NULL; + return 0; } diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S index 0c292f8184927..1be185e948072 100644 --- a/arch/loongarch/kvm/switch.S +++ b/arch/loongarch/kvm/switch.S @@ -85,7 +85,7 @@ * Guest CRMD comes from separate GCSR_CRMD register */ ori t0, zero, CSR_PRMD_PIE - csrxchg t0, t0, LOONGARCH_CSR_PRMD + csrwr t0, LOONGARCH_CSR_PRMD /* Set PVM bit to setup ertn to guest context */ ori t0, zero, CSR_GSTAT_PVM diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 9d53eca66fcc7..e7a084de64f7b 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -311,7 +311,7 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) { int ret = RESUME_GUEST; unsigned long estat = vcpu->arch.host_estat; - u32 intr = estat & 0x1fff; /* Ignore NMI */ + u32 intr = estat & CSR_ESTAT_IS; u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; vcpu->mode = OUTSIDE_GUEST_MODE; diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 4ba734aaef87a..fe9e973912d44 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -46,7 +46,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (kvm_pvtime_supported()) kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME); - kvm->arch.gpa_size = BIT(cpu_vabits - 1); + /* + * cpu_vabits means user address space only (half of the total). + * GPA size of VM is the same as the size of user address space.
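Tying the two LoongArch KVM hunks above together: with gpa_size widened to the full user address span, the fault path can reject any guest physical address at or beyond that bound by injecting an address error (ADE) instead of treating the access as MMIO. A toy version of the bound check:

#include <stdint.h>

static int toy_gpa_in_range(uint64_t badv, unsigned int cpu_vabits)
{
	uint64_t gpa_size = 1ull << cpu_vabits;	/* as in BIT(cpu_vabits) */

	return badv < gpa_size;	/* false => inject ADE and resume the guest */
}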
+ */ + kvm->arch.gpa_size = BIT(cpu_vabits); kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1; kvm->arch.invalid_ptes[0] = 0; kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table; diff --git a/arch/loongarch/mm/pageattr.c b/arch/loongarch/mm/pageattr.c index ffd8d76021d47..aca4e86d2d888 100644 --- a/arch/loongarch/mm/pageattr.c +++ b/arch/loongarch/mm/pageattr.c @@ -3,6 +3,7 @@ * Copyright (C) 2024 Loongson Technology Corporation Limited */ +#include #include #include #include @@ -167,7 +168,7 @@ bool kernel_page_present(struct page *page) unsigned long addr = (unsigned long)page_address(page); if (addr < vm_map_base) - return true; + return memblock_is_memory(__pa(addr)); pgd = pgd_offset_k(addr); if (pgd_none(pgdp_get(pgd))) diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index ea357a3edc094..fa1500d4aa3e3 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -142,6 +142,8 @@ static void build_prologue(struct jit_ctx *ctx) */ if (seen_tail_call(ctx) && seen_call(ctx)) move_reg(ctx, TCC_SAVED, REG_TCC); + else + emit_insn(ctx, nop); ctx->stack_size = stack_adjust; } @@ -905,7 +907,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext move_addr(ctx, t1, func_addr); emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0); - move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0); + + if (insn->src_reg != BPF_PSEUDO_CALL) + move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0); + break; /* tail call */ @@ -930,7 +935,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext { const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm; - move_imm(ctx, dst, imm64, is32); + if (bpf_pseudo_func(insn)) + move_addr(ctx, dst, imm64); + else + move_imm(ctx, dst, imm64, is32); return 1; } diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h index 68586338ecf85..f9c569f539491 100644 --- a/arch/loongarch/net/bpf_jit.h +++ b/arch/loongarch/net/bpf_jit.h @@ -27,6 +27,11 @@ struct jit_data { struct jit_ctx ctx; }; +static inline void emit_nop(union loongarch_instruction *insn) +{ + insn->word = INSN_NOP; +} + #define emit_insn(ctx, func, ...) \ do { \ if (ctx->image != NULL) { \ diff --git a/arch/mips/dec/prom/init.c b/arch/mips/dec/prom/init.c index cb12eb211a49e..8d74d7d6c05b4 100644 --- a/arch/mips/dec/prom/init.c +++ b/arch/mips/dec/prom/init.c @@ -42,7 +42,7 @@ int (*__pmax_close)(int); * Detect which PROM the DECSTATION has, and set the callback vectors * appropriately. 
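The bpf_jit.c hunk above decodes BPF_LD | BPF_IMM | BPF_DW, the one eBPF instruction that spans two 8-byte slots: the low 32 bits of the constant live in the first slot's imm field and the high 32 bits in the second's. A standalone decoder for that encoding (toy struct, not the kernel's):

#include <stdint.h>

struct toy_bpf_insn {
	int32_t imm;	/* other instruction fields elided */
};

static uint64_t toy_decode_imm64(const struct toy_bpf_insn *insn)
{
	/* mirrors: (u64)(insn + 1)->imm << 32 | (u32)insn->imm */
	return ((uint64_t)insn[1].imm << 32) | (uint32_t)insn[0].imm;
}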
*/ -void __init which_prom(s32 magic, s32 *prom_vec) +static void __init which_prom(s32 magic, s32 *prom_vec) { /* * No sign of the REX PROM's magic number means we assume a non-REX diff --git a/arch/mips/include/asm/ds1287.h b/arch/mips/include/asm/ds1287.h index 46cfb01f9a14e..51cb61fd4c033 100644 --- a/arch/mips/include/asm/ds1287.h +++ b/arch/mips/include/asm/ds1287.h @@ -8,7 +8,7 @@ #define __ASM_DS1287_H extern int ds1287_timer_state(void); -extern void ds1287_set_base_clock(unsigned int clock); +extern int ds1287_set_base_clock(unsigned int hz); extern int ds1287_clockevent_init(int irq); #endif diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h index fd69c88085542..00ee3c0366305 100644 --- a/arch/mips/include/asm/hugetlb.h +++ b/arch/mips/include/asm/hugetlb.h @@ -32,7 +32,8 @@ static inline int prepare_hugepage_range(struct file *file, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) + unsigned long addr, pte_t *ptep, + unsigned long sz) { pte_t clear; pte_t pte = *ptep; @@ -47,13 +48,14 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte; + unsigned long sz = huge_page_size(hstate_vma(vma)); /* * clear the huge pte entry firstly, so that the other smp threads will * not get old pte entry after finishing flush_tlb_page and before * setting new huge pte entry */ - pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz); flush_tlb_page(vma, addr); return pte; } diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c index 9a47fbcd4638a..de64d6bb7ba36 100644 --- a/arch/mips/kernel/cevt-ds1287.c +++ b/arch/mips/kernel/cevt-ds1287.c @@ -10,6 +10,7 @@ #include #include +#include #include int ds1287_timer_state(void) diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h index 72daacc472a0a..f7a91411dcc95 100644 --- a/arch/parisc/include/asm/hugetlb.h +++ b/arch/parisc/include/asm/hugetlb.h @@ -10,7 +10,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep); + pte_t *ptep, unsigned long sz); /* * If the arch doesn't supply something else, assume that hugepage diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index aa664f7ddb639..cec2b9a581dd3 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -147,7 +147,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) + pte_t *ptep, unsigned long sz) { pte_t entry; diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig index 77306be62e9ee..129355f87f80f 100644 --- a/arch/powerpc/configs/mpc885_ads_defconfig +++ b/arch/powerpc/configs/mpc885_ads_defconfig @@ -78,4 +78,4 @@ CONFIG_DEBUG_VM_PGTABLE=y CONFIG_DETECT_HUNG_TASK=y CONFIG_BDI_SWITCH=y CONFIG_PPC_EARLY_DEBUG=y -CONFIG_GENERIC_PTDUMP=y +CONFIG_PTDUMP_DEBUGFS=y diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile index 59808592f0a1b..1e52b02d8943b 100644 --- a/arch/powerpc/crypto/Makefile +++ b/arch/powerpc/crypto/Makefile @@ -56,3 +56,4 @@ $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE 
OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y +OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index c3efacab4b941..aa90a048f319a 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -77,9 +77,17 @@ /* * With 4K page size the real_pte machinery is all nops. */ -#define __real_pte(e, p, o) ((real_pte_t){(e)}) +static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset) +{ + return (real_pte_t){pte}; +} + #define __rpte_to_pte(r) ((r).pte) -#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT) + +static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index) +{ + return pte_val(__rpte_to_pte(rpte)) >> H_PAGE_F_GIX_SHIFT; +} #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ do { \ diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index dad2e7980f245..86326587e58de 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -45,7 +45,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) + unsigned long addr, pte_t *ptep, + unsigned long sz) { return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); } @@ -55,8 +56,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte; + unsigned long sz = huge_page_size(hstate_vma(vma)); - pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz); flush_hugetlb_page(vma, addr); return pte; } diff --git a/arch/powerpc/kexec/relocate_32.S b/arch/powerpc/kexec/relocate_32.S index 104c9911f4061..dd86e338307d3 100644 --- a/arch/powerpc/kexec/relocate_32.S +++ b/arch/powerpc/kexec/relocate_32.S @@ -348,16 +348,13 @@ write_utlb: rlwinm r10, r24, 0, 22, 27 cmpwi r10, PPC47x_TLB0_4K - bne 0f li r10, 0x1000 /* r10 = 4k */ - ANNOTATE_INTRA_FUNCTION_CALL - bl 1f + beq 0f -0: /* Defaults to 256M */ lis r10, 0x1000 - bcl 20,31,$+4 +0: bcl 20,31,$+4 1: mflr r4 addi r4, r4, (2f-1b) /* virtual address of 2f */ diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 6824e8139801c..3708fa48bee95 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -242,7 +242,7 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); } -static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref, +static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, struct kvm_book3e_206_tlb_entry *gtlbe, kvm_pfn_t pfn, unsigned int wimg) { @@ -252,7 +252,11 @@ static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref, /* Use guest supplied MAS2_G and MAS2_E */ ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; - return tlbe_is_writable(gtlbe); + /* Mark the page accessed */ + kvm_set_pfn_accessed(pfn); + + if (tlbe_is_writable(gtlbe)) + kvm_set_pfn_dirty(pfn); } static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) @@ -322,7 +326,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, { struct kvm_memory_slot *slot; unsigned long pfn = 0; /* silence GCC warning 
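The hash-4k.h hunk above trades macros for equivalent static inline functions. A standalone sketch (toy types, not the kernel's) of what that buys: the macro never evaluates or type-checks the arguments it ignores, while the inline function checks all of them at no runtime cost:

```c
#include <stdio.h>

typedef struct { unsigned long pte; } real_pte_t;

/* p and o vanish unevaluated and unchecked */
#define REAL_PTE_MACRO(e, p, o) ((real_pte_t){(e)})

static inline real_pte_t real_pte_func(unsigned long e, unsigned long *p, int o)
{
	(void)p;
	(void)o;	/* still unused, but now type-checked */
	return (real_pte_t){e};
}

int main(void)
{
	real_pte_t a = REAL_PTE_MACRO(1, "oops", 0);	/* accepted: "oops" is discarded */
	real_pte_t b = real_pte_func(1, 0, 0);		/* a mistyped pointer here would warn */

	printf("%lu %lu\n", a.pte, b.pte);
	return 0;
}
```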
*/ - struct page *page = NULL; unsigned long hva; int pfnmap = 0; int tsize = BOOK3E_PAGESZ_4K; @@ -334,7 +337,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, unsigned int wimg = 0; pgd_t *pgdir; unsigned long flags; - bool writable = false; /* used to check for invalidations in progress */ mmu_seq = kvm->mmu_invalidate_seq; @@ -444,7 +446,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, if (likely(!pfnmap)) { tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT); - pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page); + pfn = gfn_to_pfn_memslot(slot, gfn); if (is_error_noslot_pfn(pfn)) { if (printk_ratelimit()) pr_err("%s: real page not found for gfn %lx\n", @@ -489,7 +491,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, } local_irq_restore(flags); - writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); + kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, ref, gvaddr, stlbe); @@ -497,8 +499,11 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, kvmppc_mmu_flush_icache(pfn); out: - kvm_release_faultin_page(kvm, page, !!ret, writable); spin_unlock(&kvm->mmu_lock); + + /* Drop refcount on page, so that mmu notifiers can clear it */ + kvm_release_pfn_clean(pfn); + return ret; } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index f14329989e9a7..4b6ce4f07bc2c 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -550,12 +550,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) #ifdef CONFIG_PPC_BOOK3S_64 case KVM_CAP_SPAPR_TCE: + fallthrough; case KVM_CAP_SPAPR_TCE_64: - r = 1; - break; case KVM_CAP_SPAPR_TCE_VFIO: - r = !!cpu_has_feature(CPU_FTR_HVMODE); - break; case KVM_CAP_PPC_RTAS: case KVM_CAP_PPC_FIXUP_HCALL: case KVM_CAP_PPC_ENABLE_HCALL: diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index acdab294b340a..c1d9b031f0d57 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -108,7 +108,7 @@ static int text_area_cpu_up(unsigned int cpu) unsigned long addr; int err; - area = get_vm_area(PAGE_SIZE, VM_ALLOC); + area = get_vm_area(PAGE_SIZE, 0); if (!area) { WARN_ONCE(1, "Failed to create text area for cpu %d\n", cpu); @@ -493,7 +493,9 @@ static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool rep orig_mm = start_using_temp_mm(patching_mm); + kasan_disable_current(); err = __patch_instructions(patch_addr, code, len, repeat_instr); + kasan_enable_current(); /* context synchronisation performed by __patch_instructions */ stop_using_temp_mm(patching_mm, orig_mm); diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c index 827d338deaf4c..2c2999de6bfa2 100644 --- a/arch/powerpc/platforms/cell/spufs/gang.c +++ b/arch/powerpc/platforms/cell/spufs/gang.c @@ -25,6 +25,7 @@ struct spu_gang *alloc_spu_gang(void) mutex_init(&gang->aff_mutex); INIT_LIST_HEAD(&gang->list); INIT_LIST_HEAD(&gang->aff_list_head); + gang->alive = 1; out: return gang; diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 70236d1df3d3e..9f9e4b8716278 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -192,13 +192,32 @@ static int spufs_fill_dir(struct dentry *dir, return -ENOMEM; ret = spufs_new_file(dir->d_sb, dentry, files->ops, files->mode & mode, 
files->size, ctx); - if (ret) + if (ret) { + dput(dentry); return ret; + } files++; } return 0; } +static void unuse_gang(struct dentry *dir) +{ + struct inode *inode = dir->d_inode; + struct spu_gang *gang = SPUFS_I(inode)->i_gang; + + if (gang) { + bool dead; + + inode_lock(inode); // exclusion with spufs_create_context() + dead = !--gang->alive; + inode_unlock(inode); + + if (dead) + simple_recursive_removal(dir, NULL); + } +} + static int spufs_dir_close(struct inode *inode, struct file *file) { struct inode *parent; @@ -213,6 +232,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file) inode_unlock(parent); WARN_ON(ret); + unuse_gang(dir->d_parent); return dcache_dir_close(inode, file); } @@ -405,7 +425,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, { int ret; int affinity; - struct spu_gang *gang; + struct spu_gang *gang = SPUFS_I(inode)->i_gang; struct spu_context *neighbor; struct path path = {.mnt = mnt, .dentry = dentry}; @@ -420,11 +440,15 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader) return -ENODEV; - gang = NULL; + if (gang) { + if (!gang->alive) + return -ENOENT; + gang->alive++; + } + neighbor = NULL; affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU); if (affinity) { - gang = SPUFS_I(inode)->i_gang; if (!gang) return -EINVAL; mutex_lock(&gang->aff_mutex); @@ -436,8 +460,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, } ret = spufs_mkdir(inode, dentry, flags, mode & 0777); - if (ret) + if (ret) { + if (neighbor) + put_spu_context(neighbor); goto out_aff_unlock; + } if (affinity) { spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx, @@ -453,6 +480,8 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, out_aff_unlock: if (affinity) mutex_unlock(&gang->aff_mutex); + if (ret && gang) + gang->alive--; // can't reach 0 return ret; } @@ -482,6 +511,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) inode->i_fop = &simple_dir_operations; d_instantiate(dentry, inode); + dget(dentry); inc_nlink(dir); inc_nlink(d_inode(dentry)); return ret; @@ -492,6 +522,21 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) return ret; } +static int spufs_gang_close(struct inode *inode, struct file *file) +{ + unuse_gang(file->f_path.dentry); + return dcache_dir_close(inode, file); +} + +static const struct file_operations spufs_gang_fops = { + .open = dcache_dir_open, + .release = spufs_gang_close, + .llseek = dcache_dir_lseek, + .read = generic_read_dir, + .iterate_shared = dcache_readdir, + .fsync = noop_fsync, +}; + static int spufs_gang_open(const struct path *path) { int ret; @@ -511,7 +556,7 @@ static int spufs_gang_open(const struct path *path) return PTR_ERR(filp); } - filp->f_op = &simple_dir_operations; + filp->f_op = &spufs_gang_fops; fd_install(ret, filp); return ret; } @@ -526,10 +571,8 @@ static int spufs_create_gang(struct inode *inode, ret = spufs_mkgang(inode, dentry, mode & 0777); if (!ret) { ret = spufs_gang_open(&path); - if (ret < 0) { - int err = simple_rmdir(inode, dentry); - WARN_ON(err); - } + if (ret < 0) + unuse_gang(dentry); } return ret; } diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 84958487f696a..d33787c57c39a 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -151,6 +151,8 @@ struct spu_gang { int aff_flags; struct spu *aff_ref_spu; 
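The spufs changes above hang gang lifetime on an `alive` count: the gang directory itself holds one reference, each context created in it takes another under the inode lock, and whoever drops the count to zero removes the tree; creating a context in a dead gang fails with -ENOENT rather than resurrecting it. A standalone model of that rule (plain C with hypothetical names; a pthread mutex stands in for the inode lock, the return values for -ENOENT and the recursive removal):

```c
#include <pthread.h>
#include <stdbool.h>

struct gang {
	pthread_mutex_t lock;
	int alive;		/* starts at 1 for the gang directory itself */
};

int gang_join(struct gang *g)
{
	int ret = 0;

	pthread_mutex_lock(&g->lock);
	if (!g->alive)
		ret = -1;	/* dead gangs stay dead: no resurrection */
	else
		g->alive++;
	pthread_mutex_unlock(&g->lock);
	return ret;
}

bool gang_leave(struct gang *g)
{
	bool dead;

	pthread_mutex_lock(&g->lock);
	dead = (--g->alive == 0);	/* last one out tears the gang down */
	pthread_mutex_unlock(&g->lock);
	return dead;
}
```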
atomic_t aff_sched_count; + + int alive; }; /* Flag bits for spu_gang aff_flags */ diff --git a/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h b/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h index 256de17f52611..ae49c908e7fb3 100644 --- a/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h +++ b/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h @@ -89,7 +89,7 @@ #define GPOUT_SYS_SDIO1_DATA1 59 #define GPOUT_SYS_SDIO1_DATA2 60 #define GPOUT_SYS_SDIO1_DATA3 61 -#define GPOUT_SYS_SDIO1_DATA4 63 +#define GPOUT_SYS_SDIO1_DATA4 62 #define GPOUT_SYS_SDIO1_DATA5 63 #define GPOUT_SYS_SDIO1_DATA6 64 #define GPOUT_SYS_SDIO1_DATA7 65 diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile index f0da9d7b39c37..bc6c77ba837d2 100644 --- a/arch/riscv/errata/Makefile +++ b/arch/riscv/errata/Makefile @@ -1,5 +1,9 @@ ifdef CONFIG_RELOCATABLE -KBUILD_CFLAGS += -fno-pie +# We can't use PIC/PIE when handling early-boot errata parsing, as the kernel +# doesn't have a GOT setup at that point. So instead just use medany: it's +# usually position-independent, so it should be good enough for the errata +# handling. +KBUILD_CFLAGS += -fno-pie -mcmodel=medany endif ifdef CONFIG_RISCV_ALTERNATIVE_EARLY diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index 2cddd79ff21b1..f253c8dae878e 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -92,7 +92,7 @@ struct dyn_arch_ftrace { #define make_call_t0(caller, callee, call) \ do { \ unsigned int offset = \ - (unsigned long) callee - (unsigned long) caller; \ + (unsigned long) (callee) - (unsigned long) (caller); \ call[0] = to_auipc_t0(offset); \ call[1] = to_jalr_t0(offset); \ } while (0) @@ -108,7 +108,7 @@ do { \ #define make_call_ra(caller, callee, call) \ do { \ unsigned int offset = \ - (unsigned long) callee - (unsigned long) caller; \ + (unsigned long) (callee) - (unsigned long) (caller); \ call[0] = to_auipc_ra(offset); \ call[1] = to_jalr_ra(offset); \ } while (0) diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h index fc8130f995c1e..6907c456ac8c0 100644 --- a/arch/riscv/include/asm/futex.h +++ b/arch/riscv/include/asm/futex.h @@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \ : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp) - : [ov] "Jr" (oldval), [nv] "Jr" (newval) + : [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval) : "memory"); __disable_user_access(); diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h index faf3624d80577..4461264977684 100644 --- a/arch/riscv/include/asm/hugetlb.h +++ b/arch/riscv/include/asm/hugetlb.h @@ -28,7 +28,8 @@ void set_huge_pte_at(struct mm_struct *mm, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep); + unsigned long addr, pte_t *ptep, + unsigned long sz); #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h index 46677daf708bd..cc11c4544cffd 100644 --- a/arch/riscv/include/asm/kgdb.h +++ b/arch/riscv/include/asm/kgdb.h @@ -19,16 +19,9 @@ #ifndef __ASSEMBLY__ +void arch_kgdb_breakpoint(void); extern unsigned long kgdb_compiled_break; -static inline void arch_kgdb_breakpoint(void) -{ - asm(".global kgdb_compiled_break\n" - ".option norvc\n" - "kgdb_compiled_break: ebreak\n" - ".option 
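The ftrace.h fix above is the classic macro-argument pitfall: a cast binds tighter than addition, so without parentheses it applies only to the first token of an expression argument. A runnable demonstration with made-up addresses:

```c
#include <stdio.h>

#define DIST_BUGGY(caller, callee) \
	((unsigned long) callee - (unsigned long) caller)
#define DIST_FIXED(caller, callee) \
	((unsigned long) (callee) - (unsigned long) (caller))

int main(void)
{
	unsigned long base = 0x1000;

	/*
	 * The buggy form expands so that "+ 0x20" and "+ 0x10" are
	 * applied after the subtraction, not inside the casts.
	 */
	printf("buggy: %#lx\n", DIST_BUGGY(base + 0x10, base + 0x20));	/* 0x30 */
	printf("fixed: %#lx\n", DIST_FIXED(base + 0x10, base + 0x20));	/* 0x10 */
	return 0;
}
```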
rvc\n"); -} - #endif /* !__ASSEMBLY__ */ #define DBG_REG_ZERO "zero" diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 121fff429dce6..eceabf59ae482 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -62,8 +62,11 @@ static inline void syscall_get_arguments(struct task_struct *task, unsigned long *args) { args[0] = regs->orig_a0; - args++; - memcpy(args, ®s->a1, 5 * sizeof(args[0])); + args[1] = regs->a1; + args[2] = regs->a2; + args[3] = regs->a3; + args[4] = regs->a4; + args[5] = regs->a5; } static inline int syscall_get_arch(struct task_struct *task) diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c index 2d40736fc37ce..26b085dbdd073 100644 --- a/arch/riscv/kernel/cacheinfo.c +++ b/arch/riscv/kernel/cacheinfo.c @@ -108,11 +108,11 @@ int populate_cache_leaves(unsigned int cpu) if (!np) return -ENOENT; - if (of_property_read_bool(np, "cache-size")) + if (of_property_present(np, "cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level); - if (of_property_read_bool(np, "i-cache-size")) + if (of_property_present(np, "i-cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level); - if (of_property_read_bool(np, "d-cache-size")) + if (of_property_present(np, "d-cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level); prev = np; @@ -125,11 +125,11 @@ int populate_cache_leaves(unsigned int cpu) break; if (level <= levels) break; - if (of_property_read_bool(np, "cache-size")) + if (of_property_present(np, "cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level); - if (of_property_read_bool(np, "i-cache-size")) + if (of_property_present(np, "i-cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level); - if (of_property_read_bool(np, "d-cache-size")) + if (of_property_present(np, "d-cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level); levels = level; } diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 3a8eeaa9310c3..308430af3e8f8 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -454,7 +454,7 @@ static void __init riscv_resolve_isa(unsigned long *source_isa, if (bit < RISCV_ISA_EXT_BASE) *this_hwcap |= isa2hwcap[bit]; } - } while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa))); + } while (loop && !bitmap_equal(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX)); } static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap) diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c index 3c37661801f95..e783a72d051f4 100644 --- a/arch/riscv/kernel/elf_kexec.c +++ b/arch/riscv/kernel/elf_kexec.c @@ -468,6 +468,9 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, case R_RISCV_ALIGN: case R_RISCV_RELAX: break; + case R_RISCV_64: + *(u64 *)loc = val; + break; default: pr_err("Unknown rela relocation: %d\n", r_type); return -ENOEXEC; diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c index 2e0266ae6bd72..9f3db3503dabd 100644 --- a/arch/riscv/kernel/kgdb.c +++ b/arch/riscv/kernel/kgdb.c @@ -254,6 +254,12 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) regs->epc = pc; } +noinline void arch_kgdb_breakpoint(void) +{ + asm(".global kgdb_compiled_break\n" + "kgdb_compiled_break: ebreak\n"); +} + void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer, char *remcom_out_buffer) { diff --git a/arch/riscv/kernel/module-sections.c b/arch/riscv/kernel/module-sections.c index 
e264e59e596e8..91d0b355ceeff 100644 --- a/arch/riscv/kernel/module-sections.c +++ b/arch/riscv/kernel/module-sections.c @@ -73,16 +73,17 @@ static bool duplicate_rela(const Elf_Rela *rela, int idx) static void count_max_entries(Elf_Rela *relas, int num, unsigned int *plts, unsigned int *gots) { - unsigned int type, i; - - for (i = 0; i < num; i++) { - type = ELF_RISCV_R_TYPE(relas[i].r_info); - if (type == R_RISCV_CALL_PLT) { + for (int i = 0; i < num; i++) { + switch (ELF_R_TYPE(relas[i].r_info)) { + case R_RISCV_CALL_PLT: + case R_RISCV_PLT32: if (!duplicate_rela(relas, i)) (*plts)++; - } else if (type == R_RISCV_GOT_HI20) { + break; + case R_RISCV_GOT_HI20: if (!duplicate_rela(relas, i)) (*gots)++; + break; } } } diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 47d0ebeec93c2..7f6147c18033b 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -648,7 +648,7 @@ process_accumulated_relocations(struct module *me, kfree(bucket_iter); } - kfree(*relocation_hashtable); + kvfree(*relocation_hashtable); } static int add_relocation_to_accumulate(struct module *me, int type, @@ -752,9 +752,10 @@ initialize_relocation_hashtable(unsigned int num_relocations, hashtable_size <<= should_double_size; - *relocation_hashtable = kmalloc_array(hashtable_size, - sizeof(**relocation_hashtable), - GFP_KERNEL); + /* Number of relocations may be large, so kvmalloc it */ + *relocation_hashtable = kvmalloc_array(hashtable_size, + sizeof(**relocation_hashtable), + GFP_KERNEL); if (!*relocation_hashtable) return 0; @@ -859,7 +860,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, } j++; - if (j > sechdrs[relsec].sh_size / sizeof(*rel)) + if (j == num_relocations) j = 0; } while (j_idx != j); diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 2b3c152d3c91f..194bda6d74ce7 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -66,6 +66,9 @@ static struct resource bss_res = { .name = "Kernel bss", }; static struct resource elfcorehdr_res = { .name = "ELF Core hdr", }; #endif +static int num_standard_resources; +static struct resource *standard_resources; + static int __init add_resource(struct resource *parent, struct resource *res) { @@ -139,7 +142,7 @@ static void __init init_resources(void) struct resource *res = NULL; struct resource *mem_res = NULL; size_t mem_res_sz = 0; - int num_resources = 0, res_idx = 0; + int num_resources = 0, res_idx = 0, non_resv_res = 0; int ret = 0; /* + 1 as memblock_alloc() might increase memblock.reserved.cnt */ @@ -195,6 +198,7 @@ static void __init init_resources(void) /* Add /memory regions to the resource tree */ for_each_mem_region(region) { res = &mem_res[res_idx--]; + non_resv_res++; if (unlikely(memblock_is_nomap(region))) { res->name = "Reserved"; @@ -212,6 +216,9 @@ static void __init init_resources(void) goto error; } + num_standard_resources = non_resv_res; + standard_resources = &mem_res[res_idx + 1]; + /* Clean-up any unused pre-allocated resources */ if (res_idx >= 0) memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res)); @@ -223,6 +230,33 @@ static void __init init_resources(void) memblock_free(mem_res, mem_res_sz); } +static int __init reserve_memblock_reserved_regions(void) +{ + u64 i, j; + + for (i = 0; i < num_standard_resources; i++) { + struct resource *mem = &standard_resources[i]; + phys_addr_t r_start, r_end, mem_size = resource_size(mem); + + if (!memblock_is_region_reserved(mem->start, mem_size)) + continue; + + for_each_reserved_mem_range(j, 
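The module.c hunk above also tightens the wrap test in the circular relocation scan: wrapping on `j > n` lets the index sit at n for one iteration, i.e. one element past the end of the table. A standalone trace of the two policies over a 4-entry table:

```c
#include <stdio.h>

int main(void)
{
	int n = 4;

	printf("buggy:");
	for (int j = 2, steps = 0; steps < 6; steps++) {
		printf(" %d", j);	/* visits index 4, one past the end */
		j++;
		if (j > n)
			j = 0;
	}

	printf("\nfixed:");
	for (int j = 2, steps = 0; steps < 6; steps++) {
		printf(" %d", j);	/* stays within 0..3 */
		j++;
		if (j == n)
			j = 0;
	}
	printf("\n");
	return 0;
}
```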
&r_start, &r_end) { + resource_size_t start, end; + + start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start); + end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end); + + if (start > mem->end || end < mem->start) + continue; + + reserve_region_with_split(mem, start, end, "Reserved"); + } + } + + return 0; +} +arch_initcall(reserve_memblock_reserved_regions); static void __init parse_dtb(void) { @@ -288,8 +322,8 @@ void __init setup_arch(char **cmdline_p) riscv_init_cbo_blocksizes(); riscv_fill_hwcap(); - init_rt_signal_env(); apply_boot_alternatives(); + init_rt_signal_env(); if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) && riscv_isa_extension_available(NULL, ZICBOM)) diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index dcd2824194561..c3c517b9eee55 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -215,12 +215,6 @@ static size_t get_rt_frame_size(bool cal_all) if (cal_all || riscv_v_vstate_query(task_pt_regs(current))) total_context_size += riscv_v_sc_size; } - /* - * Preserved a __riscv_ctx_hdr for END signal context header if an - * extension uses __riscv_extra_ext_header - */ - if (total_context_size) - total_context_size += sizeof(struct __riscv_ctx_hdr); frame_size += total_context_size; diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c index 2707a51b082ca..78ac3216a54dd 100644 --- a/arch/riscv/kvm/vcpu_pmu.c +++ b/arch/riscv/kvm/vcpu_pmu.c @@ -666,6 +666,7 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba .type = etype, .size = sizeof(struct perf_event_attr), .pinned = true, + .disabled = true, /* * It should never reach here if the platform doesn't support the sscofpmf * extension as mode filtering won't work without it. diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c index dce667f4b6ab0..3070bb31745de 100644 --- a/arch/riscv/kvm/vcpu_sbi_hsm.c +++ b/arch/riscv/kvm/vcpu_sbi_hsm.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -79,12 +80,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu) target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid); if (!target_vcpu) return SBI_ERR_INVALID_PARAM; - if (!kvm_riscv_vcpu_stopped(target_vcpu)) - return SBI_HSM_STATE_STARTED; - else if (vcpu->stat.generic.blocking) + if (kvm_riscv_vcpu_stopped(target_vcpu)) + return SBI_HSM_STATE_STOPPED; + else if (target_vcpu->stat.generic.blocking) return SBI_HSM_STATE_SUSPENDED; else - return SBI_HSM_STATE_STOPPED; + return SBI_HSM_STATE_STARTED; } static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, @@ -109,7 +110,7 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, } return 0; case SBI_EXT_HSM_HART_SUSPEND: - switch (cp->a0) { + switch (lower_32_bits(cp->a0)) { case SBI_HSM_SUSPEND_RET_DEFAULT: kvm_riscv_vcpu_wfi(vcpu); break; diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c index 9c2ab3dfa93aa..5fbf3f94f1e85 100644 --- a/arch/riscv/kvm/vcpu_sbi_replace.c +++ b/arch/riscv/kvm/vcpu_sbi_replace.c @@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, u64 next_cycle; if (cp->a6 != SBI_EXT_TIME_SET_TIMER) { - retdata->err_val = SBI_ERR_INVALID_PARAM; + retdata->err_val = SBI_ERR_NOT_SUPPORTED; return 0; } @@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_cpu_context *cp = &vcpu->arch.guest_context; unsigned long hmask = cp->a0; unsigned long hbase = cp->a1; + 
unsigned long hart_bit = 0, sentmask = 0; if (cp->a6 != SBI_EXT_IPI_SEND_IPI) { - retdata->err_val = SBI_ERR_INVALID_PARAM; + retdata->err_val = SBI_ERR_NOT_SUPPORTED; return 0; } @@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, if (hbase != -1UL) { if (tmp->vcpu_id < hbase) continue; - if (!(hmask & (1UL << (tmp->vcpu_id - hbase)))) + hart_bit = tmp->vcpu_id - hbase; + if (hart_bit >= __riscv_xlen) + goto done; + if (!(hmask & (1UL << hart_bit))) continue; } ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT); if (ret < 0) break; + sentmask |= 1UL << hart_bit; kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD); } +done: + if (hbase != -1UL && (hmask ^ sentmask)) + retdata->err_val = SBI_ERR_INVALID_PARAM; + return ret; } diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c index 42314f0939220..375dd96bb4a0d 100644 --- a/arch/riscv/mm/hugetlbpage.c +++ b/arch/riscv/mm/hugetlbpage.c @@ -148,22 +148,25 @@ unsigned long hugetlb_mask_last_page(struct hstate *h) static pte_t get_clear_contig(struct mm_struct *mm, unsigned long addr, pte_t *ptep, - unsigned long pte_num) + unsigned long ncontig) { - pte_t orig_pte = ptep_get(ptep); - unsigned long i; - - for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) { - pte_t pte = ptep_get_and_clear(mm, addr, ptep); - - if (pte_dirty(pte)) - orig_pte = pte_mkdirty(orig_pte); - - if (pte_young(pte)) - orig_pte = pte_mkyoung(orig_pte); + pte_t pte, tmp_pte; + bool present; + + pte = ptep_get_and_clear(mm, addr, ptep); + present = pte_present(pte); + while (--ncontig) { + ptep++; + addr += PAGE_SIZE; + tmp_pte = ptep_get_and_clear(mm, addr, ptep); + if (present) { + if (pte_dirty(tmp_pte)) + pte = pte_mkdirty(pte); + if (pte_young(tmp_pte)) + pte = pte_mkyoung(pte); + } } - - return orig_pte; + return pte; } static pte_t get_clear_contig_flush(struct mm_struct *mm, @@ -212,6 +215,26 @@ static void clear_flush(struct mm_struct *mm, flush_tlb_range(&vma, saddr, addr); } +static int num_contig_ptes_from_size(unsigned long sz, size_t *pgsize) +{ + unsigned long hugepage_shift; + + if (sz >= PGDIR_SIZE) + hugepage_shift = PGDIR_SHIFT; + else if (sz >= P4D_SIZE) + hugepage_shift = P4D_SHIFT; + else if (sz >= PUD_SIZE) + hugepage_shift = PUD_SHIFT; + else if (sz >= PMD_SIZE) + hugepage_shift = PMD_SHIFT; + else + hugepage_shift = PAGE_SHIFT; + + *pgsize = 1 << hugepage_shift; + + return sz >> hugepage_shift; +} + /* * When dealing with NAPOT mappings, the privileged specification indicates that * "if an update needs to be made, the OS generally should first mark all of the @@ -226,22 +249,10 @@ void set_huge_pte_at(struct mm_struct *mm, pte_t pte, unsigned long sz) { - unsigned long hugepage_shift, pgsize; + size_t pgsize; int i, pte_num; - if (sz >= PGDIR_SIZE) - hugepage_shift = PGDIR_SHIFT; - else if (sz >= P4D_SIZE) - hugepage_shift = P4D_SHIFT; - else if (sz >= PUD_SIZE) - hugepage_shift = PUD_SHIFT; - else if (sz >= PMD_SIZE) - hugepage_shift = PMD_SHIFT; - else - hugepage_shift = PAGE_SHIFT; - - pte_num = sz >> hugepage_shift; - pgsize = 1 << hugepage_shift; + pte_num = num_contig_ptes_from_size(sz, &pgsize); if (!pte_present(pte)) { for (i = 0; i < pte_num; i++, ptep++, addr += pgsize) @@ -293,15 +304,16 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) + pte_t *ptep, unsigned long sz) { + size_t pgsize; pte_t orig_pte = ptep_get(ptep); int pte_num; if (!pte_napot(orig_pte)) return 
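The new num_contig_ptes_from_size() above replaces reading the NAPOT order back out of the PTE, which cannot work once the entry has been cleared or was never present; threading the size argument through huge_ptep_get_and_clear() makes the count derivable up front. A standalone version with illustrative Sv39-style shifts (constants are for the demo only):

```c
#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PUD_SHIFT  30

static unsigned long contig_ptes(unsigned long sz, unsigned long *pgsize)
{
	unsigned long shift;

	if (sz >= (1UL << PUD_SHIFT))
		shift = PUD_SHIFT;
	else if (sz >= (1UL << PMD_SHIFT))
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	*pgsize = 1UL << shift;
	return sz >> shift;	/* e.g. a 64K NAPOT page -> 16 x 4K PTEs */
}

int main(void)
{
	unsigned long pgsize, n = contig_ptes(64 * 1024, &pgsize);

	printf("64K -> %lu entries of %lu bytes\n", n, pgsize);
	return 0;
}
```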
ptep_get_and_clear(mm, addr, ptep); - pte_num = napot_pte_num(napot_cont_order(orig_pte)); + pte_num = num_contig_ptes_from_size(sz, &pgsize); return get_clear_contig(mm, addr, ptep, pte_num); } @@ -351,6 +363,7 @@ void huge_pte_clear(struct mm_struct *mm, pte_t *ptep, unsigned long sz) { + size_t pgsize; pte_t pte = ptep_get(ptep); int i, pte_num; @@ -359,8 +372,9 @@ void huge_pte_clear(struct mm_struct *mm, return; } - pte_num = napot_pte_num(napot_cont_order(pte)); - for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) + pte_num = num_contig_ptes_from_size(sz, &pgsize); + + for (i = 0; i < pte_num; i++, addr += pgsize, ptep++) pte_clear(mm, addr, ptep); } diff --git a/arch/riscv/purgatory/entry.S b/arch/riscv/purgatory/entry.S index 0e6ca6d5ae4b4..c5db2f072c341 100644 --- a/arch/riscv/purgatory/entry.S +++ b/arch/riscv/purgatory/entry.S @@ -12,6 +12,7 @@ .text +.align 2 SYM_CODE_START(purgatory_start) lla sp, .Lstack diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 9b77209327870..5b97af3117092 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -15,7 +15,7 @@ KBUILD_CFLAGS_MODULE += -fPIC KBUILD_AFLAGS += -m64 KBUILD_CFLAGS += -m64 KBUILD_CFLAGS += -fPIC -LDFLAGS_vmlinux := -no-pie --emit-relocs --discard-none +LDFLAGS_vmlinux := $(call ld-option,-no-pie) --emit-relocs --discard-none extra_tools := relocs aflags_dwarf := -Wa,-gdwarf-2 KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__ diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index c2ee0745f59ed..7b69be63d5d20 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -75,7 +75,7 @@ static int cmma_test_essa(void) : [reg1] "=&d" (reg1), [reg2] "=&a" (reg2), [rc] "+&d" (rc), - [tmp] "=&d" (tmp), + [tmp] "+&d" (tmp), "+Q" (get_lowcore()->program_new_psw), "=Q" (old) : [psw_old] "a" (&old), diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index cf1b5d6fb1a62..4731a51241ba8 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h @@ -20,8 +20,15 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep); -pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep); +pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep); + +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned long sz) +{ + return __huge_ptep_get_and_clear(mm, addr, ptep); +} /* * If the arch doesn't supply something else, assume that hugepage @@ -57,7 +64,7 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { - return huge_ptep_get_and_clear(vma->vm_mm, address, ptep); + return __huge_ptep_get_and_clear(vma->vm_mm, address, ptep); } static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, @@ -66,7 +73,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, { int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte); if (changed) { - huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + __huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); __set_huge_pte_at(vma->vm_mm, addr, ptep, pte); } return changed; @@ -75,7 +82,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct 
*vma, static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep); + pte_t pte = __huge_ptep_get_and_clear(mm, addr, ptep); __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte)); } diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index fc9933a743d69..251e0372ccbd0 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -34,8 +34,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); #define ioremap_wc(addr, size) \ ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL))) -#define ioremap_wt(addr, size) \ - ioremap_prot((addr), (size), pgprot_val(pgprot_writethrough(PAGE_KERNEL))) static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 0ffbaf7419558..5ee73f245a0c0 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1365,9 +1365,6 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr); #define pgprot_writecombine pgprot_writecombine pgprot_t pgprot_writecombine(pgprot_t prot); -#define pgprot_writethrough pgprot_writethrough -pgprot_t pgprot_writethrough(pgprot_t prot); - #define PFN_PTE_SHIFT PAGE_SHIFT /* diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 594da4cba707a..a7de838f80318 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -501,7 +501,7 @@ SYM_CODE_START(mcck_int_handler) clgrjl %r9,%r14, 4f larl %r14,.Lsie_leave clgrjhe %r9,%r14, 4f - lg %r10,__LC_PCPU + lg %r10,__LC_PCPU(%r13) oi __PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST 4: BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST SIEEXIT __SF_SIE_CONTROL(%r15),%r13 diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index c3075e4a8efc3..6d6b057b562fd 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -858,18 +858,13 @@ static int cpumf_pmu_event_type(struct perf_event *event) static int cpumf_pmu_event_init(struct perf_event *event) { unsigned int type = event->attr.type; - int err; + int err = -ENOENT; if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW) err = __hw_perf_event_init(event, type); else if (event->pmu->type == type) /* Registered as unknown PMU */ err = __hw_perf_event_init(event, cpumf_pmu_event_type(event)); - else - return -ENOENT; - - if (unlikely(err) && event->destroy) - event->destroy(event); return err; } @@ -1819,8 +1814,6 @@ static int cfdiag_event_init(struct perf_event *event) event->destroy = hw_perf_event_destroy; err = cfdiag_event_init2(event); - if (unlikely(err)) - event->destroy(event); out: return err; } diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 331e0654d61d7..efdd6ead7ba81 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -898,9 +898,6 @@ static int cpumsf_pmu_event_init(struct perf_event *event) event->attr.exclude_idle = 0; err = __hw_perf_event_init(event); - if (unlikely(err)) - if (event->destroy) - event->destroy(event); return err; } diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 160b2acba8db2..908bae8498435 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -284,10 +284,10 @@ static void __init test_monitor_call(void) return; asm volatile( " mc 0,0\n" - "0: xgr %0,%0\n" + "0: lhi %[val],0\n" "1:\n" - EX_TABLE(0b,1b) - : "+d" (val)); + EX_TABLE(0b, 1b) + : 
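The s390 hugetlb.h hunk above shows the low-friction way to adopt the new interface: keep the original helper under a double-underscore name for the arch's internal call sites, and satisfy the generic prototype with a thin wrapper that accepts, and ignores, the size hint. Condensed from the diff (kernel context):

```c
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    unsigned long sz)
{
	/* sz is unused on s390: the rste encodes everything needed */
	return __huge_ptep_get_and_clear(mm, addr, ptep);
}
```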
[val] "+d" (val)); if (!val) panic("Monitor call doesn't work!\n"); } diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index ded0eff58a192..9c1ba8c0cac61 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -174,8 +174,8 @@ pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep) return __rste_to_pte(pte_val(*ptep)); } -pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) +pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) { pte_t pte = huge_ptep_get(mm, addr, ptep); pmd_t *pmdp = (pmd_t *) ptep; diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 2c944bafb0309..b03c665d72426 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -34,16 +34,6 @@ pgprot_t pgprot_writecombine(pgprot_t prot) } EXPORT_SYMBOL_GPL(pgprot_writecombine); -pgprot_t pgprot_writethrough(pgprot_t prot) -{ - /* - * mio_wb_bit_mask may be set on a different CPU, but it is only set - * once at init and only read afterwards. - */ - return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask); -} -EXPORT_SYMBOL_GPL(pgprot_writethrough); - static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int nodat) { diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c index c385468293455..23c27c6320130 100644 --- a/arch/s390/pci/pci_bus.c +++ b/arch/s390/pci/pci_bus.c @@ -335,6 +335,9 @@ static bool zpci_bus_is_isolated_vf(struct zpci_bus *zbus, struct zpci_dev *zdev { struct pci_dev *pdev; + if (!zdev->vfn) + return false; + pdev = zpci_iov_find_parent_pf(zbus, zdev); if (!pdev) return true; diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index de5c0b389a3ec..4779c3cb6cfab 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -171,8 +171,12 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, args.address = mmio_addr; args.vma = vma; ret = follow_pfnmap_start(&args); - if (ret) - goto out_unlock_mmap; + if (ret) { + fixup_user_fault(current->mm, mmio_addr, FAULT_FLAG_WRITE, NULL); + ret = follow_pfnmap_start(&args); + if (ret) + goto out_unlock_mmap; + } io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); @@ -305,14 +309,18 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) goto out_unlock_mmap; ret = -EACCES; - if (!(vma->vm_flags & VM_WRITE)) + if (!(vma->vm_flags & VM_READ)) goto out_unlock_mmap; args.vma = vma; args.address = mmio_addr; ret = follow_pfnmap_start(&args); - if (ret) - goto out_unlock_mmap; + if (ret) { + fixup_user_fault(current->mm, mmio_addr, 0, NULL); + ret = follow_pfnmap_start(&args); + if (ret) + goto out_unlock_mmap; + } io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index c714ca6a05aa0..e7a9cdd498dca 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -20,7 +20,7 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep); + pte_t *ptep, unsigned long sz); #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 2b7f358762c18..dc28f2c4eee3f 100644 
--- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -936,7 +936,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr) { - arch_enter_lazy_mmu_mode(); for (;;) { __set_pte_at(mm, addr, ptep, pte, 0); if (--nr == 0) @@ -945,7 +944,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_val(pte) += PAGE_SIZE; addr += PAGE_SIZE; } - arch_leave_lazy_mmu_mode(); } #define set_ptes set_ptes diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index cc91ca7a1e182..c276d70a74799 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -368,7 +368,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, } pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) + pte_t *ptep, unsigned long sz) { unsigned int i, nptes, orig_shift, shift; unsigned long size; diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 8648a50afe889..a35ddcca5e766 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -52,8 +52,10 @@ void flush_tlb_pending(void) void arch_enter_lazy_mmu_mode(void) { - struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); + struct tlb_batch *tb; + preempt_disable(); + tb = this_cpu_ptr(&tlb_batch); tb->active = 1; } @@ -64,6 +66,7 @@ void arch_leave_lazy_mmu_mode(void) if (tb->tlb_nr) flush_tlb_pending(); tb->active = 0; + preempt_enable(); } static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h index 9a039d6f1f748..77a8593f219a1 100644 --- a/arch/um/include/shared/os.h +++ b/arch/um/include/shared/os.h @@ -218,7 +218,6 @@ extern int os_protect_memory(void *addr, unsigned long len, extern int os_unmap_memory(void *addr, int len); extern int os_drop_memory(void *addr, int length); extern int can_drop_memory(void); -extern int os_mincore(void *addr, unsigned long len); /* execvp.c */ extern int execvp_noalloc(char *buf, const char *file, char *const argv[]); diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile index f8567b933ffaa..4df1cd0d20179 100644 --- a/arch/um/kernel/Makefile +++ b/arch/um/kernel/Makefile @@ -17,7 +17,7 @@ extra-y := vmlinux.lds obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \ physmem.o process.o ptrace.o reboot.o sigio.o \ signal.o sysrq.o time.o tlb.o trap.o \ - um_arch.o umid.o maccess.o kmsg_dump.o capflags.o skas/ + um_arch.o umid.o kmsg_dump.o capflags.o skas/ obj-y += load_file.o obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c deleted file mode 100644 index 8ccd56813f684..0000000000000 --- a/arch/um/kernel/maccess.c +++ /dev/null @@ -1,19 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013 Richard Weinberger - */ - -#include -#include -#include - -bool copy_from_kernel_nofault_allowed(const void *src, size_t size) -{ - void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE); - - if ((unsigned long)src < PAGE_SIZE || size <= 0) - return false; - if (os_mincore(psrc, size + src - psrc) <= 0) - return false; - return true; -} diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c index e52dd37ddadcc..2686120ab2325 100644 --- a/arch/um/os-Linux/process.c +++ b/arch/um/os-Linux/process.c @@ -223,57 +223,6 @@ int __init can_drop_memory(void) return ok; } -static int os_page_mincore(void 
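The sparc tlb.c fix above is about ordering, not merely adding preempt_disable(): a per-CPU pointer resolved before preemption is disabled may refer to another CPU's batch by the time it is written. The set_ptes() hunk drops its own enter/leave pair for the same reason, since it would otherwise nest inside callers already in lazy mode. The resulting shape, condensed from the diff (kernel context):

```c
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb;

	preempt_disable();		/* pin this CPU first ...          */
	tb = this_cpu_ptr(&tlb_batch);	/* ... then resolve per-CPU state */
	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
	preempt_enable();		/* matches the enter above */
}
```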
*addr) -{ - char vec[2]; - int ret; - - ret = mincore(addr, UM_KERN_PAGE_SIZE, vec); - if (ret < 0) { - if (errno == ENOMEM || errno == EINVAL) - return 0; - else - return -errno; - } - - return vec[0] & 1; -} - -int os_mincore(void *addr, unsigned long len) -{ - char *vec; - int ret, i; - - if (len <= UM_KERN_PAGE_SIZE) - return os_page_mincore(addr); - - vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); - if (!vec) - return -ENOMEM; - - ret = mincore(addr, UM_KERN_PAGE_SIZE, vec); - if (ret < 0) { - if (errno == ENOMEM || errno == EINVAL) - ret = 0; - else - ret = -errno; - - goto out; - } - - for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) { - if (!(vec[i] & 1)) { - ret = 0; - goto out; - } - } - - ret = 1; -out: - free(vec); - return ret; -} - void init_new_thread_signals(void) { set_handler(SIGSEGV); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 95744a4165589..9d353b61f80f7 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -227,7 +227,7 @@ config X86 select HAVE_SAMPLE_FTRACE_DIRECT_MULTI if X86_64 select HAVE_EBPF_JIT select HAVE_EFFICIENT_UNALIGNED_ACCESS - select HAVE_EISA + select HAVE_EISA if X86_32 select HAVE_EXIT_THREAD select HAVE_GUP_FAST select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE @@ -899,6 +899,7 @@ config INTEL_TDX_GUEST depends on X86_64 && CPU_SUP_INTEL depends on X86_X2APIC depends on EFI_STUB + depends on PARAVIRT select ARCH_HAS_CC_PLATFORM select X86_MEM_ENCRYPT select X86_MCE @@ -1338,6 +1339,7 @@ config X86_REBOOTFIXUPS config MICROCODE def_bool y depends on CPU_SUP_AMD || CPU_SUP_INTEL + select CRYPTO_LIB_SHA256 if CPU_SUP_AMD config MICROCODE_INITRD32 def_bool y @@ -2437,18 +2439,20 @@ config CC_HAS_NAMED_AS def_bool $(success,echo 'int __seg_fs fs; int __seg_gs gs;' | $(CC) -x c - -S -o /dev/null) depends on CC_IS_GCC +# +# -fsanitize=kernel-address (KASAN) and -fsanitize=thread (KCSAN) +# are incompatible with named address spaces with GCC < 13.3 +# (see GCC PR sanitizer/111736 and also PR sanitizer/115172). +# + config CC_HAS_NAMED_AS_FIXED_SANITIZERS - def_bool CC_IS_GCC && GCC_VERSION >= 130300 + def_bool y + depends on !(KASAN || KCSAN) || GCC_VERSION >= 130300 + depends on !(UBSAN_BOOL && KASAN) || GCC_VERSION >= 140200 config USE_X86_SEG_SUPPORT - def_bool y - depends on CC_HAS_NAMED_AS - # - # -fsanitize=kernel-address (KASAN) and -fsanitize=thread - # (KCSAN) are incompatible with named address spaces with - # GCC < 13.3 - see GCC PR sanitizer/111736. - # - depends on !(KASAN || KCSAN) || CC_HAS_NAMED_AS_FIXED_SANITIZERS + def_bool CC_HAS_NAMED_AS + depends on CC_HAS_NAMED_AS_FIXED_SANITIZERS config CC_HAS_SLS def_bool $(cc-option,-mharden-sls=all) diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 2a7279d80460a..42e6a40876ea4 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -368,7 +368,7 @@ config X86_HAVE_PAE config X86_CMPXCHG64 def_bool y - depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7 + depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7 || MGEODEGX1 || MGEODE_LX # this should be set for all -march=.. options where the compiler # generates cmov. diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um index a46b1397ad01c..c86cbd9cbba38 100644 --- a/arch/x86/Makefile.um +++ b/arch/x86/Makefile.um @@ -7,12 +7,13 @@ core-y += arch/x86/crypto/ # GCC versions < 11. 
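With maccess.c gone, os_mincore() above loses its only user, hence its removal. For reference, the probe it wrapped is the host's mincore(2), which reports per-page residency for a range; a minimal standalone illustration (ordinary userspace C, not kernel code):

```c
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	unsigned char vec[2];
	char *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	p[0] = 1;	/* fault in only the first page */
	if (mincore(p, 2 * page, vec) == 0)
		printf("page0 resident=%d page1 resident=%d\n",
		       vec[0] & 1, vec[1] & 1);
	munmap(p, 2 * page);
	return 0;
}
```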
See: # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652 # -ifeq ($(CONFIG_CC_IS_CLANG),y) -KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx -KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json +ifeq ($(call gcc-min-version, 110000)$(CONFIG_CC_IS_CLANG),y) +KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2 endif +KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json + ifeq ($(CONFIG_X86_32),y) START := 0x8048000 diff --git a/arch/x86/boot/compressed/mem.c b/arch/x86/boot/compressed/mem.c index dbba332e4a12d..f676156d9f3db 100644 --- a/arch/x86/boot/compressed/mem.c +++ b/arch/x86/boot/compressed/mem.c @@ -34,11 +34,14 @@ static bool early_is_tdx_guest(void) void arch_accept_memory(phys_addr_t start, phys_addr_t end) { + static bool sevsnp; + /* Platform-specific memory-acceptance call goes here */ if (early_is_tdx_guest()) { if (!tdx_accept_memory(start, end)) panic("TDX: Failed to accept memory\n"); - } else if (sev_snp_enabled()) { + } else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) { + sevsnp = true; snp_accept_memory(start, end); } else { error("Cannot accept memory: unknown platform\n"); diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index c882e1f67af01..d8c5de40669d3 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include "misc.h" #include +#include #include #include #include "pgtable.h" @@ -107,6 +108,7 @@ asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable) bool l5_required = false; /* Initialize boot_params. Required for cmdline_find_option_bool(). */ + sanitize_boot_params(bp); boot_params_ptr = bp; /* diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c index cd44e120fe537..f49f7eef1dba0 100644 --- a/arch/x86/boot/compressed/sev.c +++ b/arch/x86/boot/compressed/sev.c @@ -164,10 +164,7 @@ bool sev_snp_enabled(void) static void __page_state_change(unsigned long paddr, enum psc_op op) { - u64 val; - - if (!sev_snp_enabled()) - return; + u64 val, msr; /* * If private -> shared then invalidate the page before requesting the @@ -176,6 +173,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op) if (op == SNP_PAGE_STATE_SHARED) pvalidate_4k_page(paddr, paddr, false); + /* Save the current GHCB MSR value */ + msr = sev_es_rd_ghcb_msr(); + /* Issue VMGEXIT to change the page state in RMP table. */ sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op)); VMGEXIT(); @@ -185,6 +185,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op) if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val)) sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC); + /* Restore the GHCB MSR value */ + sev_es_wr_ghcb_msr(msr); + /* * Now that page state is changed in the RMP table, validate it so that it is * consistent with the RMP entry. 
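The sev.c hunk above wraps the MSR-protocol Page State Change in a save/restore of the GHCB MSR, since the caller may be mid-conversation on that same shared register; with that in place, snp_accept_memory() can (as the following hunk shows) simply issue one private-state change per 4K page instead of batching. The core sequence, condensed (kernel context; the response-value check is elided here):

```c
u64 val, msr;

msr = sev_es_rd_ghcb_msr();	/* save: the caller may be using it */

sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
VMGEXIT();			/* request the RMP state change */

val = sev_es_rd_ghcb_msr();
if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);

sev_es_wr_ghcb_msr(msr);	/* restore before returning */
```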
@@ -195,11 +198,17 @@ static void __page_state_change(unsigned long paddr, enum psc_op op) void snp_set_page_private(unsigned long paddr) { + if (!sev_snp_enabled()) + return; + __page_state_change(paddr, SNP_PAGE_STATE_PRIVATE); } void snp_set_page_shared(unsigned long paddr) { + if (!sev_snp_enabled()) + return; + __page_state_change(paddr, SNP_PAGE_STATE_SHARED); } @@ -223,56 +232,10 @@ static bool early_setup_ghcb(void) return true; } -static phys_addr_t __snp_accept_memory(struct snp_psc_desc *desc, - phys_addr_t pa, phys_addr_t pa_end) -{ - struct psc_hdr *hdr; - struct psc_entry *e; - unsigned int i; - - hdr = &desc->hdr; - memset(hdr, 0, sizeof(*hdr)); - - e = desc->entries; - - i = 0; - while (pa < pa_end && i < VMGEXIT_PSC_MAX_ENTRY) { - hdr->end_entry = i; - - e->gfn = pa >> PAGE_SHIFT; - e->operation = SNP_PAGE_STATE_PRIVATE; - if (IS_ALIGNED(pa, PMD_SIZE) && (pa_end - pa) >= PMD_SIZE) { - e->pagesize = RMP_PG_SIZE_2M; - pa += PMD_SIZE; - } else { - e->pagesize = RMP_PG_SIZE_4K; - pa += PAGE_SIZE; - } - - e++; - i++; - } - - if (vmgexit_psc(boot_ghcb, desc)) - sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC); - - pvalidate_pages(desc); - - return pa; -} - void snp_accept_memory(phys_addr_t start, phys_addr_t end) { - struct snp_psc_desc desc = {}; - unsigned int i; - phys_addr_t pa; - - if (!boot_ghcb && !early_setup_ghcb()) - sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC); - - pa = start; - while (pa < end) - pa = __snp_accept_memory(&desc, pa, end); + for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE) + __page_state_change(pa, SNP_PAGE_STATE_PRIVATE); } void sev_es_shutdown_ghcb(void) diff --git a/arch/x86/boot/compressed/sev.h b/arch/x86/boot/compressed/sev.h index fc725a981b093..4e463f33186df 100644 --- a/arch/x86/boot/compressed/sev.h +++ b/arch/x86/boot/compressed/sev.h @@ -12,11 +12,13 @@ bool sev_snp_enabled(void); void snp_accept_memory(phys_addr_t start, phys_addr_t end); +u64 sev_get_status(void); #else static inline bool sev_snp_enabled(void) { return false; } static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { } +static inline u64 sev_get_status(void) { return 0; } #endif diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c index 2f85ed005c42f..b8aeb3ac7d28b 100644 --- a/arch/x86/coco/tdx/tdx.c +++ b/arch/x86/coco/tdx/tdx.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -359,7 +360,7 @@ static int handle_halt(struct ve_info *ve) return ve_instr_len(ve); } -void __cpuidle tdx_safe_halt(void) +void __cpuidle tdx_halt(void) { const bool irq_disabled = false; @@ -370,6 +371,16 @@ void __cpuidle tdx_safe_halt(void) WARN_ONCE(1, "HLT instruction emulation failed\n"); } +static void __cpuidle tdx_safe_halt(void) +{ + tdx_halt(); + /* + * "__cpuidle" section doesn't support instrumentation, so stick + * with raw_* variant that avoids tracing hooks. + */ + raw_local_irq_enable(); +} + static int read_msr(struct pt_regs *regs, struct ve_info *ve) { struct tdx_module_args args = { @@ -1056,6 +1067,19 @@ void __init tdx_early_init(void) x86_platform.guest.enc_kexec_begin = tdx_kexec_begin; x86_platform.guest.enc_kexec_finish = tdx_kexec_finish; + /* + * Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that + * will enable interrupts before HLT TDCALL invocation if executed + * in STI-shadow, possibly resulting in missed wakeup events. 
+ * + * Modify all possible HLT execution paths to use TDX specific routines + * that directly execute TDCALL and toggle the interrupt state as + * needed after TDCALL completion. This also reduces HLT related #VEs + * in addition to having a reliable halt logic execution. + */ + pv_ops.irq.safe_halt = tdx_safe_halt; + pv_ops.irq.halt = tdx_halt; + /* * TDX intercepts the RDMSR to read the X2APIC ID in the parallel * bringup low level code. That raises #VE which cannot be handled diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index ea81770629eea..626a81c6015bd 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -70,6 +70,8 @@ For 32-bit we have the following conventions - kernel is built with pushq %rsi /* pt_regs->si */ movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */ + /* We just clobbered the return address - use the IRET frame for unwinding: */ + UNWIND_HINT_IRET_REGS offset=3*8 .else pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 94941c5a10ac1..51efd2da4d7fd 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -142,7 +142,7 @@ static __always_inline int syscall_32_enter(struct pt_regs *regs) #ifdef CONFIG_IA32_EMULATION bool __ia32_enabled __ro_after_init = !IS_ENABLED(CONFIG_IA32_EMULATION_DEFAULT_DISABLED); -static int ia32_emulation_override_cmdline(char *arg) +static int __init ia32_emulation_override_cmdline(char *arg) { return kstrtobool(arg, &__ia32_enabled); } diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 65ab6460aed4d..0d33c85da4535 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -628,7 +628,7 @@ int x86_pmu_hw_config(struct perf_event *event) if (event->attr.type == event->pmu->type) event->hw.config |= x86_pmu_get_event_config(event); - if (event->attr.sample_period && x86_pmu.limit_period) { + if (!event->attr.freq && x86_pmu.limit_period) { s64 left = event->attr.sample_period; x86_pmu.limit_period(event, &left); if (left > event->attr.sample_period) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index f5bf400f6a283..f86e47afd5609 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -397,34 +397,28 @@ static struct event_constraint intel_lnc_event_constraints[] = { METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6), METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7), + INTEL_EVENT_CONSTRAINT(0x20, 0xf), + + INTEL_UEVENT_CONSTRAINT(0x012a, 0xf), + INTEL_UEVENT_CONSTRAINT(0x012b, 0xf), INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), INTEL_UEVENT_CONSTRAINT(0x0175, 0x4), INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff), INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff), - /* - * Generally event codes < 0x90 are restricted to counters 0-3. - * The 0x2E and 0x3C are exception, which has no restriction. 
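The tdx.c change above splits halting into tdx_halt(), the plain emulated HLT, and tdx_safe_halt(), which re-enables interrupts only after the TDCALL returns, then overrides the paravirt hooks with both so no path falls back to "sti;hlt". Condensed from the diff (kernel context):

```c
static void __cpuidle tdx_safe_halt(void)
{
	tdx_halt();		/* emulated HLT via TDCALL, IRQs still off */
	raw_local_irq_enable();	/* no STI shadow around the hypercall */
}

/* in tdx_early_init(): */
pv_ops.irq.safe_halt = tdx_safe_halt;
pv_ops.irq.halt = tdx_halt;
```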
- */ - INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf), - INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf), - INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1), INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1), INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1), INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8), + INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc), INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3), - INTEL_EVENT_CONSTRAINT(0xce, 0x1), INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf), - /* - * Generally event codes >= 0x90 are likely to have no restrictions. - * The exception are defined as above. - */ - INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0x3ff), + + INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf), EVENT_CONSTRAINT_END }; @@ -2785,28 +2779,33 @@ static u64 icl_update_topdown_event(struct perf_event *event) DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update); -static void intel_pmu_read_topdown_event(struct perf_event *event) +static void intel_pmu_read_event(struct perf_event *event) { - struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN)) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + bool pmu_enabled = cpuc->enabled; - /* Only need to call update_topdown_event() once for group read. */ - if ((cpuc->txn_flags & PERF_PMU_TXN_READ) && - !is_slots_event(event)) - return; + /* Only need to call update_topdown_event() once for group read. */ + if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ)) + return; - perf_pmu_disable(event->pmu); - static_call(intel_pmu_update_topdown_event)(event); - perf_pmu_enable(event->pmu); -} + cpuc->enabled = 0; + if (pmu_enabled) + intel_pmu_disable_all(); -static void intel_pmu_read_event(struct perf_event *event) -{ - if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) - intel_pmu_auto_reload_read(event); - else if (is_topdown_count(event)) - intel_pmu_read_topdown_event(event); - else - x86_perf_event_update(event); + if (is_topdown_event(event)) + static_call(intel_pmu_update_topdown_event)(event); + else + intel_pmu_drain_pebs_buffer(); + + cpuc->enabled = pmu_enabled; + if (pmu_enabled) + intel_pmu_enable_all(0); + + return; + } + + x86_perf_event_update(event); } static void intel_pmu_enable_fixed(struct perf_event *event) @@ -3073,7 +3072,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) handled++; x86_pmu_handle_guest_pebs(regs, &data); - x86_pmu.drain_pebs(regs, &data); + static_call(x86_pmu_drain_pebs)(regs, &data); status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI; /* @@ -3955,6 +3954,85 @@ static inline bool intel_pmu_has_cap(struct perf_event *event, int idx) return test_bit(idx, (unsigned long *)&intel_cap->capabilities); } +static u64 intel_pmu_freq_start_period(struct perf_event *event) +{ + int type = event->attr.type; + u64 config, factor; + s64 start; + + /* + * The 127 is the lowest possible recommended SAV (sample after value) + * for a 4000 freq (default freq), according to the event list JSON file. + * Also, assume the workload is idle 50% time. + */ + factor = 64 * 4000; + if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE) + goto end; + + /* + * The estimation of the start period in the freq mode is + * based on the below assumption. + * + * For a cycles or an instructions event, 1GHZ of the + * underlying platform, 1 IPC. The workload is idle 50% time. + * The start period = 1,000,000,000 * 1 / freq / 2. 
+ * = 500,000,000 / freq + * + * Usually, the branch-related events occur less than the + * instructions event. According to the Intel event list JSON + * file, the SAV (sample after value) of a branch-related event + * is usually 1/4 of an instruction event. + * The start period of branch-related events = 125,000,000 / freq. + * + * The cache-related events occurs even less. The SAV is usually + * 1/20 of an instruction event. + * The start period of cache-related events = 25,000,000 / freq. + */ + config = event->attr.config & PERF_HW_EVENT_MASK; + if (type == PERF_TYPE_HARDWARE) { + switch (config) { + case PERF_COUNT_HW_CPU_CYCLES: + case PERF_COUNT_HW_INSTRUCTIONS: + case PERF_COUNT_HW_BUS_CYCLES: + case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND: + case PERF_COUNT_HW_STALLED_CYCLES_BACKEND: + case PERF_COUNT_HW_REF_CPU_CYCLES: + factor = 500000000; + break; + case PERF_COUNT_HW_BRANCH_INSTRUCTIONS: + case PERF_COUNT_HW_BRANCH_MISSES: + factor = 125000000; + break; + case PERF_COUNT_HW_CACHE_REFERENCES: + case PERF_COUNT_HW_CACHE_MISSES: + factor = 25000000; + break; + default: + goto end; + } + } + + if (type == PERF_TYPE_HW_CACHE) + factor = 25000000; +end: + /* + * Usually, a prime or a number with less factors (close to prime) + * is chosen as an SAV, which makes it less likely that the sampling + * period synchronizes with some periodic event in the workload. + * Minus 1 to make it at least avoiding values near power of twos + * for the default freq. + */ + start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1; + + if (start > x86_pmu.max_period) + start = x86_pmu.max_period; + + if (x86_pmu.limit_period) + x86_pmu.limit_period(event, &start); + + return start; +} + static int intel_pmu_hw_config(struct perf_event *event) { int ret = x86_pmu_hw_config(event); @@ -3966,6 +4044,12 @@ static int intel_pmu_hw_config(struct perf_event *event) if (ret) return ret; + if (event->attr.freq && event->attr.sample_freq) { + event->hw.sample_period = intel_pmu_freq_start_period(event); + event->hw.last_period = event->hw.sample_period; + local64_set(&event->hw.period_left, event->hw.sample_period); + } + if (event->attr.precise_ip) { if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT) return -EINVAL; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index b6303b0224531..1b82bcc6fa556 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -932,11 +932,11 @@ int intel_pmu_drain_bts_buffer(void) return 1; } -static inline void intel_pmu_drain_pebs_buffer(void) +void intel_pmu_drain_pebs_buffer(void) { struct perf_sample_data data; - x86_pmu.drain_pebs(NULL, &data); + static_call(x86_pmu_drain_pebs)(NULL, &data); } /* @@ -1178,7 +1178,7 @@ struct event_constraint intel_lnc_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), - INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3ff), + INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3fc), INTEL_HYBRID_STLAT_CONSTRAINT(0x2cd, 0x3), INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ @@ -1317,8 +1317,10 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event) * + precise_ip < 2 for the non event IP * + For RTM TSX weight we need GPRs for the abort code. 
*/ - gprs = (sample_type & PERF_SAMPLE_REGS_INTR) && - (attr->sample_regs_intr & PEBS_GP_REGS); + gprs = ((sample_type & PERF_SAMPLE_REGS_INTR) && + (attr->sample_regs_intr & PEBS_GP_REGS)) || + ((sample_type & PERF_SAMPLE_REGS_USER) && + (attr->sample_regs_user & PEBS_GP_REGS)); tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) && ((attr->config & INTEL_ARCH_EVENT_MASK) == @@ -1970,7 +1972,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, regs->flags &= ~PERF_EFLAGS_EXACT; } - if (sample_type & PERF_SAMPLE_REGS_INTR) + if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)) adaptive_pebs_save_regs(regs, gprs); } @@ -2079,15 +2081,6 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit) return NULL; } -void intel_pmu_auto_reload_read(struct perf_event *event) -{ - WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)); - - perf_pmu_disable(event->pmu); - intel_pmu_drain_pebs_buffer(); - perf_pmu_enable(event->pmu); -} - /* * Special variant of intel_pmu_save_and_restart() for auto-reload. */ diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index ca98744343b89..543609d1231ef 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -4891,28 +4891,28 @@ static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = { INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), /* Free-Running IIO BANDWIDTH IN Counters */ INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), - INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), - INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), - INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), - INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"), - INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"), - INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"), - INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"), - INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"), { /* end: all zeroes */ }, }; @@ -5485,37 +5485,6 @@ static struct freerunning_counters icx_iio_freerunning[] = { [ICX_IIO_MSR_BW_IN] = { 0xaa0, 
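About the .scale corrections in the uncore hunk above: the strings encode MiB per counter increment, and these free-running IIO bandwidth counters increment once per 32 bytes transferred, not once per 4 bytes as the old value assumed. A quick userspace check of the two constants, illustrative only:

	#include <stdio.h>

	int main(void)
	{
		printf("%.10e\n", 32.0 / (1 << 20));	/* 3.0517578125e-05, new scale */
		printf("%.9e\n", 4.0 / (1 << 20));	/* 3.814697266e-06, old scale */
		return 0;
	}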
0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets }, }; -static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = { - /* Free-Running IIO CLOCKS Counter */ - INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), - /* Free-Running IIO BANDWIDTH IN Counters */ - INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), - INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), - INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), - INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), - INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"), - INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"), - INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"), - INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"), - INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"), - { /* end: all zeroes */ }, -}; - static struct intel_uncore_type icx_uncore_iio_free_running = { .name = "iio_free_running", .num_counters = 9, @@ -5523,7 +5492,7 @@ static struct intel_uncore_type icx_uncore_iio_free_running = { .num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX, .freerunning = icx_iio_freerunning, .ops = &skx_uncore_iio_freerunning_ops, - .event_descs = icx_uncore_iio_freerunning_events, + .event_descs = snr_uncore_iio_freerunning_events, .format_group = &skx_uncore_iio_freerunning_format_group, }; @@ -6320,69 +6289,13 @@ static struct freerunning_counters spr_iio_freerunning[] = { [SPR_IIO_MSR_BW_OUT] = { 0x3808, 0x1, 0x10, 8, 48 }, }; -static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = { - /* Free-Running IIO CLOCKS Counter */ - INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), - /* Free-Running IIO BANDWIDTH IN Counters */ - INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), - INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), - INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), - INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), - INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"), - INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"), - 
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"), - INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"), - INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"), - INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"), - /* Free-Running IIO BANDWIDTH OUT Counters */ - INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x30"), - INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x31"), - INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x32"), - INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x33"), - INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_out_port4, "event=0xff,umask=0x34"), - INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_out_port5, "event=0xff,umask=0x35"), - INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_out_port6, "event=0xff,umask=0x36"), - INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_out_port7, "event=0xff,umask=0x37"), - INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit, "MiB"), - { /* end: all zeroes */ }, -}; - static struct intel_uncore_type spr_uncore_iio_free_running = { .name = "iio_free_running", .num_counters = 17, .num_freerunning_types = SPR_IIO_FREERUNNING_TYPE_MAX, .freerunning = spr_iio_freerunning, .ops = &skx_uncore_iio_freerunning_ops, - .event_descs = spr_uncore_iio_freerunning_events, + .event_descs = snr_uncore_iio_freerunning_events, .format_group = &skx_uncore_iio_freerunning_format_group, }; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index ac1182141bf67..8c616656391ec 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -1092,6 +1092,7 @@ extern struct x86_pmu x86_pmu __read_mostly; DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period); DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update); +DECLARE_STATIC_CALL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs); static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx) { @@ -1626,7 +1627,7 @@ void intel_pmu_pebs_disable_all(void); void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in); -void intel_pmu_auto_reload_read(struct perf_event *event); +void intel_pmu_drain_pebs_buffer(void); void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr); diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c index a481a939862e5..fc06b216aacdb 100644 --- a/arch/x86/events/rapl.c +++ b/arch/x86/events/rapl.c @@ -846,6 +846,7 @@ static const struct x86_cpu_id 
rapl_model_match[] __initconst = { X86_MATCH_VFM(INTEL_METEORLAKE_L, &model_skl), X86_MATCH_VFM(INTEL_ARROWLAKE_H, &model_skl), X86_MATCH_VFM(INTEL_ARROWLAKE, &model_skl), + X86_MATCH_VFM(INTEL_ARROWLAKE_U, &model_skl), X86_MATCH_VFM(INTEL_LUNARLAKE_M, &model_skl), {}, }; diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c index 04775346369c5..d04ccd4b3b4af 100644 --- a/arch/x86/hyperv/hv_vtl.c +++ b/arch/x86/hyperv/hv_vtl.c @@ -30,6 +30,7 @@ void __init hv_vtl_init_platform(void) x86_platform.realmode_init = x86_init_noop; x86_init.irqs.pre_vector_init = x86_init_noop; x86_init.timers.timer_init = x86_init_noop; + x86_init.resources.probe_roms = x86_init_noop; /* Avoid searching for BIOS MP tables */ x86_init.mpparse.find_mptable = x86_init_noop; diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c index 60fc3ed728304..4065f5ef3ae08 100644 --- a/arch/x86/hyperv/ivm.c +++ b/arch/x86/hyperv/ivm.c @@ -339,7 +339,7 @@ int hv_snp_boot_ap(u32 cpu, unsigned long start_ip) vmsa->sev_features = sev_status >> 2; ret = snp_set_vmsa(vmsa, true); - if (!ret) { + if (ret) { pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret); free_page((u64)vmsa); return ret; @@ -465,7 +465,6 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[], enum hv_mem_host_visibility visibility) { struct hv_gpa_range_for_visibility *input; - u16 pages_processed; u64 hv_status; unsigned long flags; @@ -494,7 +493,7 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[], memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn)); hv_status = hv_do_rep_hypercall( HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count, - 0, input, &pages_processed); + 0, input, NULL); local_irq_restore(flags); if (hv_result_success(hv_status)) diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index cf7fc2b8e3ce1..1c2db11a2c3cb 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -76,6 +76,28 @@ static __always_inline void native_local_irq_restore(unsigned long flags) #endif +#ifndef CONFIG_PARAVIRT +#ifndef __ASSEMBLY__ +/* + * Used in the idle loop; sti takes one instruction cycle + * to complete: + */ +static __always_inline void arch_safe_halt(void) +{ + native_safe_halt(); +} + +/* + * Used when interrupts are already enabled or to + * shutdown the processor: + */ +static __always_inline void halt(void) +{ + native_halt(); +} +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_PARAVIRT */ + #ifdef CONFIG_PARAVIRT_XXL #include #else @@ -97,24 +119,6 @@ static __always_inline void arch_local_irq_enable(void) native_irq_enable(); } -/* - * Used in the idle loop; sti takes one instruction cycle - * to complete: - */ -static __always_inline void arch_safe_halt(void) -{ - native_safe_halt(); -} - -/* - * Used when interrupts are already enabled or to - * shutdown the processor: - */ -static __always_inline void halt(void) -{ - native_halt(); -} - /* * For spinlocks, etc: */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 8499b9cb9c826..e4dd840e0becd 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -761,6 +761,7 @@ struct kvm_vcpu_arch { u32 pkru; u32 hflags; u64 efer; + u64 host_debugctl; u64 apic_base; struct kvm_lapic *apic; /* kernel irqchip context */ bool load_eoi_exitmap_pending; diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index d4eb9e1d61b8e..75d4c994f5e2a 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h 
@@ -107,6 +107,16 @@ static inline void notify_page_enc_status_changed(unsigned long pfn, PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc); } +static __always_inline void arch_safe_halt(void) +{ + PVOP_VCALL0(irq.safe_halt); +} + +static inline void halt(void) +{ + PVOP_VCALL0(irq.halt); +} + #ifdef CONFIG_PARAVIRT_XXL static inline void load_sp0(unsigned long sp0) { @@ -170,16 +180,6 @@ static inline void __write_cr4(unsigned long x) PVOP_VCALL1(cpu.write_cr4, x); } -static __always_inline void arch_safe_halt(void) -{ - PVOP_VCALL0(irq.safe_halt); -} - -static inline void halt(void) -{ - PVOP_VCALL0(irq.halt); -} - extern noinstr void pv_native_wbinvd(void); static __always_inline void wbinvd(void) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 8d4fbe1be4895..9334fdd1f6350 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -122,10 +122,9 @@ struct pv_irq_ops { struct paravirt_callee_save save_fl; struct paravirt_callee_save irq_disable; struct paravirt_callee_save irq_enable; - +#endif void (*safe_halt)(void); void (*halt)(void); -#endif } __no_randomize_layout; struct pv_mmu_ops { diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h index eba178996d845..b5b6332940613 100644 --- a/arch/x86/include/asm/tdx.h +++ b/arch/x86/include/asm/tdx.h @@ -58,7 +58,7 @@ void tdx_get_ve_info(struct ve_info *ve); bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve); -void tdx_safe_halt(void); +void tdx_halt(void); bool tdx_early_handle_ve(struct pt_regs *regs); @@ -69,7 +69,7 @@ u64 tdx_hcall_get_quote(u8 *buf, size_t size); #else static inline void tdx_early_init(void) { }; -static inline void tdx_safe_halt(void) { }; +static inline void tdx_halt(void) { }; static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; } diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 02fc2aa06e9e0..3da6451397485 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -242,7 +242,7 @@ void flush_tlb_multi(const struct cpumask *cpumask, flush_tlb_mm_range((vma)->vm_mm, start, end, \ ((vma)->vm_flags & VM_HUGETLB) \ ? huge_page_shift(hstate_vma(vma)) \ - : PAGE_SHIFT, false) + : PAGE_SHIFT, true) extern void flush_tlb_all(void); extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index c70b86f1f2954..63adda8a143f9 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -23,6 +23,8 @@ #include #include +#include + #include #include #include @@ -1730,6 +1732,15 @@ int __init acpi_mps_check(void) { #if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE) /* mptable code is not built-in*/ + + /* + * Xen disables ACPI in PV DomU guests but it still emulates APIC and + * supports SMP. Returning early here ensures that APIC is not disabled + * unnecessarily and the guest is not limited to a single vCPU. 
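Context for the paravirt.h/paravirt_types.h hunks above: with safe_halt/halt moved out from under CONFIG_PARAVIRT_XXL, a guest built with plain CONFIG_PARAVIRT (such as a TDX guest) can override just these two hooks. The native implementations they default to boil down to the following sketch, which omits the speculation-hardening hook the real irqflags.h versions also call:

	static __always_inline void native_safe_halt_sketch(void)
	{
		/*
		 * STI's one-instruction interrupt shadow makes "enable
		 * IRQs, then halt" atomic: a wakeup interrupt cannot
		 * slip in between the two instructions.
		 */
		asm volatile("sti; hlt" : : : "memory");
	}

	static __always_inline void native_halt_sketch(void)
	{
		asm volatile("hlt" : : : "memory");
	}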
+ */ + if (xen_pv_domain() && !xen_initial_domain()) + return 0; + if (acpi_disabled || acpi_noirq) { pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may have problem\n"); return 1; diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 37b8244899d89..04712fd0c9649 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -405,7 +405,6 @@ bool __init early_is_amd_nb(u32 device) struct resource *amd_get_mmconfig_range(struct resource *res) { - u32 address; u64 base, msr; unsigned int segn_busn_bits; @@ -413,13 +412,11 @@ struct resource *amd_get_mmconfig_range(struct resource *res) boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) return NULL; - /* assume all cpus from fam10h have mmconfig */ - if (boot_cpu_data.x86 < 0x10) + /* Assume CPUs from Fam10h have mmconfig, although not all VMs do */ + if (boot_cpu_data.x86 < 0x10 || + rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr)) return NULL; - address = MSR_FAM10H_MMIO_CONF_BASE; - rdmsrl(address, msr); - /* mmconfig is not enabled */ if (!(msr & FAM10H_MMIO_CONF_ENABLE)) return NULL; diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 79d2e17f6582e..e432910859cb1 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -627,7 +627,7 @@ static void init_amd_k8(struct cpuinfo_x86 *c) * (model = 0x14) and later actually support it. * (AMD Erratum #110, docId: 25759). */ - if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) { + if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) { clear_cpu_cap(c, X86_FEATURE_LAHF_LM); if (!rdmsrl_amd_safe(0xc001100d, &value)) { value &= ~BIT_64(32); @@ -862,6 +862,16 @@ static void init_amd_zen1(struct cpuinfo_x86 *c) pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); setup_force_cpu_bug(X86_BUG_DIV0); + + /* + * Turn off the Instructions Retired free counter on machines that are + * susceptible to erratum #1054 "Instructions Retired Performance + * Counter May Be Inaccurate". + */ + if (c->x86_model < 0x30) { + msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); + clear_cpu_cap(c, X86_FEATURE_IRPERF); + } } static bool cpu_has_zenbleed_microcode(void) @@ -1045,13 +1055,8 @@ static void init_amd(struct cpuinfo_x86 *c) if (!cpu_feature_enabled(X86_FEATURE_XENPV)) set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); - /* - * Turn on the Instructions Retired free counter on machines not - * susceptible to erratum #1054 "Instructions Retired Performance - * Counter May Be Inaccurate". 
- */ - if (cpu_has(c, X86_FEATURE_IRPERF) && - (boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f)) + /* Enable the Instructions Retired free counter */ + if (cpu_has(c, X86_FEATURE_IRPERF)) msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); check_null_seg_clears_base(c); diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index e6fa03ed9172c..a6c6bccfa8b8d 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -808,7 +808,7 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c) cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]); /* If bit 31 is set, this is an unknown format */ - for (j = 0 ; j < 3 ; j++) + for (j = 0 ; j < 4 ; j++) if (regs[j] & (1 << 31)) regs[j] = 0; diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 9651275aecd1b..dfec2c61e3547 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -153,8 +153,8 @@ static void geode_configure(void) u8 ccr3; local_irq_save(flags); - /* Suspend on halt power saving and enable #SUSP pin */ - setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88); + /* Suspend on halt power saving */ + setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x08); ccr3 = getCx86(CX86_CCR3); setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 4b5f3d0521517..b93d88ec14175 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -672,26 +672,37 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) } #endif -#define TLB_INST_4K 0x01 -#define TLB_INST_4M 0x02 -#define TLB_INST_2M_4M 0x03 +#define TLB_INST_4K 0x01 +#define TLB_INST_4M 0x02 +#define TLB_INST_2M_4M 0x03 -#define TLB_INST_ALL 0x05 -#define TLB_INST_1G 0x06 +#define TLB_INST_ALL 0x05 +#define TLB_INST_1G 0x06 -#define TLB_DATA_4K 0x11 -#define TLB_DATA_4M 0x12 -#define TLB_DATA_2M_4M 0x13 -#define TLB_DATA_4K_4M 0x14 +#define TLB_DATA_4K 0x11 +#define TLB_DATA_4M 0x12 +#define TLB_DATA_2M_4M 0x13 +#define TLB_DATA_4K_4M 0x14 -#define TLB_DATA_1G 0x16 +#define TLB_DATA_1G 0x16 +#define TLB_DATA_1G_2M_4M 0x17 -#define TLB_DATA0_4K 0x21 -#define TLB_DATA0_4M 0x22 -#define TLB_DATA0_2M_4M 0x23 +#define TLB_DATA0_4K 0x21 +#define TLB_DATA0_4M 0x22 +#define TLB_DATA0_2M_4M 0x23 -#define STLB_4K 0x41 -#define STLB_4K_2M 0x42 +#define STLB_4K 0x41 +#define STLB_4K_2M 0x42 + +/* + * All of leaf 0x2's one-byte TLB descriptors implies the same number of + * entries for their respective TLB types. The 0x63 descriptor is an + * exception: it implies 4 dTLB entries for 1GB pages 32 dTLB entries + * for 2MB or 4MB pages. Encode descriptor 0x63 dTLB entry count for + * 2MB/4MB pages here, as its count for dTLB 1GB pages is already at the + * intel_tlb_table[] mapping. 
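The "j < 4" loop-bound fixes in this area (cacheinfo.c above, intel.c below) matter because CPUID leaf 0x2 packs one-byte descriptors into all four output registers, EAX through EDX; a register whose bit 31 is set carries no descriptors, and byte 0 of EAX is the leaf's iteration count rather than a descriptor. A hypothetical standalone walker showing the intended parse:

	#include <stdint.h>

	/*
	 * Hypothetical helper, not kernel code; assumes regs[] holds
	 * {EAX, EBX, ECX, EDX} on a little-endian x86 host, matching
	 * the kernel's own byte-wise parse.
	 */
	static void for_each_leaf2_desc(uint32_t regs[4], void (*cb)(uint8_t desc))
	{
		const uint8_t *bytes = (const uint8_t *)regs;
		int i;

		for (i = 0; i < 4; i++)		/* bit 31 set: no descriptors here */
			if (regs[i] & (1u << 31))
				regs[i] = 0;

		for (i = 1; i < 16; i++)	/* skip byte 0 of EAX: iteration count */
			if (bytes[i])
				cb(bytes[i]);
	}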
+ */ +#define TLB_0x63_2M_4M_ENTRIES 32 static const struct _tlb_table intel_tlb_table[] = { { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, @@ -713,7 +724,8 @@ static const struct _tlb_table intel_tlb_table[] = { { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, - { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, + { 0x63, TLB_DATA_1G_2M_4M, 4, " TLB_DATA 1 GByte pages, 4-way set associative" + " (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here)" }, { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" }, { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" }, { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" }, @@ -813,6 +825,12 @@ static void intel_tlb_lookup(const unsigned char desc) if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; break; + case TLB_DATA_1G_2M_4M: + if (tlb_lld_2m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES) + tlb_lld_2m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES; + if (tlb_lld_4m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES) + tlb_lld_4m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES; + fallthrough; case TLB_DATA_1G: if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries; @@ -836,7 +854,7 @@ static void intel_detect_tlb(struct cpuinfo_x86 *c) cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]); /* If bit 31 is set, this is an unknown format */ - for (j = 0 ; j < 3 ; j++) + for (j = 0 ; j < 4 ; j++) if (regs[j] & (1 << 31)) regs[j] = 0; diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c index dac4d64dfb2a8..2235a74774360 100644 --- a/arch/x86/kernel/cpu/mce/severity.c +++ b/arch/x86/kernel/cpu/mce/severity.c @@ -300,13 +300,12 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs) copy_user = is_copy_from_user(regs); instrumentation_end(); - switch (fixup_type) { - case EX_TYPE_UACCESS: - if (!copy_user) - return IN_KERNEL; - m->kflags |= MCE_IN_KERNEL_COPYIN; - fallthrough; + if (copy_user) { + m->kflags |= MCE_IN_KERNEL_COPYIN | MCE_IN_KERNEL_RECOV; + return IN_KERNEL_RECOV; + } + switch (fixup_type) { case EX_TYPE_FAULT_MCE_SAFE: case EX_TYPE_DEFAULT_MCE_SAFE: m->kflags |= MCE_IN_KERNEL_RECOV; diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index fb5d0c67fbab1..093d3ca43c467 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -23,14 +23,18 @@ #include #include +#include #include #include #include #include #include +#include + #include #include +#include #include #include #include @@ -145,6 +149,118 @@ ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin"; */ static u32 bsp_cpuid_1_eax __ro_after_init; +static bool sha_check = true; + +struct patch_digest { + u32 patch_id; + u8 sha256[SHA256_DIGEST_SIZE]; +}; + +#include "amd_shas.c" + +static int cmp_id(const void *key, const void *elem) +{ + struct patch_digest *pd = (struct patch_digest *)elem; + u32 patch_id = *(u32 *)key; + + if (patch_id == pd->patch_id) + return 0; + else if (patch_id < pd->patch_id) + return -1; + else + return 1; +} + +static bool need_sha_check(u32 cur_rev) +{ + switch (cur_rev >> 8) { + case 0x80012: return cur_rev <= 0x800126f; break; + case 0x80082: return cur_rev <= 0x800820f; break; + case 
0x83010: return cur_rev <= 0x830107c; break;
+ case 0x86001: return cur_rev <= 0x860010e; break;
+ case 0x86081: return cur_rev <= 0x8608108; break;
+ case 0x87010: return cur_rev <= 0x8701034; break;
+ case 0x8a000: return cur_rev <= 0x8a0000a; break;
+ case 0xa0010: return cur_rev <= 0xa00107a; break;
+ case 0xa0011: return cur_rev <= 0xa0011da; break;
+ case 0xa0012: return cur_rev <= 0xa001243; break;
+ case 0xa0082: return cur_rev <= 0xa00820e; break;
+ case 0xa1011: return cur_rev <= 0xa101153; break;
+ case 0xa1012: return cur_rev <= 0xa10124e; break;
+ case 0xa1081: return cur_rev <= 0xa108109; break;
+ case 0xa2010: return cur_rev <= 0xa20102f; break;
+ case 0xa2012: return cur_rev <= 0xa201212; break;
+ case 0xa4041: return cur_rev <= 0xa404109; break;
+ case 0xa5000: return cur_rev <= 0xa500013; break;
+ case 0xa6012: return cur_rev <= 0xa60120a; break;
+ case 0xa7041: return cur_rev <= 0xa704109; break;
+ case 0xa7052: return cur_rev <= 0xa705208; break;
+ case 0xa7080: return cur_rev <= 0xa708009; break;
+ case 0xa70c0: return cur_rev <= 0xa70C009; break;
+ case 0xaa001: return cur_rev <= 0xaa00116; break;
+ case 0xaa002: return cur_rev <= 0xaa00218; break;
+ case 0xb0021: return cur_rev <= 0xb002146; break;
+ case 0xb1010: return cur_rev <= 0xb101046; break;
+ case 0xb2040: return cur_rev <= 0xb204031; break;
+ case 0xb4040: return cur_rev <= 0xb404031; break;
+ case 0xb6000: return cur_rev <= 0xb600031; break;
+ case 0xb7000: return cur_rev <= 0xb700031; break;
+ default: break;
+ }
+
+ pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
+ pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
+ return true;
+}
+
+static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
+{
+ struct patch_digest *pd = NULL;
+ u8 digest[SHA256_DIGEST_SIZE];
+ struct sha256_state s;
+ int i;
+
+ if (x86_family(bsp_cpuid_1_eax) < 0x17)
+ return true;
+
+ if (!need_sha_check(cur_rev))
+ return true;
+
+ if (!sha_check)
+ return true;
+
+ pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
+ if (!pd) {
+ pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
+ return false;
+ }
+
+ sha256_init(&s);
+ sha256_update(&s, data, len);
+ sha256_final(&s, digest);
+
+ if (memcmp(digest, pd->sha256, sizeof(digest))) {
+ pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);
+
+ for (i = 0; i < SHA256_DIGEST_SIZE; i++)
+ pr_cont("0x%x ", digest[i]);
+ pr_info("\n");
+
+ return false;
+ }
+
+ return true;
+}
+
+static u32 get_patch_level(void)
+{
+ u32 rev, dummy __always_unused;
+
+ native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+
+ return rev;
+}
+
 static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
 {
 	union zen_patch_rev p;
@@ -246,8 +362,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
 * On success, @sh_psize returns the patch size according to the section header,
 * to the caller.
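A note on the need_sha_check() switch in the hunk above: it buckets the current patch level by discarding the low byte, so one case label covers a whole revision series, and the cutoff answers whether the running revision is old enough that the incoming patch must still be digest-checked. For example, in plain userspace C, illustrative only:

	#include <stdio.h>

	int main(void)
	{
		unsigned int cur_rev = 0x800126f;

		/* every 0x080012xx revision lands in the "case 0x80012:" bucket */
		printf("bucket: 0x%x\n", cur_rev >> 8);

		/* at or below the last known unsigned revision: check the SHA */
		printf("needs check: %d\n", cur_rev <= 0x800126f);
		return 0;
	}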
*/ -static bool -__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize) +static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize) { u32 p_type, p_size; const u32 *hdr; @@ -484,10 +599,13 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc) } } -static int __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize) +static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev, + unsigned int psize) { unsigned long p_addr = (unsigned long)&mc->hdr.data_code; - u32 rev, dummy; + + if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize)) + return false; native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr); @@ -505,47 +623,13 @@ static int __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize) } /* verify patch application was successful */ - native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - - if (rev != mc->hdr.patch_id) - return -1; + *cur_rev = get_patch_level(); + if (*cur_rev != mc->hdr.patch_id) + return false; - return 0; + return true; } -/* - * Early load occurs before we can vmalloc(). So we look for the microcode - * patch container file in initrd, traverse equivalent cpu table, look for a - * matching microcode patch, and update, all in initrd memory in place. - * When vmalloc() is available for use later -- on 64-bit during first AP load, - * and on 32-bit during save_microcode_in_initrd_amd() -- we can call - * load_microcode_amd() to save equivalent cpu table and microcode patches in - * kernel heap memory. - * - * Returns true if container found (sets @desc), false otherwise. - */ -static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size) -{ - struct cont_desc desc = { 0 }; - struct microcode_amd *mc; - bool ret = false; - - scan_containers(ucode, size, &desc); - - mc = desc.mc; - if (!mc) - return ret; - - /* - * Allow application of the same revision to pick up SMT-specific - * changes even if the revision of the other SMT thread is already - * up-to-date. - */ - if (old_rev > mc->hdr.patch_id) - return ret; - - return !__apply_microcode_amd(mc, desc.psize); -} static bool get_builtin_microcode(struct cpio_data *cp) { @@ -569,64 +653,74 @@ static bool get_builtin_microcode(struct cpio_data *cp) return false; } -static void __init find_blobs_in_containers(struct cpio_data *ret) +static bool __init find_blobs_in_containers(struct cpio_data *ret) { struct cpio_data cp; + bool found; if (!get_builtin_microcode(&cp)) cp = find_microcode_in_initrd(ucode_path); - *ret = cp; + found = cp.data && cp.size; + if (found) + *ret = cp; + + return found; } +/* + * Early load occurs before we can vmalloc(). So we look for the microcode + * patch container file in initrd, traverse equivalent cpu table, look for a + * matching microcode patch, and update, all in initrd memory in place. + * When vmalloc() is available for use later -- on 64-bit during first AP load, + * and on 32-bit during save_microcode_in_initrd() -- we can call + * load_microcode_amd() to save equivalent cpu table and microcode patches in + * kernel heap memory. 
+ */ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax) { + struct cont_desc desc = { }; + struct microcode_amd *mc; struct cpio_data cp = { }; - u32 dummy; + char buf[4]; + u32 rev; + + if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) { + if (!strncmp(buf, "off", 3)) { + sha_check = false; + pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n"); + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + } + } bsp_cpuid_1_eax = cpuid_1_eax; - native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy); + rev = get_patch_level(); + ed->old_rev = rev; /* Needed in load_microcode_amd() */ ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax; - find_blobs_in_containers(&cp); - if (!(cp.data && cp.size)) + if (!find_blobs_in_containers(&cp)) return; - if (early_apply_microcode(ed->old_rev, cp.data, cp.size)) - native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy); -} - -static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size); - -static int __init save_microcode_in_initrd(void) -{ - unsigned int cpuid_1_eax = native_cpuid_eax(1); - struct cpuinfo_x86 *c = &boot_cpu_data; - struct cont_desc desc = { 0 }; - enum ucode_state ret; - struct cpio_data cp; - - if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) - return 0; - - find_blobs_in_containers(&cp); - if (!(cp.data && cp.size)) - return -EINVAL; - scan_containers(cp.data, cp.size, &desc); - if (!desc.mc) - return -EINVAL; - ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size); - if (ret > UCODE_UPDATED) - return -EINVAL; + mc = desc.mc; + if (!mc) + return; - return 0; + /* + * Allow application of the same revision to pick up SMT-specific + * changes even if the revision of the other SMT thread is already + * up-to-date. 
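Usage note for the command-line handling added in load_ucode_amd_bsp() above: the digest enforcement can only be disabled explicitly at early boot, and doing so taints the kernel:

	microcode.amd_sha_check=off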
+ */ + if (ed->old_rev > mc->hdr.patch_id) + return; + + if (__apply_microcode_amd(mc, &rev, desc.psize)) + ed->new_rev = rev; } -early_initcall(save_microcode_in_initrd); static inline bool patch_cpus_equivalent(struct ucode_patch *p, struct ucode_patch *n, @@ -727,14 +821,9 @@ static void free_cache(void) static struct ucode_patch *find_patch(unsigned int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - u32 rev, dummy __always_unused; u16 equiv_id = 0; - /* fetch rev if not populated yet: */ - if (!uci->cpu_sig.rev) { - rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - uci->cpu_sig.rev = rev; - } + uci->cpu_sig.rev = get_patch_level(); if (x86_family(bsp_cpuid_1_eax) < 0x17) { equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig); @@ -757,22 +846,20 @@ void reload_ucode_amd(unsigned int cpu) mc = p->data; - rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - + rev = get_patch_level(); if (rev < mc->hdr.patch_id) { - if (!__apply_microcode_amd(mc, p->size)) - pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id); + if (__apply_microcode_amd(mc, &rev, p->size)) + pr_info_once("reload revision: 0x%08x\n", rev); } } static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) { - struct cpuinfo_x86 *c = &cpu_data(cpu); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct ucode_patch *p; csig->sig = cpuid_eax(0x00000001); - csig->rev = c->microcode; + csig->rev = get_patch_level(); /* * a patch could have been loaded early, set uci->mc so that @@ -813,7 +900,7 @@ static enum ucode_state apply_microcode_amd(int cpu) goto out; } - if (__apply_microcode_amd(mc_amd, p->size)) { + if (!__apply_microcode_amd(mc_amd, &rev, p->size)) { pr_err("CPU%d: update failed for patch_level=0x%08x\n", cpu, mc_amd->hdr.patch_id); return UCODE_ERROR; @@ -935,8 +1022,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, } /* Scan the blob in @data and add microcode patches to the cache. */ -static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, - size_t size) +static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size) { u8 *fw = (u8 *)data; size_t offset; @@ -994,7 +1080,7 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz if (ret != UCODE_OK) return ret; - for_each_node(nid) { + for_each_node_with_cpus(nid) { cpu = cpumask_first(cpumask_of_node(nid)); c = &cpu_data(cpu); @@ -1011,6 +1097,32 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz return ret; } +static int __init save_microcode_in_initrd(void) +{ + unsigned int cpuid_1_eax = native_cpuid_eax(1); + struct cpuinfo_x86 *c = &boot_cpu_data; + struct cont_desc desc = { 0 }; + enum ucode_state ret; + struct cpio_data cp; + + if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) + return 0; + + if (!find_blobs_in_containers(&cp)) + return -EINVAL; + + scan_containers(cp.data, cp.size, &desc); + if (!desc.mc) + return -EINVAL; + + ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size); + if (ret > UCODE_UPDATED) + return -EINVAL; + + return 0; +} +early_initcall(save_microcode_in_initrd); + /* * AMD microcode firmware naming convention, up to family 15h they are in * the legacy file: diff --git a/arch/x86/kernel/cpu/microcode/amd_shas.c b/arch/x86/kernel/cpu/microcode/amd_shas.c new file mode 100644 index 0000000000000..2a1655b1fdd88 --- /dev/null +++ b/arch/x86/kernel/cpu/microcode/amd_shas.c @@ -0,0 +1,444 @@ +/* Keep 'em sorted. 
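The "Keep 'em sorted." requirement exists because cmp_id() is a bsearch() comparator: the lookup in verify_sha256_digest() assumes phashes[] is in ascending patch_id order, so an out-of-order entry would silently become unfindable. A hypothetical self-check one could compile into a debug build:

	/* Hypothetical helper: confirm phashes[] is strictly ascending by patch_id. */
	static bool phashes_sorted(void)
	{
		size_t i;

		for (i = 1; i < ARRAY_SIZE(phashes); i++)
			if (phashes[i - 1].patch_id >= phashes[i].patch_id)
				return false;
		return true;
	}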
*/ +static const struct patch_digest phashes[] = { + { 0x8001227, { + 0x99,0xc0,0x9b,0x2b,0xcc,0x9f,0x52,0x1b, + 0x1a,0x5f,0x1d,0x83,0xa1,0x6c,0xc4,0x46, + 0xe2,0x6c,0xda,0x73,0xfb,0x2d,0x23,0xa8, + 0x77,0xdc,0x15,0x31,0x33,0x4a,0x46,0x18, + } + }, + { 0x8001250, { + 0xc0,0x0b,0x6b,0x19,0xfd,0x5c,0x39,0x60, + 0xd5,0xc3,0x57,0x46,0x54,0xe4,0xd1,0xaa, + 0xa8,0xf7,0x1f,0xa8,0x6a,0x60,0x3e,0xe3, + 0x27,0x39,0x8e,0x53,0x30,0xf8,0x49,0x19, + } + }, + { 0x800126e, { + 0xf3,0x8b,0x2b,0xb6,0x34,0xe3,0xc8,0x2c, + 0xef,0xec,0x63,0x6d,0xc8,0x76,0x77,0xb3, + 0x25,0x5a,0xb7,0x52,0x8c,0x83,0x26,0xe6, + 0x4c,0xbe,0xbf,0xe9,0x7d,0x22,0x6a,0x43, + } + }, + { 0x800126f, { + 0x2b,0x5a,0xf2,0x9c,0xdd,0xd2,0x7f,0xec, + 0xec,0x96,0x09,0x57,0xb0,0x96,0x29,0x8b, + 0x2e,0x26,0x91,0xf0,0x49,0x33,0x42,0x18, + 0xdd,0x4b,0x65,0x5a,0xd4,0x15,0x3d,0x33, + } + }, + { 0x800820d, { + 0x68,0x98,0x83,0xcd,0x22,0x0d,0xdd,0x59, + 0x73,0x2c,0x5b,0x37,0x1f,0x84,0x0e,0x67, + 0x96,0x43,0x83,0x0c,0x46,0x44,0xab,0x7c, + 0x7b,0x65,0x9e,0x57,0xb5,0x90,0x4b,0x0e, + } + }, + { 0x8301025, { + 0xe4,0x7d,0xdb,0x1e,0x14,0xb4,0x5e,0x36, + 0x8f,0x3e,0x48,0x88,0x3c,0x6d,0x76,0xa1, + 0x59,0xc6,0xc0,0x72,0x42,0xdf,0x6c,0x30, + 0x6f,0x0b,0x28,0x16,0x61,0xfc,0x79,0x77, + } + }, + { 0x8301055, { + 0x81,0x7b,0x99,0x1b,0xae,0x2d,0x4f,0x9a, + 0xef,0x13,0xce,0xb5,0x10,0xaf,0x6a,0xea, + 0xe5,0xb0,0x64,0x98,0x10,0x68,0x34,0x3b, + 0x9d,0x7a,0xd6,0x22,0x77,0x5f,0xb3,0x5b, + } + }, + { 0x8301072, { + 0xcf,0x76,0xa7,0x1a,0x49,0xdf,0x2a,0x5e, + 0x9e,0x40,0x70,0xe5,0xdd,0x8a,0xa8,0x28, + 0x20,0xdc,0x91,0xd8,0x2c,0xa6,0xa0,0xb1, + 0x2d,0x22,0x26,0x94,0x4b,0x40,0x85,0x30, + } + }, + { 0x830107a, { + 0x2a,0x65,0x8c,0x1a,0x5e,0x07,0x21,0x72, + 0xdf,0x90,0xa6,0x51,0x37,0xd3,0x4b,0x34, + 0xc4,0xda,0x03,0xe1,0x8a,0x6c,0xfb,0x20, + 0x04,0xb2,0x81,0x05,0xd4,0x87,0xf4,0x0a, + } + }, + { 0x830107b, { + 0xb3,0x43,0x13,0x63,0x56,0xc1,0x39,0xad, + 0x10,0xa6,0x2b,0xcc,0x02,0xe6,0x76,0x2a, + 0x1e,0x39,0x58,0x3e,0x23,0x6e,0xa4,0x04, + 0x95,0xea,0xf9,0x6d,0xc2,0x8a,0x13,0x19, + } + }, + { 0x830107c, { + 0x21,0x64,0xde,0xfb,0x9f,0x68,0x96,0x47, + 0x70,0x5c,0xe2,0x8f,0x18,0x52,0x6a,0xac, + 0xa4,0xd2,0x2e,0xe0,0xde,0x68,0x66,0xc3, + 0xeb,0x1e,0xd3,0x3f,0xbc,0x51,0x1d,0x38, + } + }, + { 0x860010d, { + 0x86,0xb6,0x15,0x83,0xbc,0x3b,0x9c,0xe0, + 0xb3,0xef,0x1d,0x99,0x84,0x35,0x15,0xf7, + 0x7c,0x2a,0xc6,0x42,0xdb,0x73,0x07,0x5c, + 0x7d,0xc3,0x02,0xb5,0x43,0x06,0x5e,0xf8, + } + }, + { 0x8608108, { + 0x14,0xfe,0x57,0x86,0x49,0xc8,0x68,0xe2, + 0x11,0xa3,0xcb,0x6e,0xff,0x6e,0xd5,0x38, + 0xfe,0x89,0x1a,0xe0,0x67,0xbf,0xc4,0xcc, + 0x1b,0x9f,0x84,0x77,0x2b,0x9f,0xaa,0xbd, + } + }, + { 0x8701034, { + 0xc3,0x14,0x09,0xa8,0x9c,0x3f,0x8d,0x83, + 0x9b,0x4c,0xa5,0xb7,0x64,0x8b,0x91,0x5d, + 0x85,0x6a,0x39,0x26,0x1e,0x14,0x41,0xa8, + 0x75,0xea,0xa6,0xf9,0xc9,0xd1,0xea,0x2b, + } + }, + { 0x8a00008, { + 0xd7,0x2a,0x93,0xdc,0x05,0x2f,0xa5,0x6e, + 0x0c,0x61,0x2c,0x07,0x9f,0x38,0xe9,0x8e, + 0xef,0x7d,0x2a,0x05,0x4d,0x56,0xaf,0x72, + 0xe7,0x56,0x47,0x6e,0x60,0x27,0xd5,0x8c, + } + }, + { 0x8a0000a, { + 0x73,0x31,0x26,0x22,0xd4,0xf9,0xee,0x3c, + 0x07,0x06,0xe7,0xb9,0xad,0xd8,0x72,0x44, + 0x33,0x31,0xaa,0x7d,0xc3,0x67,0x0e,0xdb, + 0x47,0xb5,0xaa,0xbc,0xf5,0xbb,0xd9,0x20, + } + }, + { 0xa00104c, { + 0x3c,0x8a,0xfe,0x04,0x62,0xd8,0x6d,0xbe, + 0xa7,0x14,0x28,0x64,0x75,0xc0,0xa3,0x76, + 0xb7,0x92,0x0b,0x97,0x0a,0x8e,0x9c,0x5b, + 0x1b,0xc8,0x9d,0x3a,0x1e,0x81,0x3d,0x3b, + } + }, + { 0xa00104e, { + 0xc4,0x35,0x82,0x67,0xd2,0x86,0xe5,0xb2, + 0xfd,0x69,0x12,0x38,0xc8,0x77,0xba,0xe0, + 
0x70,0xf9,0x77,0x89,0x10,0xa6,0x74,0x4e, + 0x56,0x58,0x13,0xf5,0x84,0x70,0x28,0x0b, + } + }, + { 0xa001053, { + 0x92,0x0e,0xf4,0x69,0x10,0x3b,0xf9,0x9d, + 0x31,0x1b,0xa6,0x99,0x08,0x7d,0xd7,0x25, + 0x7e,0x1e,0x89,0xba,0x35,0x8d,0xac,0xcb, + 0x3a,0xb4,0xdf,0x58,0x12,0xcf,0xc0,0xc3, + } + }, + { 0xa001058, { + 0x33,0x7d,0xa9,0xb5,0x4e,0x62,0x13,0x36, + 0xef,0x66,0xc9,0xbd,0x0a,0xa6,0x3b,0x19, + 0xcb,0xf5,0xc2,0xc3,0x55,0x47,0x20,0xec, + 0x1f,0x7b,0xa1,0x44,0x0e,0x8e,0xa4,0xb2, + } + }, + { 0xa001075, { + 0x39,0x02,0x82,0xd0,0x7c,0x26,0x43,0xe9, + 0x26,0xa3,0xd9,0x96,0xf7,0x30,0x13,0x0a, + 0x8a,0x0e,0xac,0xe7,0x1d,0xdc,0xe2,0x0f, + 0xcb,0x9e,0x8d,0xbc,0xd2,0xa2,0x44,0xe0, + } + }, + { 0xa001078, { + 0x2d,0x67,0xc7,0x35,0xca,0xef,0x2f,0x25, + 0x4c,0x45,0x93,0x3f,0x36,0x01,0x8c,0xce, + 0xa8,0x5b,0x07,0xd3,0xc1,0x35,0x3c,0x04, + 0x20,0xa2,0xfc,0xdc,0xe6,0xce,0x26,0x3e, + } + }, + { 0xa001079, { + 0x43,0xe2,0x05,0x9c,0xfd,0xb7,0x5b,0xeb, + 0x5b,0xe9,0xeb,0x3b,0x96,0xf4,0xe4,0x93, + 0x73,0x45,0x3e,0xac,0x8d,0x3b,0xe4,0xdb, + 0x10,0x31,0xc1,0xe4,0xa2,0xd0,0x5a,0x8a, + } + }, + { 0xa00107a, { + 0x5f,0x92,0xca,0xff,0xc3,0x59,0x22,0x5f, + 0x02,0xa0,0x91,0x3b,0x4a,0x45,0x10,0xfd, + 0x19,0xe1,0x8a,0x6d,0x9a,0x92,0xc1,0x3f, + 0x75,0x78,0xac,0x78,0x03,0x1d,0xdb,0x18, + } + }, + { 0xa001143, { + 0x56,0xca,0xf7,0x43,0x8a,0x4c,0x46,0x80, + 0xec,0xde,0xe5,0x9c,0x50,0x84,0x9a,0x42, + 0x27,0xe5,0x51,0x84,0x8f,0x19,0xc0,0x8d, + 0x0c,0x25,0xb4,0xb0,0x8f,0x10,0xf3,0xf8, + } + }, + { 0xa001144, { + 0x42,0xd5,0x9b,0xa7,0xd6,0x15,0x29,0x41, + 0x61,0xc4,0x72,0x3f,0xf3,0x06,0x78,0x4b, + 0x65,0xf3,0x0e,0xfa,0x9c,0x87,0xde,0x25, + 0xbd,0xb3,0x9a,0xf4,0x75,0x13,0x53,0xdc, + } + }, + { 0xa00115d, { + 0xd4,0xc4,0x49,0x36,0x89,0x0b,0x47,0xdd, + 0xfb,0x2f,0x88,0x3b,0x5f,0xf2,0x8e,0x75, + 0xc6,0x6c,0x37,0x5a,0x90,0x25,0x94,0x3e, + 0x36,0x9c,0xae,0x02,0x38,0x6c,0xf5,0x05, + } + }, + { 0xa001173, { + 0x28,0xbb,0x9b,0xd1,0xa0,0xa0,0x7e,0x3a, + 0x59,0x20,0xc0,0xa9,0xb2,0x5c,0xc3,0x35, + 0x53,0x89,0xe1,0x4c,0x93,0x2f,0x1d,0xc3, + 0xe5,0xf7,0xf3,0xc8,0x9b,0x61,0xaa,0x9e, + } + }, + { 0xa0011a8, { + 0x97,0xc6,0x16,0x65,0x99,0xa4,0x85,0x3b, + 0xf6,0xce,0xaa,0x49,0x4a,0x3a,0xc5,0xb6, + 0x78,0x25,0xbc,0x53,0xaf,0x5d,0xcf,0xf4, + 0x23,0x12,0xbb,0xb1,0xbc,0x8a,0x02,0x2e, + } + }, + { 0xa0011ce, { + 0xcf,0x1c,0x90,0xa3,0x85,0x0a,0xbf,0x71, + 0x94,0x0e,0x80,0x86,0x85,0x4f,0xd7,0x86, + 0xae,0x38,0x23,0x28,0x2b,0x35,0x9b,0x4e, + 0xfe,0xb8,0xcd,0x3d,0x3d,0x39,0xc9,0x6a, + } + }, + { 0xa0011d1, { + 0xdf,0x0e,0xca,0xde,0xf6,0xce,0x5c,0x1e, + 0x4c,0xec,0xd7,0x71,0x83,0xcc,0xa8,0x09, + 0xc7,0xc5,0xfe,0xb2,0xf7,0x05,0xd2,0xc5, + 0x12,0xdd,0xe4,0xf3,0x92,0x1c,0x3d,0xb8, + } + }, + { 0xa0011d3, { + 0x91,0xe6,0x10,0xd7,0x57,0xb0,0x95,0x0b, + 0x9a,0x24,0xee,0xf7,0xcf,0x56,0xc1,0xa6, + 0x4a,0x52,0x7d,0x5f,0x9f,0xdf,0xf6,0x00, + 0x65,0xf7,0xea,0xe8,0x2a,0x88,0xe2,0x26, + } + }, + { 0xa0011d5, { + 0xed,0x69,0x89,0xf4,0xeb,0x64,0xc2,0x13, + 0xe0,0x51,0x1f,0x03,0x26,0x52,0x7d,0xb7, + 0x93,0x5d,0x65,0xca,0xb8,0x12,0x1d,0x62, + 0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21, + } + }, + { 0xa001223, { + 0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8, + 0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4, + 0x83,0x75,0x94,0xdd,0xeb,0x7e,0xb7,0x15, + 0x8e,0x3b,0x50,0x29,0x8a,0x9c,0xcc,0x45, + } + }, + { 0xa001224, { + 0x0e,0x0c,0xdf,0xb4,0x89,0xee,0x35,0x25, + 0xdd,0x9e,0xdb,0xc0,0x69,0x83,0x0a,0xad, + 0x26,0xa9,0xaa,0x9d,0xfc,0x3c,0xea,0xf9, + 0x6c,0xdc,0xd5,0x6d,0x8b,0x6e,0x85,0x4a, + } + }, + { 0xa001227, { + 0xab,0xc6,0x00,0x69,0x4b,0x50,0x87,0xad, + 
0x5f,0x0e,0x8b,0xea,0x57,0x38,0xce,0x1d, + 0x0f,0x75,0x26,0x02,0xf6,0xd6,0x96,0xe9, + 0x87,0xb9,0xd6,0x20,0x27,0x7c,0xd2,0xe0, + } + }, + { 0xa001229, { + 0x7f,0x49,0x49,0x48,0x46,0xa5,0x50,0xa6, + 0x28,0x89,0x98,0xe2,0x9e,0xb4,0x7f,0x75, + 0x33,0xa7,0x04,0x02,0xe4,0x82,0xbf,0xb4, + 0xa5,0x3a,0xba,0x24,0x8d,0x31,0x10,0x1d, + } + }, + { 0xa00122e, { + 0x56,0x94,0xa9,0x5d,0x06,0x68,0xfe,0xaf, + 0xdf,0x7a,0xff,0x2d,0xdf,0x74,0x0f,0x15, + 0x66,0xfb,0x00,0xb5,0x51,0x97,0x9b,0xfa, + 0xcb,0x79,0x85,0x46,0x25,0xb4,0xd2,0x10, + } + }, + { 0xa001231, { + 0x0b,0x46,0xa5,0xfc,0x18,0x15,0xa0,0x9e, + 0xa6,0xdc,0xb7,0xff,0x17,0xf7,0x30,0x64, + 0xd4,0xda,0x9e,0x1b,0xc3,0xfc,0x02,0x3b, + 0xe2,0xc6,0x0e,0x41,0x54,0xb5,0x18,0xdd, + } + }, + { 0xa001234, { + 0x88,0x8d,0xed,0xab,0xb5,0xbd,0x4e,0xf7, + 0x7f,0xd4,0x0e,0x95,0x34,0x91,0xff,0xcc, + 0xfb,0x2a,0xcd,0xf7,0xd5,0xdb,0x4c,0x9b, + 0xd6,0x2e,0x73,0x50,0x8f,0x83,0x79,0x1a, + } + }, + { 0xa001236, { + 0x3d,0x30,0x00,0xb9,0x71,0xba,0x87,0x78, + 0xa8,0x43,0x55,0xc4,0x26,0x59,0xcf,0x9d, + 0x93,0xce,0x64,0x0e,0x8b,0x72,0x11,0x8b, + 0xa3,0x8f,0x51,0xe9,0xca,0x98,0xaa,0x25, + } + }, + { 0xa001238, { + 0x72,0xf7,0x4b,0x0c,0x7d,0x58,0x65,0xcc, + 0x00,0xcc,0x57,0x16,0x68,0x16,0xf8,0x2a, + 0x1b,0xb3,0x8b,0xe1,0xb6,0x83,0x8c,0x7e, + 0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59, + } + }, + { 0xa00820c, { + 0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3, + 0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63, + 0xf1,0x8c,0x88,0x45,0xd7,0x82,0x80,0xd1, + 0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2, + } + }, + { 0xa10113e, { + 0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10, + 0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0, + 0x15,0xe3,0x3f,0x4b,0x1d,0x0d,0x0a,0xd5, + 0xfa,0x90,0xc4,0xed,0x9d,0x90,0xaf,0x53, + } + }, + { 0xa101144, { + 0xb3,0x0b,0x26,0x9a,0xf8,0x7c,0x02,0x26, + 0x35,0x84,0x53,0xa4,0xd3,0x2c,0x7c,0x09, + 0x68,0x7b,0x96,0xb6,0x93,0xef,0xde,0xbc, + 0xfd,0x4b,0x15,0xd2,0x81,0xd3,0x51,0x47, + } + }, + { 0xa101148, { + 0x20,0xd5,0x6f,0x40,0x4a,0xf6,0x48,0x90, + 0xc2,0x93,0x9a,0xc2,0xfd,0xac,0xef,0x4f, + 0xfa,0xc0,0x3d,0x92,0x3c,0x6d,0x01,0x08, + 0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4, + } + }, + { 0xa10123e, { + 0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18, + 0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d, + 0x1d,0x13,0x53,0x63,0xfe,0x42,0x6f,0xfc, + 0x19,0x0f,0xf1,0xfc,0xa7,0xdd,0x89,0x1b, + } + }, + { 0xa101244, { + 0x71,0x56,0xb5,0x9f,0x21,0xbf,0xb3,0x3c, + 0x8c,0xd7,0x36,0xd0,0x34,0x52,0x1b,0xb1, + 0x46,0x2f,0x04,0xf0,0x37,0xd8,0x1e,0x72, + 0x24,0xa2,0x80,0x84,0x83,0x65,0x84,0xc0, + } + }, + { 0xa101248, { + 0xed,0x3b,0x95,0xa6,0x68,0xa7,0x77,0x3e, + 0xfc,0x17,0x26,0xe2,0x7b,0xd5,0x56,0x22, + 0x2c,0x1d,0xef,0xeb,0x56,0xdd,0xba,0x6e, + 0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75, + } + }, + { 0xa108108, { + 0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9, + 0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6, + 0xf5,0xd4,0x3f,0x7b,0x14,0xd5,0x60,0x2c, + 0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16, + } + }, + { 0xa20102d, { + 0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11, + 0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89, + 0x8b,0x89,0x2f,0xb5,0xbb,0x82,0xef,0x23, + 0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4, + } + }, + { 0xa201210, { + 0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe, + 0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9, + 0x6d,0x3d,0x0e,0x6b,0xa7,0xac,0xe3,0x68, + 0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41, + } + }, + { 0xa404107, { + 0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45, + 0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0, + 0x8b,0x0d,0x9f,0xf9,0x3a,0xdf,0xc6,0x81, + 0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99, + } + }, + { 0xa500011, { + 
0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4, + 0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1, + 0xd7,0x5b,0x65,0x3a,0x7d,0xab,0xdf,0xa2, + 0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74, + } + }, + { 0xa601209, { + 0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32, + 0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30, + 0x15,0x86,0xcc,0x5d,0x97,0x0f,0xc0,0x46, + 0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d, + } + }, + { 0xa704107, { + 0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6, + 0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93, + 0x2a,0xad,0x8e,0x6b,0xea,0x9b,0xb7,0xc2, + 0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39, + } + }, + { 0xa705206, { + 0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4, + 0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7, + 0x67,0x6f,0x04,0x18,0xae,0x20,0x87,0x4b, + 0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc, + } + }, + { 0xa708007, { + 0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3, + 0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2, + 0x07,0xaa,0x3a,0xe0,0x57,0x13,0x72,0x80, + 0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93, + } + }, + { 0xa70c005, { + 0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b, + 0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f, + 0x1f,0x1f,0xf1,0x97,0xeb,0xfe,0x56,0x55, + 0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13, + } + }, + { 0xaa00116, { + 0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63, + 0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5, + 0xfe,0x1d,0x5e,0x65,0xc7,0xaa,0x92,0x4d, + 0x91,0xee,0x76,0xbb,0x4c,0x66,0x78,0xc9, + } + }, + { 0xaa00212, { + 0xbd,0x57,0x5d,0x0a,0x0a,0x30,0xc1,0x75, + 0x95,0x58,0x5e,0x93,0x02,0x28,0x43,0x71, + 0xed,0x42,0x29,0xc8,0xec,0x34,0x2b,0xb2, + 0x1a,0x65,0x4b,0xfe,0x07,0x0f,0x34,0xa1, + } + }, + { 0xaa00213, { + 0xed,0x58,0xb7,0x76,0x81,0x7f,0xd9,0x3a, + 0x1a,0xff,0x8b,0x34,0xb8,0x4a,0x99,0x0f, + 0x28,0x49,0x6c,0x56,0x2b,0xdc,0xb7,0xed, + 0x96,0xd5,0x9d,0xc1,0x7a,0xd4,0x51,0x9b, + } + }, + { 0xaa00215, { + 0x55,0xd3,0x28,0xcb,0x87,0xa9,0x32,0xe9, + 0x4e,0x85,0x4b,0x7c,0x6b,0xd5,0x7c,0xd4, + 0x1b,0x51,0x71,0x3a,0x0e,0x0b,0xdc,0x9b, + 0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef, + } + }, +}; diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 21776c529fa97..5df621752fefa 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -100,14 +100,12 @@ extern bool force_minrev; #ifdef CONFIG_CPU_SUP_AMD void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family); void load_ucode_amd_ap(unsigned int family); -int save_microcode_in_initrd_amd(unsigned int family); void reload_ucode_amd(unsigned int cpu); struct microcode_ops *init_amd_microcode(void); void exit_amd_microcode(void); #else /* CONFIG_CPU_SUP_AMD */ static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { } static inline void load_ucode_amd_ap(unsigned int family) { } -static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } static inline void reload_ucode_amd(unsigned int cpu) { } static inline struct microcode_ops *init_amd_microcode(void) { return NULL; } static inline void exit_amd_microcode(void) { } diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index d7163b764c626..2d48db66fca85 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -148,7 +148,8 @@ static int closid_alloc(void) lockdep_assert_held(&rdtgroup_mutex); - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) && + is_llc_occupancy_enabled()) { cleanest_closid = resctrl_find_cleanest_closid(); if 
(cleanest_closid < 0) return cleanest_closid; diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c index 22b65a5f5ec6c..7f8d1e11dbee2 100644 --- a/arch/x86/kernel/cpu/sgx/driver.c +++ b/arch/x86/kernel/cpu/sgx/driver.c @@ -150,13 +150,15 @@ int __init sgx_drv_init(void) u64 xfrm_mask; int ret; - if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) + if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) { + pr_info("SGX disabled: SGX launch control CPU feature is not available, /dev/sgx_enclave disabled.\n"); return -ENODEV; + } cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx); if (!(eax & 1)) { - pr_err("SGX disabled: SGX1 instruction support not available.\n"); + pr_info("SGX disabled: SGX1 instruction support not available, /dev/sgx_enclave disabled.\n"); return -ENODEV; } @@ -173,8 +175,10 @@ int __init sgx_drv_init(void) } ret = misc_register(&sgx_dev_enclave); - if (ret) + if (ret) { + pr_info("SGX disabled: Unable to register the /dev/sgx_enclave driver (%d).\n", ret); return ret; + } return 0; } diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c index b65ab214bdf57..776a20172867e 100644 --- a/arch/x86/kernel/cpu/sgx/ioctl.c +++ b/arch/x86/kernel/cpu/sgx/ioctl.c @@ -64,6 +64,13 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs) struct file *backing; long ret; + /* + * ECREATE would detect this too, but checking here also ensures + * that the 'encl_size' calculations below can never overflow. + */ + if (!is_power_of_2(secs->size)) + return -EINVAL; + va_page = sgx_encl_grow(encl, true); if (IS_ERR(va_page)) return PTR_ERR(va_page); diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 00189cdeb775f..cb3f900c46fcc 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -429,6 +430,9 @@ static void __init vmware_platform_setup(void) pr_warn("Failed to get TSC freq from the hypervisor\n"); } + if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !efi_enabled(EFI_BOOT)) + x86_init.mpparse.find_mptable = mpparse_find_mptable; + vmware_paravirt_ops_setup(); #ifdef CONFIG_X86_IO_APIC diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 59d23cdf4ed0f..dd8748c45529a 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -2,6 +2,7 @@ /* * Architecture specific OF callbacks. 
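For reference on the SGX ioctl hunk above: is_power_of_2() is the standard test from include/linux/log2.h, and requiring SECS.size to pass it up front keeps the subsequent encl_size arithmetic overflow-free, exactly as the added comment says:

	/* Equivalent of is_power_of_2() from include/linux/log2.h: */
	static inline bool is_power_of_2(unsigned long n)
	{
		return n != 0 && (n & (n - 1)) == 0;
	}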
*/ +#include #include #include #include @@ -313,6 +314,6 @@ void __init x86_flattree_get_config(void) if (initial_dtb) early_memunmap(dt, map_len); #endif - if (of_have_populated_dt()) + if (acpi_disabled && of_have_populated_dt()) x86_init.mpparse.parse_smp_cfg = x86_dtb_parse_smp_config; } diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index a7d562697e50e..b2b118a8c09be 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -195,6 +195,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, printk("%sCall Trace:\n", log_lvl); unwind_start(&state, task, regs, stack); + stack = stack ?: get_stack_pointer(task, regs); regs = unwind_get_entry_regs(&state, &partial); /* @@ -213,9 +214,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, * - hardirq stack * - entry stack */ - for (stack = stack ?: get_stack_pointer(task, regs); - stack; - stack = stack_info.next_sp) { + for (; stack; stack = stack_info.next_sp) { const char *stack_name; stack = PTR_ALIGN(stack, sizeof(long)); diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 4893d30ce4384..b4746eb8b1152 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -754,22 +754,21 @@ void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len) void __init e820__register_nosave_regions(unsigned long limit_pfn) { int i; - unsigned long pfn = 0; + u64 last_addr = 0; for (i = 0; i < e820_table->nr_entries; i++) { struct e820_entry *entry = &e820_table->entries[i]; - if (pfn < PFN_UP(entry->addr)) - register_nosave_region(pfn, PFN_UP(entry->addr)); - - pfn = PFN_DOWN(entry->addr + entry->size); - if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN) - register_nosave_region(PFN_UP(entry->addr), pfn); + continue; - if (pfn >= limit_pfn) - break; + if (last_addr < entry->addr) + register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr)); + + last_addr = entry->addr + entry->size; } + + register_nosave_region(PFN_DOWN(last_addr), limit_pfn); } #ifdef CONFIG_ACPI diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 1209c7aebb211..dcac3c058fb76 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -220,7 +220,7 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) struct fpstate *fpstate; unsigned int size; - size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64); + size = fpu_kernel_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64); fpstate = vzalloc(size); if (!fpstate) return false; @@ -232,8 +232,8 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) fpstate->is_guest = true; gfpu->fpstate = fpstate; - gfpu->xfeatures = fpu_user_cfg.default_features; - gfpu->perm = fpu_user_cfg.default_features; + gfpu->xfeatures = fpu_kernel_cfg.default_features; + gfpu->perm = fpu_kernel_cfg.default_features; /* * KVM sets the FP+SSE bits in the XSAVE header when copying FPU state diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 385e3a5fc3045..feca4f20b06aa 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -25,8 +25,10 @@ #include #include +#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_THERMAL_VECTOR) #define CREATE_TRACE_POINTS #include +#endif DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); EXPORT_PER_CPU_SYMBOL(irq_stat); diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index fec3815335558..0c1b915d7efac 100644 --- a/arch/x86/kernel/paravirt.c +++ 
b/arch/x86/kernel/paravirt.c @@ -100,6 +100,11 @@ int paravirt_disable_iospace(void) return request_resource(&ioport_resource, &reserve_ioports); } +static noinstr void pv_native_safe_halt(void) +{ + native_safe_halt(); +} + #ifdef CONFIG_PARAVIRT_XXL static noinstr void pv_native_write_cr2(unsigned long val) { @@ -121,10 +126,6 @@ noinstr void pv_native_wbinvd(void) native_wbinvd(); } -static noinstr void pv_native_safe_halt(void) -{ - native_safe_halt(); -} #endif struct pv_info pv_info = { @@ -182,9 +183,11 @@ struct paravirt_patch_template pv_ops = { .irq.save_fl = __PV_IS_CALLEE_SAVE(pv_native_save_fl), .irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable), .irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable), +#endif /* CONFIG_PARAVIRT_XXL */ + + /* Irq HLT ops. */ .irq.safe_halt = pv_native_safe_halt, .irq.halt = native_halt, -#endif /* CONFIG_PARAVIRT_XXL */ /* Mmu ops. */ .mmu.flush_tlb_user = native_flush_tlb_local, diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 15507e739c255..c7ce3655b7078 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -92,7 +92,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid); */ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { - memcpy(dst, src, arch_task_struct_size); + /* init_task is not dynamically sized (incomplete FPU state) */ + if (unlikely(src == &init_task)) + memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(init_task), 0); + else + memcpy(dst, src, arch_task_struct_size); + #ifdef CONFIG_VM86 dst->thread.vm86 = NULL; #endif @@ -933,7 +938,7 @@ void __init select_idle_routine(void) static_call_update(x86_idle, mwait_idle); } else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) { pr_info("using TDX aware idle routine\n"); - static_call_update(x86_idle, tdx_safe_halt); + static_call_update(x86_idle, tdx_halt); } else { static_call_update(x86_idle, default_idle); } diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index ef654530bf5a9..98123ff10506c 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -33,25 +33,55 @@ #include #include +/* + * The first GDT descriptor is reserved as 'NULL descriptor'. As bits 0 + * and 1 of a segment selector, i.e., the RPL bits, are NOT used to index + * the GDT, selector values 0~3 all point to the NULL descriptor, thus values + * 0, 1, 2 and 3 are all valid NULL selector values. + * + * However, IRET zeros ES, FS, GS, and DS segment registers if any of them + * is found to have any nonzero NULL selector value, which can be used by + * userspace in pre-FRED systems to spot any interrupt/exception by loading + * a nonzero NULL selector and waiting for it to become zero. Before FRED + * there was nothing software could do to prevent such an information leak. + * + * ERETU, the only legit instruction to return to userspace from the kernel + * under FRED, by design does NOT zero any segment register to avoid this + * problematic behavior. + * + * As such, leave NULL selector values 0~3 unchanged. + */ +static inline u16 fixup_rpl(u16 sel) +{ + return sel <= 3 ? sel : sel | 3; +} + #ifdef CONFIG_IA32_EMULATION #include static inline void reload_segments(struct sigcontext_32 *sc) { - unsigned int cur; + u16 cur; + /* + * Reload fs and gs if they have changed in the signal + * handler. This does not handle long fs/gs base changes in + * the handler, but does not clobber them at least in the + * normal case. 
+ */ savesegment(gs, cur); - if ((sc->gs | 0x03) != cur) - load_gs_index(sc->gs | 0x03); + if (fixup_rpl(sc->gs) != cur) + load_gs_index(fixup_rpl(sc->gs)); savesegment(fs, cur); - if ((sc->fs | 0x03) != cur) - loadsegment(fs, sc->fs | 0x03); + if (fixup_rpl(sc->fs) != cur) + loadsegment(fs, fixup_rpl(sc->fs)); + savesegment(ds, cur); - if ((sc->ds | 0x03) != cur) - loadsegment(ds, sc->ds | 0x03); + if (fixup_rpl(sc->ds) != cur) + loadsegment(ds, fixup_rpl(sc->ds)); savesegment(es, cur); - if ((sc->es | 0x03) != cur) - loadsegment(es, sc->es | 0x03); + if (fixup_rpl(sc->es) != cur) + loadsegment(es, fixup_rpl(sc->es)); } #define sigset32_t compat_sigset_t @@ -105,18 +135,12 @@ static bool ia32_restore_sigcontext(struct pt_regs *regs, regs->orig_ax = -1; #ifdef CONFIG_IA32_EMULATION - /* - * Reload fs and gs if they have changed in the signal - * handler. This does not handle long fs/gs base changes in - * the handler, but does not clobber them at least in the - * normal case. - */ reload_segments(&sc); #else - loadsegment(gs, sc.gs); - regs->fs = sc.fs; - regs->es = sc.es; - regs->ds = sc.ds; + loadsegment(gs, fixup_rpl(sc.gs)); + regs->fs = fixup_rpl(sc.fs); + regs->es = fixup_rpl(sc.es); + regs->ds = fixup_rpl(sc.ds); #endif return fpu__restore_sig(compat_ptr(sc.fpstate), 1); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 2dbadf347b5f4..5e3e036e6e537 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -379,6 +379,21 @@ __visible void __noreturn handle_stack_overflow(struct pt_regs *regs, } #endif +/* + * Prevent the compiler and/or objtool from marking the !CONFIG_X86_ESPFIX64 + * version of exc_double_fault() as noreturn. Otherwise the noreturn mismatch + * between configs triggers objtool warnings. + * + * This is a temporary hack until we have compiler or plugin support for + * annotating noreturns. + */ +#ifdef CONFIG_X86_ESPFIX64 +#define always_true() true +#else +bool always_true(void); +bool __weak always_true(void) { return true; } +#endif + /* * Runs on an IST stack for x86_64 and on a special task stack for x86_32. 
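Editorial note (illustration only, not part of the patch): the fixup_rpl() helper added in the arch/x86/kernel/signal_32.c hunk above keeps the four valid NULL selector values 0-3 intact and forces RPL = 3 on every other selector, where the old code unconditionally OR-ed in 0x3. A minimal user-space sketch of the same mapping, assuming nothing beyond what the comment describes:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same mapping as the kernel helper: NULL selectors 0-3 are left
 * untouched, everything else gets the RPL bits (bits 1:0) set to 3. */
static uint16_t fixup_rpl(uint16_t sel)
{
	return sel <= 3 ? sel : sel | 3;
}

int main(void)
{
	uint16_t sel;

	/* The old "sel | 3" would have turned NULL selector 0 into 3. */
	for (sel = 0; sel <= 3; sel++)
		assert(fixup_rpl(sel) == sel);

	/* A regular selector, e.g. GDT index 5 with RPL 0 (0x28), still
	 * has its RPL forced to user mode (0x2b). */
	assert(fixup_rpl(0x28) == 0x2b);
	printf("0x28 -> 0x%x\n", fixup_rpl(0x28));
	return 0;
}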
* @@ -514,7 +529,8 @@ DEFINE_IDTENTRY_DF(exc_double_fault) pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code); die("double fault", regs, error_code); - panic("Machine halted."); + if (always_true()) + panic("Machine halted."); instrumentation_end(); } diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index dfe6847fd99e5..310d8cdf7ca3a 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -956,7 +956,7 @@ static unsigned long long cyc2ns_suspend; void tsc_save_sched_clock_state(void) { - if (!sched_clock_stable()) + if (!static_branch_likely(&__use_tsc) && !sched_clock_stable()) return; cyc2ns_suspend = sched_clock(); @@ -976,7 +976,7 @@ void tsc_restore_sched_clock_state(void) unsigned long flags; int cpu; - if (!sched_clock_stable()) + if (!static_branch_likely(&__use_tsc) && !sched_clock_stable()) return; local_irq_save(flags); diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 5a952c5ea66bc..9194695662b26 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -357,19 +357,23 @@ void *arch_uprobe_trampoline(unsigned long *psize) return &insn; } -static unsigned long trampoline_check_ip(void) +static unsigned long trampoline_check_ip(unsigned long tramp) { - unsigned long tramp = uprobe_get_trampoline_vaddr(); - return tramp + (uretprobe_syscall_check - uretprobe_trampoline_entry); } SYSCALL_DEFINE0(uretprobe) { struct pt_regs *regs = task_pt_regs(current); - unsigned long err, ip, sp, r11_cx_ax[3]; + unsigned long err, ip, sp, r11_cx_ax[3], tramp; + + /* If there's no trampoline, we are called from the wrong place. */ + tramp = uprobe_get_trampoline_vaddr(); + if (unlikely(tramp == UPROBE_NO_TRAMPOLINE_VADDR)) + goto sigill; - if (regs->ip != trampoline_check_ip()) + /* Make sure the ip matches the only allowed sys_uretprobe caller. 
*/ + if (unlikely(regs->ip != trampoline_check_ip(tramp))) goto sigill; err = copy_from_user(r11_cx_ax, (void __user *)regs->sp, sizeof(r11_cx_ax)); diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 83bfecd1a6e40..c92e43f2d0c4e 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -1047,8 +1047,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) } break; case 0xa: { /* Architectural Performance Monitoring */ - union cpuid10_eax eax; - union cpuid10_edx edx; + union cpuid10_eax eax = { }; + union cpuid10_edx edx = { }; if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; @@ -1064,8 +1064,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) if (kvm_pmu_cap.version) edx.split.anythread_deprecated = 1; - edx.split.reserved1 = 0; - edx.split.reserved2 = 0; entry->eax = eax.full; entry->ebx = kvm_pmu_cap.events_mask; @@ -1383,11 +1381,11 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) break; /* AMD Extended Performance Monitoring and Debug */ case 0x80000022: { - union cpuid_0x80000022_ebx ebx; + union cpuid_0x80000022_ebx ebx = { }; entry->ecx = entry->edx = 0; if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) { - entry->eax = entry->ebx; + entry->eax = entry->ebx = 0; break; } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 375bbb9600d3c..1a8148dec4afe 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -816,6 +816,17 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) } } +void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu) +{ + struct kvm_lapic *apic = vcpu->arch.apic; + + if (WARN_ON_ONCE(!lapic_in_kernel(vcpu)) || !apic->apicv_active) + return; + + kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic)); +} +EXPORT_SYMBOL_GPL(kvm_apic_update_hwapic_isr); + int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu) { /* This may race with setting of irr in __apic_accept_irq() and diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 1b8ef9856422a..3aa599db77968 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -117,11 +117,10 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map); void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high); -u64 kvm_get_apic_base(struct kvm_vcpu *vcpu); int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info); int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s); int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s); -enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu); +void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu); int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu); u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu); @@ -271,6 +270,11 @@ static inline enum lapic_mode kvm_apic_mode(u64 apic_base) return apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); } +static inline enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu) +{ + return kvm_apic_mode(vcpu->arch.apic_base); +} + static inline u8 kvm_xapic_id(struct kvm_lapic *apic) { return kvm_lapic_get_reg(apic, APIC_ID) >> 24; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 19c96278ba755..9242c0649adf1 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -7589,7 +7589,7 @@ static void kvm_mmu_start_lpage_recovery(struct 
once *once) kvm_nx_huge_page_recovery_worker_kill, kvm, "kvm-nx-lpage-recovery"); - if (!nx_thread) + if (IS_ERR(nx_thread)) return; vhost_task_start(nx_thread); diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index e9af87b128140..6154cb450b448 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3957,16 +3957,12 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm) /* * The target vCPU is valid, so the vCPU will be kicked unless the - * request is for CREATE_ON_INIT. For any errors at this stage, the - * kick will place the vCPU in an non-runnable state. + * request is for CREATE_ON_INIT. */ kick = true; mutex_lock(&target_svm->sev_es.snp_vmsa_mutex); - target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE; - target_svm->sev_es.snp_ap_waiting_for_reset = true; - /* Interrupt injection mode shouldn't change for AP creation */ if (request < SVM_VMGEXIT_AP_DESTROY) { u64 sev_features; @@ -4012,20 +4008,23 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm) target_svm->sev_es.snp_vmsa_gpa = svm->vmcb->control.exit_info_2; break; case SVM_VMGEXIT_AP_DESTROY: + target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE; break; default: vcpu_unimpl(vcpu, "vmgexit: invalid AP creation request [%#x] from guest\n", request); ret = -EINVAL; - break; + goto out; } -out: + target_svm->sev_es.snp_ap_waiting_for_reset = true; + if (kick) { kvm_make_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu); kvm_vcpu_kick(target_vcpu); } +out: mutex_unlock(&target_svm->sev_es.snp_vmsa_mutex); return ret; @@ -4579,6 +4578,8 @@ void sev_es_vcpu_reset(struct vcpu_svm *svm) void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa) { + struct kvm *kvm = svm->vcpu.kvm; + /* * All host state for SEV-ES guests is categorized into three swap types * based on how it is handled by hardware during a world switch: @@ -4602,10 +4603,15 @@ void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_are /* * If DebugSwap is enabled, debug registers are loaded but NOT saved by - * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both - * saves and loads debug registers (Type-A). + * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU does + * not save or load debug registers. Sadly, on CPUs without + * ALLOWED_SEV_FEATURES, KVM can't prevent SNP guests from enabling + * DebugSwap on secondary vCPUs without KVM's knowledge via "AP Create". + * Save all registers if DebugSwap is supported to prevent host state + * from being clobbered by a misbehaving guest. */ - if (sev_vcpu_has_debug_swap(svm)) { + if (sev_vcpu_has_debug_swap(svm) || + (sev_snp_guest(kvm) && cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP))) { hostsa->dr0 = native_get_debugreg(0); hostsa->dr1 = native_get_debugreg(1); hostsa->dr2 = native_get_debugreg(2); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index a7cb7c82b38e3..e39ab7c0be4e9 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3167,6 +3167,27 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) kvm_pr_unimpl_wrmsr(vcpu, ecx, data); break; } + + /* + * AMD changed the architectural behavior of bits 5:2. On CPUs + * without BusLockTrap, bits 5:2 control "external pins", but + * on CPUs that support BusLockDetect, bit 2 enables BusLockTrap + * and bits 5:3 are reserved-to-zero. Sadly, old KVM allowed + * the guest to set bits 5:2 despite not actually virtualizing + * Performance-Monitoring/Breakpoint external pins. Drop bits + * 5:2 for backwards compatibility. 
+ */ + data &= ~GENMASK(5, 2); + + /* + * Suppress BTF as KVM doesn't virtualize BTF, but there's no + * way to communicate lack of support to the guest. + */ + if (data & DEBUGCTLMSR_BTF) { + kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data); + data &= ~DEBUGCTLMSR_BTF; + } + if (data & DEBUGCTL_RESERVED_BITS) return 1; @@ -4176,6 +4197,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in guest_state_enter_irqoff(); + /* + * Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of + * VMRUN controls whether or not physical IRQs are masked (KVM always + * runs with V_INTR_MASKING_MASK). Toggle RFLAGS.IF here to avoid the + * temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow + * into guest state if delivery of an event during VMRUN triggers a + * #VMEXIT, and the guest_state transitions already tell lockdep that + * IRQs are being enabled/disabled. Note! GIF=0 for the entirety of + * this path, so IRQs aren't actually unmasked while running host code. + */ + raw_local_irq_enable(); + amd_clear_divider(); if (sev_es_guest(vcpu->kvm)) @@ -4184,6 +4217,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in else __svm_vcpu_run(svm, spec_ctrl_intercepted); + raw_local_irq_disable(); + guest_state_exit_irqoff(); } @@ -4240,6 +4275,16 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, clgi(); kvm_load_guest_xsave_state(vcpu); + /* + * Hardware only context switches DEBUGCTL if LBR virtualization is + * enabled. Manually load DEBUGCTL if necessary (and restore it after + * VM-Exit), as running with the host's DEBUGCTL can negatively affect + * guest state and can even be fatal, e.g. due to Bus Lock Detect. + */ + if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) && + vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl) + update_debugctlmsr(svm->vmcb->save.dbgctl); + kvm_wait_lapic_expire(vcpu); /* @@ -4267,6 +4312,10 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) kvm_before_interrupt(vcpu, KVM_HANDLING_NMI); + if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) && + vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl) + update_debugctlmsr(vcpu->arch.host_debugctl); + kvm_load_host_xsave_state(vcpu); stgi(); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 43fa6a16eb191..d114efac7af78 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -591,7 +591,7 @@ static inline bool is_vnmi_enabled(struct vcpu_svm *svm) /* svm.c */ #define MSR_INVALID 0xffffffffU -#define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) +#define DEBUGCTL_RESERVED_BITS (~DEBUGCTLMSR_LBR) extern bool dump_invalid_vmcb; diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 2ed80aea3bb13..0c61153b275f6 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -170,12 +170,8 @@ SYM_FUNC_START(__svm_vcpu_run) mov VCPU_RDI(%_ASM_DI), %_ASM_DI /* Enter guest mode */ - sti - 3: vmrun %_ASM_AX 4: - cli - /* Pop @svm to RAX while it's the only available register. */ pop %_ASM_AX @@ -340,12 +336,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) mov KVM_VMCB_pa(%rax), %rax /* Enter guest mode */ - sti - 1: vmrun %rax - -2: cli - +2: /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! 
*/ FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 931a7361c30f2..22bee8a711442 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -5043,6 +5043,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); } + if (vmx->nested.update_vmcs01_hwapic_isr) { + vmx->nested.update_vmcs01_hwapic_isr = false; + kvm_apic_update_hwapic_isr(vcpu); + } + if ((vm_exit_reason != -1) && (enable_shadow_vmcs || nested_vmx_is_evmptr12_valid(vmx))) vmx->nested.need_vmcs12_to_shadow_sync = true; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index f06d443ec3c68..a3d45b01dbadf 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1515,16 +1515,12 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, */ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm)) shrink_ple_window(vcpu); vmx_vcpu_load_vmcs(vcpu, cpu, NULL); vmx_vcpu_pi_load(vcpu, cpu); - - vmx->host_debugctlmsr = get_debugctlmsr(); } void vmx_vcpu_put(struct kvm_vcpu *vcpu) @@ -6858,6 +6854,27 @@ void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) u16 status; u8 old; + /* + * If L2 is active, defer the SVI update until vmcs01 is loaded, as SVI + * is relevant if and only if Virtual Interrupt Delivery is + * enabled in vmcs12, and if VID is enabled then L2 EOIs affect L2's + * vAPIC, not L1's vAPIC. KVM must update vmcs01 on the next nested + * VM-Exit, otherwise L1 will run with a stale SVI. + */ + if (is_guest_mode(vcpu)) { + /* + * KVM is supposed to forward intercepted L2 EOIs to L1 if VID + * is enabled in vmcs12; as above, the EOIs affect L2's vAPIC. + * Note, userspace can stuff state while L2 is active; assert + * that VID is disabled if and only if the vCPU is in KVM_RUN + * to avoid false positives if userspace is setting APIC state. + */ + WARN_ON_ONCE(vcpu->wants_to_run && + nested_cpu_has_vid(get_vmcs12(vcpu))); + to_vmx(vcpu)->nested.update_vmcs01_hwapic_isr = true; + return; + } + if (max_isr == -1) max_isr = 0; @@ -7433,8 +7450,8 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit) } /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ - if (vmx->host_debugctlmsr) - update_debugctlmsr(vmx->host_debugctlmsr); + if (vcpu->arch.host_debugctl) + update_debugctlmsr(vcpu->arch.host_debugctl); #ifndef CONFIG_X86_64 /* diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 2325f773a20be..cf57fbf12104f 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -176,6 +176,7 @@ struct nested_vmx { bool reload_vmcs01_apic_access_page; bool update_vmcs01_cpu_dirty_logging; bool update_vmcs01_apicv_status; + bool update_vmcs01_hwapic_isr; /* * Enlightened VMCS has been enabled. It does not mean that L1 has to @@ -338,8 +339,6 @@ struct vcpu_vmx { /* apic deadline value in host tsc */ u64 hv_deadline_tsc; - unsigned long host_debugctlmsr; - /* * Only bits masked by msr_ia32_feature_control_valid_bits can be set in * msr_ia32_feature_control. 
FEAT_CTL_LOCKED is always included diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0846e3af5f6c5..1a4ca471d63df 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -667,17 +667,6 @@ static void drop_user_return_notifiers(void) kvm_on_user_return(&msrs->urn); } -u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.apic_base; -} - -enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu) -{ - return kvm_apic_mode(kvm_get_apic_base(vcpu)); -} -EXPORT_SYMBOL_GPL(kvm_get_apic_mode); - int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { enum lapic_mode old_mode = kvm_get_apic_mode(vcpu); @@ -4314,7 +4303,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = 1 << 24; break; case MSR_IA32_APICBASE: - msr_info->data = kvm_get_apic_base(vcpu); + msr_info->data = vcpu->arch.apic_base; break; case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); @@ -4601,6 +4590,11 @@ static bool kvm_is_vm_type_supported(unsigned long type) return type < 32 && (kvm_caps.supported_vm_types & BIT(type)); } +static inline u32 kvm_sync_valid_fields(struct kvm *kvm) +{ + return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS; +} + int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) { int r = 0; @@ -4709,7 +4703,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) break; #endif case KVM_CAP_SYNC_REGS: - r = KVM_SYNC_X86_VALID_FIELDS; + r = kvm_sync_valid_fields(kvm); break; case KVM_CAP_ADJUST_CLOCK: r = KVM_CLOCK_VALID_FLAGS; @@ -10159,7 +10153,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) kvm_run->if_flag = kvm_x86_call(get_if_flag)(vcpu); kvm_run->cr8 = kvm_get_cr8(vcpu); - kvm_run->apic_base = kvm_get_apic_base(vcpu); + kvm_run->apic_base = vcpu->arch.apic_base; kvm_run->ready_for_interrupt_injection = pic_in_kernel(vcpu->kvm) || @@ -10975,6 +10969,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) set_debugreg(0, 7); } + vcpu->arch.host_debugctl = get_debugctlmsr(); + guest_timing_enter_irqoff(); for (;;) { @@ -11479,6 +11475,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) { struct kvm_queued_exception *ex = &vcpu->arch.exception; struct kvm_run *kvm_run = vcpu->run; + u32 sync_valid_fields; int r; r = kvm_mmu_post_init_vm(vcpu->kvm); @@ -11524,8 +11521,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) goto out; } - if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) || - (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) { + sync_valid_fields = kvm_sync_valid_fields(vcpu->kvm); + if ((kvm_run->kvm_valid_regs & ~sync_valid_fields) || + (kvm_run->kvm_dirty_regs & ~sync_valid_fields)) { r = -EINVAL; goto out; } @@ -11583,7 +11581,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) out: kvm_put_guest_fpu(vcpu); - if (kvm_run->kvm_valid_regs) + if (kvm_run->kvm_valid_regs && likely(!vcpu->arch.guest_state_protected)) store_regs(vcpu); post_kvm_run_save(vcpu); kvm_vcpu_srcu_read_unlock(vcpu); @@ -11718,7 +11716,7 @@ static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) sregs->cr4 = kvm_read_cr4(vcpu); sregs->cr8 = kvm_get_cr8(vcpu); sregs->efer = vcpu->arch.efer; - sregs->apic_base = kvm_get_apic_base(vcpu); + sregs->apic_base = vcpu->arch.apic_base; } static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) @@ -11771,6 +11769,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, if (kvm_mpx_supported()) kvm_load_guest_fpu(vcpu); + 
kvm_vcpu_srcu_read_lock(vcpu); + r = kvm_apic_accept_events(vcpu); if (r < 0) goto out; @@ -11784,6 +11784,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, mp_state->mp_state = vcpu->arch.mp_state; out: + kvm_vcpu_srcu_read_unlock(vcpu); + if (kvm_mpx_supported()) kvm_put_guest_fpu(vcpu); vcpu_put(vcpu); diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index fc9fb5d061744..b8f74d80f35c6 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -74,6 +74,24 @@ SYM_FUNC_START(rep_movs_alternative) _ASM_EXTABLE_UA( 0b, 1b) .Llarge_movsq: + /* Do the first possibly unaligned word */ +0: movq (%rsi),%rax +1: movq %rax,(%rdi) + + _ASM_EXTABLE_UA( 0b, .Lcopy_user_tail) + _ASM_EXTABLE_UA( 1b, .Lcopy_user_tail) + + /* What would be the offset to the aligned destination? */ + leaq 8(%rdi),%rax + andq $-8,%rax + subq %rdi,%rax + + /* .. and update pointers and count to match */ + addq %rax,%rdi + addq %rax,%rsi + subq %rax,%rcx + + /* make %rcx contain the number of words, %rax the remainder */ movq %rcx,%rax shrq $3,%rcx andl $7,%eax diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index eb503f53c3195..101725c149c42 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -263,28 +263,33 @@ static void __init probe_page_size_mask(void) } /* - * INVLPG may not properly flush Global entries - * on these CPUs when PCIDs are enabled. + * INVLPG may not properly flush Global entries on + * these CPUs. New microcode fixes the issue. */ static const struct x86_cpu_id invlpg_miss_ids[] = { - X86_MATCH_VFM(INTEL_ALDERLAKE, 0), - X86_MATCH_VFM(INTEL_ALDERLAKE_L, 0), - X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, 0), - X86_MATCH_VFM(INTEL_RAPTORLAKE, 0), - X86_MATCH_VFM(INTEL_RAPTORLAKE_P, 0), - X86_MATCH_VFM(INTEL_RAPTORLAKE_S, 0), + X86_MATCH_VFM(INTEL_ALDERLAKE, 0x2e), + X86_MATCH_VFM(INTEL_ALDERLAKE_L, 0x42c), + X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, 0x11), + X86_MATCH_VFM(INTEL_RAPTORLAKE, 0x118), + X86_MATCH_VFM(INTEL_RAPTORLAKE_P, 0x4117), + X86_MATCH_VFM(INTEL_RAPTORLAKE_S, 0x2e), {} }; static void setup_pcid(void) { + const struct x86_cpu_id *invlpg_miss_match; + if (!IS_ENABLED(CONFIG_X86_64)) return; if (!boot_cpu_has(X86_FEATURE_PCID)) return; - if (x86_match_cpu(invlpg_miss_ids)) { + invlpg_miss_match = x86_match_cpu(invlpg_miss_ids); + + if (invlpg_miss_match && + boot_cpu_data.microcode < invlpg_miss_match->driver_data) { pr_info("Incomplete global flushes, disabling PCID"); setup_clear_cpu_cap(X86_FEATURE_PCID); return; diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index ac33b2263a434..b922b9fea6b64 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -562,7 +562,7 @@ void __head sme_enable(struct boot_params *bp) } RIP_REL_REF(sme_me_mask) = me_mask; - physical_mask &= ~me_mask; - cc_vendor = CC_VENDOR_AMD; + RIP_REL_REF(physical_mask) &= ~me_mask; + RIP_REL_REF(cc_vendor) = CC_VENDOR_AMD; cc_set_mask(me_mask); } diff --git a/arch/x86/mm/pat/cpa-test.c b/arch/x86/mm/pat/cpa-test.c index 3d2f7f0a6ed14..ad3c1feec990d 100644 --- a/arch/x86/mm/pat/cpa-test.c +++ b/arch/x86/mm/pat/cpa-test.c @@ -183,7 +183,7 @@ static int pageattr_test(void) break; case 1: - err = change_page_attr_set(addrs, len[1], PAGE_CPA_TEST, 1); + err = change_page_attr_set(addrs, len[i], PAGE_CPA_TEST, 1); break; case 2: diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index feb8cc6a12bf2..d721cc19addbd 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ 
-984,29 +984,42 @@ static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr, return -EINVAL; } -/* - * track_pfn_copy is called when vma that is covering the pfnmap gets - * copied through copy_page_range(). - * - * If the vma has a linear pfn mapping for the entire range, we get the prot - * from pte and reserve the entire vma range with single reserve_pfn_range call. - */ -int track_pfn_copy(struct vm_area_struct *vma) +int track_pfn_copy(struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma, unsigned long *pfn) { + const unsigned long vma_size = src_vma->vm_end - src_vma->vm_start; resource_size_t paddr; - unsigned long vma_size = vma->vm_end - vma->vm_start; pgprot_t pgprot; + int rc; - if (vma->vm_flags & VM_PAT) { - if (get_pat_info(vma, &paddr, &pgprot)) - return -EINVAL; - /* reserve the whole chunk covered by vma. */ - return reserve_pfn_range(paddr, vma_size, &pgprot, 1); - } + if (!(src_vma->vm_flags & VM_PAT)) + return 0; + + /* + * Duplicate the PAT information for the dst VMA based on the src + * VMA. + */ + if (get_pat_info(src_vma, &paddr, &pgprot)) + return -EINVAL; + rc = reserve_pfn_range(paddr, vma_size, &pgprot, 1); + if (rc) + return rc; + /* Reservation for the destination VMA succeeded. */ + vm_flags_set(dst_vma, VM_PAT); + *pfn = PHYS_PFN(paddr); return 0; } +void untrack_pfn_copy(struct vm_area_struct *dst_vma, unsigned long pfn) +{ + untrack_pfn(dst_vma, pfn, dst_vma->vm_end - dst_vma->vm_start, true); + /* + * Reservation was freed, any copied page tables will get cleaned + * up later, but without getting PAT involved again. + */ +} + /* * prot is passed in as a parameter for the new mapping. If the vma has * a linear pfn mapping for the entire range, or no vma is provided, @@ -1095,15 +1108,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, } } -/* - * untrack_pfn_clear is called if the following situation fits: - * - * 1) while mremapping a pfnmap for a new region, with the old vma after - * its pfnmap page table has been removed. The new vma has a new pfnmap - * to the same pfn & cache type with VM_PAT set. - * 2) while duplicating vm area, the new vma fails to copy the pgtable from - * old vma. 
- */ void untrack_pfn_clear(struct vm_area_struct *vma) { vm_flags_clear(vma, VM_PAT); diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index 44f7b2ea6a073..69ceb967d73e9 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -2422,7 +2422,7 @@ static int __set_pages_np(struct page *page, int numpages) .pgd = NULL, .numpages = numpages, .mask_set = __pgprot(0), - .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), + .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY), .flags = CPA_NO_CHECK_ALIAS }; /* @@ -2501,7 +2501,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, .pgd = pgd, .numpages = numpages, .mask_set = __pgprot(0), - .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)), + .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW|_PAGE_DIRTY)), .flags = CPA_NO_CHECK_ALIAS, }; @@ -2544,7 +2544,7 @@ int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address, .pgd = pgd, .numpages = numpages, .mask_set = __pgprot(0), - .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), + .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY), .flags = CPA_NO_CHECK_ALIAS, }; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index b4f3784f27e95..0c950bbca309f 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -70,6 +70,9 @@ EXPORT_SYMBOL(xen_start_flags); */ struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info; +/* Number of pages released from the initial allocation. */ +unsigned long xen_released_pages; + static __ref void xen_get_vendor(void) { init_cpu_devs(); @@ -465,6 +468,13 @@ int __init arch_xen_unpopulated_init(struct resource **res) xen_free_unpopulated_pages(1, &pg); } + /* + * Account for the region being in the physmap but unpopulated. + * The value in xen_released_pages is used by the balloon + * driver to know how much of the physmap is unpopulated and + * set an accurate initial memory target. + */ + xen_released_pages += xen_extra_mem[i].n_pfns; /* Zero so region is not also added to the balloon driver. 
*/ xen_extra_mem[i].n_pfns = 0; } diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c index 10c660fae8b30..7237d56a9d3f0 100644 --- a/arch/x86/xen/multicalls.c +++ b/arch/x86/xen/multicalls.c @@ -54,14 +54,20 @@ struct mc_debug_data { static DEFINE_PER_CPU(struct mc_buffer, mc_buffer); static struct mc_debug_data mc_debug_data_early __initdata; -static DEFINE_PER_CPU(struct mc_debug_data *, mc_debug_data) = - &mc_debug_data_early; static struct mc_debug_data __percpu *mc_debug_data_ptr; DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags); static struct static_key mc_debug __ro_after_init; static bool mc_debug_enabled __initdata; +static struct mc_debug_data * __ref get_mc_debug(void) +{ + if (!mc_debug_data_ptr) + return &mc_debug_data_early; + + return this_cpu_ptr(mc_debug_data_ptr); +} + static int __init xen_parse_mc_debug(char *arg) { mc_debug_enabled = true; @@ -71,20 +77,16 @@ static int __init xen_parse_mc_debug(char *arg) } early_param("xen_mc_debug", xen_parse_mc_debug); -void mc_percpu_init(unsigned int cpu) -{ - per_cpu(mc_debug_data, cpu) = per_cpu_ptr(mc_debug_data_ptr, cpu); -} - static int __init mc_debug_enable(void) { unsigned long flags; + struct mc_debug_data __percpu *mcdb; if (!mc_debug_enabled) return 0; - mc_debug_data_ptr = alloc_percpu(struct mc_debug_data); - if (!mc_debug_data_ptr) { + mcdb = alloc_percpu(struct mc_debug_data); + if (!mcdb) { pr_err("xen_mc_debug inactive\n"); static_key_slow_dec(&mc_debug); return -ENOMEM; @@ -93,7 +95,7 @@ static int __init mc_debug_enable(void) /* Be careful when switching to percpu debug data. */ local_irq_save(flags); xen_mc_flush(); - mc_percpu_init(0); + mc_debug_data_ptr = mcdb; local_irq_restore(flags); pr_info("xen_mc_debug active\n"); @@ -155,7 +157,7 @@ void xen_mc_flush(void) trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx); if (static_key_false(&mc_debug)) { - mcdb = __this_cpu_read(mc_debug_data); + mcdb = get_mc_debug(); memcpy(mcdb->entries, b->entries, b->mcidx * sizeof(struct multicall_entry)); } @@ -235,7 +237,7 @@ struct multicall_space __xen_mc_entry(size_t args) ret.mc = &b->entries[b->mcidx]; if (static_key_false(&mc_debug)) { - struct mc_debug_data *mcdb = __this_cpu_read(mc_debug_data); + struct mc_debug_data *mcdb = get_mc_debug(); mcdb->caller[b->mcidx] = __builtin_return_address(0); mcdb->argsz[b->mcidx] = args; diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index c3db71d96c434..3823e52aef523 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -37,9 +37,6 @@ #define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024) -/* Number of pages released from the initial allocation. */ -unsigned long xen_released_pages; - /* Memory map would allow PCI passthrough. */ bool xen_pv_pci_possible; diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index 6863d3da7decf..7ea57f728b89d 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -305,7 +305,6 @@ static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle) return rc; xen_pmu_init(cpu); - mc_percpu_init(cpu); /* * Why is this a BUG? If the hypercall fails then everything can be diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 63c13a2ccf556..25e318ef27d6b 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -261,9 +261,6 @@ void xen_mc_callback(void (*fn)(void *), void *data); */ struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size); -/* Do percpu data initialization for multicalls. 
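Editorial note (illustration only, not part of the patch): the arch/x86/xen/multicalls.c rework above removes the per-CPU pointer that had to be set up on each CPU via mc_percpu_init() and instead routes every access through get_mc_debug(), which falls back to a static boot-time buffer until alloc_percpu() has provided the real storage. A generic user-space sketch of that "static early data, then dynamic data" shape, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical debug record, standing in for struct mc_debug_data. */
struct dbg_data {
	unsigned long uses;
};

static struct dbg_data dbg_early;	/* always valid, even at "boot" */
static struct dbg_data *dbg_dynamic;	/* set once, when allocation works */

/* Mirrors get_mc_debug(): callers never see NULL and no per-object
 * init hook is needed; the switch-over is a single pointer store. */
static struct dbg_data *get_dbg(void)
{
	return dbg_dynamic ? dbg_dynamic : &dbg_early;
}

int main(void)
{
	get_dbg()->uses++;		/* early: hits the static buffer */

	dbg_dynamic = calloc(1, sizeof(*dbg_dynamic));
	if (dbg_dynamic)
		get_dbg()->uses++;	/* late: hits the dynamic buffer */

	printf("early=%lu dynamic=%lu\n", dbg_early.uses,
	       dbg_dynamic ? dbg_dynamic->uses : 0);
	free(dbg_dynamic);
	return 0;
}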
*/ -void mc_percpu_init(unsigned int cpu); - extern bool is_xen_pmu; irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id); diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 9aed61fcd0bf9..456026c4a3c96 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -104,16 +104,12 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, } EXPORT_SYMBOL(bio_integrity_alloc); -static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs, - bool dirty) +static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs) { int i; - for (i = 0; i < nr_vecs; i++) { - if (dirty && !PageCompound(bv[i].bv_page)) - set_page_dirty_lock(bv[i].bv_page); + for (i = 0; i < nr_vecs; i++) unpin_user_page(bv[i].bv_page); - } } static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip) @@ -129,7 +125,7 @@ static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip) ret = copy_to_iter(bvec_virt(bounce_bvec), bytes, &orig_iter); WARN_ON_ONCE(ret != bytes); - bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs, true); + bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs); } /** @@ -149,8 +145,7 @@ void bio_integrity_unmap_user(struct bio *bio) return; } - bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt, - bio_data_dir(bio) == READ); + bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt); } /** @@ -236,7 +231,7 @@ static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec, } if (write) - bio_integrity_unpin_bvec(bvec, nr_vecs, false); + bio_integrity_unpin_bvec(bvec, nr_vecs); else memcpy(&bip->bip_vec[1], bvec, nr_vecs * sizeof(*bvec)); @@ -362,7 +357,7 @@ int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes, return 0; release_pages: - bio_integrity_unpin_bvec(bvec, nr_bvecs, false); + bio_integrity_unpin_bvec(bvec, nr_bvecs); free_bvec: if (bvec != stack_vec) kfree(bvec); diff --git a/block/bio.c b/block/bio.c index ac4d77c889322..43d4ae26f4758 100644 --- a/block/bio.c +++ b/block/bio.c @@ -77,7 +77,7 @@ struct bio_slab { struct kmem_cache *slab; unsigned int slab_ref; unsigned int slab_size; - char name[8]; + char name[12]; }; static DEFINE_MUTEX(bio_slab_lock); static DEFINE_XARRAY(bio_slabs); diff --git a/block/blk-core.c b/block/blk-core.c index 42023addf9cda..c7b6c1f763597 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1121,8 +1121,8 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios) return; plug->cur_ktime = 0; - plug->mq_list = NULL; - plug->cached_rq = NULL; + rq_list_init(&plug->mq_list); + rq_list_init(&plug->cached_rqs); plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT); plug->rq_count = 0; plug->multiple_queues = false; @@ -1218,7 +1218,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule) * queue for cached requests, we don't want a blocked task holding * up a queue freeze/quiesce event. 
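Editorial note (illustration only, not part of the patch): the block/bio.c hunk above widens bio_slab.name from 8 to 12 bytes, presumably so that snprintf() no longer truncates a "bio-<size>" slab name once the size takes more than three digits. A quick user-space check of that truncation, with a made-up size:

#include <stdio.h>

int main(void)
{
	char small[8], wide[12];

	/* "bio-131072" needs 11 bytes including the NUL terminator. */
	snprintf(small, sizeof(small), "bio-%d", 131072);
	snprintf(wide, sizeof(wide), "bio-%d", 131072);

	/* Prints: 8 bytes: "bio-131"  12 bytes: "bio-131072" */
	printf("8 bytes: \"%s\"  12 bytes: \"%s\"\n", small, wide);
	return 0;
}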
*/ - if (unlikely(!rq_list_empty(plug->cached_rq))) + if (unlikely(!rq_list_empty(&plug->cached_rqs))) blk_mq_free_plug_rqs(plug); plug->cur_ktime = 0; diff --git a/block/blk-merge.c b/block/blk-merge.c index 5baa950f34fe2..ceac64e796ea8 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -1175,7 +1175,7 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, struct blk_plug *plug = current->plug; struct request *rq; - if (!plug || rq_list_empty(plug->mq_list)) + if (!plug || rq_list_empty(&plug->mq_list)) return false; rq_list_for_each(&plug->mq_list, rq) { diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 9638b25fd5212..ad8d6a363f24a 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "blk.h" #include "blk-mq.h" @@ -54,3 +55,39 @@ int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index) return NUMA_NO_NODE; } + +/** + * blk_mq_map_hw_queues - Create CPU to hardware queue mapping + * @qmap: CPU to hardware queue map + * @dev: The device to map queues + * @offset: Queue offset to use for the device + * + * Create a CPU to hardware queue mapping in @qmap. The struct bus_type + * irq_get_affinity callback will be used to retrieve the affinity. + */ +void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap, + struct device *dev, unsigned int offset) + +{ + const struct cpumask *mask; + unsigned int queue, cpu; + + if (!dev->bus->irq_get_affinity) + goto fallback; + + for (queue = 0; queue < qmap->nr_queues; queue++) { + mask = dev->bus->irq_get_affinity(dev, queue + offset); + if (!mask) + goto fallback; + + for_each_cpu(cpu, mask) + qmap->mq_map[cpu] = qmap->queue_offset + queue; + } + + return; + +fallback: + WARN_ON_ONCE(qmap->nr_queues > 1); + blk_mq_clear_mq_map(qmap); +} +EXPORT_SYMBOL_GPL(blk_mq_map_hw_queues); diff --git a/block/blk-mq.c b/block/blk-mq.c index 662e52ab06467..f26bee5626936 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -506,7 +506,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data) prefetch(tags->static_rqs[tag]); tag_mask &= ~(1UL << i); rq = blk_mq_rq_ctx_init(data, tags, tag); - rq_list_add(data->cached_rq, rq); + rq_list_add_head(data->cached_rqs, rq); nr++; } if (!(data->rq_flags & RQF_SCHED_TAGS)) @@ -515,7 +515,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data) percpu_ref_get_many(&data->q->q_usage_counter, nr - 1); data->nr_tags -= nr; - return rq_list_pop(data->cached_rq); + return rq_list_pop(data->cached_rqs); } static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data) @@ -612,7 +612,7 @@ static struct request *blk_mq_rq_cache_fill(struct request_queue *q, .flags = flags, .cmd_flags = opf, .nr_tags = plug->nr_ios, - .cached_rq = &plug->cached_rq, + .cached_rqs = &plug->cached_rqs, }; struct request *rq; @@ -637,14 +637,14 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q, if (!plug) return NULL; - if (rq_list_empty(plug->cached_rq)) { + if (rq_list_empty(&plug->cached_rqs)) { if (plug->nr_ios == 1) return NULL; rq = blk_mq_rq_cache_fill(q, plug, opf, flags); if (!rq) return NULL; } else { - rq = rq_list_peek(&plug->cached_rq); + rq = rq_list_peek(&plug->cached_rqs); if (!rq || rq->q != q) return NULL; @@ -653,7 +653,7 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q, if (op_is_flush(rq->cmd_flags) != op_is_flush(opf)) return NULL; - plug->cached_rq = rq_list_next(rq); + rq_list_pop(&plug->cached_rqs); 
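Editorial note (illustration only, not part of the patch): blk_mq_map_hw_queues(), added in the block/blk-mq-cpumap.c hunk above, lets a driver build its CPU to hardware queue map from the bus's irq_get_affinity() callback instead of relying on PCI- or virtio-specific helpers. A hedged sketch of how a driver's ->map_queues() callback might use it; the foo_* names and the first_io_vector offset are hypothetical:

#include <linux/blk-mq.h>
#include <linux/pci.h>

/* Hypothetical driver state; only the PCI device matters here. */
struct foo_ctrl {
	struct pci_dev *pdev;
	unsigned int first_io_vector;	/* IRQ vector of hw queue 0 */
};

/* ->map_queues() callback: the bus affinity callback decides the
 * CPU <-> hw queue mapping; the helper falls back to a trivial map
 * (and warns for nr_queues > 1) if the bus provides no affinity. */
static void foo_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_ctrl *ctrl = set->driver_data;

	blk_mq_map_hw_queues(&set->map[HCTX_TYPE_DEFAULT],
			     &ctrl->pdev->dev, ctrl->first_io_vector);
}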
blk_mq_rq_time_init(rq, 0); } @@ -830,7 +830,7 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug) { struct request *rq; - while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) + while ((rq = rq_list_pop(&plug->cached_rqs)) != NULL) blk_mq_free_request(rq); } @@ -1386,8 +1386,7 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) */ if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS)) plug->has_elevator = true; - rq->rq_next = NULL; - rq_list_add(&plug->mq_list, rq); + rq_list_add_tail(&plug->mq_list, rq); plug->rq_count++; } @@ -2781,7 +2780,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug) blk_status_t ret = BLK_STS_OK; while ((rq = rq_list_pop(&plug->mq_list))) { - bool last = rq_list_empty(plug->mq_list); + bool last = rq_list_empty(&plug->mq_list); if (hctx != rq->mq_hctx) { if (hctx) { @@ -2824,8 +2823,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) { struct blk_mq_hw_ctx *this_hctx = NULL; struct blk_mq_ctx *this_ctx = NULL; - struct request *requeue_list = NULL; - struct request **requeue_lastp = &requeue_list; + struct rq_list requeue_list = {}; unsigned int depth = 0; bool is_passthrough = false; LIST_HEAD(list); @@ -2839,12 +2837,12 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) is_passthrough = blk_rq_is_passthrough(rq); } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx || is_passthrough != blk_rq_is_passthrough(rq)) { - rq_list_add_tail(&requeue_lastp, rq); + rq_list_add_tail(&requeue_list, rq); continue; } - list_add(&rq->queuelist, &list); + list_add_tail(&rq->queuelist, &list); depth++; - } while (!rq_list_empty(plug->mq_list)); + } while (!rq_list_empty(&plug->mq_list)); plug->mq_list = requeue_list; trace_block_unplug(this_hctx->queue, depth, !from_sched); @@ -2899,19 +2897,19 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) if (q->mq_ops->queue_rqs) { blk_mq_run_dispatch_ops(q, __blk_mq_flush_plug_list(q, plug)); - if (rq_list_empty(plug->mq_list)) + if (rq_list_empty(&plug->mq_list)) return; } blk_mq_run_dispatch_ops(q, blk_mq_plug_issue_direct(plug)); - if (rq_list_empty(plug->mq_list)) + if (rq_list_empty(&plug->mq_list)) return; } do { blk_mq_dispatch_plug_list(plug, from_schedule); - } while (!rq_list_empty(plug->mq_list)); + } while (!rq_list_empty(&plug->mq_list)); } static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, @@ -2976,7 +2974,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q, if (plug) { data.nr_tags = plug->nr_ios; plug->nr_ios = 1; - data.cached_rq = &plug->cached_rq; + data.cached_rqs = &plug->cached_rqs; } rq = __blk_mq_alloc_requests(&data); @@ -2999,7 +2997,7 @@ static struct request *blk_mq_peek_cached_request(struct blk_plug *plug, if (!plug) return NULL; - rq = rq_list_peek(&plug->cached_rq); + rq = rq_list_peek(&plug->cached_rqs); if (!rq || rq->q != q) return NULL; if (type != rq->mq_hctx->type && @@ -3013,14 +3011,14 @@ static struct request *blk_mq_peek_cached_request(struct blk_plug *plug, static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug, struct bio *bio) { - WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq); + if (rq_list_pop(&plug->cached_rqs) != rq) + WARN_ON_ONCE(1); /* * If any qos ->throttle() end up blocking, we will have flushed the * plug and hence killed the cached_rq list as well. Pop this entry * before we throttle. 
*/ - plug->cached_rq = rq_list_next(rq); rq_qos_throttle(rq->q, bio); blk_mq_rq_time_init(rq, 0); diff --git a/block/blk-mq.h b/block/blk-mq.h index 364c0415293cf..a80d3b3105f9e 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -155,7 +155,7 @@ struct blk_mq_alloc_data { /* allocate multiple requests/tags in one go */ unsigned int nr_tags; - struct request **cached_rq; + struct rq_list *cached_rqs; /* input & output parameter */ struct blk_mq_ctx *ctx; diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 692b27266220f..0e2520d929e1d 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -813,6 +813,8 @@ int blk_register_queue(struct gendisk *disk) out_debugfs_remove: blk_debugfs_remove(disk); mutex_unlock(&q->sysfs_lock); + if (queue_is_mq(q)) + blk_mq_sysfs_unregister(disk); out_put_queue_kobj: kobject_put(&disk->queue_kobj); mutex_unlock(&q->sysfs_dir_lock); diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 767bcbce74fac..c11db5be25324 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -427,13 +427,14 @@ static bool disk_insert_zone_wplug(struct gendisk *disk, } } hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]); + atomic_inc(&disk->nr_zone_wplugs); spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); return true; } -static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk, - sector_t sector) +static struct blk_zone_wplug *disk_get_hashed_zone_wplug(struct gendisk *disk, + sector_t sector) { unsigned int zno = disk_zone_no(disk, sector); unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits); @@ -454,6 +455,15 @@ static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk, return NULL; } +static inline struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk, + sector_t sector) +{ + if (!atomic_read(&disk->nr_zone_wplugs)) + return NULL; + + return disk_get_hashed_zone_wplug(disk, sector); +} + static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head) { struct blk_zone_wplug *zwplug = @@ -518,6 +528,7 @@ static void disk_remove_zone_wplug(struct gendisk *disk, zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED; spin_lock_irqsave(&disk->zone_wplugs_lock, flags); hlist_del_init_rcu(&zwplug->node); + atomic_dec(&disk->nr_zone_wplugs); spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); disk_put_zone_wplug(zwplug); } @@ -607,6 +618,11 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug) { struct bio *bio; + if (bio_list_empty(&zwplug->bio_list)) + return; + + pr_warn_ratelimited("%s: zone %u: Aborting plugged BIOs\n", + zwplug->disk->disk_name, zwplug->zone_no); while ((bio = bio_list_pop(&zwplug->bio_list))) blk_zone_wplug_bio_io_error(zwplug, bio); } @@ -1055,6 +1071,47 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs) return true; } +static void blk_zone_wplug_handle_native_zone_append(struct bio *bio) +{ + struct gendisk *disk = bio->bi_bdev->bd_disk; + struct blk_zone_wplug *zwplug; + unsigned long flags; + + /* + * We have native support for zone append operations, so we are not + * going to handle @bio through plugging. However, we may already have a + * zone write plug for the target zone if that zone was previously + * partially written using regular writes. In such a case, we risk leaving + * the plug in the disk hash table if the zone is fully written using + * zone append operations. Avoid this by removing the zone write plug. 
+ */ + zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector); + if (likely(!zwplug)) + return; + + spin_lock_irqsave(&zwplug->lock, flags); + + /* + * We are about to remove the zone write plug. But if the user + * (mistakenly) has issued regular writes together with native zone + * append, we must abort the writes as otherwise the plugged BIOs would + * not be executed by the plug BIO work as disk_get_zone_wplug() will + * return NULL after the plug is removed. Aborting the plugged write + * BIOs is consistent with the fact that these writes will most likely + * fail anyway as there are no ordering guarantees between zone append + * operations and regular write operations. + */ + if (!bio_list_empty(&zwplug->bio_list)) { + pr_warn_ratelimited("%s: zone %u: Invalid mix of zone append and regular writes\n", + disk->disk_name, zwplug->zone_no); + disk_zone_wplug_abort(zwplug); + } + disk_remove_zone_wplug(disk, zwplug); + spin_unlock_irqrestore(&zwplug->lock, flags); + + disk_put_zone_wplug(zwplug); +} + /** * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging * @bio: The BIO being submitted @@ -1111,8 +1168,10 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs) */ switch (bio_op(bio)) { case REQ_OP_ZONE_APPEND: - if (!bdev_emulates_zone_append(bdev)) + if (!bdev_emulates_zone_append(bdev)) { + blk_zone_wplug_handle_native_zone_append(bio); return false; + } fallthrough; case REQ_OP_WRITE: case REQ_OP_WRITE_ZEROES: @@ -1299,6 +1358,7 @@ static int disk_alloc_zone_resources(struct gendisk *disk, { unsigned int i; + atomic_set(&disk->nr_zone_wplugs, 0); disk->zone_wplugs_hash_bits = min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS); @@ -1353,6 +1413,7 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk) } } + WARN_ON_ONCE(atomic_read(&disk->nr_zone_wplugs)); kfree(disk->zone_wplugs_hash); disk->zone_wplugs_hash = NULL; disk->zone_wplugs_hash_bits = 0; @@ -1570,11 +1631,12 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx, } /* - * We need to track the write pointer of all zones that are not - * empty nor full. So make sure we have a zone write plug for - * such zone if the device has a zone write plug hash table. + * If the device needs zone append emulation, we need to track the + * write pointer of all zones that are not empty nor full. So make sure + * we have a zone write plug for such zone if the device has a zone + * write plug hash table. 
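Editorial note (illustration only, not part of the patch): with the new disk->nr_zone_wplugs counter above, disk_get_zone_wplug() can answer the common "no plugs exist" case with one atomic_read() instead of an RCU hash walk. A generic user-space sketch of that counter-gated lookup, with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int nr_items;	/* tracks how many entries the hash holds */

/* Placeholder for the expensive hashed lookup. */
static bool slow_hash_lookup(int key)
{
	printf("walking hash for key %d\n", key);
	return false;
}

/* Fast path: when the counter says the table is empty, skip the walk. */
static bool lookup(int key)
{
	if (atomic_load(&nr_items) == 0)
		return false;
	return slow_hash_lookup(key);
}

int main(void)
{
	lookup(42);			/* fast path, no hash walk */
	atomic_fetch_add(&nr_items, 1);	/* an insert bumps the counter */
	lookup(42);			/* now the hash is actually walked */
	return 0;
}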
*/ - if (!disk->zone_wplugs_hash) + if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash) return 0; disk_zone_wplug_sync_wp_offset(disk, zone); diff --git a/block/partitions/efi.c b/block/partitions/efi.c index 5e9be13a56a82..7acba66eed481 100644 --- a/block/partitions/efi.c +++ b/block/partitions/efi.c @@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out) out[size] = 0; while (i < size) { - u8 c = le16_to_cpu(in[i]) & 0xff; + u8 c = le16_to_cpu(in[i]) & 0x7f; if (c && !isprint(c)) c = '!'; diff --git a/crypto/api.c b/crypto/api.c index bfd177a4313a0..c2c4eb14ef955 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -36,7 +36,8 @@ EXPORT_SYMBOL_GPL(crypto_chain); DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished); #endif -static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); +static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg, + u32 type, u32 mask); static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask); @@ -145,7 +146,7 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type, if (alg != &larval->alg) { kfree(larval); if (crypto_is_larval(alg)) - alg = crypto_larval_wait(alg); + alg = crypto_larval_wait(alg, type, mask); } return alg; @@ -197,7 +198,8 @@ static void crypto_start_test(struct crypto_larval *larval) crypto_schedule_test(larval); } -static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) +static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg, + u32 type, u32 mask) { struct crypto_larval *larval; long time_left; @@ -219,12 +221,7 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) crypto_larval_kill(larval); alg = ERR_PTR(-ETIMEDOUT); } else if (!alg) { - u32 type; - u32 mask; - alg = &larval->alg; - type = alg->cra_flags & ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); - mask = larval->mask; alg = crypto_alg_lookup(alg->cra_name, type, mask) ?: ERR_PTR(-EAGAIN); } else if (IS_ERR(alg)) @@ -304,7 +301,7 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, } if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg)) - alg = crypto_larval_wait(alg); + alg = crypto_larval_wait(alg, type, mask); else if (alg) ; else if (!(mask & CRYPTO_ALG_TESTED)) @@ -352,7 +349,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval); if (ok == NOTIFY_STOP) - alg = crypto_larval_wait(larval); + alg = crypto_larval_wait(larval, type, mask); else { crypto_mod_put(larval); alg = ERR_PTR(-ENOENT); diff --git a/crypto/bpf_crypto_skcipher.c b/crypto/bpf_crypto_skcipher.c index b5e657415770a..a88798d3e8c87 100644 --- a/crypto/bpf_crypto_skcipher.c +++ b/crypto/bpf_crypto_skcipher.c @@ -80,3 +80,4 @@ static void __exit bpf_crypto_skcipher_exit(void) module_init(bpf_crypto_skcipher_init); module_exit(bpf_crypto_skcipher_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Symmetric key cipher support for BPF"); diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig index 682c532452863..e4d418b44626e 100644 --- a/drivers/accel/ivpu/Kconfig +++ b/drivers/accel/ivpu/Kconfig @@ -8,6 +8,7 @@ config DRM_ACCEL_IVPU select FW_LOADER select DRM_GEM_SHMEM_HELPER select GENERIC_ALLOCATOR + select WANT_DEV_COREDUMP help Choose this option if you have a system with a 14th generation Intel CPU (Meteor Lake) or newer. 
Intel NPU (formerly called Intel VPU) diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile index ebd682a42eb12..232ea6d28c6e2 100644 --- a/drivers/accel/ivpu/Makefile +++ b/drivers/accel/ivpu/Makefile @@ -19,5 +19,6 @@ intel_vpu-y := \ ivpu_sysfs.o intel_vpu-$(CONFIG_DEBUG_FS) += ivpu_debugfs.o +intel_vpu-$(CONFIG_DEV_COREDUMP) += ivpu_coredump.o obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o diff --git a/drivers/accel/ivpu/ivpu_coredump.c b/drivers/accel/ivpu/ivpu_coredump.c new file mode 100644 index 0000000000000..16ad0c30818cc --- /dev/null +++ b/drivers/accel/ivpu/ivpu_coredump.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2024 Intel Corporation + */ + +#include +#include + +#include "ivpu_coredump.h" +#include "ivpu_fw.h" +#include "ivpu_gem.h" +#include "vpu_boot_api.h" + +#define CRASH_DUMP_HEADER "Intel NPU crash dump" +#define CRASH_DUMP_HEADERS_SIZE SZ_4K + +void ivpu_dev_coredump(struct ivpu_device *vdev) +{ + struct drm_print_iterator pi = {}; + struct drm_printer p; + size_t coredump_size; + char *coredump; + + coredump_size = CRASH_DUMP_HEADERS_SIZE + FW_VERSION_HEADER_SIZE + + ivpu_bo_size(vdev->fw->mem_log_crit) + ivpu_bo_size(vdev->fw->mem_log_verb); + coredump = vmalloc(coredump_size); + if (!coredump) + return; + + pi.data = coredump; + pi.remain = coredump_size; + p = drm_coredump_printer(&pi); + + drm_printf(&p, "%s\n", CRASH_DUMP_HEADER); + drm_printf(&p, "FW version: %s\n", vdev->fw->version); + ivpu_fw_log_print(vdev, false, &p); + + dev_coredumpv(vdev->drm.dev, coredump, pi.offset, GFP_KERNEL); +} diff --git a/drivers/accel/ivpu/ivpu_coredump.h b/drivers/accel/ivpu/ivpu_coredump.h new file mode 100644 index 0000000000000..8efb09d024411 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_coredump.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020-2024 Intel Corporation + */ + +#ifndef __IVPU_COREDUMP_H__ +#define __IVPU_COREDUMP_H__ + +#include + +#include "ivpu_drv.h" +#include "ivpu_fw_log.h" + +#ifdef CONFIG_DEV_COREDUMP +void ivpu_dev_coredump(struct ivpu_device *vdev); +#else +static inline void ivpu_dev_coredump(struct ivpu_device *vdev) +{ + struct drm_printer p = drm_info_printer(vdev->drm.dev); + + ivpu_fw_log_print(vdev, false, &p); +} +#endif + +#endif /* __IVPU_COREDUMP_H__ */ diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c index 8d50981594d15..eccedb0c8886b 100644 --- a/drivers/accel/ivpu/ivpu_debugfs.c +++ b/drivers/accel/ivpu/ivpu_debugfs.c @@ -331,7 +331,7 @@ ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t si return -EINVAL; ret = ivpu_rpm_get(vdev); - if (ret) + if (ret < 0) return ret; ivpu_pm_trigger_recovery(vdev, "debugfs"); @@ -408,7 +408,7 @@ static int dct_active_set(void *data, u64 active_percent) return -EINVAL; ret = ivpu_rpm_get(vdev); - if (ret) + if (ret < 0) return ret; if (active_percent) diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index c91400ecf9265..38b4158f52784 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -14,7 +14,7 @@ #include #include -#include "vpu_boot_api.h" +#include "ivpu_coredump.h" #include "ivpu_debugfs.h" #include "ivpu_drv.h" #include "ivpu_fw.h" @@ -29,6 +29,7 @@ #include "ivpu_ms.h" #include "ivpu_pm.h" #include "ivpu_sysfs.h" +#include "vpu_boot_api.h" #ifndef DRIVER_VERSION_STR #define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." 
\ @@ -382,7 +383,7 @@ int ivpu_boot(struct ivpu_device *vdev) ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret); ivpu_hw_diagnose_failure(vdev); ivpu_mmu_evtq_dump(vdev); - ivpu_fw_log_dump(vdev); + ivpu_dev_coredump(vdev); return ret; } diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index 63f13b697eed7..2b30cc2e9272e 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -152,6 +152,7 @@ struct ivpu_device { int tdr; int autosuspend; int d0i3_entry_msg; + int state_dump_msg; } timeout; }; diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index ede6165e09d90..b2b6d89f06537 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -25,7 +25,6 @@ #define FW_SHAVE_NN_MAX_SIZE SZ_2M #define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START) #define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE) -#define FW_VERSION_HEADER_SIZE SZ_4K #define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE) #define WATCHDOG_MSS_REDIRECT 32 @@ -191,8 +190,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n", fw_hdr->header_version, fw_hdr->image_format); - ivpu_info(vdev, "Firmware: %s, version: %s", fw->name, - (const char *)fw_hdr + VPU_FW_HEADER_SIZE); + if (!scnprintf(fw->version, sizeof(fw->version), "%s", fw->file->data + VPU_FW_HEADER_SIZE)) + ivpu_warn(vdev, "Missing firmware version\n"); + + ivpu_info(vdev, "Firmware: %s, version: %s\n", fw->name, fw->version); if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, BOOT, 3)) return -EINVAL; diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h index 40d9d17be3f52..5e8eb608b70f1 100644 --- a/drivers/accel/ivpu/ivpu_fw.h +++ b/drivers/accel/ivpu/ivpu_fw.h @@ -1,11 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2023 Intel Corporation + * Copyright (C) 2020-2024 Intel Corporation */ #ifndef __IVPU_FW_H__ #define __IVPU_FW_H__ +#define FW_VERSION_HEADER_SIZE SZ_4K +#define FW_VERSION_STR_SIZE SZ_256 + struct ivpu_device; struct ivpu_bo; struct vpu_boot_params; @@ -13,6 +16,7 @@ struct vpu_boot_params; struct ivpu_fw_info { const struct firmware *file; const char *name; + char version[FW_VERSION_STR_SIZE]; struct ivpu_bo *mem; struct ivpu_bo *mem_shave_nn; struct ivpu_bo *mem_log_crit; diff --git a/drivers/accel/ivpu/ivpu_fw_log.h b/drivers/accel/ivpu/ivpu_fw_log.h index 0b2573f6f3151..4b390a99699d6 100644 --- a/drivers/accel/ivpu/ivpu_fw_log.h +++ b/drivers/accel/ivpu/ivpu_fw_log.h @@ -8,8 +8,6 @@ #include -#include - #include "ivpu_drv.h" #define IVPU_FW_LOG_DEFAULT 0 @@ -28,11 +26,5 @@ extern unsigned int ivpu_log_level; void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p); void ivpu_fw_log_clear(struct ivpu_device *vdev); -static inline void ivpu_fw_log_dump(struct ivpu_device *vdev) -{ - struct drm_printer p = drm_info_printer(vdev->drm.dev); - - ivpu_fw_log_print(vdev, false, &p); -} #endif /* __IVPU_FW_LOG_H__ */ diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c index e69c0613513f1..08b3cef58fd2d 100644 --- a/drivers/accel/ivpu/ivpu_hw.c +++ b/drivers/accel/ivpu/ivpu_hw.c @@ -89,12 +89,14 @@ static void timeouts_init(struct ivpu_device *vdev) vdev->timeout.tdr = 2000000; vdev->timeout.autosuspend = -1; vdev->timeout.d0i3_entry_msg = 500; + vdev->timeout.state_dump_msg = 10; } else if (ivpu_is_simics(vdev)) { vdev->timeout.boot = 50; vdev->timeout.jsm = 500; 
vdev->timeout.tdr = 10000; vdev->timeout.autosuspend = -1; vdev->timeout.d0i3_entry_msg = 100; + vdev->timeout.state_dump_msg = 10; } else { vdev->timeout.boot = 1000; vdev->timeout.jsm = 500; @@ -104,6 +106,7 @@ static void timeouts_init(struct ivpu_device *vdev) else vdev->timeout.autosuspend = 100; vdev->timeout.d0i3_entry_msg = 5; + vdev->timeout.state_dump_msg = 10; } } diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index 29b723039a345..f0402dc847582 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -299,7 +299,8 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req struct ivpu_ipc_consumer cons; int ret; - drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev)); + drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev) && + pm_runtime_enabled(vdev->drm.dev)); ivpu_ipc_consumer_add(vdev, &cons, channel, NULL); @@ -353,6 +354,32 @@ int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req, return ret; } +int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req, + u32 channel, unsigned long timeout_ms) +{ + struct ivpu_ipc_consumer cons; + int ret; + + ret = ivpu_rpm_get(vdev); + if (ret < 0) + return ret; + + ivpu_ipc_consumer_add(vdev, &cons, channel, NULL); + + ret = ivpu_ipc_send(vdev, &cons, req); + if (ret) { + ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret); + goto consumer_del; + } + + msleep(timeout_ms); + +consumer_del: + ivpu_ipc_consumer_del(vdev, &cons); + ivpu_rpm_put(vdev); + return ret; +} + static bool ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg) diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h index fb4de7fb8210e..b4dfb504679ba 100644 --- a/drivers/accel/ivpu/ivpu_ipc.h +++ b/drivers/accel/ivpu/ivpu_ipc.h @@ -107,5 +107,7 @@ int ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req, enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp, u32 channel, unsigned long timeout_ms); +int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req, + u32 channel, unsigned long timeout_ms); #endif /* __IVPU_IPC_H__ */ diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c index 88105963c1b28..f7618b605f021 100644 --- a/drivers/accel/ivpu/ivpu_jsm_msg.c +++ b/drivers/accel/ivpu/ivpu_jsm_msg.c @@ -555,3 +555,11 @@ int ivpu_jsm_dct_disable(struct ivpu_device *vdev) return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); } + +int ivpu_jsm_state_dump(struct ivpu_device *vdev) +{ + struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_STATE_DUMP }; + + return ivpu_ipc_send_and_wait(vdev, &req, VPU_IPC_CHAN_ASYNC_CMD, + vdev->timeout.state_dump_msg); +} diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.h b/drivers/accel/ivpu/ivpu_jsm_msg.h index e4e42c0ff6e65..9e84d3526a146 100644 --- a/drivers/accel/ivpu/ivpu_jsm_msg.h +++ b/drivers/accel/ivpu/ivpu_jsm_msg.h @@ -43,4 +43,6 @@ int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mas u64 buffer_size, u32 *sample_size, u64 *info_size); int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us); int ivpu_jsm_dct_disable(struct ivpu_device *vdev); +int ivpu_jsm_state_dump(struct ivpu_device *vdev); + #endif 
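The two additions above are meant to be used together: ivpu_ipc_send_and_wait() fires a JSM request and simply sleeps for the given timeout instead of waiting for a response, and ivpu_jsm_state_dump() uses it to ask the firmware to dump its internal state before a coredump is taken. A minimal sketch of a caller, assuming only the helpers introduced in this patch (the wrapper name below is hypothetical):

static void example_collect_npu_crash_info(struct ivpu_device *vdev)
{
	/* Ask the firmware to dump its state; sleeps vdev->timeout.state_dump_msg ms. */
	ivpu_jsm_state_dump(vdev);

	/* Snapshot the firmware version and log buffers into a devcoredump blob. */
	ivpu_dev_coredump(vdev);
}

This mirrors the ordering used later in ivpu_pm_recovery_work(): the state dump is requested while the device is still powered, and only then are the firmware logs harvested.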
diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c index 2f9d37f5c208a..a961002fe25b2 100644 --- a/drivers/accel/ivpu/ivpu_ms.c +++ b/drivers/accel/ivpu/ivpu_ms.c @@ -4,6 +4,7 @@ */ #include +#include #include "ivpu_drv.h" #include "ivpu_gem.h" @@ -44,6 +45,10 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS) return -EINVAL; + ret = ivpu_rpm_get(vdev); + if (ret < 0) + return ret; + mutex_lock(&file_priv->ms_lock); if (get_instance_by_mask(file_priv, args->metric_group_mask)) { @@ -96,6 +101,8 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil kfree(ms); unlock: mutex_unlock(&file_priv->ms_lock); + + ivpu_rpm_put(vdev); return ret; } @@ -160,6 +167,10 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file * if (!args->metric_group_mask) return -EINVAL; + ret = ivpu_rpm_get(vdev); + if (ret < 0) + return ret; + mutex_lock(&file_priv->ms_lock); ms = get_instance_by_mask(file_priv, args->metric_group_mask); @@ -187,6 +198,7 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file * unlock: mutex_unlock(&file_priv->ms_lock); + ivpu_rpm_put(vdev); return ret; } @@ -204,11 +216,17 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file { struct ivpu_file_priv *file_priv = file->driver_priv; struct drm_ivpu_metric_streamer_stop *args = data; + struct ivpu_device *vdev = file_priv->vdev; struct ivpu_ms_instance *ms; + int ret; if (!args->metric_group_mask) return -EINVAL; + ret = ivpu_rpm_get(vdev); + if (ret < 0) + return ret; + mutex_lock(&file_priv->ms_lock); ms = get_instance_by_mask(file_priv, args->metric_group_mask); @@ -217,6 +235,7 @@ int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file mutex_unlock(&file_priv->ms_lock); + ivpu_rpm_put(vdev); return ms ? 
0 : -EINVAL; } @@ -281,6 +300,9 @@ int ivpu_ms_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file * void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv) { struct ivpu_ms_instance *ms, *tmp; + struct ivpu_device *vdev = file_priv->vdev; + + pm_runtime_get_sync(vdev->drm.dev); mutex_lock(&file_priv->ms_lock); @@ -293,6 +315,8 @@ void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv) free_instance(file_priv, ms); mutex_unlock(&file_priv->ms_lock); + + pm_runtime_put_autosuspend(vdev->drm.dev); } void ivpu_ms_cleanup_all(struct ivpu_device *vdev) diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index ef9a4ba18cb8a..fbb61a2c3b19c 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -9,17 +9,18 @@ #include #include -#include "vpu_boot_api.h" +#include "ivpu_coredump.h" #include "ivpu_drv.h" -#include "ivpu_hw.h" #include "ivpu_fw.h" #include "ivpu_fw_log.h" +#include "ivpu_hw.h" #include "ivpu_ipc.h" #include "ivpu_job.h" #include "ivpu_jsm_msg.h" #include "ivpu_mmu.h" #include "ivpu_ms.h" #include "ivpu_pm.h" +#include "vpu_boot_api.h" static bool ivpu_disable_recovery; module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644); @@ -110,40 +111,57 @@ static int ivpu_resume(struct ivpu_device *vdev) return ret; } -static void ivpu_pm_recovery_work(struct work_struct *work) +static void ivpu_pm_reset_begin(struct ivpu_device *vdev) { - struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work); - struct ivpu_device *vdev = pm->vdev; - char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL}; - int ret; - - ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter)); - - ret = pm_runtime_resume_and_get(vdev->drm.dev); - if (ret) - ivpu_err(vdev, "Failed to resume NPU: %d\n", ret); - - ivpu_fw_log_dump(vdev); + pm_runtime_disable(vdev->drm.dev); atomic_inc(&vdev->pm->reset_counter); atomic_set(&vdev->pm->reset_pending, 1); down_write(&vdev->pm->reset_lock); +} + +static void ivpu_pm_reset_complete(struct ivpu_device *vdev) +{ + int ret; - ivpu_suspend(vdev); ivpu_pm_prepare_cold_boot(vdev); ivpu_jobs_abort_all(vdev); ivpu_ms_cleanup_all(vdev); ret = ivpu_resume(vdev); - if (ret) + if (ret) { ivpu_err(vdev, "Failed to resume NPU: %d\n", ret); + pm_runtime_set_suspended(vdev->drm.dev); + } else { + pm_runtime_set_active(vdev->drm.dev); + } up_write(&vdev->pm->reset_lock); atomic_set(&vdev->pm->reset_pending, 0); - kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt); pm_runtime_mark_last_busy(vdev->drm.dev); - pm_runtime_put_autosuspend(vdev->drm.dev); + pm_runtime_enable(vdev->drm.dev); +} + +static void ivpu_pm_recovery_work(struct work_struct *work) +{ + struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work); + struct ivpu_device *vdev = pm->vdev; + char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL}; + + ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter)); + + ivpu_pm_reset_begin(vdev); + + if (!pm_runtime_status_suspended(vdev->drm.dev)) { + ivpu_jsm_state_dump(vdev); + ivpu_dev_coredump(vdev); + ivpu_suspend(vdev); + } + + ivpu_pm_reset_complete(vdev); + + kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt); } void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason) @@ -262,7 +280,7 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev) if (!is_idle || ret_d0i3) { ivpu_err(vdev, "Forcing cold boot due to previous errors\n"); atomic_inc(&vdev->pm->reset_counter); - 
ivpu_fw_log_dump(vdev); + ivpu_dev_coredump(vdev); ivpu_pm_prepare_cold_boot(vdev); } else { ivpu_pm_prepare_warm_boot(vdev); @@ -314,16 +332,13 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev) struct ivpu_device *vdev = pci_get_drvdata(pdev); ivpu_dbg(vdev, PM, "Pre-reset..\n"); - atomic_inc(&vdev->pm->reset_counter); - atomic_set(&vdev->pm->reset_pending, 1); - pm_runtime_get_sync(vdev->drm.dev); - down_write(&vdev->pm->reset_lock); - ivpu_prepare_for_reset(vdev); - ivpu_hw_reset(vdev); - ivpu_pm_prepare_cold_boot(vdev); - ivpu_jobs_abort_all(vdev); - ivpu_ms_cleanup_all(vdev); + ivpu_pm_reset_begin(vdev); + + if (!pm_runtime_status_suspended(vdev->drm.dev)) { + ivpu_prepare_for_reset(vdev); + ivpu_hw_reset(vdev); + } ivpu_dbg(vdev, PM, "Pre-reset done.\n"); } @@ -331,18 +346,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev) void ivpu_pm_reset_done_cb(struct pci_dev *pdev) { struct ivpu_device *vdev = pci_get_drvdata(pdev); - int ret; ivpu_dbg(vdev, PM, "Post-reset..\n"); - ret = ivpu_resume(vdev); - if (ret) - ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret); - up_write(&vdev->pm->reset_lock); - atomic_set(&vdev->pm->reset_pending, 0); - ivpu_dbg(vdev, PM, "Post-reset done.\n"); - pm_runtime_mark_last_busy(vdev->drm.dev); - pm_runtime_put_autosuspend(vdev->drm.dev); + ivpu_pm_reset_complete(vdev); + + ivpu_dbg(vdev, PM, "Post-reset done.\n"); } void ivpu_pm_init(struct ivpu_device *vdev) diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index c20eb63750f51..43aba57b48f05 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -172,9 +172,10 @@ static void free_slice(struct kref *kref) static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out, struct sg_table *sgt_in, u64 size, u64 offset) { - int total_len, len, nents, offf = 0, offl = 0; struct scatterlist *sg, *sgn, *sgf, *sgl; + unsigned int len, nents, offf, offl; struct sg_table *sgt; + size_t total_len; int ret, j; /* find out number of relevant nents needed for this mem */ @@ -182,6 +183,8 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl sgf = NULL; sgl = NULL; nents = 0; + offf = 0; + offl = 0; size = size ? 
size : PAGE_SIZE; for_each_sgtable_dma_sg(sgt_in, sg, j) { @@ -554,6 +557,7 @@ static bool invalid_sem(struct qaic_sem *sem) static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent, u32 count, u64 total_size) { + u64 total; int i; for (i = 0; i < count; i++) { @@ -563,7 +567,8 @@ static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_ invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3)) return -EINVAL; - if (slice_ent[i].offset + slice_ent[i].size > total_size) + if (check_add_overflow(slice_ent[i].offset, slice_ent[i].size, &total) || + total > total_size) return -EINVAL; } diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index a5d47819b3a4e..ae035b93da087 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -485,7 +485,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, cmd_mask = nd_desc->cmd_mask; if (cmd == ND_CMD_CALL && call_pkg->nd_family) { family = call_pkg->nd_family; - if (family > NVDIMM_BUS_FAMILY_MAX || + if (call_pkg->nd_family > NVDIMM_BUS_FAMILY_MAX || !test_bit(family, &nd_desc->bus_family_mask)) return -EINVAL; family = array_index_nospec(family, diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c index d2f7fd7743a13..11278f785526d 100644 --- a/drivers/acpi/platform_profile.c +++ b/drivers/acpi/platform_profile.c @@ -22,8 +22,8 @@ static const char * const profile_names[] = { }; static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST); -static ssize_t platform_profile_choices_show(struct device *dev, - struct device_attribute *attr, +static ssize_t platform_profile_choices_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { int len = 0; @@ -49,8 +49,8 @@ static ssize_t platform_profile_choices_show(struct device *dev, return len; } -static ssize_t platform_profile_show(struct device *dev, - struct device_attribute *attr, +static ssize_t platform_profile_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED; @@ -77,8 +77,8 @@ static ssize_t platform_profile_show(struct device *dev, return sysfs_emit(buf, "%s\n", profile_names[profile]); } -static ssize_t platform_profile_store(struct device *dev, - struct device_attribute *attr, +static ssize_t platform_profile_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) { int err, i; @@ -115,12 +115,12 @@ static ssize_t platform_profile_store(struct device *dev, return count; } -static DEVICE_ATTR_RO(platform_profile_choices); -static DEVICE_ATTR_RW(platform_profile); +static struct kobj_attribute attr_platform_profile_choices = __ATTR_RO(platform_profile_choices); +static struct kobj_attribute attr_platform_profile = __ATTR_RW(platform_profile); static struct attribute *platform_profile_attrs[] = { - &dev_attr_platform_profile_choices.attr, - &dev_attr_platform_profile.attr, + &attr_platform_profile_choices.attr, + &attr_platform_profile.attr, NULL }; diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 831fa4a121598..0888e4d618d53 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -268,6 +268,10 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x", pr->power.states[ACPI_STATE_C3].address); + if (!pr->power.states[ACPI_STATE_C2].address && + !pr->power.states[ACPI_STATE_C3].address) + 
return -ENODEV; + return 0; } diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 90aaec923889c..14c7bac4100b4 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -440,6 +440,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"), }, }, + { + /* Asus Vivobook X1404VAP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "X1404VAP"), + }, + }, { /* Asus Vivobook X1504VAP */ .matches = { @@ -563,6 +570,12 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = { DMI_MATCH(DMI_BOARD_NAME, "RP-15"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."), + DMI_MATCH(DMI_BOARD_NAME, "MECH-17"), + }, + }, { /* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */ .matches = { diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 068c1612660bc..4ee30c2897a2b 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -374,7 +374,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), }, .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | - ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, { /* Medion Lifetab S10346 */ diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 45f63b09828a1..650122deb480d 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -63,6 +63,7 @@ enum board_ids { board_ahci_pcs_quirk_no_devslp, board_ahci_pcs_quirk_no_sntf, board_ahci_yes_fbs, + board_ahci_yes_fbs_atapi_dma, /* board IDs for specific chipsets in alphabetical order */ board_ahci_al, @@ -188,6 +189,14 @@ static const struct ata_port_info ahci_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, + [board_ahci_yes_fbs_atapi_dma] = { + AHCI_HFLAGS (AHCI_HFLAG_YES_FBS | + AHCI_HFLAG_ATAPI_DMA_QUIRK), + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_ops, + }, /* by chipsets */ [board_ahci_al] = { AHCI_HFLAGS (AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_MSI), @@ -589,6 +598,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3), .driver_data = board_ahci_yes_fbs }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9215), + .driver_data = board_ahci_yes_fbs_atapi_dma }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), .driver_data = board_ahci_yes_fbs }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9235), diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 8f40f75ba08cf..10a5fe02f0a45 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -246,6 +246,7 @@ enum { AHCI_HFLAG_NO_SXS = BIT(26), /* SXS not supported */ AHCI_HFLAG_43BIT_ONLY = BIT(27), /* 43bit DMA addr limit */ AHCI_HFLAG_INTEL_PCS_QUIRK = BIT(28), /* apply Intel PCS quirk */ + AHCI_HFLAG_ATAPI_DMA_QUIRK = BIT(29), /* force ATAPI to use DMA */ /* ap->flags bits */ diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index fdfa7b2662180..a28ffe1e59691 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1321,6 +1321,10 @@ static void ahci_dev_config(struct ata_device *dev) { struct ahci_host_priv *hpriv = dev->link->ap->host->private_data; + if ((dev->class == ATA_DEV_ATAPI) && + (hpriv->flags & AHCI_HFLAG_ATAPI_DMA_QUIRK)) + dev->quirks |= ATA_QUIRK_ATAPI_MOD16_DMA; + if (hpriv->flags & AHCI_HFLAG_SECT255) { dev->max_sectors = 255; ata_dev_info(dev, diff --git 
a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index c085dd81ebe7f..0cb97181d10a9 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -88,6 +88,7 @@ struct ata_force_param { unsigned int xfer_mask; unsigned int quirk_on; unsigned int quirk_off; + unsigned int pflags_on; u16 lflags_on; u16 lflags_off; }; @@ -331,6 +332,35 @@ void ata_force_cbl(struct ata_port *ap) } } +/** + * ata_force_pflags - force port flags according to libata.force + * @ap: ATA port of interest + * + * Force port flags according to libata.force and whine about it. + * + * LOCKING: + * EH context. + */ +static void ata_force_pflags(struct ata_port *ap) +{ + int i; + + for (i = ata_force_tbl_size - 1; i >= 0; i--) { + const struct ata_force_ent *fe = &ata_force_tbl[i]; + + if (fe->port != -1 && fe->port != ap->print_id) + continue; + + /* let pflags stack */ + if (fe->param.pflags_on) { + ap->pflags |= fe->param.pflags_on; + ata_port_notice(ap, + "FORCE: port flag 0x%x forced -> 0x%x\n", + fe->param.pflags_on, ap->pflags); + } + } +} + /** * ata_force_link_limits - force link limits according to libata.force * @link: ATA link of interest @@ -486,6 +516,7 @@ static void ata_force_quirks(struct ata_device *dev) } } #else +static inline void ata_force_pflags(struct ata_port *ap) { } static inline void ata_force_link_limits(struct ata_link *link) { } static inline void ata_force_xfermask(struct ata_device *dev) { } static inline void ata_force_quirks(struct ata_device *dev) { } @@ -2845,6 +2876,10 @@ int ata_dev_configure(struct ata_device *dev) (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) dev->quirks |= ATA_QUIRK_NOLPM; + if (dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI && + ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) + dev->quirks |= ATA_QUIRK_NOLPM; + if (ap->flags & ATA_FLAG_NO_LPM) dev->quirks |= ATA_QUIRK_NOLPM; @@ -3897,6 +3932,7 @@ static const char * const ata_quirk_names[] = { [__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024", [__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m", [__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati", + [__ATA_QUIRK_NO_LPM_ON_ATI] = "nolpmonati", [__ATA_QUIRK_NO_ID_DEV_LOG] = "noiddevlog", [__ATA_QUIRK_NO_LOG_DIR] = "nologdir", [__ATA_QUIRK_NO_FUA] = "nofua", @@ -4142,13 +4178,16 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = { ATA_QUIRK_ZERO_AFTER_TRIM }, { "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM | ATA_QUIRK_ZERO_AFTER_TRIM | - ATA_QUIRK_NO_NCQ_ON_ATI }, + ATA_QUIRK_NO_NCQ_ON_ATI | + ATA_QUIRK_NO_LPM_ON_ATI }, { "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM | ATA_QUIRK_ZERO_AFTER_TRIM | - ATA_QUIRK_NO_NCQ_ON_ATI }, + ATA_QUIRK_NO_NCQ_ON_ATI | + ATA_QUIRK_NO_LPM_ON_ATI }, { "SAMSUNG*MZ7LH*", NULL, ATA_QUIRK_NO_NCQ_TRIM | ATA_QUIRK_ZERO_AFTER_TRIM | - ATA_QUIRK_NO_NCQ_ON_ATI, }, + ATA_QUIRK_NO_NCQ_ON_ATI | + ATA_QUIRK_NO_LPM_ON_ATI }, { "FCCT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM | ATA_QUIRK_ZERO_AFTER_TRIM }, @@ -5452,6 +5491,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host) #endif ata_sff_port_init(ap); + ata_force_pflags(ap); + return ap; } EXPORT_SYMBOL_GPL(ata_port_alloc); @@ -6264,6 +6305,9 @@ EXPORT_SYMBOL_GPL(ata_platform_remove_one); { "no" #name, .lflags_on = (flags) }, \ { #name, .lflags_off = (flags) } +#define force_pflag_on(name, flags) \ + { #name, .pflags_on = (flags) } + #define force_quirk_on(name, flag) \ { #name, .quirk_on = (flag) } @@ -6323,6 +6367,8 @@ static const struct ata_force_param force_tbl[] __initconst = { force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE), force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY), + 
force_pflag_on(external, ATA_PFLAG_EXTERNAL), + force_quirk_onoff(ncq, ATA_QUIRK_NONCQ), force_quirk_onoff(ncqtrim, ATA_QUIRK_NO_NCQ_TRIM), force_quirk_onoff(ncqati, ATA_QUIRK_NO_NCQ_ON_ATI), diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 3b303d4ae37a0..16cd676eae1f9 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1542,8 +1542,15 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev, tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf.command = ATA_CMD_PACKET; - /* is it pointless to prefer PIO for "safety reasons"? */ - if (ap->flags & ATA_FLAG_PIO_DMA) { + /* + * Do not use DMA if the connected device only supports PIO, even if the + * port prefers PIO commands via DMA. + * + * Ideally, we should call atapi_check_dma() to check if it is safe for + * the LLD to use DMA for REQUEST_SENSE, but we don't have a qc. + * Since we can't check the command, perhaps we should only use pio? + */ + if ((ap->flags & ATA_FLAG_PIO_DMA) && !(dev->flags & ATA_DFLAG_PIO)) { tf.protocol = ATAPI_PROT_DMA; tf.feature |= ATAPI_PKT_DMA; } else { diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index 9c76fb1ad2ec5..a7442dc0bd8e1 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -1510,6 +1510,8 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link) unsigned int err_mask, tag; u8 *sense, sk = 0, asc = 0, ascq = 0; u64 sense_valid, val; + u16 extended_sense; + bool aux_icc_valid; int ret = 0; err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2); @@ -1529,6 +1531,8 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link) sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) | ((u64)buf[10] << 16) | ((u64)buf[11] << 24); + extended_sense = get_unaligned_le16(&buf[14]); + aux_icc_valid = extended_sense & BIT(15); ata_qc_for_each_raw(ap, qc, tag) { if (!(qc->flags & ATA_QCFLAG_EH) || @@ -1556,6 +1560,17 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link) continue; } + qc->result_tf.nsect = sense[6]; + qc->result_tf.hob_nsect = sense[7]; + qc->result_tf.lbal = sense[8]; + qc->result_tf.lbam = sense[9]; + qc->result_tf.lbah = sense[10]; + qc->result_tf.hob_lbal = sense[11]; + qc->result_tf.hob_lbam = sense[12]; + qc->result_tf.hob_lbah = sense[13]; + if (aux_icc_valid) + qc->result_tf.auxiliary = get_unaligned_le32(&sense[16]); + /* Set sense without also setting scsicmd->result */ scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE, qc->scsicmd->sense_buffer, sk, diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c index 538bd3423d859..1bdcd6ee741d3 100644 --- a/drivers/ata/pata_pxa.c +++ b/drivers/ata/pata_pxa.c @@ -223,10 +223,16 @@ static int pxa_ata_probe(struct platform_device *pdev) ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start, resource_size(cmd_res)); + if (!ap->ioaddr.cmd_addr) + return -ENOMEM; ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start, resource_size(ctl_res)); + if (!ap->ioaddr.ctl_addr) + return -ENOMEM; ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start, resource_size(dma_res)); + if (!ap->ioaddr.bmdma_addr) + return -ENOMEM; /* * Adjust register offsets diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index a482741eb181f..c3042eca6332d 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -1117,9 +1117,14 @@ static int pdc20621_prog_dimm0(struct ata_host *host) mmio += PDC_CHIP0_OFS; for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++) - pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, - 
pdc_i2c_read_data[i].reg, - &spd0[pdc_i2c_read_data[i].ofs]); + if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, + pdc_i2c_read_data[i].reg, + &spd0[pdc_i2c_read_data[i].ofs])) { + dev_err(host->dev, + "Failed in i2c read at index %d: device=%#x, reg=%#x\n", + i, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg); + return -EIO; + } data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4); data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) | @@ -1284,6 +1289,8 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host) /* Programming DIMM0 Module Control Register (index_CID0:80h) */ size = pdc20621_prog_dimm0(host); + if (size < 0) + return size; dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size); /* Programming DIMM Module Global Control Register (index_CID0:88h) */ diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index 21545ffba0658..2a9bb31633a71 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -503,6 +503,7 @@ config HT16K33 config MAX6959 tristate "Maxim MAX6958/6959 7-segment LED controller" depends on I2C + select BITREVERSE select REGMAP_I2C select LINEDISP help diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c index 025dc6855cb25..41807ce363399 100644 --- a/drivers/auxdisplay/hd44780.c +++ b/drivers/auxdisplay/hd44780.c @@ -313,7 +313,7 @@ static int hd44780_probe(struct platform_device *pdev) fail3: kfree(hd); fail2: - kfree(lcd); + charlcd_free(lcd); fail1: kfree(hdc); return ret; @@ -328,7 +328,7 @@ static void hd44780_remove(struct platform_device *pdev) kfree(hdc->hd44780); kfree(lcd->drvdata); - kfree(lcd); + charlcd_free(lcd); } static const struct of_device_id hd44780_of_match[] = { diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index a731f28455b45..6dc8798d01f98 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -1664,7 +1664,7 @@ static void panel_attach(struct parport *port) if (lcd.enabled) charlcd_unregister(lcd.charlcd); err_unreg_device: - kfree(lcd.charlcd); + charlcd_free(lcd.charlcd); lcd.charlcd = NULL; parport_unregister_device(pprt); pprt = NULL; @@ -1692,7 +1692,7 @@ static void panel_detach(struct parport *port) charlcd_unregister(lcd.charlcd); lcd.initialized = false; kfree(lcd.charlcd->drvdata); - kfree(lcd.charlcd); + charlcd_free(lcd.charlcd); lcd.charlcd = NULL; } diff --git a/drivers/base/core.c b/drivers/base/core.c index d922cefc1e662..ec0ef6a0de942 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2079,6 +2079,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle, out: sup_handle->flags &= ~FWNODE_FLAG_VISITED; put_device(sup_dev); + put_device(con_dev); put_device(par_dev); return ret; } diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 2152eec0c1352..68224f2f83fff 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -687,6 +687,13 @@ int devres_release_group(struct device *dev, void *id) spin_unlock_irqrestore(&dev->devres_lock, flags); release_nodes(dev, &todo); + } else if (list_empty(&dev->devres_head)) { + /* + * dev is probably dying via devres_release_all(): groups + * have already been removed and are in the process of + * being released - don't touch and don't warn. 
+ */ + spin_unlock_irqrestore(&dev->devres_lock, flags); } else { WARN_ON(1); spin_unlock_irqrestore(&dev->devres_lock, flags); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 4a67e83300e16..1abe61f11525d 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -913,6 +913,9 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete; + if (!dev->power.is_suspended) + goto Complete; + if (dev->power.direct_complete) { /* Match the pm_runtime_disable() in __device_suspend(). */ pm_runtime_enable(dev); @@ -931,9 +934,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) */ dev->power.is_prepared = false; - if (!dev->power.is_suspended) - goto Unlock; - if (dev->pm_domain) { info = "power domain "; callback = pm_op(&dev->pm_domain->ops, state); @@ -973,7 +973,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) error = dpm_run_callback(callback, dev, state, info); dev->power.is_suspended = false; - Unlock: device_unlock(dev); dpm_watchdog_clear(&wd); @@ -1254,14 +1253,13 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy dev->power.is_noirq_suspended = true; /* - * Skipping the resume of devices that were in use right before the - * system suspend (as indicated by their PM-runtime usage counters) - * would be suboptimal. Also resume them if doing that is not allowed - * to be skipped. + * Devices must be resumed unless they are explicitly allowed to be left + * in suspend, but even in that case skipping the resume of devices that + * were in use right before the system suspend (as indicated by their + * runtime PM usage counters and child counters) would be suboptimal. 
*/ - if (atomic_read(&dev->power.usage_count) > 1 || - !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && - dev->power.may_skip_resume)) + if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && + dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev)) dev->power.must_resume = true; if (dev->power.must_resume) @@ -1628,6 +1626,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async) pm_runtime_disable(dev); if (pm_runtime_status_suspended(dev)) { pm_dev_dbg(dev, state, "direct-complete "); + dev->power.is_suspended = true; goto Complete; } diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 2ee45841486bc..04113adb092b5 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1874,7 +1874,7 @@ void pm_runtime_drop_link(struct device_link *link) pm_request_idle(link->supplier); } -static bool pm_runtime_need_not_resume(struct device *dev) +bool pm_runtime_need_not_resume(struct device *dev) { return atomic_read(&dev->power.usage_count) <= 1 && (atomic_read(&dev->power.child_count) == 0 || diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 86cc3b19faae8..8827a768284ac 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -233,72 +233,6 @@ static void loop_set_size(struct loop_device *lo, loff_t size) kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); } -static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) -{ - struct iov_iter i; - ssize_t bw; - - iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len); - - bw = vfs_iter_write(file, &i, ppos, 0); - - if (likely(bw == bvec->bv_len)) - return 0; - - printk_ratelimited(KERN_ERR - "loop: Write error at byte offset %llu, length %i.\n", - (unsigned long long)*ppos, bvec->bv_len); - if (bw >= 0) - bw = -EIO; - return bw; -} - -static int lo_write_simple(struct loop_device *lo, struct request *rq, - loff_t pos) -{ - struct bio_vec bvec; - struct req_iterator iter; - int ret = 0; - - rq_for_each_segment(bvec, rq, iter) { - ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos); - if (ret < 0) - break; - cond_resched(); - } - - return ret; -} - -static int lo_read_simple(struct loop_device *lo, struct request *rq, - loff_t pos) -{ - struct bio_vec bvec; - struct req_iterator iter; - struct iov_iter i; - ssize_t len; - - rq_for_each_segment(bvec, rq, iter) { - iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len); - len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); - if (len < 0) - return len; - - flush_dcache_page(bvec.bv_page); - - if (len != bvec.bv_len) { - struct bio *bio; - - __rq_for_each_bio(bio, rq) - zero_fill_bio(bio); - break; - } - cond_resched(); - } - - return 0; -} - static void loop_clear_limits(struct loop_device *lo, int mode) { struct queue_limits lim = queue_limits_start_update(lo->lo_queue); @@ -357,7 +291,7 @@ static void lo_complete_rq(struct request *rq) struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); blk_status_t ret = BLK_STS_OK; - if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) || + if (cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) || req_op(rq) != REQ_OP_READ) { if (cmd->ret < 0) ret = errno_to_blk_status(cmd->ret); @@ -373,14 +307,13 @@ static void lo_complete_rq(struct request *rq) cmd->ret = 0; blk_mq_requeue_request(rq, true); } else { - if (cmd->use_aio) { - struct bio *bio = rq->bio; + struct bio *bio = rq->bio; - while (bio) { - zero_fill_bio(bio); - bio = bio->bi_next; - } + while (bio) { + zero_fill_bio(bio); + bio = bio->bi_next; } + ret = 
BLK_STS_IOERR; end_io: blk_mq_end_request(rq, ret); @@ -460,9 +393,14 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, cmd->iocb.ki_pos = pos; cmd->iocb.ki_filp = file; - cmd->iocb.ki_complete = lo_rw_aio_complete; - cmd->iocb.ki_flags = IOCB_DIRECT; - cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); + cmd->iocb.ki_ioprio = req_get_ioprio(rq); + if (cmd->use_aio) { + cmd->iocb.ki_complete = lo_rw_aio_complete; + cmd->iocb.ki_flags = IOCB_DIRECT; + } else { + cmd->iocb.ki_complete = NULL; + cmd->iocb.ki_flags = 0; + } if (rw == ITER_SOURCE) ret = file->f_op->write_iter(&cmd->iocb, &iter); @@ -473,7 +411,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, if (ret != -EIOCBQUEUED) lo_rw_aio_complete(&cmd->iocb, ret); - return 0; + return -EIOCBQUEUED; } static int do_req_filebacked(struct loop_device *lo, struct request *rq) @@ -481,15 +419,6 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq) struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset; - /* - * lo_write_simple and lo_read_simple should have been covered - * by io submit style function like lo_rw_aio(), one blocker - * is that lo_read_simple() need to call flush_dcache_page after - * the page is written from kernel, and it isn't easy to handle - * this in io submit style function which submits all segments - * of the req at one time. And direct read IO doesn't need to - * run flush_dcache_page(). - */ switch (req_op(rq)) { case REQ_OP_FLUSH: return lo_req_flush(lo, rq); @@ -505,15 +434,9 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq) case REQ_OP_DISCARD: return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE); case REQ_OP_WRITE: - if (cmd->use_aio) - return lo_rw_aio(lo, cmd, pos, ITER_SOURCE); - else - return lo_write_simple(lo, rq, pos); + return lo_rw_aio(lo, cmd, pos, ITER_SOURCE); case REQ_OP_READ: - if (cmd->use_aio) - return lo_rw_aio(lo, cmd, pos, ITER_DEST); - else - return lo_read_simple(lo, rq, pos); + return lo_rw_aio(lo, cmd, pos, ITER_DEST); default: WARN_ON_ONCE(1); return -EIO; @@ -645,19 +568,20 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, * dependency. 
*/ fput(old_file); + dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); if (partscan) loop_reread_partitions(lo); error = 0; done: - /* enable and uncork uevent now that we are done */ - dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); + kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); return error; out_err: loop_global_unlock(lo, is_loop); out_putf: fput(file); + dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); goto done; } @@ -1111,8 +1035,8 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, if (partscan) clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); - /* enable and uncork uevent now that we are done */ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); + kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); loop_global_unlock(lo, is_loop); if (partscan) @@ -1888,7 +1812,6 @@ static void loop_handle_cmd(struct loop_cmd *cmd) struct loop_device *lo = rq->q->queuedata; int ret = 0; struct mem_cgroup *old_memcg = NULL; - const bool use_aio = cmd->use_aio; if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { ret = -EIO; @@ -1918,7 +1841,7 @@ static void loop_handle_cmd(struct loop_cmd *cmd) } failed: /* complete non-aio request */ - if (!use_aio || ret) { + if (ret != -EIOCBQUEUED) { if (ret == -EOPNOTSUPP) cmd->ret = ret; else diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 2f0431e42c494..f10369ad90f76 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1541,8 +1541,8 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) cmd = blk_mq_rq_to_pdu(req); cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req), blk_rq_sectors(req)); - if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error, - blk_mq_end_request_batch)) + if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK, + blk_mq_end_request_batch)) blk_mq_end_request(req, cmd->error); nr++; } @@ -1638,10 +1638,9 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_STS_OK; } -static void null_queue_rqs(struct request **rqlist) +static void null_queue_rqs(struct rq_list *rqlist) { - struct request *requeue_list = NULL; - struct request **requeue_lastp = &requeue_list; + struct rq_list requeue_list = {}; struct blk_mq_queue_data bd = { }; blk_status_t ret; @@ -1651,8 +1650,8 @@ static void null_queue_rqs(struct request **rqlist) bd.rq = rq; ret = null_queue_rq(rq->mq_hctx, &bd); if (ret != BLK_STS_OK) - rq_list_add_tail(&requeue_lastp, rq); - } while (!rq_list_empty(*rqlist)); + rq_list_add_tail(&requeue_list, rq); + } while (!rq_list_empty(rqlist)); *rqlist = requeue_list; } diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs index b0227cf9ddd38..5de7223beb4d5 100644 --- a/drivers/block/rnull.rs +++ b/drivers/block/rnull.rs @@ -32,7 +32,7 @@ module! 
{ } struct NullBlkModule { - _disk: Pin>>>, + _disk: Pin>>>, } impl kernel::Module for NullBlkModule { @@ -47,7 +47,7 @@ impl kernel::Module for NullBlkModule { .rotational(false) .build(format_args!("rnullb{}", 0), tagset)?; - let disk = Box::pin_init(new_mutex!(disk, "nullb:disk"), flags::GFP_KERNEL)?; + let disk = KBox::pin_init(new_mutex!(disk, "nullb:disk"), flags::GFP_KERNEL)?; Ok(Self { _disk: disk }) } } diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 458ac54e7b201..38b9e485e520d 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -681,22 +681,44 @@ static int ublk_max_cmd_buf_size(void) return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH); } -static inline bool ublk_queue_can_use_recovery_reissue( - struct ublk_queue *ubq) +/* + * Should I/O outstanding to the ublk server when it exits be reissued? + * If not, outstanding I/O will get errors. + */ +static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub) +{ + return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) && + (ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE); +} + +/* + * Should I/O issued while there is no ublk server be queued? If not, I/O + * issued while there is no ublk server will get errors. + */ +static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub) { - return (ubq->flags & UBLK_F_USER_RECOVERY) && - (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE); + return ub->dev_info.flags & UBLK_F_USER_RECOVERY; } -static inline bool ublk_queue_can_use_recovery( - struct ublk_queue *ubq) +/* + * Same as ublk_nosrv_dev_should_queue_io, but uses a queue-local copy + * of the device flags for smaller cache footprint - better for fast + * paths. + */ +static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq) { return ubq->flags & UBLK_F_USER_RECOVERY; } -static inline bool ublk_can_use_recovery(struct ublk_device *ub) +/* + * Should ublk devices be stopped (i.e. no recovery possible) when the + * ublk server exits? If not, devices can be used again by a future + * incarnation of a ublk server via the start_recovery/end_recovery + * commands. 
+ */ +static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub) { - return ub->dev_info.flags & UBLK_F_USER_RECOVERY; + return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY); } static void ublk_free_disk(struct gendisk *disk) @@ -1059,6 +1081,25 @@ static void ublk_complete_rq(struct kref *ref) __ublk_complete_rq(req); } +static void ublk_do_fail_rq(struct request *req) +{ + struct ublk_queue *ubq = req->mq_hctx->driver_data; + + if (ublk_nosrv_should_reissue_outstanding(ubq->dev)) + blk_mq_requeue_request(req, false); + else + __ublk_complete_rq(req); +} + +static void ublk_fail_rq_fn(struct kref *ref) +{ + struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data, + ref); + struct request *req = blk_mq_rq_from_pdu(data); + + ublk_do_fail_rq(req); +} + /* * Since __ublk_rq_task_work always fails requests immediately during * exiting, __ublk_fail_req() is only called from abort context during @@ -1072,10 +1113,13 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io, { WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE); - if (ublk_queue_can_use_recovery_reissue(ubq)) - blk_mq_requeue_request(req, false); - else - ublk_put_req_ref(ubq, req); + if (ublk_need_req_ref(ubq)) { + struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); + + kref_put(&data->ref, ublk_fail_rq_fn); + } else { + ublk_do_fail_rq(req); + } } static void ubq_complete_io_cmd(struct ublk_io *io, int res, @@ -1100,7 +1144,7 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq, struct request *rq) { /* We cannot process this rq so just requeue it. */ - if (ublk_queue_can_use_recovery(ubq)) + if (ublk_nosrv_dev_should_queue_io(ubq->dev)) blk_mq_requeue_request(rq, false); else blk_mq_end_request(rq, BLK_STS_IOERR); @@ -1245,10 +1289,10 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq) struct ublk_device *ub = ubq->dev; if (ublk_abort_requests(ub, ubq)) { - if (ublk_can_use_recovery(ub)) - schedule_work(&ub->quiesce_work); - else + if (ublk_nosrv_should_stop_dev(ub)) schedule_work(&ub->stop_work); + else + schedule_work(&ub->quiesce_work); } return BLK_EH_DONE; } @@ -1277,7 +1321,7 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx, * Note: force_abort is guaranteed to be seen because it is set * before request queue is unqiuesced. 
- if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort)) + if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort)) return BLK_STS_IOERR; if (unlikely(ubq->canceling)) { @@ -1416,17 +1460,27 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq) } } +/* Must be called when queue is frozen */ +static bool ublk_mark_queue_canceling(struct ublk_queue *ubq) +{ + bool canceled; + + spin_lock(&ubq->cancel_lock); + canceled = ubq->canceling; + if (!canceled) + ubq->canceling = true; + spin_unlock(&ubq->cancel_lock); + + return canceled; +} + static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq) { + bool was_canceled = ubq->canceling; struct gendisk *disk; - spin_lock(&ubq->cancel_lock); - if (ubq->canceling) { - spin_unlock(&ubq->cancel_lock); + if (was_canceled) return false; - } - ubq->canceling = true; - spin_unlock(&ubq->cancel_lock); spin_lock(&ub->lock); disk = ub->ub_disk; @@ -1438,14 +1492,23 @@ static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq) if (!disk) return false; - /* Now we are serialized with ublk_queue_rq() */ + /* + * Now we are serialized with ublk_queue_rq() + * + * Make sure that ubq->canceling is set when queue is frozen, + * because ublk_queue_rq() has to rely on this flag to avoid touching + * a completed uring_cmd + */ blk_mq_quiesce_queue(disk->queue); - /* abort queue is for making forward progress */ - ublk_abort_queue(ub, ubq); + was_canceled = ublk_mark_queue_canceling(ubq); + if (!was_canceled) { + /* abort queue is for making forward progress */ + ublk_abort_queue(ub, ubq); + } blk_mq_unquiesce_queue(disk->queue); put_device(disk_to_dev(disk)); - return true; + return !was_canceled; } static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io, @@ -1498,10 +1561,10 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd, ublk_cancel_cmd(ubq, io, issue_flags); if (need_schedule) { - if (ublk_can_use_recovery(ub)) - schedule_work(&ub->quiesce_work); - else + if (ublk_nosrv_should_stop_dev(ub)) schedule_work(&ub->stop_work); + else + schedule_work(&ub->quiesce_work); } } @@ -1621,7 +1684,7 @@ static void ublk_stop_dev(struct ublk_device *ub) mutex_lock(&ub->mutex); if (ub->dev_info.state == UBLK_S_DEV_DEAD) goto unlock; - if (ublk_can_use_recovery(ub)) { + if (ublk_nosrv_dev_should_queue_io(ub)) { if (ub->dev_info.state == UBLK_S_DEV_LIVE) __ublk_quiesce_dev(ub); ublk_unquiesce_dev(ub); @@ -2665,9 +2728,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub, if (ph.len > sizeof(struct ublk_params)) ph.len = sizeof(struct ublk_params); - /* parameters can only be changed when device isn't live */ mutex_lock(&ub->mutex); - if (ub->dev_info.state == UBLK_S_DEV_LIVE) { + if (test_bit(UB_STATE_USED, &ub->state)) { + /* + * Parameters can only be changed when the device hasn't + * been started yet + */ ret = -EACCES; } else if (copy_from_user(&ub->params, argp, ph.len)) { ret = -EFAULT; @@ -2716,7 +2782,7 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub, int i; mutex_lock(&ub->mutex); - if (!ublk_can_use_recovery(ub)) + if (ublk_nosrv_should_stop_dev(ub)) goto out_unlock; if (!ub->nr_queues_ready) goto out_unlock; @@ -2769,7 +2835,7 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub, __func__, ub->dev_info.nr_hw_queues, header->dev_id); mutex_lock(&ub->mutex); - if (!ublk_can_use_recovery(ub)) + if (ublk_nosrv_should_stop_dev(ub)) goto out_unlock; if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) { diff --git 
a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 0e50b65e1dbf5..fd6c565f8a507 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -472,7 +472,7 @@ static bool virtblk_prep_rq_batch(struct request *req) } static void virtblk_add_req_batch(struct virtio_blk_vq *vq, - struct request **rqlist) + struct rq_list *rqlist) { struct request *req; unsigned long flags; @@ -499,11 +499,10 @@ static void virtblk_add_req_batch(struct virtio_blk_vq *vq, virtqueue_notify(vq->vq); } -static void virtio_queue_rqs(struct request **rqlist) +static void virtio_queue_rqs(struct rq_list *rqlist) { - struct request *submit_list = NULL; - struct request *requeue_list = NULL; - struct request **requeue_lastp = &requeue_list; + struct rq_list submit_list = { }; + struct rq_list requeue_list = { }; struct virtio_blk_vq *vq = NULL; struct request *req; @@ -515,9 +514,9 @@ static void virtio_queue_rqs(struct request **rqlist) vq = this_vq; if (virtblk_prep_rq_batch(req)) - rq_list_add(&submit_list, req); /* reverse order */ + rq_list_add_tail(&submit_list, req); else - rq_list_add_tail(&requeue_lastp, req); + rq_list_add_tail(&requeue_list, req); } if (vq) @@ -1210,11 +1209,12 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) { struct request *req = blk_mq_rq_from_pdu(vbr); + u8 status = virtblk_vbr_status(vbr); found++; if (!blk_mq_complete_request_remote(req) && - !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr), - virtblk_complete_batch)) + !blk_mq_add_to_batch(req, iob, status != VIRTIO_BLK_S_OK, + virtblk_complete_batch)) virtblk_request_done(req); } diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c index 53f6b4f76bccd..ab465e13c1f60 100644 --- a/drivers/bluetooth/btintel_pcie.c +++ b/drivers/bluetooth/btintel_pcie.c @@ -36,6 +36,7 @@ /* Intel Bluetooth PCIe device id table */ static const struct pci_device_id btintel_pcie_table[] = { { BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) }, + { BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) }, { 0 } }; MODULE_DEVICE_TABLE(pci, btintel_pcie_table); diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index dfbbac92242a8..dd2c0485b9848 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -272,6 +272,39 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev) } EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd); +static bool qca_filename_has_extension(const char *filename) +{ + const char *suffix = strrchr(filename, '.'); + + /* File extensions require a dot, but not as the first or last character */ + if (!suffix || suffix == filename || *(suffix + 1) == '\0') + return 0; + + /* Avoid matching directories with names that look like files with extensions */ + return !strchr(suffix, '/'); +} + +static bool qca_get_alt_nvm_file(char *filename, size_t max_size) +{ + char fwname[64]; + const char *suffix; + + /* nvm file name has an extension, replace with .bin */ + if (qca_filename_has_extension(filename)) { + suffix = strrchr(filename, '.'); + strscpy(fwname, filename, suffix - filename + 1); + snprintf(fwname + (suffix - filename), + sizeof(fwname) - (suffix - filename), ".bin"); + /* If nvm file is already the default one, return false to skip the retry. 
*/ + if (strcmp(fwname, filename) == 0) + return false; + + snprintf(filename, max_size, "%s", fwname); + return true; + } + return false; +} + static int qca_tlv_check_data(struct hci_dev *hdev, struct qca_fw_config *config, u8 *fw_data, size_t fw_size, @@ -564,6 +597,19 @@ static int qca_download_firmware(struct hci_dev *hdev, config->fwname, ret); return ret; } + } + /* If the board-specific file is missing, try loading the default + * one, unless that was attempted already. + */ + else if (config->type == TLV_TYPE_NVM && + qca_get_alt_nvm_file(config->fwname, sizeof(config->fwname))) { + bt_dev_info(hdev, "QCA Downloading %s", config->fwname); + ret = request_firmware(&fw, config->fwname, &hdev->dev); + if (ret) { + bt_dev_err(hdev, "QCA Failed to request file: %s (%d)", + config->fwname, ret); + return ret; + } } else { bt_dev_err(hdev, "QCA Failed to request file: %s (%d)", config->fwname, ret); @@ -700,34 +746,38 @@ static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *co return 0; } -static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size, +static void qca_get_nvm_name_by_board(char *fwname, size_t max_size, + const char *stem, enum qca_btsoc_type soc_type, struct qca_btsoc_version ver, u8 rom_ver, u16 bid) { const char *variant; + const char *prefix; - /* hsp gf chip */ - if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID) - variant = "g"; - else - variant = ""; + /* Set the default value to variant and prefix */ + variant = ""; + prefix = "b"; - if (bid == 0x0) - snprintf(fwname, max_size, "qca/hpnv%02x%s.bin", rom_ver, variant); - else - snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid); -} + if (soc_type == QCA_QCA2066) + prefix = ""; -static inline void qca_get_nvm_name_generic(struct qca_fw_config *cfg, - const char *stem, u8 rom_ver, u16 bid) -{ - if (bid == 0x0) - snprintf(cfg->fwname, sizeof(cfg->fwname), "qca/%snv%02x.bin", stem, rom_ver); - else if (bid & 0xff00) - snprintf(cfg->fwname, sizeof(cfg->fwname), - "qca/%snv%02x.b%x", stem, rom_ver, bid); - else - snprintf(cfg->fwname, sizeof(cfg->fwname), - "qca/%snv%02x.b%02x", stem, rom_ver, bid); + if (soc_type == QCA_WCN6855 || soc_type == QCA_QCA2066) { + /* If the chip is manufactured by GlobalFoundries */ + if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID) + variant = "g"; + } + + if (rom_ver != 0) { + if (bid == 0x0 || bid == 0xffff) + snprintf(fwname, max_size, "qca/%s%02x%s.bin", stem, rom_ver, variant); + else + snprintf(fwname, max_size, "qca/%s%02x%s.%s%02x", stem, rom_ver, + variant, prefix, bid); + } else { + if (bid == 0x0 || bid == 0xffff) + snprintf(fwname, max_size, "qca/%s%s.bin", stem, variant); + else + snprintf(fwname, max_size, "qca/%s%s.%s%02x", stem, variant, prefix, bid); + } } int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, @@ -735,6 +785,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, const char *firmware_name) { struct qca_fw_config config = {}; + const char *variant = ""; int err; u8 rom_ver = 0; u32 soc_ver; @@ -816,28 +867,33 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, /* Download NVM configuration */ config.type = TLV_TYPE_NVM; if (firmware_name) { - snprintf(config.fwname, sizeof(config.fwname), - "qca/%s", firmware_name); + /* The firmware name has an extension, use it directly */ + if (qca_filename_has_extension(firmware_name)) { + snprintf(config.fwname, sizeof(config.fwname), "qca/%s", firmware_name); + } else { + qca_read_fw_board_id(hdev, 
&boardid); + qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname), + firmware_name, soc_type, ver, 0, boardid); + } } else { switch (soc_type) { case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: - if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) { - snprintf(config.fwname, sizeof(config.fwname), - "qca/crnv%02xu.bin", rom_ver); - } else { - snprintf(config.fwname, sizeof(config.fwname), - "qca/crnv%02x.bin", rom_ver); - } + if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) + variant = "u"; + + snprintf(config.fwname, sizeof(config.fwname), + "qca/crnv%02x%s.bin", rom_ver, variant); break; case QCA_WCN3988: snprintf(config.fwname, sizeof(config.fwname), "qca/apnv%02x.bin", rom_ver); break; case QCA_QCA2066: - qca_generate_hsp_nvm_name(config.fwname, - sizeof(config.fwname), ver, rom_ver, boardid); + qca_get_nvm_name_by_board(config.fwname, + sizeof(config.fwname), "hpnv", soc_type, ver, + rom_ver, boardid); break; case QCA_QCA6390: snprintf(config.fwname, sizeof(config.fwname), @@ -848,13 +904,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, "qca/msnv%02x.bin", rom_ver); break; case QCA_WCN6855: - snprintf(config.fwname, sizeof(config.fwname), - "qca/hpnv%02x.bin", rom_ver); + qca_read_fw_board_id(hdev, &boardid); + qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname), + "hpnv", soc_type, ver, rom_ver, boardid); break; case QCA_WCN7850: - qca_get_nvm_name_generic(&config, "hmt", rom_ver, boardid); + qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname), + "hmtnv", soc_type, ver, rom_ver, boardid); break; - default: snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin", soc_ver); diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 0a6ca6dfb9484..59eb948664223 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -1215,6 +1215,8 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, rtl_dev_err(hdev, "mandatory config file %s not found", btrtl_dev->ic_info->cfg_name); ret = btrtl_dev->cfg_len; + if (!ret) + ret = -EINVAL; goto err_free; } } diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 6bc6dd417adf6..151054a718522 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -626,6 +626,10 @@ static const struct usb_device_id quirks_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe102), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe152), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe153), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK | @@ -3644,6 +3648,7 @@ static ssize_t force_poll_sync_write(struct file *file, } static const struct file_operations force_poll_sync_fops = { + .owner = THIS_MODULE, .open = simple_open, .read = force_poll_sync_read, .write = force_poll_sync_write, diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 395d66e32a2ea..2f322f890b81f 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -102,7 +102,8 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) if (!skb) { percpu_down_read(&hu->proto_lock); - if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + if (test_bit(HCI_UART_PROTO_READY, &hu->flags) || + test_bit(HCI_UART_PROTO_INIT, &hu->flags)) skb = hu->proto->dequeue(hu); 
percpu_up_read(&hu->proto_lock); @@ -124,7 +125,8 @@ int hci_uart_tx_wakeup(struct hci_uart *hu) if (!percpu_down_read_trylock(&hu->proto_lock)) return 0; - if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) && + !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) goto no_schedule; set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); @@ -278,7 +280,8 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) percpu_down_read(&hu->proto_lock); - if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) && + !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) { percpu_up_read(&hu->proto_lock); return -EUNATCH; } @@ -585,7 +588,8 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty) if (tty != hu->tty) return; - if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + if (test_bit(HCI_UART_PROTO_READY, &hu->flags) || + test_bit(HCI_UART_PROTO_INIT, &hu->flags)) hci_uart_tx_wakeup(hu); } @@ -611,7 +615,8 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, percpu_down_read(&hu->proto_lock); - if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) && + !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) { percpu_up_read(&hu->proto_lock); return; } @@ -707,12 +712,16 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id) hu->proto = p; + set_bit(HCI_UART_PROTO_INIT, &hu->flags); + err = hci_uart_register_dev(hu); if (err) { return err; } set_bit(HCI_UART_PROTO_READY, &hu->flags); + clear_bit(HCI_UART_PROTO_INIT, &hu->flags); + return 0; } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 37fddf6055beb..1837622ea625a 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -2353,6 +2353,7 @@ static int qca_serdev_probe(struct serdev_device *serdev) switch (qcadev->btsoc_type) { case QCA_WCN6855: case QCA_WCN7850: + case QCA_WCN6750: if (!device_property_present(&serdev->dev, "enable-gpios")) { /* * Backward compatibility with old DT sources. 
If the @@ -2372,7 +2373,6 @@ static int qca_serdev_probe(struct serdev_device *serdev) case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: - case QCA_WCN6750: qcadev->bt_power->dev = &serdev->dev; err = qca_init_regulators(qcadev->bt_power, data->vregs, data->num_vregs); diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index fbf3079b92a53..5ea5dd80e297c 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -90,6 +90,7 @@ struct hci_uart { #define HCI_UART_REGISTERED 1 #define HCI_UART_PROTO_READY 2 #define HCI_UART_NO_SUSPEND_NOTIFIER 3 +#define HCI_UART_PROTO_INIT 4 /* TX states */ #define HCI_UART_SENDING 1 diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 7651321d351cc..9ac22e4a070be 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -289,18 +289,18 @@ static void vhci_coredump(struct hci_dev *hdev) static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb) { - char buf[80]; + const char *buf; - snprintf(buf, sizeof(buf), "Controller Name: vhci_ctrl\n"); + buf = "Controller Name: vhci_ctrl\n"; skb_put_data(skb, buf, strlen(buf)); - snprintf(buf, sizeof(buf), "Firmware Version: vhci_fw\n"); + buf = "Firmware Version: vhci_fw\n"; skb_put_data(skb, buf, strlen(buf)); - snprintf(buf, sizeof(buf), "Driver: vhci_drv\n"); + buf = "Driver: vhci_drv\n"; skb_put_data(skb, buf, strlen(buf)); - snprintf(buf, sizeof(buf), "Vendor: vhci\n"); + buf = "Vendor: vhci\n"; skb_put_data(skb, buf, strlen(buf)); } diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c index 4de75674f1935..aa8a0ef697c77 100644 --- a/drivers/bus/mhi/host/main.c +++ b/drivers/bus/mhi/host/main.c @@ -1207,11 +1207,16 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, struct mhi_ring_element *mhi_tre; struct mhi_buf_info *buf_info; int eot, eob, chain, bei; - int ret; + int ret = 0; /* Protect accesses for reading and incrementing WP */ write_lock_bh(&mhi_chan->lock); + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) { + ret = -ENODEV; + goto out; + } + buf_ring = &mhi_chan->buf_ring; tre_ring = &mhi_chan->tre_ring; @@ -1229,10 +1234,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, if (!info->pre_mapped) { ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); - if (ret) { - write_unlock_bh(&mhi_chan->lock); - return ret; - } + if (ret) + goto out; } eob = !!(flags & MHI_EOB); @@ -1250,9 +1253,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, mhi_add_ring_element(mhi_cntrl, tre_ring); mhi_add_ring_element(mhi_cntrl, buf_ring); +out: write_unlock_bh(&mhi_chan->lock); - return 0; + return ret; } int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index 9938bb034c1cb..acfd673834ed7 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -1040,8 +1040,9 @@ static void mhi_pci_recovery_work(struct work_struct *work) err_unprepare: mhi_unprepare_after_power_down(mhi_cntrl); err_try_reset: - if (pci_reset_function(pdev)) - dev_err(&pdev->dev, "Recovery failed\n"); + err = pci_try_reset_function(pdev); + if (err) + dev_err(&pdev->dev, "Recovery failed: %d\n", err); } static void health_check(struct timer_list *t) diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c index 07371cb653d35..4af1901c9d524 100644 --- a/drivers/cdx/cdx.c +++ b/drivers/cdx/cdx.c @@ -470,8 +470,12 @@ 
static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cdx_device *cdx_dev = to_cdx_device(dev); + ssize_t len; - return sysfs_emit(buf, "%s\n", cdx_dev->driver_override); + device_lock(dev); + len = sysfs_emit(buf, "%s\n", cdx_dev->driver_override); + device_unlock(dev); + return len; } static DEVICE_ATTR_RW(driver_override); diff --git a/drivers/char/misc.c b/drivers/char/misc.c index 2cf595d2e10b8..f7dd455dd0dd3 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -264,8 +264,8 @@ int misc_register(struct miscdevice *misc) device_create_with_groups(&misc_class, misc->parent, dev, misc, misc->groups, "%s", misc->name); if (IS_ERR(misc->this_device)) { + misc_minor_free(misc->minor); if (is_dynamic) { - misc_minor_free(misc->minor); misc->minor = MISC_DYNAMIC_MINOR; } err = PTR_ERR(misc->this_device); diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 7df7abaf3e526..e25daf2396d37 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -168,6 +168,11 @@ int tpm_try_get_ops(struct tpm_chip *chip) goto out_ops; mutex_lock(&chip->tpm_mutex); + + /* tmp_chip_start may issue IO that is denied while suspended */ + if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) + goto out_lock; + rc = tpm_chip_start(chip); if (rc) goto out_lock; @@ -300,6 +305,7 @@ int tpm_class_shutdown(struct device *dev) down_write(&chip->ops_sem); if (chip->flags & TPM_CHIP_FLAG_TPM2) { if (!tpm_chip_start(chip)) { + tpm2_end_auth_session(chip); tpm2_shutdown(chip, TPM2_SU_CLEAR); tpm_chip_stop(chip); } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index b1daa0d7b341b..f62f7871edbdb 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -445,18 +445,11 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max) if (!chip) return -ENODEV; - /* Give back zero bytes, as TPM chip has not yet fully resumed: */ - if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) { - rc = 0; - goto out; - } - if (chip->flags & TPM_CHIP_FLAG_TPM2) rc = tpm2_get_random(chip, out, max); else rc = tpm1_get_random(chip, out, max); -out: tpm_put_ops(chip); return rc; } diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index fdef214b9f6bf..ed0d3d8449b30 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -114,11 +114,10 @@ static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, return 0; /* process status changes without irq support */ do { + usleep_range(priv->timeout_min, priv->timeout_max); status = chip->ops->status(chip); if ((status & mask) == mask) return 0; - usleep_range(priv->timeout_min, - priv->timeout_max); } while (time_before(jiffies, stop)); return -ETIME; } @@ -464,7 +463,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len) if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, &priv->int_queue, false) < 0) { - rc = -ETIME; + if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags)) + rc = -EAGAIN; + else + rc = -ETIME; goto out_err; } status = tpm_tis_status(chip); @@ -481,7 +483,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len) if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, &priv->int_queue, false) < 0) { - rc = -ETIME; + if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags)) + rc = -EAGAIN; + else + rc = -ETIME; goto out_err; } status = tpm_tis_status(chip); @@ -546,9 +551,11 @@ static int tpm_tis_send_main(struct 
tpm_chip *chip, const u8 *buf, size_t len) if (rc >= 0) /* Data transfer done successfully */ break; - else if (rc != -EIO) + else if (rc != -EAGAIN && rc != -EIO) /* Data transfer failed, not recoverable */ return rc; + + usleep_range(priv->timeout_min, priv->timeout_max); } /* go and do it */ @@ -1144,6 +1151,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, priv->timeout_max = TIS_TIMEOUT_MAX_ATML; } + if (priv->manufacturer_id == TPM_VID_IFX) + set_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags); + if (is_bsw()) { priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR, ILB_REMAP_SIZE); diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index 690ad8e9b7319..970d02c337c7f 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -89,6 +89,7 @@ enum tpm_tis_flags { TPM_TIS_INVALID_STATUS = 1, TPM_TIS_DEFAULT_CANCELLATION = 2, TPM_TIS_IRQ_TESTED = 3, + TPM_TIS_STATUS_VALID_RETRY = 4, }; struct tpm_tis_data { diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c index c409fc7e06186..775f62dddb11d 100644 --- a/drivers/clk/imx/clk-imx8mp-audiomix.c +++ b/drivers/clk/imx/clk-imx8mp-audiomix.c @@ -180,14 +180,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = { CLK_GATE("asrc", ASRC_IPG), CLK_GATE("pdm", PDM_IPG), CLK_GATE("earc", EARC_IPG), - CLK_GATE("ocrama", OCRAMA_IPG), + CLK_GATE_PARENT("ocrama", OCRAMA_IPG, "axi"), CLK_GATE("aud2htx", AUD2HTX_IPG), CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"), CLK_GATE("sdma2", SDMA2_ROOT), CLK_GATE("sdma3", SDMA3_ROOT), CLK_GATE("spba2", SPBA2_ROOT), - CLK_GATE("dsp", DSP_ROOT), - CLK_GATE("dspdbg", DSPDBG_ROOT), + CLK_GATE_PARENT("dsp", DSP_ROOT, "axi"), + CLK_GATE_PARENT("dspdbg", DSPDBG_ROOT, "axi"), CLK_GATE("edma", EDMA_ROOT), CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"), CLK_GATE("mu2", MU2_ROOT), diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index 02dda57105b10..4f92b83965d5a 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -1139,8 +1139,18 @@ static struct clk_regmap g12a_cpu_clk_div16_en = { .hw.init = &(struct clk_init_data) { .name = "cpu_clk_div16_en", .ops = &clk_regmap_gate_ro_ops, - .parent_hws = (const struct clk_hw *[]) { - &g12a_cpu_clk.hw + .parent_data = &(const struct clk_parent_data) { + /* + * Note: + * G12A and G12B have different cpu clocks (with + * different struct clk_hw). We fallback to the global + * naming string mechanism so this clock picks + * up the appropriate one. Same goes for the other + * clock using cpu cluster A clock output and present + * on both G12 variant. 
+ */ + .name = "cpu_clk", + .index = -1, }, .num_parents = 1, /* @@ -1205,7 +1215,10 @@ static struct clk_regmap g12a_cpu_clk_apb_div = { .hw.init = &(struct clk_init_data){ .name = "cpu_clk_apb_div", .ops = &clk_regmap_divider_ro_ops, - .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw }, + .parent_data = &(const struct clk_parent_data) { + .name = "cpu_clk", + .index = -1, + }, .num_parents = 1, }, }; @@ -1239,7 +1252,10 @@ static struct clk_regmap g12a_cpu_clk_atb_div = { .hw.init = &(struct clk_init_data){ .name = "cpu_clk_atb_div", .ops = &clk_regmap_divider_ro_ops, - .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw }, + .parent_data = &(const struct clk_parent_data) { + .name = "cpu_clk", + .index = -1, + }, .num_parents = 1, }, }; @@ -1273,7 +1289,10 @@ static struct clk_regmap g12a_cpu_clk_axi_div = { .hw.init = &(struct clk_init_data){ .name = "cpu_clk_axi_div", .ops = &clk_regmap_divider_ro_ops, - .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw }, + .parent_data = &(const struct clk_parent_data) { + .name = "cpu_clk", + .index = -1, + }, .num_parents = 1, }, }; @@ -1308,13 +1327,6 @@ static struct clk_regmap g12a_cpu_clk_trace_div = { .name = "cpu_clk_trace_div", .ops = &clk_regmap_divider_ro_ops, .parent_data = &(const struct clk_parent_data) { - /* - * Note: - * G12A and G12B have different cpu_clks (with - * different struct clk_hw). We fallback to the global - * naming string mechanism so cpu_clk_trace_div picks - * up the appropriate one. - */ .name = "cpu_clk", .index = -1, }, @@ -4317,7 +4329,7 @@ static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14); static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19); static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20); static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23); -static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 4); +static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 24); static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25); static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26); static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28); diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index f071faad1ebb7..d9529de200ae4 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -1272,14 +1272,13 @@ static struct clk_regmap gxbb_cts_i958 = { }, }; +/* + * This table skips a clock named 'cts_slow_oscin' in the documentation + * This clock does not exist yet in this controller or the AO one + */ +static u32 gxbb_32k_clk_parents_val_table[] = { 0, 2, 3 }; static const struct clk_parent_data gxbb_32k_clk_parent_data[] = { { .fw_name = "xtal", }, - /* - * FIXME: This clock is provided by the ao clock controller but the - * clock is not yet part of the binding of this controller, so string - * name must be use to set this parent. 
- */ - { .name = "cts_slow_oscin", .index = -1 }, { .hw = &gxbb_fclk_div3.hw }, { .hw = &gxbb_fclk_div5.hw }, }; @@ -1289,6 +1288,7 @@ static struct clk_regmap gxbb_32k_clk_sel = { .offset = HHI_32K_CLK_CNTL, .mask = 0x3, .shift = 16, + .table = gxbb_32k_clk_parents_val_table, }, .hw.init = &(struct clk_init_data){ .name = "32k_clk_sel", @@ -1312,7 +1312,7 @@ static struct clk_regmap gxbb_32k_clk_div = { &gxbb_32k_clk_sel.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST, + .flags = CLK_SET_RATE_PARENT, }, }; diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c index 229480c5b075a..0f10090d4ae68 100644 --- a/drivers/clk/qcom/clk-branch.c +++ b/drivers/clk/qcom/clk-branch.c @@ -28,7 +28,7 @@ static bool clk_branch_in_hwcg_mode(const struct clk_branch *br) static bool clk_branch_check_halt(const struct clk_branch *br, bool enabling) { - bool invert = (br->halt_check == BRANCH_HALT_ENABLE); + bool invert = (br->halt_check & BRANCH_HALT_ENABLE); u32 val; regmap_read(br->clkr.regmap, br->halt_reg, &val); @@ -44,7 +44,7 @@ static bool clk_branch2_check_halt(const struct clk_branch *br, bool enabling) { u32 val; u32 mask; - bool invert = (br->halt_check == BRANCH_HALT_ENABLE); + bool invert = (br->halt_check & BRANCH_HALT_ENABLE); mask = CBCR_NOC_FSM_STATUS; mask |= CBCR_CLK_OFF; diff --git a/drivers/clk/qcom/gcc-msm8953.c b/drivers/clk/qcom/gcc-msm8953.c index 855a61966f3ef..8f29ecc74c50b 100644 --- a/drivers/clk/qcom/gcc-msm8953.c +++ b/drivers/clk/qcom/gcc-msm8953.c @@ -3770,7 +3770,7 @@ static struct clk_branch gcc_venus0_axi_clk = { static struct clk_branch gcc_venus0_core0_vcodec0_clk = { .halt_reg = 0x4c02c, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x4c02c, .enable_mask = BIT(0), diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c index 9dd5c48f33bed..fa1672c4e7d81 100644 --- a/drivers/clk/qcom/gcc-sm8650.c +++ b/drivers/clk/qcom/gcc-sm8650.c @@ -3497,7 +3497,7 @@ static struct gdsc usb30_prim_gdsc = { .pd = { .name = "usb30_prim_gdsc", }, - .pwrsts = PWRSTS_OFF_ON, + .pwrsts = PWRSTS_RET_ON, .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, }; @@ -3506,7 +3506,7 @@ static struct gdsc usb3_phy_gdsc = { .pd = { .name = "usb3_phy_gdsc", }, - .pwrsts = PWRSTS_OFF_ON, + .pwrsts = PWRSTS_RET_ON, .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, }; diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c index 7288af845434d..009f39139b644 100644 --- a/drivers/clk/qcom/gcc-x1e80100.c +++ b/drivers/clk/qcom/gcc-x1e80100.c @@ -2564,19 +2564,6 @@ static struct clk_branch gcc_disp_hf_axi_clk = { }, }; -static struct clk_branch gcc_disp_xo_clk = { - .halt_reg = 0x27018, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x27018, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_disp_xo_clk", - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_gp1_clk = { .halt_reg = 0x64000, .halt_check = BRANCH_HALT, @@ -2631,21 +2618,6 @@ static struct clk_branch gcc_gp3_clk = { }, }; -static struct clk_branch gcc_gpu_cfg_ahb_clk = { - .halt_reg = 0x71004, - .halt_check = BRANCH_HALT_VOTED, - .hwcg_reg = 0x71004, - .hwcg_bit = 1, - .clkr = { - .enable_reg = 0x71004, - .enable_mask = BIT(0), - .hw.init = &(const struct clk_init_data) { - .name = "gcc_gpu_cfg_ahb_clk", - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_gpu_gpll0_cph_clk_src = { .halt_check = BRANCH_HALT_DELAY, .clkr = { @@ -6268,7 +6240,6 
@@ static struct clk_regmap *gcc_x1e80100_clocks[] = { [GCC_CNOC_PCIE_TUNNEL_CLK] = &gcc_cnoc_pcie_tunnel_clk.clkr, [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr, [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr, - [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr, [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr, [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, @@ -6281,7 +6252,6 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = { [GCC_GPLL7] = &gcc_gpll7.clkr, [GCC_GPLL8] = &gcc_gpll8.clkr, [GCC_GPLL9] = &gcc_gpll9.clkr, - [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr, [GCC_GPU_GPLL0_CPH_CLK_SRC] = &gcc_gpu_gpll0_cph_clk_src.clkr, [GCC_GPU_GPLL0_DIV_CPH_CLK_SRC] = &gcc_gpu_gpll0_div_cph_clk_src.clkr, [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr, diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index fa5fe4c2a2ee7..208fc430ec98f 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -292,6 +292,9 @@ static int gdsc_enable(struct generic_pm_domain *domain) */ udelay(1); + if (sc->flags & RETAIN_FF_ENABLE) + gdsc_retain_ff_on(sc); + /* Turn on HW trigger mode if supported */ if (sc->flags & HW_CTRL) { ret = gdsc_hwctrl(sc, true); @@ -308,9 +311,6 @@ static int gdsc_enable(struct generic_pm_domain *domain) udelay(1); } - if (sc->flags & RETAIN_FF_ENABLE) - gdsc_retain_ff_on(sc); - return 0; } @@ -457,13 +457,6 @@ static int gdsc_init(struct gdsc *sc) goto err_disable_supply; } - /* Turn on HW trigger mode if supported */ - if (sc->flags & HW_CTRL) { - ret = gdsc_hwctrl(sc, true); - if (ret < 0) - goto err_disable_supply; - } - /* * Make sure the retain bit is set if the GDSC is already on, * otherwise we end up turning off the GDSC and destroying all @@ -471,6 +464,14 @@ static int gdsc_init(struct gdsc *sc) */ if (sc->flags & RETAIN_FF_ENABLE) gdsc_retain_ff_on(sc); + + /* Turn on HW trigger mode if supported */ + if (sc->flags & HW_CTRL) { + ret = gdsc_hwctrl(sc, true); + if (ret < 0) + goto err_disable_supply; + } + } else if (sc->flags & ALWAYS_ON) { /* If ALWAYS_ON GDSCs are not ON, turn them ON */ gdsc_enable(&sc->pd); @@ -506,6 +507,23 @@ static int gdsc_init(struct gdsc *sc) return ret; } +static void gdsc_pm_subdomain_remove(struct gdsc_desc *desc, size_t num) +{ + struct device *dev = desc->dev; + struct gdsc **scs = desc->scs; + int i; + + /* Remove subdomains */ + for (i = num - 1; i >= 0; i--) { + if (!scs[i]) + continue; + if (scs[i]->parent) + pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd); + else if (!IS_ERR_OR_NULL(dev->pm_domain)) + pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd); + } +} + int gdsc_register(struct gdsc_desc *desc, struct reset_controller_dev *rcdev, struct regmap *regmap) { @@ -555,30 +573,27 @@ int gdsc_register(struct gdsc_desc *desc, if (!scs[i]) continue; if (scs[i]->parent) - pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd); + ret = pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd); else if (!IS_ERR_OR_NULL(dev->pm_domain)) - pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd); + ret = pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd); + if (ret) + goto err_pm_subdomain_remove; } return of_genpd_add_provider_onecell(dev->of_node, data); + +err_pm_subdomain_remove: + gdsc_pm_subdomain_remove(desc, i); + + return ret; } void gdsc_unregister(struct gdsc_desc *desc) { - int i; struct device *dev = desc->dev; - struct gdsc **scs = desc->scs; size_t num = desc->num; - /* Remove subdomains */ - for (i = 0; i < num; i++) { - if (!scs[i]) - 
continue; - if (scs[i]->parent) - pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd); - else if (!IS_ERR_OR_NULL(dev->pm_domain)) - pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd); - } + gdsc_pm_subdomain_remove(desc, num); of_genpd_del_provider(dev->of_node); } diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c index 98ba5b4518fb3..b9f02d91004e8 100644 --- a/drivers/clk/qcom/mmcc-sdm660.c +++ b/drivers/clk/qcom/mmcc-sdm660.c @@ -2544,7 +2544,7 @@ static struct clk_branch video_core_clk = { static struct clk_branch video_subcore0_clk = { .halt_reg = 0x1048, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x1048, .enable_mask = BIT(0), diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c index c3c2b0c439833..fce2eecfa8c03 100644 --- a/drivers/clk/renesas/r9a07g043-cpg.c +++ b/drivers/clk/renesas/r9a07g043-cpg.c @@ -89,7 +89,9 @@ static const struct clk_div_table dtable_1_32[] = { /* Mux clock tables */ static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" }; +#ifdef CONFIG_ARM64 static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" }; +#endif static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" }; static const u32 mtable_sdhi[] = { 1, 2, 3 }; @@ -137,7 +139,12 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = { DEF_DIV("P2", R9A07G043_CLK_P2, CLK_PLL3_DIV2_4_2, DIVPL3A, dtable_1_32), DEF_FIXED("M0", R9A07G043_CLK_M0, CLK_PLL3_DIV2_4, 1, 1), DEF_FIXED("ZT", R9A07G043_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1), +#ifdef CONFIG_ARM64 DEF_MUX("HP", R9A07G043_CLK_HP, SEL_PLL6_2, sel_pll6_2), +#endif +#ifdef CONFIG_RISCV + DEF_FIXED("HP", R9A07G043_CLK_HP, CLK_PLL6_250, 1, 1), +#endif DEF_FIXED("SPI0", R9A07G043_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2), DEF_FIXED("SPI1", R9A07G043_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4), DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi, diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c index 1ce40fb51f13b..a1f961d5b8569 100644 --- a/drivers/clk/renesas/r9a08g045-cpg.c +++ b/drivers/clk/renesas/r9a08g045-cpg.c @@ -50,7 +50,7 @@ #define G3S_SEL_SDHI2 SEL_PLL_PACK(G3S_CPG_SDHI_DSEL, 8, 2) /* PLL 1/4/6 configuration registers macro. 
*/ -#define G3S_PLL146_CONF(clk1, clk2) ((clk1) << 22 | (clk2) << 12) +#define G3S_PLL146_CONF(clk1, clk2, setting) ((clk1) << 22 | (clk2) << 12 | (setting)) #define DEF_G3S_MUX(_name, _id, _conf, _parent_names, _mux_flags, _clk_flags) \ DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = (_conf), \ @@ -133,7 +133,8 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = { /* Internal Core Clocks */ DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000), - DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8)), + DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8, 0x100), + 1100000000UL), DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3), DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3), DEF_FIXED(".pll4", CLK_PLL4, CLK_EXTAL, 100, 3), diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c index b43b763dfe186..229f4540b219e 100644 --- a/drivers/clk/renesas/rzg2l-cpg.c +++ b/drivers/clk/renesas/rzg2l-cpg.c @@ -51,6 +51,7 @@ #define RZG3S_DIV_M GENMASK(25, 22) #define RZG3S_DIV_NI GENMASK(21, 13) #define RZG3S_DIV_NF GENMASK(12, 1) +#define RZG3S_SEL_PLL BIT(0) #define CLK_ON_R(reg) (reg) #define CLK_MON_R(reg) (0x180 + (reg)) @@ -60,6 +61,7 @@ #define GET_REG_OFFSET(val) ((val >> 20) & 0xfff) #define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff) #define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff) +#define GET_REG_SAMPLL_SETTING(val) ((val) & 0xfff) #define CPG_WEN_BIT BIT(16) @@ -943,6 +945,7 @@ rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core, struct pll_clk { struct clk_hw hw; + unsigned long default_rate; unsigned int conf; unsigned int type; void __iomem *base; @@ -980,12 +983,19 @@ static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw, { struct pll_clk *pll_clk = to_pll(hw); struct rzg2l_cpg_priv *priv = pll_clk->priv; - u32 nir, nfr, mr, pr, val; + u32 nir, nfr, mr, pr, val, setting; u64 rate; if (pll_clk->type != CLK_TYPE_G3S_PLL) return parent_rate; + setting = GET_REG_SAMPLL_SETTING(pll_clk->conf); + if (setting) { + val = readl(priv->base + setting); + if (val & RZG3S_SEL_PLL) + return pll_clk->default_rate; + } + val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf)); pr = 1 << FIELD_GET(RZG3S_DIV_P, val); @@ -1038,6 +1048,7 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core, pll_clk->base = priv->base; pll_clk->priv = priv; pll_clk->type = core->type; + pll_clk->default_rate = core->default_rate; ret = devm_clk_hw_register(dev, &pll_clk->hw); if (ret) diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h index ecfe7e7ea8a17..019efe00ffd9f 100644 --- a/drivers/clk/renesas/rzg2l-cpg.h +++ b/drivers/clk/renesas/rzg2l-cpg.h @@ -102,7 +102,10 @@ struct cpg_core_clk { const struct clk_div_table *dtable; const u32 *mtable; const unsigned long invalid_rate; - const unsigned long max_rate; + union { + const unsigned long max_rate; + const unsigned long default_rate; + }; const char * const *parent_names; notifier_fn_t notifier; u32 flag; @@ -144,8 +147,9 @@ enum clk_types { DEF_TYPE(_name, _id, _type, .parent = _parent) #define DEF_SAMPLL(_name, _id, _parent, _conf) \ DEF_TYPE(_name, _id, CLK_TYPE_SAM_PLL, .parent = _parent, .conf = _conf) -#define DEF_G3S_PLL(_name, _id, _parent, _conf) \ - DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf) +#define DEF_G3S_PLL(_name, _id, _parent, _conf, _default_rate) \ + DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf, \ + .default_rate = _default_rate) #define 
DEF_INPUT(_name, _id) \ DEF_TYPE(_name, _id, CLK_TYPE_IN) #define DEF_FIXED(_name, _id, _parent, _mult, _div) \ diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c index 3bb87b27b662d..cf60fcf2fa5cd 100644 --- a/drivers/clk/rockchip/clk-rk3328.c +++ b/drivers/clk/rockchip/clk-rk3328.c @@ -201,7 +201,7 @@ PNAME(mux_aclk_peri_pre_p) = { "cpll_peri", "gpll_peri", "hdmiphy_peri" }; PNAME(mux_ref_usb3otg_src_p) = { "xin24m", - "clk_usb3otg_ref" }; + "clk_ref_usb3otg_src" }; PNAME(mux_xin24m_32k_p) = { "xin24m", "clk_rtc32k" }; PNAME(mux_mac2io_src_p) = { "clk_mac2io_src", diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c index 85098c61c15e6..4d4363bc8b28d 100644 --- a/drivers/clk/samsung/clk-gs101.c +++ b/drivers/clk/samsung/clk-gs101.c @@ -382,17 +382,9 @@ static const unsigned long cmu_top_clk_regs[] __initconst = { EARLY_WAKEUP_DPU_DEST, EARLY_WAKEUP_CSIS_DEST, EARLY_WAKEUP_SW_TRIG_APM, - EARLY_WAKEUP_SW_TRIG_APM_SET, - EARLY_WAKEUP_SW_TRIG_APM_CLEAR, EARLY_WAKEUP_SW_TRIG_CLUSTER0, - EARLY_WAKEUP_SW_TRIG_CLUSTER0_SET, - EARLY_WAKEUP_SW_TRIG_CLUSTER0_CLEAR, EARLY_WAKEUP_SW_TRIG_DPU, - EARLY_WAKEUP_SW_TRIG_DPU_SET, - EARLY_WAKEUP_SW_TRIG_DPU_CLEAR, EARLY_WAKEUP_SW_TRIG_CSIS, - EARLY_WAKEUP_SW_TRIG_CSIS_SET, - EARLY_WAKEUP_SW_TRIG_CSIS_CLEAR, CLK_CON_MUX_MUX_CLKCMU_BO_BUS, CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS, CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS, diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c index cca3e630922c1..68a72f5fd9a5a 100644 --- a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ -206,6 +206,7 @@ static const struct clk_ops samsung_pll3000_clk_ops = { */ /* Maximum lock time can be 270 * PDIV cycles */ #define PLL35XX_LOCK_FACTOR (270) +#define PLL142XX_LOCK_FACTOR (150) #define PLL35XX_MDIV_MASK (0x3FF) #define PLL35XX_PDIV_MASK (0x3F) @@ -272,7 +273,11 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate, } /* Set PLL lock time. 
*/ - writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR, + if (pll->type == pll_142xx) + writel_relaxed(rate->pdiv * PLL142XX_LOCK_FACTOR, + pll->lock_reg); + else + writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR, pll->lock_reg); /* Change PLL PMS values */ diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index afa5760ed3a11..e6533513f8ac2 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c @@ -74,12 +74,12 @@ struct samsung_clk_provider * __init samsung_clk_init(struct device *dev, if (!ctx) panic("could not allocate clock provider context.\n"); + ctx->clk_data.num = nr_clks; for (i = 0; i < nr_clks; ++i) ctx->clk_data.hws[i] = ERR_PTR(-ENOENT); ctx->dev = dev; ctx->reg_base = base; - ctx->clk_data.num = nr_clks; spin_lock_init(&ctx->lock); return ctx; diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c index a3fe98cd38382..82815428f8f92 100644 --- a/drivers/clocksource/jcore-pit.c +++ b/drivers/clocksource/jcore-pit.c @@ -114,6 +114,18 @@ static int jcore_pit_local_init(unsigned cpu) pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd); clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX); + enable_percpu_irq(pit->ced.irq, IRQ_TYPE_NONE); + + return 0; +} + +static int jcore_pit_local_teardown(unsigned cpu) +{ + struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu); + + pr_info("Local J-Core PIT teardown on cpu %u\n", cpu); + + disable_percpu_irq(pit->ced.irq); return 0; } @@ -168,6 +180,7 @@ static int __init jcore_pit_init(struct device_node *node) return -ENOMEM; } + irq_set_percpu_devid(pit_irq); err = request_percpu_irq(pit_irq, jcore_timer_interrupt, "jcore_pit", jcore_pit_percpu); if (err) { @@ -237,7 +250,7 @@ static int __init jcore_pit_init(struct device_node *node) cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING, "clockevents/jcore:starting", - jcore_pit_local_init, NULL); + jcore_pit_local_init, jcore_pit_local_teardown); return 0; } diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c index a4c95161cb22c..193e4f643358b 100644 --- a/drivers/clocksource/timer-stm32-lp.c +++ b/drivers/clocksource/timer-stm32-lp.c @@ -168,9 +168,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev) } if (of_property_read_bool(pdev->dev.parent->of_node, "wakeup-source")) { - ret = device_init_wakeup(&pdev->dev, true); - if (ret) - goto out_clk_disable; + device_set_wakeup_capable(&pdev->dev, true); ret = dev_pm_set_wake_irq(&pdev->dev, irq); if (ret) diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c index b3e615cbd2caa..461f57f66631c 100644 --- a/drivers/counter/microchip-tcb-capture.c +++ b/drivers/counter/microchip-tcb-capture.c @@ -368,6 +368,25 @@ static int mchp_tc_probe(struct platform_device *pdev) channel); } + /* Disable Quadrature Decoder and position measure */ + ret = regmap_update_bits(regmap, ATMEL_TC_BMR, ATMEL_TC_QDEN | ATMEL_TC_POSEN, 0); + if (ret) + return ret; + + /* Setup the period capture mode */ + ret = regmap_update_bits(regmap, ATMEL_TC_REG(priv->channel[0], CMR), + ATMEL_TC_WAVE | ATMEL_TC_ABETRG | ATMEL_TC_CMR_MASK | + ATMEL_TC_TCCLKS, + ATMEL_TC_CMR_MASK); + if (ret) + return ret; + + /* Enable clock and trigger counter */ + ret = regmap_write(regmap, ATMEL_TC_REG(priv->channel[0], CCR), + ATMEL_TC_CLKEN | ATMEL_TC_SWTRG); + if (ret) + return ret; + priv->tc_cfg = tcb_config; priv->regmap = regmap; counter->name = dev_name(&pdev->dev); diff --git a/drivers/counter/stm32-lptimer-cnt.c 
b/drivers/counter/stm32-lptimer-cnt.c index 8439755559b21..537fe9b669f35 100644 --- a/drivers/counter/stm32-lptimer-cnt.c +++ b/drivers/counter/stm32-lptimer-cnt.c @@ -58,37 +58,43 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv, return 0; } + ret = clk_enable(priv->clk); + if (ret) + goto disable_cnt; + /* LP timer must be enabled before writing CMP & ARR */ ret = regmap_write(priv->regmap, STM32_LPTIM_ARR, priv->ceiling); if (ret) - return ret; + goto disable_clk; ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, 0); if (ret) - return ret; + goto disable_clk; /* ensure CMP & ARR registers are properly written */ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK, 100, 1000); if (ret) - return ret; + goto disable_clk; ret = regmap_write(priv->regmap, STM32_LPTIM_ICR, STM32_LPTIM_CMPOKCF_ARROKCF); if (ret) - return ret; + goto disable_clk; - ret = clk_enable(priv->clk); - if (ret) { - regmap_write(priv->regmap, STM32_LPTIM_CR, 0); - return ret; - } priv->enabled = true; /* Start LP timer in continuous mode */ return regmap_update_bits(priv->regmap, STM32_LPTIM_CR, STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT); + +disable_clk: + clk_disable(priv->clk); +disable_cnt: + regmap_write(priv->regmap, STM32_LPTIM_CR, 0); + + return ret; } static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable) diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 5f7e13e60c802..e67b2326671c9 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -245,7 +245,7 @@ config ARM_TEGRA186_CPUFREQ config ARM_TEGRA194_CPUFREQ tristate "Tegra194 CPUFreq support" - depends on ARCH_TEGRA_194_SOC || (64BIT && COMPILE_TEST) + depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST) depends on TEGRA_BPMP default y help diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index f98c9438760c9..67b4e3d18ffe2 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2748,10 +2748,18 @@ EXPORT_SYMBOL(cpufreq_update_policy); */ void cpufreq_update_limits(unsigned int cpu) { + struct cpufreq_policy *policy; + + policy = cpufreq_cpu_get(cpu); + if (!policy) + return; + if (cpufreq_driver->update_limits) cpufreq_driver->update_limits(cpu); else cpufreq_update_policy(cpu); + + cpufreq_cpu_put(policy); } EXPORT_SYMBOL_GPL(cpufreq_update_limits); diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index af44ee6a64304..1a7fcaf39cc9b 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -145,7 +145,23 @@ unsigned int dbs_update(struct cpufreq_policy *policy) time_elapsed = update_time - j_cdbs->prev_update_time; j_cdbs->prev_update_time = update_time; - idle_time = cur_idle_time - j_cdbs->prev_cpu_idle; + /* + * cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if + * it's obtained from get_cpu_idle_time_jiffy() when NOHZ is + * off, where idle_time is calculated by the difference between + * time elapsed in jiffies and "busy time" obtained from CPU + * statistics. If a CPU is 100% busy, the time elapsed and busy + * time should grow with the same amount in two consecutive + * samples, but in practice there could be a tiny difference, + * making the accumulated idle time decrease sometimes. Hence, + * in this case, idle_time should be regarded as 0 in order to + * make the further process correct. 
+ */ + if (cur_idle_time > j_cdbs->prev_cpu_idle) + idle_time = cur_idle_time - j_cdbs->prev_cpu_idle; + else + idle_time = 0; + j_cdbs->prev_cpu_idle = cur_idle_time; if (ignore_nice) { @@ -162,7 +178,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy) * calls, so the previous load value can be used then. */ load = j_cdbs->prev_load; - } else if (unlikely((int)idle_time > 2 * sampling_rate && + } else if (unlikely(idle_time > 2 * sampling_rate && j_cdbs->prev_load)) { /* * If the CPU had gone completely idle and a task has @@ -189,30 +205,15 @@ unsigned int dbs_update(struct cpufreq_policy *policy) load = j_cdbs->prev_load; j_cdbs->prev_load = 0; } else { - if (time_elapsed >= idle_time) { + if (time_elapsed > idle_time) load = 100 * (time_elapsed - idle_time) / time_elapsed; - } else { - /* - * That can happen if idle_time is returned by - * get_cpu_idle_time_jiffy(). In that case - * idle_time is roughly equal to the difference - * between time_elapsed and "busy time" obtained - * from CPU statistics. Then, the "busy time" - * can end up being greater than time_elapsed - * (for example, if jiffies_64 and the CPU - * statistics are updated by different CPUs), - * so idle_time may in fact be negative. That - * means, though, that the CPU was busy all - * the time (on the rough average) during the - * last sampling interval and 100 can be - * returned as the load. - */ - load = (int)idle_time < 0 ? 100 : 0; - } + else + load = 0; + j_cdbs->prev_load = load; } - if (unlikely((int)idle_time > 2 * sampling_rate)) { + if (unlikely(idle_time > 2 * sampling_rate)) { unsigned int periods = idle_time / sampling_rate; if (periods < idle_periods) diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 8d73e6e8be2a5..f2d913a91be9e 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -39,8 +39,9 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu) static int scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) { - u64 rate = policy->freq_table[index].frequency * 1000; + unsigned long freq_khz = policy->freq_table[index].frequency; struct scpi_data *priv = policy->driver_data; + unsigned long rate = freq_khz * 1000; int ret; ret = clk_set_rate(priv->clk, rate); @@ -48,7 +49,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) if (ret) return ret; - if (clk_get_rate(priv->clk) != rate) + if (clk_get_rate(priv->clk) / 1000 != freq_khz) return -EIO; return 0; diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 8ed2bb01a619f..4486063005001 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -122,12 +122,12 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req) qm_fd_addr_set64(&fd, addr); do { + refcount_inc(&req->drv_ctx->refcnt); ret = qman_enqueue(req->drv_ctx->req_fq, &fd); - if (likely(!ret)) { - refcount_inc(&req->drv_ctx->refcnt); + if (likely(!ret)) return 0; - } + refcount_dec(&req->drv_ctx->refcnt); if (ret != -EBUSY) break; num_retries++; diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 248d98fd8c48d..157f9a9ed6361 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -189,14 +189,17 @@ static bool sp_pci_is_master(struct sp_device *sp) pdev_new = to_pci_dev(dev_new); pdev_cur = to_pci_dev(dev_cur); - if (pdev_new->bus->number < pdev_cur->bus->number) - return true; + if (pci_domain_nr(pdev_new->bus) != pci_domain_nr(pdev_cur->bus)) + return 
pci_domain_nr(pdev_new->bus) < pci_domain_nr(pdev_cur->bus); - if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn)) - return true; + if (pdev_new->bus->number != pdev_cur->bus->number) + return pdev_new->bus->number < pdev_cur->bus->number; - if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn)) - return true; + if (PCI_SLOT(pdev_new->devfn) != PCI_SLOT(pdev_cur->devfn)) + return PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn); + + if (PCI_FUNC(pdev_new->devfn) != PCI_FUNC(pdev_cur->devfn)) + return PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn); return false; } diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 30c2b1a64695c..2fc04e210bc4f 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -37,7 +37,6 @@ struct sec_aead_req { u8 *a_ivin; dma_addr_t a_ivin_dma; struct aead_request *aead_req; - bool fallback; }; /* SEC request of Crypto */ diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index a9b1b9b0b03bf..8605cb3cae92c 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -57,7 +57,6 @@ #define SEC_TYPE_MASK 0x0F #define SEC_DONE_MASK 0x0001 #define SEC_ICV_MASK 0x000E -#define SEC_SQE_LEN_RATE_MASK 0x3 #define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth)) #define SEC_SGL_SGE_NR 128 @@ -80,16 +79,16 @@ #define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \ SEC_PBUF_LEFT_SZ(depth)) -#define SEC_SQE_LEN_RATE 4 #define SEC_SQE_CFLAG 2 #define SEC_SQE_AEAD_FLAG 3 #define SEC_SQE_DONE 0x1 #define SEC_ICV_ERR 0x2 -#define MIN_MAC_LEN 4 #define MAC_LEN_MASK 0x1U #define MAX_INPUT_DATA_LEN 0xFFFE00 #define BITS_MASK 0xFF +#define WORD_MASK 0x3 #define BYTE_BITS 0x8 +#define BYTES_TO_WORDS(bcount) ((bcount) >> 2) #define SEC_XTS_NAME_SZ 0x3 #define IV_CM_CAL_NUM 2 #define IV_CL_MASK 0x7 @@ -691,14 +690,10 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm) c_ctx->fallback = false; - /* Currently, only XTS mode need fallback tfm when using 192bit key */ - if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ))) - return 0; - c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(c_ctx->fbtfm)) { - pr_err("failed to alloc xts mode fallback tfm!\n"); + pr_err("failed to alloc fallback tfm for %s!\n", alg); return PTR_ERR(c_ctx->fbtfm); } @@ -858,7 +853,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, } memcpy(c_ctx->c_key, key, keylen); - if (c_ctx->fallback && c_ctx->fbtfm) { + if (c_ctx->fbtfm) { ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); if (ret) { dev_err(dev, "failed to set fallback skcipher key!\n"); @@ -1090,11 +1085,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, struct crypto_shash *hash_tfm = ctx->hash_tfm; int blocksize, digestsize, ret; - if (!keys->authkeylen) { - pr_err("hisi_sec2: aead auth key error!\n"); - return -EINVAL; - } - blocksize = crypto_shash_blocksize(hash_tfm); digestsize = crypto_shash_digestsize(hash_tfm); if (keys->authkeylen > blocksize) { @@ -1106,7 +1096,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, } ctx->a_key_len = digestsize; } else { - memcpy(ctx->a_key, keys->authkey, keys->authkeylen); + if (keys->authkeylen) + memcpy(ctx->a_key, keys->authkey, keys->authkeylen); ctx->a_key_len = keys->authkeylen; } @@ -1160,8 +1151,10 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, } ret = 
crypto_authenc_extractkeys(&keys, key, keylen); - if (ret) + if (ret) { + dev_err(dev, "sec extract aead keys err!\n"); goto bad_key; + } ret = sec_aead_aes_set_key(c_ctx, &keys); if (ret) { @@ -1175,12 +1168,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, goto bad_key; } - if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) { - ret = -EINVAL; - dev_err(dev, "AUTH key length error!\n"); - goto bad_key; - } - ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); if (ret) { dev_err(dev, "set sec fallback key err!\n"); @@ -1583,11 +1570,10 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir, sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma); - sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE); + sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize)); sec_sqe->type2.mac_key_alg |= - cpu_to_le32((u32)((ctx->a_key_len) / - SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET); + cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET); sec_sqe->type2.mac_key_alg |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET); @@ -1639,12 +1625,10 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir, sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma); sqe3->auth_mac_key |= - cpu_to_le32((u32)(authsize / - SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3); + cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3); sqe3->auth_mac_key |= - cpu_to_le32((u32)(ctx->a_key_len / - SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3); + cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3); sqe3->auth_mac_key |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3); @@ -2003,8 +1987,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) return sec_aead_ctx_init(tfm, "sha512"); } -static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, - struct sec_req *sreq) +static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq) { u32 cryptlen = sreq->c_req.sk_req->cryptlen; struct device *dev = ctx->dev; @@ -2026,10 +2009,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, } break; case SEC_CMODE_CTR: - if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) { - dev_err(dev, "skcipher HW version error!\n"); - ret = -EINVAL; - } break; default: ret = -EINVAL; @@ -2038,17 +2017,21 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, return ret; } -static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) +static int sec_skcipher_param_check(struct sec_ctx *ctx, + struct sec_req *sreq, bool *need_fallback) { struct skcipher_request *sk_req = sreq->c_req.sk_req; struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; - if (unlikely(!sk_req->src || !sk_req->dst || - sk_req->cryptlen > MAX_INPUT_DATA_LEN)) { + if (unlikely(!sk_req->src || !sk_req->dst)) { dev_err(dev, "skcipher input param error!\n"); return -EINVAL; } + + if (sk_req->cryptlen > MAX_INPUT_DATA_LEN) + *need_fallback = true; + sreq->c_req.c_len = sk_req->cryptlen; if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ) @@ -2106,6 +2089,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req); struct sec_req *req = skcipher_request_ctx(sk_req); struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + bool need_fallback = false; int ret; if (!sk_req->cryptlen) { @@ -2119,11 +2103,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) req->c_req.encrypt = encrypt; req->ctx = ctx; - ret = 
sec_skcipher_param_check(ctx, req); + ret = sec_skcipher_param_check(ctx, req, &need_fallback); if (unlikely(ret)) return -EINVAL; - if (unlikely(ctx->c_ctx.fallback)) + if (unlikely(ctx->c_ctx.fallback || need_fallback)) return sec_skcipher_soft_crypto(ctx, sk_req, encrypt); return ctx->req_op->process(ctx, req); @@ -2231,52 +2215,35 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq) struct crypto_aead *tfm = crypto_aead_reqtfm(req); size_t sz = crypto_aead_authsize(tfm); u8 c_mode = ctx->c_ctx.c_mode; - struct device *dev = ctx->dev; int ret; - /* Hardware does not handle cases where authsize is less than 4 bytes */ - if (unlikely(sz < MIN_MAC_LEN)) { - sreq->aead_req.fallback = true; + if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len)) return -EINVAL; - } if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN || - req->assoclen > SEC_MAX_AAD_LEN)) { - dev_err(dev, "aead input spec error!\n"); + req->assoclen > SEC_MAX_AAD_LEN)) return -EINVAL; - } if (c_mode == SEC_CMODE_CCM) { - if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) { - dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n"); + if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) return -EINVAL; - } - ret = aead_iv_demension_check(req); - if (ret) { - dev_err(dev, "aead input iv param error!\n"); - return ret; - } - } - if (sreq->c_req.encrypt) - sreq->c_req.c_len = req->cryptlen; - else - sreq->c_req.c_len = req->cryptlen - sz; - if (c_mode == SEC_CMODE_CBC) { - if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { - dev_err(dev, "aead crypto length error!\n"); + ret = aead_iv_demension_check(req); + if (unlikely(ret)) + return -EINVAL; + } else if (c_mode == SEC_CMODE_CBC) { + if (unlikely(sz & WORD_MASK)) + return -EINVAL; + if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK)) return -EINVAL; - } } return 0; } -static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) +static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback) { struct aead_request *req = sreq->aead_req.aead_req; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - size_t authsize = crypto_aead_authsize(tfm); struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; @@ -2285,12 +2252,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) return -EINVAL; } - if (ctx->sec->qm.ver == QM_HW_V2) { - if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt && - req->cryptlen <= authsize))) { - sreq->aead_req.fallback = true; - return -EINVAL; - } + if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC && + sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { + dev_err(dev, "aead cbc mode input data length error!\n"); + return -EINVAL; } /* Support AES or SM4 */ @@ -2299,8 +2264,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) return -EINVAL; } - if (unlikely(sec_aead_spec_check(ctx, sreq))) + if (unlikely(sec_aead_spec_check(ctx, sreq))) { + *need_fallback = true; return -EINVAL; + } if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <= SEC_PBUF_SZ) @@ -2344,17 +2311,19 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); struct sec_req *req = aead_request_ctx(a_req); struct sec_ctx *ctx = crypto_aead_ctx(tfm); + size_t sz = crypto_aead_authsize(tfm); + bool need_fallback = false; int ret; req->flag = a_req->base.flags; req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; req->ctx = ctx; - req->aead_req.fallback = 
false; + req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz); - ret = sec_aead_param_check(ctx, req); + ret = sec_aead_param_check(ctx, req, &need_fallback); if (unlikely(ret)) { - if (req->aead_req.fallback) + if (need_fallback) return sec_aead_soft_crypto(ctx, a_req, encrypt); return -EINVAL; } diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index d2f07e34f3142..e1f60f0f507c9 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -1527,7 +1527,7 @@ static int iaa_comp_acompress(struct acomp_req *req) iaa_wq = idxd_wq_get_private(wq); if (!req->dst) { - gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; + gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; /* incompressible data will always be < 2 * slen */ req->dlen = 2 * req->slen; @@ -1609,7 +1609,7 @@ static int iaa_comp_acompress(struct acomp_req *req) static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) { - gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; struct crypto_tfm *tfm = req->base.tfm; dma_addr_t src_addr, dst_addr; diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 9faef33e54bd3..a17adc4beda2e 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -420,6 +420,7 @@ static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK; dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK; dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK; + dev_err_mask->parerr_wat_wcp_mask = ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK; dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 2dd3772bf58a6..0f7f00a19e7dc 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -695,7 +695,7 @@ static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev, if (err_mask->parerr_wat_wcp_mask) adf_poll_slicehang_csr(accel_dev, csr, ADF_GEN4_SLICEHANGSTATUS_WAT_WCP, - "ath_cph"); + "wat_wcp"); return false; } @@ -1043,63 +1043,16 @@ static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, return reset_required; } -static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, +static void adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 iastatssm) { - struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); - u32 reg; - if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT)) - return false; - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC); - reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH); - reg &= err_mask->parerr_ath_cph_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT); - reg &= 
err_mask->parerr_cpr_xlt_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS); - reg &= err_mask->parerr_dcpr_ucs_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE); - reg &= err_mask->parerr_pke_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg); - } - - if (err_mask->parerr_wat_wcp_mask) { - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP); - reg &= err_mask->parerr_wat_wcp_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP, - reg); - } - } + return; + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported"); - return false; + return; } static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, @@ -1171,8 +1124,8 @@ static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm); reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm); reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm); - reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm); reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm); + adf_handle_rf_parr_err(accel_dev, csr, iastatssm); ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm); diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c index 35f2d0d8507ed..7e98f174f69b9 100644 --- a/drivers/crypto/nx/nx-common-pseries.c +++ b/drivers/crypto/nx/nx-common-pseries.c @@ -1144,6 +1144,7 @@ static void __init nxcop_get_capabilities(void) { struct hv_vas_all_caps *hv_caps; struct hv_nx_cop_caps *hv_nxc; + u64 feat; int rc; hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL); @@ -1154,27 +1155,26 @@ static void __init nxcop_get_capabilities(void) */ rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0, (u64)virt_to_phys(hv_caps)); + if (!rc) + feat = be64_to_cpu(hv_caps->feat_type); + kfree(hv_caps); if (rc) - goto out; + return; + if (!(feat & VAS_NX_GZIP_FEAT_BIT)) + return; - caps_feat = be64_to_cpu(hv_caps->feat_type); /* * NX-GZIP feature available */ - if (caps_feat & VAS_NX_GZIP_FEAT_BIT) { - hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL); - if (!hv_nxc) - goto out; - /* - * Get capabilities for NX-GZIP feature - */ - rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, - VAS_NX_GZIP_FEAT, - (u64)virt_to_phys(hv_nxc)); - } else { - pr_err("NX-GZIP feature is not available\n"); - rc = -EINVAL; - } + hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL); + if (!hv_nxc) + return; + /* + * Get capabilities for NX-GZIP feature + */ + rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, + VAS_NX_GZIP_FEAT, + (u64)virt_to_phys(hv_nxc)); if (!rc) { nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor); @@ -1184,13 +1184,10 @@ static void __init nxcop_get_capabilities(void) be64_to_cpu(hv_nxc->min_compress_len); nx_cop_caps.min_decompress_len = be64_to_cpu(hv_nxc->min_decompress_len); - } else { - caps_feat = 0; + caps_feat = feat; } kfree(hv_nxc); -out: - kfree(hv_caps); } static const struct vio_device_id nx842_vio_driver_ids[] = { diff --git 
a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index 3106fd1e84b91..cd52807e76afd 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -263,13 +263,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) unsigned int cmdlen; int ret; - rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - return -ENOMEM; - - rctx->datbuf.size = SE_AES_BUFLEN; - rctx->iv = (u32 *)req->iv; + rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv; rctx->len = req->cryptlen; /* Pad input to AES Block size */ @@ -278,18 +272,24 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE); } + rctx->datbuf.size = rctx->len; + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; + scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0); /* Prepare the command and submit for execution */ cmdlen = tegra_aes_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); /* Copy the result */ tegra_aes_update_iv(req, ctx); scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1); /* Free the buffer */ - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->datbuf.size, rctx->datbuf.buf, rctx->datbuf.addr); crypto_finalize_skcipher_request(se->engine, req, ret); @@ -719,7 +719,7 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct cmdlen = tegra_gmac_prep_cmd(ctx, rctx); - return tegra_se_host1x_submit(se, cmdlen); + return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); } static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) @@ -736,7 +736,7 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc /* Prepare command and submit */ cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret; @@ -759,7 +759,7 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc /* Prepare command and submit */ cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret; @@ -891,7 +891,7 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req /* Prepare command and submit */ cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx); - return tegra_se_host1x_submit(se, cmdlen); + return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); } static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize) @@ -1098,7 +1098,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx /* Prepare command and submit */ cmdlen = tegra_ctr_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret; @@ -1117,6 +1117,11 @@ static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se, rctx->assoclen = req->assoclen; rctx->authsize = crypto_aead_authsize(tfm); + if (rctx->encrypt) + rctx->cryptlen = req->cryptlen; + else + rctx->cryptlen = req->cryptlen - rctx->authsize; + memcpy(iv, req->iv, 16); ret = 
tegra_ccm_check_iv(iv); @@ -1145,30 +1150,26 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_se *se = ctx->se; int ret; + ret = tegra_ccm_crypt_init(req, se, rctx); + if (ret) + return ret; + /* Allocate buffers required */ - rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, + rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; + rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, &rctx->inbuf.addr, GFP_KERNEL); if (!rctx->inbuf.buf) return -ENOMEM; - rctx->inbuf.size = SE_AES_BUFLEN; - - rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, + rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; + rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size, &rctx->outbuf.addr, GFP_KERNEL); if (!rctx->outbuf.buf) { ret = -ENOMEM; goto outbuf_err; } - rctx->outbuf.size = SE_AES_BUFLEN; - - ret = tegra_ccm_crypt_init(req, se, rctx); - if (ret) - goto out; - if (rctx->encrypt) { - rctx->cryptlen = req->cryptlen; - /* CBC MAC Operation */ ret = tegra_ccm_compute_auth(ctx, rctx); if (ret) @@ -1179,10 +1180,6 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) if (ret) goto out; } else { - rctx->cryptlen = req->cryptlen - ctx->authsize; - if (ret) - goto out; - /* CTR operation */ ret = tegra_ccm_do_ctr(ctx, rctx); if (ret) @@ -1195,11 +1192,11 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) } out: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->inbuf.size, rctx->outbuf.buf, rctx->outbuf.addr); outbuf_err: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->outbuf.size, rctx->inbuf.buf, rctx->inbuf.addr); crypto_finalize_aead_request(ctx->se->engine, req, ret); @@ -1215,23 +1212,6 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_aead_reqctx *rctx = aead_request_ctx(req); int ret; - /* Allocate buffers required */ - rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, - &rctx->inbuf.addr, GFP_KERNEL); - if (!rctx->inbuf.buf) - return -ENOMEM; - - rctx->inbuf.size = SE_AES_BUFLEN; - - rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, - &rctx->outbuf.addr, GFP_KERNEL); - if (!rctx->outbuf.buf) { - ret = -ENOMEM; - goto outbuf_err; - } - - rctx->outbuf.size = SE_AES_BUFLEN; - rctx->src_sg = req->src; rctx->dst_sg = req->dst; rctx->assoclen = req->assoclen; @@ -1245,6 +1225,21 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); rctx->iv[3] = (1 << 24); + /* Allocate buffers required */ + rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; + rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, + &rctx->inbuf.addr, GFP_KERNEL); + if (!rctx->inbuf.buf) + return -ENOMEM; + + rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; + rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size, + &rctx->outbuf.addr, GFP_KERNEL); + if (!rctx->outbuf.buf) { + ret = -ENOMEM; + goto outbuf_err; + } + /* If there is associated data perform GMAC operation */ if (rctx->assoclen) { ret = tegra_gcm_do_gmac(ctx, rctx); @@ -1268,11 +1263,11 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) ret = tegra_gcm_do_verify(ctx->se, rctx); out: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, 
rctx->outbuf.size, rctx->outbuf.buf, rctx->outbuf.addr); outbuf_err: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->inbuf.size, rctx->inbuf.buf, rctx->inbuf.addr); /* Finalize the request if there are no errors */ @@ -1499,6 +1494,11 @@ static int tegra_cmac_do_update(struct ahash_request *req) return 0; } + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; + /* Copy the previous residue first */ if (rctx->residue.size) memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); @@ -1513,23 +1513,19 @@ static int tegra_cmac_do_update(struct ahash_request *req) rctx->residue.size = nresidue; /* - * If this is not the first 'update' call, paste the previous copied + * If this is not the first task, paste the previous copied * intermediate results to the registers so that it gets picked up. - * This is to support the import/export functionality. */ if (!(rctx->task & SHA_FIRST)) tegra_cmac_paste_result(ctx->se, rctx); cmdlen = tegra_cmac_prep_cmd(ctx, rctx); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); - ret = tegra_se_host1x_submit(se, cmdlen); - /* - * If this is not the final update, copy the intermediate results - * from the registers so that it can be used in the next 'update' - * call. This is to support the import/export functionality. - */ - if (!(rctx->task & SHA_FINAL)) - tegra_cmac_copy_result(ctx->se, rctx); + tegra_cmac_copy_result(ctx->se, rctx); + + dma_free_coherent(ctx->se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); return ret; } @@ -1545,17 +1541,34 @@ static int tegra_cmac_do_final(struct ahash_request *req) if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) { return crypto_shash_tfm_digest(ctx->fallback_tfm, - rctx->datbuf.buf, 0, req->result); + NULL, 0, req->result); + } + + if (rctx->residue.size) { + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_free; + } + + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); } - memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); rctx->datbuf.size = rctx->residue.size; rctx->total_len += rctx->residue.size; rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0); + /* + * If this is not the first task, paste the previous copied + * intermediate results to the registers so that it gets picked up. 
+ */ + if (!(rctx->task & SHA_FIRST)) + tegra_cmac_paste_result(ctx->se, rctx); + /* Prepare command and submit */ cmdlen = tegra_cmac_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) goto out; @@ -1567,8 +1580,10 @@ static int tegra_cmac_do_final(struct ahash_request *req) writel(0, se->base + se->hw->regs->result + (i * 4)); out: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, - rctx->datbuf.buf, rctx->datbuf.addr); + if (rctx->residue.size) + dma_free_coherent(se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); +out_free: dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2, rctx->residue.buf, rctx->residue.addr); return ret; @@ -1581,18 +1596,24 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_se *se = ctx->se; - int ret; + int ret = 0; if (rctx->task & SHA_UPDATE) { ret = tegra_cmac_do_update(req); + if (ret) + goto out; + rctx->task &= ~SHA_UPDATE; } if (rctx->task & SHA_FINAL) { ret = tegra_cmac_do_final(req); + if (ret) + goto out; + rctx->task &= ~SHA_FINAL; } - +out: crypto_finalize_hash_request(se->engine, req, ret); return 0; @@ -1674,28 +1695,15 @@ static int tegra_cmac_init(struct ahash_request *req) rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, &rctx->residue.addr, GFP_KERNEL); if (!rctx->residue.buf) - goto resbuf_fail; + return -ENOMEM; rctx->residue.size = 0; - rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - goto datbuf_fail; - - rctx->datbuf.size = 0; - /* Clear any previous result */ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) writel(0, se->base + se->hw->regs->result + (i * 4)); return 0; - -datbuf_fail: - dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf, - rctx->residue.addr); -resbuf_fail: - return -ENOMEM; } static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index 0b5cdd5676b17..451b8eaab16aa 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -300,8 +300,9 @@ static int tegra_sha_do_update(struct ahash_request *req) { struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct tegra_se *se = ctx->se; unsigned int nblks, nresidue, size, ret; - u32 *cpuvaddr = ctx->se->cmdbuf->addr; + u32 *cpuvaddr = se->cmdbuf->addr; nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size; nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size; @@ -331,6 +332,11 @@ static int tegra_sha_do_update(struct ahash_request *req) return 0; } + rctx->datbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; + /* Copy the previous residue first */ if (rctx->residue.size) memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); @@ -353,11 +359,11 @@ static int tegra_sha_do_update(struct ahash_request *req) * This is to support the import/export functionality. 
*/ if (!(rctx->task & SHA_FIRST)) - tegra_sha_paste_hash_result(ctx->se, rctx); + tegra_sha_paste_hash_result(se, rctx); - size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx); + size = tegra_sha_prep_cmd(se, cpuvaddr, rctx); - ret = tegra_se_host1x_submit(ctx->se, size); + ret = tegra_se_host1x_submit(se, se->cmdbuf, size); /* * If this is not the final update, copy the intermediate results @@ -365,7 +371,10 @@ static int tegra_sha_do_update(struct ahash_request *req) * call. This is to support the import/export functionality. */ if (!(rctx->task & SHA_FINAL)) - tegra_sha_copy_hash_result(ctx->se, rctx); + tegra_sha_copy_hash_result(se, rctx); + + dma_free_coherent(ctx->se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); return ret; } @@ -379,7 +388,17 @@ static int tegra_sha_do_final(struct ahash_request *req) u32 *cpuvaddr = se->cmdbuf->addr; int size, ret = 0; - memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + if (rctx->residue.size) { + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_free; + } + + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + } + rctx->datbuf.size = rctx->residue.size; rctx->total_len += rctx->residue.size; @@ -388,7 +407,7 @@ static int tegra_sha_do_final(struct ahash_request *req) size = tegra_sha_prep_cmd(se, cpuvaddr, rctx); - ret = tegra_se_host1x_submit(se, size); + ret = tegra_se_host1x_submit(se, se->cmdbuf, size); if (ret) goto out; @@ -396,8 +415,10 @@ static int tegra_sha_do_final(struct ahash_request *req) memcpy(req->result, rctx->digest.buf, rctx->digest.size); out: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, - rctx->datbuf.buf, rctx->datbuf.addr); + if (rctx->residue.size) + dma_free_coherent(se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); +out_free: dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm), rctx->residue.buf, rctx->residue.addr); dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, @@ -416,14 +437,21 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq) if (rctx->task & SHA_UPDATE) { ret = tegra_sha_do_update(req); + if (ret) + goto out; + rctx->task &= ~SHA_UPDATE; } if (rctx->task & SHA_FINAL) { ret = tegra_sha_do_final(req); + if (ret) + goto out; + rctx->task &= ~SHA_FINAL; } +out: crypto_finalize_hash_request(se->engine, req, ret); return 0; @@ -526,19 +554,11 @@ static int tegra_sha_init(struct ahash_request *req) if (!rctx->residue.buf) goto resbuf_fail; - rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - goto datbuf_fail; - return 0; -datbuf_fail: - dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf, - rctx->residue.addr); resbuf_fail: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf, - rctx->datbuf.addr); + dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, + rctx->digest.addr); digbuf_fail: return -ENOMEM; } @@ -559,13 +579,18 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + int ret; if (aes_check_keylen(keylen)) return tegra_hmac_fallback_setkey(ctx, key, keylen); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + if (ret) + return tegra_hmac_fallback_setkey(ctx, key, keylen); + ctx->fallback = false; - return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + return 0; } 
static int tegra_sha_update(struct ahash_request *req) diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c index ac14678dbd30d..276b261fb6df1 100644 --- a/drivers/crypto/tegra/tegra-se-key.c +++ b/drivers/crypto/tegra/tegra-se-key.c @@ -115,11 +115,17 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key, u32 keylen, u16 slot, u32 alg) { const u32 *keyval = (u32 *)key; - u32 *addr = se->cmdbuf->addr, size; + u32 *addr = se->keybuf->addr, size; + int ret; + + mutex_lock(&kslt_lock); size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg); + ret = tegra_se_host1x_submit(se, se->keybuf, size); - return tegra_se_host1x_submit(se, size); + mutex_unlock(&kslt_lock); + + return ret; } void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg) diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c index f94c0331b148c..55690b044e417 100644 --- a/drivers/crypto/tegra/tegra-se-main.c +++ b/drivers/crypto/tegra/tegra-se-main.c @@ -141,7 +141,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi return cmdbuf; } -int tegra_se_host1x_submit(struct tegra_se *se, u32 size) +int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size) { struct host1x_job *job; int ret; @@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size) job->engine_fallback_streamid = se->stream_id; job->engine_streamid_offset = SE_STREAM_ID; - se->cmdbuf->words = size; + cmdbuf->words = size; - host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0); + host1x_job_add_gather(job, &cmdbuf->bo, size, 0); ret = host1x_job_pin(job, se->dev); if (ret) { @@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client) goto syncpt_put; } + se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K); + if (!se->keybuf) { + ret = -ENOMEM; + goto cmdbuf_put; + } + ret = se->hw->init_alg(se); if (ret) { dev_err(se->dev, "failed to register algorithms\n"); - goto cmdbuf_put; + goto keybuf_put; } return 0; +keybuf_put: + tegra_se_cmdbuf_put(&se->keybuf->bo); cmdbuf_put: tegra_se_cmdbuf_put(&se->cmdbuf->bo); syncpt_put: diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h index b9dd7ceb8783c..e196a90eedb92 100644 --- a/drivers/crypto/tegra/tegra-se.h +++ b/drivers/crypto/tegra/tegra-se.h @@ -340,8 +340,6 @@ #define SE_CRYPTO_CTR_REG_COUNT 4 #define SE_MAX_KEYSLOT 15 #define SE_MAX_MEM_ALLOC SZ_4M -#define SE_AES_BUFLEN 0x8000 -#define SE_SHA_BUFLEN 0x2000 #define SHA_FIRST BIT(0) #define SHA_UPDATE BIT(1) @@ -420,6 +418,7 @@ struct tegra_se { struct host1x_client client; struct host1x_channel *channel; struct tegra_se_cmdbuf *cmdbuf; + struct tegra_se_cmdbuf *keybuf; struct crypto_engine *engine; struct host1x_syncpt *syncpt; struct device *dev; @@ -502,7 +501,7 @@ void tegra_deinit_hash(struct tegra_se *se); int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid); void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg); -int tegra_se_host1x_submit(struct tegra_se *se, u32 size); +int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size); /* HOST1x OPCODES */ static inline u32 host1x_opcode_setpayload(unsigned int payload) diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index c353029789cf1..1290886f065e3 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -444,15 +444,17 @@ static int sw_sync_ioctl_get_deadline(struct 
sync_timeline *obj, unsigned long a return -EINVAL; pt = dma_fence_to_sync_pt(fence); - if (!pt) - return -EINVAL; + if (!pt) { + ret = -EINVAL; + goto put_fence; + } spin_lock_irqsave(fence->lock, flags); - if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) { - data.deadline_ns = ktime_to_ns(pt->deadline); - } else { + if (!test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) { ret = -ENOENT; + goto unlock; } + data.deadline_ns = ktime_to_ns(pt->deadline); spin_unlock_irqrestore(fence->lock, flags); dma_fence_put(fence); @@ -464,6 +466,13 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a return -EFAULT; return 0; + +unlock: + spin_unlock_irqrestore(fence->lock, flags); +put_fence: + dma_fence_put(fence); + + return ret; } static long sw_sync_ioctl(struct file *file, unsigned int cmd, diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index 70cb7fda757a9..27645606f900b 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -303,6 +303,7 @@ fsl_edma2_irq_init(struct platform_device *pdev, /* The last IRQ is for eDMA err */ if (i == count - 1) { + fsl_edma->errirq = irq; ret = devm_request_irq(&pdev->dev, irq, fsl_edma_err_handler, 0, "eDMA2-ERR", fsl_edma); @@ -322,10 +323,13 @@ static void fsl_edma_irq_exit( struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) { if (fsl_edma->txirq == fsl_edma->errirq) { - devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); + if (fsl_edma->txirq >= 0) + devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); } else { - devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); - devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma); + if (fsl_edma->txirq >= 0) + devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); + if (fsl_edma->errirq >= 0) + devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma); } } @@ -513,6 +517,8 @@ static int fsl_edma_probe(struct platform_device *pdev) if (!fsl_edma) return -ENOMEM; + fsl_edma->errirq = -EINVAL; + fsl_edma->txirq = -EINVAL; fsl_edma->drvdata = drvdata; fsl_edma->n_chans = chans; mutex_init(&fsl_edma->fsl_edma_mutex); @@ -699,9 +705,9 @@ static void fsl_edma_remove(struct platform_device *pdev) struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev); fsl_edma_irq_exit(pdev, fsl_edma); - fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); of_dma_controller_free(np); dma_async_device_unregister(&fsl_edma->dma_dev); + fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); } diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c index 32019dc33cca7..1877201d1aa9f 100644 --- a/drivers/dpll/dpll_core.c +++ b/drivers/dpll/dpll_core.c @@ -505,7 +505,7 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module, xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC); ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b, &dpll_pin_xa_id, GFP_KERNEL); - if (ret) + if (ret < 0) goto err_xa_alloc; return pin; err_xa_alloc: diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index 51556c72a9674..fbdf005bed3a4 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -751,6 +751,8 @@ static int i10nm_get_ddr_munits(void) continue; } else { d->imc[lmc].mdev = mdev; + if (res_cfg->type == SPR) + skx_set_mc_mapping(d, i, lmc); lmc++; } } diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c index 9ef13570f2e54..56be8ef40f376 100644 --- a/drivers/edac/ie31200_edac.c +++ b/drivers/edac/ie31200_edac.c @@ -91,8 +91,6 @@ (((did) & 
PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \ PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK)) -#define IE31200_DIMMS 4 -#define IE31200_RANKS 8 #define IE31200_RANKS_PER_CHANNEL 4 #define IE31200_DIMMS_PER_CHANNEL 2 #define IE31200_CHANNELS 2 @@ -164,6 +162,7 @@ #define IE31200_MAD_DIMM_0_OFFSET 0x5004 #define IE31200_MAD_DIMM_0_OFFSET_SKL 0x500C #define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0) +#define IE31200_MAD_DIMM_SIZE_SKL GENMASK_ULL(5, 0) #define IE31200_MAD_DIMM_A_RANK BIT(17) #define IE31200_MAD_DIMM_A_RANK_SHIFT 17 #define IE31200_MAD_DIMM_A_RANK_SKL BIT(10) @@ -377,7 +376,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev) static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int chan) { - dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE; + dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE_SKL; dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0; dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >> (IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4))); @@ -426,7 +425,7 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) nr_channels = how_many_channels(pdev); layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; - layers[0].size = IE31200_DIMMS; + layers[0].size = IE31200_RANKS_PER_CHANNEL; layers[0].is_virt_csrow = true; layers[1].type = EDAC_MC_LAYER_CHANNEL; layers[1].size = nr_channels; @@ -618,7 +617,7 @@ static int __init ie31200_init(void) pci_rc = pci_register_driver(&ie31200_driver); if (pci_rc < 0) - goto fail0; + return pci_rc; if (!mci_pdev) { ie31200_registered = 0; @@ -629,11 +628,13 @@ static int __init ie31200_init(void) if (mci_pdev) break; } + if (!mci_pdev) { edac_dbg(0, "ie31200 pci_get_device fail\n"); pci_rc = -ENODEV; - goto fail1; + goto fail0; } + pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]); if (pci_rc < 0) { edac_dbg(0, "ie31200 init fail\n"); @@ -641,12 +642,12 @@ static int __init ie31200_init(void) goto fail1; } } - return 0; + return 0; fail1: - pci_unregister_driver(&ie31200_driver); -fail0: pci_dev_put(mci_pdev); +fail0: + pci_unregister_driver(&ie31200_driver); return pci_rc; } diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c index a9a8ba067007a..0fd7a777fe7d2 100644 --- a/drivers/edac/qcom_edac.c +++ b/drivers/edac/qcom_edac.c @@ -95,7 +95,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b * Configure interrupt enable registers such that Tag, Data RAM related * interrupts are propagated to interrupt controller for servicing */ - ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable, + ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable, TRP0_INTERRUPT_ENABLE, TRP0_INTERRUPT_ENABLE); if (ret) @@ -113,7 +113,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b if (ret) return ret; - ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable, + ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable, DRP0_INTERRUPT_ENABLE, DRP0_INTERRUPT_ENABLE); if (ret) diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 6cf17af7d9112..85ec3196664d3 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -120,6 +120,35 @@ void skx_adxl_put(void) } EXPORT_SYMBOL_GPL(skx_adxl_put); +static void skx_init_mc_mapping(struct skx_dev *d) +{ + /* + * By default, the BIOS presents all 
memory controllers within each + * socket to the EDAC driver. The physical indices are the same as + * the logical indices of the memory controllers enumerated by the + * EDAC driver. + */ + for (int i = 0; i < NUM_IMC; i++) + d->mc_mapping[i] = i; +} + +void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc) +{ + edac_dbg(0, "Set the mapping of mc phy idx to logical idx: %02d -> %02d\n", + pmc, lmc); + + d->mc_mapping[pmc] = lmc; +} +EXPORT_SYMBOL_GPL(skx_set_mc_mapping); + +static u8 skx_get_mc_mapping(struct skx_dev *d, u8 pmc) +{ + edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n", + pmc, d->mc_mapping[pmc]); + + return d->mc_mapping[pmc]; +} + static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src) { struct skx_dev *d; @@ -187,6 +216,8 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src) return false; } + res->imc = skx_get_mc_mapping(d, res->imc); + for (i = 0; i < adxl_component_count; i++) { if (adxl_values[i] == ~0x0ull) continue; @@ -307,6 +338,8 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list) d->bus[0], d->bus[1], d->bus[2], d->bus[3]); list_add_tail(&d->list, &dev_edac_list); prev = pdev; + + skx_init_mc_mapping(d); } if (list) diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h index 54bba8a62f727..849198fd14da6 100644 --- a/drivers/edac/skx_common.h +++ b/drivers/edac/skx_common.h @@ -93,6 +93,16 @@ struct skx_dev { struct pci_dev *uracu; /* for i10nm CPU */ struct pci_dev *pcu_cr3; /* for HBM memory detection */ u32 mcroute; + /* + * Some server BIOS may hide certain memory controllers, and the + * EDAC driver skips those hidden memory controllers. However, the + * ADXL still decodes memory error addresses using physical memory + * controller indices. The mapping table is used to convert the + * physical indices (reported by ADXL) to the logical indices + * (used by the EDAC driver) of present memory controllers during the + * error handling process. 
+ */ + u8 mc_mapping[NUM_IMC]; struct skx_imc { struct mem_ctl_info *mci; struct pci_dev *mdev; /* for i10nm CPU */ @@ -242,6 +252,7 @@ void skx_adxl_put(void); void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log); void skx_set_mem_cfg(bool mem_cfg_2lm); void skx_set_res_cfg(struct res_config *cfg); +void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc); int skx_get_src_id(struct skx_dev *d, int off, u8 *id); int skx_get_node_id(struct skx_dev *d, u8 *id); diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c index a86ab9b35953f..2641faa329cdd 100644 --- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c +++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c @@ -254,8 +254,8 @@ static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph, if (num > max_num) return -EINVAL; - ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET, sizeof(*in), - 0, &t); + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET, + sizeof(*in) + num * sizeof(__le32), 0, &t); if (ret) return ret; diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index 419220fa42fd7..ea452f1908542 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -1609,8 +1609,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, goto out_fw; } - ret = regmap_raw_write_async(regmap, reg, buf->buf, - le32_to_cpu(region->len)); + ret = regmap_raw_write(regmap, reg, buf->buf, + le32_to_cpu(region->len)); if (ret != 0) { cs_dsp_err(dsp, "%s.%d: Failed to write %d bytes at %d in %s: %d\n", @@ -1625,20 +1625,14 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, regions++; } - ret = regmap_async_complete(regmap); - if (ret != 0) { - cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret); - goto out_fw; - } - if (pos > firmware->size) cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n", file, regions, pos - firmware->size); cs_dsp_debugfs_save_wmfwname(dsp, file); + ret = 0; out_fw: - regmap_async_complete(regmap); cs_dsp_buf_free(&buf_list); if (ret == -EOVERFLOW) @@ -2326,8 +2320,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n", file, blocks, le32_to_cpu(blk->len), reg); - ret = regmap_raw_write_async(regmap, reg, buf->buf, - le32_to_cpu(blk->len)); + ret = regmap_raw_write(regmap, reg, buf->buf, + le32_to_cpu(blk->len)); if (ret != 0) { cs_dsp_err(dsp, "%s.%d: Failed to write to %x in %s: %d\n", @@ -2339,18 +2333,14 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware blocks++; } - ret = regmap_async_complete(regmap); - if (ret != 0) - cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret); - if (pos > firmware->size) cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n", file, blocks, pos - firmware->size); cs_dsp_debugfs_save_binname(dsp, file); + ret = 0; out_fw: - regmap_async_complete(regmap); cs_dsp_buf_free(&buf_list); if (ret == -EOVERFLOW) @@ -2561,8 +2551,8 @@ static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp) { int ret; - ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL, - ADSP2_SYS_ENA, ADSP2_SYS_ENA); + ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL, + ADSP2_SYS_ENA, ADSP2_SYS_ENA); if (ret != 0) return ret; diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 
685098f9626f2..eebcdf653d072 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -171,7 +171,7 @@ void efi_set_u64_split(u64 data, u32 *lo, u32 *hi) * the EFI memory map. Other related structures, e.g. x86 e820ext, need * to factor in this headroom requirement as well. */ -#define EFI_MMAP_NR_SLACK_SLOTS 8 +#define EFI_MMAP_NR_SLACK_SLOTS 32 typedef struct efi_generic_dev_path efi_device_path_protocol_t; diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c index 8ad3efb9b1ff1..593e98e3b993e 100644 --- a/drivers/firmware/efi/libstub/randomalloc.c +++ b/drivers/firmware/efi/libstub/randomalloc.c @@ -75,6 +75,10 @@ efi_status_t efi_random_alloc(unsigned long size, if (align < EFI_ALLOC_ALIGN) align = EFI_ALLOC_ALIGN; + /* Avoid address 0x0, as it can be mistaken for NULL */ + if (alloc_min == 0) + alloc_min = align; + size = round_up(size, EFI_ALLOC_ALIGN); /* count the suitable slots in each memory map entry */ diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c index 5ed0602c2f75f..4eb0dff4dfaf8 100644 --- a/drivers/firmware/efi/mokvar-table.c +++ b/drivers/firmware/efi/mokvar-table.c @@ -103,9 +103,7 @@ void __init efi_mokvar_table_init(void) void *va = NULL; unsigned long cur_offset = 0; unsigned long offset_limit; - unsigned long map_size = 0; unsigned long map_size_needed = 0; - unsigned long size; struct efi_mokvar_table_entry *mokvar_entry; int err; @@ -134,48 +132,34 @@ void __init efi_mokvar_table_init(void) */ err = -EINVAL; while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) { - mokvar_entry = va + cur_offset; - map_size_needed = cur_offset + sizeof(*mokvar_entry); - if (map_size_needed > map_size) { - if (va) - early_memunmap(va, map_size); - /* - * Map a little more than the fixed size entry - * header, anticipating some data. It's safe to - * do so as long as we stay within current memory - * descriptor. 
- */ - map_size = min(map_size_needed + 2*EFI_PAGE_SIZE, - offset_limit); - va = early_memremap(efi.mokvar_table, map_size); - if (!va) { - pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n", - efi.mokvar_table, map_size); - return; - } - mokvar_entry = va + cur_offset; + if (va) + early_memunmap(va, sizeof(*mokvar_entry)); + va = early_memremap(efi.mokvar_table + cur_offset, sizeof(*mokvar_entry)); + if (!va) { + pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%zu.\n", + efi.mokvar_table + cur_offset, sizeof(*mokvar_entry)); + return; } + mokvar_entry = va; /* Check for last sentinel entry */ if (mokvar_entry->name[0] == '\0') { if (mokvar_entry->data_size != 0) break; err = 0; + map_size_needed = cur_offset + sizeof(*mokvar_entry); break; } - /* Sanity check that the name is null terminated */ - size = strnlen(mokvar_entry->name, - sizeof(mokvar_entry->name)); - if (size >= sizeof(mokvar_entry->name)) - break; + /* Enforce that the name is NUL terminated */ + mokvar_entry->name[sizeof(mokvar_entry->name) - 1] = '\0'; /* Advance to the next entry */ - cur_offset = map_size_needed + mokvar_entry->data_size; + cur_offset += sizeof(*mokvar_entry) + mokvar_entry->data_size; } if (va) - early_memunmap(va, map_size); + early_memunmap(va, sizeof(*mokvar_entry)); if (err) { pr_err("EFI MOKvar config table is not valid\n"); return; diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig index 907cd149c40a8..c964f4924359f 100644 --- a/drivers/firmware/imx/Kconfig +++ b/drivers/firmware/imx/Kconfig @@ -25,6 +25,7 @@ config IMX_SCU config IMX_SCMI_MISC_DRV tristate "IMX SCMI MISC Protocol driver" + depends on ARCH_MXC || COMPILE_TEST default y if ARCH_MXC help The System Controller Management Interface firmware (SCMI FW) is diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c index 1dd4362ef9a3f..8c28e25ddc8a6 100644 --- a/drivers/firmware/imx/imx-scu.c +++ b/drivers/firmware/imx/imx-scu.c @@ -280,6 +280,7 @@ static int imx_scu_probe(struct platform_device *pdev) return ret; sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu"); + of_node_put(args.np); num_channel = sc_ipc->fast_ipc ? 
2 : SCU_MU_CHAN_NUM; for (i = 0; i < num_channel; i++) { diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 6e9788324fea5..371f24569b3b2 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -310,7 +310,10 @@ static ssize_t ibft_attr_show_nic(void *data, int type, char *buf) str += sprintf_ipaddr(str, nic->ip_addr); break; case ISCSI_BOOT_ETH_SUBNET_MASK: - val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1)); + if (nic->subnet_mask_prefix > 32) + val = cpu_to_be32(~0); + else + val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1)); str += sprintf(str, "%pI4", &val); break; case ISCSI_BOOT_ETH_PREFIX_LEN: diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c index 447246bd04be3..98a463e9774bf 100644 --- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c +++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c @@ -814,15 +814,6 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev, qcuefi->client = container_of(aux_dev, struct qseecom_client, aux_dev); - auxiliary_set_drvdata(aux_dev, qcuefi); - status = qcuefi_set_reference(qcuefi); - if (status) - return status; - - status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops); - if (status) - qcuefi_set_reference(NULL); - memset(&pool_config, 0, sizeof(pool_config)); pool_config.initial_size = SZ_4K; pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER; @@ -833,6 +824,15 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev, if (IS_ERR(qcuefi->mempool)) return PTR_ERR(qcuefi->mempool); + auxiliary_set_drvdata(aux_dev, qcuefi); + status = qcuefi_set_reference(qcuefi); + if (status) + return status; + + status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops); + if (status) + qcuefi_set_reference(NULL); + return status; } diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c index 2e093c39b610a..23aefbf6fca58 100644 --- a/drivers/firmware/qcom/qcom_scm.c +++ b/drivers/firmware/qcom/qcom_scm.c @@ -2054,8 +2054,8 @@ static int qcom_scm_probe(struct platform_device *pdev) __scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config); if (IS_ERR(__scm->mempool)) { - dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool), - "Failed to create the SCM memory pool\n"); + ret = dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool), + "Failed to create the SCM memory pool\n"); goto err; } diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c index 38e0fff9afe72..cc6ee4334602a 100644 --- a/drivers/gpio/gpio-aggregator.c +++ b/drivers/gpio/gpio-aggregator.c @@ -121,10 +121,15 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf, struct platform_device *pdev; int res, id; + if (!try_module_get(THIS_MODULE)) + return -ENOENT; + /* kernfs guarantees string termination, so count + 1 is safe */ aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL); - if (!aggr) - return -ENOMEM; + if (!aggr) { + res = -ENOMEM; + goto put_module; + } memcpy(aggr->args, buf, count + 1); @@ -163,6 +168,7 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf, } aggr->pdev = pdev; + module_put(THIS_MODULE); return count; remove_table: @@ -177,6 +183,8 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf, kfree(aggr->lookups); free_ga: kfree(aggr); +put_module: + module_put(THIS_MODULE); return res; } @@ -205,13 +213,19 @@ static ssize_t delete_device_store(struct device_driver *driver, if (error) 
return error; + if (!try_module_get(THIS_MODULE)) + return -ENOENT; + mutex_lock(&gpio_aggregator_lock); aggr = idr_remove(&gpio_aggregator_idr, id); mutex_unlock(&gpio_aggregator_lock); - if (!aggr) + if (!aggr) { + module_put(THIS_MODULE); return -ENOENT; + } gpio_aggregator_free(aggr); + module_put(THIS_MODULE); return count; } static DRIVER_ATTR_WO(delete_device); diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index 6159fda38d5da..6641ed5cd8e1c 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -40,7 +40,7 @@ struct gpio_rcar_info { struct gpio_rcar_priv { void __iomem *base; - spinlock_t lock; + raw_spinlock_t lock; struct device *dev; struct gpio_chip gpio_chip; unsigned int irq_parent; @@ -123,7 +123,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p, * "Setting Level-Sensitive Interrupt Input Mode" */ - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); /* Configure positive or negative logic in POSNEG */ gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge); @@ -142,7 +142,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p, if (!level_trigger) gpio_rcar_write(p, INTCLR, BIT(hwirq)); - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); } static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type) @@ -246,7 +246,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip, * "Setting General Input Mode" */ - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); /* Configure positive logic in POSNEG */ gpio_rcar_modify_bit(p, POSNEG, gpio, false); @@ -261,7 +261,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip, if (p->info.has_outdtsel && output) gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false); - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); } static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset) @@ -347,7 +347,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask, return 0; } - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); outputs = gpio_rcar_read(p, INOUTSEL); m = outputs & bankmask; if (m) @@ -356,7 +356,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask, m = ~outputs & bankmask; if (m) val |= gpio_rcar_read(p, INDT) & m; - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); bits[0] = val; return 0; @@ -367,9 +367,9 @@ static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value) struct gpio_rcar_priv *p = gpiochip_get_data(chip); unsigned long flags; - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); gpio_rcar_modify_bit(p, OUTDT, offset, value); - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); } static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask, @@ -386,12 +386,12 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask, if (!bankmask) return; - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); val = gpio_rcar_read(p, OUTDT); val &= ~bankmask; val |= (bankmask & bits[0]); gpio_rcar_write(p, OUTDT, val); - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); } static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset, 
@@ -468,7 +468,12 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins) p->info = *info; ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args); - *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK; + if (ret) { + *npins = RCAR_MAX_GPIO_PER_BANK; + } else { + *npins = args.args[2]; + of_node_put(args.np); + } if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) { dev_warn(p->dev, "Invalid number of gpio lines %u, using %u\n", @@ -505,7 +510,7 @@ static int gpio_rcar_probe(struct platform_device *pdev) return -ENOMEM; p->dev = dev; - spin_lock_init(&p->lock); + raw_spin_lock_init(&p->lock); /* Get device configuration from DT node */ ret = gpio_rcar_parse_dt(p, &npins); diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c index 1ecb733a5e88b..45543ab5073f6 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -823,6 +823,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev) struct gpio_irq_chip *irq; struct tegra_gpio *gpio; struct device_node *np; + struct resource *res; char **names; int err; @@ -842,19 +843,19 @@ static int tegra186_gpio_probe(struct platform_device *pdev) gpio->num_banks++; /* get register apertures */ - gpio->secure = devm_platform_ioremap_resource_byname(pdev, "security"); - if (IS_ERR(gpio->secure)) { - gpio->secure = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(gpio->secure)) - return PTR_ERR(gpio->secure); - } - - gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio"); - if (IS_ERR(gpio->base)) { - gpio->base = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(gpio->base)) - return PTR_ERR(gpio->base); - } + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "security"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gpio->secure = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->secure)) + return PTR_ERR(gpio->secure); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + gpio->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->base)) + return PTR_ERR(gpio->base); err = platform_irq_count(pdev); if (err < 0) diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 27eff741fe9a2..c36a9dbccd4dd 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -15,10 +15,9 @@ #include #include #include -#include -#include -#include #include +#include +#include #define VF610_GPIO_PER_PORT 32 @@ -37,6 +36,7 @@ struct vf610_gpio_port { struct clk *clk_port; struct clk *clk_gpio; int irq; + spinlock_t lock; /* protect gpio direction registers */ }; #define GPIO_PDOR 0x00 @@ -125,6 +125,7 @@ static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio) u32 val; if (port->sdata->have_paddr) { + guard(spinlock_irqsave)(&port->lock); val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR); val &= ~mask; vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR); @@ -143,6 +144,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio vf610_gpio_set(chip, gpio, value); if (port->sdata->have_paddr) { + guard(spinlock_irqsave)(&port->lock); val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR); val |= mask; vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR); @@ -297,7 +299,8 @@ static int vf610_gpio_probe(struct platform_device *pdev) if (!port) return -ENOMEM; - port->sdata = of_device_get_match_data(dev); + port->sdata = device_get_match_data(dev); + 
spin_lock_init(&port->lock); dual_base = port->sdata->have_dual_base; diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 1a42336dfc1d4..cc53e6940ad7e 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -1011,6 +1011,7 @@ static void zynq_gpio_remove(struct platform_device *pdev) ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n"); + device_init_wakeup(&pdev->dev, 0); gpiochip_remove(&gpio->chip); device_set_wakeup_capable(&pdev->dev, 0); pm_runtime_disable(&pdev->dev); diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 880f1efcaca53..e543129d36050 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -193,6 +193,8 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np, */ { "himax,hx8357", "gpios-reset", false }, { "himax,hx8369", "gpios-reset", false }, +#endif +#if IS_ENABLED(CONFIG_MTD_NAND_JZ4780) /* * The rb-gpios semantics was undocumented and qi,lb60 (along with * the ingenic driver) got it wrong. The active state encodes the diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 1e8f0bdb6ae3b..209871c219d69 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -3068,6 +3068,8 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc) static int gpio_chip_get_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { + lockdep_assert_held(&gc->gpiodev->srcu); + if (gc->get_multiple) return gc->get_multiple(gc, mask, bits); if (gc->get) { @@ -3098,6 +3100,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep, struct gpio_array *array_info, unsigned long *value_bitmap) { + struct gpio_chip *gc; int ret, i = 0; /* @@ -3109,10 +3112,15 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep, array_size <= array_info->size && (void *)array_info == desc_array + array_info->size) { if (!can_sleep) - WARN_ON(array_info->chip->can_sleep); + WARN_ON(array_info->gdev->can_sleep); + + guard(srcu)(&array_info->gdev->srcu); + gc = srcu_dereference(array_info->gdev->chip, + &array_info->gdev->srcu); + if (!gc) + return -ENODEV; - ret = gpio_chip_get_multiple(array_info->chip, - array_info->get_mask, + ret = gpio_chip_get_multiple(gc, array_info->get_mask, value_bitmap); if (ret) return ret; @@ -3393,6 +3401,8 @@ static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value) static void gpio_chip_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { + lockdep_assert_held(&gc->gpiodev->srcu); + if (gc->set_multiple) { gc->set_multiple(gc, mask, bits); } else { @@ -3410,6 +3420,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep, struct gpio_array *array_info, unsigned long *value_bitmap) { + struct gpio_chip *gc; int i = 0; /* @@ -3421,14 +3432,19 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep, array_size <= array_info->size && (void *)array_info == desc_array + array_info->size) { if (!can_sleep) - WARN_ON(array_info->chip->can_sleep); + WARN_ON(array_info->gdev->can_sleep); + + guard(srcu)(&array_info->gdev->srcu); + gc = srcu_dereference(array_info->gdev->chip, + &array_info->gdev->srcu); + if (!gc) + return -ENODEV; if (!raw && !bitmap_empty(array_info->invert_mask, array_size)) bitmap_xor(value_bitmap, value_bitmap, array_info->invert_mask, array_size); - gpio_chip_set_multiple(array_info->chip, array_info->set_mask, - value_bitmap); + gpio_chip_set_multiple(gc, array_info->set_mask, value_bitmap); i = 
find_first_zero_bit(array_info->set_mask, array_size); if (i == array_size) @@ -4684,9 +4700,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, { struct gpio_desc *desc; struct gpio_descs *descs; + struct gpio_device *gdev; struct gpio_array *array_info = NULL; - struct gpio_chip *gc; int count, bitmap_size; + unsigned long dflags; size_t descs_size; count = gpiod_count(dev, con_id); @@ -4707,7 +4724,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, descs->desc[descs->ndescs] = desc; - gc = gpiod_to_chip(desc); + gdev = gpiod_to_gpio_device(desc); /* * If pin hardware number of array member 0 is also 0, select * its chip as a candidate for fast bitmap processing path. @@ -4715,8 +4732,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) { struct gpio_descs *array; - bitmap_size = BITS_TO_LONGS(gc->ngpio > count ? - gc->ngpio : count); + bitmap_size = BITS_TO_LONGS(gdev->ngpio > count ? + gdev->ngpio : count); array = krealloc(descs, descs_size + struct_size(array_info, invert_mask, 3 * bitmap_size), @@ -4736,7 +4753,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, array_info->desc = descs->desc; array_info->size = count; - array_info->chip = gc; + array_info->gdev = gdev; bitmap_set(array_info->get_mask, descs->ndescs, count - descs->ndescs); bitmap_set(array_info->set_mask, descs->ndescs, @@ -4749,7 +4766,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, continue; /* Unmark array members which don't belong to the 'fast' chip */ - if (array_info->chip != gc) { + if (array_info->gdev != gdev) { __clear_bit(descs->ndescs, array_info->get_mask); __clear_bit(descs->ndescs, array_info->set_mask); } @@ -4772,9 +4789,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, array_info->set_mask); } } else { + dflags = READ_ONCE(desc->flags); /* Exclude open drain or open source from fast output */ - if (gpiochip_line_is_open_drain(gc, descs->ndescs) || - gpiochip_line_is_open_source(gc, descs->ndescs)) + if (test_bit(FLAG_OPEN_DRAIN, &dflags) || + test_bit(FLAG_OPEN_SOURCE, &dflags)) __clear_bit(descs->ndescs, array_info->set_mask); /* Identify 'fast' pins which require invertion */ @@ -4786,7 +4804,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, if (array_info) dev_dbg(dev, "GPIO array info: chip=%s, size=%d, get_mask=%lx, set_mask=%lx, invert_mask=%lx\n", - array_info->chip->label, array_info->size, + array_info->gdev->label, array_info->size, *array_info->get_mask, *array_info->set_mask, *array_info->invert_mask); return descs; diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index 067197d61d57e..87ce3753500e4 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -110,7 +110,7 @@ extern const char *const gpio_suffixes[]; * * @desc: Array of pointers to the GPIO descriptors * @size: Number of elements in desc - * @chip: Parent GPIO chip + * @gdev: Parent GPIO device * @get_mask: Get mask used in fastpath * @set_mask: Set mask used in fastpath * @invert_mask: Invert mask used in fastpath @@ -122,7 +122,7 @@ extern const char *const gpio_suffixes[]; struct gpio_array { struct gpio_desc **desc; unsigned int size; - struct gpio_chip *chip; + struct gpio_device *gdev; unsigned long *get_mask; unsigned long *set_mask; unsigned long invert_mask[]; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 7408ea8caacc3..ae53f26da945f 100644 --- 
a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -211,6 +211,18 @@ config DRM_DEBUG_MODESET_LOCK If in doubt, say "N". +config DRM_CLIENT_SELECTION + bool + depends on DRM + select DRM_CLIENT_SETUP if DRM_FBDEV_EMULATION + help + Drivers that support in-kernel DRM clients have to select this + option. + +config DRM_CLIENT_SETUP + bool + depends on DRM_CLIENT_SELECTION + config DRM_FBDEV_EMULATION bool "Enable legacy fbdev support for your modesetting driver" depends on DRM diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 84746054c721a..1ec44529447a7 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -144,8 +144,12 @@ drm_kms_helper-y := \ drm_rect.o \ drm_self_refresh_helper.o \ drm_simple_kms_helper.o +drm_kms_helper-$(CONFIG_DRM_CLIENT_SETUP) += \ + drm_client_setup.o drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o -drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o +drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += \ + drm_fbdev_client.o \ + drm_fb_helper.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o # diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 45affc02548c1..a3a7d20ab4fea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -437,6 +437,13 @@ static bool amdgpu_get_bios_apu(struct amdgpu_device *adev) return true; } +static bool amdgpu_prefer_rom_resource(struct amdgpu_device *adev) +{ + struct resource *res = &adev->pdev->resource[PCI_ROM_RESOURCE]; + + return (res->flags & IORESOURCE_ROM_SHADOW); +} + static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev) { if (amdgpu_atrm_get_bios(adev)) { @@ -455,14 +462,27 @@ static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev) goto success; } - if (amdgpu_read_platform_bios(adev)) { - dev_info(adev->dev, "Fetched VBIOS from platform\n"); - goto success; - } + if (amdgpu_prefer_rom_resource(adev)) { + if (amdgpu_read_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n"); + goto success; + } - if (amdgpu_read_bios(adev)) { - dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n"); - goto success; + if (amdgpu_read_platform_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from platform\n"); + goto success; + } + + } else { + if (amdgpu_read_platform_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from platform\n"); + goto success; + } + + if (amdgpu_read_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n"); + goto success; + } } if (amdgpu_read_bios_from_rom(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 45e28726e148e..24d007715a14a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1542,6 +1542,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return 0; + /* resizing on Dell G5 SE platforms causes problems with runtime pm */ + if ((amdgpu_runtime_pm != 0) && + adev->pdev->vendor == PCI_VENDOR_ID_ATI && + adev->pdev->device == 0x731f && + adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) + return 0; + /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */ if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR)) DRM_WARN("System can't access extended configuration space, please check!!\n"); @@ -3315,6 +3322,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) amdgpu_device_mem_scratch_fini(adev); 
amdgpu_ib_pool_fini(adev); amdgpu_seq64_fini(adev); + amdgpu_doorbell_fini(adev); } r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); @@ -4663,7 +4671,6 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) iounmap(adev->rmmio); adev->rmmio = NULL; - amdgpu_doorbell_fini(adev); drm_dev_exit(idx); } @@ -6568,18 +6575,26 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev, { struct dma_fence *old = NULL; + dma_fence_get(gang); do { dma_fence_put(old); old = amdgpu_device_get_gang(adev); if (old == gang) break; - if (!dma_fence_is_signaled(old)) + if (!dma_fence_is_signaled(old)) { + dma_fence_put(gang); return old; + } } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit, old, gang) != old); + /* + * Drop it once for the exchanged reference in adev and once for the + * thread local reference acquired in amdgpu_device_get_gang(). + */ + dma_fence_put(old); dma_fence_put(old); return NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 8e81a83d37d84..2f90fff1b9ddc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -181,7 +181,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { - if (sgt->sgl->page_link) { + if (sg_page(sgt->sgl)) { dma_unmap_sgtable(attach->dev, sgt, dir, 0); sg_free_table(sgt); kfree(sgt); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index c27b4c36a7c0f..a9eb0927a7664 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -119,9 +119,10 @@ * - 3.58.0 - Add GFX12 DCC support * - 3.59.0 - Cleared VRAM * - 3.60.0 - Add AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE (Vulkan requirement) + * - 3.61.0 - Contains fix for RV/PCO compute queues */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 60 +#define KMS_DRIVER_MINOR 61 #define KMS_DRIVER_PATCHLEVEL 0 /* @@ -1794,7 +1795,6 @@ static const u16 amdgpu_unsupported_pciidlist[] = { }; static const struct pci_device_id pciidlist[] = { -#ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, @@ -1867,8 +1867,6 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, -#endif -#ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, @@ -1951,7 +1949,6 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, -#endif /* topaz */ {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, @@ -2283,14 +2280,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENOTSUPP; } + switch (flags & AMD_ASIC_MASK) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case 
CHIP_VERDE: + case CHIP_OLAND: + case CHIP_HAINAN: #ifdef CONFIG_DRM_AMDGPU_SI - if (!amdgpu_si_support) { - switch (flags & AMD_ASIC_MASK) { - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - case CHIP_HAINAN: + if (!amdgpu_si_support) { dev_info(&pdev->dev, "SI support provided by radeon.\n"); dev_info(&pdev->dev, @@ -2298,16 +2295,18 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, ); return -ENODEV; } - } + break; +#else + dev_info(&pdev->dev, "amdgpu is built without SI support.\n"); + return -ENODEV; #endif + case CHIP_KAVERI: + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KABINI: + case CHIP_MULLINS: #ifdef CONFIG_DRM_AMDGPU_CIK - if (!amdgpu_cik_support) { - switch (flags & AMD_ASIC_MASK) { - case CHIP_KAVERI: - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KABINI: - case CHIP_MULLINS: + if (!amdgpu_cik_support) { dev_info(&pdev->dev, "CIK support provided by radeon.\n"); dev_info(&pdev->dev, @@ -2315,8 +2314,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, ); return -ENODEV; } - } + break; +#else + dev_info(&pdev->dev, "amdgpu is built without CIK support.\n"); + return -ENODEV; #endif + default: + break; + } adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev); if (IS_ERR(adev)) @@ -2632,7 +2637,6 @@ static int amdgpu_pmops_freeze(struct device *dev) adev->in_s4 = true; r = amdgpu_device_suspend(drm_dev, true); - adev->in_s4 = false; if (r) return r; @@ -2644,8 +2648,13 @@ static int amdgpu_pmops_freeze(struct device *dev) static int amdgpu_pmops_thaw(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + int r; - return amdgpu_device_resume(drm_dev, true); + r = amdgpu_device_resume(drm_dev, true); + adev->in_s4 = false; + + return r; } static int amdgpu_pmops_poweroff(struct device *dev) @@ -2658,6 +2667,9 @@ static int amdgpu_pmops_poweroff(struct device *dev) static int amdgpu_pmops_restore(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + + adev->in_s4 = false; return amdgpu_device_resume(drm_dev, true); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 971419e3a9bbd..4c4bdc4f51b29 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -161,8 +161,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) * When GTT is just an alternative to VRAM make sure that we * only use it as fallback and still try to fill up VRAM first. 
*/ - if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM && - !(adev->flags & AMD_IS_APU)) + if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) && + domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) places[c].flags |= TTM_PL_FLAG_FALLBACK; c++; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 425073d994912..1c8ac4cf08c5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2280,7 +2280,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo, struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_res_cursor cursor; u64 addr; - int r; + int r = 0; if (!adev->mman.buffer_funcs_enabled) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c index 6162582d0aa27..d5125523bfa7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c @@ -584,7 +584,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch) fw_name = "amdgpu/umsch_mm_4_0_0.bin"; break; default: - break; + return -EINVAL; } r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, "%s", fw_name); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 73e02141a6e21..37d53578825b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2434,8 +2434,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->freed); INIT_LIST_HEAD(&vm->done); - INIT_LIST_HEAD(&vm->pt_freed); - INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work); INIT_KFIFO(vm->faults); r = amdgpu_vm_init_entities(adev, vm); @@ -2607,8 +2605,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); - flush_work(&vm->pt_free_work); - root = amdgpu_bo_ref(vm->root.bo); amdgpu_bo_reserve(root, true); amdgpu_vm_put_task_info(vm->task_info); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 52dd7cdfdc814..ee893527a4f1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -360,10 +360,6 @@ struct amdgpu_vm { /* BOs which are invalidated, has been updated in the PTs */ struct list_head done; - /* PT BOs scheduled to free and fill with zero if vm_resv is not hold */ - struct list_head pt_freed; - struct work_struct pt_free_work; - /* contains the page directory */ struct amdgpu_vm_bo_base root; struct dma_fence *last_update; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index f78a0434a48fa..54ae0e9bc6d77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -546,27 +546,6 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry) amdgpu_bo_unref(&entry->bo); } -void amdgpu_vm_pt_free_work(struct work_struct *work) -{ - struct amdgpu_vm_bo_base *entry, *next; - struct amdgpu_vm *vm; - LIST_HEAD(pt_freed); - - vm = container_of(work, struct amdgpu_vm, pt_free_work); - - spin_lock(&vm->status_lock); - list_splice_init(&vm->pt_freed, &pt_freed); - spin_unlock(&vm->status_lock); - - /* flush_work in amdgpu_vm_fini ensure vm->root.bo is valid. 
*/ - amdgpu_bo_reserve(vm->root.bo, true); - - list_for_each_entry_safe(entry, next, &pt_freed, vm_status) - amdgpu_vm_pt_free(entry); - - amdgpu_bo_unreserve(vm->root.bo); -} - /** * amdgpu_vm_pt_free_list - free PD/PT levels * @@ -579,19 +558,15 @@ void amdgpu_vm_pt_free_list(struct amdgpu_device *adev, struct amdgpu_vm_update_params *params) { struct amdgpu_vm_bo_base *entry, *next; - struct amdgpu_vm *vm = params->vm; bool unlocked = params->unlocked; if (list_empty(&params->tlb_flush_waitlist)) return; - if (unlocked) { - spin_lock(&vm->status_lock); - list_splice_init(&params->tlb_flush_waitlist, &vm->pt_freed); - spin_unlock(&vm->status_lock); - schedule_work(&vm->pt_free_work); - return; - } + /* + * An unlocked unmap only clears page table leaves; freeing page table + * entries here is unexpected, so warn about it. + */ + WARN_ON(unlocked); list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status) amdgpu_vm_pt_free(entry); @@ -899,7 +874,15 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params, incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift; mask = amdgpu_vm_pt_entries_mask(adev, cursor.level); pe_start = ((cursor.pfn >> shift) & mask) * 8; - entry_end = ((uint64_t)mask + 1) << shift; + + if (cursor.level < AMDGPU_VM_PTB && params->unlocked) + /* + * In an unlocked (MMU notifier) unmap, a huge page may be mapped by + * a PDE leaf; clear only that single entry, then walk again to find + * the next PDE or PTE leaf. + */ + entry_end = 1ULL << shift; + else + entry_end = ((uint64_t)mask + 1) << shift; entry_end += cursor.pfn & ~(entry_end - 1); entry_end = min(entry_end, end); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index d3e8be82a1727..84cf5fd297b7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1549,7 +1549,7 @@ static int gfx_v11_0_sw_init(void *handle) adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; - adev->gfx.mec.num_mec = 2; + adev->gfx.mec.num_mec = 1; adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 4; break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index ca130880edfd4..b259e217930c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -1332,7 +1332,7 @@ static int gfx_v12_0_sw_init(void *handle) adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; - adev->gfx.mec.num_mec = 2; + adev->gfx.mec.num_mec = 1; adev->gfx.mec.num_pipe_per_mec = 2; adev->gfx.mec.num_queue_per_pipe = 4; break; @@ -2395,7 +2395,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) (void **)&adev->gfx.me.me_fw_data_ptr); if (r) { dev_err(adev->dev, "(%d) failed to create me data bo\n", r); - gfx_v12_0_pfp_fini(adev); + gfx_v12_0_me_fini(adev); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index e2501c98e107d..114653a0b5701 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1269,6 +1269,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) adev->gfx.mec_fw_write_wait = false; if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) && + (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) && ((adev->gfx.mec_fw_version < 0x000001a5) || (adev->gfx.mec_feature_version < 46) || (adev->gfx.pfp_fw_version < 0x000000b7) || @@ -7415,6 +7416,34 @@ static void
gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ } +static void gfx_v9_0_ring_begin_use_compute(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + amdgpu_gfx_enforce_isolation_ring_begin_use(ring); + + /* Raven and PCO APUs seem to have stability issues + * with compute and gfxoff and gfx pg. Disable gfx pg during + * submission and allow again afterwards. + */ + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0)) + gfx_v9_0_set_powergating_state(adev, AMD_PG_STATE_UNGATE); +} + +static void gfx_v9_0_ring_end_use_compute(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + /* Raven and PCO APUs seem to have stability issues + * with compute and gfxoff and gfx pg. Disable gfx pg during + * submission and allow again afterwards. + */ + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0)) + gfx_v9_0_set_powergating_state(adev, AMD_PG_STATE_GATE); + + amdgpu_gfx_enforce_isolation_ring_end_use(ring); +} + static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { .name = "gfx_v9_0", .early_init = gfx_v9_0_early_init, @@ -7591,8 +7620,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { .emit_wave_limit = gfx_v9_0_emit_wave_limit, .reset = gfx_v9_0_reset_kcq, .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader, - .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, - .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, + .begin_use = gfx_v9_0_ring_begin_use_compute, + .end_use = gfx_v9_0_ring_end_use_compute, }; static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index edcb5351f8cca..60acf676000b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -498,9 +498,6 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev, uint64_t *flags) { struct amdgpu_bo *bo = mapping->bo_va->base.bo; - struct amdgpu_device *bo_adev; - bool coherent, is_system; - *flags &= ~AMDGPU_PTE_EXECUTABLE; *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; @@ -516,25 +513,11 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev, *flags &= ~AMDGPU_PTE_VALID; } - if (!bo) - return; - - if (bo->flags & (AMDGPU_GEM_CREATE_COHERENT | - AMDGPU_GEM_CREATE_UNCACHED)) - *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC); - - bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); - coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT; - is_system = (bo->tbo.resource->mem_type == TTM_PL_TT) || - (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT); - if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC) *flags |= AMDGPU_PTE_DCC; - /* WA for HW bug */ - if (is_system || ((bo_adev != adev) && coherent)) - *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC); - + if (bo && bo->flags & AMDGPU_GEM_CREATE_UNCACHED) + *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC); } static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 231a3d490ea8e..7a773fcd7752c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -859,6 +859,10 @@ static void mes_v11_0_get_fw_version(struct amdgpu_device *adev) { int pipe; + /* return early if we have already fetched these */ + if (adev->mes.sched_version && adev->mes.kiq_version) + return; + /* get MES scheduler/KIQ versions */ 
mutex_lock(&adev->srbm_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index b3175ff676f33..459f7b8d72b4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -1225,17 +1225,20 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev, mes_v12_0_queue_init_register(ring); } - /* get MES scheduler/KIQ versions */ - mutex_lock(&adev->srbm_mutex); - soc21_grbm_select(adev, 3, pipe, 0, 0); + if (((pipe == AMDGPU_MES_SCHED_PIPE) && !adev->mes.sched_version) || + ((pipe == AMDGPU_MES_KIQ_PIPE) && !adev->mes.kiq_version)) { + /* get MES scheduler/KIQ versions */ + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, 3, pipe, 0, 0); - if (pipe == AMDGPU_MES_SCHED_PIPE) - adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); - else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq) - adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); + if (pipe == AMDGPU_MES_SCHED_PIPE) + adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); + else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq) + adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); - soc21_grbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 73065a85e0d26..4f94a119d6275 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -78,12 +78,12 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode = { /* Navi1x */ static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, }; @@ -104,10 +104,10 @@ static const struct amdgpu_video_codecs sc_video_codecs_encode = { }; static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, @@ -115,10 +115,10 @@ 
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] }; static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 307185c0e1b8f..4cbe0da100d8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -103,12 +103,11 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode = /* Vega */ static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, }; static const struct amdgpu_video_codecs vega_video_codecs_decode = @@ -120,12 +119,12 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode = /* Raven */ static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)}, }; @@ -138,10 +137,10 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode = /* Renoir, Arcturus */ static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)}, + 
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 792b2eb6bbace..48ab93e715c82 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -167,16 +167,16 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] = { { .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, + .max_width = 1920, + .max_height = 1088, + .max_pixels_per_frame = 1920 * 1088, .max_level = 3, }, { .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, + .max_width = 1920, + .max_height = 1088, + .max_pixels_per_frame = 1920 * 1088, .max_level = 5, }, { @@ -188,9 +188,9 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] = }, { .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, + .max_width = 1920, + .max_height = 1088, + .max_pixels_per_frame = 1920 * 1088, .max_level = 4, }, }; @@ -206,16 +206,16 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] = { { .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, + .max_width = 1920, + .max_height = 1088, + .max_pixels_per_frame = 1920 * 1088, .max_level = 3, }, { .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, + .max_width = 1920, + .max_height = 1088, + .max_pixels_per_frame = 1920 * 1088, .max_level = 5, }, { @@ -227,9 +227,9 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] = }, { .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, + .max_width = 1920, + .max_height = 1088, + .max_pixels_per_frame = 1920 * 1088, .max_level = 4, }, { @@ -239,13 +239,6 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] = .max_pixels_per_frame = 4096 * 4096, .max_level = 186, }, - { - .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, - .max_width = 4096, - .max_height = 4096, - .max_pixels_per_frame = 4096 * 4096, - .max_level = 0, - }, }; static const struct amdgpu_video_codecs cz_video_codecs_decode = diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 02f7ba8c93cd4..7062f12b5b751 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -4117,7 +4117,8 @@ static const uint32_t cwsr_trap_gfx12_hex[] = { 0x0000ffff, 0x8bfe7e7e, 0x8bea6a6a, 0xb97af804, 0xbe804ec2, 0xbf94fffe, - 0xbe804a6c, 0xbfb10000, + 0xbe804a6c, 0xbe804ec2, + 0xbf94fffe, 0xbfb10000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0x00000000, diff --git 
a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index 44772eec9ef4d..96fbb16ceb216 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -34,41 +34,24 @@ * cpp -DASIC_FAMILY=CHIP_PLUM_BONITO cwsr_trap_handler_gfx10.asm -P -o gfx11.sp3 * sp3 gfx11.sp3 -hex gfx11.hex * - * gfx12: - * cpp -DASIC_FAMILY=CHIP_GFX12 cwsr_trap_handler_gfx10.asm -P -o gfx12.sp3 - * sp3 gfx12.sp3 -hex gfx12.hex */ #define CHIP_NAVI10 26 #define CHIP_SIENNA_CICHLID 30 #define CHIP_PLUM_BONITO 36 -#define CHIP_GFX12 37 #define NO_SQC_STORE (ASIC_FAMILY >= CHIP_SIENNA_CICHLID) #define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID) #define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO) #define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO) -#define SW_SA_TRAP (ASIC_FAMILY >= CHIP_PLUM_BONITO && ASIC_FAMILY < CHIP_GFX12) +#define SW_SA_TRAP (ASIC_FAMILY == CHIP_PLUM_BONITO) #define SAVE_AFTER_XNACK_ERROR (HAVE_XNACK && !NO_SQC_STORE) // workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger #define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised -#if ASIC_FAMILY < CHIP_GFX12 #define S_COHERENCE glc:1 #define V_COHERENCE slc:1 glc:1 #define S_WAITCNT_0 s_waitcnt 0 -#else -#define S_COHERENCE scope:SCOPE_SYS -#define V_COHERENCE scope:SCOPE_SYS -#define S_WAITCNT_0 s_wait_idle - -#define HW_REG_SHADER_FLAT_SCRATCH_LO HW_REG_WAVE_SCRATCH_BASE_LO -#define HW_REG_SHADER_FLAT_SCRATCH_HI HW_REG_WAVE_SCRATCH_BASE_HI -#define HW_REG_GPR_ALLOC HW_REG_WAVE_GPR_ALLOC -#define HW_REG_LDS_ALLOC HW_REG_WAVE_LDS_ALLOC -#define HW_REG_MODE HW_REG_WAVE_MODE -#endif -#if ASIC_FAMILY < CHIP_GFX12 var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006 var SQ_WAVE_STATUS_HALT_MASK = 0x2000 var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000 @@ -81,21 +64,6 @@ var S_STATUS_ALWAYS_CLEAR_MASK = SQ_WAVE_STATUS_SPI_PRIO_MASK|SQ_WAVE_STATUS_E var S_STATUS_HALT_MASK = SQ_WAVE_STATUS_HALT_MASK var S_SAVE_PC_HI_TRAP_ID_MASK = 0x00FF0000 var S_SAVE_PC_HI_HT_MASK = 0x01000000 -#else -var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4 -var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9 -var SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK = 0xC00 -var SQ_WAVE_STATE_PRIV_HALT_MASK = 0x4000 -var SQ_WAVE_STATE_PRIV_POISON_ERR_MASK = 0x8000 -var SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT = 15 -var SQ_WAVE_STATUS_WAVE64_SHIFT = 29 -var SQ_WAVE_STATUS_WAVE64_SIZE = 1 -var SQ_WAVE_LDS_ALLOC_GRANULARITY = 9 -var S_STATUS_HWREG = HW_REG_WAVE_STATE_PRIV -var S_STATUS_ALWAYS_CLEAR_MASK = SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK|SQ_WAVE_STATE_PRIV_POISON_ERR_MASK -var S_STATUS_HALT_MASK = SQ_WAVE_STATE_PRIV_HALT_MASK -var S_SAVE_PC_HI_TRAP_ID_MASK = 0xF0000000 -#endif var SQ_WAVE_STATUS_NO_VGPRS_SHIFT = 24 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12 @@ -110,7 +78,6 @@ var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8 var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 12 #endif -#if ASIC_FAMILY < CHIP_GFX12 var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400 var SQ_WAVE_TRAPSTS_EXCP_MASK = 0x1FF var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10 @@ -161,39 +128,6 @@ var S_TRAPSTS_RESTORE_PART_3_SIZE = 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT var S_TRAPSTS_HWREG = HW_REG_TRAPSTS var S_TRAPSTS_SAVE_CONTEXT_MASK = SQ_WAVE_TRAPSTS_SAVECTX_MASK var S_TRAPSTS_SAVE_CONTEXT_SHIFT = SQ_WAVE_TRAPSTS_SAVECTX_SHIFT -#else -var SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK = 0xF -var SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK = 0x10 -var 
SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT = 5 -var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK = 0x20 -var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK = 0x40 -var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT = 6 -var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK = 0x80 -var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT = 7 -var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK = 0x100 -var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT = 8 -var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK = 0x200 -var SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK = 0x800 -var SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK = 0x80 -var SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK = 0x200 - -var S_TRAPSTS_HWREG = HW_REG_WAVE_EXCP_FLAG_PRIV -var S_TRAPSTS_SAVE_CONTEXT_MASK = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK -var S_TRAPSTS_SAVE_CONTEXT_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT -var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK |\ - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK |\ - SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK |\ - SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK |\ - SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK |\ - SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK -var S_TRAPSTS_RESTORE_PART_1_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT -var S_TRAPSTS_RESTORE_PART_2_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT -var S_TRAPSTS_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT -var S_TRAPSTS_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT -var S_TRAPSTS_RESTORE_PART_3_SIZE = 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT -var BARRIER_STATE_SIGNAL_OFFSET = 16 -var BARRIER_STATE_VALID_OFFSET = 0 -#endif // bits [31:24] unused by SPI debug data var TTMP11_SAVE_REPLAY_W64H_SHIFT = 31 @@ -305,11 +239,7 @@ L_TRAP_NO_BARRIER: L_HALTED: // Host trap may occur while wave is halted. -#if ASIC_FAMILY < CHIP_GFX12 s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK -#else - s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK -#endif s_cbranch_scc1 L_FETCH_2ND_TRAP L_CHECK_SAVE: @@ -336,7 +266,6 @@ L_NOT_HALTED: // Check for maskable exceptions in trapsts.excp and trapsts.excp_hi. // Maskable exceptions only cause the wave to enter the trap handler if // their respective bit in mode.excp_en is set. -#if ASIC_FAMILY < CHIP_GFX12 s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCP_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK s_cbranch_scc0 L_CHECK_TRAP_ID @@ -349,17 +278,6 @@ L_NOT_ADDR_WATCH: s_lshl_b32 ttmp2, ttmp2, SQ_WAVE_MODE_EXCP_EN_SHIFT s_and_b32 ttmp2, ttmp2, ttmp3 s_cbranch_scc1 L_FETCH_2ND_TRAP -#else - s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_USER) - s_and_b32 ttmp3, s_save_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK - s_cbranch_scc0 L_NOT_ADDR_WATCH - s_or_b32 ttmp2, ttmp2, SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK - -L_NOT_ADDR_WATCH: - s_getreg_b32 ttmp3, hwreg(HW_REG_WAVE_TRAP_CTRL) - s_and_b32 ttmp2, ttmp3, ttmp2 - s_cbranch_scc1 L_FETCH_2ND_TRAP -#endif L_CHECK_TRAP_ID: // Check trap_id != 0 @@ -369,13 +287,8 @@ L_CHECK_TRAP_ID: #if SINGLE_STEP_MISSED_WORKAROUND // Prioritize single step exception over context save. // Second-level trap will halt wave and RFE, re-entering for SAVECTX. -#if ASIC_FAMILY < CHIP_GFX12 s_getreg_b32 ttmp2, hwreg(HW_REG_MODE) s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK -#else - // WAVE_TRAP_CTRL is already in ttmp3. - s_and_b32 ttmp3, ttmp3, SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK -#endif s_cbranch_scc1 L_FETCH_2ND_TRAP #endif @@ -425,12 +338,7 @@ L_NO_NEXT_TRAP: s_cbranch_scc1 L_TRAP_CASE // Host trap will not cause trap re-entry. 
-#if ASIC_FAMILY < CHIP_GFX12 s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_HT_MASK -#else - s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV) - s_and_b32 ttmp2, ttmp2, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK -#endif s_cbranch_scc1 L_EXIT_TRAP s_or_b32 s_save_status, s_save_status, S_STATUS_HALT_MASK @@ -457,16 +365,7 @@ L_EXIT_TRAP: s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 -#if ASIC_FAMILY < CHIP_GFX12 s_setreg_b32 hwreg(S_STATUS_HWREG), s_save_status -#else - // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it. - // Only restore fields which the trap handler changes. - s_lshr_b32 s_save_status, s_save_status, SQ_WAVE_STATE_PRIV_SCC_SHIFT - s_setreg_b32 hwreg(S_STATUS_HWREG, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \ - SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_status -#endif - s_rfe_b64 [ttmp0, ttmp1] L_SAVE: @@ -478,14 +377,6 @@ L_SAVE: s_endpgm L_HAVE_VGPRS: #endif -#if ASIC_FAMILY >= CHIP_GFX12 - s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS) - s_bitcmp1_b32 s_save_tmp, SQ_WAVE_STATUS_NO_VGPRS_SHIFT - s_cbranch_scc0 L_HAVE_VGPRS - s_endpgm -L_HAVE_VGPRS: -#endif - s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32] s_mov_b32 s_save_tmp, 0 s_setreg_b32 hwreg(S_TRAPSTS_HWREG, S_TRAPSTS_SAVE_CONTEXT_SHIFT, 1), s_save_tmp //clear saveCtx bit @@ -671,19 +562,6 @@ L_SAVE_HWREG: s_mov_b32 m0, 0x0 //Next lane of v2 to write to #endif -#if ASIC_FAMILY >= CHIP_GFX12 - // Ensure no further changes to barrier or LDS state. - // STATE_PRIV.BARRIER_COMPLETE may change up to this point. - s_barrier_signal -2 - s_barrier_wait -2 - - // Re-read final state of BARRIER_COMPLETE field for save. - s_getreg_b32 s_save_tmp, hwreg(S_STATUS_HWREG) - s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK - s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK - s_or_b32 s_save_status, s_save_status, s_save_tmp -#endif - write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset) s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK @@ -707,21 +585,6 @@ L_SAVE_HWREG: s_getreg_b32 s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI) write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) -#if ASIC_FAMILY >= CHIP_GFX12 - s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER) - write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) - - s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL) - write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) - - s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS) - write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset) - - s_get_barrier_state s_save_tmp, -1 - s_wait_kmcnt (0) - write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset) -#endif - #if NO_SQC_STORE // Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this. s_mov_b32 exec_lo, 0xFFFF @@ -814,9 +677,7 @@ L_SAVE_LDS_NORMAL: s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero? s_cbranch_scc0 L_SAVE_LDS_DONE //no lds used? jump to L_SAVE_DONE -#if ASIC_FAMILY < CHIP_GFX12 s_barrier //LDS is used? 
wait for other waves in the same TG -#endif s_and_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK s_cbranch_scc0 L_SAVE_LDS_DONE @@ -1081,11 +942,6 @@ L_RESTORE: s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC -#if ASIC_FAMILY >= CHIP_GFX12 - // Save s_restore_spi_init_hi for later use. - s_mov_b32 s_restore_spi_init_hi_save, s_restore_spi_init_hi -#endif - //determine it is wave32 or wave64 get_wave_size2(s_restore_size) @@ -1320,9 +1176,7 @@ L_RESTORE_SGPR: // s_barrier with MODE.DEBUG_EN=1, STATUS.PRIV=1 incorrectly asserts debug exception. // Clear DEBUG_EN before and restore MODE after the barrier. s_setreg_imm32_b32 hwreg(HW_REG_MODE), 0 -#if ASIC_FAMILY < CHIP_GFX12 s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG -#endif /* restore HW registers */ L_RESTORE_HWREG: @@ -1334,11 +1188,6 @@ L_RESTORE_HWREG: s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes -#if ASIC_FAMILY >= CHIP_GFX12 - // Restore s_restore_spi_init_hi before the saved value gets clobbered. - s_mov_b32 s_restore_spi_init_hi, s_restore_spi_init_hi_save -#endif - read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset) read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset) read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset) @@ -1358,44 +1207,6 @@ L_RESTORE_HWREG: s_setreg_b32 hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI), s_restore_flat_scratch -#if ASIC_FAMILY >= CHIP_GFX12 - read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset) - S_WAITCNT_0 - s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_USER), s_restore_tmp - - read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset) - S_WAITCNT_0 - s_setreg_b32 hwreg(HW_REG_WAVE_TRAP_CTRL), s_restore_tmp - - // Only the first wave needs to restore the workgroup barrier. - s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK - s_cbranch_scc0 L_SKIP_BARRIER_RESTORE - - // Skip over WAVE_STATUS, since there is no state to restore from it - s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 4 - - read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset) - S_WAITCNT_0 - - s_bitcmp1_b32 s_restore_tmp, BARRIER_STATE_VALID_OFFSET - s_cbranch_scc0 L_SKIP_BARRIER_RESTORE - - // extract the saved signal count from s_restore_tmp - s_lshr_b32 s_restore_tmp, s_restore_tmp, BARRIER_STATE_SIGNAL_OFFSET - - // We need to call s_barrier_signal repeatedly to restore the signal - // count of the work group barrier. The member count is already - // initialized with the number of waves in the work group. -L_BARRIER_RESTORE_LOOP: - s_and_b32 s_restore_tmp, s_restore_tmp, s_restore_tmp - s_cbranch_scc0 L_SKIP_BARRIER_RESTORE - s_barrier_signal -1 - s_add_i32 s_restore_tmp, s_restore_tmp, -1 - s_branch L_BARRIER_RESTORE_LOOP - -L_SKIP_BARRIER_RESTORE: -#endif - s_mov_b32 m0, s_restore_m0 s_mov_b32 exec_lo, s_restore_exec_lo s_mov_b32 exec_hi, s_restore_exec_hi @@ -1453,13 +1264,6 @@ L_RETURN_WITHOUT_PRIV: s_setreg_b32 hwreg(S_STATUS_HWREG), s_restore_status // SCC is included, which is changed by previous salu -#if ASIC_FAMILY >= CHIP_GFX12 - // Make barrier and LDS state visible to all waves in the group. - // STATE_PRIV.BARRIER_COMPLETE may change after this point. 
- s_barrier_signal -2 - s_barrier_wait -2 -#endif - s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution L_END_PGM: @@ -1598,11 +1402,7 @@ function get_hwreg_size_bytes end function get_wave_size2(s_reg) -#if ASIC_FAMILY < CHIP_GFX12 s_getreg_b32 s_reg, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE) -#else - s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE) -#endif s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE end diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm new file mode 100644 index 0000000000000..7b9d36e5fa437 --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm @@ -0,0 +1,1130 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +/* To compile this assembly code: + * + * gfx12: + * cpp -DASIC_FAMILY=CHIP_GFX12 cwsr_trap_handler_gfx12.asm -P -o gfx12.sp3 + * sp3 gfx12.sp3 -hex gfx12.hex + */ + +#define CHIP_GFX12 37 + +#define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost TRAP_AFTER_INST exception when SAVECTX raised + +var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4 +var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9 +var SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK = 0xC00 +var SQ_WAVE_STATE_PRIV_HALT_MASK = 0x4000 +var SQ_WAVE_STATE_PRIV_POISON_ERR_MASK = 0x8000 +var SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT = 15 +var SQ_WAVE_STATUS_WAVE64_SHIFT = 29 +var SQ_WAVE_STATUS_WAVE64_SIZE = 1 +var SQ_WAVE_STATUS_NO_VGPRS_SHIFT = 24 +var SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK = SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK|SQ_WAVE_STATE_PRIV_POISON_ERR_MASK +var S_SAVE_PC_HI_TRAP_ID_MASK = 0xF0000000 + +var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12 +var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9 +var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 8 +var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 12 +var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT = 24 +var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE = 4 +var SQ_WAVE_LDS_ALLOC_GRANULARITY = 9 + +var SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK = 0xF +var SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK = 0x10 +var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT = 5 +var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK = 0x20 +var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK = 0x40 +var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT = 6 +var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK = 0x80 +var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT = 7 +var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK = 0x100 +var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT = 8 +var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK = 0x200 +var SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK = 0x800 +var SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK = 0x80 +var SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK = 0x200 + +var SQ_WAVE_EXCP_FLAG_PRIV_NON_MASKABLE_EXCP_MASK= SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK |\ + SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK |\ + SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK |\ + SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK |\ + SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK |\ + SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK +var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_1_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT +var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT +var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT +var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT +var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE = 32 - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT +var BARRIER_STATE_SIGNAL_OFFSET = 16 +var BARRIER_STATE_VALID_OFFSET = 0 + +var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23 +var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000 + +// SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] +// when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE +var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 +var S_SAVE_BUF_RSRC_WORD3_MISC = 0x10807FAC +var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 +var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26 + +var S_SAVE_PC_HI_FIRST_WAVE_MASK = 0x80000000 +var S_SAVE_PC_HI_FIRST_WAVE_SHIFT = 31 + +var s_sgpr_save_num = 108 + +var s_save_spi_init_lo = exec_lo +var s_save_spi_init_hi = exec_hi +var s_save_pc_lo = ttmp0 +var s_save_pc_hi = ttmp1 +var s_save_exec_lo = ttmp2 +var s_save_exec_hi = ttmp3 +var s_save_state_priv = ttmp12 +var s_save_excp_flag_priv = 
ttmp15 +var s_save_xnack_mask = s_save_excp_flag_priv +var s_wave_size = ttmp7 +var s_save_buf_rsrc0 = ttmp8 +var s_save_buf_rsrc1 = ttmp9 +var s_save_buf_rsrc2 = ttmp10 +var s_save_buf_rsrc3 = ttmp11 +var s_save_mem_offset = ttmp4 +var s_save_alloc_size = s_save_excp_flag_priv +var s_save_tmp = ttmp14 +var s_save_m0 = ttmp5 +var s_save_ttmps_lo = s_save_tmp +var s_save_ttmps_hi = s_save_excp_flag_priv + +var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE +var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC + +var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 +var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26 +var S_WAVE_SIZE = 25 + +var s_restore_spi_init_lo = exec_lo +var s_restore_spi_init_hi = exec_hi +var s_restore_mem_offset = ttmp12 +var s_restore_alloc_size = ttmp3 +var s_restore_tmp = ttmp2 +var s_restore_mem_offset_save = s_restore_tmp +var s_restore_m0 = s_restore_alloc_size +var s_restore_mode = ttmp7 +var s_restore_flat_scratch = s_restore_tmp +var s_restore_pc_lo = ttmp0 +var s_restore_pc_hi = ttmp1 +var s_restore_exec_lo = ttmp4 +var s_restore_exec_hi = ttmp5 +var s_restore_state_priv = ttmp14 +var s_restore_excp_flag_priv = ttmp15 +var s_restore_xnack_mask = ttmp13 +var s_restore_buf_rsrc0 = ttmp8 +var s_restore_buf_rsrc1 = ttmp9 +var s_restore_buf_rsrc2 = ttmp10 +var s_restore_buf_rsrc3 = ttmp11 +var s_restore_size = ttmp6 +var s_restore_ttmps_lo = s_restore_tmp +var s_restore_ttmps_hi = s_restore_alloc_size +var s_restore_spi_init_hi_save = s_restore_exec_hi + +shader main + asic(DEFAULT) + type(CS) + wave_size(32) + + s_branch L_SKIP_RESTORE //NOT restore; might be a regular trap or save + +L_JUMP_TO_RESTORE: + s_branch L_RESTORE + +L_SKIP_RESTORE: + s_getreg_b32 s_save_state_priv, hwreg(HW_REG_WAVE_STATE_PRIV) //save STATE_PRIV since we will change SCC + + // Clear SPI_PRIO: do not save with elevated priority. + // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd. + s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK + + s_getreg_b32 s_save_excp_flag_priv, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV) + + s_and_b32 ttmp2, s_save_state_priv, SQ_WAVE_STATE_PRIV_HALT_MASK + s_cbranch_scc0 L_NOT_HALTED + +L_HALTED: + // Host trap may occur while wave is halted. + s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK + s_cbranch_scc1 L_FETCH_2ND_TRAP + +L_CHECK_SAVE: + s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK + s_cbranch_scc1 L_SAVE + + // Wave is halted but neither host trap nor SAVECTX is raised. + // Caused by instruction fetch memory violation. + // Spin wait until context saved to prevent interrupt storm. + s_sleep 0x10 + s_getreg_b32 s_save_excp_flag_priv, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV) + s_branch L_CHECK_SAVE + +L_NOT_HALTED: + // Let second-level handle non-SAVECTX exception or trap. + // Any concurrent SAVECTX will be handled upon re-entry once halted. + + // Check non-maskable exceptions. memory_violation, illegal_instruction + // and xnack_error exceptions always cause the wave to enter the trap + // handler. + s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_NON_MASKABLE_EXCP_MASK + s_cbranch_scc1 L_FETCH_2ND_TRAP + + // Check for maskable exceptions in trapsts.excp and trapsts.excp_hi. + // Maskable exceptions only cause the wave to enter the trap handler if + // their respective bit in mode.excp_en is set.
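+	// ttmp2 collects the raised maskable exception flags (EXCP_FLAG_USER, plus +	// the address-watch bit carried over from EXCP_FLAG_PRIV); ANDing it with +	// WAVE_TRAP_CTRL below keeps only the exceptions whose trap enable is set.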
+ s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_USER) + s_and_b32 ttmp3, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK + s_cbranch_scc0 L_NOT_ADDR_WATCH + s_or_b32 ttmp2, ttmp2, SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK + +L_NOT_ADDR_WATCH: + s_getreg_b32 ttmp3, hwreg(HW_REG_WAVE_TRAP_CTRL) + s_and_b32 ttmp2, ttmp3, ttmp2 + s_cbranch_scc1 L_FETCH_2ND_TRAP + +L_CHECK_TRAP_ID: + // Check trap_id != 0 + s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK + s_cbranch_scc1 L_FETCH_2ND_TRAP + +#if SINGLE_STEP_MISSED_WORKAROUND + // Prioritize single step exception over context save. + // Second-level trap will halt wave and RFE, re-entering for SAVECTX. + // WAVE_TRAP_CTRL is already in ttmp3. + s_and_b32 ttmp3, ttmp3, SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK + s_cbranch_scc1 L_FETCH_2ND_TRAP +#endif + + s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK + s_cbranch_scc1 L_SAVE + +L_FETCH_2ND_TRAP: + // Read second-level TBA/TMA from first-level TMA and jump if available. + // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data) + // ttmp12 holds SQ_WAVE_STATUS + s_sendmsg_rtn_b64 [ttmp14, ttmp15], sendmsg(MSG_RTN_GET_TMA) + s_wait_idle + s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 + + s_bitcmp1_b32 ttmp15, 0xF + s_cbranch_scc0 L_NO_SIGN_EXTEND_TMA + s_or_b32 ttmp15, ttmp15, 0xFFFF0000 +L_NO_SIGN_EXTEND_TMA: + + s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 scope:SCOPE_SYS // debug trap enabled flag + s_wait_idle + s_lshl_b32 ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT + s_andn2_b32 ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK + s_or_b32 ttmp11, ttmp11, ttmp2 + + s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 scope:SCOPE_SYS // second-level TBA + s_wait_idle + s_load_dwordx2 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 scope:SCOPE_SYS // second-level TMA + s_wait_idle + + s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3] + s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler not been set + s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler + +L_NO_NEXT_TRAP: + // If not caused by trap then halt wave to prevent re-entry. + s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK + s_cbranch_scc1 L_TRAP_CASE + + // Host trap will not cause trap re-entry. + s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV) + s_and_b32 ttmp2, ttmp2, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK + s_cbranch_scc1 L_EXIT_TRAP + s_or_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_HALT_MASK + + // If the PC points to S_ENDPGM then context save will fail if STATE_PRIV.HALT is set. + // Rewind the PC to prevent this from occurring. + s_sub_u32 ttmp0, ttmp0, 0x8 + s_subb_u32 ttmp1, ttmp1, 0x0 + + s_branch L_EXIT_TRAP + +L_TRAP_CASE: + // Advance past trap instruction to prevent re-entry. + s_add_u32 ttmp0, ttmp0, 0x4 + s_addc_u32 ttmp1, ttmp1, 0x0 + +L_EXIT_TRAP: + s_and_b32 ttmp1, ttmp1, 0xFFFF + + // Restore SQ_WAVE_STATUS. + s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 + s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 + + // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it. + // Only restore fields which the trap handler changes. 
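The partial restore that follows relies on the bitfield form of s_setreg_b32: hwreg(reg, OFFSET, SIZE) writes only bits [OFFSET+SIZE-1:OFFSET]. A minimal C model of that semantics (illustrative only, not an API from this codebase):

#include <stdint.h>

/* Write only bits [offset+size-1:offset] of reg, as s_setreg_b32 does. */
static uint32_t setreg_field(uint32_t reg, unsigned int offset,
                             unsigned int size, uint32_t value)
{
        uint32_t mask = (size >= 32) ? 0xFFFFFFFFu : ((1u << size) - 1u);

        mask <<= offset;
        return (reg & ~mask) | ((value << offset) & mask);
}

The restore below is then equivalent to setreg_field(state_priv, 9, 7, saved_state_priv >> 9): bits 9 through 15 (SCC through POISON_ERR) only, leaving BARRIER_COMPLETE untouched.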
+ s_lshr_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_SCC_SHIFT + s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \ + SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_state_priv + + s_rfe_b64 [ttmp0, ttmp1] + +L_SAVE: + // If VGPRs have been deallocated then terminate the wavefront. + // It has no remaining program to run and cannot save without VGPRs. + s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS) + s_bitcmp1_b32 s_save_tmp, SQ_WAVE_STATUS_NO_VGPRS_SHIFT + s_cbranch_scc0 L_HAVE_VGPRS + s_endpgm +L_HAVE_VGPRS: + + s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32] + s_mov_b32 s_save_tmp, 0 + s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT, 1), s_save_tmp //clear saveCtx bit + + /* inform SPI of readiness and wait for SPI's go signal */ + s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI + s_mov_b32 s_save_exec_hi, exec_hi + s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive + + s_sendmsg_rtn_b64 [exec_lo, exec_hi], sendmsg(MSG_RTN_SAVE_WAVE) + s_wait_idle + + // Save first_wave flag so we can clear high bits of save address. + s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK + s_lshl_b32 s_save_tmp, s_save_tmp, (S_SAVE_PC_HI_FIRST_WAVE_SHIFT - S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT) + s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp + + // Trap temporaries must be saved via VGPR but all VGPRs are in use. + // There is no ttmp space to hold the resource constant for VGPR save. + // Save v0 by itself since it requires only two SGPRs. + s_mov_b32 s_save_ttmps_lo, exec_lo + s_and_b32 s_save_ttmps_hi, exec_hi, 0xFFFF + s_mov_b32 exec_lo, 0xFFFFFFFF + s_mov_b32 exec_hi, 0xFFFFFFFF + global_store_dword_addtid v0, [s_save_ttmps_lo, s_save_ttmps_hi] scope:SCOPE_SYS + v_mov_b32 v0, 0x0 + s_mov_b32 exec_lo, s_save_ttmps_lo + s_mov_b32 exec_hi, s_save_ttmps_hi + + // Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic + // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40 + get_wave_size2(s_save_ttmps_hi) + get_vgpr_size_bytes(s_save_ttmps_lo, s_save_ttmps_hi) + get_svgpr_size_bytes(s_save_ttmps_hi) + s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi + s_and_b32 s_save_ttmps_hi, s_save_spi_init_hi, 0xFFFF + s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, get_sgpr_size_bytes() + s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo + s_addc_u32 s_save_ttmps_hi, s_save_ttmps_hi, 0x0 + + v_writelane_b32 v0, ttmp4, 0x4 + v_writelane_b32 v0, ttmp5, 0x5 + v_writelane_b32 v0, ttmp6, 0x6 + v_writelane_b32 v0, ttmp7, 0x7 + v_writelane_b32 v0, ttmp8, 0x8 + v_writelane_b32 v0, ttmp9, 0x9 + v_writelane_b32 v0, ttmp10, 0xA + v_writelane_b32 v0, ttmp11, 0xB + v_writelane_b32 v0, ttmp13, 0xD + v_writelane_b32 v0, exec_lo, 0xE + v_writelane_b32 v0, exec_hi, 0xF + + s_mov_b32 exec_lo, 0x3FFF + s_mov_b32 exec_hi, 0x0 + global_store_dword_addtid v0, [s_save_ttmps_lo, s_save_ttmps_hi] offset:0x40 scope:SCOPE_SYS + v_readlane_b32 ttmp14, v0, 0xE + v_readlane_b32 ttmp15, v0, 0xF + s_mov_b32 exec_lo, ttmp14 + s_mov_b32 exec_hi, ttmp15 + + /* setup Resource Constants */ + s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo + s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi + s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE + s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) although not necessarily inited +
s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC + + s_mov_b32 s_save_m0, m0 + + /* global mem offset */ + s_mov_b32 s_save_mem_offset, 0x0 + get_wave_size2(s_wave_size) + + /* save first 4 VGPRs, needed for SGPR save */ + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_ENABLE_SAVE_4VGPR_EXEC_HI + s_mov_b32 exec_hi, 0x00000000 + s_branch L_SAVE_4VGPR_WAVE32 +L_ENABLE_SAVE_4VGPR_EXEC_HI: + s_mov_b32 exec_hi, 0xFFFFFFFF + s_branch L_SAVE_4VGPR_WAVE64 +L_SAVE_4VGPR_WAVE32: + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + // VGPR Allocated in 4-GPR granularity + + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128 + buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*2 + buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*3 + s_branch L_SAVE_HWREG + +L_SAVE_4VGPR_WAVE64: + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + // VGPR Allocated in 4-GPR granularity + + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256 + buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*2 + buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*3 + + /* save HW registers */ + +L_SAVE_HWREG: + // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR) + get_vgpr_size_bytes(s_save_mem_offset, s_wave_size) + get_svgpr_size_bytes(s_save_tmp) + s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp + s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes() + + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + v_mov_b32 v0, 0x0 //Offset[31:0] from buffer resource + v_mov_b32 v1, 0x0 //Offset[63:32] from buffer resource + v_mov_b32 v2, 0x0 //Set of SGPRs for TCP store + s_mov_b32 m0, 0x0 //Next lane of v2 to write to + + // Ensure no further changes to barrier or LDS state. + // STATE_PRIV.BARRIER_COMPLETE may change up to this point. + s_barrier_signal -2 + s_barrier_wait -2 + + // Re-read final state of BARRIER_COMPLETE field for save. 
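The offset:128 steps in the wave32 stores above (offset:256 in wave64) fall out of buffer_store_dword writing one dword per active lane, so consecutive VGPRs land one lane-row apart in the save area. A small C helper showing the arithmetic (hypothetical name, assuming the row-per-register layout used here):

#include <stdbool.h>
#include <stdint.h>

/* Byte offset of VGPR row v in the save area: one dword per lane. */
static uint64_t vgpr_row_offset(unsigned int vgpr_index, bool wave64)
{
        unsigned int lanes = wave64 ? 64 : 32;

        return (uint64_t)vgpr_index * lanes * sizeof(uint32_t); /* 128 or 256 per row */
}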
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATE_PRIV) + s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK + s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK + s_or_b32 s_save_state_priv, s_save_state_priv, s_save_tmp + + write_hwreg_to_v2(s_save_m0) + write_hwreg_to_v2(s_save_pc_lo) + s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK + write_hwreg_to_v2(s_save_tmp) + write_hwreg_to_v2(s_save_exec_lo) + write_hwreg_to_v2(s_save_exec_hi) + write_hwreg_to_v2(s_save_state_priv) + + s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV) + write_hwreg_to_v2(s_save_tmp) + + write_hwreg_to_v2(s_save_xnack_mask) + + s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_MODE) + write_hwreg_to_v2(s_save_m0) + + s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO) + write_hwreg_to_v2(s_save_m0) + + s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI) + write_hwreg_to_v2(s_save_m0) + + s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER) + write_hwreg_to_v2(s_save_m0) + + s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL) + write_hwreg_to_v2(s_save_m0) + + s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS) + write_hwreg_to_v2(s_save_tmp) + + s_get_barrier_state s_save_tmp, -1 + s_wait_kmcnt (0) + write_hwreg_to_v2(s_save_tmp) + + // Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this. + s_mov_b32 exec_lo, 0xFFFF + s_mov_b32 exec_hi, 0x0 + buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS + + // Write SGPRs with 32 VGPR lanes. This works in wave32 and wave64 mode. + s_mov_b32 exec_lo, 0xFFFFFFFF + + /* save SGPRs */ + // Save SGPR before LDS save, then the s0 to s4 can be used during LDS save... + + // SGPR SR memory offset : size(VGPR)+size(SVGPR) + get_vgpr_size_bytes(s_save_mem_offset, s_wave_size) + get_svgpr_size_bytes(s_save_tmp) + s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + s_mov_b32 ttmp13, 0x0 //next VGPR lane to copy SGPR into + + s_mov_b32 m0, 0x0 //SGPR initial index value =0 + s_nop 0x0 //Manually inserted wait states +L_SAVE_SGPR_LOOP: + // SGPR is allocated in 16 SGPR granularity + s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0] + s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0] + s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0] + s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0] + s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0] + s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0] + s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0] + s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0] + + write_16sgpr_to_v2(s0) + + s_cmp_eq_u32 ttmp13, 0x20 //have 32 VGPR lanes filled? + s_cbranch_scc0 L_SAVE_SGPR_SKIP_TCP_STORE + + buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS + s_add_u32 s_save_mem_offset, s_save_mem_offset, 0x80 + s_mov_b32 ttmp13, 0x0 + v_mov_b32 v2, 0x0 +L_SAVE_SGPR_SKIP_TCP_STORE: + + s_add_u32 m0, m0, 16 //next sgpr index + s_cmp_lt_u32 m0, 96 //scc = (m0 < first 96 SGPR) ? 1 : 0 + s_cbranch_scc1 L_SAVE_SGPR_LOOP //first 96 SGPR save is complete? 
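A C model of the loop above may help: SGPRs are staged 16 at a time into the lanes of v2 and flushed to the save area once 32 lanes are full, i.e. every second iteration, advancing the write offset by 0x80 bytes per flush. Illustrative only; array stand-ins replace the actual registers and the TCP store.

#include <stdint.h>
#include <string.h>

static void save_first_96_sgprs(const uint32_t sgpr[96], uint32_t *save_area)
{
        uint32_t lanes[32];
        unsigned int base, lane = 0, mem_dw = 0;

        for (base = 0; base < 96; base += 16) {
                memcpy(&lanes[lane], &sgpr[base], 16 * sizeof(uint32_t)); /* write_16sgpr_to_v2 */
                lane += 16;
                if (lane == 32) {                          /* 32 VGPR lanes filled? */
                        memcpy(&save_area[mem_dw], lanes, sizeof(lanes)); /* buffer_store_dword */
                        mem_dw += 32;                      /* advance 0x80 bytes */
                        lane = 0;
                }
        }
}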
+ + //save the remaining 12 SGPRs + s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0] + s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0] + s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0] + s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0] + s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0] + s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0] + write_12sgpr_to_v2(s0) + + buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS + + /* save LDS */ + +L_SAVE_LDS: + // Change EXEC to all threads... + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_ENABLE_SAVE_LDS_EXEC_HI + s_mov_b32 exec_hi, 0x00000000 + s_branch L_SAVE_LDS_NORMAL +L_ENABLE_SAVE_LDS_EXEC_HI: + s_mov_b32 exec_hi, 0xFFFFFFFF +L_SAVE_LDS_NORMAL: + s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) + s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero? + s_cbranch_scc0 L_SAVE_LDS_DONE //no LDS used? jump to L_SAVE_LDS_DONE + + s_and_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK + s_cbranch_scc0 L_SAVE_LDS_DONE + + // first wave does the LDS save; + + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, SQ_WAVE_LDS_ALLOC_GRANULARITY + s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes + + // LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG) + // + get_vgpr_size_bytes(s_save_mem_offset, s_wave_size) + get_svgpr_size_bytes(s_save_tmp) + s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp + s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes() + s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes() + + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + //load 0~63*4(byte address) to vgpr v0 + v_mbcnt_lo_u32_b32 v0, -1, 0 + v_mbcnt_hi_u32_b32 v0, -1, v0 + v_mul_u32_u24 v0, 4, v0 + + s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_mov_b32 m0, 0x0 + s_cbranch_scc1 L_SAVE_LDS_W64 + +L_SAVE_LDS_W32: + s_mov_b32 s3, 128 + s_nop 0 + s_nop 0 + s_nop 0 +L_SAVE_LDS_LOOP_W32: + ds_read_b32 v1, v0 + s_wait_idle + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS + + s_add_u32 m0, m0, s3 //every buffer_store_lds does 128 bytes + s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 + v_add_nc_u32 v0, v0, 128 //mem offset increased by 128 bytes + s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_LDS_LOOP_W32 //LDS save is complete? + + s_branch L_SAVE_LDS_DONE + +L_SAVE_LDS_W64: + s_mov_b32 s3, 256 + s_nop 0 + s_nop 0 + s_nop 0 +L_SAVE_LDS_LOOP_W64: + ds_read_b32 v1, v0 + s_wait_idle + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS + + s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes + s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 + v_add_nc_u32 v0, v0, 256 //mem offset increased by 256 bytes + s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_LDS_LOOP_W64 //LDS save is complete?
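For reference, the LDS_SIZE field counts 512-byte granules (hence the shift by SQ_WAVE_LDS_ALLOC_GRANULARITY = 9), and each iteration of the loops above moves wave_size * 4 bytes: 128 in wave32, 256 in wave64. A hedged C sketch of the sizing math (function names invented):

#include <stdint.h>

static unsigned int lds_bytes(unsigned int lds_size_field)
{
        return lds_size_field << 9;     /* 512-byte LDS allocation granules */
}

static unsigned int lds_save_iterations(unsigned int lds_size_field, int wave64)
{
        unsigned int stride = (wave64 ? 64 : 32) * sizeof(uint32_t); /* 256 or 128 */

        return (lds_bytes(lds_size_field) + stride - 1) / stride;
}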
+ +L_SAVE_LDS_DONE: + /* save VGPRs - set the Rest VGPRs */ +L_SAVE_VGPR: + // VGPR SR memory offset: 0 + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_ENABLE_SAVE_VGPR_EXEC_HI + s_mov_b32 s_save_mem_offset, (0+128*4) // for the rest VGPRs + s_mov_b32 exec_hi, 0x00000000 + s_branch L_SAVE_VGPR_NORMAL +L_ENABLE_SAVE_VGPR_EXEC_HI: + s_mov_b32 s_save_mem_offset, (0+256*4) // for the rest VGPRs + s_mov_b32 exec_hi, 0xFFFFFFFF +L_SAVE_VGPR_NORMAL: + s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) + s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) + //determine it is wave32 or wave64 + s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_SAVE_VGPR_WAVE64 + + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + // VGPR Allocated in 4-GPR granularity + + // VGPR store using dw burst + s_mov_b32 m0, 0x4 //VGPR initial index value =4 + s_cmp_lt_u32 m0, s_save_alloc_size + s_cbranch_scc0 L_SAVE_VGPR_END + +L_SAVE_VGPR_W32_LOOP: + v_movrels_b32 v0, v0 //v0 = v[0+m0] + v_movrels_b32 v1, v1 //v1 = v[1+m0] + v_movrels_b32 v2, v2 //v2 = v[2+m0] + v_movrels_b32 v3, v3 //v3 = v[3+m0] + + buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128 + buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*2 + buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*3 + + s_add_u32 m0, m0, 4 //next vgpr index + s_add_u32 s_save_mem_offset, s_save_mem_offset, 128*4 //every buffer_store_dword does 128 bytes + s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_VGPR_W32_LOOP //VGPR save is complete? + + s_branch L_SAVE_VGPR_END + +L_SAVE_VGPR_WAVE64: + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + // VGPR store using dw burst + s_mov_b32 m0, 0x4 //VGPR initial index value =4 + s_cmp_lt_u32 m0, s_save_alloc_size + s_cbranch_scc0 L_SAVE_SHARED_VGPR + +L_SAVE_VGPR_W64_LOOP: + v_movrels_b32 v0, v0 //v0 = v[0+m0] + v_movrels_b32 v1, v1 //v1 = v[1+m0] + v_movrels_b32 v2, v2 //v2 = v[2+m0] + v_movrels_b32 v3, v3 //v3 = v[3+m0] + + buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256 + buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*2 + buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*3 + + s_add_u32 m0, m0, 4 //next vgpr index + s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes + s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_VGPR_W64_LOOP //VGPR save is complete? + +L_SAVE_SHARED_VGPR: + s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) + s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero? + s_cbranch_scc0 L_SAVE_VGPR_END //no shared_vgpr used? 
jump to L_SAVE_VGPR_END + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value) + //m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count. + //save shared_vgpr will start from the index of m0 + s_add_u32 s_save_alloc_size, s_save_alloc_size, m0 + s_mov_b32 exec_lo, 0xFFFFFFFF + s_mov_b32 exec_hi, 0x00000000 + +L_SAVE_SHARED_VGPR_WAVE64_LOOP: + v_movrels_b32 v0, v0 //v0 = v[0+m0] + buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS + s_add_u32 m0, m0, 1 //next vgpr index + s_add_u32 s_save_mem_offset, s_save_mem_offset, 128 + s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_SHARED_VGPR_WAVE64_LOOP //SHARED_VGPR save is complete? + +L_SAVE_VGPR_END: + s_branch L_END_PGM + +L_RESTORE: + /* Setup Resource Constants */ + s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo + s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi + s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE + s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) + s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC + + // Save s_restore_spi_init_hi for later use. + s_mov_b32 s_restore_spi_init_hi_save, s_restore_spi_init_hi + + //determine it is wave32 or wave64 + get_wave_size2(s_restore_size) + + s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK + s_cbranch_scc0 L_RESTORE_VGPR + + /* restore LDS */ +L_RESTORE_LDS: + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_ENABLE_RESTORE_LDS_EXEC_HI + s_mov_b32 exec_hi, 0x00000000 + s_branch L_RESTORE_LDS_NORMAL +L_ENABLE_RESTORE_LDS_EXEC_HI: + s_mov_b32 exec_hi, 0xFFFFFFFF +L_RESTORE_LDS_NORMAL: + s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) + s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero? + s_cbranch_scc0 L_RESTORE_VGPR //no lds used? jump to L_RESTORE_VGPR + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, SQ_WAVE_LDS_ALLOC_GRANULARITY + s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes + + // LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG) + // + get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size) + get_svgpr_size_bytes(s_restore_tmp) + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes() + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes() + + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_mov_b32 m0, 0x0 + s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64 + +L_RESTORE_LDS_LOOP_W32: + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset + s_wait_idle + ds_store_addtid_b32 v0 + s_add_u32 m0, m0, 128 // 128 DW + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //mem offset increased by 128DW + s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_LDS_LOOP_W32 //LDS restore is complete?
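Both the save path earlier and the restore path here recompute offsets into the context-save area from the same get_*_size_bytes helpers defined near the end of this file. A sketch of the resulting layout, with hypothetical field names (the SGPR and HWREG region sizes are the fixed 512 and 128 bytes those helpers return):

#include <stdint.h>

struct ctx_layout {
        uint64_t vgpr_off, svgpr_off, sgpr_off, hwreg_off, lds_off;
};

static struct ctx_layout compute_ctx_layout(uint64_t vgpr_bytes, uint64_t svgpr_bytes)
{
        struct ctx_layout l;

        l.vgpr_off  = 0;
        l.svgpr_off = l.vgpr_off + vgpr_bytes;   /* get_vgpr_size_bytes() */
        l.sgpr_off  = l.svgpr_off + svgpr_bytes; /* get_svgpr_size_bytes() */
        l.hwreg_off = l.sgpr_off + 512;          /* get_sgpr_size_bytes() */
        l.lds_off   = l.hwreg_off + 128;         /* get_hwreg_size_bytes() */
        return l;
}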
+ s_branch L_RESTORE_VGPR + +L_RESTORE_LDS_LOOP_W64: + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset + s_wait_idle + ds_store_addtid_b32 v0 + s_add_u32 m0, m0, 256 // 256 DW + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //mem offset increased by 256DW + s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64 //LDS restore is complete? + + /* restore VGPRs */ +L_RESTORE_VGPR: + // VGPR SR memory offset : 0 + s_mov_b32 s_restore_mem_offset, 0x0 + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_ENABLE_RESTORE_VGPR_EXEC_HI + s_mov_b32 exec_hi, 0x00000000 + s_branch L_RESTORE_VGPR_NORMAL +L_ENABLE_RESTORE_VGPR_EXEC_HI: + s_mov_b32 exec_hi, 0xFFFFFFFF +L_RESTORE_VGPR_NORMAL: + s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) + s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) + //determine it is wave32 or wave64 + s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_RESTORE_VGPR_WAVE64 + + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + // VGPR load using dw burst + s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4 + s_mov_b32 m0, 4 //VGPR initial index value = 4 + s_cmp_lt_u32 m0, s_restore_alloc_size + s_cbranch_scc0 L_RESTORE_SGPR + +L_RESTORE_VGPR_WAVE32_LOOP: + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS + buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128 + buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128*2 + buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128*3 + s_wait_idle + v_movreld_b32 v0, v0 //v[0+m0] = v0 + v_movreld_b32 v1, v1 + v_movreld_b32 v2, v2 + v_movreld_b32 v3, v3 + s_add_u32 m0, m0, 4 //next vgpr index + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4 //every buffer_load_dword does 128 bytes + s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_VGPR_WAVE32_LOOP //VGPR restore (except v0) is complete? 
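As the decode at the top of this block notes, GPR_ALLOC.VGPR_SIZE is granular, so the s_add_u32/s_lshl_b32 pair computes (field + 1) * 4 registers. In C (illustrative only):

static unsigned int vgpr_count(unsigned int vgpr_size_field)
{
        return (vgpr_size_field + 1) * 4;       /* 4-register granules */
}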
+ + /* VGPR restore on v0 */ + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS + buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128 + buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128*2 + buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128*3 + s_wait_idle + + s_branch L_RESTORE_SGPR + +L_RESTORE_VGPR_WAVE64: + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + // VGPR load using dw burst + s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v4, v0 will be the last + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 + s_mov_b32 m0, 4 //VGPR initial index value = 4 + s_cmp_lt_u32 m0, s_restore_alloc_size + s_cbranch_scc0 L_RESTORE_SHARED_VGPR + +L_RESTORE_VGPR_WAVE64_LOOP: + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS + buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256 + buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256*2 + buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256*3 + s_wait_idle + v_movreld_b32 v0, v0 //v[0+m0] = v0 + v_movreld_b32 v1, v1 + v_movreld_b32 v2, v2 + v_movreld_b32 v3, v3 + s_add_u32 m0, m0, 4 //next vgpr index + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 //every buffer_load_dword does 256 bytes + s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete? + +L_RESTORE_SHARED_VGPR: + s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size + s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero? + s_cbranch_scc0 L_RESTORE_V0 //no shared_vgpr used? + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value) + //m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count. + //restore shared_vgpr will start from the index of m0 + s_add_u32 s_restore_alloc_size, s_restore_alloc_size, m0 + s_mov_b32 exec_lo, 0xFFFFFFFF + s_mov_b32 exec_hi, 0x00000000 +L_RESTORE_SHARED_VGPR_WAVE64_LOOP: + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS + s_wait_idle + v_movreld_b32 v0, v0 //v[0+m0] = v0 + s_add_u32 m0, m0, 1 //next vgpr index + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 + s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_SHARED_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete? + + s_mov_b32 exec_hi, 0xFFFFFFFF //restore back exec_hi before restoring V0!! 
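Shared VGPRs are sized in granules of eight (the s_lshl_b32 by 3 above), and in wave64 they are indexed after the normal VGPRs, which is why the loop bound is m0 plus the decoded count. Illustrative helper:

static unsigned int shared_vgpr_count(unsigned int shared_size_field)
{
        return shared_size_field * 8;           /* 8-register granules */
}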
+ + /* VGPR restore on v0 */ +L_RESTORE_V0: + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS + buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256 + buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256*2 + buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256*3 + s_wait_idle + + /* restore SGPRs */ + //will be 2+8+16*6 + // SGPR SR memory offset : size(VGPR)+size(SVGPR) +L_RESTORE_SGPR: + get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size) + get_svgpr_size_bytes(s_restore_tmp) + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes() + s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 20*4 //s108~s127 is not saved + + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + s_mov_b32 m0, s_sgpr_save_num + + read_4sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset) + s_wait_idle + + s_sub_u32 m0, m0, 4 // Restore from S[0] to S[104] + s_nop 0 // hazard SALU M0=> S_MOVREL + + s_movreld_b64 s0, s0 //s[0+m0] = s0 + s_movreld_b64 s2, s2 + + read_8sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset) + s_wait_idle + + s_sub_u32 m0, m0, 8 // Restore from S[0] to S[96] + s_nop 0 // hazard SALU M0=> S_MOVREL + + s_movreld_b64 s0, s0 //s[0+m0] = s0 + s_movreld_b64 s2, s2 + s_movreld_b64 s4, s4 + s_movreld_b64 s6, s6 + + L_RESTORE_SGPR_LOOP: + read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset) + s_wait_idle + + s_sub_u32 m0, m0, 16 // Restore from S[n] to S[0] + s_nop 0 // hazard SALU M0=> S_MOVREL + + s_movreld_b64 s0, s0 //s[0+m0] = s0 + s_movreld_b64 s2, s2 + s_movreld_b64 s4, s4 + s_movreld_b64 s6, s6 + s_movreld_b64 s8, s8 + s_movreld_b64 s10, s10 + s_movreld_b64 s12, s12 + s_movreld_b64 s14, s14 + + s_cmp_eq_u32 m0, 0 //scc = (m0 < s_sgpr_save_num) ? 1 : 0 + s_cbranch_scc0 L_RESTORE_SGPR_LOOP + + // s_barrier with STATE_PRIV.TRAP_AFTER_INST=1, STATUS.PRIV=1 incorrectly asserts debug exception. + // Clear DEBUG_EN before and restore MODE after the barrier. + s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE), 0 + + /* restore HW registers */ +L_RESTORE_HWREG: + // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR) + get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size) + get_svgpr_size_bytes(s_restore_tmp) + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes() + + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + // Restore s_restore_spi_init_hi before the saved value gets clobbered. 
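A C model of the SGPR restore order used above (illustrative; array stand-ins replace the buffer loads and s_movreld writes): 108 SGPRs were saved, the offset is first pulled back by 20*4 bytes because s108..s127 are not saved, and registers are then restored high-to-low in a 4, 8, 16, ..., 16 pattern.

#include <stdint.h>
#include <string.h>

static void restore_sgprs(const uint32_t save_area[108], uint32_t sgpr[108])
{
        unsigned int m0 = 108, off = 108, chunk = 4;

        while (m0) {
                off -= chunk;                   /* read_*sgpr_from_mem pre-decrements */
                m0 -= chunk;
                memcpy(&sgpr[m0], &save_area[off], chunk * sizeof(uint32_t));
                chunk = (chunk == 4) ? 8 : 16;  /* 4, then 8, then 16 per pass */
        }
}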
+ s_mov_b32 s_restore_spi_init_hi, s_restore_spi_init_hi_save + + read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_state_priv, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_excp_flag_priv, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset) + s_wait_idle + + s_setreg_b32 hwreg(HW_REG_WAVE_SCRATCH_BASE_LO), s_restore_flat_scratch + + read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset) + s_wait_idle + + s_setreg_b32 hwreg(HW_REG_WAVE_SCRATCH_BASE_HI), s_restore_flat_scratch + + read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset) + s_wait_idle + s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_USER), s_restore_tmp + + read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset) + s_wait_idle + s_setreg_b32 hwreg(HW_REG_WAVE_TRAP_CTRL), s_restore_tmp + + // Only the first wave needs to restore the workgroup barrier. + s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK + s_cbranch_scc0 L_SKIP_BARRIER_RESTORE + + // Skip over WAVE_STATUS, since there is no state to restore from it + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 4 + + read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset) + s_wait_idle + + s_bitcmp1_b32 s_restore_tmp, BARRIER_STATE_VALID_OFFSET + s_cbranch_scc0 L_SKIP_BARRIER_RESTORE + + // extract the saved signal count from s_restore_tmp + s_lshr_b32 s_restore_tmp, s_restore_tmp, BARRIER_STATE_SIGNAL_OFFSET + + // We need to call s_barrier_signal repeatedly to restore the signal + // count of the work group barrier. The member count is already + // initialized with the number of waves in the work group. +L_BARRIER_RESTORE_LOOP: + s_and_b32 s_restore_tmp, s_restore_tmp, s_restore_tmp + s_cbranch_scc0 L_SKIP_BARRIER_RESTORE + s_barrier_signal -1 + s_add_i32 s_restore_tmp, s_restore_tmp, -1 + s_branch L_BARRIER_RESTORE_LOOP + +L_SKIP_BARRIER_RESTORE: + + s_mov_b32 m0, s_restore_m0 + s_mov_b32 exec_lo, s_restore_exec_lo + s_mov_b32 exec_hi, s_restore_exec_hi + + // EXCP_FLAG_PRIV.SAVE_CONTEXT and HOST_TRAP may have changed. + // Only restore the other fields to avoid clobbering them. 
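The three partial s_setreg_b32 writes that follow restore bits [4:0], bit 6 and bits [31:8] while leaving bit 5 (SAVE_CONTEXT) and bit 7 (HOST_TRAP) at their live values. An equivalent C merge (illustrative function name):

#include <stdint.h>

static uint32_t merge_excp_flag_priv(uint32_t live, uint32_t saved)
{
        const uint32_t keep_live = (1u << 5) | (1u << 7); /* SAVE_CONTEXT, HOST_TRAP */

        return (live & keep_live) | (saved & ~keep_live);
}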
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, 0, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_1_SIZE), s_restore_excp_flag_priv + s_lshr_b32 s_restore_excp_flag_priv, s_restore_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT + s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE), s_restore_excp_flag_priv + s_lshr_b32 s_restore_excp_flag_priv, s_restore_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT + s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE), s_restore_excp_flag_priv + + s_setreg_b32 hwreg(HW_REG_WAVE_MODE), s_restore_mode + + // Restore trap temporaries 4-11, 13 initialized by SPI debug dispatch logic + // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40 + get_vgpr_size_bytes(s_restore_ttmps_lo, s_restore_size) + get_svgpr_size_bytes(s_restore_ttmps_hi) + s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi + s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, get_sgpr_size_bytes() + s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0 + s_addc_u32 s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0 + s_and_b32 s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF + s_load_dwordx4 [ttmp4, ttmp5, ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x50 scope:SCOPE_SYS + s_load_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x60 scope:SCOPE_SYS + s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 scope:SCOPE_SYS + s_wait_idle + + s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS + s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 + s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 + + s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV), s_restore_state_priv // SCC is included, which is changed by previous salu + + // Make barrier and LDS state visible to all waves in the group. + // STATE_PRIV.BARRIER_COMPLETE may change after this point. + s_barrier_signal -2 + s_barrier_wait -2 + + s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution + +L_END_PGM: + // Make sure that no wave of the workgroup can exit the trap handler + // before the workgroup barrier state is saved. + s_barrier_signal -2 + s_barrier_wait -2 + s_endpgm_saved +end + +function write_hwreg_to_v2(s) + // Copy into VGPR for later TCP store. + v_writelane_b32 v2, s, m0 + s_add_u32 m0, m0, 0x1 +end + + +function write_16sgpr_to_v2(s) + // Copy into VGPR for later TCP store. + for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++ + v_writelane_b32 v2, s[sgpr_idx], ttmp13 + s_add_u32 ttmp13, ttmp13, 0x1 + end +end + +function write_12sgpr_to_v2(s) + // Copy into VGPR for later TCP store. 
+ for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++ + v_writelane_b32 v2, s[sgpr_idx], ttmp13 + s_add_u32 ttmp13, ttmp13, 0x1 + end +end + +function read_hwreg_from_mem(s, s_rsrc, s_mem_offset) + s_buffer_load_dword s, s_rsrc, s_mem_offset scope:SCOPE_SYS + s_add_u32 s_mem_offset, s_mem_offset, 4 +end + +function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset) + s_sub_u32 s_mem_offset, s_mem_offset, 4*16 + s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset scope:SCOPE_SYS +end + +function read_8sgpr_from_mem(s, s_rsrc, s_mem_offset) + s_sub_u32 s_mem_offset, s_mem_offset, 4*8 + s_buffer_load_dwordx8 s, s_rsrc, s_mem_offset scope:SCOPE_SYS +end + +function read_4sgpr_from_mem(s, s_rsrc, s_mem_offset) + s_sub_u32 s_mem_offset, s_mem_offset, 4*4 + s_buffer_load_dwordx4 s, s_rsrc, s_mem_offset scope:SCOPE_SYS +end + +function get_vgpr_size_bytes(s_vgpr_size_byte, s_size) + s_getreg_b32 s_vgpr_size_byte, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) + s_add_u32 s_vgpr_size_byte, s_vgpr_size_byte, 1 + s_bitcmp1_b32 s_size, S_WAVE_SIZE + s_cbranch_scc1 L_ENABLE_SHIFT_W64 + s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, (2+7) //Number of VGPRs = (vgpr_size + 1) * 4 * 32 * 4 (non-zero value) + s_branch L_SHIFT_DONE +L_ENABLE_SHIFT_W64: + s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, (2+8) //Number of VGPRs = (vgpr_size + 1) * 4 * 64 * 4 (non-zero value) +L_SHIFT_DONE: +end + +function get_svgpr_size_bytes(s_svgpr_size_byte) + s_getreg_b32 s_svgpr_size_byte, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) + s_lshl_b32 s_svgpr_size_byte, s_svgpr_size_byte, (3+7) +end + +function get_sgpr_size_bytes + return 512 +end + +function get_hwreg_size_bytes + return 128 +end + +function get_wave_size2(s_reg) + s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE) + s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE +end diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 3e6b4736a7fea..67b5f3d7ff8e9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -212,6 +212,11 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties, return -EINVAL; } + if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) { + args->ring_size = KFD_MIN_QUEUE_RING_SIZE; + pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE"); + } + if (!access_ok((const void __user *) args->read_pointer_address, sizeof(uint32_t))) { pr_err("Can't access read pointer\n"); @@ -461,6 +466,11 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, return -EINVAL; } + if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) { + args->ring_size = KFD_MIN_QUEUE_RING_SIZE; + pr_debug("Size lower. 
clamped to KFD_MIN_QUEUE_RING_SIZE"); + } + properties.queue_address = args->ring_base_address; properties.queue_size = args->ring_size; properties.queue_percent = args->queue_percentage & 0xFF; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index d350c7ce35b3d..9186ef0bd2a32 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -1493,6 +1493,11 @@ int kfd_debugfs_hang_hws(struct kfd_node *dev) return -EINVAL; } + if (dev->kfd->shared_resources.enable_mes) { + dev_err(dev->adev->dev, "Inducing MES hang is not supported\n"); + return -EINVAL; + } + return dqm_debugfs_hang_hws(dev->dqm); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 3cfb4a38d17c7..951b87e7e3f68 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -205,21 +205,6 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, if (!down_read_trylock(&adev->reset_domain->sem)) return -EIO; - if (!pdd->proc_ctx_cpu_ptr) { - r = amdgpu_amdkfd_alloc_gtt_mem(adev, - AMDGPU_MES_PROC_CTX_SIZE, - &pdd->proc_ctx_bo, - &pdd->proc_ctx_gpu_addr, - &pdd->proc_ctx_cpu_ptr, - false); - if (r) { - dev_err(adev->dev, - "failed to allocate process context bo\n"); - return r; - } - memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE); - } - memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input)); queue_input.process_id = qpd->pqm->process->pasid; queue_input.page_table_base_addr = qpd->page_table_base; @@ -1199,11 +1184,13 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, decrement_queue_count(dqm, qpd, q); if (dqm->dev->kfd->shared_resources.enable_mes) { - retval = remove_queue_mes(dqm, q, qpd); - if (retval) { + int err; + + err = remove_queue_mes(dqm, q, qpd); + if (err) { dev_err(dev, "Failed to evict queue %d\n", q->properties.queue_id); - goto out; + retval = err; } } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 2eff37aaf8273..1695dd78ede8e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -107,6 +107,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | 0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; + m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK; m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT; m->cp_mqd_base_addr_lo = lower_32_bits(addr); @@ -167,10 +169,10 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd); - m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK; m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1; - m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK; + pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control); m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 68dbc0399c87a..3c0ae28c5923b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ 
-154,6 +154,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | 0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; + m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK; m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT; m->cp_mqd_base_addr_lo = lower_32_bits(addr); @@ -221,10 +223,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd); - m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK; m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1; - m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK; pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control); m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c index 2b72d5b4949b6..565858b9044d4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c @@ -121,6 +121,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | 0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; + m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK; m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT; m->cp_mqd_base_addr_lo = lower_32_bits(addr); @@ -184,10 +186,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd); - m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK; m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1; - m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK; pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control); m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 84e8ea3a8a0c9..217af36dc0976 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -182,6 +182,9 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | 0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; + m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK; + m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT; m->cp_mqd_base_addr_lo = lower_32_bits(addr); @@ -244,7 +247,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd); - m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK; m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1; pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 264bd764f6f27..0ec8b457494bd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -35,6 +35,7 @@ #include #include "amdgpu_amdkfd.h" #include "amdgpu.h" +#include 
"amdgpu_reset.h" struct mm_struct; @@ -1140,6 +1141,17 @@ static void kfd_process_remove_sysfs(struct kfd_process *p) p->kobj = NULL; } +/* + * If any GPU is ongoing reset, wait for reset complete. + */ +static void kfd_process_wait_gpu_reset_complete(struct kfd_process *p) +{ + int i; + + for (i = 0; i < p->n_pdds; i++) + flush_workqueue(p->pdds[i]->dev->adev->reset_domain->wq); +} + /* No process locking is needed in this function, because the process * is not findable any more. We must assume that no other thread is * using it any more, otherwise we couldn't safely free the process @@ -1154,6 +1166,11 @@ static void kfd_process_wq_release(struct work_struct *work) kfd_process_dequeue_from_all_devices(p); pqm_uninit(&p->pqm); + /* + * If GPU in reset, user queues may still running, wait for reset complete. + */ + kfd_process_wait_gpu_reset_complete(p); + /* Signal the eviction fence after user mode queues are * destroyed. This allows any BOs to be freed without * triggering pointless evictions or waiting for fences. diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 42fd7669ac7d3..4078a81761871 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -361,10 +361,26 @@ int pqm_create_queue(struct process_queue_manager *pqm, if (retval != 0) return retval; + /* Register process if this is the first queue */ if (list_empty(&pdd->qpd.queues_list) && list_empty(&pdd->qpd.priv_queue_list)) dev->dqm->ops.register_process(dev->dqm, &pdd->qpd); + /* Allocate proc_ctx_bo only if MES is enabled and this is the first queue */ + if (!pdd->proc_ctx_cpu_ptr && dev->kfd->shared_resources.enable_mes) { + retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, + AMDGPU_MES_PROC_CTX_SIZE, + &pdd->proc_ctx_bo, + &pdd->proc_ctx_gpu_addr, + &pdd->proc_ctx_cpu_ptr, + false); + if (retval) { + dev_err(dev->adev->dev, "failed to allocate process context bo\n"); + return retval; + } + memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE); + } + pqn = kzalloc(sizeof(*pqn), GFP_KERNEL); if (!pqn) { retval = -ENOMEM; @@ -530,7 +546,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n", pqm->process->pasid, pqn->q->properties.queue_id, retval); - if (retval != -ETIME) + if (retval != -ETIME && retval != -EIO) goto err_destroy_queue; } kfd_procfs_del_queue(pqn->q); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c index ad29634f8b44c..29d7cb4cfe69a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c @@ -233,6 +233,7 @@ void kfd_queue_buffer_put(struct amdgpu_bo **bo) int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties) { struct kfd_topology_device *topo_dev; + u64 expected_queue_size; struct amdgpu_vm *vm; u32 total_cwsr_size; int err; @@ -241,6 +242,15 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope if (!topo_dev) return -EINVAL; + /* AQL queues on GFX7 and GFX8 appear twice their actual size */ + if (properties->type == KFD_QUEUE_TYPE_COMPUTE && + properties->format == KFD_QUEUE_FORMAT_AQL && + topo_dev->node_props.gfx_target_version >= 70000 && + topo_dev->node_props.gfx_target_version < 90000) + expected_queue_size = properties->queue_size / 2; + else + expected_queue_size = properties->queue_size; + vm = 
drm_priv_to_vm(pdd->drm_priv); err = amdgpu_bo_reserve(vm->root.bo, false); if (err) @@ -255,7 +265,7 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope goto out_err_unreserve; err = kfd_queue_buffer_get(vm, (void *)properties->queue_address, - &properties->ring_bo, properties->queue_size); + &properties->ring_bo, expected_queue_size); if (err) goto out_err_unreserve; @@ -266,8 +276,8 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope /* EOP buffer is not required for all ASICs */ if (properties->eop_ring_buffer_address) { if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) { - pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n", - properties->eop_buf_bo->tbo.base.size, + pr_debug("queue eop bo size 0x%x not equal to node eop buf size 0x%x\n", + properties->eop_ring_buffer_size, topo_dev->node_props.eop_buffer_size); err = -EINVAL; goto out_err_unreserve; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 1893c27746a52..b50283864dcd2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1276,13 +1276,7 @@ svm_range_get_pte_flags(struct kfd_node *node, break; case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): - if (domain == SVM_RANGE_VRAM_DOMAIN) { - if (bo_node != node) - mapping_flags |= AMDGPU_VM_MTYPE_NC; - } else { - mapping_flags |= coherent ? - AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; - } + mapping_flags |= AMDGPU_VM_MTYPE_NC; break; default: mapping_flags |= coherent ? @@ -2998,19 +2992,6 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, goto out; } - /* check if this page fault time stamp is before svms->checkpoint_ts */ - if (svms->checkpoint_ts[gpuidx] != 0) { - if (amdgpu_ih_ts_after(ts, svms->checkpoint_ts[gpuidx])) { - pr_debug("draining retry fault, drop fault 0x%llx\n", addr); - r = 0; - goto out; - } else - /* ts is after svms->checkpoint_ts now, reset svms->checkpoint_ts - * to zero to avoid following ts wrap around give wrong comparing - */ - svms->checkpoint_ts[gpuidx] = 0; - } - if (!p->xnack_enabled) { pr_debug("XNACK not enabled for pasid 0x%x\n", pasid); r = -EFAULT; @@ -3030,6 +3011,21 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, mmap_read_lock(mm); retry_write_locked: mutex_lock(&svms->lock); + + /* check if this page fault time stamp is before svms->checkpoint_ts */ + if (svms->checkpoint_ts[gpuidx] != 0) { + if (amdgpu_ih_ts_after(ts, svms->checkpoint_ts[gpuidx])) { + pr_debug("draining retry fault, drop fault 0x%llx\n", addr); + r = -EAGAIN; + goto out_unlock_svms; + } else { + /* ts is after svms->checkpoint_ts now, reset svms->checkpoint_ts + * to zero to avoid a later ts wrap-around producing a wrong comparison + */ + svms->checkpoint_ts[gpuidx] = 0; + } + } + prange = svm_range_from_addr(svms, addr, NULL); if (!prange) { pr_debug("failed to find prange svms 0x%p address [0x%llx]\n", @@ -3154,7 +3150,8 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, mutex_unlock(&svms->lock); mmap_read_unlock(mm); - svm_range_count_fault(node, p, gpuidx); + if (r != -EAGAIN) + svm_range_count_fault(node, p, gpuidx); mmput(mm); out: diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 85e58e0f6059a..c22da13859bd5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@
-243,6 +243,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); static void handle_hpd_rx_irq(void *param); +static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, + int bl_idx, + u32 user_brightness); + static bool is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, struct drm_crtc_state *new_crtc_state); @@ -1593,75 +1597,151 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev) return false; } -static const struct dmi_system_id hpd_disconnect_quirk_table[] = { +struct amdgpu_dm_quirks { + bool aux_hpd_discon; + bool support_edp0_on_dp1; +}; + +static struct amdgpu_dm_quirks quirk_entries = { + .aux_hpd_discon = false, + .support_edp0_on_dp1 = false +}; + +static int edp0_on_dp1_callback(const struct dmi_system_id *id) +{ + quirk_entries.support_edp0_on_dp1 = true; + return 0; +} + +static int aux_hpd_discon_callback(const struct dmi_system_id *id) +{ + quirk_entries.aux_hpd_discon = true; + return 0; +} + +static const struct dmi_system_id dmi_quirk_table[] = { { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), }, }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"), + }, + }, {} /* TODO: refactor this from a fixed table to a dynamic option */ }; -static void retrieve_dmi_info(struct amdgpu_display_manager *dm) +static void retrieve_dmi_info(struct amdgpu_display_manager *dm, 
struct dc_init_data *init_data) { - const struct dmi_system_id *dmi_id; + int dmi_id; + struct drm_device *dev = dm->ddev; dm->aux_hpd_discon_quirk = false; + init_data->flags.support_edp0_on_dp1 = false; + + dmi_id = dmi_check_system(dmi_quirk_table); - dmi_id = dmi_first_match(hpd_disconnect_quirk_table); - if (dmi_id) { + if (!dmi_id) + return; + + if (quirk_entries.aux_hpd_discon) { dm->aux_hpd_discon_quirk = true; - DRM_INFO("aux_hpd_discon_quirk attached\n"); + drm_info(dev, "aux_hpd_discon_quirk attached\n"); + } + if (quirk_entries.support_edp0_on_dp1) { + init_data->flags.support_edp0_on_dp1 = true; + drm_info(dev, "support_edp0_on_dp1 attached\n"); } } @@ -1969,7 +2049,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) init_data.num_virtual_links = 1; - retrieve_dmi_info(&adev->dm); + retrieve_dmi_info(&adev->dm, &init_data); if (adev->dm.bb_from_dmub) init_data.bb_from_dmub = adev->dm.bb_from_dmub; @@ -3172,8 +3252,7 @@ static int dm_resume(void *handle) struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; struct dc_state *dc_state; - int i, r, j, ret; - bool need_hotplug = false; + int i, r, j; struct dc_commit_streams_params commit_params = {}; if (dm->dc->caps.ips_support) { @@ -3240,8 +3319,19 @@ static int dm_resume(void *handle) mutex_unlock(&dm->dc_lock); + /* set the backlight after a reset */ + for (i = 0; i < dm->num_of_edps; i++) { + if (dm->backlight_dev[i]) + amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); + } + return 0; } + + /* leave display off for S4 sequence */ + if (adev->in_s4) + return 0; + /* Recreate dc_state - DC invalidates it when setting power state to S3. 
*/ dc_state_release(dm_state->context); dm_state->context = dc_state_create(dm->dc, NULL); @@ -3362,23 +3452,16 @@ static int dm_resume(void *handle) aconnector->mst_root) continue; - ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true); - - if (ret < 0) { - dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, - aconnector->dc_link); - need_hotplug = true; - } + drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr); } drm_connector_list_iter_end(&iter); - if (need_hotplug) - drm_kms_helper_hotplug_event(ddev); - amdgpu_dm_irq_resume_late(adev); amdgpu_dm_smu_write_watermarks_table(adev); + drm_kms_helper_hotplug_event(ddev); + return 0; } @@ -4767,6 +4850,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) dm->backlight_dev[aconnector->bl_idx] = backlight_device_register(bl_name, aconnector->base.kdev, dm, &amdgpu_dm_backlight_ops, &props); + dm->brightness[aconnector->bl_idx] = props.brightness; if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) { DRM_ERROR("DM: Backlight registration failed!\n"); @@ -4834,7 +4918,6 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm, aconnector->bl_idx = bl_idx; amdgpu_dm_update_backlight_caps(dm, bl_idx); - dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL; dm->backlight_link[bl_idx] = link; dm->num_of_edps++; @@ -8396,14 +8479,39 @@ static void manage_dm_interrupts(struct amdgpu_device *adev, int offdelay; if (acrtc_state) { - if (amdgpu_ip_version(adev, DCE_HWIP, 0) < - IP_VERSION(3, 5, 0) || - acrtc_state->stream->link->psr_settings.psr_version < - DC_PSR_VERSION_UNSUPPORTED || - !(adev->flags & AMD_IS_APU)) { - timing = &acrtc_state->stream->timing; - - /* at least 2 frames */ + timing = &acrtc_state->stream->timing; + + /* + * Depending on when the HW latching event of double-buffered + * registers happen relative to the PSR SDP deadline, and how + * bad the Panel clock has drifted since the last ALPM off + * event, there can be up to 3 frames of delay between sending + * the PSR exit cmd to DMUB fw, and when the panel starts + * displaying live frames. + * + * We can set: + * + * 20/100 * offdelay_ms = 3_frames_ms + * => offdelay_ms = 5 * 3_frames_ms + * + * This ensures that `3_frames_ms` will only be experienced as a + * 20% delay on top how long the display has been static, and + * thus make the delay less perceivable. + */ + if (acrtc_state->stream->link->psr_settings.psr_version < + DC_PSR_VERSION_UNSUPPORTED) { + offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 * + timing->v_total * + timing->h_total, + timing->pix_clk_100hz); + config.offdelay_ms = offdelay ?: 30; + } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) < + IP_VERSION(3, 5, 0) || + !(adev->flags & AMD_IS_APU)) { + /* + * Older HW and DGPU have issues with instant off; + * use a 2 frame offdelay. 
+ */ offdelay = DIV64_U64_ROUND_UP((u64)20 * timing->v_total * timing->h_total, @@ -8411,6 +8519,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev, config.offdelay_ms = offdelay ?: 30; } else { + /* offdelay_ms = 0 will never disable vblank */ + config.offdelay_ms = 1; config.disable_immediate = true; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 70fcfae8e4c55..2ac56e79df05e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -113,6 +113,7 @@ bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state) * * Panel Replay and PSR SU * - Enable when: + * - VRR is disabled * - vblank counter is disabled * - entry is allowed: usermode demonstrates an adequate number of fast * commits) @@ -131,19 +132,20 @@ static void amdgpu_dm_crtc_set_panel_sr_feature( bool is_sr_active = (link->replay_settings.replay_allow_active || link->psr_settings.psr_allow_active); bool is_crc_window_active = false; + bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc); #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY is_crc_window_active = amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base); #endif - if (link->replay_settings.replay_feature_enabled && + if (link->replay_settings.replay_feature_enabled && !vrr_active && allow_sr_entry && !is_sr_active && !is_crc_window_active) { amdgpu_dm_replay_enable(vblank_work->stream, true); } else if (vblank_enabled) { if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active) amdgpu_dm_psr_disable(vblank_work->stream, false); - } else if (link->psr_settings.psr_feature_enabled && + } else if (link->psr_settings.psr_feature_enabled && !vrr_active && allow_sr_entry && !is_sr_active && !is_crc_window_active) { struct amdgpu_dm_connector *aconn = diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index e339c7a8d541c..c0dc232440490 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -455,6 +455,7 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) for (i = 0; i < hdcp_work->max_link; i++) { cancel_delayed_work_sync(&hdcp_work[i].callback_dwork); cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork); + cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork); } sysfs_remove_bin_file(kobj, &hdcp_work[0].attr); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 3390f0d8420a0..a215234151ac3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -894,6 +894,15 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; + int irq_type; + int i; + + /* First, clear all hpd and hpdrx interrupts */ + for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) { + if (!dc_interrupt_set(adev->dm.dc, i, false)) + drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n", + i); + } drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { @@ -907,10 +916,31 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) dc_link = amdgpu_dm_connector->dc_link; + /* + * Get a base driver irq reference for hpd ints 
for the lifetime + * of dm. Note that only hpd interrupt types are registered with + * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on + * hpd_rx isn't available. DM currently controls hpd_rx + * explicitly with dc_interrupt_set() + */ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { - dc_interrupt_set(adev->dm.dc, - dc_link->irq_source_hpd, - true); + irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1; + /* + * TODO: There's a mismatch between mode_info.num_hpd + * and what bios reports as the # of connectors with hpd + * sources. Since the # of hpd source types registered + * with base driver == mode_info.num_hpd, we have to + * fallback to dc_interrupt_set for the remaining types. + */ + if (irq_type < adev->mode_info.num_hpd) { + if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type)) + drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", + dc_link->irq_source_hpd); + } else { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd, + true); + } } if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { @@ -935,6 +965,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; + int irq_type; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { @@ -948,9 +979,18 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) dc_link = amdgpu_dm_connector->dc_link; if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { - dc_interrupt_set(adev->dm.dc, - dc_link->irq_source_hpd, - false); + irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1; + + /* TODO: See same TODO in amdgpu_dm_hpd_init() */ + if (irq_type < adev->mode_info.num_hpd) { + if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type)) + drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", + dc_link->irq_source_hpd); + } else { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd, + false); + } } if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 83c7c8853edec..62e30942f735d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -275,8 +275,11 @@ static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev, if (!dcc->enable) return 0; - if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || - !dc->cap_funcs.get_dcc_compression_cap) + if (adev->family < AMDGPU_FAMILY_GC_12_0_0 && + format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + return -EINVAL; + + if (!dc->cap_funcs.get_dcc_compression_cap) return -EINVAL; input.format = format; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 45858bf1523d8..e140b7a04d724 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -54,7 +54,8 @@ static bool link_supports_psrsu(struct dc_link *link) if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU) return false; - return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub); + /* Temporarily disable PSR-SU to avoid glitches */ + return false; } /* diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index ab1132bc896a3..d9955c5d2e5ed 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -174,7 
+174,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN32) ############################################################################### # DCN35 ############################################################################### -CLK_MGR_DCN35 = dcn35_smu.o dcn35_clk_mgr.o +CLK_MGR_DCN35 = dcn35_smu.o dcn351_clk_mgr.o dcn35_clk_mgr.o AMD_DAL_CLK_MGR_DCN35 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn35/,$(CLK_MGR_DCN35)) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 0e243f4344d05..4c3e58c730b11 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -355,8 +355,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p BREAK_TO_DEBUGGER(); return NULL; } + if (ctx->dce_version == DCN_VERSION_3_51) + dcn351_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); + else + dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); - dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base.base; } break; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index e93df3d6222e6..bc123f1884da3 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -50,12 +50,13 @@ #include "link.h" #include "logger_types.h" + + +#include "yellow_carp_offset.h" #undef DC_LOGGER #define DC_LOGGER \ clk_mgr->base.base.ctx->logger -#include "yellow_carp_offset.h" - #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 29eff386505ab..91d872d6d392b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -53,9 +53,6 @@ #include "logger_types.h" -#undef DC_LOGGER -#define DC_LOGGER \ - clk_mgr->base.base.ctx->logger #define MAX_INSTANCE 7 @@ -77,6 +74,9 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } }, { { 0x0001B400, 0x0242E000, 0, 0, 0, 0, 0, 0 } } } }; +#undef DC_LOGGER +#define DC_LOGGER \ + clk_mgr->base.base.ctx->logger #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c new file mode 100644 index 0000000000000..6a6ae618650b6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c @@ -0,0 +1,140 @@ +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "dcn35_clk_mgr.h" + +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define mmCLK1_CLK_PLL_REQ 0x16E37 + +#define mmCLK1_CLK0_DFS_CNTL 0x16E69 +#define mmCLK1_CLK1_DFS_CNTL 0x16E6C +#define mmCLK1_CLK2_DFS_CNTL 0x16E6F +#define mmCLK1_CLK3_DFS_CNTL 0x16E72 +#define mmCLK1_CLK4_DFS_CNTL 0x16E75 +#define mmCLK1_CLK5_DFS_CNTL 0x16E78 + +#define mmCLK1_CLK0_CURRENT_CNT 0x16EFC +#define mmCLK1_CLK1_CURRENT_CNT 0x16EFD +#define mmCLK1_CLK2_CURRENT_CNT 0x16EFE +#define mmCLK1_CLK3_CURRENT_CNT 0x16EFF +#define mmCLK1_CLK4_CURRENT_CNT 0x16F00 +#define mmCLK1_CLK5_CURRENT_CNT 0x16F01 + +#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A +#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93 +#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C +#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5 +#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE +#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7 + +#define mmCLK1_CLK0_DS_CNTL 0x16E83 +#define mmCLK1_CLK1_DS_CNTL 0x16E8C +#define mmCLK1_CLK2_DS_CNTL 0x16E95 +#define mmCLK1_CLK3_DS_CNTL 0x16E9E +#define mmCLK1_CLK4_DS_CNTL 0x16EA7 +#define mmCLK1_CLK5_DS_CNTL 0x16EB0 + +#define mmCLK1_CLK0_ALLOW_DS 0x16E84 +#define mmCLK1_CLK1_ALLOW_DS 0x16E8D +#define mmCLK1_CLK2_ALLOW_DS 0x16E96 +#define mmCLK1_CLK3_ALLOW_DS 0x16E9F +#define mmCLK1_CLK4_ALLOW_DS 0x16EA8 +#define mmCLK1_CLK5_ALLOW_DS 0x16EB1 + +#define mmCLK5_spll_field_8 0x1B04B +#define mmDENTIST_DISPCLK_CNTL 0x0124 +#define regDENTIST_DISPCLK_CNTL 0x0064 +#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1 + +#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0 +#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc +#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10 +#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL +#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L +#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L + +#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L + +// DENTIST_DISPCLK_CNTL +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13 +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14 +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L + +#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L + +#define REG(reg) \ + (clk_mgr->regs->reg) + +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define CLK_SR_DCN35(reg_name)\ + .reg_name = mm ## reg_name + +static const struct clk_mgr_registers clk_mgr_regs_dcn351 = { + 
CLK_REG_LIST_DCN35() +}; + +static const struct clk_mgr_shift clk_mgr_shift_dcn351 = { + CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct clk_mgr_mask clk_mgr_mask_dcn351 = { + CLK_COMMON_MASK_SH_LIST_DCN32(_MASK) +}; + +#define TO_CLK_MGR_DCN35(clk_mgr)\ + container_of(clk_mgr, struct clk_mgr_dcn35, base) + + +void dcn351_clk_mgr_construct( + struct dc_context *ctx, + struct clk_mgr_dcn35 *clk_mgr, + struct pp_smu_funcs *pp_smu, + struct dccg *dccg) +{ + /*register offset changed*/ + clk_mgr->base.regs = &clk_mgr_regs_dcn351; + clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn351; + clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn351; + + dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); + +} + + diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 3bd0d46c17010..7d0d8852ce8d2 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -36,15 +36,11 @@ #include "dcn20/dcn20_clk_mgr.h" - - #include "reg_helper.h" #include "core_types.h" #include "dcn35_smu.h" #include "dm_helpers.h" -/* TODO: remove this include once we ported over remaining clk mgr functions*/ -#include "dcn30/dcn30_clk_mgr.h" #include "dcn31/dcn31_clk_mgr.h" #include "dc_dmub_srv.h" @@ -55,34 +51,102 @@ #define DC_LOGGER \ clk_mgr->base.base.ctx->logger -#define regCLK1_CLK_PLL_REQ 0x0237 -#define regCLK1_CLK_PLL_REQ_BASE_IDX 0 +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define mmCLK1_CLK_PLL_REQ 0x16E37 + +#define mmCLK1_CLK0_DFS_CNTL 0x16E69 +#define mmCLK1_CLK1_DFS_CNTL 0x16E6C +#define mmCLK1_CLK2_DFS_CNTL 0x16E6F +#define mmCLK1_CLK3_DFS_CNTL 0x16E72 +#define mmCLK1_CLK4_DFS_CNTL 0x16E75 +#define mmCLK1_CLK5_DFS_CNTL 0x16E78 + +#define mmCLK1_CLK0_CURRENT_CNT 0x16EFB +#define mmCLK1_CLK1_CURRENT_CNT 0x16EFC +#define mmCLK1_CLK2_CURRENT_CNT 0x16EFD +#define mmCLK1_CLK3_CURRENT_CNT 0x16EFE +#define mmCLK1_CLK4_CURRENT_CNT 0x16EFF +#define mmCLK1_CLK5_CURRENT_CNT 0x16F00 + +#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A +#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93 +#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C +#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5 +#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE +#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7 + +#define mmCLK1_CLK0_DS_CNTL 0x16E83 +#define mmCLK1_CLK1_DS_CNTL 0x16E8C +#define mmCLK1_CLK2_DS_CNTL 0x16E95 +#define mmCLK1_CLK3_DS_CNTL 0x16E9E +#define mmCLK1_CLK4_DS_CNTL 0x16EA7 +#define mmCLK1_CLK5_DS_CNTL 0x16EB0 + +#define mmCLK1_CLK0_ALLOW_DS 0x16E84 +#define mmCLK1_CLK1_ALLOW_DS 0x16E8D +#define mmCLK1_CLK2_ALLOW_DS 0x16E96 +#define mmCLK1_CLK3_ALLOW_DS 0x16E9F +#define mmCLK1_CLK4_ALLOW_DS 0x16EA8 +#define mmCLK1_CLK5_ALLOW_DS 0x16EB1 + +#define mmCLK5_spll_field_8 0x1B24B +#define mmDENTIST_DISPCLK_CNTL 0x0124 +#define regDENTIST_DISPCLK_CNTL 0x0064 +#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1 + +#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0 +#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc +#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10 +#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL +#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L +#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L + +#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L +#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L +// DENTIST_DISPCLK_CNTL +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8 +#define 
DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13 +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14 +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L + +#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L -#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0 -#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc -#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10 -#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL -#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L -#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L +#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0 +#undef FN +#define FN(reg_name, field_name) \ + clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name -#define regCLK1_CLK2_BYPASS_CNTL 0x029c -#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0 +#define REG(reg) \ + (clk_mgr->regs->reg) -#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0 -#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10 -#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L -#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg -#define regCLK5_0_CLK5_spll_field_8 0x464b -#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0 +#define BASE(seg) BASE_INNER(seg) -#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd -#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L +#define SR(reg_name)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name -#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0 +#define CLK_SR_DCN35(reg_name)\ + .reg_name = mm ## reg_name -#define REG(reg_name) \ - (ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) +static const struct clk_mgr_registers clk_mgr_regs_dcn35 = { + CLK_REG_LIST_DCN35() +}; + +static const struct clk_mgr_shift clk_mgr_shift_dcn35 = { + CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct clk_mgr_mask clk_mgr_mask_dcn35 = { + CLK_COMMON_MASK_SH_LIST_DCN32(_MASK) +}; #define TO_CLK_MGR_DCN35(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn35, base) @@ -443,7 +507,6 @@ static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) struct fixed31_32 pll_req; unsigned int fbmult_frac_val = 0; unsigned int fbmult_int_val = 0; - struct dc_context *ctx = clk_mgr->base.ctx; /* * Register value of fbmult is in 8.16 format, we are converting to 314.32 @@ -503,12 +566,12 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc_context *ctx = clk_mgr->base.ctx; + uint32_t ssc_enable; - REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable); + ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK; - return ssc_enable == 1; + return ssc_enable != 0; } static void init_clk_states(struct clk_mgr *clk_mgr) @@ -633,10 +696,10 @@ static struct dcn35_ss_info_table ss_info_table = { static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr) { - struct dc_context *ctx = clk_mgr->base.ctx; - uint32_t clock_source; + 
uint32_t clock_source = 0; + + clock_source = REG_READ(CLK1_CLK2_BYPASS_CNTL) & CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK; - REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source); // If it's DFS mode, clock_source is 0. if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) { clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source]; @@ -1106,6 +1169,12 @@ void dcn35_clk_mgr_construct( clk_mgr->base.dprefclk_ss_divider = 1000; clk_mgr->base.ss_on_dprefclk = false; clk_mgr->base.dfs_ref_freq_khz = 48000; + if (ctx->dce_version == DCN_VERSION_3_5) { + clk_mgr->base.regs = &clk_mgr_regs_dcn35; + clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn35; + clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn35; + } + clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem( clk_mgr->base.base.ctx, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h index 1203dc605b12c..a12a9bf90806e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h @@ -60,4 +60,8 @@ void dcn35_clk_mgr_construct(struct dc_context *ctx, void dcn35_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int); +void dcn351_clk_mgr_construct(struct dc_context *ctx, + struct clk_mgr_dcn35 *clk_mgr, + struct pp_smu_funcs *pp_smu, + struct dccg *dccg); #endif //__DCN35_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index d915020a42958..bfcbbea377298 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1455,7 +1455,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); /* Invalid input */ - if (!plane_state->dst_rect.width || + if (!plane_state || + !plane_state->dst_rect.width || !plane_state->dst_rect.height || !plane_state->src_rect.width || !plane_state->src_rect.height) { @@ -3387,10 +3388,13 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing) break; case COLOR_DEPTH_121212: normalized_pix_clk = (pix_clk * 36) / 24; - break; + break; + case COLOR_DEPTH_141414: + normalized_pix_clk = (pix_clk * 42) / 24; + break; case COLOR_DEPTH_161616: normalized_pix_clk = (pix_clk * 48) / 24; - break; + break; default: ASSERT(0); break; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index bf636b28e3e16..d37ecfdde4f1b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -63,11 +63,26 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, bool should_use_dmub_lock(struct dc_link *link) { + /* ASIC doesn't support DMUB */ + if (!link->ctx->dmub_srv) + return false; + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) return true; if (link->replay_settings.replay_feature_enabled) return true; + /* only use HW lock for PSR1 on single eDP */ + if (link->psr_settings.psr_version == DC_PSR_VERSION_1) { + struct dc_link *edp_links[MAX_NUM_EDP]; + int edp_num; + + dc_get_edp_links(link->dc, edp_links, &edp_num); + + if (edp_num == 1) + return true; + } + return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c 
index e5fb0e8333e43..e691a1cf33567 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c @@ -239,6 +239,7 @@ static const struct timing_generator_funcs dce60_tg_funcs = { dce60_timing_generator_enable_advanced_request, .configure_crc = dce60_configure_crc, .get_crc = dce110_get_crc, + .is_two_pixels_per_container = dce110_is_two_pixels_per_container, }; void dce60_timing_generator_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 1c10ba4dcddea..abe51cf3aab29 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -281,10 +281,10 @@ static void CalculateDynamicMetadataParameters( double DISPCLK, double DCFClkDeepSleep, double PixelClock, - long HTotal, - long VBlank, - long DynamicMetadataTransmittedBytes, - long DynamicMetadataLinesBeforeActiveRequired, + unsigned int HTotal, + unsigned int VBlank, + unsigned int DynamicMetadataTransmittedBytes, + int DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP, double *Tsetup, @@ -3277,8 +3277,8 @@ static double CalculateWriteBackDelay( static void CalculateDynamicMetadataParameters(int MaxInterDCNTileRepeaters, double DPPCLK, double DISPCLK, - double DCFClkDeepSleep, double PixelClock, long HTotal, long VBlank, long DynamicMetadataTransmittedBytes, - long DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP, + double DCFClkDeepSleep, double PixelClock, unsigned int HTotal, unsigned int VBlank, unsigned int DynamicMetadataTransmittedBytes, + int DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP, double *Tsetup, double *Tdmbf, double *Tdmec, double *Tdmsks) { double TotalRepeaterDelayTime = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index 8dee0d397e032..55014c1521167 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -994,7 +994,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s if (disp_cfg_stream_location < 0) disp_cfg_stream_location = dml_dispcfg->num_streams++; - ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__); + ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__); populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx); populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]); populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]); @@ -1018,7 +1018,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s if (disp_cfg_plane_location < 0) disp_cfg_plane_location = dml_dispcfg->num_planes++; - ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__); + 
ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__); populate_dml21_surface_config_from_plane_state(in_dc, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location].surface, context->stream_status[stream_index].plane_states[plane_index]); populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c index d35dd507cb9f8..cb187604744e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c @@ -87,6 +87,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co /* Store configuration options */ (*dml_ctx)->config = *config; + DC_FP_START(); + /*Initialize SOCBB and DCNIP params */ dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc); dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc); @@ -97,6 +99,8 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co /*Initialize DML21 instance */ dml2_initialize_instance(&(*dml_ctx)->v21.dml_init); + + DC_FP_END(); } bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config) @@ -277,11 +281,16 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml { bool out = false; + DC_FP_START(); + /* Use dml_validate_only for fast_validate path */ - if (fast_validate) { + if (fast_validate) out = dml21_check_mode_support(in_dc, context, dml_ctx); - } else + else out = dml21_mode_check_and_programming(in_dc, context, dml_ctx); + + DC_FP_END(); + return out; } @@ -420,8 +429,12 @@ void dml21_copy(struct dml2_context *dst_dml_ctx, dst_dml_ctx->v21.mode_programming.programming = dst_dml2_programming; + DC_FP_START(); + /* need to initialize copied instance for internal references to be correct */ dml2_initialize_instance(&dst_dml_ctx->v21.dml_init); + + DC_FP_END(); } bool dml21_create_copy(struct dml2_context **dst_dml_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c index 0aa4e4d343b04..2c1316d1b6eb8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c @@ -139,9 +139,8 @@ bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out) core->clean_me_up.mode_lib.ip.subvp_fw_processing_delay_us = core_dcn4_ip_caps_base.subvp_pstate_allow_width_us; core->clean_me_up.mode_lib.ip.subvp_swath_height_margin_lines = core_dcn4_ip_caps_base.subvp_swath_height_margin_lines; } else { - memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn4_ip_caps_base, sizeof(struct dml2_core_ip_params)); + memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn4_ip_caps_base, sizeof(struct dml2_core_ip_params)); patch_ip_params_with_ip_caps(&core->clean_me_up.mode_lib.ip, in_out->ip_caps); - core->clean_me_up.mode_lib.ip.imall_supported = false; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index 1ed21c1b86a5b..a966abd407881 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ 
b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -532,26 +532,6 @@ static void calculate_odm_slices(const struct dc_stream_state *stream, unsigned odm_slice_end_x[odm_factor - 1] = stream->src.width - 1; } -static bool is_plane_in_odm_slice(const struct dc_plane_state *plane, unsigned int slice_index, unsigned int *odm_slice_end_x, unsigned int num_slices) -{ - unsigned int slice_start_x, slice_end_x; - - if (slice_index == 0) - slice_start_x = 0; - else - slice_start_x = odm_slice_end_x[slice_index - 1] + 1; - - slice_end_x = odm_slice_end_x[slice_index]; - - if (plane->clip_rect.x + plane->clip_rect.width < slice_start_x) - return false; - - if (plane->clip_rect.x > slice_end_x) - return false; - - return true; -} - static void add_odm_slice_to_odm_tree(struct dml2_context *ctx, struct dc_state *state, struct dc_pipe_mapping_scratch *scratch, @@ -791,12 +771,6 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state sort_pipes_for_splitting(&scratch->pipe_pool); for (odm_slice_index = 0; odm_slice_index < scratch->odm_info.odm_factor; odm_slice_index++) { - // We build the tree for one ODM slice at a time. - // Each ODM slice shares a common OPP - if (!is_plane_in_odm_slice(plane, odm_slice_index, scratch->odm_info.odm_slice_end_x, scratch->odm_info.odm_factor)) { - continue; - } - // Now we have a list of all pipes to be used for this plane/stream, now setup the tree. scratch->odm_info.next_higher_pipe_for_odm_slice[odm_slice_index] = add_plane_to_blend_tree(ctx, state, plane, diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index bde4250853b10..81ba8809a3b4c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -746,7 +746,7 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st * case SIGNAL_TYPE_DISPLAY_PORT_MST: case SIGNAL_TYPE_DISPLAY_PORT: out->OutputEncoder[location] = dml_dp; - if (dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1) + if (location < MAX_HPO_DP2_ENCODERS && dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1) out->OutputEncoder[dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location]] = dml_dp2p0; break; case SIGNAL_TYPE_EDP: @@ -1303,7 +1303,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat if (disp_cfg_stream_location < 0) disp_cfg_stream_location = dml_dispcfg->num_timings++; - ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__); + ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__); populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]); populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context, dml2); @@ -1343,7 +1343,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat if (disp_cfg_plane_location < 0) disp_cfg_plane_location = dml_dispcfg->num_surfaces++; - ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__); + ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__); populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, 
disp_cfg_plane_location, context->stream_status[i].plane_states[j]); populate_dml_plane_cfg_from_plane_state( diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 4d64c45930da4..cb2cb89dfecb2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -734,11 +734,16 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2 return out; } + DC_FP_START(); + /* Use dml_validate_only for fast_validate path */ if (fast_validate) out = dml2_validate_only(context); else out = dml2_validate_and_build_resource(in_dc, context); + + DC_FP_END(); + return out; } @@ -779,11 +784,15 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op break; } + DC_FP_START(); + initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip); initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc); initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states); + + DC_FP_END(); } bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c index a65a0ddee6467..c671908ba7d06 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c @@ -44,7 +44,7 @@ void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable) struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); REG_UPDATE(DCHUBP_CNTL, HUBP_UNBOUNDED_REQ_MODE, enable); - REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, enable); + REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, 1); } void hubp31_soft_reset(struct hubp *hubp, bool reset) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index fd0530251c6e5..d725af14af371 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -1992,20 +1992,11 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx) dc->hwss.get_position(&pipe_ctx, 1, &position); vpos = position.vertical_count; - /* Avoid wraparound calculation issues */ - vupdate_start += stream->timing.v_total; - vupdate_end += stream->timing.v_total; - vpos += stream->timing.v_total; - if (vpos <= vupdate_start) { /* VPOS is in VACTIVE or back porch. */ lines_to_vupdate = vupdate_start - vpos; - } else if (vpos > vupdate_end) { - /* VPOS is in the front porch. */ - return; } else { - /* VPOS is in VUPDATE. */ - lines_to_vupdate = 0; + lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start; } /* Calculate time until VUPDATE in microseconds. */ @@ -2013,13 +2004,18 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx) stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz; us_to_vupdate = lines_to_vupdate * us_per_line; + /* Stall out until the cursor update completes. */ + if (vupdate_end < vupdate_start) + vupdate_end += stream->timing.v_total; + + /* Position is in the range of vupdate start and end*/ + if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start) + us_to_vupdate = 0; + /* 70 us is a conservative estimate of cursor update time*/ if (us_to_vupdate > 70) return; - /* Stall out until the cursor update completes. 
*/ - if (vupdate_end < vupdate_start) - vupdate_end += stream->timing.v_total; us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line; udelay(us_to_vupdate + us_vupdate); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 36d12db8d0225..f5f1ccd8303cf 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -3003,7 +3003,11 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst); phyd32clk = get_phyd32clk_src(link); - dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); + if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) { + dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); + } else { + dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); + } } else { if (dccg->funcs->enable_symclk_se) dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 0b743669f23b4..62f1e597787e6 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -1001,8 +1001,11 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx) if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) { if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst); - - dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); + if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) { + dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); + } else { + dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); + } } else { /* need to set DTBCLK_P source to DPREFCLK for DP8B10B */ dccg->funcs->set_dtbclk_p_src(dccg, DPREFCLK, tg->inst); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index c2dd061892f4d..7a1ca1e98059b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -166,6 +166,41 @@ enum dentist_divider_range { CLK_SR_DCN32(CLK1_CLK4_CURRENT_CNT), \ CLK_SR_DCN32(CLK4_CLK0_CURRENT_CNT) +#define CLK_REG_LIST_DCN35() \ + CLK_SR_DCN35(CLK1_CLK_PLL_REQ), \ + CLK_SR_DCN35(CLK1_CLK0_DFS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK1_DFS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK2_DFS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK3_DFS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK4_DFS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK5_DFS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK0_CURRENT_CNT), \ + CLK_SR_DCN35(CLK1_CLK1_CURRENT_CNT), \ + CLK_SR_DCN35(CLK1_CLK2_CURRENT_CNT), \ + CLK_SR_DCN35(CLK1_CLK3_CURRENT_CNT), \ + CLK_SR_DCN35(CLK1_CLK4_CURRENT_CNT), \ + CLK_SR_DCN35(CLK1_CLK5_CURRENT_CNT), \ + CLK_SR_DCN35(CLK1_CLK0_BYPASS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK1_BYPASS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK2_BYPASS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK3_BYPASS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK4_BYPASS_CNTL),\ + CLK_SR_DCN35(CLK1_CLK5_BYPASS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK0_DS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK1_DS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK2_DS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK3_DS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK4_DS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK5_DS_CNTL), \ + CLK_SR_DCN35(CLK1_CLK0_ALLOW_DS), \ + CLK_SR_DCN35(CLK1_CLK1_ALLOW_DS), \ + 
CLK_SR_DCN35(CLK1_CLK2_ALLOW_DS), \ + CLK_SR_DCN35(CLK1_CLK3_ALLOW_DS), \ + CLK_SR_DCN35(CLK1_CLK4_ALLOW_DS), \ + CLK_SR_DCN35(CLK1_CLK5_ALLOW_DS), \ + CLK_SR_DCN35(CLK5_spll_field_8), \ + SR(DENTIST_DISPCLK_CNTL), \ + #define CLK_COMMON_MASK_SH_LIST_DCN32(mask_sh) \ CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh),\ CLK_SF(CLK1_CLK_PLL_REQ, FbMult_int, mask_sh),\ @@ -236,6 +271,7 @@ struct clk_mgr_registers { uint32_t CLK1_CLK2_DFS_CNTL; uint32_t CLK1_CLK3_DFS_CNTL; uint32_t CLK1_CLK4_DFS_CNTL; + uint32_t CLK1_CLK5_DFS_CNTL; uint32_t CLK2_CLK2_DFS_CNTL; uint32_t CLK1_CLK0_CURRENT_CNT; @@ -243,11 +279,34 @@ struct clk_mgr_registers { uint32_t CLK1_CLK2_CURRENT_CNT; uint32_t CLK1_CLK3_CURRENT_CNT; uint32_t CLK1_CLK4_CURRENT_CNT; + uint32_t CLK1_CLK5_CURRENT_CNT; uint32_t CLK0_CLK0_DFS_CNTL; uint32_t CLK0_CLK1_DFS_CNTL; uint32_t CLK0_CLK3_DFS_CNTL; uint32_t CLK0_CLK4_DFS_CNTL; + uint32_t CLK1_CLK0_BYPASS_CNTL; + uint32_t CLK1_CLK1_BYPASS_CNTL; + uint32_t CLK1_CLK2_BYPASS_CNTL; + uint32_t CLK1_CLK3_BYPASS_CNTL; + uint32_t CLK1_CLK4_BYPASS_CNTL; + uint32_t CLK1_CLK5_BYPASS_CNTL; + + uint32_t CLK1_CLK0_DS_CNTL; + uint32_t CLK1_CLK1_DS_CNTL; + uint32_t CLK1_CLK2_DS_CNTL; + uint32_t CLK1_CLK3_DS_CNTL; + uint32_t CLK1_CLK4_DS_CNTL; + uint32_t CLK1_CLK5_DS_CNTL; + + uint32_t CLK1_CLK0_ALLOW_DS; + uint32_t CLK1_CLK1_ALLOW_DS; + uint32_t CLK1_CLK2_ALLOW_DS; + uint32_t CLK1_CLK3_ALLOW_DS; + uint32_t CLK1_CLK4_ALLOW_DS; + uint32_t CLK1_CLK5_ALLOW_DS; + uint32_t CLK5_spll_field_8; + }; struct clk_mgr_shift { diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index d78c8ec4de79e..885e749cdc6e9 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -51,9 +51,10 @@ #include "dc_dmub_srv.h" #include "gpio_service_interface.h" +#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ + #define DC_LOGGER \ link->ctx->logger -#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ #ifndef MAX #define MAX(X, Y) ((X) > (Y) ? 
(X) : (Y)) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 80386f698ae4d..0ca6358a9782e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -891,7 +891,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = true, .enable_legacy_fast_update = true, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ - .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE, + .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, .using_dml2 = false, }; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 0fa6fbee19789..bfdfba676025e 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2493,6 +2493,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): + case IP_VERSION(12, 0, 0): + case IP_VERSION(12, 0, 1): *states = ATTR_STATE_SUPPORTED; break; default: diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index e8b6989a40f35..6b34a33d788f2 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -3043,6 +3043,7 @@ static int kv_dpm_hw_init(void *handle) if (!amdgpu_dpm) return 0; + mutex_lock(&adev->pm.mutex); kv_dpm_setup_asic(adev); ret = kv_dpm_enable(adev); if (ret) @@ -3050,6 +3051,8 @@ static int kv_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; amdgpu_legacy_dpm_compute_clocks(adev); + mutex_unlock(&adev->pm.mutex); + return ret; } @@ -3067,32 +3070,42 @@ static int kv_dpm_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_work_sync(&adev->pm.dpm.thermal.work); + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm_enabled = false; /* disable dpm */ kv_dpm_disable(adev); /* reset the power state */ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + mutex_unlock(&adev->pm.mutex); } return 0; } static int kv_dpm_resume(void *handle) { - int ret; + int ret = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->pm.dpm_enabled) { + if (!amdgpu_dpm) + return 0; + + if (!adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); /* asic init will reset to the boot state */ kv_dpm_setup_asic(adev); ret = kv_dpm_enable(adev); - if (ret) + if (ret) { adev->pm.dpm_enabled = false; - else + } else { adev->pm.dpm_enabled = true; - if (adev->pm.dpm_enabled) amdgpu_legacy_dpm_compute_clocks(adev); + } + mutex_unlock(&adev->pm.mutex); } - return 0; + return ret; } static bool kv_dpm_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c index e861355ebd75b..c7518b13e7879 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c @@ -1009,9 +1009,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work) enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; int temp, size = sizeof(temp); - if (!adev->pm.dpm_enabled) - return; + mutex_lock(&adev->pm.mutex); + if (!adev->pm.dpm_enabled) { + mutex_unlock(&adev->pm.mutex); + return; + } if (!pp_funcs->read_sensor(adev->powerplay.pp_handle, AMDGPU_PP_SENSOR_GPU_TEMP, 
(void *)&temp, @@ -1033,4 +1036,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work) adev->pm.dpm.state = dpm_state; amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle); + mutex_unlock(&adev->pm.mutex); } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index a1baa13ab2c26..a5ad1b60597e6 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -7783,6 +7783,7 @@ static int si_dpm_hw_init(void *handle) if (!amdgpu_dpm) return 0; + mutex_lock(&adev->pm.mutex); si_dpm_setup_asic(adev); ret = si_dpm_enable(adev); if (ret) @@ -7790,6 +7791,7 @@ static int si_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; amdgpu_legacy_dpm_compute_clocks(adev); + mutex_unlock(&adev->pm.mutex); return ret; } @@ -7807,32 +7809,44 @@ static int si_dpm_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_work_sync(&adev->pm.dpm.thermal.work); + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm_enabled = false; /* disable dpm */ si_dpm_disable(adev); /* reset the power state */ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + mutex_unlock(&adev->pm.mutex); } + return 0; } static int si_dpm_resume(void *handle) { - int ret; + int ret = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->pm.dpm_enabled) { + if (!amdgpu_dpm) + return 0; + + if (!adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ + mutex_lock(&adev->pm.mutex); si_dpm_setup_asic(adev); ret = si_dpm_enable(adev); - if (ret) + if (ret) { adev->pm.dpm_enabled = false; - else + } else { adev->pm.dpm_enabled = true; - if (adev->pm.dpm_enabled) amdgpu_legacy_dpm_compute_clocks(adev); + } + mutex_unlock(&adev->pm.mutex); } - return 0; + + return ret; } static bool si_dpm_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index a71c6117d7e54..0115d26b5af92 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -51,6 +51,11 @@ static int amd_powerplay_create(struct amdgpu_device *adev) hwmgr->adev = adev; hwmgr->not_vf = !amdgpu_sriov_vf(adev); hwmgr->device = amdgpu_cgs_create_device(adev); + if (!hwmgr->device) { + kfree(hwmgr); + return -ENOMEM; + } + mutex_init(&hwmgr->msg_lock); hwmgr->chip_family = adev->family; hwmgr->chip_id = adev->asic_type; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c index a8fc0fa44db69..ba5c1237fcfe1 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c @@ -267,10 +267,10 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) if (hwmgr->thermal_controller.fanInfo.bNoFan || (hwmgr->thermal_controller.fanInfo. 
ucTachometerPulsesPerRevolution == 0) || - speed == 0 || + (!speed || speed > UINT_MAX/8) || (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) - return 0; + return -EINVAL; if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) smu7_fan_ctrl_stop_smc_fan_control(hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c index 379012494da57..56423aedf3fa7 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c @@ -307,10 +307,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) int result = 0; if (hwmgr->thermal_controller.fanInfo.bNoFan || - speed == 0 || + (!speed || speed > UINT_MAX/8) || (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) - return -1; + return -EINVAL; if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c index a3331ffb2daf7..1b1c88590156c 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c @@ -191,7 +191,7 @@ int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) uint32_t tach_period, crystal_clock_freq; int result = 0; - if (!speed) + if (!speed || speed > UINT_MAX/8) return -EINVAL; if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index fc1297fecc62e..d4b954b22441c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1267,6 +1267,9 @@ static int arcturus_set_fan_speed_rpm(struct smu_context *smu, uint32_t crystal_clock_freq = 2500; uint32_t tach_period; + if (!speed || speed > UINT_MAX/8) + return -EINVAL; + tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT, REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT), diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 16fcd9dcd202e..6c61e87359dd4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1199,7 +1199,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t crystal_clock_freq = 2500; uint32_t tach_period; - if (speed == 0) + if (!speed || speed > UINT_MAX/8) return -EINVAL; /* * To prevent from possible overheat, some ASICs may have requirement diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 2024a85fa11bd..4f78c84da780c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -1228,7 +1228,7 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t tach_period; int ret; - if (!speed) + if (!speed || speed > UINT_MAX/8) return -EINVAL; ret = smu_v13_0_auto_fan_control(smu, 0); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 452589adaf046..e5f619c979d80 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -1883,16 
+1883,6 @@ static int smu_v14_0_allow_ih_interrupt(struct smu_context *smu) NULL); } -static int smu_v14_0_process_pending_interrupt(struct smu_context *smu) -{ - int ret = 0; - - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT)) - ret = smu_v14_0_allow_ih_interrupt(smu); - - return ret; -} - int smu_v14_0_enable_thermal_alert(struct smu_context *smu) { int ret = 0; @@ -1904,7 +1894,7 @@ int smu_v14_0_enable_thermal_alert(struct smu_context *smu) if (ret) return ret; - return smu_v14_0_process_pending_interrupt(smu); + return smu_v14_0_allow_ih_interrupt(smu); } int smu_v14_0_disable_thermal_alert(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 9ec53431f2c32..e98a6a2f3e6ac 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -1206,16 +1206,9 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, PP_OD_FEATURE_GFXCLK_BIT)) break; - PPTable_t *pptable = smu->smu_table.driver_pptable; - const OverDriveLimits_t * const overdrive_upperlimits = - &pptable->SkuTable.OverDriveLimitsBasicMax; - const OverDriveLimits_t * const overdrive_lowerlimits = - &pptable->SkuTable.OverDriveLimitsBasicMin; - size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n"); - size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n", - overdrive_lowerlimits->GfxclkFoffset, - overdrive_upperlimits->GfxclkFoffset); + size += sysfs_emit_at(buf, size, "%dMhz\n", + od_table->OverDriveTable.GfxclkFoffset); break; case SMU_OD_MCLK: @@ -1349,13 +1342,9 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { - smu_v14_0_2_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_FMIN, - &min_value, - NULL); smu_v14_0_2_get_od_setting_limits(smu, PP_OD_FEATURE_GFXCLK_FMAX, - NULL, + &min_value, &max_value); size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n", min_value, max_value); @@ -1639,6 +1628,39 @@ static void smu_v14_0_2_get_unique_id(struct smu_context *smu) adev->unique_id = ((uint64_t)upper32 << 32) | lower32; } +static int smu_v14_0_2_get_fan_speed_pwm(struct smu_context *smu, + uint32_t *speed) +{ + int ret; + + if (!speed) + return -EINVAL; + + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_CURR_FANPWM, + speed); + if (ret) { + dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!"); + return ret; + } + + /* Convert the PMFW output which is in percent to pwm(255) based */ + *speed = min(*speed * 255 / 100, (uint32_t)255); + + return 0; +} + +static int smu_v14_0_2_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed) +{ + if (!speed) + return -EINVAL; + + return smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_CURR_FANSPEED, + speed); +} + static int smu_v14_0_2_get_power_limit(struct smu_context *smu, uint32_t *current_power_limit, uint32_t *default_power_limit, @@ -2429,36 +2451,24 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, return -ENOTSUPP; } - for (i = 0; i < size; i += 2) { - if (i + 2 > size) { - dev_info(adev->dev, "invalid number of input parameters %d\n", size); - return -EINVAL; - } - - switch (input[i]) { - case 1: - smu_v14_0_2_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_FMAX, - &minimum, - &maximum); - if (input[i + 1] < minimum || - input[i + 1] > maximum) { - dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, 
%u]!\n", - input[i + 1], minimum, maximum); - return -EINVAL; - } - - od_table->OverDriveTable.GfxclkFoffset = input[i + 1]; - od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; - break; + if (size != 1) { + dev_info(adev->dev, "invalid number of input parameters %d\n", size); + return -EINVAL; + } - default: - dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]); - dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); - return -EINVAL; - } + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_FMAX, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "GfxclkFoffset must be within [%d, %u]!\n", + minimum, maximum); + return -EINVAL; } + od_table->OverDriveTable.GfxclkFoffset = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; break; case PP_OD_EDIT_MCLK_VDDC_TABLE: @@ -2817,6 +2827,8 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .set_performance_level = smu_v14_0_set_performance_level, .gfx_off_control = smu_v14_0_gfx_off_control, .get_unique_id = smu_v14_0_2_get_unique_id, + .get_fan_speed_pwm = smu_v14_0_2_get_fan_speed_pwm, + .get_fan_speed_rpm = smu_v14_0_2_get_fan_speed_rpm, .get_power_limit = smu_v14_0_2_get_power_limit, .set_power_limit = smu_v14_0_2_set_power_limit, .get_power_profile_mode = smu_v14_0_2_get_power_profile_mode, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 0d71db7be325d..0ce1766c859f5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -459,8 +459,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, } if (read_arg) { smu_cmn_read_arg(smu, read_arg); - dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x,\ - readval: 0x%08x\n", + dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n", smu_get_message_name(smu, msg), index, param, reg, *read_arg); } else { dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n", diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index 00b364f9a71e5..5dadc895e7f26 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -17,6 +17,12 @@ static bool ast_astdp_is_connected(struct ast_device *ast) { if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, AST_IO_VGACRDF_HPD)) return false; + /* + * HPD might be set even if no monitor is connected, so also check that + * the link training was successful. 
+ */ + if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, AST_IO_VGACRDC_LINK_SUCCESS)) + return false; return true; } diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index 41f72d458487f..9ba2a667a1f3a 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -2463,9 +2463,9 @@ static int cdns_mhdp_probe(struct platform_device *pdev) if (!mhdp) return -ENOMEM; - clk = devm_clk_get(dev, NULL); + clk = devm_clk_get_enabled(dev, NULL); if (IS_ERR(clk)) { - dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk)); + dev_err(dev, "couldn't get and enable clk: %ld\n", PTR_ERR(clk)); return PTR_ERR(clk); } @@ -2504,14 +2504,12 @@ static int cdns_mhdp_probe(struct platform_device *pdev) mhdp->info = of_device_get_match_data(dev); - clk_prepare_enable(clk); - pm_runtime_enable(dev); ret = pm_runtime_resume_and_get(dev); if (ret < 0) { dev_err(dev, "pm_runtime_resume_and_get failed\n"); pm_runtime_disable(dev); - goto clk_disable; + return ret; } if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) { @@ -2590,8 +2588,6 @@ static int cdns_mhdp_probe(struct platform_device *pdev) runtime_put: pm_runtime_put_sync(dev); pm_runtime_disable(dev); -clk_disable: - clk_disable_unprepare(mhdp->clk); return ret; } @@ -2632,8 +2628,6 @@ static void cdns_mhdp_remove(struct platform_device *pdev) cancel_work_sync(&mhdp->modeset_retry_work); flush_work(&mhdp->hpd_work); /* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */ - - clk_disable_unprepare(mhdp->clk); } static const struct of_device_id mhdp_ids[] = { diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index faee8e2e82a05..967aa24b7c537 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -2042,12 +2042,13 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505) continue; } - for (i = 0; i < 5; i++) { + for (i = 0; i < 5; i++) if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] || - av[i][1] != av[i][2] || bv[i][0] != av[i][3]) + bv[i][1] != av[i][2] || bv[i][0] != av[i][3]) break; - DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d, %d", retry, i); + if (i == 5) { + DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! 
%d", retry); return true; } } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 582cf4f73a74c..95ce50ed53acf 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -480,6 +480,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata, const char *name) { struct device *dev = pdata->dev; + const struct i2c_client *client = to_i2c_client(dev); struct auxiliary_device *aux; int ret; @@ -488,6 +489,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata, return -ENOMEM; aux->name = name; + aux->id = (client->adapter->nr << 10) | client->addr; aux->dev.parent = dev; aux->dev.release = ti_sn65dsi86_aux_device_release; device_set_of_node_from_dev(&aux->dev, dev); diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index f0c6d50d8c334..3e5f721d75400 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -179,13 +179,13 @@ static int drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len) { int i; - u8 unpacked_rad[16]; + u8 unpacked_rad[16] = {}; - for (i = 0; i < lct; i++) { + for (i = 1; i < lct; i++) { if (i % 2) - unpacked_rad[i] = rad[i / 2] >> 4; + unpacked_rad[i] = rad[(i - 1) / 2] >> 4; else - unpacked_rad[i] = rad[i / 2] & BIT_MASK(4); + unpacked_rad[i] = rad[(i - 1) / 2] & 0xF; } /* TODO: Eventually add something to printk so we can format the rad @@ -4034,6 +4034,22 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) return 0; } +static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr) +{ + bool probing_done = false; + + mutex_lock(&mgr->lock); + + if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) { + probing_done = mgr->mst_primary->link_address_sent; + drm_dp_mst_topology_put_mstb(mgr->mst_primary); + } + + mutex_unlock(&mgr->lock); + + return probing_done; +} + static inline bool drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_pending_up_req *up_req) @@ -4064,8 +4080,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { - dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); - hotplug = true; + if (!primary_mstb_probing_is_done(mgr)) { + drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. 
Skip it.\n"); + } else { + dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); + hotplug = true; + } } drm_dp_mst_topology_put_mstb(mstb); @@ -4144,10 +4164,11 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type, false); + drm_dp_mst_topology_put_mstb(mst_primary); + if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { const struct drm_dp_connection_status_notify *conn_stat = &up_req->msg.u.conn_stat; - bool handle_csn; drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", conn_stat->port_number, @@ -4156,16 +4177,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) conn_stat->message_capability_status, conn_stat->input_port, conn_stat->peer_device_type); - - mutex_lock(&mgr->probe_lock); - handle_csn = mst_primary->link_address_sent; - mutex_unlock(&mgr->probe_lock); - - if (!handle_csn) { - drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it."); - kfree(up_req); - goto out_put_primary; - } } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { const struct drm_dp_resource_status_notify *res_stat = &up_req->msg.u.resource_stat; @@ -4180,9 +4191,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) list_add_tail(&up_req->next, &mgr->up_req_list); mutex_unlock(&mgr->up_req_lock); queue_work(system_long_wq, &mgr->up_req_work); - -out_put_primary: - drm_dp_mst_topology_put_mstb(mst_primary); out_clear_reply: memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); return 0; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 5186d2114a503..32902f77f00dd 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1376,7 +1376,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) mode = &new_crtc_state->mode; adjusted_mode = &new_crtc_state->adjusted_mode; - if (!new_crtc_state->mode_changed) + if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed) continue; drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n", diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 370dc676e3aa5..fd36b8fd54e9e 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -956,6 +956,10 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, if (mode != DRM_MODE_DPMS_ON) mode = DRM_MODE_DPMS_OFF; + + if (connector->dpms == mode) + goto out; + connector->dpms = mode; crtc = connector->state->crtc; diff --git a/drivers/gpu/drm/drm_client_setup.c b/drivers/gpu/drm/drm_client_setup.c new file mode 100644 index 0000000000000..5969c4ffe31ba --- /dev/null +++ b/drivers/gpu/drm/drm_client_setup.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: MIT + +#include +#include +#include +#include +#include + +/** + * drm_client_setup() - Setup in-kernel DRM clients + * @dev: DRM device + * @format: Preferred pixel format for the device. Use NULL, unless + * there is clearly a driver-preferred format. + * + * This function sets up the in-kernel DRM clients. Restore, hotplug + * events and teardown are all taken care of. + * + * Drivers should call drm_client_setup() after registering the new + * DRM device with drm_dev_register(). This function is safe to call + * even when there are no connectors present. Setup will be retried + * on the next hotplug event. + * + * The clients are destroyed by drm_dev_unregister(). 
+ */ +void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format) +{ + int ret; + + ret = drm_fbdev_client_setup(dev, format); + if (ret) + drm_warn(dev, "Failed to set up DRM client; error %d\n", ret); +} +EXPORT_SYMBOL(drm_client_setup); + +/** + * drm_client_setup_with_fourcc() - Setup in-kernel DRM clients for color mode + * @dev: DRM device + * @fourcc: Preferred pixel format as 4CC code for the device + * + * This function sets up the in-kernel DRM clients. It is equivalent + * to drm_client_setup(), but expects a 4CC code as second argument. + */ +void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc) +{ + drm_client_setup(dev, drm_format_info(fourcc)); +} +EXPORT_SYMBOL(drm_client_setup_with_fourcc); + +/** + * drm_client_setup_with_color_mode() - Setup in-kernel DRM clients for color mode + * @dev: DRM device + * @color_mode: Preferred color mode for the device + * + * This function sets up the in-kernel DRM clients. It is equivalent + * to drm_client_setup(), but expects a color mode as second argument. + * + * Do not use this function in new drivers. Prefer drm_client_setup() with a + * format of NULL. + */ +void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color_mode) +{ + u32 fourcc = drm_driver_color_mode_format(dev, color_mode); + + drm_client_setup_with_fourcc(dev, fourcc); +} +EXPORT_SYMBOL(drm_client_setup_with_color_mode); diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 0e6021235a930..994afa5a0ffb5 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1308,6 +1308,10 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name); * callback. For atomic drivers the remapping to the "ACTIVE" property is * implemented in the DRM core. * + * On atomic drivers any DPMS setproperty ioctl where the value does not + * change is completely skipped, otherwise a full atomic commit will occur. + * On legacy drivers the exact behavior is driver specific. + * * Note that this property cannot be set through the MODE_ATOMIC ioctl, * userspace must use "ACTIVE" on the CRTC instead. * diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 9d3e6dd68810e..98a37dc3324e4 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -743,7 +743,7 @@ static int bridges_show(struct seq_file *m, void *data) unsigned int idx = 0; drm_for_each_bridge_in_chain(encoder, bridge) { - drm_printf(&p, "bridge[%d]: %ps\n", idx++, bridge->funcs); + drm_printf(&p, "bridge[%u]: %ps\n", idx++, bridge->funcs); drm_printf(&p, "\ttype: [%d] %s\n", bridge->type, drm_get_connector_type_name(bridge->type)); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index eaac2e5726e75..b15ddbd65e7b5 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -492,8 +492,8 @@ EXPORT_SYMBOL(drm_fb_helper_init); * @fb_helper: driver-allocated fbdev helper * * A helper to alloc fb_info and the member cmap. Called by the driver - * within the fb_probe fb_helper callback function. Drivers do not - * need to release the allocated fb_info structure themselves, this is + * within the struct &drm_driver.fbdev_probe callback function. Drivers do + * not need to release the allocated fb_info structure themselves, this is * automatically done when calling drm_fb_helper_fini(). 
* * RETURNS: @@ -1443,67 +1443,27 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, EXPORT_SYMBOL(drm_fb_helper_pan_display); static uint32_t drm_fb_helper_find_format(struct drm_fb_helper *fb_helper, const uint32_t *formats, - size_t format_count, uint32_t bpp, uint32_t depth) + size_t format_count, unsigned int color_mode) { struct drm_device *dev = fb_helper->dev; uint32_t format; size_t i; - /* - * Do not consider YUV or other complicated formats - * for framebuffers. This means only legacy formats - * are supported (fmt->depth is a legacy field), but - * the framebuffer emulation can only deal with such - * formats, specifically RGB/BGA formats. - */ - format = drm_mode_legacy_fb_format(bpp, depth); - if (!format) - goto err; + format = drm_driver_color_mode_format(dev, color_mode); + if (!format) { + drm_info(dev, "unsupported color mode of %d\n", color_mode); + return DRM_FORMAT_INVALID; + } for (i = 0; i < format_count; ++i) { if (formats[i] == format) return format; } - -err: - /* We found nothing. */ - drm_warn(dev, "bpp/depth value of %u/%u not supported\n", bpp, depth); + drm_warn(dev, "format %p4cc not supported\n", &format); return DRM_FORMAT_INVALID; } -static uint32_t drm_fb_helper_find_color_mode_format(struct drm_fb_helper *fb_helper, - const uint32_t *formats, size_t format_count, - unsigned int color_mode) -{ - struct drm_device *dev = fb_helper->dev; - uint32_t bpp, depth; - - switch (color_mode) { - case 1: - case 2: - case 4: - case 8: - case 16: - case 24: - bpp = depth = color_mode; - break; - case 15: - bpp = 16; - depth = 15; - break; - case 32: - bpp = 32; - depth = 24; - break; - default: - drm_info(dev, "unsupported color mode of %d\n", color_mode); - return DRM_FORMAT_INVALID; - } - - return drm_fb_helper_find_format(fb_helper, formats, format_count, bpp, depth); -} - static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper, struct drm_fb_helper_surface_size *sizes) { @@ -1533,10 +1493,10 @@ static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper, if (!cmdline_mode->bpp_specified) continue; - surface_format = drm_fb_helper_find_color_mode_format(fb_helper, - plane->format_types, - plane->format_count, - cmdline_mode->bpp); + surface_format = drm_fb_helper_find_format(fb_helper, + plane->format_types, + plane->format_count, + cmdline_mode->bpp); if (surface_format != DRM_FORMAT_INVALID) break; /* found supported format */ } @@ -1546,10 +1506,10 @@ static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper, break; /* found supported format */ /* try preferred color mode */ - surface_format = drm_fb_helper_find_color_mode_format(fb_helper, - plane->format_types, - plane->format_count, - fb_helper->preferred_bpp); + surface_format = drm_fb_helper_find_format(fb_helper, + plane->format_types, + plane->format_count, + fb_helper->preferred_bpp); if (surface_format != DRM_FORMAT_INVALID) break; /* found supported format */ } @@ -1650,7 +1610,7 @@ static int drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper, /* * Allocates the backing storage and sets up the fbdev info structure through - * the ->fb_probe callback. + * the ->fbdev_probe callback. 
*/ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper) { @@ -1668,7 +1628,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper) } /* push down into drivers */ - ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); + if (dev->driver->fbdev_probe) + ret = dev->driver->fbdev_probe(fb_helper, &sizes); + else if (fb_helper->funcs) + ret = fb_helper->funcs->fb_probe(fb_helper, &sizes); if (ret < 0) return ret; @@ -1740,7 +1703,7 @@ static void drm_fb_helper_fill_var(struct fb_info *info, * instance and the drm framebuffer allocated in &drm_fb_helper.fb. * * Drivers should call this (or their equivalent setup code) from their - * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev + * &drm_driver.fbdev_probe callback after having allocated the fbdev * backing storage framebuffer. */ void drm_fb_helper_fill_info(struct fb_info *info, @@ -1896,7 +1859,7 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper) * Note that this also registers the fbdev and so allows userspace to call into * the driver through the fbdev interfaces. * - * This function will call down into the &drm_fb_helper_funcs.fb_probe callback + * This function will call down into the &drm_driver.fbdev_probe callback * to let the driver allocate and initialize the fbdev info structure and the * drm framebuffer used to back the fbdev. drm_fb_helper_fill_info() is provided * as a helper to setup simple default values for the fbdev info structure. diff --git a/drivers/gpu/drm/drm_fbdev_client.c b/drivers/gpu/drm/drm_fbdev_client.c new file mode 100644 index 0000000000000..a09382afe2fb6 --- /dev/null +++ b/drivers/gpu/drm/drm_fbdev_client.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: MIT + +#include +#include +#include +#include +#include +#include +#include + +/* + * struct drm_client_funcs + */ + +static void drm_fbdev_client_unregister(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + if (fb_helper->info) { + drm_fb_helper_unregister_info(fb_helper); + } else { + drm_client_release(&fb_helper->client); + drm_fb_helper_unprepare(fb_helper); + kfree(fb_helper); + } +} + +static int drm_fbdev_client_restore(struct drm_client_dev *client) +{ + drm_fb_helper_lastclose(client->dev); + + return 0; +} + +static int drm_fbdev_client_hotplug(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + struct drm_device *dev = client->dev; + int ret; + + if (dev->fb_helper) + return drm_fb_helper_hotplug_event(dev->fb_helper); + + ret = drm_fb_helper_init(dev, fb_helper); + if (ret) + goto err_drm_err; + + if (!drm_drv_uses_atomic_modeset(dev)) + drm_helper_disable_unused_functions(dev); + + ret = drm_fb_helper_initial_config(fb_helper); + if (ret) + goto err_drm_fb_helper_fini; + + return 0; + +err_drm_fb_helper_fini: + drm_fb_helper_fini(fb_helper); +err_drm_err: + drm_err(dev, "fbdev: Failed to setup emulation (ret=%d)\n", ret); + return ret; +} + +static const struct drm_client_funcs drm_fbdev_client_funcs = { + .owner = THIS_MODULE, + .unregister = drm_fbdev_client_unregister, + .restore = drm_fbdev_client_restore, + .hotplug = drm_fbdev_client_hotplug, +}; + +/** + * drm_fbdev_client_setup() - Setup fbdev emulation + * @dev: DRM device + * @format: Preferred color format for the device. DRM_FORMAT_XRGB8888 + * is used if this is zero. + * + * This function sets up fbdev emulation. 
Restore, hotplug events and + * teardown are all taken care of. Drivers that do suspend/resume need + * to call drm_fb_helper_set_suspend_unlocked() themselves. Simple + * drivers might use drm_mode_config_helper_suspend(). + * + * This function is safe to call even when there are no connectors present. + * Setup will be retried on the next hotplug event. + * + * The fbdev client is destroyed by drm_dev_unregister(). + * + * Returns: + * 0 on success, or a negative errno code otherwise. + */ +int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format) +{ + struct drm_fb_helper *fb_helper; + unsigned int color_mode; + int ret; + + /* TODO: Use format info throughout DRM */ + if (format) { + unsigned int bpp = drm_format_info_bpp(format, 0); + + switch (bpp) { + case 16: + color_mode = format->depth; // could also be 15 + break; + default: + color_mode = bpp; + } + } else { + switch (dev->mode_config.preferred_depth) { + case 0: + case 24: + color_mode = 32; + break; + default: + color_mode = dev->mode_config.preferred_depth; + } + } + + drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); + drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); + + fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); + if (!fb_helper) + return -ENOMEM; + drm_fb_helper_prepare(dev, fb_helper, color_mode, NULL); + + ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); + if (ret) { + drm_err(dev, "Failed to register client: %d\n", ret); + goto err_drm_client_init; + } + + drm_client_register(&fb_helper->client); + + return 0; + +err_drm_client_init: + drm_fb_helper_unprepare(fb_helper); + kfree(fb_helper); + return ret; +} +EXPORT_SYMBOL(drm_fbdev_client_setup); diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c index 119ffb28aaf95..d799cbe944cd3 100644 --- a/drivers/gpu/drm/drm_fbdev_ttm.c +++ b/drivers/gpu/drm/drm_fbdev_ttm.c @@ -71,71 +71,7 @@ static const struct fb_ops drm_fbdev_ttm_fb_ops = { static int drm_fbdev_ttm_helper_fb_probe(struct drm_fb_helper *fb_helper, struct drm_fb_helper_surface_size *sizes) { - struct drm_client_dev *client = &fb_helper->client; - struct drm_device *dev = fb_helper->dev; - struct drm_client_buffer *buffer; - struct fb_info *info; - size_t screen_size; - void *screen_buffer; - u32 format; - int ret; - - drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", - sizes->surface_width, sizes->surface_height, - sizes->surface_bpp); - - format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, - sizes->surface_depth); - buffer = drm_client_framebuffer_create(client, sizes->surface_width, - sizes->surface_height, format); - if (IS_ERR(buffer)) - return PTR_ERR(buffer); - - fb_helper->buffer = buffer; - fb_helper->fb = buffer->fb; - - screen_size = buffer->gem->size; - screen_buffer = vzalloc(screen_size); - if (!screen_buffer) { - ret = -ENOMEM; - goto err_drm_client_framebuffer_delete; - } - - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_vfree; - } - - drm_fb_helper_fill_info(info, fb_helper, sizes); - - info->fbops = &drm_fbdev_ttm_fb_ops; - - /* screen */ - info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; - info->screen_buffer = screen_buffer; - info->fix.smem_len = screen_size; - - /* deferred I/O */ - fb_helper->fbdefio.delay = HZ / 20; - fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io; - - info->fbdefio = &fb_helper->fbdefio; - ret = fb_deferred_io_init(info); - if (ret) - goto 
err_drm_fb_helper_release_info; - - return 0; - -err_drm_fb_helper_release_info: - drm_fb_helper_release_info(fb_helper); -err_vfree: - vfree(screen_buffer); -err_drm_client_framebuffer_delete: - fb_helper->fb = NULL; - fb_helper->buffer = NULL; - drm_client_framebuffer_delete(buffer); - return ret; + return drm_fbdev_ttm_driver_fbdev_probe(fb_helper, sizes); } static void drm_fbdev_ttm_damage_blit_real(struct drm_fb_helper *fb_helper, @@ -240,6 +176,82 @@ static const struct drm_fb_helper_funcs drm_fbdev_ttm_helper_funcs = { .fb_dirty = drm_fbdev_ttm_helper_fb_dirty, }; +/* + * struct drm_driver + */ + +int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct drm_client_dev *client = &fb_helper->client; + struct drm_device *dev = fb_helper->dev; + struct drm_client_buffer *buffer; + struct fb_info *info; + size_t screen_size; + void *screen_buffer; + u32 format; + int ret; + + drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", + sizes->surface_width, sizes->surface_height, + sizes->surface_bpp); + + format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, + sizes->surface_depth); + buffer = drm_client_framebuffer_create(client, sizes->surface_width, + sizes->surface_height, format); + if (IS_ERR(buffer)) + return PTR_ERR(buffer); + + fb_helper->funcs = &drm_fbdev_ttm_helper_funcs; + fb_helper->buffer = buffer; + fb_helper->fb = buffer->fb; + + screen_size = buffer->gem->size; + screen_buffer = vzalloc(screen_size); + if (!screen_buffer) { + ret = -ENOMEM; + goto err_drm_client_framebuffer_delete; + } + + info = drm_fb_helper_alloc_info(fb_helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); + goto err_vfree; + } + + drm_fb_helper_fill_info(info, fb_helper, sizes); + + info->fbops = &drm_fbdev_ttm_fb_ops; + + /* screen */ + info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; + info->screen_buffer = screen_buffer; + info->fix.smem_len = screen_size; + + /* deferred I/O */ + fb_helper->fbdefio.delay = HZ / 20; + fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io; + + info->fbdefio = &fb_helper->fbdefio; + ret = fb_deferred_io_init(info); + if (ret) + goto err_drm_fb_helper_release_info; + + return 0; + +err_drm_fb_helper_release_info: + drm_fb_helper_release_info(fb_helper); +err_vfree: + vfree(screen_buffer); +err_drm_client_framebuffer_delete: + fb_helper->fb = NULL; + fb_helper->buffer = NULL; + drm_client_framebuffer_delete(buffer); + return ret; +} +EXPORT_SYMBOL(drm_fbdev_ttm_driver_fbdev_probe); + static void drm_fbdev_ttm_client_unregister(struct drm_client_dev *client) { struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 193cf8ed79128..3a94ca211f9ce 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -36,7 +36,6 @@ * @depth: bit depth per pixel * * Computes a drm fourcc pixel format code for the given @bpp/@depth values. - * Useful in fbdev emulation code, since that deals in those values. */ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) { @@ -140,6 +139,35 @@ uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, } EXPORT_SYMBOL(drm_driver_legacy_fb_format); +/** + * drm_driver_color_mode_format - Compute DRM 4CC code from color mode + * @dev: DRM device + * @color_mode: command-line color mode + * + * Computes a DRM 4CC pixel format code for the given color mode using + * drm_driver_color_mode(). 
The color mode is in the format used and the + * kernel command line. It specifies the number of bits per pixel + * and color depth in a single value. + * + * Useful in fbdev emulation code, since that deals in those values. The + * helper does not consider YUV or other complicated formats. This means + * only legacy formats are supported (fmt->depth is a legacy field), but + * the framebuffer emulation can only deal with such formats, specifically + * RGB/BGA formats. + */ +uint32_t drm_driver_color_mode_format(struct drm_device *dev, unsigned int color_mode) +{ + switch (color_mode) { + case 15: + return drm_driver_legacy_fb_format(dev, 16, 15); + case 32: + return drm_driver_legacy_fb_format(dev, 32, 24); + default: + return drm_driver_legacy_fb_format(dev, color_mode, color_mode); + } +} +EXPORT_SYMBOL(drm_driver_color_mode_format); + /* * Internal function to query information for a given format. See * drm_format_info() for the public API. diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index 19ab0a794add3..fd8fa2e0ef6fa 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -49,7 +49,7 @@ static LIST_HEAD(panel_list); * @dev: parent device of the panel * @funcs: panel operations * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to - * the panel interface + * the panel interface (must NOT be DRM_MODE_CONNECTOR_Unknown) * * Initialize the panel structure for subsequent registration with * drm_panel_add(). @@ -57,6 +57,9 @@ static LIST_HEAD(panel_list); void drm_panel_init(struct drm_panel *panel, struct device *dev, const struct drm_panel_funcs *funcs, int connector_type) { + if (connector_type == DRM_MODE_CONNECTOR_Unknown) + DRM_WARN("%s: %s: a valid connector type is required!\n", __func__, dev_name(dev)); + INIT_LIST_HEAD(&panel->list); INIT_LIST_HEAD(&panel->followers); mutex_init(&panel->follower_lock); diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 4a73821b81f6f..c554ad8f246b6 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -93,6 +93,12 @@ static const struct drm_dmi_panel_orientation_data onegx1_pro = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data lcd640x960_leftside_up = { + .width = 640, + .height = 960, + .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP, +}; + static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = { .width = 720, .height = 1280, @@ -123,6 +129,12 @@ static const struct drm_dmi_panel_orientation_data lcd1080x1920_rightside_up = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data lcd1200x1920_leftside_up = { + .width = 1200, + .height = 1920, + .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP, +}; + static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = { .width = 1200, .height = 1920, @@ -184,10 +196,10 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"), }, .driver_data = (void *)&lcd800x1280_rightside_up, - }, { /* AYA NEO AYANEO 2 */ + }, { /* AYA NEO AYANEO 2/2S */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"), + DMI_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* AYA NEO 2021 */ @@ -202,6 +214,18 @@ static const struct 
dmi_system_id orientation_data[] = { DMI_MATCH(DMI_PRODUCT_NAME, "AIR"), }, .driver_data = (void *)&lcd1080x1920_leftside_up, + }, { /* AYA NEO Flip DS Bottom Screen */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FLIP DS"), + }, + .driver_data = (void *)&lcd640x960_leftside_up, + }, { /* AYA NEO Flip KB/DS Top Screen */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), + DMI_MATCH(DMI_PRODUCT_NAME, "FLIP"), + }, + .driver_data = (void *)&lcd1080x1920_leftside_up, }, { /* AYA NEO Founder */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYA NEO"), @@ -226,6 +250,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_MATCH(DMI_BOARD_NAME, "KUN"), }, .driver_data = (void *)&lcd1600x2560_rightside_up, + }, { /* AYA NEO SLIDE */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), + DMI_MATCH(DMI_PRODUCT_NAME, "SLIDE"), + }, + .driver_data = (void *)&lcd1080x1920_leftside_up, }, { /* AYN Loki Max */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"), @@ -315,6 +345,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), }, .driver_data = (void *)&gpd_win2, + }, { /* GPD Win 2 (correct DMI strings) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "WIN2") + }, + .driver_data = (void *)&lcd720x1280_rightside_up, }, { /* GPD Win 3 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), @@ -443,6 +479,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"), }, .driver_data = (void *)&lcd1600x2560_leftside_up, + }, { /* OneXPlayer Mini (Intel) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"), + }, + .driver_data = (void *)&lcd1200x1920_leftside_up, }, { /* OrangePi Neo */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"), diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs index 447740d79d3d2..6903e2010cb98 100644 --- a/drivers/gpu/drm/drm_panic_qr.rs +++ b/drivers/gpu/drm/drm_panic_qr.rs @@ -209,12 +209,9 @@ const FORMAT_INFOS_QR_L: [u16; 8] = [ impl Version { /// Returns the smallest QR version than can hold these segments. fn from_segments(segments: &[&Segment<'_>]) -> Option { - for v in (1..=40).map(|k| Version(k)) { - if v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum() { - return Some(v); - } - } - None + (1..=40) + .map(Version) + .find(|&v| v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum()) } fn width(&self) -> u8 { @@ -242,7 +239,7 @@ impl Version { } fn alignment_pattern(&self) -> &'static [u8] { - &ALIGNMENT_PATTERNS[self.0 - 1] + ALIGNMENT_PATTERNS[self.0 - 1] } fn poly(&self) -> &'static [u8] { @@ -479,7 +476,7 @@ struct EncodedMsg<'a> { /// Data to be put in the QR code, with correct segment encoding, padding, and /// Error Code Correction. 
impl EncodedMsg<'_> { - fn new<'a, 'b>(segments: &[&Segment<'b>], data: &'a mut [u8]) -> Option> { + fn new<'a>(segments: &[&Segment<'_>], data: &'a mut [u8]) -> Option> { let version = Version::from_segments(segments)?; let ec_size = version.ec_size(); let g1_blocks = version.g1_blocks(); @@ -492,7 +489,7 @@ impl EncodedMsg<'_> { data.fill(0); let mut em = EncodedMsg { - data: data, + data, ec_size, g1_blocks, g2_blocks, @@ -548,7 +545,7 @@ impl EncodedMsg<'_> { } self.push(&mut offset, (MODE_STOP, 4)); - let pad_offset = (offset + 7) / 8; + let pad_offset = offset.div_ceil(8); for i in pad_offset..self.version.max_data() { self.data[i] = PADDING[(i & 1) ^ (pad_offset & 1)]; } @@ -662,7 +659,7 @@ struct QrImage<'a> { impl QrImage<'_> { fn new<'a, 'b>(em: &'b EncodedMsg<'b>, qrdata: &'a mut [u8]) -> QrImage<'a> { let width = em.version.width(); - let stride = (width + 7) / 8; + let stride = width.div_ceil(8); let data = qrdata; let mut qr_image = QrImage { @@ -722,7 +719,10 @@ impl QrImage<'_> { fn is_finder(&self, x: u8, y: u8) -> bool { let end = self.width - 8; - (x < 8 && y < 8) || (x < 8 && y >= end) || (x >= end && y < 8) + #[expect(clippy::nonminimal_bool)] + { + (x < 8 && y < 8) || (x < 8 && y >= end) || (x >= end && y < 8) + } } // Alignment pattern: 5x5 squares in a grid. @@ -911,16 +911,16 @@ impl QrImage<'_> { /// /// * `url`: The base URL of the QR code. It will be encoded as Binary segment. /// * `data`: A pointer to the binary data, to be encoded. if URL is NULL, it -/// will be encoded as binary segment, otherwise it will be encoded -/// efficiently as a numeric segment, and appended to the URL. +/// will be encoded as binary segment, otherwise it will be encoded +/// efficiently as a numeric segment, and appended to the URL. /// * `data_len`: Length of the data, that needs to be encoded, must be less -/// than data_size. +/// than data_size. /// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold -/// a V40 QR code. It will then be overwritten with the QR code image. +/// a V40 QR code. It will then be overwritten with the QR code image. /// * `tmp`: A temporary buffer that the QR code encoder will use, to write the -/// segments and ECC. +/// segments and ECC. /// * `tmp_size`: Size of the temporary buffer, it must be at least 3706 bytes -/// long for V40. +/// long for V40. /// /// # Safety /// @@ -931,7 +931,7 @@ impl QrImage<'_> { /// They must remain valid for the duration of the function call. #[no_mangle] pub unsafe extern "C" fn drm_panic_qr_generate( - url: *const i8, + url: *const kernel::ffi::c_char, data: *mut u8, data_len: usize, data_size: usize, @@ -978,10 +978,11 @@ pub unsafe extern "C" fn drm_panic_qr_generate( /// * `url_len`: Length of the URL. /// /// * If `url_len` > 0, remove the 2 segments header/length and also count the -/// conversion to numeric segments. +/// conversion to numeric segments. /// * If `url_len` = 0, only removes 3 bytes for 1 binary segment. 
#[no_mangle] pub extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize { + #[expect(clippy::manual_range_contains)] if version < 1 || version > 40 { return 0; } diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c index 7e76790c6a81f..cba97d7db131d 100644 --- a/drivers/gpu/drm/gma500/mid_bios.c +++ b/drivers/gpu/drm/gma500/mid_bios.c @@ -279,6 +279,11 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv) 0, PCI_DEVFN(2, 0)); int ret = -1; + if (pci_gfx_root == NULL) { + WARN_ON(1); + return; + } + /* Get the address of the platform config vbt */ pci_read_config_dword(pci_gfx_root, 0xFC, &addr); pci_dev_put(pci_gfx_root); diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index ff93e08d5036d..5f02a5a39ab4a 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -154,6 +154,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev, return 0; err_free_mmio: + iounmap(hv->vram); vmbus_free_mmio(hv->mem->start, hv->fb_size); err_vmbus_close: vmbus_close(hdev->channel); @@ -172,6 +173,7 @@ static void hyperv_vmbus_remove(struct hv_device *hdev) vmbus_close(hdev->channel); hv_set_drvdata(hdev, NULL); + iounmap(hv->vram); vmbus_free_mmio(hv->mem->start, hv->fb_size); } diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 9447f7229b608..17a1e3801a85c 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -416,7 +416,8 @@ static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, return DIV_ROUND_UP(pixel_rate * num, den); } -static void i9xx_plane_update_noarm(struct intel_plane *plane, +static void i9xx_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -444,7 +445,8 @@ static void i9xx_plane_update_noarm(struct intel_plane *plane, } } -static void i9xx_plane_update_arm(struct intel_plane *plane, +static void i9xx_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -507,7 +509,8 @@ static void i9xx_plane_update_arm(struct intel_plane *plane, intel_plane_ggtt_offset(plane_state) + dspaddr_offset); } -static void i830_plane_update_arm(struct intel_plane *plane, +static void i830_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -517,11 +520,12 @@ static void i830_plane_update_arm(struct intel_plane *plane, * Additional breakage on i830 causes register reads to return * the last latched value instead of the last written value [ALM026]. 
*/ - i9xx_plane_update_noarm(plane, crtc_state, plane_state); - i9xx_plane_update_arm(plane, crtc_state, plane_state); + i9xx_plane_update_noarm(dsb, plane, crtc_state, plane_state); + i9xx_plane_update_arm(dsb, plane, crtc_state, plane_state); } -static void i9xx_plane_disable_arm(struct intel_plane *plane, +static void i9xx_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -549,7 +553,8 @@ static void i9xx_plane_disable_arm(struct intel_plane *plane, } static void -g4x_primary_async_flip(struct intel_plane *plane, +g4x_primary_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) @@ -569,7 +574,8 @@ g4x_primary_async_flip(struct intel_plane *plane, } static void -vlv_primary_async_flip(struct intel_plane *plane, +vlv_primary_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 293efc1f841df..4e95b8eda23f7 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -50,38 +50,38 @@ #include "skl_scaler.h" #include "skl_universal_plane.h" -static int header_credits_available(struct drm_i915_private *dev_priv, +static int header_credits_available(struct intel_display *display, enum transcoder dsi_trans) { - return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK) + return (intel_de_read(display, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK) >> FREE_HEADER_CREDIT_SHIFT; } -static int payload_credits_available(struct drm_i915_private *dev_priv, +static int payload_credits_available(struct intel_display *display, enum transcoder dsi_trans) { - return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK) + return (intel_de_read(display, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK) >> FREE_PLOAD_CREDIT_SHIFT; } -static bool wait_for_header_credits(struct drm_i915_private *dev_priv, +static bool wait_for_header_credits(struct intel_display *display, enum transcoder dsi_trans, int hdr_credit) { - if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >= + if (wait_for_us(header_credits_available(display, dsi_trans) >= hdr_credit, 100)) { - drm_err(&dev_priv->drm, "DSI header credits not released\n"); + drm_err(display->drm, "DSI header credits not released\n"); return false; } return true; } -static bool wait_for_payload_credits(struct drm_i915_private *dev_priv, +static bool wait_for_payload_credits(struct intel_display *display, enum transcoder dsi_trans, int payld_credit) { - if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >= + if (wait_for_us(payload_credits_available(display, dsi_trans) >= payld_credit, 100)) { - drm_err(&dev_priv->drm, "DSI payload credits not released\n"); + drm_err(display->drm, "DSI payload credits not released\n"); return false; } @@ -98,7 +98,7 @@ static enum transcoder dsi_port_to_transcoder(enum port port) static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; 
enum port port; @@ -108,8 +108,8 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) /* wait for header/payload credits to be released */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT); - wait_for_payload_credits(dev_priv, dsi_trans, MAX_PLOAD_CREDIT); + wait_for_header_credits(display, dsi_trans, MAX_HEADER_CREDIT); + wait_for_payload_credits(display, dsi_trans, MAX_PLOAD_CREDIT); } /* send nop DCS command */ @@ -119,22 +119,22 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) dsi->channel = 0; ret = mipi_dsi_dcs_nop(dsi); if (ret < 0) - drm_err(&dev_priv->drm, + drm_err(display->drm, "error sending DCS NOP command\n"); } /* wait for header credits to be released */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT); + wait_for_header_credits(display, dsi_trans, MAX_HEADER_CREDIT); } /* wait for LP TX in progress bit to be cleared */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - if (wait_for_us(!(intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) & + if (wait_for_us(!(intel_de_read(display, DSI_LP_MSG(dsi_trans)) & LPTX_IN_PROGRESS), 20)) - drm_err(&dev_priv->drm, "LPTX bit not cleared\n"); + drm_err(display->drm, "LPTX bit not cleared\n"); } } @@ -142,7 +142,7 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host, const struct mipi_dsi_packet *packet) { struct intel_dsi *intel_dsi = host->intel_dsi; - struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); const u8 *data = packet->payload; u32 len = packet->payload_length; @@ -150,20 +150,20 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host, /* payload queue can accept *256 bytes*, check limit */ if (len > MAX_PLOAD_CREDIT * 4) { - drm_err(&i915->drm, "payload size exceeds max queue limit\n"); + drm_err(display->drm, "payload size exceeds max queue limit\n"); return -EINVAL; } for (i = 0; i < len; i += 4) { u32 tmp = 0; - if (!wait_for_payload_credits(i915, dsi_trans, 1)) + if (!wait_for_payload_credits(display, dsi_trans, 1)) return -EBUSY; for (j = 0; j < min_t(u32, len - i, 4); j++) tmp |= *data++ << 8 * j; - intel_de_write(i915, DSI_CMD_TXPYLD(dsi_trans), tmp); + intel_de_write(display, DSI_CMD_TXPYLD(dsi_trans), tmp); } return 0; @@ -174,14 +174,14 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host, bool enable_lpdt) { struct intel_dsi *intel_dsi = host->intel_dsi; - struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); u32 tmp; - if (!wait_for_header_credits(dev_priv, dsi_trans, 1)) + if (!wait_for_header_credits(display, dsi_trans, 1)) return -EBUSY; - tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans)); + tmp = intel_de_read(display, DSI_CMD_TXHDR(dsi_trans)); if (packet->payload) tmp |= PAYLOAD_PRESENT; @@ -200,15 +200,14 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host, tmp |= ((packet->header[0] & DT_MASK) << DT_SHIFT); tmp |= (packet->header[1] << PARAM_WC_LOWER_SHIFT); tmp |= (packet->header[2] << PARAM_WC_UPPER_SHIFT); - intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp); + 
intel_de_write(display, DSI_CMD_TXHDR(dsi_trans), tmp); return 0; } void icl_dsi_frame_update(struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); u32 mode_flags; enum port port; @@ -226,12 +225,13 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state) else return; - intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), 0, DSI_FRAME_UPDATE_REQUEST); + intel_de_rmw(display, DSI_CMD_FRMCTL(port), 0, + DSI_FRAME_UPDATE_REQUEST); } static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; u32 tmp, mask, val; @@ -245,31 +245,31 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK; val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE | RTERM_SELECT(0x6); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy)); tmp &= ~mask; tmp |= val; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), mask, val); + intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), mask, val); mask = SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK; val = SWING_SEL_UPPER(0x2) | SWING_SEL_LOWER(0x2) | RCOMP_SCALAR(0x98); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW2_LN(0, phy)); tmp &= ~mask; tmp |= val; - intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), mask, val); + intel_de_write(display, ICL_PORT_TX_DW2_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_TX_DW2_AUX(phy), mask, val); mask = POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK; val = POST_CURSOR_1(0x0) | POST_CURSOR_2(0x0) | CURSOR_COEFF(0x3f); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), mask, val); + intel_de_rmw(display, ICL_PORT_TX_DW4_AUX(phy), mask, val); /* Bspec: must not use GRP register for write */ for (lane = 0; lane <= 3; lane++) - intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy), + intel_de_rmw(display, ICL_PORT_TX_DW4_LN(lane, phy), mask, val); } } @@ -277,13 +277,13 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) static void configure_dual_link_mode(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); i915_reg_t dss_ctl1_reg, dss_ctl2_reg; u32 dss_ctl1; /* FIXME: Move all DSS handling to intel_vdsc.c */ - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe); @@ -293,7 +293,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, dss_ctl2_reg = DSS_CTL2; } - dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg); + dss_ctl1 = intel_de_read(display, dss_ctl1_reg); dss_ctl1 |= SPLITTER_ENABLE; dss_ctl1 &= ~OVERLAP_PIXELS_MASK; dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap); @@ 
-308,19 +308,19 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap; if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH) - drm_err(&dev_priv->drm, + drm_err(display->drm, "DL buffer depth exceed max value\n"); dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK; dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); - intel_de_rmw(dev_priv, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK, + intel_de_rmw(display, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK, RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth)); } else { /* Interleave */ dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE; } - intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1); + intel_de_write(display, dss_ctl1_reg, dss_ctl1); } /* aka DSI 8X clock */ @@ -341,6 +341,7 @@ static int afe_clk(struct intel_encoder *encoder, static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; @@ -360,33 +361,34 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder, } for_each_dsi_port(port, intel_dsi->ports) { - intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port), + intel_de_write(display, ICL_DSI_ESC_CLK_DIV(port), esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); - intel_de_posting_read(dev_priv, ICL_DSI_ESC_CLK_DIV(port)); + intel_de_posting_read(display, ICL_DSI_ESC_CLK_DIV(port)); } for_each_dsi_port(port, intel_dsi->ports) { - intel_de_write(dev_priv, ICL_DPHY_ESC_CLK_DIV(port), + intel_de_write(display, ICL_DPHY_ESC_CLK_DIV(port), esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); - intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port)); + intel_de_posting_read(display, ICL_DPHY_ESC_CLK_DIV(port)); } if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) { for_each_dsi_port(port, intel_dsi->ports) { - intel_de_write(dev_priv, ADL_MIPIO_DW(port, 8), + intel_de_write(display, ADL_MIPIO_DW(port, 8), esc_clk_div_m_phy & TX_ESC_CLK_DIV_PHY); - intel_de_posting_read(dev_priv, ADL_MIPIO_DW(port, 8)); + intel_de_posting_read(display, ADL_MIPIO_DW(port, 8)); } } } -static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, - struct intel_dsi *intel_dsi) +static void get_dsi_io_power_domains(struct intel_dsi *intel_dsi) { + struct intel_display *display = to_intel_display(&intel_dsi->base); + struct drm_i915_private *dev_priv = to_i915(display->drm); enum port port; for_each_dsi_port(port, intel_dsi->ports) { - drm_WARN_ON(&dev_priv->drm, intel_dsi->io_wakeref[port]); + drm_WARN_ON(display->drm, intel_dsi->io_wakeref[port]); intel_dsi->io_wakeref[port] = intel_display_power_get(dev_priv, port == PORT_A ? 
@@ -397,15 +399,15 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port), + intel_de_rmw(display, ICL_DSI_IO_MODECTL(port), 0, COMBO_PHY_MODE_DSI); - get_dsi_io_power_domains(dev_priv, intel_dsi); + get_dsi_io_power_domains(intel_dsi); } static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) @@ -421,6 +423,7 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; @@ -429,32 +432,33 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) /* Step 4b(i) set loadgen select for transmit and aux lanes */ for_each_dsi_phy(phy, intel_dsi->phys) { - intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), LOADGEN_SELECT, 0); + intel_de_rmw(display, ICL_PORT_TX_DW4_AUX(phy), + LOADGEN_SELECT, 0); for (lane = 0; lane <= 3; lane++) - intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy), + intel_de_rmw(display, ICL_PORT_TX_DW4_LN(lane, phy), LOADGEN_SELECT, lane != 2 ? LOADGEN_SELECT : 0); } /* Step 4b(ii) set latency optimization for transmit and aux lanes */ for_each_dsi_phy(phy, intel_dsi->phys) { - intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), + intel_de_rmw(display, ICL_PORT_TX_DW2_AUX(phy), FRC_LATENCY_OPTIM_MASK, FRC_LATENCY_OPTIM_VAL(0x5)); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW2_LN(0, phy)); tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); - intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); + intel_de_write(display, ICL_PORT_TX_DW2_GRP(phy), tmp); /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv) || - (DISPLAY_VER(dev_priv) >= 12)) { - intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), + (DISPLAY_VER(display) >= 12)) { + intel_de_rmw(display, ICL_PORT_PCS_DW1_AUX(phy), LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0)); - tmp = intel_de_read(dev_priv, + tmp = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy)); tmp &= ~LATENCY_OPTIM_MASK; tmp |= LATENCY_OPTIM_VAL(0x1); - intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), + intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy), tmp); } } @@ -463,17 +467,17 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; /* clear common keeper enable bit */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy)); tmp &= ~COMMON_KEEPER_EN; - intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0); + intel_de_write(display, 
ICL_PORT_PCS_DW1_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0); } /* @@ -482,14 +486,15 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) * as part of lane phy sequence configuration */ for_each_dsi_phy(phy, intel_dsi->phys) - intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG); + intel_de_rmw(display, ICL_PORT_CL_DW5(phy), 0, + SUS_CLOCK_CONFIG); /* Clear training enable to change swing values */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy)); tmp &= ~TX_TRAINING_EN; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0); + intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0); } /* Program swing and de-emphasis */ @@ -497,26 +502,26 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) /* Set training enable to trigger update */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); + tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy)); tmp |= TX_TRAINING_EN; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN); + intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp); + intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN); } } static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; for_each_dsi_port(port, intel_dsi->ports) { - intel_de_rmw(dev_priv, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE); + intel_de_rmw(display, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE); - if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & + if (wait_for_us(!(intel_de_read(display, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), 500)) - drm_err(&dev_priv->drm, "DDI port:%c buffer idle\n", + drm_err(display->drm, "DDI port:%c buffer idle\n", port_name(port)); } } @@ -525,6 +530,7 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; @@ -532,12 +538,12 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, /* Program DPHY clock lanes timings */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port), + intel_de_write(display, DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg); /* Program DPHY data lanes timings */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port), + intel_de_write(display, DPHY_DATA_TIMING_PARAM(port), intel_dsi->dphy_data_lane_reg); /* @@ -546,10 +552,10 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, * a value '0' inside TA_PARAM_REGISTERS otherwise * leave all fields at HW default values. 
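(The icl_dsi.c hunks above and below all apply one mechanical conversion: derive a struct intel_display pointer with to_intel_display() instead of a struct drm_i915_private with to_i915(), then hand it to the intel_de_*() register helpers and use display->drm for drm_err()/drm_dbg_kms(). A minimal sketch of the idiom, assuming the usual i915 display headers; example_old()/example_new() are hypothetical names, not functions from this patch.)

/* Hypothetical before/after illustration of the dev_priv -> display conversion. */
static void example_old(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(PORT_A), 0, COMBO_PHY_MODE_DSI);
	drm_err(&dev_priv->drm, "DSI link not ready\n");
}

static void example_new(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	intel_de_rmw(display, ICL_DSI_IO_MODECTL(PORT_A), 0, COMBO_PHY_MODE_DSI);
	drm_err(display->drm, "DSI link not ready\n");
}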
*/ - if (DISPLAY_VER(dev_priv) == 11) { + if (DISPLAY_VER(display) == 11) { if (afe_clk(encoder, crtc_state) <= 800000) { for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port), + intel_de_rmw(display, DPHY_TA_TIMING_PARAM(port), TA_SURE_MASK, TA_SURE_OVERRIDE | TA_SURE(0)); } @@ -557,7 +563,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { for_each_dsi_phy(phy, intel_dsi->phys) - intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy), + intel_de_rmw(display, ICL_DPHY_CHKN(phy), 0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP); } } @@ -566,30 +572,30 @@ static void gen11_dsi_setup_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; /* Program T-INIT master registers */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port), + intel_de_rmw(display, ICL_DSI_T_INIT_MASTER(port), DSI_T_INIT_MASTER_MASK, intel_dsi->init_count); /* shadow register inside display core */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port), + intel_de_write(display, DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg); /* shadow register inside display core */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port), + intel_de_write(display, DSI_DATA_TIMING_PARAM(port), intel_dsi->dphy_data_lane_reg); /* shadow register inside display core */ - if (DISPLAY_VER(dev_priv) == 11) { + if (DISPLAY_VER(display) == 11) { if (afe_clk(encoder, crtc_state) <= 800000) { for_each_dsi_port(port, intel_dsi->ports) { - intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port), + intel_de_rmw(display, DSI_TA_TIMING_PARAM(port), TA_SURE_MASK, TA_SURE_OVERRIDE | TA_SURE(0)); } @@ -599,45 +605,45 @@ gen11_dsi_setup_timings(struct intel_encoder *encoder, static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; - mutex_lock(&dev_priv->display.dpll.lock); - tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); + mutex_lock(&display->dpll.lock); + tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); - mutex_unlock(&dev_priv->display.dpll.lock); + intel_de_write(display, ICL_DPCLKA_CFGCR0, tmp); + mutex_unlock(&display->dpll.lock); } static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; enum phy phy; - mutex_lock(&dev_priv->display.dpll.lock); - tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); + mutex_lock(&display->dpll.lock); + tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); - mutex_unlock(&dev_priv->display.dpll.lock); + intel_de_write(display, ICL_DPCLKA_CFGCR0, tmp); + mutex_unlock(&display->dpll.lock); } static bool 
gen11_dsi_is_clock_enabled(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); bool clock_enabled = false; enum phy phy; u32 tmp; - tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); + tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) { if (!(tmp & ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy))) @@ -650,36 +656,36 @@ static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder) static void gen11_dsi_map_pll(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum phy phy; u32 val; - mutex_lock(&dev_priv->display.dpll.lock); + mutex_lock(&display->dpll.lock); - val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); + val = intel_de_read(display, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) { val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); } - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); + intel_de_write(display, ICL_DPCLKA_CFGCR0, val); for_each_dsi_phy(phy, intel_dsi->phys) { val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); + intel_de_write(display, ICL_DPCLKA_CFGCR0, val); - intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0); + intel_de_posting_read(display, ICL_DPCLKA_CFGCR0); - mutex_unlock(&dev_priv->display.dpll.lock); + mutex_unlock(&display->dpll.lock); } static void gen11_dsi_configure_transcoder(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); enum pipe pipe = crtc->pipe; @@ -689,7 +695,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)); + tmp = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)); if (intel_dsi->eotp_pkt) tmp &= ~EOTP_DISABLED; @@ -745,7 +751,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, } } - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { if (is_vid_mode(intel_dsi)) tmp |= BLANKING_PACKET_ENABLE; } @@ -778,15 +784,15 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, tmp |= TE_SOURCE_GPIO; } - intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp); + intel_de_write(display, DSI_TRANS_FUNC_CONF(dsi_trans), tmp); } /* enable port sync mode if dual link */ if (intel_dsi->dual_link) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_rmw(dev_priv, - TRANS_DDI_FUNC_CTL2(dev_priv, dsi_trans), + intel_de_rmw(display, + TRANS_DDI_FUNC_CTL2(display, dsi_trans), 0, PORT_SYNC_MODE_ENABLE); } @@ -798,10 +804,10 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, dsi_trans = dsi_port_to_transcoder(port); /* select data lane width */ - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans)); - tmp &= ~DDI_PORT_WIDTH_MASK; - 
tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count); + tmp = intel_de_read(display, + TRANS_DDI_FUNC_CTL(display, dsi_trans)); + tmp &= ~TRANS_DDI_PORT_WIDTH_MASK; + tmp |= TRANS_DDI_PORT_WIDTH(intel_dsi->lane_count); /* select input pipe */ tmp &= ~TRANS_DDI_EDP_INPUT_MASK; @@ -825,16 +831,16 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, /* enable DDI buffer */ tmp |= TRANS_DDI_FUNC_ENABLE; - intel_de_write(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans), tmp); + intel_de_write(display, + TRANS_DDI_FUNC_CTL(display, dsi_trans), tmp); } /* wait for link ready */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - if (wait_for_us((intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)) & + if (wait_for_us((intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)) & LINK_READY), 2500)) - drm_err(&dev_priv->drm, "DSI link not ready\n"); + drm_err(display->drm, "DSI link not ready\n"); } } @@ -842,7 +848,7 @@ static void gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -909,17 +915,17 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, /* minimum hactive as per bspec: 256 pixels */ if (adjusted_mode->crtc_hdisplay < 256) - drm_err(&dev_priv->drm, "hactive is less then 256 pixels\n"); + drm_err(display->drm, "hactive is less then 256 pixels\n"); /* if RGB666 format, then hactive must be multiple of 4 pixels */ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0) - drm_err(&dev_priv->drm, + drm_err(display->drm, "hactive pixels are not multiple of 4\n"); /* program TRANS_HTOTAL register */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, dsi_trans), + intel_de_write(display, TRANS_HTOTAL(display, dsi_trans), HACTIVE(hactive - 1) | HTOTAL(htotal - 1)); } @@ -928,12 +934,12 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, if (intel_dsi->video_mode == NON_BURST_SYNC_PULSE) { /* BSPEC: hsync size should be atleast 16 pixels */ if (hsync_size < 16) - drm_err(&dev_priv->drm, + drm_err(display->drm, "hsync size < 16 pixels\n"); } if (hback_porch < 16) - drm_err(&dev_priv->drm, "hback porch < 16 pixels\n"); + drm_err(display->drm, "hback porch < 16 pixels\n"); if (intel_dsi->dual_link) { hsync_start /= 2; @@ -942,8 +948,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, - TRANS_HSYNC(dev_priv, dsi_trans), + intel_de_write(display, + TRANS_HSYNC(display, dsi_trans), HSYNC_START(hsync_start - 1) | HSYNC_END(hsync_end - 1)); } } @@ -957,22 +963,22 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, * struct drm_display_mode. 
* For interlace mode: program required pixel minus 2 */ - intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, dsi_trans), + intel_de_write(display, TRANS_VTOTAL(display, dsi_trans), VACTIVE(vactive - 1) | VTOTAL(vtotal - 1)); } if (vsync_end < vsync_start || vsync_end > vtotal) - drm_err(&dev_priv->drm, "Invalid vsync_end value\n"); + drm_err(display->drm, "Invalid vsync_end value\n"); if (vsync_start < vactive) - drm_err(&dev_priv->drm, "vsync_start less than vactive\n"); + drm_err(display->drm, "vsync_start less than vactive\n"); /* program TRANS_VSYNC register for video mode only */ if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, - TRANS_VSYNC(dev_priv, dsi_trans), + intel_de_write(display, + TRANS_VSYNC(display, dsi_trans), VSYNC_START(vsync_start - 1) | VSYNC_END(vsync_end - 1)); } } @@ -986,8 +992,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, - TRANS_VSYNCSHIFT(dev_priv, dsi_trans), + intel_de_write(display, + TRANS_VSYNCSHIFT(display, dsi_trans), vsync_shift); } } @@ -998,11 +1004,11 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, * FIXME get rid of these local hacks and do it right, * this will not handle eg. delayed vblank correctly. */ - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, - TRANS_VBLANK(dev_priv, dsi_trans), + intel_de_write(display, + TRANS_VBLANK(display, dsi_trans), VBLANK_START(vactive - 1) | VBLANK_END(vtotal - 1)); } } @@ -1010,20 +1016,20 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_rmw(dev_priv, TRANSCONF(dev_priv, dsi_trans), 0, + intel_de_rmw(display, TRANSCONF(display, dsi_trans), 0, TRANSCONF_ENABLE); /* wait for transcoder to be enabled */ - if (intel_de_wait_for_set(dev_priv, TRANSCONF(dev_priv, dsi_trans), + if (intel_de_wait_for_set(display, TRANSCONF(display, dsi_trans), TRANSCONF_STATE_ENABLE, 10)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "DSI transcoder not enabled\n"); } } @@ -1031,7 +1037,7 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; @@ -1055,21 +1061,21 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, dsi_trans = dsi_port_to_transcoder(port); /* program hst_tx_timeout */ - intel_de_rmw(dev_priv, DSI_HSTX_TO(dsi_trans), + intel_de_rmw(display, DSI_HSTX_TO(dsi_trans), HSTX_TIMEOUT_VALUE_MASK, HSTX_TIMEOUT_VALUE(hs_tx_timeout)); /* FIXME: DSI_CALIB_TO */ /* program lp_rx_host timeout */ - intel_de_rmw(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), 
+ intel_de_rmw(display, DSI_LPRX_HOST_TO(dsi_trans), LPRX_TIMEOUT_VALUE_MASK, LPRX_TIMEOUT_VALUE(lp_rx_timeout)); /* FIXME: DSI_PWAIT_TO */ /* program turn around timeout */ - intel_de_rmw(dev_priv, DSI_TA_TO(dsi_trans), + intel_de_rmw(display, DSI_TA_TO(dsi_trans), TA_TIMEOUT_VALUE_MASK, TA_TIMEOUT_VALUE(ta_timeout)); } @@ -1078,7 +1084,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, static void gen11_dsi_config_util_pin(struct intel_encoder *encoder, bool enable) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 tmp; @@ -1090,7 +1096,7 @@ static void gen11_dsi_config_util_pin(struct intel_encoder *encoder, if (is_vid_mode(intel_dsi) || (intel_dsi->ports & BIT(PORT_B))) return; - tmp = intel_de_read(dev_priv, UTIL_PIN_CTL); + tmp = intel_de_read(display, UTIL_PIN_CTL); if (enable) { tmp |= UTIL_PIN_DIRECTION_INPUT; @@ -1098,7 +1104,7 @@ static void gen11_dsi_config_util_pin(struct intel_encoder *encoder, } else { tmp &= ~UTIL_PIN_ENABLE; } - intel_de_write(dev_priv, UTIL_PIN_CTL, tmp); + intel_de_write(display, UTIL_PIN_CTL, tmp); } static void @@ -1136,7 +1142,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi; enum port port; @@ -1152,14 +1158,14 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) * FIXME: This uses the number of DW's currently in the payload * receive queue. This is probably not what we want here. */ - tmp = intel_de_read(dev_priv, DSI_CMD_RXCTL(dsi_trans)); + tmp = intel_de_read(display, DSI_CMD_RXCTL(dsi_trans)); tmp &= NUMBER_RX_PLOAD_DW_MASK; /* multiply "Number Rx Payload DW" by 4 to get max value */ tmp = tmp * 4; dsi = intel_dsi->dsi_hosts[port]->device; ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp); if (ret < 0) - drm_err(&dev_priv->drm, + drm_err(display->drm, "error setting max return pkt size%d\n", tmp); } @@ -1219,10 +1225,10 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state, static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder, enum pipe pipe, bool enable) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B) - intel_de_rmw(dev_priv, CHICKEN_PAR1_1, + if (DISPLAY_VER(display) == 11 && pipe == PIPE_B) + intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_KVMR_PIPE_A, enable ? 
IGNORE_KVMR_PIPE_A : 0); } @@ -1235,13 +1241,13 @@ static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder, */ static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - if (DISPLAY_VER(i915) == 13) { + if (DISPLAY_VER(display) == 13) { for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(i915, TGL_DSI_CHKN_REG(port), + intel_de_rmw(display, TGL_DSI_CHKN_REG(port), TGL_DSI_CHKN_LSHS_GB_MASK, TGL_DSI_CHKN_LSHS_GB(4)); } @@ -1275,7 +1281,7 @@ static void gen11_dsi_enable(struct intel_atomic_state *state, static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; @@ -1284,13 +1290,13 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) dsi_trans = dsi_port_to_transcoder(port); /* disable transcoder */ - intel_de_rmw(dev_priv, TRANSCONF(dev_priv, dsi_trans), + intel_de_rmw(display, TRANSCONF(display, dsi_trans), TRANSCONF_ENABLE, 0); /* wait for transcoder to be disabled */ - if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dev_priv, dsi_trans), + if (intel_de_wait_for_clear(display, TRANSCONF(display, dsi_trans), TRANSCONF_STATE_ENABLE, 50)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "DSI trancoder not disabled\n"); } } @@ -1307,7 +1313,7 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder) static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; @@ -1316,29 +1322,29 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) /* disable periodic update mode */ if (is_cmd_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), + intel_de_rmw(display, DSI_CMD_FRMCTL(port), DSI_PERIODIC_FRAME_UPDATE_ENABLE, 0); } /* put dsi link in ULPS */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)); + tmp = intel_de_read(display, DSI_LP_MSG(dsi_trans)); tmp |= LINK_ENTER_ULPS; tmp &= ~LINK_ULPS_TYPE_LP11; - intel_de_write(dev_priv, DSI_LP_MSG(dsi_trans), tmp); + intel_de_write(display, DSI_LP_MSG(dsi_trans), tmp); - if (wait_for_us((intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) & + if (wait_for_us((intel_de_read(display, DSI_LP_MSG(dsi_trans)) & LINK_IN_ULPS), 10)) - drm_err(&dev_priv->drm, "DSI link not in ULPS\n"); + drm_err(display->drm, "DSI link not in ULPS\n"); } /* disable ddi function */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_rmw(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans), + intel_de_rmw(display, + TRANS_DDI_FUNC_CTL(display, dsi_trans), TRANS_DDI_FUNC_ENABLE, 0); } @@ -1346,8 +1352,8 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) if (intel_dsi->dual_link) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_rmw(dev_priv, - 
TRANS_DDI_FUNC_CTL2(dev_priv, dsi_trans), + intel_de_rmw(display, + TRANS_DDI_FUNC_CTL2(display, dsi_trans), PORT_SYNC_MODE_ENABLE, 0); } } @@ -1355,18 +1361,18 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) static void gen11_dsi_disable_port(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; gen11_dsi_ungate_clocks(encoder); for_each_dsi_port(port, intel_dsi->ports) { - intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0); + intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0); - if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) & + if (wait_for_us((intel_de_read(display, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), 8)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "DDI port:%c buffer not idle\n", port_name(port)); } @@ -1375,6 +1381,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; @@ -1392,7 +1399,7 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) /* set mode to DDI */ for_each_dsi_port(port, intel_dsi->ports) - intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port), + intel_de_rmw(display, ICL_DSI_IO_MODECTL(port), COMBO_PHY_MODE_DSI, 0); } @@ -1504,8 +1511,7 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder, static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi) { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); enum transcoder dsi_trans; u32 val; @@ -1514,7 +1520,7 @@ static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi) else dsi_trans = TRANSCODER_DSI_0; - val = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)); + val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)); return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE); } @@ -1557,7 +1563,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, static void gen11_dsi_sync_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crtc *intel_crtc; enum pipe pipe; @@ -1568,9 +1574,9 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder, pipe = intel_crtc->pipe; /* wa verify 1409054076:icl,jsl,ehl */ - if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B && - !(intel_de_read(dev_priv, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A)) - drm_dbg_kms(&dev_priv->drm, + if (DISPLAY_VER(display) == 11 && pipe == PIPE_B && + !(intel_de_read(display, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A)) + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] BIOS left IGNORE_KVMR_PIPE_A cleared with pipe B enabled\n", encoder->base.base.id, encoder->base.name); @@ -1579,9 +1585,9 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder, static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = 
to_intel_display(encoder); struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; - int dsc_max_bpc = DISPLAY_VER(dev_priv) >= 12 ? 12 : 10; + int dsc_max_bpc = DISPLAY_VER(display) >= 12 ? 12 : 10; bool use_dsc; int ret; @@ -1606,12 +1612,12 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, return ret; /* DSI specific sanity checks on the common code */ - drm_WARN_ON(&dev_priv->drm, vdsc_cfg->vbr_enable); - drm_WARN_ON(&dev_priv->drm, vdsc_cfg->simple_422); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, vdsc_cfg->vbr_enable); + drm_WARN_ON(display->drm, vdsc_cfg->simple_422); + drm_WARN_ON(display->drm, vdsc_cfg->pic_width % vdsc_cfg->slice_width); - drm_WARN_ON(&dev_priv->drm, vdsc_cfg->slice_height < 8); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, vdsc_cfg->slice_height < 8); + drm_WARN_ON(display->drm, vdsc_cfg->pic_height % vdsc_cfg->slice_height); ret = drm_dsc_compute_rc_parameters(vdsc_cfg); @@ -1627,7 +1633,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_connector *intel_connector = intel_dsi->attached_connector; struct drm_display_mode *adjusted_mode = @@ -1661,7 +1667,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, pipe_config->clock_set = true; if (gen11_dsi_dsc_compute_config(encoder, pipe_config)) - drm_dbg_kms(&i915->drm, "Attempting to use DSC failed\n"); + drm_dbg_kms(display->drm, "Attempting to use DSC failed\n"); pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5; @@ -1679,15 +1685,13 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, static void gen11_dsi_get_power_domains(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - - get_dsi_io_power_domains(i915, - enc_to_intel_dsi(encoder)); + get_dsi_io_power_domains(enc_to_intel_dsi(encoder)); } static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum transcoder dsi_trans; @@ -1703,8 +1707,8 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans)); + tmp = intel_de_read(display, + TRANS_DDI_FUNC_CTL(display, dsi_trans)); switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { case TRANS_DDI_EDP_INPUT_A_ON: *pipe = PIPE_A; @@ -1719,11 +1723,11 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, *pipe = PIPE_D; break; default: - drm_err(&dev_priv->drm, "Invalid PIPE input\n"); + drm_err(display->drm, "Invalid PIPE input\n"); goto out; } - tmp = intel_de_read(dev_priv, TRANSCONF(dev_priv, dsi_trans)); + tmp = intel_de_read(display, TRANSCONF(display, dsi_trans)); ret = tmp & TRANSCONF_ENABLE; } out: @@ -1833,8 +1837,7 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = { static void icl_dphy_param_init(struct intel_dsi *intel_dsi) { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display 
*display = to_intel_display(&intel_dsi->base); struct intel_connector *connector = intel_dsi->attached_connector; struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; u32 tlpx_ns; @@ -1858,7 +1861,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) */ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns); if (prepare_cnt > ICL_PREPARE_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "prepare_cnt out of range (%d)\n", + drm_dbg_kms(display->drm, "prepare_cnt out of range (%d)\n", prepare_cnt); prepare_cnt = ICL_PREPARE_CNT_MAX; } @@ -1867,7 +1870,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero - ths_prepare_ns, tlpx_ns); if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "clk_zero_cnt out of range (%d)\n", clk_zero_cnt); clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX; } @@ -1875,7 +1878,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) /* trail cnt in escape clocks*/ trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns); if (trail_cnt > ICL_TRAIL_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "trail_cnt out of range (%d)\n", + drm_dbg_kms(display->drm, "trail_cnt out of range (%d)\n", trail_cnt); trail_cnt = ICL_TRAIL_CNT_MAX; } @@ -1883,7 +1886,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) /* tclk pre count in escape clocks */ tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns); if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt); tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX; } @@ -1892,7 +1895,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero - ths_prepare_ns, tlpx_ns); if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "hs_zero_cnt out of range (%d)\n", + drm_dbg_kms(display->drm, "hs_zero_cnt out of range (%d)\n", hs_zero_cnt); hs_zero_cnt = ICL_HS_ZERO_CNT_MAX; } @@ -1900,7 +1903,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) /* hs exit zero cnt in escape clocks */ exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns); if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "exit_zero_cnt out of range (%d)\n", exit_zero_cnt); exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX; @@ -1942,10 +1945,9 @@ static void icl_dsi_add_properties(struct intel_connector *connector) fixed_mode->vdisplay); } -void icl_dsi_init(struct drm_i915_private *dev_priv, +void icl_dsi_init(struct intel_display *display, const struct intel_bios_encoder_data *devdata) { - struct intel_display *display = &dev_priv->display; struct intel_dsi *intel_dsi; struct intel_encoder *encoder; struct intel_connector *intel_connector; @@ -1973,7 +1975,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, encoder->devdata = devdata; /* register DSI encoder with DRM subsystem */ - drm_encoder_init(&dev_priv->drm, &encoder->base, &gen11_dsi_encoder_funcs, + drm_encoder_init(display->drm, &encoder->base, + &gen11_dsi_encoder_funcs, DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port)); encoder->pre_pll_enable = gen11_dsi_pre_pll_enable; @@ -1998,7 +2001,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, encoder->shutdown = intel_dsi_shutdown; /* register DSI connector with DRM subsystem */ - drm_connector_init(&dev_priv->drm, connector, &gen11_dsi_connector_funcs, + drm_connector_init(display->drm, connector, + 
&gen11_dsi_connector_funcs, DRM_MODE_CONNECTOR_DSI); drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; @@ -2011,12 +2015,12 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata, NULL); - mutex_lock(&dev_priv->drm.mode_config.mutex); + mutex_lock(&display->drm->mode_config.mutex); intel_panel_add_vbt_lfp_fixed_mode(intel_connector); - mutex_unlock(&dev_priv->drm.mode_config.mutex); + mutex_unlock(&display->drm->mode_config.mutex); if (!intel_panel_preferred_fixed_mode(intel_connector)) { - drm_err(&dev_priv->drm, "DSI fixed mode info missing\n"); + drm_err(display->drm, "DSI fixed mode info missing\n"); goto err; } @@ -2029,10 +2033,10 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, else intel_dsi->ports = BIT(port); - if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) + if (drm_WARN_ON(display->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; - if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) + if (drm_WARN_ON(display->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; for_each_dsi_port(port, intel_dsi->ports) { @@ -2046,7 +2050,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv, } if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) { - drm_dbg_kms(&dev_priv->drm, "no device found\n"); + drm_dbg_kms(display->drm, "no device found\n"); goto err; } diff --git a/drivers/gpu/drm/i915/display/icl_dsi.h b/drivers/gpu/drm/i915/display/icl_dsi.h index 43fa7d72eeb18..099fc50e35b41 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.h +++ b/drivers/gpu/drm/i915/display/icl_dsi.h @@ -6,11 +6,11 @@ #ifndef __ICL_DSI_H__ #define __ICL_DSI_H__ -struct drm_i915_private; struct intel_bios_encoder_data; struct intel_crtc_state; +struct intel_display; -void icl_dsi_init(struct drm_i915_private *dev_priv, +void icl_dsi_init(struct intel_display *display, const struct intel_bios_encoder_data *devdata); void icl_dsi_frame_update(struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index e979786aa5cf3..5c2a7987cccb4 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -790,7 +790,8 @@ skl_next_plane_to_commit(struct intel_atomic_state *state, return NULL; } -void intel_plane_update_noarm(struct intel_plane *plane, +void intel_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -799,10 +800,11 @@ void intel_plane_update_noarm(struct intel_plane *plane, trace_intel_plane_update_noarm(plane, crtc); if (plane->update_noarm) - plane->update_noarm(plane, crtc_state, plane_state); + plane->update_noarm(dsb, plane, crtc_state, plane_state); } -void intel_plane_async_flip(struct intel_plane *plane, +void intel_plane_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) @@ -810,34 +812,37 @@ void intel_plane_async_flip(struct intel_plane *plane, struct intel_crtc *crtc = 
to_intel_crtc(crtc_state->uapi.crtc); trace_intel_plane_async_flip(plane, crtc, async_flip); - plane->async_flip(plane, crtc_state, plane_state, async_flip); + plane->async_flip(dsb, plane, crtc_state, plane_state, async_flip); } -void intel_plane_update_arm(struct intel_plane *plane, +void intel_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->do_async_flip && plane->async_flip) { - intel_plane_async_flip(plane, crtc_state, plane_state, true); + intel_plane_async_flip(dsb, plane, crtc_state, plane_state, true); return; } trace_intel_plane_update_arm(plane, crtc); - plane->update_arm(plane, crtc_state, plane_state); + plane->update_arm(dsb, plane, crtc_state, plane_state); } -void intel_plane_disable_arm(struct intel_plane *plane, +void intel_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); trace_intel_plane_disable_arm(plane, crtc); - plane->disable_arm(plane, crtc_state); + plane->disable_arm(dsb, plane, crtc_state); } -void intel_crtc_planes_update_noarm(struct intel_atomic_state *state, +void intel_crtc_planes_update_noarm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *new_crtc_state = @@ -862,11 +867,13 @@ void intel_crtc_planes_update_noarm(struct intel_atomic_state *state, /* TODO: for mailbox updates this should be skipped */ if (new_plane_state->uapi.visible || new_plane_state->planar_slave) - intel_plane_update_noarm(plane, new_crtc_state, new_plane_state); + intel_plane_update_noarm(dsb, plane, + new_crtc_state, new_plane_state); } } -static void skl_crtc_planes_update_arm(struct intel_atomic_state *state, +static void skl_crtc_planes_update_arm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *old_crtc_state = @@ -893,13 +900,14 @@ static void skl_crtc_planes_update_arm(struct intel_atomic_state *state, */ if (new_plane_state->uapi.visible || new_plane_state->planar_slave) - intel_plane_update_arm(plane, new_crtc_state, new_plane_state); + intel_plane_update_arm(dsb, plane, new_crtc_state, new_plane_state); else - intel_plane_disable_arm(plane, new_crtc_state); + intel_plane_disable_arm(dsb, plane, new_crtc_state); } } -static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state, +static void i9xx_crtc_planes_update_arm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *new_crtc_state = @@ -919,21 +927,22 @@ static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state, * would have to be called here as well. 
*/ if (new_plane_state->uapi.visible) - intel_plane_update_arm(plane, new_crtc_state, new_plane_state); + intel_plane_update_arm(dsb, plane, new_crtc_state, new_plane_state); else - intel_plane_disable_arm(plane, new_crtc_state); + intel_plane_disable_arm(dsb, plane, new_crtc_state); } } -void intel_crtc_planes_update_arm(struct intel_atomic_state *state, +void intel_crtc_planes_update_arm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); if (DISPLAY_VER(i915) >= 9) - skl_crtc_planes_update_arm(state, crtc); + skl_crtc_planes_update_arm(dsb, state, crtc); else - i9xx_crtc_planes_update_arm(state, crtc); + i9xx_crtc_planes_update_arm(dsb, state, crtc); } int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index 6c4fe35964650..0f982f452ff39 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -14,6 +14,7 @@ struct drm_rect; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_dsb; struct intel_plane; struct intel_plane_state; enum plane_id; @@ -32,26 +33,32 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, struct intel_crtc *crtc); void intel_plane_copy_hw_state(struct intel_plane_state *plane_state, const struct intel_plane_state *from_plane_state); -void intel_plane_async_flip(struct intel_plane *plane, +void intel_plane_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip); -void intel_plane_update_noarm(struct intel_plane *plane, +void intel_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); -void intel_plane_update_arm(struct intel_plane *plane, +void intel_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); -void intel_plane_disable_arm(struct intel_plane *plane, +void intel_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state); struct intel_plane *intel_plane_alloc(void); void intel_plane_free(struct intel_plane *plane); struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); -void intel_crtc_planes_update_noarm(struct intel_atomic_state *state, +void intel_crtc_planes_update_noarm(struct intel_dsb *dsb, + struct intel_atomic_state *state, struct intel_crtc *crtc); -void intel_crtc_planes_update_arm(struct intel_atomic_state *state, +void intel_crtc_planes_update_arm(struct intel_dsb *dsbx, + struct intel_atomic_state *state, struct intel_crtc *crtc); int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index ec55cb651d449..1fbe3cd452c13 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1912,6 +1912,23 @@ void intel_color_post_update(const struct intel_crtc_state *crtc_state) 
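(The intel_color.c hunk just below adds a new intel_color_modeset() helper that folds the LUT load, the noarm and arm color commits, and, for display version < 9, the DSPCNTR update that configures gamma/csc for the pipe bottom color into a single call. The intel_display.c hunks further down then replace the open-coded sequence in the ilk/hsw/vlv/i9xx crtc enable paths with it. Roughly, at those call sites:)

	/* before: open-coded in each crtc enable path */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/* after: one helper carries the whole sequence */
	intel_color_modeset(new_crtc_state);

(Not standalone code; this mirrors the call-site change shown in the intel_display.c hunks of this same diff.)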
i915->display.funcs.color->color_post_update(crtc_state); } +void intel_color_modeset(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + intel_color_load_luts(crtc_state); + intel_color_commit_noarm(crtc_state); + intel_color_commit_arm(crtc_state); + + if (DISPLAY_VER(display) < 9) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + + /* update DSPCNTR to configure gamma/csc for pipe bottom color */ + plane->disable_arm(NULL, plane, crtc_state); + } +} + void intel_color_prepare_commit(struct intel_atomic_state *state, struct intel_crtc *crtc) { diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h index 79f230a1709ad..ab3aaec06a2ac 100644 --- a/drivers/gpu/drm/i915/display/intel_color.h +++ b/drivers/gpu/drm/i915/display/intel_color.h @@ -28,6 +28,7 @@ void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state); void intel_color_commit_arm(const struct intel_crtc_state *crtc_state); void intel_color_post_update(const struct intel_crtc_state *crtc_state); void intel_color_load_luts(const struct intel_crtc_state *crtc_state); +void intel_color_modeset(const struct intel_crtc_state *crtc_state); void intel_color_get_config(struct intel_crtc_state *crtc_state); bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob1, diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index 6cbe81a2fa668..41e5565ff7166 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -275,7 +275,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state, } /* TODO: split into noarm+arm pair */ -static void i845_cursor_update_arm(struct intel_plane *plane, +static void i845_cursor_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -315,10 +316,11 @@ static void i845_cursor_update_arm(struct intel_plane *plane, } } -static void i845_cursor_disable_arm(struct intel_plane *plane, +static void i845_cursor_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - i845_cursor_update_arm(plane, crtc_state, NULL); + i845_cursor_update_arm(dsb, plane, crtc_state, NULL); } static bool i845_cursor_get_hw_state(struct intel_plane *plane, @@ -527,22 +529,25 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, return 0; } -static void i9xx_cursor_disable_sel_fetch_arm(struct intel_plane *plane, +static void i9xx_cursor_disable_sel_fetch_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; if (!crtc_state->enable_psr2_sel_fetch) return; - intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe), 0); + intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), 0); } -static void wa_16021440873(struct intel_plane *plane, +static void wa_16021440873(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = 
to_i915(plane->base.dev); u32 ctl = plane_state->ctl; int et_y_position = drm_rect_height(&crtc_state->pipe_src) + 1; @@ -551,16 +556,18 @@ static void wa_16021440873(struct intel_plane *plane, ctl &= ~MCURSOR_MODE_MASK; ctl |= MCURSOR_MODE_64_2B; - intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe), ctl); + intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), ctl); - intel_de_write(dev_priv, CURPOS_ERLY_TPT(dev_priv, pipe), - CURSOR_POS_Y(et_y_position)); + intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(dev_priv, pipe), + CURSOR_POS_Y(et_y_position)); } -static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane, +static void i9xx_cursor_update_sel_fetch_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -571,19 +578,17 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane, if (crtc_state->enable_psr2_su_region_et) { u32 val = intel_cursor_position(crtc_state, plane_state, true); - intel_de_write_fw(dev_priv, - CURPOS_ERLY_TPT(dev_priv, pipe), - val); + + intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(dev_priv, pipe), val); } - intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe), - plane_state->ctl); + intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), plane_state->ctl); } else { /* Wa_16021440873 */ if (crtc_state->enable_psr2_su_region_et) - wa_16021440873(plane, crtc_state, plane_state); + wa_16021440873(dsb, plane, crtc_state, plane_state); else - i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state); + i9xx_cursor_disable_sel_fetch_arm(dsb, plane, crtc_state); } } @@ -610,9 +615,11 @@ static u32 skl_cursor_wm_reg_val(const struct skl_wm_level *level) return val; } -static void skl_write_cursor_wm(struct intel_plane *plane, +static void skl_write_cursor_wm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; @@ -622,30 +629,32 @@ static void skl_write_cursor_wm(struct intel_plane *plane, int level; for (level = 0; level < i915->display.wm.num_levels; level++) - intel_de_write_fw(i915, CUR_WM(pipe, level), - skl_cursor_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level))); + intel_de_write_dsb(display, dsb, CUR_WM(pipe, level), + skl_cursor_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level))); - intel_de_write_fw(i915, CUR_WM_TRANS(pipe), - skl_cursor_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id))); + intel_de_write_dsb(display, dsb, CUR_WM_TRANS(pipe), + skl_cursor_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id))); if (HAS_HW_SAGV_WM(i915)) { const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; - intel_de_write_fw(i915, CUR_WM_SAGV(pipe), - skl_cursor_wm_reg_val(&wm->sagv.wm0)); - intel_de_write_fw(i915, CUR_WM_SAGV_TRANS(pipe), - skl_cursor_wm_reg_val(&wm->sagv.trans_wm)); + intel_de_write_dsb(display, dsb, CUR_WM_SAGV(pipe), + skl_cursor_wm_reg_val(&wm->sagv.wm0)); + intel_de_write_dsb(display, dsb, CUR_WM_SAGV_TRANS(pipe), + skl_cursor_wm_reg_val(&wm->sagv.trans_wm)); } - intel_de_write_fw(i915, CUR_BUF_CFG(pipe), - skl_cursor_ddb_reg_val(ddb)); + intel_de_write_dsb(display, dsb, CUR_BUF_CFG(pipe), + 
skl_cursor_ddb_reg_val(ddb)); } /* TODO: split into noarm+arm pair */ -static void i9xx_cursor_update_arm(struct intel_plane *plane, +static void i9xx_cursor_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0; @@ -685,38 +694,36 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane, */ if (DISPLAY_VER(dev_priv) >= 9) - skl_write_cursor_wm(plane, crtc_state); + skl_write_cursor_wm(dsb, plane, crtc_state); if (plane_state) - i9xx_cursor_update_sel_fetch_arm(plane, crtc_state, - plane_state); + i9xx_cursor_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state); else - i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state); + i9xx_cursor_disable_sel_fetch_arm(dsb, plane, crtc_state); if (plane->cursor.base != base || plane->cursor.size != fbc_ctl || plane->cursor.cntl != cntl) { if (HAS_CUR_FBC(dev_priv)) - intel_de_write_fw(dev_priv, - CUR_FBC_CTL(dev_priv, pipe), - fbc_ctl); - intel_de_write_fw(dev_priv, CURCNTR(dev_priv, pipe), cntl); - intel_de_write_fw(dev_priv, CURPOS(dev_priv, pipe), pos); - intel_de_write_fw(dev_priv, CURBASE(dev_priv, pipe), base); + intel_de_write_dsb(display, dsb, CUR_FBC_CTL(dev_priv, pipe), fbc_ctl); + intel_de_write_dsb(display, dsb, CURCNTR(dev_priv, pipe), cntl); + intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos); + intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base); plane->cursor.base = base; plane->cursor.size = fbc_ctl; plane->cursor.cntl = cntl; } else { - intel_de_write_fw(dev_priv, CURPOS(dev_priv, pipe), pos); - intel_de_write_fw(dev_priv, CURBASE(dev_priv, pipe), base); + intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos); + intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base); } } -static void i9xx_cursor_disable_arm(struct intel_plane *plane, +static void i9xx_cursor_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - i9xx_cursor_update_arm(plane, crtc_state, NULL); + i9xx_cursor_update_arm(dsb, plane, crtc_state, NULL); } static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, @@ -907,10 +914,10 @@ intel_legacy_cursor_update(struct drm_plane *_plane, } if (new_plane_state->uapi.visible) { - intel_plane_update_noarm(plane, crtc_state, new_plane_state); - intel_plane_update_arm(plane, crtc_state, new_plane_state); + intel_plane_update_noarm(NULL, plane, crtc_state, new_plane_state); + intel_plane_update_arm(NULL, plane, crtc_state, new_plane_state); } else { - intel_plane_disable_arm(plane, crtc_state); + intel_plane_disable_arm(NULL, plane, crtc_state); } if (!IS_ENABLED(CONFIG_PREEMPT_RT)) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index b1c294236cc87..34dee523f0b61 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3363,7 +3363,7 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port), XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf); - buf_ctl |= DDI_PORT_WIDTH(lane_count); + buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count); if (DISPLAY_VER(dev_priv) >= 20) buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE; @@ -4888,7 +4888,7 @@ void 
intel_ddi_init(struct intel_display *display, if (!assert_has_icl_dsi(dev_priv)) return; - icl_dsi_init(dev_priv, devdata); + icl_dsi_init(display, devdata); return; } diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h index e881bfeafb47d..e017cd4a81685 100644 --- a/drivers/gpu/drm/i915/display/intel_de.h +++ b/drivers/gpu/drm/i915/display/intel_de.h @@ -8,6 +8,7 @@ #include "i915_drv.h" #include "i915_trace.h" +#include "intel_dsb.h" #include "intel_uncore.h" static inline struct intel_uncore *__to_uncore(struct intel_display *display) @@ -233,4 +234,14 @@ __intel_de_write_notrace(struct intel_display *display, i915_reg_t reg, } #define intel_de_write_notrace(p,...) __intel_de_write_notrace(__to_intel_display(p), __VA_ARGS__) +static __always_inline void +intel_de_write_dsb(struct intel_display *display, struct intel_dsb *dsb, + i915_reg_t reg, u32 val) +{ + if (dsb) + intel_dsb_reg_write(dsb, reg, val); + else + intel_de_write_fw(display, reg, val); +} + #endif /* __INTEL_DE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index b4ef4d59da1ac..4f8899cd125d9 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -135,7 +135,8 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); -static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state); +static void bdw_set_pipe_misc(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state); /* returns HPLL frequency in kHz */ int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) @@ -715,7 +716,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc, if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes) intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); - intel_plane_disable_arm(plane, crtc_state); + intel_plane_disable_arm(NULL, plane, crtc_state); intel_crtc_wait_for_next_vblank(crtc); } @@ -1005,7 +1006,9 @@ static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state, old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin || old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax || old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband || - old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full; + old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full || + old_crtc_state->vrr.vsync_start != new_crtc_state->vrr.vsync_start || + old_crtc_state->vrr.vsync_end != new_crtc_state->vrr.vsync_end; } static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state, @@ -1172,8 +1175,8 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, * Apart from the async flip bit we want to * preserve the old state for the plane. 
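The register-write conversions above all funnel through the new intel_de_write_dsb() helper shown in intel_de.h: the write is either recorded into a DSB (Display State Buffer) for the hardware to replay later, or performed as an immediate MMIO write when no DSB is active, and legacy paths simply pass NULL. A minimal standalone sketch of that dispatch pattern, using hypothetical write_batch/mmio_write names rather than the real i915 interfaces:

#include <stdint.h>
#include <stddef.h>

struct write_batch {
	uint32_t *cmds;	/* collected offset/value pairs, replayed later */
	size_t    len;	/* caller is assumed to have sized cmds appropriately */
};

/* Stand-in for an immediate MMIO register write. */
static void mmio_write(uint32_t offset, uint32_t val)
{
	(void)offset;
	(void)val;	/* a real driver would do a volatile store here */
}

/* Queue the write into the batch when one is active, else write directly. */
static void reg_write(struct write_batch *batch, uint32_t offset, uint32_t val)
{
	if (batch) {
		batch->cmds[batch->len++] = offset;
		batch->cmds[batch->len++] = val;
	} else {
		mmio_write(offset, val);
	}
}

Because the same optional pointer is threaded through every plane callback, callers such as the legacy cursor path keep their old immediate-write behaviour by passing NULL.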
*/ - intel_plane_async_flip(plane, old_crtc_state, - old_plane_state, false); + intel_plane_async_flip(NULL, plane, + old_crtc_state, old_plane_state, false); need_vbl_wait = true; } } @@ -1315,7 +1318,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state, !(update_mask & BIT(plane->id))) continue; - intel_plane_disable_arm(plane, new_crtc_state); + intel_plane_disable_arm(NULL, plane, new_crtc_state); if (old_plane_state->uapi.visible) fb_bits |= plane->frontbuffer_bit; @@ -1502,14 +1505,6 @@ static void intel_encoders_update_pipe(struct intel_atomic_state *state, } } -static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - - plane->disable_arm(plane, crtc_state); -} - static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -1575,11 +1570,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ - intel_color_load_luts(new_crtc_state); - intel_color_commit_noarm(new_crtc_state); - intel_color_commit_arm(new_crtc_state); - /* update DSPCNTR to configure gamma for pipe bottom color */ - intel_disable_primary_plane(new_crtc_state); + intel_color_modeset(new_crtc_state); intel_initial_watermarks(state, crtc); intel_enable_transcoder(new_crtc_state); @@ -1716,7 +1707,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, intel_set_pipe_src_size(pipe_crtc_state); if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - bdw_set_pipe_misc(pipe_crtc_state); + bdw_set_pipe_misc(NULL, pipe_crtc_state); } if (!transcoder_is_dsi(cpu_transcoder)) @@ -1741,12 +1732,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ - intel_color_load_luts(pipe_crtc_state); - intel_color_commit_noarm(pipe_crtc_state); - intel_color_commit_arm(pipe_crtc_state); - /* update DSPCNTR to configure gamma/csc for pipe bottom color */ - if (DISPLAY_VER(dev_priv) < 9) - intel_disable_primary_plane(pipe_crtc_state); + intel_color_modeset(pipe_crtc_state); hsw_set_linetime_wm(pipe_crtc_state); @@ -2147,11 +2133,7 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, i9xx_pfit_enable(new_crtc_state); - intel_color_load_luts(new_crtc_state); - intel_color_commit_noarm(new_crtc_state); - intel_color_commit_arm(new_crtc_state); - /* update DSPCNTR to configure gamma for pipe bottom color */ - intel_disable_primary_plane(new_crtc_state); + intel_color_modeset(new_crtc_state); intel_initial_watermarks(state, crtc); intel_enable_transcoder(new_crtc_state); @@ -2187,11 +2169,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, i9xx_pfit_enable(new_crtc_state); - intel_color_load_luts(new_crtc_state); - intel_color_commit_noarm(new_crtc_state); - intel_color_commit_arm(new_crtc_state); - /* update DSPCNTR to configure gamma for pipe bottom color */ - intel_disable_primary_plane(new_crtc_state); + intel_color_modeset(new_crtc_state); if (!intel_initial_watermarks(state, crtc)) intel_update_watermarks(dev_priv); @@ -3246,9 +3224,11 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder)); } -static void bdw_set_pipe_misc(const struct 
intel_crtc_state *crtc_state) +static void bdw_set_pipe_misc(struct intel_dsb *dsb, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_display *display = to_intel_display(crtc->base.dev); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 val = 0; @@ -3293,7 +3273,7 @@ static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state) if (IS_BROADWELL(dev_priv)) val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE; - intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val); + intel_de_write_dsb(display, dsb, PIPE_MISC(crtc->pipe), val); } int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc) @@ -6369,12 +6349,30 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_plane_state *plane_state; struct intel_crtc_state *crtc_state; + struct intel_plane *plane; struct intel_crtc *crtc; u8 affected_pipes = 0; u8 modeset_pipes = 0; int i; + /* + * Any plane which is in use by the joiner needs its crtc. + * Pull those in first as this will not have happened yet + * if the plane remains disabled according to uapi. + */ + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + crtc = to_intel_crtc(plane_state->hw.crtc); + if (!crtc) + continue; + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } + + /* Now pull in all joined crtcs */ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { affected_pipes |= crtc_state->joiner_pipes; if (intel_crtc_needs_modeset(crtc_state)) @@ -6828,7 +6826,7 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state, intel_color_commit_arm(new_crtc_state); if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - bdw_set_pipe_misc(new_crtc_state); + bdw_set_pipe_misc(NULL, new_crtc_state); if (intel_crtc_needs_fastset(new_crtc_state)) intel_pipe_fastset(old_crtc_state, new_crtc_state); @@ -6928,7 +6926,7 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state, intel_crtc_needs_color_update(new_crtc_state)) intel_color_commit_noarm(new_crtc_state); - intel_crtc_planes_update_noarm(state, crtc); + intel_crtc_planes_update_noarm(NULL, state, crtc); } static void intel_update_crtc(struct intel_atomic_state *state, @@ -6944,7 +6942,7 @@ static void intel_update_crtc(struct intel_atomic_state *state, commit_pipe_pre_planes(state, crtc); - intel_crtc_planes_update_arm(state, crtc); + intel_crtc_planes_update_arm(NULL, state, crtc); commit_pipe_post_planes(state, crtc); @@ -7442,9 +7440,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ dev_priv->display.funcs.display->commit_modeset_enables(state); - if (state->modeset) - intel_set_cdclk_post_plane_update(state); - intel_wait_for_vblank_workers(state); /* FIXME: We should call drm_atomic_helper_commit_hw_done() here @@ -7525,6 +7520,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_verify_planes(state); intel_sagv_post_plane_update(state); + if (state->modeset) + intel_set_cdclk_post_plane_update(state); intel_pmdemand_post_plane_update(state); drm_atomic_helper_commit_hw_done(&state->base); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index f29e5dc3db910..3e24d2e90d3cf 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1036,6 +1036,10 @@ struct intel_csc_matrix { u16 postoff[3]; }; +void intel_io_mmio_fw_write(void *ctx, i915_reg_t reg, u32 val); + +typedef void (*intel_io_reg_write)(void *ctx, i915_reg_t reg, u32 val); + struct intel_crtc_state { /* * uapi (drm) state. This is the software state shown to userspace. @@ -1578,22 +1582,26 @@ struct intel_plane { u32 pixel_format, u64 modifier, unsigned int rotation); /* Write all non-self arming plane registers */ - void (*update_noarm)(struct intel_plane *plane, + void (*update_noarm)(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); /* Write all self-arming plane registers */ - void (*update_arm)(struct intel_plane *plane, + void (*update_arm)(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); /* Disable the plane, must arm */ - void (*disable_arm)(struct intel_plane *plane, + void (*disable_arm)(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state); bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); int (*check_plane)(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); int (*min_cdclk)(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); - void (*async_flip)(struct intel_plane *plane, + void (*async_flip)(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 40bedc31d6bf2..5d8f93d4cdc6a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -1561,7 +1561,7 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp, if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) { lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n"); - return false; + goto out; } if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) && @@ -1573,6 +1573,19 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp, passed ? "passed" : "failed", crtc_state->port_clock, crtc_state->lane_count); +out: + /* + * Ensure that the training pattern does get set to TPS2 even in case + * of a failure, as is the case at the end of a passing link training + * and what is expected by the transcoder. 
Leaving TPS1 set (and + * disabling the link train mode in DP_TP_CTL later from TPS1 directly) + * would result in a stuck transcoder HW state and flip-done timeouts + * later in the modeset sequence. + */ + if (!passed) + intel_dp_program_link_training_pattern(intel_dp, crtc_state, + DP_PHY_DPRX, DP_TRAINING_PATTERN_2); + return passed; } diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index e657b09ede999..e6fadcef58e06 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -378,7 +378,8 @@ static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state) } static void -vlv_sprite_update_noarm(struct intel_plane *plane, +vlv_sprite_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -399,7 +400,8 @@ vlv_sprite_update_noarm(struct intel_plane *plane, } static void -vlv_sprite_update_arm(struct intel_plane *plane, +vlv_sprite_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -449,7 +451,8 @@ vlv_sprite_update_arm(struct intel_plane *plane, } static void -vlv_sprite_disable_arm(struct intel_plane *plane, +vlv_sprite_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(plane->base.dev); @@ -795,7 +798,8 @@ static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state) } static void -ivb_sprite_update_noarm(struct intel_plane *plane, +ivb_sprite_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -826,7 +830,8 @@ ivb_sprite_update_noarm(struct intel_plane *plane, } static void -ivb_sprite_update_arm(struct intel_plane *plane, +ivb_sprite_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -874,7 +879,8 @@ ivb_sprite_update_arm(struct intel_plane *plane, } static void -ivb_sprite_disable_arm(struct intel_plane *plane, +ivb_sprite_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(plane->base.dev); @@ -1133,7 +1139,8 @@ static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state) } static void -g4x_sprite_update_noarm(struct intel_plane *plane, +g4x_sprite_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -1162,7 +1169,8 @@ g4x_sprite_update_noarm(struct intel_plane *plane, } static void -g4x_sprite_update_arm(struct intel_plane *plane, +g4x_sprite_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -1206,7 +1214,8 @@ g4x_sprite_update_arm(struct intel_plane *plane, } static void -g4x_sprite_disable_arm(struct intel_plane *plane, +g4x_sprite_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(plane->base.dev); diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c 
b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 62a5287ea1d9c..7f77a76309bd5 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -589,11 +589,11 @@ static u32 skl_plane_min_alignment(struct intel_plane *plane, * in full-range YCbCr. */ static void -icl_program_input_csc(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, +icl_program_input_csc(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; @@ -637,31 +637,31 @@ icl_program_input_csc(struct intel_plane *plane, }; const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding]; - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), - ROFF(csc[0]) | GOFF(csc[1])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), - BOFF(csc[2])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), - ROFF(csc[3]) | GOFF(csc[4])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), - BOFF(csc[5])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), - ROFF(csc[6]) | GOFF(csc[7])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), - BOFF(csc[8])); - - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), - PREOFF_YUV_TO_RGB_HI); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), - PREOFF_YUV_TO_RGB_ME); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), - PREOFF_YUV_TO_RGB_LO); - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0); - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), + ROFF(csc[0]) | GOFF(csc[1])); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), + BOFF(csc[2])); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), + ROFF(csc[3]) | GOFF(csc[4])); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), + BOFF(csc[5])); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), + ROFF(csc[6]) | GOFF(csc[7])); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), + BOFF(csc[8])); + + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), + PREOFF_YUV_TO_RGB_HI); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), + PREOFF_YUV_TO_RGB_ME); + intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), + PREOFF_YUV_TO_RGB_LO); + intel_de_write_dsb(display, dsb, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); + intel_de_write_dsb(display, dsb, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0); + intel_de_write_dsb(display, dsb, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0); } static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, @@ -715,9 +715,11 @@ static u32 skl_plane_wm_reg_val(const struct skl_wm_level *level) return val; } -static void skl_write_plane_wm(struct intel_plane *plane, +static void skl_write_plane_wm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) 
{ + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; @@ -729,71 +731,75 @@ static void skl_write_plane_wm(struct intel_plane *plane, int level; for (level = 0; level < i915->display.wm.num_levels; level++) - intel_de_write_fw(i915, PLANE_WM(pipe, plane_id, level), - skl_plane_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level))); + intel_de_write_dsb(display, dsb, PLANE_WM(pipe, plane_id, level), + skl_plane_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level))); - intel_de_write_fw(i915, PLANE_WM_TRANS(pipe, plane_id), - skl_plane_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id))); + intel_de_write_dsb(display, dsb, PLANE_WM_TRANS(pipe, plane_id), + skl_plane_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id))); if (HAS_HW_SAGV_WM(i915)) { const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; - intel_de_write_fw(i915, PLANE_WM_SAGV(pipe, plane_id), - skl_plane_wm_reg_val(&wm->sagv.wm0)); - intel_de_write_fw(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id), - skl_plane_wm_reg_val(&wm->sagv.trans_wm)); + intel_de_write_dsb(display, dsb, PLANE_WM_SAGV(pipe, plane_id), + skl_plane_wm_reg_val(&wm->sagv.wm0)); + intel_de_write_dsb(display, dsb, PLANE_WM_SAGV_TRANS(pipe, plane_id), + skl_plane_wm_reg_val(&wm->sagv.trans_wm)); } - intel_de_write_fw(i915, PLANE_BUF_CFG(pipe, plane_id), - skl_plane_ddb_reg_val(ddb)); + intel_de_write_dsb(display, dsb, PLANE_BUF_CFG(pipe, plane_id), + skl_plane_ddb_reg_val(ddb)); if (DISPLAY_VER(i915) < 11) - intel_de_write_fw(i915, PLANE_NV12_BUF_CFG(pipe, plane_id), - skl_plane_ddb_reg_val(ddb_y)); + intel_de_write_dsb(display, dsb, PLANE_NV12_BUF_CFG(pipe, plane_id), + skl_plane_ddb_reg_val(ddb_y)); } static void -skl_plane_disable_arm(struct intel_plane *plane, +skl_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - skl_write_plane_wm(plane, crtc_state); + skl_write_plane_wm(dsb, plane, crtc_state); - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), 0); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), 0); } -static void icl_plane_disable_sel_fetch_arm(struct intel_plane *plane, +static void icl_plane_disable_sel_fetch_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; if (!crtc_state->enable_psr2_sel_fetch) return; - intel_de_write_fw(i915, SEL_FETCH_PLANE_CTL(pipe, plane->id), 0); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_CTL(pipe, plane->id), 0); } static void -icl_plane_disable_arm(struct intel_plane *plane, +icl_plane_disable_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; if (icl_is_hdr_plane(dev_priv, plane_id)) - intel_de_write_fw(dev_priv, 
PLANE_CUS_CTL(pipe, plane_id), 0); + intel_de_write_dsb(display, dsb, PLANE_CUS_CTL(pipe, plane_id), 0); - skl_write_plane_wm(plane, crtc_state); + skl_write_plane_wm(dsb, plane, crtc_state); - icl_plane_disable_sel_fetch_arm(plane, crtc_state); - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); + icl_plane_disable_sel_fetch_arm(dsb, plane, crtc_state); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), 0); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), 0); } static bool @@ -1230,28 +1236,30 @@ static u32 skl_plane_keymsk(const struct intel_plane_state *plane_state) return keymsk; } -static void icl_plane_csc_load_black(struct intel_plane *plane) +static void icl_plane_csc_load_black(struct intel_dsb *dsb, + struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 0), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 1), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 0), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 1), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 2), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 3), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 2), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 3), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 4), 0); - intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 5), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 4), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 5), 0); - intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0); - intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0); - intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0); - intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0); - intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0); - intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0); + intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0); } static int icl_plane_color_plane(const struct intel_plane_state *plane_state) @@ -1264,11 +1272,12 @@ static int icl_plane_color_plane(const struct intel_plane_state *plane_state) } static void -skl_plane_update_noarm(struct intel_plane *plane, +skl_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; u32 stride = skl_plane_stride(plane_state, 0); @@ -1283,21 +1292,23 @@ skl_plane_update_noarm(struct intel_plane *plane, crtc_y = 0; } - 
intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), - PLANE_STRIDE_(stride)); - intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), - PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); - intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), - PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); + intel_de_write_dsb(display, dsb, PLANE_STRIDE(pipe, plane_id), + PLANE_STRIDE_(stride)); + intel_de_write_dsb(display, dsb, PLANE_POS(pipe, plane_id), + PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); + intel_de_write_dsb(display, dsb, PLANE_SIZE(pipe, plane_id), + PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); - skl_write_plane_wm(plane, crtc_state); + skl_write_plane_wm(dsb, plane, crtc_state); } static void -skl_plane_update_arm(struct intel_plane *plane, +skl_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; @@ -1317,22 +1328,26 @@ skl_plane_update_arm(struct intel_plane *plane, plane_color_ctl = plane_state->color_ctl | glk_plane_color_ctl_crtc(crtc_state); - intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state)); - intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state)); - intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYVAL(pipe, plane_id), + skl_plane_keyval(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYMSK(pipe, plane_id), + skl_plane_keymsk(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYMAX(pipe, plane_id), + skl_plane_keymax(plane_state)); - intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), - PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); + intel_de_write_dsb(display, dsb, PLANE_OFFSET(pipe, plane_id), + PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); - intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), - skl_plane_aux_dist(plane_state, 0)); + intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id), + skl_plane_aux_dist(plane_state, 0)); - intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), - PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) | - PLANE_OFFSET_X(plane_state->view.color_plane[1].x)); + intel_de_write_dsb(display, dsb, PLANE_AUX_OFFSET(pipe, plane_id), + PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) | + PLANE_OFFSET_X(plane_state->view.color_plane[1].x)); if (DISPLAY_VER(dev_priv) >= 10) - intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); + intel_de_write_dsb(display, dsb, PLANE_COLOR_CTL(pipe, plane_id), + plane_color_ctl); /* * Enable the scaler before the plane so that we don't @@ -1349,17 +1364,19 @@ skl_plane_update_arm(struct intel_plane *plane, * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. 
*/ - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - skl_plane_surf(plane_state, 0)); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), + plane_ctl); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), + skl_plane_surf(plane_state, 0)); } -static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane, +static void icl_plane_update_sel_fetch_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, int color_plane) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; const struct drm_rect *clip; u32 val; @@ -1376,7 +1393,7 @@ static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane, y = (clip->y1 + plane_state->uapi.dst.y1); val = y << 16; val |= plane_state->uapi.dst.x1; - intel_de_write_fw(i915, SEL_FETCH_PLANE_POS(pipe, plane->id), val); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_POS(pipe, plane->id), val); x = plane_state->view.color_plane[color_plane].x; @@ -1391,20 +1408,21 @@ static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane, val = y << 16 | x; - intel_de_write_fw(i915, SEL_FETCH_PLANE_OFFSET(pipe, plane->id), - val); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_OFFSET(pipe, plane->id), val); /* Sizes are 0 based */ val = (drm_rect_height(clip) - 1) << 16; val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; - intel_de_write_fw(i915, SEL_FETCH_PLANE_SIZE(pipe, plane->id), val); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_SIZE(pipe, plane->id), val); } static void -icl_plane_update_noarm(struct intel_plane *plane, +icl_plane_update_noarm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; @@ -1428,76 +1446,82 @@ icl_plane_update_noarm(struct intel_plane *plane, crtc_y = 0; } - intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), - PLANE_STRIDE_(stride)); - intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), - PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); - intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), - PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); + intel_de_write_dsb(display, dsb, PLANE_STRIDE(pipe, plane_id), + PLANE_STRIDE_(stride)); + intel_de_write_dsb(display, dsb, PLANE_POS(pipe, plane_id), + PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); + intel_de_write_dsb(display, dsb, PLANE_SIZE(pipe, plane_id), + PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); - intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state)); - intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state)); - intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYVAL(pipe, plane_id), + skl_plane_keyval(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYMSK(pipe, plane_id), + skl_plane_keymsk(plane_state)); + intel_de_write_dsb(display, dsb, PLANE_KEYMAX(pipe, plane_id), + skl_plane_keymax(plane_state)); - intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), - PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); + 
intel_de_write_dsb(display, dsb, PLANE_OFFSET(pipe, plane_id), + PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) { - intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0), - lower_32_bits(plane_state->ccval)); - intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 1), - upper_32_bits(plane_state->ccval)); + intel_de_write_dsb(display, dsb, PLANE_CC_VAL(pipe, plane_id, 0), + lower_32_bits(plane_state->ccval)); + intel_de_write_dsb(display, dsb, PLANE_CC_VAL(pipe, plane_id, 1), + upper_32_bits(plane_state->ccval)); } /* FLAT CCS doesn't need to program AUX_DIST */ if (!HAS_FLAT_CCS(dev_priv) && DISPLAY_VER(dev_priv) < 20) - intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), - skl_plane_aux_dist(plane_state, color_plane)); + intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id), + skl_plane_aux_dist(plane_state, color_plane)); if (icl_is_hdr_plane(dev_priv, plane_id)) - intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), - plane_state->cus_ctl); + intel_de_write_dsb(display, dsb, PLANE_CUS_CTL(pipe, plane_id), + plane_state->cus_ctl); - intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); + intel_de_write_dsb(display, dsb, PLANE_COLOR_CTL(pipe, plane_id), + plane_color_ctl); if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id)) - icl_program_input_csc(plane, crtc_state, plane_state); + icl_program_input_csc(dsb, plane, plane_state); - skl_write_plane_wm(plane, crtc_state); + skl_write_plane_wm(dsb, plane, crtc_state); /* * FIXME: pxp session invalidation can hit any time even at time of commit * or after the commit, display content will be garbage. */ if (plane_state->force_black) - icl_plane_csc_load_black(plane); + icl_plane_csc_load_black(dsb, plane, crtc_state); - icl_plane_update_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane); + icl_plane_update_sel_fetch_noarm(dsb, plane, crtc_state, plane_state, color_plane); } -static void icl_plane_update_sel_fetch_arm(struct intel_plane *plane, +static void icl_plane_update_sel_fetch_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum pipe pipe = plane->pipe; if (!crtc_state->enable_psr2_sel_fetch) return; if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0) - intel_de_write_fw(i915, SEL_FETCH_PLANE_CTL(pipe, plane->id), - SEL_FETCH_PLANE_CTL_ENABLE); + intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_CTL(pipe, plane->id), + SEL_FETCH_PLANE_CTL_ENABLE); else - icl_plane_disable_sel_fetch_arm(plane, crtc_state); + icl_plane_disable_sel_fetch_arm(dsb, plane, crtc_state); } static void -icl_plane_update_arm(struct intel_plane *plane, +icl_plane_update_arm(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; int color_plane = icl_plane_color_plane(plane_state); @@ -1516,25 +1540,27 @@ icl_plane_update_arm(struct intel_plane *plane, if (plane_state->scaler_id >= 0) skl_program_plane_scaler(plane, crtc_state, plane_state); - icl_plane_update_sel_fetch_arm(plane, crtc_state, plane_state); + 
icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state); /* * The control register self-arms if the plane was previously * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. */ - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - skl_plane_surf(plane_state, color_plane)); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), + plane_ctl); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), + skl_plane_surf(plane_state, color_plane)); } static void -skl_plane_async_flip(struct intel_plane *plane, +skl_plane_async_flip(struct intel_dsb *dsb, + struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, bool async_flip) { - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct intel_display *display = to_intel_display(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; u32 plane_ctl = plane_state->ctl; @@ -1544,9 +1570,10 @@ skl_plane_async_flip(struct intel_plane *plane, if (async_flip) plane_ctl |= PLANE_CTL_ASYNC_FLIP; - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - skl_plane_surf(plane_state, 0)); + intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), + plane_ctl); + intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), + skl_plane_surf(plane_state, 0)); } static bool intel_format_is_p01x(u32 format) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 21274aa9bdddc..c3dabb8579605 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -164,6 +164,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) * 4 - Support multiple fault handlers per object depending on object's * backing storage (a.k.a. MMAP_OFFSET). * + * 5 - Support multiple partial mmaps(mmap part of BO + unmap a offset, multiple + * times with different size and offset). + * * Restrictions: * * * snoopable objects cannot be accessed via the GTT. It can cause machine @@ -191,7 +194,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) */ int i915_gem_mmap_gtt_version(void) { - return 4; + return 5; } static inline struct i915_gtt_view diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 9378d5901c493..9ca42589da4da 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -117,21 +117,10 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) GEN6_RC_CTL_RC6_ENABLE | GEN6_RC_CTL_EI_MODE(1); - /* - * BSpec 52698 - Render powergating must be off. - * FIXME BSpec is outdated, disabling powergating for MTL is just - * temporary wa and should be removed after fixing real cause - * of forcewake timeouts. 
- */ - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) - pg_enable = - GEN9_MEDIA_PG_ENABLE | - GEN11_MEDIA_SAMPLER_PG_ENABLE; - else - pg_enable = - GEN9_RENDER_PG_ENABLE | - GEN9_MEDIA_PG_ENABLE | - GEN11_MEDIA_SAMPLER_PG_ENABLE; + pg_enable = + GEN9_RENDER_PG_ENABLE | + GEN9_MEDIA_PG_ENABLE | + GEN11_MEDIA_SAMPLER_PG_ENABLE; if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) { for (i = 0; i < I915_MAX_VCS; i++) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index b0e94c95940f6..8aaadbb702df6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -3425,10 +3425,10 @@ static inline int guc_lrc_desc_unpin(struct intel_context *ce) */ ret = deregister_context(ce, ce->guc_id.id); if (ret) { - spin_lock(&ce->guc_state.lock); + spin_lock_irqsave(&ce->guc_state.lock, flags); set_context_registered(ce); clr_context_destroyed(ce); - spin_unlock(&ce->guc_state.lock); + spin_unlock_irqrestore(&ce->guc_state.lock, flags); /* * As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements * the wakeref immediately but per function spec usage call this after unlock. diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index 2d9152eb72825..24fdce844d9e3 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -317,6 +317,11 @@ void intel_huc_init_early(struct intel_huc *huc) } } +void intel_huc_fini_late(struct intel_huc *huc) +{ + delayed_huc_load_fini(huc); +} + #define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy") static int check_huc_loading_mode(struct intel_huc *huc) { @@ -414,12 +419,6 @@ int intel_huc_init(struct intel_huc *huc) void intel_huc_fini(struct intel_huc *huc) { - /* - * the fence is initialized in init_early, so we need to clean it up - * even if HuC loading is off. 
- */ - delayed_huc_load_fini(huc); - if (huc->heci_pkt) i915_vma_unpin_and_release(&huc->heci_pkt, 0); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h index ba5cb08e9e7bf..09aff3148f7dd 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -55,6 +55,7 @@ struct intel_huc { int intel_huc_sanitize(struct intel_huc *huc); void intel_huc_init_early(struct intel_huc *huc); +void intel_huc_fini_late(struct intel_huc *huc); int intel_huc_init(struct intel_huc *huc); void intel_huc_fini(struct intel_huc *huc); void intel_huc_suspend(struct intel_huc *huc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 5b8080ec5315b..4f751ce74214d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -136,6 +136,7 @@ void intel_uc_init_late(struct intel_uc *uc) void intel_uc_driver_late_release(struct intel_uc *uc) { + intel_huc_fini_late(&uc->huc); } /** diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 908f910420c20..4ef45520e199a 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -222,7 +222,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu) u8 *buf; struct opregion_header *header; struct vbt v; - const char opregion_signature[16] = OPREGION_SIGNATURE; gvt_dbg_core("init vgpu%d opregion\n", vgpu->id); vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | @@ -236,8 +235,10 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu) /* emulated opregion with VBT mailbox only */ buf = (u8 *)vgpu_opregion(vgpu)->va; header = (struct opregion_header *)buf; - memcpy(header->signature, opregion_signature, - sizeof(opregion_signature)); + + static_assert(sizeof(header->signature) == sizeof(OPREGION_SIGNATURE) - 1); + memcpy(header->signature, OPREGION_SIGNATURE, sizeof(header->signature)); + header->size = 0x8; header->opregion_ver = 0x02000000; header->mboxes = MBOX_VBT; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 41f4350a7c6c5..b7f521a9b337d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3869,7 +3869,7 @@ enum skl_power_gate { #define DDI_BUF_IS_IDLE (1 << 7) #define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6) #define DDI_A_4_LANES (1 << 4) -#define DDI_PORT_WIDTH(width) (((width) - 1) << 1) +#define DDI_PORT_WIDTH(width) (((width) == 3 ? 4 : ((width) - 1)) << 1) #define DDI_PORT_WIDTH_MASK (7 << 1) #define DDI_PORT_WIDTH_SHIFT 1 #define DDI_INIT_DISPLAY_DETECTED (1 << 0) diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index fee76c1d2f450..889281819c5b1 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -23,7 +23,9 @@ #include +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_gt_regs.h" #include "gt/uc/intel_gsc_fw.h" #include "i915_driver.h" @@ -253,11 +255,27 @@ int i915_mock_selftests(void) int i915_live_selftests(struct pci_dev *pdev) { struct drm_i915_private *i915 = pdev_to_i915(pdev); + struct intel_uncore *uncore = &i915->uncore; int err; + u32 pg_enable; + intel_wakeref_t wakeref; if (!i915_selftest.live) return 0; + /* + * FIXME Disable render powergating, this is temporary wa and should be removed + * after fixing real cause of forcewake timeouts. 
+ */ + with_intel_runtime_pm(uncore->rpm, wakeref) { + if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 00), IP_VER(12, 74))) { + pg_enable = intel_uncore_read(uncore, GEN9_PG_ENABLE); + if (pg_enable & GEN9_RENDER_PG_ENABLE) + intel_uncore_write_fw(uncore, GEN9_PG_ENABLE, + pg_enable & ~GEN9_RENDER_PG_ENABLE); + } + } + __wait_gsc_proxy_completed(i915); __wait_gsc_huc_load_completed(i915); diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c index 3debc9870a82a..d09c4c6841162 100644 --- a/drivers/gpu/drm/imagination/pvr_fw.c +++ b/drivers/gpu/drm/imagination/pvr_fw.c @@ -732,7 +732,7 @@ pvr_fw_process(struct pvr_device *pvr_dev) fw_mem->core_data, fw_mem->core_code_alloc_size); if (err) - goto err_free_fw_core_data_obj; + goto err_free_kdata; memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size); memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size); @@ -742,10 +742,14 @@ pvr_fw_process(struct pvr_device *pvr_dev) memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size); /* We're finished with the firmware section memory on the CPU, unmap. */ - if (fw_core_data_ptr) + if (fw_core_data_ptr) { pvr_fw_object_vunmap(fw_mem->core_data_obj); - if (fw_core_code_ptr) + fw_core_data_ptr = NULL; + } + if (fw_core_code_ptr) { pvr_fw_object_vunmap(fw_mem->core_code_obj); + fw_core_code_ptr = NULL; + } pvr_fw_object_vunmap(fw_mem->data_obj); fw_data_ptr = NULL; pvr_fw_object_vunmap(fw_mem->code_obj); @@ -753,7 +757,7 @@ pvr_fw_process(struct pvr_device *pvr_dev) err = pvr_fw_create_fwif_connection_ctl(pvr_dev); if (err) - goto err_free_fw_core_data_obj; + goto err_free_kdata; return 0; @@ -763,13 +767,16 @@ pvr_fw_process(struct pvr_device *pvr_dev) kfree(fw_mem->data); kfree(fw_mem->code); -err_free_fw_core_data_obj: if (fw_core_data_ptr) - pvr_fw_object_unmap_and_destroy(fw_mem->core_data_obj); + pvr_fw_object_vunmap(fw_mem->core_data_obj); + if (fw_mem->core_data_obj) + pvr_fw_object_destroy(fw_mem->core_data_obj); err_free_fw_core_code_obj: if (fw_core_code_ptr) - pvr_fw_object_unmap_and_destroy(fw_mem->core_code_obj); + pvr_fw_object_vunmap(fw_mem->core_code_obj); + if (fw_mem->core_code_obj) + pvr_fw_object_destroy(fw_mem->core_code_obj); err_free_fw_data_obj: if (fw_data_ptr) @@ -836,6 +843,12 @@ pvr_fw_cleanup(struct pvr_device *pvr_dev) struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem; pvr_fw_fini_fwif_connection_ctl(pvr_dev); + + kfree(fw_mem->core_data); + kfree(fw_mem->core_code); + kfree(fw_mem->data); + kfree(fw_mem->code); + if (fw_mem->core_code_obj) pvr_fw_object_destroy(fw_mem->core_code_obj); if (fw_mem->core_data_obj) diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c index c39beb70c3173..6d13864851fc2 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_meta.c +++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c @@ -527,8 +527,10 @@ pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj) static void pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj) { - pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start, - fw_obj->fw_mm_node.size); + struct pvr_gem_object *pvr_obj = fw_obj->gem; + + pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj, + fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size); } static bool diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c index 73707daa4e52d..5dbb636d7d4ff 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_trace.c +++ 
b/drivers/gpu/drm/imagination/pvr_fw_trace.c @@ -333,8 +333,8 @@ static int fw_trace_seq_show(struct seq_file *s, void *v) if (sf_id == ROGUE_FW_SF_LAST) return -EINVAL; - timestamp = read_fw_trace(trace_seq_data, 1) | - ((u64)read_fw_trace(trace_seq_data, 2) << 32); + timestamp = ((u64)read_fw_trace(trace_seq_data, 1) << 32) | + read_fw_trace(trace_seq_data, 2); timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >> ROGUE_FWT_TIMESTAMP_TIME_SHIFT; diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c index 78c2f3c6dce01..6a15c1d2d871d 100644 --- a/drivers/gpu/drm/imagination/pvr_job.c +++ b/drivers/gpu/drm/imagination/pvr_job.c @@ -684,6 +684,13 @@ pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count) geom_job->paired_job = frag_job; frag_job->paired_job = geom_job; + /* The geometry job pvr_job structure is used when the fragment + * job is being prepared by the GPU scheduler. Have the fragment + * job hold a reference on the geometry job to prevent it being + * freed until the fragment job has finished with it. + */ + pvr_job_get(geom_job); + /* Skip the fragment job we just paired to the geometry job. */ i++; } diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c index 20cb460120821..130473cfdfc9b 100644 --- a/drivers/gpu/drm/imagination/pvr_queue.c +++ b/drivers/gpu/drm/imagination/pvr_queue.c @@ -109,12 +109,20 @@ pvr_queue_fence_get_driver_name(struct dma_fence *f) return PVR_DRIVER_NAME; } +static void pvr_queue_fence_release_work(struct work_struct *w) +{ + struct pvr_queue_fence *fence = container_of(w, struct pvr_queue_fence, release_work); + + pvr_context_put(fence->queue->ctx); + dma_fence_free(&fence->base); +} + static void pvr_queue_fence_release(struct dma_fence *f) { struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base); + struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev; - pvr_context_put(fence->queue->ctx); - dma_fence_free(f); + queue_work(pvr_dev->sched_wq, &fence->release_work); } static const char * @@ -268,6 +276,7 @@ pvr_queue_fence_init(struct dma_fence *f, pvr_context_get(queue->ctx); fence->queue = queue; + INIT_WORK(&fence->release_work, pvr_queue_fence_release_work); dma_fence_init(&fence->base, fence_ops, &fence_ctx->lock, fence_ctx->id, atomic_inc_return(&fence_ctx->seqno)); @@ -304,8 +313,9 @@ pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue) static void pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue) { - pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops, - &queue->job_fence_ctx); + if (!fence->ops) + pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops, + &queue->job_fence_ctx); } /** @@ -856,6 +866,10 @@ static void pvr_queue_free_job(struct drm_sched_job *sched_job) struct pvr_job *job = container_of(sched_job, struct pvr_job, base); drm_sched_job_cleanup(sched_job); + + if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) + pvr_job_put(job->paired_job); + job->paired_job = NULL; pvr_job_put(job); } diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h index e06ced69302fc..93fe9ac9f58cc 100644 --- a/drivers/gpu/drm/imagination/pvr_queue.h +++ b/drivers/gpu/drm/imagination/pvr_queue.h @@ -5,6 +5,7 @@ #define PVR_QUEUE_H #include +#include #include "pvr_cccb.h" #include "pvr_device.h" @@ -63,6 +64,9 @@ struct pvr_queue_fence { /** @queue: Queue that created this fence. 
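The pvr_queue changes above stop freeing the fence directly from the dma_fence ->release callback and instead push the teardown onto a workqueue, since ->release can run in contexts where dropping the context reference directly is not safe. A condensed sketch of that deferral pattern, with hypothetical type names rather than the actual pvr structures:

#include <linux/dma-fence.h>
#include <linux/workqueue.h>
#include <linux/container_of.h>

struct deferred_fence {
	struct dma_fence base;
	struct work_struct release_work;
};

static void deferred_fence_release_work(struct work_struct *w)
{
	struct deferred_fence *f = container_of(w, struct deferred_fence, release_work);

	/* Teardown that may sleep or take heavier locks happens here. */
	dma_fence_free(&f->base);
}

static void deferred_fence_release(struct dma_fence *fence)
{
	struct deferred_fence *f = container_of(fence, struct deferred_fence, base);

	/* ->release may be called from atomic context; defer the real work. */
	schedule_work(&f->release_work);
}

static void deferred_fence_init_work(struct deferred_fence *f)
{
	INIT_WORK(&f->release_work, deferred_fence_release_work);
}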
*/ struct pvr_queue *queue; + + /** @release_work: Fence release work structure. */ + struct work_struct release_work; }; /** diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 363f885a70982..2896fa7501b1c 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -293,8 +293,9 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op, static int pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op, - struct pvr_vm_context *vm_ctx, u64 device_addr, - u64 size) + struct pvr_vm_context *vm_ctx, + struct pvr_gem_object *pvr_obj, + u64 device_addr, u64 size) { int err; @@ -318,6 +319,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op, goto err_bind_op_fini; } + bind_op->pvr_obj = pvr_obj; bind_op->vm_ctx = vm_ctx; bind_op->device_addr = device_addr; bind_op->size = size; @@ -597,20 +599,6 @@ pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context) return ERR_PTR(err); } -/** - * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context. - * @vm_ctx: Target VM context. - * - * This function ensures that no mappings are left dangling by unmapping them - * all in order of ascending device-virtual address. - */ -void -pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx) -{ - WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, - vm_ctx->gpuvm_mgr.mm_range)); -} - /** * pvr_vm_context_release() - Teardown a VM context. * @ref_count: Pointer to reference counter of the VM context. @@ -703,11 +691,7 @@ pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec) struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv; struct pvr_gem_object *pvr_obj = bind_op->pvr_obj; - /* Unmap operations don't have an object to lock. */ - if (!pvr_obj) - return 0; - - /* Acquire lock on the GEM being mapped. */ + /* Acquire lock on the GEM object being mapped/unmapped. */ return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj)); } @@ -772,8 +756,10 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, } /** - * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory. + * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual + * memory. * @vm_ctx: Target VM context. + * @pvr_obj: Target PowerVR memory object. * @device_addr: Virtual device address at the start of the target mapping. * @size: Size of the target mapping. * @@ -784,9 +770,13 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, * * Any error encountered while performing internal operations required to * destroy the mapping (returned from pvr_vm_gpuva_unmap or * pvr_vm_gpuva_remap). + * + * The vm_ctx->lock must be held when calling this function. 
*/ -int -pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size) +static int +pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx, + struct pvr_gem_object *pvr_obj, + u64 device_addr, u64 size) { struct pvr_vm_bind_op bind_op = {0}; struct drm_gpuvm_exec vm_exec = { @@ -799,11 +789,13 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size) }, }; - int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr, - size); + int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj, + device_addr, size); if (err) return err; + pvr_gem_object_get(pvr_obj); + err = drm_gpuvm_exec_lock(&vm_exec); if (err) goto err_cleanup; @@ -818,6 +810,96 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size) return err; } +/** + * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual + * memory. + * @vm_ctx: Target VM context. + * @pvr_obj: Target PowerVR memory object. + * @device_addr: Virtual device address at the start of the target mapping. + * @size: Size of the target mapping. + * + * Return: + * * 0 on success, + * * Any error encountered by pvr_vm_unmap_obj_locked. + */ +int +pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, + u64 device_addr, u64 size) +{ + int err; + + mutex_lock(&vm_ctx->lock); + err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size); + mutex_unlock(&vm_ctx->lock); + + return err; +} + +/** + * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory. + * @vm_ctx: Target VM context. + * @device_addr: Virtual device address at the start of the target mapping. + * @size: Size of the target mapping. + * + * Return: + * * 0 on success, + * * Any error encountered by drm_gpuva_find, + * * Any error encountered by pvr_vm_unmap_obj_locked. + */ +int +pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size) +{ + struct pvr_gem_object *pvr_obj; + struct drm_gpuva *va; + int err; + + mutex_lock(&vm_ctx->lock); + + va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size); + if (va) { + pvr_obj = gem_to_pvr_gem(va->gem.obj); + err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, + va->va.addr, va->va.range); + } else { + err = -ENOENT; + } + + mutex_unlock(&vm_ctx->lock); + + return err; +} + +/** + * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context. + * @vm_ctx: Target VM context. + * + * This function ensures that no mappings are left dangling by unmapping them + * all in order of ascending device-virtual address. + */ +void +pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx) +{ + mutex_lock(&vm_ctx->lock); + + for (;;) { + struct pvr_gem_object *pvr_obj; + struct drm_gpuva *va; + + va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, + vm_ctx->gpuvm_mgr.mm_start, + vm_ctx->gpuvm_mgr.mm_range); + if (!va) + break; + + pvr_obj = gem_to_pvr_gem(va->gem.obj); + + WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, + va->va.addr, va->va.range)); + } + + mutex_unlock(&vm_ctx->lock); +} + /* Static data areas are determined by firmware. 
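After the rework above there are two unmap entry points: pvr_vm_unmap_obj() for callers that still hold the pvr_gem_object backing the mapping (so it can be locked and referenced across the unmap), and pvr_vm_unmap() for address-only callers, which now resolves the object itself with drm_gpuva_find() under the context lock. A brief usage sketch against those interfaces, assuming the usual pvr_vm.h/pvr_gem.h includes; the surrounding functions are placeholders:

/* Caller that tracked the object alongside its mapping, as the META
 * firmware code above now does.
 */
static void example_unmap_known_obj(struct pvr_vm_context *vm_ctx,
				    struct pvr_gem_object *pvr_obj,
				    u64 addr, u64 size)
{
	WARN_ON(pvr_vm_unmap_obj(vm_ctx, pvr_obj, addr, size));
}

/* Caller that only knows the device-virtual range; the object lookup
 * happens inside pvr_vm_unmap(), which returns -ENOENT if nothing is
 * mapped there.
 */
static int example_unmap_by_addr(struct pvr_vm_context *vm_ctx,
				 u64 addr, u64 size)
{
	return pvr_vm_unmap(vm_ctx, addr, size);
}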
*/ static const struct drm_pvr_static_data_area static_data_areas[] = { { diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h index 79406243617c1..b0528dffa7f1b 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.h +++ b/drivers/gpu/drm/imagination/pvr_vm.h @@ -38,6 +38,9 @@ struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev, int pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset, u64 device_addr, u64 size); +int pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, + struct pvr_gem_object *pvr_obj, + u64 device_addr, u64 size); int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size); void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx); diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c index 5674f5707cca8..8f6fba4217ece 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_crtc.c @@ -620,13 +620,16 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank) mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle); mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0); + goto update_config_out; } -#else +#endif spin_lock_irqsave(&mtk_crtc->config_lock, flags); mtk_crtc->config_updating = false; spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); -#endif +#if IS_REACHABLE(CONFIG_MTK_CMDQ) +update_config_out: +#endif mutex_unlock(&mtk_crtc->hw_lock); } diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index cad65ea851edc..4979d49ae25a6 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -1746,7 +1746,7 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp) ret = drm_dp_dpcd_readb(&mtk_dp->aux, DP_MSTM_CAP, &val); if (ret < 1) { - drm_err(mtk_dp->drm_dev, "Read mstm cap failed\n"); + dev_err(mtk_dp->dev, "Read mstm cap failed: %zd\n", ret); return ret == 0 ? -EIO : ret; } @@ -1756,7 +1756,7 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp) DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &val); if (ret < 1) { - drm_err(mtk_dp->drm_dev, "Read irq vector failed\n"); + dev_err(mtk_dp->dev, "Read irq vector failed: %zd\n", ret); return ret == 0 ? -EIO : ret; } @@ -2039,7 +2039,7 @@ static int mtk_dp_wait_hpd_asserted(struct drm_dp_aux *mtk_aux, unsigned long wa ret = mtk_dp_parse_capabilities(mtk_dp); if (ret) { - drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n"); + dev_err(mtk_dp->dev, "Can't parse capabilities: %d\n", ret); return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index a08d206549543..9c11d3158324c 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -127,14 +127,14 @@ struct mtk_dpi_yc_limit { * @is_ck_de_pol: Support CK/DE polarity. * @swap_input_support: Support input swap function. * @support_direct_pin: IP supports direct connection to dpi panels. - * @input_2pixel: Input pixel of dp_intf is 2 pixel per round, so enable this - * config to enable this feature. * @dimension_mask: Mask used for HWIDTH, HPORCH, VSYNC_WIDTH and VSYNC_PORCH * (no shift). * @hvsize_mask: Mask of HSIZE and VSIZE mask (no shift). * @channel_swap_shift: Shift value of channel swap. * @yuv422_en_bit: Enable bit of yuv422. * @csc_enable_bit: Enable bit of CSC. + * @input_2p_en_bit: Enable bit for input two pixel per round feature. + * If present, implies that the feature must be enabled. 
* @pixels_per_iter: Quantity of transferred pixels per iteration. * @edge_cfg_in_mmsys: If the edge configuration for DPI's output needs to be set in MMSYS. */ @@ -148,12 +148,12 @@ struct mtk_dpi_conf { bool is_ck_de_pol; bool swap_input_support; bool support_direct_pin; - bool input_2pixel; u32 dimension_mask; u32 hvsize_mask; u32 channel_swap_shift; u32 yuv422_en_bit; u32 csc_enable_bit; + u32 input_2p_en_bit; u32 pixels_per_iter; bool edge_cfg_in_mmsys; }; @@ -471,6 +471,7 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi) mtk_dpi_disable(dpi); clk_disable_unprepare(dpi->pixel_clk); + clk_disable_unprepare(dpi->tvd_clk); clk_disable_unprepare(dpi->engine_clk); } @@ -487,6 +488,12 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi) goto err_refcount; } + ret = clk_prepare_enable(dpi->tvd_clk); + if (ret) { + dev_err(dpi->dev, "Failed to enable tvd pll: %d\n", ret); + goto err_engine; + } + ret = clk_prepare_enable(dpi->pixel_clk); if (ret) { dev_err(dpi->dev, "Failed to enable pixel clock: %d\n", ret); @@ -496,6 +503,8 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi) return 0; err_pixel: + clk_disable_unprepare(dpi->tvd_clk); +err_engine: clk_disable_unprepare(dpi->engine_clk); err_refcount: dpi->refcount--; @@ -610,9 +619,9 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, mtk_dpi_dual_edge(dpi); mtk_dpi_config_disable_edge(dpi); } - if (dpi->conf->input_2pixel) { - mtk_dpi_mask(dpi, DPI_CON, DPINTF_INPUT_2P_EN, - DPINTF_INPUT_2P_EN); + if (dpi->conf->input_2p_en_bit) { + mtk_dpi_mask(dpi, DPI_CON, dpi->conf->input_2p_en_bit, + dpi->conf->input_2p_en_bit); } mtk_dpi_sw_reset(dpi, false); @@ -992,12 +1001,12 @@ static const struct mtk_dpi_conf mt8195_dpintf_conf = { .output_fmts = mt8195_output_fmts, .num_output_fmts = ARRAY_SIZE(mt8195_output_fmts), .pixels_per_iter = 4, - .input_2pixel = true, .dimension_mask = DPINTF_HPW_MASK, .hvsize_mask = DPINTF_HSIZE_MASK, .channel_swap_shift = DPINTF_CH_SWAP, .yuv422_en_bit = DPINTF_YUV422_EN, .csc_enable_bit = DPINTF_CSC_ENABLE, + .input_2p_en_bit = DPINTF_INPUT_2P_EN, }; static int mtk_dpi_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index b9b7fd08b7d7e..88f3dfeb4731d 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -1108,12 +1108,12 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct mtk_dsi *dsi = host_to_dsi(host); - u32 recv_cnt, i; + ssize_t recv_cnt; u8 read_data[16]; void *src_addr; u8 irq_flag = CMD_DONE_INT_FLAG; u32 dsi_mode; - int ret; + int ret, i; dsi_mode = readl(dsi->regs + DSI_MODE_CTRL); if (dsi_mode & MODE) { @@ -1162,7 +1162,7 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host, if (recv_cnt) memcpy(msg->rx_buf, src_addr, recv_cnt); - DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n", + DRM_INFO("dsi get %zd byte data from the panel address(0x%x)\n", recv_cnt, *((u8 *)(msg->tx_buf))); restore_dsi_mode: diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 7687f673964ec..1aad8e6cf52e7 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -137,7 +137,7 @@ enum hdmi_aud_channel_swap_type { struct hdmi_audio_param { enum hdmi_audio_coding_type aud_codec; - enum hdmi_audio_sample_size aud_sampe_size; + enum hdmi_audio_sample_size aud_sample_size; enum hdmi_aud_input_type aud_input_type; enum hdmi_aud_i2s_fmt aud_i2s_fmt; enum 
hdmi_aud_mclk aud_mclk; @@ -173,6 +173,7 @@ struct mtk_hdmi { unsigned int sys_offset; void __iomem *regs; enum hdmi_colorspace csp; + struct platform_device *audio_pdev; struct hdmi_audio_param aud_param; bool audio_enable; bool powered; @@ -1074,7 +1075,7 @@ static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi) hdmi->csp = HDMI_COLORSPACE_RGB; aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - aud_param->aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16; + aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; aud_param->aud_input_type = HDMI_AUD_INPUT_I2S; aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; aud_param->aud_mclk = HDMI_AUD_MCLK_128FS; @@ -1572,14 +1573,14 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data, switch (daifmt->fmt) { case HDMI_I2S: hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16; + hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S; hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS; break; case HDMI_SPDIF: hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16; + hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; hdmi_params.aud_input_type = HDMI_AUD_INPUT_SPDIF; break; default: @@ -1663,6 +1664,11 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = { .no_capture_mute = 1, }; +static void mtk_hdmi_unregister_audio_driver(void *data) +{ + platform_device_unregister(data); +} + static int mtk_hdmi_register_audio_driver(struct device *dev) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); @@ -1672,13 +1678,20 @@ static int mtk_hdmi_register_audio_driver(struct device *dev) .i2s = 1, .data = hdmi, }; - struct platform_device *pdev; + int ret; - pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, - PLATFORM_DEVID_AUTO, &codec_data, - sizeof(codec_data)); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); + hdmi->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + if (IS_ERR(hdmi->audio_pdev)) + return PTR_ERR(hdmi->audio_pdev); + + ret = devm_add_action_or_reset(dev, mtk_hdmi_unregister_audio_driver, + hdmi->audio_pdev); + if (ret) + return ret; DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME); return 0; diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index fb71658c3117b..6067d08aeee34 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -223,7 +223,7 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod vsyncstr = mode->crtc_vsync_start - 1; vsyncend = mode->crtc_vsync_end - 1; vtotal = mode->crtc_vtotal - 2; - vblkstr = mode->crtc_vblank_start; + vblkstr = mode->crtc_vblank_start - 1; vblkend = vtotal + 1; linecomp = vdispend; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index e386b059187ac..67fa528f546d3 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -1126,49 +1126,50 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; u32 val; + int ret; /* - * The GMU may still be in slumber unless the GPU started so check and - * skip putting it back into slumber if so + * GMU firmware's internal power state 
gets messed up if we send "prepare_slumber" hfi when + * oob_gpu handshake wasn't done after the last wake up. So do a dummy handshake here when + * required */ - val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); + if (adreno_gpu->base.needs_hw_init) { + if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET)) + goto force_off; - if (val != 0xf) { - int ret = a6xx_gmu_wait_for_idle(gmu); + a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); + } - /* If the GMU isn't responding assume it is hung */ - if (ret) { - a6xx_gmu_force_off(gmu); - return; - } + ret = a6xx_gmu_wait_for_idle(gmu); - a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung); + /* If the GMU isn't responding assume it is hung */ + if (ret) + goto force_off; - /* tell the GMU we want to slumber */ - ret = a6xx_gmu_notify_slumber(gmu); - if (ret) { - a6xx_gmu_force_off(gmu); - return; - } + a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung); - ret = gmu_poll_timeout(gmu, - REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val, - !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB), - 100, 10000); + /* tell the GMU we want to slumber */ + ret = a6xx_gmu_notify_slumber(gmu); + if (ret) + goto force_off; - /* - * Let the user know we failed to slumber but don't worry too - * much because we are powering down anyway - */ + ret = gmu_poll_timeout(gmu, + REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val, + !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB), + 100, 10000); - if (ret) - DRM_DEV_ERROR(gmu->dev, - "Unable to slumber GMU: status = 0%x/0%x\n", - gmu_read(gmu, - REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS), - gmu_read(gmu, - REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2)); - } + /* + * Let the user know we failed to slumber but don't worry too + * much because we are powering down anyway + */ + + if (ret) + DRM_DEV_ERROR(gmu->dev, + "Unable to slumber GMU: status = 0%x/0%x\n", + gmu_read(gmu, + REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS), + gmu_read(gmu, + REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2)); /* Turn off HFI */ a6xx_hfi_stop(gmu); @@ -1178,6 +1179,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) /* Tell RPMh to power off the GPU */ a6xx_rpmh_stop(gmu); + + return; + +force_off: + a6xx_gmu_force_off(gmu); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 702b8d4b34972..d903ad9c0b5fb 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -233,10 +233,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; fallthrough; case MSM_SUBMIT_CMD_BUF: - OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); + OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); - OUT_RING(ring, submit->cmd[i].size); + OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size)); ibs++; break; } @@ -319,10 +319,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; fallthrough; case MSM_SUBMIT_CMD_BUF: - OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); + OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); - OUT_RING(ring, submit->cmd[i].size); + OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size)); ibs++; break; } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 0fcae53c0b140..159665cb6b14f 100644 --- 
a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -1507,6 +1507,8 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu, /* Restore the size in the hardware */ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size); + + a6xx_state->nr_indexed_regs = count; } static void a7xx_get_indexed_registers(struct msm_gpu *gpu, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h index 421afacb72480..36cc9dbc00b5c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h @@ -297,7 +297,7 @@ static const struct dpu_wb_cfg sm8150_wb[] = { { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, - .features = WB_SDM845_MASK, + .features = WB_SM8250_MASK, .format_list = wb2_formats_rgb, .num_formats = ARRAY_SIZE(wb2_formats_rgb), .clk_ctrl = DPU_CLK_CTRL_WB2, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h index 641023b102bf5..e8eacdb47967a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h @@ -304,7 +304,7 @@ static const struct dpu_wb_cfg sc8180x_wb[] = { { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, - .features = WB_SDM845_MASK, + .features = WB_SM8250_MASK, .format_list = wb2_formats_rgb, .num_formats = ARRAY_SIZE(wb2_formats_rgb), .clk_ctrl = DPU_CLK_CTRL_WB2, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h index d039b96beb97c..76f60a2df7a89 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h @@ -144,7 +144,7 @@ static const struct dpu_wb_cfg sm6125_wb[] = { { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, - .features = WB_SDM845_MASK, + .features = WB_SM8250_MASK, .format_list = wb2_formats_rgb, .num_formats = ARRAY_SIZE(wb2_formats_rgb), .clk_ctrl = DPU_CLK_CTRL_WB2, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index db6c57900781d..ecd595215a6be 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1191,10 +1191,6 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name); - /* force a full mode set if active state changed */ - if (crtc_state->active_changed) - crtc_state->mode_changed = true; - if (cstate->num_mixers) { rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state); if (rc) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index bd3698bf0cf74..47b514c89ce66 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -718,12 +718,11 @@ static int dpu_encoder_virt_atomic_check( crtc_state->mode_changed = true; /* * Release and Allocate resources on every modeset - * Dont allocate when active is false. 
*/ if (drm_atomic_crtc_needs_modeset(crtc_state)) { dpu_rm_release(global_state, drm_enc); - if (!crtc_state->active_changed || crtc_state->enable) + if (crtc_state->enable) ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc, crtc_state, topology); if (!ret) @@ -2125,6 +2124,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) } } + if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither) + phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL); + /* reset the merge 3D HW block */ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) { phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c index 5e9aad1b2aa28..d1e0fb2139765 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c @@ -52,6 +52,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc, u32 slice_last_group_size; u32 det_thresh_flatness; bool is_cmd_mode = !(mode & DSC_MODE_VIDEO); + bool input_10_bits = dsc->bits_per_component == 10; DPU_REG_WRITE(c, DSC_COMMON_MODE, mode); @@ -68,7 +69,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc, data |= (dsc->line_buf_depth << 3); data |= (dsc->simple_422 << 2); data |= (dsc->convert_rgb << 1); - data |= dsc->bits_per_component; + data |= input_10_bits; DPU_REG_WRITE(c, DSC_ENC, data); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c index 0f40eea7f5e24..2040bee8d512f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c @@ -272,7 +272,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops, if (cap & BIT(DPU_MDP_VSYNC_SEL)) ops->setup_vsync_source = dpu_hw_setup_vsync_sel; - else + else if (!(cap & BIT(DPU_MDP_PERIPH_0_REMOVED))) ops->setup_vsync_source = dpu_hw_setup_wd_timer; ops->get_safe_status = dpu_hw_get_safe_status; diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index a98d24b7cb00b..d22e01751f5ee 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -846,7 +846,7 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host, dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0)); } -static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay) +static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode) { struct drm_dsc_config *dsc = msm_host->dsc; u32 reg, reg_ctrl, reg_ctrl2; @@ -858,7 +858,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod /* first calculate dsc parameters and then program * compress mode registers */ - slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay); + slice_per_intf = dsc->slice_count; total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf; bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */ @@ -991,7 +991,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi) if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) { if (msm_host->dsc) - dsi_update_dsc_timing(msm_host, false, mode->hdisplay); + dsi_update_dsc_timing(msm_host, false); dsi_write(msm_host, REG_DSI_ACTIVE_H, DSI_ACTIVE_H_START(ha_start) | @@ -1012,7 +1012,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi) DSI_ACTIVE_VSYNC_VPOS_END(vs_end)); } else { /* command mode */ if (msm_host->dsc) - dsi_update_dsc_timing(msm_host, true, 
mode->hdisplay); + dsi_update_dsc_timing(msm_host, true); /* image data and 1 byte write_memory_start cmd */ if (!msm_host->dsc) @@ -1827,8 +1827,15 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) __func__, ret); goto err; } - if (!ret) + if (!ret) { msm_dsi->te_source = devm_kstrdup(dev, te_source, GFP_KERNEL); + if (!msm_dsi->te_source) { + DRM_DEV_ERROR(dev, "%s: failed to allocate te_source\n", + __func__); + ret = -ENOMEM; + goto err; + } + } ret = 0; if (of_property_read_bool(np, "syscon-sfpb")) { diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index a210b7c9e5ca2..4fabb01345aa2 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -74,17 +74,35 @@ static int dsi_mgr_setup_components(int id) int ret; if (!IS_BONDED_DSI()) { + /* + * Set the usecase before calling msm_dsi_host_register(), which would + * already program the PLL source mux based on a default usecase. + */ + msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); + msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); + ret = msm_dsi_host_register(msm_dsi->host); if (ret) return ret; - - msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); - msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); } else if (other_dsi) { struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ? msm_dsi : other_dsi; struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ? other_dsi : msm_dsi; + + /* + * PLL0 is to drive both DSI link clocks in bonded DSI mode. + * + * Set the usecase before calling msm_dsi_host_register(), which would + * already program the PLL source mux based on a default usecase. + */ + msm_dsi_phy_set_usecase(clk_master_dsi->phy, + MSM_DSI_PHY_MASTER); + msm_dsi_phy_set_usecase(clk_slave_dsi->phy, + MSM_DSI_PHY_SLAVE); + msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); + msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy); + /* Register slave host first, so that slave DSI device * has a chance to probe, and do not block the master * DSI device's probe. @@ -98,14 +116,6 @@ static int dsi_mgr_setup_components(int id) ret = msm_dsi_host_register(master_link_dsi->host); if (ret) return ret; - - /* PLL0 is to drive both 2 DSI link clocks in bonded DSI mode. */ - msm_dsi_phy_set_usecase(clk_master_dsi->phy, - MSM_DSI_PHY_MASTER); - msm_dsi_phy_set_usecase(clk_slave_dsi->phy, - MSM_DSI_PHY_SLAVE); - msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); - msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy); } return 0; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 031446c87daec..a2c87c84aa05b 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -83,6 +83,9 @@ struct dsi_pll_7nm { /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */ spinlock_t postdiv_lock; + /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG1 register */ + spinlock_t pclk_mux_lock; + struct pll_7nm_cached_state cached_state; struct dsi_pll_7nm *slave; @@ -302,7 +305,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi writel(pll->phy->cphy_mode ? 
0x00 : 0x10, base + REG_DSI_7nm_PHY_PLL_CMODE_1); writel(config->pll_clock_inverters, - base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS); + base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1); } static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, @@ -372,22 +375,41 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll) ndelay(250); } -static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll) +static void dsi_pll_cmn_clk_cfg0_write(struct dsi_pll_7nm *pll, u32 val) { + unsigned long flags; + + spin_lock_irqsave(&pll->postdiv_lock, flags); + writel(val, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0); + spin_unlock_irqrestore(&pll->postdiv_lock, flags); +} + +static void dsi_pll_cmn_clk_cfg1_update(struct dsi_pll_7nm *pll, u32 mask, + u32 val) +{ + unsigned long flags; u32 data; + spin_lock_irqsave(&pll->pclk_mux_lock, flags); data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); - writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + data &= ~mask; + data |= val & mask; + + writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + spin_unlock_irqrestore(&pll->pclk_mux_lock, flags); +} + +static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll) +{ + dsi_pll_cmn_clk_cfg1_update(pll, DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN, 0); } static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll) { - u32 data; + u32 cfg_1 = DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN | DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN_SEL; writel(0x04, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3); - - data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); - writel(data | BIT(5) | BIT(4), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + dsi_pll_cmn_clk_cfg1_update(pll, cfg_1, cfg_1); } static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll) @@ -565,7 +587,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy) { struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw); struct pll_7nm_cached_state *cached = &pll_7nm->cached_state; - void __iomem *phy_base = pll_7nm->phy->base; u32 val; int ret; @@ -574,13 +595,10 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy) val |= cached->pll_out_div; writel(val, pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE); - writel(cached->bit_clk_div | (cached->pix_clk_div << 4), - phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0); - - val = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); - val &= ~0x3; - val |= cached->pll_mux; - writel(val, phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + dsi_pll_cmn_clk_cfg0_write(pll_7nm, + DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) | + DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div)); + dsi_pll_cmn_clk_cfg1_update(pll_7nm, 0x3, cached->pll_mux); ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw, pll_7nm->vco_current_rate, @@ -599,7 +617,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy) static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy) { struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw); - void __iomem *base = phy->base; u32 data = 0x0; /* internal PLL */ DBG("DSI PLL%d", pll_7nm->phy->id); @@ -618,7 +635,8 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy) } /* set PLL src */ - writel(data << 2, base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL__MASK, + DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL(data)); return 0; } @@ -733,7 +751,7 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide pll_by_2_bit, }), 2, 0, pll_7nm->phy->base + 
REG_DSI_7nm_PHY_CMN_CLK_CFG1, - 0, 1, 0, NULL); + 0, 1, 0, &pll_7nm->pclk_mux_lock); if (IS_ERR(hw)) { ret = PTR_ERR(hw); goto fail; @@ -778,6 +796,7 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy) pll_7nm_list[phy->id] = pll_7nm; spin_lock_init(&pll_7nm->postdiv_lock); + spin_lock_init(&pll_7nm->pclk_mux_lock); pll_7nm->phy = phy; diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 2e28a13446366..9526b22038ab8 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -543,15 +543,12 @@ static inline int align_pitch(int width, int bpp) static inline unsigned long timeout_to_jiffies(const ktime_t *timeout) { ktime_t now = ktime_get(); - s64 remaining_jiffies; - if (ktime_compare(*timeout, now) < 0) { - remaining_jiffies = 0; - } else { - ktime_t rem = ktime_sub(*timeout, now); - remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ); - } + if (ktime_compare(*timeout, now) <= 0) + return 0; + ktime_t rem = ktime_sub(*timeout, now); + s64 remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ); return clamp(remaining_jiffies, 1LL, (s64)INT_MAX); } diff --git a/drivers/gpu/drm/msm/msm_dsc_helper.h b/drivers/gpu/drm/msm/msm_dsc_helper.h index b9049fe1e2790..63f95523b2cbb 100644 --- a/drivers/gpu/drm/msm/msm_dsc_helper.h +++ b/drivers/gpu/drm/msm/msm_dsc_helper.h @@ -12,17 +12,6 @@ #include #include -/** - * msm_dsc_get_slices_per_intf() - calculate number of slices per interface - * @dsc: Pointer to drm dsc config struct - * @intf_width: interface width in pixels - * Returns: Integer representing the number of slices for the given interface - */ -static inline u32 msm_dsc_get_slices_per_intf(const struct drm_dsc_config *dsc, u32 intf_width) -{ - return DIV_ROUND_UP(intf_width, dsc->slice_width); -} - /** * msm_dsc_get_bytes_per_line() - calculate bytes per line * @dsc: Pointer to drm dsc config struct diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml index cab01af55d222..c6cdc5c003dc0 100644 --- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml +++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml @@ -2264,5 +2264,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords) + + + + + + + diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml index d54b72f924493..35f7f40e405b7 100644 --- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml +++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml @@ -9,8 +9,15 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> - - + + + + + + + + + diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index ceef470c9fbfc..1050a4617fc15 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -4,6 +4,8 @@ config DRM_NOUVEAU depends on DRM && PCI && MMU select IOMMU_API select FW_LOADER + select FW_CACHE if PM_SLEEP + select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index db961eade2257..2016c1e7242fe 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -144,6 +144,9 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) nouveau_bo_del_io_reserve_lru(bo); nv10_bo_put_tile_region(dev, nvbo->tile, NULL); + if (bo->base.import_attach) + drm_prime_gem_destroy(&bo->base, bo->sg); + /* * If nouveau_bo_new() 
allocated this buffer, the GEM object was never * initialized, so don't attempt to release it. diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index b06aa473102b3..5ab4201c981e4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -776,7 +776,6 @@ nouveau_connector_force(struct drm_connector *connector) if (!nv_encoder) { NV_ERROR(drm, "can't find encoder to force %s on!\n", connector->name); - connector->status = connector_status_disconnected; return; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 34985771b2a28..6e5adab034713 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -31,6 +31,7 @@ #include #include +#include #include #include #include @@ -836,6 +837,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev, { struct nvkm_device *device; struct nouveau_drm *drm; + const struct drm_format_info *format; int ret; if (vga_switcheroo_client_probe_defer(pdev)) @@ -873,9 +875,11 @@ static int nouveau_drm_probe(struct pci_dev *pdev, goto fail_pci; if (drm->client.device.info.ram_size <= 32 * 1024 * 1024) - drm_fbdev_ttm_setup(drm->dev, 8); + format = drm_format_info(DRM_FORMAT_C8); else - drm_fbdev_ttm_setup(drm->dev, 32); + format = NULL; + + drm_client_setup(drm->dev, format); quirk_broken_nv_runpm(pdev); return 0; @@ -1318,6 +1322,8 @@ driver_stub = { .dumb_create = nouveau_display_dumb_create, .dumb_map_offset = drm_gem_ttm_dumb_map_offset, + DRM_FBDEV_TTM_DRIVER_OPS, + .name = DRIVER_NAME, .desc = DRIVER_DESC, #ifdef GIT_REVISION diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 9ae2cee1c7c58..67e3c99de73ae 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -87,9 +87,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem) return; } - if (gem->import_attach) - drm_prime_gem_destroy(gem, nvbo->bo.sg); - ttm_bo_put(&nvbo->bo); pm_runtime_mark_last_busy(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index b4da82ddbb6b2..8ea98f06d39af 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -590,6 +590,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, unsigned long timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); struct mm_struct *mm = svmm->notifier.mm; + struct folio *folio; struct page *page; unsigned long start = args->p.addr; unsigned long notifier_seq; @@ -616,12 +617,16 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, ret = -EINVAL; goto out; } + folio = page_folio(page); mutex_lock(&svmm->mutex); if (!mmu_interval_read_retry(¬ifier->notifier, notifier_seq)) break; mutex_unlock(&svmm->mutex); + + folio_unlock(folio); + folio_put(folio); } /* Map the page on the GPU. 
*/ @@ -637,8 +642,8 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); mutex_unlock(&svmm->mutex); - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); out: mmu_interval_notifier_remove(¬ifier->notifier); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c index a6f410ba60bc9..d393bc540f862 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c @@ -75,7 +75,7 @@ gp10b_pmu_acr = { .bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons, }; -#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin"); MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin"); MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin"); diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c index 266a087fe14c1..3c24a63b6be8c 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c @@ -607,7 +607,7 @@ static int ili9882t_add(struct ili9882t *ili) ili->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(ili->enable_gpio)) { - dev_err(dev, "cannot get reset-gpios %ld\n", + dev_err(dev, "cannot get enable-gpios %ld\n", PTR_ERR(ili->enable_gpio)); return PTR_ERR(ili->enable_gpio); } diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c index 45d09e6fa667f..7d68a8acfe2ea 100644 --- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c +++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c @@ -109,13 +109,13 @@ static int jadard_prepare(struct drm_panel *panel) if (jadard->desc->lp11_to_reset_delay_ms) msleep(jadard->desc->lp11_to_reset_delay_ms); - gpiod_set_value(jadard->reset, 1); + gpiod_set_value(jadard->reset, 0); msleep(5); - gpiod_set_value(jadard->reset, 0); + gpiod_set_value(jadard->reset, 1); msleep(10); - gpiod_set_value(jadard->reset, 1); + gpiod_set_value(jadard->reset, 0); msleep(130); ret = jadard->desc->init(jadard); @@ -1130,7 +1130,7 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi) dsi->format = desc->format; dsi->lanes = desc->lanes; - jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(jadard->reset)) { DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n"); return PTR_ERR(jadard->reset); diff --git a/drivers/gpu/drm/panthor/panthor_fw.h b/drivers/gpu/drm/panthor/panthor_fw.h index 22448abde9923..6598d96c6d2aa 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.h +++ b/drivers/gpu/drm/panthor/panthor_fw.h @@ -102,9 +102,9 @@ struct panthor_fw_cs_output_iface { #define CS_STATUS_BLOCKED_REASON_SB_WAIT 1 #define CS_STATUS_BLOCKED_REASON_PROGRESS_WAIT 2 #define CS_STATUS_BLOCKED_REASON_SYNC_WAIT 3 -#define CS_STATUS_BLOCKED_REASON_DEFERRED 5 -#define CS_STATUS_BLOCKED_REASON_RES 6 -#define CS_STATUS_BLOCKED_REASON_FLUSH 7 +#define CS_STATUS_BLOCKED_REASON_DEFERRED 4 +#define CS_STATUS_BLOCKED_REASON_RESOURCE 5 +#define CS_STATUS_BLOCKED_REASON_FLUSH 6 #define CS_STATUS_BLOCKED_REASON_MASK GENMASK(3, 0) u32 status_blocked_reason; u32 status_wait_sync_value_hi; diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 05c13102a8cb8..d22889fbfa9c8 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c 
@@ -359,7 +359,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev) return -1; } -static void r300_gpu_init(struct radeon_device *rdev) +/* rs400_gpu_init also calls this! */ +void r300_gpu_init(struct radeon_device *rdev) { uint32_t gb_tile_config, tmp; diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 1e00f6b99f94b..8f5e07834fcc6 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -165,6 +165,7 @@ void r200_set_safe_registers(struct radeon_device *rdev); */ extern int r300_init(struct radeon_device *rdev); extern void r300_fini(struct radeon_device *rdev); +extern void r300_gpu_init(struct radeon_device *rdev); extern int r300_suspend(struct radeon_device *rdev); extern int r300_resume(struct radeon_device *rdev); extern int r300_asic_reset(struct radeon_device *rdev, bool hard); diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index d1871af967d4a..2355a78e1b69d 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -557,7 +557,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) { int session_idx = -1; bool destroyed = false, created = false, allocated = false; - uint32_t tmp, handle = 0; + uint32_t tmp = 0, handle = 0; uint32_t *size = &tmp; int i, r = 0; diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index d6c18fd740ec6..13cd0a688a65c 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -256,8 +256,22 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev) static void rs400_gpu_init(struct radeon_device *rdev) { - /* FIXME: is this correct ? */ - r420_pipes_init(rdev); + /* Earlier code was calling r420_pipes_init and then + * rs400_mc_wait_for_idle(rdev). The problem is that, + * at least on my Mobility Radeon Xpress 200M RC410 card + * (which ends up in this code path), the r420 pipe + * initialization method ends up with num_gb_pipes == 3 + * while the card seems to have only one pipe. + * + * Problems showed up as HyperZ glitches, see: + * https://bugs.freedesktop.org/show_bug.cgi?id=110897 + * + * Delegating initialization to the r300 code seems to work + * and results in proper pipe numbers. The rs400 cards + * are said to be r300-class rather than r400-class cards. + */ + r300_gpu_init(rdev); + if (rs400_mc_wait_for_idle(rdev)) { pr_warn("rs400: Failed to wait MC idle while programming pipes. Bad things might happen. 
%08x\n", RREG32(RADEON_MC_STATUS)); diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h index c75302ca3427c..f56e77e7f6d02 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h @@ -21,7 +21,7 @@ * */ -#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#if !defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #define _GPU_SCHED_TRACE_H_ #include @@ -106,7 +106,7 @@ TRACE_EVENT(drm_sched_job_wait_dep, __entry->seqno) ); -#endif +#endif /* _GPU_SCHED_TRACE_H_ */ /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index a75eede8bf8da..002057be0d84a 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -259,9 +259,16 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity) struct drm_sched_fence *s_fence = job->s_fence; dma_fence_get(&s_fence->finished); - if (!prev || dma_fence_add_callback(prev, &job->finish_cb, - drm_sched_entity_kill_jobs_cb)) + if (!prev || + dma_fence_add_callback(prev, &job->finish_cb, + drm_sched_entity_kill_jobs_cb)) { + /* + * Adding callback above failed. + * dma_fence_put() checks for NULL. + */ + dma_fence_put(prev); drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); + } prev = &s_fence->finished; } diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c index 84bfde31d1724..fd1b858dcb788 100644 --- a/drivers/gpu/drm/solomon/ssd130x-spi.c +++ b/drivers/gpu/drm/solomon/ssd130x-spi.c @@ -151,7 +151,6 @@ static const struct of_device_id ssd130x_of_match[] = { }; MODULE_DEVICE_TABLE(of, ssd130x_of_match); -#if IS_MODULE(CONFIG_DRM_SSD130X_SPI) /* * The SPI core always reports a MODALIAS uevent of the form "spi:", even * if the device was registered via OF. This means that the module will not be @@ -160,7 +159,7 @@ MODULE_DEVICE_TABLE(of, ssd130x_of_match); * To workaround this issue, add a SPI device ID table. Even when this should * not be needed for this driver to match the registered SPI devices. 
*/ -static const struct spi_device_id ssd130x_spi_table[] = { +static const struct spi_device_id ssd130x_spi_id[] = { /* ssd130x family */ { "sh1106", SH1106_ID }, { "ssd1305", SSD1305_ID }, @@ -175,14 +174,14 @@ static const struct spi_device_id ssd130x_spi_table[] = { { "ssd1331", SSD1331_ID }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(spi, ssd130x_spi_table); -#endif +MODULE_DEVICE_TABLE(spi, ssd130x_spi_id); static struct spi_driver ssd130x_spi_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = ssd130x_of_match, }, + .id_table = ssd130x_spi_id, .probe = ssd130x_spi_probe, .remove = ssd130x_spi_remove, .shutdown = ssd130x_spi_shutdown, diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index 6f51bcf774e27..06f5057690bd8 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -880,7 +880,7 @@ static int ssd132x_update_rect(struct ssd130x_device *ssd130x, u8 n1 = buf[i * width + j]; u8 n2 = buf[i * width + j + 1]; - data_array[array_idx++] = (n2 << 4) | n1; + data_array[array_idx++] = (n2 & 0xf0) | (n1 >> 4); } } @@ -1037,7 +1037,7 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb, struct drm_format_conv_state *fmtcnv_state) { struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev); - unsigned int dst_pitch = drm_rect_width(rect); + unsigned int dst_pitch; struct iosys_map dst; int ret = 0; @@ -1046,6 +1046,8 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb, rect->x2 = min_t(unsigned int, round_up(rect->x2, SSD132X_SEGMENT_WIDTH), ssd130x->width); + dst_pitch = drm_rect_width(rect); + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); if (ret) return ret; diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile index f203ac5514ae0..f778a4eee7c9c 100644 --- a/drivers/gpu/drm/sti/Makefile +++ b/drivers/gpu/drm/sti/Makefile @@ -7,8 +7,6 @@ sti-drm-y := \ sti_compositor.o \ sti_crtc.o \ sti_plane.o \ - sti_crtc.o \ - sti_plane.o \ sti_hdmi.o \ sti_hdmi_tx3g4c28phy.o \ sti_dvo.o \ diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c index 7516f6cb36e4e..3e9518d7b8b7e 100644 --- a/drivers/gpu/drm/tests/drm_client_modeset_test.c +++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c @@ -95,6 +95,9 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test) expected_mode = drm_mode_find_dmt(priv->drm, 1920, 1080, 60, false); KUNIT_ASSERT_NOT_NULL(test, expected_mode); + ret = drm_kunit_add_mode_destroy_action(test, expected_mode); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline, connector, diff --git a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c index 59c8408c453c2..1cfcb597b088b 100644 --- a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c +++ b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c @@ -7,6 +7,7 @@ #include #include +#include #include static const struct drm_connector no_connector = {}; @@ -955,8 +956,15 @@ struct drm_cmdline_tv_option_test { static void drm_test_cmdline_tv_options(struct kunit *test) { const struct drm_cmdline_tv_option_test *params = test->param_value; - const struct drm_display_mode *expected_mode = params->mode_fn(NULL); + struct drm_display_mode *expected_mode; struct drm_cmdline_mode mode = { }; + int ret; + + expected_mode = params->mode_fn(NULL); + KUNIT_ASSERT_NOT_NULL(test, expected_mode); + + ret = drm_kunit_add_mode_destroy_action(test, expected_mode); 
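+ /* The action registered above calls drm_mode_destroy() on expected_mode when the test finishes, so the duplicated mode is not leaked. */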
+ KUNIT_ASSERT_EQ(test, ret, 0); KUNIT_EXPECT_TRUE(test, drm_mode_parse_command_line_for_connector(params->cmdline, &no_connector, &mode)); diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c index cbd9584af3299..383fbe128348e 100644 --- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c +++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c @@ -258,15 +258,16 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test) 8); KUNIT_ASSERT_NOT_NULL(test, priv); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; + preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_ASSERT_EQ(test, ret, 0); @@ -321,15 +322,16 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes 8); KUNIT_ASSERT_NOT_NULL(test, priv); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; + preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_ASSERT_EQ(test, ret, 0); @@ -384,18 +386,18 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test) 8); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_ASSERT_EQ(test, ret, 0); @@ -450,7 +452,6 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test) mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); - drm = &priv->drm; crtc = priv->crtc; ret = light_up_connector(test, drm, crtc, conn, mode, ctx); KUNIT_ASSERT_EQ(test, ret, 0); @@ -496,18 +497,18 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test) 8); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_ASSERT_EQ(test, ret, 0); @@ -564,7 +565,6 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test) mode = drm_kunit_display_mode_from_cea_vic(test, 
drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); - drm = &priv->drm; crtc = priv->crtc; ret = light_up_connector(test, drm, crtc, conn, mode, ctx); KUNIT_ASSERT_EQ(test, ret, 0); @@ -612,18 +612,18 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test) 8); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_ASSERT_EQ(test, ret, 0); @@ -680,7 +680,6 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); - drm = &priv->drm; crtc = priv->crtc; ret = light_up_connector(test, drm, crtc, conn, mode, ctx); KUNIT_ASSERT_EQ(test, ret, 0); @@ -730,20 +729,20 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test) 10); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; ret = set_connector_edid(test, conn, test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz, ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz)); KUNIT_ASSERT_GT(test, ret, 0); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_ASSERT_EQ(test, ret, 0); @@ -804,20 +803,20 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test) 10); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; ret = set_connector_edid(test, conn, test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz, ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz)); KUNIT_ASSERT_GT(test, ret, 0); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_ASSERT_EQ(test, ret, 0); @@ -875,6 +874,8 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test) 12); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; ret = set_connector_edid(test, conn, test_edid_dvi_1080p, @@ -884,14 +885,12 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test) info = &conn->display_info; KUNIT_ASSERT_FALSE(test, info->is_hdmi); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); 
KUNIT_ASSERT_EQ(test, ret, 0); @@ -922,21 +921,21 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test) 8); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; ret = set_connector_edid(test, conn, test_edid_hdmi_1080p_rgb_max_200mhz, ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz)); KUNIT_ASSERT_GT(test, ret, 0); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_ASSERT_EQ(test, ret, 0); @@ -969,21 +968,21 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test) 10); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; ret = set_connector_edid(test, conn, test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz, ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz)); KUNIT_ASSERT_GT(test, ret, 0); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_ASSERT_EQ(test, ret, 0); @@ -1016,21 +1015,21 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test) 12); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; ret = set_connector_edid(test, conn, test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz, ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz)); KUNIT_ASSERT_GT(test, ret, 0); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_ASSERT_EQ(test, ret, 0); @@ -1067,15 +1066,16 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test) 8); KUNIT_ASSERT_NOT_NULL(test, priv); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; + preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_ASSERT_EQ(test, ret, 0); @@ -1123,6 +1123,8 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test) 12); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; ret = set_connector_edid(test, conn, test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz, @@ -1133,9 +1135,6 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test) KUNIT_ASSERT_TRUE(test, 
info->is_hdmi); KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK); @@ -1146,8 +1145,9 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test) rate = drm_hdmi_compute_mode_clock(preferred, 10, HDMI_COLORSPACE_RGB); KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_EXPECT_EQ(test, ret, 0); @@ -1192,6 +1192,8 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test) 12); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; ret = set_connector_edid(test, conn, test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz, @@ -1202,9 +1204,6 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test) KUNIT_ASSERT_TRUE(test, info->is_hdmi); KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK); @@ -1218,8 +1217,9 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test) rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422); KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_EXPECT_EQ(test, ret, 0); @@ -1266,9 +1266,6 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test) KUNIT_ASSERT_TRUE(test, info->is_hdmi); KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); @@ -1282,7 +1279,9 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test) rate = mode->clock * 1500; KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); - drm = &priv->drm; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + crtc = priv->crtc; ret = light_up_connector(test, drm, crtc, conn, mode, ctx); KUNIT_EXPECT_EQ(test, ret, 0); @@ -1316,6 +1315,8 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test) 12); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; ret = set_connector_edid(test, conn, test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz, @@ -1326,9 +1327,6 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test) KUNIT_ASSERT_TRUE(test, info->is_hdmi); KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); @@ -1347,8 +1345,9 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test) rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422); 
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_EXPECT_EQ(test, ret, 0); @@ -1383,6 +1382,8 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test 12); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; ret = set_connector_edid(test, conn, test_edid_hdmi_1080p_rgb_max_200mhz, @@ -1393,9 +1394,6 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test KUNIT_ASSERT_TRUE(test, info->is_hdmi); KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); @@ -1414,8 +1412,9 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422); KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_EXPECT_EQ(test, ret, 0); @@ -1449,6 +1448,8 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test 8); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; ret = set_connector_edid(test, conn, test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz, @@ -1459,9 +1460,6 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test KUNIT_ASSERT_TRUE(test, info->is_hdmi); KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); @@ -1472,8 +1470,9 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB); KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_EXPECT_EQ(test, ret, 0); @@ -1509,6 +1508,8 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes 12); KUNIT_ASSERT_NOT_NULL(test, priv); + drm = &priv->drm; + crtc = priv->crtc; conn = &priv->connector; ret = set_connector_edid(test, conn, test_edid_hdmi_1080p_rgb_max_340mhz, @@ -1519,9 +1520,6 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes KUNIT_ASSERT_TRUE(test, info->is_hdmi); KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0); - ctx = drm_kunit_helper_acquire_ctx_alloc(test); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - preferred = find_preferred_mode(conn); KUNIT_ASSERT_NOT_NULL(test, preferred); @@ -1532,8 +1530,9 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB); KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000); - drm = &priv->drm; - crtc = priv->crtc; + ctx = drm_kunit_helper_acquire_ctx_alloc(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); + ret = 
light_up_connector(test, drm, crtc, conn, preferred, ctx); KUNIT_EXPECT_EQ(test, ret, 0); diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c index 3c0b7824c0be3..922c4b6ed1dc9 100644 --- a/drivers/gpu/drm/tests/drm_kunit_helpers.c +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c @@ -319,6 +319,28 @@ static void kunit_action_drm_mode_destroy(void *ptr) drm_mode_destroy(NULL, mode); } +/** + * drm_kunit_add_mode_destroy_action() - Add a drm_destroy_mode kunit action + * @test: The test context object + * @mode: The drm_display_mode to destroy eventually + * + * Registers a kunit action that will destroy the drm_display_mode at + * the end of the test. + * + * If an error occurs, the drm_display_mode will be destroyed. + * + * Returns: + * 0 on success, an error code otherwise. + */ +int drm_kunit_add_mode_destroy_action(struct kunit *test, + struct drm_display_mode *mode) +{ + return kunit_add_action_or_reset(test, + kunit_action_drm_mode_destroy, + mode); +} +EXPORT_SYMBOL_GPL(drm_kunit_add_mode_destroy_action); + /** * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test * @test: The test context object diff --git a/drivers/gpu/drm/tests/drm_modes_test.c b/drivers/gpu/drm/tests/drm_modes_test.c index 6ed51f99e133c..7ba646d87856f 100644 --- a/drivers/gpu/drm/tests/drm_modes_test.c +++ b/drivers/gpu/drm/tests/drm_modes_test.c @@ -40,6 +40,7 @@ static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test) { struct drm_test_modes_priv *priv = test->priv; struct drm_display_mode *mode; + int ret; mode = drm_analog_tv_mode(priv->drm, DRM_MODE_TV_MODE_NTSC, @@ -47,6 +48,9 @@ static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test) true); KUNIT_ASSERT_NOT_NULL(test, mode); + ret = drm_kunit_add_mode_destroy_action(test, mode); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 60); KUNIT_EXPECT_EQ(test, mode->hdisplay, 720); @@ -70,6 +74,7 @@ static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test) { struct drm_test_modes_priv *priv = test->priv; struct drm_display_mode *expected, *mode; + int ret; expected = drm_analog_tv_mode(priv->drm, DRM_MODE_TV_MODE_NTSC, @@ -77,9 +82,15 @@ static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test) true); KUNIT_ASSERT_NOT_NULL(test, expected); + ret = drm_kunit_add_mode_destroy_action(test, expected); + KUNIT_ASSERT_EQ(test, ret, 0); + mode = drm_mode_analog_ntsc_480i(priv->drm); KUNIT_ASSERT_NOT_NULL(test, mode); + ret = drm_kunit_add_mode_destroy_action(test, mode); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode)); } @@ -87,6 +98,7 @@ static void drm_test_modes_analog_tv_pal_576i(struct kunit *test) { struct drm_test_modes_priv *priv = test->priv; struct drm_display_mode *mode; + int ret; mode = drm_analog_tv_mode(priv->drm, DRM_MODE_TV_MODE_PAL, @@ -94,6 +106,9 @@ static void drm_test_modes_analog_tv_pal_576i(struct kunit *test) true); KUNIT_ASSERT_NOT_NULL(test, mode); + ret = drm_kunit_add_mode_destroy_action(test, mode); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50); KUNIT_EXPECT_EQ(test, mode->hdisplay, 720); @@ -117,6 +132,7 @@ static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test) { struct drm_test_modes_priv *priv = test->priv; struct drm_display_mode *expected, *mode; + int ret; expected = drm_analog_tv_mode(priv->drm, DRM_MODE_TV_MODE_PAL, @@ -124,9 +140,15 @@ static void 
drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test) true); KUNIT_ASSERT_NOT_NULL(test, expected); + ret = drm_kunit_add_mode_destroy_action(test, expected); + KUNIT_ASSERT_EQ(test, ret, 0); + mode = drm_mode_analog_pal_576i(priv->drm); KUNIT_ASSERT_NOT_NULL(test, mode); + ret = drm_kunit_add_mode_destroy_action(test, mode); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode)); } diff --git a/drivers/gpu/drm/tests/drm_probe_helper_test.c b/drivers/gpu/drm/tests/drm_probe_helper_test.c index bc09ff38aca18..db0e4f5df275e 100644 --- a/drivers/gpu/drm/tests/drm_probe_helper_test.c +++ b/drivers/gpu/drm/tests/drm_probe_helper_test.c @@ -98,7 +98,7 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test) struct drm_connector *connector = &priv->connector; struct drm_cmdline_mode *cmdline = &connector->cmdline_mode; struct drm_display_mode *mode; - const struct drm_display_mode *expected; + struct drm_display_mode *expected; size_t len; int ret; @@ -134,6 +134,9 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test) KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected)); KUNIT_EXPECT_TRUE(test, mode->type & DRM_MODE_TYPE_PREFERRED); + + ret = drm_kunit_add_mode_destroy_action(test, expected); + KUNIT_ASSERT_EQ(test, ret, 0); } if (params->num_expected_modes >= 2) { @@ -145,6 +148,9 @@ drm_test_connector_helper_tv_get_modes_check(struct kunit *test) KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected)); KUNIT_EXPECT_FALSE(test, mode->type & DRM_MODE_TYPE_PREFERRED); + + ret = drm_kunit_add_mode_destroy_action(test, expected); + KUNIT_ASSERT_EQ(test, ret, 0); } mutex_unlock(&priv->drm->mode_config.mutex); diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 1f78aa3d26bbd..768dfea15aec0 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -455,7 +455,7 @@ static void repaper_frame_fixed_repeat(struct repaper_epd *epd, u8 fixed_value, enum repaper_stage stage) { u64 start = local_clock(); - u64 end = start + (epd->factored_stage_time * 1000 * 1000); + u64 end = start + ((u64)epd->factored_stage_time * 1000 * 1000); do { repaper_frame_fixed(epd, fixed_value, stage); @@ -466,7 +466,7 @@ static void repaper_frame_data_repeat(struct repaper_epd *epd, const u8 *image, const u8 *mask, enum repaper_stage stage) { u64 start = local_clock(); - u64 end = start + (epd->factored_stage_time * 1000 * 1000); + u64 end = start + ((u64)epd->factored_stage_time * 1000 * 1000); do { repaper_frame_data(epd, image, mask, stage); diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 4f935f1d50a94..4a6aa36619fe3 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -319,11 +319,15 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job) struct drm_device *dev = &v3d->drm; struct dma_fence *fence; + if (unlikely(job->base.base.s_fence->finished.error)) + return NULL; + + v3d->tfu_job = job; + fence = v3d_fence_create(v3d, V3D_TFU); if (IS_ERR(fence)) return NULL; - v3d->tfu_job = job; if (job->base.irq_fence) dma_fence_put(job->base.irq_fence); job->base.irq_fence = dma_fence_get(fence); @@ -361,6 +365,9 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) struct dma_fence *fence; int i, csd_cfg0_reg; + if (unlikely(job->base.base.s_fence->finished.error)) + return NULL; + v3d->csd_job = job; v3d_invalidate_caches(v3d); @@ -403,7 +410,8 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job) struct v3d_bo *bo = 
to_v3d_bo(job->base.bo[0]); struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect); struct drm_v3d_submit_csd *args = &indirect_csd->job->args; - u32 *wg_counts; + struct v3d_dev *v3d = job->base.v3d; + u32 num_batches, *wg_counts; v3d_get_bo_vaddr(bo); v3d_get_bo_vaddr(indirect); @@ -416,8 +424,17 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job) args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT; args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT; args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT; - args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) * - (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1; + + num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) * + (wg_counts[0] * wg_counts[1] * wg_counts[2]); + + /* V3D 7.1.6 and later don't subtract 1 from the number of batches */ + if (v3d->ver < 71 || (v3d->ver == 71 && v3d->rev < 6)) + args->cfg[4] = num_batches - 1; + else + args->cfg[4] = num_batches; + + WARN_ON(args->cfg[4] == ~0); for (int i = 0; i < 3; i++) { /* 0xffffffff indicates that the uniform rewrite is not needed */ diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c index e7441b227b3ce..3d6785d081f2c 100644 --- a/drivers/gpu/drm/vkms/vkms_composer.c +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -98,7 +98,7 @@ static u16 lerp_u16(u16 a, u16 b, s64 t) s64 delta = drm_fixp_mul(b_fp - a_fp, t); - return drm_fixp2int(a_fp + delta); + return drm_fixp2int_round(a_fp + delta); } static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value) diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 0c1a713b7b7b3..be642ee739c4f 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -245,17 +245,19 @@ static int __init vkms_init(void) if (!config) return -ENOMEM; - default_config = config; - config->cursor = enable_cursor; config->writeback = enable_writeback; config->overlay = enable_overlay; ret = vkms_create(config); - if (ret) + if (ret) { kfree(config); + return ret; + } - return ret; + default_config = config; + + return 0; } static void vkms_destroy(struct vkms_config *config) @@ -279,9 +281,10 @@ static void vkms_destroy(struct vkms_config *config) static void __exit vkms_exit(void) { - if (default_config->dev) - vkms_destroy(default_config); + if (!default_config) + return; + vkms_destroy(default_config); kfree(default_config); } diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c index a50ab9eae40ae..f99d38cc5d8e9 100644 --- a/drivers/gpu/drm/xe/display/xe_plane_initial.c +++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c @@ -194,8 +194,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc, to_intel_plane(crtc->base.primary); struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); struct drm_framebuffer *fb; struct i915_vma *vma; @@ -241,14 +239,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc, atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits); plane_config->vma = vma; - - /* - * Flip to the newly created mapping ASAP, so we can re-use the - * first part of GGTT for WOPCM, prevent flickering, and prevent - * the lookup of sysmem scratch pages. 
- */ - plane->check_plane(crtc_state, plane_state); - plane->async_flip(plane, crtc_state, plane_state, true); return; nofb: diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index 7c78496e6213c..192e571348f6b 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -53,7 +53,6 @@ #define RING_CTL(base) XE_REG((base) + 0x3c) #define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */ -#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */ #define RING_START_UDW(base) XE_REG((base) + 0x48) diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index 6e4be52306dfc..d22269a230aa1 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -314,7 +314,6 @@ static inline unsigned int xe_sg_segment_size(struct device *dev) #define i915_gem_object_flush_if_display(obj) ((void)(obj)) -#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) /** * xe_bo_is_mem_type - Whether the bo currently resides in the given * TTM memory type @@ -329,4 +328,3 @@ static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type) return bo->ttm.resource->mem_type == mem_type; } #endif -#endif diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c index 68f309f5e9815..78204578443f4 100644 --- a/drivers/gpu/drm/xe/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/xe_dma_buf.c @@ -58,7 +58,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach) * 1) Avoid pinning in a placement not accessible to some importers. * 2) Pinning in VRAM requires PIN accounting which is a to-do. */ - if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) { + if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT)) { drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n"); return -EINVAL; } @@ -145,10 +145,7 @@ static void xe_dma_buf_unmap(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { - struct dma_buf *dma_buf = attach->dmabuf; - struct xe_bo *bo = gem_to_xe_bo(dma_buf->priv); - - if (!xe_bo_is_vram(bo)) { + if (sg_page(sgt->sgl)) { dma_unmap_sgtable(attach->dev, sgt, dir, 0); sg_free_table(sgt); kfree(sgt); diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index b940688c36135..17ba15132a984 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -32,6 +32,7 @@ #include "xe_gt_pagefault.h" #include "xe_gt_printk.h" #include "xe_gt_sriov_pf.h" +#include "xe_gt_sriov_vf.h" #include "xe_gt_sysfs.h" #include "xe_gt_tlb_invalidation.h" #include "xe_gt_topology.h" @@ -379,9 +380,7 @@ int xe_gt_init_early(struct xe_gt *gt) if (err) return err; - xe_wa_process_gt(gt); xe_wa_process_oob(gt); - xe_tuning_process_gt(gt); xe_force_wake_init_gt(gt, gt_to_fw(gt)); spin_lock_init(&gt->global_invl_lock); @@ -469,6 +468,8 @@ static int all_fw_domain_init(struct xe_gt *gt) goto err_hw_fence_irq; xe_gt_mcr_set_implicit_defaults(gt); + xe_wa_process_gt(gt); + xe_tuning_process_gt(gt); xe_reg_sr_apply_mmio(&gt->reg_sr, gt); err = xe_gt_clock_init(gt); @@ -647,6 +648,9 @@ static int do_gt_reset(struct xe_gt *gt) { int err; + if (IS_SRIOV_VF(gt_to_xe(gt))) + return xe_gt_sriov_vf_reset(gt); + xe_gsc_wa_14015076503(gt, true); xe_mmio_write32(gt, GDRST, GRDOM_FULL); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c index 4ebc82e607af6..f982d6f9f218d 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c @@ -57,6 +57,22 @@ static int
vf_reset_guc_state(struct xe_gt *gt) return err; } +/** + * xe_gt_sriov_vf_reset - Reset GuC VF internal state. + * @gt: the &xe_gt + * + * It requires functional `GuC MMIO based communication`_. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_vf_reset(struct xe_gt *gt) +{ + if (!xe_device_uc_enabled(gt_to_xe(gt))) + return -ENODEV; + + return vf_reset_guc_state(gt); +} + static int guc_action_match_version(struct xe_guc *guc, u32 wanted_branch, u32 wanted_major, u32 wanted_minor, u32 *branch, u32 *major, u32 *minor, u32 *patch) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h index e541ce57bec24..576ff5e795a8b 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h @@ -12,6 +12,7 @@ struct drm_printer; struct xe_gt; struct xe_reg; +int xe_gt_sriov_vf_reset(struct xe_gt *gt); int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt); int xe_gt_sriov_vf_query_config(struct xe_gt *gt); int xe_gt_sriov_vf_connect(struct xe_gt *gt); diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index ace1fe831a7b7..98a450271f5ce 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -310,6 +310,13 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt) return 0; } +/* + * Ensure that roundup_pow_of_two(length) doesn't overflow. + * Note that roundup_pow_of_two() operates on unsigned long, + * not on u64. + */ +#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX)) + /** * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an * address range @@ -334,6 +341,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt, struct xe_device *xe = gt_to_xe(gt); #define MAX_TLB_INVALIDATION_LEN 7 u32 action[MAX_TLB_INVALIDATION_LEN]; + u64 length = end - start; int len = 0; xe_gt_assert(gt, fence); @@ -346,11 +354,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt, action[len++] = XE_GUC_ACTION_TLB_INVALIDATION; action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */ - if (!xe->info.has_range_tlb_invalidation) { + if (!xe->info.has_range_tlb_invalidation || + length > MAX_RANGE_TLB_INVALIDATION_LENGTH) { action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL); } else { u64 orig_start = start; - u64 length = end - start; u64 align; if (length < SZ_4K) diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c index d1902a8581ca1..e144fd41c0a76 100644 --- a/drivers/gpu/drm/xe/xe_guc_ads.c +++ b/drivers/gpu/drm/xe/xe_guc_ads.c @@ -483,24 +483,52 @@ static void fill_engine_enable_masks(struct xe_gt *gt, engine_enable_mask(gt, XE_ENGINE_CLASS_OTHER)); } -static void guc_prep_golden_lrc_null(struct xe_guc_ads *ads) +/* + * Write the offsets corresponding to the golden LRCs. 
The actual data is + * populated later by guc_golden_lrc_populate() + */ +static void guc_golden_lrc_init(struct xe_guc_ads *ads) { struct xe_device *xe = ads_to_xe(ads); + struct xe_gt *gt = ads_to_gt(ads); struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads), offsetof(struct __guc_ads_blob, system_info)); - u8 guc_class; + size_t alloc_size, real_size; + u32 addr_ggtt, offset; + int class; + + offset = guc_ads_golden_lrc_offset(ads); + addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset; + + for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) { + u8 guc_class; + + guc_class = xe_engine_class_to_guc_class(class); - for (guc_class = 0; guc_class <= GUC_MAX_ENGINE_CLASSES; ++guc_class) { if (!info_map_read(xe, &info_map, engine_enabled_masks[guc_class])) continue; + real_size = xe_gt_lrc_size(gt, class); + alloc_size = PAGE_ALIGN(real_size); + + /* + * This interface is slightly confusing. We need to pass the + * base address of the full golden context and the size of just + * the engine state, which is the section of the context image + * that starts after the execlists LRC registers. This is + * required to allow the GuC to restore just the engine state + * when a watchdog reset occurs. + * We calculate the engine state size by removing the size of + * what comes before it in the context image (which is identical + * on all engines). + */ ads_blob_write(ads, ads.eng_state_size[guc_class], - guc_ads_golden_lrc_size(ads) - - xe_lrc_skip_size(xe)); + real_size - xe_lrc_skip_size(xe)); ads_blob_write(ads, ads.golden_context_lrca[guc_class], - xe_bo_ggtt_addr(ads->bo) + - guc_ads_golden_lrc_offset(ads)); + addr_ggtt); + + addr_ggtt += alloc_size; } } @@ -710,7 +738,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads) xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size); guc_policies_init(ads); - guc_prep_golden_lrc_null(ads); + guc_golden_lrc_init(ads); guc_mapping_table_init_invalid(gt, &info_map); guc_doorbell_init(ads); @@ -736,7 +764,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads) guc_policies_init(ads); fill_engine_enable_masks(gt, &info_map); guc_mmio_reg_state_init(ads); - guc_prep_golden_lrc_null(ads); + guc_golden_lrc_init(ads); guc_mapping_table_init(gt, &info_map); guc_capture_list_init(ads); guc_doorbell_init(ads); @@ -756,18 +784,22 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads) guc_ads_private_data_offset(ads)); } -static void guc_populate_golden_lrc(struct xe_guc_ads *ads) +/* + * After the golden LRC's are recorded for each engine class by the first + * submission, copy them to the ADS, as initialized earlier by + * guc_golden_lrc_init(). + */ +static void guc_golden_lrc_populate(struct xe_guc_ads *ads) { struct xe_device *xe = ads_to_xe(ads); struct xe_gt *gt = ads_to_gt(ads); struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads), offsetof(struct __guc_ads_blob, system_info)); size_t total_size = 0, alloc_size, real_size; - u32 addr_ggtt, offset; + u32 offset; int class; offset = guc_ads_golden_lrc_offset(ads); - addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset; for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) { u8 guc_class; @@ -784,26 +816,9 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads) alloc_size = PAGE_ALIGN(real_size); total_size += alloc_size; - /* - * This interface is slightly confusing. We need to pass the - * base address of the full golden context and the size of just - * the engine state, which is the section of the context image - * that starts after the execlists LRC registers. 
This is - * required to allow the GuC to restore just the engine state - * when a watchdog reset occurs. - * We calculate the engine state size by removing the size of - * what comes before it in the context image (which is identical - * on all engines). - */ - ads_blob_write(ads, ads.eng_state_size[guc_class], - real_size - xe_lrc_skip_size(xe)); - ads_blob_write(ads, ads.golden_context_lrca[guc_class], - addr_ggtt); - xe_map_memcpy_to(xe, ads_to_map(ads), offset, gt->default_lrc[class], real_size); - addr_ggtt += alloc_size; offset += alloc_size; } @@ -812,7 +827,7 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads) void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads) { - guc_populate_golden_lrc(ads); + guc_golden_lrc_populate(ads); } static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_offset) diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index fed23304e4da5..20d05efdd406e 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -1213,9 +1213,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w) xe_pm_runtime_get(guc_to_xe(guc)); trace_xe_exec_queue_destroy(q); + release_guc_id(guc, q); if (xe_exec_queue_is_lr(q)) cancel_work_sync(&ge->lr_tdr); - release_guc_id(guc, q); + /* Confirm no work left behind accessing device structures */ + cancel_delayed_work_sync(&ge->sched.base.work_tdr); xe_sched_entity_fini(&ge->entity); xe_sched_fini(&ge->sched); diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c index 2c32dc46f7d48..3d0278c3db935 100644 --- a/drivers/gpu/drm/xe/xe_hmm.c +++ b/drivers/gpu/drm/xe/xe_hmm.c @@ -19,39 +19,51 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end) return (end - start) >> PAGE_SHIFT; } -/* - * xe_mark_range_accessed() - mark a range is accessed, so core mm - * have such information for memory eviction or write back to - * hard disk - * - * @range: the range to mark - * @write: if write to this range, we mark pages in this range - * as dirty - */ -static void xe_mark_range_accessed(struct hmm_range *range, bool write) +static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st, + struct hmm_range *range, struct rw_semaphore *notifier_sem) { - struct page *page; - u64 i, npages; + unsigned long i, npages, hmm_pfn; + unsigned long num_chunks = 0; + int ret; + + /* HMM docs says this is needed. */ + ret = down_read_interruptible(notifier_sem); + if (ret) + return ret; + + if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) { + up_read(notifier_sem); + return -EAGAIN; + } npages = xe_npages_in_range(range->start, range->end); - for (i = 0; i < npages; i++) { - page = hmm_pfn_to_page(range->hmm_pfns[i]); - if (write) - set_page_dirty_lock(page); + for (i = 0; i < npages;) { + unsigned long len; + + hmm_pfn = range->hmm_pfns[i]; + xe_assert(xe, hmm_pfn & HMM_PFN_VALID); - mark_page_accessed(page); + len = 1UL << hmm_pfn_to_map_order(hmm_pfn); + + /* If order > 0 the page may extend beyond range->start */ + len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1); + i += len; + num_chunks++; } + up_read(notifier_sem); + + return sg_alloc_table(st, num_chunks, GFP_KERNEL); } -/* +/** * xe_build_sg() - build a scatter gather table for all the physical pages/pfn * in a hmm_range. dma-map pages if necessary. dma-address is save in sg table * and will be used to program GPU page table later. 
- * * @xe: the xe device who will access the dma-address in sg table * @range: the hmm range that we build the sg table from. range->hmm_pfns[] * has the pfn numbers of pages that back up this hmm address range. * @st: pointer to the sg table. + * @notifier_sem: The xe notifier lock. * @write: whether we write to this range. This decides dma map direction * for system pages. If write we map it bi-diretional; otherwise * DMA_TO_DEVICE @@ -78,43 +90,88 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write) * Returns 0 if successful; -ENOMEM if fails to allocate memory */ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range, - struct sg_table *st, bool write) + struct sg_table *st, + struct rw_semaphore *notifier_sem, + bool write) { + unsigned long npages = xe_npages_in_range(range->start, range->end); struct device *dev = xe->drm.dev; - struct page **pages; - u64 i, npages; - int ret; + struct scatterlist *sgl; + struct page *page; + unsigned long i, j; - npages = xe_npages_in_range(range->start, range->end); - pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL); - if (!pages) - return -ENOMEM; + lockdep_assert_held(notifier_sem); + + i = 0; + for_each_sg(st->sgl, sgl, st->nents, j) { + unsigned long hmm_pfn, size; + + hmm_pfn = range->hmm_pfns[i]; + page = hmm_pfn_to_page(hmm_pfn); + xe_assert(xe, !is_device_private_page(page)); + + size = 1UL << hmm_pfn_to_map_order(hmm_pfn); + size -= page_to_pfn(page) & (size - 1); + i += size; - for (i = 0; i < npages; i++) { - pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]); - xe_assert(xe, !is_device_private_page(pages[i])); + if (unlikely(j == st->nents - 1)) { + xe_assert(xe, i >= npages); + if (i > npages) + size -= (i - npages); + + sg_mark_end(sgl); + } else { + xe_assert(xe, i < npages); + } + + sg_set_page(sgl, page, size << PAGE_SHIFT, 0); } - ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT, - xe_sg_segment_size(dev), GFP_KERNEL); - if (ret) - goto free_pages; + return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING); +} + +static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma) +{ + struct xe_userptr *userptr = &uvma->userptr; + struct xe_vm *vm = xe_vma_vm(&uvma->vma); + + lockdep_assert_held_write(&vm->lock); + lockdep_assert_held(&vm->userptr.notifier_lock); + + mutex_lock(&userptr->unmap_mutex); + xe_assert(vm->xe, !userptr->mapped); + userptr->mapped = true; + mutex_unlock(&userptr->unmap_mutex); +} - ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING); - if (ret) { - sg_free_table(st); - st = NULL; +void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma) +{ + struct xe_userptr *userptr = &uvma->userptr; + struct xe_vma *vma = &uvma->vma; + bool write = !xe_vma_read_only(vma); + struct xe_vm *vm = xe_vma_vm(vma); + struct xe_device *xe = vm->xe; + + if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) && + !lockdep_is_held_type(&vm->lock, 0) && + !(vma->gpuva.flags & XE_VMA_DESTROYED)) { + /* Don't unmap in exec critical section. */ + xe_vm_assert_held(vm); + /* Don't unmap while mapping the sg. */ + lockdep_assert_held(&vm->lock); } -free_pages: - kvfree(pages); - return ret; + mutex_lock(&userptr->unmap_mutex); + if (userptr->sg && userptr->mapped) + dma_unmap_sgtable(xe->drm.dev, userptr->sg, + write ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0); + userptr->mapped = false; + mutex_unlock(&userptr->unmap_mutex); } -/* +/** * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr - * * @uvma: the userptr vma which hold the scatter gather table * * With function xe_userptr_populate_range, we allocate storage of @@ -124,16 +181,9 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range, void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma) { struct xe_userptr *userptr = &uvma->userptr; - struct xe_vma *vma = &uvma->vma; - bool write = !xe_vma_read_only(vma); - struct xe_vm *vm = xe_vma_vm(vma); - struct xe_device *xe = vm->xe; - struct device *dev = xe->drm.dev; - - xe_assert(xe, userptr->sg); - dma_unmap_sgtable(dev, userptr->sg, - write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0); + xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg); + xe_hmm_userptr_unmap(uvma); sg_free_table(userptr->sg); userptr->sg = NULL; } @@ -166,13 +216,20 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, { unsigned long timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); - unsigned long *pfns, flags = HMM_PFN_REQ_FAULT; + unsigned long *pfns; struct xe_userptr *userptr; struct xe_vma *vma = &uvma->vma; u64 userptr_start = xe_vma_userptr(vma); u64 userptr_end = userptr_start + xe_vma_size(vma); struct xe_vm *vm = xe_vma_vm(vma); - struct hmm_range hmm_range; + struct hmm_range hmm_range = { + .pfn_flags_mask = 0, /* ignore pfns */ + .default_flags = HMM_PFN_REQ_FAULT, + .start = userptr_start, + .end = userptr_end, + .notifier = &uvma->userptr.notifier, + .dev_private_owner = vm->xe, + }; bool write = !xe_vma_read_only(vma); unsigned long notifier_seq; u64 npages; @@ -199,19 +256,14 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, return -ENOMEM; if (write) - flags |= HMM_PFN_REQ_WRITE; + hmm_range.default_flags |= HMM_PFN_REQ_WRITE; if (!mmget_not_zero(userptr->notifier.mm)) { ret = -EFAULT; goto free_pfns; } - hmm_range.default_flags = flags; hmm_range.hmm_pfns = pfns; - hmm_range.notifier = &userptr->notifier; - hmm_range.start = userptr_start; - hmm_range.end = userptr_end; - hmm_range.dev_private_owner = vm->xe; while (true) { hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier); @@ -238,16 +290,36 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, if (ret) goto free_pfns; - ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write); + ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock); if (ret) goto free_pfns; - xe_mark_range_accessed(&hmm_range, write); + ret = down_read_interruptible(&vm->userptr.notifier_lock); + if (ret) + goto free_st; + + if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) { + ret = -EAGAIN; + goto out_unlock; + } + + ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, + &vm->userptr.notifier_lock, write); + if (ret) + goto out_unlock; + userptr->sg = &userptr->sgt; + xe_hmm_userptr_set_mapped(uvma); userptr->notifier_seq = hmm_range.notifier_seq; + up_read(&vm->userptr.notifier_lock); + kvfree(pfns); + return 0; +out_unlock: + up_read(&vm->userptr.notifier_lock); +free_st: + sg_free_table(&userptr->sgt); free_pfns: kvfree(pfns); return ret; } - diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h index 909dc2bdcd97e..0ea98d8e7bbc7 100644 --- a/drivers/gpu/drm/xe/xe_hmm.h +++ b/drivers/gpu/drm/xe/xe_hmm.h @@ -3,9 +3,16 @@ * Copyright © 2024 Intel Corporation */ +#ifndef _XE_HMM_H_ +#define _XE_HMM_H_ + #include struct 
xe_userptr_vma; int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked); + void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma); + +void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma); +#endif diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c index b53e8d2accdbd..a440442b4d727 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c +++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c @@ -32,14 +32,61 @@ bool xe_hw_engine_timeout_in_range(u64 timeout, u64 min, u64 max) return timeout >= min && timeout <= max; } -static void kobj_xe_hw_engine_release(struct kobject *kobj) +static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj) { kfree(kobj); } +static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct xe_device *xe = kobj_to_xe(kobj); + struct kobj_attribute *kattr; + ssize_t ret = -EIO; + + kattr = container_of(attr, struct kobj_attribute, attr); + if (kattr->show) { + xe_pm_runtime_get(xe); + ret = kattr->show(kobj, kattr, buf); + xe_pm_runtime_put(xe); + } + + return ret; +} + +static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, + size_t count) +{ + struct xe_device *xe = kobj_to_xe(kobj); + struct kobj_attribute *kattr; + ssize_t ret = -EIO; + + kattr = container_of(attr, struct kobj_attribute, attr); + if (kattr->store) { + xe_pm_runtime_get(xe); + ret = kattr->store(kobj, kattr, buf, count); + xe_pm_runtime_put(xe); + } + + return ret; +} + +static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = { + .show = xe_hw_engine_class_sysfs_attr_show, + .store = xe_hw_engine_class_sysfs_attr_store, +}; + static const struct kobj_type kobj_xe_hw_engine_type = { - .release = kobj_xe_hw_engine_release, - .sysfs_ops = &kobj_sysfs_ops + .release = xe_hw_engine_sysfs_kobj_release, + .sysfs_ops = &xe_hw_engine_class_sysfs_ops, +}; + +static const struct kobj_type kobj_xe_hw_engine_type_def = { + .release = xe_hw_engine_sysfs_kobj_release, + .sysfs_ops = &kobj_sysfs_ops, }; static ssize_t job_timeout_max_store(struct kobject *kobj, @@ -543,7 +590,7 @@ static int xe_add_hw_engine_class_defaults(struct xe_device *xe, if (!kobj) return -ENOMEM; - kobject_init(kobj, &kobj_xe_hw_engine_type); + kobject_init(kobj, &kobj_xe_hw_engine_type_def); err = kobject_add(kobj, parent, "%s", ".defaults"); if (err) goto err_object; @@ -559,57 +606,6 @@ static int xe_add_hw_engine_class_defaults(struct xe_device *xe, return err; } -static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj) -{ - kfree(kobj); -} - -static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj, - struct attribute *attr, - char *buf) -{ - struct xe_device *xe = kobj_to_xe(kobj); - struct kobj_attribute *kattr; - ssize_t ret = -EIO; - - kattr = container_of(attr, struct kobj_attribute, attr); - if (kattr->show) { - xe_pm_runtime_get(xe); - ret = kattr->show(kobj, kattr, buf); - xe_pm_runtime_put(xe); - } - - return ret; -} - -static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj, - struct attribute *attr, - const char *buf, - size_t count) -{ - struct xe_device *xe = kobj_to_xe(kobj); - struct kobj_attribute *kattr; - ssize_t ret = -EIO; - - kattr = container_of(attr, struct kobj_attribute, attr); - if (kattr->store) { - xe_pm_runtime_get(xe); - ret = kattr->store(kobj, kattr, buf, count); - xe_pm_runtime_put(xe); - } - - return ret; -} - -static const struct sysfs_ops 
xe_hw_engine_class_sysfs_ops = { - .show = xe_hw_engine_class_sysfs_attr_show, - .store = xe_hw_engine_class_sysfs_attr_store, -}; - -static const struct kobj_type xe_hw_engine_sysfs_kobj_type = { - .release = xe_hw_engine_sysfs_kobj_release, - .sysfs_ops = &xe_hw_engine_class_sysfs_ops, -}; static void hw_engine_class_sysfs_fini(void *arg) { @@ -640,7 +636,7 @@ int xe_hw_engine_class_sysfs_init(struct xe_gt *gt) if (!kobj) return -ENOMEM; - kobject_init(kobj, &xe_hw_engine_sysfs_kobj_type); + kobject_init(kobj, &kobj_xe_hw_engine_type); err = kobject_add(kobj, gt->sysfs, "engines"); if (err) diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 1b97d90aaddaf..6431697c61693 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -1177,7 +1177,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m, err_sync: /* Sync partial copies if any. FIXME: job_mutex? */ if (fence) { - dma_fence_wait(m->fence, false); + dma_fence_wait(fence, false); dma_fence_put(fence); } diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index 6fc00d63b2857..448766033690c 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -36,11 +36,22 @@ #include "xe_pm.h" #include "xe_sched_job.h" #include "xe_sriov.h" +#include "xe_sync.h" #define DEFAULT_POLL_FREQUENCY_HZ 200 #define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ) #define XE_OA_UNIT_INVALID U32_MAX +enum xe_oa_submit_deps { + XE_OA_SUBMIT_NO_DEPS, + XE_OA_SUBMIT_ADD_DEPS, +}; + +enum xe_oa_user_extn_from { + XE_OA_USER_EXTN_FROM_OPEN, + XE_OA_USER_EXTN_FROM_CONFIG, +}; + struct xe_oa_reg { struct xe_reg addr; u32 value; @@ -63,13 +74,8 @@ struct xe_oa_config { struct rcu_head rcu; }; -struct flex { - struct xe_reg reg; - u32 offset; - u32 value; -}; - struct xe_oa_open_param { + struct xe_file *xef; u32 oa_unit_id; bool sample; u32 metric_set; @@ -81,6 +87,9 @@ struct xe_oa_open_param { struct xe_exec_queue *exec_q; struct xe_hw_engine *hwe; bool no_preempt; + struct drm_xe_sync __user *syncs_user; + int num_syncs; + struct xe_sync_entry *syncs; }; struct xe_oa_config_bo { @@ -90,6 +99,17 @@ struct xe_oa_config_bo { struct xe_bb *bb; }; +struct xe_oa_fence { + /* @base: dma fence base */ + struct dma_fence base; + /* @lock: lock for the fence */ + spinlock_t lock; + /* @work: work to signal @base */ + struct delayed_work work; + /* @cb: callback to schedule @work */ + struct dma_fence_cb cb; +}; + #define DRM_FMT(x) DRM_XE_OA_FMT_TYPE_##x static const struct xe_oa_format oa_formats[] = { @@ -162,10 +182,10 @@ static struct xe_oa_config *xe_oa_get_oa_config(struct xe_oa *oa, int metrics_se return oa_config; } -static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo) +static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo, struct dma_fence *last_fence) { xe_oa_config_put(oa_bo->oa_config); - xe_bb_free(oa_bo->bb, NULL); + xe_bb_free(oa_bo->bb, last_fence); kfree(oa_bo); } @@ -567,32 +587,60 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait) return ret; } -static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb) +static void xe_oa_lock_vma(struct xe_exec_queue *q) +{ + if (q->vm) { + down_read(&q->vm->lock); + xe_vm_lock(q->vm, false); + } +} + +static void xe_oa_unlock_vma(struct xe_exec_queue *q) +{ + if (q->vm) { + xe_vm_unlock(q->vm); + up_read(&q->vm->lock); + } +} + +static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa_submit_deps deps, + struct xe_bb *bb) { + 
struct xe_exec_queue *q = stream->exec_q ?: stream->k_exec_q; struct xe_sched_job *job; struct dma_fence *fence; - long timeout; int err = 0; - /* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */ - job = xe_bb_create_job(stream->k_exec_q, bb); + xe_oa_lock_vma(q); + + job = xe_bb_create_job(q, bb); if (IS_ERR(job)) { err = PTR_ERR(job); goto exit; } + job->ggtt = true; + + if (deps == XE_OA_SUBMIT_ADD_DEPS) { + for (int i = 0; i < stream->num_syncs && !err; i++) + err = xe_sync_entry_add_deps(&stream->syncs[i], job); + if (err) { + drm_dbg(&stream->oa->xe->drm, "xe_sync_entry_add_deps err %d\n", err); + goto err_put_job; + } + } xe_sched_job_arm(job); fence = dma_fence_get(&job->drm.s_fence->finished); xe_sched_job_push(job); - timeout = dma_fence_wait_timeout(fence, false, HZ); - dma_fence_put(fence); - if (timeout < 0) - err = timeout; - else if (!timeout) - err = -ETIME; + xe_oa_unlock_vma(q); + + return fence; +err_put_job: + xe_sched_job_put(job); exit: - return err; + xe_oa_unlock_vma(q); + return ERR_PTR(err); } static void write_cs_mi_lri(struct xe_bb *bb, const struct xe_oa_reg *reg_data, u32 n_regs) @@ -636,57 +684,34 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream) xe_oa_config_put(stream->oa_config); llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node) - free_oa_config_bo(oa_bo); -} - -static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc, - struct xe_bb *bb, const struct flex *flex, u32 count) -{ - u32 offset = xe_bo_ggtt_addr(lrc->bo); - - do { - bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1); - bb->cs[bb->len++] = offset + flex->offset * sizeof(u32); - bb->cs[bb->len++] = 0; - bb->cs[bb->len++] = flex->value; - - } while (flex++, --count); + free_oa_config_bo(oa_bo, stream->last_fence); + dma_fence_put(stream->last_fence); } -static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc, - const struct flex *flex, u32 count) +static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri, u32 count) { + struct dma_fence *fence; struct xe_bb *bb; int err; - bb = xe_bb_new(stream->gt, 4 * count, false); + bb = xe_bb_new(stream->gt, 2 * count + 1, false); if (IS_ERR(bb)) { err = PTR_ERR(bb); goto exit; } - xe_oa_store_flex(stream, lrc, bb, flex, count); - - err = xe_oa_submit_bb(stream, bb); - xe_bb_free(bb, NULL); -exit: - return err; -} + write_cs_mi_lri(bb, reg_lri, count); -static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri) -{ - struct xe_bb *bb; - int err; - - bb = xe_bb_new(stream->gt, 3, false); - if (IS_ERR(bb)) { - err = PTR_ERR(bb); - goto exit; + fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb); + if (IS_ERR(fence)) { + err = PTR_ERR(fence); + goto free_bb; } + xe_bb_free(bb, fence); + dma_fence_put(fence); - write_cs_mi_lri(bb, reg_lri, 1); - - err = xe_oa_submit_bb(stream, bb); + return 0; +free_bb: xe_bb_free(bb, NULL); exit: return err; @@ -695,70 +720,54 @@ static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *re static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable) { const struct xe_oa_format *format = stream->oa_buffer.format; - struct xe_lrc *lrc = stream->exec_q->lrc[0]; - u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32); u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) | (enable ? 
OAR_OACONTROL_COUNTER_ENABLE : 0); - struct flex regs_context[] = { + struct xe_oa_reg reg_lri[] = { { OACTXCONTROL(stream->hwe->mmio_base), - stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1, enable ? OA_COUNTER_RESUME : 0, }, + { + OAR_OACONTROL, + oacontrol, + }, { RING_CONTEXT_CONTROL(stream->hwe->mmio_base), - regs_offset + CTX_CONTEXT_CONTROL, - _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE), + _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE, + enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) }, }; - struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol }; - int err; - - /* Modify stream hwe context image with regs_context */ - err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0], - regs_context, ARRAY_SIZE(regs_context)); - if (err) - return err; - /* Apply reg_lri using LRI */ - return xe_oa_load_with_lri(stream, ®_lri); + return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri)); } static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable) { const struct xe_oa_format *format = stream->oa_buffer.format; - struct xe_lrc *lrc = stream->exec_q->lrc[0]; - u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32); u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) | (enable ? OAR_OACONTROL_COUNTER_ENABLE : 0); - struct flex regs_context[] = { + struct xe_oa_reg reg_lri[] = { { OACTXCONTROL(stream->hwe->mmio_base), - stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1, enable ? OA_COUNTER_RESUME : 0, }, + { + OAC_OACONTROL, + oacontrol + }, { RING_CONTEXT_CONTROL(stream->hwe->mmio_base), - regs_offset + CTX_CONTEXT_CONTROL, - _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) | + _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE, + enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) | _MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? 
CTX_CTRL_RUN_ALONE : 0), }, }; - struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol }; - int err; /* Set ccs select to enable programming of OAC_OACONTROL */ xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, __oa_ccs_select(stream)); - /* Modify stream hwe context image with regs_context */ - err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0], - regs_context, ARRAY_SIZE(regs_context)); - if (err) - return err; - - /* Apply reg_lri using LRI */ - return xe_oa_load_with_lri(stream, &reg_lri); + return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri)); } static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable) @@ -840,6 +849,7 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream) xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc)); xe_oa_free_configs(stream); + xe_file_put(stream->xef); } static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream) @@ -910,11 +920,62 @@ xe_oa_alloc_config_buffer(struct xe_oa_stream *stream, struct xe_oa_config *oa_c return oa_bo; } +static void xe_oa_update_last_fence(struct xe_oa_stream *stream, struct dma_fence *fence) +{ + dma_fence_put(stream->last_fence); + stream->last_fence = dma_fence_get(fence); +} + +static void xe_oa_fence_work_fn(struct work_struct *w) +{ + struct xe_oa_fence *ofence = container_of(w, typeof(*ofence), work.work); + + /* Signal fence to indicate new OA configuration is active */ + dma_fence_signal(&ofence->base); + dma_fence_put(&ofence->base); +} + +static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + /* Additional empirical delay needed for NOA programming after registers are written */ +#define NOA_PROGRAM_ADDITIONAL_DELAY_US 500 + + struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb); + + INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn); + queue_delayed_work(system_unbound_wq, &ofence->work, + usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US)); + dma_fence_put(fence); +} + +static const char *xe_oa_get_driver_name(struct dma_fence *fence) +{ + return "xe_oa"; +} + +static const char *xe_oa_get_timeline_name(struct dma_fence *fence) +{ + return "unbound"; +} + +static const struct dma_fence_ops xe_oa_fence_ops = { + .get_driver_name = xe_oa_get_driver_name, + .get_timeline_name = xe_oa_get_timeline_name, +}; + static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config *config) { #define NOA_PROGRAM_ADDITIONAL_DELAY_US 500 struct xe_oa_config_bo *oa_bo; - int err, us = NOA_PROGRAM_ADDITIONAL_DELAY_US; + struct xe_oa_fence *ofence; + int i, err, num_signal = 0; + struct dma_fence *fence; + + ofence = kzalloc(sizeof(*ofence), GFP_KERNEL); + if (!ofence) { + err = -ENOMEM; + goto exit; + } oa_bo = xe_oa_alloc_config_buffer(stream, config); if (IS_ERR(oa_bo)) { @@ -922,11 +983,50 @@ static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config goto exit; } - err = xe_oa_submit_bb(stream, oa_bo->bb); + /* Emit OA configuration batch */ + fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_ADD_DEPS, oa_bo->bb); + if (IS_ERR(fence)) { + err = PTR_ERR(fence); + goto exit; + } - /* Additional empirical delay needed for NOA programming after registers are written */ - usleep_range(us, 2 * us); + /* Point of no return: initialize and set fence to signal */ + spin_lock_init(&ofence->lock); + dma_fence_init(&ofence->base, &xe_oa_fence_ops, &ofence->lock, 0, 0); + + for (i = 0; i < stream->num_syncs; i++) { + if (stream->syncs[i].flags & DRM_XE_SYNC_FLAG_SIGNAL) + num_signal++; +
xe_sync_entry_signal(&stream->syncs[i], &ofence->base); + } + + /* Additional dma_fence_get in case we dma_fence_wait */ + if (!num_signal) + dma_fence_get(&ofence->base); + + /* Update last fence too before adding callback */ + xe_oa_update_last_fence(stream, fence); + + /* Add job fence callback to schedule work to signal ofence->base */ + err = dma_fence_add_callback(fence, &ofence->cb, xe_oa_config_cb); + xe_gt_assert(stream->gt, !err || err == -ENOENT); + if (err == -ENOENT) + xe_oa_config_cb(fence, &ofence->cb); + + /* If nothing needs to be signaled we wait synchronously */ + if (!num_signal) { + dma_fence_wait(&ofence->base, false); + dma_fence_put(&ofence->base); + } + + /* Done with syncs */ + for (i = 0; i < stream->num_syncs; i++) + xe_sync_entry_cleanup(&stream->syncs[i]); + kfree(stream->syncs); + + return 0; exit: + kfree(ofence); return err; } @@ -997,6 +1097,262 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream) return xe_oa_emit_oa_config(stream, stream->oa_config); } +static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name) +{ + u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt); + u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt); + u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt); + u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt); + int idx; + + for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) { + const struct xe_oa_format *f = &oa->oa_formats[idx]; + + if (counter_size == f->counter_size && bc_report == f->bc_report && + type == f->type && counter_sel == f->counter_select) { + *name = idx; + return 0; + } + } + + return -EINVAL; +} + +static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + if (value >= oa->oa_unit_ids) { + drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value); + return -EINVAL; + } + param->oa_unit_id = value; + return 0; +} + +static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->sample = value; + return 0; +} + +static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->metric_set = value; + return 0; +} + +static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + int ret = decode_oa_format(oa, value, ¶m->oa_format); + + if (ret) { + drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value); + return ret; + } + return 0; +} + +static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ +#define OA_EXPONENT_MAX 31 + + if (value > OA_EXPONENT_MAX) { + drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX); + return -EINVAL; + } + param->period_exponent = value; + return 0; +} + +static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->disabled = value; + return 0; +} + +static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->exec_queue_id = value; + return 0; +} + +static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->engine_instance = value; + return 0; +} + +static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->no_preempt = value; + return 0; +} + +static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value, + struct 
xe_oa_open_param *param) +{ + param->num_syncs = value; + return 0; +} + +static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + param->syncs_user = u64_to_user_ptr(value); + return 0; +} + +static int xe_oa_set_prop_ret_inval(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + return -EINVAL; +} + +typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param); +static const xe_oa_set_property_fn xe_oa_set_property_funcs_open[] = { + [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id, + [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa, + [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set, + [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format, + [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent, + [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled, + [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id, + [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance, + [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt, + [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs, + [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user, +}; + +static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = { + [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set, + [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs, + [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user, +}; + +static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_from from, + u64 extension, struct xe_oa_open_param *param) +{ + u64 __user *address = u64_to_user_ptr(extension); + struct drm_xe_ext_set_property ext; + int err; + u32 idx; + + err = __copy_from_user(&ext, address, sizeof(ext)); + if (XE_IOCTL_DBG(oa->xe, err)) + return -EFAULT; + + BUILD_BUG_ON(ARRAY_SIZE(xe_oa_set_property_funcs_open) != + ARRAY_SIZE(xe_oa_set_property_funcs_config)); + + if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) || + XE_IOCTL_DBG(oa->xe, ext.pad)) + return -EINVAL; + + idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open)); + + if (from == XE_OA_USER_EXTN_FROM_CONFIG) + return xe_oa_set_property_funcs_config[idx](oa, ext.value, param); + else + return xe_oa_set_property_funcs_open[idx](oa, ext.value, param); +} + +typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, enum xe_oa_user_extn_from from, + u64 extension, struct xe_oa_open_param *param); +static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = { + [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property, +}; + +#define MAX_USER_EXTENSIONS 16 +static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from from, u64 extension, + int ext_number, struct xe_oa_open_param *param) +{ + u64 __user *address = u64_to_user_ptr(extension); + struct drm_xe_user_extension ext; + int err; + u32 idx; + + if (XE_IOCTL_DBG(oa->xe, ext_number >= 
MAX_USER_EXTENSIONS)) + return -E2BIG; + + err = __copy_from_user(&ext, address, sizeof(ext)); + if (XE_IOCTL_DBG(oa->xe, err)) + return -EFAULT; + + if (XE_IOCTL_DBG(oa->xe, ext.pad) || + XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs))) + return -EINVAL; + + idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs)); + err = xe_oa_user_extension_funcs[idx](oa, from, extension, param); + if (XE_IOCTL_DBG(oa->xe, err)) + return err; + + if (ext.next_extension) + return xe_oa_user_extensions(oa, from, ext.next_extension, ++ext_number, param); + + return 0; +} + +static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param) +{ + int ret, num_syncs, num_ufence = 0; + + if (param->num_syncs && !param->syncs_user) { + drm_dbg(&oa->xe->drm, "num_syncs specified without sync array\n"); + ret = -EINVAL; + goto exit; + } + + if (param->num_syncs) { + param->syncs = kcalloc(param->num_syncs, sizeof(*param->syncs), GFP_KERNEL); + if (!param->syncs) { + ret = -ENOMEM; + goto exit; + } + } + + for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) { + ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs], + &param->syncs_user[num_syncs], 0); + if (ret) + goto err_syncs; + + if (xe_sync_is_ufence(&param->syncs[num_syncs])) + num_ufence++; + } + + if (XE_IOCTL_DBG(oa->xe, num_ufence > 1)) { + ret = -EINVAL; + goto err_syncs; + } + + return 0; + +err_syncs: + while (num_syncs--) + xe_sync_entry_cleanup(&param->syncs[num_syncs]); + kfree(param->syncs); +exit: + return ret; +} + static void xe_oa_stream_enable(struct xe_oa_stream *stream) { stream->pollin = false; @@ -1090,36 +1446,38 @@ static int xe_oa_disable_locked(struct xe_oa_stream *stream) static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg) { - struct drm_xe_ext_set_property ext; + struct xe_oa_open_param param = {}; long ret = stream->oa_config->id; struct xe_oa_config *config; int err; - err = __copy_from_user(&ext, u64_to_user_ptr(arg), sizeof(ext)); - if (XE_IOCTL_DBG(stream->oa->xe, err)) - return -EFAULT; - - if (XE_IOCTL_DBG(stream->oa->xe, ext.pad) || - XE_IOCTL_DBG(stream->oa->xe, ext.base.name != DRM_XE_OA_EXTENSION_SET_PROPERTY) || - XE_IOCTL_DBG(stream->oa->xe, ext.base.next_extension) || - XE_IOCTL_DBG(stream->oa->xe, ext.property != DRM_XE_OA_PROPERTY_OA_METRIC_SET)) - return -EINVAL; + err = xe_oa_user_extensions(stream->oa, XE_OA_USER_EXTN_FROM_CONFIG, arg, 0, &param); + if (err) + return err; - config = xe_oa_get_oa_config(stream->oa, ext.value); + config = xe_oa_get_oa_config(stream->oa, param.metric_set); if (!config) return -ENODEV; - if (config != stream->oa_config) { - err = xe_oa_emit_oa_config(stream, config); - if (!err) - config = xchg(&stream->oa_config, config); - else - ret = err; + param.xef = stream->xef; + err = xe_oa_parse_syncs(stream->oa, &param); + if (err) + goto err_config_put; + + stream->num_syncs = param.num_syncs; + stream->syncs = param.syncs; + + err = xe_oa_emit_oa_config(stream, config); + if (!err) { + config = xchg(&stream->oa_config, config); + drm_dbg(&stream->oa->xe->drm, "changed to oa config uuid=%s\n", + stream->oa_config->uuid); } +err_config_put: xe_oa_config_put(config); - return ret; + return err ?: ret; } static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg) @@ -1358,10 +1716,14 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format]; stream->sample = param->sample; - stream->periodic = param->period_exponent > 0; + 
stream->periodic = param->period_exponent >= 0; stream->period_exponent = param->period_exponent; stream->no_preempt = param->no_preempt; + stream->xef = xe_file_get(param->xef); + stream->num_syncs = param->num_syncs; + stream->syncs = param->syncs; + /* * For Xe2+, when overrun mode is enabled, there are no partial reports at the end * of buffer, making the OA buffer effectively a non-power-of-2 size circular @@ -1458,6 +1820,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, err_free_configs: xe_oa_free_configs(stream); exit: + xe_file_put(stream->xef); return ret; } @@ -1567,27 +1930,6 @@ static bool engine_supports_oa_format(const struct xe_hw_engine *hwe, int type) } } -static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name) -{ - u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt); - u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt); - u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt); - u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt); - int idx; - - for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) { - const struct xe_oa_format *f = &oa->oa_formats[idx]; - - if (counter_size == f->counter_size && bc_report == f->bc_report && - type == f->type && counter_sel == f->counter_select) { - *name = idx; - return 0; - } - } - - return -EINVAL; -} - /** * xe_oa_unit_id - Return OA unit ID for a hardware engine * @hwe: @xe_hw_engine @@ -1634,155 +1976,6 @@ static int xe_oa_assign_hwe(struct xe_oa *oa, struct xe_oa_open_param *param) return ret; } -static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - if (value >= oa->oa_unit_ids) { - drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value); - return -EINVAL; - } - param->oa_unit_id = value; - return 0; -} - -static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->sample = value; - return 0; -} - -static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->metric_set = value; - return 0; -} - -static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - int ret = decode_oa_format(oa, value, &param->oa_format); - - if (ret) { - drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value); - return ret; - } - return 0; -} - -static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ -#define OA_EXPONENT_MAX 31 - - if (value > OA_EXPONENT_MAX) { - drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX); - return -EINVAL; - } - param->period_exponent = value; - return 0; -} - -static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->disabled = value; - return 0; -} - -static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->exec_queue_id = value; - return 0; -} - -static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->engine_instance = value; - return 0; -} - -static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param) -{ - param->no_preempt = value; - return 0; -} - -typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value, - struct xe_oa_open_param *param); -static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = { - 
[DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id, - [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa, - [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set, - [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format, - [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent, - [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled, - [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id, - [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance, - [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt, -}; - -static int xe_oa_user_ext_set_property(struct xe_oa *oa, u64 extension, - struct xe_oa_open_param *param) -{ - u64 __user *address = u64_to_user_ptr(extension); - struct drm_xe_ext_set_property ext; - int err; - u32 idx; - - err = __copy_from_user(&ext, address, sizeof(ext)); - if (XE_IOCTL_DBG(oa->xe, err)) - return -EFAULT; - - if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs)) || - XE_IOCTL_DBG(oa->xe, ext.pad)) - return -EINVAL; - - idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs)); - return xe_oa_set_property_funcs[idx](oa, ext.value, param); -} - -typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, u64 extension, - struct xe_oa_open_param *param); -static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = { - [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property, -}; - -#define MAX_USER_EXTENSIONS 16 -static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number, - struct xe_oa_open_param *param) -{ - u64 __user *address = u64_to_user_ptr(extension); - struct drm_xe_user_extension ext; - int err; - u32 idx; - - if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS)) - return -E2BIG; - - err = __copy_from_user(&ext, address, sizeof(ext)); - if (XE_IOCTL_DBG(oa->xe, err)) - return -EFAULT; - - if (XE_IOCTL_DBG(oa->xe, ext.pad) || - XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs))) - return -EINVAL; - - idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs)); - err = xe_oa_user_extension_funcs[idx](oa, extension, param); - if (XE_IOCTL_DBG(oa->xe, err)) - return err; - - if (ext.next_extension) - return xe_oa_user_extensions(oa, ext.next_extension, ++ext_number, param); - - return 0; -} - /** * xe_oa_stream_open_ioctl - Opens an OA stream * @dev: @drm_device @@ -1808,7 +2001,9 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f return -ENODEV; } - ret = xe_oa_user_extensions(oa, data, 0, &param); + param.xef = xef; + param.period_exponent = -1; + ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param); if (ret) return ret; @@ -1817,8 +2012,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f if (XE_IOCTL_DBG(oa->xe, !param.exec_q)) return -ENOENT; - if (param.exec_q->width > 1) - drm_dbg(&oa->xe->drm, "exec_q->width > 1, programming only exec_q->lrc[0]\n"); + if (XE_IOCTL_DBG(oa->xe, param.exec_q->width > 1)) + return -EOPNOTSUPP; } /* @@ -1862,7 +2057,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f goto err_exec_q; } - if (param.period_exponent > 0) { + if (param.period_exponent >= 0) { u64 oa_period, oa_freq_hz; /* Requesting samples from OAG buffer is a privileged operation */ @@ -1876,11 +2071,24 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f drm_dbg(&oa->xe->drm, "Using periodic sampling freq %lld Hz\n", 
oa_freq_hz); } + ret = xe_oa_parse_syncs(oa, &param); + if (ret) + goto err_exec_q; + mutex_lock(&param.hwe->gt->oa.gt_lock); ret = xe_oa_stream_open_ioctl_locked(oa, &param); mutex_unlock(&param.hwe->gt->oa.gt_lock); + if (ret < 0) + goto err_sync_cleanup; + + return ret; + +err_sync_cleanup: + while (param.num_syncs--) + xe_sync_entry_cleanup(&param.syncs[param.num_syncs]); + kfree(param.syncs); err_exec_q: - if (ret < 0 && param.exec_q) + if (param.exec_q) xe_exec_queue_put(param.exec_q); return ret; } diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h index 8862eca73fbe3..fea9d981e414f 100644 --- a/drivers/gpu/drm/xe/xe_oa_types.h +++ b/drivers/gpu/drm/xe/xe_oa_types.h @@ -238,5 +238,17 @@ struct xe_oa_stream { /** @no_preempt: Whether preemption and timeslicing is disabled for stream exec_q */ u32 no_preempt; + + /** @xef: xe_file with which the stream was opened */ + struct xe_file *xef; + + /** @last_fence: fence to use in stream destroy when needed */ + struct dma_fence *last_fence; + + /** @num_syncs: size of @syncs array */ + u32 num_syncs; + + /** @syncs: syncs to wait on and to signal */ + struct xe_sync_entry *syncs; }; #endif diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 33eb039053e4f..06f50aa313267 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -264,6 +264,15 @@ int xe_pm_init_early(struct xe_device *xe) return 0; } +static u32 vram_threshold_value(struct xe_device *xe) +{ + /* FIXME: D3Cold temporarily disabled by default on BMG */ + if (xe->info.platform == XE_BATTLEMAGE) + return 0; + + return DEFAULT_VRAM_THRESHOLD; +} + /** * xe_pm_init - Initialize Xe Power Management * @xe: xe device instance @@ -274,6 +283,7 @@ int xe_pm_init_early(struct xe_device *xe) */ int xe_pm_init(struct xe_device *xe) { + u32 vram_threshold; int err; /* For now suspend/resume is only allowed with GuC */ @@ -287,7 +297,8 @@ int xe_pm_init(struct xe_device *xe) if (err) return err; - err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD); + vram_threshold = vram_threshold_value(xe); + err = xe_pm_set_vram_threshold(xe, vram_threshold); if (err) return err; } diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index 797576690356f..230cf47fb9c5e 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -28,6 +28,8 @@ struct xe_pt_dir { struct xe_pt pt; /** @children: Array of page-table child nodes */ struct xe_ptw *children[XE_PDES]; + /** @staging: Array of page-table staging nodes */ + struct xe_ptw *staging[XE_PDES]; }; #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM) @@ -48,9 +50,10 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt) return container_of(pt, struct xe_pt_dir, pt); } -static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index) +static struct xe_pt * +xe_pt_entry_staging(struct xe_pt_dir *pt_dir, unsigned int index) { - return container_of(pt_dir->children[index], struct xe_pt, base); + return container_of(pt_dir->staging[index], struct xe_pt, base); } static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm, @@ -125,6 +128,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, } pt->bo = bo; pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL; + pt->base.staging = level ? 
as_xe_pt_dir(pt)->staging : NULL; if (vm->xef) xe_drm_client_add_bo(vm->xef->client, pt->bo); @@ -205,8 +209,8 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred) struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt); for (i = 0; i < XE_PDES; i++) { - if (xe_pt_entry(pt_dir, i)) - xe_pt_destroy(xe_pt_entry(pt_dir, i), flags, + if (xe_pt_entry_staging(pt_dir, i)) + xe_pt_destroy(xe_pt_entry_staging(pt_dir, i), flags, deferred); } } @@ -375,8 +379,10 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent, /* Continue building a non-connected subtree. */ struct iosys_map *map = &parent->bo->vmap; - if (unlikely(xe_child)) + if (unlikely(xe_child)) { parent->base.children[offset] = &xe_child->base; + parent->base.staging[offset] = &xe_child->base; + } xe_pt_write(xe_walk->vm->xe, map, offset, pte); parent->num_live++; @@ -613,6 +619,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, .ops = &xe_pt_stage_bind_ops, .shifts = xe_normal_pt_shifts, .max_level = XE_PT_HIGHEST_LEVEL, + .staging = true, }, .vm = xe_vma_vm(vma), .tile = tile, @@ -872,7 +879,7 @@ static void xe_pt_cancel_bind(struct xe_vma *vma, } } -static void xe_pt_commit_locks_assert(struct xe_vma *vma) +static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma) { struct xe_vm *vm = xe_vma_vm(vma); @@ -884,6 +891,16 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma) xe_vm_assert_held(vm); } +static void xe_pt_commit_locks_assert(struct xe_vma *vma) +{ + struct xe_vm *vm = xe_vma_vm(vma); + + xe_pt_commit_prepare_locks_assert(vma); + + if (xe_vma_is_userptr(vma)) + lockdep_assert_held_read(&vm->userptr.notifier_lock); +} + static void xe_pt_commit(struct xe_vma *vma, struct xe_vm_pgtable_update *entries, u32 num_entries, struct llist_head *deferred) @@ -894,13 +911,17 @@ static void xe_pt_commit(struct xe_vma *vma, for (i = 0; i < num_entries; i++) { struct xe_pt *pt = entries[i].pt; + struct xe_pt_dir *pt_dir; if (!pt->level) continue; + pt_dir = as_xe_pt_dir(pt); for (j = 0; j < entries[i].qwords; j++) { struct xe_pt *oldpte = entries[i].pt_entries[j].pt; + int j_ = j + entries[i].ofs; + pt_dir->children[j_] = pt_dir->staging[j_]; xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred); } } @@ -912,7 +933,7 @@ static void xe_pt_abort_bind(struct xe_vma *vma, { int i, j; - xe_pt_commit_locks_assert(vma); + xe_pt_commit_prepare_locks_assert(vma); for (i = num_entries - 1; i >= 0; --i) { struct xe_pt *pt = entries[i].pt; @@ -927,10 +948,10 @@ static void xe_pt_abort_bind(struct xe_vma *vma, pt_dir = as_xe_pt_dir(pt); for (j = 0; j < entries[i].qwords; j++) { u32 j_ = j + entries[i].ofs; - struct xe_pt *newpte = xe_pt_entry(pt_dir, j_); + struct xe_pt *newpte = xe_pt_entry_staging(pt_dir, j_); struct xe_pt *oldpte = entries[i].pt_entries[j].pt; - pt_dir->children[j_] = oldpte ? &oldpte->base : 0; + pt_dir->staging[j_] = oldpte ? 
&oldpte->base : 0; xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL); } } @@ -942,7 +963,7 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma, { u32 i, j; - xe_pt_commit_locks_assert(vma); + xe_pt_commit_prepare_locks_assert(vma); for (i = 0; i < num_entries; i++) { struct xe_pt *pt = entries[i].pt; @@ -960,10 +981,10 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma, struct xe_pt *newpte = entries[i].pt_entries[j].pt; struct xe_pt *oldpte = NULL; - if (xe_pt_entry(pt_dir, j_)) - oldpte = xe_pt_entry(pt_dir, j_); + if (xe_pt_entry_staging(pt_dir, j_)) + oldpte = xe_pt_entry_staging(pt_dir, j_); - pt_dir->children[j_] = &newpte->base; + pt_dir->staging[j_] = &newpte->base; entries[i].pt_entries[j].pt = oldpte; } } @@ -1212,42 +1233,22 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma, return 0; uvma = to_userptr_vma(vma); - notifier_seq = uvma->userptr.notifier_seq; + if (xe_pt_userptr_inject_eagain(uvma)) + xe_vma_userptr_force_invalidate(uvma); - if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm)) - return 0; + notifier_seq = uvma->userptr.notifier_seq; if (!mmu_interval_read_retry(&uvma->userptr.notifier, - notifier_seq) && - !xe_pt_userptr_inject_eagain(uvma)) + notifier_seq)) return 0; - if (xe_vm_in_fault_mode(vm)) { + if (xe_vm_in_fault_mode(vm)) return -EAGAIN; - } else { - spin_lock(&vm->userptr.invalidated_lock); - list_move_tail(&uvma->userptr.invalidate_link, - &vm->userptr.invalidated); - spin_unlock(&vm->userptr.invalidated_lock); - - if (xe_vm_in_preempt_fence_mode(vm)) { - struct dma_resv_iter cursor; - struct dma_fence *fence; - long err; - - dma_resv_iter_begin(&cursor, xe_vm_resv(vm), - DMA_RESV_USAGE_BOOKKEEP); - dma_resv_for_each_fence_unlocked(&cursor, fence) - dma_fence_enable_sw_signaling(fence); - dma_resv_iter_end(&cursor); - - err = dma_resv_wait_timeout(xe_vm_resv(vm), - DMA_RESV_USAGE_BOOKKEEP, - false, MAX_SCHEDULE_TIMEOUT); - XE_WARN_ON(err <= 0); - } - } + /* + * Just continue the operation since exec or rebind worker + * will take care of rebinding. + */ return 0; } @@ -1513,6 +1514,7 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma, .ops = &xe_pt_stage_unbind_ops, .shifts = xe_normal_pt_shifts, .max_level = XE_PT_HIGHEST_LEVEL, + .staging = true, }, .tile = tile, .modified_start = xe_vma_start(vma), @@ -1554,7 +1556,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma, { int i, j; - xe_pt_commit_locks_assert(vma); + xe_pt_commit_prepare_locks_assert(vma); for (i = num_entries - 1; i >= 0; --i) { struct xe_vm_pgtable_update *entry = &entries[i]; @@ -1567,7 +1569,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma, continue; for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) - pt_dir->children[j] = + pt_dir->staging[j] = entries[i].pt_entries[j - entry->ofs].pt ? 
&entries[i].pt_entries[j - entry->ofs].pt->base : NULL; } @@ -1580,7 +1582,7 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma, { int i, j; - xe_pt_commit_locks_assert(vma); + xe_pt_commit_prepare_locks_assert(vma); for (i = 0; i < num_entries; ++i) { struct xe_vm_pgtable_update *entry = &entries[i]; @@ -1594,8 +1596,8 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma, pt_dir = as_xe_pt_dir(pt); for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) { entry->pt_entries[j - entry->ofs].pt = - xe_pt_entry(pt_dir, j); - pt_dir->children[j] = NULL; + xe_pt_entry_staging(pt_dir, j); + pt_dir->staging[j] = NULL; } } } diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c index b8b3d2aea4923..be602a763ff32 100644 --- a/drivers/gpu/drm/xe/xe_pt_walk.c +++ b/drivers/gpu/drm/xe/xe_pt_walk.c @@ -74,7 +74,8 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level, u64 addr, u64 end, struct xe_pt_walk *walk) { pgoff_t offset = xe_pt_offset(addr, level, walk); - struct xe_ptw **entries = parent->children ? parent->children : NULL; + struct xe_ptw **entries = walk->staging ? (parent->staging ?: NULL) : + (parent->children ?: NULL); const struct xe_pt_walk_ops *ops = walk->ops; enum page_walk_action action; struct xe_ptw *child; diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h index 5ecc4d2f0f653..5c02c244f7de3 100644 --- a/drivers/gpu/drm/xe/xe_pt_walk.h +++ b/drivers/gpu/drm/xe/xe_pt_walk.h @@ -11,12 +11,14 @@ /** * struct xe_ptw - base class for driver pagetable subclassing. * @children: Pointer to an array of children if any. + * @staging: Pointer to an array of staging if any. * * Drivers could subclass this, and if it's a page-directory, typically * embed an array of xe_ptw pointers. */ struct xe_ptw { struct xe_ptw **children; + struct xe_ptw **staging; }; /** @@ -41,6 +43,8 @@ struct xe_pt_walk { * as shared pagetables. */ bool shared_pt_mode; + /** @staging: Walk staging PT structure */ + bool staging; }; /** diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 1c96375bd7df7..6fec5d1a1eb44 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -679,7 +679,7 @@ static int query_oa_units(struct xe_device *xe, du->oa_unit_id = u->oa_unit_id; du->oa_unit_type = u->type; du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt); - du->capabilities = DRM_XE_OA_CAPS_BASE; + du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS; j = 0; for_each_hw_engine(hwe, gt, hwe_id) { diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index 0be4f489d3e12..9f327f27c0726 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -221,7 +221,10 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw, static u32 get_ppgtt_flag(struct xe_sched_job *job) { - return job->q->vm ? BIT(8) : 0; + if (job->q->vm && !job->ggtt) + return BIT(8); + + return 0; } static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i) diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h index 0d3f76fb05cea..c207361bf43e1 100644 --- a/drivers/gpu/drm/xe/xe_sched_job_types.h +++ b/drivers/gpu/drm/xe/xe_sched_job_types.h @@ -57,6 +57,8 @@ struct xe_sched_job { u32 migrate_flush_flags; /** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */ bool ring_ops_flush_tlb; + /** @ggtt: mapped in ggtt. */ + bool ggtt; /** @ptrs: per instance pointers. 
*/ struct xe_job_ptrs ptrs[]; }; diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c index 0d5e04158917b..1fb12da21c9e4 100644 --- a/drivers/gpu/drm/xe/xe_tuning.c +++ b/drivers/gpu/drm/xe/xe_tuning.c @@ -97,14 +97,6 @@ static const struct xe_rtp_entry_sr engine_tunings[] = { }; static const struct xe_rtp_entry_sr lrc_tunings[] = { - { XE_RTP_NAME("Tuning: ganged timer, also known as 16011163337"), - XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)), - /* read verification is ignored due to 1608008084. */ - XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(FF_MODE2, - FF_MODE2_GS_TIMER_MASK, - FF_MODE2_GS_TIMER_224)) - }, - /* DG2 */ { XE_RTP_NAME("Tuning: L3 cache"), diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index c99380271de62..872de052d670f 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -580,51 +580,26 @@ static void preempt_rebind_work_func(struct work_struct *w) trace_xe_vm_rebind_worker_exit(vm); } -static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni, - const struct mmu_notifier_range *range, - unsigned long cur_seq) +static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma) { - struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier); - struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr); + struct xe_userptr *userptr = &uvma->userptr; struct xe_vma *vma = &uvma->vma; - struct xe_vm *vm = xe_vma_vm(vma); struct dma_resv_iter cursor; struct dma_fence *fence; long err; - xe_assert(vm->xe, xe_vma_is_userptr(vma)); - trace_xe_vma_userptr_invalidate(vma); - - if (!mmu_notifier_range_blockable(range)) - return false; - - vm_dbg(&xe_vma_vm(vma)->xe->drm, - "NOTIFIER: addr=0x%016llx, range=0x%016llx", - xe_vma_start(vma), xe_vma_size(vma)); - - down_write(&vm->userptr.notifier_lock); - mmu_interval_set_seq(mni, cur_seq); - - /* No need to stop gpu access if the userptr is not yet bound. */ - if (!userptr->initial_bind) { - up_write(&vm->userptr.notifier_lock); - return true; - } - /* * Tell exec and rebind worker they need to repin and rebind this * userptr. */ if (!xe_vm_in_fault_mode(vm) && - !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) { + !(vma->gpuva.flags & XE_VMA_DESTROYED)) { spin_lock(&vm->userptr.invalidated_lock); list_move_tail(&userptr->invalidate_link, &vm->userptr.invalidated); spin_unlock(&vm->userptr.invalidated_lock); } - up_write(&vm->userptr.notifier_lock); - /* * Preempt fences turn into schedule disables, pipeline these. 
* Note that even in fault mode, we need to wait for binds and @@ -642,11 +617,37 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni, false, MAX_SCHEDULE_TIMEOUT); XE_WARN_ON(err <= 0); - if (xe_vm_in_fault_mode(vm)) { + if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) { err = xe_vm_invalidate_vma(vma); XE_WARN_ON(err); } + xe_hmm_userptr_unmap(uvma); +} + +static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) +{ + struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier); + struct xe_vma *vma = &uvma->vma; + struct xe_vm *vm = xe_vma_vm(vma); + + xe_assert(vm->xe, xe_vma_is_userptr(vma)); + trace_xe_vma_userptr_invalidate(vma); + + if (!mmu_notifier_range_blockable(range)) + return false; + + vm_dbg(&xe_vma_vm(vma)->xe->drm, + "NOTIFIER: addr=0x%016llx, range=0x%016llx", + xe_vma_start(vma), xe_vma_size(vma)); + + down_write(&vm->userptr.notifier_lock); + mmu_interval_set_seq(mni, cur_seq); + + __vma_userptr_invalidate(vm, uvma); + up_write(&vm->userptr.notifier_lock); trace_xe_vma_userptr_invalidate_complete(vma); return true; @@ -656,6 +657,34 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = { .invalidate = vma_userptr_invalidate, }; +#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) +/** + * xe_vma_userptr_force_invalidate() - force invalidate a userptr + * @uvma: The userptr vma to invalidate + * + * Perform a forced userptr invalidation for testing purposes. + */ +void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma) +{ + struct xe_vm *vm = xe_vma_vm(&uvma->vma); + + /* Protect against concurrent userptr pinning */ + lockdep_assert_held(&vm->lock); + /* Protect against concurrent notifiers */ + lockdep_assert_held(&vm->userptr.notifier_lock); + /* + * Protect against concurrent instances of this function and + * the critical exec sections + */ + xe_vm_assert_held(vm); + + if (!mmu_interval_read_retry(&uvma->userptr.notifier, + uvma->userptr.notifier_seq)) + uvma->userptr.notifier_seq -= 2; + __vma_userptr_invalidate(vm, uvma); +} +#endif + int xe_vm_userptr_pin(struct xe_vm *vm) { struct xe_userptr_vma *uvma, *next; @@ -667,20 +696,33 @@ int xe_vm_userptr_pin(struct xe_vm *vm) /* Collect invalidated userptrs */ spin_lock(&vm->userptr.invalidated_lock); + xe_assert(vm->xe, list_empty(&vm->userptr.repin_list)); list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated, userptr.invalidate_link) { list_del_init(&uvma->userptr.invalidate_link); - list_move_tail(&uvma->userptr.repin_link, - &vm->userptr.repin_list); + list_add_tail(&uvma->userptr.repin_link, + &vm->userptr.repin_list); } spin_unlock(&vm->userptr.invalidated_lock); - /* Pin and move to temporary list */ + /* Pin and move to bind list */ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, userptr.repin_link) { err = xe_vma_userptr_pin_pages(uvma); if (err == -EFAULT) { list_del_init(&uvma->userptr.repin_link); + /* + * We might have already done the pin once already, but + * then had to retry before the re-bind happened, due + * some other condition in the caller, but in the + * meantime the userptr got dinged by the notifier such + * that we need to revalidate here, but this time we hit + * the EFAULT. In such a case make sure we remove + * ourselves from the rebind list to avoid going down in + * flames. 
+ */ + if (!list_empty(&uvma->vma.combined_links.rebind)) + list_del_init(&uvma->vma.combined_links.rebind); /* Wait for pending binds */ xe_vm_lock(vm, false); @@ -691,10 +733,10 @@ int xe_vm_userptr_pin(struct xe_vm *vm) err = xe_vm_invalidate_vma(&uvma->vma); xe_vm_unlock(vm); if (err) - return err; + break; } else { - if (err < 0) - return err; + if (err) + break; list_del_init(&uvma->userptr.repin_link); list_move_tail(&uvma->vma.combined_links.rebind, @@ -702,7 +744,19 @@ int xe_vm_userptr_pin(struct xe_vm *vm) } } - return 0; + if (err) { + down_write(&vm->userptr.notifier_lock); + spin_lock(&vm->userptr.invalidated_lock); + list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, + userptr.repin_link) { + list_del_init(&uvma->userptr.repin_link); + list_move_tail(&uvma->userptr.invalidate_link, + &vm->userptr.invalidated); + } + spin_unlock(&vm->userptr.invalidated_lock); + up_write(&vm->userptr.notifier_lock); + } + return err; } /** @@ -987,6 +1041,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, INIT_LIST_HEAD(&userptr->invalidate_link); INIT_LIST_HEAD(&userptr->repin_link); vma->gpuva.gem.offset = bo_offset_or_userptr; + mutex_init(&userptr->unmap_mutex); err = mmu_interval_notifier_insert(&userptr->notifier, current->mm, @@ -1028,6 +1083,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma) * them anymore */ mmu_interval_notifier_remove(&userptr->notifier); + mutex_destroy(&userptr->unmap_mutex); xe_vm_put(vm); } else if (xe_vma_is_null(vma)) { xe_vm_put(vm); @@ -1066,6 +1122,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence) xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED); spin_lock(&vm->userptr.invalidated_lock); + xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link)); list_del(&to_userptr_vma(vma)->userptr.invalidate_link); spin_unlock(&vm->userptr.invalidated_lock); } else if (!xe_vma_is_null(vma)) { @@ -2258,8 +2315,17 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, break; } case DRM_GPUVA_OP_UNMAP: + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); + break; case DRM_GPUVA_OP_PREFETCH: - /* FIXME: Need to skip some prefetch ops */ + vma = gpuva_to_vma(op->base.prefetch.va); + + if (xe_vma_is_userptr(vma)) { + err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); + if (err) + return err; + } + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); break; default: diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h index c864dba35e1d5..d2406532fcc50 100644 --- a/drivers/gpu/drm/xe/xe_vm.h +++ b/drivers/gpu/drm/xe/xe_vm.h @@ -275,9 +275,17 @@ static inline void vm_dbg(const struct drm_device *dev, const char *format, ...) 
{ /* noop */ } #endif -#endif struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm); void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap); void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p); void xe_vm_snapshot_free(struct xe_vm_snapshot *snap); + +#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) +void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma); +#else +static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma) +{ +} +#endif +#endif diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index 7f9a303e51d89..a4b4091cfd0da 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -59,12 +59,16 @@ struct xe_userptr { struct sg_table *sg; /** @notifier_seq: notifier sequence number */ unsigned long notifier_seq; + /** @unmap_mutex: Mutex protecting dma-unmapping */ + struct mutex unmap_mutex; /** * @initial_bind: user pointer has been bound at least once. * write: vm->userptr.notifier_lock in read mode and vm->resv held. * read: vm->userptr.notifier_lock in write mode or vm->resv held. */ bool initial_bind; + /** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */ + bool mapped; #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) u32 divisor; #endif @@ -227,8 +231,8 @@ struct xe_vm { * up for revalidation. Protected from access with the * @invalidated_lock. Removing items from the list * additionally requires @lock in write mode, and adding - * items to the list requires the @userptr.notifer_lock in - * write mode. + * items to the list requires either the @userptr.notifer_lock in + * write mode, OR @lock in write mode. */ struct list_head invalidated; } userptr; diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 37e592b2bf062..0a1905f8d380a 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -606,6 +606,13 @@ static const struct xe_rtp_entry_sr engine_was[] = { }; static const struct xe_rtp_entry_sr lrc_was[] = { + { XE_RTP_NAME("16011163337"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)), + /* read verification is ignored due to 1608008084. */ + XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(FF_MODE2, + FF_MODE2_GS_TIMER_MASK, + FF_MODE2_GS_TIMER_224)) + }, { XE_RTP_NAME("1409342910, 14010698770, 14010443199, 1408979724, 1409178076, 1409207793, 1409217633, 1409252684, 1409347922, 1409142259"), XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210)), XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN3, diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c index f5781939de9c3..a25b22238e3d2 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c @@ -231,6 +231,8 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev) if (ret) return ret; + dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32)); + /* Try the reserved memory. Proceed if there's none. */ of_reserved_mem_device_init(&pdev->dev); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 710674ef40a97..3f23a7d91519f 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -367,6 +367,10 @@ static bool host1x_wants_iommu(struct host1x *host1x) return true; } +/* + * Returns ERR_PTR on failure, NULL if the translation is IDENTITY, otherwise a + * valid paging domain. 
+ */ static struct iommu_domain *host1x_iommu_attach(struct host1x *host) { struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev); @@ -391,6 +395,8 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host) * Similarly, if host1x is already attached to an IOMMU (via the DMA * API), don't try to attach again. */ + if (domain && domain->type == IOMMU_DOMAIN_IDENTITY) + domain = NULL; if (!host1x_wants_iommu(host) || domain) return domain; diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c index 473ac3f2d3821..da31f1131afca 100644 --- a/drivers/greybus/gb-beagleplay.c +++ b/drivers/greybus/gb-beagleplay.c @@ -912,7 +912,9 @@ static enum fw_upload_err cc1352_prepare(struct fw_upload *fw_upload, cc1352_bootloader_reset(bg); WRITE_ONCE(bg->flashing_mode, false); msleep(200); - gb_greybus_init(bg); + if (gb_greybus_init(bg) < 0) + return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_RW_ERROR, + "Failed to initialize greybus"); gb_beagleplay_start_svc(bg); return FW_UPLOAD_ERR_FW_INVALID; } diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index f8a56d6312425..95a4ede270991 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -1154,7 +1154,8 @@ config HID_TOPRE tristate "Topre REALFORCE keyboards" depends on HID help - Say Y for N-key rollover support on Topre REALFORCE R2 108/87 key keyboards. + Say Y for N-key rollover support on Topre REALFORCE R2 108/87 key and + Topre REALFORCE R3S 87 key keyboards. config HID_THINGM tristate "ThingM blink(1) USB RGB LED" @@ -1204,6 +1205,20 @@ config HID_U2FZERO allow setting the brightness to anything but 1, which will trigger a single blink and immediately reset back to 0. +config HID_UNIVERSAL_PIDFF + tristate "universal-pidff: extended USB PID driver compatibility and usage" + depends on USB_HID + depends on HID_PID + help + Extended PID support for selected devices. + + Contains report fixups, extended usable button range and + pidff quirk management to extend compatibility with slightly + non-compliant USB PID devices and better fuzz/flat values for + high precision direct drive devices. + + Supports Moza Racing, Cammus, VRS, FFBeast and more. 
+ config HID_WACOM tristate "Wacom Intuos/Graphire tablet support (USB)" depends on USB_HID diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 496dab54c73a8..27ee02bf6f26d 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -139,6 +139,7 @@ hid-uclogic-objs := hid-uclogic-core.o \ hid-uclogic-params.o obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o obj-$(CONFIG_HID_UDRAW_PS3) += hid-udraw-ps3.o +obj-$(CONFIG_HID_UNIVERSAL_PIDFF) += hid-universal-pidff.o obj-$(CONFIG_HID_LED) += hid-led.o obj-$(CONFIG_HID_XIAOMI) += hid-xiaomi.o obj-$(CONFIG_HID_XINMO) += hid-xinmo.o @@ -165,7 +166,6 @@ obj-$(CONFIG_USB_KBD) += usbhid/ obj-$(CONFIG_I2C_HID_CORE) += i2c-hid/ obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/ -obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/ obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/ diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 7e1ae2a2bcc24..d900dd05c335c 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -378,6 +378,12 @@ static bool apple_is_non_apple_keyboard(struct hid_device *hdev) return false; } +static bool apple_is_omoton_kb066(struct hid_device *hdev) +{ + return hdev->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI && + strcmp(hdev->name, "Bluetooth Keyboard") == 0; +} + static inline void apple_setup_key_translation(struct input_dev *input, const struct apple_key_translation *table) { @@ -474,6 +480,7 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2015) table = magic_keyboard_2015_fn_keys; else if (hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 || + hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024 || hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 || hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021) table = apple2021_fn_keys; @@ -724,7 +731,7 @@ static int apple_input_configured(struct hid_device *hdev, { struct apple_sc *asc = hid_get_drvdata(hdev); - if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) { + if (((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) || apple_is_omoton_kb066(hdev)) { hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n"); asc->quirks &= ~APPLE_HAS_FN; } @@ -1150,6 +1157,10 @@ static const struct hid_device_id apple_devices[] = { .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY }, { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021), .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024), + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY }, + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024), + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021), .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY }, { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021), diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c index 8deded1857254..c45e5aa569d25 100644 --- a/drivers/hid/hid-appleir.c +++ b/drivers/hid/hid-appleir.c @@ -188,7 +188,7 @@ static int appleir_raw_event(struct hid_device *hid, struct hid_report *report, static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 }; unsigned long flags; - if (len != 5) + if (len != 5 || !(hid->claimed & 
HID_CLAIMED_INPUT)) goto out; if (!memcmp(data, keydown, sizeof(keydown))) { diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index 22683ec819aac..646ba5b92e0b2 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -268,11 +268,13 @@ static void cbas_ec_remove(struct platform_device *pdev) mutex_unlock(&cbas_ec_reglock); } +#ifdef CONFIG_ACPI static const struct acpi_device_id cbas_ec_acpi_ids[] = { { "GOOG000B", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, cbas_ec_acpi_ids); +#endif #ifdef CONFIG_OF static const struct of_device_id cbas_ec_of_match[] = { diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index ceb3b1a72e235..92baa34f42f28 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -184,11 +184,18 @@ #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 #define USB_DEVICE_ID_APPLE_IRCONTROL5 0x8243 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 0x029c +#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024 0x0320 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 0x029a #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021 0x029f #define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102 #define USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY 0x8302 +#define USB_VENDOR_ID_ASETEK 0x2433 +#define USB_DEVICE_ID_ASETEK_INVICTA 0xf300 +#define USB_DEVICE_ID_ASETEK_FORTE 0xf301 +#define USB_DEVICE_ID_ASETEK_LA_PRIMA 0xf303 +#define USB_DEVICE_ID_ASETEK_TONY_KANAAN 0xf306 + #define USB_VENDOR_ID_ASUS 0x0486 #define USB_DEVICE_ID_ASUS_T91MT 0x0185 #define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186 @@ -261,6 +268,10 @@ #define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578 #define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577 +#define USB_VENDOR_ID_CAMMUS 0x3416 +#define USB_DEVICE_ID_CAMMUS_C5 0x0301 +#define USB_DEVICE_ID_CAMMUS_C12 0x0302 + #define USB_VENDOR_ID_CANDO 0x2087 #define USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH 0x0703 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 @@ -452,6 +463,11 @@ #define USB_VENDOR_ID_EVISION 0x320f #define USB_DEVICE_ID_EVISION_ICL01 0x5041 +#define USB_VENDOR_ID_FFBEAST 0x045b +#define USB_DEVICE_ID_FFBEAST_JOYSTICK 0x58f9 +#define USB_DEVICE_ID_FFBEAST_RUDDER 0x5968 +#define USB_DEVICE_ID_FFBEAST_WHEEL 0x59d7 + #define USB_VENDOR_ID_FLATFROG 0x25b5 #define USB_DEVICE_ID_MULTITOUCH_3200 0x0002 @@ -812,6 +828,13 @@ #define I2C_DEVICE_ID_LG_8001 0x8001 #define I2C_DEVICE_ID_LG_7010 0x7010 +#define USB_VENDOR_ID_LITE_STAR 0x11ff +#define USB_DEVICE_ID_PXN_V10 0x3245 +#define USB_DEVICE_ID_PXN_V12 0x1212 +#define USB_DEVICE_ID_PXN_V12_LITE 0x1112 +#define USB_DEVICE_ID_PXN_V12_LITE_2 0x1211 +#define USB_DEVICE_LITE_STAR_GT987_FF 0x2141 + #define USB_VENDOR_ID_LOGITECH 0x046d #define USB_DEVICE_ID_LOGITECH_Z_10_SPK 0x0a07 #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e @@ -959,6 +982,18 @@ #define USB_VENDOR_ID_MONTEREY 0x0566 #define USB_DEVICE_ID_GENIUS_KB29E 0x3004 +#define USB_VENDOR_ID_MOZA 0x346e +#define USB_DEVICE_ID_MOZA_R3 0x0005 +#define USB_DEVICE_ID_MOZA_R3_2 0x0015 +#define USB_DEVICE_ID_MOZA_R5 0x0004 +#define USB_DEVICE_ID_MOZA_R5_2 0x0014 +#define USB_DEVICE_ID_MOZA_R9 0x0002 +#define USB_DEVICE_ID_MOZA_R9_2 0x0012 +#define USB_DEVICE_ID_MOZA_R12 0x0006 +#define USB_DEVICE_ID_MOZA_R12_2 0x0016 +#define USB_DEVICE_ID_MOZA_R16_R21 0x0000 +#define USB_DEVICE_ID_MOZA_R16_R21_2 0x0010 + #define USB_VENDOR_ID_MSI 0x1770 #define USB_DEVICE_ID_MSI_GT683R_LED_PANEL 0xff00 @@ -1089,6 +1124,7 @@ #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003 
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008 +#define USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473 0x5473 #define I2C_VENDOR_ID_RAYDIUM 0x2386 #define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33 @@ -1295,6 +1331,7 @@ #define USB_VENDOR_ID_TOPRE 0x0853 #define USB_DEVICE_ID_TOPRE_REALFORCE_R2_108 0x0148 #define USB_DEVICE_ID_TOPRE_REALFORCE_R2_87 0x0146 +#define USB_DEVICE_ID_TOPRE_REALFORCE_R3S_87 0x0313 #define USB_VENDOR_ID_TOPSEED 0x0766 #define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204 @@ -1368,6 +1405,9 @@ #define USB_DEVICE_ID_VELLEMAN_K8061_FIRST 0x8061 #define USB_DEVICE_ID_VELLEMAN_K8061_LAST 0x8068 +#define USB_VENDOR_ID_VRS 0x0483 +#define USB_DEVICE_ID_VRS_DFP 0xa355 + #define USB_VENDOR_ID_VTL 0x0306 #define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F 0xff3f diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c index 25cfd964dc25d..acb9eb18f7ccf 100644 --- a/drivers/hid/hid-plantronics.c +++ b/drivers/hid/hid-plantronics.c @@ -6,9 +6,6 @@ * Copyright (c) 2015-2018 Terry Junge */ -/* - */ - #include "hid-ids.h" #include @@ -23,30 +20,28 @@ #define PLT_VOL_UP 0x00b1 #define PLT_VOL_DOWN 0x00b2 +#define PLT_MIC_MUTE 0x00b5 #define PLT1_VOL_UP (PLT_HID_1_0_PAGE | PLT_VOL_UP) #define PLT1_VOL_DOWN (PLT_HID_1_0_PAGE | PLT_VOL_DOWN) +#define PLT1_MIC_MUTE (PLT_HID_1_0_PAGE | PLT_MIC_MUTE) #define PLT2_VOL_UP (PLT_HID_2_0_PAGE | PLT_VOL_UP) #define PLT2_VOL_DOWN (PLT_HID_2_0_PAGE | PLT_VOL_DOWN) +#define PLT2_MIC_MUTE (PLT_HID_2_0_PAGE | PLT_MIC_MUTE) +#define HID_TELEPHONY_MUTE (HID_UP_TELEPHONY | 0x2f) +#define HID_CONSUMER_MUTE (HID_UP_CONSUMER | 0xe2) #define PLT_DA60 0xda60 #define PLT_BT300_MIN 0x0413 #define PLT_BT300_MAX 0x0418 - -#define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \ - (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) - -#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0) -#define PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS BIT(1) - #define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */ -#define PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT 220 /* ms */ struct plt_drv_data { unsigned long device_type; - unsigned long last_volume_key_ts; - u32 quirks; + unsigned long last_key_ts; + unsigned long double_key_to; + __u16 last_key; }; static int plantronics_input_mapping(struct hid_device *hdev, @@ -58,34 +53,43 @@ static int plantronics_input_mapping(struct hid_device *hdev, unsigned short mapped_key; struct plt_drv_data *drv_data = hid_get_drvdata(hdev); unsigned long plt_type = drv_data->device_type; + int allow_mute = usage->hid == HID_TELEPHONY_MUTE; + int allow_consumer = field->application == HID_CP_CONSUMERCONTROL && + (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER && + usage->hid != HID_CONSUMER_MUTE; /* special case for PTT products */ if (field->application == HID_GD_JOYSTICK) goto defaulted; - /* handle volume up/down mapping */ /* non-standard types or multi-HID interfaces - plt_type is PID */ if (!(plt_type & HID_USAGE_PAGE)) { switch (plt_type) { case PLT_DA60: - if (PLT_ALLOW_CONSUMER) + if (allow_consumer) goto defaulted; - goto ignored; + if (usage->hid == HID_CONSUMER_MUTE) { + mapped_key = KEY_MICMUTE; + goto mapped; + } + break; default: - if (PLT_ALLOW_CONSUMER) + if (allow_consumer || allow_mute) goto defaulted; } + goto ignored; } - /* handle standard types - plt_type is 0xffa0uuuu or 0xffa2uuuu */ - /* 'basic telephony compliant' - allow default consumer page map */ - else if ((plt_type & HID_USAGE) >= PLT_BASIC_TELEPHONY && - (plt_type & HID_USAGE) != PLT_BASIC_EXCEPTION) { - if (PLT_ALLOW_CONSUMER) - goto defaulted; - } - /* not 'basic 
telephony' - apply legacy mapping */ - /* only map if the field is in the device's primary vendor page */ - else if (!((field->application ^ plt_type) & HID_USAGE_PAGE)) { + + /* handle standard consumer control mapping */ + /* and standard telephony mic mute mapping */ + if (allow_consumer || allow_mute) + goto defaulted; + + /* handle vendor unique types - plt_type is 0xffa0uuuu or 0xffa2uuuu */ + /* if not 'basic telephony compliant' - map vendor unique controls */ + if (!((plt_type & HID_USAGE) >= PLT_BASIC_TELEPHONY && + (plt_type & HID_USAGE) != PLT_BASIC_EXCEPTION) && + !((field->application ^ plt_type) & HID_USAGE_PAGE)) switch (usage->hid) { case PLT1_VOL_UP: case PLT2_VOL_UP: @@ -95,8 +99,11 @@ static int plantronics_input_mapping(struct hid_device *hdev, case PLT2_VOL_DOWN: mapped_key = KEY_VOLUMEDOWN; goto mapped; + case PLT1_MIC_MUTE: + case PLT2_MIC_MUTE: + mapped_key = KEY_MICMUTE; + goto mapped; } - } /* * Future mapping of call control or other usages, @@ -105,6 +112,8 @@ static int plantronics_input_mapping(struct hid_device *hdev, */ ignored: + hid_dbg(hdev, "usage: %08x (appl: %08x) - ignored\n", + usage->hid, field->application); return -1; defaulted: @@ -123,38 +132,26 @@ static int plantronics_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct plt_drv_data *drv_data = hid_get_drvdata(hdev); + unsigned long prev_tsto, cur_ts; + __u16 prev_key, cur_key; - if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) { - unsigned long prev_ts, cur_ts; + /* Usages are filtered in plantronics_usages. */ - /* Usages are filtered in plantronics_usages. */ + /* HZ too low for ms resolution - double key detection disabled */ + /* or it is a key release - handle key presses only. */ + if (!drv_data->double_key_to || !value) + return 0; - if (!value) /* Handle key presses only. */ - return 0; + prev_tsto = drv_data->last_key_ts + drv_data->double_key_to; + cur_ts = drv_data->last_key_ts = jiffies; + prev_key = drv_data->last_key; + cur_key = drv_data->last_key = usage->code; - prev_ts = drv_data->last_volume_key_ts; - cur_ts = jiffies; - if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT) - return 1; /* Ignore the repeated key. */ - - drv_data->last_volume_key_ts = cur_ts; + /* If the same key occurs in <= double_key_to -- ignore it */ + if (prev_key == cur_key && time_before_eq(cur_ts, prev_tsto)) { + hid_dbg(hdev, "double key %d ignored\n", cur_key); + return 1; /* Ignore the repeated key. */ } - if (drv_data->quirks & PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS) { - unsigned long prev_ts, cur_ts; - - /* Usages are filtered in plantronics_usages. */ - - if (!value) /* Handle key presses only. */ - return 0; - - prev_ts = drv_data->last_volume_key_ts; - cur_ts = jiffies; - if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT) - return 1; /* Ignore the followed opposite volume key. 
*/ - - drv_data->last_volume_key_ts = cur_ts; - } - return 0; } @@ -196,12 +193,16 @@ static int plantronics_probe(struct hid_device *hdev, ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); - goto err; + return ret; } drv_data->device_type = plantronics_device_type(hdev); - drv_data->quirks = id->driver_data; - drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT); + drv_data->double_key_to = msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT); + drv_data->last_key_ts = jiffies - drv_data->double_key_to; + + /* if HZ does not allow ms resolution - disable double key detection */ + if (drv_data->double_key_to < PLT_DOUBLE_KEY_TIMEOUT) + drv_data->double_key_to = 0; hid_set_drvdata(hdev, drv_data); @@ -210,29 +211,10 @@ static int plantronics_probe(struct hid_device *hdev, if (ret) hid_err(hdev, "hw start failed\n"); -err: return ret; } static const struct hid_device_id plantronics_devices[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, - USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES), - .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, - { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, - USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES), - .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, - { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, - USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES), - .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, - { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, - USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES), - .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, - { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, - USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES), - .driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS }, - { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, - USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES), - .driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS }, { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, { } }; @@ -241,6 +223,14 @@ MODULE_DEVICE_TABLE(hid, plantronics_devices); static const struct hid_usage_id plantronics_usages[] = { { HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID }, { HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID }, + { HID_TELEPHONY_MUTE, EV_KEY, HID_ANY_ID }, + { HID_CONSUMER_MUTE, EV_KEY, HID_ANY_ID }, + { PLT2_VOL_UP, EV_KEY, HID_ANY_ID }, + { PLT2_VOL_DOWN, EV_KEY, HID_ANY_ID }, + { PLT2_MIC_MUTE, EV_KEY, HID_ANY_ID }, + { PLT1_VOL_UP, EV_KEY, HID_ANY_ID }, + { PLT1_VOL_DOWN, EV_KEY, HID_ANY_ID }, + { PLT1_MIC_MUTE, EV_KEY, HID_ANY_ID }, { HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR } }; diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index e0bbf0c6345d6..5d7a418ccdbec 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -891,6 +891,7 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) }, #endif { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) }, + { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) }, { } }; diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 7b35966898785..9de875f27c246 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -1051,10 +1051,10 @@ static void steam_mode_switch_cb(struct work_struct *work) struct steam_device, mode_switch); unsigned long flags; bool client_opened; - steam->gamepad_mode = !steam->gamepad_mode; if (!lizard_mode) return; + steam->gamepad_mode = !steam->gamepad_mode; if (steam->gamepad_mode) steam_set_lizard_mode(steam, false); else { @@ -1327,11 +1327,11 @@ static void 
steam_remove(struct hid_device *hdev) return; } + hid_destroy_device(steam->client_hdev); cancel_delayed_work_sync(&steam->mode_switch); cancel_work_sync(&steam->work_connect); cancel_work_sync(&steam->rumble_work); cancel_work_sync(&steam->unregister_work); - hid_destroy_device(steam->client_hdev); steam->client_hdev = NULL; steam->client_opened = 0; if (steam->quirks & STEAM_QUIRK_WIRELESS) { @@ -1623,7 +1623,7 @@ static void steam_do_deck_input_event(struct steam_device *steam, schedule_delayed_work(&steam->mode_switch, 45 * HZ / 100); } - if (!steam->gamepad_mode) + if (!steam->gamepad_mode && lizard_mode) return; lpad_touched = b10 & BIT(3); @@ -1693,7 +1693,7 @@ static void steam_do_deck_sensors_event(struct steam_device *steam, */ steam->sensor_timestamp_us += 4000; - if (!steam->gamepad_mode) + if (!steam->gamepad_mode && lizard_mode) return; input_event(sensors, EV_MSC, MSC_TIMESTAMP, steam->sensor_timestamp_us); diff --git a/drivers/hid/hid-topre.c b/drivers/hid/hid-topre.c index 848361f6225df..ccedf8721722e 100644 --- a/drivers/hid/hid-topre.c +++ b/drivers/hid/hid-topre.c @@ -29,6 +29,11 @@ static const __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc, hid_info(hdev, "fixing up Topre REALFORCE keyboard report descriptor\n"); rdesc[72] = 0x02; + } else if (*rsize >= 106 && rdesc[28] == 0x29 && rdesc[29] == 0xe7 && + rdesc[30] == 0x81 && rdesc[31] == 0x00) { + hid_info(hdev, + "fixing up Topre REALFORCE keyboard report descriptor\n"); + rdesc[31] = 0x02; } return rdesc; } @@ -38,6 +43,8 @@ static const struct hid_device_id topre_id_table[] = { USB_DEVICE_ID_TOPRE_REALFORCE_R2_108) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPRE, USB_DEVICE_ID_TOPRE_REALFORCE_R2_87) }, + { HID_USB_DEVICE(USB_VENDOR_ID_TOPRE, + USB_DEVICE_ID_TOPRE_REALFORCE_R3S_87) }, { } }; MODULE_DEVICE_TABLE(hid, topre_id_table); diff --git a/drivers/hid/hid-universal-pidff.c b/drivers/hid/hid-universal-pidff.c new file mode 100644 index 0000000000000..5b89ec7b5c26c --- /dev/null +++ b/drivers/hid/hid-universal-pidff.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * HID UNIVERSAL PIDFF + * hid-pidff wrapper for PID-enabled devices + * Handles device reports, quirks and extends usable button range + * + * Copyright (c) 2024, 2025 Oleg Makarenko + * Copyright (c) 2024, 2025 Tomasz PakuÅ‚a + */ + +#include +#include +#include +#include +#include "hid-ids.h" +#include "usbhid/hid-pidff.h" + +#define JOY_RANGE (BTN_DEAD - BTN_JOYSTICK + 1) + +/* + * Map buttons manually to extend the default joystick button limit + */ +static int universal_pidff_input_mapping(struct hid_device *hdev, + struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) +{ + if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON) + return 0; + + if (field->application != HID_GD_JOYSTICK) + return 0; + + int button = ((usage->hid - 1) & HID_USAGE); + int code = button + BTN_JOYSTICK; + + /* Detect the end of JOYSTICK buttons range */ + if (code > BTN_DEAD) + code = button + KEY_NEXT_FAVORITE - JOY_RANGE; + + /* + * Map overflowing buttons to KEY_RESERVED to not ignore + * them and let them still trigger MSC_SCAN + */ + if (code > KEY_MAX) + code = KEY_RESERVED; + + hid_map_usage(hi, usage, bit, max, EV_KEY, code); + hid_dbg(hdev, "Button %d: usage %d", button, code); + return 1; +} + +/* + * Check if the device is PID and initialize it + * Add quirks after initialisation + */ +static int universal_pidff_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + 
int i, error; + error = hid_parse(hdev); + if (error) { + hid_err(hdev, "HID parse failed\n"); + goto err; + } + + error = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); + if (error) { + hid_err(hdev, "HID hw start failed\n"); + goto err; + } + + /* Check if device contains PID usage page */ + error = 1; + for (i = 0; i < hdev->collection_size; i++) + if ((hdev->collection[i].usage & HID_USAGE_PAGE) == HID_UP_PID) { + error = 0; + hid_dbg(hdev, "PID usage page found\n"); + break; + } + + /* + * Do not fail as this might be the second "device" + * just for additional buttons/axes. Exit cleanly if force + * feedback usage page wasn't found (included devices were + * tested and confirmed to be USB PID after all). + */ + if (error) { + hid_dbg(hdev, "PID usage page not found in the descriptor\n"); + return 0; + } + + /* Check if HID_PID support is enabled */ + int (*init_function)(struct hid_device *, u32); + init_function = hid_pidff_init_with_quirks; + + if (!init_function) { + hid_warn(hdev, "HID_PID support not enabled!\n"); + return 0; + } + + error = init_function(hdev, id->driver_data); + if (error) { + hid_warn(hdev, "Error initialising force feedback\n"); + goto err; + } + + hid_info(hdev, "Universal pidff driver loaded successfully!"); + + return 0; +err: + return error; +} + +static int universal_pidff_input_configured(struct hid_device *hdev, + struct hid_input *hidinput) +{ + int axis; + struct input_dev *input = hidinput->input; + + if (!input->absinfo) + return 0; + + /* Decrease fuzz and deadzone on available axes */ + for (axis = ABS_X; axis <= ABS_BRAKE; axis++) { + if (!test_bit(axis, input->absbit)) + continue; + + input_set_abs_params(input, axis, + input->absinfo[axis].minimum, + input->absinfo[axis].maximum, + axis == ABS_X ? 
0 : 8, 0); + } + + /* Remove fuzz and deadzone from the second joystick axis */ + if (hdev->vendor == USB_VENDOR_ID_FFBEAST && + hdev->product == USB_DEVICE_ID_FFBEAST_JOYSTICK) + input_set_abs_params(input, ABS_Y, + input->absinfo[ABS_Y].minimum, + input->absinfo[ABS_Y].maximum, 0, 0); + + return 0; +} + +static const struct hid_device_id universal_pidff_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R3), + .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION }, + { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R3_2), + .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION }, + { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R5), + .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION }, + { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R5_2), + .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION }, + { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R9), + .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION }, + { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R9_2), + .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION }, + { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R12), + .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION }, + { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R12_2), + .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION }, + { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R16_R21), + .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION }, + { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R16_R21_2), + .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION }, + { HID_USB_DEVICE(USB_VENDOR_ID_CAMMUS, USB_DEVICE_ID_CAMMUS_C5) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CAMMUS, USB_DEVICE_ID_CAMMUS_C12) }, + { HID_USB_DEVICE(USB_VENDOR_ID_VRS, USB_DEVICE_ID_VRS_DFP), + .driver_data = HID_PIDFF_QUIRK_PERMISSIVE_CONTROL }, + { HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_JOYSTICK), }, + { HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_RUDDER), }, + { HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_WHEEL) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V10), + .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY }, + { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12), + .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY }, + { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE), + .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY }, + { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE_2), + .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY }, + { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_LITE_STAR_GT987_FF), + .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_INVICTA) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_FORTE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_LA_PRIMA) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_TONY_KANAAN) }, + { } +}; +MODULE_DEVICE_TABLE(hid, universal_pidff_devices); + +static struct hid_driver universal_pidff = { + .name = "hid-universal-pidff", + .id_table = universal_pidff_devices, + .input_mapping = universal_pidff_input_mapping, + .probe = universal_pidff_probe, + .input_configured = universal_pidff_input_configured +}; +module_hid_driver(universal_pidff); + +MODULE_DESCRIPTION("Universal driver for USB PID Force Feedback devices"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Oleg Makarenko "); +MODULE_AUTHOR("Tomasz Pakuła "); diff --git 
a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 4e87380d3edd6..bcca89ef73606 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -284,7 +284,7 @@ static int i2c_hid_get_report(struct i2c_hid *ihid, ihid->rawbuf, recv_len + sizeof(__le16)); if (error) { dev_err(&ihid->client->dev, - "failed to set a report to device: %d\n", error); + "failed to get a report from device: %d\n", error); return error; } diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h index cdd80c653918b..07e90d51f073c 100644 --- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h +++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h @@ -36,6 +36,8 @@ #define PCI_DEVICE_ID_INTEL_ISH_ARL_H 0x7745 #define PCI_DEVICE_ID_INTEL_ISH_ARL_S 0x7F78 #define PCI_DEVICE_ID_INTEL_ISH_LNL_M 0xA845 +#define PCI_DEVICE_ID_INTEL_ISH_PTL_H 0xE345 +#define PCI_DEVICE_ID_INTEL_ISH_PTL_P 0xE445 #define REVISION_ID_CHT_A0 0x6 #define REVISION_ID_CHT_Ax_SI 0x0 diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c index 3cd53fc80634a..4c861119e97aa 100644 --- a/drivers/hid/intel-ish-hid/ipc/ipc.c +++ b/drivers/hid/intel-ish-hid/ipc/ipc.c @@ -517,6 +517,10 @@ static int ish_fw_reset_handler(struct ishtp_device *dev) /* ISH FW is dead */ if (!ish_is_input_ready(dev)) return -EPIPE; + + /* Send clock sync at once after reset */ + ishtp_dev->prev_sync = 0; + /* * Set HOST2ISH.ILUP. Apparently we need this BEFORE sending * RESET_NOTIFY_ACK - FW will be checking for it @@ -577,15 +581,14 @@ static void fw_reset_work_fn(struct work_struct *work) */ static void _ish_sync_fw_clock(struct ishtp_device *dev) { - static unsigned long prev_sync; - uint64_t usec; + struct ipc_time_update_msg time = {}; - if (prev_sync && time_before(jiffies, prev_sync + 20 * HZ)) + if (dev->prev_sync && time_before(jiffies, dev->prev_sync + 20 * HZ)) return; - prev_sync = jiffies; - usec = ktime_to_us(ktime_get_boottime()); - ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &usec, sizeof(uint64_t)); + dev->prev_sync = jiffies; + /* The fields of time would be updated while sending message */ + ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &time, sizeof(time)); } /** diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index aae0d965b47b5..1894743e88028 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -26,9 +26,11 @@ enum ishtp_driver_data_index { ISHTP_DRIVER_DATA_NONE, ISHTP_DRIVER_DATA_LNL_M, + ISHTP_DRIVER_DATA_PTL, }; #define ISH_FW_GEN_LNL_M "lnlm" +#define ISH_FW_GEN_PTL "ptl" #define ISH_FIRMWARE_PATH(gen) "intel/ish/ish_" gen ".bin" #define ISH_FIRMWARE_PATH_ALL "intel/ish/ish_*.bin" @@ -37,6 +39,9 @@ static struct ishtp_driver_data ishtp_driver_data[] = { [ISHTP_DRIVER_DATA_LNL_M] = { .fw_generation = ISH_FW_GEN_LNL_M, }, + [ISHTP_DRIVER_DATA_PTL] = { + .fw_generation = ISH_FW_GEN_PTL, + }, }; static const struct pci_device_id ish_pci_tbl[] = { @@ -63,6 +68,8 @@ static const struct pci_device_id ish_pci_tbl[] = { {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_H)}, {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_S)}, {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_LNL_M), .driver_data = ISHTP_DRIVER_DATA_LNL_M}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_PTL_H), .driver_data = ISHTP_DRIVER_DATA_PTL}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_PTL_P), .driver_data = ISHTP_DRIVER_DATA_PTL}, {} }; MODULE_DEVICE_TABLE(pci, ish_pci_tbl); diff --git 
a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c index fbd4f8ea1951b..af6a5afc1a93e 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c +++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c @@ -833,9 +833,9 @@ static void hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device) hid_ishtp_cl); dev_dbg(ishtp_device(cl_device), "%s\n", __func__); - hid_ishtp_cl_deinit(hid_ishtp_cl); ishtp_put_device(cl_device); ishtp_hid_remove(client_data); + hid_ishtp_cl_deinit(hid_ishtp_cl); hid_ishtp_cl = NULL; diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c index 00c6f0ebf3563..be2c62fc8251d 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid.c +++ b/drivers/hid/intel-ish-hid/ishtp-hid.c @@ -261,12 +261,14 @@ int ishtp_hid_probe(unsigned int cur_hid_dev, */ void ishtp_hid_remove(struct ishtp_cl_data *client_data) { + void *data; int i; for (i = 0; i < client_data->num_hid_devices; ++i) { if (client_data->hid_sensor_hubs[i]) { - kfree(client_data->hid_sensor_hubs[i]->driver_data); + data = client_data->hid_sensor_hubs[i]->driver_data; hid_destroy_device(client_data->hid_sensor_hubs[i]); + kfree(data); client_data->hid_sensor_hubs[i] = NULL; } } diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h index cdacce0a4c9d7..b35afefd036d4 100644 --- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h +++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h @@ -242,6 +242,8 @@ struct ishtp_device { unsigned int ipc_tx_cnt; unsigned long long ipc_tx_bytes_cnt; + /* Time of the last clock sync */ + unsigned long prev_sync; const struct ishtp_hw_ops *ops; size_t mtu; uint32_t ishtp_msg_hdr; diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index a9e85bdd4cc65..bf0f51ef0149f 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -35,6 +35,7 @@ #include #include #include "usbhid.h" +#include "hid-pidff.h" /* * Version Information */ diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c index 3b4ee21cd8111..8dfd2c554a276 100644 --- a/drivers/hid/usbhid/hid-pidff.c +++ b/drivers/hid/usbhid/hid-pidff.c @@ -3,27 +3,27 @@ * Force feedback driver for USB HID PID compliant devices * * Copyright (c) 2005, 2006 Anssi Hannula + * Upgraded 2025 by Oleg Makarenko and Tomasz Pakuła */ -/* - */ - -/* #define DEBUG */ - #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include "hid-pidff.h" #include #include #include - #include +#include -#include "usbhid.h" #define PID_EFFECTS_MAX 64 +#define PID_INFINITE U16_MAX -/* Report usage table used to put reports into an array */ +/* Linux Force Feedback API uses milliseconds as time unit */ +#define FF_TIME_EXPONENT -3 +#define FF_INFINITE 0 +/* Report usage table used to put reports into an array */ #define PID_SET_EFFECT 0 #define PID_EFFECT_OPERATION 1 #define PID_DEVICE_GAIN 2 @@ -44,12 +44,19 @@ static const u8 pidff_reports[] = { 0x21, 0x77, 0x7d, 0x7f, 0x89, 0x90, 0x96, 0xab, 0x5a, 0x5f, 0x6e, 0x73, 0x74 }; +/* + * device_control is really 0x95, but 0x96 specified + * as it is the usage of the only field in that report. 
+ */ -/* device_control is really 0x95, but 0x96 specified as it is the usage of -the only field in that report */ +/* PID special fields */ +#define PID_EFFECT_TYPE 0x25 +#define PID_DIRECTION 0x57 +#define PID_EFFECT_OPERATION_ARRAY 0x78 +#define PID_BLOCK_LOAD_STATUS 0x8b +#define PID_DEVICE_CONTROL_ARRAY 0x96 /* Value usage tables used to put fields and values into arrays */ - #define PID_EFFECT_BLOCK_INDEX 0 #define PID_DURATION 1 @@ -107,10 +114,13 @@ static const u8 pidff_device_gain[] = { 0x7e }; static const u8 pidff_pool[] = { 0x80, 0x83, 0xa9 }; /* Special field key tables used to put special field keys into arrays */ - #define PID_ENABLE_ACTUATORS 0 -#define PID_RESET 1 -static const u8 pidff_device_control[] = { 0x97, 0x9a }; +#define PID_DISABLE_ACTUATORS 1 +#define PID_STOP_ALL_EFFECTS 2 +#define PID_RESET 3 +#define PID_PAUSE 4 +#define PID_CONTINUE 5 +static const u8 pidff_device_control[] = { 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c }; #define PID_CONSTANT 0 #define PID_RAMP 1 @@ -130,12 +140,16 @@ static const u8 pidff_effect_types[] = { #define PID_BLOCK_LOAD_SUCCESS 0 #define PID_BLOCK_LOAD_FULL 1 -static const u8 pidff_block_load_status[] = { 0x8c, 0x8d }; +#define PID_BLOCK_LOAD_ERROR 2 +static const u8 pidff_block_load_status[] = { 0x8c, 0x8d, 0x8e}; #define PID_EFFECT_START 0 #define PID_EFFECT_STOP 1 static const u8 pidff_effect_operation_status[] = { 0x79, 0x7b }; +/* Polar direction 90 degrees (East) */ +#define PIDFF_FIXED_WHEEL_DIRECTION 0x4000 + struct pidff_usage { struct hid_field *field; s32 *value; @@ -159,8 +173,10 @@ struct pidff_device { struct pidff_usage effect_operation[sizeof(pidff_effect_operation)]; struct pidff_usage block_free[sizeof(pidff_block_free)]; - /* Special field is a field that is not composed of - usage<->value pairs that pidff_usage values are */ + /* + * Special field is a field that is not composed of + * usage<->value pairs that pidff_usage values are + */ /* Special field in create_new_effect */ struct hid_field *create_new_effect_type; @@ -184,30 +200,61 @@ struct pidff_device { int operation_id[sizeof(pidff_effect_operation_status)]; int pid_id[PID_EFFECTS_MAX]; + + u32 quirks; + u8 effect_count; }; +/* + * Clamp value for a given field + */ +static s32 pidff_clamp(s32 i, struct hid_field *field) +{ + s32 clamped = clamp(i, field->logical_minimum, field->logical_maximum); + pr_debug("clamped from %d to %d", i, clamped); + return clamped; +} + /* * Scale an unsigned value with range 0..max for the given field */ static int pidff_rescale(int i, int max, struct hid_field *field) { return i * (field->logical_maximum - field->logical_minimum) / max + - field->logical_minimum; + field->logical_minimum; } /* - * Scale a signed value in range -0x8000..0x7fff for the given field + * Scale a signed value in range S16_MIN..S16_MAX for the given field */ static int pidff_rescale_signed(int i, struct hid_field *field) { - return i == 0 ? 0 : i > - 0 ? 
i * field->logical_maximum / 0x7fff : i * - field->logical_minimum / -0x8000; + if (i > 0) return i * field->logical_maximum / S16_MAX; + if (i < 0) return i * field->logical_minimum / S16_MIN; + return 0; +} + +/* + * Scale time value from Linux default (ms) to field units + */ +static u32 pidff_rescale_time(u16 time, struct hid_field *field) +{ + u32 scaled_time = time; + int exponent = field->unit_exponent; + pr_debug("time field exponent: %d\n", exponent); + + for (;exponent < FF_TIME_EXPONENT; exponent++) + scaled_time *= 10; + for (;exponent > FF_TIME_EXPONENT; exponent--) + scaled_time /= 10; + + pr_debug("time calculated from %d to %d\n", time, scaled_time); + return scaled_time; } static void pidff_set(struct pidff_usage *usage, u16 value) { - usage->value[0] = pidff_rescale(value, 0xffff, usage->field); + usage->value[0] = pidff_rescale(value, U16_MAX, usage->field); pr_debug("calculated from %d to %d\n", value, usage->value[0]); } @@ -218,14 +265,35 @@ static void pidff_set_signed(struct pidff_usage *usage, s16 value) else { if (value < 0) usage->value[0] = - pidff_rescale(-value, 0x8000, usage->field); + pidff_rescale(-value, -S16_MIN, usage->field); else usage->value[0] = - pidff_rescale(value, 0x7fff, usage->field); + pidff_rescale(value, S16_MAX, usage->field); } pr_debug("calculated from %d to %d\n", value, usage->value[0]); } +static void pidff_set_time(struct pidff_usage *usage, u16 time) +{ + u32 modified_time = pidff_rescale_time(time, usage->field); + usage->value[0] = pidff_clamp(modified_time, usage->field); +} + +static void pidff_set_duration(struct pidff_usage *usage, u16 duration) +{ + /* Infinite value conversion from Linux API -> PID */ + if (duration == FF_INFINITE) + duration = PID_INFINITE; + + /* PID defines INFINITE as the max possible value for duration field */ + if (duration == PID_INFINITE) { + usage->value[0] = (1U << usage->field->report_size) - 1; + return; + } + + pidff_set_time(usage, duration); +} + /* * Send envelope report to the device */ @@ -233,19 +301,21 @@ static void pidff_set_envelope_report(struct pidff_device *pidff, struct ff_envelope *envelope) { pidff->set_envelope[PID_EFFECT_BLOCK_INDEX].value[0] = - pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]; + pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]; pidff->set_envelope[PID_ATTACK_LEVEL].value[0] = - pidff_rescale(envelope->attack_level > - 0x7fff ? 0x7fff : envelope->attack_level, 0x7fff, - pidff->set_envelope[PID_ATTACK_LEVEL].field); + pidff_rescale(envelope->attack_level > + S16_MAX ? S16_MAX : envelope->attack_level, S16_MAX, + pidff->set_envelope[PID_ATTACK_LEVEL].field); pidff->set_envelope[PID_FADE_LEVEL].value[0] = - pidff_rescale(envelope->fade_level > - 0x7fff ? 0x7fff : envelope->fade_level, 0x7fff, - pidff->set_envelope[PID_FADE_LEVEL].field); + pidff_rescale(envelope->fade_level > + S16_MAX ? 
S16_MAX : envelope->fade_level, S16_MAX, + pidff->set_envelope[PID_FADE_LEVEL].field); - pidff->set_envelope[PID_ATTACK_TIME].value[0] = envelope->attack_length; - pidff->set_envelope[PID_FADE_TIME].value[0] = envelope->fade_length; + pidff_set_time(&pidff->set_envelope[PID_ATTACK_TIME], + envelope->attack_length); + pidff_set_time(&pidff->set_envelope[PID_FADE_TIME], + envelope->fade_length); hid_dbg(pidff->hid, "attack %u => %d\n", envelope->attack_level, @@ -261,10 +331,22 @@ static void pidff_set_envelope_report(struct pidff_device *pidff, static int pidff_needs_set_envelope(struct ff_envelope *envelope, struct ff_envelope *old) { - return envelope->attack_level != old->attack_level || - envelope->fade_level != old->fade_level || + bool needs_new_envelope; + needs_new_envelope = envelope->attack_level != 0 || + envelope->fade_level != 0 || + envelope->attack_length != 0 || + envelope->fade_length != 0; + + if (!needs_new_envelope) + return false; + + if (!old) + return needs_new_envelope; + + return envelope->attack_level != old->attack_level || + envelope->fade_level != old->fade_level || envelope->attack_length != old->attack_length || - envelope->fade_length != old->fade_length; + envelope->fade_length != old->fade_length; } /* @@ -301,17 +383,27 @@ static void pidff_set_effect_report(struct pidff_device *pidff, pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]; pidff->set_effect_type->value[0] = pidff->create_new_effect_type->value[0]; - pidff->set_effect[PID_DURATION].value[0] = effect->replay.length; + + pidff_set_duration(&pidff->set_effect[PID_DURATION], + effect->replay.length); + pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = effect->trigger.button; - pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] = - effect->trigger.interval; + pidff_set_time(&pidff->set_effect[PID_TRIGGER_REPEAT_INT], + effect->trigger.interval); pidff->set_effect[PID_GAIN].value[0] = pidff->set_effect[PID_GAIN].field->logical_maximum; pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1; - pidff->effect_direction->value[0] = - pidff_rescale(effect->direction, 0xffff, - pidff->effect_direction); - pidff->set_effect[PID_START_DELAY].value[0] = effect->replay.delay; + + /* Use fixed direction if needed */ + pidff->effect_direction->value[0] = pidff_rescale( + pidff->quirks & HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION ? + PIDFF_FIXED_WHEEL_DIRECTION : effect->direction, + U16_MAX, pidff->effect_direction); + + /* Omit setting delay field if it's missing */ + if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_DELAY)) + pidff_set_time(&pidff->set_effect[PID_START_DELAY], + effect->replay.delay); hid_hw_request(pidff->hid, pidff->reports[PID_SET_EFFECT], HID_REQ_SET_REPORT); @@ -343,11 +435,11 @@ static void pidff_set_periodic_report(struct pidff_device *pidff, pidff_set_signed(&pidff->set_periodic[PID_OFFSET], effect->u.periodic.offset); pidff_set(&pidff->set_periodic[PID_PHASE], effect->u.periodic.phase); - pidff->set_periodic[PID_PERIOD].value[0] = effect->u.periodic.period; + pidff_set_time(&pidff->set_periodic[PID_PERIOD], + effect->u.periodic.period); hid_hw_request(pidff->hid, pidff->reports[PID_SET_PERIODIC], HID_REQ_SET_REPORT); - } /* @@ -368,13 +460,19 @@ static int pidff_needs_set_periodic(struct ff_effect *effect, static void pidff_set_condition_report(struct pidff_device *pidff, struct ff_effect *effect) { - int i; + int i, max_axis; + + /* Devices missing Parameter Block Offset can only have one axis */ + max_axis = pidff->quirks & HID_PIDFF_QUIRK_MISSING_PBO ? 
1 : 2; pidff->set_condition[PID_EFFECT_BLOCK_INDEX].value[0] = pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]; - for (i = 0; i < 2; i++) { - pidff->set_condition[PID_PARAM_BLOCK_OFFSET].value[0] = i; + for (i = 0; i < max_axis; i++) { + /* Omit Parameter Block Offset if missing */ + if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_PBO)) + pidff->set_condition[PID_PARAM_BLOCK_OFFSET].value[0] = i; + pidff_set_signed(&pidff->set_condition[PID_CP_OFFSET], effect->u.condition[i].center); pidff_set_signed(&pidff->set_condition[PID_POS_COEFFICIENT], @@ -441,9 +539,104 @@ static int pidff_needs_set_ramp(struct ff_effect *effect, struct ff_effect *old) effect->u.ramp.end_level != old->u.ramp.end_level; } +/* + * Set device gain + */ +static void pidff_set_gain_report(struct pidff_device *pidff, u16 gain) +{ + if (!pidff->device_gain[PID_DEVICE_GAIN_FIELD].field) + return; + + pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], gain); + hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_GAIN], + HID_REQ_SET_REPORT); +} + +/* + * Send device control report to the device + */ +static void pidff_set_device_control(struct pidff_device *pidff, int field) +{ + int i, index; + int field_index = pidff->control_id[field]; + + if (field_index < 1) + return; + + /* Detect if the field is a bitmask variable or an array */ + if (pidff->device_control->flags & HID_MAIN_ITEM_VARIABLE) { + hid_dbg(pidff->hid, "DEVICE_CONTROL is a bitmask\n"); + + /* Clear current bitmask */ + for(i = 0; i < sizeof(pidff_device_control); i++) { + index = pidff->control_id[i]; + if (index < 1) + continue; + + pidff->device_control->value[index - 1] = 0; + } + + pidff->device_control->value[field_index - 1] = 1; + } else { + hid_dbg(pidff->hid, "DEVICE_CONTROL is an array\n"); + pidff->device_control->value[0] = field_index; + } + + hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT); + hid_hw_wait(pidff->hid); +} + +/* + * Modify actuators state + */ +static void pidff_set_actuators(struct pidff_device *pidff, bool enable) +{ + hid_dbg(pidff->hid, "%s actuators\n", enable ? "Enable" : "Disable"); + pidff_set_device_control(pidff, + enable ? PID_ENABLE_ACTUATORS : PID_DISABLE_ACTUATORS); +} + +/* + * Reset the device, stop all effects, enable actuators + */ +static void pidff_reset(struct pidff_device *pidff) +{ + /* We reset twice as sometimes hid_wait_io isn't waiting long enough */ + pidff_set_device_control(pidff, PID_RESET); + pidff_set_device_control(pidff, PID_RESET); + pidff->effect_count = 0; + + pidff_set_device_control(pidff, PID_STOP_ALL_EFFECTS); + pidff_set_actuators(pidff, 1); +} + +/* + * Fetch pool report + */ +static void pidff_fetch_pool(struct pidff_device *pidff) +{ + int i; + struct hid_device *hid = pidff->hid; + + /* Repeat if PID_SIMULTANEOUS_MAX < 2 to make sure it's correct */ + for(i = 0; i < 20; i++) { + hid_hw_request(hid, pidff->reports[PID_POOL], HID_REQ_GET_REPORT); + hid_hw_wait(hid); + + if (!pidff->pool[PID_SIMULTANEOUS_MAX].value) + return; + if (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] >= 2) + return; + } + hid_warn(hid, "device reports %d simultaneous effects\n", + pidff->pool[PID_SIMULTANEOUS_MAX].value[0]); +} + /* * Send a request for effect upload to the device * + * Reset and enable actuators if no effects were present on the device + * * Returns 0 if device reported success, -ENOSPC if the device reported memory * is full. Upon unknown response the function will retry for 60 times, if * still unsuccessful -EIO is returned. 
@@ -452,6 +645,9 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum) { int j; + if (!pidff->effect_count) + pidff_reset(pidff); + pidff->create_new_effect_type->value[0] = efnum; hid_hw_request(pidff->hid, pidff->reports[PID_CREATE_NEW_EFFECT], HID_REQ_SET_REPORT); @@ -471,6 +667,8 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum) hid_dbg(pidff->hid, "device reported free memory: %d bytes\n", pidff->block_load[PID_RAM_POOL_AVAILABLE].value ? pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1); + + pidff->effect_count++; return 0; } if (pidff->block_load_status->value[0] == @@ -480,6 +678,11 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum) pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1); return -ENOSPC; } + if (pidff->block_load_status->value[0] == + pidff->status_id[PID_BLOCK_LOAD_ERROR]) { + hid_dbg(pidff->hid, "device error during effect creation\n"); + return -EREMOTEIO; + } } hid_err(pidff->hid, "pid_block_load failed 60 times\n"); return -EIO; @@ -498,7 +701,8 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n) } else { pidff->effect_operation_status->value[0] = pidff->operation_id[PID_EFFECT_START]; - pidff->effect_operation[PID_LOOP_COUNT].value[0] = n; + pidff->effect_operation[PID_LOOP_COUNT].value[0] = + pidff_clamp(n, pidff->effect_operation[PID_LOOP_COUNT].field); } hid_hw_request(pidff->hid, pidff->reports[PID_EFFECT_OPERATION], @@ -511,20 +715,22 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n) static int pidff_playback(struct input_dev *dev, int effect_id, int value) { struct pidff_device *pidff = dev->ff->private; - pidff_playback_pid(pidff, pidff->pid_id[effect_id], value); - return 0; } /* * Erase effect with PID id + * Decrease the device effect counter */ static void pidff_erase_pid(struct pidff_device *pidff, int pid_id) { pidff->block_free[PID_EFFECT_BLOCK_INDEX].value[0] = pid_id; hid_hw_request(pidff->hid, pidff->reports[PID_BLOCK_FREE], HID_REQ_SET_REPORT); + + if (pidff->effect_count > 0) + pidff->effect_count--; } /* @@ -537,8 +743,11 @@ static int pidff_erase_effect(struct input_dev *dev, int effect_id) hid_dbg(pidff->hid, "starting to erase %d/%d\n", effect_id, pidff->pid_id[effect_id]); - /* Wait for the queue to clear. We do not want a full fifo to - prevent the effect removal. */ + + /* + * Wait for the queue to clear. We do not want + * a full fifo to prevent the effect removal. + */ hid_hw_wait(pidff->hid); pidff_playback_pid(pidff, pid_id, 0); pidff_erase_pid(pidff, pid_id); @@ -574,11 +783,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect, pidff_set_effect_report(pidff, effect); if (!old || pidff_needs_set_constant(effect, old)) pidff_set_constant_force_report(pidff, effect); - if (!old || - pidff_needs_set_envelope(&effect->u.constant.envelope, - &old->u.constant.envelope)) - pidff_set_envelope_report(pidff, - &effect->u.constant.envelope); + if (pidff_needs_set_envelope(&effect->u.constant.envelope, + old ? 
&old->u.constant.envelope : NULL)) + pidff_set_envelope_report(pidff, &effect->u.constant.envelope); break; case FF_PERIODIC: @@ -604,6 +811,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect, return -EINVAL; } + if (pidff->quirks & HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY) + type_id = PID_SINE; + error = pidff_request_effect_upload(pidff, pidff->type_id[type_id]); if (error) @@ -613,11 +823,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect, pidff_set_effect_report(pidff, effect); if (!old || pidff_needs_set_periodic(effect, old)) pidff_set_periodic_report(pidff, effect); - if (!old || - pidff_needs_set_envelope(&effect->u.periodic.envelope, - &old->u.periodic.envelope)) - pidff_set_envelope_report(pidff, - &effect->u.periodic.envelope); + if (pidff_needs_set_envelope(&effect->u.periodic.envelope, + old ? &old->u.periodic.envelope : NULL)) + pidff_set_envelope_report(pidff, &effect->u.periodic.envelope); break; case FF_RAMP: @@ -631,56 +839,32 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect, pidff_set_effect_report(pidff, effect); if (!old || pidff_needs_set_ramp(effect, old)) pidff_set_ramp_force_report(pidff, effect); - if (!old || - pidff_needs_set_envelope(&effect->u.ramp.envelope, - &old->u.ramp.envelope)) - pidff_set_envelope_report(pidff, - &effect->u.ramp.envelope); + if (pidff_needs_set_envelope(&effect->u.ramp.envelope, + old ? &old->u.ramp.envelope : NULL)) + pidff_set_envelope_report(pidff, &effect->u.ramp.envelope); break; case FF_SPRING: - if (!old) { - error = pidff_request_effect_upload(pidff, - pidff->type_id[PID_SPRING]); - if (error) - return error; - } - if (!old || pidff_needs_set_effect(effect, old)) - pidff_set_effect_report(pidff, effect); - if (!old || pidff_needs_set_condition(effect, old)) - pidff_set_condition_report(pidff, effect); - break; - - case FF_FRICTION: - if (!old) { - error = pidff_request_effect_upload(pidff, - pidff->type_id[PID_FRICTION]); - if (error) - return error; - } - if (!old || pidff_needs_set_effect(effect, old)) - pidff_set_effect_report(pidff, effect); - if (!old || pidff_needs_set_condition(effect, old)) - pidff_set_condition_report(pidff, effect); - break; - case FF_DAMPER: - if (!old) { - error = pidff_request_effect_upload(pidff, - pidff->type_id[PID_DAMPER]); - if (error) - return error; - } - if (!old || pidff_needs_set_effect(effect, old)) - pidff_set_effect_report(pidff, effect); - if (!old || pidff_needs_set_condition(effect, old)) - pidff_set_condition_report(pidff, effect); - break; - case FF_INERTIA: + case FF_FRICTION: if (!old) { + switch(effect->type) { + case FF_SPRING: + type_id = PID_SPRING; + break; + case FF_DAMPER: + type_id = PID_DAMPER; + break; + case FF_INERTIA: + type_id = PID_INERTIA; + break; + case FF_FRICTION: + type_id = PID_FRICTION; + break; + } error = pidff_request_effect_upload(pidff, - pidff->type_id[PID_INERTIA]); + pidff->type_id[type_id]); if (error) return error; } @@ -709,11 +893,7 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect, */ static void pidff_set_gain(struct input_dev *dev, u16 gain) { - struct pidff_device *pidff = dev->ff->private; - - pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], gain); - hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_GAIN], - HID_REQ_SET_REPORT); + pidff_set_gain_report(dev->ff->private, gain); } static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude) @@ -736,7 +916,10 @@ static void pidff_autocenter(struct 
pidff_device *pidff, u16 magnitude) pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] = 0; pidff_set(&pidff->set_effect[PID_GAIN], magnitude); pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1; - pidff->set_effect[PID_START_DELAY].value[0] = 0; + + /* Omit setting delay field if it's missing */ + if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_DELAY)) + pidff->set_effect[PID_START_DELAY].value[0] = 0; hid_hw_request(pidff->hid, pidff->reports[PID_SET_EFFECT], HID_REQ_SET_REPORT); @@ -747,9 +930,7 @@ static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude) */ static void pidff_set_autocenter(struct input_dev *dev, u16 magnitude) { - struct pidff_device *pidff = dev->ff->private; - - pidff_autocenter(pidff, magnitude); + pidff_autocenter(dev->ff->private, magnitude); } /* @@ -758,7 +939,13 @@ static void pidff_set_autocenter(struct input_dev *dev, u16 magnitude) static int pidff_find_fields(struct pidff_usage *usage, const u8 *table, struct hid_report *report, int count, int strict) { + if (!report) { + pr_debug("pidff_find_fields, null report\n"); + return -1; + } + int i, j, k, found; + int return_value = 0; for (k = 0; k < count; k++) { found = 0; @@ -783,12 +970,22 @@ static int pidff_find_fields(struct pidff_usage *usage, const u8 *table, if (found) break; } - if (!found && strict) { + if (!found && table[k] == pidff_set_effect[PID_START_DELAY]) { + pr_debug("Delay field not found, but that's OK\n"); + pr_debug("Setting MISSING_DELAY quirk\n"); + return_value |= HID_PIDFF_QUIRK_MISSING_DELAY; + } + else if (!found && table[k] == pidff_set_condition[PID_PARAM_BLOCK_OFFSET]) { + pr_debug("PBO field not found, but that's OK\n"); + pr_debug("Setting MISSING_PBO quirk\n"); + return_value |= HID_PIDFF_QUIRK_MISSING_PBO; + } + else if (!found && strict) { pr_debug("failed to locate %d\n", k); return -1; } } - return 0; + return return_value; } /* @@ -871,6 +1068,11 @@ static int pidff_reports_ok(struct pidff_device *pidff) static struct hid_field *pidff_find_special_field(struct hid_report *report, int usage, int enforce_min) { + if (!report) { + pr_debug("pidff_find_special_field, null report\n"); + return NULL; + } + int i; for (i = 0; i < report->maxfield; i++) { @@ -923,22 +1125,24 @@ static int pidff_find_special_fields(struct pidff_device *pidff) pidff->create_new_effect_type = pidff_find_special_field(pidff->reports[PID_CREATE_NEW_EFFECT], - 0x25, 1); + PID_EFFECT_TYPE, 1); pidff->set_effect_type = pidff_find_special_field(pidff->reports[PID_SET_EFFECT], - 0x25, 1); + PID_EFFECT_TYPE, 1); pidff->effect_direction = pidff_find_special_field(pidff->reports[PID_SET_EFFECT], - 0x57, 0); + PID_DIRECTION, 0); pidff->device_control = pidff_find_special_field(pidff->reports[PID_DEVICE_CONTROL], - 0x96, 1); + PID_DEVICE_CONTROL_ARRAY, + !(pidff->quirks & HID_PIDFF_QUIRK_PERMISSIVE_CONTROL)); + pidff->block_load_status = pidff_find_special_field(pidff->reports[PID_BLOCK_LOAD], - 0x8b, 1); + PID_BLOCK_LOAD_STATUS, 1); pidff->effect_operation_status = pidff_find_special_field(pidff->reports[PID_EFFECT_OPERATION], - 0x78, 1); + PID_EFFECT_OPERATION_ARRAY, 1); hid_dbg(pidff->hid, "search done\n"); @@ -967,10 +1171,6 @@ static int pidff_find_special_fields(struct pidff_device *pidff) return -1; } - pidff_find_special_keys(pidff->control_id, pidff->device_control, - pidff_device_control, - sizeof(pidff_device_control)); - PIDFF_FIND_SPECIAL_KEYS(control_id, device_control, device_control); if (!PIDFF_FIND_SPECIAL_KEYS(type_id, create_new_effect_type, @@ -1049,7 +1249,6 @@ static int 
pidff_find_effects(struct pidff_device *pidff, set_bit(FF_FRICTION, dev->ffbit); return 0; - } #define PIDFF_FIND_FIELDS(name, report, strict) \ @@ -1062,12 +1261,19 @@ static int pidff_find_effects(struct pidff_device *pidff, */ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev) { - int envelope_ok = 0; + int status = 0; - if (PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1)) { + /* Save info about the device not having the DELAY ffb field. */ + status = PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1); + if (status == -1) { hid_err(pidff->hid, "unknown set_effect report layout\n"); return -ENODEV; } + pidff->quirks |= status; + + if (status & HID_PIDFF_QUIRK_MISSING_DELAY) + hid_dbg(pidff->hid, "Adding MISSING_DELAY quirk\n"); + PIDFF_FIND_FIELDS(block_load, PID_BLOCK_LOAD, 0); if (!pidff->block_load[PID_EFFECT_BLOCK_INDEX].value) { @@ -1085,13 +1291,10 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev) return -ENODEV; } - if (!PIDFF_FIND_FIELDS(set_envelope, PID_SET_ENVELOPE, 1)) - envelope_ok = 1; - if (pidff_find_special_fields(pidff) || pidff_find_effects(pidff, dev)) return -ENODEV; - if (!envelope_ok) { + if (PIDFF_FIND_FIELDS(set_envelope, PID_SET_ENVELOPE, 1)) { if (test_and_clear_bit(FF_CONSTANT, dev->ffbit)) hid_warn(pidff->hid, "has constant effect but no envelope\n"); @@ -1116,16 +1319,20 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev) clear_bit(FF_RAMP, dev->ffbit); } - if ((test_bit(FF_SPRING, dev->ffbit) || - test_bit(FF_DAMPER, dev->ffbit) || - test_bit(FF_FRICTION, dev->ffbit) || - test_bit(FF_INERTIA, dev->ffbit)) && - PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1)) { - hid_warn(pidff->hid, "unknown condition effect layout\n"); - clear_bit(FF_SPRING, dev->ffbit); - clear_bit(FF_DAMPER, dev->ffbit); - clear_bit(FF_FRICTION, dev->ffbit); - clear_bit(FF_INERTIA, dev->ffbit); + if (test_bit(FF_SPRING, dev->ffbit) || + test_bit(FF_DAMPER, dev->ffbit) || + test_bit(FF_FRICTION, dev->ffbit) || + test_bit(FF_INERTIA, dev->ffbit)) { + status = PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1); + + if (status < 0) { + hid_warn(pidff->hid, "unknown condition effect layout\n"); + clear_bit(FF_SPRING, dev->ffbit); + clear_bit(FF_DAMPER, dev->ffbit); + clear_bit(FF_FRICTION, dev->ffbit); + clear_bit(FF_INERTIA, dev->ffbit); + } + pidff->quirks |= status; } if (test_bit(FF_PERIODIC, dev->ffbit) && @@ -1142,46 +1349,6 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev) return 0; } -/* - * Reset the device - */ -static void pidff_reset(struct pidff_device *pidff) -{ - struct hid_device *hid = pidff->hid; - int i = 0; - - pidff->device_control->value[0] = pidff->control_id[PID_RESET]; - /* We reset twice as sometimes hid_wait_io isn't waiting long enough */ - hid_hw_request(hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT); - hid_hw_wait(hid); - hid_hw_request(hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT); - hid_hw_wait(hid); - - pidff->device_control->value[0] = - pidff->control_id[PID_ENABLE_ACTUATORS]; - hid_hw_request(hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT); - hid_hw_wait(hid); - - /* pool report is sometimes messed up, refetch it */ - hid_hw_request(hid, pidff->reports[PID_POOL], HID_REQ_GET_REPORT); - hid_hw_wait(hid); - - if (pidff->pool[PID_SIMULTANEOUS_MAX].value) { - while (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] < 2) { - if (i++ > 20) { - hid_warn(pidff->hid, - "device reports %d 
simultaneous effects\n", - pidff->pool[PID_SIMULTANEOUS_MAX].value[0]); - break; - } - hid_dbg(pidff->hid, "pid_pool requested again\n"); - hid_hw_request(hid, pidff->reports[PID_POOL], - HID_REQ_GET_REPORT); - hid_hw_wait(hid); - } - } -} - /* * Test if autocenter modification is using the supported method */ @@ -1206,24 +1373,23 @@ static int pidff_check_autocenter(struct pidff_device *pidff, if (pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] == pidff->block_load[PID_EFFECT_BLOCK_INDEX].field->logical_minimum + 1) { - pidff_autocenter(pidff, 0xffff); + pidff_autocenter(pidff, U16_MAX); set_bit(FF_AUTOCENTER, dev->ffbit); } else { hid_notice(pidff->hid, "device has unknown autocenter control method\n"); } - pidff_erase_pid(pidff, pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]); return 0; - } /* * Check if the device is PID and initialize it + * Set initial quirks */ -int hid_pidff_init(struct hid_device *hid) +int hid_pidff_init_with_quirks(struct hid_device *hid, u32 initial_quirks) { struct pidff_device *pidff; struct hid_input *hidinput = list_entry(hid->inputs.next, @@ -1245,6 +1411,8 @@ int hid_pidff_init(struct hid_device *hid) return -ENOMEM; pidff->hid = hid; + pidff->quirks = initial_quirks; + pidff->effect_count = 0; hid_device_io_start(hid); @@ -1261,14 +1429,9 @@ int hid_pidff_init(struct hid_device *hid) if (error) goto fail; - pidff_reset(pidff); - - if (test_bit(FF_GAIN, dev->ffbit)) { - pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], 0xffff); - hid_hw_request(hid, pidff->reports[PID_DEVICE_GAIN], - HID_REQ_SET_REPORT); - } - + /* pool report is sometimes messed up, refetch it */ + pidff_fetch_pool(pidff); + pidff_set_gain_report(pidff, U16_MAX); error = pidff_check_autocenter(pidff, dev); if (error) goto fail; @@ -1311,6 +1474,7 @@ int hid_pidff_init(struct hid_device *hid) ff->playback = pidff_playback; hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula \n"); + hid_dbg(dev, "Active quirks mask: 0x%x\n", pidff->quirks); hid_device_io_stop(hid); @@ -1322,3 +1486,14 @@ int hid_pidff_init(struct hid_device *hid) kfree(pidff); return error; } +EXPORT_SYMBOL_GPL(hid_pidff_init_with_quirks); + +/* + * Check if the device is PID and initialize it + * Wrapper made to keep the compatibility with old + * init function + */ +int hid_pidff_init(struct hid_device *hid) +{ + return hid_pidff_init_with_quirks(hid, 0); +} diff --git a/drivers/hid/usbhid/hid-pidff.h b/drivers/hid/usbhid/hid-pidff.h new file mode 100644 index 0000000000000..dda571e0a5bd3 --- /dev/null +++ b/drivers/hid/usbhid/hid-pidff.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef __HID_PIDFF_H +#define __HID_PIDFF_H + +#include + +/* HID PIDFF quirks */ + +/* Delay field (0xA7) missing. Skip it during set effect report upload */ +#define HID_PIDFF_QUIRK_MISSING_DELAY BIT(0) + +/* Missing Parameter block offset (0x23). 
Skip it during SET_CONDITION + report upload */ +#define HID_PIDFF_QUIRK_MISSING_PBO BIT(1) + +/* Initialise device control field even if logical_minimum != 1 */ +#define HID_PIDFF_QUIRK_PERMISSIVE_CONTROL BIT(2) + +/* Use fixed 0x4000 direction during SET_EFFECT report upload */ +#define HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION BIT(3) + +/* Force all periodic effects to be uploaded as SINE */ +#define HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY BIT(4) + +#ifdef CONFIG_HID_PID +int hid_pidff_init(struct hid_device *hid); +int hid_pidff_init_with_quirks(struct hid_device *hid, u32 initial_quirks); +#else +#define hid_pidff_init NULL +#define hid_pidff_init_with_quirks NULL +#endif + +#endif diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index afe470f3661c7..6105ea9a6c6aa 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ b/drivers/hsi/clients/ssi_protocol.c @@ -401,6 +401,7 @@ static void ssip_reset(struct hsi_client *cl) del_timer(&ssi->rx_wd); del_timer(&ssi->tx_wd); del_timer(&ssi->keep_alive); + cancel_work_sync(&ssi->work); ssi->main_state = 0; ssi->send_state = 0; ssi->recv_state = 0; diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 9b15f7daf5059..2b6749c9712ef 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -2262,12 +2262,25 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size) struct resource *iter; mutex_lock(&hyperv_mmio_lock); + + /* + * If all bytes of the MMIO range to be released are within the + * special case fb_mmio shadow region, skip releasing the shadow + * region since no corresponding __request_region() was done + * in vmbus_allocate_mmio(). + */ + if (fb_mmio && start >= fb_mmio->start && + (start + size - 1 <= fb_mmio->end)) + goto skip_shadow_release; + for (iter = hyperv_mmio; iter; iter = iter->sibling) { if ((iter->start >= start + size) || (iter->end <= start)) continue; __release_region(iter, start, size); } + +skip_shadow_release: release_mem_region(start, size); mutex_unlock(&hyperv_mmio_lock); diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c index 7802bbf5f9587..59424103f6348 100644 --- a/drivers/hwmon/ad7314.c +++ b/drivers/hwmon/ad7314.c @@ -22,11 +22,13 @@ */ #define AD7314_TEMP_MASK 0x7FE0 #define AD7314_TEMP_SHIFT 5 +#define AD7314_LEADING_ZEROS_MASK BIT(15) /* * ADT7301 and ADT7302 temperature masks */ #define ADT7301_TEMP_MASK 0x3FFF +#define ADT7301_LEADING_ZEROS_MASK (BIT(15) | BIT(14)) enum ad7314_variant { adt7301, @@ -65,12 +67,20 @@ static ssize_t ad7314_temperature_show(struct device *dev, return ret; switch (spi_get_device_id(chip->spi_dev)->driver_data) { case ad7314: + if (ret & AD7314_LEADING_ZEROS_MASK) { + /* Invalid read-out, leading zero part is missing */ + return -EIO; + } data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT; data = sign_extend32(data, 9); return sprintf(buf, "%d\n", 250 * data); case adt7301: case adt7302: + if (ret & ADT7301_LEADING_ZEROS_MASK) { + /* Invalid read-out, leading zero part is missing */ + return -EIO; + } /* * Documented as a 13 bit twos complement register * with a sign bit - which is a 14 bit 2's complement diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c index fa3351351825b..79bc67ffb9986 100644 --- a/drivers/hwmon/nct6775-core.c +++ b/drivers/hwmon/nct6775-core.c @@ -273,8 +273,8 @@ static const s8 NCT6776_BEEP_BITS[NUM_BEEP_BITS] = { static const u16 NCT6776_REG_TOLERANCE_H[] = { 0x10c, 0x20c, 0x30c, 0x80c, 0x90c, 0xa0c, 0xb0c }; -static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 
0, 0, 0 }; -static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 }; +static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0, 0 }; +static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0, 0 }; static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c }; diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index b5352900463fb..0d29c8f97ba7c 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -181,40 +181,40 @@ static const struct ntc_compensation ncpXXwf104[] = { }; static const struct ntc_compensation ncpXXxh103[] = { - { .temp_c = -40, .ohm = 247565 }, - { .temp_c = -35, .ohm = 181742 }, - { .temp_c = -30, .ohm = 135128 }, - { .temp_c = -25, .ohm = 101678 }, - { .temp_c = -20, .ohm = 77373 }, - { .temp_c = -15, .ohm = 59504 }, - { .temp_c = -10, .ohm = 46222 }, - { .temp_c = -5, .ohm = 36244 }, - { .temp_c = 0, .ohm = 28674 }, - { .temp_c = 5, .ohm = 22878 }, - { .temp_c = 10, .ohm = 18399 }, - { .temp_c = 15, .ohm = 14910 }, - { .temp_c = 20, .ohm = 12169 }, + { .temp_c = -40, .ohm = 195652 }, + { .temp_c = -35, .ohm = 148171 }, + { .temp_c = -30, .ohm = 113347 }, + { .temp_c = -25, .ohm = 87559 }, + { .temp_c = -20, .ohm = 68237 }, + { .temp_c = -15, .ohm = 53650 }, + { .temp_c = -10, .ohm = 42506 }, + { .temp_c = -5, .ohm = 33892 }, + { .temp_c = 0, .ohm = 27219 }, + { .temp_c = 5, .ohm = 22021 }, + { .temp_c = 10, .ohm = 17926 }, + { .temp_c = 15, .ohm = 14674 }, + { .temp_c = 20, .ohm = 12081 }, { .temp_c = 25, .ohm = 10000 }, - { .temp_c = 30, .ohm = 8271 }, - { .temp_c = 35, .ohm = 6883 }, - { .temp_c = 40, .ohm = 5762 }, - { .temp_c = 45, .ohm = 4851 }, - { .temp_c = 50, .ohm = 4105 }, - { .temp_c = 55, .ohm = 3492 }, - { .temp_c = 60, .ohm = 2985 }, - { .temp_c = 65, .ohm = 2563 }, - { .temp_c = 70, .ohm = 2211 }, - { .temp_c = 75, .ohm = 1915 }, - { .temp_c = 80, .ohm = 1666 }, - { .temp_c = 85, .ohm = 1454 }, - { .temp_c = 90, .ohm = 1275 }, - { .temp_c = 95, .ohm = 1121 }, - { .temp_c = 100, .ohm = 990 }, - { .temp_c = 105, .ohm = 876 }, - { .temp_c = 110, .ohm = 779 }, - { .temp_c = 115, .ohm = 694 }, - { .temp_c = 120, .ohm = 620 }, - { .temp_c = 125, .ohm = 556 }, + { .temp_c = 30, .ohm = 8315 }, + { .temp_c = 35, .ohm = 6948 }, + { .temp_c = 40, .ohm = 5834 }, + { .temp_c = 45, .ohm = 4917 }, + { .temp_c = 50, .ohm = 4161 }, + { .temp_c = 55, .ohm = 3535 }, + { .temp_c = 60, .ohm = 3014 }, + { .temp_c = 65, .ohm = 2586 }, + { .temp_c = 70, .ohm = 2228 }, + { .temp_c = 75, .ohm = 1925 }, + { .temp_c = 80, .ohm = 1669 }, + { .temp_c = 85, .ohm = 1452 }, + { .temp_c = 90, .ohm = 1268 }, + { .temp_c = 95, .ohm = 1110 }, + { .temp_c = 100, .ohm = 974 }, + { .temp_c = 105, .ohm = 858 }, + { .temp_c = 110, .ohm = 758 }, + { .temp_c = 115, .ohm = 672 }, + { .temp_c = 120, .ohm = 596 }, + { .temp_c = 125, .ohm = 531 }, }; /* diff --git a/drivers/hwmon/peci/dimmtemp.c b/drivers/hwmon/peci/dimmtemp.c index 4a72e9712408e..b7b09780c7b0a 100644 --- a/drivers/hwmon/peci/dimmtemp.c +++ b/drivers/hwmon/peci/dimmtemp.c @@ -127,8 +127,6 @@ static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no) return 0; ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data); - if (ret == -ENODATA) /* Use default or previous value */ - return 0; if (ret) return ret; @@ -509,11 +507,11 @@ read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, ®_val); if (ret || !(reg_val & 
BIT(31))) - return -ENODATA; /* Use default or previous value */ + return -ENODATA; ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, ®_val); if (ret) - return -ENODATA; /* Use default or previous value */ + return -ENODATA; /* * Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0 @@ -546,11 +544,11 @@ read_thresholds_spr(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd4, ®_val); if (ret || !(reg_val & BIT(31))) - return -ENODATA; /* Use default or previous value */ + return -ENODATA; ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd0, ®_val); if (ret) - return -ENODATA; /* Use default or previous value */ + return -ENODATA; /* * Device 26, Offset 219a8: IMC 0 channel 0 -> rank 0 diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c index ec40c5c599543..59424dc518c8f 100644 --- a/drivers/hwmon/pmbus/pmbus.c +++ b/drivers/hwmon/pmbus/pmbus.c @@ -103,6 +103,8 @@ static int pmbus_identify(struct i2c_client *client, if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) { int page; + info->pages = PMBUS_PAGES; + for (page = 1; page < PMBUS_PAGES; page++) { if (pmbus_set_page(client, page, 0xff) < 0) break; diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c index 5e0759a70f6d5..92d82faf237fc 100644 --- a/drivers/hwmon/xgene-hwmon.c +++ b/drivers/hwmon/xgene-hwmon.c @@ -706,7 +706,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev) goto out; } - if (!ctx->pcc_comm_addr) { + if (IS_ERR_OR_NULL(ctx->pcc_comm_addr)) { dev_err(&pdev->dev, "Failed to ioremap PCC comm region\n"); rc = -ENOMEM; diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c index bfea880d6dfbf..d8ad64ea81f11 100644 --- a/drivers/hwtracing/coresight/coresight-catu.c +++ b/drivers/hwtracing/coresight/coresight-catu.c @@ -269,7 +269,7 @@ catu_init_sg_table(struct device *catu_dev, int node, * Each table can address upto 1MB and we can have * CATU_PAGES_PER_SYSPAGE tables in a system page. */ - nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE; + nr_tpages = DIV_ROUND_UP(size, CATU_PAGES_PER_SYSPAGE * SZ_1M); catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages, size >> PAGE_SHIFT, pages); if (IS_ERR(catu_table)) diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index ea38ecf26fcbf..c42aa9fddab9b 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -1017,18 +1017,20 @@ static void coresight_remove_conns(struct coresight_device *csdev) } /** - * coresight_timeout - loop until a bit has changed to a specific register - * state. + * coresight_timeout_action - loop until a bit has changed to a specific register + * state, with a callback after every trial. * @csa: coresight device access for the device * @offset: Offset of the register from the base of the device. * @position: the position of the bit of interest. * @value: the value the bit should have. + * @cb: Call back after each trial. * * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if * TIMEOUT_US has elapsed, which ever happens first. 
*/ -int coresight_timeout(struct csdev_access *csa, u32 offset, - int position, int value) +int coresight_timeout_action(struct csdev_access *csa, u32 offset, + int position, int value, + coresight_timeout_cb_t cb) { int i; u32 val; @@ -1044,7 +1046,8 @@ int coresight_timeout(struct csdev_access *csa, u32 offset, if (!(val & BIT(position))) return 0; } - + if (cb) + cb(csa, offset, position, value); /* * Delay is arbitrary - the specification doesn't say how long * we are expected to wait. Extra check required to make sure @@ -1056,6 +1059,13 @@ int coresight_timeout(struct csdev_access *csa, u32 offset, return -EAGAIN; } +EXPORT_SYMBOL_GPL(coresight_timeout_action); + +int coresight_timeout(struct csdev_access *csa, u32 offset, + int position, int value) +{ + return coresight_timeout_action(csa, offset, position, value, NULL); +} EXPORT_SYMBOL_GPL(coresight_timeout); u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset) diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 66d44a404ad0c..be8b46f26ddc8 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -399,6 +399,29 @@ static void etm4_check_arch_features(struct etmv4_drvdata *drvdata, } #endif /* CONFIG_ETM4X_IMPDEF_FEATURE */ +static void etm4x_sys_ins_barrier(struct csdev_access *csa, u32 offset, int pos, int val) +{ + if (!csa->io_mem) + isb(); +} + +/* + * etm4x_wait_status: Poll for TRCSTATR.<pos> == <val>. While using system + * instruction to access the trace unit, each access must be separated by a + * synchronization barrier. See ARM IHI0064H.b section "4.3.7 Synchronization of + * register updates", for system instructions section, in "Notes": + * + * "In particular, whenever disabling or enabling the trace unit, a poll of + * TRCSTATR needs explicit synchronization between each read of TRCSTATR" + */ +static int etm4x_wait_status(struct csdev_access *csa, int pos, int val) +{ + if (!csa->io_mem) + return coresight_timeout_action(csa, TRCSTATR, pos, val, + etm4x_sys_ins_barrier); + return coresight_timeout(csa, TRCSTATR, pos, val); +} + static int etm4_enable_hw(struct etmv4_drvdata *drvdata) { int i, rc; @@ -430,7 +453,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) isb(); /* wait for TRCSTATR.IDLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) + if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1)) dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); if (drvdata->nr_pe) @@ -523,7 +546,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) isb(); /* wait for TRCSTATR.IDLE to go back down to '0' */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0)) + if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 0)) dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); @@ -906,10 +929,25 @@ static void etm4_disable_hw(void *info) tsb_csync(); etm4x_relaxed_write32(csa, control, TRCPRGCTLR); + /* + * As recommended by section 4.3.7 ("Synchronization when using system + * instructions to program the trace unit") of ARM IHI 0064H.b, the + * self-hosted trace analyzer must perform a Context synchronization + * event between writing to the TRCPRGCTLR and reading the TRCSTATR. 
+ */ + if (!csa->io_mem) + isb(); + /* wait for TRCSTATR.PMSTABLE to go to '1' */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) + if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) dev_err(etm_dev, "timeout while waiting for PM stable Trace Status\n"); + /* + * As recommended by section 4.3.7 (Synchronization of register updates) + * of ARM IHI 0064H.b. + */ + isb(); + /* read the status of the single shot comparators */ for (i = 0; i < drvdata->nr_ss_cmp; i++) { config->ss_status[i] = @@ -1711,7 +1749,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) etm4_os_lock(drvdata); /* wait for TRCSTATR.PMSTABLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) { + if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) { dev_err(etm_dev, "timeout while waiting for PM Stable Status\n"); etm4_os_unlock(drvdata); @@ -1802,7 +1840,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) state->trcpdcr = etm4x_read32(csa, TRCPDCR); /* wait for TRCSTATR.IDLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) { + if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) { dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); etm4_os_unlock(drvdata); diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 0d7b9839e5b66..6bb6af0f96fa5 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -329,6 +329,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa824), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Arrow Lake */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7724), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Panther Lake-H */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe324), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Panther Lake-P/U */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe424), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { /* Alder Lake CPU */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f), diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c index 544c94e86b896..1eac358380405 100644 --- a/drivers/i2c/busses/i2c-ali1535.c +++ b/drivers/i2c/busses/i2c-ali1535.c @@ -485,6 +485,8 @@ MODULE_DEVICE_TABLE(pci, ali1535_ids); static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id) { + int ret; + if (ali1535_setup(dev)) { dev_warn(&dev->dev, "ALI1535 not detected, module not inserted.\n"); @@ -496,7 +498,15 @@ static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id) snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name), "SMBus ALI1535 adapter at %04x", ali1535_offset); - return i2c_add_adapter(&ali1535_adapter); + ret = i2c_add_adapter(&ali1535_adapter); + if (ret) + goto release_region; + + return 0; + +release_region: + release_region(ali1535_smba, ALI1535_SMB_IOSIZE); + return ret; } static void ali1535_remove(struct pci_dev *dev) diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c index 4761c72081022..418d11266671e 100644 --- a/drivers/i2c/busses/i2c-ali15x3.c +++ b/drivers/i2c/busses/i2c-ali15x3.c @@ -472,6 +472,8 @@ MODULE_DEVICE_TABLE (pci, ali15x3_ids); static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id) { + int ret; + if (ali15x3_setup(dev)) { dev_err(&dev->dev, "ALI15X3 not detected, module not inserted.\n"); @@ -483,7 +485,15 @@ static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id) 
snprintf(ali15x3_adapter.name, sizeof(ali15x3_adapter.name), "SMBus ALI15X3 adapter at %04x", ali15x3_smba); - return i2c_add_adapter(&ali15x3_adapter); + ret = i2c_add_adapter(&ali15x3_adapter); + if (ret) + goto release_region; + + return 0; + +release_region: + release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE); + return ret; } static void ali15x3_remove(struct pci_dev *dev) diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c index ab2688bd4d338..e19cb62d6796d 100644 --- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c +++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c @@ -247,6 +247,9 @@ static int ec_i2c_probe(struct platform_device *pdev) u32 remote_bus; int err; + if (!ec) + return dev_err_probe(dev, -EPROBE_DEFER, "couldn't find parent EC device\n"); + if (!ec->cmd_xfer) { dev_err(dev, "Missing sendrecv\n"); return -EINVAL; diff --git a/drivers/i2c/busses/i2c-ls2x.c b/drivers/i2c/busses/i2c-ls2x.c index 8821cac3897b6..b475dd27b7af9 100644 --- a/drivers/i2c/busses/i2c-ls2x.c +++ b/drivers/i2c/busses/i2c-ls2x.c @@ -10,6 +10,7 @@ * Rewritten for mainline by Binbin Zhou */ +#include #include #include #include @@ -26,7 +27,8 @@ #include /* I2C Registers */ -#define I2C_LS2X_PRER 0x0 /* Freq Division Register(16 bits) */ +#define I2C_LS2X_PRER_LO 0x0 /* Freq Division Low Byte Register */ +#define I2C_LS2X_PRER_HI 0x1 /* Freq Division High Byte Register */ #define I2C_LS2X_CTR 0x2 /* Control Register */ #define I2C_LS2X_TXR 0x3 /* Transport Data Register */ #define I2C_LS2X_RXR 0x3 /* Receive Data Register */ @@ -93,6 +95,7 @@ static irqreturn_t ls2x_i2c_isr(int this_irq, void *dev_id) */ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv) { + u16 val; struct i2c_timings *t = &priv->i2c_t; struct device *dev = priv->adapter.dev.parent; u32 acpi_speed = i2c_acpi_find_bus_speed(dev); @@ -104,9 +107,14 @@ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv) else t->bus_freq_hz = LS2X_I2C_FREQ_STD; - /* Calculate and set i2c frequency. */ - writew(LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1, - priv->base + I2C_LS2X_PRER); + /* + * According to the chip manual, we can only access the registers as bytes, + * otherwise the high bits will be truncated. + * So set the I2C frequency with a sequential writeb() instead of writew(). + */ + val = LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1; + writeb(FIELD_GET(GENMASK(7, 0), val), priv->base + I2C_LS2X_PRER_LO); + writeb(FIELD_GET(GENMASK(15, 8), val), priv->base + I2C_LS2X_PRER_HI); } static void ls2x_i2c_init(struct ls2x_i2c_priv *priv) diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index bbcb4d6668ce6..a693ebb64edf4 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -2319,6 +2319,13 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev) if (irq < 0) return irq; + /* + * Disable the interrupt to avoid the interrupt handler being triggered + * incorrectly by the asynchronous interrupt status since the machine + * might do a warm reset during the last smbus/i2c transfer session. 
+ */ + npcm_i2c_int_enable(bus, false); + ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0, dev_name(bus->dev), bus); if (ret) diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 1d9ad25c89ae5..8c9cf08ad45e2 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1048,23 +1048,6 @@ static int omap_i2c_transmit_data(struct omap_i2c_dev *omap, u8 num_bytes, return 0; } -static irqreturn_t -omap_i2c_isr(int irq, void *dev_id) -{ - struct omap_i2c_dev *omap = dev_id; - irqreturn_t ret = IRQ_HANDLED; - u16 mask; - u16 stat; - - stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG); - mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK; - - if (stat & mask) - ret = IRQ_WAKE_THREAD; - - return ret; -} - static int omap_i2c_xfer_data(struct omap_i2c_dev *omap) { u16 bits; @@ -1095,8 +1078,13 @@ static int omap_i2c_xfer_data(struct omap_i2c_dev *omap) } if (stat & OMAP_I2C_STAT_NACK) { - err |= OMAP_I2C_STAT_NACK; + omap->cmd_err |= OMAP_I2C_STAT_NACK; omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK); + + if (!(stat & ~OMAP_I2C_STAT_NACK)) { + err = -EAGAIN; + break; + } } if (stat & OMAP_I2C_STAT_AL) { @@ -1472,7 +1460,7 @@ omap_i2c_probe(struct platform_device *pdev) IRQF_NO_SUSPEND, pdev->name, omap); else r = devm_request_threaded_irq(&pdev->dev, omap->irq, - omap_i2c_isr, omap_i2c_isr_thread, + NULL, omap_i2c_isr_thread, IRQF_NO_SUSPEND | IRQF_ONESHOT, pdev->name, omap); diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c index 3505cf29cedda..a19c3d251804d 100644 --- a/drivers/i2c/busses/i2c-sis630.c +++ b/drivers/i2c/busses/i2c-sis630.c @@ -509,6 +509,8 @@ MODULE_DEVICE_TABLE(pci, sis630_ids); static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id) { + int ret; + if (sis630_setup(dev)) { dev_err(&dev->dev, "SIS630 compatible bus not detected, " @@ -522,7 +524,15 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id) snprintf(sis630_adapter.name, sizeof(sis630_adapter.name), "SMBus SIS630 adapter at %04x", smbus_base + SMB_STS); - return i2c_add_adapter(&sis630_adapter); + ret = i2c_add_adapter(&sis630_adapter); + if (ret) + goto release_region; + + return 0; + +release_region: + release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION); + return ret; } static void sis630_remove(struct pci_dev *dev) diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c index 0d54d0b5e3273..5342e934aa5e4 100644 --- a/drivers/i2c/i2c-atr.c +++ b/drivers/i2c/i2c-atr.c @@ -8,12 +8,12 @@ * Originally based on i2c-mux.c */ -#include #include #include #include #include #include +#include #include #include diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 53ab814b676ff..7c1dc42b809bf 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -2553,6 +2553,9 @@ static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master) */ void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot) { + if (!dev->ibi || !slot) + return; + atomic_inc(&dev->ibi->pending_ibis); queue_work(dev->ibi->wq, &slot->work); } diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c index 565af3759813b..42102baabcdda 100644 --- a/drivers/i3c/master/svc-i3c-master.c +++ b/drivers/i3c/master/svc-i3c-master.c @@ -378,7 +378,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master, slot->len < SVC_I3C_FIFO_SIZE) { mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL); count = 
SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl); - readsl(master->regs + SVC_I3C_MRDATAB, buf, count); + readsb(master->regs + SVC_I3C_MRDATAB, buf, count); slot->len += count; buf += count; } @@ -990,7 +990,7 @@ static int svc_i3c_update_ibirules(struct svc_i3c_master *master) /* Create the IBIRULES register for both cases */ i3c_bus_for_each_i3cdev(&master->base.bus, dev) { - if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER) + if (!(dev->info.bcr & I3C_BCR_IBI_REQ_CAP)) continue; if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) { diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 67aebfe0fed66..524ed143f875d 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -55,6 +55,7 @@ #include #include #include +#include #include #define INTEL_IDLE_VERSION "0.5.1" @@ -1749,6 +1750,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) if (intel_idle_state_needs_timer_stop(state)) state->flags |= CPUIDLE_FLAG_TIMER_STOP; + if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + mark_tsc_unstable("TSC halts in idle"); + state->enter = intel_idle; state->enter_s2idle = intel_idle_s2idle; } diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 62e6369e22696..de207526babee 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -711,7 +711,7 @@ static int mma8452_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct mma8452_data *data = iio_priv(indio_dev); - int i, ret; + int i, j, ret; ret = iio_device_claim_direct_mode(indio_dev); if (ret) @@ -771,14 +771,18 @@ static int mma8452_write_raw(struct iio_dev *indio_dev, break; case IIO_CHAN_INFO_OVERSAMPLING_RATIO: - ret = mma8452_get_odr_index(data); + j = mma8452_get_odr_index(data); for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) { - if (mma8452_os_ratio[i][ret] == val) { + if (mma8452_os_ratio[i][j] == val) { ret = mma8452_set_power_mode(data, i); break; } } + if (i == ARRAY_SIZE(mma8452_os_ratio)) { + ret = -EINVAL; + break; + } break; default: ret = -EINVAL; diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c index 57025354c7cd5..f484be27058d9 100644 --- a/drivers/iio/accel/msa311.c +++ b/drivers/iio/accel/msa311.c @@ -593,23 +593,25 @@ static int msa311_read_raw_data(struct iio_dev *indio_dev, __le16 axis; int err; - err = pm_runtime_resume_and_get(dev); + err = iio_device_claim_direct_mode(indio_dev); if (err) return err; - err = iio_device_claim_direct_mode(indio_dev); - if (err) + err = pm_runtime_resume_and_get(dev); + if (err) { + iio_device_release_direct_mode(indio_dev); return err; + } mutex_lock(&msa311->lock); err = msa311_get_axis(msa311, chan, &axis); mutex_unlock(&msa311->lock); - iio_device_release_direct_mode(indio_dev); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); + iio_device_release_direct_mode(indio_dev); + if (err) { dev_err(dev, "can't get axis %s (%pe)\n", chan->datasheet_name, ERR_PTR(err)); @@ -755,10 +757,6 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2) unsigned int odr; int err; - err = pm_runtime_resume_and_get(dev); - if (err) - return err; - /* * Sampling frequency changing is prohibited when buffer mode is * enabled, because sometimes MSA311 chip returns outliers during @@ -768,6 +766,12 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2) if (err) return err; + err = pm_runtime_resume_and_get(dev); + if (err) { + iio_device_release_direct_mode(indio_dev); + return 
err; + } + err = -EINVAL; for (odr = 0; odr < ARRAY_SIZE(msa311_odr_table); odr++) if (val == msa311_odr_table[odr].integral && @@ -778,11 +782,11 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2) break; } - iio_device_release_direct_mode(indio_dev); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); + iio_device_release_direct_mode(indio_dev); + if (err) dev_err(dev, "can't update frequency (%pe)\n", ERR_PTR(err)); diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c index de32cc9d18c5e..712f95f53c9ec 100644 --- a/drivers/iio/adc/ad4130.c +++ b/drivers/iio/adc/ad4130.c @@ -223,6 +223,10 @@ enum ad4130_pin_function { AD4130_PIN_FN_VBIAS = BIT(3), }; +/* + * If you make adaptations in this struct, you most likely also have to adapt + * ad4130_setup_info_eq(), too. + */ struct ad4130_setup_info { unsigned int iout0_val; unsigned int iout1_val; @@ -591,6 +595,40 @@ static irqreturn_t ad4130_irq_handler(int irq, void *private) return IRQ_HANDLED; } +static bool ad4130_setup_info_eq(struct ad4130_setup_info *a, + struct ad4130_setup_info *b) +{ + /* + * This is just to make sure that the comparison is adapted after + * struct ad4130_setup_info was changed. + */ + static_assert(sizeof(*a) == + sizeof(struct { + unsigned int iout0_val; + unsigned int iout1_val; + unsigned int burnout; + unsigned int pga; + unsigned int fs; + u32 ref_sel; + enum ad4130_filter_mode filter_mode; + bool ref_bufp; + bool ref_bufm; + })); + + if (a->iout0_val != b->iout0_val || + a->iout1_val != b->iout1_val || + a->burnout != b->burnout || + a->pga != b->pga || + a->fs != b->fs || + a->ref_sel != b->ref_sel || + a->filter_mode != b->filter_mode || + a->ref_bufp != b->ref_bufp || + a->ref_bufm != b->ref_bufm) + return false; + + return true; +} + static int ad4130_find_slot(struct ad4130_state *st, struct ad4130_setup_info *target_setup_info, unsigned int *slot, bool *overwrite) @@ -604,8 +642,7 @@ static int ad4130_find_slot(struct ad4130_state *st, struct ad4130_slot_info *slot_info = &st->slots_info[i]; /* Immediately accept a matching setup info. */ - if (!memcmp(target_setup_info, &slot_info->setup, - sizeof(*target_setup_info))) { + if (ad4130_setup_info_eq(target_setup_info, &slot_info->setup)) { *slot = i; return 0; } diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 8d94bc2b1cac3..30a7392c4f8b9 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -147,7 +147,11 @@ struct ad7124_chip_info { struct ad7124_channel_config { bool live; unsigned int cfg_slot; - /* Following fields are used to compare equality. */ + /* + * Following fields are used to compare for equality. If you + * make adaptations in it, you most likely also have to adapt + * ad7124_find_similar_live_cfg(), too. + */ struct_group(config_props, enum ad7124_ref_sel refsel; bool bipolar; @@ -334,15 +338,38 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_ struct ad7124_channel_config *cfg) { struct ad7124_channel_config *cfg_aux; - ptrdiff_t cmp_size; int i; - cmp_size = sizeof_field(struct ad7124_channel_config, config_props); + /* + * This is just to make sure that the comparison is adapted after + * struct ad7124_channel_config was changed. 
+ */ + static_assert(sizeof_field(struct ad7124_channel_config, config_props) == + sizeof(struct { + enum ad7124_ref_sel refsel; + bool bipolar; + bool buf_positive; + bool buf_negative; + unsigned int vref_mv; + unsigned int pga_bits; + unsigned int odr; + unsigned int odr_sel_bits; + unsigned int filter_type; + })); + for (i = 0; i < st->num_channels; i++) { cfg_aux = &st->channels[i].cfg; if (cfg_aux->live && - !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size)) + cfg->refsel == cfg_aux->refsel && + cfg->bipolar == cfg_aux->bipolar && + cfg->buf_positive == cfg_aux->buf_positive && + cfg->buf_negative == cfg_aux->buf_negative && + cfg->vref_mv == cfg_aux->vref_mv && + cfg->pga_bits == cfg_aux->pga_bits && + cfg->odr == cfg_aux->odr && + cfg->odr_sel_bits == cfg_aux->odr_sel_bits && + cfg->filter_type == cfg_aux->filter_type) return cfg_aux; } diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c index 5a65be00dd190..2eebc6f761a63 100644 --- a/drivers/iio/adc/ad7173.c +++ b/drivers/iio/adc/ad7173.c @@ -181,7 +181,11 @@ struct ad7173_channel_config { u8 cfg_slot; bool live; - /* Following fields are used to compare equality. */ + /* + * Following fields are used to compare equality. If you + * make adaptations in it, you most likely also have to adapt + * ad7173_find_live_config(), too. + */ struct_group(config_props, bool bipolar; bool input_buf; @@ -582,15 +586,28 @@ static struct ad7173_channel_config * ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg) { struct ad7173_channel_config *cfg_aux; - ptrdiff_t cmp_size; int i; - cmp_size = sizeof_field(struct ad7173_channel_config, config_props); + /* + * This is just to make sure that the comparison is adapted after + * struct ad7173_channel_config was changed. + */ + static_assert(sizeof_field(struct ad7173_channel_config, config_props) == + sizeof(struct { + bool bipolar; + bool input_buf; + u8 odr; + u8 ref_sel; + })); + for (i = 0; i < st->num_channels; i++) { cfg_aux = &st->channels[i].cfg; if (cfg_aux->live && - !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size)) + cfg->bipolar == cfg_aux->bipolar && + cfg->input_buf == cfg_aux->input_buf && + cfg->odr == cfg_aux->odr && + cfg->ref_sel == cfg_aux->ref_sel) return cfg_aux; } return NULL; diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c index 955e9eff0099e..6fe32f866765b 100644 --- a/drivers/iio/adc/ad7192.c +++ b/drivers/iio/adc/ad7192.c @@ -1082,7 +1082,7 @@ static int ad7192_update_scan_mode(struct iio_dev *indio_dev, const unsigned lon conf &= ~AD7192_CONF_CHAN_MASK; for_each_set_bit(i, scan_mask, 8) - conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, i); + conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, BIT(i)); ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf); if (ret < 0) diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c index 113703fb72454..6f8816483f1a0 100644 --- a/drivers/iio/adc/ad7768-1.c +++ b/drivers/iio/adc/ad7768-1.c @@ -574,6 +574,21 @@ static int ad7768_probe(struct spi_device *spi) return -ENOMEM; st = iio_priv(indio_dev); + /* + * Datasheet recommends SDI line to be kept high when data is not being + * clocked out of the controller and the spi clock is free running, + * to prevent accidental reset. + * Since many controllers do not support the SPI_MOSI_IDLE_HIGH flag + * yet, only request the MOSI idle state to enable if the controller + * supports it. 
+ */ + if (spi->controller->mode_bits & SPI_MOSI_IDLE_HIGH) { + spi->mode |= SPI_MOSI_IDLE_HIGH; + ret = spi_setup(spi); + if (ret < 0) + return ret; + } + st->spi = spi; st->vref = devm_regulator_get(&spi->dev, "vref"); diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index d7fd21e7c6e2a..3618e769b1065 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -329,7 +329,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = { #define AT91_HWFIFO_MAX_SIZE_STR "128" #define AT91_HWFIFO_MAX_SIZE 128 -#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \ +#define AT91_SAMA_CHAN_SINGLE(index, num, addr, rbits) \ { \ .type = IIO_VOLTAGE, \ .channel = num, \ @@ -337,7 +337,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = { .scan_index = index, \ .scan_type = { \ .sign = 'u', \ - .realbits = 14, \ + .realbits = rbits, \ .storagebits = 16, \ }, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ @@ -350,7 +350,13 @@ static const struct at91_adc_reg_layout sama7g5_layout = { .indexed = 1, \ } -#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \ +#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \ + AT91_SAMA_CHAN_SINGLE(index, num, addr, 14) + +#define AT91_SAMA7G5_CHAN_SINGLE(index, num, addr) \ + AT91_SAMA_CHAN_SINGLE(index, num, addr, 16) + +#define AT91_SAMA_CHAN_DIFF(index, num, num2, addr, rbits) \ { \ .type = IIO_VOLTAGE, \ .differential = 1, \ @@ -360,7 +366,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = { .scan_index = index, \ .scan_type = { \ .sign = 's', \ - .realbits = 14, \ + .realbits = rbits, \ .storagebits = 16, \ }, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ @@ -373,6 +379,12 @@ static const struct at91_adc_reg_layout sama7g5_layout = { .indexed = 1, \ } +#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \ + AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 14) + +#define AT91_SAMA7G5_CHAN_DIFF(index, num, num2, addr) \ + AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 16) + #define AT91_SAMA5D2_CHAN_TOUCH(num, name, mod) \ { \ .type = IIO_POSITIONRELATIVE, \ @@ -666,30 +678,30 @@ static const struct iio_chan_spec at91_sama5d2_adc_channels[] = { }; static const struct iio_chan_spec at91_sama7g5_adc_channels[] = { - AT91_SAMA5D2_CHAN_SINGLE(0, 0, 0x60), - AT91_SAMA5D2_CHAN_SINGLE(1, 1, 0x64), - AT91_SAMA5D2_CHAN_SINGLE(2, 2, 0x68), - AT91_SAMA5D2_CHAN_SINGLE(3, 3, 0x6c), - AT91_SAMA5D2_CHAN_SINGLE(4, 4, 0x70), - AT91_SAMA5D2_CHAN_SINGLE(5, 5, 0x74), - AT91_SAMA5D2_CHAN_SINGLE(6, 6, 0x78), - AT91_SAMA5D2_CHAN_SINGLE(7, 7, 0x7c), - AT91_SAMA5D2_CHAN_SINGLE(8, 8, 0x80), - AT91_SAMA5D2_CHAN_SINGLE(9, 9, 0x84), - AT91_SAMA5D2_CHAN_SINGLE(10, 10, 0x88), - AT91_SAMA5D2_CHAN_SINGLE(11, 11, 0x8c), - AT91_SAMA5D2_CHAN_SINGLE(12, 12, 0x90), - AT91_SAMA5D2_CHAN_SINGLE(13, 13, 0x94), - AT91_SAMA5D2_CHAN_SINGLE(14, 14, 0x98), - AT91_SAMA5D2_CHAN_SINGLE(15, 15, 0x9c), - AT91_SAMA5D2_CHAN_DIFF(16, 0, 1, 0x60), - AT91_SAMA5D2_CHAN_DIFF(17, 2, 3, 0x68), - AT91_SAMA5D2_CHAN_DIFF(18, 4, 5, 0x70), - AT91_SAMA5D2_CHAN_DIFF(19, 6, 7, 0x78), - AT91_SAMA5D2_CHAN_DIFF(20, 8, 9, 0x80), - AT91_SAMA5D2_CHAN_DIFF(21, 10, 11, 0x88), - AT91_SAMA5D2_CHAN_DIFF(22, 12, 13, 0x90), - AT91_SAMA5D2_CHAN_DIFF(23, 14, 15, 0x98), + AT91_SAMA7G5_CHAN_SINGLE(0, 0, 0x60), + AT91_SAMA7G5_CHAN_SINGLE(1, 1, 0x64), + AT91_SAMA7G5_CHAN_SINGLE(2, 2, 0x68), + AT91_SAMA7G5_CHAN_SINGLE(3, 3, 0x6c), + AT91_SAMA7G5_CHAN_SINGLE(4, 4, 0x70), + AT91_SAMA7G5_CHAN_SINGLE(5, 5, 0x74), + AT91_SAMA7G5_CHAN_SINGLE(6, 6, 0x78), + 
AT91_SAMA7G5_CHAN_SINGLE(7, 7, 0x7c), + AT91_SAMA7G5_CHAN_SINGLE(8, 8, 0x80), + AT91_SAMA7G5_CHAN_SINGLE(9, 9, 0x84), + AT91_SAMA7G5_CHAN_SINGLE(10, 10, 0x88), + AT91_SAMA7G5_CHAN_SINGLE(11, 11, 0x8c), + AT91_SAMA7G5_CHAN_SINGLE(12, 12, 0x90), + AT91_SAMA7G5_CHAN_SINGLE(13, 13, 0x94), + AT91_SAMA7G5_CHAN_SINGLE(14, 14, 0x98), + AT91_SAMA7G5_CHAN_SINGLE(15, 15, 0x9c), + AT91_SAMA7G5_CHAN_DIFF(16, 0, 1, 0x60), + AT91_SAMA7G5_CHAN_DIFF(17, 2, 3, 0x68), + AT91_SAMA7G5_CHAN_DIFF(18, 4, 5, 0x70), + AT91_SAMA7G5_CHAN_DIFF(19, 6, 7, 0x78), + AT91_SAMA7G5_CHAN_DIFF(20, 8, 9, 0x80), + AT91_SAMA7G5_CHAN_DIFF(21, 10, 11, 0x88), + AT91_SAMA7G5_CHAN_DIFF(22, 12, 13, 0x90), + AT91_SAMA7G5_CHAN_DIFF(23, 14, 15, 0x98), IIO_CHAN_SOFT_TIMESTAMP(24), AT91_SAMA5D2_CHAN_TEMP(AT91_SAMA7G5_ADC_TEMP_CHANNEL, "temp", 0xdc), }; diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c index 7d61b2fe66243..390d3fab21478 100644 --- a/drivers/iio/dac/ad3552r.c +++ b/drivers/iio/dac/ad3552r.c @@ -714,6 +714,12 @@ static int ad3552r_reset(struct ad3552r_desc *dac) return ret; } + /* Clear reset error flag, see ad3552r manual, rev B table 38. */ + ret = ad3552r_write_reg(dac, AD3552R_REG_ADDR_ERR_STATUS, + AD3552R_MASK_RESET_STATUS); + if (ret) + return ret; + return ad3552r_update_reg_field(dac, addr_mask_map[AD3552R_ADDR_ASCENSION][0], addr_mask_map[AD3552R_ADDR_ASCENSION][1], diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c index 848baa6e3bbf5..d85b7d3de8660 100644 --- a/drivers/iio/filter/admv8818.c +++ b/drivers/iio/filter/admv8818.c @@ -574,21 +574,15 @@ static int admv8818_init(struct admv8818_state *st) struct spi_device *spi = st->spi; unsigned int chip_id; - ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A, - ADMV8818_SOFTRESET_N_MSK | - ADMV8818_SOFTRESET_MSK, - FIELD_PREP(ADMV8818_SOFTRESET_N_MSK, 1) | - FIELD_PREP(ADMV8818_SOFTRESET_MSK, 1)); + ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A, + ADMV8818_SOFTRESET_N_MSK | ADMV8818_SOFTRESET_MSK); if (ret) { dev_err(&spi->dev, "ADMV8818 Soft Reset failed.\n"); return ret; } - ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A, - ADMV8818_SDOACTIVE_N_MSK | - ADMV8818_SDOACTIVE_MSK, - FIELD_PREP(ADMV8818_SDOACTIVE_N_MSK, 1) | - FIELD_PREP(ADMV8818_SDOACTIVE_MSK, 1)); + ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A, + ADMV8818_SDOACTIVE_N_MSK | ADMV8818_SDOACTIVE_MSK); if (ret) { dev_err(&spi->dev, "ADMV8818 SDO Enable failed.\n"); return ret; diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c index fb34a8e4d04e7..42e0ee683ef6b 100644 --- a/drivers/iio/industrialio-backend.c +++ b/drivers/iio/industrialio-backend.c @@ -155,10 +155,12 @@ static ssize_t iio_backend_debugfs_write_reg(struct file *file, ssize_t rc; int ret; - rc = simple_write_to_buffer(buf, sizeof(buf), ppos, userbuf, count); + rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count); if (rc < 0) return rc; + buf[count] = '\0'; + ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val); switch (ret) { diff --git a/drivers/iio/light/apds9306.c b/drivers/iio/light/apds9306.c index 079e02be10052..7f9d6cac8adb7 100644 --- a/drivers/iio/light/apds9306.c +++ b/drivers/iio/light/apds9306.c @@ -108,11 +108,11 @@ static const struct part_id_gts_multiplier apds9306_gts_mul[] = { { .part_id = 0xB1, .max_scale_int = 16, - .max_scale_nano = 3264320, + .max_scale_nano = 326432000, }, { .part_id = 0xB3, .max_scale_int = 14, - .max_scale_nano = 9712000, + .max_scale_nano = 97120000, }, }; 
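The ad4130, ad7124 and ad7173 hunks above all replace a memcmp() of channel-configuration structs with an explicit field-by-field comparison, pinned by a static_assert on the struct size so the comparison gets revisited whenever fields are added (memcmp() also compares padding bytes, which are not guaranteed to be initialised). A minimal userspace sketch of that pattern follows; the struct, its fields and chan_config_eq() are illustrative stand-ins, not taken from the drivers.

/*
 * Sketch of the "field-wise compare + static_assert on layout" pattern
 * used in the ad4130/ad7124/ad7173 hunks above.  Names are hypothetical.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct chan_config {
	unsigned int gain;
	unsigned int odr;
	bool bipolar;
	bool buffered;
};

static bool chan_config_eq(const struct chan_config *a,
			   const struct chan_config *b)
{
	/*
	 * Usually fails to compile if a field is added without updating
	 * the comparison below (only layout changes that alter the size
	 * are caught, hence the drivers also keep a reminder comment).
	 */
	static_assert(sizeof(struct chan_config) ==
		      sizeof(struct {
			      unsigned int gain;
			      unsigned int odr;
			      bool bipolar;
			      bool buffered;
		      }), "update chan_config_eq() after changing chan_config");

	return a->gain == b->gain &&
	       a->odr == b->odr &&
	       a->bipolar == b->bipolar &&
	       a->buffered == b->buffered;
}

int main(void)
{
	struct chan_config x = { .gain = 1, .odr = 50, .bipolar = true };
	struct chan_config y = { .gain = 1, .odr = 50, .bipolar = true };

	printf("equal: %d\n", chan_config_eq(&x, &y));
	return 0;
}

Comparing fields explicitly is the reliable form; the size assertion is only a safety net against the struct and the comparison drifting apart.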
diff --git a/drivers/iio/light/veml6075.c b/drivers/iio/light/veml6075.c index 05d4c0e9015d6..859891e8f1152 100644 --- a/drivers/iio/light/veml6075.c +++ b/drivers/iio/light/veml6075.c @@ -195,13 +195,17 @@ static int veml6075_read_uv_direct(struct veml6075_data *data, int chan, static int veml6075_read_int_time_index(struct veml6075_data *data) { - int ret, conf; + int ret, conf, int_index; ret = regmap_read(data->regmap, VEML6075_CMD_CONF, &conf); if (ret < 0) return ret; - return FIELD_GET(VEML6075_CONF_IT, conf); + int_index = FIELD_GET(VEML6075_CONF_IT, conf); + if (int_index >= ARRAY_SIZE(veml6075_it_ms)) + return -EINVAL; + + return int_index; } static int veml6075_read_int_time_ms(struct veml6075_data *data, int *val) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 91db10515d747..176d0b3e44887 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -72,6 +72,8 @@ static const char * const cma_events[] = { static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, enum ib_gid_type gid_type); +static void cma_netevent_work_handler(struct work_struct *_work); + const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) { size_t index = event; @@ -1033,6 +1035,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler, get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); id_priv->id.route.addr.dev_addr.net = get_net(net); id_priv->seq_num &= 0x00ffffff; + INIT_WORK(&id_priv->id.net_work, cma_netevent_work_handler); rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID); if (parent) @@ -5227,7 +5230,6 @@ static int cma_netevent_callback(struct notifier_block *self, if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr, neigh->ha, ETH_ALEN)) continue; - INIT_WORK(¤t_id->id.net_work, cma_netevent_work_handler); cma_id_get(current_id); queue_work(cma_wq, ¤t_id->id.net_work); } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index e029401b56805..46102f179955b 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -544,6 +544,8 @@ static struct class ib_class = { static void rdma_init_coredev(struct ib_core_device *coredev, struct ib_device *dev, struct net *net) { + bool is_full_dev = &dev->coredev == coredev; + /* This BUILD_BUG_ON is intended to catch layout change * of union of ib_core_device and device. * dev must be the first element as ib_core and providers @@ -555,6 +557,13 @@ static void rdma_init_coredev(struct ib_core_device *coredev, coredev->dev.class = &ib_class; coredev->dev.groups = dev->groups; + + /* + * Don't expose hw counters outside of the init namespace. 
+ */ + if (!is_full_dev && dev->hw_stats_attr_index) + coredev->dev.groups[dev->hw_stats_attr_index] = NULL; + device_initialize(&coredev->dev); coredev->owner = dev; INIT_LIST_HEAD(&coredev->port_list); @@ -1357,9 +1366,11 @@ static void ib_device_notify_register(struct ib_device *device) u32 port; int ret; + down_read(&devices_rwsem); + ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT); if (ret) - return; + goto out; rdma_for_each_port(device, port) { netdev = ib_device_get_netdev(device, port); @@ -1370,8 +1381,11 @@ static void ib_device_notify_register(struct ib_device *device) RDMA_NETDEV_ATTACH_EVENT); dev_put(netdev); if (ret) - return; + goto out; } + +out: + up_read(&devices_rwsem); } /** diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 1fd54d5c4dd8b..73f3a0b9a54b5 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -2671,11 +2671,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, struct ib_mad_private *mad) { unsigned long flags; - int post, ret; struct ib_mad_private *mad_priv; struct ib_sge sg_list; struct ib_recv_wr recv_wr; struct ib_mad_queue *recv_queue = &qp_info->recv_queue; + int ret = 0; /* Initialize common scatter list fields */ sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; @@ -2685,7 +2685,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, recv_wr.sg_list = &sg_list; recv_wr.num_sge = 1; - do { + while (true) { /* Allocate and map receive buffer */ if (mad) { mad_priv = mad; @@ -2693,10 +2693,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, } else { mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), GFP_ATOMIC); - if (!mad_priv) { - ret = -ENOMEM; - break; - } + if (!mad_priv) + return -ENOMEM; } sg_list.length = mad_priv_dma_size(mad_priv); sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, @@ -2705,37 +2703,41 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr))) { - kfree(mad_priv); ret = -ENOMEM; - break; + goto free_mad_priv; } mad_priv->header.mapping = sg_list.addr; mad_priv->header.mad_list.mad_queue = recv_queue; mad_priv->header.mad_list.cqe.done = ib_mad_recv_done; recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; - - /* Post receive WR */ spin_lock_irqsave(&recv_queue->lock, flags); - post = (++recv_queue->count < recv_queue->max_active); - list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); + if (recv_queue->count >= recv_queue->max_active) { + /* Fully populated the receive queue */ + spin_unlock_irqrestore(&recv_queue->lock, flags); + break; + } + recv_queue->count++; + list_add_tail(&mad_priv->header.mad_list.list, + &recv_queue->list); spin_unlock_irqrestore(&recv_queue->lock, flags); + ret = ib_post_recv(qp_info->qp, &recv_wr, NULL); if (ret) { spin_lock_irqsave(&recv_queue->lock, flags); list_del(&mad_priv->header.mad_list.list); recv_queue->count--; spin_unlock_irqrestore(&recv_queue->lock, flags); - ib_dma_unmap_single(qp_info->port_priv->device, - mad_priv->header.mapping, - mad_priv_dma_size(mad_priv), - DMA_FROM_DEVICE); - kfree(mad_priv); dev_err(&qp_info->port_priv->device->dev, "ib_post_recv failed: %d\n", ret); break; } - } while (post); + } + ib_dma_unmap_single(qp_info->port_priv->device, + mad_priv->header.mapping, + mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE); +free_mad_priv: + kfree(mad_priv); return ret; } diff --git 
a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 9f97bef021497..210092b9bf17d 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -988,6 +988,7 @@ int ib_setup_device_attrs(struct ib_device *ibdev) for (i = 0; i != ARRAY_SIZE(ibdev->groups); i++) if (!ibdev->groups[i]) { ibdev->groups[i] = &data->group; + ibdev->hw_stats_attr_index = i; return 0; } WARN(true, "struct ib_device->groups is too small"); diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index e9fa22d31c233..c48ef60830205 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -76,12 +76,14 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp, npfns = (end - start) >> PAGE_SHIFT; umem_odp->pfn_list = kvcalloc( - npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL); + npfns, sizeof(*umem_odp->pfn_list), + GFP_KERNEL | __GFP_NOWARN); if (!umem_odp->pfn_list) return -ENOMEM; umem_odp->dma_list = kvcalloc( - ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL); + ndmas, sizeof(*umem_odp->dma_list), + GFP_KERNEL | __GFP_NOWARN); if (!umem_odp->dma_list) { ret = -ENOMEM; goto out_pfn_list; diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index e94518b12f86e..a316afc0139c8 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -154,6 +154,14 @@ struct bnxt_re_pacing { #define BNXT_RE_GRC_FIFO_REG_BASE 0x2000 +#define BNXT_RE_MIN_MSIX 2 +#define BNXT_RE_MAX_MSIX BNXT_MAX_ROCE_MSIX +struct bnxt_re_nq_record { + struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX]; + struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX]; + int num_msix; +}; + #define MAX_CQ_HASH_BITS (16) #define MAX_SRQ_HASH_BITS (16) struct bnxt_re_dev { @@ -174,24 +182,20 @@ struct bnxt_re_dev { unsigned int version, major, minor; struct bnxt_qplib_chip_ctx *chip_ctx; struct bnxt_en_dev *en_dev; - int num_msix; int id; struct delayed_work worker; u8 cur_prio_map; - /* FP Notification Queue (CQ & SRQ) */ - struct tasklet_struct nq_task; - /* RCFW Channel */ struct bnxt_qplib_rcfw rcfw; - /* NQ */ - struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX]; + /* NQ record */ + struct bnxt_re_nq_record *nqr; /* Device Resources */ - struct bnxt_qplib_dev_attr dev_attr; + struct bnxt_qplib_dev_attr *dev_attr; struct bnxt_qplib_ctx qplib_ctx; struct bnxt_qplib_res qplib_res; struct bnxt_qplib_dpi dpi_privileged; diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c index 1e63f80917483..f51adb0a97e66 100644 --- a/drivers/infiniband/hw/bnxt_re/hw_counters.c +++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c @@ -357,8 +357,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev, goto done; } bnxt_re_copy_err_stats(rdev, stats, err_s); - if (_is_ext_stats_supported(rdev->dev_attr.dev_cap_flags) && - !rdev->is_virtfn) { + if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags, + rdev->is_virtfn)) { rc = bnxt_re_get_ext_stat(rdev, stats); if (rc) { clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index a7067c3c06797..0b21d8b5d9629 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -118,7 +118,7 @@ static enum ib_access_flags __to_ib_access_flags(int qflags) static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev, struct bnxt_qplib_mrw *qplib_mr) { - if 
(_is_relaxed_ordering_supported(rdev->dev_attr.dev_cap_flags2) && + if (_is_relaxed_ordering_supported(rdev->dev_attr->dev_cap_flags2) && pcie_relaxed_ordering_enabled(rdev->en_dev->pdev)) qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO; } @@ -143,7 +143,7 @@ int bnxt_re_query_device(struct ib_device *ibdev, struct ib_udata *udata) { struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); - struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; + struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr; memset(ib_attr, 0, sizeof(*ib_attr)); memcpy(&ib_attr->fw_ver, dev_attr->fw_ver, @@ -216,7 +216,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num, struct ib_port_attr *port_attr) { struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); - struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; + struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr; int rc; memset(port_attr, 0, sizeof(*port_attr)); @@ -274,8 +274,8 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str) struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d", - rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1], - rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]); + rdev->dev_attr->fw_ver[0], rdev->dev_attr->fw_ver[1], + rdev->dev_attr->fw_ver[2], rdev->dev_attr->fw_ver[3]); } int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num, @@ -526,7 +526,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) mr->qplib_mr.pd = &pd->qplib_pd; mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags); - if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) { + if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) { rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); if (rc) { ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n"); @@ -1001,7 +1001,7 @@ static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp, rdev = qp->rdev; qplqp = &qp->qplib_qp; sq = &qplqp->sq; - dev_attr = &rdev->dev_attr; + dev_attr = rdev->dev_attr; align = sizeof(struct sq_send_hdr); ilsize = ALIGN(init_attr->cap.max_inline_data, align); @@ -1221,7 +1221,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp, rdev = qp->rdev; qplqp = &qp->qplib_qp; rq = &qplqp->rq; - dev_attr = &rdev->dev_attr; + dev_attr = rdev->dev_attr; if (init_attr->srq) { struct bnxt_re_srq *srq; @@ -1258,7 +1258,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp) rdev = qp->rdev; qplqp = &qp->qplib_qp; - dev_attr = &rdev->dev_attr; + dev_attr = rdev->dev_attr; if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) { qplqp->rq.max_sge = dev_attr->max_qp_sges; @@ -1284,7 +1284,7 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp, rdev = qp->rdev; qplqp = &qp->qplib_qp; sq = &qplqp->sq; - dev_attr = &rdev->dev_attr; + dev_attr = rdev->dev_attr; sq->max_sge = init_attr->cap.max_send_sge; entries = init_attr->cap.max_send_wr; @@ -1337,7 +1337,7 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp, rdev = qp->rdev; qplqp = &qp->qplib_qp; - dev_attr = &rdev->dev_attr; + dev_attr = rdev->dev_attr; if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) { entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx); @@ -1386,7 +1386,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, rdev = qp->rdev; qplqp = &qp->qplib_qp; - dev_attr = &rdev->dev_attr; + dev_attr = rdev->dev_attr; /* Setup misc params */ ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr); 
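Further up, the ib/core mad.c hunk reworks ib_mad_post_receive_mads() into a refill loop: allocate a buffer, check the receive-queue occupancy under the lock, stop once the queue is fully populated, and roll the accounting back if posting fails. A rough userspace analogue of that loop shape is sketched below; the queue structure and post_recv() are stand-ins, not the kernel API.

/*
 * Sketch of the bounded refill-and-rollback loop from the mad.c hunk above.
 * post_recv() stands in for ib_post_recv(); buffers handed to it are
 * considered owned by the queue.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct recv_queue {
	pthread_mutex_t lock;
	unsigned int count;
	unsigned int max_active;
};

static int post_recv(void *buf)
{
	(void)buf;		/* pretend posting always succeeds */
	return 0;
}

static int refill_recv_queue(struct recv_queue *q)
{
	int ret = 0;

	for (;;) {
		void *buf = malloc(256);

		if (!buf)
			return -1;

		pthread_mutex_lock(&q->lock);
		if (q->count >= q->max_active) {
			/* Queue already fully populated: stop refilling. */
			pthread_mutex_unlock(&q->lock);
			free(buf);
			break;
		}
		q->count++;	/* account for the entry before posting */
		pthread_mutex_unlock(&q->lock);

		ret = post_recv(buf);
		if (ret) {
			/* Posting failed: undo the accounting and bail out. */
			pthread_mutex_lock(&q->lock);
			q->count--;
			pthread_mutex_unlock(&q->lock);
			free(buf);
			break;
		}
	}

	return ret;
}

int main(void)
{
	struct recv_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.count = 0,
		.max_active = 8,
	};

	printf("refill: %d, posted %u buffers\n",
	       refill_recv_queue(&q), q.count);
	return 0;
}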
@@ -1556,7 +1556,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr, ib_pd = ib_qp->pd; pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); rdev = pd->rdev; - dev_attr = &rdev->dev_attr; + dev_attr = rdev->dev_attr; qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx); @@ -1783,7 +1783,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, ib_pd = ib_srq->pd; pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); rdev = pd->rdev; - dev_attr = &rdev->dev_attr; + dev_attr = rdev->dev_attr; srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) { @@ -1814,8 +1814,10 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges); srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit; srq->srq_limit = srq_init_attr->attr.srq_limit; - srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id; - nq = &rdev->nq[0]; + srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id; + srq->qplib_srq.sg_info.pgsize = PAGE_SIZE; + srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT; + nq = &rdev->nqr->nq[0]; if (udata) { rc = bnxt_re_init_user_srq(rdev, pd, srq, udata); @@ -1987,7 +1989,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, { struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_re_dev *rdev = qp->rdev; - struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; + struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr; enum ib_qp_state curr_qp_state, new_qp_state; int rc, entries; unsigned int flags; @@ -3011,7 +3013,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata = &attrs->driver_udata; struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx); - struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; + struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr; struct bnxt_qplib_chip_ctx *cctx; struct bnxt_qplib_nq *nq = NULL; unsigned int nq_alloc_cnt; @@ -3070,7 +3072,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, * used for getting the NQ index. 
*/ nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt); - nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)]; + nq = &rdev->nqr->nq[nq_alloc_cnt % (rdev->nqr->num_msix - 1)]; cq->qplib_cq.max_wqe = entries; cq->qplib_cq.cnq_hw_ring_id = nq->ring_id; cq->qplib_cq.nq = nq; @@ -3154,7 +3156,7 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) cq = container_of(ibcq, struct bnxt_re_cq, ib_cq); rdev = cq->rdev; - dev_attr = &rdev->dev_attr; + dev_attr = rdev->dev_attr; if (!ibcq->uobject) { ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported"); return -EOPNOTSUPP; @@ -4127,7 +4129,7 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags); mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR; - if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) { + if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) { rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); if (rc) { ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc); @@ -4219,7 +4221,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata) struct bnxt_re_ucontext *uctx = container_of(ctx, struct bnxt_re_ucontext, ib_uctx); struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); - struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; + struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr; struct bnxt_re_user_mmap_entry *entry; struct bnxt_re_uctx_resp resp = {}; struct bnxt_re_uctx_req ureq = {}; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 8abd1b723f8ff..9bd837a5b8a1a 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -152,6 +152,10 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev) if (!rdev->chip_ctx) return; + + kfree(rdev->dev_attr); + rdev->dev_attr = NULL; + chip_ctx = rdev->chip_ctx; rdev->chip_ctx = NULL; rdev->rcfw.res = NULL; @@ -165,7 +169,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev) { struct bnxt_qplib_chip_ctx *chip_ctx; struct bnxt_en_dev *en_dev; - int rc; + int rc = -ENOMEM; en_dev = rdev->en_dev; @@ -181,23 +185,30 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev) rdev->qplib_res.cctx = rdev->chip_ctx; rdev->rcfw.res = &rdev->qplib_res; - rdev->qplib_res.dattr = &rdev->dev_attr; + rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL); + if (!rdev->dev_attr) + goto free_chip_ctx; + rdev->qplib_res.dattr = rdev->dev_attr; rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev); bnxt_re_set_drv_mode(rdev); bnxt_re_set_db_offset(rdev); rc = bnxt_qplib_map_db_bar(&rdev->qplib_res); - if (rc) { - kfree(rdev->chip_ctx); - rdev->chip_ctx = NULL; - return rc; - } + if (rc) + goto free_dev_attr; if (bnxt_qplib_determine_atomics(en_dev->pdev)) ibdev_info(&rdev->ibdev, "platform doesn't support global atomics."); return 0; +free_dev_attr: + kfree(rdev->dev_attr); + rdev->dev_attr = NULL; +free_chip_ctx: + kfree(rdev->chip_ctx); + rdev->chip_ctx = NULL; + return rc; } /* SR-IOV helper functions */ @@ -219,7 +230,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev) struct bnxt_qplib_ctx *ctx; int i; - attr = &rdev->dev_attr; + attr = rdev->dev_attr; ctx = &rdev->qplib_ctx; ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT, @@ -233,7 +244,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev) if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) 
rdev->qplib_ctx.tqm_ctx.qcount[i] = - rdev->dev_attr.tqm_alloc_reqs[i]; + rdev->dev_attr->tqm_alloc_reqs[i]; } static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf) @@ -314,10 +325,12 @@ static void bnxt_re_stop_irq(void *handle) int indx; rdev = en_info->rdev; + if (!rdev) + return; rcfw = &rdev->rcfw; - for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) { - nq = &rdev->nq[indx - 1]; + for (indx = BNXT_RE_NQ_IDX; indx < rdev->nqr->num_msix; indx++) { + nq = &rdev->nqr->nq[indx - 1]; bnxt_qplib_nq_stop_irq(nq, false); } @@ -334,7 +347,9 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent) int indx, rc; rdev = en_info->rdev; - msix_ent = rdev->en_dev->msix_entries; + if (!rdev) + return; + msix_ent = rdev->nqr->msix_entries; rcfw = &rdev->rcfw; if (!ent) { /* Not setting the f/w timeout bit in rcfw. @@ -349,8 +364,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent) /* Vectors may change after restart, so update with new vectors * in device sctructure. */ - for (indx = 0; indx < rdev->num_msix; indx++) - rdev->en_dev->msix_entries[indx].vector = ent[indx].vector; + for (indx = 0; indx < rdev->nqr->num_msix; indx++) + rdev->nqr->msix_entries[indx].vector = ent[indx].vector; rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector, false); @@ -358,8 +373,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent) ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n"); return; } - for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) { - nq = &rdev->nq[indx - 1]; + for (indx = BNXT_RE_NQ_IDX ; indx < rdev->nqr->num_msix; indx++) { + nq = &rdev->nqr->nq[indx - 1]; rc = bnxt_qplib_nq_start_irq(nq, indx - 1, msix_ent[indx].vector, false); if (rc) { @@ -943,7 +958,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr); - ibdev->num_comp_vectors = rdev->num_msix - 1; + ibdev->num_comp_vectors = rdev->nqr->num_msix - 1; ibdev->dev.parent = &rdev->en_dev->pdev->dev; ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY; @@ -1276,8 +1291,8 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev) { int i; - for (i = 1; i < rdev->num_msix; i++) - bnxt_qplib_disable_nq(&rdev->nq[i - 1]); + for (i = 1; i < rdev->nqr->num_msix; i++) + bnxt_qplib_disable_nq(&rdev->nqr->nq[i - 1]); if (rdev->qplib_res.rcfw) bnxt_qplib_cleanup_res(&rdev->qplib_res); @@ -1291,10 +1306,10 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev) bnxt_qplib_init_res(&rdev->qplib_res); - for (i = 1; i < rdev->num_msix ; i++) { - db_offt = rdev->en_dev->msix_entries[i].db_offset; - rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1], - i - 1, rdev->en_dev->msix_entries[i].vector, + for (i = 1; i < rdev->nqr->num_msix ; i++) { + db_offt = rdev->nqr->msix_entries[i].db_offset; + rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nqr->nq[i - 1], + i - 1, rdev->nqr->msix_entries[i].vector, db_offt, &bnxt_re_cqn_handler, &bnxt_re_srqn_handler); if (rc) { @@ -1307,20 +1322,22 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev) return 0; fail: for (i = num_vec_enabled; i >= 0; i--) - bnxt_qplib_disable_nq(&rdev->nq[i]); + bnxt_qplib_disable_nq(&rdev->nqr->nq[i]); return rc; } static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev) { + struct bnxt_qplib_nq *nq; u8 type; int i; - for (i = 0; i < rdev->num_msix - 1; i++) { + for (i = 0; i < rdev->nqr->num_msix - 1; i++) { type = bnxt_qplib_get_ring_type(rdev->chip_ctx); - 
bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type); - bnxt_qplib_free_nq(&rdev->nq[i]); - rdev->nq[i].res = NULL; + nq = &rdev->nqr->nq[i]; + bnxt_re_net_ring_free(rdev, nq->ring_id, type); + bnxt_qplib_free_nq(nq); + nq->res = NULL; } } @@ -1347,12 +1364,11 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) /* Configure and allocate resources for qplib */ rdev->qplib_res.rcfw = &rdev->rcfw; - rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr); + rc = bnxt_qplib_get_dev_attr(&rdev->rcfw); if (rc) goto fail; - rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev, - rdev->netdev, &rdev->dev_attr); + rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->netdev); if (rc) goto fail; @@ -1362,12 +1378,12 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) if (rc) goto dealloc_res; - for (i = 0; i < rdev->num_msix - 1; i++) { + for (i = 0; i < rdev->nqr->num_msix - 1; i++) { struct bnxt_qplib_nq *nq; - nq = &rdev->nq[i]; + nq = &rdev->nqr->nq[i]; nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT; - rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]); + rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, nq); if (rc) { ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x", i, rc); @@ -1375,17 +1391,17 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) } type = bnxt_qplib_get_ring_type(rdev->chip_ctx); rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr; - rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count; + rattr.pages = nq->hwq.pbl[rdev->nqr->nq[i].hwq.level].pg_count; rattr.type = type; rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX; rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1; - rattr.lrid = rdev->en_dev->msix_entries[i + 1].ring_idx; + rattr.lrid = rdev->nqr->msix_entries[i + 1].ring_idx; rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id); if (rc) { ibdev_err(&rdev->ibdev, "Failed to allocate NQ fw id with rc = 0x%x", rc); - bnxt_qplib_free_nq(&rdev->nq[i]); + bnxt_qplib_free_nq(nq); goto free_nq; } num_vec_created++; @@ -1394,8 +1410,8 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) free_nq: for (i = num_vec_created - 1; i >= 0; i--) { type = bnxt_qplib_get_ring_type(rdev->chip_ctx); - bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type); - bnxt_qplib_free_nq(&rdev->nq[i]); + bnxt_re_net_ring_free(rdev, rdev->nqr->nq[i].ring_id, type); + bnxt_qplib_free_nq(&rdev->nqr->nq[i]); } bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &rdev->dpi_privileged); @@ -1584,6 +1600,21 @@ static int bnxt_re_ib_init(struct bnxt_re_dev *rdev) return rc; } +static int bnxt_re_alloc_nqr_mem(struct bnxt_re_dev *rdev) +{ + rdev->nqr = kzalloc(sizeof(*rdev->nqr), GFP_KERNEL); + if (!rdev->nqr) + return -ENOMEM; + + return 0; +} + +static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev) +{ + kfree(rdev->nqr); + rdev->nqr = NULL; +} + static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type) { u8 type; @@ -1611,11 +1642,12 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type) bnxt_qplib_free_rcfw_channel(&rdev->rcfw); } - rdev->num_msix = 0; + rdev->nqr->num_msix = 0; if (rdev->pacing.dbr_pacing) bnxt_re_deinitialize_dbr_pacing(rdev); + bnxt_re_free_nqr_mem(rdev); bnxt_re_destroy_chip_ctx(rdev); if (op_type == BNXT_RE_COMPLETE_REMOVE) { if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) @@ -1653,6 +1685,17 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type) } set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); + if (rdev->en_dev->ulp_tbl->msix_requested < BNXT_RE_MIN_MSIX) { + ibdev_err(&rdev->ibdev, + "RoCE 
requires minimum 2 MSI-X vectors, but only %d reserved\n", + rdev->en_dev->ulp_tbl->msix_requested); + bnxt_unregister_dev(rdev->en_dev); + clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); + return -EINVAL; + } + ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n", + rdev->en_dev->ulp_tbl->msix_requested); + rc = bnxt_re_setup_chip_ctx(rdev); if (rc) { bnxt_unregister_dev(rdev->en_dev); @@ -1661,19 +1704,20 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type) return -EINVAL; } + rc = bnxt_re_alloc_nqr_mem(rdev); + if (rc) { + bnxt_re_destroy_chip_ctx(rdev); + bnxt_unregister_dev(rdev->en_dev); + clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); + return rc; + } + rdev->nqr->num_msix = rdev->en_dev->ulp_tbl->msix_requested; + memcpy(rdev->nqr->msix_entries, rdev->en_dev->msix_entries, + sizeof(struct bnxt_msix_entry) * rdev->nqr->num_msix); + /* Check whether VF or PF */ bnxt_re_get_sriov_func_type(rdev); - if (!rdev->en_dev->ulp_tbl->msix_requested) { - ibdev_err(&rdev->ibdev, - "Failed to get MSI-X vectors: %#x\n", rc); - rc = -EINVAL; - goto fail; - } - ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n", - rdev->en_dev->ulp_tbl->msix_requested); - rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested; - bnxt_re_query_hwrm_intf_version(rdev); /* Establish RCFW Communication Channel to initialize the context @@ -1695,14 +1739,14 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type) rattr.type = type; rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX; rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1; - rattr.lrid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx; + rattr.lrid = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].ring_idx; rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id); if (rc) { ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc); goto free_rcfw; } - db_offt = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].db_offset; - vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector; + db_offt = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].db_offset; + vid = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].vector; rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw, vid, db_offt, &bnxt_re_aeq_handler); @@ -1722,7 +1766,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type) rdev->pacing.dbr_pacing = false; } } - rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr); + rc = bnxt_qplib_get_dev_attr(&rdev->rcfw); if (rc) goto disable_rcfw; @@ -2047,6 +2091,7 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state) ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx", __func__, en_dev->en_state); bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev); + bnxt_re_update_en_info_rdev(NULL, en_info, adev); mutex_unlock(&bnxt_re_mutex); return 0; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 613b5fc70e13e..7436ce5515797 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -1216,8 +1216,6 @@ static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp) qp->path_mtu = CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; } - qp->modify_flags &= - ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID; /* Bono FW require the max_dest_rd_atomic to be >= 1 */ if (qp->max_dest_rd_atomic < 1) qp->max_dest_rd_atomic = 1; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 07779aeb75759..a4deb45ec849f 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ 
b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -283,9 +283,10 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_ctx *ctx, int is_virtfn); void bnxt_qplib_mark_qp_error(void *qp_handle); + static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw) { /* Last index of the qp_tbl is for QP1 ie. qp_tbl_size - 1*/ - return (qid == 1) ? rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2; + return (qid == 1) ? rcfw->qp_tbl_size - 1 : (qid % (rcfw->qp_tbl_size - 2)); } #endif /* __BNXT_QPLIB_RCFW_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 96ceec1e8199a..02922a0987ad7 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -876,14 +876,13 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res) bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl); } -int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev, - struct net_device *netdev, - struct bnxt_qplib_dev_attr *dev_attr) +int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev) { + struct bnxt_qplib_dev_attr *dev_attr; int rc; - res->pdev = pdev; res->netdev = netdev; + dev_attr = res->dattr; rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid); if (rc) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index c2f710364e0ff..b40cff8252bc4 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -421,9 +421,7 @@ int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res, void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res); int bnxt_qplib_init_res(struct bnxt_qplib_res *res); void bnxt_qplib_free_res(struct bnxt_qplib_res *res); -int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev, - struct net_device *netdev, - struct bnxt_qplib_dev_attr *dev_attr); +int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev); void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res, struct bnxt_qplib_ctx *ctx); int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res, @@ -546,6 +544,14 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags) CREQ_QUERY_FUNC_RESP_SB_EXT_STATS; } +static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx, + u16 flags, bool virtfn) +{ + /* ext stats supported if cap flag is set AND is a PF OR a Thor2 VF */ + return (_is_ext_stats_supported(flags) && + ((virtfn && bnxt_qplib_is_chip_gen_p7(ctx)) || (!virtfn))); +} + static inline bool _is_hw_retx_supported(u16 dev_cap_flags) { return dev_cap_flags & diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 3cca7b1395f6a..807439b1acb51 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -88,9 +88,9 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw, fw_ver[3] = resp.fw_rsvd; } -int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, - struct bnxt_qplib_dev_attr *attr) +int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw) { + struct bnxt_qplib_dev_attr *attr = rcfw->res->dattr; struct creq_query_func_resp resp = {}; struct bnxt_qplib_cmdqmsg msg = {}; struct creq_query_func_resp_sb *sb; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index ecf3f45fea74f..de959b3c28e01 100644 --- 
a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -325,8 +325,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, struct bnxt_qplib_gid *gid, u16 gid_idx, const u8 *smac); -int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, - struct bnxt_qplib_dev_attr *attr); +int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res, struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_ctx *ctx); diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c index 771059a8eb7d7..e349e8d2fb50a 100644 --- a/drivers/infiniband/hw/erdma/erdma_cm.c +++ b/drivers/infiniband/hw/erdma/erdma_cm.c @@ -705,7 +705,6 @@ static void erdma_accept_newconn(struct erdma_cep *cep) erdma_cancel_mpatimer(new_cep); erdma_cep_put(new_cep); - new_cep->sock = NULL; } if (new_s) { diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c index 950c133d4220e..6ee911f6885b5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_alloc.c +++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c @@ -175,8 +175,10 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev) if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) ida_destroy(&hr_dev->xrcd_ida.ida); - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { ida_destroy(&hr_dev->srq_table.srq_ida.ida); + xa_destroy(&hr_dev->srq_table.xa); + } hns_roce_cleanup_qp_table(hr_dev); hns_roce_cleanup_cq_table(hr_dev); ida_destroy(&hr_dev->mr_table.mtpt_ida.ida); diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 4106423a1b399..3a5c93c9fb3e6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -537,5 +537,6 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev) for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++) ida_destroy(&hr_dev->cq_table.bank[i].ida); + xa_destroy(&hr_dev->cq_table.array); mutex_destroy(&hr_dev->cq_table.bank_mutex); } diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index 605562122ecce..ca0798224e565 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -1361,6 +1361,11 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev, return ret; } +/* This is the bottom bt pages number of a 100G MR on 4K OS, assuming + * the bt page size is not expanded by cal_best_bt_pg_sz() + */ +#define RESCHED_LOOP_CNT_THRESHOLD_ON_4K 12800 + /* construct the base address table and link them by address hop config */ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list, @@ -1369,6 +1374,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, { const struct hns_roce_buf_region *r; int ofs, end; + int loop; int unit; int ret; int i; @@ -1386,7 +1392,10 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, continue; end = r->offset + r->count; - for (ofs = r->offset; ofs < end; ofs += unit) { + for (ofs = r->offset, loop = 1; ofs < end; ofs += unit, loop++) { + if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K)) + cond_resched(); + ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs, hem_list->mid_bt[i], &hem_list->btm_bt); @@ -1443,9 +1452,14 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev, struct list_head *head = &hem_list->btm_bt; struct 
hns_roce_hem_item *hem, *temp_hem; void *cpu_base = NULL; + int loop = 1; int nr = 0; list_for_each_entry_safe(hem, temp_hem, head, sibling) { + if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K)) + cond_resched(); + loop++; + if (hem_list_page_is_in_range(hem, offset)) { nr = offset - hem->start; cpu_base = hem->addr + nr * BA_BYTE_LEN; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 0144e7210d05a..f5c3e560df58d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1286,10 +1286,8 @@ static u32 hns_roce_cmdq_tx_timeout(u16 opcode, u32 tx_timeout) return tx_timeout; } -static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode) +static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u32 tx_timeout) { - struct hns_roce_v2_priv *priv = hr_dev->priv; - u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout); u32 timeout = 0; do { @@ -1299,8 +1297,9 @@ static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode) } while (++timeout < tx_timeout); } -static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, - struct hns_roce_cmq_desc *desc, int num) +static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev, + struct hns_roce_cmq_desc *desc, + int num, u32 tx_timeout) { struct hns_roce_v2_priv *priv = hr_dev->priv; struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq; @@ -1309,8 +1308,6 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, int ret; int i; - spin_lock_bh(&csq->lock); - tail = csq->head; for (i = 0; i < num; i++) { @@ -1324,22 +1321,17 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]); - hns_roce_wait_csq_done(hr_dev, le16_to_cpu(desc->opcode)); + hns_roce_wait_csq_done(hr_dev, tx_timeout); if (hns_roce_cmq_csq_done(hr_dev)) { ret = 0; for (i = 0; i < num; i++) { /* check the result of hardware write back */ - desc[i] = csq->desc[tail++]; + desc_ret = le16_to_cpu(csq->desc[tail++].retval); if (tail == csq->desc_num) tail = 0; - - desc_ret = le16_to_cpu(desc[i].retval); if (likely(desc_ret == CMD_EXEC_SUCCESS)) continue; - dev_err_ratelimited(hr_dev->dev, - "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n", - desc->opcode, desc_ret); ret = hns_roce_cmd_err_convert_errno(desc_ret); } } else { @@ -1354,14 +1346,54 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, ret = -EAGAIN; } - spin_unlock_bh(&csq->lock); - if (ret) atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_ERR_CNT]); return ret; } +static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, + struct hns_roce_cmq_desc *desc, int num) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq; + u16 opcode = le16_to_cpu(desc->opcode); + u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout); + u8 try_cnt = HNS_ROCE_OPC_POST_MB_TRY_CNT; + u32 rsv_tail; + int ret; + int i; + + while (try_cnt) { + try_cnt--; + + spin_lock_bh(&csq->lock); + rsv_tail = csq->head; + ret = __hns_roce_cmq_send_one(hr_dev, desc, num, tx_timeout); + if (opcode == HNS_ROCE_OPC_POST_MB && ret == -ETIME && + try_cnt) { + spin_unlock_bh(&csq->lock); + mdelay(HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC); + continue; + } + + for (i = 0; i < num; i++) { + desc[i] = csq->desc[rsv_tail++]; + if (rsv_tail == csq->desc_num) + rsv_tail = 0; + } + spin_unlock_bh(&csq->lock); + break; + } + + if (ret) + dev_err_ratelimited(hr_dev->dev, + "Cmdq IO error, opcode 
= 0x%x, return = %d.\n", + opcode, ret); + + return ret; +} + static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, struct hns_roce_cmq_desc *desc, int num) { diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index cbdbc9edbce6e..91a5665465ffb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -230,6 +230,8 @@ enum hns_roce_opcode_type { }; #define HNS_ROCE_OPC_POST_MB_TIMEOUT 35000 +#define HNS_ROCE_OPC_POST_MB_TRY_CNT 8 +#define HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC 5 struct hns_roce_cmdq_tx_timeout_map { u16 opcode; u32 tx_timeout; diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index ae24c81c9812d..8d0b63d4b50a6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -183,7 +183,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev, IB_DEVICE_RC_RNR_NAK_GEN; props->max_send_sge = hr_dev->caps.max_sq_sg; props->max_recv_sge = hr_dev->caps.max_rq_sg; - props->max_sge_rd = 1; + props->max_sge_rd = hr_dev->caps.max_sq_sg; props->max_cq = hr_dev->caps.num_cqs; props->max_cqe = hr_dev->caps.max_cqes; props->max_mr = hr_dev->caps.num_mtpts; @@ -763,7 +763,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) if (ret) return ret; } - dma_set_max_seg_size(dev, UINT_MAX); + dma_set_max_seg_size(dev, SZ_2G); ret = ib_register_device(ib_dev, "hns_%d", dev); if (ret) { dev_err(dev, "ib_register_device failed!\n"); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 9e2e76c594063..8901c142c1b65 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -868,12 +868,14 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_ib_create_qp *ucmd, struct hns_roce_ib_create_qp_resp *resp) { + bool has_sdb = user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd); struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata, struct hns_roce_ucontext, ibucontext); + bool has_rdb = user_qp_has_rdb(hr_dev, init_attr, udata, resp); struct ib_device *ibdev = &hr_dev->ib_dev; int ret; - if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) { + if (has_sdb) { ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb); if (ret) { ibdev_err(ibdev, @@ -884,7 +886,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB; } - if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) { + if (has_rdb) { ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb); if (ret) { ibdev_err(ibdev, @@ -898,7 +900,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, return 0; err_sdb: - if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) + if (has_sdb) hns_roce_db_unmap_user(uctx, &hr_qp->sdb); err_out: return ret; @@ -1119,24 +1121,23 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, ibucontext); hr_qp->config = uctx->config; ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd); - if (ret) + if (ret) { ibdev_err(ibdev, "failed to set user SQ size, ret = %d.\n", ret); + return ret; + } ret = set_congest_param(hr_dev, hr_qp, ucmd); - if (ret) - return ret; } else { if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) hr_qp->config = HNS_ROCE_EXSGE_FLAGS; + default_congest_type(hr_dev, hr_qp); ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp); if (ret) ibdev_err(ibdev, 
"failed to set kernel SQ size, ret = %d.\n", ret); - - default_congest_type(hr_dev, hr_qp); } return ret; @@ -1219,7 +1220,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, min(udata->outlen, sizeof(resp))); if (ret) { ibdev_err(ibdev, "copy qp resp failed!\n"); - goto err_store; + goto err_flow_ctrl; } } @@ -1602,6 +1603,7 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev) for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) ida_destroy(&hr_dev->qp_table.bank[i].ida); xa_destroy(&hr_dev->qp_table.dip_xa); + xa_destroy(&hr_dev->qp_table_xa); mutex_destroy(&hr_dev->qp_table.bank_mutex); mutex_destroy(&hr_dev->qp_table.scc_mutex); } diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c index 67c2d43135a8a..f6bf289041bfe 100644 --- a/drivers/infiniband/hw/mana/main.c +++ b/drivers/infiniband/hw/mana/main.c @@ -174,7 +174,7 @@ static int mana_gd_allocate_doorbell_page(struct gdma_context *gc, req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE; req.num_resources = 1; - req.alignment = 1; + req.alignment = PAGE_SIZE / MANA_PAGE_SIZE; /* Have GDMA start searching from 0 */ req.allocated_resources = 0; @@ -358,7 +358,7 @@ static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem unsigned int tail = 0; u64 *page_addr_list; void *request_buf; - int err; + int err = 0; gc = mdev_to_gc(dev); hwc = gc->hwc.driver_data; diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c index 505bc47fd575d..531a57f9ee7e8 100644 --- a/drivers/infiniband/hw/mlx5/ah.c +++ b/drivers/infiniband/hw/mlx5/ah.c @@ -50,11 +50,12 @@ static __be16 mlx5_ah_get_udp_sport(const struct mlx5_ib_dev *dev, return sport; } -static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah, +static int create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah, struct rdma_ah_init_attr *init_attr) { struct rdma_ah_attr *ah_attr = init_attr->ah_attr; enum ib_gid_type gid_type; + int rate_val; if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) { const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); @@ -67,7 +68,10 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah, ah->av.tclass = grh->traffic_class; } - ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4); + rate_val = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr)); + if (rate_val < 0) + return rate_val; + ah->av.stat_rate_sl = rate_val << 4; if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { if (init_attr->xmit_slave) @@ -88,6 +92,8 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah, ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f; ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf); } + + return 0; } int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, @@ -120,8 +126,7 @@ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, return err; } - create_ib_ah(dev, ah, init_attr); - return 0; + return create_ib_ah(dev, ah, init_attr); } int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c index 4f6c1968a2ee3..81cfa74147a18 100644 --- a/drivers/infiniband/hw/mlx5/counters.c +++ b/drivers/infiniband/hw/mlx5/counters.c @@ -546,6 +546,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter, struct ib_qp *qp) { struct mlx5_ib_dev *dev = to_mdev(qp->device); + bool new = false; int err; if (!counter->id) { @@ -560,6 +561,7 @@ static int 
mlx5_ib_counter_bind_qp(struct rdma_counter *counter, return err; counter->id = MLX5_GET(alloc_q_counter_out, out, counter_set_id); + new = true; } err = mlx5_ib_qp_set_counter(qp, counter); @@ -569,8 +571,10 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter, return 0; fail_set_counter: - mlx5_ib_counter_dealloc(counter); - counter->id = 0; + if (new) { + mlx5_ib_counter_dealloc(counter); + counter->id = 0; + } return err; } diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 4c54dc5780690..1aa5311b03e9f 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -490,7 +490,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq, } qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff; - if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { + if (!*cur_qp || (qpn != (*cur_qp)->trans_qp.base.mqp.qpn)) { /* We do not have to take the QP table lock here, * because CQs will be locked while QPs are removed * from the table. diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index bb02b6adbf2c2..068eac3bdb50b 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -56,7 +56,7 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context); static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, u64 iova, int access_flags, - unsigned int page_size, bool populate, + unsigned long page_size, bool populate, int access_mode); static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr); @@ -919,6 +919,25 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev, return ERR_PTR(ret); } +static void mlx5r_destroy_cache_entries(struct mlx5_ib_dev *dev) +{ + struct rb_root *root = &dev->cache.rb_root; + struct mlx5_cache_ent *ent; + struct rb_node *node; + + mutex_lock(&dev->cache.rb_lock); + node = rb_first(root); + while (node) { + ent = rb_entry(node, struct mlx5_cache_ent, node); + node = rb_next(node); + clean_keys(dev, ent); + rb_erase(&ent->node, root); + mlx5r_mkeys_uninit(ent); + kfree(ent); + } + mutex_unlock(&dev->cache.rb_lock); +} + int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) { struct mlx5_mkey_cache *cache = &dev->cache; @@ -970,6 +989,8 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) err: mutex_unlock(&cache->rb_lock); mlx5_mkey_cache_debugfs_cleanup(dev); + mlx5r_destroy_cache_entries(dev); + destroy_workqueue(cache->wq); mlx5_ib_warn(dev, "failed to create mkey cache entry\n"); return ret; } @@ -1003,17 +1024,7 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev) mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); /* At this point all entries are disabled and have no concurrent work. 
*/ - mutex_lock(&dev->cache.rb_lock); - node = rb_first(root); - while (node) { - ent = rb_entry(node, struct mlx5_cache_ent, node); - node = rb_next(node); - clean_keys(dev, ent); - rb_erase(&ent->node, root); - mlx5r_mkeys_uninit(ent); - kfree(ent); - } - mutex_unlock(&dev->cache.rb_lock); + mlx5r_destroy_cache_entries(dev); destroy_workqueue(dev->cache.wq); del_timer_sync(&dev->delay_timer); @@ -1115,7 +1126,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd, struct mlx5r_cache_rb_key rb_key = {}; struct mlx5_cache_ent *ent; struct mlx5_ib_mr *mr; - unsigned int page_size; + unsigned long page_size; if (umem->is_dmabuf) page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova); @@ -1219,7 +1230,7 @@ reg_create_crossing_vhca_mr(struct ib_pd *pd, u64 iova, u64 length, int access_f */ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, u64 iova, int access_flags, - unsigned int page_size, bool populate, + unsigned long page_size, bool populate, int access_mode) { struct mlx5_ib_dev *dev = to_mdev(pd->device); @@ -1425,7 +1436,7 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem, mr = alloc_cacheable_mr(pd, umem, iova, access_flags, MLX5_MKC_ACCESS_MODE_MTT); } else { - unsigned int page_size = + unsigned long page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova); mutex_lock(&dev->slow_path_mutex); @@ -1550,7 +1561,7 @@ static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach) dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv); - if (!umem_dmabuf->sgt) + if (!umem_dmabuf->sgt || !mr) return; mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP); @@ -1935,7 +1946,8 @@ mlx5_alloc_priv_descs(struct ib_device *device, static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr) { - if (!mr->umem && !mr->data_direct && mr->descs) { + if (!mr->umem && !mr->data_direct && + mr->ibmr.type != IB_MR_TYPE_DM && mr->descs) { struct ib_device *device = mr->ibmr.device; int size = mr->max_descs * mr->desc_size; struct mlx5_ib_dev *dev = to_mdev(device); @@ -2022,11 +2034,16 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); struct mlx5_cache_ent *ent = mr->mmkey.cache_ent; bool is_odp = is_odp_mr(mr); + bool is_odp_dma_buf = is_dmabuf_mr(mr) && + !to_ib_umem_dmabuf(mr->umem)->pinned; int ret = 0; if (is_odp) mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex); + if (is_odp_dma_buf) + dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL); + if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) { ent = mr->mmkey.cache_ent; /* upon storing to a clean temp entry - schedule its cleanup */ @@ -2054,6 +2071,12 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex); } + if (is_odp_dma_buf) { + if (!ret) + to_ib_umem_dmabuf(mr->umem)->private = NULL; + dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv); + } + return ret; } diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 1d3bf56157702..e158d5b1ab17b 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -242,6 +242,7 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr) if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) != mr) { xa_unlock(&imr->implicit_children); + mlx5r_deref_odp_mkey(&imr->mmkey); return; } @@ -308,9 +309,6 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni, 
blk_start_idx = idx; in_block = 1; } - - /* Count page invalidations */ - invalidations += idx - blk_start_idx + 1; } else { u64 umr_offset = idx & umr_block_mask; @@ -320,14 +318,19 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni, MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC); in_block = 0; + /* Count page invalidations */ + invalidations += idx - blk_start_idx + 1; } } } - if (in_block) + if (in_block) { mlx5r_umr_update_xlt(mr, blk_start_idx, idx - blk_start_idx + 1, 0, MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC); + /* Count page invalidations */ + invalidations += idx - blk_start_idx + 1; + } mlx5_update_odp_stats(mr, invalidations, invalidations); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 10ce3b44f645f..ded139b4e87aa 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3420,11 +3420,11 @@ static int ib_to_mlx5_rate_map(u8 rate) return 0; } -static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) +int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate) { u32 stat_rate_support; - if (rate == IB_RATE_PORT_CURRENT) + if (rate == IB_RATE_PORT_CURRENT || rate == IB_RATE_800_GBPS) return 0; if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_800_GBPS) @@ -3569,7 +3569,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, sizeof(grh->dgid.raw)); } - err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah)); + err = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah)); if (err < 0) return err; MLX5_SET(ads, path, stat_rate, err); @@ -4547,6 +4547,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1); MLX5_SET(dctc, dctc, counter_set_id, set_id); + + qp->port = attr->port_num; } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { struct mlx5_ib_modify_qp_resp resp = {}; u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {}; @@ -5033,7 +5035,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp, } if (qp_attr_mask & IB_QP_PORT) - qp_attr->port_num = MLX5_GET(dctc, dctc, port); + qp_attr->port_num = mqp->port; if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak); if (qp_attr_mask & IB_QP_AV) { diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h index b6ee7c3ee1ca1..2530e7730635f 100644 --- a/drivers/infiniband/hw/mlx5/qp.h +++ b/drivers/infiniband/hw/mlx5/qp.h @@ -56,4 +56,5 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn); int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter); int mlx5_ib_qp_event_init(void); void mlx5_ib_qp_event_cleanup(void); +int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate); #endif /* _MLX5_IB_QP_H */ diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c index 887fd6fa3ba93..793f3c5c4d012 100644 --- a/drivers/infiniband/hw/mlx5/umr.c +++ b/drivers/infiniband/hw/mlx5/umr.c @@ -231,30 +231,6 @@ void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev) ib_dealloc_pd(dev->umrc.pd); } -static int mlx5r_umr_recover(struct mlx5_ib_dev *dev) -{ - struct umr_common *umrc = &dev->umrc; - struct ib_qp_attr attr; - int err; - - attr.qp_state = IB_QPS_RESET; - err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE); - if (err) { - mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); - goto err; - } - - err = mlx5r_umr_qp_rst2rts(dev, umrc->qp); - if (err) - goto err; - - umrc->state = MLX5_UMR_STATE_ACTIVE; - return 0; - -err: - umrc->state = 
MLX5_UMR_STATE_ERR; - return err; -} static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe, struct mlx5r_umr_wqe *wqe, bool with_data) @@ -302,6 +278,61 @@ static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe, return err; } +static int mlx5r_umr_recover(struct mlx5_ib_dev *dev, u32 mkey, + struct mlx5r_umr_context *umr_context, + struct mlx5r_umr_wqe *wqe, bool with_data) +{ + struct umr_common *umrc = &dev->umrc; + struct ib_qp_attr attr; + int err; + + mutex_lock(&umrc->lock); + /* Preventing any further WRs to be sent now */ + if (umrc->state != MLX5_UMR_STATE_RECOVER) { + mlx5_ib_warn(dev, "UMR recovery encountered an unexpected state=%d\n", + umrc->state); + umrc->state = MLX5_UMR_STATE_RECOVER; + } + mutex_unlock(&umrc->lock); + + /* Sending a final/barrier WR (the failed one) and wait for its completion. + * This will ensure that all the previous WRs got a completion before + * we set the QP state to RESET. + */ + err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context->cqe, wqe, + with_data); + if (err) { + mlx5_ib_warn(dev, "UMR recovery post send failed, err %d\n", err); + goto err; + } + + /* Since the QP is in an error state, it will only receive + * IB_WC_WR_FLUSH_ERR. However, as it serves only as a barrier + * we don't care about its status. + */ + wait_for_completion(&umr_context->done); + + attr.qp_state = IB_QPS_RESET; + err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE); + if (err) { + mlx5_ib_warn(dev, "Couldn't modify UMR QP to RESET, err=%d\n", err); + goto err; + } + + err = mlx5r_umr_qp_rst2rts(dev, umrc->qp); + if (err) { + mlx5_ib_warn(dev, "Couldn't modify UMR QP to RTS, err=%d\n", err); + goto err; + } + + umrc->state = MLX5_UMR_STATE_ACTIVE; + return 0; + +err: + umrc->state = MLX5_UMR_STATE_ERR; + return err; +} + static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc) { struct mlx5_ib_umr_context *context = @@ -366,9 +397,7 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey, mlx5_ib_warn(dev, "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs, mkey = %u\n", umr_context.status, mkey); - mutex_lock(&umrc->lock); - err = mlx5r_umr_recover(dev); - mutex_unlock(&umrc->lock); + err = mlx5r_umr_recover(dev, mkey, &umr_context, wqe, with_data); if (err) mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n", err); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 13b654ddd3cc8..bcf7d8607d56e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -380,7 +380,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev) if (!us_ibdev) { usnic_err("Device %s context alloc failed\n", netdev_name(pci_get_drvdata(dev))); - return ERR_PTR(-EFAULT); + return NULL; } us_ibdev->ufdev = usnic_fwd_dev_alloc(dev); @@ -500,8 +500,8 @@ static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic) } us_ibdev = usnic_ib_device_add(parent_pci); - if (IS_ERR_OR_NULL(us_ibdev)) { - us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT); + if (!us_ibdev) { + us_ibdev = ERR_PTR(-EFAULT); goto out; } @@ -569,10 +569,10 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev, } pf = usnic_ib_discover_pf(vf->vnic); - if (IS_ERR_OR_NULL(pf)) { - usnic_err("Failed to discover pf of vnic %s with err%ld\n", - pci_name(pdev), PTR_ERR(pf)); - err = pf ? 
PTR_ERR(pf) : -EFAULT; + if (IS_ERR(pf)) { + err = PTR_ERR(pf); + usnic_err("Failed to discover pf of vnic %s with err%d\n", + pci_name(pdev), err); goto out_clean_vnic; } diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 1ba4a0c8726ae..e27478fe9456c 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -38,10 +38,8 @@ void rxe_dealloc(struct ib_device *ib_dev) } /* initialize rxe device parameters */ -static void rxe_init_device_param(struct rxe_dev *rxe) +static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev) { - struct net_device *ndev; - rxe->max_inline_data = RXE_MAX_INLINE_DATA; rxe->attr.vendor_id = RXE_VENDOR_ID; @@ -74,15 +72,9 @@ static void rxe_init_device_param(struct rxe_dev *rxe) rxe->attr.max_pkeys = RXE_MAX_PKEYS; rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY; - ndev = rxe_ib_device_get_netdev(&rxe->ib_dev); - if (!ndev) - return; - addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid, ndev->dev_addr); - dev_put(ndev); - rxe->max_ucontext = RXE_MAX_UCONTEXT; } @@ -115,18 +107,13 @@ static void rxe_init_port_param(struct rxe_port *port) /* initialize port state, note IB convention that HCA ports are always * numbered from 1 */ -static void rxe_init_ports(struct rxe_dev *rxe) +static void rxe_init_ports(struct rxe_dev *rxe, struct net_device *ndev) { struct rxe_port *port = &rxe->port; - struct net_device *ndev; rxe_init_port_param(port); - ndev = rxe_ib_device_get_netdev(&rxe->ib_dev); - if (!ndev) - return; addrconf_addr_eui48((unsigned char *)&port->port_guid, ndev->dev_addr); - dev_put(ndev); spin_lock_init(&port->port_lock); } @@ -144,12 +131,12 @@ static void rxe_init_pools(struct rxe_dev *rxe) } /* initialize rxe device state */ -static void rxe_init(struct rxe_dev *rxe) +static void rxe_init(struct rxe_dev *rxe, struct net_device *ndev) { /* init default device parameters */ - rxe_init_device_param(rxe); + rxe_init_device_param(rxe, ndev); - rxe_init_ports(rxe); + rxe_init_ports(rxe, ndev); rxe_init_pools(rxe); /* init pending mmap list */ @@ -184,7 +171,7 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu) int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name, struct net_device *ndev) { - rxe_init(rxe); + rxe_init(rxe, ndev); rxe_set_mtu(rxe, mtu); return rxe_register_device(rxe, ibdev_name, ndev); diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 77fddab9d9502..a6c7951011308 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -140,6 +140,7 @@ static const struct xpad_device { { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, + { 0x044f, 0xd01e, "ThrustMaster, Inc. 
ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE }, { 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX }, { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 }, { 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX }, @@ -177,6 +178,7 @@ static const struct xpad_device { { 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX }, { 0x06a3, 0x0201, "Saitek Adrenalin", 0, XTYPE_XBOX }, { 0x06a3, 0xf51a, "Saitek P3600", 0, XTYPE_XBOX360 }, + { 0x0738, 0x4503, "Mad Catz Racing Wheel", 0, XTYPE_XBOXONE }, { 0x0738, 0x4506, "Mad Catz 4506 Wireless Controller", 0, XTYPE_XBOX }, { 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX }, { 0x0738, 0x4520, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX }, @@ -238,6 +240,7 @@ static const struct xpad_device { { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, + { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", XTYPE_XBOXONE }, { 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, @@ -276,12 +279,15 @@ static const struct xpad_device { { 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0f0d, 0x0151, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE }, + { 0x0f0d, 0x0152, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE }, { 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX }, { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, + { 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, { 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 }, { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, @@ -306,7 +312,7 @@ static const struct xpad_device { { 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 }, { 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 }, { 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 }, - { 0x1a86, 0xe310, "QH Electronics Controller", 0, XTYPE_XBOX360 }, + { 0x1a86, 0xe310, "Legion Go S", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, @@ -343,6 +349,7 @@ static const struct xpad_device { { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 }, { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, + { 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 }, { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE }, { 0x20d6, 0x281f, "PowerA 
Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, @@ -366,6 +373,7 @@ static const struct xpad_device { { 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE }, { 0x24c6, 0x561a, "PowerA FUSION Controller", 0, XTYPE_XBOXONE }, + { 0x24c6, 0x581a, "ThrustMaster XB1 Classic Controller", 0, XTYPE_XBOXONE }, { 0x24c6, 0x5b00, "ThrustMaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5b02, "Thrustmaster, Inc. GPX Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 }, @@ -374,10 +382,15 @@ static const struct xpad_device { { 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 }, { 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE }, { 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE }, + { 0x2993, 0x2001, "TECNO Pocket Go", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE }, { 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 }, + { 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 }, + { 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 }, { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, + { 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE }, + { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE }, { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 }, @@ -385,11 +398,16 @@ static const struct xpad_device { { 0x31e3, 0x1230, "Wooting Two HE (ARM)", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 }, + { 0x3285, 0x0603, "Nacon Pro Compact controller for Xbox", 0, XTYPE_XBOXONE }, { 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 }, + { 0x3285, 0x0614, "Nacon Pro Compact", 0, XTYPE_XBOXONE }, { 0x3285, 0x0646, "Nacon Pro Compact", 0, XTYPE_XBOXONE }, + { 0x3285, 0x0662, "Nacon Revolution5 Pro", 0, XTYPE_XBOX360 }, { 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE }, { 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 }, + { 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE }, { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX }, + { 0x413d, 0x2104, "Black Shark Green Ghost Gamepad", 0, XTYPE_XBOX360 }, { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX }, { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN } }; @@ -488,6 +506,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x03f0), /* HP HyperX Xbox 360 controllers */ XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */ XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster Xbox 360 controllers */ + XPAD_XBOXONE_VENDOR(0x044f), /* Thrustmaster Xbox One controllers */ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */ XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft Xbox One controllers */ XPAD_XBOX360_VENDOR(0x046d), /* Logitech Xbox 360-style controllers */ @@ -519,8 +538,9 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */ XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */ - XPAD_XBOX360_VENDOR(0x1a86), /* QH Electronics 
*/ + XPAD_XBOX360_VENDOR(0x1a86), /* Nanjing Qinheng Microelectronics (WCH) */ XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */ + XPAD_XBOX360_VENDOR(0x1ee9), /* ZOTAC Technology Limited */ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA controllers */ XPAD_XBOX360_VENDOR(0x2345), /* Machenike Controllers */ @@ -528,17 +548,20 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA controllers */ XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */ XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */ - XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */ + XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */ + XPAD_XBOX360_VENDOR(0x2993), /* TECNO Mobile */ XPAD_XBOX360_VENDOR(0x2c22), /* Qanba Controllers */ - XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller */ - XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */ - XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke Xbox One pad */ - XPAD_XBOX360_VENDOR(0x2f24), /* GameSir controllers */ + XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Controllers */ + XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Controllers */ + XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Controllers */ + XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */ + XPAD_XBOXONE_VENDOR(0x2e95), /* SCUF Gaming Controller */ XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */ XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */ XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */ XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */ XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */ + XPAD_XBOX360_VENDOR(0x413d), /* Black Shark Green Ghost Controller */ { } }; @@ -691,7 +714,9 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init), XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on), + XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth), + XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c index be80a31de9f8f..01c4009fd53e7 100644 --- a/drivers/input/misc/iqs7222.c +++ b/drivers/input/misc/iqs7222.c @@ -100,11 +100,11 @@ enum iqs7222_reg_key_id { enum iqs7222_reg_grp_id { IQS7222_REG_GRP_STAT, - IQS7222_REG_GRP_FILT, IQS7222_REG_GRP_CYCLE, IQS7222_REG_GRP_GLBL, IQS7222_REG_GRP_BTN, IQS7222_REG_GRP_CHAN, + IQS7222_REG_GRP_FILT, IQS7222_REG_GRP_SLDR, IQS7222_REG_GRP_TPAD, IQS7222_REG_GRP_GPIO, @@ -286,6 +286,7 @@ static const struct iqs7222_event_desc iqs7222_tp_events[] = { struct iqs7222_reg_grp_desc { u16 base; + u16 val_len; int num_row; int num_col; }; @@ -342,6 +343,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xAC00, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -400,6 +402,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xAC00, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -454,6 +457,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xC400, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -496,6 +500,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 
0xC400, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -543,6 +548,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xAA00, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -600,6 +606,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xAA00, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -656,6 +663,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xAE00, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -712,6 +720,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xAE00, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -768,6 +777,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xAE00, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -1604,7 +1614,7 @@ static int iqs7222_force_comms(struct iqs7222_private *iqs7222) } static int iqs7222_read_burst(struct iqs7222_private *iqs7222, - u16 reg, void *val, u16 num_val) + u16 reg, void *val, u16 val_len) { u8 reg_buf[sizeof(__be16)]; int ret, i; @@ -1619,7 +1629,7 @@ static int iqs7222_read_burst(struct iqs7222_private *iqs7222, { .addr = client->addr, .flags = I2C_M_RD, - .len = num_val * sizeof(__le16), + .len = val_len, .buf = (u8 *)val, }, }; @@ -1675,7 +1685,7 @@ static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val) __le16 val_buf; int error; - error = iqs7222_read_burst(iqs7222, reg, &val_buf, 1); + error = iqs7222_read_burst(iqs7222, reg, &val_buf, sizeof(val_buf)); if (error) return error; @@ -1685,10 +1695,9 @@ static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val) } static int iqs7222_write_burst(struct iqs7222_private *iqs7222, - u16 reg, const void *val, u16 num_val) + u16 reg, const void *val, u16 val_len) { int reg_len = reg > U8_MAX ? sizeof(reg) : sizeof(u8); - int val_len = num_val * sizeof(__le16); int msg_len = reg_len + val_len; int ret, i; struct i2c_client *client = iqs7222->client; @@ -1747,7 +1756,7 @@ static int iqs7222_write_word(struct iqs7222_private *iqs7222, u16 reg, u16 val) { __le16 val_buf = cpu_to_le16(val); - return iqs7222_write_burst(iqs7222, reg, &val_buf, 1); + return iqs7222_write_burst(iqs7222, reg, &val_buf, sizeof(val_buf)); } static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222) @@ -1831,30 +1840,14 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir) /* * Acknowledge reset before writing any registers in case the device - * suffers a spurious reset during initialization. Because this step - * may change the reserved fields of the second filter beta register, - * its cache must be updated. - * - * Writing the second filter beta register, in turn, may clobber the - * system status register. As such, the filter beta register pair is - * written first to protect against this hazard. + * suffers a spurious reset during initialization. 
*/ if (dir == WRITE) { - u16 reg = dev_desc->reg_grps[IQS7222_REG_GRP_FILT].base + 1; - u16 filt_setup; - error = iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP, iqs7222->sys_setup[0] | IQS7222_SYS_SETUP_ACK_RESET); if (error) return error; - - error = iqs7222_read_word(iqs7222, reg, &filt_setup); - if (error) - return error; - - iqs7222->filt_setup[1] &= GENMASK(7, 0); - iqs7222->filt_setup[1] |= (filt_setup & ~GENMASK(7, 0)); } /* @@ -1883,6 +1876,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir) int num_col = dev_desc->reg_grps[i].num_col; u16 reg = dev_desc->reg_grps[i].base; __le16 *val_buf; + u16 val_len = dev_desc->reg_grps[i].val_len ? : num_col * sizeof(*val_buf); u16 *val; if (!num_col) @@ -1900,7 +1894,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir) switch (dir) { case READ: error = iqs7222_read_burst(iqs7222, reg, - val_buf, num_col); + val_buf, val_len); for (k = 0; k < num_col; k++) val[k] = le16_to_cpu(val_buf[k]); break; @@ -1909,7 +1903,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir) for (k = 0; k < num_col; k++) val_buf[k] = cpu_to_le16(val[k]); error = iqs7222_write_burst(iqs7222, reg, - val_buf, num_col); + val_buf, val_len); break; default: @@ -1962,7 +1956,7 @@ static int iqs7222_dev_info(struct iqs7222_private *iqs7222) int error, i; error = iqs7222_read_burst(iqs7222, IQS7222_PROD_NUM, dev_id, - ARRAY_SIZE(dev_id)); + sizeof(dev_id)); if (error) return error; @@ -2917,7 +2911,7 @@ static int iqs7222_report(struct iqs7222_private *iqs7222) __le16 status[IQS7222_MAX_COLS_STAT]; error = iqs7222_read_burst(iqs7222, IQS7222_SYS_STATUS, status, - num_stat); + num_stat * sizeof(*status)); if (error) return error; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 380aa1614442f..3d1459b551bb2 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -667,23 +667,50 @@ static void synaptics_pt_stop(struct serio *serio) serio_continue_rx(parent->ps2dev.serio); } +static int synaptics_pt_open(struct serio *serio) +{ + struct psmouse *parent = psmouse_from_serio(serio->parent); + struct synaptics_data *priv = parent->private; + + guard(serio_pause_rx)(parent->ps2dev.serio); + priv->pt_port_open = true; + + return 0; +} + +static void synaptics_pt_close(struct serio *serio) +{ + struct psmouse *parent = psmouse_from_serio(serio->parent); + struct synaptics_data *priv = parent->private; + + guard(serio_pause_rx)(parent->ps2dev.serio); + priv->pt_port_open = false; +} + static int synaptics_is_pt_packet(u8 *buf) { return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4; } -static void synaptics_pass_pt_packet(struct serio *ptport, u8 *packet) +static void synaptics_pass_pt_packet(struct synaptics_data *priv, u8 *packet) { - struct psmouse *child = psmouse_from_serio(ptport); + struct serio *ptport; - if (child && child->state == PSMOUSE_ACTIVATED) { - serio_interrupt(ptport, packet[1], 0); - serio_interrupt(ptport, packet[4], 0); - serio_interrupt(ptport, packet[5], 0); - if (child->pktsize == 4) - serio_interrupt(ptport, packet[2], 0); - } else { - serio_interrupt(ptport, packet[1], 0); + ptport = priv->pt_port; + if (!ptport) + return; + + serio_interrupt(ptport, packet[1], 0); + + if (priv->pt_port_open) { + struct psmouse *child = psmouse_from_serio(ptport); + + if (child->state == PSMOUSE_ACTIVATED) { + serio_interrupt(ptport, packet[4], 0); + serio_interrupt(ptport, packet[5], 0); + if (child->pktsize == 4) + serio_interrupt(ptport, 
packet[2], 0); + } } } @@ -722,6 +749,8 @@ static void synaptics_pt_create(struct psmouse *psmouse) serio->write = synaptics_pt_write; serio->start = synaptics_pt_start; serio->stop = synaptics_pt_stop; + serio->open = synaptics_pt_open; + serio->close = synaptics_pt_close; serio->parent = psmouse->ps2dev.serio; psmouse->pt_activate = synaptics_pt_activate; @@ -1218,11 +1247,10 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse) if (SYN_CAP_PASS_THROUGH(priv->info.capabilities) && synaptics_is_pt_packet(psmouse->packet)) { - if (priv->pt_port) - synaptics_pass_pt_packet(priv->pt_port, - psmouse->packet); - } else + synaptics_pass_pt_packet(priv, psmouse->packet); + } else { synaptics_process_packet(psmouse); + } return PSMOUSE_FULL_PACKET; } diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index 08533d1b1b16f..4b34f13b9f761 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h @@ -188,6 +188,7 @@ struct synaptics_data { bool disable_gesture; /* disable gestures */ struct serio *pt_port; /* Pass-through serio port */ + bool pt_port_open; /* * Last received Advanced Gesture Mode (AGM) packet. An AGM packet diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h index 34d1f07ea4c30..8813db7eec397 100644 --- a/drivers/input/serio/i8042-acpipnpio.h +++ b/drivers/input/serio/i8042-acpipnpio.h @@ -1080,16 +1080,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"), DMI_MATCH(DMI_BOARD_NAME, "AURA1501"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"), DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* Mivvy M310 */ @@ -1159,9 +1157,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { }, /* * A lot of modern Clevo barebones have touchpad and/or keyboard issues - * after suspend fixable with nomux + reset + noloop + nopnp. Luckily, - * none of them have an external PS/2 port so this can safely be set for - * all of them. + * after suspend fixable with the forcenorestore quirk. * Clevo barebones come with board_vendor and/or system_vendor set to * either the very generic string "Notebook" and/or a different value * for each individual reseller. 
The only somewhat universal way to @@ -1171,29 +1167,25 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "N140CU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "N141CU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { @@ -1205,29 +1197,19 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { - /* - * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes - * the keyboard very laggy for ~5 seconds after boot and - * sometimes also after resume. - * However both are required for the keyboard to not fail - * completely sometimes after boot or resume. - */ .matches = { DMI_MATCH(DMI_BOARD_NAME, "NHxxRZQ"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, /* * At least one modern Clevo barebone has the touchpad connected both @@ -1243,17 +1225,15 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NS50MU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX | - SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP | - SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_NOAUX | + SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX | - SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP | - SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_NOAUX | + SERIO_QUIRK_FORCENORESTORE) }, { .matches = { @@ -1265,8 +1245,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "P640RE"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1277,16 +1262,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65xH"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) 
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */ .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1297,8 +1280,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65_P67H"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1309,8 +1291,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RP"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1321,8 +1302,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RS"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1333,22 +1313,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P67xRP"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PB51RF"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PB71RD"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PC70DR"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PCX0DX_GN20"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, /* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */ { @@ -1361,15 +1362,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "X170SM"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 607f18af70104..212dafa0bba2d 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -1011,7 +1011,7 @@ static int ads7846_setup_pendown(struct spi_device *spi, if 
(pdata->get_pendown_state) { ts->get_pendown_state = pdata->get_pendown_state; } else { - ts->gpio_pendown = gpiod_get(&spi->dev, "pendown", GPIOD_IN); + ts->gpio_pendown = devm_gpiod_get(&spi->dev, "pendown", GPIOD_IN); if (IS_ERR(ts->gpio_pendown)) { dev_err(&spi->dev, "failed to request pendown GPIO\n"); return PTR_ERR(ts->gpio_pendown); diff --git a/drivers/input/touchscreen/goodix_berlin_core.c b/drivers/input/touchscreen/goodix_berlin_core.c index 3fc03cf0ca23f..141c64675997d 100644 --- a/drivers/input/touchscreen/goodix_berlin_core.c +++ b/drivers/input/touchscreen/goodix_berlin_core.c @@ -165,7 +165,7 @@ struct goodix_berlin_core { struct device *dev; struct regmap *regmap; struct regulator *avdd; - struct regulator *iovdd; + struct regulator *vddio; struct gpio_desc *reset_gpio; struct touchscreen_properties props; struct goodix_berlin_fw_version fw_version; @@ -248,19 +248,19 @@ static int goodix_berlin_power_on(struct goodix_berlin_core *cd) { int error; - error = regulator_enable(cd->iovdd); + error = regulator_enable(cd->vddio); if (error) { - dev_err(cd->dev, "Failed to enable iovdd: %d\n", error); + dev_err(cd->dev, "Failed to enable vddio: %d\n", error); return error; } - /* Vendor waits 3ms for IOVDD to settle */ + /* Vendor waits 3ms for VDDIO to settle */ usleep_range(3000, 3100); error = regulator_enable(cd->avdd); if (error) { dev_err(cd->dev, "Failed to enable avdd: %d\n", error); - goto err_iovdd_disable; + goto err_vddio_disable; } /* Vendor waits 15ms for IOVDD to settle */ @@ -283,8 +283,8 @@ static int goodix_berlin_power_on(struct goodix_berlin_core *cd) err_dev_reset: gpiod_set_value_cansleep(cd->reset_gpio, 1); regulator_disable(cd->avdd); -err_iovdd_disable: - regulator_disable(cd->iovdd); +err_vddio_disable: + regulator_disable(cd->vddio); return error; } @@ -292,7 +292,7 @@ static void goodix_berlin_power_off(struct goodix_berlin_core *cd) { gpiod_set_value_cansleep(cd->reset_gpio, 1); regulator_disable(cd->avdd); - regulator_disable(cd->iovdd); + regulator_disable(cd->vddio); } static int goodix_berlin_read_version(struct goodix_berlin_core *cd) @@ -744,10 +744,10 @@ int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id, return dev_err_probe(dev, PTR_ERR(cd->avdd), "Failed to request avdd regulator\n"); - cd->iovdd = devm_regulator_get(dev, "iovdd"); - if (IS_ERR(cd->iovdd)) - return dev_err_probe(dev, PTR_ERR(cd->iovdd), - "Failed to request iovdd regulator\n"); + cd->vddio = devm_regulator_get(dev, "vddio"); + if (IS_ERR(cd->vddio)) + return dev_err_probe(dev, PTR_ERR(cd->vddio), + "Failed to request vddio regulator\n"); error = goodix_berlin_power_on(cd); if (error) { diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c index d525ab43a4aeb..dd7d030d2e890 100644 --- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c +++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c @@ -487,17 +487,6 @@ static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu) /* VCMDQ Resource Helpers */ -static void tegra241_vcmdq_free_smmu_cmdq(struct tegra241_vcmdq *vcmdq) -{ - struct arm_smmu_queue *q = &vcmdq->cmdq.q; - size_t nents = 1 << q->llq.max_n_shift; - size_t qsz = nents << CMDQ_ENT_SZ_SHIFT; - - if (!q->base) - return; - dmam_free_coherent(vcmdq->cmdqv->smmu.dev, qsz, q->base, q->base_dma); -} - static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq) { struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu; @@ -560,7 +549,8 @@ static void tegra241_vintf_free_lvcmdq(struct 
tegra241_vintf *vintf, u16 lidx) struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx]; char header[64]; - tegra241_vcmdq_free_smmu_cmdq(vcmdq); + /* Note that the lvcmdq queue memory space is managed by devres */ + tegra241_vintf_deinit_lvcmdq(vintf, lidx); dev_dbg(vintf->cmdqv->dev, @@ -768,13 +758,13 @@ static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu) vintf = kzalloc(sizeof(*vintf), GFP_KERNEL); if (!vintf) - goto out_fallback; + return -ENOMEM; /* Init VINTF0 for in-kernel use */ ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf); if (ret) { dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret); - goto free_vintf; + return ret; } /* Preallocate logical VCMDQs to VINTF0 */ @@ -783,24 +773,12 @@ static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu) vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx); if (IS_ERR(vcmdq)) - goto free_lvcmdq; + return PTR_ERR(vcmdq); } /* Now, we are ready to run all the impl ops */ smmu->impl_ops = &tegra241_cmdqv_impl_ops; return 0; - -free_lvcmdq: - for (lidx--; lidx >= 0; lidx--) - tegra241_vintf_free_lvcmdq(vintf, lidx); - tegra241_cmdqv_deinit_vintf(cmdqv, vintf->idx); -free_vintf: - kfree(vintf); -out_fallback: - dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n"); - smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV; - tegra241_cmdqv_remove(smmu); - return 0; } #ifdef CONFIG_IOMMU_DEBUGFS diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index c666ecab955d2..7465dbb6fa80c 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -832,7 +832,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev) struct exynos_iommu_owner *owner = dev_iommu_priv_get(master); mutex_lock(&owner->rpm_lock); - if (&data->domain->domain != &exynos_identity_domain) { + if (data->domain) { dev_dbg(data->sysmmu, "saving state\n"); __sysmmu_disable(data); } @@ -850,7 +850,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev) struct exynos_iommu_owner *owner = dev_iommu_priv_get(master); mutex_lock(&owner->rpm_lock); - if (&data->domain->domain != &exynos_identity_domain) { + if (data->domain) { dev_dbg(data->sysmmu, "restoring state\n"); __sysmmu_enable(data); } diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index eaf862e8dea1a..7f553f7aa3cb3 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -2056,6 +2056,7 @@ int enable_drhd_fault_handling(unsigned int cpu) /* * Enable fault control interrupt. */ + guard(rwsem_read)(&dmar_global_lock); for_each_iommu(iommu, drhd) { u32 fault_status; int ret; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index cc23cfcdeb2d5..038a66388564a 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3174,6 +3174,7 @@ static int __init probe_acpi_namespace_devices(void) if (dev->bus != &acpi_bus_type) continue; + up_read(&dmar_global_lock); adev = to_acpi_device(dev); mutex_lock(&adev->physical_node_lock); list_for_each_entry(pn, @@ -3183,6 +3184,7 @@ static int __init probe_acpi_namespace_devices(void) break; } mutex_unlock(&adev->physical_node_lock); + down_read(&dmar_global_lock); if (ret) return ret; @@ -3307,7 +3309,14 @@ int __init intel_iommu_init(void) iommu_device_sysfs_add(&iommu->iommu, NULL, intel_iommu_groups, "%s", iommu->name); + /* + * The iommu device probe is protected by the iommu_probe_device_lock. + * Release the dmar_global_lock before entering the device probe path + * to avoid unnecessary lock order splat. 
+ */ + up_read(&dmar_global_lock); iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); + down_read(&dmar_global_lock); iommu_pmu_register(iommu); } @@ -4547,9 +4556,6 @@ static int context_setup_pass_through_cb(struct pci_dev *pdev, u16 alias, void * { struct device *dev = data; - if (dev != &pdev->dev) - return 0; - return context_setup_pass_through(dev, PCI_BUS_NUM(alias), alias & 0xff); } diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index 7a6d188e3bea0..71b3383b7115c 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -26,11 +26,6 @@ #include "../iommu-pages.h" #include "cap_audit.h" -enum irq_mode { - IRQ_REMAPPING, - IRQ_POSTING, -}; - struct ioapic_scope { struct intel_iommu *iommu; unsigned int id; @@ -50,8 +45,8 @@ struct irq_2_iommu { u16 irte_index; u16 sub_handle; u8 irte_mask; - enum irq_mode mode; bool posted_msi; + bool posted_vcpu; }; struct intel_ir_data { @@ -139,7 +134,6 @@ static int alloc_irte(struct intel_iommu *iommu, irq_iommu->irte_index = index; irq_iommu->sub_handle = 0; irq_iommu->irte_mask = mask; - irq_iommu->mode = IRQ_REMAPPING; } raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); @@ -194,8 +188,6 @@ static int modify_irte(struct irq_2_iommu *irq_iommu, rc = qi_flush_iec(iommu, index, 0); - /* Update iommu mode according to the IRTE mode */ - irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING; raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); return rc; @@ -1173,7 +1165,26 @@ static void intel_ir_reconfigure_irte_posted(struct irq_data *irqd) static inline void intel_ir_reconfigure_irte_posted(struct irq_data *irqd) {} #endif -static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force) +static void __intel_ir_reconfigure_irte(struct irq_data *irqd, bool force_host) +{ + struct intel_ir_data *ir_data = irqd->chip_data; + + /* + * Don't modify IRTEs for IRQs that are being posted to vCPUs if the + * host CPU affinity changes. 
+ */ + if (ir_data->irq_2_iommu.posted_vcpu && !force_host) + return; + + ir_data->irq_2_iommu.posted_vcpu = false; + + if (ir_data->irq_2_iommu.posted_msi) + intel_ir_reconfigure_irte_posted(irqd); + else + modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry); +} + +static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force_host) { struct intel_ir_data *ir_data = irqd->chip_data; struct irte *irte = &ir_data->irte_entry; @@ -1186,10 +1197,7 @@ static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force) irte->vector = cfg->vector; irte->dest_id = IRTE_DEST(cfg->dest_apicid); - if (ir_data->irq_2_iommu.posted_msi) - intel_ir_reconfigure_irte_posted(irqd); - else if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING) - modify_irte(&ir_data->irq_2_iommu, irte); + __intel_ir_reconfigure_irte(irqd, force_host); } /* @@ -1244,7 +1252,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info) /* stop posting interrupts, back to the default mode */ if (!vcpu_pi_info) { - modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry); + __intel_ir_reconfigure_irte(data, true); } else { struct irte irte_pi; @@ -1267,6 +1275,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info) irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) & ~(-1UL << PDA_HIGH_BIT); + ir_data->irq_2_iommu.posted_vcpu = true; modify_irte(&ir_data->irq_2_iommu, &irte_pi); } @@ -1282,43 +1291,44 @@ static struct irq_chip intel_ir_chip = { }; /* - * With posted MSIs, all vectors are multiplexed into a single notification - * vector. Devices MSIs are then dispatched in a demux loop where - * EOIs can be coalesced as well. + * With posted MSIs, the MSI vectors are multiplexed into a single notification + * vector, and only the notification vector is sent to the APIC IRR. Device + * MSIs are then dispatched in a demux loop that harvests the MSIs from the + * CPU's Posted Interrupt Request bitmap. I.e. Posted MSIs never get sent to + * the APIC IRR, and thus do not need an EOI. The notification handler instead + * performs a single EOI after processing the PIR. * - * "INTEL-IR-POST" IRQ chip does not do EOI on ACK, thus the dummy irq_ack() - * function. Instead EOI is performed by the posted interrupt notification - * handler. + * Note! Pending SMP/CPU affinity changes, which are per MSI, must still be + * honored, only the APIC EOI is omitted. * * For the example below, 3 MSIs are coalesced into one CPU notification. Only - * one apic_eoi() is needed. + * one apic_eoi() is needed, but each MSI needs to process pending changes to + * its CPU affinity. 
* * __sysvec_posted_msi_notification() * irq_enter(); * handle_edge_irq() * irq_chip_ack_parent() - * dummy(); // No EOI + * irq_move_irq(); // No EOI * handle_irq_event() * driver_handler() * handle_edge_irq() * irq_chip_ack_parent() - * dummy(); // No EOI + * irq_move_irq(); // No EOI * handle_irq_event() * driver_handler() * handle_edge_irq() * irq_chip_ack_parent() - * dummy(); // No EOI + * irq_move_irq(); // No EOI * handle_irq_event() * driver_handler() * apic_eoi() * irq_exit() + * */ - -static void dummy_ack(struct irq_data *d) { } - static struct irq_chip intel_ir_chip_post_msi = { .name = "INTEL-IR-POST", - .irq_ack = dummy_ack, + .irq_ack = irq_move_irq, .irq_set_affinity = intel_ir_set_affinity, .irq_compose_msi_msg = intel_ir_compose_msi_msg, .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity, @@ -1494,6 +1504,9 @@ static void intel_irq_remapping_deactivate(struct irq_domain *domain, struct intel_ir_data *data = irq_data->chip_data; struct irte entry; + WARN_ON_ONCE(data->irq_2_iommu.posted_vcpu); + data->irq_2_iommu.posted_vcpu = false; + memset(&entry, 0, sizeof(entry)); modify_irte(&data->irq_2_iommu, &entry); } diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c index 5fd3dd4202901..3fd8920e79ffb 100644 --- a/drivers/iommu/iommufd/device.c +++ b/drivers/iommu/iommufd/device.c @@ -352,6 +352,122 @@ iommufd_device_attach_reserved_iova(struct iommufd_device *idev, return 0; } +/* The device attach/detach/replace helpers for attach_handle */ + +/* Check if idev is attached to igroup->hwpt */ +static bool iommufd_device_is_attached(struct iommufd_device *idev) +{ + struct iommufd_device *cur; + + list_for_each_entry(cur, &idev->igroup->device_list, group_item) + if (cur == idev) + return true; + return false; +} + +static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev) +{ + struct iommufd_attach_handle *handle; + int rc; + + lockdep_assert_held(&idev->igroup->lock); + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + if (hwpt->fault) { + rc = iommufd_fault_iopf_enable(idev); + if (rc) + goto out_free_handle; + } + + handle->idev = idev; + rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group, + &handle->handle); + if (rc) + goto out_disable_iopf; + + return 0; + +out_disable_iopf: + if (hwpt->fault) + iommufd_fault_iopf_disable(idev); +out_free_handle: + kfree(handle); + return rc; +} + +static struct iommufd_attach_handle * +iommufd_device_get_attach_handle(struct iommufd_device *idev) +{ + struct iommu_attach_handle *handle; + + lockdep_assert_held(&idev->igroup->lock); + + handle = + iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0); + if (IS_ERR(handle)) + return NULL; + return to_iommufd_handle(handle); +} + +static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev) +{ + struct iommufd_attach_handle *handle; + + handle = iommufd_device_get_attach_handle(idev); + iommu_detach_group_handle(hwpt->domain, idev->igroup->group); + if (hwpt->fault) { + iommufd_auto_response_faults(hwpt, handle); + iommufd_fault_iopf_disable(idev); + } + kfree(handle); +} + +static int iommufd_hwpt_replace_device(struct iommufd_device *idev, + struct iommufd_hw_pagetable *hwpt, + struct iommufd_hw_pagetable *old) +{ + struct iommufd_attach_handle *handle, *old_handle = + iommufd_device_get_attach_handle(idev); + int rc; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + if 
(hwpt->fault && !old->fault) { + rc = iommufd_fault_iopf_enable(idev); + if (rc) + goto out_free_handle; + } + + handle->idev = idev; + rc = iommu_replace_group_handle(idev->igroup->group, hwpt->domain, + &handle->handle); + if (rc) + goto out_disable_iopf; + + if (old->fault) { + iommufd_auto_response_faults(hwpt, old_handle); + if (!hwpt->fault) + iommufd_fault_iopf_disable(idev); + } + kfree(old_handle); + + return 0; + +out_disable_iopf: + if (hwpt->fault && !old->fault) + iommufd_fault_iopf_disable(idev); +out_free_handle: + kfree(handle); + return rc; +} + int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, struct iommufd_device *idev) { @@ -488,6 +604,11 @@ iommufd_device_do_replace(struct iommufd_device *idev, goto err_unlock; } + if (!iommufd_device_is_attached(idev)) { + rc = -EINVAL; + goto err_unlock; + } + if (hwpt == igroup->hwpt) { mutex_unlock(&idev->igroup->lock); return NULL; @@ -1127,7 +1248,7 @@ int iommufd_access_rw(struct iommufd_access *access, unsigned long iova, struct io_pagetable *iopt; struct iopt_area *area; unsigned long last_iova; - int rc; + int rc = -EINVAL; if (!length) return -EINVAL; diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c index 95e2e99ab2724..1b0812f8bf840 100644 --- a/drivers/iommu/iommufd/fault.c +++ b/drivers/iommu/iommufd/fault.c @@ -16,7 +16,7 @@ #include "../iommu-priv.h" #include "iommufd_private.h" -static int iommufd_fault_iopf_enable(struct iommufd_device *idev) +int iommufd_fault_iopf_enable(struct iommufd_device *idev) { struct device *dev = idev->dev; int ret; @@ -45,7 +45,7 @@ static int iommufd_fault_iopf_enable(struct iommufd_device *idev) return ret; } -static void iommufd_fault_iopf_disable(struct iommufd_device *idev) +void iommufd_fault_iopf_disable(struct iommufd_device *idev) { mutex_lock(&idev->iopf_lock); if (!WARN_ON(idev->iopf_enabled == 0)) { @@ -93,8 +93,8 @@ int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt, return ret; } -static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt, - struct iommufd_attach_handle *handle) +void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt, + struct iommufd_attach_handle *handle) { struct iommufd_fault *fault = hwpt->fault; struct iopf_group *group, *next; diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h index c1f82cb682425..18cdf1391a034 100644 --- a/drivers/iommu/iommufd/iommufd_private.h +++ b/drivers/iommu/iommufd/iommufd_private.h @@ -523,35 +523,10 @@ int iommufd_fault_domain_replace_dev(struct iommufd_device *idev, struct iommufd_hw_pagetable *hwpt, struct iommufd_hw_pagetable *old); -static inline int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt, - struct iommufd_device *idev) -{ - if (hwpt->fault) - return iommufd_fault_domain_attach_dev(hwpt, idev); - - return iommu_attach_group(hwpt->domain, idev->igroup->group); -} - -static inline void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt, - struct iommufd_device *idev) -{ - if (hwpt->fault) { - iommufd_fault_domain_detach_dev(hwpt, idev); - return; - } - - iommu_detach_group(hwpt->domain, idev->igroup->group); -} - -static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev, - struct iommufd_hw_pagetable *hwpt, - struct iommufd_hw_pagetable *old) -{ - if (old->fault || hwpt->fault) - return iommufd_fault_domain_replace_dev(idev, hwpt, old); - - return iommu_group_replace_domain(idev->igroup->group, hwpt->domain); -} +int 
iommufd_fault_iopf_enable(struct iommufd_device *idev); +void iommufd_fault_iopf_disable(struct iommufd_device *idev); +void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt, + struct iommufd_attach_handle *handle); #ifdef CONFIG_IOMMUFD_TEST int iommufd_test(struct iommufd_ucmd *ucmd); diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 6a2707fe7a78c..32deab732209e 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -1371,15 +1371,6 @@ static int mtk_iommu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, data); mutex_init(&data->mutex); - ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, - "mtk-iommu.%pa", &ioaddr); - if (ret) - goto out_link_remove; - - ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev); - if (ret) - goto out_sysfs_remove; - if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) { list_add_tail(&data->list, data->plat_data->hw_list); data->hw_list = data->plat_data->hw_list; @@ -1389,19 +1380,28 @@ static int mtk_iommu_probe(struct platform_device *pdev) data->hw_list = &data->hw_list_head; } + ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, + "mtk-iommu.%pa", &ioaddr); + if (ret) + goto out_list_del; + + ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev); + if (ret) + goto out_sysfs_remove; + if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match); if (ret) - goto out_list_del; + goto out_device_unregister; } return ret; -out_list_del: - list_del(&data->list); +out_device_unregister: iommu_device_unregister(&data->iommu); out_sysfs_remove: iommu_device_sysfs_remove(&data->iommu); -out_link_remove: +out_list_del: + list_del(&data->list); if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) device_link_remove(data->smicomm_dev, dev); out_runtime_disable: diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 8fdee511bc0f2..cf469a6724972 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -44,6 +44,7 @@ static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI; #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1) #define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 2) +#define FLAGS_WORKAROUND_INSECURE (1ULL << 3) #define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) @@ -83,6 +84,8 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); #define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U) #define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer) +static bool nmi_support_forbidden; + /* * There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs * are potentially stolen by the secure side. 
Some code, especially code dealing @@ -163,21 +166,27 @@ static void __init gic_prio_init(void) { bool ds; - ds = gic_dist_security_disabled(); - if (!ds) { - u32 val; - - val = readl_relaxed(gic_data.dist_base + GICD_CTLR); - val |= GICD_CTLR_DS; - writel_relaxed(val, gic_data.dist_base + GICD_CTLR); + cpus_have_group0 = gic_has_group0(); - ds = gic_dist_security_disabled(); - if (ds) - pr_warn("Broken GIC integration, security disabled"); + ds = gic_dist_security_disabled(); + if ((gic_data.flags & FLAGS_WORKAROUND_INSECURE) && !ds) { + if (cpus_have_group0) { + u32 val; + + val = readl_relaxed(gic_data.dist_base + GICD_CTLR); + val |= GICD_CTLR_DS; + writel_relaxed(val, gic_data.dist_base + GICD_CTLR); + + ds = gic_dist_security_disabled(); + if (ds) + pr_warn("Broken GIC integration, security disabled\n"); + } else { + pr_warn("Broken GIC integration, pNMI forbidden\n"); + nmi_support_forbidden = true; + } } cpus_have_security_disabled = ds; - cpus_have_group0 = gic_has_group0(); /* * How priority values are used by the GIC depends on two things: @@ -209,7 +218,7 @@ static void __init gic_prio_init(void) * be in the non-secure range, we program the non-secure values into * the distributor to match the PMR values we want. */ - if (cpus_have_group0 & !cpus_have_security_disabled) { + if (cpus_have_group0 && !cpus_have_security_disabled) { dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq); dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi); } @@ -1922,6 +1931,18 @@ static bool gic_enable_quirk_arm64_2941627(void *data) return true; } +static bool gic_enable_quirk_rk3399(void *data) +{ + struct gic_chip_data *d = data; + + if (of_machine_is_compatible("rockchip,rk3399")) { + d->flags |= FLAGS_WORKAROUND_INSECURE; + return true; + } + + return false; +} + static bool rd_set_non_coherent(void *data) { struct gic_chip_data *d = data; @@ -1996,6 +2017,12 @@ static const struct gic_quirk gic_quirks[] = { .property = "dma-noncoherent", .init = rd_set_non_coherent, }, + { + .desc = "GICv3: Insecure RK3399 integration", + .iidr = 0x0000043b, + .mask = 0xff000fff, + .init = gic_enable_quirk_rk3399, + }, { } }; @@ -2004,7 +2031,7 @@ static void gic_enable_nmi_support(void) { int i; - if (!gic_prio_masking_enabled()) + if (!gic_prio_masking_enabled() || nmi_support_forbidden) return; rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR, diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c index b9dcc8e78c750..1f613eb7b7f03 100644 --- a/drivers/irqchip/irq-jcore-aic.c +++ b/drivers/irqchip/irq-jcore-aic.c @@ -38,7 +38,7 @@ static struct irq_chip jcore_aic; static void handle_jcore_irq(struct irq_desc *desc) { if (irqd_is_per_cpu(irq_desc_get_irq_data(desc))) - handle_percpu_irq(desc); + handle_percpu_devid_irq(desc); else handle_simple_irq(desc); } diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index 001c290bc07b7..cda0995b16798 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -159,8 +159,19 @@ static void set_brightness_delayed(struct work_struct *ws) * before this work item runs once. To make sure this works properly * handle LED_SET_BRIGHTNESS_OFF first. 
*/ - if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags)) + if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags)) { set_brightness_delayed_set_brightness(led_cdev, LED_OFF); + /* + * The consecutive led_set_brightness(LED_OFF), + * led_set_brightness(LED_FULL) could have been executed out of + * order (LED_FULL first), if the work_flags has been set + * between LED_SET_BRIGHTNESS_OFF and LED_SET_BRIGHTNESS of this + * work. To avoid ending with the LED turned off, turn the LED + * on again. + */ + if (led_cdev->delayed_set_value != LED_OFF) + set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); + } if (test_and_clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags)) set_brightness_delayed_set_brightness(led_cdev, led_cdev->delayed_set_value); @@ -331,10 +342,13 @@ void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value) * change is done immediately afterwards (before the work runs), * it uses a separate work_flag. */ - if (value) { - led_cdev->delayed_set_value = value; + led_cdev->delayed_set_value = value; + /* Ensure delayed_set_value is seen before work_flags modification */ + smp_mb__before_atomic(); + + if (value) set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); - } else { + else { clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); clear_bit(LED_SET_BLINK, &led_cdev->work_flags); set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags); diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c index f3c9ef2bfa572..5d8e27e2e7ae7 100644 --- a/drivers/leds/rgb/leds-qcom-lpg.c +++ b/drivers/leds/rgb/leds-qcom-lpg.c @@ -461,7 +461,7 @@ static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period) max_res = LPG_RESOLUTION_9BIT; } - min_period = div64_u64((u64)NSEC_PER_SEC * (1 << pwm_resolution_arr[0]), + min_period = div64_u64((u64)NSEC_PER_SEC * ((1 << pwm_resolution_arr[0]) - 1), clk_rate_arr[clk_len - 1]); if (period <= min_period) return -EINVAL; @@ -482,7 +482,7 @@ static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period) */ for (i = 0; i < pwm_resolution_count; i++) { - resolution = 1 << pwm_resolution_arr[i]; + resolution = (1 << pwm_resolution_arr[i]) - 1; for (clk_sel = 1; clk_sel < clk_len; clk_sel++) { u64 numerator = period * clk_rate_arr[clk_sel]; @@ -529,7 +529,7 @@ static void lpg_calc_duty(struct lpg_channel *chan, uint64_t duty) unsigned int clk_rate; if (chan->subtype == LPG_SUBTYPE_HI_RES_PWM) { - max = LPG_RESOLUTION_15BIT - 1; + max = BIT(lpg_pwm_resolution_hi_res[chan->pwm_resolution_sel]) - 1; clk_rate = lpg_clk_rates_hi_res[chan->clk_sel]; } else { max = LPG_RESOLUTION_9BIT - 1; @@ -1291,7 +1291,7 @@ static int lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, if (ret) return ret; - state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * (1 << resolution) * + state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * ((1 << resolution) - 1) * pre_div * (1 << m), refclk); state->duty_cycle = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pwm_value * pre_div * (1 << m), refclk); } else { diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c index 46c921000a34c..76f54f8b6b6c5 100644 --- a/drivers/mailbox/tegra-hsp.c +++ b/drivers/mailbox/tegra-hsp.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2025, NVIDIA CORPORATION. All rights reserved. 
*/ #include @@ -28,12 +28,6 @@ #define HSP_INT_FULL_MASK 0xff #define HSP_INT_DIMENSIONING 0x380 -#define HSP_nSM_SHIFT 0 -#define HSP_nSS_SHIFT 4 -#define HSP_nAS_SHIFT 8 -#define HSP_nDB_SHIFT 12 -#define HSP_nSI_SHIFT 16 -#define HSP_nINT_MASK 0xf #define HSP_DB_TRIGGER 0x0 #define HSP_DB_ENABLE 0x4 @@ -97,6 +91,20 @@ struct tegra_hsp_soc { bool has_per_mb_ie; bool has_128_bit_mb; unsigned int reg_stride; + + /* Shifts for dimensioning register. */ + unsigned int si_shift; + unsigned int db_shift; + unsigned int as_shift; + unsigned int ss_shift; + unsigned int sm_shift; + + /* Masks for dimensioning register. */ + unsigned int si_mask; + unsigned int db_mask; + unsigned int as_mask; + unsigned int ss_mask; + unsigned int sm_mask; }; struct tegra_hsp { @@ -747,11 +755,11 @@ static int tegra_hsp_probe(struct platform_device *pdev) return PTR_ERR(hsp->regs); value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING); - hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK; - hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK; - hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK; - hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK; - hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK; + hsp->num_sm = (value >> hsp->soc->sm_shift) & hsp->soc->sm_mask; + hsp->num_ss = (value >> hsp->soc->ss_shift) & hsp->soc->ss_mask; + hsp->num_as = (value >> hsp->soc->as_shift) & hsp->soc->as_mask; + hsp->num_db = (value >> hsp->soc->db_shift) & hsp->soc->db_mask; + hsp->num_si = (value >> hsp->soc->si_shift) & hsp->soc->si_mask; err = platform_get_irq_byname_optional(pdev, "doorbell"); if (err >= 0) @@ -915,6 +923,16 @@ static const struct tegra_hsp_soc tegra186_hsp_soc = { .has_per_mb_ie = false, .has_128_bit_mb = false, .reg_stride = 0x100, + .si_shift = 16, + .db_shift = 12, + .as_shift = 8, + .ss_shift = 4, + .sm_shift = 0, + .si_mask = 0xf, + .db_mask = 0xf, + .as_mask = 0xf, + .ss_mask = 0xf, + .sm_mask = 0xf, }; static const struct tegra_hsp_soc tegra194_hsp_soc = { @@ -922,6 +940,16 @@ static const struct tegra_hsp_soc tegra194_hsp_soc = { .has_per_mb_ie = true, .has_128_bit_mb = false, .reg_stride = 0x100, + .si_shift = 16, + .db_shift = 12, + .as_shift = 8, + .ss_shift = 4, + .sm_shift = 0, + .si_mask = 0xf, + .db_mask = 0xf, + .as_mask = 0xf, + .ss_mask = 0xf, + .sm_mask = 0xf, }; static const struct tegra_hsp_soc tegra234_hsp_soc = { @@ -929,6 +957,16 @@ static const struct tegra_hsp_soc tegra234_hsp_soc = { .has_per_mb_ie = false, .has_128_bit_mb = true, .reg_stride = 0x100, + .si_shift = 16, + .db_shift = 12, + .as_shift = 8, + .ss_shift = 4, + .sm_shift = 0, + .si_mask = 0xf, + .db_mask = 0xf, + .as_mask = 0xf, + .ss_mask = 0xf, + .sm_mask = 0xf, }; static const struct tegra_hsp_soc tegra264_hsp_soc = { @@ -936,6 +974,16 @@ static const struct tegra_hsp_soc tegra264_hsp_soc = { .has_per_mb_ie = false, .has_128_bit_mb = true, .reg_stride = 0x1000, + .si_shift = 17, + .db_shift = 12, + .as_shift = 8, + .ss_shift = 4, + .sm_shift = 0, + .si_mask = 0x1f, + .db_mask = 0x1f, + .as_mask = 0xf, + .ss_mask = 0xf, + .sm_mask = 0xf, }; static const struct of_device_id tegra_hsp_match[] = { diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c index 18ae45dcbfb28..b19b0142a690a 100644 --- a/drivers/md/dm-ebs-target.c +++ b/drivers/md/dm-ebs-target.c @@ -390,6 +390,12 @@ static int ebs_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } +static void ebs_postsuspend(struct dm_target *ti) +{ + struct ebs_c *ec = ti->private; + dm_bufio_client_reset(ec->bufio); 
+} + static void ebs_status(struct dm_target *ti, status_type_t type, unsigned int status_flags, char *result, unsigned int maxlen) { @@ -447,6 +453,7 @@ static struct target_type ebs_target = { .ctr = ebs_ctr, .dtr = ebs_dtr, .map = ebs_map, + .postsuspend = ebs_postsuspend, .status = ebs_status, .io_hints = ebs_io_hints, .prepare_ioctl = ebs_prepare_ioctl, diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 731467d4ed101..b690905ab89ff 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -426,7 +426,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b if (!clone) return NULL; - bio_init(clone, fc->dev->bdev, bio->bi_inline_vecs, nr_iovecs, bio->bi_opf); + bio_init(clone, fc->dev->bdev, clone->bi_inline_vecs, nr_iovecs, bio->bi_opf); clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector); clone->bi_private = bio; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index ee9f7cecd78e0..b35b779b17044 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -516,7 +517,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr) dm_integrity_io_error(ic, "crypto_shash_digest", r); return r; } - if (memcmp(mac, actual_mac, mac_size)) { + if (crypto_memneq(mac, actual_mac, mac_size)) { dm_integrity_io_error(ic, "superblock mac", -EILSEQ); dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0); return -EILSEQ; @@ -859,7 +860,7 @@ static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool if (likely(wr)) memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR); else { - if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) { + if (crypto_memneq(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) { dm_integrity_io_error(ic, "journal mac", -EILSEQ); dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0); } @@ -1401,10 +1402,9 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block, unsigned int *metadata_offset, unsigned int total_size, int op) { -#define MAY_BE_FILLER 1 -#define MAY_BE_HASH 2 unsigned int hash_offset = 0; - unsigned int may_be = MAY_BE_HASH | (ic->discard ? 
MAY_BE_FILLER : 0); + unsigned char mismatch_hash = 0; + unsigned char mismatch_filler = !ic->discard; do { unsigned char *data, *dp; @@ -1425,7 +1425,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se if (op == TAG_READ) { memcpy(tag, dp, to_copy); } else if (op == TAG_WRITE) { - if (memcmp(dp, tag, to_copy)) { + if (crypto_memneq(dp, tag, to_copy)) { memcpy(dp, tag, to_copy); dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy); } @@ -1433,29 +1433,30 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se /* e.g.: op == TAG_CMP */ if (likely(is_power_of_2(ic->tag_size))) { - if (unlikely(memcmp(dp, tag, to_copy))) - if (unlikely(!ic->discard) || - unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) { - goto thorough_test; - } + if (unlikely(crypto_memneq(dp, tag, to_copy))) + goto thorough_test; } else { unsigned int i, ts; thorough_test: ts = total_size; for (i = 0; i < to_copy; i++, ts--) { - if (unlikely(dp[i] != tag[i])) - may_be &= ~MAY_BE_HASH; - if (likely(dp[i] != DISCARD_FILLER)) - may_be &= ~MAY_BE_FILLER; + /* + * Warning: the control flow must not be + * dependent on match/mismatch of + * individual bytes. + */ + mismatch_hash |= dp[i] ^ tag[i]; + mismatch_filler |= dp[i] ^ DISCARD_FILLER; hash_offset++; if (unlikely(hash_offset == ic->tag_size)) { - if (unlikely(!may_be)) { + if (unlikely(mismatch_hash) && unlikely(mismatch_filler)) { dm_bufio_release(b); return ts; } hash_offset = 0; - may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0); + mismatch_hash = 0; + mismatch_filler = !ic->discard; } } } @@ -1476,8 +1477,6 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se } while (unlikely(total_size)); return 0; -#undef MAY_BE_FILLER -#undef MAY_BE_HASH } struct flush_request { @@ -2076,7 +2075,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio, char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack); - if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { + if (unlikely(crypto_memneq(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx", logical_sector); dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum", @@ -2595,7 +2594,7 @@ static void dm_integrity_inline_recheck(struct work_struct *w) bio_put(outgoing_bio); integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, outgoing_data, digest); - if (unlikely(memcmp(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) { + if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) { DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx", ic->dev->bdev, dio->bio_details.bi_iter.bi_sector); atomic64_inc(&ic->number_of_mismatches); @@ -2634,7 +2633,7 @@ static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status char *mem = bvec_kmap_local(&bv); //memset(mem, 0xff, ic->sectors_per_block << SECTOR_SHIFT); integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, digest); - if (unlikely(memcmp(digest, dio->integrity_payload + pos, + if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) { kunmap_local(mem); 
dm_integrity_free_payload(dio); @@ -2911,7 +2910,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block), (char *)access_journal_data(ic, i, l), test_tag); - if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) { + if (unlikely(crypto_memneq(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) { dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ); dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0); } @@ -3790,10 +3789,6 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, break; case STATUSTYPE_TABLE: { - __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100; - - watermark_percentage += ic->journal_entries / 2; - do_div(watermark_percentage, ic->journal_entries); arg_count = 3; arg_count += !!ic->meta_dev; arg_count += ic->sectors_per_block != 1; @@ -3826,6 +3821,10 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors); DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors); if (ic->mode == 'J') { + __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100; + + watermark_percentage += ic->journal_entries / 2; + do_div(watermark_percentage, ic->journal_entries); DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage); DMEMIT(" commit_time:%u", ic->autocommit_msec); } @@ -5081,16 +5080,19 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages); if (!ic->recalc_bitmap) { + ti->error = "Could not allocate memory for bitmap"; r = -ENOMEM; goto bad; } ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages); if (!ic->may_write_bitmap) { + ti->error = "Could not allocate memory for bitmap"; r = -ENOMEM; goto bad; } ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL); if (!ic->bbs) { + ti->error = "Could not allocate memory for bitmap"; r = -ENOMEM; goto bad; } diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c index 80628ae93fbac..5a74b3a85ec43 100644 --- a/drivers/md/dm-vdo/dedupe.c +++ b/drivers/md/dm-vdo/dedupe.c @@ -2178,6 +2178,7 @@ static int initialize_index(struct vdo *vdo, struct hash_zones *zones) vdo_set_dedupe_index_timeout_interval(vdo_dedupe_index_timeout_interval); vdo_set_dedupe_index_min_timer_interval(vdo_dedupe_index_min_timer_interval); + spin_lock_init(&zones->lock); /* * Since we will save up the timeouts that would have been reported but were ratelimited, diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index c142ec5458b70..53ba0fbdf495c 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -796,6 +796,13 @@ static int verity_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_SUBMITTED; } +static void verity_postsuspend(struct dm_target *ti) +{ + struct dm_verity *v = ti->private; + flush_workqueue(v->verify_wq); + dm_bufio_client_reset(v->bufio); +} + /* * Status: V (valid) or C (corruption found) */ @@ -1766,6 +1773,7 @@ static struct target_type verity_target = { .ctr = verity_ctr, .dtr = verity_dtr, .map = verity_map, + .postsuspend = verity_postsuspend, .status = verity_status, .prepare_ioctl = verity_prepare_ioctl, .iterate_devices = verity_iterate_devices, diff 
--git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 2e3087556adb3..fbb4f57010da6 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -2355,9 +2355,8 @@ static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats) if (!bitmap) return -ENOENT; - if (bitmap->mddev->bitmap_info.external) - return -ENOENT; - if (!bitmap->storage.sb_page) /* no superblock */ + if (!bitmap->mddev->bitmap_info.external && + !bitmap->storage.sb_page) return -EINVAL; sb = kmap_local_page(bitmap->storage.sb_page); stats->sync_size = le64_to_cpu(sb->sync_size); diff --git a/drivers/md/md.c b/drivers/md/md.c index fff28aea23c89..7809b951e09aa 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -629,6 +629,12 @@ static void __mddev_put(struct mddev *mddev) queue_work(md_misc_wq, &mddev->del_work); } +static void mddev_put_locked(struct mddev *mddev) +{ + if (atomic_dec_and_test(&mddev->active)) + __mddev_put(mddev); +} + void mddev_put(struct mddev *mddev) { if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) @@ -8461,9 +8467,7 @@ static int md_seq_show(struct seq_file *seq, void *v) if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs)) status_unused(seq); - if (atomic_dec_and_test(&mddev->active)) - __mddev_put(mddev); - + mddev_put_locked(mddev); return 0; } @@ -9886,11 +9890,11 @@ EXPORT_SYMBOL_GPL(rdev_clear_badblocks); static int md_notify_reboot(struct notifier_block *this, unsigned long code, void *x) { - struct mddev *mddev, *n; + struct mddev *mddev; int need_delay = 0; spin_lock(&all_mddevs_lock); - list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { + list_for_each_entry(mddev, &all_mddevs, all_mddevs) { if (!mddev_get(mddev)) continue; spin_unlock(&all_mddevs_lock); @@ -9902,8 +9906,8 @@ static int md_notify_reboot(struct notifier_block *this, mddev_unlock(mddev); } need_delay = 1; - mddev_put(mddev); spin_lock(&all_mddevs_lock); + mddev_put_locked(mddev); } spin_unlock(&all_mddevs_lock); @@ -10236,7 +10240,7 @@ void md_autostart_arrays(int part) static __exit void md_exit(void) { - struct mddev *mddev, *n; + struct mddev *mddev; int delay = 1; unregister_blkdev(MD_MAJOR,"md"); @@ -10257,7 +10261,7 @@ static __exit void md_exit(void) remove_proc_entry("mdstat", NULL); spin_lock(&all_mddevs_lock); - list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { + list_for_each_entry(mddev, &all_mddevs, all_mddevs) { if (!mddev_get(mddev)) continue; spin_unlock(&all_mddevs_lock); @@ -10269,8 +10273,8 @@ static __exit void md_exit(void) * the mddev for destruction by a workqueue, and the * destroy_workqueue() below will wait for that to complete. 
*/ - mddev_put(mddev); spin_lock(&all_mddevs_lock); + mddev_put_locked(mddev); } spin_unlock(&all_mddevs_lock); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 32d5875247784..31bea72bcb01a 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -385,10 +385,8 @@ static int raid0_set_limits(struct mddev *mddev) lim.io_min = mddev->chunk_sectors << 9; lim.io_opt = lim.io_min * mddev->raid_disks; err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); - if (err) { - queue_limits_cancel_update(mddev->gendisk->queue); + if (err) return err; - } return queue_limits_set(mddev->gendisk->queue, &lim); } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index d83fe3b3abc00..8a994a1975ca7 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -3171,10 +3171,8 @@ static int raid1_set_limits(struct mddev *mddev) md_init_stacking_limits(&lim); lim.max_write_zeroes_sectors = 0; err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); - if (err) { - queue_limits_cancel_update(mddev->gendisk->queue); + if (err) return err; - } return queue_limits_set(mddev->gendisk->queue, &lim); } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index daf42acc4fb6f..cc194f6ec18da 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1687,6 +1687,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio) * The discard bio returns only first r10bio finishes */ if (first_copy) { + md_account_bio(mddev, &bio); r10_bio->master_bio = bio; set_bit(R10BIO_Discard, &r10_bio->state); first_copy = false; @@ -3963,10 +3964,8 @@ static int raid10_set_queue_limits(struct mddev *mddev) lim.io_min = mddev->chunk_sectors << 9; lim.io_opt = lim.io_min * raid10_nr_stripes(conf); err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); - if (err) { - queue_limits_cancel_update(mddev->gendisk->queue); + if (err) return err; - } return queue_limits_set(mddev->gendisk->queue, &lim); } diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c index 44d8fe8b220e7..9b1a650ed055c 100644 --- a/drivers/media/common/siano/smsdvb-main.c +++ b/drivers/media/common/siano/smsdvb-main.c @@ -1243,6 +1243,8 @@ static int __init smsdvb_module_init(void) smsdvb_debugfs_register(); rc = smscore_register_hotplug(smsdvb_hotplug); + if (rc) + smsdvb_debugfs_unregister(); pr_debug("\n"); diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index 2f5165918163d..cfe59c3255f70 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -2701,8 +2701,11 @@ static void dib8000_set_dds(struct dib8000_state *state, s32 offset_khz) u8 ratio; if (state->revision == 0x8090) { + u32 internal = dib8000_read32(state, 23) / 1000; + ratio = 4; - unit_khz_dds_val = (1<<26) / (dib8000_read32(state, 23) / 1000); + + unit_khz_dds_val = (1<<26) / (internal ?: 1); if (offset_khz < 0) dds = (1 << 26) - (abs_offset_khz * unit_khz_dds_val); else diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h index 9bc0121d0eff3..2c1db5968af8e 100644 --- a/drivers/media/i2c/adv748x/adv748x.h +++ b/drivers/media/i2c/adv748x/adv748x.h @@ -320,7 +320,7 @@ struct adv748x_state { /* Free run pattern select */ #define ADV748X_SDP_FRP 0x14 -#define ADV748X_SDP_FRP_MASK GENMASK(3, 1) +#define ADV748X_SDP_FRP_MASK GENMASK(2, 0) /* Saturation */ #define ADV748X_SDP_SD_SAT_U 0xe3 /* user_map_rw_reg_e3 */ diff --git a/drivers/media/i2c/ccs/ccs-core.c 
b/drivers/media/i2c/ccs/ccs-core.c index cb21df46bab16..4b7d8039b1c9f 100644 --- a/drivers/media/i2c/ccs/ccs-core.c +++ b/drivers/media/i2c/ccs/ccs-core.c @@ -3562,6 +3562,7 @@ static int ccs_probe(struct i2c_client *client) out_disable_runtime_pm: pm_runtime_put_noidle(&client->dev); pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); out_cleanup: ccs_cleanup(sensor); @@ -3591,9 +3592,10 @@ static void ccs_remove(struct i2c_client *client) v4l2_async_unregister_subdev(subdev); pm_runtime_disable(&client->dev); - if (!pm_runtime_status_suspended(&client->dev)) + if (!pm_runtime_status_suspended(&client->dev)) { ccs_power_off(&client->dev); - pm_runtime_set_suspended(&client->dev); + pm_runtime_set_suspended(&client->dev); + } for (i = 0; i < sensor->ssds_used; i++) v4l2_device_unregister_subdev(&sensor->ssds[i].sd); diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c index f31f9886c924e..0e89aff9c664d 100644 --- a/drivers/media/i2c/hi556.c +++ b/drivers/media/i2c/hi556.c @@ -1230,12 +1230,13 @@ static int hi556_check_hwcfg(struct device *dev) ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk); if (ret) { dev_err(dev, "can't get clock frequency"); - return ret; + goto check_hwcfg_error; } if (mclk != HI556_MCLK) { dev_err(dev, "external clock %d is not supported", mclk); - return -EINVAL; + ret = -EINVAL; + goto check_hwcfg_error; } if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) { diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c index 4962cfe7c83d6..6a393e18267f4 100644 --- a/drivers/media/i2c/imx214.c +++ b/drivers/media/i2c/imx214.c @@ -1075,10 +1075,6 @@ static int imx214_probe(struct i2c_client *client) */ imx214_power_on(imx214->dev); - pm_runtime_set_active(imx214->dev); - pm_runtime_enable(imx214->dev); - pm_runtime_idle(imx214->dev); - ret = imx214_ctrls_init(imx214); if (ret < 0) goto error_power_off; @@ -1099,22 +1095,30 @@ static int imx214_probe(struct i2c_client *client) imx214_entity_init_state(&imx214->sd, NULL); + pm_runtime_set_active(imx214->dev); + pm_runtime_enable(imx214->dev); + ret = v4l2_async_register_subdev_sensor(&imx214->sd); if (ret < 0) { dev_err(dev, "could not register v4l2 device\n"); goto free_entity; } + pm_runtime_idle(imx214->dev); + return 0; free_entity: + pm_runtime_disable(imx214->dev); + pm_runtime_set_suspended(&client->dev); media_entity_cleanup(&imx214->sd.entity); + free_ctrl: mutex_destroy(&imx214->mutex); v4l2_ctrl_handler_free(&imx214->ctrls); + error_power_off: - pm_runtime_disable(imx214->dev); - regulator_bulk_disable(IMX214_NUM_SUPPLIES, imx214->supplies); + imx214_power_off(imx214->dev); return ret; } @@ -1127,11 +1131,12 @@ static void imx214_remove(struct i2c_client *client) v4l2_async_unregister_subdev(&imx214->sd); media_entity_cleanup(&imx214->sd.entity); v4l2_ctrl_handler_free(&imx214->ctrls); - - pm_runtime_disable(&client->dev); - pm_runtime_set_suspended(&client->dev); - mutex_destroy(&imx214->mutex); + pm_runtime_disable(&client->dev); + if (!pm_runtime_status_suspended(&client->dev)) { + imx214_power_off(imx214->dev); + pm_runtime_set_suspended(&client->dev); + } } static const struct of_device_id imx214_of_match[] = { diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c index e78a80b2bb2e4..906aa314b7f84 100644 --- a/drivers/media/i2c/imx219.c +++ b/drivers/media/i2c/imx219.c @@ -134,10 +134,11 @@ /* Pixel rate is fixed for all the modes */ #define IMX219_PIXEL_RATE 182400000 -#define IMX219_PIXEL_RATE_4LANE 280800000 +#define 
IMX219_PIXEL_RATE_4LANE 281600000 #define IMX219_DEFAULT_LINK_FREQ 456000000 -#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000 +#define IMX219_DEFAULT_LINK_FREQ_4LANE_UNSUPPORTED 363000000 +#define IMX219_DEFAULT_LINK_FREQ_4LANE 364000000 /* IMX219 native and active pixel array size. */ #define IMX219_NATIVE_WIDTH 3296U @@ -169,15 +170,6 @@ static const struct cci_reg_sequence imx219_common_regs[] = { { CCI_REG8(0x30eb), 0x05 }, { CCI_REG8(0x30eb), 0x09 }, - /* PLL Clock Table */ - { IMX219_REG_VTPXCK_DIV, 5 }, - { IMX219_REG_VTSYCK_DIV, 1 }, - { IMX219_REG_PREPLLCK_VT_DIV, 3 }, /* 0x03 = AUTO set */ - { IMX219_REG_PREPLLCK_OP_DIV, 3 }, /* 0x03 = AUTO set */ - { IMX219_REG_PLL_VT_MPY, 57 }, - { IMX219_REG_OPSYCK_DIV, 1 }, - { IMX219_REG_PLL_OP_MPY, 114 }, - /* Undocumented registers */ { CCI_REG8(0x455e), 0x00 }, { CCI_REG8(0x471e), 0x4b }, @@ -202,12 +194,45 @@ static const struct cci_reg_sequence imx219_common_regs[] = { { IMX219_REG_EXCK_FREQ, IMX219_EXCK_FREQ(IMX219_XCLK_FREQ / 1000000) }, }; +static const struct cci_reg_sequence imx219_2lane_regs[] = { + /* PLL Clock Table */ + { IMX219_REG_VTPXCK_DIV, 5 }, + { IMX219_REG_VTSYCK_DIV, 1 }, + { IMX219_REG_PREPLLCK_VT_DIV, 3 }, /* 0x03 = AUTO set */ + { IMX219_REG_PREPLLCK_OP_DIV, 3 }, /* 0x03 = AUTO set */ + { IMX219_REG_PLL_VT_MPY, 57 }, + { IMX219_REG_OPSYCK_DIV, 1 }, + { IMX219_REG_PLL_OP_MPY, 114 }, + + /* 2-Lane CSI Mode */ + { IMX219_REG_CSI_LANE_MODE, IMX219_CSI_2_LANE_MODE }, +}; + +static const struct cci_reg_sequence imx219_4lane_regs[] = { + /* PLL Clock Table */ + { IMX219_REG_VTPXCK_DIV, 5 }, + { IMX219_REG_VTSYCK_DIV, 1 }, + { IMX219_REG_PREPLLCK_VT_DIV, 3 }, /* 0x03 = AUTO set */ + { IMX219_REG_PREPLLCK_OP_DIV, 3 }, /* 0x03 = AUTO set */ + { IMX219_REG_PLL_VT_MPY, 88 }, + { IMX219_REG_OPSYCK_DIV, 1 }, + { IMX219_REG_PLL_OP_MPY, 91 }, + + /* 4-Lane CSI Mode */ + { IMX219_REG_CSI_LANE_MODE, IMX219_CSI_4_LANE_MODE }, +}; + static const s64 imx219_link_freq_menu[] = { IMX219_DEFAULT_LINK_FREQ, }; static const s64 imx219_link_freq_4lane_menu[] = { IMX219_DEFAULT_LINK_FREQ_4LANE, + /* + * This will never be advertised to userspace, but will be used for + * v4l2_link_freq_to_bitmap + */ + IMX219_DEFAULT_LINK_FREQ_4LANE_UNSUPPORTED, }; static const char * const imx219_test_pattern_menu[] = { @@ -663,9 +688,11 @@ static int imx219_set_framefmt(struct imx219 *imx219, static int imx219_configure_lanes(struct imx219 *imx219) { - return cci_write(imx219->regmap, IMX219_REG_CSI_LANE_MODE, - imx219->lanes == 2 ? IMX219_CSI_2_LANE_MODE : - IMX219_CSI_4_LANE_MODE, NULL); + /* Write the appropriate PLL settings for the number of MIPI lanes */ + return cci_multi_reg_write(imx219->regmap, + imx219->lanes == 2 ? imx219_2lane_regs : imx219_4lane_regs, + imx219->lanes == 2 ? 
ARRAY_SIZE(imx219_2lane_regs) : + ARRAY_SIZE(imx219_4lane_regs), NULL); }; static int imx219_start_streaming(struct imx219 *imx219, @@ -1042,6 +1069,7 @@ static int imx219_check_hwcfg(struct device *dev, struct imx219 *imx219) struct v4l2_fwnode_endpoint ep_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY }; + unsigned long link_freq_bitmap; int ret = -EINVAL; endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL); @@ -1063,23 +1091,40 @@ static int imx219_check_hwcfg(struct device *dev, struct imx219 *imx219) imx219->lanes = ep_cfg.bus.mipi_csi2.num_data_lanes; /* Check the link frequency set in device tree */ - if (!ep_cfg.nr_of_link_frequencies) { - dev_err_probe(dev, -EINVAL, - "link-frequency property not found in DT\n"); - goto error_out; + switch (imx219->lanes) { + case 2: + ret = v4l2_link_freq_to_bitmap(dev, + ep_cfg.link_frequencies, + ep_cfg.nr_of_link_frequencies, + imx219_link_freq_menu, + ARRAY_SIZE(imx219_link_freq_menu), + &link_freq_bitmap); + break; + case 4: + ret = v4l2_link_freq_to_bitmap(dev, + ep_cfg.link_frequencies, + ep_cfg.nr_of_link_frequencies, + imx219_link_freq_4lane_menu, + ARRAY_SIZE(imx219_link_freq_4lane_menu), + &link_freq_bitmap); + + if (!ret && (link_freq_bitmap & BIT(1))) { + dev_warn(dev, "Link frequency of %d not supported, but has been incorrectly advertised previously\n", + IMX219_DEFAULT_LINK_FREQ_4LANE_UNSUPPORTED); + dev_warn(dev, "Using link frequency of %d\n", + IMX219_DEFAULT_LINK_FREQ_4LANE); + link_freq_bitmap |= BIT(0); + } + break; } - if (ep_cfg.nr_of_link_frequencies != 1 || - (ep_cfg.link_frequencies[0] != ((imx219->lanes == 2) ? - IMX219_DEFAULT_LINK_FREQ : IMX219_DEFAULT_LINK_FREQ_4LANE))) { + if (ret || !(link_freq_bitmap & BIT(0))) { + ret = -EINVAL; dev_err_probe(dev, -EINVAL, "Link frequency not supported: %lld\n", ep_cfg.link_frequencies[0]); - goto error_out; } - ret = 0; - error_out: v4l2_fwnode_endpoint_free(&ep_cfg); fwnode_handle_put(endpoint); @@ -1186,6 +1231,9 @@ static int imx219_probe(struct i2c_client *client) goto error_media_entity; } + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + ret = v4l2_async_register_subdev_sensor(&imx219->sd); if (ret < 0) { dev_err_probe(dev, ret, @@ -1193,15 +1241,14 @@ static int imx219_probe(struct i2c_client *client) goto error_subdev_cleanup; } - /* Enable runtime PM and turn off the device */ - pm_runtime_set_active(dev); - pm_runtime_enable(dev); pm_runtime_idle(dev); return 0; error_subdev_cleanup: v4l2_subdev_cleanup(&imx219->sd); + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); error_media_entity: media_entity_cleanup(&imx219->sd.entity); @@ -1226,9 +1273,10 @@ static void imx219_remove(struct i2c_client *client) imx219_free_controls(imx219); pm_runtime_disable(&client->dev); - if (!pm_runtime_status_suspended(&client->dev)) + if (!pm_runtime_status_suspended(&client->dev)) { imx219_power_off(&client->dev); - pm_runtime_set_suspended(&client->dev); + pm_runtime_set_suspended(&client->dev); + } } static const struct of_device_id imx219_dt_ids[] = { diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c index dd1b4ff983dcb..701840f4a5cc0 100644 --- a/drivers/media/i2c/imx319.c +++ b/drivers/media/i2c/imx319.c @@ -2442,17 +2442,19 @@ static int imx319_probe(struct i2c_client *client) if (full_power) pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); - pm_runtime_idle(&client->dev); ret = v4l2_async_register_subdev_sensor(&imx319->sd); if (ret < 0) goto error_media_entity_pm; + pm_runtime_idle(&client->dev); + return 0; 
error_media_entity_pm: pm_runtime_disable(&client->dev); - pm_runtime_set_suspended(&client->dev); + if (full_power) + pm_runtime_set_suspended(&client->dev); media_entity_cleanup(&imx319->sd.entity); error_handler_free: @@ -2474,7 +2476,8 @@ static void imx319_remove(struct i2c_client *client) v4l2_ctrl_handler_free(sd->ctrl_handler); pm_runtime_disable(&client->dev); - pm_runtime_set_suspended(&client->dev); + if (!pm_runtime_status_suspended(&client->dev)) + pm_runtime_set_suspended(&client->dev); mutex_destroy(&imx319->mutex); } diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c index 30f61e04ecaf5..3226888d77e9c 100644 --- a/drivers/media/i2c/ov7251.c +++ b/drivers/media/i2c/ov7251.c @@ -922,6 +922,8 @@ static int ov7251_set_power_on(struct device *dev) return ret; } + usleep_range(1000, 1100); + gpiod_set_value_cansleep(ov7251->enable_gpio, 1); /* wait at least 65536 external clock cycles */ @@ -1696,7 +1698,7 @@ static int ov7251_probe(struct i2c_client *client) return PTR_ERR(ov7251->analog_regulator); } - ov7251->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH); + ov7251->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(ov7251->enable_gpio)) { dev_err(dev, "cannot get enable gpio\n"); return PTR_ERR(ov7251->enable_gpio); diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c index b37561352ead3..48388c0c851ca 100644 --- a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c +++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c @@ -1296,6 +1296,7 @@ int ipu6_isys_video_init(struct ipu6_isys_video *av) av->vdev.release = video_device_release_empty; av->vdev.fops = &isys_fops; av->vdev.v4l2_dev = &av->isys->v4l2_dev; + av->vdev.dev_parent = &av->isys->adev->isp->pdev->dev; if (!av->vdev.ioctl_ops) av->vdev.ioctl_ops = &ipu6_v4l2_ioctl_ops; av->vdev.queue = &av->aq.vbq; diff --git a/drivers/media/pci/mgb4/mgb4_cmt.c b/drivers/media/pci/mgb4/mgb4_cmt.c index a25b68403bc60..c22ef51436ed5 100644 --- a/drivers/media/pci/mgb4/mgb4_cmt.c +++ b/drivers/media/pci/mgb4/mgb4_cmt.c @@ -135,8 +135,8 @@ static const u16 cmt_vals_out[][15] = { }; static const u16 cmt_vals_in[][13] = { - {0x1082, 0x0000, 0x5104, 0x0000, 0x11C7, 0x0000, 0x1041, 0x02BC, 0x7C01, 0xFFE9, 0x9900, 0x9908, 0x8100}, {0x1104, 0x0000, 0x9208, 0x0000, 0x138E, 0x0000, 0x1041, 0x015E, 0x7C01, 0xFFE9, 0x0100, 0x0908, 0x1000}, + {0x1082, 0x0000, 0x5104, 0x0000, 0x11C7, 0x0000, 0x1041, 0x02BC, 0x7C01, 0xFFE9, 0x9900, 0x9908, 0x8100}, }; static const u32 cmt_addrs_out[][15] = { @@ -206,10 +206,11 @@ u32 mgb4_cmt_set_vout_freq(struct mgb4_vout_dev *voutdev, unsigned int freq) mgb4_write_reg(video, regs->config, 0x1 | (config & ~0x3)); + mgb4_mask_reg(video, regs->config, 0x100, 0x100); + for (i = 0; i < ARRAY_SIZE(cmt_addrs_out[0]); i++) mgb4_write_reg(&voutdev->mgbdev->cmt, addr[i], reg_set[i]); - mgb4_mask_reg(video, regs->config, 0x100, 0x100); mgb4_mask_reg(video, regs->config, 0x100, 0x0); mgb4_write_reg(video, regs->config, config & ~0x1); @@ -236,10 +237,11 @@ void mgb4_cmt_set_vin_freq_range(struct mgb4_vin_dev *vindev, mgb4_write_reg(video, regs->config, 0x1 | (config & ~0x3)); + mgb4_mask_reg(video, regs->config, 0x1000, 0x1000); + for (i = 0; i < ARRAY_SIZE(cmt_addrs_in[0]); i++) mgb4_write_reg(&vindev->mgbdev->cmt, addr[i], reg_set[i]); - mgb4_mask_reg(video, regs->config, 0x1000, 0x1000); mgb4_mask_reg(video, regs->config, 0x1000, 0x0); mgb4_write_reg(video, regs->config, config & ~0x1); diff --git 
a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c index 88c36eb6174ad..9ca4e2f94647b 100644 --- a/drivers/media/platform/allegro-dvt/allegro-core.c +++ b/drivers/media/platform/allegro-dvt/allegro-core.c @@ -3914,6 +3914,7 @@ static int allegro_probe(struct platform_device *pdev) if (ret < 0) { v4l2_err(&dev->v4l2_dev, "failed to request firmware: %d\n", ret); + v4l2_device_unregister(&dev->v4l2_dev); return ret; } diff --git a/drivers/media/platform/chips-media/wave5/wave5-hw.c b/drivers/media/platform/chips-media/wave5/wave5-hw.c index c89aafabc7421..710311d851138 100644 --- a/drivers/media/platform/chips-media/wave5/wave5-hw.c +++ b/drivers/media/platform/chips-media/wave5/wave5-hw.c @@ -576,7 +576,7 @@ int wave5_vpu_build_up_dec_param(struct vpu_instance *inst, vpu_write_reg(inst->dev, W5_CMD_NUM_CQ_DEPTH_M1, WAVE521_COMMAND_QUEUE_DEPTH - 1); } - + vpu_write_reg(inst->dev, W5_CMD_ERR_CONCEAL, 0); ret = send_firmware_command(inst, W5_CREATE_INSTANCE, true, NULL, NULL); if (ret) { wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_work); diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c index 0c5c9a8de91fa..e238447c88bbf 100644 --- a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c +++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c @@ -1424,10 +1424,24 @@ static int wave5_vpu_dec_start_streaming(struct vb2_queue *q, unsigned int count if (ret) goto free_bitstream_vbuf; } else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + struct dec_initial_info *initial_info = + &inst->codec_info->dec_info.initial_info; + if (inst->state == VPU_INST_STATE_STOP) ret = switch_state(inst, VPU_INST_STATE_INIT_SEQ); if (ret) goto return_buffers; + + if (inst->state == VPU_INST_STATE_INIT_SEQ && + inst->dev->product_code == WAVE521C_CODE) { + if (initial_info->luma_bitdepth != 8) { + dev_info(inst->dev->dev, "%s: no support for %d bit depth", + __func__, initial_info->luma_bitdepth); + ret = -EINVAL; + goto return_buffers; + } + } + } return ret; @@ -1446,6 +1460,16 @@ static int streamoff_output(struct vb2_queue *q) struct vb2_v4l2_buffer *buf; int ret; dma_addr_t new_rd_ptr; + struct dec_output_info dec_info; + unsigned int i; + + for (i = 0; i < v4l2_m2m_num_dst_bufs_ready(m2m_ctx); i++) { + ret = wave5_vpu_dec_set_disp_flag(inst, i); + if (ret) + dev_dbg(inst->dev->dev, + "%s: Setting display flag of buf index: %u, fail: %d\n", + __func__, i, ret); + } while ((buf = v4l2_m2m_src_buf_remove(m2m_ctx))) { dev_dbg(inst->dev->dev, "%s: (Multiplanar) buf type %4u | index %4u\n", @@ -1453,6 +1477,11 @@ static int streamoff_output(struct vb2_queue *q) v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR); } + while (wave5_vpu_dec_get_output_info(inst, &dec_info) == 0) { + if (dec_info.index_frame_display >= 0) + wave5_vpu_dec_set_disp_flag(inst, dec_info.index_frame_display); + } + ret = wave5_vpu_flush_instance(inst); if (ret) return ret; @@ -1535,7 +1564,7 @@ static void wave5_vpu_dec_stop_streaming(struct vb2_queue *q) break; if (wave5_vpu_dec_get_output_info(inst, &dec_output_info)) - dev_dbg(inst->dev->dev, "Getting decoding results from fw, fail\n"); + dev_dbg(inst->dev->dev, "there is no output info\n"); } v4l2_m2m_update_stop_streaming_state(m2m_ctx, q); diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.c b/drivers/media/platform/chips-media/wave5/wave5-vpu.c index 7273254ecb034..b13c5cd46d7e4 100644 --- 
a/drivers/media/platform/chips-media/wave5/wave5-vpu.c +++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.c @@ -54,12 +54,12 @@ static void wave5_vpu_handle_irq(void *dev_id) struct vpu_device *dev = dev_id; irq_reason = wave5_vdi_read_register(dev, W5_VPU_VINT_REASON); + seq_done = wave5_vdi_read_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO); + cmd_done = wave5_vdi_read_register(dev, W5_RET_QUEUE_CMD_DONE_INST); wave5_vdi_write_register(dev, W5_VPU_VINT_REASON_CLR, irq_reason); wave5_vdi_write_register(dev, W5_VPU_VINT_CLEAR, 0x1); list_for_each_entry(inst, &dev->instances, list) { - seq_done = wave5_vdi_read_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO); - cmd_done = wave5_vdi_read_register(dev, W5_RET_QUEUE_CMD_DONE_INST); if (irq_reason & BIT(INT_WAVE5_INIT_SEQ) || irq_reason & BIT(INT_WAVE5_ENC_SET_PARAM)) { diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c index 1a3efb638dde5..65fdabcd9d292 100644 --- a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c +++ b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c @@ -73,6 +73,16 @@ int wave5_vpu_flush_instance(struct vpu_instance *inst) inst->type == VPU_INST_TYPE_DEC ? "DECODER" : "ENCODER", inst->id); mutex_unlock(&inst->dev->hw_lock); return -ETIMEDOUT; + } else if (ret == -EBUSY) { + struct dec_output_info dec_info; + + mutex_unlock(&inst->dev->hw_lock); + wave5_vpu_dec_get_output_info(inst, &dec_info); + ret = mutex_lock_interruptible(&inst->dev->hw_lock); + if (ret) + return ret; + if (dec_info.index_frame_display > 0) + wave5_vpu_dec_set_disp_flag(inst, dec_info.index_frame_display); } } while (ret != 0); mutex_unlock(&inst->dev->hw_lock); diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c index ff23b225db705..1b0bc47355c05 100644 --- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c +++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c @@ -79,8 +79,11 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use } fw = devm_kzalloc(&plat_dev->dev, sizeof(*fw), GFP_KERNEL); - if (!fw) + if (!fw) { + scp_put(scp); return ERR_PTR(-ENOMEM); + } + fw->type = SCP; fw->ops = &mtk_vcodec_rproc_msg; fw->scp = scp; diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c index eea709d938209..47c302745c1de 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c @@ -1188,7 +1188,8 @@ static int vdec_vp9_slice_setup_lat(struct vdec_vp9_slice_instance *instance, return ret; } -static +/* clang stack usage explodes if this is inlined */ +static noinline_for_stack void vdec_vp9_slice_map_counts_eob_coef(unsigned int i, unsigned int j, unsigned int k, struct vdec_vp9_slice_frame_counts *counts, struct v4l2_vp9_frame_symbol_counts *counts_helper) diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c index f8145998fcaf7..8522f71fc901d 100644 --- a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c +++ b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c @@ -594,7 +594,11 @@ static int h264_enc_init(struct mtk_vcodec_enc_ctx *ctx) inst->ctx = ctx; inst->vpu_inst.ctx = ctx; 
- inst->vpu_inst.id = is_ext ? SCP_IPI_VENC_H264 : IPI_VENC_H264; + if (is_ext) + inst->vpu_inst.id = SCP_IPI_VENC_H264; + else + inst->vpu_inst.id = IPI_VENC_H264; + inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx->dev->reg_base, VENC_SYS); ret = vpu_enc_init(&inst->vpu_inst); diff --git a/drivers/media/platform/nuvoton/npcm-video.c b/drivers/media/platform/nuvoton/npcm-video.c index db454c9d2641f..e0dee768a3be1 100644 --- a/drivers/media/platform/nuvoton/npcm-video.c +++ b/drivers/media/platform/nuvoton/npcm-video.c @@ -1650,8 +1650,8 @@ static int npcm_video_setup_video(struct npcm_video *video) static int npcm_video_ece_init(struct npcm_video *video) { + struct device_node *ece_node __free(device_node) = NULL; struct device *dev = video->dev; - struct device_node *ece_node; struct platform_device *ece_pdev; void __iomem *regs; @@ -1671,7 +1671,7 @@ static int npcm_video_ece_init(struct npcm_video *video) dev_err(dev, "Failed to find ECE device\n"); return -ENODEV; } - of_node_put(ece_node); + struct device *ece_dev __free(put_device) = &ece_pdev->dev; regs = devm_platform_ioremap_resource(ece_pdev, 0); if (IS_ERR(regs)) { @@ -1686,7 +1686,7 @@ static int npcm_video_ece_init(struct npcm_video *video) return PTR_ERR(video->ece.regmap); } - video->ece.reset = devm_reset_control_get(&ece_pdev->dev, NULL); + video->ece.reset = devm_reset_control_get(ece_dev, NULL); if (IS_ERR(video->ece.reset)) { dev_err(dev, "Failed to get ECE reset control in DTS\n"); return PTR_ERR(video->ece.reset); diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c index 3df241dc3a118..1b3db2caa99fe 100644 --- a/drivers/media/platform/qcom/venus/hfi_parser.c +++ b/drivers/media/platform/qcom/venus/hfi_parser.c @@ -19,6 +19,8 @@ static void init_codecs(struct venus_core *core) struct hfi_plat_caps *caps = core->caps, *cap; unsigned long bit; + core->codecs_count = 0; + if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM) return; @@ -62,7 +64,7 @@ fill_buf_mode(struct hfi_plat_caps *cap, const void *data, unsigned int num) cap->cap_bufs_mode_dynamic = true; } -static void +static int parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data) { struct hfi_buffer_alloc_mode_supported *mode = data; @@ -70,7 +72,7 @@ parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data) u32 *type; if (num_entries > MAX_ALLOC_MODE_ENTRIES) - return; + return -EINVAL; type = mode->data; @@ -82,6 +84,8 @@ parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data) type++; } + + return sizeof(*mode); } static void fill_profile_level(struct hfi_plat_caps *cap, const void *data, @@ -96,7 +100,7 @@ static void fill_profile_level(struct hfi_plat_caps *cap, const void *data, cap->num_pl += num; } -static void +static int parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data) { struct hfi_profile_level_supported *pl = data; @@ -104,12 +108,14 @@ parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data) struct hfi_profile_level pl_arr[HFI_MAX_PROFILE_COUNT] = {}; if (pl->profile_count > HFI_MAX_PROFILE_COUNT) - return; + return -EINVAL; memcpy(pl_arr, proflevel, pl->profile_count * sizeof(*proflevel)); for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain, fill_profile_level, pl_arr, pl->profile_count); + + return pl->profile_count * sizeof(*proflevel) + sizeof(u32); } static void @@ -124,7 +130,7 @@ fill_caps(struct hfi_plat_caps *cap, const 
void *data, unsigned int num) cap->num_caps += num; } -static void +static int parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data) { struct hfi_capabilities *caps = data; @@ -133,12 +139,14 @@ parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data) struct hfi_capability caps_arr[MAX_CAP_ENTRIES] = {}; if (num_caps > MAX_CAP_ENTRIES) - return; + return -EINVAL; memcpy(caps_arr, cap, num_caps * sizeof(*cap)); for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain, fill_caps, caps_arr, num_caps); + + return sizeof(*caps); } static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts, @@ -153,7 +161,7 @@ static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts, cap->num_fmts += num_fmts; } -static void +static int parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data) { struct hfi_uncompressed_format_supported *fmt = data; @@ -162,7 +170,8 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data) struct raw_formats rawfmts[MAX_FMT_ENTRIES] = {}; u32 entries = fmt->format_entries; unsigned int i = 0; - u32 num_planes; + u32 num_planes = 0; + u32 size; while (entries) { num_planes = pinfo->num_planes; @@ -172,7 +181,7 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data) i++; if (i >= MAX_FMT_ENTRIES) - return; + return -EINVAL; if (pinfo->num_planes > MAX_PLANES) break; @@ -184,9 +193,13 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data) for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain, fill_raw_fmts, rawfmts, i); + size = fmt->format_entries * (sizeof(*constr) * num_planes + 2 * sizeof(u32)) + + 2 * sizeof(u32); + + return size; } -static void parse_codecs(struct venus_core *core, void *data) +static int parse_codecs(struct venus_core *core, void *data) { struct hfi_codec_supported *codecs = data; @@ -198,21 +211,27 @@ static void parse_codecs(struct venus_core *core, void *data) core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK; core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC; } + + return sizeof(*codecs); } -static void parse_max_sessions(struct venus_core *core, const void *data) +static int parse_max_sessions(struct venus_core *core, const void *data) { const struct hfi_max_sessions_supported *sessions = data; core->max_sessions_supported = sessions->max_sessions; + + return sizeof(*sessions); } -static void parse_codecs_mask(u32 *codecs, u32 *domain, void *data) +static int parse_codecs_mask(u32 *codecs, u32 *domain, void *data) { struct hfi_codec_mask_supported *mask = data; *codecs = mask->codecs; *domain = mask->video_domains; + + return sizeof(*mask); } static void parser_init(struct venus_inst *inst, u32 *codecs, u32 *domain) @@ -281,8 +300,9 @@ static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst) u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf, u32 size) { - unsigned int words_count = size >> 2; - u32 *word = buf, *data, codecs = 0, domain = 0; + u32 *words = buf, *payload, codecs = 0, domain = 0; + u32 *frame_size = buf + size; + u32 rem_bytes = size; int ret; ret = hfi_platform_parser(core, inst); @@ -299,38 +319,66 @@ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf, memset(core->caps, 0, sizeof(core->caps)); } - while (words_count) { - data = word + 1; + while (words < frame_size) { + payload = words + 1; - switch (*word) { + switch (*words) { case HFI_PROPERTY_PARAM_CODEC_SUPPORTED: - parse_codecs(core, data); + if 
(rem_bytes <= sizeof(struct hfi_codec_supported)) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + ret = parse_codecs(core, payload); + if (ret < 0) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + init_codecs(core); break; case HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED: - parse_max_sessions(core, data); + if (rem_bytes <= sizeof(struct hfi_max_sessions_supported)) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + ret = parse_max_sessions(core, payload); break; case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED: - parse_codecs_mask(&codecs, &domain, data); + if (rem_bytes <= sizeof(struct hfi_codec_mask_supported)) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + ret = parse_codecs_mask(&codecs, &domain, payload); break; case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED: - parse_raw_formats(core, codecs, domain, data); + if (rem_bytes <= sizeof(struct hfi_uncompressed_format_supported)) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + ret = parse_raw_formats(core, codecs, domain, payload); break; case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED: - parse_caps(core, codecs, domain, data); + if (rem_bytes <= sizeof(struct hfi_capabilities)) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + ret = parse_caps(core, codecs, domain, payload); break; case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED: - parse_profile_level(core, codecs, domain, data); + if (rem_bytes <= sizeof(struct hfi_profile_level_supported)) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + ret = parse_profile_level(core, codecs, domain, payload); break; case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED: - parse_alloc_mode(core, codecs, domain, data); + if (rem_bytes <= sizeof(struct hfi_buffer_alloc_mode_supported)) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + ret = parse_alloc_mode(core, codecs, domain, payload); break; default: + ret = sizeof(u32); break; } - word++; - words_count--; + if (ret < 0) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + words += ret / sizeof(u32); + rem_bytes -= ret; } if (!core->max_sessions_supported) diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c index f9437b6412b91..ab93757fff4b3 100644 --- a/drivers/media/platform/qcom/venus/hfi_venus.c +++ b/drivers/media/platform/qcom/venus/hfi_venus.c @@ -187,6 +187,9 @@ static int venus_write_queue(struct venus_hfi_device *hdev, /* ensure rd/wr indices's are read from memory */ rmb(); + if (qsize > IFACEQ_QUEUE_SIZE / 4) + return -EINVAL; + if (wr_idx >= rd_idx) empty_space = qsize - (wr_idx - rd_idx); else @@ -255,6 +258,9 @@ static int venus_read_queue(struct venus_hfi_device *hdev, wr_idx = qhdr->write_idx; qsize = qhdr->q_size; + if (qsize > IFACEQ_QUEUE_SIZE / 4) + return -EINVAL; + /* make sure data is valid before using it */ rmb(); @@ -1035,18 +1041,26 @@ static void venus_sfr_print(struct venus_hfi_device *hdev) { struct device *dev = hdev->core->dev; struct hfi_sfr *sfr = hdev->sfr.kva; + u32 size; void *p; if (!sfr) return; - p = memchr(sfr->data, '\0', sfr->buf_size); + size = sfr->buf_size; + if (!size) + return; + + if (size > ALIGNED_SFR_SIZE) + size = ALIGNED_SFR_SIZE; + + p = memchr(sfr->data, '\0', size); /* * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates * that Venus is in the process of crashing. 
*/ if (!p) - sfr->data[sfr->buf_size - 1] = '\0'; + sfr->data[size - 1] = '\0'; dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data); } diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c index 11c3d72347572..b2ef3beec5258 100644 --- a/drivers/media/platform/rockchip/rga/rga-hw.c +++ b/drivers/media/platform/rockchip/rga/rga-hw.c @@ -376,7 +376,7 @@ static void rga_cmd_set_dst_info(struct rga_ctx *ctx, * Configure the dest framebuffer base address with pixel offset. */ offsets = rga_get_addr_offset(&ctx->out, offset, dst_x, dst_y, dst_w, dst_h); - dst_offset = rga_lookup_draw_pos(&offsets, mir_mode, rot_mode); + dst_offset = rga_lookup_draw_pos(&offsets, rot_mode, mir_mode); dest[(RGA_DST_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] = dst_offset->y_off; diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c index 73f7af674c01b..0c636090d723d 100644 --- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c +++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c @@ -549,8 +549,9 @@ static void s5p_mfc_enc_calc_src_size_v6(struct s5p_mfc_ctx *ctx) case V4L2_PIX_FMT_NV21M: ctx->stride[0] = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6); ctx->stride[1] = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6); - ctx->luma_size = ctx->stride[0] * ALIGN(ctx->img_height, 16); - ctx->chroma_size = ctx->stride[0] * ALIGN(ctx->img_height / 2, 16); + ctx->luma_size = ALIGN(ctx->stride[0] * ALIGN(ctx->img_height, 16), 256); + ctx->chroma_size = ALIGN(ctx->stride[0] * ALIGN(ctx->img_height / 2, 16), + 256); break; case V4L2_PIX_FMT_YUV420M: case V4L2_PIX_FMT_YVU420M: diff --git a/drivers/media/platform/st/stm32/dma2d/dma2d.c b/drivers/media/platform/st/stm32/dma2d/dma2d.c index 92f1edee58f89..3c64e91260250 100644 --- a/drivers/media/platform/st/stm32/dma2d/dma2d.c +++ b/drivers/media/platform/st/stm32/dma2d/dma2d.c @@ -492,7 +492,8 @@ static void device_run(void *prv) dst->sequence = frm_cap->sequence++; v4l2_m2m_buf_copy_metadata(src, dst, true); - clk_enable(dev->gate); + if (clk_enable(dev->gate)) + goto end; dma2d_config_fg(dev, frm_out, vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0)); diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c index 91101ba88ef01..b2210841a320f 100644 --- a/drivers/media/platform/ti/omap3isp/isp.c +++ b/drivers/media/platform/ti/omap3isp/isp.c @@ -1961,6 +1961,13 @@ static int isp_attach_iommu(struct isp_device *isp) struct dma_iommu_mapping *mapping; int ret; + /* We always want to replace any default mapping from the arch code */ + mapping = to_dma_iommu_mapping(isp->dev); + if (mapping) { + arm_iommu_detach_device(isp->dev); + arm_iommu_release_mapping(mapping); + } + /* * Create the ARM mapping, used by the ARM DMA mapping core to allocate * VAs. This will allocate a corresponding IOMMU domain. 
diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c index 85a44143b3786..0e212198dd65b 100644 --- a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c +++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c @@ -518,6 +518,7 @@ static void set_buffers(struct hantro_ctx *ctx) hantro_reg_write(vpu, &g2_stream_len, src_len); hantro_reg_write(vpu, &g2_strm_buffer_len, src_buf_len); hantro_reg_write(vpu, &g2_strm_start_offset, 0); + hantro_reg_write(vpu, &g2_start_bit, 0); hantro_reg_write(vpu, &g2_write_mvs_e, 1); hantro_write_addr(vpu, G2_TILE_SIZES_ADDR, ctx->hevc_dec.tile_sizes.dma); diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c index 9b209e687f256..d3b48a0dd1f47 100644 --- a/drivers/media/rc/streamzap.c +++ b/drivers/media/rc/streamzap.c @@ -138,39 +138,10 @@ static void sz_push_half_space(struct streamzap_ir *sz, sz_push_full_space(sz, value & SZ_SPACE_MASK); } -/* - * streamzap_callback - usb IRQ handler callback - * - * This procedure is invoked on reception of data from - * the usb remote. - */ -static void streamzap_callback(struct urb *urb) +static void sz_process_ir_data(struct streamzap_ir *sz, int len) { - struct streamzap_ir *sz; unsigned int i; - int len; - if (!urb) - return; - - sz = urb->context; - len = urb->actual_length; - - switch (urb->status) { - case -ECONNRESET: - case -ENOENT: - case -ESHUTDOWN: - /* - * this urb is terminated, clean up. - * sz might already be invalid at this point - */ - dev_err(sz->dev, "urb terminated, status: %d\n", urb->status); - return; - default: - break; - } - - dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len); for (i = 0; i < len; i++) { dev_dbg(sz->dev, "sz->buf_in[%d]: %x\n", i, (unsigned char)sz->buf_in[i]); @@ -219,6 +190,43 @@ static void streamzap_callback(struct urb *urb) } ir_raw_event_handle(sz->rdev); +} + +/* + * streamzap_callback - usb IRQ handler callback + * + * This procedure is invoked on reception of data from + * the usb remote. + */ +static void streamzap_callback(struct urb *urb) +{ + struct streamzap_ir *sz; + int len; + + if (!urb) + return; + + sz = urb->context; + len = urb->actual_length; + + switch (urb->status) { + case 0: + dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len); + sz_process_ir_data(sz, len); + break; + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* + * this urb is terminated, clean up. 
+ * sz might already be invalid at this point + */ + dev_err(sz->dev, "urb terminated, status: %d\n", urb->status); + return; + default: + break; + } + usb_submit_urb(urb, GFP_ATOMIC); } @@ -385,8 +393,8 @@ static void streamzap_disconnect(struct usb_interface *interface) if (!sz) return; - rc_unregister_device(sz->rdev); usb_kill_urb(sz->urb_in); + rc_unregister_device(sz->rdev); usb_free_urb(sz->urb_in); usb_free_coherent(usbdev, sz->buf_in_len, sz->buf_in, sz->dma_in); diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c index 3e3b424b48605..8ca6459286ba6 100644 --- a/drivers/media/test-drivers/vim2m.c +++ b/drivers/media/test-drivers/vim2m.c @@ -1316,9 +1316,6 @@ static int vim2m_probe(struct platform_device *pdev) vfd->v4l2_dev = &dev->v4l2_dev; video_set_drvdata(vfd, dev); - v4l2_info(&dev->v4l2_dev, - "Device registered as /dev/video%d\n", vfd->num); - platform_set_drvdata(pdev, dev); dev->m2m_dev = v4l2_m2m_init(&m2m_ops); @@ -1345,6 +1342,9 @@ static int vim2m_probe(struct platform_device *pdev) goto error_m2m; } + v4l2_info(&dev->v4l2_dev, + "Device registered as /dev/video%d\n", vfd->num); + #ifdef CONFIG_MEDIA_CONTROLLER ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd, MEDIA_ENT_F_PROC_VIDEO_SCALER); diff --git a/drivers/media/test-drivers/vimc/vimc-streamer.c b/drivers/media/test-drivers/vimc/vimc-streamer.c index 807551a5143b7..15d863f97cbf9 100644 --- a/drivers/media/test-drivers/vimc/vimc-streamer.c +++ b/drivers/media/test-drivers/vimc/vimc-streamer.c @@ -59,6 +59,12 @@ static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream) continue; sd = media_entity_to_v4l2_subdev(ved->ent); + /* + * Do not call .s_stream() to stop an already + * stopped/unstarted subdev. + */ + if (!v4l2_subdev_is_streaming(sd)) + continue; v4l2_subdev_call(sd, video, s_stream, 0); } } diff --git a/drivers/media/test-drivers/visl/visl-core.c b/drivers/media/test-drivers/visl/visl-core.c index c46464bcaf2e1..93239391f2cf6 100644 --- a/drivers/media/test-drivers/visl/visl-core.c +++ b/drivers/media/test-drivers/visl/visl-core.c @@ -161,9 +161,15 @@ static const struct visl_ctrl_desc visl_h264_ctrl_descs[] = { }, { .cfg.id = V4L2_CID_STATELESS_H264_DECODE_MODE, + .cfg.min = V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED, + .cfg.max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED, + .cfg.def = V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED, }, { .cfg.id = V4L2_CID_STATELESS_H264_START_CODE, + .cfg.min = V4L2_STATELESS_H264_START_CODE_NONE, + .cfg.max = V4L2_STATELESS_H264_START_CODE_ANNEX_B, + .cfg.def = V4L2_STATELESS_H264_START_CODE_NONE, }, { .cfg.id = V4L2_CID_STATELESS_H264_SLICE_PARAMS, @@ -198,9 +204,15 @@ static const struct visl_ctrl_desc visl_hevc_ctrl_descs[] = { }, { .cfg.id = V4L2_CID_STATELESS_HEVC_DECODE_MODE, + .cfg.min = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED, + .cfg.max = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED, + .cfg.def = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED, }, { .cfg.id = V4L2_CID_STATELESS_HEVC_START_CODE, + .cfg.min = V4L2_STATELESS_HEVC_START_CODE_NONE, + .cfg.max = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B, + .cfg.def = V4L2_STATELESS_HEVC_START_CODE_NONE, }, { .cfg.id = V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS, diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 4d8e00b425f44..a0d683d266471 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -3039,6 +3039,15 @@ static const struct usb_device_id uvc_ids[] = { .bInterfaceProtocol = 0, 
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX | UVC_QUIRK_IGNORE_SELECTOR_UNIT) }, + /* Actions Microelectronics Co. Display capture-UVC05 */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x1de1, + .idProduct = 0xf105, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_DISABLE_AUTOSUSPEND) }, /* NXP Semiconductors IR VIDEO */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index 2cf5dcee0ce80..4d05873892c16 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -764,7 +764,7 @@ bool v4l2_detect_gtf(unsigned int frame_height, u64 num; u32 den; - num = ((image_width * GTF_D_C_PRIME * (u64)hfreq) - + num = (((u64)image_width * GTF_D_C_PRIME * hfreq) - ((u64)image_width * GTF_D_M_PRIME * 1000)); den = (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) * (2 * GTF_CELL_GRAN); @@ -774,7 +774,7 @@ bool v4l2_detect_gtf(unsigned int frame_height, u64 num; u32 den; - num = ((image_width * GTF_S_C_PRIME * (u64)hfreq) - + num = (((u64)image_width * GTF_S_C_PRIME * hfreq) - ((u64)image_width * GTF_S_M_PRIME * 1000)); den = (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) * (2 * GTF_CELL_GRAN); diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index c8a0d82f9c27d..719225c09a4d6 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -2245,26 +2245,6 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, goto err; } - if (of_node_name_eq(child, "nand")) { - /* Warn about older DT blobs with no compatible property */ - if (!of_property_read_bool(child, "compatible")) { - dev_warn(&pdev->dev, - "Incompatible NAND node: missing compatible"); - ret = -EINVAL; - goto err; - } - } - - if (of_node_name_eq(child, "onenand")) { - /* Warn about older DT blobs with no compatible property */ - if (!of_property_read_bool(child, "compatible")) { - dev_warn(&pdev->dev, - "Incompatible OneNAND node: missing compatible"); - ret = -EINVAL; - goto err; - } - } - if (of_match_node(omap_nand_ids, child)) { /* NAND specific setup */ val = 8; diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c index ffdd8de9ec5d7..d99f8922d4ad0 100644 --- a/drivers/memstick/host/rtsx_usb_ms.c +++ b/drivers/memstick/host/rtsx_usb_ms.c @@ -813,6 +813,7 @@ static void rtsx_usb_ms_drv_remove(struct platform_device *pdev) host->eject = true; cancel_work_sync(&host->handle_req); + cancel_delayed_work_sync(&host->poll_card); mutex_lock(&host->host_mutex); if (host->req) { diff --git a/drivers/mfd/ene-kb3930.c b/drivers/mfd/ene-kb3930.c index fa0ad2f14a396..9460a67acb0b5 100644 --- a/drivers/mfd/ene-kb3930.c +++ b/drivers/mfd/ene-kb3930.c @@ -162,7 +162,7 @@ static int kb3930_probe(struct i2c_client *client) devm_gpiod_get_array_optional(dev, "off", GPIOD_IN); if (IS_ERR(ddata->off_gpios)) return PTR_ERR(ddata->off_gpios); - if (ddata->off_gpios->ndescs < 2) { + if (ddata->off_gpios && ddata->off_gpios->ndescs < 2) { dev_err(dev, "invalid off-gpios property\n"); return -EINVAL; } diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index b3592982a83b5..5b6dc1cb9bfc3 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -920,7 +920,7 @@ static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct 
sm501_gpio_chip *smchip = gpiochip_get_data(chip); struct sm501_gpio *smgpio = smchip->ourgpio; - unsigned long bit = 1 << offset; + unsigned long bit = BIT(offset); void __iomem *regs = smchip->regbase; unsigned long save; unsigned long val; @@ -946,7 +946,7 @@ static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset) struct sm501_gpio_chip *smchip = gpiochip_get_data(chip); struct sm501_gpio *smgpio = smchip->ourgpio; void __iomem *regs = smchip->regbase; - unsigned long bit = 1 << offset; + unsigned long bit = BIT(offset); unsigned long save; unsigned long ddr; @@ -971,7 +971,7 @@ static int sm501_gpio_output(struct gpio_chip *chip, { struct sm501_gpio_chip *smchip = gpiochip_get_data(chip); struct sm501_gpio *smgpio = smchip->ourgpio; - unsigned long bit = 1 << offset; + unsigned long bit = BIT(offset); void __iomem *regs = smchip->regbase; unsigned long save; unsigned long val; diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c index 285a748748d70..f150d8769f198 100644 --- a/drivers/misc/cardreader/rtsx_usb.c +++ b/drivers/misc/cardreader/rtsx_usb.c @@ -286,7 +286,6 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status) int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) { int ret; - u8 interrupt_val = 0; u16 *buf; if (!status) @@ -309,20 +308,6 @@ int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) ret = rtsx_usb_get_status_with_bulk(ucr, status); } - rtsx_usb_read_register(ucr, CARD_INT_PEND, &interrupt_val); - /* Cross check presence with interrupts */ - if (*status & XD_CD) - if (!(interrupt_val & XD_INT)) - *status &= ~XD_CD; - - if (*status & SD_CD) - if (!(interrupt_val & SD_INT)) - *status &= ~SD_CD; - - if (*status & MS_CD) - if (!(interrupt_val & MS_INT)) - *status &= ~MS_CD; - /* usb_control_msg may return positive when success */ if (ret < 0) return ret; diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c index 88888485e6f8e..ee58f7ce5bfa9 100644 --- a/drivers/misc/eeprom/digsy_mtc_eeprom.c +++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c @@ -50,7 +50,7 @@ static struct platform_device digsy_mtc_eeprom = { }; static struct gpiod_lookup_table eeprom_spi_gpiod_table = { - .dev_id = "spi_gpio", + .dev_id = "spi_gpio.1", .table = { GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CLK, "sck", GPIO_ACTIVE_HIGH), diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index c3a6657dcd4a2..a5f88ec97df75 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -117,6 +117,8 @@ #define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */ +#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 6589635f8ba32..d6ff9d82ae94b 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -124,6 +124,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c index 1618cca9a7317..ef0a9f423c8f8 100644 --- a/drivers/misc/mei/vsc-tp.c +++ b/drivers/misc/mei/vsc-tp.c @@ -504,7 +504,7 @@ static int vsc_tp_probe(struct spi_device *spi) if (ret) return ret; - tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN); + tp->wakeuphost = devm_gpiod_get(dev, "wakeuphostint", GPIOD_IN); if (IS_ERR(tp->wakeuphost)) return 
PTR_ERR(tp->wakeuphost); diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 3aaaf47fa4ee2..e22afb420d099 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -85,7 +85,6 @@ #define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025 #define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031 -#define PCI_VENDOR_ID_ROCKCHIP 0x1d87 #define PCI_DEVICE_ID_ROCKCHIP_RK3588 0x3588 static DEFINE_IDA(pci_endpoint_test_ida); @@ -235,7 +234,7 @@ static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test) return true; fail: - switch (irq_type) { + switch (test->irq_type) { case IRQ_TYPE_INTX: dev_err(dev, "Failed to request IRQ %d for Legacy\n", pci_irq_vector(pdev, i)); @@ -252,6 +251,9 @@ static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test) break; } + test->num_irqs = i; + pci_endpoint_test_release_irq(test); + return false; } @@ -739,6 +741,7 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test, if (!pci_endpoint_test_request_irq(test)) goto err; + irq_type = test->irq_type; return true; err: diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index cdbd2edf4b2e7..8c0853dca8b92 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -2499,8 +2499,10 @@ static int atmci_probe(struct platform_device *pdev) /* Get MCI capabilities and set operations according to it */ atmci_get_cap(host); ret = atmci_configure_dma(host); - if (ret == -EPROBE_DEFER) + if (ret == -EPROBE_DEFER) { + clk_disable_unprepare(host->mck); goto err_dma_probe_defer; + } if (ret == 0) { host->prepare_data = &atmci_prepare_data_dma; host->submit_data = &atmci_submit_data_dma; diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index e9f6e4e622901..55158540c28cf 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2579,6 +2579,91 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) } } +static void dw_mci_push_data64_32(struct dw_mci *host, void *buf, int cnt) +{ + struct mmc_data *data = host->data; + int init_cnt = cnt; + + /* try and push anything in the part_buf */ + if (unlikely(host->part_buf_count)) { + int len = dw_mci_push_part_bytes(host, buf, cnt); + + buf += len; + cnt -= len; + + if (host->part_buf_count == 8) { + mci_fifo_l_writeq(host->fifo_reg, host->part_buf); + host->part_buf_count = 0; + } + } +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (unlikely((unsigned long)buf & 0x7)) { + while (cnt >= 8) { + u64 aligned_buf[16]; + int len = min(cnt & -8, (int)sizeof(aligned_buf)); + int items = len >> 3; + int i; + /* memcpy from input buffer into aligned buffer */ + memcpy(aligned_buf, buf, len); + buf += len; + cnt -= len; + /* push data from aligned buffer into fifo */ + for (i = 0; i < items; ++i) + mci_fifo_l_writeq(host->fifo_reg, aligned_buf[i]); + } + } else +#endif + { + u64 *pdata = buf; + + for (; cnt >= 8; cnt -= 8) + mci_fifo_l_writeq(host->fifo_reg, *pdata++); + buf = pdata; + } + /* put anything remaining in the part_buf */ + if (cnt) { + dw_mci_set_part_bytes(host, buf, cnt); + /* Push data if we have reached the expected data length */ + if ((data->bytes_xfered + init_cnt) == + (data->blksz * data->blocks)) + mci_fifo_l_writeq(host->fifo_reg, host->part_buf); + } +} + +static void dw_mci_pull_data64_32(struct dw_mci *host, void *buf, int cnt) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (unlikely((unsigned long)buf & 0x7)) { + while (cnt >= 8) { + /* pull data from fifo into aligned buffer 
*/ + u64 aligned_buf[16]; + int len = min(cnt & -8, (int)sizeof(aligned_buf)); + int items = len >> 3; + int i; + + for (i = 0; i < items; ++i) + aligned_buf[i] = mci_fifo_l_readq(host->fifo_reg); + + /* memcpy from aligned buffer into output buffer */ + memcpy(buf, aligned_buf, len); + buf += len; + cnt -= len; + } + } else +#endif + { + u64 *pdata = buf; + + for (; cnt >= 8; cnt -= 8) + *pdata++ = mci_fifo_l_readq(host->fifo_reg); + buf = pdata; + } + if (cnt) { + host->part_buf = mci_fifo_l_readq(host->fifo_reg); + dw_mci_pull_final_bytes(host, buf, cnt); + } +} + static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) { int len; @@ -3379,8 +3464,13 @@ int dw_mci_probe(struct dw_mci *host) width = 16; host->data_shift = 1; } else if (i == 2) { - host->push_data = dw_mci_push_data64; - host->pull_data = dw_mci_pull_data64; + if ((host->quirks & DW_MMC_QUIRK_FIFO64_32)) { + host->push_data = dw_mci_push_data64_32; + host->pull_data = dw_mci_pull_data64_32; + } else { + host->push_data = dw_mci_push_data64; + host->pull_data = dw_mci_pull_data64; + } width = 64; host->data_shift = 3; } else { diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 6447b916990dc..5463392dc8110 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -281,6 +281,8 @@ struct dw_mci_board { /* Support for longer data read timeout */ #define DW_MMC_QUIRK_EXTENDED_TMOUT BIT(0) +/* Force 32-bit access to the FIFO */ +#define DW_MMC_QUIRK_FIFO64_32 BIT(1) #define DW_MMC_240A 0x240a #define DW_MMC_280A 0x280a @@ -472,6 +474,31 @@ struct dw_mci_board { #define mci_fifo_writel(__value, __reg) __raw_writel(__reg, __value) #define mci_fifo_writeq(__value, __reg) __raw_writeq(__reg, __value) +/* + * Some dw_mmc devices have 64-bit FIFOs, but expect them to be + * accessed using two 32-bit accesses. If such controller is used + * with a 64-bit kernel, this has to be done explicitly. 
+ */ +static inline u64 mci_fifo_l_readq(void __iomem *addr) +{ + u64 ans; + u32 proxy[2]; + + proxy[0] = mci_fifo_readl(addr); + proxy[1] = mci_fifo_readl(addr + 4); + memcpy(&ans, proxy, 8); + return ans; +} + +static inline void mci_fifo_l_writeq(void __iomem *addr, u64 value) +{ + u32 proxy[2]; + + memcpy(proxy, &value, 8); + mci_fifo_writel(addr, proxy[0]); + mci_fifo_writel(addr + 4, proxy[1]); +} + /* Register access macros */ #define mci_readl(dev, reg) \ readl_relaxed((dev)->regs + SDMMC_##reg) diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 335350a4e99ab..ee0940d96febf 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -1272,19 +1272,25 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) /* Check for some optional GPIO controls */ slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd", id, GPIOD_OUT_LOW); - if (IS_ERR(slot->vsd)) - return dev_err_probe(host->dev, PTR_ERR(slot->vsd), + if (IS_ERR(slot->vsd)) { + r = dev_err_probe(host->dev, PTR_ERR(slot->vsd), "error looking up VSD GPIO\n"); + goto err_free_host; + } slot->vio = devm_gpiod_get_index_optional(host->dev, "vio", id, GPIOD_OUT_LOW); - if (IS_ERR(slot->vio)) - return dev_err_probe(host->dev, PTR_ERR(slot->vio), + if (IS_ERR(slot->vio)) { + r = dev_err_probe(host->dev, PTR_ERR(slot->vio), "error looking up VIO GPIO\n"); + goto err_free_host; + } slot->cover = devm_gpiod_get_index_optional(host->dev, "cover", id, GPIOD_IN); - if (IS_ERR(slot->cover)) - return dev_err_probe(host->dev, PTR_ERR(slot->cover), + if (IS_ERR(slot->cover)) { + r = dev_err_probe(host->dev, PTR_ERR(slot->cover), "error looking up cover switch GPIO\n"); + goto err_free_host; + } host->slots[id] = slot; @@ -1344,6 +1350,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) device_remove_file(&mmc->class_dev, &dev_attr_slot_name); err_remove_host: mmc_remove_host(mmc); +err_free_host: mmc_free_host(mmc); return r; } diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index 031a4b514d16b..9d9ddc2f6f70c 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -503,8 +503,15 @@ static int sdhci_brcmstb_suspend(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; clk_disable_unprepare(priv->base_clk); + if (host->mmc->caps2 & MMC_CAP2_CQE) { + ret = cqhci_suspend(host->mmc); + if (ret) + return ret; + } + return sdhci_pltfm_suspend(dev); } @@ -529,6 +536,9 @@ static int sdhci_brcmstb_resume(struct device *dev) ret = clk_set_rate(priv->base_clk, priv->base_freq_hz); } + if (host->mmc->caps2 & MMC_CAP2_CQE) + ret = cqhci_resume(host->mmc); + return ret; } #endif diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 5841a9afeb9f5..ea4a801c9ace5 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -1339,8 +1339,8 @@ static int sdhci_omap_probe(struct platform_device *pdev) /* R1B responses is required to properly manage HW busy detection. */ mmc->caps |= MMC_CAP_NEED_RSP_BUSY; - /* Allow card power off and runtime PM for eMMC/SD card devices */ - mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_AGGRESSIVE_PM; + /* Enable SDIO card power off. 
*/ + mmc->caps |= MMC_CAP_POWER_OFF_CARD; ret = sdhci_setup_host(host); if (ret) diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 3af43ac058255..376fd927ae738 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -399,6 +399,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) if (!IS_ERR(pxa->clk_core)) clk_prepare_enable(pxa->clk_core); + host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY; /* enable 1/8V DDR capable */ host->mmc->caps |= MMC_CAP_1_8V_DDR; diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c index 9739387cff8c9..58c6e1743f5c6 100644 --- a/drivers/mtd/inftlcore.c +++ b/drivers/mtd/inftlcore.c @@ -482,10 +482,11 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block) silly = MAX_LOOPS; while (thisEUN <= inftl->lastEUN) { - inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + - blockofs, 8, &retlen, (char *)&bci); - - status = bci.Status | bci.Status1; + if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + + blockofs, 8, &retlen, (char *)&bci) < 0) + status = SECTOR_IGNORE; + else + status = bci.Status | bci.Status1; pr_debug("INFTL: status of block %d in EUN %d is %x\n", block , writeEUN, status); diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c index 7ac8ac9013068..9cf3872e37ae1 100644 --- a/drivers/mtd/mtdpstore.c +++ b/drivers/mtd/mtdpstore.c @@ -417,11 +417,14 @@ static void mtdpstore_notify_add(struct mtd_info *mtd) } longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size)); - cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL); - cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL); + cxt->rmmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL); + cxt->usedmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL); longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize)); - cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL); + cxt->badmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL); + + if (!cxt->rmmap || !cxt->usedmap || !cxt->badmap) + return; /* just support dmesg right now */ cxt->dev.flags = PSTORE_FLAGS_DMESG; @@ -527,9 +530,6 @@ static void mtdpstore_notify_remove(struct mtd_info *mtd) mtdpstore_flush_removed(cxt); unregister_pstore_device(&cxt->dev); - kfree(cxt->badmap); - kfree(cxt->usedmap); - kfree(cxt->rmmap); cxt->mtd = NULL; cxt->index = -1; } diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index e76df6a00ed4f..2eb44c1428fbc 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -3008,7 +3008,7 @@ static int brcmnand_resume(struct device *dev) brcmnand_save_restore_cs_config(host, 1); /* Reset the chip, required by some chips after power-up */ - nand_reset_op(chip); + nand_reset(chip, 0); } return 0; diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index 3bc89b3569632..fca54e21a164f 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -471,6 +471,8 @@ struct cdns_nand_ctrl { struct { void __iomem *virt; dma_addr_t dma; + dma_addr_t iova_dma; + u32 size; } io; int irq; @@ -1835,11 +1837,11 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl, } if (dir == DMA_FROM_DEVICE) { - src_dma = cdns_ctrl->io.dma; + src_dma = cdns_ctrl->io.iova_dma; dst_dma = buf_dma; } else { src_dma = buf_dma; - dst_dma = cdns_ctrl->io.dma; + dst_dma = 
cdns_ctrl->io.iova_dma; } tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len, @@ -1861,12 +1863,12 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl, dma_async_issue_pending(cdns_ctrl->dmac); wait_for_completion(&finished); - dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir); + dma_unmap_single(dma_dev->dev, buf_dma, len, dir); return 0; err_unmap: - dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir); + dma_unmap_single(dma_dev->dev, buf_dma, len, dir); err: dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n"); @@ -2869,6 +2871,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl) static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) { dma_cap_mask_t mask; + struct dma_device *dma_dev = cdns_ctrl->dmac->device; int ret; cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev, @@ -2904,15 +2907,24 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) dma_cap_set(DMA_MEMCPY, mask); if (cdns_ctrl->caps1->has_dma) { - cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL); - if (!cdns_ctrl->dmac) { - dev_err(cdns_ctrl->dev, - "Unable to get a DMA channel\n"); - ret = -EBUSY; + cdns_ctrl->dmac = dma_request_chan_by_mask(&mask); + if (IS_ERR(cdns_ctrl->dmac)) { + ret = dev_err_probe(cdns_ctrl->dev, PTR_ERR(cdns_ctrl->dmac), + "%d: Failed to get a DMA channel\n", ret); goto disable_irq; } } + cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma, + cdns_ctrl->io.size, + DMA_BIDIRECTIONAL, 0); + + ret = dma_mapping_error(dma_dev->dev, cdns_ctrl->io.iova_dma); + if (ret) { + dev_err(cdns_ctrl->dev, "Failed to map I/O resource to DMA\n"); + goto dma_release_chnl; + } + nand_controller_init(&cdns_ctrl->controller); INIT_LIST_HEAD(&cdns_ctrl->chips); @@ -2923,18 +2935,22 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) if (ret) { dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n", ret); - goto dma_release_chnl; + goto unmap_dma_resource; } kfree(cdns_ctrl->buf); cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL); if (!cdns_ctrl->buf) { ret = -ENOMEM; - goto dma_release_chnl; + goto unmap_dma_resource; } return 0; +unmap_dma_resource: + dma_unmap_resource(dma_dev->dev, cdns_ctrl->io.iova_dma, + cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0); + dma_release_chnl: if (cdns_ctrl->dmac) dma_release_channel(cdns_ctrl->dmac); @@ -2956,6 +2972,8 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl) { cadence_nand_chips_cleanup(cdns_ctrl); + dma_unmap_resource(cdns_ctrl->dmac->device->dev, cdns_ctrl->io.iova_dma, + cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0); cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl); kfree(cdns_ctrl->buf); dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc), @@ -3020,7 +3038,9 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev) cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res); if (IS_ERR(cdns_ctrl->io.virt)) return PTR_ERR(cdns_ctrl->io.virt); + cdns_ctrl->io.dma = res->start; + cdns_ctrl->io.size = resource_size(res); dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk"); if (IS_ERR(dt->clk)) diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c index ed0cf732d20e4..36cfe03cd4ac3 100644 --- a/drivers/mtd/nand/raw/r852.c +++ b/drivers/mtd/nand/raw/r852.c @@ -387,6 +387,9 @@ static int r852_wait(struct nand_chip *chip) static int r852_ready(struct nand_chip *chip) { struct r852_device *dev = 
r852_get_dev(nand_to_mtd(chip)); + if (dev->card_unstable) + return 0; + return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY); } diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c index b5ad7118c49a2..175211fe6a5ed 100644 --- a/drivers/mtd/spi-nor/sst.c +++ b/drivers/mtd/spi-nor/sst.c @@ -174,7 +174,7 @@ static int sst_nor_write_data(struct spi_nor *nor, loff_t to, size_t len, int ret; nor->program_opcode = op; - ret = spi_nor_write_data(nor, to, 1, buf); + ret = spi_nor_write_data(nor, to, len, buf); if (ret < 0) return ret; WARN(ret != len, "While writing %zu byte written %i bytes\n", len, ret); diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index c5e571ec94c99..0472bcdff1307 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -251,18 +251,33 @@ static int com20020pci_probe(struct pci_dev *pdev, card->tx_led.default_trigger = devm_kasprintf(&pdev->dev, GFP_KERNEL, "arc%d-%d-tx", dev->dev_id, i); + if (!card->tx_led.default_trigger) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "pci:green:tx:%d-%d", dev->dev_id, i); - + if (!card->tx_led.name) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->tx_led.dev = &dev->dev; card->recon_led.brightness_set = led_recon_set; card->recon_led.default_trigger = devm_kasprintf(&pdev->dev, GFP_KERNEL, "arc%d-%d-recon", dev->dev_id, i); + if (!card->recon_led.default_trigger) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "pci:red:recon:%d-%d", dev->dev_id, i); + if (!card->recon_led.name) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->recon_led.dev = &dev->dev; ret = devm_led_classdev_register(&pdev->dev, &card->tx_led); diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 327b6ecdc77e0..d1b095af253bd 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1242,10 +1242,28 @@ static bool slave_can_set_ns_maddr(const struct bonding *bond, struct slave *sla slave->dev->flags & IFF_MULTICAST; } +/** + * slave_set_ns_maddrs - add/del all NS mac addresses for slave + * @bond: bond device + * @slave: slave device + * @add: add or remove all the NS mac addresses + * + * This function tries to add or delete all the NS mac addresses on the slave + * + * Note, the IPv6 NS target address is the unicast address in Neighbor + * Solicitation (NS) message. The dest address of NS message should be + * solicited-node multicast address of the target. The dest mac of NS message + * is converted from the solicited-node multicast address. 
+ * + * This function is called when + * * arp_validate changes + * * enslaving, releasing new slaves + */ static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add) { struct in6_addr *targets = bond->params.ns_targets; char slot_maddr[MAX_ADDR_LEN]; + struct in6_addr mcaddr; int i; if (!slave_can_set_ns_maddr(bond, slave)) @@ -1255,7 +1273,8 @@ static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool if (ipv6_addr_any(&targets[i])) break; - if (!ndisc_mc_map(&targets[i], slot_maddr, slave->dev, 0)) { + addrconf_addr_solict_mult(&targets[i], &mcaddr); + if (!ndisc_mc_map(&mcaddr, slot_maddr, slave->dev, 0)) { if (add) dev_mc_add(slave->dev, slot_maddr); else @@ -1278,23 +1297,43 @@ void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave) slave_set_ns_maddrs(bond, slave, false); } +/** + * slave_set_ns_maddr - set new NS mac address for slave + * @bond: bond device + * @slave: slave device + * @target: the new IPv6 target + * @slot: the old IPv6 target in the slot + * + * This function tries to replace the old mac address to new one on the slave. + * + * Note, the target/slot IPv6 address is the unicast address in Neighbor + * Solicitation (NS) message. The dest address of NS message should be + * solicited-node multicast address of the target. The dest mac of NS message + * is converted from the solicited-node multicast address. + * + * This function is called when + * * An IPv6 NS target is added or removed. + */ static void slave_set_ns_maddr(struct bonding *bond, struct slave *slave, struct in6_addr *target, struct in6_addr *slot) { - char target_maddr[MAX_ADDR_LEN], slot_maddr[MAX_ADDR_LEN]; + char mac_addr[MAX_ADDR_LEN]; + struct in6_addr mcast_addr; if (!bond->params.arp_validate || !slave_can_set_ns_maddr(bond, slave)) return; - /* remove the previous maddr from slave */ + /* remove the previous mac addr from slave */ + addrconf_addr_solict_mult(slot, &mcast_addr); if (!ipv6_addr_any(slot) && - !ndisc_mc_map(slot, slot_maddr, slave->dev, 0)) - dev_mc_del(slave->dev, slot_maddr); + !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0)) + dev_mc_del(slave->dev, mac_addr); - /* add new maddr on slave if target is set */ + /* add new mac addr on slave if target is set */ + addrconf_addr_solict_mult(target, &mcast_addr); if (!ipv6_addr_any(target) && - !ndisc_mc_map(target, target_maddr, slave->dev, 0)) - dev_mc_add(slave->dev, target_maddr); + !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0)) + dev_mc_add(slave->dev, mac_addr); } static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot, diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index 7fea00c7ca8a6..c60386bf2d1a4 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -745,7 +745,7 @@ static int cfv_probe(struct virtio_device *vdev) if (cfv->vr_rx) vdev->vringh_config->del_vrhs(cfv->vdev); - if (cfv->vdev) + if (cfv->vq_tx) vdev->config->del_vqs(cfv->vdev); free_netdev(netdev); return err; diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index ac1a860986df6..fca290afb5329 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -386,6 +386,16 @@ static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = { FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, }; +static const struct flexcan_devtype_data nxp_s32g2_devtype_data = { + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + 
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD | + FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_NR_IRQ_3 | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR | + FLEXCAN_QUIRK_SECONDARY_MB_IRQ, +}; + static const struct can_bittiming_const flexcan_bittiming_const = { .name = DRV_NAME, .tseg1_min = 4, @@ -1762,14 +1772,25 @@ static int flexcan_open(struct net_device *dev) goto out_free_irq_boff; } + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) { + err = request_irq(priv->irq_secondary_mb, + flexcan_irq, IRQF_SHARED, dev->name, dev); + if (err) + goto out_free_irq_err; + } + flexcan_chip_interrupts_enable(dev); netif_start_queue(dev); return 0; + out_free_irq_err: + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) + free_irq(priv->irq_err, dev); out_free_irq_boff: - free_irq(priv->irq_boff, dev); + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) + free_irq(priv->irq_boff, dev); out_free_irq: free_irq(dev->irq, dev); out_can_rx_offload_disable: @@ -1794,6 +1815,9 @@ static int flexcan_close(struct net_device *dev) netif_stop_queue(dev); flexcan_chip_interrupts_disable(dev); + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) + free_irq(priv->irq_secondary_mb, dev); + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) { free_irq(priv->irq_err, dev); free_irq(priv->irq_boff, dev); @@ -2041,6 +2065,7 @@ static const struct of_device_id flexcan_of_match[] = { { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, }, { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, }, { .compatible = "fsl,lx2160ar1-flexcan", .data = &fsl_lx2160a_r1_devtype_data, }, + { .compatible = "nxp,s32g2-flexcan", .data = &nxp_s32g2_devtype_data, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, flexcan_of_match); @@ -2187,6 +2212,14 @@ static int flexcan_probe(struct platform_device *pdev) } } + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) { + priv->irq_secondary_mb = platform_get_irq_byname(pdev, "mb-1"); + if (priv->irq_secondary_mb < 0) { + err = priv->irq_secondary_mb; + goto failed_platform_get_irq; + } + } + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) { priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO; @@ -2260,14 +2293,19 @@ static int __maybe_unused flexcan_suspend(struct device *device) flexcan_chip_interrupts_disable(dev); + err = flexcan_transceiver_disable(priv); + if (err) + return err; + err = pinctrl_pm_select_sleep_state(device); if (err) return err; } netif_stop_queue(dev); netif_device_detach(dev); + + priv->can.state = CAN_STATE_SLEEPING; } - priv->can.state = CAN_STATE_SLEEPING; return 0; } @@ -2278,7 +2316,6 @@ static int __maybe_unused flexcan_resume(struct device *device) struct flexcan_priv *priv = netdev_priv(dev); int err; - priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(dev)) { netif_device_attach(dev); netif_start_queue(dev); @@ -2292,12 +2329,20 @@ static int __maybe_unused flexcan_resume(struct device *device) if (err) return err; - err = flexcan_chip_start(dev); + err = flexcan_transceiver_enable(priv); if (err) return err; + err = flexcan_chip_start(dev); + if (err) { + flexcan_transceiver_disable(priv); + return err; + } + flexcan_chip_interrupts_enable(dev); } + + priv->can.state = CAN_STATE_ERROR_ACTIVE; } return 0; diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h index 
4933d8c7439e6..2cf886618c962 100644 --- a/drivers/net/can/flexcan/flexcan.h +++ b/drivers/net/can/flexcan/flexcan.h @@ -70,6 +70,10 @@ #define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16) /* Setup stop mode with ATF SCMI protocol to support wakeup */ #define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI BIT(17) +/* Device has two separate interrupt lines for two mailbox ranges, which + * both need to have an interrupt handler registered. + */ +#define FLEXCAN_QUIRK_SECONDARY_MB_IRQ BIT(18) struct flexcan_devtype_data { u32 quirks; /* quirks needed for different IP cores */ @@ -107,6 +111,7 @@ struct flexcan_priv { int irq_boff; int irq_err; + int irq_secondary_mb; /* IPC handle when setup stop mode by System Controller firmware(scfw) */ struct imx_sc_ipc *sc_ipc_handle; diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index df1a5d0b37b22..aa3df0d05b853 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -787,22 +787,14 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv) } static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv, - u32 ch) + u32 ch, u32 rule_entry) { - u32 cfg; - int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES; + int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES; + u32 rule_entry_index = rule_entry % 16; u32 ridx = ch + RCANFD_RFFIFO_IDX; - if (ch == 0) { - start = 0; /* Channel 0 always starts from 0th rule */ - } else { - /* Get number of Channel 0 rules and adjust */ - cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch)); - start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg); - } - /* Enable write access to entry */ - page = RCANFD_GAFL_PAGENUM(start); + page = RCANFD_GAFL_PAGENUM(rule_entry); rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR, (RCANFD_GAFLECTR_AFLPN(gpriv, page) | RCANFD_GAFLECTR_AFLDAE)); @@ -818,13 +810,13 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv, offset = RCANFD_C_GAFL_OFFSET; /* Accept all IDs */ - rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0); + rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, rule_entry_index), 0); /* IDE or RTR is not considered for matching */ - rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0); + rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, rule_entry_index), 0); /* Any data length accepted */ - rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0); + rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, rule_entry_index), 0); /* Place the msg in corresponding Rx FIFO entry */ - rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start), + rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, rule_entry_index), RCANFD_GAFLP1_GAFLFDP(ridx)); /* Disable write access to page */ @@ -1851,6 +1843,7 @@ static int rcar_canfd_probe(struct platform_device *pdev) unsigned long channels_mask = 0; int err, ch_irq, g_irq; int g_err_irq, g_recc_irq; + u32 rule_entry = 0; bool fdmode = true; /* CAN FD only mode - default */ char name[9] = "channelX"; int i; @@ -2023,7 +2016,8 @@ static int rcar_canfd_probe(struct platform_device *pdev) rcar_canfd_configure_tx(gpriv, ch); /* Configure receive rules */ - rcar_canfd_configure_afl_rules(gpriv, ch); + rcar_canfd_configure_afl_rules(gpriv, ch, rule_entry); + rule_entry += RCANFD_CHANNEL_NUMRULES; } /* Configure common interrupts */ diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c index d9a937ba126c3..ac514766d431c 100644 --- 
a/drivers/net/can/rockchip/rockchip_canfd-core.c +++ b/drivers/net/can/rockchip/rockchip_canfd-core.c @@ -907,15 +907,16 @@ static int rkcanfd_probe(struct platform_device *pdev) priv->can.data_bittiming_const = &rkcanfd_data_bittiming_const; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_BERR_REPORTING; - if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN)) - priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; priv->can.do_set_mode = rkcanfd_set_mode; priv->can.do_get_berr_counter = rkcanfd_get_berr_counter; priv->ndev = ndev; match = device_get_match_data(&pdev->dev); - if (match) + if (match) { priv->devtype_data = *(struct rkcanfd_devtype_data *)match; + if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN)) + priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; + } err = can_rx_offload_add_manual(ndev, &priv->offload, RKCANFD_NAPI_WEIGHT); diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index 39a63b7313a46..07406daf7c88e 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -186,7 +186,7 @@ union ucan_ctl_payload { */ struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version; - u8 raw[128]; + u8 fw_str[128]; } __packed; enum { @@ -424,18 +424,20 @@ static int ucan_ctrl_command_out(struct ucan_priv *up, UCAN_USB_CTL_PIPE_TIMEOUT); } -static int ucan_device_request_in(struct ucan_priv *up, - u8 cmd, u16 subcmd, u16 datalen) +static void ucan_get_fw_str(struct ucan_priv *up, char *fw_str, size_t size) { - return usb_control_msg(up->udev, - usb_rcvctrlpipe(up->udev, 0), - cmd, - USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - subcmd, - 0, - up->ctl_msg_buffer, - datalen, - UCAN_USB_CTL_PIPE_TIMEOUT); + int ret; + + ret = usb_control_msg(up->udev, usb_rcvctrlpipe(up->udev, 0), + UCAN_DEVICE_GET_FW_STRING, + USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, + 0, 0, fw_str, size - 1, + UCAN_USB_CTL_PIPE_TIMEOUT); + if (ret > 0) + fw_str[ret] = '\0'; + else + strscpy(fw_str, "unknown", size); } /* Parse the device information structure reported by the device and @@ -1314,7 +1316,6 @@ static int ucan_probe(struct usb_interface *intf, u8 in_ep_addr; u8 out_ep_addr; union ucan_ctl_payload *ctl_msg_buffer; - char firmware_str[sizeof(union ucan_ctl_payload) + 1]; udev = interface_to_usbdev(intf); @@ -1527,17 +1528,6 @@ static int ucan_probe(struct usb_interface *intf, */ ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info); - /* just print some device information - if available */ - ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0, - sizeof(union ucan_ctl_payload)); - if (ret > 0) { - /* copy string while ensuring zero termination */ - strscpy(firmware_str, up->ctl_msg_buffer->raw, - sizeof(union ucan_ctl_payload) + 1); - } else { - strcpy(firmware_str, "unknown"); - } - /* device is compatible, reset it */ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0); if (ret < 0) @@ -1555,7 +1545,10 @@ static int ucan_probe(struct usb_interface *intf, /* initialisation complete, log device info */ netdev_info(up->netdev, "registered device\n"); - netdev_info(up->netdev, "firmware string: %s\n", firmware_str); + ucan_get_fw_str(up, up->ctl_msg_buffer->fw_str, + sizeof(up->ctl_msg_buffer->fw_str)); + netdev_info(up->netdev, "firmware string: %s\n", + up->ctl_msg_buffer->fw_str); /* success */ return 0; diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index c39cb119e760d..d4600ab0b70b3 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ 
b/drivers/net/dsa/b53/b53_common.c @@ -737,6 +737,15 @@ static void b53_enable_mib(struct b53_device *dev) b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); } +static void b53_enable_stp(struct b53_device *dev) +{ + u8 gc; + + b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); + gc |= GC_RX_BPDU_EN; + b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); +} + static u16 b53_default_pvid(struct b53_device *dev) { if (is5325(dev) || is5365(dev)) @@ -876,6 +885,7 @@ static int b53_switch_reset(struct b53_device *dev) } b53_enable_mib(dev); + b53_enable_stp(dev); return b53_flush_arl(dev, FAST_AGE_STATIC); } diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index d84ee1b419a61..abc979fbb45d1 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2590,7 +2590,8 @@ mt7531_setup_common(struct dsa_switch *ds) if (ret < 0) return ret; - return 0; + /* Setup VLAN ID 0 for VLAN-unaware bridges */ + return mt7530_setup_vlan0(priv); } static int @@ -2686,11 +2687,6 @@ mt7531_setup(struct dsa_switch *ds) if (ret) return ret; - /* Setup VLAN ID 0 for VLAN-unaware bridges */ - ret = mt7530_setup_vlan0(priv); - if (ret) - return ret; - ds->assisted_learning_on_cpu_port = true; ds->mtu_enforcement_ingress = true; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 284270a4ade1c..df1df60154121 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1878,6 +1878,8 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, if (!chip->info->ops->vtu_getnext) return -EOPNOTSUPP; + memset(entry, 0, sizeof(*entry)); + entry->vid = vid ? vid - 1 : mv88e6xxx_max_vid(chip); entry->valid = false; @@ -2013,7 +2015,16 @@ static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid) struct mv88e6xxx_mst *mst, *tmp; int err; - if (!sid) + /* If the SID is zero, it is for a VLAN mapped to the default MSTI, + * and mv88e6xxx_stu_setup() made sure it is always present, and thus, + * should not be removed here. + * + * If the chip lacks STU support, numerically the "sid" variable will + * happen to also be zero, but we don't want to rely on that fact, so + * we explicitly test that first. In that case, there is also nothing + * to do here. + */ + if (!mv88e6xxx_has_stu(chip) || !sid) return 0; list_for_each_entry_safe(mst, tmp, &chip->msts, node) { @@ -2261,13 +2272,11 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, return err; } -static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, - const unsigned char *addr, u16 vid, - u8 state) +static int mv88e6xxx_port_db_get(struct mv88e6xxx_chip *chip, + const unsigned char *addr, u16 vid, + u16 *fid, struct mv88e6xxx_atu_entry *entry) { - struct mv88e6xxx_atu_entry entry; struct mv88e6xxx_vtu_entry vlan; - u16 fid; int err; /* Ports have two private address databases: one for when the port is @@ -2278,7 +2287,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, * VLAN ID into the port's database used for VLAN-unaware bridging. 
*/ if (vid == 0) { - fid = MV88E6XXX_FID_BRIDGED; + *fid = MV88E6XXX_FID_BRIDGED; } else { err = mv88e6xxx_vtu_get(chip, vid, &vlan); if (err) @@ -2288,14 +2297,39 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, if (!vlan.valid) return -EOPNOTSUPP; - fid = vlan.fid; + *fid = vlan.fid; } - entry.state = 0; - ether_addr_copy(entry.mac, addr); - eth_addr_dec(entry.mac); + entry->state = 0; + ether_addr_copy(entry->mac, addr); + eth_addr_dec(entry->mac); + + return mv88e6xxx_g1_atu_getnext(chip, *fid, entry); +} + +static bool mv88e6xxx_port_db_find(struct mv88e6xxx_chip *chip, + const unsigned char *addr, u16 vid) +{ + struct mv88e6xxx_atu_entry entry; + u16 fid; + int err; + + err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry); + if (err) + return false; + + return entry.state && ether_addr_equal(entry.mac, addr); +} - err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry); +static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, + const unsigned char *addr, u16 vid, + u8 state) +{ + struct mv88e6xxx_atu_entry entry; + u16 fid; + int err; + + err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry); if (err) return err; @@ -2893,6 +2927,13 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC); + if (err) + goto out; + + if (!mv88e6xxx_port_db_find(chip, addr, vid)) + err = -ENOSPC; + +out: mv88e6xxx_reg_unlock(chip); return err; @@ -3661,6 +3702,21 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_stats_clear(chip); } +static int mv88e6320_setup_errata(struct mv88e6xxx_chip *chip) +{ + u16 dummy; + int err; + + /* Workaround for erratum + * 3.3 RGMII timing may be out of spec when transmit delay is enabled + */ + err = mv88e6xxx_port_hidden_write(chip, 0, 0xf, 0x7, 0xe000); + if (err) + return err; + + return mv88e6xxx_port_hidden_read(chip, 0, 0xf, 0x7, &dummy); +} + /* Check if the errata has already been applied. 
*/ static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip) { @@ -5114,6 +5170,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { static const struct mv88e6xxx_ops mv88e6320_ops = { /* MV88E6XXX_FAMILY_6320 */ + .setup_errata = mv88e6320_setup_errata, .ieee_pri_map = mv88e6085_g1_ieee_pri_map, .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, @@ -5163,6 +5220,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { static const struct mv88e6xxx_ops mv88e6321_ops = { /* MV88E6XXX_FAMILY_6320 */ + .setup_errata = mv88e6320_setup_errata, .ieee_pri_map = mv88e6085_g1_ieee_pri_map, .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, @@ -6124,7 +6182,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_databases = 4096, .num_macs = 8192, .num_ports = 7, - .num_internal_phys = 5, + .num_internal_phys = 2, + .internal_phys_offset = 3, .num_gpio = 15, .max_vid = 4095, .max_sid = 63, @@ -6318,7 +6377,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_databases = 4096, .num_macs = 8192, .num_ports = 7, - .num_internal_phys = 5, + .num_internal_phys = 2, + .internal_phys_offset = 3, .num_gpio = 15, .max_vid = 4095, .max_sid = 63, @@ -6593,6 +6653,13 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port, mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid, MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC); + if (err) + goto out; + + if (!mv88e6xxx_port_db_find(chip, mdb->addr, mdb->vid)) + err = -ENOSPC; + +out: mv88e6xxx_reg_unlock(chip); return err; @@ -7264,13 +7331,13 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) err = mv88e6xxx_switch_reset(chip); mv88e6xxx_reg_unlock(chip); if (err) - goto out; + goto out_phy; if (np) { chip->irq = of_irq_get(np, 0); if (chip->irq == -EPROBE_DEFER) { err = chip->irq; - goto out; + goto out_phy; } } @@ -7289,7 +7356,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) mv88e6xxx_reg_unlock(chip); if (err) - goto out; + goto out_phy; if (chip->info->g2_irqs > 0) { err = mv88e6xxx_g2_irq_setup(chip); @@ -7323,6 +7390,8 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) mv88e6xxx_g1_irq_free(chip); else mv88e6xxx_irq_poll_free(chip); +out_phy: + mv88e6xxx_phy_destroy(chip); out: if (pdata) dev_put(pdata->netdev); @@ -7345,7 +7414,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_ptp_free(chip); } - mv88e6xxx_phy_destroy(chip); mv88e6xxx_unregister_switch(chip); mv88e6xxx_g1_vtu_prob_irq_free(chip); @@ -7358,6 +7426,8 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_g1_irq_free(chip); else mv88e6xxx_irq_poll_free(chip); + + mv88e6xxx_phy_destroy(chip); } static void mv88e6xxx_shutdown(struct mdio_device *mdiodev) diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c index a08dab75e0c0c..f57fde02077d2 100644 --- a/drivers/net/dsa/mv88e6xxx/devlink.c +++ b/drivers/net/dsa/mv88e6xxx/devlink.c @@ -743,7 +743,8 @@ void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds) int i; for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++) - dsa_devlink_region_destroy(chip->regions[i]); + if (chip->regions[i]) + dsa_devlink_region_destroy(chip->regions[i]); } void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port) diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c index 8bb88b3d900db..ee9e5d7e52770 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.c +++ 
b/drivers/net/dsa/mv88e6xxx/phy.c @@ -229,7 +229,10 @@ static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip) static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip) { + mutex_lock(&chip->ppu_mutex); del_timer_sync(&chip->ppu_timer); + cancel_work_sync(&chip->ppu_work); + mutex_unlock(&chip->ppu_mutex); } int mv88e6185_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus, diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig index 6989972eebc30..d6eb6713e5f6b 100644 --- a/drivers/net/dsa/realtek/Kconfig +++ b/drivers/net/dsa/realtek/Kconfig @@ -43,4 +43,10 @@ config NET_DSA_REALTEK_RTL8366RB help Select to enable support for Realtek RTL8366RB. +config NET_DSA_REALTEK_RTL8366RB_LEDS + bool + depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB) + depends on NET_DSA_REALTEK_RTL8366RB + default NET_DSA_REALTEK_RTL8366RB + endif diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile index 35491dc20d6d6..17367bcba496c 100644 --- a/drivers/net/dsa/realtek/Makefile +++ b/drivers/net/dsa/realtek/Makefile @@ -12,4 +12,7 @@ endif obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o rtl8366-objs := rtl8366-core.o rtl8366rb.o +ifdef CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS +rtl8366-objs += rtl8366rb-leds.o +endif obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o diff --git a/drivers/net/dsa/realtek/rtl8366rb-leds.c b/drivers/net/dsa/realtek/rtl8366rb-leds.c new file mode 100644 index 0000000000000..99c890681ae60 --- /dev/null +++ b/drivers/net/dsa/realtek/rtl8366rb-leds.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include "rtl83xx.h" +#include "rtl8366rb.h" + +static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port) +{ + switch (led_group) { + case 0: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + case 1: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + case 2: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + case 3: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + default: + return 0; + } +} + +static int rb8366rb_get_port_led(struct rtl8366rb_led *led) +{ + struct realtek_priv *priv = led->priv; + u8 led_group = led->led_group; + u8 port_num = led->port_num; + int ret; + u32 val; + + ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group), + &val); + if (ret) { + dev_err(priv->dev, "error reading LED on port %d group %d\n", + led_group, port_num); + return ret; + } + + return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num)); +} + +static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable) +{ + struct realtek_priv *priv = led->priv; + u8 led_group = led->led_group; + u8 port_num = led->port_num; + int ret; + + ret = regmap_update_bits(priv->map, + RTL8366RB_LED_X_X_CTRL_REG(led_group), + rtl8366rb_led_group_port_mask(led_group, + port_num), + enable ? 
0xffff : 0); + if (ret) { + dev_err(priv->dev, "error updating LED on port %d group %d\n", + led_group, port_num); + return ret; + } + + /* Change the LED group to manual controlled LEDs if required */ + ret = rb8366rb_set_ledgroup_mode(priv, led_group, + RTL8366RB_LEDGROUP_FORCE); + + if (ret) { + dev_err(priv->dev, "error updating LED GROUP group %d\n", + led_group); + return ret; + } + + return 0; +} + +static int +rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev, + enum led_brightness brightness) +{ + struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led, + cdev); + + return rb8366rb_set_port_led(led, brightness == LED_ON); +} + +static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp, + struct fwnode_handle *led_fwnode) +{ + struct rtl8366rb *rb = priv->chip_data; + struct led_init_data init_data = { }; + enum led_default_state state; + struct rtl8366rb_led *led; + u32 led_group; + int ret; + + ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group); + if (ret) + return ret; + + if (led_group >= RTL8366RB_NUM_LEDGROUPS) { + dev_warn(priv->dev, "Invalid LED reg %d defined for port %d", + led_group, dp->index); + return -EINVAL; + } + + led = &rb->leds[dp->index][led_group]; + led->port_num = dp->index; + led->led_group = led_group; + led->priv = priv; + + state = led_init_default_state_get(led_fwnode); + switch (state) { + case LEDS_DEFSTATE_ON: + led->cdev.brightness = 1; + rb8366rb_set_port_led(led, 1); + break; + case LEDS_DEFSTATE_KEEP: + led->cdev.brightness = + rb8366rb_get_port_led(led); + break; + case LEDS_DEFSTATE_OFF: + default: + led->cdev.brightness = 0; + rb8366rb_set_port_led(led, 0); + } + + led->cdev.max_brightness = 1; + led->cdev.brightness_set_blocking = + rtl8366rb_cled_brightness_set_blocking; + init_data.fwnode = led_fwnode; + init_data.devname_mandatory = true; + + init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d", + dp->ds->index, dp->index, led_group); + if (!init_data.devicename) + return -ENOMEM; + + ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data); + if (ret) { + dev_warn(priv->dev, "Failed to init LED %d for port %d", + led_group, dp->index); + return ret; + } + + return 0; +} + +int rtl8366rb_setup_leds(struct realtek_priv *priv) +{ + struct dsa_switch *ds = &priv->ds; + struct device_node *leds_np; + struct dsa_port *dp; + int ret = 0; + + dsa_switch_for_each_port(dp, ds) { + if (!dp->dn) + continue; + + leds_np = of_get_child_by_name(dp->dn, "leds"); + if (!leds_np) { + dev_dbg(priv->dev, "No leds defined for port %d", + dp->index); + continue; + } + + for_each_child_of_node_scoped(leds_np, led_np) { + ret = rtl8366rb_setup_led(priv, dp, + of_fwnode_handle(led_np)); + if (ret) + break; + } + + of_node_put(leds_np); + if (ret) + return ret; + } + return 0; +} diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c index c7a8cd0605878..ae3d49fc22b80 100644 --- a/drivers/net/dsa/realtek/rtl8366rb.c +++ b/drivers/net/dsa/realtek/rtl8366rb.c @@ -26,11 +26,7 @@ #include "realtek-smi.h" #include "realtek-mdio.h" #include "rtl83xx.h" - -#define RTL8366RB_PORT_NUM_CPU 5 -#define RTL8366RB_NUM_PORTS 6 -#define RTL8366RB_PHY_NO_MAX 4 -#define RTL8366RB_PHY_ADDR_MAX 31 +#include "rtl8366rb.h" /* Switch Global Configuration register */ #define RTL8366RB_SGCR 0x0000 @@ -175,39 +171,6 @@ */ #define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f -/* LED control registers */ -/* The LED blink rate is global; it is used by all triggers in all groups. 
*/ -#define RTL8366RB_LED_BLINKRATE_REG 0x0430 -#define RTL8366RB_LED_BLINKRATE_MASK 0x0007 -#define RTL8366RB_LED_BLINKRATE_28MS 0x0000 -#define RTL8366RB_LED_BLINKRATE_56MS 0x0001 -#define RTL8366RB_LED_BLINKRATE_84MS 0x0002 -#define RTL8366RB_LED_BLINKRATE_111MS 0x0003 -#define RTL8366RB_LED_BLINKRATE_222MS 0x0004 -#define RTL8366RB_LED_BLINKRATE_446MS 0x0005 - -/* LED trigger event for each group */ -#define RTL8366RB_LED_CTRL_REG 0x0431 -#define RTL8366RB_LED_CTRL_OFFSET(led_group) \ - (4 * (led_group)) -#define RTL8366RB_LED_CTRL_MASK(led_group) \ - (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group)) - -/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only - * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is - * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored. - */ -#define RTL8366RB_LED_0_1_CTRL_REG 0x0432 -#define RTL8366RB_LED_2_3_CTRL_REG 0x0433 -#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \ - ((led_group) <= 1 ? \ - RTL8366RB_LED_0_1_CTRL_REG : \ - RTL8366RB_LED_2_3_CTRL_REG) -#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0) -#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6) -#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0) -#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6) - #define RTL8366RB_MIB_COUNT 33 #define RTL8366RB_GLOBAL_MIB_COUNT 1 #define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050 @@ -243,7 +206,6 @@ #define RTL8366RB_PORT_STATUS_AN_MASK 0x0080 #define RTL8366RB_NUM_VLANS 16 -#define RTL8366RB_NUM_LEDGROUPS 4 #define RTL8366RB_NUM_VIDS 4096 #define RTL8366RB_PRIORITYMAX 7 #define RTL8366RB_NUM_FIDS 8 @@ -350,46 +312,6 @@ #define RTL8366RB_GREEN_FEATURE_TX BIT(0) #define RTL8366RB_GREEN_FEATURE_RX BIT(2) -enum rtl8366_ledgroup_mode { - RTL8366RB_LEDGROUP_OFF = 0x0, - RTL8366RB_LEDGROUP_DUP_COL = 0x1, - RTL8366RB_LEDGROUP_LINK_ACT = 0x2, - RTL8366RB_LEDGROUP_SPD1000 = 0x3, - RTL8366RB_LEDGROUP_SPD100 = 0x4, - RTL8366RB_LEDGROUP_SPD10 = 0x5, - RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6, - RTL8366RB_LEDGROUP_SPD100_ACT = 0x7, - RTL8366RB_LEDGROUP_SPD10_ACT = 0x8, - RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9, - RTL8366RB_LEDGROUP_FIBER = 0xa, - RTL8366RB_LEDGROUP_AN_FAULT = 0xb, - RTL8366RB_LEDGROUP_LINK_RX = 0xc, - RTL8366RB_LEDGROUP_LINK_TX = 0xd, - RTL8366RB_LEDGROUP_MASTER = 0xe, - RTL8366RB_LEDGROUP_FORCE = 0xf, - - __RTL8366RB_LEDGROUP_MODE_MAX -}; - -struct rtl8366rb_led { - u8 port_num; - u8 led_group; - struct realtek_priv *priv; - struct led_classdev cdev; -}; - -/** - * struct rtl8366rb - RTL8366RB-specific data - * @max_mtu: per-port max MTU setting - * @pvid_enabled: if PVID is set for respective port - * @leds: per-port and per-ledgroup led info - */ -struct rtl8366rb { - unsigned int max_mtu[RTL8366RB_NUM_PORTS]; - bool pvid_enabled[RTL8366RB_NUM_PORTS]; - struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS]; -}; - static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = { { 0, 0, 4, "IfInOctets" }, { 0, 4, 4, "EtherStatsOctets" }, @@ -830,9 +752,10 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table, return 0; } -static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv, - u8 led_group, - enum rtl8366_ledgroup_mode mode) +/* This code is used also with LEDs disabled */ +int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv, + u8 led_group, + enum rtl8366_ledgroup_mode mode) { int ret; u32 val; @@ -849,144 +772,7 @@ static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv, return 0; } -static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port) -{ - 
switch (led_group) { - case 0: - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); - case 1: - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); - case 2: - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); - case 3: - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); - default: - return 0; - } -} - -static int rb8366rb_get_port_led(struct rtl8366rb_led *led) -{ - struct realtek_priv *priv = led->priv; - u8 led_group = led->led_group; - u8 port_num = led->port_num; - int ret; - u32 val; - - ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group), - &val); - if (ret) { - dev_err(priv->dev, "error reading LED on port %d group %d\n", - led_group, port_num); - return ret; - } - - return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num)); -} - -static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable) -{ - struct realtek_priv *priv = led->priv; - u8 led_group = led->led_group; - u8 port_num = led->port_num; - int ret; - - ret = regmap_update_bits(priv->map, - RTL8366RB_LED_X_X_CTRL_REG(led_group), - rtl8366rb_led_group_port_mask(led_group, - port_num), - enable ? 0xffff : 0); - if (ret) { - dev_err(priv->dev, "error updating LED on port %d group %d\n", - led_group, port_num); - return ret; - } - - /* Change the LED group to manual controlled LEDs if required */ - ret = rb8366rb_set_ledgroup_mode(priv, led_group, - RTL8366RB_LEDGROUP_FORCE); - - if (ret) { - dev_err(priv->dev, "error updating LED GROUP group %d\n", - led_group); - return ret; - } - - return 0; -} - -static int -rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev, - enum led_brightness brightness) -{ - struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led, - cdev); - - return rb8366rb_set_port_led(led, brightness == LED_ON); -} - -static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp, - struct fwnode_handle *led_fwnode) -{ - struct rtl8366rb *rb = priv->chip_data; - struct led_init_data init_data = { }; - enum led_default_state state; - struct rtl8366rb_led *led; - u32 led_group; - int ret; - - ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group); - if (ret) - return ret; - - if (led_group >= RTL8366RB_NUM_LEDGROUPS) { - dev_warn(priv->dev, "Invalid LED reg %d defined for port %d", - led_group, dp->index); - return -EINVAL; - } - - led = &rb->leds[dp->index][led_group]; - led->port_num = dp->index; - led->led_group = led_group; - led->priv = priv; - - state = led_init_default_state_get(led_fwnode); - switch (state) { - case LEDS_DEFSTATE_ON: - led->cdev.brightness = 1; - rb8366rb_set_port_led(led, 1); - break; - case LEDS_DEFSTATE_KEEP: - led->cdev.brightness = - rb8366rb_get_port_led(led); - break; - case LEDS_DEFSTATE_OFF: - default: - led->cdev.brightness = 0; - rb8366rb_set_port_led(led, 0); - } - - led->cdev.max_brightness = 1; - led->cdev.brightness_set_blocking = - rtl8366rb_cled_brightness_set_blocking; - init_data.fwnode = led_fwnode; - init_data.devname_mandatory = true; - - init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d", - dp->ds->index, dp->index, led_group); - if (!init_data.devicename) - return -ENOMEM; - - ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data); - if (ret) { - dev_warn(priv->dev, "Failed to init LED %d for port %d", - led_group, dp->index); - return ret; - } - - return 0; -} - +/* This code is used also with LEDs disabled */ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv) { int ret = 0; @@ -1007,38 +793,6 @@ static 
int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv) return ret; } -static int rtl8366rb_setup_leds(struct realtek_priv *priv) -{ - struct dsa_switch *ds = &priv->ds; - struct device_node *leds_np; - struct dsa_port *dp; - int ret = 0; - - dsa_switch_for_each_port(dp, ds) { - if (!dp->dn) - continue; - - leds_np = of_get_child_by_name(dp->dn, "leds"); - if (!leds_np) { - dev_dbg(priv->dev, "No leds defined for port %d", - dp->index); - continue; - } - - for_each_child_of_node_scoped(leds_np, led_np) { - ret = rtl8366rb_setup_led(priv, dp, - of_fwnode_handle(led_np)); - if (ret) - break; - } - - of_node_put(leds_np); - if (ret) - return ret; - } - return 0; -} - static int rtl8366rb_setup(struct dsa_switch *ds) { struct realtek_priv *priv = ds->priv; diff --git a/drivers/net/dsa/realtek/rtl8366rb.h b/drivers/net/dsa/realtek/rtl8366rb.h new file mode 100644 index 0000000000000..685ff3275faa1 --- /dev/null +++ b/drivers/net/dsa/realtek/rtl8366rb.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _RTL8366RB_H +#define _RTL8366RB_H + +#include "realtek.h" + +#define RTL8366RB_PORT_NUM_CPU 5 +#define RTL8366RB_NUM_PORTS 6 +#define RTL8366RB_PHY_NO_MAX 4 +#define RTL8366RB_NUM_LEDGROUPS 4 +#define RTL8366RB_PHY_ADDR_MAX 31 + +/* LED control registers */ +/* The LED blink rate is global; it is used by all triggers in all groups. */ +#define RTL8366RB_LED_BLINKRATE_REG 0x0430 +#define RTL8366RB_LED_BLINKRATE_MASK 0x0007 +#define RTL8366RB_LED_BLINKRATE_28MS 0x0000 +#define RTL8366RB_LED_BLINKRATE_56MS 0x0001 +#define RTL8366RB_LED_BLINKRATE_84MS 0x0002 +#define RTL8366RB_LED_BLINKRATE_111MS 0x0003 +#define RTL8366RB_LED_BLINKRATE_222MS 0x0004 +#define RTL8366RB_LED_BLINKRATE_446MS 0x0005 + +/* LED trigger event for each group */ +#define RTL8366RB_LED_CTRL_REG 0x0431 +#define RTL8366RB_LED_CTRL_OFFSET(led_group) \ + (4 * (led_group)) +#define RTL8366RB_LED_CTRL_MASK(led_group) \ + (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group)) + +/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only + * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is + * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored. + */ +#define RTL8366RB_LED_0_1_CTRL_REG 0x0432 +#define RTL8366RB_LED_2_3_CTRL_REG 0x0433 +#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \ + ((led_group) <= 1 ? 
\ + RTL8366RB_LED_0_1_CTRL_REG : \ + RTL8366RB_LED_2_3_CTRL_REG) +#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0) +#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6) +#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0) +#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6) + +enum rtl8366_ledgroup_mode { + RTL8366RB_LEDGROUP_OFF = 0x0, + RTL8366RB_LEDGROUP_DUP_COL = 0x1, + RTL8366RB_LEDGROUP_LINK_ACT = 0x2, + RTL8366RB_LEDGROUP_SPD1000 = 0x3, + RTL8366RB_LEDGROUP_SPD100 = 0x4, + RTL8366RB_LEDGROUP_SPD10 = 0x5, + RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6, + RTL8366RB_LEDGROUP_SPD100_ACT = 0x7, + RTL8366RB_LEDGROUP_SPD10_ACT = 0x8, + RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9, + RTL8366RB_LEDGROUP_FIBER = 0xa, + RTL8366RB_LEDGROUP_AN_FAULT = 0xb, + RTL8366RB_LEDGROUP_LINK_RX = 0xc, + RTL8366RB_LEDGROUP_LINK_TX = 0xd, + RTL8366RB_LEDGROUP_MASTER = 0xe, + RTL8366RB_LEDGROUP_FORCE = 0xf, + + __RTL8366RB_LEDGROUP_MODE_MAX +}; + +#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) + +struct rtl8366rb_led { + u8 port_num; + u8 led_group; + struct realtek_priv *priv; + struct led_classdev cdev; +}; + +int rtl8366rb_setup_leds(struct realtek_priv *priv); + +#else + +static inline int rtl8366rb_setup_leds(struct realtek_priv *priv) +{ + return 0; +} + +#endif /* IS_ENABLED(CONFIG_LEDS_CLASS) */ + +/** + * struct rtl8366rb - RTL8366RB-specific data + * @max_mtu: per-port max MTU setting + * @pvid_enabled: if PVID is set for respective port + * @leds: per-port and per-ledgroup led info + */ +struct rtl8366rb { + unsigned int max_mtu[RTL8366RB_NUM_PORTS]; + bool pvid_enabled[RTL8366RB_NUM_PORTS]; +#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) + struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS]; +#endif +}; + +/* This code is used also with LEDs disabled */ +int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv, + u8 led_group, + enum rtl8366_ledgroup_mode mode); + +#endif /* _RTL8366RB_H */ diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c index ac37a4e738ae7..04c5e3abd8d70 100644 --- a/drivers/net/ethernet/amd/pds_core/debugfs.c +++ b/drivers/net/ethernet/amd/pds_core/debugfs.c @@ -154,8 +154,9 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq) debugfs_create_u32("index", 0400, intr_dentry, &intr->index); debugfs_create_u32("vector", 0400, intr_dentry, &intr->vector); - intr_ctrl_regset = kzalloc(sizeof(*intr_ctrl_regset), - GFP_KERNEL); + intr_ctrl_regset = devm_kzalloc(pdsc->dev, + sizeof(*intr_ctrl_regset), + GFP_KERNEL); if (!intr_ctrl_regset) return; intr_ctrl_regset->regs = intr_ctrl_regs; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 603e9c968c44b..016dcfec8d496 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -758,7 +758,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb_any(skb); tx_kick_pending: if (BNXT_TX_PTP_IS_SET(lflags)) { - txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0; + txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0; atomic64_inc(&bp->ptp_cfg->stats.ts_err); if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) /* set SKB to err so PTP worker will clean up */ @@ -766,7 +766,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) } if (txr->kick_pending) bnxt_txr_db_kick(bp, txr, txr->tx_prod); - txr->tx_buf_ring[txr->tx_prod].skb = NULL; + txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL; 
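+		/* Note: tx_prod is a free-running producer index, so both the
+		 * is_ts_pkt and skb stores above go through RING_TX(), which
+		 * masks the index to the ring size.
+		 */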
dev_core_stats_tx_dropped_inc(dev); return NETDEV_TX_OK; } @@ -864,6 +864,11 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) bnapi->events &= ~BNXT_TX_CMP_EVENT; } +static bool bnxt_separate_head_pool(void) +{ + return PAGE_SIZE > BNXT_RX_PAGE_SIZE; +} + static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, struct bnxt_rx_ring_info *rxr, unsigned int *offset, @@ -886,27 +891,19 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, } static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping, + struct bnxt_rx_ring_info *rxr, gfp_t gfp) { - u8 *data; - struct pci_dev *pdev = bp->pdev; + unsigned int offset; + struct page *page; - if (gfp == GFP_ATOMIC) - data = napi_alloc_frag(bp->rx_buf_size); - else - data = netdev_alloc_frag(bp->rx_buf_size); - if (!data) + page = page_pool_alloc_frag(rxr->head_pool, &offset, + bp->rx_buf_size, gfp); + if (!page) return NULL; - *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, - bp->rx_buf_use_size, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - - if (dma_mapping_error(&pdev->dev, *mapping)) { - skb_free_frag(data); - data = NULL; - } - return data; + *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset; + return page_address(page) + offset; } int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, @@ -928,7 +925,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, rx_buf->data = page; rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; } else { - u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp); + u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp); if (!data) return -ENOMEM; @@ -1179,13 +1176,14 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, } skb = napi_build_skb(data, bp->rx_buf_size); - dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, - bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, + bp->rx_dir); if (!skb) { - skb_free_frag(data); + page_pool_free_va(rxr->head_pool, data, true); return NULL; } + skb_mark_for_recycle(skb); skb_reserve(skb, bp->rx_offset); skb_put(skb, offset_and_len & 0xffff); return skb; @@ -1840,7 +1838,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, u8 *new_data; dma_addr_t new_mapping; - new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC); + new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr, + GFP_ATOMIC); if (!new_data) { bnxt_abort_tpa(cpr, idx, agg_bufs); cpr->sw_stats->rx.rx_oom_discards += 1; @@ -1852,16 +1851,16 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, tpa_info->mapping = new_mapping; skb = napi_build_skb(data, bp->rx_buf_size); - dma_unmap_single_attrs(&bp->pdev->dev, mapping, - bp->rx_buf_use_size, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); + dma_sync_single_for_cpu(&bp->pdev->dev, mapping, + bp->rx_buf_use_size, bp->rx_dir); if (!skb) { - skb_free_frag(data); + page_pool_free_va(rxr->head_pool, data, true); bnxt_abort_tpa(cpr, idx, agg_bufs); cpr->sw_stats->rx.rx_oom_discards += 1; return NULL; } + skb_mark_for_recycle(skb); skb_reserve(skb, bp->rx_offset); skb_put(skb, len); } @@ -2025,6 +2024,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct rx_cmp_ext *rxcmp1; u32 tmp_raw_cons = *raw_cons; u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); + struct skb_shared_info *sinfo; struct bnxt_sw_rx_bd *rx_buf; unsigned int len; u8 *data_ptr, agg_bufs, cmp_type; @@ -2151,6 
+2151,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, false); if (!frag_len) goto oom_next_rx; + } xdp_active = true; } @@ -2160,6 +2161,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rc = 1; goto next_rx; } + if (xdp_buff_has_frags(&xdp)) { + sinfo = xdp_get_shared_info_from_buff(&xdp); + agg_bufs = sinfo->nr_frags; + } else { + agg_bufs = 0; + } } if (len <= bp->rx_copy_thresh) { @@ -2197,7 +2204,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (!skb) goto oom_next_rx; } else { - skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1); + skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, + rxr->page_pool, &xdp); if (!skb) { /* we should be able to free the old skb here */ bnxt_xdp_buff_frags_free(rxr, &xdp); @@ -3316,28 +3324,22 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) { - struct pci_dev *pdev = bp->pdev; int i, max_idx; max_idx = bp->rx_nr_pages * RX_DESC_CNT; for (i = 0; i < max_idx; i++) { struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; - dma_addr_t mapping = rx_buf->mapping; void *data = rx_buf->data; if (!data) continue; rx_buf->data = NULL; - if (BNXT_RX_PAGE_MODE(bp)) { + if (BNXT_RX_PAGE_MODE(bp)) page_pool_recycle_direct(rxr->page_pool, data); - } else { - dma_unmap_single_attrs(&pdev->dev, mapping, - bp->rx_buf_use_size, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - skb_free_frag(data); - } + else + page_pool_free_va(rxr->head_pool, data, true); } } @@ -3361,16 +3363,11 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info } } -static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) +static void bnxt_free_one_tpa_info_data(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) { - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; - struct pci_dev *pdev = bp->pdev; - struct bnxt_tpa_idx_map *map; int i; - if (!rxr->rx_tpa) - goto skip_rx_tpa_free; - for (i = 0; i < bp->max_tpa; i++) { struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; u8 *data = tpa_info->data; @@ -3378,14 +3375,20 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) if (!data) continue; - dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, - bp->rx_buf_use_size, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - tpa_info->data = NULL; - - skb_free_frag(data); + page_pool_free_va(rxr->head_pool, data, false); } +} + +static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + struct bnxt_tpa_idx_map *map; + + if (!rxr->rx_tpa) + goto skip_rx_tpa_free; + + bnxt_free_one_tpa_info_data(bp, rxr); skip_rx_tpa_free: if (!rxr->rx_buf_ring) @@ -3413,7 +3416,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) return; for (i = 0; i < bp->rx_nr_rings; i++) - bnxt_free_one_rx_ring_skbs(bp, i); + bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]); } static void bnxt_free_skbs(struct bnxt *bp) @@ -3525,29 +3528,64 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) return 0; } +static void bnxt_free_one_tpa_info(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + int i; + + kfree(rxr->rx_tpa_idx_map); + rxr->rx_tpa_idx_map = NULL; + if (rxr->rx_tpa) { + for (i = 0; i < bp->max_tpa; i++) { + kfree(rxr->rx_tpa[i].agg_arr); + rxr->rx_tpa[i].agg_arr = NULL; + } + } + kfree(rxr->rx_tpa); + rxr->rx_tpa = NULL; +} + static void bnxt_free_tpa_info(struct bnxt *bp) { - int i, j; + int i; for (i = 0; i < bp->rx_nr_rings; i++) { struct 
bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; - kfree(rxr->rx_tpa_idx_map); - rxr->rx_tpa_idx_map = NULL; - if (rxr->rx_tpa) { - for (j = 0; j < bp->max_tpa; j++) { - kfree(rxr->rx_tpa[j].agg_arr); - rxr->rx_tpa[j].agg_arr = NULL; - } - } - kfree(rxr->rx_tpa); - rxr->rx_tpa = NULL; + bnxt_free_one_tpa_info(bp, rxr); } } +static int bnxt_alloc_one_tpa_info(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + struct rx_agg_cmp *agg; + int i; + + rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), + GFP_KERNEL); + if (!rxr->rx_tpa) + return -ENOMEM; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + return 0; + for (i = 0; i < bp->max_tpa; i++) { + agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); + if (!agg) + return -ENOMEM; + rxr->rx_tpa[i].agg_arr = agg; + } + rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), + GFP_KERNEL); + if (!rxr->rx_tpa_idx_map) + return -ENOMEM; + + return 0; +} + static int bnxt_alloc_tpa_info(struct bnxt *bp) { - int i, j; + int i, rc; bp->max_tpa = MAX_TPA; if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { @@ -3558,25 +3596,10 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp) for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; - struct rx_agg_cmp *agg; - - rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), - GFP_KERNEL); - if (!rxr->rx_tpa) - return -ENOMEM; - if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) - continue; - for (j = 0; j < bp->max_tpa; j++) { - agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); - if (!agg) - return -ENOMEM; - rxr->rx_tpa[j].agg_arr = agg; - } - rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), - GFP_KERNEL); - if (!rxr->rx_tpa_idx_map) - return -ENOMEM; + rc = bnxt_alloc_one_tpa_info(bp, rxr); + if (rc) + return rc; } return 0; } @@ -3600,7 +3623,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp) xdp_rxq_info_unreg(&rxr->xdp_rxq); page_pool_destroy(rxr->page_pool); - rxr->page_pool = NULL; + if (bnxt_separate_head_pool()) + page_pool_destroy(rxr->head_pool); + rxr->page_pool = rxr->head_pool = NULL; kfree(rxr->rx_agg_bmap); rxr->rx_agg_bmap = NULL; @@ -3618,6 +3643,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, int numa_node) { struct page_pool_params pp = { 0 }; + struct page_pool *pool; pp.pool_size = bp->rx_agg_ring_size; if (BNXT_RX_PAGE_MODE(bp)) @@ -3630,14 +3656,25 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, pp.max_len = PAGE_SIZE; pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; - rxr->page_pool = page_pool_create(&pp); - if (IS_ERR(rxr->page_pool)) { - int err = PTR_ERR(rxr->page_pool); + pool = page_pool_create(&pp); + if (IS_ERR(pool)) + return PTR_ERR(pool); + rxr->page_pool = pool; - rxr->page_pool = NULL; - return err; + if (bnxt_separate_head_pool()) { + pp.pool_size = max(bp->rx_ring_size, 1024); + pool = page_pool_create(&pp); + if (IS_ERR(pool)) + goto err_destroy_pp; } + rxr->head_pool = pool; + return 0; + +err_destroy_pp: + page_pool_destroy(rxr->page_pool); + rxr->page_pool = NULL; + return PTR_ERR(pool); } static int bnxt_alloc_rx_rings(struct bnxt *bp) @@ -4171,10 +4208,31 @@ static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp, rxr->rx_agg_prod = prod; } +static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + dma_addr_t mapping; + u8 *data; + int i; + + for (i = 0; i < bp->max_tpa; i++) { + data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, + GFP_KERNEL); + if (!data) + return -ENOMEM; + + rxr->rx_tpa[i].data = data; + rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 
+ rxr->rx_tpa[i].mapping = mapping; + } + + return 0; +} + static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; - int i; + int rc; bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr); @@ -4184,18 +4242,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr); if (rxr->rx_tpa) { - dma_addr_t mapping; - u8 *data; - - for (i = 0; i < bp->max_tpa; i++) { - data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL); - if (!data) - return -ENOMEM; - - rxr->rx_tpa[i].data = data; - rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; - rxr->rx_tpa[i].mapping = mapping; - } + rc = bnxt_alloc_one_tpa_info_data(bp, rxr); + if (rc) + return rc; } return 0; } @@ -13452,7 +13501,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp) bnxt_reset_task(bp, true); break; } - bnxt_free_one_rx_ring_skbs(bp, i); + bnxt_free_one_rx_ring_skbs(bp, rxr); rxr->rx_prod = 0; rxr->rx_agg_prod = 0; rxr->rx_sw_agg_prod = 0; @@ -15023,6 +15072,9 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i, struct bnxt_cp_ring_info *cpr; u64 *sw; + if (!bp->bnapi) + return; + cpr = &bp->bnapi[i]->cp_ring; sw = cpr->stats.sw_stats; @@ -15046,6 +15098,9 @@ static void bnxt_get_queue_stats_tx(struct net_device *dev, int i, struct bnxt_napi *bnapi; u64 *sw; + if (!bp->tx_ring) + return; + bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi; sw = bnapi->cp_ring.stats.sw_stats; @@ -15100,6 +15155,9 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx) struct bnxt_ring_struct *ring; int rc; + if (!bp->rx_ring) + return -ENETDOWN; + rxr = &bp->rx_ring[idx]; clone = qmem; memcpy(clone, rxr, sizeof(*rxr)); @@ -15141,15 +15199,25 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx) goto err_free_rx_agg_ring; } + if (bp->flags & BNXT_FLAG_TPA) { + rc = bnxt_alloc_one_tpa_info(bp, clone); + if (rc) + goto err_free_tpa_info; + } + bnxt_init_one_rx_ring_rxbd(bp, clone); bnxt_init_one_rx_agg_ring_rxbd(bp, clone); bnxt_alloc_one_rx_ring_skb(bp, clone, idx); if (bp->flags & BNXT_FLAG_AGG_RINGS) bnxt_alloc_one_rx_ring_page(bp, clone, idx); + if (bp->flags & BNXT_FLAG_TPA) + bnxt_alloc_one_tpa_info_data(bp, clone); return 0; +err_free_tpa_info: + bnxt_free_one_tpa_info(bp, clone); err_free_rx_agg_ring: bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem); err_free_rx_ring: @@ -15157,9 +15225,11 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx) err_rxq_info_unreg: xdp_rxq_info_unreg(&clone->xdp_rxq); err_page_pool_destroy: - clone->page_pool->p.napi = NULL; page_pool_destroy(clone->page_pool); + if (bnxt_separate_head_pool()) + page_pool_destroy(clone->head_pool); clone->page_pool = NULL; + clone->head_pool = NULL; return rc; } @@ -15169,13 +15239,16 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem) struct bnxt *bp = netdev_priv(dev); struct bnxt_ring_struct *ring; - bnxt_free_one_rx_ring(bp, rxr); - bnxt_free_one_rx_agg_ring(bp, rxr); + bnxt_free_one_rx_ring_skbs(bp, rxr); + bnxt_free_one_tpa_info(bp, rxr); xdp_rxq_info_unreg(&rxr->xdp_rxq); page_pool_destroy(rxr->page_pool); + if (bnxt_separate_head_pool()) + page_pool_destroy(rxr->head_pool); rxr->page_pool = NULL; + rxr->head_pool = NULL; ring = &rxr->rx_ring_struct; bnxt_free_ring(bp, &ring->ring_mem); @@ -15257,7 +15330,10 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) rxr->rx_agg_prod = clone->rx_agg_prod; rxr->rx_sw_agg_prod = 
clone->rx_sw_agg_prod; rxr->rx_next_cons = clone->rx_next_cons; + rxr->rx_tpa = clone->rx_tpa; + rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map; rxr->page_pool = clone->page_pool; + rxr->head_pool = clone->head_pool; rxr->xdp_rxq = clone->xdp_rxq; bnxt_copy_rx_ring(bp, rxr, clone); @@ -15276,7 +15352,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) cpr = &rxr->bnapi->cp_ring; cpr->sw_stats->rx.rx_resets++; - for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) { + for (i = 0; i <= bp->nr_vnics; i++) { vnic = &bp->vnic_info[i]; rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); @@ -15304,7 +15380,7 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) struct bnxt_vnic_info *vnic; int i; - for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) { + for (i = 0; i <= bp->nr_vnics; i++) { vnic = &bp->vnic_info[i]; vnic->mru = 0; bnxt_hwrm_vnic_update(bp, vnic, @@ -15318,6 +15394,8 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) bnxt_hwrm_rx_agg_ring_free(bp, rxr, false); rxr->rx_next_cons = 0; page_pool_disable_direct_recycling(rxr->page_pool); + if (bnxt_separate_head_pool()) + page_pool_disable_direct_recycling(rxr->head_pool); memcpy(qmem, rxr, sizeof(*rxr)); bnxt_init_rx_ring_struct(bp, qmem); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index bee645f58d0bd..1758edcd1db42 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1108,6 +1108,7 @@ struct bnxt_rx_ring_info { struct bnxt_ring_struct rx_agg_ring_struct; struct xdp_rxq_info xdp_rxq; struct page_pool *page_pool; + struct page_pool *head_pool; }; struct bnxt_rx_sw_stats { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index dc51dce209d5f..8726657f5cb9e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -456,23 +456,16 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) struct sk_buff * bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags, - struct page_pool *pool, struct xdp_buff *xdp, - struct rx_cmp_ext *rxcmp1) + struct page_pool *pool, struct xdp_buff *xdp) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); if (!skb) return NULL; - skb_checksum_none_assert(skb); - if (RX_CMP_L4_CS_OK(rxcmp1)) { - if (bp->dev->features & NETIF_F_RXCSUM) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = RX_CMP_ENCAP(rxcmp1); - } - } + xdp_update_skb_shared_info(skb, num_frags, sinfo->xdp_frags_size, - BNXT_RX_PAGE_SIZE * sinfo->nr_frags, + BNXT_RX_PAGE_SIZE * num_frags, xdp_buff_is_frag_pfmemalloc(xdp)); return skb; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index 0122782400b8a..220285e190fcd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -33,6 +33,5 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr, struct xdp_buff *xdp); struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags, struct page_pool *pool, - struct xdp_buff *xdp, - struct rx_cmp_ext *rxcmp1); + struct xdp_buff *xdp); #endif diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 5740c98d8c9f0..2847278d9cd48 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -1279,6 +1279,8 @@ struct macb { struct clk *rx_clk; 
struct clk *tsu_clk; struct net_device *dev; + /* Protects hw_stats and ethtool_stats */ + spinlock_t stats_lock; union { struct macb_stats macb; struct gem_stats gem; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 56901280ba047..60847cdb516ee 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1992,10 +1992,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) if (status & MACB_BIT(ISR_ROVR)) { /* We missed at least one packet */ + spin_lock(&bp->stats_lock); if (macb_is_gem(bp)) bp->hw_stats.gem.rx_overruns++; else bp->hw_stats.macb.rx_overruns++; + spin_unlock(&bp->stats_lock); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); @@ -3116,6 +3118,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp) if (!netif_running(bp->dev)) return nstat; + spin_lock_irq(&bp->stats_lock); gem_update_stats(bp); nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + @@ -3145,6 +3148,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp) nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; nstat->tx_fifo_errors = hwstat->tx_underrun; + spin_unlock_irq(&bp->stats_lock); return nstat; } @@ -3152,12 +3156,13 @@ static struct net_device_stats *gem_get_stats(struct macb *bp) static void gem_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { - struct macb *bp; + struct macb *bp = netdev_priv(dev); - bp = netdev_priv(dev); + spin_lock_irq(&bp->stats_lock); gem_update_stats(bp); memcpy(data, &bp->ethtool_stats, sizeof(u64) * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES)); + spin_unlock_irq(&bp->stats_lock); } static int gem_get_sset_count(struct net_device *dev, int sset) @@ -3207,6 +3212,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev) return gem_get_stats(bp); /* read stats from hardware */ + spin_lock_irq(&bp->stats_lock); macb_update_stats(bp); /* Convert HW stats into netdevice stats */ @@ -3240,6 +3246,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev) nstat->tx_carrier_errors = hwstat->tx_carrier_errors; nstat->tx_fifo_errors = hwstat->tx_underruns; /* Don't know about heartbeat or window errors... 
*/ + spin_unlock_irq(&bp->stats_lock); return nstat; } @@ -5110,6 +5117,7 @@ static int macb_probe(struct platform_device *pdev) } } spin_lock_init(&bp->lock); + spin_lock_init(&bp->stats_lock); /* setup capabilities */ macb_configure_caps(bp, macb_config); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 7f3f5afa864f4..1546c3db08f09 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -2270,6 +2270,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap) eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL); if (!eth_filter->port[i].bmap) { ret = -ENOMEM; + kvfree(eth_filter->port[i].loc_array); goto free_eth_finfo; } } diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index e48b861e4ce15..270ff9aab3352 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -562,7 +562,7 @@ struct be_adapter { struct be_dma_mem mbox_mem_alloced; struct be_mcc_obj mcc_obj; - struct mutex mcc_lock; /* For serializing mcc cmds to BE card */ + spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ spinlock_t mcc_cq_lock; u16 cfg_num_rx_irqs; /* configured via set-channels */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 61adcebeef010..51b8377edd1d0 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -575,7 +575,7 @@ int be_process_mcc(struct be_adapter *adapter) /* Wait till no more pending mcc requests are present */ static int be_mcc_wait_compl(struct be_adapter *adapter) { -#define mcc_timeout 12000 /* 12s timeout */ +#define mcc_timeout 120000 /* 12s timeout */ int i, status = 0; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; @@ -589,7 +589,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) if (atomic_read(&mcc_obj->q.used) == 0) break; - usleep_range(500, 1000); + udelay(100); } if (i == mcc_timeout) { dev_err(&adapter->pdev->dev, "FW not responding\n"); @@ -866,7 +866,7 @@ static bool use_mcc(struct be_adapter *adapter) static int be_cmd_lock(struct be_adapter *adapter) { if (use_mcc(adapter)) { - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); return 0; } else { return mutex_lock_interruptible(&adapter->mbox_lock); @@ -877,7 +877,7 @@ static int be_cmd_lock(struct be_adapter *adapter) static void be_cmd_unlock(struct be_adapter *adapter) { if (use_mcc(adapter)) - return mutex_unlock(&adapter->mcc_lock); + return spin_unlock_bh(&adapter->mcc_lock); else return mutex_unlock(&adapter->mbox_lock); } @@ -1047,7 +1047,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, struct be_cmd_req_mac_query *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1076,7 +1076,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1088,7 +1088,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, struct be_cmd_req_pmac_add *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1113,7 +1113,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, } err: - mutex_unlock(&adapter->mcc_lock); + 
spin_unlock_bh(&adapter->mcc_lock); if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST) status = -EPERM; @@ -1131,7 +1131,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) if (pmac_id == -1) return 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1151,7 +1151,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1414,7 +1414,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter, struct be_dma_mem *q_mem = &rxq->dma_mem; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1444,7 +1444,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1508,7 +1508,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) struct be_cmd_req_q_destroy *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1525,7 +1525,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) q->created = false; err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1593,7 +1593,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) struct be_cmd_req_hdr *hdr; int status = 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1621,7 +1621,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) adapter->stats_cmd_sent = true; err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1637,7 +1637,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, CMD_SUBSYSTEM_ETH)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1660,7 +1660,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, adapter->stats_cmd_sent = true; err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1697,7 +1697,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, struct be_cmd_req_link_status *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); if (link_status) *link_status = LINK_DOWN; @@ -1736,7 +1736,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1747,7 +1747,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) struct be_cmd_req_get_cntl_addnl_attribs *req; int status = 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1762,7 +1762,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) status = be_mcc_notify(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1811,7 +1811,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) if (!get_fat_cmd.va) return -ENOMEM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); while (total_size) { buf_size = 
min(total_size, (u32)60 * 1024); @@ -1849,9 +1849,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) log_offset += buf_size; } err: + spin_unlock_bh(&adapter->mcc_lock); dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size, get_fat_cmd.va, get_fat_cmd.dma); - mutex_unlock(&adapter->mcc_lock); return status; } @@ -1862,7 +1862,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter) struct be_cmd_req_get_fw_version *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1885,7 +1885,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter) sizeof(adapter->fw_on_flash)); } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1899,7 +1899,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter, struct be_cmd_req_modify_eq_delay *req; int status = 0, i; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1922,7 +1922,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter, status = be_mcc_notify(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1949,7 +1949,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, struct be_cmd_req_vlan_config *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1971,7 +1971,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1982,7 +1982,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) struct be_cmd_req_rx_filter *req = mem->va; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2015,7 +2015,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2046,7 +2046,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) CMD_SUBSYSTEM_COMMON)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2066,7 +2066,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED) return -EOPNOTSUPP; @@ -2085,7 +2085,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) CMD_SUBSYSTEM_COMMON)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2108,7 +2108,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2189,7 +2189,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) return 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2214,7 +2214,7 @@ int be_cmd_rss_config(struct 
be_adapter *adapter, u8 *rsstable, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2226,7 +2226,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, struct be_cmd_req_enable_disable_beacon *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2247,7 +2247,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2258,7 +2258,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) struct be_cmd_req_get_beacon_state *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2282,7 +2282,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2306,7 +2306,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, return -ENOMEM; } - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2328,7 +2328,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, memcpy(data, resp->page_data + off, len); } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); return status; } @@ -2345,7 +2345,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, void *ctxt = NULL; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); @@ -2387,7 +2387,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, if (status) goto err_unlock; - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(60000))) @@ -2406,7 +2406,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, return status; err_unlock: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2460,7 +2460,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter, struct be_mcc_wrb *wrb; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2478,7 +2478,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2491,7 +2491,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, struct lancer_cmd_resp_read_object *resp; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2525,7 +2525,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, } err_unlock: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2537,7 +2537,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_cmd_write_flashrom *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); @@ -2562,7 
+2562,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, if (status) goto err_unlock; - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(40000))) @@ -2573,7 +2573,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, return status; err_unlock: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2584,7 +2584,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, struct be_mcc_wrb *wrb; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2611,7 +2611,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, memcpy(flashed_crc, req->crc, 4); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3217,7 +3217,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, struct be_cmd_req_acpi_wol_magic_config *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3234,7 +3234,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3249,7 +3249,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3272,7 +3272,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, if (status) goto err_unlock; - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(SET_LB_MODE_TIMEOUT))) @@ -3281,7 +3281,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, return status; err_unlock: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3298,7 +3298,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3324,7 +3324,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, if (status) goto err; - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); wait_for_completion(&adapter->et_cmd_compl); resp = embedded_payload(wrb); @@ -3332,7 +3332,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, return status; err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3348,7 +3348,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3382,7 +3382,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3393,7 +3393,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, struct be_cmd_req_seeprom_read *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3409,7 +3409,7 @@ int 
be_cmd_get_seeprom_data(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3424,7 +3424,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) CMD_SUBSYSTEM_COMMON)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3469,7 +3469,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) } dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3479,7 +3479,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) struct be_cmd_req_set_qos *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3499,7 +3499,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3611,7 +3611,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, struct be_cmd_req_get_fn_privileges *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3643,7 +3643,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3655,7 +3655,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, struct be_cmd_req_set_fn_privileges *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3675,7 +3675,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3707,7 +3707,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, return -ENOMEM; } - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3771,7 +3771,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, } out: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size, get_mac_list_cmd.va, get_mac_list_cmd.dma); return status; @@ -3831,7 +3831,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, if (!cmd.va) return -ENOMEM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3853,7 +3853,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, err: dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3889,7 +3889,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, CMD_SUBSYSTEM_COMMON)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3930,7 +3930,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3944,7 +3944,7 @@ int 
be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, int status; u16 vid; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3991,7 +3991,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4190,7 +4190,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, struct be_cmd_req_set_ext_fat_caps *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4206,7 +4206,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4684,7 +4684,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) if (iface == 0xFFFFFFFF) return -1; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4701,7 +4701,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4735,7 +4735,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, struct be_cmd_resp_get_iface_list *resp; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4756,7 +4756,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4850,7 +4850,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) if (BEx_chip(adapter)) return 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4868,7 +4868,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) req->enable = 1; status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4941,7 +4941,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter, u32 link_config = 0; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4969,7 +4969,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -5000,8 +5000,7 @@ int be_cmd_set_features(struct be_adapter *adapter) struct be_mcc_wrb *wrb; int status; - if (mutex_lock_interruptible(&adapter->mcc_lock)) - return -1; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -5039,7 +5038,7 @@ int be_cmd_set_features(struct be_adapter *adapter) dev_info(&adapter->pdev->dev, "Adapter does not support HW error recovery\n"); - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -5053,7 +5052,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, struct be_cmd_resp_hdr *resp; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -5076,7 +5075,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, memcpy(wrb_payload, resp, sizeof(*resp) + 
resp->response_length); be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } EXPORT_SYMBOL(be_roce_mcc_cmd); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 875fe379eea21..3d2e215921191 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -5667,8 +5667,8 @@ static int be_drv_init(struct be_adapter *adapter) } mutex_init(&adapter->mbox_lock); - mutex_init(&adapter->mcc_lock); mutex_init(&adapter->rx_filter_lock); + spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); init_completion(&adapter->et_cmd_compl); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 16a7908c79f70..f662a5d54986c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -145,6 +145,24 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp, return 0; } +/** + * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame + * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located + * @count: Number of Tx buffer descriptors which need to be unmapped + * @i: Index of the last successfully mapped Tx buffer descriptor + */ +static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i) +{ + while (count--) { + struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; + + enetc_free_tx_frame(tx_ring, tx_swbd); + if (i == 0) + i = tx_ring->bd_count; + i--; + } +} + static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) { bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false; @@ -235,9 +253,11 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) } if (do_onestep_tstamp) { - u32 lo, hi, val; - u64 sec, nsec; + __be32 new_sec_l, new_nsec; + u32 lo, hi, nsec, val; + __be16 new_sec_h; u8 *data; + u64 sec; lo = enetc_rd_hot(hw, ENETC_SICTR0); hi = enetc_rd_hot(hw, ENETC_SICTR1); @@ -251,13 +271,38 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) /* Update originTimestamp field of Sync packet * - 48 bits seconds field * - 32 bits nanseconds field + * + * In addition, the UDP checksum needs to be updated + * by software after updating the originTimestamp field, + * otherwise the hardware will calculate the wrong + * checksum when updating the correction field and + * write it into the packet.
*/ data = skb_mac_header(skb); - *(__be16 *)(data + offset2) = - htons((sec >> 32) & 0xffff); - *(__be32 *)(data + offset2 + 2) = - htonl(sec & 0xffffffff); - *(__be32 *)(data + offset2 + 6) = htonl(nsec); + new_sec_h = htons((sec >> 32) & 0xffff); + new_sec_l = htonl(sec & 0xffffffff); + new_nsec = htonl(nsec); + if (udp) { + struct udphdr *uh = udp_hdr(skb); + __be32 old_sec_l, old_nsec; + __be16 old_sec_h; + + old_sec_h = *(__be16 *)(data + offset2); + inet_proto_csum_replace2(&uh->check, skb, old_sec_h, + new_sec_h, false); + + old_sec_l = *(__be32 *)(data + offset2 + 2); + inet_proto_csum_replace4(&uh->check, skb, old_sec_l, + new_sec_l, false); + + old_nsec = *(__be32 *)(data + offset2 + 6); + inet_proto_csum_replace4(&uh->check, skb, old_nsec, + new_nsec, false); + } + + *(__be16 *)(data + offset2) = new_sec_h; + *(__be32 *)(data + offset2 + 2) = new_sec_l; + *(__be32 *)(data + offset2 + 6) = new_nsec; /* Configure single-step register */ val = ENETC_PM0_SINGLE_STEP_EN; @@ -328,25 +373,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) dma_err: dev_err(tx_ring->dev, "DMA map error"); - do { - tx_swbd = &tx_ring->tx_swbd[i]; - enetc_free_tx_frame(tx_ring, tx_swbd); - if (i == 0) - i = tx_ring->bd_count; - i--; - } while (count--); + enetc_unwind_tx_frame(tx_ring, count, i); return 0; } -static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, - struct enetc_tx_swbd *tx_swbd, - union enetc_tx_bd *txbd, int *i, int hdr_len, - int data_len) +static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, + struct enetc_tx_swbd *tx_swbd, + union enetc_tx_bd *txbd, int *i, int hdr_len, + int data_len) { union enetc_tx_bd txbd_tmp; u8 flags = 0, e_flags = 0; dma_addr_t addr; + int count = 1; enetc_clear_tx_bd(&txbd_tmp); addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; @@ -389,7 +429,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, /* Write the BD */ txbd_tmp.ext.e_flags = e_flags; *txbd = txbd_tmp; + count++; } + + return count; } static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, @@ -521,9 +564,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb /* compute the csum over the L4 header */ csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos); - enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len); + count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, + &i, hdr_len, data_len); bd_data_num = 0; - count++; while (data_len > 0) { int size; @@ -547,8 +590,13 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd, tso.data, size, size == data_len); - if (err) + if (err) { + if (i == 0) + i = tx_ring->bd_count; + i--; + goto err_map_data; + } data_len -= size; count++; @@ -577,13 +625,7 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb dev_err(tx_ring->dev, "DMA map error"); err_chained_bd: - do { - tx_swbd = &tx_ring->tx_swbd[i]; - enetc_free_tx_frame(tx_ring, tx_swbd); - if (i == 0) - i = tx_ring->bd_count; - i--; - } while (count--); + enetc_unwind_tx_frame(tx_ring, count, i); return 0; } @@ -1623,7 +1665,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, enetc_xdp_drop(rx_ring, orig_i, i); tx_ring->stats.xdp_tx_drops++; } else { - tx_ring->stats.xdp_tx += xdp_tx_bd_cnt; + tx_ring->stats.xdp_tx++; rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; 
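The enetc one-step timestamp fix above rewrites the originTimestamp words in place and folds each old/new pair into the existing UDP checksum with inet_proto_csum_replace2()/inet_proto_csum_replace4() instead of recomputing the sum over the whole packet. A minimal userspace sketch of that incremental update (RFC 1624: HC' = ~(~HC + ~m + m')); the helper names here are illustrative, not kernel API:

#include <stdint.h>
#include <stdio.h>

/* Fold the end-around carries of a 32-bit one's-complement sum. */
static uint16_t csum_fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* RFC 1624 incremental update for one 16-bit word: HC' = ~(~HC + ~m + m'). */
static uint16_t csum_replace16(uint16_t check, uint16_t old, uint16_t new)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old;	/* subtract the old word, one's complement */
	sum += new;		/* add the new word */
	return (uint16_t)~csum_fold16(sum);
}

int main(void)
{
	uint16_t check = 0x1c46;		/* checksum covering the old word */
	uint16_t old = 0xa2b3, new = 0x5566;	/* word before/after the rewrite */

	printf("check 0x%04x -> 0x%04x\n", check, csum_replace16(check, old, new));
	return 0;
}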
xdp_tx_frm_cnt++; /* The XDP_TX enqueue was successful, so we @@ -2929,6 +2971,9 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) new_offloads |= ENETC_F_TX_TSTAMP; break; case HWTSTAMP_TX_ONESTEP_SYNC: + if (!enetc_si_is_pf(priv->si)) + return -EOPNOTSUPP; + new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP; break; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 2563eb8ac7b63..6a24324703bf4 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -843,6 +843,7 @@ static int enetc_set_coalesce(struct net_device *ndev, static int enetc_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info) { + struct enetc_ndev_priv *priv = netdev_priv(ndev); int *phc_idx; phc_idx = symbol_get(enetc_phc_index); @@ -863,8 +864,10 @@ static int enetc_get_ts_info(struct net_device *ndev, SOF_TIMESTAMPING_TX_SOFTWARE; info->tx_types = (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON) | - (1 << HWTSTAMP_TX_ONESTEP_SYNC); + (1 << HWTSTAMP_TX_ON); + + if (enetc_si_is_pf(priv->si)) + info->tx_types |= (1 << HWTSTAMP_TX_ONESTEP_SYNC); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_ALL); diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 95471cfcff420..8ddd366d9fde5 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -1110,6 +1110,16 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv) return gve_xdp_tx_queue_id(priv, 0); } +static inline bool gve_supports_xdp_xmit(struct gve_priv *priv) +{ + switch (priv->queue_format) { + case GVE_GQI_QPL_FORMAT: + return true; + default: + return false; + } +} + /* gqi napi handler defined in gve_main.c */ int gve_napi_poll(struct napi_struct *napi, int budget); diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index bdfc6e77b2af5..1f5db1096d4a4 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -392,7 +392,9 @@ gve_get_ethtool_stats(struct net_device *netdev, */ data[i++] = 0; data[i++] = 0; - data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head; + data[i++] = + (tx->dqo_tx.tail - tx->dqo_tx.head) & + tx->mask; } do { start = diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index f985a3cf2b11f..862c4575701fe 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1895,6 +1895,8 @@ static void gve_turndown(struct gve_priv *priv) /* Stop tx queues */ netif_tx_disable(priv->dev); + xdp_features_clear_redirect_target(priv->dev); + gve_clear_napi_enabled(priv); gve_clear_report_stats(priv); @@ -1955,6 +1957,9 @@ static void gve_turnup(struct gve_priv *priv) napi_schedule(&block->napi); } + if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv)) + xdp_features_set_redirect_target(priv->dev, false); + gve_set_napi_enabled(priv); } @@ -2229,7 +2234,6 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv) if (priv->queue_format == GVE_GQI_QPL_FORMAT) { xdp_features = NETDEV_XDP_ACT_BASIC; xdp_features |= NETDEV_XDP_ACT_REDIRECT; - xdp_features |= NETDEV_XDP_ACT_NDO_XMIT; xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY; } else { xdp_features = 0; diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index bab16c2191b2f..181af419b878d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -483,7 +483,7 @@ int hclge_ptp_init(struct hclge_dev *hdev) ret = hclge_ptp_get_cycle(hdev); if (ret) - return ret; + goto out; } ret = hclge_ptp_int_en(hdev, true); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index b619a3ec245b2..04192190bebab 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1802,18 +1802,22 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, long value = simple_strtol(buf, NULL, 10); long rc; + rtnl_lock(); + if (attr == &veth_active_attr) { if (value && !pool->active) { if (netif_running(netdev)) { if (ibmveth_alloc_buffer_pool(pool)) { netdev_err(netdev, "unable to alloc pool\n"); - return -ENOMEM; + rc = -ENOMEM; + goto unlock_err; } pool->active = 1; ibmveth_close(netdev); - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } else { pool->active = 1; } @@ -1833,48 +1837,59 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, if (i == IBMVETH_NUM_BUFF_POOLS) { netdev_err(netdev, "no active pool >= MTU\n"); - return -EPERM; + rc = -EPERM; + goto unlock_err; } if (netif_running(netdev)) { ibmveth_close(netdev); pool->active = 0; - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } pool->active = 0; } } else if (attr == &veth_num_attr) { if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) { - return -EINVAL; + rc = -EINVAL; + goto unlock_err; } else { if (netif_running(netdev)) { ibmveth_close(netdev); pool->size = value; - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } else { pool->size = value; } } } else if (attr == &veth_size_attr) { if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) { - return -EINVAL; + rc = -EINVAL; + goto unlock_err; } else { if (netif_running(netdev)) { ibmveth_close(netdev); pool->buff_size = value; - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } else { pool->buff_size = value; } } } + rtnl_unlock(); /* kick the interrupt handler to allocate/deallocate pools */ ibmveth_interrupt(netdev->irq, netdev); return count; + +unlock_err: + rtnl_unlock(); + return rc; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 97425c06e1ed7..61db00b2b33e4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2310,7 +2310,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, tx_buff = &tx_pool->tx_buff[index]; adapter->netdev->stats.tx_packets--; adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; - adapter->tx_stats_buffers[queue_num].packets--; + adapter->tx_stats_buffers[queue_num].batched_packets--; adapter->tx_stats_buffers[queue_num].bytes -= tx_buff->skb->len; dev_kfree_skb_any(tx_buff->skb); @@ -2402,11 +2402,13 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned int tx_map_failed = 0; union sub_crq indir_arr[16]; unsigned int tx_dropped = 0; - unsigned int tx_packets = 0; + unsigned int tx_dpackets = 0; + unsigned int tx_bpackets = 0; unsigned int tx_bytes = 0; 
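The ibmveth veth_pool_store() fix above takes the RTNL lock for the whole reconfiguration and funnels every failure through a single unlock label, so no error path can return with the lock still held. A compact sketch of that single-exit pattern, with a pthread mutex standing in for RTNL (names and values are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static int pool_active;

/* Validate and apply a setting under the lock; all errors exit via one label. */
static int pool_store(long value, int running)
{
	int rc = 0;

	pthread_mutex_lock(&cfg_lock);

	if (value < 0) {
		rc = -EINVAL;
		goto unlock;
	}
	if (running && !pool_active && value) {
		/* buffer allocation would happen here; pretend it failed */
		rc = -ENOMEM;
		goto unlock;
	}
	pool_active = value != 0;

unlock:
	pthread_mutex_unlock(&cfg_lock);
	return rc;
}

int main(void)
{
	printf("rc = %d\n", pool_store(-1, 1));	/* -> rc = -22 (EINVAL) */
	return 0;
}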
dma_addr_t data_dma_addr; struct netdev_queue *txq; unsigned long lpar_rc; + unsigned int skblen; union sub_crq tx_crq; unsigned int offset; bool use_scrq_send_direct = false; @@ -2521,6 +2523,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_buff->skb = skb; tx_buff->index = bufidx; tx_buff->pool_index = queue_num; + skblen = skb->len; memset(&tx_crq, 0, sizeof(tx_crq)); tx_crq.v1.first = IBMVNIC_CRQ_CMD; @@ -2575,6 +2578,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) if (lpar_rc != H_SUCCESS) goto tx_err; + tx_dpackets++; goto early_exit; } @@ -2603,6 +2607,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) goto tx_err; } + tx_bpackets++; + early_exit: if (atomic_add_return(num_entries, &tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) { @@ -2610,8 +2616,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) netif_stop_subqueue(netdev, queue_num); } - tx_packets++; - tx_bytes += skb->len; + tx_bytes += skblen; txq_trans_cond_update(txq); ret = NETDEV_TX_OK; goto out; @@ -2640,10 +2645,11 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) rcu_read_unlock(); netdev->stats.tx_dropped += tx_dropped; netdev->stats.tx_bytes += tx_bytes; - netdev->stats.tx_packets += tx_packets; + netdev->stats.tx_packets += tx_bpackets + tx_dpackets; adapter->tx_send_failed += tx_send_failed; adapter->tx_map_failed += tx_map_failed; - adapter->tx_stats_buffers[queue_num].packets += tx_packets; + adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets; + adapter->tx_stats_buffers[queue_num].direct_packets += tx_dpackets; adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; @@ -3808,7 +3814,10 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); for (i = 0; i < adapter->req_tx_queues; i++) { - snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); + snprintf(data, ETH_GSTRING_LEN, "tx%d_batched_packets", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "tx%d_direct_packets", i); data += ETH_GSTRING_LEN; snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); @@ -3873,7 +3882,9 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, (adapter, ibmvnic_stats[i].offset)); for (j = 0; j < adapter->req_tx_queues; j++) { - data[i] = adapter->tx_stats_buffers[j].packets; + data[i] = adapter->tx_stats_buffers[j].batched_packets; + i++; + data[i] = adapter->tx_stats_buffers[j].direct_packets; i++; data[i] = adapter->tx_stats_buffers[j].bytes; i++; diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 94ac36b1408be..a189038d88df0 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -213,7 +213,8 @@ struct ibmvnic_statistics { #define NUM_TX_STATS 3 struct ibmvnic_tx_queue_stats { - u64 packets; + u64 batched_packets; + u64 direct_packets; u64 bytes; u64 dropped_packets; }; diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 5e2cfa73f8891..8294a7c4f122c 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -803,4 +803,7 @@ /* SerDes Control */ #define E1000_GEN_POLL_TIMEOUT 640 +#define E1000_FEXTNVM12_PHYPD_CTRL_MASK 0x00C00000 +#define E1000_FEXTNVM12_PHYPD_CTRL_P1 0x00800000 + 
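The FEXTNVM12 definitions just added form the usual mask/value pair for a multi-bit register field: clear the field with the mask, then OR in the new state, as the K1 exit timeout rework later in this series does. A tiny self-contained illustration of the read-modify-write idiom (the variable standing in for the register is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PHYPD_CTRL_MASK	0x00C00000u	/* field occupies bits 23:22 */
#define PHYPD_CTRL_P1	0x00800000u	/* desired field state */

static uint32_t fake_fextnvm12 = 0x00400001u;	/* stand-in for er32()/ew32() */

int main(void)
{
	uint32_t val = fake_fextnvm12;	/* read */

	val &= ~PHYPD_CTRL_MASK;	/* clear the whole field */
	val |= PHYPD_CTRL_P1;		/* set the new value */
	fake_fextnvm12 = val;		/* write back */

	printf("FEXTNVM12 = 0x%08x\n", fake_fextnvm12);	/* -> 0x00800001 */
	return 0;
}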
#endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 2f9655cf5dd9e..364378133526a 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -285,6 +285,45 @@ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) } } +/** + * e1000_reconfigure_k1_exit_timeout - reconfigure K1 exit timeout to + * align to MTP and later platform requirements. + * @hw: pointer to the HW structure + * + * Context: PHY semaphore must be held by caller. + * Return: 0 on success, negative on failure + */ +static s32 e1000_reconfigure_k1_exit_timeout(struct e1000_hw *hw) +{ + u16 phy_timeout; + u32 fextnvm12; + s32 ret_val; + + if (hw->mac.type < e1000_pch_mtp) + return 0; + + /* Change Kumeran K1 power down state from P0s to P1 */ + fextnvm12 = er32(FEXTNVM12); + fextnvm12 &= ~E1000_FEXTNVM12_PHYPD_CTRL_MASK; + fextnvm12 |= E1000_FEXTNVM12_PHYPD_CTRL_P1; + ew32(FEXTNVM12, fextnvm12); + + /* Wait for the interface to settle */ + usleep_range(1000, 1100); + + /* Change K1 exit timeout */ + ret_val = e1e_rphy_locked(hw, I217_PHY_TIMEOUTS_REG, + &phy_timeout); + if (ret_val) + return ret_val; + + phy_timeout &= ~I217_PHY_TIMEOUTS_K1_EXIT_TO_MASK; + phy_timeout |= 0xF00; + + return e1e_wphy_locked(hw, I217_PHY_TIMEOUTS_REG, + phy_timeout); +} + /** + * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds * @hw: pointer to the HW structure @@ -327,15 +366,22 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) * LANPHYPC Value bit to force the interconnect to PCIe mode. */ switch (hw->mac.type) { + case e1000_pch_mtp: + case e1000_pch_lnp: + case e1000_pch_ptp: + case e1000_pch_nvp: + /* At this point the PHY might be inaccessible so don't + * propagate the failure + */ + if (e1000_reconfigure_k1_exit_timeout(hw)) + e_dbg("Failed to reconfigure K1 exit timeout\n"); + + fallthrough; case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: - case e1000_pch_mtp: - case e1000_pch_lnp: - case e1000_pch_ptp: - case e1000_pch_nvp: if (e1000_phy_is_accessible_pchlan(hw)) break; @@ -419,8 +465,20 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) * the PHY is in.
*/ ret_val = hw->phy.ops.check_reset_block(hw); - if (ret_val) + if (ret_val) { e_err("ME blocked access to PHY after reset\n"); + goto out; + } + + if (hw->mac.type >= e1000_pch_mtp) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_err("Failed to reconfigure K1 exit timeout\n"); + goto out; + } + ret_val = e1000_reconfigure_k1_exit_timeout(hw); + hw->phy.ops.release(hw); + } } out: @@ -4888,6 +4946,18 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) u16 i; e1000_initialize_hw_bits_ich8lan(hw); + if (hw->mac.type >= e1000_pch_mtp) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_reconfigure_k1_exit_timeout(hw); + hw->phy.ops.release(hw); + if (ret_val) { + e_dbg("Error failed to reconfigure K1 exit timeout\n"); + return ret_val; + } + } /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 2504b11c3169f..5feb589a9b5ff 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -219,6 +219,10 @@ #define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28) #define I217_PLL_CLOCK_GATE_MASK 0x07FF +/* PHY Timeouts */ +#define I217_PHY_TIMEOUTS_REG PHY_REG(770, 21) +#define I217_PHY_TIMEOUTS_K1_EXIT_TO_MASK 0x0FC0 + #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ /* Inband Control */ diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 558cda577191d..2960709f6b62c 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -207,6 +207,7 @@ enum ice_feature { ICE_F_GNSS, ICE_F_ROCE_LAG, ICE_F_SRIOV_LAG, + ICE_F_MBX_LIMIT, ICE_F_MAX }; diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index 7cee365cc7d16..405ddd17de1bf 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -511,7 +511,7 @@ void ice_init_arfs(struct ice_vsi *vsi) struct hlist_head *arfs_fltr_list; unsigned int i; - if (!vsi || vsi->type != ICE_VSI_PF) + if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi)) return; arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list), diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index fb527434b58b1..ed21d7f55ac11 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -38,8 +38,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) if (ice_vsi_add_vlan_zero(uplink_vsi)) goto err_vlan_zero; - if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true, - ICE_FLTR_RX)) + if (ice_set_dflt_vsi(uplink_vsi)) goto err_def_rx; if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true, @@ -50,9 +49,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) if (vlan_ops->dis_rx_filtering(uplink_vsi)) goto err_vlan_filtering; - if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) - goto err_override_uplink; - if (ice_vsi_update_local_lb(uplink_vsi, true)) goto err_override_local_lb; @@ -64,8 +60,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) err_up: ice_vsi_update_local_lb(uplink_vsi, false); err_override_local_lb: - ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); -err_override_uplink: vlan_ops->ena_rx_filtering(uplink_vsi); err_vlan_filtering: ice_cfg_dflt_vsi(uplink_vsi->port_info, 
uplink_vsi->idx, false, @@ -276,7 +270,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf) vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); ice_vsi_update_local_lb(uplink_vsi, false); - ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); vlan_ops->ena_rx_filtering(uplink_vsi); ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, ICE_FLTR_TX); diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 91cbae1eec89a..8d31bfe28cc88 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -539,5 +539,8 @@ #define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0) #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) +#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000 +#define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + (_VF) * 4) +#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + (_VF) * 4) #endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 1ccb572ce285d..22371011c2492 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -1000,6 +1000,28 @@ static void ice_lag_link(struct ice_lag *lag) netdev_info(lag->netdev, "Shared SR-IOV resources in bond are active\n"); } +/** + * ice_lag_config_eswitch - configure eswitch to work with LAG + * @lag: lag info struct + * @netdev: active network interface device struct + * + * Updates all port representors in eswitch to use @netdev for Tx. + * + * Configures the netdev to keep dst metadata (also used in representor Tx). + * This is required for an uplink without switchdev mode configured. + */ +static void ice_lag_config_eswitch(struct ice_lag *lag, + struct net_device *netdev) +{ + struct ice_repr *repr; + unsigned long id; + + xa_for_each(&lag->pf->eswitch.reprs, id, repr) + repr->dst->u.port_info.lower_dev = netdev; + + netif_keep_dst(netdev); +} + /** * ice_lag_unlink - handle unlink event * @lag: LAG info struct @@ -1021,6 +1043,9 @@ static void ice_lag_unlink(struct ice_lag *lag) ice_lag_move_vf_nodes(lag, act_port, pri_port); lag->primary = false; lag->active_port = ICE_LAG_INVALID_PORT; + + /* Config primary's eswitch back to normal operation. */ + ice_lag_config_eswitch(lag, lag->netdev); } else { struct ice_lag *primary_lag; @@ -1419,6 +1444,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr) ice_lag_move_vf_nodes(lag, prim_port, event_port); lag->active_port = event_port; + ice_lag_config_eswitch(lag, event_netdev); return; } @@ -1428,6 +1454,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr) /* new active port */ ice_lag_move_vf_nodes(lag, lag->active_port, event_port); lag->active_port = event_port; + ice_lag_config_eswitch(lag, event_netdev); } else { /* port not set as currently active (e.g. 
new active port * has already claimed the nodes and filters diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 06e712cdc3d9e..121a5ad5c8e10 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -3880,6 +3880,9 @@ void ice_init_feature_support(struct ice_pf *pf) default: break; } + + if (pf->hw.mac_type == ICE_MAC_E830) + ice_set_feature_support(pf, ICE_F_MBX_LIMIT); } /** @@ -3925,24 +3928,6 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx) ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); } -/** - * ice_vsi_ctx_set_allow_override - allow destination override on VSI - * @ctx: pointer to VSI ctx structure - */ -void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx) -{ - ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; -} - -/** - * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI - * @ctx: pointer to VSI ctx structure - */ -void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx) -{ - ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; -} - /** * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit * @vsi: pointer to VSI structure diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 1a6cfc8693ce4..2b27998fd1be3 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -106,10 +106,6 @@ ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)) void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx); void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx); - -void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx); - -void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx); int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set); int ice_vsi_add_vlan_zero(struct ice_vsi *vsi); int ice_vsi_del_vlan_zero(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 45eefe22fb5b7..ca707dfcb286e 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1546,12 +1546,20 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) ice_vf_lan_overflow_event(pf, &event); break; case ice_mbx_opc_send_msg_to_pf: - data.num_msg_proc = i; - data.num_pending_arq = pending; - data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries; - data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK; + if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) { + ice_vc_process_vf_msg(pf, &event, NULL); + ice_mbx_vf_dec_trig_e830(hw, &event); + } else { + u16 val = hw->mailboxq.num_rq_entries; + + data.max_num_msgs_mbx = val; + val = ICE_MBX_OVERFLOW_WATERMARK; + data.async_watermark_val = val; + data.num_msg_proc = i; + data.num_pending_arq = pending; - ice_vc_process_vf_msg(pf, &event, &data); + ice_vc_process_vf_msg(pf, &event, &data); + } break; case ice_aqc_opc_fw_logs_event: ice_get_fwlog_data(pf, &event); @@ -4082,7 +4090,11 @@ static int ice_init_pf(struct ice_pf *pf) mutex_init(&pf->vfs.table_lock); hash_init(pf->vfs.table); - ice_mbx_init_snapshot(&pf->hw); + if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) + wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH, + ICE_MBX_OVERFLOW_WATERMARK); + else + ice_mbx_init_snapshot(&pf->hw); xa_init(&pf->dyn_ports); xa_init(&pf->sf_nums); diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 
91cb393f616f2..8aabf7749aa5e 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf) hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) { hash_del_rcu(&vf->entry); + ice_deinitialize_vf_entry(vf); ice_put_vf(vf); } } @@ -193,9 +194,6 @@ void ice_free_vfs(struct ice_pf *pf) wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); } - /* clear malicious info since the VF is getting released */ - list_del(&vf->mbx_info.list_entry); - mutex_unlock(&vf->cfg_lock); } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index f12fb3a2b6ad9..f522dd42093a9 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -2424,7 +2424,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) ICE_TXD_CTX_QW1_CMD_S); ice_tstamp(tx_ring, skb, first, &offload); - if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF) + if ((ice_is_switchdev_running(vsi->back) || + ice_lag_is_switchdev_running(vsi->back)) && + vsi->type != ICE_VSI_SF) ice_eswitch_set_target_vsi(skb, &offload); if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 8c434689e3f78..815ad0bfe8326 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -716,6 +716,23 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) return 0; } +/** + * ice_reset_vf_mbx_cnt - reset VF mailbox message count + * @vf: pointer to the VF structure + * + * This function clears the VF mailbox message count, and should be called on + * VF reset. 
+ */ +static void ice_reset_vf_mbx_cnt(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + + if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) + ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id); + else + ice_mbx_clear_malvf(&vf->mbx_info); +} + /** * ice_reset_all_vfs - reset all allocated VFs in one go * @pf: pointer to the PF structure @@ -742,7 +759,7 @@ void ice_reset_all_vfs(struct ice_pf *pf) /* clear all malicious info if the VFs are getting reset */ ice_for_each_vf(pf, bkt, vf) - ice_mbx_clear_malvf(&vf->mbx_info); + ice_reset_vf_mbx_cnt(vf); /* If VFs have been disabled, there is no need to reset */ if (test_and_set_bit(ICE_VF_DIS, pf->state)) { @@ -958,7 +975,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) ice_eswitch_update_repr(&vf->repr_id, vsi); /* if the VF has been reset allow it to come up again */ - ice_mbx_clear_malvf(&vf->mbx_info); + ice_reset_vf_mbx_cnt(vf); out_unlock: if (lag && lag->bonded && lag->primary && @@ -1011,11 +1028,22 @@ void ice_initialize_vf_entry(struct ice_vf *vf) ice_vf_fdir_init(vf); /* Initialize mailbox info for this VF */ - ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info); + if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) + ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id); + else + ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info); mutex_init(&vf->cfg_lock); } +void ice_deinitialize_vf_entry(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + + if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) + list_del(&vf->mbx_info.list_entry); +} + /** * ice_dis_vf_qs - Disable the VF queues * @vf: pointer to the VF structure diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h index 0c7e77c0a09fa..5392b04049862 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h @@ -24,6 +24,7 @@ #endif void ice_initialize_vf_entry(struct ice_vf *vf); +void ice_deinitialize_vf_entry(struct ice_vf *vf); void ice_dis_vf_qs(struct ice_vf *vf); int ice_check_vf_init(struct ice_vf *vf); enum virtchnl_status_code ice_err_to_virt_err(int err); diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c index 40cb4ba0789ce..75c8113e58ee9 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c @@ -210,6 +210,38 @@ ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info, return 0; } +/** + * ice_mbx_vf_dec_trig_e830 - Decrements the VF mailbox queue counter + * @hw: pointer to the HW struct + * @event: pointer to the control queue receive event + * + * This function triggers a decrement of the counter + * MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT when the driver replenishes + * the buffers at the PF mailbox queue. + */ +void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw, + const struct ice_rq_event_info *event) +{ + u16 vfid = le16_to_cpu(event->desc.retval); + + wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1); +} + +/** + * ice_mbx_vf_clear_cnt_e830 - Clear the VF mailbox queue count + * @hw: pointer to the HW struct + * @vf_id: VF ID in the PF space + * + * This function clears the counter MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT, and should + * be called when a VF is created and on VF reset.
+ */ +void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id) +{ + u32 reg = rd32(hw, E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(vf_id)); + + wr32(hw, E830_MBX_VF_DEC_TRIG(vf_id), reg); +} + /** * ice_mbx_vf_state_handler - Handle states of the overflow algorithm * @hw: pointer to the HW struct diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h index 44bc030d17e07..684de89e5c5ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h @@ -19,6 +19,9 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct ice_sq_cd *cd); u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed); +void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw, + const struct ice_rq_event_info *event); +void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id); int ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data, struct ice_mbx_vf_info *vf_info, bool *report_malvf); @@ -47,5 +50,11 @@ static inline void ice_mbx_init_snapshot(struct ice_hw *hw) { } +static inline void +ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw, + const struct ice_rq_event_info *event) +{ +} + #endif /* CONFIG_PCI_IOV */ #endif /* _ICE_VF_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index b6ec01f6fa73e..c8c1d48ff793d 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -4008,8 +4008,10 @@ ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata) * @event: pointer to the AQ event * @mbxdata: information used to detect VF attempting mailbox overflow * - * called from the common asq/arq handler to - * process request from VF + * Called from the common asq/arq handler to process request from VF. When this + * flow is used for devices with hardware VF to PF message queue overflow + * support (ICE_F_MBX_LIMIT) mbxdata is set to NULL and ice_is_malicious_vf + * check is skipped. */ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event, struct ice_mbx_data *mbxdata) @@ -4035,7 +4037,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event, mutex_lock(&vf->cfg_lock); /* Check if the VF is trying to overflow the mailbox */ - if (ice_is_malicious_vf(vf, mbxdata)) + if (mbxdata && ice_is_malicious_vf(vf, mbxdata)) goto finish; /* Check if VF is disabled. 
*/ diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index dfd56fc5ff655..7557bb6694c09 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -87,7 +87,11 @@ static void idpf_remove(struct pci_dev *pdev) */ static void idpf_shutdown(struct pci_dev *pdev) { - idpf_remove(pdev); + struct idpf_adapter *adapter = pci_get_drvdata(pdev); + + cancel_delayed_work_sync(&adapter->vc_event_task); + idpf_vc_core_deinit(adapter); + idpf_deinit_dflt_mbx(adapter); if (system_state == SYSTEM_POWER_OFF) pci_set_power_state(pdev, PCI_D3hot); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 1e0d1f9b07fbc..afc902ae4763e 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -3013,7 +3013,6 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, skb_shinfo(skb)->gso_size = rsc_seg_len; skb_reset_network_header(skb); - len = skb->len - skb_transport_offset(skb); if (ipv4) { struct iphdr *ipv4h = ip_hdr(skb); @@ -3022,6 +3021,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, /* Reset and set transport header offset in skb */ skb_set_transport_header(skb, sizeof(struct iphdr)); + len = skb->len - skb_transport_offset(skb); /* Compute the TCP pseudo header checksum*/ tcp_hdr(skb)->check = @@ -3031,6 +3031,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + len = skb->len - skb_transport_offset(skb); tcp_hdr(skb)->check = ~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0); } diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index f0537826f8403..9c1fe84108ed2 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -438,7 +438,8 @@ struct idpf_q_vector { __cacheline_group_end_aligned(cold); }; libeth_cacheline_set_assert(struct idpf_q_vector, 112, - 424 + 2 * sizeof(struct dim), + 24 + sizeof(struct napi_struct) + + 2 * sizeof(struct dim), 8 + sizeof(cpumask_var_t)); struct idpf_rx_queue_stats { diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index eac0f966e0e4c..323db1e2be388 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -319,6 +319,7 @@ struct igc_adapter { struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ ktime_t ptp_reset_start; /* Reset time in clock mono */ struct system_time_snapshot snapshot; + struct mutex ptm_lock; /* Only allow one PTM transaction at a time */ char fw_version[32]; diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 8e449904aa7db..d19325b0e6e0b 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -574,7 +574,10 @@ #define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2) #define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) -#define IGC_PTM_SHORT_CYC_DEFAULT 1 /* Default short cycle interval */ +/* A short cycle time of 1us theoretically should work, but appears to be too + * short in practice. 
+ */ +#define IGC_PTM_SHORT_CYC_DEFAULT 4 /* Default short cycle interval */ #define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */ #define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */ @@ -593,6 +596,7 @@ #define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */ #define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */ #define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */ +#define IGC_PTM_STAT_ALL GENMASK(5, 0) /* Used to clear all status */ /* PCIe PTM Cycle Control */ #define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 1ec9e8cc99d94..082b0baf5d37c 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -7173,6 +7173,7 @@ static int igc_probe(struct pci_dev *pdev, err_register: igc_release_hw_control(adapter); + igc_ptp_stop(adapter); err_eeprom: if (!igc_check_reset_block(hw)) igc_reset_phy(hw); diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 946edbad43022..612ed26a29c5d 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -974,45 +974,62 @@ static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat) } } +/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_trigger() */ +static void igc_ptm_trigger(struct igc_hw *hw) +{ + u32 ctrl; + + /* To "manually" start the PTM cycle we need to set the + * trigger (TRIG) bit + */ + ctrl = rd32(IGC_PTM_CTRL); + ctrl |= IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + /* Perform flush after write to CTRL register otherwise + * transaction may not start + */ + wrfl(); +} + +/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_reset() */ +static void igc_ptm_reset(struct igc_hw *hw) +{ + u32 ctrl; + + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + /* Write to clear all status */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_ALL); +} + static int igc_phc_get_syncdevicetime(ktime_t *device, struct system_counterval_t *system, void *ctx) { - u32 stat, t2_curr_h, t2_curr_l, ctrl; struct igc_adapter *adapter = ctx; struct igc_hw *hw = &adapter->hw; + u32 stat, t2_curr_h, t2_curr_l; int err, count = 100; ktime_t t1, t2_curr; - /* Get a snapshot of system clocks to use as historic value. */ - ktime_get_snapshot(&adapter->snapshot); - + /* Doing this in a loop because in the event of a + * badly timed (ha!) system clock adjustment, we may + * get PTM errors from the PCI root, but these errors + * are transitory. Repeating the process returns valid + * data eventually. + */ do { - /* Doing this in a loop because in the event of a - * badly timed (ha!) system clock adjustment, we may - * get PTM errors from the PCI root, but these errors - * are transitory. Repeating the process returns valid - * data eventually. - */ + /* Get a snapshot of system clocks to use as historic value. */ + ktime_get_snapshot(&adapter->snapshot); - /* To "manually" start the PTM cycle we need to clear and - * then set again the TRIG bit. - */ - ctrl = rd32(IGC_PTM_CTRL); - ctrl &= ~IGC_PTM_CTRL_TRIG; - wr32(IGC_PTM_CTRL, ctrl); - ctrl |= IGC_PTM_CTRL_TRIG; - wr32(IGC_PTM_CTRL, ctrl); - - /* The cycle only starts "for real" when software notifies - * that it has read the registers, this is done by setting - * VALID bit. 
- */ - wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + igc_ptm_trigger(hw); err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat, stat, IGC_PTM_STAT_SLEEP, IGC_PTM_STAT_TIMEOUT); + igc_ptm_reset(hw); + if (err < 0) { netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); return err; @@ -1021,15 +1038,7 @@ static int igc_phc_get_syncdevicetime(ktime_t *device, if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID) break; - if (stat & ~IGC_PTM_STAT_VALID) { - /* An error occurred, log it. */ - igc_ptm_log_error(adapter, stat); - /* The STAT register is write-1-to-clear (W1C), - * so write the previous error status to clear it. - */ - wr32(IGC_PTM_STAT, stat); - continue; - } + igc_ptm_log_error(adapter, stat); } while (--count); if (!count) { @@ -1061,9 +1070,16 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, { struct igc_adapter *adapter = container_of(ptp, struct igc_adapter, ptp_caps); + int ret; + + /* This blocks until any in progress PTM transactions complete */ + mutex_lock(&adapter->ptm_lock); - return get_device_system_crosststamp(igc_phc_get_syncdevicetime, - adapter, &adapter->snapshot, cts); + ret = get_device_system_crosststamp(igc_phc_get_syncdevicetime, + adapter, &adapter->snapshot, cts); + mutex_unlock(&adapter->ptm_lock); + + return ret; } static int igc_ptp_getcyclesx64(struct ptp_clock_info *ptp, @@ -1162,6 +1178,7 @@ void igc_ptp_init(struct igc_adapter *adapter) spin_lock_init(&adapter->ptp_tx_lock); spin_lock_init(&adapter->free_timer_lock); spin_lock_init(&adapter->tmreg_lock); + mutex_init(&adapter->ptm_lock); adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; @@ -1174,6 +1191,7 @@ void igc_ptp_init(struct igc_adapter *adapter) if (IS_ERR(adapter->ptp_clock)) { adapter->ptp_clock = NULL; netdev_err(netdev, "ptp_clock_register failed\n"); + mutex_destroy(&adapter->ptm_lock); } else if (adapter->ptp_clock) { netdev_info(netdev, "PHC added\n"); adapter->ptp_flags |= IGC_PTP_ENABLED; @@ -1203,10 +1221,12 @@ static void igc_ptm_stop(struct igc_adapter *adapter) struct igc_hw *hw = &adapter->hw; u32 ctrl; + mutex_lock(&adapter->ptm_lock); ctrl = rd32(IGC_PTM_CTRL); ctrl &= ~IGC_PTM_CTRL_EN; wr32(IGC_PTM_CTRL, ctrl); + mutex_unlock(&adapter->ptm_lock); } /** @@ -1237,13 +1257,18 @@ void igc_ptp_suspend(struct igc_adapter *adapter) **/ void igc_ptp_stop(struct igc_adapter *adapter) { + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + igc_ptp_suspend(adapter); + adapter->ptp_flags &= ~IGC_PTP_ENABLED; if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); netdev_info(adapter->netdev, "PHC removed\n"); adapter->ptp_flags &= ~IGC_PTP_ENABLED; } + mutex_destroy(&adapter->ptm_lock); } /** @@ -1255,10 +1280,13 @@ void igc_ptp_stop(struct igc_adapter *adapter) void igc_ptp_reset(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; - u32 cycle_ctrl, ctrl; + u32 cycle_ctrl, ctrl, stat; unsigned long flags; u32 timadj; + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + /* reset the tstamp_config */ igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); @@ -1280,6 +1308,7 @@ void igc_ptp_reset(struct igc_adapter *adapter) if (!igc_is_crosststamp_supported(adapter)) break; + mutex_lock(&adapter->ptm_lock); wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT); wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT); @@ -1290,14 +1319,20 @@ void igc_ptp_reset(struct igc_adapter *adapter) ctrl = IGC_PTM_CTRL_EN | IGC_PTM_CTRL_START_NOW | 
IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) | - IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) | - IGC_PTM_CTRL_TRIG; + IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT); wr32(IGC_PTM_CTRL, ctrl); /* Force the first cycle to run. */ - wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + igc_ptm_trigger(hw); + + if (readx_poll_timeout_atomic(rd32, IGC_PTM_STAT, stat, + stat, IGC_PTM_STAT_SLEEP, + IGC_PTM_STAT_TIMEOUT)) + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + igc_ptm_reset(hw); + mutex_unlock(&adapter->ptm_lock); break; default: /* No work to do. */ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 9e02e4367bec8..9bd3d76b5fe2a 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1108,6 +1108,9 @@ struct mvpp2 { /* Spinlocks for CM3 shared memory configuration */ spinlock_t mss_spinlock; + + /* Spinlock for shared PRS parser memory and shadow table */ + spinlock_t prs_spinlock; }; struct mvpp2_pcpu_stats { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 1641791a2d5b4..8ed83fb988624 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = { MVPP2_PRS_RI_VLAN_MASK), /* Non IP flow, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG, - MVPP22_CLS_HEK_OPT_VLAN, + MVPP22_CLS_HEK_TAGGED, 0, 0), }; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 3880dcc0418b2..66b5a80c9c28a 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -7640,8 +7640,9 @@ static int mvpp2_probe(struct platform_device *pdev) if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23) priv->hw_version = MVPP23; - /* Init mss lock */ + /* Init locks for shared packet processor resources */ spin_lock_init(&priv->mss_spinlock); + spin_lock_init(&priv->prs_spinlock); /* Initialize network controller */ err = mvpp2_init(pdev, priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 9af22f497a40f..93e978bdf303c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -23,6 +23,8 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) { int i; + lockdep_assert_held(&priv->prs_spinlock); + if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; @@ -43,11 +45,13 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) } /* Initialize tcam entry from hw */ -int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, - int tid) +static int __mvpp2_prs_init_from_hw(struct mvpp2 *priv, + struct mvpp2_prs_entry *pe, int tid) { int i; + lockdep_assert_held(&priv->prs_spinlock); + if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; @@ -73,6 +77,18 @@ int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, return 0; } +int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid) +{ + int err; + + spin_lock_bh(&priv->prs_spinlock); + err = __mvpp2_prs_init_from_hw(priv, pe, tid); + spin_unlock_bh(&priv->prs_spinlock); + + return err; +} + /* Invalidate tcam hw entry */ static void mvpp2_prs_hw_inv(struct mvpp2 *priv, 
int index) { @@ -374,7 +390,7 @@ static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); bits = mvpp2_prs_sram_ai_get(&pe); /* Sram store classification lookup ID in AI bits [5:0] */ @@ -441,7 +457,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); + __mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -469,14 +485,17 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) } /* Set port to unicast or multicast promiscuous mode */ -void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, - enum mvpp2_prs_l2_cast l2_cast, bool add) +static void __mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, + bool add) { struct mvpp2_prs_entry pe; unsigned char cast_match; unsigned int ri; int tid; + lockdep_assert_held(&priv->prs_spinlock); + if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { cast_match = MVPP2_PRS_UCAST_VAL; tid = MVPP2_PE_MAC_UC_PROMISCUOUS; @@ -489,7 +508,7 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, /* promiscuous mode - Accept unknown unicast or multicast packets */ if (priv->prs_shadow[tid].valid) { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); @@ -522,6 +541,14 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, mvpp2_prs_hw_write(priv, &pe); } +void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add) +{ + spin_lock_bh(&priv->prs_spinlock); + __mvpp2_prs_mac_promisc_set(priv, port, l2_cast, add); + spin_unlock_bh(&priv->prs_spinlock); +} + /* Set entry for dsa packets */ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, bool tagged, bool extend) @@ -539,7 +566,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -610,7 +637,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -673,7 +700,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid); if (!match) continue; @@ -726,7 +753,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + __mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == MVPP2_PRS_RI_VLAN_DOUBLE) @@ -760,7 +787,7 @@ static int 
mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ mvpp2_prs_tcam_port_map_set(&pe, port_map); @@ -800,7 +827,7 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) && mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2); @@ -849,7 +876,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + __mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || @@ -880,7 +907,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ @@ -1213,8 +1240,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv) /* Create dummy entries for drop all and promiscuous modes */ mvpp2_prs_drop_fc(priv); mvpp2_prs_mac_drop_all_set(priv, 0, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); + __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); + __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); } /* Set default entries for various types of dsa packets */ @@ -1533,12 +1560,6 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) struct mvpp2_prs_entry pe; int err; - priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), - MVPP2_PRS_DBL_VLANS_MAX, - GFP_KERNEL); - if (!priv->prs_double_vlans) - return -ENOMEM; - /* Double VLAN: 0x88A8, 0x8100 */ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q, MVPP2_PRS_PORT_MASK); @@ -1941,7 +1962,7 @@ static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask) port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) continue; - mvpp2_prs_init_from_hw(port->priv, &pe, tid); + __mvpp2_prs_init_from_hw(port->priv, &pe, tid); mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); @@ -1970,6 +1991,8 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + /* Scan TCAM and see if entry with this already exist */ tid = mvpp2_prs_vid_range_find(port, vid, mask); @@ -1988,8 +2011,10 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) MVPP2_PRS_VLAN_FILT_MAX_ENTRY); /* There isn't room for a new VID filter */ - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&priv->prs_spinlock); return tid; + } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); pe.index = tid; @@ -1997,7 +2022,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Enable the current port */ @@ -2019,6 +2044,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) 
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); + spin_unlock_bh(&priv->prs_spinlock); return 0; } @@ -2028,15 +2054,16 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) struct mvpp2 *priv = port->priv; int tid; - /* Scan TCAM and see if entry with this already exist */ - tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); + spin_lock_bh(&priv->prs_spinlock); - /* No such entry */ - if (tid < 0) - return; + /* Invalidate TCAM entry with this , if it exists */ + tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); + if (tid >= 0) { + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; + } - mvpp2_prs_hw_inv(priv, tid); - priv->prs_shadow[tid].valid = false; + spin_unlock_bh(&priv->prs_spinlock); } /* Remove all existing VID filters on this port */ @@ -2045,6 +2072,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) struct mvpp2 *priv = port->priv; int tid; + spin_lock_bh(&priv->prs_spinlock); + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { if (priv->prs_shadow[tid].valid) { @@ -2052,6 +2081,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) priv->prs_shadow[tid].valid = false; } } + + spin_unlock_bh(&priv->prs_spinlock); } /* Remove VID filering entry for this port */ @@ -2060,10 +2091,14 @@ void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); struct mvpp2 *priv = port->priv; + spin_lock_bh(&priv->prs_spinlock); + /* Invalidate the guard entry */ mvpp2_prs_hw_inv(priv, tid); priv->prs_shadow[tid].valid = false; + + spin_unlock_bh(&priv->prs_spinlock); } /* Add guard entry that drops packets when no VID is matched on this port */ @@ -2079,6 +2114,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + pe.index = tid; reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); @@ -2111,6 +2148,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); + + spin_unlock_bh(&priv->prs_spinlock); } /* Parser default initialization */ @@ -2118,6 +2157,20 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) { int err, index, i; + priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, + sizeof(*priv->prs_shadow), + GFP_KERNEL); + if (!priv->prs_shadow) + return -ENOMEM; + + priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), + MVPP2_PRS_DBL_VLANS_MAX, + GFP_KERNEL); + if (!priv->prs_double_vlans) + return -ENOMEM; + + spin_lock_bh(&priv->prs_spinlock); + /* Enable tcam table */ mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); @@ -2136,12 +2189,6 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) mvpp2_prs_hw_inv(priv, index); - priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, - sizeof(*priv->prs_shadow), - GFP_KERNEL); - if (!priv->prs_shadow) - return -ENOMEM; - /* Always start from lookup = 0 */ for (index = 0; index < MVPP2_MAX_PORTS; index++) mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, @@ -2158,26 +2205,13 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) mvpp2_prs_vid_init(priv); err = mvpp2_prs_etype_init(priv); - if (err) - return err; - - err = mvpp2_prs_vlan_init(pdev, priv); - if (err) - 
return err; - - err = mvpp2_prs_pppoe_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip6_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip4_init(priv); - if (err) - return err; + err = err ? : mvpp2_prs_vlan_init(pdev, priv); + err = err ? : mvpp2_prs_pppoe_init(priv); + err = err ? : mvpp2_prs_ip6_init(priv); + err = err ? : mvpp2_prs_ip4_init(priv); - return 0; + spin_unlock_bh(&priv->prs_spinlock); + return err; } /* Compare MAC DA with tcam entry data */ @@ -2217,7 +2251,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, (priv->prs_shadow[tid].udf != udf_type)) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); if (mvpp2_prs_mac_range_equals(&pe, da, mask) && @@ -2229,7 +2263,8 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, } /* Update parser's mac da entry */ -int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +static int __mvpp2_prs_mac_da_accept(struct mvpp2_port *port, + const u8 *da, bool add) { unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; struct mvpp2 *priv = port->priv; @@ -2261,7 +2296,7 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); @@ -2317,6 +2352,17 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) return 0; } +int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +{ + int err; + + spin_lock_bh(&port->priv->prs_spinlock); + err = __mvpp2_prs_mac_da_accept(port, da, add); + spin_unlock_bh(&port->priv->prs_spinlock); + + return err; +} + int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) { struct mvpp2_port *port = netdev_priv(dev); @@ -2345,6 +2391,8 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) unsigned long pmap; int index, tid; + spin_lock_bh(&priv->prs_spinlock); + for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END; tid++) { unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; @@ -2354,7 +2402,7 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); pmap = mvpp2_prs_tcam_port_map_get(&pe); @@ -2375,14 +2423,17 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) continue; /* Remove entry from TCAM */ - mvpp2_prs_mac_da_accept(port, da, false); + __mvpp2_prs_mac_da_accept(port, da, false); } + + spin_unlock_bh(&priv->prs_spinlock); } int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) { switch (type) { case MVPP2_TAG_TYPE_EDSA: + spin_lock_bh(&priv->prs_spinlock); /* Add port to EDSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); @@ -2393,9 +2444,11 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + spin_unlock_bh(&priv->prs_spinlock); break; case MVPP2_TAG_TYPE_DSA: + spin_lock_bh(&priv->prs_spinlock); /* Add port to DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); @@ -2406,10 +2459,12 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, 
MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + spin_unlock_bh(&priv->prs_spinlock); break; case MVPP2_TAG_TYPE_MH: case MVPP2_TAG_TYPE_NONE: + spin_lock_bh(&priv->prs_spinlock); /* Remove port form EDSA and DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); @@ -2419,6 +2474,7 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + spin_unlock_bh(&priv->prs_spinlock); break; default: @@ -2437,11 +2493,15 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&priv->prs_spinlock); return tid; + } pe.index = tid; @@ -2461,6 +2521,7 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); mvpp2_prs_hw_write(priv, &pe); + spin_unlock_bh(&priv->prs_spinlock); return 0; } @@ -2472,6 +2533,8 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&port->priv->prs_spinlock); + tid = mvpp2_prs_flow_find(port->priv, port->id); /* Such entry not exist */ @@ -2480,8 +2543,10 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) tid = mvpp2_prs_tcam_first_free(port->priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&port->priv->prs_spinlock); return tid; + } pe.index = tid; @@ -2492,13 +2557,14 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) /* Update shadow table */ mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); } else { - mvpp2_prs_init_from_hw(port->priv, &pe, tid); + __mvpp2_prs_init_from_hw(port->priv, &pe, tid); } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); mvpp2_prs_hw_write(port->priv, &pe); + spin_unlock_bh(&port->priv->prs_spinlock); return 0; } @@ -2509,11 +2575,14 @@ int mvpp2_prs_hits(struct mvpp2 *priv, int index) if (index > MVPP2_PRS_TCAM_SRAM_SIZE) return -EINVAL; + spin_lock_bh(&priv->prs_spinlock); + mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index); val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG); val &= MVPP2_PRS_TCAM_HIT_CNT_MASK; + spin_unlock_bh(&priv->prs_spinlock); return val; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index cd0d7b7774f1a..6575c422635b7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2634,7 +2634,7 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr); rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr); - vfs -= 64; + vfs = 64; } intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0)); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 7498ab429963d..06f778baaeef2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -207,7 +207,7 @@ static void rvu_nix_unregister_interrupts(struct rvu *rvu) rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false; } - for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++) + for (i = 
NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++) if (rvu->irq_allocated[offs + i]) { free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl); rvu->irq_allocated[offs + i] = false; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c index 0f844c14485a0..35acc07bd9648 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c @@ -165,6 +165,11 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf, otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); } else if (level == NIX_TXSCH_LVL_TL2) { + /* configure parent txschq */ + cfg->reg[num_regs] = NIX_AF_TL2X_PARENT(node->schq); + cfg->regval[num_regs] = (u64)hw->tx_link << 16; + num_regs++; + /* configure link cfg */ if (level == pfvf->qos.link_cfg_lvl) { cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index ed7313c10a052..d408dcda76d79 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -734,7 +734,7 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, case SPEED_100: val |= MTK_QTX_SCH_MAX_RATE_EN | FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) | - FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3); + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) | FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1); break; case SPEED_1000: @@ -757,13 +757,13 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, case SPEED_100: val |= MTK_QTX_SCH_MAX_RATE_EN | FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) | - FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5); + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) | FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1); break; case SPEED_1000: val |= MTK_QTX_SCH_MAX_RATE_EN | - FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) | - FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 6) | FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10); break; default: @@ -823,9 +823,25 @@ static const struct phylink_mac_ops mtk_phylink_ops = { .mac_link_up = mtk_mac_link_up, }; +static void mtk_mdio_config(struct mtk_eth *eth) +{ + u32 val; + + /* Configure MDC Divider */ + val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider); + + /* Configure MDC Turbo Mode */ + if (mtk_is_netsys_v3_or_greater(eth)) + mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3); + else + val |= PPSC_MDC_TURBO; + + mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC); +} + static int mtk_mdio_init(struct mtk_eth *eth) { - unsigned int max_clk = 2500000, divider; + unsigned int max_clk = 2500000; struct device_node *mii_np; int ret; u32 val; @@ -865,20 +881,9 @@ static int mtk_mdio_init(struct mtk_eth *eth) } max_clk = val; } - divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63); - - /* Configure MDC Turbo Mode */ - if (mtk_is_netsys_v3_or_greater(eth)) - mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3); - - /* Configure MDC Divider */ - val = FIELD_PREP(PPSC_MDC_CFG, divider); - if (!mtk_is_netsys_v3_or_greater(eth)) - val |= PPSC_MDC_TURBO; - mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC); - - dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider); - + eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63); + mtk_mdio_config(eth); + dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider); ret = of_mdiobus_register(eth->mii_bus, mii_np); err_put_node: @@ 
-3269,7 +3274,7 @@ static int mtk_start_dma(struct mtk_eth *eth) if (mtk_is_netsys_v2_or_greater(eth)) val |= MTK_MUTLI_CNT | MTK_RESV_BUF | MTK_WCOMP_EN | MTK_DMAD_WR_WDONE | - MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN; + MTK_CHK_DDONE_EN; else val |= MTK_RX_BT_32DWORDS; mtk_w32(eth, val, reg_map->qdma.glo_cfg); @@ -3928,6 +3933,10 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset) else mtk_hw_reset(eth); + /* No MT7628/88 support yet */ + if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + mtk_mdio_config(eth); + if (mtk_is_netsys_v3_or_greater(eth)) { /* Set FE to PDMAv2 if necessary */ val = mtk_r32(eth, MTK_FE_GLO_MISC); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 0d5225f1d3eef..8d7b6818d8601 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -1260,6 +1260,7 @@ struct mtk_eth { struct clk *clks[MTK_CLK_MAX]; struct mii_bus *mii_bus; + unsigned int mdc_divider; struct work_struct pending_work; unsigned long state; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 98d4306929f3e..a2cf3e79693dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -46,6 +46,9 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, u32 running_fw, stored_fw; int err; + if (!mlx5_core_is_pf(dev)) + return 0; + err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 64b62ed17b07a..31eb99f09c63c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -423,7 +423,7 @@ u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * - PAGE_SIZE; + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE; return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu)); } @@ -827,7 +827,8 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) { - int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE; + int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE; u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk)); int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); @@ -1036,7 +1037,8 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_rq_param *rq_param) { - int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE; + int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE; u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL)); int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index 5d128c5b4529a..0f5d7ea8956f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -48,15 +48,10 @@ mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw) struct list_head *iter; netdev_for_each_lower_dev(dev, lower, iter) { - struct mlx5_core_dev *mdev; - struct mlx5e_priv *priv; - if (!mlx5e_eswitch_rep(lower)) continue; - priv = netdev_priv(lower); - mdev = priv->mdev; - if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw)) + if (mlx5_esw_bridge_dev_same_esw(lower, esw)) return lower; } @@ -125,7 +120,7 @@ static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device * priv = netdev_priv(rep); mdev = priv->mdev; if (netif_is_lag_master(dev)) - return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev); + return mlx5_lag_is_master(mdev); return true; } @@ -455,6 +450,9 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb, if (!rep) return NOTIFY_DONE; + if (netif_is_lag_master(dev) && !mlx5_lag_is_shared_fdb(esw->dev)) + return NOTIFY_DONE; + switch (event) { case SWITCHDEV_FDB_ADD_TO_BRIDGE: fdb_info = container_of(info, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 62b8a7c1c6b54..1c087fa1ca269 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -5099,11 +5099,9 @@ static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; u8 mode, setting; - int err; - err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting); - if (err) - return err; + if (mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting)) + return -EOPNOTSUPP; mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB; return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 68cb86b37e561..4241cf07a0306 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -887,8 +887,8 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx) static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx) { + struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev); struct mlx5_eq_table *table = dev->priv.eq_table; - struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev); struct irq_affinity_desc af_desc = {}; struct mlx5_irq *irq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c index 1477db7f5307e..2691d88cdee1f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c @@ -175,7 +175,7 @@ mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool, void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq) { - struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev); + struct mlx5_irq_pool *pool = mlx5_irq_get_pool(irq); int cpu; cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 7f68468c2e759..4b3da7ebd6310 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -859,7 +859,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev) mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); } 
-static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) +bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev; int i; @@ -937,7 +937,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) } if (do_bond && !__mlx5_lag_is_active(ldev)) { - bool shared_fdb = mlx5_shared_fdb_supported(ldev); + bool shared_fdb = mlx5_lag_shared_fdb_supported(ldev); roce_lag = mlx5_lag_is_roce_lag(ldev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 50fcb1eee5748..48a5f3e7b91a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -92,6 +92,7 @@ mlx5_lag_is_ready(struct mlx5_lag *ldev) return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags); } +bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev); bool mlx5_lag_check_prereq(struct mlx5_lag *ldev); void mlx5_modify_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c index 571ea26edd0ca..2381a0eec1900 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c @@ -81,7 +81,8 @@ static int enable_mpesw(struct mlx5_lag *ldev) if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS || !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) || !MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) || - !mlx5_lag_check_prereq(ldev)) + !mlx5_lag_check_prereq(ldev) || + !mlx5_lag_shared_fdb_supported(ldev)) return -EOPNOTSUPP; err = mlx5_mpesw_metadata_set(ldev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c index a80ecb672f33d..711d14dea2485 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c @@ -196,6 +196,11 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains, ns = mlx5_get_flow_namespace(chains->dev, chains->ns); } + if (!ns) { + mlx5_core_warn(chains->dev, "Failed to get flow namespace\n"); + return ERR_PTR(-EOPNOTSUPP); + } + ft_attr.autogroup.num_reserved_entries = 2; ft_attr.autogroup.max_num_groups = chains->group_num; ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h index 0881e961d8b17..586688da9940e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h @@ -10,12 +10,15 @@ struct mlx5_irq; struct cpu_rmap; +struct mlx5_irq_pool; int mlx5_irq_table_init(struct mlx5_core_dev *dev); void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev); int mlx5_irq_table_create(struct mlx5_core_dev *dev); void mlx5_irq_table_destroy(struct mlx5_core_dev *dev); void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev); +struct mlx5_irq_pool * +mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev); int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table); int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table); struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev); @@ -38,7 +41,6 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq); int mlx5_irq_get_index(struct mlx5_irq *irq); int mlx5_irq_get_irq(const struct mlx5_irq *irq); -struct mlx5_irq_pool; #ifdef CONFIG_MLX5_SF struct mlx5_irq 
*mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev, struct cpumask *used_cpus, u16 vecidx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 7db9cab9bedf6..2c5f850c31f68 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -378,6 +378,11 @@ int mlx5_irq_get_index(struct mlx5_irq *irq) return irq->map.index; } +struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq) +{ + return irq->pool; +} + /* irq_pool API */ /* requesting an irq from a given pool according to given index */ @@ -405,18 +410,20 @@ static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_tab return irq_table->sf_ctrl_pool; } -static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table) +static struct mlx5_irq_pool * +sf_comp_irq_pool_get(struct mlx5_irq_table *irq_table) { return irq_table->sf_comp_pool; } -struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev) +struct mlx5_irq_pool * +mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev) { struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev); struct mlx5_irq_pool *pool = NULL; if (mlx5_core_is_sf(dev)) - pool = sf_irq_pool_get(irq_table); + pool = sf_comp_irq_pool_get(irq_table); /* In some configs, there won't be a pool of SFs IRQs. Hence, returning * the PF IRQs pool in case the SF pool doesn't exist. @@ -572,7 +579,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name, pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ; pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ; mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d", - name, size, start); + name ? 
name : "mlx5_pcif_pool", size, start); return pool; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h index c4d377f8df308..cc064425fe160 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h @@ -28,7 +28,6 @@ struct mlx5_irq_pool { struct mlx5_core_dev *dev; }; -struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev); static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool) { return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf")); @@ -40,5 +39,6 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, int mlx5_irq_get_locked(struct mlx5_irq *irq); int mlx5_irq_read_locked(struct mlx5_irq *irq); int mlx5_irq_put(struct mlx5_irq *irq); +struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq); #endif /* __PCI_IRQ_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h index 4fe8c32d8fbe8..681fb73f00bbf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h @@ -16,8 +16,8 @@ struct mlx5hws_bwc_matcher { struct mlx5hws_matcher *matcher; struct mlx5hws_match_template *mt; struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM]; + u32 priority; u8 num_of_at; - u16 priority; u8 size_log; u32 num_of_rules; /* atomically accessed */ struct list_head *rules; diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 0c2ba2fa88c46..36802e0a8b570 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -131,9 +131,10 @@ static int mana_gd_detect_devices(struct pci_dev *pdev) struct gdma_list_devices_resp resp = {}; struct gdma_general_req req = {}; struct gdma_dev_id dev; - u32 i, max_num_devs; + int found_dev = 0; u16 dev_type; int err; + u32 i; mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req), sizeof(resp)); @@ -145,12 +146,17 @@ static int mana_gd_detect_devices(struct pci_dev *pdev) return err ? err : -EPROTO; } - max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs); - - for (i = 0; i < max_num_devs; i++) { + for (i = 0; i < GDMA_DEV_LIST_SIZE && + found_dev < resp.num_of_devs; i++) { dev = resp.devs[i]; dev_type = dev.type; + /* Skip empty devices */ + if (dev.as_uint32 == 0) + continue; + + found_dev++; + /* HWC is already detected in mana_hwc_create_channel(). 
*/ if (dev_type == GDMA_DEVICE_HWC) continue; diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index b2d206dec70c8..12c22261dd3a8 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -636,30 +636,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu mpc->rxbpre_total = 0; for (i = 0; i < num_rxb; i++) { - if (mpc->rxbpre_alloc_size > PAGE_SIZE) { - va = netdev_alloc_frag(mpc->rxbpre_alloc_size); - if (!va) - goto error; - - page = virt_to_head_page(va); - /* Check if the frag falls back to single page */ - if (compound_order(page) < - get_order(mpc->rxbpre_alloc_size)) { - put_page(page); - goto error; - } - } else { - page = dev_alloc_page(); - if (!page) - goto error; + page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size)); + if (!page) + goto error; - va = page_to_virt(page); - } + va = page_to_virt(page); da = dma_map_single(dev, va + mpc->rxbpre_headroom, mpc->rxbpre_datasize, DMA_FROM_DEVICE); if (dma_mapping_error(dev, da)) { - put_page(virt_to_head_page(va)); + put_page(page); goto error; } @@ -1618,7 +1604,7 @@ static void mana_rx_skb(void *buf_va, bool from_pool, } static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, - dma_addr_t *da, bool *from_pool, bool is_napi) + dma_addr_t *da, bool *from_pool) { struct page *page; void *va; @@ -1629,21 +1615,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, if (rxq->xdp_save_va) { va = rxq->xdp_save_va; rxq->xdp_save_va = NULL; - } else if (rxq->alloc_size > PAGE_SIZE) { - if (is_napi) - va = napi_alloc_frag(rxq->alloc_size); - else - va = netdev_alloc_frag(rxq->alloc_size); - - if (!va) - return NULL; - - page = virt_to_head_page(va); - /* Check if the frag falls back to single page */ - if (compound_order(page) < get_order(rxq->alloc_size)) { - put_page(page); - return NULL; - } } else { page = page_pool_dev_alloc_pages(rxq->page_pool); if (!page) @@ -1676,7 +1647,7 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq, dma_addr_t da; void *va; - va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true); + va = mana_get_rxfrag(rxq, dev, &da, &from_pool); if (!va) return; @@ -2083,7 +2054,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key, if (mpc->rxbufs_pre) va = mana_get_rxbuf_pre(rxq, &da); else - va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false); + va = mana_get_rxfrag(rxq, dev, &da, &from_pool); if (!va) return -ENOMEM; @@ -2169,6 +2140,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc) pprm.nid = gc->numa_node; pprm.napi = &rxq->rx_cq.napi; pprm.netdev = rxq->ndev; + pprm.order = get_order(rxq->alloc_size); rxq->page_pool = page_pool_create(&pprm); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index 2ec62c8d86e1c..59486fe2ad18c 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -20,6 +20,8 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size) struct sk_buff *skb; skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL); + if (!skb) + return NULL; skb_put(skb, size); return skb; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index f9dd50152b1e3..28d24d59efb84 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -454,8 +454,10 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter, num_vlans = sriov->num_allowed_vlans; sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL); - if (!sriov->allowed_vlans) + if (!sriov->allowed_vlans) { + qlcnic_sriov_free_vlans(adapter); return -ENOMEM; + } vlans = (u16 *)&cmd->rsp.arg[3]; for (i = 0; i < num_vlans; i++) @@ -2167,8 +2169,10 @@ int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter) vf = &sriov->vf_info[i]; vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans, sizeof(*vf->sriov_vlans), GFP_KERNEL); - if (!vf->sriov_vlans) + if (!vf->sriov_vlans) { + qlcnic_sriov_free_vlans(adapter); return -ENOMEM; + } } return 0; diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c index 14ffd45e9a25a..86dd034fdddc5 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase_main.c +++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c @@ -1501,7 +1501,10 @@ static void rtase_wait_for_quiescence(const struct net_device *dev) static void rtase_sw_reset(struct net_device *dev) { struct rtase_private *tp = netdev_priv(dev); + struct rtase_ring *ring, *tmp; + struct rtase_int_vector *ivec; int ret; + u32 i; netif_stop_queue(dev); netif_carrier_off(dev); @@ -1512,6 +1515,13 @@ static void rtase_sw_reset(struct net_device *dev) rtase_tx_clear(tp); rtase_rx_clear(tp); + for (i = 0; i < tp->int_nums; i++) { + ivec = &tp->int_vector[i]; + list_for_each_entry_safe(ring, tmp, &ivec->ring_list, + ring_entry) + list_del(&ring->ring_entry); + } + ret = rtase_init_ring(dev); if (ret) { netdev_err(dev, "unable to init ring\n"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index bfe6e2d631bdf..ab7c2750c1042 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -11,6 +11,8 @@ #include "dwmac_dma.h" #include "dwmac1000.h" +#define DRIVER_NAME "dwmac-loongson-pci" + /* Normal Loongson Tx Summary */ #define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000 /* Normal Loongson Rx Summary */ @@ -516,6 +518,19 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev, return 0; } +/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */ +static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + + return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, + !(value & DMA_BUS_MODE_SFT_RESET), + 10000, 2000000); +} + static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct plat_stmmacenet_data *plat; @@ -555,7 +570,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id for (i = 0; i < PCI_STD_NUM_BARS; i++) { if (pci_resource_len(pdev, i) == 0) continue; - ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + ret = pcim_iomap_regions(pdev, BIT(0), DRIVER_NAME); if (ret) goto err_disable_device; break; @@ -566,6 +581,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id plat->bsp_priv = ld; plat->setup = loongson_dwmac_setup; + plat->fix_soc_reset = loongson_dwmac_fix_reset; ld->dev = &pdev->dev; ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff; @@ -673,7 +689,7 @@ static const struct pci_device_id loongson_dwmac_id_table[] = { 
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table); static struct pci_driver loongson_dwmac_driver = { - .name = "dwmac-loongson-pci", + .name = DRIVER_NAME, .id_table = loongson_dwmac_id_table, .probe = loongson_dwmac_probe, .remove = loongson_dwmac_remove, diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 0d5a862cd78a6..3a13d60a947a8 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -99,6 +99,7 @@ config TI_K3_AM65_CPSW_NUSS select NET_DEVLINK select TI_DAVINCI_MDIO select PHYLINK + select PAGE_POOL select TI_K3_CPPI_DESC_POOL imply PHY_TI_GMII_SEL depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 3e090f87f97eb..a21e7c0afbfdc 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2225,14 +2225,18 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common) static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common) { struct device *dev = common->dev; + struct am65_cpsw_tx_chn *tx_chn; int i, ret = 0; for (i = 0; i < common->tx_ch_num; i++) { - struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; + tx_chn = &common->tx_chns[i]; hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback; + netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx, + am65_cpsw_nuss_tx_poll); + ret = devm_request_irq(dev, tx_chn->irq, am65_cpsw_nuss_tx_irq, IRQF_TRIGGER_HIGH, @@ -2242,19 +2246,16 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common) tx_chn->id, tx_chn->irq, ret); goto err; } - - netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx, - am65_cpsw_nuss_tx_poll); } return 0; err: - for (--i ; i >= 0 ; i--) { - struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; - - netif_napi_del(&tx_chn->napi_tx); + netif_napi_del(&tx_chn->napi_tx); + for (--i; i >= 0; i--) { + tx_chn = &common->tx_chns[i]; devm_free_irq(dev, tx_chn->irq, tx_chn); + netif_napi_del(&tx_chn->napi_tx); } return ret; @@ -2488,6 +2489,9 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) HRTIMER_MODE_REL_PINNED); flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback; + netif_napi_add(common->dma_ndev, &flow->napi_rx, + am65_cpsw_nuss_rx_poll); + ret = devm_request_irq(dev, flow->irq, am65_cpsw_nuss_rx_irq, IRQF_TRIGGER_HIGH, @@ -2496,11 +2500,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) dev_err(dev, "failure requesting rx %d irq %u, %d\n", i, flow->irq, ret); flow->irq = -EINVAL; - goto err_flow; + goto err_request_irq; } - - netif_napi_add(common->dma_ndev, &flow->napi_rx, - am65_cpsw_nuss_rx_poll); } /* setup classifier to route priorities to flows */ @@ -2508,11 +2509,14 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) return 0; +err_request_irq: + netif_napi_del(&flow->napi_rx); + err_flow: - for (--i; i >= 0 ; i--) { + for (--i; i >= 0; i--) { flow = &rx_chn->flows[i]; - netif_napi_del(&flow->napi_rx); devm_free_irq(dev, flow->irq, flow); + netif_napi_del(&flow->napi_rx); } err: @@ -2676,7 +2680,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) of_property_read_bool(port_np, "ti,mac-only"); /* get phy/link info */ - port->slave.port_np = port_np; + port->slave.port_np = of_node_get(port_np); ret = of_get_phy_mode(port_np, &port->slave.phy_if); if (ret) { 
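The am65-cpsw reordering above reflects a general rule: everything an IRQ handler may touch must exist before devm_request_irq() returns, because the interrupt can fire immediately. A self-contained sketch with hypothetical types and names:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical channel used only for this illustration. */
struct chan_sketch {
	struct napi_struct napi_tx;
	int irq;
};

static int chan_poll_sketch(struct napi_struct *napi, int budget)
{
	return 0;	/* stub poll */
}

static irqreturn_t chan_irq_sketch(int irq, void *dev_id)
{
	struct chan_sketch *ch = dev_id;

	napi_schedule(&ch->napi_tx);	/* can run as soon as the IRQ is live */
	return IRQ_HANDLED;
}

static int chan_add_sketch(struct device *dev, struct net_device *ndev,
			   struct chan_sketch *ch)
{
	int ret;

	/* register the NAPI context first, then the IRQ that schedules it */
	netif_napi_add_tx(ndev, &ch->napi_tx, chan_poll_sketch);
	ret = devm_request_irq(dev, ch->irq, chan_irq_sketch,
			       IRQF_TRIGGER_HIGH, "chan", ch);
	if (ret)
		netif_napi_del(&ch->napi_tx);	/* unwind on failure */

	return ret;
}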
dev_err(dev, "%pOF read phy-mode err %d\n", @@ -2737,6 +2741,17 @@ static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common) } } +static void am65_cpsw_remove_dt(struct am65_cpsw_common *common) +{ + struct am65_cpsw_port *port; + int i; + + for (i = 0; i < common->port_num; i++) { + port = &common->ports[i]; + of_node_put(port->slave.port_np); + } +} + static int am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) { @@ -3643,6 +3658,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) am65_cpsw_nuss_cleanup_ndev(common); am65_cpsw_nuss_phylink_cleanup(common); am65_cpts_release(common->cpts); + am65_cpsw_remove_dt(common); err_of_clear: if (common->mdio_dev) of_platform_device_destroy(common->mdio_dev, NULL); @@ -3682,6 +3698,7 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev) am65_cpsw_nuss_phylink_cleanup(common); am65_cpts_release(common->cpts); am65_cpsw_disable_serdes_phy(common); + am65_cpsw_remove_dt(common); if (common->mdio_dev) of_platform_device_destroy(common->mdio_dev, NULL); diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c index 768578c0d9587..2a1c43316f462 100644 --- a/drivers/net/ethernet/ti/icssg/icss_iep.c +++ b/drivers/net/ethernet/ti/icssg/icss_iep.c @@ -406,66 +406,79 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns) static int icss_iep_perout_enable_hw(struct icss_iep *iep, struct ptp_perout_request *req, int on) { + struct timespec64 ts; + u64 ns_start; + u64 ns_width; int ret; u64 cmp; + if (!on) { + /* Disable CMP 1 */ + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_CMP_EN(1), 0); + + /* clear CMP regs */ + regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0); + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) + regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0); + + /* Disable sync */ + regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); + + return 0; + } + + /* Calculate width of the signal for PPS/PEROUT handling */ + ts.tv_sec = req->on.sec; + ts.tv_nsec = req->on.nsec; + ns_width = timespec64_to_ns(&ts); + + if (req->flags & PTP_PEROUT_PHASE) { + ts.tv_sec = req->phase.sec; + ts.tv_nsec = req->phase.nsec; + ns_start = timespec64_to_ns(&ts); + } else { + ns_start = 0; + } + if (iep->ops && iep->ops->perout_enable) { ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp); if (ret) return ret; - if (on) { - /* Configure CMP */ - regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp)); - if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) - regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp)); - /* Configure SYNC, 1ms pulse width */ - regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000); - regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0); - regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0); - regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */ - /* Enable CMP 1 */ - regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, - IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1)); - } else { - /* Disable CMP 1 */ - regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, - IEP_CMP_CFG_CMP_EN(1), 0); - - /* clear regs */ - regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0); - if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) - regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0); - } + /* Configure CMP */ + regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp)); + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) + regmap_write(iep->map, 
ICSS_IEP_CMP1_REG1, upper_32_bits(cmp)); + /* Configure SYNC, based on req on width */ + regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, + div_u64(ns_width, iep->def_inc)); + regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0); + regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, + div_u64(ns_start, iep->def_inc)); + regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */ + /* Enable CMP 1 */ + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1)); } else { - if (on) { - u64 start_ns; - - iep->period = ((u64)req->period.sec * NSEC_PER_SEC) + - req->period.nsec; - start_ns = ((u64)req->period.sec * NSEC_PER_SEC) - + req->period.nsec; - icss_iep_update_to_next_boundary(iep, start_ns); - - /* Enable Sync in single shot mode */ - regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, - IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN); - /* Enable CMP 1 */ - regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, - IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1)); - } else { - /* Disable CMP 1 */ - regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, - IEP_CMP_CFG_CMP_EN(1), 0); - - /* clear CMP regs */ - regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0); - if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) - regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0); - - /* Disable sync */ - regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); - } + u64 start_ns; + + iep->period = ((u64)req->period.sec * NSEC_PER_SEC) + + req->period.nsec; + start_ns = ((u64)req->period.sec * NSEC_PER_SEC) + + req->period.nsec; + icss_iep_update_to_next_boundary(iep, start_ns); + + regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, + div_u64(ns_width, iep->def_inc)); + regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, + div_u64(ns_start, iep->def_inc)); + /* Enable Sync in single shot mode */ + regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, + IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN); + /* Enable CMP 1 */ + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1)); } return 0; @@ -476,6 +489,21 @@ static int icss_iep_perout_enable(struct icss_iep *iep, { int ret = 0; + if (!on) + goto disable; + + /* Reject requests with unsupported flags */ + if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE | + PTP_PEROUT_PHASE)) + return -EOPNOTSUPP; + + /* Set default "on" time (1ms) for the signal if not passed by the app */ + if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) { + req->on.sec = 0; + req->on.nsec = NSEC_PER_MSEC; + } + +disable: mutex_lock(&iep->ptp_clk_mutex); if (iep->pps_enabled) { @@ -568,10 +596,13 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on) if (on) { ns = icss_iep_gettime(iep, NULL); ts = ns_to_timespec64(ns); + rq.perout.flags = 0; rq.perout.period.sec = 1; rq.perout.period.nsec = 0; rq.perout.start.sec = ts.tv_sec + 2; rq.perout.start.nsec = 0; + rq.perout.on.sec = 0; + rq.perout.on.nsec = NSEC_PER_MSEC; ret = icss_iep_perout_enable_hw(iep, &rq.perout, on); } else { ret = icss_iep_perout_enable_hw(iep, &rq.perout, on); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index cb11635a8d120..6f0700d156e71 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -1555,6 +1555,7 @@ static int prueth_probe(struct platform_device *pdev) } spin_lock_init(&prueth->vtbl_lock); + spin_lock_init(&prueth->stats_lock); /* setup netdev interfaces */ if (eth0_node) { ret = prueth_netdev_init(prueth, eth0_node); diff --git 
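A worked example of the tick conversion introduced above (numbers hypothetical): the IEP counter advances iep->def_inc nanoseconds per cycle, so the pulse width and start offset must be divided down from nanoseconds to cycles before being written to the SYNC registers. With def_inc = 4 ns, a 1 ms "on" time becomes 1000000 / 4 = 250000 cycles in ICSS_IEP_SYNC_PWIDTH_REG.

#include <linux/math64.h>

/* Sketch of the scaling used above; div_u64() is required because the
 * nanosecond value is 64-bit and this may run on 32-bit machines.
 */
static u64 iep_ns_to_cycles_sketch(u64 ns, u32 def_inc)
{
	return div_u64(ns, def_inc);
}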
a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index 5473315ea2040..e456a11c5d4e3 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -297,6 +297,8 @@ struct prueth { int default_vlan; /** @vtbl_lock: Lock for vtbl in shared memory */ spinlock_t vtbl_lock; + /** @stats_lock: Lock for reading icssg stats */ + spinlock_t stats_lock; }; struct emac_tx_ts_response { diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c index 8800bd3a8d074..6f0edae38ea24 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_stats.c +++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c @@ -26,6 +26,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac) u32 val, reg; int i; + spin_lock(&prueth->stats_lock); + for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) { regmap_read(prueth->miig_rt, base + icssg_all_miig_stats[i].offset, @@ -51,6 +53,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac) emac->pa_stats[i] += val; } } + + spin_unlock(&prueth->stats_lock); } void icssg_stats_work_handler(struct work_struct *work) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 2b3d6586f44a5..71c891d14fb62 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -309,7 +309,8 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring, return true; page = page_pool_dev_alloc_pages(rx_ring->page_pool); - WARN_ON(!page); + if (unlikely(!page)) + return false; dma = page_pool_get_dma_addr(page); bi->page_dma = dma; diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index 53aeae2f884b0..1be2a5cc4a83c 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -607,7 +607,7 @@ static int ngbe_probe(struct pci_dev *pdev, /* setup the private structure */ err = ngbe_sw_init(wx); if (err) - goto err_free_mac_table; + goto err_pci_release_regions; /* check if flash load is done after hw power up */ err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PERST); @@ -701,6 +701,7 @@ static int ngbe_probe(struct pci_dev *pdev, err_clear_interrupt_scheme: wx_clear_interrupt_scheme(wx); err_free_mac_table: + kfree(wx->rss_key); kfree(wx->mac_table); err_pci_release_regions: pci_release_selected_regions(pdev, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index f774502680364..7e352837184fa 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -559,7 +559,7 @@ static int txgbe_probe(struct pci_dev *pdev, /* setup the private structure */ err = txgbe_sw_init(wx); if (err) - goto err_free_mac_table; + goto err_pci_release_regions; /* check if flash load is done after hw power up */ err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST); @@ -717,6 +717,7 @@ static int txgbe_probe(struct pci_dev *pdev, wx_clear_interrupt_scheme(wx); wx_control_hw(wx, false); err_free_mac_table: + kfree(wx->rss_key); kfree(wx->mac_table); err_pci_release_regions: pci_release_selected_regions(pdev, diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index de10a2d08c428..fe3438abcd253 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ 
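The stats_lock added above serializes a read-modify-write race: emac_update_hardware_stats() can be entered concurrently (the periodic stats worker and on-demand readers), and interleaved executions of the unprotected "emac->stats[i] += val" accumulation could lose samples. A minimal sketch with hypothetical types:

#include <linux/spinlock.h>
#include <linux/types.h>

struct stats_ctx_sketch {
	spinlock_t stats_lock;
	u64 stats[16];
};

/* Hold the lock across the whole read-and-accumulate pass so two
 * contexts never interleave updates to the same counter.
 */
static void stats_accumulate_sketch(struct stats_ctx_sketch *ctx, int i, u32 val)
{
	spin_lock(&ctx->stats_lock);
	ctx->stats[i] += val;
	spin_unlock(&ctx->stats_lock);
}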
b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -2888,6 +2888,7 @@ static int axienet_probe(struct platform_device *pdev) lp->phylink_config.dev = &ndev->dev; lp->phylink_config.type = PHYLINK_NETDEV; + lp->phylink_config.mac_managed_pm = true; lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10FD | MAC_100FD | MAC_1000FD; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index ba15a0a4ce629..963fb9261f017 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1902,21 +1902,9 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *geneve, *next; - struct net_device *dev, *aux; - /* gather any geneve devices that were moved into this ns */ - for_each_netdev_safe(net, dev, aux) - if (dev->rtnl_link_ops == &geneve_link_ops) - unregister_netdevice_queue(dev, head); - - /* now gather any other geneve devices that were created in this ns */ - list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) { - /* If geneve->dev is in the same netns, it was already added - * to the list by the previous loop. - */ - if (!net_eq(dev_net(geneve->dev), net)) - unregister_netdevice_queue(geneve->dev, head); - } + list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) + geneve_dellink(geneve->dev, head); } static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list, diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 47406ce990161..33b78b4007fe7 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -2487,11 +2487,6 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list, list_for_each_entry(net, net_list, exit_list) { struct gtp_net *gn = net_generic(net, gtp_net_id); struct gtp_dev *gtp, *gtp_next; - struct net_device *dev; - - for_each_netdev(net, dev) - if (dev->rtnl_link_ops == &gtp_link_ops) - gtp_dellink(dev, dev_to_kill); list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list) gtp_dellink(gtp->dev, dev_to_kill); diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c index c8c23d9be961b..41f212209993f 100644 --- a/drivers/net/ipa/data/ipa_data-v4.7.c +++ b/drivers/net/ipa/data/ipa_data-v4.7.c @@ -28,20 +28,18 @@ enum ipa_resource_type { enum ipa_rsrc_group_id { /* Source resource group identifiers */ IPA_RSRC_GROUP_SRC_UL_DL = 0, - IPA_RSRC_GROUP_SRC_UC_RX_Q, IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ /* Destination resource group identifiers */ - IPA_RSRC_GROUP_DST_UL_DL_DPL = 0, - IPA_RSRC_GROUP_DST_UNUSED_1, + IPA_RSRC_GROUP_DST_UL_DL = 0, IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ }; /* QSB configuration data for an SoC having IPA v4.7 */ static const struct ipa_qsb_data ipa_qsb_data[] = { [IPA_QSB_MASTER_DDR] = { - .max_writes = 8, - .max_reads = 0, /* no limit (hardware max) */ + .max_writes = 12, + .max_reads = 13, .max_reads_beats = 120, }, }; @@ -81,7 +79,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { }, .endpoint = { .config = { - .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .resource_group = IPA_RSRC_GROUP_DST_UL_DL, .aggregation = true, .status_enable = true, .rx = { @@ -106,6 +104,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .filter_support = true, .config = { .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .checksum = true, .qmap = true, .status_enable = true, .tx = { @@ -128,7 +127,8 @@ static const struct ipa_gsi_endpoint_data 
ipa_gsi_endpoint_data[] = { }, .endpoint = { .config = { - .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .resource_group = IPA_RSRC_GROUP_DST_UL_DL, + .checksum = true, .qmap = true, .aggregation = true, .rx = { @@ -197,12 +197,12 @@ static const struct ipa_resource ipa_resource_src[] = { /* Destination resource configuration data for an SoC having IPA v4.7 */ static const struct ipa_resource ipa_resource_dst[] = { [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { - .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL] = { .min = 7, .max = 7, }, }, [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { - .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL] = { .min = 2, .max = 2, }, }, diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index b1afcb8740de1..ca62188a317ad 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -3,6 +3,7 @@ */ #include <net/inet_dscp.h> +#include <net/ip.h> #include "ipvlan.h" @@ -415,20 +416,25 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h, static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb) { - const struct iphdr *ip4h = ip_hdr(skb); struct net_device *dev = skb->dev; struct net *net = dev_net(dev); - struct rtable *rt; int err, ret = NET_XMIT_DROP; + const struct iphdr *ip4h; + struct rtable *rt; struct flowi4 fl4 = { .flowi4_oif = dev->ifindex, - .flowi4_tos = ip4h->tos & INET_DSCP_MASK, .flowi4_flags = FLOWI_FLAG_ANYSRC, .flowi4_mark = skb->mark, - .daddr = ip4h->daddr, - .saddr = ip4h->saddr, }; + if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) + goto err; + + ip4h = ip_hdr(skb); + fl4.daddr = ip4h->daddr; + fl4.saddr = ip4h->saddr; + fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)); + rt = ip_route_output_flow(net, &fl4, NULL); if (IS_ERR(rt)) goto err; @@ -487,6 +493,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) struct net_device *dev = skb->dev; int err, ret = NET_XMIT_DROP; + if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) { + DEV_STATS_INC(dev, tx_errors); + kfree_skb(skb); + return ret; + } + err = ipvlan_route_v6_outbound(dev, skb); if (unlikely(err)) { DEV_STATS_INC(dev, tx_errors); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 1993b90b1a5f9..491e56b3263fd 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -244,8 +244,22 @@ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } +static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb) +{ + kfree_skb(skb); + return 0; +} + +static int blackhole_neigh_construct(struct net_device *dev, + struct neighbour *n) +{ + n->output = blackhole_neigh_output; + return 0; +} + static const struct net_device_ops blackhole_netdev_ops = { .ndo_start_xmit = blackhole_netdev_xmit, + .ndo_neigh_construct = blackhole_neigh_construct, }; /* This is a dst-dummy device used specifically for invalidated diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c index e70fb66879941..6622de48fc9e7 100644 --- a/drivers/net/mctp/mctp-i2c.c +++ b/drivers/net/mctp/mctp-i2c.c @@ -584,6 +584,7 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev, struct mctp_i2c_hdr *hdr; struct mctp_hdr *mhdr; u8 lldst, llsrc; + int rc; if (len > MCTP_I2C_MAXMTU) return -EMSGSIZE; @@ -594,6 +595,10 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev, lldst = *((u8 *)daddr); llsrc = *((u8 *)saddr); + rc = skb_cow_head(skb, sizeof(struct 
mctp_i2c_hdr)); + if (rc) + return rc; + skb_push(skb, sizeof(struct mctp_i2c_hdr)); skb_reset_mac_header(skb); hdr = (void *)skb_mac_header(skb); diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c index ee9d562f0817c..47513ebbc6807 100644 --- a/drivers/net/mctp/mctp-i3c.c +++ b/drivers/net/mctp/mctp-i3c.c @@ -506,6 +506,14 @@ static int mctp_i3c_header_create(struct sk_buff *skb, struct net_device *dev, const void *saddr, unsigned int len) { struct mctp_i3c_internal_hdr *ihdr; + int rc; + + if (!daddr || !saddr) + return -EINVAL; + + rc = skb_cow_head(skb, sizeof(struct mctp_i3c_internal_hdr)); + if (rc) + return rc; skb_push(skb, sizeof(struct mctp_i3c_internal_hdr)); skb_reset_mac_header(skb); diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index ddded162c44c1..d2a9cf3fde5ac 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -859,7 +859,7 @@ static int brcm_fet_config_init(struct phy_device *phydev) return reg; /* Unmask events we are interested in and mask interrupts globally. */ - if (phydev->phy_id == PHY_ID_BCM5221) + if (phydev->drv->phy_id == PHY_ID_BCM5221) reg = MII_BRCM_FET_IR_ENABLE | MII_BRCM_FET_IR_MASK; else @@ -888,7 +888,7 @@ static int brcm_fet_config_init(struct phy_device *phydev) return err; } - if (phydev->phy_id != PHY_ID_BCM5221) { + if (phydev->drv->phy_id != PHY_ID_BCM5221) { /* Set the LED mode */ reg = __phy_read(phydev, MII_BRCM_FET_SHDW_AUXMODE4); if (reg < 0) { @@ -1009,7 +1009,7 @@ static int brcm_fet_suspend(struct phy_device *phydev) return err; } - if (phydev->phy_id == PHY_ID_BCM5221) + if (phydev->drv->phy_id == PHY_ID_BCM5221) /* Force Low Power Mode with clock enabled */ reg = BCM5221_SHDW_AM4_EN_CLK_LPM | BCM5221_SHDW_AM4_FORCE_LPM; else diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c index ae43103c76cbd..9788b820c6be7 100644 --- a/drivers/net/phy/nxp-c45-tja11xx.c +++ b/drivers/net/phy/nxp-c45-tja11xx.c @@ -21,6 +21,11 @@ #define PHY_ID_TJA_1103 0x001BB010 #define PHY_ID_TJA_1120 0x001BB031 +#define VEND1_DEVICE_ID3 0x0004 +#define TJA1120_DEV_ID3_SILICON_VERSION GENMASK(15, 12) +#define TJA1120_DEV_ID3_SAMPLE_TYPE GENMASK(11, 8) +#define DEVICE_ID3_SAMPLE_TYPE_R 0x9 + #define VEND1_DEVICE_CONTROL 0x0040 #define DEVICE_CONTROL_RESET BIT(15) #define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14) @@ -108,6 +113,9 @@ #define MII_BASIC_CONFIG_RMII 0x5 #define MII_BASIC_CONFIG_MII 0x4 +#define VEND1_SGMII_BASIC_CONTROL 0xB000 +#define SGMII_LPM BIT(11) + #define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351 #define EXTENDED_CNT_EN BIT(15) #define VEND1_MONITOR_STATUS 0xAC80 @@ -1583,6 +1591,63 @@ static int nxp_c45_set_phy_mode(struct phy_device *phydev) return 0; } +/* Errata: ES_TJA1120 and ES_TJA1121 Rev. 
1.0 — 28 November 2024 Section 3.1 & 3.2 */ +static void nxp_c45_tja1120_errata(struct phy_device *phydev) +{ + bool macsec_ability, sgmii_ability; + int silicon_version, sample_type; + int phy_abilities; + int ret = 0; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_ID3); + if (ret < 0) + return; + + sample_type = FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, ret); + if (sample_type != DEVICE_ID3_SAMPLE_TYPE_R) + return; + + silicon_version = FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, ret); + + phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PORT_ABILITIES); + macsec_ability = !!(phy_abilities & MACSEC_ABILITY); + sgmii_ability = !!(phy_abilities & SGMII_ABILITY); + if ((!macsec_ability && silicon_version == 2) || + (macsec_ability && silicon_version == 1)) { + /* TJA1120/TJA1121 PHY configuration errata workaround. + * Apply PHY writes sequence before link up. + */ + if (!macsec_ability) { + phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x4b95); + phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0xf3cd); + } else { + phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x89c7); + phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0893); + } + + phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x0476, 0x58a0); + + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8921, 0xa3a); + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x89F1, 0x16c1); + + phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x0); + phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0); + + if (sgmii_ability) { + /* TJA1120B/TJA1121B SGMII PCS restart errata workaround. + * Put SGMII PCS into power down mode and back up. + */ + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_SGMII_BASIC_CONTROL, + SGMII_LPM); + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_SGMII_BASIC_CONTROL, + SGMII_LPM); + } + } +} + static int nxp_c45_config_init(struct phy_device *phydev) { int ret; @@ -1599,6 +1664,9 @@ static int nxp_c45_config_init(struct phy_device *phydev) phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1); phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2); + if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4))) + nxp_c45_tja1120_errata(phydev); + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG, PHY_CONFIG_AUTO); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 4f3e742907cb6..c9cfdc33fc5f1 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -615,6 +615,49 @@ int phy_ethtool_get_stats(struct phy_device *phydev, } EXPORT_SYMBOL(phy_ethtool_get_stats); +/** + * __phy_ethtool_get_phy_stats - Retrieve standardized PHY statistics + * @phydev: Pointer to the PHY device + * @phy_stats: Pointer to ethtool_eth_phy_stats structure + * @phydev_stats: Pointer to ethtool_phy_stats structure + * + * Fetches PHY statistics using a kernel-defined interface for consistent + * diagnostics. Unlike phy_ethtool_get_stats(), which allows custom stats, + * this function enforces a standardized format for better interoperability. 
+ */ +void __phy_ethtool_get_phy_stats(struct phy_device *phydev, + struct ethtool_eth_phy_stats *phy_stats, + struct ethtool_phy_stats *phydev_stats) +{ + if (!phydev->drv || !phydev->drv->get_phy_stats) + return; + + mutex_lock(&phydev->lock); + phydev->drv->get_phy_stats(phydev, phy_stats, phydev_stats); + mutex_unlock(&phydev->lock); +} + +/** + * __phy_ethtool_get_link_ext_stats - Retrieve extended link statistics for a PHY + * @phydev: Pointer to the PHY device + * @link_stats: Pointer to the structure to store extended link statistics + * + * Populates the ethtool_link_ext_stats structure with link down event counts + * and additional driver-specific link statistics, if available. + */ +void __phy_ethtool_get_link_ext_stats(struct phy_device *phydev, + struct ethtool_link_ext_stats *link_stats) +{ + link_stats->link_down_events = READ_ONCE(phydev->link_down_events); + + if (!phydev->drv || !phydev->drv->get_link_stats) + return; + + mutex_lock(&phydev->lock); + phydev->drv->get_link_stats(phydev, link_stats); + mutex_unlock(&phydev->lock); +} + /** * phy_ethtool_get_plca_cfg - Get PLCA RS configuration * @phydev: the phy_device struct diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 499797646580e..8af44224480f1 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -289,6 +289,46 @@ static bool phy_drv_wol_enabled(struct phy_device *phydev) return wol.wolopts != 0; } +static void phy_link_change(struct phy_device *phydev, bool up) +{ + struct net_device *netdev = phydev->attached_dev; + + if (up) + netif_carrier_on(netdev); + else + netif_carrier_off(netdev); + phydev->adjust_link(netdev); + if (phydev->mii_ts && phydev->mii_ts->link_state) + phydev->mii_ts->link_state(phydev->mii_ts, phydev); +} + +/** + * phy_uses_state_machine - test whether consumer driver uses PAL state machine + * @phydev: the target PHY device structure + * + * Ultimately, this aims to indirectly determine whether the PHY is attached + * to a consumer which uses the state machine by calling phy_start() and + * phy_stop(). + * + * When the PHY driver consumer uses phylib, it must have previously called + * phy_connect_direct() or one of its derivatives, so that phy_prepare_link() + * has set up a hook for monitoring state changes. + * + * When the PHY driver is used by the MAC driver consumer through phylink (the + * only other provider of a phy_link_change() method), using the PHY state + * machine is not optional. + * + * Return: true if consumer calls phy_start() and phy_stop(), false otherwise. + */ +static bool phy_uses_state_machine(struct phy_device *phydev) +{ + if (phydev->phy_link_change == phy_link_change) + return phydev->attached_dev && phydev->adjust_link; + + /* phydev->phy_link_change is implicitly phylink_phy_change() */ + return true; +} + static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) { struct device_driver *drv = phydev->mdio.dev.driver; @@ -355,7 +395,7 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev) * may call phy routines that try to grab the same lock, and that may * lead to a deadlock. 
*/ - if (phydev->attached_dev && phydev->adjust_link) + if (phy_uses_state_machine(phydev)) phy_stop_machine(phydev); if (!mdio_bus_phy_may_suspend(phydev)) @@ -409,7 +449,7 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev) } } - if (phydev->attached_dev && phydev->adjust_link) + if (phy_uses_state_machine(phydev)) phy_start_machine(phydev); return 0; @@ -1101,19 +1141,6 @@ struct phy_device *phy_find_first(struct mii_bus *bus) } EXPORT_SYMBOL(phy_find_first); -static void phy_link_change(struct phy_device *phydev, bool up) -{ - struct net_device *netdev = phydev->attached_dev; - - if (up) - netif_carrier_on(netdev); - else - netif_carrier_off(netdev); - phydev->adjust_link(netdev); - if (phydev->mii_ts && phydev->mii_ts->link_state) - phydev->mii_ts->link_state(phydev->mii_ts, phydev); -} - /** * phy_prepare_link - prepares the PHY layer to monitor link status * @phydev: target phy_device struct @@ -3776,6 +3803,8 @@ static const struct ethtool_phy_ops phy_ethtool_phy_ops = { static const struct phylib_stubs __phylib_stubs = { .hwtstamp_get = __phy_hwtstamp_get, .hwtstamp_set = __phy_hwtstamp_set, + .get_phy_stats = __phy_ethtool_get_phy_stats, + .get_link_ext_stats = __phy_ethtool_get_link_ext_stats, }; static void phylib_register_stubs(void) diff --git a/drivers/net/phy/phy_link_topology.c b/drivers/net/phy/phy_link_topology.c index 4a5d73002a1a8..0e9e987f37dd8 100644 --- a/drivers/net/phy/phy_link_topology.c +++ b/drivers/net/phy/phy_link_topology.c @@ -73,7 +73,7 @@ int phy_link_topo_add_phy(struct net_device *dev, xa_limit_32b, &topo->next_phy_index, GFP_KERNEL); - if (ret) + if (ret < 0) goto err; return 0; diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c index bd8a51ec0ecd6..ec336c3e338d6 100644 --- a/drivers/net/phy/qcom/qca807x.c +++ b/drivers/net/phy/qcom/qca807x.c @@ -774,7 +774,7 @@ static int qca807x_config_init(struct phy_device *phydev) control_dac &= ~QCA807X_CONTROL_DAC_MASK; if (!priv->dac_full_amplitude) control_dac |= QCA807X_CONTROL_DAC_DSP_AMPLITUDE; - if (!priv->dac_full_amplitude) + if (!priv->dac_full_bias_current) control_dac |= QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT; if (!priv->dac_disable_bias_current_tweak) control_dac |= QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK; diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index dcec92625cf65..7b33993f7001e 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -385,7 +385,7 @@ static void sfp_fixup_rollball(struct sfp *sfp) sfp->phy_t_retry = msecs_to_jiffies(1000); } -static void sfp_fixup_fs_2_5gt(struct sfp *sfp) +static void sfp_fixup_rollball_wait4s(struct sfp *sfp) { sfp_fixup_rollball(sfp); @@ -399,7 +399,7 @@ static void sfp_fixup_fs_2_5gt(struct sfp *sfp) static void sfp_fixup_fs_10gt(struct sfp *sfp) { sfp_fixup_10gbaset_30m(sfp); - sfp_fixup_fs_2_5gt(sfp); + sfp_fixup_rollball_wait4s(sfp); } static void sfp_fixup_halny_gsfp(struct sfp *sfp) @@ -479,9 +479,10 @@ static const struct sfp_quirk sfp_quirks[] = { // PHY. SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt), - // Fiberstore SFP-2.5G-T uses Rollball protocol to talk to the PHY and - // needs 4 sec wait before probing the PHY. - SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_fs_2_5gt), + // Fiberstore SFP-2.5G-T and SFP-10GM-T uses Rollball protocol to talk + // to the PHY and needs 4 sec wait before probing the PHY. 
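Background for the phy_link_topo_add_phy() fix above: xa_alloc_cyclic() returns 0 on success, 1 when it succeeds but the cyclic counter wrapped around, and a negative errno on failure, so "if (ret)" mistakes a successful wrap for an error. A sketch of the corrected pattern on a hypothetical xarray:

#include <linux/xarray.h>

/* Only negative returns from xa_alloc_cyclic() are failures. */
static int index_alloc_sketch(struct xarray *xa, void *entry,
			      u32 *index, u32 *next)
{
	int ret = xa_alloc_cyclic(xa, index, entry, xa_limit_32b,
				  next, GFP_KERNEL);

	return ret < 0 ? ret : 0;
}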
+ SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_rollball_wait4s), + SFP_QUIRK_F("FS", "SFP-10GM-T", sfp_fixup_rollball_wait4s), // Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd // NRZ in their EEPROM @@ -515,6 +516,8 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc), SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g), + SFP_QUIRK_M("OEM", "SFP-2.5G-BX10-D", sfp_quirk_2500basex), + SFP_QUIRK_M("OEM", "SFP-2.5G-BX10-U", sfp_quirk_2500basex), SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc), SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc), SFP_QUIRK_F("Turris", "RTSFP-2.5G", sfp_fixup_rollball), diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 4583e15ad03a0..1420c4efa48e6 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -72,6 +72,17 @@ #define PPP_PROTO_LEN 2 #define PPP_LCP_HDRLEN 4 +/* The filter instructions generated by libpcap are constructed + * assuming a four-byte PPP header on each packet, where the last + * 2 bytes are the protocol field defined in the RFC and the first + * byte of the first 2 bytes indicates the direction. + * The second byte is currently unused, but we still need to initialize + * it to prevent crafted BPF programs from reading them which would + * cause reading of uninitialized data. + */ +#define PPP_FILTER_OUTBOUND_TAG 0x0100 +#define PPP_FILTER_INBOUND_TAG 0x0000 + /* * An instance of /dev/ppp can be associated with either a ppp * interface unit or a ppp channel. In both cases, file->private_data @@ -1762,10 +1773,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) if (proto < 0x8000) { #ifdef CONFIG_PPP_FILTER - /* check if we should pass this packet */ - /* the filter instructions are constructed assuming - a four-byte PPP header on each packet */ - *(u8 *)skb_push(skb, 2) = 1; + /* check if the packet passes the pass and active filters. + * See comment for PPP_FILTER_OUTBOUND_TAG above. + */ + *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG); if (ppp->pass_filter && bpf_prog_run(ppp->pass_filter, skb) == 0) { if (ppp->debug & 1) @@ -2482,14 +2493,13 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) /* network protocol frame - give it to the kernel */ #ifdef CONFIG_PPP_FILTER - /* check if the packet passes the pass and active filters */ - /* the filter instructions are constructed assuming - a four-byte PPP header on each packet */ if (ppp->pass_filter || ppp->active_filter) { if (skb_unclone(skb, GFP_ATOMIC)) goto err; - - *(u8 *)skb_push(skb, 2) = 0; + /* Check if the packet passes the pass and active filters. + * See comment for PPP_FILTER_INBOUND_TAG above. 
+ */ + *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_INBOUND_TAG); if (ppp->pass_filter && bpf_prog_run(ppp->pass_filter, skb) == 0) { if (ppp->debug & 1) diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c index 644e99fc3623f..9c4932198931f 100644 --- a/drivers/net/ppp/ppp_synctty.c +++ b/drivers/net/ppp/ppp_synctty.c @@ -506,6 +506,11 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb) unsigned char *data; int islcp; + /* Ensure we can safely access protocol field and LCP code */ + if (!pskb_may_pull(skb, 3)) { + kfree_skb(skb); + return NULL; + } data = skb->data; proto = get_unaligned_be16(data); diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c index 0af7db80b2f88..7cfc36cadb576 100644 --- a/drivers/net/pse-pd/pd692x0.c +++ b/drivers/net/pse-pd/pd692x0.c @@ -999,13 +999,12 @@ static int pd692x0_pi_get_voltage(struct pse_controller_dev *pcdev, int id) return (buf.sub[0] << 8 | buf.sub[1]) * 100000; } -static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev, - int id) +static int pd692x0_pi_get_pw_limit(struct pse_controller_dev *pcdev, + int id) { struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); struct pd692x0_msg msg, buf = {0}; - int mW, uV, uA, ret; - s64 tmp_64; + int ret; msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM]; msg.sub[2] = id; @@ -1013,48 +1012,24 @@ static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev, if (ret < 0) return ret; - ret = pd692x0_pi_get_pw_from_table(buf.data[2], buf.data[3]); - if (ret < 0) - return ret; - mW = ret; - - ret = pd692x0_pi_get_voltage(pcdev, id); - if (ret < 0) - return ret; - uV = ret; - - tmp_64 = mW; - tmp_64 *= 1000000000ull; - /* uA = mW * 1000000000 / uV */ - uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV); - return uA; + return pd692x0_pi_get_pw_from_table(buf.data[0], buf.data[1]); } -static int pd692x0_pi_set_current_limit(struct pse_controller_dev *pcdev, - int id, int max_uA) +static int pd692x0_pi_set_pw_limit(struct pse_controller_dev *pcdev, + int id, int max_mW) { struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); struct device *dev = &priv->client->dev; struct pd692x0_msg msg, buf = {0}; - int uV, ret, mW; - s64 tmp_64; + int ret; ret = pd692x0_fw_unavailable(priv); if (ret) return ret; - ret = pd692x0_pi_get_voltage(pcdev, id); - if (ret < 0) - return ret; - uV = ret; - msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM]; msg.sub[2] = id; - tmp_64 = uV; - tmp_64 *= max_uA; - /* mW = uV * uA / 1000000000 */ - mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000); - ret = pd692x0_pi_set_pw_from_table(dev, &msg, mW); + ret = pd692x0_pi_set_pw_from_table(dev, &msg, max_mW); if (ret) return ret; @@ -1068,8 +1043,8 @@ static const struct pse_controller_ops pd692x0_ops = { .pi_disable = pd692x0_pi_disable, .pi_is_enabled = pd692x0_pi_is_enabled, .pi_get_voltage = pd692x0_pi_get_voltage, - .pi_get_current_limit = pd692x0_pi_get_current_limit, - .pi_set_current_limit = pd692x0_pi_set_current_limit, + .pi_get_pw_limit = pd692x0_pi_get_pw_limit, + .pi_set_pw_limit = pd692x0_pi_set_pw_limit, }; #define PD692X0_FW_LINE_MAX_SZ 0xff diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c index 2906ce173f66c..bb509d973e914 100644 --- a/drivers/net/pse-pd/pse_core.c +++ b/drivers/net/pse-pd/pse_core.c @@ -291,32 +291,24 @@ static int pse_pi_get_voltage(struct regulator_dev *rdev) return ret; } -static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev, - int id, - struct netlink_ext_ack *extack, - struct 
pse_control_status *status); - static int pse_pi_get_current_limit(struct regulator_dev *rdev) { struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); const struct pse_controller_ops *ops; - struct netlink_ext_ack extack = {}; - struct pse_control_status st = {}; - int id, uV, ret; + int id, uV, mW, ret; s64 tmp_64; ops = pcdev->ops; id = rdev_get_id(rdev); + if (!ops->pi_get_pw_limit || !ops->pi_get_voltage) + return -EOPNOTSUPP; + mutex_lock(&pcdev->lock); - if (ops->pi_get_current_limit) { - ret = ops->pi_get_current_limit(pcdev, id); + ret = ops->pi_get_pw_limit(pcdev, id); + if (ret < 0) goto out; - } + mW = ret; - /* If pi_get_current_limit() callback not populated get voltage - * from pi_get_voltage() and power limit from ethtool_get_status() - * to calculate current limit. - */ ret = _pse_pi_get_voltage(rdev); if (!ret) { dev_err(pcdev->dev, "Voltage null\n"); @@ -327,16 +319,7 @@ static int pse_pi_get_current_limit(struct regulator_dev *rdev) goto out; uV = ret; - ret = _pse_ethtool_get_status(pcdev, id, &extack, &st); - if (ret) - goto out; - - if (!st.c33_avail_pw_limit) { - ret = -ENODATA; - goto out; - } - - tmp_64 = st.c33_avail_pw_limit; + tmp_64 = mW; tmp_64 *= 1000000000ull; /* uA = mW * 1000000000 / uV */ ret = DIV_ROUND_CLOSEST_ULL(tmp_64, uV); @@ -351,15 +334,33 @@ static int pse_pi_set_current_limit(struct regulator_dev *rdev, int min_uA, { struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); const struct pse_controller_ops *ops; - int id, ret; + int id, mW, ret; + s64 tmp_64; ops = pcdev->ops; - if (!ops->pi_set_current_limit) + if (!ops->pi_set_pw_limit || !ops->pi_get_voltage) return -EOPNOTSUPP; + if (max_uA > MAX_PI_CURRENT) + return -ERANGE; + id = rdev_get_id(rdev); mutex_lock(&pcdev->lock); - ret = ops->pi_set_current_limit(pcdev, id, max_uA); + ret = _pse_pi_get_voltage(rdev); + if (!ret) { + dev_err(pcdev->dev, "Voltage null\n"); + ret = -ERANGE; + goto out; + } + if (ret < 0) + goto out; + + tmp_64 = ret; + tmp_64 *= max_uA; + /* mW = uA * uV / 1000000000 */ + mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000); + ret = ops->pi_set_pw_limit(pcdev, id, mW); +out: mutex_unlock(&pcdev->lock); return ret; @@ -403,11 +404,9 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev, rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS; - if (pcdev->ops->pi_set_current_limit) { + if (pcdev->ops->pi_set_pw_limit) rinit_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_CURRENT; - rinit_data->constraints.max_uA = MAX_PI_CURRENT; - } rinit_data->supply_regulator = "vpwr"; @@ -736,23 +735,6 @@ struct pse_control *of_pse_control_get(struct device_node *node) } EXPORT_SYMBOL_GPL(of_pse_control_get); -static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev, - int id, - struct netlink_ext_ack *extack, - struct pse_control_status *status) -{ - const struct pse_controller_ops *ops; - - ops = pcdev->ops; - if (!ops->ethtool_get_status) { - NL_SET_ERR_MSG(extack, - "PSE driver does not support status report"); - return -EOPNOTSUPP; - } - - return ops->ethtool_get_status(pcdev, id, extack, status); -} - /** * pse_ethtool_get_status - get status of PSE control * @psec: PSE control pointer @@ -765,11 +747,21 @@ int pse_ethtool_get_status(struct pse_control *psec, struct netlink_ext_ack *extack, struct pse_control_status *status) { + const struct pse_controller_ops *ops; + struct pse_controller_dev *pcdev; int err; - mutex_lock(&psec->pcdev->lock); - err = _pse_ethtool_get_status(psec->pcdev, psec->id, extack, status); - 
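The pse_core rework above moves the mW-to-uA translation into the regulator glue: drivers now expose power limits directly, and the core converts using the port voltage. A worked example with hypothetical values: at 54 V (54000000 uV), a 15000 mW limit is 15000 * 10^9 / 54000000, roughly 277778 uA, and the 1.5e13 intermediate product is why the math is done in 64 bits.

#include <linux/math.h>
#include <linux/math64.h>

/* Sketch of the uA = mW * 1e9 / uV conversion used above. */
static int pse_mw_to_ua_sketch(int mw, int uv)
{
	s64 tmp_64 = mw;

	tmp_64 *= 1000000000ull;
	return DIV_ROUND_CLOSEST_ULL(tmp_64, uv);
}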
mutex_unlock(&psec->pcdev->lock); + pcdev = psec->pcdev; + ops = pcdev->ops; + if (!ops->ethtool_get_status) { + NL_SET_ERR_MSG(extack, + "PSE driver does not support status report"); + return -EOPNOTSUPP; + } + + mutex_lock(&pcdev->lock); + err = ops->ethtool_get_status(pcdev, psec->id, extack, status); + mutex_unlock(&pcdev->lock); return err; } diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 57d6e5abc30e8..da24941a6e444 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -1421,6 +1421,19 @@ static const struct driver_info hg20f9_info = { .data = FLAG_EEPROM_MAC, }; +static const struct driver_info lyconsys_fibergecko100_info = { + .description = "LyconSys FiberGecko 100 USB 2.0 to SFP Adapter", + .bind = ax88178_bind, + .status = asix_status, + .link_reset = ax88178_link_reset, + .reset = ax88178_link_reset, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | + FLAG_MULTI_PACKET, + .rx_fixup = asix_rx_fixup_common, + .tx_fixup = asix_tx_fixup, + .data = 0x20061201, +}; + static const struct usb_device_id products [] = { { // Linksys USB200M @@ -1578,6 +1591,10 @@ static const struct usb_device_id products [] = { // Linux Automation GmbH USB 10Base-T1L USB_DEVICE(0x33f7, 0x0004), .driver_info = (unsigned long) &lxausb_t1l_info, +}, { + /* LyconSys FiberGecko 100 */ + USB_DEVICE(0x1d2a, 0x0801), + .driver_info = (unsigned long) &lyconsys_fibergecko100_info, }, { }, // END }; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index a6469235d904e..a032c1ded4063 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -783,6 +783,13 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +/* Lenovo ThinkPad Hybrid USB-C with USB-A Dock (40af0135eu, based on Realtek RTL8153) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa359, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* Aquantia AQtion USB to 5GbE Controller (based on AQC111U) */ { USB_DEVICE_AND_INTERFACE_INFO(AQUANTIA_VENDOR_ID, 0xc101, diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c index 46af78caf457a..0bfa37c140591 100644 --- a/drivers/net/usb/gl620a.c +++ b/drivers/net/usb/gl620a.c @@ -179,9 +179,7 @@ static int genelink_bind(struct usbnet *dev, struct usb_interface *intf) { dev->hard_mtu = GL_RCV_BUF_SIZE; dev->net->hard_header_len += 4; - dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in); - dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out); - return 0; + return usbnet_get_endpoints(dev, intf); } static const struct driver_info genelink_info = { diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 9fe7f704a2f7b..944a33361dae5 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1365,9 +1365,11 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x10b0, 0)}, /* Telit FE990B */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c0, 0)}, /* Telit FE910C04 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c4, 0)}, /* Telit FE910C04 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c8, 0)}, /* Telit FE910C04 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x10d0, 0)}, /* Telit FN990B */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ 
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 468c739740463..96fa3857d8e25 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -785,6 +785,7 @@ enum rtl8152_flags { #define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387 #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3 0x3062 +#define DEVICE_ID_THINKPAD_HYBRID_USB_C_DOCK 0xa359 struct tally_counter { __le64 tx_packets; @@ -9787,6 +9788,7 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev) case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2: case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3: case DEVICE_ID_THINKPAD_USB_C_DONGLE: + case DEVICE_ID_THINKPAD_HYBRID_USB_C_DOCK: return 1; } } else if (vendor_id == VENDOR_ID_REALTEK && parent_vendor_id == VENDOR_ID_LENOVO) { @@ -10064,6 +10066,8 @@ static const struct usb_device_id rtl8152_table[] = { { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927) }, { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e) }, { USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101) }, + + /* Lenovo */ { USB_DEVICE(VENDOR_ID_LENOVO, 0x304f) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3054) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) }, @@ -10074,7 +10078,9 @@ static const struct usb_device_id rtl8152_table[] = { { USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x721e) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0xa359) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0xa387) }, + { USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) }, { USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) }, { USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) }, diff --git a/drivers/net/usb/r8153_ecm.c b/drivers/net/usb/r8153_ecm.c index 20b2df8d74ae1..8d860dacdf49b 100644 --- a/drivers/net/usb/r8153_ecm.c +++ b/drivers/net/usb/r8153_ecm.c @@ -135,6 +135,12 @@ static const struct usb_device_id products[] = { USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&r8153_info, }, +/* Lenovo ThinkPad Hybrid USB-C with USB-A Dock (40af0135eu, based on Realtek RTL8153) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_LENOVO, 0xa359, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&r8153_info, +}, { }, /* END */ }; diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 7b3739b29c8f7..bb0bf14158727 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -630,6 +630,16 @@ static const struct driver_info zte_rndis_info = { .tx_fixup = rndis_tx_fixup, }; +static const struct driver_info wwan_rndis_info = { + .description = "Mobile Broadband RNDIS device", + .flags = FLAG_WWAN | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, + .bind = rndis_bind, + .unbind = rndis_unbind, + .status = rndis_status, + .rx_fixup = rndis_rx_fixup, + .tx_fixup = rndis_tx_fixup, +}; + /*-------------------------------------------------------------------------*/ static const struct usb_device_id products [] = { @@ -666,9 +676,11 @@ static const struct usb_device_id products [] = { USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), .driver_info = (unsigned long) &rndis_info, }, { - /* Novatel Verizon USB730L */ + /* Mobile Broadband Modem, seen in Novatel Verizon USB730L and + * Telit FN990A (RNDIS) + */ USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1), - .driver_info = (unsigned long) &rndis_info, + .driver_info = (unsigned long)&wwan_rndis_info, }, { }, // END }; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 
44179f4e807fc..724b93aa4f7eb 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -178,6 +178,17 @@ } EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); +static bool usbnet_needs_usb_name_format(struct usbnet *dev, struct net_device *net) +{ + /* Point to point devices which don't have a real MAC address + * (or report a fake local one) have historically used the usb%d + * naming. Preserve this.. + */ + return (dev->driver_info->flags & FLAG_POINTTOPOINT) != 0 && + (is_zero_ether_addr(net->dev_addr) || + is_local_ether_addr(net->dev_addr)); +} + static void intr_complete (struct urb *urb) { struct usbnet *dev = urb->context; @@ -519,7 +530,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) netif_device_present (dev->net) && test_bit(EVENT_DEV_OPEN, &dev->flags) && !test_bit (EVENT_RX_HALT, &dev->flags) && - !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) { + !test_bit (EVENT_DEV_ASLEEP, &dev->flags) && + !usbnet_going_away(dev)) { switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) { case -EPIPE: usbnet_defer_kevent (dev, EVENT_RX_HALT); @@ -540,8 +552,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) tasklet_schedule (&dev->bh); break; case 0: - if (!usbnet_going_away(dev)) - __usbnet_queue_skb(&dev->rxq, skb, rx_start); + __usbnet_queue_skb(&dev->rxq, skb, rx_start); } } else { netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); @@ -1762,13 +1773,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) if (status < 0) goto out1; - // heuristic: "usb%d" for links we know are two-host, - // else "eth%d" when there's reasonable doubt. userspace - // can rename the link if it knows better. + /* heuristic: rename to "eth%d" if we are not sure this link + * is two-host (these links keep "usb%d") + */ if ((dev->driver_info->flags & FLAG_ETHER) != 0 && - ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 || - /* somebody touched it*/ - !is_zero_ether_addr(net->dev_addr))) + !usbnet_needs_usb_name_format(dev, net)) strscpy(net->name, "eth%d", sizeof(net->name)); /* WLAN devices should always be named "wlan%d" */ if ((dev->driver_info->flags & FLAG_WLAN) != 0) diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 97b12f51ef28c..9389dc5f4a3da 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/module.h> @@ -1290,6 +1290,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev) ath11k_core_deinit(ab); qmi_fail: + ath11k_fw_destroy(ab); ath11k_ahb_free_resources(ab); } @@ -1309,6 +1310,7 @@ static void ath11k_ahb_shutdown(struct platform_device *pdev) ath11k_core_deinit(ab); free_resources: + ath11k_fw_destroy(ab); ath11k_ahb_free_resources(ab); } diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index ccf4ad35fdc33..7eba6ee054ffe 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. 
All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/module.h> @@ -2214,7 +2214,6 @@ void ath11k_core_deinit(struct ath11k_base *ab) ath11k_hif_power_down(ab); ath11k_mac_destroy(ab); ath11k_core_soc_destroy(ab); - ath11k_fw_destroy(ab); } EXPORT_SYMBOL(ath11k_core_deinit); diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c index fbf666d0ecf1d..f124b7329e1ac 100644 --- a/drivers/net/wireless/ath/ath11k/dp.c +++ b/drivers/net/wireless/ath/ath11k/dp.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <crypto/hash.h> @@ -104,14 +104,12 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring) if (!ring->vaddr_unaligned) return; - if (ring->cached) { - dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size, - DMA_FROM_DEVICE); - kfree(ring->vaddr_unaligned); - } else { + if (ring->cached) + dma_free_noncoherent(ab->dev, ring->size, ring->vaddr_unaligned, + ring->paddr_unaligned, DMA_FROM_DEVICE); + else dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned, ring->paddr_unaligned); - } ring->vaddr_unaligned = NULL; } @@ -249,25 +247,14 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, default: cached = false; } - - if (cached) { - ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL); - if (!ring->vaddr_unaligned) - return -ENOMEM; - - ring->paddr_unaligned = dma_map_single(ab->dev, - ring->vaddr_unaligned, - ring->size, - DMA_FROM_DEVICE); - if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) { - kfree(ring->vaddr_unaligned); - ring->vaddr_unaligned = NULL; - return -ENOMEM; - } - } } - if (!cached) + if (cached) + ring->vaddr_unaligned = dma_alloc_noncoherent(ab->dev, ring->size, + &ring->paddr_unaligned, + DMA_FROM_DEVICE, + GFP_KERNEL); + else ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size, &ring->paddr_unaligned, GFP_KERNEL); diff --git a/drivers/net/wireless/ath/ath11k/fw.c b/drivers/net/wireless/ath/ath11k/fw.c index 4e36292a79db8..cbbd8e57119f2 100644 --- a/drivers/net/wireless/ath/ath11k/fw.c +++ b/drivers/net/wireless/ath/ath11k/fw.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* - * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "core.h" @@ -166,3 +166,4 @@ void ath11k_fw_destroy(struct ath11k_base *ab) { release_firmware(ab->fw.fw); } +EXPORT_SYMBOL(ath11k_fw_destroy); diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index be9d2c69cc413..6ebfa5d02e2e5 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
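The ath11k dp.c conversion above swaps a hand-rolled kzalloc() plus dma_map_single() pair for the dedicated noncoherent allocator: one call, one failure point, and a symmetric dma_free_noncoherent() on teardown. A sketch with a hypothetical device and size:

#include <linux/dma-mapping.h>

/* Callers still do cache maintenance with dma_sync_single_for_cpu()
 * and dma_sync_single_for_device(), exactly as with manual mapping.
 */
static void *ring_alloc_sketch(struct device *dev, size_t size,
			       dma_addr_t *paddr)
{
	return dma_alloc_noncoherent(dev, size, paddr,
				     DMA_FROM_DEVICE, GFP_KERNEL);
}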
*/ #include @@ -981,6 +981,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev) ath11k_core_deinit(ab); qmi_fail: + ath11k_fw_destroy(ab); ath11k_mhi_unregister(ab_pci); ath11k_pcic_free_irq(ab); diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c index 5c6749bc4039d..4c98b9de1e584 100644 --- a/drivers/net/wireless/ath/ath12k/dp_mon.c +++ b/drivers/net/wireless/ath/ath12k/dp_mon.c @@ -2118,7 +2118,7 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget, dest_idx = 0; move_next: ath12k_dp_mon_buf_replenish(ab, buf_ring, 1); - ath12k_hal_srng_src_get_next_entry(ab, srng); + ath12k_hal_srng_dst_get_next_entry(ab, srng); num_buffs_reaped++; } diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index 91e3393f7b5f4..4cbba96121a11 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -2470,6 +2470,29 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); } +static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab, + struct hal_rx_desc *rx_desc, + struct sk_buff *msdu) +{ + struct ieee80211_hdr *hdr; + u8 decap_type; + u32 hdr_len; + + decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc); + if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI) + return true; + + hdr = (struct ieee80211_hdr *)msdu->data; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + + if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))) + return true; + + ab->soc_stats.invalid_rbm++; + WARN_ON_ONCE(1); + return false; +} + static int ath12k_dp_rx_process_msdu(struct ath12k *ar, struct sk_buff *msdu, struct sk_buff_head *msdu_list, @@ -2528,6 +2551,11 @@ static int ath12k_dp_rx_process_msdu(struct ath12k *ar, } } + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) { + ret = -EINVAL; + goto free_out; + } + ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status); ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status); @@ -2880,6 +2908,9 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; skb_pull(msdu, hal_rx_desc_sz); + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) + return -EINVAL; + ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs); ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); @@ -3600,6 +3631,9 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu, skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); } + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu))) + return -EINVAL; + ath12k_dp_rx_h_ppdu(ar, desc, status); ath12k_dp_rx_h_mpdu(ar, msdu, desc, status); @@ -3644,7 +3678,7 @@ static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu, return drop; } -static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, +static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, struct ieee80211_rx_status *status) { struct ath12k_base *ab = ar->ab; @@ -3662,6 +3696,9 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu))) + return true; + ath12k_dp_rx_h_ppdu(ar, desc, status); status->flag |= 
(RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | @@ -3669,6 +3706,7 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, ath12k_dp_rx_h_undecap(ar, msdu, desc, HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); + return false; } static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu, @@ -3687,7 +3725,7 @@ static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu, case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) { - ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status); + drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status); break; } fallthrough; diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index bd269aa1740bc..2ff866e1d7d5b 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -1541,6 +1541,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev) ath12k_core_deinit(ab); qmi_fail: + ath12k_fw_unmap(ab); ath12k_mhi_unregister(ab_pci); ath12k_pci_free_irq(ab); diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index 504e05ea30f29..97ea7ab0f4910 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -2552,7 +2552,7 @@ static void at76_disconnect(struct usb_interface *interface) wiphy_info(priv->hw->wiphy, "disconnecting\n"); at76_delete_device(priv); - usb_put_dev(priv->udev); + usb_put_dev(interface_to_usbdev(interface)); dev_info(&interface->dev, "disconnected\n"); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 8a1e337642448..cfdd92564060a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -1167,6 +1167,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev) struct brcmf_bus *bus_if; struct brcmf_sdio_dev *sdiodev; mmc_pm_flag_t sdio_flags; + bool cap_power_off; int ret = 0; func = container_of(dev, struct sdio_func, dev); @@ -1174,19 +1175,23 @@ static int brcmf_ops_sdio_suspend(struct device *dev) if (func->num != 1) return 0; + cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD); bus_if = dev_get_drvdata(dev); sdiodev = bus_if->bus_priv.sdio; - if (sdiodev->wowl_enabled) { + if (sdiodev->wowl_enabled || !cap_power_off) { brcmf_sdiod_freezer_on(sdiodev); brcmf_sdio_wd_timer(sdiodev->bus, 0); sdio_flags = MMC_PM_KEEP_POWER; - if (sdiodev->settings->bus.sdio.oob_irq_supported) - enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); - else - sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; + + if (sdiodev->wowl_enabled) { + if (sdiodev->settings->bus.sdio.oob_irq_supported) + enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); + else + sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; + } if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags)) brcmf_err("Failed to set pm_flags %x\n", sdio_flags); @@ -1208,18 +1213,19 @@ static int brcmf_ops_sdio_resume(struct device *dev) struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; struct sdio_func *func = container_of(dev, struct sdio_func, dev); int ret = 0; + bool cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD); brcmf_dbg(SDIO, "Enter: F%d\n", func->num); if (func->num != 2) return 0; - if (!sdiodev->wowl_enabled) { + if (!sdiodev->wowl_enabled && cap_power_off) { /* bus was powered off and device removed, probe again 
*/ ret = brcmf_sdiod_probe(sdiodev); if (ret) brcmf_err("Failed to probe device on resume\n"); } else { - if (sdiodev->settings->bus.sdio.oob_irq_supported) + if (sdiodev->wowl_enabled && sdiodev->settings->bus.sdio.oob_irq_supported) disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); brcmf_sdiod_freezer_off(sdiodev); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index fb2ea38e89aca..6594216f873c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -558,41 +558,71 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt, } /* - * alloc_sgtable - allocates scallerlist table in the given size, - * fills it with pages and returns it + * alloc_sgtable - allocates (chained) scatterlist in the given size, + * fills it with pages and returns it * @size: the size (in bytes) of the table -*/ -static struct scatterlist *alloc_sgtable(int size) + */ +static struct scatterlist *alloc_sgtable(ssize_t size) { - int alloc_size, nents, i; - struct page *new_page; - struct scatterlist *iter; - struct scatterlist *table; + struct scatterlist *result = NULL, *prev; + int nents, i, n_prev; nents = DIV_ROUND_UP(size, PAGE_SIZE); - table = kcalloc(nents, sizeof(*table), GFP_KERNEL); - if (!table) - return NULL; - sg_init_table(table, nents); - iter = table; - for_each_sg(table, iter, sg_nents(table), i) { - new_page = alloc_page(GFP_KERNEL); - if (!new_page) { - /* release all previous allocated pages in the table */ - iter = table; - for_each_sg(table, iter, sg_nents(table), i) { - new_page = sg_page(iter); - if (new_page) - __free_page(new_page); - } - kfree(table); + +#define N_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(*result)) + /* + * We need an additional entry for table chaining, + * this ensures the loop can finish i.e. we can + * fit at least two entries per page (obviously, + * many more really fit.) 
+ */ + BUILD_BUG_ON(N_ENTRIES_PER_PAGE < 2); + + while (nents > 0) { + struct scatterlist *new, *iter; + int n_fill, n_alloc; + + if (nents <= N_ENTRIES_PER_PAGE) { + /* last needed table */ + n_fill = nents; + n_alloc = nents; + nents = 0; + } else { + /* fill a page with entries */ + n_alloc = N_ENTRIES_PER_PAGE; + /* reserve one for chaining */ + n_fill = n_alloc - 1; + nents -= n_fill; + } + + new = kcalloc(n_alloc, sizeof(*new), GFP_KERNEL); + if (!new) { + if (result) + _devcd_free_sgtable(result); return NULL; } - alloc_size = min_t(int, size, PAGE_SIZE); - size -= PAGE_SIZE; - sg_set_page(iter, new_page, alloc_size, 0); + sg_init_table(new, n_alloc); + + if (!result) + result = new; + else + sg_chain(prev, n_prev, new); + prev = new; + n_prev = n_alloc; + + for_each_sg(new, iter, n_fill, i) { + struct page *new_page = alloc_page(GFP_KERNEL); + + if (!new_page) { + _devcd_free_sgtable(result); + return NULL; + } + + sg_set_page(iter, new_page, PAGE_SIZE, 0); + } } - return table; + + return result; } static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index c620911a11933..754e01688900d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1197,7 +1197,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, if (tlv_len != sizeof(*fseq_ver)) goto invalid_tlv_len; - IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n", + IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %.32s\n", fseq_ver->version); } break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 91ca830a7b603..f4276fdee6bea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1518,6 +1518,13 @@ static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm, if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) return -EOPNOTSUPP; + /* + * If the firmware is not running, silently succeed since there is + * no data to clear. 
+ */ + if (!iwl_mvm_firmware_running(mvm)) + return count; + mutex_lock(&mvm->mutex); iwl_fw_dbg_clear_monitor_buf(&mvm->fwrt); mutex_unlock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index f30b0fc8eca97..2b9a684cf61d5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -422,6 +422,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, /* if reached this point, Alive notification was received */ iwl_mei_alive_notif(true); + iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); + ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait, &mvm->fw->ucode_capa); if (ret) { @@ -430,8 +432,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, return ret; } - iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); - /* * Note: all the queues are enabled as part of the interface * initialization, but in firmware restart scenarios they diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 65f8933c34b42..0b52d77f57837 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -995,7 +995,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, */ u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK); u32 rate_n_flags = phy_data->rate_n_flags; - u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1; + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; u8 offs = 0; rx_status->bw = RATE_INFO_BW_HE_RU; @@ -1050,13 +1050,13 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, if (he_mu) he_mu->flags2 |= - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags), IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); - else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1) + else if (he_type == RATE_MCS_HE_TYPE_TRIG) he->data6 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) | - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags), IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 72fa7ac86516c..17b8ccc275693 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -1030,6 +1030,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, /* End TE, notify mac80211 */ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID; mvmvif->time_event_data.link_id = -1; + /* set the bit so the ROC cleanup will actually clean up */ + set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status); iwl_mvm_roc_finished(mvm); ieee80211_remain_on_channel_expired(mvm->hw); } else if (le32_to_cpu(notif->start)) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 27a7e0b5b3d51..ebe9b25cc53a9 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -1,6 +1,6 @@ /* 
SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2003-2015, 2018-2024 Intel Corporation + * Copyright (C) 2003-2015, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -643,7 +643,8 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset, unsigned int len); struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *cmd_meta, - u8 **hdr, unsigned int hdr_room); + u8 **hdr, unsigned int hdr_room, + unsigned int offset); void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *cmd_meta); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index b1846abb99b78..477a05cd1288b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020, 2023-2024 Intel Corporation + * Copyright (C) 2018-2020, 2023-2025 Intel Corporation */ #include #include @@ -188,7 +188,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); /* Our device supports 9 segments at most, it will fit in 1 page */ - sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room); + sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room, + snap_ip_tcp_hdrlen + hdr_len); if (!sgt) return -ENOMEM; @@ -347,6 +348,7 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans, return tfd; out_err: + iwl_pcie_free_tso_pages(trans, skb, out_meta); iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); return NULL; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 9fe050f0ddc16..9fcdd06e126ae 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation + * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1853,6 +1853,7 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset, * @cmd_meta: command meta to store the scatter list information for unmapping * @hdr: output argument for TSO headers * @hdr_room: requested length for TSO headers + * @offset: offset into the data from which mapping should start * * Allocate space for a scatter gather list and TSO headers and map the SKB * using the scatter gather list. 
The SKB is unmapped again when the page is @@ -1862,9 +1863,12 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset, */ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *cmd_meta, - u8 **hdr, unsigned int hdr_room) + u8 **hdr, unsigned int hdr_room, + unsigned int offset) { struct sg_table *sgt; + unsigned int n_segments = skb_shinfo(skb)->nr_frags + 1; + int orig_nents; if (WARN_ON_ONCE(skb_has_frag_list(skb))) return NULL; @@ -1872,8 +1876,7 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, *hdr = iwl_pcie_get_page_hdr(trans, hdr_room + __alignof__(struct sg_table) + sizeof(struct sg_table) + - (skb_shinfo(skb)->nr_frags + 1) * - sizeof(struct scatterlist), + n_segments * sizeof(struct scatterlist), skb); if (!*hdr) return NULL; @@ -1881,14 +1884,15 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table)); sgt->sgl = (void *)(sgt + 1); - sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1); + sg_init_table(sgt->sgl, n_segments); /* Only map the data, not the header (it is copied to the TSO page) */ - sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb), - skb->data_len); - if (WARN_ON_ONCE(sgt->orig_nents <= 0)) + orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset); + if (WARN_ON_ONCE(orig_nents <= 0)) return NULL; + sgt->orig_nents = orig_nents; + /* And map the entire SKB */ if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0) return NULL; @@ -1937,7 +1941,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; /* Our device supports 9 segments at most, it will fit in 1 page */ - sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room); + sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room, + snap_ip_tcp_hdrlen + hdr_len + iv_len); if (!sgt) return -ENOMEM; diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index 0bc66cc19acd1..443517d06c9fa 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -95,6 +95,10 @@ int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int l #ifdef CONFIG_NL80211_TESTMODE dev->test_mtd.name = devm_kstrdup(dev->dev, part, GFP_KERNEL); + if (!dev->test_mtd.name) { + ret = -ENOMEM; + goto out_put_node; + } dev->test_mtd.offset = offset; #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 0b75a45ad2e82..e2e9b5ece74e2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -755,6 +755,7 @@ struct mt76_testmode_data { struct mt76_vif { u8 idx; + u8 link_idx; u8 omac_idx; u8 band_idx; u8 wmm_idx; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 7d07e720e4ec1..452579ccc4922 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -1164,7 +1164,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, .tag = cpu_to_le16(DEV_INFO_ACTIVE), .len = cpu_to_le16(sizeof(struct req_tlv)), .active = enable, - .link_idx = mvif->idx, + .link_idx = mvif->link_idx, }, }; struct { @@ -1187,7 +1187,7 @@ int mt76_connac_mcu_uni_add_dev(struct 
mt76_phy *phy, .bmc_tx_wlan_idx = cpu_to_le16(wcid->idx), .sta_idx = cpu_to_le16(wcid->idx), .conn_state = 1, - .link_idx = mvif->idx, + .link_idx = mvif->link_idx, }, }; int err, idx, cmd, len; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index e832ad53e2393..a4f4d12f904e7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -22,6 +22,7 @@ static const struct usb_device_id mt76x2u_device_table[] = { { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ { USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */ { USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */ + { USB_DEVICE(0x2357, 0x0137) }, /* TP-Link TL-WDN6200 */ { }, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index e2dfd3670c4c9..6a3629f71caaa 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -805,6 +805,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, msta->deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx; msta->deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET; msta->deflink.last_txs = jiffies; + msta->deflink.sta = msta; ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm); if (ret) diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c index ddc67423efe2c..d2a98c92e1147 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -256,7 +256,7 @@ int mt7925_init_mlo_caps(struct mt792x_phy *phy) ext_capab[0].eml_capabilities = phy->eml_cap; ext_capab[0].mld_capa_and_ops = - u16_encode_bits(1, IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS); + u16_encode_bits(0, IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS); wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; wiphy->iftype_ext_capab = ext_capab; @@ -356,10 +356,15 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev, struct mt76_txq *mtxq; int idx, ret = 0; - mconf->mt76.idx = __ffs64(~dev->mt76.vif_mask); - if (mconf->mt76.idx >= MT792x_MAX_INTERFACES) { - ret = -ENOSPC; - goto out; + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + mconf->mt76.idx = MT792x_MAX_INTERFACES; + } else { + mconf->mt76.idx = __ffs64(~dev->mt76.vif_mask); + + if (mconf->mt76.idx >= MT792x_MAX_INTERFACES) { + ret = -ENOSPC; + goto out; + } } mconf->mt76.omac_idx = ieee80211_vif_is_mld(vif) ? @@ -367,6 +372,7 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev, mconf->mt76.band_idx = 0xff; mconf->mt76.wmm_idx = ieee80211_vif_is_mld(vif) ? 
0 : mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS; + mconf->mt76.link_idx = hweight16(mvif->valid_links); if (mvif->phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ) mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4; diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index ce3d8197b026a..8476f9caa98db 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -3117,16 +3117,16 @@ __mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, .idx = idx, .env = env_cap, - .acpi_conf = mt792x_acpi_get_flags(&dev->phy), }; int ret, valid_cnt = 0; - u8 i, *pos; + u8 *pos, *last_pos; if (!clc) return 0; pos = clc->data + sizeof(*seg) * clc->nr_seg; - for (i = 0; i < clc->nr_country; i++) { + last_pos = clc->data + le32_to_cpu(*(__le32 *)(clc->data + 4)); + while (pos < last_pos) { struct mt7925_clc_rule *rule = (struct mt7925_clc_rule *)pos; pos += sizeof(*rule); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h index fe6a613ba0088..887427e0760ae 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h @@ -566,8 +566,8 @@ struct mt7925_wow_pattern_tlv { u8 offset; u8 mask[MT76_CONNAC_WOW_MASK_MAX_LEN]; u8 pattern[MT76_CONNAC_WOW_PATTEN_MAX_LEN]; - u8 rsv[7]; -} __packed; + u8 rsv[4]; +}; struct roc_acquire_tlv { __le16 tag; diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c index 474b603c121cb..adb4840b04893 100644 --- a/drivers/net/wireless/ti/wl1251/tx.c +++ b/drivers/net/wireless/ti/wl1251/tx.c @@ -342,8 +342,10 @@ void wl1251_tx_work(struct work_struct *work) while ((skb = skb_dequeue(&wl->tx_queue))) { if (!woken_up) { ret = wl1251_ps_elp_wakeup(wl); - if (ret < 0) + if (ret < 0) { + skb_queue_head(&wl->tx_queue, skb); goto out; + } woken_up = true; } diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c index d5a9360323d29..8755c5e6a65b3 100644 --- a/drivers/net/wwan/mhi_wwan_mbim.c +++ b/drivers/net/wwan/mhi_wwan_mbim.c @@ -220,7 +220,7 @@ static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *s if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) && (mbim->rx_seq || le16_to_cpu(nth16->wSequence)) && !(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) { - net_err_ratelimited("sequence number glitch prev=%d curr=%d\n", + net_dbg_ratelimited("sequence number glitch prev=%d curr=%d\n", mbim->rx_seq, le16_to_cpu(nth16->wSequence)); } mbim->rx_seq = le16_to_cpu(nth16->wSequence); diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c index ffcfc3e02c353..a5aa96a31f4a6 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen3.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c @@ -215,6 +215,9 @@ static int gen3_init_ntb(struct intel_ntb_dev *ndev) } ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; + /* Make sure we are not using DB's used for link status */ + if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) + ndev->db_valid_mask &= ~ndev->db_link_mask; ndev->reg->db_iowrite(ndev->db_valid_mask, ndev->self_mmio + diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c index ad1786be2554b..f851397b65d6e 100644 --- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c +++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c @@ -288,7 +288,7 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, if (size != 0 && 
xlate_pos < 12) return -EINVAL; - if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) { + if (xlate_pos >= 0 && !IS_ALIGNED(addr, BIT_ULL(xlate_pos))) { /* * In certain circumstances we can get a buffer that is * not aligned to its size. (Most of the time diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index a22ea4a4b202b..4f775c3e218f4 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -1353,7 +1353,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) qp_count = ilog2(qp_bitmap); if (nt->use_msi) { qp_count -= 1; - nt->msi_db_mask = 1 << qp_count; + nt->msi_db_mask = BIT_ULL(qp_count); ntb_db_clear_mask(ndev, nt->msi_db_mask); } diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 72bc1d017a46e..dfd175f79e8f0 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -839,10 +839,8 @@ static int perf_copy_chunk(struct perf_thread *pthr, dma_set_unmap(tx, unmap); ret = dma_submit_error(dmaengine_submit(tx)); - if (ret) { - dmaengine_unmap_put(unmap); + if (ret) goto err_free_resource; - } dmaengine_unmap_put(unmap); diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index b1387dc459a32..328f5a103628f 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -599,7 +599,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q, } if (!nvme_try_complete_req(req, cqe->status, cqe->result) && - !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, + !blk_mq_add_to_batch(req, iob, + nvme_req(req)->status != NVME_SC_SUCCESS, apple_nvme_complete_batch)) apple_nvme_complete_rq(req); } @@ -649,7 +650,7 @@ static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force) found = apple_nvme_poll_cq(q, &iob); - if (!rq_list_empty(iob.req_list)) + if (!rq_list_empty(&iob.req_list)) apple_nvme_complete_batch(&iob); return found; @@ -1518,6 +1519,7 @@ static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev) return anv; put_dev: + apple_nvme_detach_genpd(anv); put_device(anv->dev); return ERR_PTR(ret); } @@ -1551,6 +1553,7 @@ static int apple_nvme_probe(struct platform_device *pdev) nvme_uninit_ctrl(&anv->ctrl); out_put_ctrl: nvme_put_ctrl(&anv->ctrl); + apple_nvme_detach_genpd(anv); return ret; } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 8da50df56b079..9bdf6fc53697c 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -429,6 +429,12 @@ static inline void nvme_end_req_zoned(struct request *req) static inline void __nvme_end_req(struct request *req) { + if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) { + if (blk_rq_is_passthrough(req)) + nvme_log_err_passthru(req); + else + nvme_log_error(req); + } nvme_end_req_zoned(req); nvme_trace_bio_complete(req); if (req->cmd_flags & REQ_NVME_MPATH) @@ -439,12 +445,6 @@ void nvme_end_req(struct request *req) { blk_status_t status = nvme_error_status(nvme_req(req)->status); - if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) { - if (blk_rq_is_passthrough(req)) - nvme_log_err_passthru(req); - else - nvme_log_error(req); - } __nvme_end_req(req); blk_mq_end_request(req, status); } @@ -562,8 +562,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, switch (new_state) { case NVME_CTRL_LIVE: switch (old_state) { - case NVME_CTRL_NEW: - case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: changed = true; fallthrough; diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 
682234da2fabe..7c13a400071e6 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -786,49 +786,8 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) "NVME-FC{%d}: controller connectivity lost. Awaiting " "Reconnect", ctrl->cnum); - switch (nvme_ctrl_state(&ctrl->ctrl)) { - case NVME_CTRL_NEW: - case NVME_CTRL_LIVE: - /* - * Schedule a controller reset. The reset will terminate the - * association and schedule the reconnect timer. Reconnects - * will be attempted until either the ctlr_loss_tmo - * (max_retries * connect_delay) expires or the remoteport's - * dev_loss_tmo expires. - */ - if (nvme_reset_ctrl(&ctrl->ctrl)) { - dev_warn(ctrl->ctrl.device, - "NVME-FC{%d}: Couldn't schedule reset.\n", - ctrl->cnum); - nvme_delete_ctrl(&ctrl->ctrl); - } - break; - - case NVME_CTRL_CONNECTING: - /* - * The association has already been terminated and the - * controller is attempting reconnects. No need to do anything - * futher. Reconnects will be attempted until either the - * ctlr_loss_tmo (max_retries * connect_delay) expires or the - * remoteport's dev_loss_tmo expires. - */ - break; - - case NVME_CTRL_RESETTING: - /* - * Controller is already in the process of terminating the - * association. No need to do anything further. The reconnect - * step will kick in naturally after the association is - * terminated. - */ - break; - - case NVME_CTRL_DELETING: - case NVME_CTRL_DELETING_NOIO: - default: - /* no action to take - let it delete */ - break; - } + set_bit(ASSOC_FAILED, &ctrl->flags); + nvme_reset_ctrl(&ctrl->ctrl); } /** @@ -2546,7 +2505,6 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) */ if (state == NVME_CTRL_CONNECTING) { __nvme_fc_abort_outstanding_ios(ctrl, true); - set_bit(ASSOC_FAILED, &ctrl->flags); dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: transport error during (re)connect\n", ctrl->cnum); @@ -3065,7 +3023,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) struct nvmefc_ls_rcv_op *disls = NULL; unsigned long flags; int ret; - bool changed; ++ctrl->ctrl.nr_reconnects; @@ -3176,12 +3133,13 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (ret) goto out_term_aen_ops; - changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE)) { + ret = -EIO; + goto out_term_aen_ops; + } ctrl->ctrl.nr_reconnects = 0; - - if (changed) - nvme_start_ctrl(&ctrl->ctrl); + nvme_start_ctrl(&ctrl->ctrl); return 0; /* Success */ @@ -3582,8 +3540,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); spin_unlock_irqrestore(&rport->lock, flags); - if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) || - !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { dev_err(ctrl->ctrl.device, "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); goto fail_ctrl; diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index a96976b22fa79..a1b3c538a4bd2 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -120,19 +120,31 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer, struct nvme_ns *ns = q->queuedata; struct block_device *bdev = ns ? 
ns->disk->part0 : NULL; bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk); + struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; bool has_metadata = meta_buffer && meta_len; struct bio *bio = NULL; int ret; - if (has_metadata && !supports_metadata) - return -EINVAL; + if (!nvme_ctrl_sgl_supported(ctrl)) + dev_warn_once(ctrl->device, "using unchecked data buffer\n"); + if (has_metadata) { + if (!supports_metadata) { + ret = -EINVAL; + goto out; + } + if (!nvme_ctrl_meta_sgl_supported(ctrl)) + dev_warn_once(ctrl->device, + "using unchecked metadata buffer\n"); + } if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) { struct iov_iter iter; /* fixedbufs is only for non-vectored io */ - if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) - return -EINVAL; + if (flags & NVME_IOCTL_VEC) { + ret = -EINVAL; + goto out; + } ret = io_uring_cmd_import_fixed(ubuffer, bufflen, rq_data_dir(req), &iter, ioucmd); if (ret < 0) @@ -276,8 +288,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, { if (ns && nsid != ns->head->ns_id) { dev_err(ctrl->device, - "%s: nsid (%u) in cmd does not match nsid (%u)" - "of namespace\n", + "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n", current->comm, nsid, ns->head->ns_id); return false; } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 61bba5513de05..dcdce7d12e441 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -1130,6 +1130,13 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl) return ctrl->sgls & ((1 << 0) | (1 << 1)); } +static inline bool nvme_ctrl_meta_sgl_supported(struct nvme_ctrl *ctrl) +{ + if (ctrl->ops->flags & NVME_F_FABRICS) + return true; + return ctrl->sgls & NVME_CTRL_SGLS_MSDS; +} + #ifdef CONFIG_NVME_HOST_AUTH int __init nvme_init_auth(void); void __exit nvme_exit_auth(void); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index cc74682dc0d4e..e70618e8d35eb 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -43,6 +43,7 @@ */ #define NVME_MAX_KB_SZ 8192 #define NVME_MAX_SEGS 128 +#define NVME_MAX_META_SEGS 15 #define NVME_MAX_NR_ALLOCATIONS 5 static int use_threaded_interrupts; @@ -143,6 +144,7 @@ struct nvme_dev { bool hmb; mempool_t *iod_mempool; + mempool_t *iod_meta_mempool; /* shadow doorbell buffer support: */ __le32 *dbbuf_dbs; @@ -238,6 +240,8 @@ struct nvme_iod { dma_addr_t first_dma; dma_addr_t meta_dma; struct sg_table sgt; + struct sg_table meta_sgt; + union nvme_descriptor meta_list; union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS]; }; @@ -505,6 +509,15 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) spin_unlock(&nvmeq->sq_lock); } +static inline bool nvme_pci_metadata_use_sgls(struct nvme_dev *dev, + struct request *req) +{ + if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl)) + return false; + return req->nr_integrity_segments > 1 || + nvme_req(req)->flags & NVME_REQ_USERCMD; +} + static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req, int nseg) { @@ -517,8 +530,10 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req, return false; if (!nvmeq->qid) return false; + if (nvme_pci_metadata_use_sgls(dev, req)) + return true; if (!sgl_threshold || avg_seg_size < sgl_threshold) - return false; + return nvme_req(req)->flags & NVME_REQ_USERCMD; return true; } @@ -779,7 +794,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, struct bio_vec bv = req_bvec(req); if (!is_pci_p2pdma_page(bv.bv_page)) { - if ((bv.bv_offset & 
(NVME_CTRL_PAGE_SIZE - 1)) + + if (!nvme_pci_metadata_use_sgls(dev, req) && + (bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2) return nvme_setup_prp_simple(dev, req, &cmnd->rw, &bv); @@ -823,11 +839,69 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, return ret; } -static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, - struct nvme_command *cmnd) +static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev, + struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_rw_command *cmnd = &iod->cmd.rw; + struct nvme_sgl_desc *sg_list; + struct scatterlist *sgl, *sg; + unsigned int entries; + dma_addr_t sgl_dma; + int rc, i; + + iod->meta_sgt.sgl = mempool_alloc(dev->iod_meta_mempool, GFP_ATOMIC); + if (!iod->meta_sgt.sgl) + return BLK_STS_RESOURCE; + + sg_init_table(iod->meta_sgt.sgl, req->nr_integrity_segments); + iod->meta_sgt.orig_nents = blk_rq_map_integrity_sg(req, + iod->meta_sgt.sgl); + if (!iod->meta_sgt.orig_nents) + goto out_free_sg; + + rc = dma_map_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), + DMA_ATTR_NO_WARN); + if (rc) + goto out_free_sg; + + sg_list = dma_pool_alloc(dev->prp_small_pool, GFP_ATOMIC, &sgl_dma); + if (!sg_list) + goto out_unmap_sg; + + entries = iod->meta_sgt.nents; + iod->meta_list.sg_list = sg_list; + iod->meta_dma = sgl_dma; + + cmnd->flags = NVME_CMD_SGL_METASEG; + cmnd->metadata = cpu_to_le64(sgl_dma); + + sgl = iod->meta_sgt.sgl; + if (entries == 1) { + nvme_pci_sgl_set_data(sg_list, sgl); + return BLK_STS_OK; + } + + sgl_dma += sizeof(*sg_list); + nvme_pci_sgl_set_seg(sg_list, sgl_dma, entries); + for_each_sg(sgl, sg, entries, i) + nvme_pci_sgl_set_data(&sg_list[i + 1], sg); + + return BLK_STS_OK; + +out_unmap_sg: + dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0); +out_free_sg: + mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool); + return BLK_STS_RESOURCE; +} + +static blk_status_t nvme_pci_setup_meta_mptr(struct nvme_dev *dev, + struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); struct bio_vec bv = rq_integrity_vec(req); + struct nvme_command *cmnd = &iod->cmd; iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0); if (dma_mapping_error(dev->dev, iod->meta_dma)) @@ -836,6 +910,13 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, return BLK_STS_OK; } +static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req) +{ + if (nvme_pci_metadata_use_sgls(dev, req)) + return nvme_pci_setup_meta_sgls(dev, req); + return nvme_pci_setup_meta_mptr(dev, req); +} + static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); @@ -844,6 +925,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) iod->aborted = false; iod->nr_allocations = -1; iod->sgt.nents = 0; + iod->meta_sgt.nents = 0; ret = nvme_setup_cmd(req->q->queuedata, req); if (ret) @@ -856,7 +938,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) } if (blk_integrity_rq(req)) { - ret = nvme_map_metadata(dev, req, &iod->cmd); + ret = nvme_map_metadata(dev, req); if (ret) goto out_unmap_data; } @@ -903,7 +985,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_STS_OK; } -static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist) +static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist) { struct 
request *req; @@ -931,11 +1013,10 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req) return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK; } -static void nvme_queue_rqs(struct request **rqlist) +static void nvme_queue_rqs(struct rq_list *rqlist) { - struct request *submit_list = NULL; - struct request *requeue_list = NULL; - struct request **requeue_lastp = &requeue_list; + struct rq_list submit_list = { }; + struct rq_list requeue_list = { }; struct nvme_queue *nvmeq = NULL; struct request *req; @@ -945,9 +1026,9 @@ static void nvme_queue_rqs(struct request **rqlist) nvmeq = req->mq_hctx->driver_data; if (nvme_prep_rq_batch(nvmeq, req)) - rq_list_add(&submit_list, req); /* reverse order */ + rq_list_add_tail(&submit_list, req); else - rq_list_add_tail(&requeue_lastp, req); + rq_list_add_tail(&requeue_list, req); } if (nvmeq) @@ -955,17 +1036,31 @@ static void nvme_queue_rqs(struct request **rqlist) *rqlist = requeue_list; } +static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev, + struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + + if (!iod->meta_sgt.nents) { + dma_unmap_page(dev->dev, iod->meta_dma, + rq_integrity_vec(req).bv_len, + rq_dma_dir(req)); + return; + } + + dma_pool_free(dev->prp_small_pool, iod->meta_list.sg_list, + iod->meta_dma); + dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0); + mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool); +} + static __always_inline void nvme_pci_unmap_rq(struct request *req) { struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct nvme_dev *dev = nvmeq->dev; - if (blk_integrity_rq(req)) { - struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - - dma_unmap_page(dev->dev, iod->meta_dma, - rq_integrity_vec(req).bv_len, rq_dma_dir(req)); - } + if (blk_integrity_rq(req)) + nvme_unmap_metadata(dev, req); if (blk_rq_nr_phys_segments(req)) nvme_unmap_data(dev, req); @@ -1035,8 +1130,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); if (!nvme_try_complete_req(req, cqe->status, cqe->result) && - !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, - nvme_pci_complete_batch)) + !blk_mq_add_to_batch(req, iob, + nvme_req(req)->status != NVME_SC_SUCCESS, + nvme_pci_complete_batch)) nvme_pci_complete_rq(req); } @@ -1079,7 +1175,7 @@ static irqreturn_t nvme_irq(int irq, void *data) DEFINE_IO_COMP_BATCH(iob); if (nvme_poll_cq(nvmeq, &iob)) { - if (!rq_list_empty(iob.req_list)) + if (!rq_list_empty(&iob.req_list)) nvme_pci_complete_batch(&iob); return IRQ_HANDLED; } @@ -1316,9 +1412,20 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) struct nvme_dev *dev = nvmeq->dev; struct request *abort_req; struct nvme_command cmd = { }; + struct pci_dev *pdev = to_pci_dev(dev->dev); u32 csts = readl(dev->bar + NVME_REG_CSTS); u8 opcode; + /* + * Shutdown the device immediately if we see it is disconnected. This + * unblocks PCIe error handling if the nvme driver is waiting in + * error_resume for a device that has been removed. We can't unbind the + * driver while the driver's error callback is waiting to complete, so + * we're relying on a timeout to break that deadlock if a removal + * occurs while reset work is running. + */ + if (pci_dev_is_disconnected(pdev)) + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); if (nvme_state_terminal(&dev->ctrl)) goto disable; @@ -1326,7 +1433,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) * the recovery mechanism will surely fail. 
*/ mb(); - if (pci_channel_offline(to_pci_dev(dev->dev))) + if (pci_channel_offline(pdev)) return BLK_EH_RESET_TIMER; /* @@ -1887,6 +1994,18 @@ static void nvme_map_cmb(struct nvme_dev *dev) if (offset > bar_size) return; + /* + * Controllers may support a CMB size larger than their BAR, for + * example, due to being behind a bridge. Reduce the CMB to the + * reported size of the BAR + */ + size = min(size, bar_size - offset); + + if (!IS_ALIGNED(size, memremap_compat_align()) || + !IS_ALIGNED(pci_resource_start(pdev, bar), + memremap_compat_align())) + return; + /* * Tell the controller about the host side address mapping the CMB, * and enable CMB decoding for the NVMe 1.4+ scheme: @@ -1897,17 +2016,10 @@ static void nvme_map_cmb(struct nvme_dev *dev) dev->bar + NVME_REG_CMBMSC); } - /* - * Controllers may support a CMB size larger than their BAR, - * for example, due to being behind a bridge. Reduce the CMB to - * the reported size of the BAR - */ - if (size > bar_size - offset) - size = bar_size - offset; - if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { dev_warn(dev->ctrl.device, "failed to register the CMB\n"); + hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC); return; } @@ -2719,6 +2831,7 @@ static void nvme_release_prp_pools(struct nvme_dev *dev) static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) { + size_t meta_size = sizeof(struct scatterlist) * (NVME_MAX_META_SEGS + 1); size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS; dev->iod_mempool = mempool_create_node(1, @@ -2727,7 +2840,18 @@ static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) dev_to_node(dev->dev)); if (!dev->iod_mempool) return -ENOMEM; + + dev->iod_meta_mempool = mempool_create_node(1, + mempool_kmalloc, mempool_kfree, + (void *)meta_size, GFP_KERNEL, + dev_to_node(dev->dev)); + if (!dev->iod_meta_mempool) + goto free; + return 0; +free: + mempool_destroy(dev->iod_mempool); + return -ENOMEM; } static void nvme_free_tagset(struct nvme_dev *dev) @@ -2792,6 +2916,11 @@ static void nvme_reset_work(struct work_struct *work) if (result) goto out; + if (nvme_ctrl_meta_sgl_supported(&dev->ctrl)) + dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS; + else + dev->ctrl.max_integrity_segments = 1; + nvme_dbbuf_dma_alloc(dev); result = nvme_setup_host_mem(dev); @@ -3061,11 +3190,6 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, dev->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9); dev->ctrl.max_segments = NVME_MAX_SEGS; - - /* - * There is no support for SGLs for metadata (yet), so we are limited to - * a single integrity segment for the separate metadata pointer. 
- */ dev->ctrl.max_integrity_segments = 1; return dev; @@ -3128,6 +3252,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (result) goto out_disable; + if (nvme_ctrl_meta_sgl_supported(&dev->ctrl)) + dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS; + else + dev->ctrl.max_integrity_segments = 1; + nvme_dbbuf_dma_alloc(dev); result = nvme_setup_host_mem(dev); @@ -3170,6 +3299,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) nvme_free_queues(dev, 0); out_release_iod_mempool: mempool_destroy(dev->iod_mempool); + mempool_destroy(dev->iod_meta_mempool); out_release_prp_pools: nvme_release_prp_pools(dev); out_dev_unmap: @@ -3235,6 +3365,7 @@ static void nvme_remove(struct pci_dev *pdev) nvme_dbbuf_dma_free(dev); nvme_free_queues(dev, 0); mempool_destroy(dev->iod_mempool); + mempool_destroy(dev->iod_meta_mempool); nvme_release_prp_pools(dev); nvme_dev_unmap(dev); nvme_uninit_ctrl(&dev->ctrl); @@ -3554,6 +3685,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1dbe, 0x5216), /* Acer/INNOGRIT FA100/5216 NVMe SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */ diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 8305d3c128074..854aa6a070ca8 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -217,6 +217,19 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) return queue - queue->ctrl->queues; } +static inline bool nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type) +{ + switch (type) { + case nvme_tcp_c2h_term: + case nvme_tcp_c2h_data: + case nvme_tcp_r2t: + case nvme_tcp_rsp: + return true; + default: + return false; + } +} + /* * Check if the queue is TLS encrypted */ @@ -763,6 +776,40 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, return 0; } +static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue, + struct nvme_tcp_term_pdu *pdu) +{ + u16 fes; + const char *msg; + u32 plen = le32_to_cpu(pdu->hdr.plen); + + static const char * const msg_table[] = { + [NVME_TCP_FES_INVALID_PDU_HDR] = "Invalid PDU Header Field", + [NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error", + [NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error", + [NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range", + [NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded", + [NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter", + }; + + if (plen < NVME_TCP_MIN_C2HTERM_PLEN || + plen > NVME_TCP_MAX_C2HTERM_PLEN) { + dev_err(queue->ctrl->ctrl.device, + "Received a malformed C2HTermReq PDU (plen = %u)\n", + plen); + return; + } + + fes = le16_to_cpu(pdu->fes); + if (fes && fes < ARRAY_SIZE(msg_table)) + msg = msg_table[fes]; + else + msg = "Unknown"; + + dev_err(queue->ctrl->ctrl.device, + "Received C2HTermReq (FES = %s)\n", msg); +} + static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, unsigned int *offset, size_t *len) { @@ -784,6 +831,25 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, return 0; hdr = queue->pdu; + if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) { + if (!nvme_tcp_recv_pdu_supported(hdr->type)) + goto unsupported_pdu; + + dev_err(queue->ctrl->ctrl.device, + 
"pdu type %d has unexpected header length (%d)\n", + hdr->type, hdr->hlen); + return -EPROTO; + } + + if (unlikely(hdr->type == nvme_tcp_c2h_term)) { + /* + * C2HTermReq never includes Header or Data digests. + * Skip the checks. + */ + nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu); + return -EINVAL; + } + if (queue->hdr_digest) { ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); if (unlikely(ret)) @@ -807,10 +873,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, nvme_tcp_init_recv_ctx(queue); return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); default: - dev_err(queue->ctrl->ctrl.device, - "unsupported pdu type (%d)\n", hdr->type); - return -EINVAL; + goto unsupported_pdu; } + +unsupported_pdu: + dev_err(queue->ctrl->ctrl.device, + "unsupported pdu type (%d)\n", hdr->type); + return -EINVAL; } static inline void nvme_tcp_end_request(struct request *rq, u16 status) @@ -1449,8 +1518,11 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) msg.msg_control = cbuf; msg.msg_controllen = sizeof(cbuf); } + msg.msg_flags = MSG_WAITALL; ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, iov.iov_len, msg.msg_flags); + if (ret >= 0 && ret < sizeof(*icresp)) + ret = -ECONNRESET; if (ret < 0) { pr_warn("queue %d: failed to receive icresp, error %d\n", nvme_tcp_queue_id(queue), ret); @@ -1565,7 +1637,7 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) ctrl->io_queues[HCTX_TYPE_POLL]; } -/** +/* * Track the number of queues assigned to each cpu using a global per-cpu * counter and select the least used cpu from the mq_map. Our goal is to spread * different controllers I/O threads across different cpu cores. @@ -2664,6 +2736,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nvme_tcp_queue *queue = hctx->driver_data; struct sock *sk = queue->sock->sk; + int ret; if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) return 0; @@ -2671,9 +2744,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) set_bit(NVME_TCP_Q_POLLING, &queue->flags); if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) sk_busy_loop(sk, true); - nvme_tcp_try_recv(queue); + ret = nvme_tcp_try_recv(queue); clear_bit(NVME_TCP_Q_POLLING, &queue->flags); - return queue->nr_cqe; + return ret < 0 ? 
ret : queue->nr_cqe; } static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) diff --git a/drivers/nvme/target/debugfs.c b/drivers/nvme/target/debugfs.c index 220c7391fc19a..c6571fbd35e30 100644 --- a/drivers/nvme/target/debugfs.c +++ b/drivers/nvme/target/debugfs.c @@ -78,7 +78,7 @@ static int nvmet_ctrl_state_show(struct seq_file *m, void *p) bool sep = false; int i; - for (i = 0; i < 7; i++) { + for (i = 0; i < ARRAY_SIZE(csts_state_names); i++) { int state = BIT(i); if (!(ctrl->csts & state)) diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 3ef4beacde325..7318b736d4141 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -172,20 +172,6 @@ struct nvmet_fc_tgt_assoc { struct work_struct del_work; }; - -static inline int -nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr) -{ - return (iodptr - iodptr->tgtport->iod); -} - -static inline int -nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr) -{ - return (fodptr - fodptr->queue->fod); -} - - /* * Association and Connection IDs: * diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index e1abb27927ff7..da195d61a9664 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -478,7 +478,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport, if (targetport) { tport = targetport->private; spin_lock(&tport->lock); - list_add_tail(&tport->ls_list, &tls_req->ls_list); + list_add_tail(&tls_req->ls_list, &tport->ls_list); spin_unlock(&tport->lock); queue_work(nvmet_wq, &tport->ls_work); } diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 1afd93026f9bf..2a4536ef61848 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -996,6 +996,27 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, nvmet_req_complete(&cmd->req, status); } +static bool nvmet_rdma_recv_not_live(struct nvmet_rdma_queue *queue, + struct nvmet_rdma_rsp *rsp) +{ + unsigned long flags; + bool ret = true; + + spin_lock_irqsave(&queue->state_lock, flags); + /* + * recheck queue state is not live to prevent a race condition + * with RDMA_CM_EVENT_ESTABLISHED handler. 
+ */ + if (queue->state == NVMET_RDMA_Q_LIVE) + ret = false; + else if (queue->state == NVMET_RDMA_Q_CONNECTING) + list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); + else + nvmet_rdma_put_rsp(rsp); + spin_unlock_irqrestore(&queue->state_lock, flags); + return ret; +} + static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvmet_rdma_cmd *cmd = @@ -1038,17 +1059,9 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) rsp->n_rdma = 0; rsp->invalidate_rkey = 0; - if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { - unsigned long flags; - - spin_lock_irqsave(&queue->state_lock, flags); - if (queue->state == NVMET_RDMA_Q_CONNECTING) - list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); - else - nvmet_rdma_put_rsp(rsp); - spin_unlock_irqrestore(&queue->state_lock, flags); + if (unlikely(queue->state != NVMET_RDMA_Q_LIVE) && + nvmet_rdma_recv_not_live(queue, rsp)) return; - } nvmet_rdma_handle_command(queue, rsp); } diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 7c51c2a8c109a..4f9cac8a5abe0 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -571,10 +571,16 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req) struct nvmet_tcp_cmd *cmd = container_of(req, struct nvmet_tcp_cmd, req); struct nvmet_tcp_queue *queue = cmd->queue; + enum nvmet_tcp_recv_state queue_state; + struct nvmet_tcp_cmd *queue_cmd; struct nvme_sgl_desc *sgl; u32 len; - if (unlikely(cmd == queue->cmd)) { + /* Pairs with store_release in nvmet_prepare_receive_pdu() */ + queue_state = smp_load_acquire(&queue->rcv_state); + queue_cmd = READ_ONCE(queue->cmd); + + if (unlikely(cmd == queue_cmd)) { sgl = &cmd->req.cmd->common.dptr.sgl; len = le32_to_cpu(sgl->length); @@ -583,7 +589,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req) * Avoid using helpers, this might happen before * nvmet_req_init is completed. */ - if (queue->rcv_state == NVMET_TCP_RECV_PDU && + if (queue_state == NVMET_TCP_RECV_PDU && len && len <= cmd->req.port->inline_data_size && nvme_is_write(cmd->req.cmd)) return; @@ -847,8 +853,9 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue) { queue->offset = 0; queue->left = sizeof(struct nvme_tcp_hdr); - queue->cmd = NULL; - queue->rcv_state = NVMET_TCP_RECV_PDU; + WRITE_ONCE(queue->cmd, NULL); + /* Ensure rcv_state is visible only after queue->cmd is set */ + smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU); } static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue) diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 1fb329c0a55b8..5fbfc4d4e06e4 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -16,6 +16,7 @@ #define pr_fmt(fmt) "OF: " fmt +#include #include #include #include @@ -38,11 +39,15 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index) { struct of_phandle_args oirq; + unsigned int ret; if (of_irq_parse_one(dev, index, &oirq)) return 0; - return irq_create_of_mapping(&oirq); + ret = irq_create_of_mapping(&oirq); + of_node_put(oirq.np); + + return ret; } EXPORT_SYMBOL_GPL(irq_of_parse_and_map); @@ -165,6 +170,8 @@ const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_ph * the specifier for each map, and then returns the translated map. * * Return: 0 on success and a negative number on error + * + * Note: refcount of node @out_irq->np is increased by 1 on success. 
*/ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) { @@ -310,6 +317,12 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) addrsize = (imap - match_array) - intsize; if (ipar == newpar) { + /* + * We got @ipar's refcount, but the refcount was + * gotten again by of_irq_parse_imap_parent() via its + * alias @newpar. + */ + of_node_put(ipar); pr_debug("%pOF interrupt-map entry to self\n", ipar); return 0; } @@ -339,10 +352,12 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw); * This function resolves an interrupt for a node by walking the interrupt tree, * finding which interrupt controller node it is attached to, and returning the * interrupt specifier that can be used to retrieve a Linux IRQ number. + * + * Note: refcount of node @out_irq->np is increased by 1 on success. */ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_args *out_irq) { - struct device_node *p; + struct device_node __free(device_node) *p = NULL; const __be32 *addr; u32 intsize; int i, res, addr_len; @@ -367,41 +382,33 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar /* Try the new-style interrupts-extended first */ res = of_parse_phandle_with_args(device, "interrupts-extended", "#interrupt-cells", index, out_irq); - if (!res) - return of_irq_parse_raw(addr_buf, out_irq); - - /* Look for the interrupt parent. */ - p = of_irq_find_parent(device); - if (p == NULL) - return -EINVAL; + if (!res) { + p = out_irq->np; + } else { + /* Look for the interrupt parent. */ + p = of_irq_find_parent(device); + /* Get size of interrupt specifier */ + if (!p || of_property_read_u32(p, "#interrupt-cells", &intsize)) + return -EINVAL; + + pr_debug(" parent=%pOF, intsize=%d\n", p, intsize); + + /* Copy intspec into irq structure */ + out_irq->np = p; + out_irq->args_count = intsize; + for (i = 0; i < intsize; i++) { + res = of_property_read_u32_index(device, "interrupts", + (index * intsize) + i, + out_irq->args + i); + if (res) + return res; + } - /* Get size of interrupt specifier */ - if (of_property_read_u32(p, "#interrupt-cells", &intsize)) { - res = -EINVAL; - goto out; + pr_debug(" intspec=%d\n", *out_irq->args); } - pr_debug(" parent=%pOF, intsize=%d\n", p, intsize); - - /* Copy intspec into irq structure */ - out_irq->np = p; - out_irq->args_count = intsize; - for (i = 0; i < intsize; i++) { - res = of_property_read_u32_index(device, "interrupts", - (index * intsize) + i, - out_irq->args + i); - if (res) - goto out; - } - - pr_debug(" intspec=%d\n", *out_irq->args); - - /* Check if there are any interrupt-map translations to process */ - res = of_irq_parse_raw(addr_buf, out_irq); - out: - of_node_put(p); - return res; + return of_irq_parse_raw(addr_buf, out_irq); } EXPORT_SYMBOL_GPL(of_irq_parse_one); @@ -505,8 +512,10 @@ int of_irq_count(struct device_node *dev) struct of_phandle_args irq; int nr = 0; - while (of_irq_parse_one(dev, nr, &irq) == 0) + while (of_irq_parse_one(dev, nr, &irq) == 0) { + of_node_put(irq.np); nr++; + } return nr; } @@ -623,6 +632,8 @@ void __init of_irq_init(const struct of_device_id *matches) __func__, desc->dev, desc->dev, desc->interrupt_parent); of_node_clear_flag(desc->dev, OF_POPULATED); + of_node_put(desc->interrupt_parent); + of_node_put(desc->dev); kfree(desc); continue; } @@ -653,6 +664,7 @@ void __init of_irq_init(const struct of_device_id *matches) err: list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) { list_del(&desc->list); + of_node_put(desc->interrupt_parent); 
of_node_put(desc->dev); kfree(desc); } diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index e45d6d3a8dc67..45445a1600a96 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -360,12 +360,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam prop = of_get_flat_dt_prop(node, "alignment", &len); if (prop) { - if (len != dt_root_size_cells * sizeof(__be32)) { + if (len != dt_root_addr_cells * sizeof(__be32)) { pr_err("invalid alignment property in '%s' node.\n", uname); return -EINVAL; } - align = dt_mem_next_cell(dt_root_size_cells, &prop); + align = dt_mem_next_cell(dt_root_addr_cells, &prop); } nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index e091c3e55b5c6..bae829ac759e1 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -355,6 +355,7 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = { static const struct j721e_pcie_data j7200_pcie_ep_data = { .mode = PCI_MODE_EP, .quirk_detect_quiet_flag = true, + .linkdown_irq_regfield = J7200_LINK_DOWN, .quirk_disable_flr = true, .max_lanes = 2, }; @@ -376,13 +377,13 @@ static const struct j721e_pcie_data j784s4_pcie_rc_data = { .mode = PCI_MODE_RC, .quirk_retrain_flag = true, .byte_access_allowed = false, - .linkdown_irq_regfield = LINK_DOWN, + .linkdown_irq_regfield = J7200_LINK_DOWN, .max_lanes = 4, }; static const struct j721e_pcie_data j784s4_pcie_ep_data = { .mode = PCI_MODE_EP, - .linkdown_irq_regfield = LINK_DOWN, + .linkdown_irq_regfield = J7200_LINK_DOWN, .max_lanes = 4, }; diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index e0cc4560dfde7..0bf4cde34f517 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -352,8 +352,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, spin_unlock_irqrestore(&ep->lock, flags); offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | - CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | - CDNS_PCIE_MSG_NO_DATA; + CDNS_PCIE_NORMAL_MSG_CODE(msg_code); writel(0, ep->irq_cpu_addr + offset); } diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index f5eeff834ec19..39ee9945c903e 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -246,7 +246,7 @@ struct cdns_pcie_rp_ib_bar { #define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8) #define CDNS_PCIE_NORMAL_MSG_CODE(code) \ (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK) -#define CDNS_PCIE_MSG_NO_DATA BIT(16) +#define CDNS_PCIE_MSG_DATA BIT(16) struct cdns_pcie; diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index b58e89ea566b8..dea19250598a6 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -755,6 +755,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep) if (ret) return ret; + ret = -ENOMEM; if (!ep->ib_window_map) { ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows, GFP_KERNEL); diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 7a11c618b9d9c..5538e5bf99fb6 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ 
b/drivers/pci/controller/dwc/pcie-histb.c @@ -409,16 +409,21 @@ static int histb_pcie_probe(struct platform_device *pdev) ret = histb_pcie_host_enable(pp); if (ret) { dev_err(dev, "failed to enable host\n"); - return ret; + goto err_exit_phy; } ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "failed to initialize host\n"); - return ret; + goto err_exit_phy; } return 0; + +err_exit_phy: + phy_exit(hipcie->phy); + + return ret; } static void histb_pcie_remove(struct platform_device *pdev) @@ -427,8 +432,7 @@ static void histb_pcie_remove(struct platform_device *pdev) histb_pcie_host_disable(hipcie); - if (hipcie->phy) - phy_exit(hipcie->phy); + phy_exit(hipcie->phy); } static const struct of_device_id histb_pcie_of_match[] = { diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 9321280f6edba..792d24cea5747 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -403,10 +403,10 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie) static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen) { u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2); - u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP); + u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen; - writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP); + writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); lnkctl2 = (lnkctl2 & ~0xf) | gen; writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2); @@ -1276,6 +1276,10 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie) bool ssc_good = false; int ret, i; + /* Limit the generation if specified */ + if (pcie->gen) + brcm_pcie_set_gen(pcie, pcie->gen); + /* Unassert the fundamental reset */ ret = pcie->perst_set(pcie, 0); if (ret) @@ -1302,9 +1306,6 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie) brcm_config_clkreq(pcie); - if (pcie->gen) - brcm_pcie_set_gen(pcie, pcie->gen); - if (pcie->ssc) { ret = brcm_pcie_set_ssc(pcie); if (ret == 0) @@ -1367,7 +1368,8 @@ static int brcm_pcie_add_bus(struct pci_bus *bus) ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies); if (ret) { - dev_info(dev, "No regulators for downstream device\n"); + dev_info(dev, "Did not get regulators, err=%d\n", ret); + pcie->sr = NULL; goto no_regulators; } @@ -1390,7 +1392,7 @@ static void brcm_pcie_remove_bus(struct pci_bus *bus) struct subdev_regulators *sr = pcie->sr; struct device *dev = &bus->dev; - if (!sr) + if (!sr || !bus->parent || !pci_is_root_bus(bus->parent)) return; if (regulator_bulk_disable(sr->num_supplies, sr->supplies)) @@ -1784,7 +1786,7 @@ static struct pci_ops brcm7425_pcie_ops = { static int brcm_pcie_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node, *msi_np; + struct device_node *np = pdev->dev.of_node; struct pci_host_bridge *bridge; const struct pcie_cfg_data *data; struct brcm_pcie *pcie; @@ -1888,9 +1890,14 @@ static int brcm_pcie_probe(struct platform_device *pdev) goto fail; } - msi_np = of_parse_phandle(pcie->np, "msi-parent", 0); - if (pci_msi_enabled() && msi_np == pcie->np) { - ret = brcm_pcie_enable_msi(pcie); + if (pci_msi_enabled()) { + struct device_node *msi_np = of_parse_phandle(pcie->np, "msi-parent", 0); + + if (msi_np == pcie->np) + ret = brcm_pcie_enable_msi(pcie); + + of_node_put(msi_np); + if (ret) { dev_err(pcie->dev, "probe of internal MSI failed"); goto fail; diff --git 
a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index cbec711148253..481dcc476c556 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -367,7 +367,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) } } - rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, + rockchip_pcie_write(rockchip, PCI_VENDOR_ID_ROCKCHIP, PCIE_CORE_CONFIG_VENDOR); rockchip_pcie_write(rockchip, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index 15ee949f2485e..688f51d9bde63 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -188,7 +188,6 @@ #define AXI_WRAPPER_NOR_MSG 0xc #define PCIE_RC_SEND_PME_OFF 0x11960 -#define ROCKCHIP_VENDOR_ID 0x1d87 #define PCIE_LINK_IS_L2(x) \ (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2) #define PCIE_LINK_UP(x) \ diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c index a0f5e1d67b04c..1594d9e9e637a 100644 --- a/drivers/pci/controller/pcie-xilinx-cpm.c +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -570,15 +570,17 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev) return err; bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); - if (!bus) - return -ENODEV; + if (!bus) { + err = -ENODEV; + goto err_free_irq_domains; + } port->variant = of_device_get_match_data(dev); err = xilinx_cpm_pcie_parse_dt(port, bus->res); if (err) { dev_err(dev, "Parsing DT failed\n"); - goto err_parse_dt; + goto err_free_irq_domains; } xilinx_cpm_pcie_init_port(port); @@ -602,7 +604,7 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev) xilinx_cpm_free_interrupts(port); err_setup_irq: pci_ecam_free(port->cfg); -err_parse_dt: +err_free_irq_domains: xilinx_cpm_free_irq_domains(port); return err; } diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 9d9596947350f..94ceec50a2b94 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -125,7 +125,7 @@ struct vmd_irq_list { struct vmd_dev { struct pci_dev *dev; - spinlock_t cfg_lock; + raw_spinlock_t cfg_lock; void __iomem *cfgbar; int msix_count; @@ -391,7 +391,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, if (!addr) return -EFAULT; - spin_lock_irqsave(&vmd->cfg_lock, flags); + raw_spin_lock_irqsave(&vmd->cfg_lock, flags); switch (len) { case 1: *value = readb(addr); @@ -406,7 +406,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, ret = -EINVAL; break; } - spin_unlock_irqrestore(&vmd->cfg_lock, flags); + raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags); return ret; } @@ -426,7 +426,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, if (!addr) return -EFAULT; - spin_lock_irqsave(&vmd->cfg_lock, flags); + raw_spin_lock_irqsave(&vmd->cfg_lock, flags); switch (len) { case 1: writeb(value, addr); @@ -444,7 +444,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, ret = -EINVAL; break; } - spin_unlock_irqrestore(&vmd->cfg_lock, flags); + raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags); return ret; } @@ -1009,7 +1009,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) if (features & VMD_FEAT_OFFSET_FIRST_VECTOR) vmd->first_vec = 1; - spin_lock_init(&vmd->cfg_lock); + raw_spin_lock_init(&vmd->cfg_lock); 
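+	/*
+	 * A note on the lock type (a sketch of the rationale, which this
+	 * hunk does not spell out): the config accessors above take this
+	 * lock with interrupts disabled, and on PREEMPT_RT a spinlock_t
+	 * becomes a sleeping lock, so only a raw_spinlock_t remains safe
+	 * to take in such contexts.
+	 */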
pci_set_drvdata(dev, vmd); err = vmd_enable_domain(vmd, features); if (err) diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c index b133967faef84..3f2691888c35d 100644 --- a/drivers/pci/devres.c +++ b/drivers/pci/devres.c @@ -40,7 +40,7 @@ * Legacy struct storing addresses to whole mapped BARs. */ struct pcim_iomap_devres { - void __iomem *table[PCI_STD_NUM_BARS]; + void __iomem *table[PCI_NUM_RESOURCES]; }; /* Used to restore the old INTx state on driver detach. */ @@ -411,46 +411,20 @@ static inline bool mask_contains_bar(int mask, int bar) return mask & BIT(bar); } -/* - * This is a copy of pci_intx() used to bypass the problem of recursive - * function calls due to the hybrid nature of pci_intx(). - */ -static void __pcim_intx(struct pci_dev *pdev, int enable) -{ - u16 pci_command, new; - - pci_read_config_word(pdev, PCI_COMMAND, &pci_command); - - if (enable) - new = pci_command & ~PCI_COMMAND_INTX_DISABLE; - else - new = pci_command | PCI_COMMAND_INTX_DISABLE; - - if (new != pci_command) - pci_write_config_word(pdev, PCI_COMMAND, new); -} - static void pcim_intx_restore(struct device *dev, void *data) { struct pci_dev *pdev = to_pci_dev(dev); struct pcim_intx_devres *res = data; - __pcim_intx(pdev, res->orig_intx); + pci_intx(pdev, res->orig_intx); } -static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev) +static void save_orig_intx(struct pci_dev *pdev, struct pcim_intx_devres *res) { - struct pcim_intx_devres *res; - - res = devres_find(dev, pcim_intx_restore, NULL, NULL); - if (res) - return res; + u16 pci_command; - res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL); - if (res) - devres_add(dev, res); - - return res; + pci_read_config_word(pdev, PCI_COMMAND, &pci_command); + res->orig_intx = !(pci_command & PCI_COMMAND_INTX_DISABLE); } /** @@ -466,16 +440,28 @@ static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev) int pcim_intx(struct pci_dev *pdev, int enable) { struct pcim_intx_devres *res; + struct device *dev = &pdev->dev; - res = get_or_create_intx_devres(&pdev->dev); - if (!res) - return -ENOMEM; + /* + * pcim_intx() must only restore the INTx value that existed before the + * driver was loaded, i.e., before it called pcim_intx() for the + * first time. 
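+ *
+ * E.g. (sketch): a first pcim_intx(pdev, 0) records the pre-driver
+ * INTx state and then disables INTx; a later pcim_intx(pdev, 1) finds
+ * the existing devres entry and only enables INTx; on driver detach
+ * the devres callback writes the recorded state back.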
+ */ + res = devres_find(dev, pcim_intx_restore, NULL, NULL); + if (!res) { + res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; + + save_orig_intx(pdev, res); + devres_add(dev, res); + } - res->orig_intx = !enable; - __pcim_intx(pdev, enable); + pci_intx(pdev, enable); return 0; } +EXPORT_SYMBOL_GPL(pcim_intx); static void pcim_disable_device(void *pdev_raw) { @@ -591,7 +577,7 @@ static int pcim_add_mapping_to_legacy_table(struct pci_dev *pdev, { void __iomem **legacy_iomap_table; - if (bar >= PCI_STD_NUM_BARS) + if (!pci_bar_index_is_valid(bar)) return -EINVAL; legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev); @@ -636,7 +622,7 @@ static void pcim_remove_bar_from_legacy_table(struct pci_dev *pdev, int bar) { void __iomem **legacy_iomap_table; - if (bar >= PCI_STD_NUM_BARS) + if (!pci_bar_index_is_valid(bar)) return; legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev); @@ -669,6 +655,9 @@ void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen) void __iomem *mapping; struct pcim_addr_devres *res; + if (!pci_bar_index_is_valid(bar)) + return NULL; + res = pcim_addr_devres_alloc(pdev); if (!res) return NULL; @@ -736,6 +725,9 @@ void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar, int ret; struct pcim_addr_devres *res; + if (!pci_bar_index_is_valid(bar)) + return IOMEM_ERR_PTR(-EINVAL); + res = pcim_addr_devres_alloc(pdev); if (!res) return IOMEM_ERR_PTR(-ENOMEM); @@ -836,6 +828,9 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name, int ret; struct pcim_addr_devres *res; + if (!pci_bar_index_is_valid(bar)) + return -EINVAL; + res = pcim_addr_devres_alloc(pdev); if (!res) return -ENOMEM; @@ -939,7 +934,7 @@ static void pcim_release_all_regions(struct pci_dev *pdev) * desired, release individual regions with pcim_release_region() or all of * them at once with pcim_release_all_regions(). 
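 *
 * Typical use (illustrative; "mydrv" is a placeholder driver name):
 *
 *	ret = pcim_request_all_regions(pdev, "mydrv");
 *	if (ret)
 *		return ret;
 *	base = pcim_iomap(pdev, 0, 0);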
*/ -static int pcim_request_all_regions(struct pci_dev *pdev, const char *name) +int pcim_request_all_regions(struct pci_dev *pdev, const char *name) { int ret; int bar; @@ -957,6 +952,7 @@ static int pcim_request_all_regions(struct pci_dev *pdev, const char *name) return ret; } +EXPORT_SYMBOL(pcim_request_all_regions); /** * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones @@ -1056,6 +1052,9 @@ void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar, void __iomem *mapping; struct pcim_addr_devres *res; + if (!pci_bar_index_is_valid(bar)) + return IOMEM_ERR_PTR(-EINVAL); + res = pcim_addr_devres_alloc(pdev); if (!res) return IOMEM_ERR_PTR(-ENOMEM); diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index ff458e692fedb..997841c698935 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -286,9 +286,12 @@ static int pciehp_suspend(struct pcie_device *dev) static bool pciehp_device_replaced(struct controller *ctrl) { - struct pci_dev *pdev __free(pci_dev_put); + struct pci_dev *pdev __free(pci_dev_put) = NULL; u32 reg; + if (pci_dev_is_disconnected(ctrl->pcie->port)) + return false; + pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0)); if (!pdev) return true; diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 736ad8baa2a55..8f3e4c7de961f 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -842,7 +842,9 @@ void pcie_enable_interrupt(struct controller *ctrl) { u16 mask; - mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE; + mask = PCI_EXP_SLTCTL_DLLSCE; + if (!pciehp_poll_mode) + mask |= PCI_EXP_SLTCTL_HPIE; pcie_write_cmd(ctrl, mask, mask); } diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c index 9fb7cacc15cde..fe706ed946dfd 100644 --- a/drivers/pci/iomap.c +++ b/drivers/pci/iomap.c @@ -9,6 +9,8 @@ #include +#include "pci.h" /* for pci_bar_index_is_valid() */ + /** * pci_iomap_range - create a virtual mapping cookie for a PCI BAR * @dev: PCI device that owns the BAR @@ -33,12 +35,19 @@ void __iomem *pci_iomap_range(struct pci_dev *dev, unsigned long offset, unsigned long maxlen) { - resource_size_t start = pci_resource_start(dev, bar); - resource_size_t len = pci_resource_len(dev, bar); - unsigned long flags = pci_resource_flags(dev, bar); + resource_size_t start, len; + unsigned long flags; + + if (!pci_bar_index_is_valid(bar)) + return NULL; + + start = pci_resource_start(dev, bar); + len = pci_resource_len(dev, bar); + flags = pci_resource_flags(dev, bar); if (len <= offset || !start) return NULL; + len -= offset; start += offset; if (maxlen && len > maxlen) @@ -77,16 +86,20 @@ void __iomem *pci_iomap_wc_range(struct pci_dev *dev, unsigned long offset, unsigned long maxlen) { - resource_size_t start = pci_resource_start(dev, bar); - resource_size_t len = pci_resource_len(dev, bar); - unsigned long flags = pci_resource_flags(dev, bar); + resource_size_t start, len; + unsigned long flags; - - if (flags & IORESOURCE_IO) + if (!pci_bar_index_is_valid(bar)) return NULL; + start = pci_resource_start(dev, bar); + len = pci_resource_len(dev, bar); + flags = pci_resource_flags(dev, bar); + if (len <= offset || !start) return NULL; + if (flags & IORESOURCE_IO) + return NULL; len -= offset; start += offset; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 3e5a117f5b5d6..5af4a804a4f89 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1444,7 +1444,7 @@ static ssize_t 
__resource_resize_store(struct device *dev, int n, return -EINVAL; device_lock(dev); - if (dev->driver) { + if (dev->driver || pci_num_vf(pdev)) { ret = -EBUSY; goto unlock; } @@ -1466,7 +1466,7 @@ static ssize_t __resource_resize_store(struct device *dev, int n, pci_remove_resource_files(pdev); - for (i = 0; i < PCI_STD_NUM_BARS; i++) { + for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { if (pci_resource_len(pdev, i) && pci_resource_flags(pdev, i) == flags) pci_release_resource(pdev, i); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index dd3c6dcb47ae4..25c07af1686b9 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -955,8 +955,10 @@ struct pci_acs { }; static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps, - const char *p, u16 mask, u16 flags) + const char *p, const u16 acs_mask, const u16 acs_flags) { + u16 flags = acs_flags; + u16 mask = acs_mask; char *delimit; int ret = 0; @@ -964,7 +966,7 @@ static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps, return; while (*p) { - if (!mask) { + if (!acs_mask) { /* Check for ACS flags */ delimit = strstr(p, "@"); if (delimit) { @@ -972,6 +974,8 @@ static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps, u32 shift = 0; end = delimit - p - 1; + mask = 0; + flags = 0; while (end > -1) { if (*(p + end) == '0') { @@ -1028,10 +1032,14 @@ static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps, pci_dbg(dev, "ACS mask = %#06x\n", mask); pci_dbg(dev, "ACS flags = %#06x\n", flags); + pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl); + pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl); - /* If mask is 0 then we copy the bit from the firmware setting. */ - caps->ctrl = (caps->ctrl & ~mask) | (caps->fw_ctrl & mask); - caps->ctrl |= flags; + /* + * For mask bits that are 0, copy them from the firmware setting + * and apply flags for all the mask bits that are 1. + */ + caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask); pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl); } @@ -3914,6 +3922,9 @@ EXPORT_SYMBOL(pci_enable_atomic_ops_to_root); */ void pci_release_region(struct pci_dev *pdev, int bar) { + if (!pci_bar_index_is_valid(bar)) + return; + /* * This is done for backwards compatibility, because the old PCI devres * API had a mode in which the function became managed if it had been @@ -3959,6 +3970,9 @@ EXPORT_SYMBOL(pci_release_region); static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name, int exclusive) { + if (!pci_bar_index_is_valid(bar)) + return -EINVAL; + if (pci_is_managed(pdev)) { if (exclusive == IORESOURCE_EXCLUSIVE) return pcim_request_region_exclusive(pdev, bar, res_name); @@ -4486,11 +4500,6 @@ void pci_disable_parity(struct pci_dev *dev) * @enable: boolean: whether to enable or disable PCI INTx * * Enables/disables PCI INTx for device @pdev - * - * NOTE: - * This is a "hybrid" function: It's normally unmanaged, but becomes managed - * when pcim_enable_device() has been called in advance. This hybrid feature is - * DEPRECATED! If you want managed cleanup, use pcim_intx() instead. 
*/ void pci_intx(struct pci_dev *pdev, int enable) { @@ -4503,15 +4512,10 @@ void pci_intx(struct pci_dev *pdev, int enable) else new = pci_command | PCI_COMMAND_INTX_DISABLE; - if (new != pci_command) { - /* Preserve the "hybrid" behavior for backwards compatibility */ - if (pci_is_managed(pdev)) { - WARN_ON_ONCE(pcim_intx(pdev, enable) != 0); - return; - } + if (new == pci_command) + return; - pci_write_config_word(pdev, PCI_COMMAND, new); - } + pci_write_config_word(pdev, PCI_COMMAND, new); } EXPORT_SYMBOL_GPL(pci_intx); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 1cdc2c9547a7e..65df6d2ac0032 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -165,6 +165,22 @@ static inline void pci_wakeup_event(struct pci_dev *dev) pm_wakeup_event(&dev->dev, 100); } +/** + * pci_bar_index_is_valid - Check whether a BAR index is within valid range + * @bar: BAR index + * + * Protects against overflowing &struct pci_dev.resource array. + * + * Return: true for valid index, false otherwise. + */ +static inline bool pci_bar_index_is_valid(int bar) +{ + if (bar >= 0 && bar < PCI_NUM_RESOURCES) + return true; + + return false; +} + static inline bool pci_has_subordinate(struct pci_dev *pci_dev) { return !!(pci_dev->subordinate); diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index cee2365e54b8b..62650a2f00ccc 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -1242,16 +1242,16 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) parent_link = link->parent; /* - * link->downstream is a pointer to the pci_dev of function 0. If - * we remove that function, the pci_dev is about to be deallocated, - * so we can't use link->downstream again. Free the link state to - * avoid this. + * Free the parent link state, no later than function 0 (i.e. + * link->downstream) being removed. * - * If we're removing a non-0 function, it's possible we could - * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends - * programming the same ASPM Control value for all functions of - * multi-function devices, so disable ASPM for all of them. + * Do not free the link state any earlier. If function 0 is a + * switch upstream port, this link state is parent_link to all + * subordinate ones. */ + if (pdev != link->downstream) + goto out; + pcie_config_aspm_link(link, 0); list_del(&link->sibling); free_link_state(link); @@ -1262,6 +1262,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) pcie_config_aspm_path(parent_link); } + out: mutex_unlock(&aspm_lock); up_read(&pci_bus_sem); } diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c index 6af5e04258728..604c055f60786 100644 --- a/drivers/pci/pcie/portdrv.c +++ b/drivers/pci/pcie/portdrv.c @@ -228,10 +228,12 @@ static int get_port_device_capability(struct pci_dev *dev) /* * Disable hot-plug interrupts in case they have been enabled - * by the BIOS and the hot-plug service driver is not loaded. + * by the BIOS and the hot-plug service driver won't be loaded + * to handle them. 
*/ - pcie_capability_clear_word(dev, PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); + if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) + pcie_capability_clear_word(dev, PCI_EXP_SLTCTL, + PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); } #ifdef CONFIG_PCIEAER diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ebb0c1d5cae25..cf7c7886b6420 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -908,6 +908,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) resource_size_t offset, next_offset; LIST_HEAD(resources); struct resource *res, *next_res; + bool bus_registered = false; char addr[64], *fmt; const char *name; int err; @@ -950,10 +951,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) /* Temporarily move resources off the list */ list_splice_init(&bridge->windows, &resources); err = device_add(&bridge->dev); - if (err) { - put_device(&bridge->dev); + if (err) goto free; - } + bus->bridge = get_device(&bridge->dev); device_enable_async_suspend(bus->bridge); pci_set_bus_of_node(bus); @@ -972,6 +972,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) name = dev_name(&bus->dev); err = device_register(&bus->dev); + bus_registered = true; if (err) goto unregister; @@ -1058,12 +1059,15 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) unregister: put_device(&bridge->dev); device_del(&bridge->dev); - free: #ifdef CONFIG_PCI_DOMAINS_GENERIC pci_bus_release_domain_nr(parent, bus->domain_nr); #endif - kfree(bus); + if (bus_registered) + put_device(&bus->dev); + else + kfree(bus); + return err; } @@ -1172,7 +1176,10 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, add_dev: pci_set_bus_msi_domain(child); ret = device_register(&child->dev); - WARN_ON(ret < 0); + if (WARN_ON(ret < 0)) { + put_device(&child->dev); + return NULL; + } pcibios_add_bus(child); @@ -1328,8 +1335,6 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); - pci_enable_rrs_sv(dev); - if ((secondary || subordinate) && !pcibios_assign_all_busses() && !is_cardbus && !broken) { unsigned int cmax, buses; @@ -1570,6 +1575,11 @@ void set_pcie_port_type(struct pci_dev *pdev) pdev->pcie_cap = pos; pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); pdev->pcie_flags_reg = reg16; + + type = pci_pcie_type(pdev); + if (type == PCI_EXP_TYPE_ROOT_PORT) + pci_enable_rrs_sv(pdev); + pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &pdev->devcap); pdev->pcie_mpss = FIELD_GET(PCI_EXP_DEVCAP_PAYLOAD, pdev->devcap); @@ -1586,7 +1596,6 @@ void set_pcie_port_type(struct pci_dev *pdev) * correctly so detect impossible configurations here and correct * the port type accordingly. 
*/ - type = pci_pcie_type(pdev); if (type == PCI_EXP_TYPE_DOWNSTREAM) { /* * If pdev claims to be downstream port but the parent diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 23082bc0ca37a..f16c7ce3bf3fc 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -1150,7 +1150,6 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, min_align = 1ULL << (max_order + __ffs(SZ_1M)); min_align = max(min_align, win_align); size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), win_align); - add_align = win_align; pci_info(bus->self, "bridge window %pR to %pR requires relaxed alignment rules\n", b_res, &bus->busn_res); } @@ -2105,8 +2104,7 @@ pci_root_bus_distribute_available_resources(struct pci_bus *bus, * in case of root bus. */ if (bridge && pci_bridge_resources_not_assigned(dev)) - pci_bridge_distribute_available_resources(bridge, - add_list); + pci_bridge_distribute_available_resources(dev, add_list); else pci_root_bus_distribute_available_resources(b, add_list); } diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 398cce3d76fc4..2f33e69a8caf2 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -342,12 +342,10 @@ armpmu_add(struct perf_event *event, int flags) if (idx < 0) return idx; - /* - * If there is an event in the counter we are going to use then make - * sure it is disabled. - */ + /* The newly-allocated counter should be empty */ + WARN_ON_ONCE(hw_events->events[idx]); + event->hw.idx = idx; - armpmu->disable(event); hw_events->events[idx] = event; hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c index 4ca50f9b6dfed..7dbda36884c8d 100644 --- a/drivers/perf/dwc_pcie_pmu.c +++ b/drivers/perf/dwc_pcie_pmu.c @@ -567,8 +567,10 @@ static int dwc_pcie_register_dev(struct pci_dev *pdev) return PTR_ERR(plat_dev); dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); - if (!dev_info) + if (!dev_info) { + platform_device_unregister(plat_dev); return -ENOMEM; + } /* Cache platform device to handle pci device hotplug */ dev_info->plat_dev = plat_dev; @@ -724,6 +726,15 @@ static struct platform_driver dwc_pcie_pmu_driver = { .driver = {.name = "dwc_pcie_pmu",}, }; +static void dwc_pcie_cleanup_devices(void) +{ + struct dwc_pcie_dev_info *dev_info, *tmp; + + list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node) { + dwc_pcie_unregister_dev(dev_info); + } +} + static int __init dwc_pcie_pmu_init(void) { struct pci_dev *pdev = NULL; @@ -736,7 +747,7 @@ static int __init dwc_pcie_pmu_init(void) ret = dwc_pcie_register_dev(pdev); if (ret) { pci_dev_put(pdev); - return ret; + goto err_cleanup; } } @@ -745,35 +756,35 @@ static int __init dwc_pcie_pmu_init(void) dwc_pcie_pmu_online_cpu, dwc_pcie_pmu_offline_cpu); if (ret < 0) - return ret; + goto err_cleanup; dwc_pcie_pmu_hp_state = ret; ret = platform_driver_register(&dwc_pcie_pmu_driver); if (ret) - goto platform_driver_register_err; + goto err_remove_cpuhp; ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb); if (ret) - goto platform_driver_register_err; + goto err_unregister_driver; notify = true; return 0; -platform_driver_register_err: +err_unregister_driver: + platform_driver_unregister(&dwc_pcie_pmu_driver); +err_remove_cpuhp: cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state); - +err_cleanup: + dwc_pcie_cleanup_devices(); return ret; } static void __exit dwc_pcie_pmu_exit(void) { - struct dwc_pcie_dev_info *dev_info, *tmp; - if (notify) 
bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb); - list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node) - dwc_pcie_unregister_dev(dev_info); + dwc_pcie_cleanup_devices(); platform_driver_unregister(&dwc_pcie_pmu_driver); cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state); } diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c index e98361dcdeadf..afd52392cd530 100644 --- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c +++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c @@ -162,6 +162,16 @@ static int imx8_pcie_phy_power_on(struct phy *phy) return ret; } +static int imx8_pcie_phy_power_off(struct phy *phy) +{ + struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy); + + reset_control_assert(imx8_phy->reset); + reset_control_assert(imx8_phy->perst); + + return 0; +} + static int imx8_pcie_phy_init(struct phy *phy) { struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy); @@ -182,6 +192,7 @@ static const struct phy_ops imx8_pcie_phy_ops = { .init = imx8_pcie_phy_init, .exit = imx8_pcie_phy_exit, .power_on = imx8_pcie_phy_power_on, + .power_off = imx8_pcie_phy_power_off, .owner = THIS_MODULE, }; diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig index 2f7a05f21dc59..dcb8e1628632e 100644 --- a/drivers/phy/rockchip/Kconfig +++ b/drivers/phy/rockchip/Kconfig @@ -125,6 +125,7 @@ config PHY_ROCKCHIP_USBDP depends on ARCH_ROCKCHIP && OF depends on TYPEC select GENERIC_PHY + select USB_COMMON help Enable this to support the Rockchip USB3.0/DP combo PHY with Samsung IP block. This is required for USB3 support on RK3588. diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c index 2eb3329ca23f6..1ef6d9630f7e0 100644 --- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c +++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c @@ -309,7 +309,10 @@ static int rockchip_combphy_parse_dt(struct device *dev, struct rockchip_combphy priv->ext_refclk = device_property_present(dev, "rockchip,ext-refclk"); - priv->phy_rst = devm_reset_control_get(dev, "phy"); + priv->phy_rst = devm_reset_control_get_exclusive(dev, "phy"); + /* fallback to old behaviour */ + if (PTR_ERR(priv->phy_rst) == -ENOENT) + priv->phy_rst = devm_reset_control_array_get_exclusive(dev); if (IS_ERR(priv->phy_rst)) return dev_err_probe(dev, PTR_ERR(priv->phy_rst), "failed to get phy reset\n"); diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c index 69c3ec0938f74..be6f1ca9095aa 100644 --- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c +++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c @@ -266,11 +266,22 @@ enum rk_hdptx_reset { RST_MAX }; +#define MAX_HDPTX_PHY_NUM 2 + +struct rk_hdptx_phy_cfg { + unsigned int num_phys; + unsigned int phy_ids[MAX_HDPTX_PHY_NUM]; +}; + struct rk_hdptx_phy { struct device *dev; struct regmap *regmap; struct regmap *grf; + /* PHY const config */ + const struct rk_hdptx_phy_cfg *cfgs; + int phy_id; + struct phy *phy; struct phy_config *phy_cfg; struct clk_bulk_data *clks; @@ -1019,15 +1030,14 @@ static int rk_hdptx_phy_clk_register(struct rk_hdptx_phy *hdptx) struct device *dev = hdptx->dev; const char *name, *pname; struct clk *refclk; - int ret, id; + int ret; refclk = devm_clk_get(dev, "ref"); if (IS_ERR(refclk)) return dev_err_probe(dev, PTR_ERR(refclk), "Failed to get ref clock\n"); - id = of_alias_get_id(dev->of_node, "hdptxphy"); - name = id > 0 ? 
"clk_hdmiphy_pixel1" : "clk_hdmiphy_pixel0"; + name = hdptx->phy_id > 0 ? "clk_hdmiphy_pixel1" : "clk_hdmiphy_pixel0"; pname = __clk_get_name(refclk); hdptx->hw.init = CLK_HW_INIT(name, pname, &hdptx_phy_clk_ops, @@ -1070,8 +1080,9 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev) struct phy_provider *phy_provider; struct device *dev = &pdev->dev; struct rk_hdptx_phy *hdptx; + struct resource *res; void __iomem *regs; - int ret; + int ret, id; hdptx = devm_kzalloc(dev, sizeof(*hdptx), GFP_KERNEL); if (!hdptx) @@ -1079,11 +1090,27 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev) hdptx->dev = dev; - regs = devm_platform_ioremap_resource(pdev, 0); + regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(regs)) return dev_err_probe(dev, PTR_ERR(regs), "Failed to ioremap resource\n"); + hdptx->cfgs = device_get_match_data(dev); + if (!hdptx->cfgs) + return dev_err_probe(dev, -EINVAL, "missing match data\n"); + + /* find the phy-id from the io address */ + hdptx->phy_id = -ENODEV; + for (id = 0; id < hdptx->cfgs->num_phys; id++) { + if (res->start == hdptx->cfgs->phy_ids[id]) { + hdptx->phy_id = id; + break; + } + } + + if (hdptx->phy_id < 0) + return dev_err_probe(dev, -ENODEV, "no matching device found\n"); + ret = devm_clk_bulk_get_all(dev, &hdptx->clks); if (ret < 0) return dev_err_probe(dev, ret, "Failed to get clocks\n"); @@ -1147,8 +1174,19 @@ static const struct dev_pm_ops rk_hdptx_phy_pm_ops = { rk_hdptx_phy_runtime_resume, NULL) }; +static const struct rk_hdptx_phy_cfg rk3588_hdptx_phy_cfgs = { + .num_phys = 2, + .phy_ids = { + 0xfed60000, + 0xfed70000, + }, +}; + static const struct of_device_id rk_hdptx_phy_of_match[] = { - { .compatible = "rockchip,rk3588-hdptx-phy", }, + { + .compatible = "rockchip,rk3588-hdptx-phy", + .data = &rk3588_hdptx_phy_cfgs + }, {} }; MODULE_DEVICE_TABLE(of, rk_hdptx_phy_of_match); diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c index c421b495eb0fe..46b8f6987c62c 100644 --- a/drivers/phy/samsung/phy-exynos5-usbdrd.c +++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c @@ -488,9 +488,9 @@ exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst) reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK; /* FSEL settings corresponding to reference clock */ - reg &= ~PHYCLKRST_FSEL_PIPE_MASK | - PHYCLKRST_MPLL_MULTIPLIER_MASK | - PHYCLKRST_SSC_REFCLKSEL_MASK; + reg &= ~(PHYCLKRST_FSEL_PIPE_MASK | + PHYCLKRST_MPLL_MULTIPLIER_MASK | + PHYCLKRST_SSC_REFCLKSEL_MASK); switch (phy_drd->extrefclk) { case EXYNOS5_FSEL_50MHZ: reg |= (PHYCLKRST_MPLL_MULTIPLIER_50M_REF | @@ -532,9 +532,9 @@ exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst) reg &= ~PHYCLKRST_REFCLKSEL_MASK; reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK; - reg &= ~PHYCLKRST_FSEL_UTMI_MASK | - PHYCLKRST_MPLL_MULTIPLIER_MASK | - PHYCLKRST_SSC_REFCLKSEL_MASK; + reg &= ~(PHYCLKRST_FSEL_UTMI_MASK | + PHYCLKRST_MPLL_MULTIPLIER_MASK | + PHYCLKRST_SSC_REFCLKSEL_MASK); reg |= PHYCLKRST_FSEL(phy_drd->extrefclk); return reg; @@ -1296,14 +1296,17 @@ static int exynos5_usbdrd_gs101_phy_exit(struct phy *phy) struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); int ret; + if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) { + ret = exynos850_usbdrd_phy_exit(phy); + if (ret) + return ret; + } + + exynos5_usbdrd_phy_isol(inst, true); + if (inst->phy_cfg->id != EXYNOS5_DRDPHY_UTMI) return 0; - ret = exynos850_usbdrd_phy_exit(phy); - if (ret) - return ret; - - exynos5_usbdrd_phy_isol(inst, true); return 
regulator_bulk_disable(phy_drd->drv_data->n_regulators, phy_drd->regulators); } diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c index 0f60d5d1c1678..fae6242aa730e 100644 --- a/drivers/phy/tegra/xusb-tegra186.c +++ b/drivers/phy/tegra/xusb-tegra186.c @@ -928,6 +928,7 @@ static int tegra186_utmi_phy_init(struct phy *phy) unsigned int index = lane->index; struct device *dev = padctl->dev; int err; + u32 reg; port = tegra_xusb_find_usb2_port(padctl, index); if (!port) { @@ -935,6 +936,16 @@ static int tegra186_utmi_phy_init(struct phy *phy) return -ENODEV; } + if (port->mode == USB_DR_MODE_OTG || + port->mode == USB_DR_MODE_PERIPHERAL) { + /* reset VBUS&ID OVERRIDE */ + reg = padctl_readl(padctl, USB2_VBUS_ID); + reg &= ~VBUS_OVERRIDE; + reg &= ~ID_OVERRIDE(~0); + reg |= ID_OVERRIDE_FLOATING; + padctl_writel(padctl, reg, USB2_VBUS_ID); + } + if (port->supply && port->mode == USB_DR_MODE_HOST) { err = regulator_enable(port->supply); if (err) { diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c index 103b266fec771..2c2256fe5a3b6 100644 --- a/drivers/phy/ti/phy-gmii-sel.c +++ b/drivers/phy/ti/phy-gmii-sel.c @@ -423,6 +423,12 @@ static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv) return 0; } +static const struct regmap_config phy_gmii_sel_regmap_cfg = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + static int phy_gmii_sel_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -467,7 +473,14 @@ static int phy_gmii_sel_probe(struct platform_device *pdev) priv->regmap = syscon_node_to_regmap(node->parent); if (IS_ERR(priv->regmap)) { - priv->regmap = device_node_to_regmap(node); + void __iomem *base; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return dev_err_probe(dev, PTR_ERR(base), + "failed to get base memory resource\n"); + + priv->regmap = regmap_init_mmio(dev, base, &phy_gmii_sel_regmap_cfg); if (IS_ERR(priv->regmap)) return dev_err_probe(dev, PTR_ERR(priv->regmap), "Failed to get syscon\n"); diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c index 73dbf29c002f3..cf6efa9c0364a 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c @@ -974,7 +974,7 @@ static const struct regmap_config bcm281xx_pinctrl_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .max_register = BCM281XX_PIN_VC_CAM3_SDA, + .max_register = BCM281XX_PIN_VC_CAM3_SDA * 4, }; static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 928607a21d36d..f8abc69a39d16 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -1531,7 +1531,6 @@ static int intel_pinctrl_probe_pwm(struct intel_pinctrl *pctrl, .clk_rate = 19200000, .npwm = 1, .base_unit_bits = 22, - .bypass = true, }; struct pwm_chip *chip; diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c index 471f644c5eef2..f6a1e684a3864 100644 --- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c +++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c @@ -1290,12 +1290,14 @@ static struct npcm8xx_func npcm8xx_funcs[] = { }; #define NPCM8XX_PINCFG(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q) \ - [a] { .fn0 = fn_ ## b, .reg0 = NPCM8XX_GCR_ ## c, .bit0 = d, \ + [a] = { \ + .flag = q, \ + .fn0 = fn_ ## b, .reg0 = NPCM8XX_GCR_ ## c, .bit0 = d, \ .fn1 = fn_ ## e, 
.reg1 = NPCM8XX_GCR_ ## f, .bit1 = g, \ .fn2 = fn_ ## h, .reg2 = NPCM8XX_GCR_ ## i, .bit2 = j, \ .fn3 = fn_ ## k, .reg3 = NPCM8XX_GCR_ ## l, .bit3 = m, \ .fn4 = fn_ ## n, .reg4 = NPCM8XX_GCR_ ## o, .bit4 = p, \ - .flag = q } + } /* Drive strength controlled by NPCM8XX_GP_N_ODSC */ #define DRIVE_STRENGTH_LO_SHIFT 8 @@ -2361,8 +2363,8 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl) return dev_err_probe(dev, ret, "gpio-ranges fail for GPIO bank %u\n", id); ret = fwnode_irq_get(child, 0); - if (!ret) - return dev_err_probe(dev, ret, "No IRQ for GPIO bank %u\n", id); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to retrieve IRQ for bank %u\n", id); pctrl->gpio_bank[id].irq = ret; pctrl->gpio_bank[id].irq_chip = npcmgpio_irqchip; @@ -2374,6 +2376,9 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl) pctrl->gpio_bank[id].gc.parent = dev; pctrl->gpio_bank[id].gc.fwnode = child; pctrl->gpio_bank[id].gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", child); + if (pctrl->gpio_bank[id].gc.label == NULL) + return -ENOMEM; + pctrl->gpio_bank[id].gc.dbg_show = npcmgpio_dbg_show; pctrl->gpio_bank[id].direction_input = pctrl->gpio_bank[id].gc.direction_input; pctrl->gpio_bank[id].gc.direction_input = npcmgpio_direction_input; diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index aeaf0d1958f56..a6bdff7a0bb25 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -1044,8 +1044,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) const struct msm_pingroup *g; u32 intr_target_mask = GENMASK(2, 0); unsigned long flags; - bool was_enabled; - u32 val; + u32 val, oldval; if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) { set_bit(d->hwirq, pctrl->dual_edge_irqs); @@ -1107,8 +1106,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) * internal circuitry of TLMM, toggling the RAW_STATUS * could cause the INTR_STATUS to be set for EDGE interrupts. */ - val = msm_readl_intr_cfg(pctrl, g); - was_enabled = val & BIT(g->intr_raw_status_bit); + val = oldval = msm_readl_intr_cfg(pctrl, g); val |= BIT(g->intr_raw_status_bit); if (g->intr_detection_width == 2) { val &= ~(3 << g->intr_detection_bit); @@ -1161,9 +1159,11 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) /* * The first time we set RAW_STATUS_EN it could trigger an interrupt. * Clear the interrupt. This is safe because we have - * IRQCHIP_SET_TYPE_MASKED. + * IRQCHIP_SET_TYPE_MASKED. When changing the interrupt type, we could + * also still have a non-matching interrupt latched, so clear whenever + * making changes to the interrupt configuration. 
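+ * For instance (a sketch, not part of the original comment): a line
+ * that idled high while configured level-high can leave a stale
+ * INTR_STATUS latched; retyping it to edge-falling without this clear
+ * would deliver that stale interrupt as soon as it is unmasked.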
*/ - if (!was_enabled) + if (val != oldval) msm_ack_intr_status(pctrl, g); if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c index af689d7c117f3..773eaf508565b 100644 --- a/drivers/pinctrl/renesas/pinctrl-rza2.c +++ b/drivers/pinctrl/renesas/pinctrl-rza2.c @@ -253,6 +253,8 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv) return ret; } + of_node_put(of_args.np); + if ((of_args.args[0] != 0) || (of_args.args[1] != 0) || (of_args.args[2] != priv->npins)) { diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index 5081c7d8064fa..d90685cfe2e1a 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -2583,6 +2583,8 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl) if (ret) return dev_err_probe(pctrl->dev, ret, "Unable to parse gpio-ranges\n"); + of_node_put(of_args.np); + if (of_args.args[0] != 0 || of_args.args[1] != 0 || of_args.args[2] != pctrl->data->n_port_pins) return dev_err_probe(pctrl->dev, -EINVAL, @@ -3180,6 +3182,7 @@ static struct platform_driver rzg2l_pinctrl_driver = { .name = DRV_NAME, .of_match_table = of_match_ptr(rzg2l_pinctrl_of_table), .pm = pm_sleep_ptr(&rzg2l_pinctrl_pm_ops), + .suppress_bind_attrs = true, }, .probe = rzg2l_pinctrl_probe, }; diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c index 4062c56619f59..8c7169db4fcce 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c +++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c @@ -940,6 +940,8 @@ static int rzv2m_gpio_register(struct rzv2m_pinctrl *pctrl) return ret; } + of_node_put(of_args.np); + if (of_args.args[0] != 0 || of_args.args[1] != 0 || of_args.args[2] != pctrl->data->n_port_pins) { dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n"); diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c index 5480e0884abec..23b4bc1e5da81 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c @@ -939,83 +939,83 @@ const struct samsung_pinctrl_of_match_data fsd_of_data __initconst = { /* pin banks of gs101 pin-controller (ALIVE) */ static const struct samsung_pin_bank_data gs101_pin_alive[] = { - EXYNOS850_PIN_BANK_EINTW(8, 0x0, "gpa0", 0x00), - EXYNOS850_PIN_BANK_EINTW(7, 0x20, "gpa1", 0x04), - EXYNOS850_PIN_BANK_EINTW(5, 0x40, "gpa2", 0x08), - EXYNOS850_PIN_BANK_EINTW(4, 0x60, "gpa3", 0x0c), - EXYNOS850_PIN_BANK_EINTW(4, 0x80, "gpa4", 0x10), - EXYNOS850_PIN_BANK_EINTW(7, 0xa0, "gpa5", 0x14), - EXYNOS850_PIN_BANK_EINTW(8, 0xc0, "gpa9", 0x18), - EXYNOS850_PIN_BANK_EINTW(2, 0xe0, "gpa10", 0x1c), + GS101_PIN_BANK_EINTW(8, 0x0, "gpa0", 0x00, 0x00), + GS101_PIN_BANK_EINTW(7, 0x20, "gpa1", 0x04, 0x08), + GS101_PIN_BANK_EINTW(5, 0x40, "gpa2", 0x08, 0x10), + GS101_PIN_BANK_EINTW(4, 0x60, "gpa3", 0x0c, 0x18), + GS101_PIN_BANK_EINTW(4, 0x80, "gpa4", 0x10, 0x1c), + GS101_PIN_BANK_EINTW(7, 0xa0, "gpa5", 0x14, 0x20), + GS101_PIN_BANK_EINTW(8, 0xc0, "gpa9", 0x18, 0x28), + GS101_PIN_BANK_EINTW(2, 0xe0, "gpa10", 0x1c, 0x30), }; /* pin banks of gs101 pin-controller (FAR_ALIVE) */ static const struct samsung_pin_bank_data gs101_pin_far_alive[] = { - EXYNOS850_PIN_BANK_EINTW(8, 0x0, "gpa6", 0x00), - EXYNOS850_PIN_BANK_EINTW(4, 0x20, "gpa7", 0x04), - EXYNOS850_PIN_BANK_EINTW(8, 0x40, "gpa8", 0x08), - EXYNOS850_PIN_BANK_EINTW(2, 0x60, "gpa11", 0x0c), + GS101_PIN_BANK_EINTW(8, 0x0, 
"gpa6", 0x00, 0x00), + GS101_PIN_BANK_EINTW(4, 0x20, "gpa7", 0x04, 0x08), + GS101_PIN_BANK_EINTW(8, 0x40, "gpa8", 0x08, 0x0c), + GS101_PIN_BANK_EINTW(2, 0x60, "gpa11", 0x0c, 0x14), }; /* pin banks of gs101 pin-controller (GSACORE) */ static const struct samsung_pin_bank_data gs101_pin_gsacore[] = { - EXYNOS850_PIN_BANK_EINTG(2, 0x0, "gps0", 0x00), - EXYNOS850_PIN_BANK_EINTG(8, 0x20, "gps1", 0x04), - EXYNOS850_PIN_BANK_EINTG(3, 0x40, "gps2", 0x08), + GS101_PIN_BANK_EINTG(2, 0x0, "gps0", 0x00, 0x00), + GS101_PIN_BANK_EINTG(8, 0x20, "gps1", 0x04, 0x04), + GS101_PIN_BANK_EINTG(3, 0x40, "gps2", 0x08, 0x0c), }; /* pin banks of gs101 pin-controller (GSACTRL) */ static const struct samsung_pin_bank_data gs101_pin_gsactrl[] = { - EXYNOS850_PIN_BANK_EINTW(6, 0x0, "gps3", 0x00), + GS101_PIN_BANK_EINTW(6, 0x0, "gps3", 0x00, 0x00), }; /* pin banks of gs101 pin-controller (PERIC0) */ static const struct samsung_pin_bank_data gs101_pin_peric0[] = { - EXYNOS850_PIN_BANK_EINTG(5, 0x0, "gpp0", 0x00), - EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpp1", 0x04), - EXYNOS850_PIN_BANK_EINTG(4, 0x40, "gpp2", 0x08), - EXYNOS850_PIN_BANK_EINTG(2, 0x60, "gpp3", 0x0c), - EXYNOS850_PIN_BANK_EINTG(4, 0x80, "gpp4", 0x10), - EXYNOS850_PIN_BANK_EINTG(2, 0xa0, "gpp5", 0x14), - EXYNOS850_PIN_BANK_EINTG(4, 0xc0, "gpp6", 0x18), - EXYNOS850_PIN_BANK_EINTG(2, 0xe0, "gpp7", 0x1c), - EXYNOS850_PIN_BANK_EINTG(4, 0x100, "gpp8", 0x20), - EXYNOS850_PIN_BANK_EINTG(2, 0x120, "gpp9", 0x24), - EXYNOS850_PIN_BANK_EINTG(4, 0x140, "gpp10", 0x28), - EXYNOS850_PIN_BANK_EINTG(2, 0x160, "gpp11", 0x2c), - EXYNOS850_PIN_BANK_EINTG(4, 0x180, "gpp12", 0x30), - EXYNOS850_PIN_BANK_EINTG(2, 0x1a0, "gpp13", 0x34), - EXYNOS850_PIN_BANK_EINTG(4, 0x1c0, "gpp14", 0x38), - EXYNOS850_PIN_BANK_EINTG(2, 0x1e0, "gpp15", 0x3c), - EXYNOS850_PIN_BANK_EINTG(4, 0x200, "gpp16", 0x40), - EXYNOS850_PIN_BANK_EINTG(2, 0x220, "gpp17", 0x44), - EXYNOS850_PIN_BANK_EINTG(4, 0x240, "gpp18", 0x48), - EXYNOS850_PIN_BANK_EINTG(4, 0x260, "gpp19", 0x4c), + GS101_PIN_BANK_EINTG(5, 0x0, "gpp0", 0x00, 0x00), + GS101_PIN_BANK_EINTG(4, 0x20, "gpp1", 0x04, 0x08), + GS101_PIN_BANK_EINTG(4, 0x40, "gpp2", 0x08, 0x0c), + GS101_PIN_BANK_EINTG(2, 0x60, "gpp3", 0x0c, 0x10), + GS101_PIN_BANK_EINTG(4, 0x80, "gpp4", 0x10, 0x14), + GS101_PIN_BANK_EINTG(2, 0xa0, "gpp5", 0x14, 0x18), + GS101_PIN_BANK_EINTG(4, 0xc0, "gpp6", 0x18, 0x1c), + GS101_PIN_BANK_EINTG(2, 0xe0, "gpp7", 0x1c, 0x20), + GS101_PIN_BANK_EINTG(4, 0x100, "gpp8", 0x20, 0x24), + GS101_PIN_BANK_EINTG(2, 0x120, "gpp9", 0x24, 0x28), + GS101_PIN_BANK_EINTG(4, 0x140, "gpp10", 0x28, 0x2c), + GS101_PIN_BANK_EINTG(2, 0x160, "gpp11", 0x2c, 0x30), + GS101_PIN_BANK_EINTG(4, 0x180, "gpp12", 0x30, 0x34), + GS101_PIN_BANK_EINTG(2, 0x1a0, "gpp13", 0x34, 0x38), + GS101_PIN_BANK_EINTG(4, 0x1c0, "gpp14", 0x38, 0x3c), + GS101_PIN_BANK_EINTG(2, 0x1e0, "gpp15", 0x3c, 0x40), + GS101_PIN_BANK_EINTG(4, 0x200, "gpp16", 0x40, 0x44), + GS101_PIN_BANK_EINTG(2, 0x220, "gpp17", 0x44, 0x48), + GS101_PIN_BANK_EINTG(4, 0x240, "gpp18", 0x48, 0x4c), + GS101_PIN_BANK_EINTG(4, 0x260, "gpp19", 0x4c, 0x50), }; /* pin banks of gs101 pin-controller (PERIC1) */ static const struct samsung_pin_bank_data gs101_pin_peric1[] = { - EXYNOS850_PIN_BANK_EINTG(8, 0x0, "gpp20", 0x00), - EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpp21", 0x04), - EXYNOS850_PIN_BANK_EINTG(2, 0x40, "gpp22", 0x08), - EXYNOS850_PIN_BANK_EINTG(8, 0x60, "gpp23", 0x0c), - EXYNOS850_PIN_BANK_EINTG(4, 0x80, "gpp24", 0x10), - EXYNOS850_PIN_BANK_EINTG(4, 0xa0, "gpp25", 0x14), - EXYNOS850_PIN_BANK_EINTG(5, 0xc0, "gpp26", 0x18), - 
EXYNOS850_PIN_BANK_EINTG(4, 0xe0, "gpp27", 0x1c), + GS101_PIN_BANK_EINTG(8, 0x0, "gpp20", 0x00, 0x00), + GS101_PIN_BANK_EINTG(4, 0x20, "gpp21", 0x04, 0x08), + GS101_PIN_BANK_EINTG(2, 0x40, "gpp22", 0x08, 0x0c), + GS101_PIN_BANK_EINTG(8, 0x60, "gpp23", 0x0c, 0x10), + GS101_PIN_BANK_EINTG(4, 0x80, "gpp24", 0x10, 0x18), + GS101_PIN_BANK_EINTG(4, 0xa0, "gpp25", 0x14, 0x1c), + GS101_PIN_BANK_EINTG(5, 0xc0, "gpp26", 0x18, 0x20), + GS101_PIN_BANK_EINTG(4, 0xe0, "gpp27", 0x1c, 0x28), }; /* pin banks of gs101 pin-controller (HSI1) */ static const struct samsung_pin_bank_data gs101_pin_hsi1[] = { - EXYNOS850_PIN_BANK_EINTG(6, 0x0, "gph0", 0x00), - EXYNOS850_PIN_BANK_EINTG(7, 0x20, "gph1", 0x04), + GS101_PIN_BANK_EINTG(6, 0x0, "gph0", 0x00, 0x00), + GS101_PIN_BANK_EINTG(7, 0x20, "gph1", 0x04, 0x08), }; /* pin banks of gs101 pin-controller (HSI2) */ static const struct samsung_pin_bank_data gs101_pin_hsi2[] = { - EXYNOS850_PIN_BANK_EINTG(6, 0x0, "gph2", 0x00), - EXYNOS850_PIN_BANK_EINTG(2, 0x20, "gph3", 0x04), - EXYNOS850_PIN_BANK_EINTG(6, 0x40, "gph4", 0x08), + GS101_PIN_BANK_EINTG(6, 0x0, "gph2", 0x00, 0x00), + GS101_PIN_BANK_EINTG(2, 0x20, "gph3", 0x04, 0x08), + GS101_PIN_BANK_EINTG(6, 0x40, "gph4", 0x08, 0x0c), }; static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = { diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h index 305cb1d31de49..97a43fa4dfc56 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.h +++ b/drivers/pinctrl/samsung/pinctrl-exynos.h @@ -165,6 +165,28 @@ .name = id \ } +#define GS101_PIN_BANK_EINTG(pins, reg, id, offs, fltcon_offs) \ + { \ + .type = &exynos850_bank_type_off, \ + .pctl_offset = reg, \ + .nr_pins = pins, \ + .eint_type = EINT_TYPE_GPIO, \ + .eint_offset = offs, \ + .eint_fltcon_offset = fltcon_offs, \ + .name = id \ + } + +#define GS101_PIN_BANK_EINTW(pins, reg, id, offs, fltcon_offs) \ + { \ + .type = &exynos850_bank_type_alive, \ + .pctl_offset = reg, \ + .nr_pins = pins, \ + .eint_type = EINT_TYPE_WKUP, \ + .eint_offset = offs, \ + .eint_fltcon_offset = fltcon_offs, \ + .name = id \ + } + /** * struct exynos_weint_data: irq specific data for all the wakeup interrupts * generated by the external wakeup interrupt controller. diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index c142cd7920307..63ac89a802d30 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -1230,6 +1230,7 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, bank->eint_con_offset = bdata->eint_con_offset; bank->eint_mask_offset = bdata->eint_mask_offset; bank->eint_pend_offset = bdata->eint_pend_offset; + bank->eint_fltcon_offset = bdata->eint_fltcon_offset; bank->name = bdata->name; raw_spin_lock_init(&bank->slock); diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h index a1e7377bd890b..14c3b6b965851 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.h +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h @@ -144,6 +144,7 @@ struct samsung_pin_bank_type { * @eint_con_offset: ExynosAuto SoC-specific EINT control register offset of bank. * @eint_mask_offset: ExynosAuto SoC-specific EINT mask register offset of bank. * @eint_pend_offset: ExynosAuto SoC-specific EINT pend register offset of bank. + * @eint_fltcon_offset: GS101 SoC-specific EINT filter config register offset. * @name: name to be prefixed for each pin in this pin bank. 
*/ struct samsung_pin_bank_data { @@ -158,6 +159,7 @@ struct samsung_pin_bank_data { u32 eint_con_offset; u32 eint_mask_offset; u32 eint_pend_offset; + u32 eint_fltcon_offset; const char *name; }; @@ -175,6 +177,7 @@ struct samsung_pin_bank_data { * @eint_con_offset: ExynosAuto SoC-specific EINT register or interrupt offset of bank. * @eint_mask_offset: ExynosAuto SoC-specific EINT mask register offset of bank. * @eint_pend_offset: ExynosAuto SoC-specific EINT pend register offset of bank. + * @eint_fltcon_offset: GS101 SoC-specific EINT filter config register offset. * @name: name to be prefixed for each pin in this pin bank. * @id: id of the bank, propagated to the pin range. * @pin_base: starting pin number of the bank. @@ -201,6 +204,7 @@ struct samsung_pin_bank { u32 eint_con_offset; u32 eint_mask_offset; u32 eint_pend_offset; + u32 eint_fltcon_offset; const char *name; u32 id; diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index c83e5a65e6801..3b046450bd3ff 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -270,6 +270,9 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev, val = pmx_readl(pmx, g->mux_bank, g->mux_reg); val &= ~(0x3 << g->mux_bit); val |= i << g->mux_bit; + /* Set the SFIO/GPIO selection to SFIO when under pinmux control*/ + if (pmx->soc->sfsel_in_mux) + val |= (1 << g->sfsel_bit); pmx_writel(pmx, val, g->mux_bank, g->mux_reg); return 0; diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index 626e2635e3da7..ac198d1fd1707 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -30,6 +30,7 @@ #define DRV_NAME "cros_ec_lpcs" #define ACPI_DRV_NAME "GOOG0004" +#define FRMW_ACPI_DRV_NAME "FRMWC004" /* True if ACPI device is present */ static bool cros_ec_lpc_acpi_device_found; @@ -460,7 +461,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev) acpi_status status; struct cros_ec_device *ec_dev; struct cros_ec_lpc *ec_lpc; - struct lpc_driver_data *driver_data; + const struct lpc_driver_data *driver_data; u8 buf[2] = {}; int irq, ret; u32 quirks; @@ -472,6 +473,9 @@ static int cros_ec_lpc_probe(struct platform_device *pdev) ec_lpc->mmio_memory_base = EC_LPC_ADDR_MEMMAP; driver_data = platform_get_drvdata(pdev); + if (!driver_data) + driver_data = acpi_device_get_match_data(dev); + if (driver_data) { quirks = driver_data->quirks; @@ -625,12 +629,6 @@ static void cros_ec_lpc_remove(struct platform_device *pdev) cros_ec_unregister(ec_dev); } -static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = { - { ACPI_DRV_NAME, 0 }, - { } -}; -MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids); - static const struct lpc_driver_data framework_laptop_npcx_lpc_driver_data __initconst = { .quirks = CROS_EC_LPC_QUIRK_REMAP_MEMORY, .quirk_mmio_memory_base = 0xE00, @@ -642,6 +640,13 @@ static const struct lpc_driver_data framework_laptop_mec_lpc_driver_data __initc .quirk_aml_mutex_name = "ECMT", }; +static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = { + { ACPI_DRV_NAME, 0 }, + { FRMW_ACPI_DRV_NAME, (kernel_ulong_t)&framework_laptop_npcx_lpc_driver_data }, + { } +}; +MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids); + static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = { { /* @@ -795,7 +800,8 @@ static int __init cros_ec_lpc_init(void) int ret; const struct dmi_system_id *dmi_match; - cros_ec_lpc_acpi_device_found = 
!!cros_ec_lpc_get_device(ACPI_DRV_NAME); + cros_ec_lpc_acpi_device_found = !!cros_ec_lpc_get_device(ACPI_DRV_NAME) || + !!cros_ec_lpc_get_device(FRMW_ACPI_DRV_NAME); dmi_match = dmi_first_match(cros_ec_lpc_dmi_table); diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig index 49c383eb67854..13e37b49d9d01 100644 --- a/drivers/platform/cznic/Kconfig +++ b/drivers/platform/cznic/Kconfig @@ -6,6 +6,7 @@ menuconfig CZNIC_PLATFORMS bool "Platform support for CZ.NIC's Turris hardware" + depends on ARCH_MVEBU || COMPILE_TEST help Say Y here to be able to choose driver support for CZ.NIC's Turris devices. This option alone does not add any kernel code. diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c index 02ff68be10d01..a184922bba8d6 100644 --- a/drivers/platform/x86/amd/pmf/auto-mode.c +++ b/drivers/platform/x86/amd/pmf/auto-mode.c @@ -120,9 +120,9 @@ static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx, amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pwr_ctrl->sppt_apu_only, NULL); amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pwr_ctrl->stt_min, NULL); amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, - pwr_ctrl->stt_skin_temp[STT_TEMP_APU], NULL); + fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_APU]), NULL); amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, - pwr_ctrl->stt_skin_temp[STT_TEMP_HS2], NULL); + fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_HS2]), NULL); if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX)) apmf_update_fan_idx(dev, config_store.mode_set[idx].fan_control.manual, diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c index bc8899e15c914..207a0b33d8d36 100644 --- a/drivers/platform/x86/amd/pmf/cnqf.c +++ b/drivers/platform/x86/amd/pmf/cnqf.c @@ -81,10 +81,10 @@ static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx, amd_pmf_send_cmd(dev, SET_SPPT, false, pc->sppt, NULL); amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pc->sppt_apu_only, NULL); amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pc->stt_min, NULL); - amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, pc->stt_skin_temp[STT_TEMP_APU], - NULL); - amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, pc->stt_skin_temp[STT_TEMP_HS2], - NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, + fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_APU]), NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, + fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_HS2]), NULL); if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX)) apmf_update_fan_idx(dev, diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c index 347bb43a5f2b7..719caa2a00f05 100644 --- a/drivers/platform/x86/amd/pmf/core.c +++ b/drivers/platform/x86/amd/pmf/core.c @@ -176,6 +176,20 @@ static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev) dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value); } +/** + * fixp_q88_fromint: Convert integer to Q8.8 + * @val: input value + * + * Converts an integer into binary fixed point format where 8 bits + * are used for integer and 8 bits are used for the decimal. 
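+ * For illustration: fixp_q88_fromint(3) returns 0x300 (3.0 in Q8.8) and
+ * fixp_q88_fromint(45) returns 0x2d00; the low byte holds the 1/256ths
+ * fractional part, which is always zero for integer input.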
+ * + * Return: unsigned integer converted to Q8.8 format + */ +u32 fixp_q88_fromint(u32 val) +{ + return val << 8; +} + int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data) { int rc; diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h index 8ce8816da9c16..34ba0309a33a2 100644 --- a/drivers/platform/x86/amd/pmf/pmf.h +++ b/drivers/platform/x86/amd/pmf/pmf.h @@ -105,9 +105,12 @@ struct cookie_header { #define PMF_TA_IF_VERSION_MAJOR 1 #define TA_PMF_ACTION_MAX 32 #define TA_PMF_UNDO_MAX 8 -#define TA_OUTPUT_RESERVED_MEM 906 +#define TA_OUTPUT_RESERVED_MEM 922 #define MAX_OPERATION_PARAMS 4 +#define TA_ERROR_CRYPTO_INVALID_PARAM 0x20002 +#define TA_ERROR_CRYPTO_BIN_TOO_LARGE 0x2000d + #define PMF_IF_V1 1 #define PMF_IF_V2 2 @@ -743,6 +746,7 @@ int apmf_install_handler(struct amd_pmf_dev *pmf_dev); int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag); int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer); int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag); +u32 fixp_q88_fromint(u32 val); /* SPS Layer */ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf); diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c index 92f7fb22277dc..3a24209f7df03 100644 --- a/drivers/platform/x86/amd/pmf/sps.c +++ b/drivers/platform/x86/amd/pmf/sps.c @@ -198,9 +198,11 @@ static void amd_pmf_update_slider_v2(struct amd_pmf_dev *dev, int idx) amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, apts_config_store.val[idx].stt_min_limit, NULL); amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, - apts_config_store.val[idx].stt_skin_temp_limit_apu, NULL); + fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_apu), + NULL); amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, - apts_config_store.val[idx].stt_skin_temp_limit_hs2, NULL); + fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_hs2), + NULL); } void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx, @@ -217,9 +219,11 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx, amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, config_store.prop[src][idx].stt_min, NULL); amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, - config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU], NULL); + fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU]), + NULL); amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, - config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2], NULL); + fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2]), + NULL); } else if (op == SLIDER_OP_GET) { amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl); amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt); diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c index 19c27b6e46663..cb5abab2210a7 100644 --- a/drivers/platform/x86/amd/pmf/tee-if.c +++ b/drivers/platform/x86/amd/pmf/tee-if.c @@ -27,8 +27,11 @@ module_param(pb_side_load, bool, 0444); MODULE_PARM_DESC(pb_side_load, "Sideload policy binaries debug policy failures"); #endif -static const uuid_t amd_pmf_ta_uuid = UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d, - 0xb1, 0x2d, 0xc5, 0x29, 0xb1, 0x3d, 0x85, 0x43); +static const uuid_t amd_pmf_ta_uuid[] = { UUID_INIT(0xd9b39bf2, 0x66bd, 0x4154, 0xaf, 0xb8, 0x8a, + 0xcc, 0x2b, 0x2b, 0x60, 0xd6), + UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d, 0xb1, 0x2d, 0xc5, + 0x29, 0xb1, 0x3d, 0x85, 0x43), + }; static const 
char *amd_pmf_uevent_as_str(unsigned int state) { @@ -120,7 +123,8 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_ case PMF_POLICY_STT_SKINTEMP_APU: if (dev->prev_data->stt_skintemp_apu != val) { - amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, val, NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, + fixp_q88_fromint(val), NULL); dev_dbg(dev->dev, "update STT_SKINTEMP_APU: %u\n", val); dev->prev_data->stt_skintemp_apu = val; } @@ -128,7 +132,8 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_ case PMF_POLICY_STT_SKINTEMP_HS2: if (dev->prev_data->stt_skintemp_hs2 != val) { - amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, val, NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, + fixp_q88_fromint(val), NULL); dev_dbg(dev->dev, "update STT_SKINTEMP_HS2: %u\n", val); dev->prev_data->stt_skintemp_hs2 = val; } @@ -321,9 +326,9 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev) */ schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3)); } else { - dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res); + dev_dbg(dev->dev, "ta invoke cmd init failed err: %x\n", res); dev->smart_pc_enabled = false; - return -EIO; + return res; } return 0; @@ -390,12 +395,12 @@ static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const voi return ver->impl_id == TEE_IMPL_ID_AMDTEE; } -static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id) +static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id, const uuid_t *uuid) { struct tee_ioctl_open_session_arg sess_arg = {}; int rc; - export_uuid(sess_arg.uuid, &amd_pmf_ta_uuid); + export_uuid(sess_arg.uuid, uuid); sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC; sess_arg.num_params = 0; @@ -434,7 +439,7 @@ static int amd_pmf_register_input_device(struct amd_pmf_dev *dev) return 0; } -static int amd_pmf_tee_init(struct amd_pmf_dev *dev) +static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid) { u32 size; int ret; @@ -445,7 +450,7 @@ static int amd_pmf_tee_init(struct amd_pmf_dev *dev) return PTR_ERR(dev->tee_ctx); } - ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id); + ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id, uuid); if (ret) { dev_err(dev->dev, "Failed to open TA session (%d)\n", ret); ret = -EINVAL; @@ -489,7 +494,8 @@ static void amd_pmf_tee_deinit(struct amd_pmf_dev *dev) int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev) { - int ret; + bool status; + int ret, i; ret = apmf_check_smart_pc(dev); if (ret) { @@ -502,26 +508,22 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev) return -ENODEV; } - ret = amd_pmf_tee_init(dev); - if (ret) - return ret; - INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd); ret = amd_pmf_set_dram_addr(dev, true); if (ret) - goto error; + goto err_cancel_work; dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz); if (!dev->policy_base) { ret = -ENOMEM; - goto error; + goto err_free_dram_buf; } dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL); if (!dev->policy_buf) { ret = -ENOMEM; - goto error; + goto err_free_dram_buf; } memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz); @@ -531,24 +533,60 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev) dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL); if (!dev->prev_data) { ret = -ENOMEM; - goto error; + goto err_free_policy; } - ret = amd_pmf_start_policy_engine(dev); - if (ret) - goto error; + for (i = 0; i < 
ARRAY_SIZE(amd_pmf_ta_uuid); i++) { + ret = amd_pmf_tee_init(dev, &amd_pmf_ta_uuid[i]); + if (ret) + goto err_free_prev_data; + + ret = amd_pmf_start_policy_engine(dev); + switch (ret) { + case TA_PMF_TYPE_SUCCESS: + status = true; + break; + case TA_ERROR_CRYPTO_INVALID_PARAM: + case TA_ERROR_CRYPTO_BIN_TOO_LARGE: + amd_pmf_tee_deinit(dev); + status = false; + break; + default: + ret = -EINVAL; + amd_pmf_tee_deinit(dev); + goto err_free_prev_data; + } + + if (status) + break; + } + + if (!status && !pb_side_load) { + ret = -EINVAL; + goto err_free_prev_data; + } if (pb_side_load) amd_pmf_open_pb(dev, dev->dbgfs_dir); ret = amd_pmf_register_input_device(dev); if (ret) - goto error; + goto err_pmf_remove_pb; return 0; -error: - amd_pmf_deinit_smart_pc(dev); +err_pmf_remove_pb: + if (pb_side_load && dev->esbin) + amd_pmf_remove_pb(dev); + amd_pmf_tee_deinit(dev); +err_free_prev_data: + kfree(dev->prev_data); +err_free_policy: + kfree(dev->policy_buf); +err_free_dram_buf: + kfree(dev->buf); +err_cancel_work: + cancel_delayed_work_sync(&dev->pb_work); return ret; } diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 9d7e6b712abf1..8d2e6d8be9e54 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -426,11 +426,14 @@ static int asus_pega_lucid_set(struct asus_laptop *asus, int unit, bool enable) static int pega_acc_axis(struct asus_laptop *asus, int curr, char *method) { + unsigned long long val = (unsigned long long)curr; + acpi_status status; int i, delta; - unsigned long long val; - for (i = 0; i < PEGA_ACC_RETRIES; i++) { - acpi_evaluate_integer(asus->handle, method, NULL, &val); + for (i = 0; i < PEGA_ACC_RETRIES; i++) { + status = acpi_evaluate_integer(asus->handle, method, NULL, &val); + if (ACPI_FAILURE(status)) + continue; /* The output is noisy. From reading the ASL * dissassembly, timeout errors are returned with 1's * in the high word, and the lack of locking around diff --git a/drivers/platform/x86/dell/dell-uart-backlight.c b/drivers/platform/x86/dell/dell-uart-backlight.c index c45bc332af7a0..e4868584cde2d 100644 --- a/drivers/platform/x86/dell/dell-uart-backlight.c +++ b/drivers/platform/x86/dell/dell-uart-backlight.c @@ -325,7 +325,7 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev) return PTR_ERR_OR_ZERO(dell_bl->bl); } -struct serdev_device_driver dell_uart_bl_serdev_driver = { +static struct serdev_device_driver dell_uart_bl_serdev_driver = { .probe = dell_uart_bl_serdev_probe, .driver = { .name = KBUILD_MODNAME, diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c index e75cd6e1efe6a..ab5f7d3ab8249 100644 --- a/drivers/platform/x86/dell/dell-wmi-ddv.c +++ b/drivers/platform/x86/dell/dell-wmi-ddv.c @@ -665,8 +665,10 @@ static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char if (ret < 0) return ret; - /* Use 2731 instead of 2731.5 to avoid unnecessary rounding */ - return sysfs_emit(buf, "%d\n", value - 2731); + /* Use 2732 instead of 2731.5 to avoid unnecessary rounding and to emulate + * the behaviour of the OEM application which seems to round down the result. 
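+ * Example (assuming the firmware reports the temperature in tenths of a
+ * kelvin): a raw value of 2982 now yields 2982 - 2732 = 250, i.e. 25.0
+ * degrees Celsius.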
+ */ + return sysfs_emit(buf, "%d\n", value - 2732); } static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 445e7a59beb41..9a609358956f3 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -132,6 +132,13 @@ static const struct dmi_system_id button_array_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"), }, }, + { + .ident = "Microsoft Surface Go 4", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 4"), + }, + }, { } }; diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index 15678508ee501..9e69ac9cfb92c 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -2,6 +2,7 @@ /* Author: Dan Scally */ #include +#include #include #include #include @@ -55,7 +56,7 @@ static void skl_int3472_log_sensor_module_name(struct int3472_discrete_device *i static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry, struct acpi_resource_gpio *agpio, - const char *func, u32 polarity) + const char *func, unsigned long gpio_flags) { char *path = agpio->resource_source.string_ptr; struct acpi_device *adev; @@ -70,14 +71,14 @@ static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry, if (!adev) return -ENODEV; - *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, polarity); + *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, gpio_flags); return 0; } static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int3472, struct acpi_resource_gpio *agpio, - const char *func, u32 polarity) + const char *func, unsigned long gpio_flags) { int ret; @@ -87,7 +88,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347 } ret = skl_int3472_fill_gpiod_lookup(&int3472->gpios.table[int3472->n_sensor_gpios], - agpio, func, polarity); + agpio, func, gpio_flags); if (ret) return ret; @@ -100,7 +101,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347 static struct gpio_desc * skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472, struct acpi_resource_gpio *agpio, - const char *func, u32 polarity) + const char *func, unsigned long gpio_flags) { struct gpio_desc *desc; int ret; @@ -111,7 +112,7 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472, return ERR_PTR(-ENOMEM); lookup->dev_id = dev_name(int3472->dev); - ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, func, polarity); + ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, func, gpio_flags); if (ret) return ERR_PTR(ret); @@ -122,32 +123,76 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472, return desc; } -static void int3472_get_func_and_polarity(u8 type, const char **func, u32 *polarity) +/** + * struct int3472_gpio_map - Map GPIOs to whatever is expected by the + * sensor driver (as in DT bindings) + * @hid: The ACPI HID of the device without the instance number e.g. INT347E + * @type_from: The GPIO type from ACPI ?SDT + * @type_to: The assigned GPIO type, typically same as @type_from + * @func: The function, e.g. 
"enable" + * @polarity_low: GPIO_ACTIVE_LOW true if the @polarity_low is true, + * GPIO_ACTIVE_HIGH otherwise + */ +struct int3472_gpio_map { + const char *hid; + u8 type_from; + u8 type_to; + bool polarity_low; + const char *func; +}; + +static const struct int3472_gpio_map int3472_gpio_map[] = { + { "INT347E", INT3472_GPIO_TYPE_RESET, INT3472_GPIO_TYPE_RESET, false, "enable" }, +}; + +static void int3472_get_func_and_polarity(struct acpi_device *adev, u8 *type, + const char **func, unsigned long *gpio_flags) { - switch (type) { + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(int3472_gpio_map); i++) { + /* + * Map the firmware-provided GPIO to whatever a driver expects + * (as in DT bindings). First check if the type matches with the + * GPIO map, then further check that the device _HID matches. + */ + if (*type != int3472_gpio_map[i].type_from) + continue; + + if (!acpi_dev_hid_uid_match(adev, int3472_gpio_map[i].hid, NULL)) + continue; + + *type = int3472_gpio_map[i].type_to; + *gpio_flags = int3472_gpio_map[i].polarity_low ? + GPIO_ACTIVE_LOW : GPIO_ACTIVE_HIGH; + *func = int3472_gpio_map[i].func; + return; + } + + switch (*type) { case INT3472_GPIO_TYPE_RESET: *func = "reset"; - *polarity = GPIO_ACTIVE_LOW; + *gpio_flags = GPIO_ACTIVE_LOW; break; case INT3472_GPIO_TYPE_POWERDOWN: *func = "powerdown"; - *polarity = GPIO_ACTIVE_LOW; + *gpio_flags = GPIO_ACTIVE_LOW; break; case INT3472_GPIO_TYPE_CLK_ENABLE: *func = "clk-enable"; - *polarity = GPIO_ACTIVE_HIGH; + *gpio_flags = GPIO_ACTIVE_HIGH; break; case INT3472_GPIO_TYPE_PRIVACY_LED: *func = "privacy-led"; - *polarity = GPIO_ACTIVE_HIGH; + *gpio_flags = GPIO_ACTIVE_HIGH; break; case INT3472_GPIO_TYPE_POWER_ENABLE: *func = "power-enable"; - *polarity = GPIO_ACTIVE_HIGH; + *gpio_flags = GPIO_ACTIVE_HIGH; break; default: *func = "unknown"; - *polarity = GPIO_ACTIVE_HIGH; + *gpio_flags = GPIO_ACTIVE_HIGH; break; } } @@ -194,7 +239,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, struct gpio_desc *gpio; const char *err_msg; const char *func; - u32 polarity; + unsigned long gpio_flags; int ret; if (!acpi_gpio_get_io_resource(ares, &agpio)) @@ -217,7 +262,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, type = FIELD_GET(INT3472_GPIO_DSM_TYPE, obj->integer.value); - int3472_get_func_and_polarity(type, &func, &polarity); + int3472_get_func_and_polarity(int3472->sensor, &type, &func, &gpio_flags); pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value); if (pin != agpio->pin_table[0]) @@ -227,16 +272,16 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, active_value = FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, obj->integer.value); if (!active_value) - polarity ^= GPIO_ACTIVE_LOW; + gpio_flags ^= GPIO_ACTIVE_LOW; dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", func, agpio->resource_source.string_ptr, agpio->pin_table[0], - str_high_low(polarity == GPIO_ACTIVE_HIGH)); + str_high_low(gpio_flags == GPIO_ACTIVE_HIGH)); switch (type) { case INT3472_GPIO_TYPE_RESET: case INT3472_GPIO_TYPE_POWERDOWN: - ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, polarity); + ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, gpio_flags); if (ret) err_msg = "Failed to map GPIO pin to sensor\n"; @@ -244,7 +289,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, case INT3472_GPIO_TYPE_CLK_ENABLE: case INT3472_GPIO_TYPE_PRIVACY_LED: case INT3472_GPIO_TYPE_POWER_ENABLE: - gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, func, 
polarity); + gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, func, gpio_flags); if (IS_ERR(gpio)) { ret = PTR_ERR(gpio); err_msg = "Failed to get GPIO\n"; diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index 4e9c8c96c8cce..257c03c59fd95 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -625,8 +625,8 @@ static u32 convert_ltr_scale(u32 val) static int pmc_core_ltr_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - u64 decoded_snoop_ltr, decoded_non_snoop_ltr; - u32 ltr_raw_data, scale, val; + u64 decoded_snoop_ltr, decoded_non_snoop_ltr, val; + u32 ltr_raw_data, scale; u16 snoop_ltr, nonsnoop_ltr; unsigned int i, index, ltr_index = 0; diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index dbcd3087aaa4b..31239a93dd71b 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -84,7 +84,7 @@ static DECLARE_HASHTABLE(isst_hash, 8); static DEFINE_MUTEX(isst_hash_lock); static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param, - u32 data) + u64 data) { struct isst_cmd *sst_cmd; diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 7b5cc9993974e..55dd2286f3f35 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -410,6 +410,11 @@ static const struct intel_vsec_platform_info oobmsm_info = { .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI, }; +/* DMR OOBMSM info */ +static const struct intel_vsec_platform_info dmr_oobmsm_info = { + .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_TPMI, +}; + /* TGL info */ static const struct intel_vsec_platform_info tgl_info = { .caps = VSEC_CAP_TELEMETRY, @@ -426,6 +431,7 @@ static const struct intel_vsec_platform_info lnl_info = { #define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d #define PCI_DEVICE_ID_INTEL_VSEC_MTL_S 0xad0d #define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7 +#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM_DMR 0x09a1 #define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d #define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d #define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d @@ -435,6 +441,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) }, + { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM_DMR, &dmr_oobmsm_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) }, diff --git a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c index 32d9b6009c422..21de7c3a1ee3d 100644 --- a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c +++ b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c @@ -219,7 +219,7 @@ static int yt2_1380_fc_serdev_probe(struct serdev_device *serdev) return 0; } -struct serdev_device_driver yt2_1380_fc_serdev_driver = { +static struct serdev_device_driver yt2_1380_fc_serdev_driver = { .probe = yt2_1380_fc_serdev_probe, .driver = { .name = KBUILD_MODNAME, diff --git a/drivers/platform/x86/msi-wmi-platform.c b/drivers/platform/x86/msi-wmi-platform.c index 9b5c7f8c79b0d..dc5e9878cb682 100644 --- a/drivers/platform/x86/msi-wmi-platform.c +++ 
b/drivers/platform/x86/msi-wmi-platform.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -17,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -76,8 +78,13 @@ enum msi_wmi_platform_method { MSI_PLATFORM_GET_WMI = 0x1d, }; -struct msi_wmi_platform_debugfs_data { +struct msi_wmi_platform_data { struct wmi_device *wdev; + struct mutex wmi_lock; /* Necessary when calling WMI methods */ +}; + +struct msi_wmi_platform_debugfs_data { + struct msi_wmi_platform_data *data; enum msi_wmi_platform_method method; struct rw_semaphore buffer_lock; /* Protects debugfs buffer */ size_t length; @@ -132,8 +139,9 @@ static int msi_wmi_platform_parse_buffer(union acpi_object *obj, u8 *output, siz return 0; } -static int msi_wmi_platform_query(struct wmi_device *wdev, enum msi_wmi_platform_method method, - u8 *input, size_t input_length, u8 *output, size_t output_length) +static int msi_wmi_platform_query(struct msi_wmi_platform_data *data, + enum msi_wmi_platform_method method, u8 *input, + size_t input_length, u8 *output, size_t output_length) { struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer in = { @@ -147,9 +155,15 @@ static int msi_wmi_platform_query(struct wmi_device *wdev, enum msi_wmi_platform if (!input_length || !output_length) return -EINVAL; - status = wmidev_evaluate_method(wdev, 0x0, method, &in, &out); - if (ACPI_FAILURE(status)) - return -EIO; + /* + * The ACPI control method responsible for handling the WMI method calls + * is not thread-safe. Because of this we have to do the locking ourself. + */ + scoped_guard(mutex, &data->wmi_lock) { + status = wmidev_evaluate_method(data->wdev, 0x0, method, &in, &out); + if (ACPI_FAILURE(status)) + return -EIO; + } obj = out.pointer; if (!obj) @@ -170,22 +184,22 @@ static umode_t msi_wmi_platform_is_visible(const void *drvdata, enum hwmon_senso static int msi_wmi_platform_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { - struct wmi_device *wdev = dev_get_drvdata(dev); + struct msi_wmi_platform_data *data = dev_get_drvdata(dev); u8 input[32] = { 0 }; u8 output[32]; - u16 data; + u16 value; int ret; - ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_FAN, input, sizeof(input), output, + ret = msi_wmi_platform_query(data, MSI_PLATFORM_GET_FAN, input, sizeof(input), output, sizeof(output)); if (ret < 0) return ret; - data = get_unaligned_be16(&output[channel * 2 + 1]); - if (!data) + value = get_unaligned_be16(&output[channel * 2 + 1]); + if (!value) *val = 0; else - *val = 480000 / data; + *val = 480000 / value; return 0; } @@ -231,7 +245,7 @@ static ssize_t msi_wmi_platform_write(struct file *fp, const char __user *input, return ret; down_write(&data->buffer_lock); - ret = msi_wmi_platform_query(data->wdev, data->method, payload, data->length, data->buffer, + ret = msi_wmi_platform_query(data->data, data->method, payload, data->length, data->buffer, data->length); up_write(&data->buffer_lock); @@ -277,17 +291,17 @@ static void msi_wmi_platform_debugfs_remove(void *data) debugfs_remove_recursive(dir); } -static void msi_wmi_platform_debugfs_add(struct wmi_device *wdev, struct dentry *dir, +static void msi_wmi_platform_debugfs_add(struct msi_wmi_platform_data *drvdata, struct dentry *dir, const char *name, enum msi_wmi_platform_method method) { struct msi_wmi_platform_debugfs_data *data; struct dentry *entry; - data = devm_kzalloc(&wdev->dev, sizeof(*data), GFP_KERNEL); + data = devm_kzalloc(&drvdata->wdev->dev, 
sizeof(*data), GFP_KERNEL); if (!data) return; - data->wdev = wdev; + data->data = drvdata; data->method = method; init_rwsem(&data->buffer_lock); @@ -298,82 +312,82 @@ static void msi_wmi_platform_debugfs_add(struct wmi_device *wdev, struct dentry entry = debugfs_create_file(name, 0600, dir, data, &msi_wmi_platform_debugfs_fops); if (IS_ERR(entry)) - devm_kfree(&wdev->dev, data); + devm_kfree(&drvdata->wdev->dev, data); } -static void msi_wmi_platform_debugfs_init(struct wmi_device *wdev) +static void msi_wmi_platform_debugfs_init(struct msi_wmi_platform_data *data) { struct dentry *dir; char dir_name[64]; int ret, method; - scnprintf(dir_name, ARRAY_SIZE(dir_name), "%s-%s", DRIVER_NAME, dev_name(&wdev->dev)); + scnprintf(dir_name, ARRAY_SIZE(dir_name), "%s-%s", DRIVER_NAME, dev_name(&data->wdev->dev)); dir = debugfs_create_dir(dir_name, NULL); if (IS_ERR(dir)) return; - ret = devm_add_action_or_reset(&wdev->dev, msi_wmi_platform_debugfs_remove, dir); + ret = devm_add_action_or_reset(&data->wdev->dev, msi_wmi_platform_debugfs_remove, dir); if (ret < 0) return; for (method = MSI_PLATFORM_GET_PACKAGE; method <= MSI_PLATFORM_GET_WMI; method++) - msi_wmi_platform_debugfs_add(wdev, dir, msi_wmi_platform_debugfs_names[method - 1], + msi_wmi_platform_debugfs_add(data, dir, msi_wmi_platform_debugfs_names[method - 1], method); } -static int msi_wmi_platform_hwmon_init(struct wmi_device *wdev) +static int msi_wmi_platform_hwmon_init(struct msi_wmi_platform_data *data) { struct device *hdev; - hdev = devm_hwmon_device_register_with_info(&wdev->dev, "msi_wmi_platform", wdev, + hdev = devm_hwmon_device_register_with_info(&data->wdev->dev, "msi_wmi_platform", data, &msi_wmi_platform_chip_info, NULL); return PTR_ERR_OR_ZERO(hdev); } -static int msi_wmi_platform_ec_init(struct wmi_device *wdev) +static int msi_wmi_platform_ec_init(struct msi_wmi_platform_data *data) { u8 input[32] = { 0 }; u8 output[32]; u8 flags; int ret; - ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_EC, input, sizeof(input), output, + ret = msi_wmi_platform_query(data, MSI_PLATFORM_GET_EC, input, sizeof(input), output, sizeof(output)); if (ret < 0) return ret; flags = output[MSI_PLATFORM_EC_FLAGS_OFFSET]; - dev_dbg(&wdev->dev, "EC RAM version %lu.%lu\n", + dev_dbg(&data->wdev->dev, "EC RAM version %lu.%lu\n", FIELD_GET(MSI_PLATFORM_EC_MAJOR_MASK, flags), FIELD_GET(MSI_PLATFORM_EC_MINOR_MASK, flags)); - dev_dbg(&wdev->dev, "EC firmware version %.28s\n", + dev_dbg(&data->wdev->dev, "EC firmware version %.28s\n", &output[MSI_PLATFORM_EC_VERSION_OFFSET]); if (!(flags & MSI_PLATFORM_EC_IS_TIGERLAKE)) { if (!force) return -ENODEV; - dev_warn(&wdev->dev, "Loading on a non-Tigerlake platform\n"); + dev_warn(&data->wdev->dev, "Loading on a non-Tigerlake platform\n"); } return 0; } -static int msi_wmi_platform_init(struct wmi_device *wdev) +static int msi_wmi_platform_init(struct msi_wmi_platform_data *data) { u8 input[32] = { 0 }; u8 output[32]; int ret; - ret = msi_wmi_platform_query(wdev, MSI_PLATFORM_GET_WMI, input, sizeof(input), output, + ret = msi_wmi_platform_query(data, MSI_PLATFORM_GET_WMI, input, sizeof(input), output, sizeof(output)); if (ret < 0) return ret; - dev_dbg(&wdev->dev, "WMI interface version %u.%u\n", + dev_dbg(&data->wdev->dev, "WMI interface version %u.%u\n", output[MSI_PLATFORM_WMI_MAJOR_OFFSET], output[MSI_PLATFORM_WMI_MINOR_OFFSET]); @@ -381,7 +395,8 @@ static int msi_wmi_platform_init(struct wmi_device *wdev) if (!force) return -ENODEV; - dev_warn(&wdev->dev, "Loading despite unsupported WMI interface 
version (%u.%u)\n", + dev_warn(&data->wdev->dev, + "Loading despite unsupported WMI interface version (%u.%u)\n", output[MSI_PLATFORM_WMI_MAJOR_OFFSET], output[MSI_PLATFORM_WMI_MINOR_OFFSET]); } @@ -391,19 +406,31 @@ static int msi_wmi_platform_init(struct wmi_device *wdev) static int msi_wmi_platform_probe(struct wmi_device *wdev, const void *context) { + struct msi_wmi_platform_data *data; int ret; - ret = msi_wmi_platform_init(wdev); + data = devm_kzalloc(&wdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->wdev = wdev; + dev_set_drvdata(&wdev->dev, data); + + ret = devm_mutex_init(&wdev->dev, &data->wmi_lock); + if (ret < 0) + return ret; + + ret = msi_wmi_platform_init(data); if (ret < 0) return ret; - ret = msi_wmi_platform_ec_init(wdev); + ret = msi_wmi_platform_ec_init(data); if (ret < 0) return ret; - msi_wmi_platform_debugfs_init(wdev); + msi_wmi_platform_debugfs_init(data); - return msi_wmi_platform_hwmon_init(wdev); + return msi_wmi_platform_hwmon_init(data); } static const struct wmi_device_id msi_wmi_platform_id_table[] = { diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 2cfb2ac3f465a..dea40da867552 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -7883,6 +7883,7 @@ static struct ibm_struct volume_driver_data = { #define FAN_NS_CTRL_STATUS BIT(2) /* Bit which determines control is enabled or not */ #define FAN_NS_CTRL BIT(4) /* Bit which determines control is by host or EC */ +#define FAN_CLOCK_TPM (22500*60) /* Ticks per minute for a 22.5 kHz clock */ enum { /* Fan control constants */ fan_status_offset = 0x2f, /* EC register 0x2f */ @@ -7938,6 +7939,7 @@ static int fan_watchdog_maxinterval; static bool fan_with_ns_addr; static bool ecfw_with_fan_dec_rpm; +static bool fan_speed_in_tpr; static struct mutex fan_mutex; @@ -8140,8 +8142,11 @@ static int fan_get_speed(unsigned int *speed) !acpi_ec_read(fan_rpm_offset + 1, &hi))) return -EIO; - if (likely(speed)) + if (likely(speed)) { *speed = (hi << 8) | lo; + if (fan_speed_in_tpr && *speed != 0) + *speed = FAN_CLOCK_TPM / *speed; + } break; case TPACPI_FAN_RD_TPEC_NS: if (!acpi_ec_read(fan_rpm_status_ns, &lo)) @@ -8174,8 +8179,11 @@ static int fan2_get_speed(unsigned int *speed) if (rc) return -EIO; - if (likely(speed)) + if (likely(speed)) { *speed = (hi << 8) | lo; + if (fan_speed_in_tpr && *speed != 0) + *speed = FAN_CLOCK_TPM / *speed; + } break; case TPACPI_FAN_RD_TPEC_NS: @@ -8786,6 +8794,8 @@ static const struct attribute_group fan_driver_attr_group = { #define TPACPI_FAN_NOFAN 0x0008 /* no fan available */ #define TPACPI_FAN_NS 0x0010 /* For EC with non-Standard register addresses */ #define TPACPI_FAN_DECRPM 0x0020 /* For ECFW's with RPM in register as decimal */ +#define TPACPI_FAN_TPR 0x0040 /* Fan speed is in Ticks Per Revolution */ +#define TPACPI_FAN_NOACPI 0x0080 /* Don't use ACPI methods even if detected */ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1), @@ -8815,6 +8825,10 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_Q_LNV3('R', '0', 'V', TPACPI_FAN_NS), /* 11e Gen5 KL-Y */ TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */ TPACPI_Q_LNV3('R', '0', 'Q', TPACPI_FAN_DECRPM),/* L480 */ + TPACPI_Q_LNV('8', 'F', TPACPI_FAN_TPR), /* ThinkPad x120e */ + TPACPI_Q_LNV3('R', '0', '0', TPACPI_FAN_NOACPI),/* E560 */ + TPACPI_Q_LNV3('R', '1', '2', TPACPI_FAN_NOACPI),/* T495 */ + 
TPACPI_Q_LNV3('R', '1', '3', TPACPI_FAN_NOACPI),/* T495s */ }; static int __init fan_init(struct ibm_init_struct *iibm) @@ -8866,6 +8880,13 @@ static int __init fan_init(struct ibm_init_struct *iibm) tp_features.fan_ctrl_status_undef = 1; } + if (quirks & TPACPI_FAN_NOACPI) { + /* E560, T495, T495s */ + pr_info("Ignoring buggy ACPI fan access method\n"); + fang_handle = NULL; + fanw_handle = NULL; + } + if (gfan_handle) { /* 570, 600e/x, 770e, 770x */ fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN; @@ -8885,6 +8906,8 @@ static int __init fan_init(struct ibm_init_struct *iibm) if (quirks & TPACPI_FAN_Q1) fan_quirk1_setup(); + if (quirks & TPACPI_FAN_TPR) + fan_speed_in_tpr = true; /* Try and probe the 2nd fan */ tp_features.second_fan = 1; /* needed for get_speed to work */ res = fan2_get_speed(&speed); @@ -9958,6 +9981,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = { * Individual addressing is broken on models that expose the * primary battery as BAT1. */ + TPACPI_Q_LNV('G', '8', true), /* ThinkPad X131e */ TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */ TPACPI_Q_LNV('J', '7', true), /* B5400 */ TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */ @@ -10317,6 +10341,10 @@ static struct ibm_struct proxsensor_driver_data = { #define DYTC_MODE_PSC_BALANCE 5 /* Default mode aka balanced */ #define DYTC_MODE_PSC_PERFORM 7 /* High power mode aka performance */ +#define DYTC_MODE_PSCV9_LOWPOWER 1 /* Low power mode */ +#define DYTC_MODE_PSCV9_BALANCE 3 /* Default mode aka balanced */ +#define DYTC_MODE_PSCV9_PERFORM 4 /* High power mode aka performance */ + #define DYTC_ERR_MASK 0xF /* Bits 0-3 in cmd result are the error result */ #define DYTC_ERR_SUCCESS 1 /* CMD completed successful */ @@ -10337,6 +10365,10 @@ static int dytc_capabilities; static bool dytc_mmc_get_available; static int profile_force; +static int platform_psc_profile_lowpower = DYTC_MODE_PSC_LOWPOWER; +static int platform_psc_profile_balanced = DYTC_MODE_PSC_BALANCE; +static int platform_psc_profile_performance = DYTC_MODE_PSC_PERFORM; + static int convert_dytc_to_profile(int funcmode, int dytcmode, enum platform_profile_option *profile) { @@ -10358,19 +10390,15 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode, } return 0; case DYTC_FUNCTION_PSC: - switch (dytcmode) { - case DYTC_MODE_PSC_LOWPOWER: + if (dytcmode == platform_psc_profile_lowpower) *profile = PLATFORM_PROFILE_LOW_POWER; - break; - case DYTC_MODE_PSC_BALANCE: + else if (dytcmode == platform_psc_profile_balanced) *profile = PLATFORM_PROFILE_BALANCED; - break; - case DYTC_MODE_PSC_PERFORM: + else if (dytcmode == platform_psc_profile_performance) *profile = PLATFORM_PROFILE_PERFORMANCE; - break; - default: /* Unknown mode */ + else return -EINVAL; - } + return 0; case DYTC_FUNCTION_AMT: /* For now return balanced. 
It's the closest we have to 'auto' */ @@ -10391,19 +10419,19 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe if (dytc_capabilities & BIT(DYTC_FC_MMC)) *perfmode = DYTC_MODE_MMC_LOWPOWER; else if (dytc_capabilities & BIT(DYTC_FC_PSC)) - *perfmode = DYTC_MODE_PSC_LOWPOWER; + *perfmode = platform_psc_profile_lowpower; break; case PLATFORM_PROFILE_BALANCED: if (dytc_capabilities & BIT(DYTC_FC_MMC)) *perfmode = DYTC_MODE_MMC_BALANCE; else if (dytc_capabilities & BIT(DYTC_FC_PSC)) - *perfmode = DYTC_MODE_PSC_BALANCE; + *perfmode = platform_psc_profile_balanced; break; case PLATFORM_PROFILE_PERFORMANCE: if (dytc_capabilities & BIT(DYTC_FC_MMC)) *perfmode = DYTC_MODE_MMC_PERFORM; else if (dytc_capabilities & BIT(DYTC_FC_PSC)) - *perfmode = DYTC_MODE_PSC_PERFORM; + *perfmode = platform_psc_profile_performance; break; default: /* Unknown profile */ return -EOPNOTSUPP; @@ -10592,6 +10620,7 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm) if (output & BIT(DYTC_QUERY_ENABLE_BIT)) dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF; + dbg_printk(TPACPI_DBG_INIT, "DYTC version %d\n", dytc_version); /* Check DYTC is enabled and supports mode setting */ if (dytc_version < 5) return -ENODEV; @@ -10630,6 +10659,11 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm) } } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */ pr_debug("PSC is supported\n"); + if (dytc_version >= 9) { /* update profiles for DYTC 9 and up */ + platform_psc_profile_lowpower = DYTC_MODE_PSCV9_LOWPOWER; + platform_psc_profile_balanced = DYTC_MODE_PSCV9_BALANCE; + platform_psc_profile_performance = DYTC_MODE_PSCV9_PERFORM; + } } else { dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n"); return -ENODEV; diff --git a/drivers/platform/x86/x86-android-tablets/Kconfig b/drivers/platform/x86/x86-android-tablets/Kconfig index 88d9e8f2ff24e..c98dfbdfb9dda 100644 --- a/drivers/platform/x86/x86-android-tablets/Kconfig +++ b/drivers/platform/x86/x86-android-tablets/Kconfig @@ -8,6 +8,7 @@ config X86_ANDROID_TABLETS depends on I2C && SPI && SERIAL_DEV_BUS && ACPI && EFI && GPIOLIB && PMIC_OPREGION select NEW_LEDS select LEDS_CLASS + select POWER_SUPPLY help X86 tablets which ship with Android as (part of) the factory image typically have various problems with their DSDTs. The factory kernels diff --git a/drivers/pmdomain/amlogic/meson-secure-pwrc.c b/drivers/pmdomain/amlogic/meson-secure-pwrc.c index 42ce41a2fe3a0..ff76ea36835e5 100644 --- a/drivers/pmdomain/amlogic/meson-secure-pwrc.c +++ b/drivers/pmdomain/amlogic/meson-secure-pwrc.c @@ -221,7 +221,7 @@ static const struct meson_secure_pwrc_domain_desc t7_pwrc_domains[] = { SEC_PD(T7_VI_CLK2, 0), /* ETH is for ethernet online wakeup, and should be always on */ SEC_PD(T7_ETH, GENPD_FLAG_ALWAYS_ON), - SEC_PD(T7_ISP, 0), + TOP_PD(T7_ISP, 0, PWRC_T7_MIPI_ISP_ID), SEC_PD(T7_MIPI_ISP, 0), TOP_PD(T7_GDC, 0, PWRC_T7_NIC3_ID), TOP_PD(T7_DEWARP, 0, PWRC_T7_NIC3_ID), diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c index f71cc90fea127..57eba1ddb17ba 100644 --- a/drivers/power/supply/axp20x_battery.c +++ b/drivers/power/supply/axp20x_battery.c @@ -466,10 +466,9 @@ static int axp717_battery_get_prop(struct power_supply *psy, /* * If a fault is detected it must also be cleared; if the - * condition persists it should reappear (This is an - * assumption, it's actually not documented). 
A restart was - * not sufficient to clear the bit in testing despite the - * register listed as POR. + * condition persists it should reappear. A restart was not + * sufficient to clear the bit in testing despite the register + * listed as POR. */ case POWER_SUPPLY_PROP_HEALTH: ret = regmap_read(axp20x_batt->regmap, AXP717_PMU_FAULT, @@ -480,26 +479,26 @@ static int axp717_battery_get_prop(struct power_supply *psy, switch (reg & AXP717_BATT_PMU_FAULT_MASK) { case AXP717_BATT_UVLO_2_5V: val->intval = POWER_SUPPLY_HEALTH_DEAD; - regmap_update_bits(axp20x_batt->regmap, - AXP717_PMU_FAULT, - AXP717_BATT_UVLO_2_5V, - AXP717_BATT_UVLO_2_5V); + regmap_write_bits(axp20x_batt->regmap, + AXP717_PMU_FAULT, + AXP717_BATT_UVLO_2_5V, + AXP717_BATT_UVLO_2_5V); return 0; case AXP717_BATT_OVER_TEMP: val->intval = POWER_SUPPLY_HEALTH_HOT; - regmap_update_bits(axp20x_batt->regmap, - AXP717_PMU_FAULT, - AXP717_BATT_OVER_TEMP, - AXP717_BATT_OVER_TEMP); + regmap_write_bits(axp20x_batt->regmap, + AXP717_PMU_FAULT, + AXP717_BATT_OVER_TEMP, + AXP717_BATT_OVER_TEMP); return 0; case AXP717_BATT_UNDER_TEMP: val->intval = POWER_SUPPLY_HEALTH_COLD; - regmap_update_bits(axp20x_batt->regmap, - AXP717_PMU_FAULT, - AXP717_BATT_UNDER_TEMP, - AXP717_BATT_UNDER_TEMP); + regmap_write_bits(axp20x_batt->regmap, + AXP717_PMU_FAULT, + AXP717_BATT_UNDER_TEMP, + AXP717_BATT_UNDER_TEMP); return 0; default: diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 51fb88aca0f9f..1a20c775489c7 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1913,7 +1913,6 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di) cache.flags = -1; /* read error */ if (cache.flags >= 0) { cache.capacity = bq27xxx_battery_read_soc(di); - di->cache.flags = cache.flags; /* * On gauges with signed current reporting the current must be diff --git a/drivers/power/supply/da9150-fg.c b/drivers/power/supply/da9150-fg.c index 652c1f213af1c..4f28ef1bba1a3 100644 --- a/drivers/power/supply/da9150-fg.c +++ b/drivers/power/supply/da9150-fg.c @@ -247,9 +247,9 @@ static int da9150_fg_current_avg(struct da9150_fg *fg, DA9150_QIF_SD_GAIN_SIZE); da9150_fg_read_sync_end(fg); - div = (u64) (sd_gain * shunt_val * 65536ULL); + div = 65536ULL * sd_gain * shunt_val; do_div(div, 1000000); - res = (u64) (iavg * 1000000ULL); + res = 1000000ULL * iavg; do_div(res, div); val->intval = (int) res; diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c index 4caac142c4285..b32d881111850 100644 --- a/drivers/power/supply/max77693_charger.c +++ b/drivers/power/supply/max77693_charger.c @@ -608,7 +608,7 @@ static int max77693_set_charge_input_threshold_volt(struct max77693_charger *chg case 4700000: case 4800000: case 4900000: - data = (uvolt - 4700000) / 100000; + data = ((uvolt - 4700000) / 100000) + 1; break; default: dev_err(chg->dev, "Wrong value for charge input voltage regulation threshold\n"); diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index 52c32dcbf7d84..4112a00973382 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c @@ -627,8 +627,7 @@ struct powercap_control_type *powercap_register_control_type( dev_set_name(&control_type->dev, "%s", name); result = device_register(&control_type->dev); if (result) { - if (control_type->allocated) - kfree(control_type); + put_device(&control_type->dev); return ERR_PTR(result); } idr_init(&control_type->idr); diff --git 
a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 120db96d9e95d..0eeb503e06c23 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -2067,6 +2067,7 @@ ptp_ocp_signal_set(struct ptp_ocp *bp, int gen, struct ptp_ocp_signal *s) if (!s->start) { /* roundup() does not work on 32-bit systems */ s->start = DIV64_U64_ROUND_UP(start_ns, s->period); + s->start *= s->period; s->start = ktime_add(s->start, s->phase); } diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c index 2510c10ca4730..c45a5fca4cbbd 100644 --- a/drivers/pwm/pwm-fsl-ftm.c +++ b/drivers/pwm/pwm-fsl-ftm.c @@ -118,6 +118,9 @@ static unsigned int fsl_pwm_ticks_to_ns(struct fsl_pwm_chip *fpc, unsigned long long exval; rate = clk_get_rate(fpc->clk[fpc->period.clk_select]); + if (rate >> fpc->period.clk_ps == 0) + return 0; + exval = ticks; exval *= 1000000000UL; do_div(exval, rate >> fpc->period.clk_ps); @@ -190,6 +193,9 @@ static unsigned int fsl_pwm_calculate_duty(struct fsl_pwm_chip *fpc, unsigned int period = fpc->period.mod_period + 1; unsigned int period_ns = fsl_pwm_ticks_to_ns(fpc, period); + if (!period_ns) + return 0; + duty = (unsigned long long)duty_ns * period; do_div(duty, period_ns); diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index 01dfa0fab80a4..7eaab58314995 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -121,21 +121,25 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip); u32 clkdiv = 0, cnt_period, cnt_duty, reg_width = PWMDWIDTH, reg_thres = PWMTHRES; + unsigned long clk_rate; u64 resolution; int ret; ret = pwm_mediatek_clk_enable(chip, pwm); - if (ret < 0) return ret; + clk_rate = clk_get_rate(pc->clk_pwms[pwm->hwpwm]); + if (!clk_rate) + return -EINVAL; + /* Make sure we use the bus clock and not the 26MHz clock */ if (pc->soc->has_ck_26m_sel) writel(0, pc->regs + PWM_CK_26M_SEL); /* Using resolution in picosecond gets accuracy higher */ resolution = (u64)NSEC_PER_SEC * 1000; - do_div(resolution, clk_get_rate(pc->clk_pwms[pwm->hwpwm])); + do_div(resolution, clk_rate); cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution); while (cnt_period > 8191) { diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c index 2261789cc27da..578dbdd2d5a72 100644 --- a/drivers/pwm/pwm-rcar.c +++ b/drivers/pwm/pwm-rcar.c @@ -8,6 +8,7 @@ * - The hardware cannot generate a 0% duty cycle. */ +#include #include #include #include @@ -102,23 +103,24 @@ static void rcar_pwm_set_clock_control(struct rcar_pwm_chip *rp, rcar_pwm_write(rp, value, RCAR_PWMCR); } -static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, int duty_ns, - int period_ns) +static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, u64 duty_ns, + u64 period_ns) { - unsigned long long one_cycle, tmp; /* 0.01 nanoseconds */ + unsigned long long tmp; unsigned long clk_rate = clk_get_rate(rp->clk); u32 cyc, ph; - one_cycle = NSEC_PER_SEC * 100ULL << div; - do_div(one_cycle, clk_rate); + /* div <= 24 == RCAR_PWM_MAX_DIVISION, so the shift doesn't overflow. 
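+	 * Worst case is (u64)NSEC_PER_SEC << 24, roughly 1.7e16 (about 2^54),
+	 * far below U64_MAX, so the divisor cannot wrap.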
*/ + tmp = mul_u64_u64_div_u64(period_ns, clk_rate, (u64)NSEC_PER_SEC << div); + if (tmp > FIELD_MAX(RCAR_PWMCNT_CYC0_MASK)) + tmp = FIELD_MAX(RCAR_PWMCNT_CYC0_MASK); - tmp = period_ns * 100ULL; - do_div(tmp, one_cycle); - cyc = (tmp << RCAR_PWMCNT_CYC0_SHIFT) & RCAR_PWMCNT_CYC0_MASK; + cyc = FIELD_PREP(RCAR_PWMCNT_CYC0_MASK, tmp); - tmp = duty_ns * 100ULL; - do_div(tmp, one_cycle); - ph = tmp & RCAR_PWMCNT_PH0_MASK; + tmp = mul_u64_u64_div_u64(duty_ns, clk_rate, (u64)NSEC_PER_SEC << div); + if (tmp > FIELD_MAX(RCAR_PWMCNT_PH0_MASK)) + tmp = FIELD_MAX(RCAR_PWMCNT_PH0_MASK); + ph = FIELD_PREP(RCAR_PWMCNT_PH0_MASK, tmp); /* Avoid prohibited setting */ if (cyc == 0 || ph == 0) diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 27afbb9d544b7..cbf531d0ba688 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -1742,7 +1742,8 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, err = rio_add_net(net); if (err) { rmcd_debug(RDEV, "failed to register net, err=%d", err); - kfree(net); + put_device(&net->dev); + mport->net = NULL; goto cleanup; } } diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index fdcf742b2adbc..c12941f71e2cb 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -871,7 +871,10 @@ static struct rio_net *rio_scan_alloc_net(struct rio_mport *mport, dev_set_name(&net->dev, "rnet_%d", net->id); net->dev.parent = &mport->dev; net->dev.release = rio_scan_release_dev; - rio_add_net(net); + if (rio_add_net(net)) { + put_device(&net->dev); + net = NULL; + } } return net; diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h index 143d04c779a82..b7c7d5ba4d9dd 100644 --- a/drivers/ras/amd/atl/internal.h +++ b/drivers/ras/amd/atl/internal.h @@ -361,4 +361,7 @@ static inline void atl_debug_on_bad_intlv_mode(struct addr_ctx *ctx) atl_debug(ctx, "Unrecognized interleave mode: %u", ctx->map.intlv_mode); } +#define MI300_UMC_MCA_COL GENMASK(5, 1) +#define MI300_UMC_MCA_ROW13 BIT(23) + #endif /* __AMD_ATL_INTERNAL_H__ */ diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c index dc8aa12f63c81..6e072b7667e98 100644 --- a/drivers/ras/amd/atl/umc.c +++ b/drivers/ras/amd/atl/umc.c @@ -229,7 +229,6 @@ int get_umc_info_mi300(void) * Additionally, the PC and Bank bits may be hashed. This must be accounted for before * reconstructing the normalized address. */ -#define MI300_UMC_MCA_COL GENMASK(5, 1) #define MI300_UMC_MCA_BANK GENMASK(9, 6) #define MI300_UMC_MCA_ROW GENMASK(24, 10) #define MI300_UMC_MCA_PC BIT(25) @@ -320,7 +319,7 @@ static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr) * See amd_atl::convert_dram_to_norm_addr_mi300() for MI300 address formats. */ #define MI300_NUM_COL BIT(HWEIGHT(MI300_UMC_MCA_COL)) -static void retire_row_mi300(struct atl_err *a_err) +static void _retire_row_mi300(struct atl_err *a_err) { unsigned long addr; struct page *p; @@ -351,6 +350,22 @@ static void retire_row_mi300(struct atl_err *a_err) } } +/* + * In addition to the column bits, the row[13] bit should also be included when + * calculating addresses affected by a physical row. + * + * Instead of running through another loop over a single bit, just run through + * the column bits twice and flip the row[13] bit in-between. + * + * See MI300_UMC_MCA_ROW for the row bits in MCA_ADDR_UMC value. 
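+ * Note (derived from the masks above): MI300_UMC_MCA_COL covers five column
+ * bits, so each pass iterates over MI300_NUM_COL = 32 column combinations and
+ * the two passes together cover 64 candidate addresses per reported error.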
+ */ +static void retire_row_mi300(struct atl_err *a_err) +{ + _retire_row_mi300(a_err); + a_err->addr ^= MI300_UMC_MCA_ROW13; + _retire_row_mi300(a_err); +} + void amd_retire_dram_row(struct atl_err *a_err) { if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous) diff --git a/drivers/ras/amd/fmpm.c b/drivers/ras/amd/fmpm.c index 90de737fbc909..8877c6ff64c46 100644 --- a/drivers/ras/amd/fmpm.c +++ b/drivers/ras/amd/fmpm.c @@ -250,6 +250,13 @@ static bool rec_has_valid_entries(struct fru_rec *rec) return true; } +/* + * Row retirement is done on MI300 systems, and some bits are 'don't + * care' for comparing addresses with unique physical rows. This + * includes all column bits and the row[13] bit. + */ +#define MASK_ADDR(addr) ((addr) & ~(MI300_UMC_MCA_ROW13 | MI300_UMC_MCA_COL)) + static bool fpds_equal(struct cper_fru_poison_desc *old, struct cper_fru_poison_desc *new) { /* @@ -258,7 +265,7 @@ static bool fpds_equal(struct cper_fru_poison_desc *old, struct cper_fru_poison_ * * Also, order the checks from most->least likely to fail to shortcut the code. */ - if (old->addr != new->addr) + if (MASK_ADDR(old->addr) != MASK_ADDR(new->addr)) return false; if (old->hw_id != new->hw_id) diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 4bb2652740d00..1f4698d724bb7 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -2024,6 +2024,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) if (have_full_constraints()) { r = dummy_regulator_rdev; + if (!r) { + ret = -EPROBE_DEFER; + goto out; + } get_device(&r->dev); } else { dev_err(dev, "Failed to resolve %s-supply for %s\n", @@ -2041,6 +2045,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) goto out; } r = dummy_regulator_rdev; + if (!r) { + ret = -EPROBE_DEFER; + goto out; + } get_device(&r->dev); } @@ -2166,8 +2174,10 @@ struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct devic * enabled, even if it isn't hooked up, and just * provide a dummy. 
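	 * Note: with the dummy regulator now probing synchronously, a NULL
	 * dummy_regulator_rdev simply means it has not been registered yet,
	 * which is why the new checks return -EPROBE_DEFER instead of
	 * dereferencing a NULL pointer.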
*/ - dev_warn(dev, "supply %s not found, using dummy regulator\n", id); rdev = dummy_regulator_rdev; + if (!rdev) + return ERR_PTR(-EPROBE_DEFER); + dev_warn(dev, "supply %s not found, using dummy regulator\n", id); get_device(&rdev->dev); break; diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c index 5b9b9e4e762d5..9f59889129abe 100644 --- a/drivers/regulator/dummy.c +++ b/drivers/regulator/dummy.c @@ -60,7 +60,7 @@ static struct platform_driver dummy_regulator_driver = { .probe = dummy_regulator_probe, .driver = { .name = "reg-dummy", - .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .probe_type = PROBE_FORCE_SYNCHRONOUS, }, }; diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c index 9714afe347dcc..1ffa145319f23 100644 --- a/drivers/regulator/pca9450-regulator.c +++ b/drivers/regulator/pca9450-regulator.c @@ -454,7 +454,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = { .n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts), .vsel_reg = PCA9450_REG_LDO5CTRL_H, .vsel_mask = LDO5HOUT_MASK, - .enable_reg = PCA9450_REG_LDO5CTRL_H, + .enable_reg = PCA9450_REG_LDO5CTRL_L, .enable_mask = LDO5H_EN_MASK, .owner = THIS_MODULE, }, @@ -663,7 +663,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = { .n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts), .vsel_reg = PCA9450_REG_LDO5CTRL_H, .vsel_mask = LDO5HOUT_MASK, - .enable_reg = PCA9450_REG_LDO5CTRL_H, + .enable_reg = PCA9450_REG_LDO5CTRL_L, .enable_mask = LDO5H_EN_MASK, .owner = THIS_MODULE, }, @@ -835,7 +835,7 @@ static const struct pca9450_regulator_desc pca9451a_regulators[] = { .n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts), .vsel_reg = PCA9450_REG_LDO5CTRL_H, .vsel_mask = LDO5HOUT_MASK, - .enable_reg = PCA9450_REG_LDO5CTRL_H, + .enable_reg = PCA9450_REG_LDO5CTRL_L, .enable_mask = LDO5H_EN_MASK, .owner = THIS_MODULE, }, diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 32c3531b20c70..e19081d530226 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -1839,6 +1839,13 @@ static int q6v5_pds_attach(struct device *dev, struct device **devs, while (pd_names[num_pds]) num_pds++; + /* Handle single power domain */ + if (num_pds == 1 && dev->pm_domain) { + devs[0] = dev; + pm_runtime_enable(dev); + return 1; + } + for (i = 0; i < num_pds; i++) { devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); if (IS_ERR_OR_NULL(devs[i])) { @@ -1859,8 +1866,15 @@ static int q6v5_pds_attach(struct device *dev, struct device **devs, static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds, size_t pd_count) { + struct device *dev = qproc->dev; int i; + /* Handle single power domain */ + if (pd_count == 1 && dev->pm_domain) { + pm_runtime_disable(dev); + return; + } + for (i = 0; i < pd_count; i++) dev_pm_domain_detach(pds[i], false); } @@ -2469,13 +2483,13 @@ static const struct rproc_hexagon_res msm8974_mss = { .supply = "pll", .uA = 100000, }, - {} - }, - .fallback_proxy_supply = (struct qcom_mss_reg_res[]) { { .supply = "mx", .uV = 1050000, }, + {} + }, + .fallback_proxy_supply = (struct qcom_mss_reg_res[]) { { .supply = "cx", .uA = 100000, @@ -2501,7 +2515,6 @@ static const struct rproc_hexagon_res msm8974_mss = { NULL }, .proxy_pd_names = (char*[]){ - "mx", "cx", NULL }, diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 1a2d08ec9de9e..ea4a91f37b506 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c 
@@ -509,16 +509,16 @@ static int adsp_pds_attach(struct device *dev, struct device **devs, if (!pd_names) return 0; + while (pd_names[num_pds]) + num_pds++; + /* Handle single power domain */ - if (dev->pm_domain) { + if (num_pds == 1 && dev->pm_domain) { devs[0] = dev; pm_runtime_enable(dev); return 1; } - while (pd_names[num_pds]) - num_pds++; - for (i = 0; i < num_pds; i++) { devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); if (IS_ERR_OR_NULL(devs[i])) { @@ -543,7 +543,7 @@ static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds, int i; /* Handle single power domain */ - if (dev->pm_domain && pd_count) { + if (pd_count == 1 && dev->pm_domain) { pm_runtime_disable(dev); return; } @@ -1356,6 +1356,7 @@ static const struct adsp_data sc7280_wpss_resource = { .crash_reason_smem = 626, .firmware_name = "wpss.mdt", .pas_id = 6, + .minidump_id = 4, .auto_boot = false, .proxy_pd_names = (char*[]){ "cx", @@ -1418,7 +1419,7 @@ static const struct adsp_data sm8650_mpss_resource = { }; static const struct of_device_id adsp_of_match[] = { - { .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init}, + { .compatible = "qcom,msm8226-adsp-pil", .data = &msm8996_adsp_resource}, { .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource}, { .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init}, { .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource}, diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index ef6febe356330..d2308c2f97eb9 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -2025,6 +2025,7 @@ int rproc_shutdown(struct rproc *rproc) kfree(rproc->cached_table); rproc->cached_table = NULL; rproc->table_ptr = NULL; + rproc->table_sz = 0; out: mutex_unlock(&rproc->lock); return ret; diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index a07bbecba61cd..0c5bda060249e 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -682,7 +682,8 @@ static int info_update(void) if (time_after(jiffies, chp_info_expires)) { /* Data is too old, update. 
*/ rc = sclp_chp_read_info(&chp_info); - chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL ; + if (!rc) + chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL; } mutex_unlock(&info_lock); diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index e36e3ea165d3b..2f34761e64135 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -588,6 +588,15 @@ static int ism_dev_init(struct ism_dev *ism) return ret; } +static void ism_dev_release(struct device *dev) +{ + struct ism_dev *ism; + + ism = container_of(dev, struct ism_dev, dev); + + kfree(ism); +} + static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct ism_dev *ism; @@ -601,6 +610,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_set_drvdata(&pdev->dev, ism); ism->pdev = pdev; ism->dev.parent = &pdev->dev; + ism->dev.release = ism_dev_release; device_initialize(&ism->dev); dev_set_name(&ism->dev, dev_name(&pdev->dev)); ret = device_add(&ism->dev); @@ -637,7 +647,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) device_del(&ism->dev); err_dev: dev_set_drvdata(&pdev->dev, NULL); - kfree(ism); + put_device(&ism->dev); return ret; } @@ -682,7 +692,7 @@ static void ism_remove(struct pci_dev *pdev) pci_disable_device(pdev); device_del(&ism->dev); dev_set_drvdata(&pdev->dev, NULL); - kfree(ism); + put_device(&ism->dev); } static struct pci_driver ism_driver = { diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 21fa7ac849e5c..4904b831c0a75 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -302,11 +302,17 @@ static struct airq_info *new_airq_info(int index) static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs, u64 *first, void **airq_info) { - int i, j; + int i, j, queue_idx, highest_queue_idx = -1; struct airq_info *info; unsigned long *indicator_addr = NULL; unsigned long bit, flags; + /* Array entries without an actual queue pointer must be ignored. */ + for (i = 0; i < nvqs; i++) { + if (vqs[i]) + highest_queue_idx++; + } + for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) { mutex_lock(&airq_areas_lock); if (!airq_areas[i]) @@ -316,7 +322,7 @@ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs, if (!info) return NULL; write_lock_irqsave(&info->lock, flags); - bit = airq_iv_alloc(info->aiv, nvqs); + bit = airq_iv_alloc(info->aiv, highest_queue_idx + 1); if (bit == -1UL) { /* Not enough vacancies. 
*/ write_unlock_irqrestore(&info->lock, flags); @@ -325,8 +331,10 @@ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs, *first = bit; *airq_info = info; indicator_addr = info->aiv->vector; - for (j = 0; j < nvqs; j++) { - airq_iv_set_ptr(info->aiv, bit + j, + for (j = 0, queue_idx = 0; j < nvqs; j++) { + if (!vqs[j]) + continue; + airq_iv_set_ptr(info->aiv, bit + queue_idx++, (unsigned long)vqs[j]); } write_unlock_irqrestore(&info->lock, flags); diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index adec0df24bc47..1cb517f731f4a 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -601,7 +600,7 @@ void fnic_mq_map_queues_cpus(struct Scsi_Host *host) return; } - blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET); + blk_mq_map_hw_queues(qmap, &l_pdev->dev, FNIC_PCI_OFFSET); } static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index d223f482488fc..010479a354eee 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -9,7 +9,6 @@ #include #include -#include #include #include #include diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 342d75f12051d..89ff33daba404 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -2501,6 +2501,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_port *port = to_hisi_sas_port(sas_port); struct sas_ata_task *ata_task = &task->ata_task; struct sas_tmf_task *tmf = slot->tmf; + int phy_id; u8 *buf_cmd; int has_data = 0, hdr_tag = 0; u32 dw0, dw1 = 0, dw2 = 0; @@ -2508,10 +2509,14 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, /* create header */ /* dw0 */ dw0 = port->id << CMD_HDR_PORT_OFF; - if (parent_dev && dev_is_expander(parent_dev->dev_type)) + if (parent_dev && dev_is_expander(parent_dev->dev_type)) { dw0 |= 3 << CMD_HDR_CMD_OFF; - else + } else { + phy_id = device->phy->identify.phy_identifier; + dw0 |= (1U << phy_id) << CMD_HDR_PHY_ID_OFF; + dw0 |= CMD_HDR_FORCE_PHY_MSK; dw0 |= 4 << CMD_HDR_CMD_OFF; + } if (tmf && ata_task->force_phy) { dw0 |= CMD_HDR_FORCE_PHY_MSK; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index cd394d8c9f07f..2b04556681a1a 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -358,6 +358,10 @@ #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) #define CMD_HDR_TLR_CTRL_OFF 6 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) +#define CMD_HDR_PHY_ID_OFF 8 +#define CMD_HDR_PHY_ID_MSK (0x1ff << CMD_HDR_PHY_ID_OFF) +#define CMD_HDR_FORCE_PHY_OFF 17 +#define CMD_HDR_FORCE_PHY_MSK (0x1U << CMD_HDR_FORCE_PHY_OFF) #define CMD_HDR_PORT_OFF 18 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) #define CMD_HDR_PRIORITY_OFF 27 @@ -1425,15 +1429,21 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct asd_sas_port *sas_port = device->port; struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + int phy_id; u8 *buf_cmd; int has_data = 0, hdr_tag = 0; u32 dw1 = 0, dw2 = 0; hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); - if (parent_dev && dev_is_expander(parent_dev->dev_type)) + if (parent_dev && dev_is_expander(parent_dev->dev_type)) { hdr->dw0 |= 
cpu_to_le32(3 << CMD_HDR_CMD_OFF); - else + } else { + phy_id = device->phy->identify.phy_identifier; + hdr->dw0 |= cpu_to_le32((1U << phy_id) + << CMD_HDR_PHY_ID_OFF); + hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK; hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF); + } switch (task->data_dir) { case DMA_TO_DEVICE: @@ -3323,8 +3333,8 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost) if (i == HCTX_TYPE_POLL) blk_mq_map_queues(qmap); else - blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev, - BASE_VECTORS_V3_HW); + blk_mq_map_hw_queues(qmap, hisi_hba->dev, + BASE_VECTORS_V3_HW); qoff += qmap->nr_queues; } } diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 50f1dcb6d5846..21f22e913cd08 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #include @@ -2104,6 +2103,9 @@ static int megasas_device_configure(struct scsi_device *sdev, /* This sdev property may change post OCR */ megasas_set_dynamic_target_properties(sdev, lim, is_target_prop); + if (!MEGASAS_IS_LOGICAL(sdev)) + sdev->no_vpd_size = 1; + mutex_unlock(&instance->reset_mutex); return 0; @@ -3193,7 +3195,7 @@ static void megasas_map_queues(struct Scsi_Host *shost) map = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; map->nr_queues = instance->msix_vectors - offset; map->queue_offset = 0; - blk_mq_pci_map_queues(map, instance->pdev, offset); + blk_mq_map_hw_queues(map, &instance->pdev->dev, offset); qoff += map->nr_queues; offset += map->nr_queues; @@ -3663,8 +3665,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, case MFI_STAT_SCSI_IO_FAILED: case MFI_STAT_LD_INIT_IN_PROGRESS: - cmd->scmd->result = - (DID_ERROR << 16) | hdr->scsi_status; + if (hdr->scsi_status == 0xf0) + cmd->scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION; + else + cmd->scmd->result = (DID_ERROR << 16) | hdr->scsi_status; break; case MFI_STAT_SCSI_DONE_WITH_ERROR: diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 1eec23da28e2d..1eea4df9e47d3 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2043,7 +2043,10 @@ map_cmd_status(struct fusion_context *fusion, case MFI_STAT_SCSI_IO_FAILED: case MFI_STAT_LD_INIT_IN_PROGRESS: - scmd->result = (DID_ERROR << 16) | ext_status; + if (ext_status == 0xf0) + scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION; + else + scmd->result = (DID_ERROR << 16) | ext_status; break; case MFI_STAT_SCSI_DONE_WITH_ERROR: diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index 1e715fd65a7d4..ab7c5f1fc0412 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -12,7 +12,6 @@ #include #include -#include #include #include #include @@ -81,13 +80,14 @@ extern atomic64_t event_counter; /* Admin queue management definitions */ #define MPI3MR_ADMIN_REQ_Q_SIZE (2 * MPI3MR_PAGE_SIZE_4K) -#define MPI3MR_ADMIN_REPLY_Q_SIZE (4 * MPI3MR_PAGE_SIZE_4K) +#define MPI3MR_ADMIN_REPLY_Q_SIZE (8 * MPI3MR_PAGE_SIZE_4K) #define MPI3MR_ADMIN_REQ_FRAME_SZ 128 #define MPI3MR_ADMIN_REPLY_FRAME_SZ 16 /* Operational queue management definitions */ #define MPI3MR_OP_REQ_Q_QD 512 #define MPI3MR_OP_REP_Q_QD 1024 +#define MPI3MR_OP_REP_Q_QD2K 2048 #define MPI3MR_OP_REP_Q_QD4K 4096 #define MPI3MR_OP_REQ_Q_SEG_SIZE 4096 #define MPI3MR_OP_REP_Q_SEG_SIZE 4096 @@ -329,6 +329,7 @@ enum mpi3mr_reset_reason { #define 
MPI3MR_RESET_REASON_OSTYPE_SHIFT 28 #define MPI3MR_RESET_REASON_IOCNUM_SHIFT 20 + /* Queue type definitions */ enum queue_type { MPI3MR_DEFAULT_QUEUE = 0, @@ -388,6 +389,7 @@ struct mpi3mr_ioc_facts { u16 max_msix_vectors; u8 personality; u8 dma_mask; + bool max_req_limit; u8 protocol_flags; u8 sge_mod_mask; u8 sge_mod_value; @@ -457,6 +459,8 @@ struct op_req_qinfo { * @enable_irq_poll: Flag to indicate polling is enabled * @in_use: Queue is handled by poll/ISR * @qtype: Type of queue (types defined in enum queue_type) + * @qfull_watermark: Watermark defined in reply queue to avoid + * reply queue full */ struct op_reply_qinfo { u16 ci; @@ -472,6 +476,7 @@ struct op_reply_qinfo { bool enable_irq_poll; atomic_t in_use; enum queue_type qtype; + u16 qfull_watermark; }; /** @@ -1091,6 +1096,7 @@ struct scmd_priv { * @ts_update_interval: Timestamp update interval * @reset_in_progress: Reset in progress flag * @unrecoverable: Controller unrecoverable flag + * @io_admin_reset_sync: Manage state of I/O ops during an admin reset process * @prev_reset_result: Result of previous reset * @reset_mutex: Controller reset mutex * @reset_waitq: Controller reset wait queue @@ -1154,6 +1160,8 @@ struct scmd_priv { * @snapdump_trigger_active: Snapdump trigger active flag * @pci_err_recovery: PCI error recovery in progress * @block_on_pci_err: Block IO during PCI error recovery + * @reply_qfull_count: Occurrences of reply queue full avoidance kicking-in + * @prevent_reply_qfull: Enable reply queue prevention */ struct mpi3mr_ioc { struct list_head list; @@ -1277,6 +1285,7 @@ struct mpi3mr_ioc { u16 ts_update_interval; u8 reset_in_progress; u8 unrecoverable; + u8 io_admin_reset_sync; int prev_reset_result; struct mutex reset_mutex; wait_queue_head_t reset_waitq; @@ -1352,6 +1361,8 @@ struct mpi3mr_ioc { bool fw_release_trigger_active; bool pci_err_recovery; bool block_on_pci_err; + atomic_t reply_qfull_count; + bool prevent_reply_qfull; }; /** diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c index 7589f48aebc80..1532436f0f3af 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_app.c +++ b/drivers/scsi/mpi3mr/mpi3mr_app.c @@ -3060,6 +3060,29 @@ reply_queue_count_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RO(reply_queue_count); +/** + * reply_qfull_count_show - Show reply qfull count + * @dev: class device + * @attr: Device attributes + * @buf: Buffer to copy + * + * Retrieves the current value of the reply_qfull_count from the mrioc structure and + * formats it as a string for display.
+ * + * Return: sysfs_emit() return + */ +static ssize_t +reply_qfull_count_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct mpi3mr_ioc *mrioc = shost_priv(shost); + + return sysfs_emit(buf, "%u\n", atomic_read(&mrioc->reply_qfull_count)); +} + +static DEVICE_ATTR_RO(reply_qfull_count); + /** * logging_level_show - Show controller debug level * @dev: class device @@ -3152,6 +3175,7 @@ static struct attribute *mpi3mr_host_attrs[] = { &dev_attr_fw_queue_depth.attr, &dev_attr_op_req_q_count.attr, &dev_attr_reply_queue_count.attr, + &dev_attr_reply_qfull_count.attr, &dev_attr_logging_level.attr, &dev_attr_adp_state.attr, NULL, diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index 5ed31fe57474a..ec5b1ab287177 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -17,7 +17,7 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, struct mpi3_ioc_facts_data *facts_data); static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc, struct mpi3mr_drv_cmd *drv_cmd); - +static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc); static int poll_queues; module_param(poll_queues, int, 0444); MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)"); @@ -459,7 +459,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc) } do { - if (mrioc->unrecoverable) + if (mrioc->unrecoverable || mrioc->io_admin_reset_sync) break; mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci); @@ -554,7 +554,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc, } do { - if (mrioc->unrecoverable) + if (mrioc->unrecoverable || mrioc->io_admin_reset_sync) break; req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1; @@ -2104,15 +2104,22 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) } reply_qid = qidx + 1; - op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; - if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && - !mrioc->pdev->revision) - op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K; + + if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) { + if (mrioc->pdev->revision) + op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; + else + op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K; + } else + op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K; + op_reply_q->ci = 0; op_reply_q->ephase = 1; atomic_set(&op_reply_q->pend_ios, 0); atomic_set(&op_reply_q->in_use, 0); op_reply_q->enable_irq_poll = false; + op_reply_q->qfull_watermark = + op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2); if (!op_reply_q->q_segments) { retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx); @@ -2416,8 +2423,10 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, void *segment_base_addr; u16 req_sz = mrioc->facts.op_req_sz; struct segments *segments = op_req_q->q_segments; + struct op_reply_qinfo *op_reply_q = NULL; reply_qidx = op_req_q->reply_qid - 1; + op_reply_q = mrioc->op_reply_qinfo + reply_qidx; if (mrioc->unrecoverable) return -EFAULT; @@ -2448,6 +2457,15 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, goto out; } + /* Reply queue is nearing to get full, push back IOs to SML */ + if ((mrioc->prevent_reply_qfull == true) && + (atomic_read(&op_reply_q->pend_ios) > + (op_reply_q->qfull_watermark))) { + atomic_inc(&mrioc->reply_qfull_count); + retval = -EAGAIN; + goto out; + } + segment_base_addr = segments[pi / op_req_q->segment_qd].segment; req_entry = (u8 *)segment_base_addr + ((pi % 
op_req_q->segment_qd) * req_sz); @@ -3091,6 +3109,9 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, mrioc->facts.dma_mask = (facts_flags & MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >> MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT; + mrioc->facts.dma_mask = (facts_flags & + MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >> + MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT; mrioc->facts.protocol_flags = facts_data->protocol_flags; mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word); mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests); @@ -4214,6 +4235,9 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc) mrioc->shost->transportt = mpi3mr_transport_template; } + if (mrioc->facts.max_req_limit) + mrioc->prevent_reply_qfull = true; + mrioc->reply_sz = mrioc->facts.reply_sz; retval = mpi3mr_check_reset_dma_mask(mrioc); @@ -4370,6 +4394,7 @@ int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume) goto out_failed_noretry; } + mrioc->io_admin_reset_sync = 0; if (is_resume || mrioc->block_on_pci_err) { dprint_reset(mrioc, "setting up single ISR\n"); retval = mpi3mr_setup_isr(mrioc, 1); @@ -5228,6 +5253,55 @@ void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc, drv_cmd->retry_count = 0; } +/** + * mpi3mr_check_op_admin_proc - + * @mrioc: Adapter instance reference + * + * Check if any of the operation reply queues + * or the admin reply queue are currently in use. + * If any queue is in use, this function waits for + * a maximum of 10 seconds for them to become available. + * + * Return: 0 on success, non-zero on failure. + */ +static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc) +{ + + u16 timeout = 10 * 10; + u16 elapsed_time = 0; + bool op_admin_in_use = false; + + do { + op_admin_in_use = false; + + /* Check admin_reply queue first to exit early */ + if (atomic_read(&mrioc->admin_reply_q_in_use) == 1) + op_admin_in_use = true; + else { + /* Check op_reply queues */ + int i; + + for (i = 0; i < mrioc->num_queues; i++) { + if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) { + op_admin_in_use = true; + break; + } + } + } + + if (!op_admin_in_use) + break; + + msleep(100); + + } while (++elapsed_time < timeout); + + if (op_admin_in_use) + return 1; + + return 0; +} + /** * mpi3mr_soft_reset_handler - Reset the controller * @mrioc: Adapter instance reference @@ -5308,6 +5382,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT); mpi3mr_ioc_disable_intr(mrioc); + mrioc->io_admin_reset_sync = 1; if (snapdump) { mpi3mr_set_diagsave(mrioc); @@ -5335,6 +5410,16 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, ioc_err(mrioc, "Failed to issue soft reset to the ioc\n"); goto out; } + + retval = mpi3mr_check_op_admin_proc(mrioc); + if (retval) { + ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n" + "thread still processing replies even after a 10 second\n" + "timeout. 
Marking the controller as unrecoverable!\n"); + + goto out; + } + if (mrioc->num_io_throttle_group != mrioc->facts.max_io_throttle_group) { ioc_err(mrioc, diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index 1bef88130d0c0..1e8735538b238 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -4042,7 +4042,7 @@ static void mpi3mr_map_queues(struct Scsi_Host *shost) */ map->queue_offset = qoff; if (i != HCTX_TYPE_POLL) - blk_mq_pci_map_queues(map, mrioc->pdev, offset); + blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset); else blk_mq_map_queues(map); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index f2a55aa5fe650..9599d7a500286 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -53,7 +53,6 @@ #include #include #include -#include #include #include "mpt3sas_base.h" @@ -11890,7 +11889,7 @@ static void scsih_map_queues(struct Scsi_Host *shost) */ map->queue_offset = qoff; if (i != HCTX_TYPE_POLL) - blk_mq_pci_map_queues(map, ioc->pdev, offset); + blk_mq_map_hw_queues(map, &ioc->pdev->dev, offset); else blk_mq_map_queues(map); diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 33e1eba62ca12..b53b1ae5b74c3 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -101,7 +101,7 @@ static void pm8001_map_queues(struct Scsi_Host *shost) struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; if (pm8001_ha->number_of_intr > 1) { - blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1); + blk_mq_map_hw_queues(qmap, &pm8001_ha->pdev->dev, 1); return; } diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index ced6721380a85..c46470e0cf63b 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -56,7 +56,6 @@ #include #include #include -#include #include "pm8001_defs.h" #define DRV_NAME "pm80xx" diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 8958547ac111a..fed07b1460702 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -2867,7 +2867,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", cpu_to_le32(upper_32_bits(dma_handle)), cpu_to_le32(lower_32_bits(dma_handle)), - cpu_to_le32(sg_dma_len(sg_next(s)))); + cpu_to_le32(sg_dma_len(s))); remseg--; } dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 8f4cc136a9c9c..8ee2e337c9e1b 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -8,7 +8,6 @@ #include #include #include -#include #include static struct nvme_fc_port_template qla_nvme_fc_transport; @@ -841,7 +840,7 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport, { struct scsi_qla_host *vha = lport->private; - blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset); + blk_mq_map_hw_queues(map, &vha->hw->pdev->dev, vha->irq_offset); } static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 7ab717ed72327..31535beaaa161 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -8071,7 +8070,8 @@ static void qla2xxx_map_queues(struct Scsi_Host *shost) if (USER_CTRL_IRQ(vha->hw) 
|| !vha->hw->mqiobase) blk_mq_map_queues(qmap); else - blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); + blk_mq_map_hw_queues(qmap, &vha->hw->pdev->dev, + vha->irq_offset); } struct scsi_host_template qla2xxx_driver_template = { diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index c9dde1ac9523e..3023b07dc483b 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1653,13 +1653,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req) if (in_flight) __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); - /* - * Only clear the driver-private command data if the LLD does not supply - * a function to initialize that data. - */ - if (!shost->hostt->init_cmd_priv) - memset(cmd + 1, 0, shost->hostt->cmd_size); - cmd->prot_op = SCSI_PROT_NORMAL; if (blk_rq_bytes(req)) cmd->sc_data_direction = rq_dma_dir(req); @@ -1826,6 +1819,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, if (!scsi_host_queue_ready(q, shost, sdev, cmd)) goto out_dec_target_busy; + /* + * Only clear the driver-private command data if the LLD does not supply + * a function to initialize that data. + */ + if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv) + memset(cmd + 1, 0, shost->hostt->cmd_size); + if (!(req->rq_flags & RQF_DONTPREP)) { ret = scsi_prepare_cmd(req); if (ret != BLK_STS_OK) diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 042329b74c6e6..fe08af4dcb67c 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -245,7 +245,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, } ret = sbitmap_init_node(&sdev->budget_map, scsi_device_max_queue_depth(sdev), - new_shift, GFP_KERNEL, + new_shift, GFP_NOIO, sdev->request_queue->node, false, true); if (!ret) sbitmap_resize(&sdev->budget_map, depth); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 9b47f91c5b972..8274fe0ec7146 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -3209,11 +3209,14 @@ iscsi_set_host_param(struct iscsi_transport *transport, } /* see similar check in iscsi_if_set_param() */ - if (strlen(data) > ev->u.set_host_param.len) - return -EINVAL; + if (strlen(data) > ev->u.set_host_param.len) { + err = -EINVAL; + goto out; + } err = transport->set_host_param(shost, ev->u.set_host_param.param, data, ev->u.set_host_param.len); +out: scsi_host_put(shost); return err; } diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 870f37b705464..d919a74746a05 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include #include @@ -5247,7 +5247,7 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) ctrl_info->error_buffer_length = ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; - if (reset_devices) + if (is_kdump_kernel()) max_transfer_size = min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE_KDUMP); else @@ -5276,7 +5276,7 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) u16 num_elements_per_iq; u16 num_elements_per_oq; - if (reset_devices) { + if (is_kdump_kernel()) { num_queue_groups = 1; } else { int num_cpus; @@ -6547,10 +6547,10 @@ static void pqi_map_queues(struct Scsi_Host *shost) struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); if (!ctrl_info->disable_managed_interrupts) - return 
blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], - ctrl_info->pci_dev, 0); + blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], + &ctrl_info->pci_dev->dev, 0); else - return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]); + blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]); } static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device) @@ -8288,12 +8288,12 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) u32 product_id; if (reset_devices) { - if (pqi_is_fw_triage_supported(ctrl_info)) { + if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) { rc = sis_wait_for_fw_triage_completion(ctrl_info); if (rc) return rc; } - if (sis_is_ctrl_logging_supported(ctrl_info)) { + if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) { sis_notify_kdump(ctrl_info); rc = sis_wait_for_ctrl_logging_completion(ctrl_info); if (rc) @@ -8344,7 +8344,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) ctrl_info->product_id = (u8)product_id; ctrl_info->product_revision = (u8)(product_id >> 8); - if (reset_devices) { + if (is_kdump_kernel()) { if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) ctrl_info->max_outstanding_requests = @@ -8480,7 +8480,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) if (rc) return rc; - if (ctrl_info->ctrl_logging_supported && !reset_devices) { + if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) { pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE); pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); } diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 0dc37fc6f2367..a17441635ff3a 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -4119,7 +4119,7 @@ static void validate_options(void) */ static int __init st_setup(char *str) { - int i, len, ints[5]; + int i, len, ints[ARRAY_SIZE(parms) + 1]; char *stp; stp = get_options(str, ARRAY_SIZE(ints), ints); diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c index 242570a5e5654..455c1fd1490fd 100644 --- a/drivers/slimbus/messaging.c +++ b/drivers/slimbus/messaging.c @@ -148,8 +148,9 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn) } ret = ctrl->xfer_msg(ctrl, txn); - - if (!ret && need_tid && !txn->msg->comp) { + if (ret == -ETIMEDOUT) { + slim_free_txn_tid(ctrl, txn); + } else if (!ret && need_tid && !txn->msg->comp) { unsigned long ms = txn->rl + HZ; time_left = wait_for_completion_timeout(txn->comp, diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c index 5ea8887828c06..3ed8161d7d28b 100644 --- a/drivers/soc/imx/soc-imx8m.c +++ b/drivers/soc/imx/soc-imx8m.c @@ -30,11 +30,9 @@ struct imx8_soc_data { char *name; - int (*soc_revision)(u32 *socrev); + int (*soc_revision)(u32 *socrev, u64 *socuid); }; -static u64 soc_uid; - #ifdef CONFIG_HAVE_ARM_SMCCC static u32 imx8mq_soc_revision_from_atf(void) { @@ -51,24 +49,22 @@ static u32 imx8mq_soc_revision_from_atf(void) static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; }; #endif -static int imx8mq_soc_revision(u32 *socrev) +static int imx8mq_soc_revision(u32 *socrev, u64 *socuid) { - struct device_node *np; + struct device_node *np __free(device_node) = + of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp"); void __iomem *ocotp_base; u32 magic; u32 rev; struct clk *clk; int ret; - np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp"); 
if (!np) return -EINVAL; ocotp_base = of_iomap(np, 0); - if (!ocotp_base) { - ret = -EINVAL; - goto err_iomap; - } + if (!ocotp_base) + return -EINVAL; clk = of_clk_get_by_name(np, NULL); if (IS_ERR(clk)) { @@ -89,44 +85,39 @@ static int imx8mq_soc_revision(u32 *socrev) rev = REV_B1; } - soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH); - soc_uid <<= 32; - soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW); + *socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH); + *socuid <<= 32; + *socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW); *socrev = rev; clk_disable_unprepare(clk); clk_put(clk); iounmap(ocotp_base); - of_node_put(np); return 0; err_clk: iounmap(ocotp_base); -err_iomap: - of_node_put(np); return ret; } -static int imx8mm_soc_uid(void) +static int imx8mm_soc_uid(u64 *socuid) { + struct device_node *np __free(device_node) = + of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp"); void __iomem *ocotp_base; - struct device_node *np; struct clk *clk; int ret = 0; u32 offset = of_machine_is_compatible("fsl,imx8mp") ? IMX8MP_OCOTP_UID_OFFSET : 0; - np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp"); if (!np) return -EINVAL; ocotp_base = of_iomap(np, 0); - if (!ocotp_base) { - ret = -EINVAL; - goto err_iomap; - } + if (!ocotp_base) + return -EINVAL; clk = of_clk_get_by_name(np, NULL); if (IS_ERR(clk)) { @@ -136,47 +127,36 @@ static int imx8mm_soc_uid(void) clk_prepare_enable(clk); - soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset); - soc_uid <<= 32; - soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset); + *socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset); + *socuid <<= 32; + *socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset); clk_disable_unprepare(clk); clk_put(clk); err_clk: iounmap(ocotp_base); -err_iomap: - of_node_put(np); - return ret; } -static int imx8mm_soc_revision(u32 *socrev) +static int imx8mm_soc_revision(u32 *socrev, u64 *socuid) { - struct device_node *np; + struct device_node *np __free(device_node) = + of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop"); void __iomem *anatop_base; - int ret; - np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop"); if (!np) return -EINVAL; anatop_base = of_iomap(np, 0); - if (!anatop_base) { - ret = -EINVAL; - goto err_iomap; - } + if (!anatop_base) + return -EINVAL; *socrev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM); iounmap(anatop_base); - of_node_put(np); - return imx8mm_soc_uid(); - -err_iomap: - of_node_put(np); - return ret; + return imx8mm_soc_uid(socuid); } static const struct imx8_soc_data imx8mq_soc_data = { @@ -207,21 +187,34 @@ static __maybe_unused const struct of_device_id imx8_soc_match[] = { { } }; -#define imx8_revision(soc_rev) \ - soc_rev ? \ - kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf, soc_rev & 0xf) : \ +#define imx8_revision(dev, soc_rev) \ + (soc_rev) ? 
\ + devm_kasprintf((dev), GFP_KERNEL, "%d.%d", ((soc_rev) >> 4) & 0xf, (soc_rev) & 0xf) : \ "unknown" +static void imx8m_unregister_soc(void *data) +{ + soc_device_unregister(data); +} + +static void imx8m_unregister_cpufreq(void *data) +{ + platform_device_unregister(data); +} + static int imx8m_soc_probe(struct platform_device *pdev) { struct soc_device_attribute *soc_dev_attr; - struct soc_device *soc_dev; + struct platform_device *cpufreq_dev; + const struct imx8_soc_data *data; + struct device *dev = &pdev->dev; const struct of_device_id *id; + struct soc_device *soc_dev; u32 soc_rev = 0; - const struct imx8_soc_data *data; + u64 soc_uid = 0; int ret; - soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); + soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) return -ENOMEM; @@ -229,58 +222,52 @@ static int imx8m_soc_probe(struct platform_device *pdev) ret = of_property_read_string(of_root, "model", &soc_dev_attr->machine); if (ret) - goto free_soc; + return ret; id = of_match_node(imx8_soc_match, of_root); - if (!id) { - ret = -ENODEV; - goto free_soc; - } + if (!id) + return -ENODEV; data = id->data; if (data) { soc_dev_attr->soc_id = data->name; if (data->soc_revision) { - ret = data->soc_revision(&soc_rev); + ret = data->soc_revision(&soc_rev, &soc_uid); if (ret) - goto free_soc; + return ret; } } - soc_dev_attr->revision = imx8_revision(soc_rev); - if (!soc_dev_attr->revision) { - ret = -ENOMEM; - goto free_soc; - } + soc_dev_attr->revision = imx8_revision(dev, soc_rev); + if (!soc_dev_attr->revision) + return -ENOMEM; - soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid); - if (!soc_dev_attr->serial_number) { - ret = -ENOMEM; - goto free_rev; - } + soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, "%016llX", soc_uid); + if (!soc_dev_attr->serial_number) + return -ENOMEM; soc_dev = soc_device_register(soc_dev_attr); - if (IS_ERR(soc_dev)) { - ret = PTR_ERR(soc_dev); - goto free_serial_number; - } + if (IS_ERR(soc_dev)) + return PTR_ERR(soc_dev); + + ret = devm_add_action(dev, imx8m_unregister_soc, soc_dev); + if (ret) + return ret; pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id, soc_dev_attr->revision); - if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) - platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0); + if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) { + cpufreq_dev = platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0); + if (IS_ERR(cpufreq_dev)) + return dev_err_probe(dev, PTR_ERR(cpufreq_dev), + "Failed to register imx-cpufreq-dev device\n"); + ret = devm_add_action(dev, imx8m_unregister_cpufreq, cpufreq_dev); + if (ret) + return ret; + } return 0; - -free_serial_number: - kfree(soc_dev_attr->serial_number); -free_rev: - if (strcmp(soc_dev_attr->revision, "unknown")) - kfree(soc_dev_attr->revision); -free_soc: - kfree(soc_dev_attr); - return ret; } static struct platform_driver imx8m_soc_driver = { diff --git a/drivers/soc/loongson/loongson2_guts.c b/drivers/soc/loongson/loongson2_guts.c index ef352a0f50220..1fcf7ca8083e1 100644 --- a/drivers/soc/loongson/loongson2_guts.c +++ b/drivers/soc/loongson/loongson2_guts.c @@ -114,8 +114,11 @@ static int loongson2_guts_probe(struct platform_device *pdev) if (of_property_read_string(root, "model", &machine)) of_property_read_string_index(root, "compatible", 0, &machine); of_node_put(root); - if (machine) + if (machine) { soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL); + if (!soc_dev_attr.machine) + return -ENOMEM; + } svr = 
loongson2_guts_get_svr(); soc_die = loongson2_soc_die_match(svr, loongson2_soc_die); diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c index 328b6153b2be6..71be378d2e43a 100644 --- a/drivers/soc/qcom/pdr_interface.c +++ b/drivers/soc/qcom/pdr_interface.c @@ -75,7 +75,6 @@ static int pdr_locator_new_server(struct qmi_handle *qmi, { struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, locator_hdl); - struct pdr_service *pds; mutex_lock(&pdr->lock); /* Create a local client port for QMI communication */ @@ -87,12 +86,7 @@ static int pdr_locator_new_server(struct qmi_handle *qmi, mutex_unlock(&pdr->lock); /* Service pending lookup requests */ - mutex_lock(&pdr->list_lock); - list_for_each_entry(pds, &pdr->lookups, node) { - if (pds->need_locator_lookup) - schedule_work(&pdr->locator_work); - } - mutex_unlock(&pdr->list_lock); + schedule_work(&pdr->locator_work); return 0; } diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c index b1118d37779e4..bba8d86ae1bb0 100644 --- a/drivers/soc/samsung/exynos-chipid.c +++ b/drivers/soc/samsung/exynos-chipid.c @@ -131,6 +131,8 @@ static int exynos_chipid_probe(struct platform_device *pdev) soc_dev_attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%x", soc_info.revision); + if (!soc_dev_attr->revision) + return -ENOMEM; soc_dev_attr->soc_id = product_id_to_soc_id(soc_info.product_id); if (!soc_dev_attr->soc_id) { pr_err("Unknown SoC\n"); diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c index f1a4df6cfebd9..2bcb733de4de4 100644 --- a/drivers/soundwire/slave.c +++ b/drivers/soundwire/slave.c @@ -12,6 +12,7 @@ static void sdw_slave_release(struct device *dev) { struct sdw_slave *slave = dev_to_sdw_dev(dev); + of_node_put(slave->dev.of_node); mutex_destroy(&slave->sdw_dev_lock); kfree(slave); } diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index e1b9b12357877..5926e004d9a65 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -1162,7 +1162,8 @@ static void bcm2835_spi_cleanup(struct spi_device *spi) sizeof(u32), DMA_TO_DEVICE); - gpiod_put(bs->cs_gpio); + if (!IS_ERR(bs->cs_gpio)) + gpiod_put(bs->cs_gpio); spi_set_csgpiod(spi, 0, NULL); kfree(target); @@ -1225,7 +1226,12 @@ static int bcm2835_spi_setup(struct spi_device *spi) struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); struct bcm2835_spidev *target = spi_get_ctldata(spi); struct gpiod_lookup_table *lookup __free(kfree) = NULL; - int ret; + const char *pinctrl_compats[] = { + "brcm,bcm2835-gpio", + "brcm,bcm2711-gpio", + "brcm,bcm7211-gpio", + }; + int ret, i; u32 cs; if (!target) { @@ -1290,6 +1296,14 @@ static int bcm2835_spi_setup(struct spi_device *spi) goto err_cleanup; } + for (i = 0; i < ARRAY_SIZE(pinctrl_compats); i++) { + if (of_find_compatible_node(NULL, NULL, pinctrl_compats[i])) + break; + } + + if (i == ARRAY_SIZE(pinctrl_compats)) + return 0; + /* * TODO: The code below is a slightly better alternative to the utter * abuse of the GPIO API that I found here before. 
It creates a diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 73b1edd0531b4..f9463f263fba1 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1634,6 +1634,12 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi) int ret = PTR_ERR(cqspi->rx_chan); cqspi->rx_chan = NULL; + if (ret == -ENODEV) { + /* DMA support is not mandatory */ + dev_info(&cqspi->pdev->dev, "No Rx DMA available\n"); + return 0; + } + return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n"); } init_completion(&cqspi->rx_dma_complete); diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c index aed98ab143346..6dcba0e0ddaa3 100644 --- a/drivers/spi/spi-cadence-xspi.c +++ b/drivers/spi/spi-cadence-xspi.c @@ -432,7 +432,7 @@ static bool cdns_mrvl_xspi_setup_clock(struct cdns_xspi_dev *cdns_xspi, u32 clk_reg; bool update_clk = false; - while (i < ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list)) { + while (i < (ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list) - 1)) { clk_val = MRVL_XSPI_CLOCK_DIVIDED( cdns_mrvl_xspi_clk_div_list[i]); if (clk_val <= requested_clk) diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c index 7c1a9a9853733..92b63a7f20415 100644 --- a/drivers/spi/spi-microchip-core.c +++ b/drivers/spi/spi-microchip-core.c @@ -70,8 +70,7 @@ #define INT_RX_CHANNEL_OVERFLOW BIT(2) #define INT_TX_CHANNEL_UNDERRUN BIT(3) -#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \ - CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT) +#define INT_ENABLE_MASK (CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT) #define REG_CONTROL (0x00) #define REG_FRAME_SIZE (0x04) @@ -133,10 +132,15 @@ static inline void mchp_corespi_disable(struct mchp_corespi *spi) mchp_corespi_write(spi, REG_CONTROL, control); } -static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi) +static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi, int fifo_max) { - while (spi->rx_len >= spi->n_bytes && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) { - u32 data = mchp_corespi_read(spi, REG_RX_DATA); + for (int i = 0; i < fifo_max; i++) { + u32 data; + + while (mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY) + ; + + data = mchp_corespi_read(spi, REG_RX_DATA); spi->rx_len -= spi->n_bytes; @@ -211,11 +215,10 @@ static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len) mchp_corespi_write(spi, REG_FRAMESUP, len); } -static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi) +static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi, int fifo_max) { - int fifo_max, i = 0; + int i = 0; - fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes); mchp_corespi_set_xfer_size(spi, fifo_max); while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) { @@ -413,19 +416,6 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id) if (intfield == 0) return IRQ_NONE; - if (intfield & INT_TXDONE) - mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE); - - if (intfield & INT_RXRDY) { - mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY); - - if (spi->rx_len) - mchp_corespi_read_fifo(spi); - } - - if (!spi->rx_len && !spi->tx_len) - finalise = true; - if (intfield & INT_RX_CHANNEL_OVERFLOW) { mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW); finalise = true; @@ -512,9 +502,14 @@ static int mchp_corespi_transfer_one(struct spi_controller *host, mchp_corespi_write(spi, REG_SLAVE_SELECT, 
spi->pending_slave_select); - while (spi->tx_len) - mchp_corespi_write_fifo(spi); + while (spi->tx_len) { + int fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes); + + mchp_corespi_write_fifo(spi, fifo_max); + mchp_corespi_read_fifo(spi, fifo_max); + } + spi_finalize_current_transfer(host); return 1; } diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig index 8d48c61961a6b..353e6ee2c1450 100644 --- a/drivers/staging/rtl8723bs/Kconfig +++ b/drivers/staging/rtl8723bs/Kconfig @@ -4,6 +4,7 @@ config RTL8723BS depends on WLAN && MMC && CFG80211 depends on m select CRYPTO + select CRYPTO_LIB_AES select CRYPTO_LIB_ARC4 help This option enables support for RTL8723BS SDIO drivers, such as diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 5fab33adf58ed..97787002080a1 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -1745,8 +1745,6 @@ static int vchiq_probe(struct platform_device *pdev) if (ret) goto failed_platform_init; - vchiq_debugfs_init(&mgmt->state); - dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n", VCHIQ_VERSION, VCHIQ_VERSION_MIN); @@ -1760,6 +1758,8 @@ static int vchiq_probe(struct platform_device *pdev) goto error_exit; } + vchiq_debugfs_init(&mgmt->state); + bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio"); bcm2835_camera = vchiq_device_register(&pdev->dev, "bcm2835-camera"); @@ -1786,7 +1786,8 @@ static void vchiq_remove(struct platform_device *pdev) kthread_stop(mgmt->state.slot_handler_thread); arm_state = vchiq_platform_get_arm_state(&mgmt->state); - kthread_stop(arm_state->ka_thread); + if (!IS_ERR_OR_NULL(arm_state->ka_thread)) + kthread_stop(arm_state->ka_thread); } static struct platform_driver vchiq_driver = { diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index ea14a38356814..61c065702350e 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -2243,7 +2243,7 @@ spc_emulate_report_supp_op_codes(struct se_cmd *cmd) response_length += spc_rsoc_encode_command_descriptor( &buf[response_length], rctd, descr); } - put_unaligned_be32(response_length - 3, buf); + put_unaligned_be32(response_length - 4, buf); } else { response_length = spc_rsoc_encode_one_command_descriptor( &buf[response_length], rctd, descr, diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c index 322a543b8c278..d0f397c902420 100644 --- a/drivers/tee/optee/supp.c +++ b/drivers/tee/optee/supp.c @@ -80,7 +80,6 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, struct optee *optee = tee_get_drvdata(ctx->teedev); struct optee_supp *supp = &optee->supp; struct optee_supp_req *req; - bool interruptable; u32 ret; /* @@ -111,36 +110,18 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, /* * Wait for supplicant to process and return result, once we've * returned from wait_for_completion(&req->c) successfully we have - * exclusive access again. + * exclusive access again. Allow the wait to be killable such that + * the wait doesn't turn into an indefinite state if the supplicant + * gets hung for some reason. 
*/ - while (wait_for_completion_interruptible(&req->c)) { + if (wait_for_completion_killable(&req->c)) { mutex_lock(&supp->mutex); - interruptable = !supp->ctx; - if (interruptable) { - /* - * There's no supplicant available and since the - * supp->mutex currently is held none can - * become available until the mutex released - * again. - * - * Interrupting an RPC to supplicant is only - * allowed as a way of slightly improving the user - * experience in case the supplicant hasn't been - * started yet. During normal operation the supplicant - * will serve all requests in a timely manner and - * interrupting then wouldn't make sense. - */ - if (req->in_queue) { - list_del(&req->link); - req->in_queue = false; - } + if (req->in_queue) { + list_del(&req->link); + req->in_queue = false; } mutex_unlock(&supp->mutex); - - if (interruptable) { - req->ret = TEEC_ERROR_COMMUNICATION; - break; - } + req->ret = TEEC_ERROR_COMMUNICATION; } ret = req->ret; diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c index 280071be30b15..6b7ab1814c12d 100644 --- a/drivers/thermal/cpufreq_cooling.c +++ b/drivers/thermal/cpufreq_cooling.c @@ -57,8 +57,6 @@ struct time_in_idle { * @max_level: maximum cooling level. One less than total number of valid * cpufreq frequencies. * @em: Reference on the Energy Model of the device - * @cdev: thermal_cooling_device pointer to keep track of the - * registered cooling device. * @policy: cpufreq policy. * @cooling_ops: cpufreq callbacks to thermal cooling device ops * @idle_time: idle time stats diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c index 863e7a4272e66..b887e48e8c7e6 100644 --- a/drivers/thermal/gov_bang_bang.c +++ b/drivers/thermal/gov_bang_bang.c @@ -67,6 +67,7 @@ static void bang_bang_control(struct thermal_zone_device *tz, const struct thermal_trip *trip, bool crossed_up) { + const struct thermal_trip_desc *td = trip_to_trip_desc(trip); struct thermal_instance *instance; lockdep_assert_held(&tz->lock); @@ -75,10 +76,8 @@ static void bang_bang_control(struct thermal_zone_device *tz, thermal_zone_trip_id(tz, trip), trip->temperature, tz->temperature, trip->hysteresis); - list_for_each_entry(instance, &tz->thermal_instances, tz_node) { - if (instance->trip == trip) - bang_bang_set_instance_target(instance, crossed_up); - } + list_for_each_entry(instance, &td->thermal_instances, trip_node) + bang_bang_set_instance_target(instance, crossed_up); } static void bang_bang_manage(struct thermal_zone_device *tz) @@ -104,8 +103,8 @@ static void bang_bang_manage(struct thermal_zone_device *tz) * to the thermal zone temperature and the trip point threshold. 
*/ turn_on = tz->temperature >= td->threshold; - list_for_each_entry(instance, &tz->thermal_instances, tz_node) { - if (!instance->initialized && instance->trip == trip) + list_for_each_entry(instance, &td->thermal_instances, trip_node) { + if (!instance->initialized) bang_bang_set_instance_target(instance, turn_on); } } diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c index ce0ea571ed67a..d37d57d48c389 100644 --- a/drivers/thermal/gov_fair_share.c +++ b/drivers/thermal/gov_fair_share.c @@ -44,7 +44,7 @@ static int get_trip_level(struct thermal_zone_device *tz) /** * fair_share_throttle - throttles devices associated with the given zone * @tz: thermal_zone_device - * @trip: trip point + * @td: trip point descriptor * @trip_level: number of trips crossed by the zone temperature * * Throttling Logic: This uses three parameters to calculate the new @@ -61,29 +61,23 @@ static int get_trip_level(struct thermal_zone_device *tz) * new_state of cooling device = P3 * P2 * P1 */ static void fair_share_throttle(struct thermal_zone_device *tz, - const struct thermal_trip *trip, + const struct thermal_trip_desc *td, int trip_level) { struct thermal_instance *instance; int total_weight = 0; int nr_instances = 0; - list_for_each_entry(instance, &tz->thermal_instances, tz_node) { - if (instance->trip != trip) - continue; - + list_for_each_entry(instance, &td->thermal_instances, trip_node) { total_weight += instance->weight; nr_instances++; } - list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + list_for_each_entry(instance, &td->thermal_instances, trip_node) { struct thermal_cooling_device *cdev = instance->cdev; u64 dividend; u32 divisor; - if (instance->trip != trip) - continue; - dividend = trip_level; dividend *= cdev->max_state; divisor = tz->num_trips; @@ -116,7 +110,7 @@ static void fair_share_manage(struct thermal_zone_device *tz) trip->type == THERMAL_TRIP_HOT) continue; - fair_share_throttle(tz, trip, trip_level); + fair_share_throttle(tz, td, trip_level); } } diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c index 1b2345a697c5a..90b4bfd9237bc 100644 --- a/drivers/thermal/gov_power_allocator.c +++ b/drivers/thermal/gov_power_allocator.c @@ -97,11 +97,9 @@ struct power_allocator_params { struct power_actor *power; }; -static bool power_actor_is_valid(struct power_allocator_params *params, - struct thermal_instance *instance) +static bool power_actor_is_valid(struct thermal_instance *instance) { - return (instance->trip == params->trip_max && - cdev_is_power_actor(instance->cdev)); + return cdev_is_power_actor(instance->cdev); } /** @@ -118,13 +116,14 @@ static bool power_actor_is_valid(struct power_allocator_params *params, static u32 estimate_sustainable_power(struct thermal_zone_device *tz) { struct power_allocator_params *params = tz->governor_data; + const struct thermal_trip_desc *td = trip_to_trip_desc(params->trip_max); struct thermal_cooling_device *cdev; struct thermal_instance *instance; u32 sustainable_power = 0; u32 min_power; - list_for_each_entry(instance, &tz->thermal_instances, tz_node) { - if (!power_actor_is_valid(params, instance)) + list_for_each_entry(instance, &td->thermal_instances, trip_node) { + if (!power_actor_is_valid(instance)) continue; cdev = instance->cdev; @@ -364,7 +363,7 @@ static void divvy_up_power(struct power_actor *power, int num_actors, for (i = 0; i < num_actors; i++) { struct power_actor *pa = &power[i]; - u64 req_range = (u64)pa->req_power * power_range; + u64 
req_range = (u64)pa->weighted_req_power * power_range; pa->granted_power = DIV_ROUND_CLOSEST_ULL(req_range, total_req_power); @@ -400,6 +399,7 @@ static void divvy_up_power(struct power_actor *power, int num_actors, static void allocate_power(struct thermal_zone_device *tz, int control_temp) { struct power_allocator_params *params = tz->governor_data; + const struct thermal_trip_desc *td = trip_to_trip_desc(params->trip_max); unsigned int num_actors = params->num_actors; struct power_actor *power = params->power; struct thermal_cooling_device *cdev; @@ -417,10 +417,10 @@ static void allocate_power(struct thermal_zone_device *tz, int control_temp) /* Clean all buffers for new power estimations */ memset(power, 0, params->buffer_size); - list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + list_for_each_entry(instance, &td->thermal_instances, trip_node) { struct power_actor *pa = &power[i]; - if (!power_actor_is_valid(params, instance)) + if (!power_actor_is_valid(instance)) continue; cdev = instance->cdev; @@ -454,10 +454,10 @@ static void allocate_power(struct thermal_zone_device *tz, int control_temp) power_range); i = 0; - list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + list_for_each_entry(instance, &td->thermal_instances, trip_node) { struct power_actor *pa = &power[i]; - if (!power_actor_is_valid(params, instance)) + if (!power_actor_is_valid(instance)) continue; power_actor_set_power(instance->cdev, instance, @@ -538,12 +538,13 @@ static void reset_pid_controller(struct power_allocator_params *params) static void allow_maximum_power(struct thermal_zone_device *tz) { struct power_allocator_params *params = tz->governor_data; + const struct thermal_trip_desc *td = trip_to_trip_desc(params->trip_max); struct thermal_cooling_device *cdev; struct thermal_instance *instance; u32 req_power; - list_for_each_entry(instance, &tz->thermal_instances, tz_node) { - if (!power_actor_is_valid(params, instance)) + list_for_each_entry(instance, &td->thermal_instances, trip_node) { + if (!power_actor_is_valid(instance)) continue; cdev = instance->cdev; @@ -581,13 +582,16 @@ static void allow_maximum_power(struct thermal_zone_device *tz) static int check_power_actors(struct thermal_zone_device *tz, struct power_allocator_params *params) { + const struct thermal_trip_desc *td; struct thermal_instance *instance; int ret = 0; - list_for_each_entry(instance, &tz->thermal_instances, tz_node) { - if (instance->trip != params->trip_max) - continue; + if (!params->trip_max) + return 0; + + td = trip_to_trip_desc(params->trip_max); + list_for_each_entry(instance, &td->thermal_instances, trip_node) { if (!cdev_is_power_actor(instance->cdev)) { dev_warn(&tz->device, "power_allocator: %s is not a power actor\n", instance->cdev->type); @@ -631,30 +635,43 @@ static int allocate_actors_buffer(struct power_allocator_params *params, return ret; } +static void power_allocator_update_weight(struct power_allocator_params *params) +{ + const struct thermal_trip_desc *td; + struct thermal_instance *instance; + + if (!params->trip_max) + return; + + td = trip_to_trip_desc(params->trip_max); + + params->total_weight = 0; + list_for_each_entry(instance, &td->thermal_instances, trip_node) + if (power_actor_is_valid(instance)) + params->total_weight += instance->weight; +} + static void power_allocator_update_tz(struct thermal_zone_device *tz, enum thermal_notify_event reason) { struct power_allocator_params *params = tz->governor_data; + const struct thermal_trip_desc *td = 
trip_to_trip_desc(params->trip_max); struct thermal_instance *instance; int num_actors = 0; switch (reason) { case THERMAL_TZ_BIND_CDEV: case THERMAL_TZ_UNBIND_CDEV: - list_for_each_entry(instance, &tz->thermal_instances, tz_node) - if (power_actor_is_valid(params, instance)) + list_for_each_entry(instance, &td->thermal_instances, trip_node) + if (power_actor_is_valid(instance)) num_actors++; - if (num_actors == params->num_actors) - return; + if (num_actors != params->num_actors) + allocate_actors_buffer(params, num_actors); - allocate_actors_buffer(params, num_actors); - break; + fallthrough; case THERMAL_INSTANCE_WEIGHT_CHANGED: - params->total_weight = 0; - list_for_each_entry(instance, &tz->thermal_instances, tz_node) - if (power_actor_is_valid(params, instance)) - params->total_weight += instance->weight; + power_allocator_update_weight(params); break; default: break; @@ -720,6 +737,8 @@ static int power_allocator_bind(struct thermal_zone_device *tz) tz->governor_data = params; + power_allocator_update_weight(params); + return 0; free_params: diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c index fd5527188cf91..ea4bf88d37f33 100644 --- a/drivers/thermal/gov_step_wise.c +++ b/drivers/thermal/gov_step_wise.c @@ -66,9 +66,10 @@ static unsigned long get_target_state(struct thermal_instance *instance, } static void thermal_zone_trip_update(struct thermal_zone_device *tz, - const struct thermal_trip *trip, + const struct thermal_trip_desc *td, int trip_threshold) { + const struct thermal_trip *trip = &td->trip; enum thermal_trend trend = get_tz_trend(tz, trip); int trip_id = thermal_zone_trip_id(tz, trip); struct thermal_instance *instance; @@ -82,12 +83,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n", trip_id, trip->type, trip_threshold, trend, throttle); - list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + list_for_each_entry(instance, &td->thermal_instances, trip_node) { int old_target; - if (instance->trip != trip) - continue; - old_target = instance->target; instance->target = get_target_state(instance, trend, throttle); @@ -127,11 +125,13 @@ static void step_wise_manage(struct thermal_zone_device *tz) trip->type == THERMAL_TRIP_HOT) continue; - thermal_zone_trip_update(tz, trip, td->threshold); + thermal_zone_trip_update(tz, td, td->threshold); } - list_for_each_entry(instance, &tz->thermal_instances, tz_node) - thermal_cdev_update(instance->cdev); + for_each_trip_desc(tz, td) { + list_for_each_entry(instance, &td->thermal_instances, trip_node) + thermal_cdev_update(instance->cdev); + } } static struct thermal_governor thermal_gov_step_wise = { diff --git a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c index ab8bfb5a3946b..40ab6b2d4fb05 100644 --- a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c @@ -45,6 +45,9 @@ static int int3402_thermal_probe(struct platform_device *pdev) struct int3402_thermal_data *d; int ret; + if (!adev) + return -ENODEV; + if (!acpi_has_method(adev->handle, "_TMP")) return -ENODEV; diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c index 1997e91bb3be9..4b3225377e8f8 100644 --- a/drivers/thermal/mediatek/lvts_thermal.c +++ b/drivers/thermal/mediatek/lvts_thermal.c @@ -65,7 +65,7 @@ #define LVTS_HW_FILTER 0x0 #define LVTS_TSSEL_CONF 
0x13121110 #define LVTS_CALSCALE_CONF 0x300 -#define LVTS_MONINT_CONF 0x8300318C +#define LVTS_MONINT_CONF 0x0300318C #define LVTS_MONINT_OFFSET_SENSOR0 0xC #define LVTS_MONINT_OFFSET_SENSOR1 0x180 @@ -91,8 +91,6 @@ #define LVTS_MSR_READ_TIMEOUT_US 400 #define LVTS_MSR_READ_WAIT_US (LVTS_MSR_READ_TIMEOUT_US / 2) -#define LVTS_HW_TSHUT_TEMP 105000 - #define LVTS_MINIMUM_THRESHOLD 20000 static int golden_temp = LVTS_GOLDEN_TEMP_DEFAULT; @@ -145,7 +143,6 @@ struct lvts_ctrl { struct lvts_sensor sensors[LVTS_SENSOR_MAX]; const struct lvts_data *lvts_data; u32 calibration[LVTS_SENSOR_MAX]; - u32 hw_tshut_raw_temp; u8 valid_sensor_mask; int mode; void __iomem *base; @@ -837,14 +834,6 @@ static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td, */ lvts_ctrl[i].mode = lvts_data->lvts_ctrl[i].mode; - /* - * The temperature to raw temperature must be done - * after initializing the calibration. - */ - lvts_ctrl[i].hw_tshut_raw_temp = - lvts_temp_to_raw(LVTS_HW_TSHUT_TEMP, - lvts_data->temp_factor); - lvts_ctrl[i].low_thresh = INT_MIN; lvts_ctrl[i].high_thresh = INT_MIN; } @@ -860,6 +849,32 @@ static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td, return 0; } +static void lvts_ctrl_monitor_enable(struct device *dev, struct lvts_ctrl *lvts_ctrl, bool enable) +{ + /* + * Bitmaps to enable each sensor on filtered mode in the MONCTL0 + * register. + */ + static const u8 sensor_filt_bitmap[] = { BIT(0), BIT(1), BIT(2), BIT(3) }; + u32 sensor_map = 0; + int i; + + if (lvts_ctrl->mode != LVTS_MSR_FILTERED_MODE) + return; + + if (enable) { + lvts_for_each_valid_sensor(i, lvts_ctrl) + sensor_map |= sensor_filt_bitmap[i]; + } + + /* + * Bits: + * 9: Single point access flow + * 0-3: Enable sensing point 0-3 + */ + writel(sensor_map | BIT(9), LVTS_MONCTL0(lvts_ctrl->base)); +} + /* * At this point the configuration register is the only place in the * driver where we write multiple values. 
Per hardware constraint, @@ -893,7 +908,6 @@ static int lvts_irq_init(struct lvts_ctrl *lvts_ctrl) * 10 : Selected sensor with bits 19-18 * 11 : Reserved */ - writel(BIT(16), LVTS_PROTCTL(lvts_ctrl->base)); /* * LVTS_PROTTA : Stage 1 temperature threshold @@ -906,8 +920,8 @@ static int lvts_irq_init(struct lvts_ctrl *lvts_ctrl) * * writel(0x0, LVTS_PROTTA(lvts_ctrl->base)); * writel(0x0, LVTS_PROTTB(lvts_ctrl->base)); + * writel(0x0, LVTS_PROTTC(lvts_ctrl->base)); */ - writel(lvts_ctrl->hw_tshut_raw_temp, LVTS_PROTTC(lvts_ctrl->base)); /* * LVTS_MONINT : Interrupt configuration register @@ -1381,8 +1395,11 @@ static int lvts_suspend(struct device *dev) lvts_td = dev_get_drvdata(dev); - for (i = 0; i < lvts_td->num_lvts_ctrl; i++) + for (i = 0; i < lvts_td->num_lvts_ctrl; i++) { + lvts_ctrl_monitor_enable(dev, &lvts_td->lvts_ctrl[i], false); + usleep_range(100, 200); lvts_ctrl_set_enable(&lvts_td->lvts_ctrl[i], false); + } clk_disable_unprepare(lvts_td->clk); @@ -1400,8 +1417,11 @@ static int lvts_resume(struct device *dev) if (ret) return ret; - for (i = 0; i < lvts_td->num_lvts_ctrl; i++) + for (i = 0; i < lvts_td->num_lvts_ctrl; i++) { lvts_ctrl_set_enable(&lvts_td->lvts_ctrl[i], true); + usleep_range(100, 200); + lvts_ctrl_monitor_enable(dev, &lvts_td->lvts_ctrl[i], true); + } return 0; } diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index 086ed42dd16cd..a84f48a752d15 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -386,6 +386,7 @@ static const struct tsadc_table rk3328_code_table[] = { {296, -40000}, {304, -35000}, {313, -30000}, + {322, -25000}, {331, -20000}, {340, -15000}, {349, -10000}, diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 1d2f2b307bac5..c2fa236e10cda 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -490,7 +490,7 @@ static void thermal_zone_device_check(struct work_struct *work) static void thermal_zone_device_init(struct thermal_zone_device *tz) { - struct thermal_instance *pos; + struct thermal_trip_desc *td; INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check); @@ -498,8 +498,12 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz) tz->passive = 0; tz->prev_low_trip = -INT_MAX; tz->prev_high_trip = INT_MAX; - list_for_each_entry(pos, &tz->thermal_instances, tz_node) - pos->initialized = false; + for_each_trip_desc(tz, td) { + struct thermal_instance *instance; + + list_for_each_entry(instance, &td->thermal_instances, trip_node) + instance->initialized = false; + } } static void thermal_governor_trip_crossed(struct thermal_governor *governor, @@ -764,12 +768,12 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id) * Return: 0 on success, the proper error value otherwise. 
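 *
 * Once this function succeeds, the new instance is linked on the trip
 * descriptor's list rather than on a zone-wide list, so a governor can,
 * for example, walk all cooling devices bound to @trip like this
 * (illustrative sketch only, using helpers introduced by this patch):
 *
 *	struct thermal_trip_desc *td = trip_to_trip_desc(trip);
 *	struct thermal_instance *instance;
 *
 *	list_for_each_entry(instance, &td->thermal_instances, trip_node)
 *		thermal_cdev_update(instance->cdev);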
*/ static int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz, - const struct thermal_trip *trip, + struct thermal_trip *trip, struct thermal_cooling_device *cdev, struct cooling_spec *cool_spec) { - struct thermal_instance *dev; - struct thermal_instance *pos; + struct thermal_trip_desc *td = trip_to_trip_desc(trip); + struct thermal_instance *dev, *instance; bool upper_no_limit; int result; @@ -832,13 +836,13 @@ static int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz, goto remove_trip_file; mutex_lock(&cdev->lock); - list_for_each_entry(pos, &tz->thermal_instances, tz_node) - if (pos->trip == trip && pos->cdev == cdev) { + list_for_each_entry(instance, &td->thermal_instances, trip_node) + if (instance->cdev == cdev) { result = -EEXIST; break; } if (!result) { - list_add_tail(&dev->tz_node, &tz->thermal_instances); + list_add_tail(&dev->trip_node, &td->thermal_instances); list_add_tail(&dev->cdev_node, &cdev->thermal_instances); atomic_set(&tz->need_update, 1); @@ -872,15 +876,16 @@ static int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz, * This function is usually called in the thermal zone device .unbind callback. */ static void thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz, - const struct thermal_trip *trip, + struct thermal_trip *trip, struct thermal_cooling_device *cdev) { + struct thermal_trip_desc *td = trip_to_trip_desc(trip); struct thermal_instance *pos, *next; mutex_lock(&cdev->lock); - list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) { - if (pos->trip == trip && pos->cdev == cdev) { - list_del(&pos->tz_node); + list_for_each_entry_safe(pos, next, &td->thermal_instances, trip_node) { + if (pos->cdev == cdev) { + list_del(&pos->trip_node); list_del(&pos->cdev_node); thermal_governor_update_tz(tz, THERMAL_TZ_UNBIND_CDEV); @@ -1435,7 +1440,6 @@ thermal_zone_device_register_with_trips(const char *type, } } - INIT_LIST_HEAD(&tz->thermal_instances); INIT_LIST_HEAD(&tz->node); ida_init(&tz->ida); mutex_init(&tz->lock); @@ -1459,6 +1463,7 @@ thermal_zone_device_register_with_trips(const char *type, tz->num_trips = num_trips; for_each_trip_desc(tz, td) { td->trip = *trip++; + INIT_LIST_HEAD(&td->thermal_instances); /* * Mark all thresholds as invalid to start with even though * this only matters for the trips that start as invalid and diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 421522a2bb9d4..163871699a602 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -30,6 +30,7 @@ struct thermal_trip_desc { struct thermal_trip trip; struct thermal_trip_attrs trip_attrs; struct list_head notify_list_node; + struct list_head thermal_instances; int notify_temp; int threshold; }; @@ -99,7 +100,6 @@ struct thermal_governor { * @tzp: thermal zone parameters * @governor: pointer to the governor for this thermal zone * @governor_data: private pointer for governor data - * @thermal_instances: list of &struct thermal_instance of this thermal zone * @ida: &struct ida to generate unique id for this zone's cooling * devices * @lock: lock to protect thermal_instances list @@ -133,7 +133,6 @@ struct thermal_zone_device { struct thermal_zone_params *tzp; struct thermal_governor *governor; void *governor_data; - struct list_head thermal_instances; struct ida ida; struct mutex lock; struct list_head node; @@ -230,7 +229,7 @@ struct thermal_instance { struct device_attribute attr; char weight_attr_name[THERMAL_NAME_LENGTH]; struct device_attribute weight_attr; - struct 
list_head tz_node; /* node in tz->thermal_instances */ + struct list_head trip_node; /* node in trip->thermal_instances */ struct list_head cdev_node; /* node in cdev->thermal_instances */ unsigned int weight; /* The weight of the cooling device */ bool upper_no_limit; diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c index dc374a7a1a659..403d62d3ce77e 100644 --- a/drivers/thermal/thermal_helpers.c +++ b/drivers/thermal/thermal_helpers.c @@ -43,10 +43,11 @@ static bool thermal_instance_present(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev, const struct thermal_trip *trip) { + const struct thermal_trip_desc *td = trip_to_trip_desc(trip); struct thermal_instance *ti; - list_for_each_entry(ti, &tz->thermal_instances, tz_node) { - if (ti->trip == trip && ti->cdev == cdev) + list_for_each_entry(ti, &td->thermal_instances, trip_node) { + if (ti->cdev == cdev) return true; } diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index 5d3d8ce672cd5..e0aa9d9d5604b 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -293,12 +293,40 @@ static bool thermal_of_get_cooling_spec(struct device_node *map_np, int index, return true; } +static bool thermal_of_cm_lookup(struct device_node *cm_np, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev, + struct cooling_spec *c) +{ + for_each_child_of_node_scoped(cm_np, child) { + struct device_node *tr_np; + int count, i; + + tr_np = of_parse_phandle(child, "trip", 0); + if (tr_np != trip->priv) + continue; + + /* The trip has been found, look up the cdev. */ + count = of_count_phandle_with_args(child, "cooling-device", + "#cooling-cells"); + if (count <= 0) + pr_err("Add a cooling_device property with at least one device\n"); + + for (i = 0; i < count; i++) { + if (thermal_of_get_cooling_spec(child, i, cdev, c)) + return true; + } + } + + return false; +} + static bool thermal_of_should_bind(struct thermal_zone_device *tz, const struct thermal_trip *trip, struct thermal_cooling_device *cdev, struct cooling_spec *c) { - struct device_node *tz_np, *cm_np, *child; + struct device_node *tz_np, *cm_np; bool result = false; tz_np = thermal_of_zone_get_by_name(tz); @@ -312,28 +340,7 @@ static bool thermal_of_should_bind(struct thermal_zone_device *tz, goto out; /* Look up the trip and the cdev in the cooling maps. */ - for_each_child_of_node(cm_np, child) { - struct device_node *tr_np; - int count, i; - - tr_np = of_parse_phandle(child, "trip", 0); - if (tr_np != trip->priv) - continue; - - /* The trip has been found, look up the cdev. 
*/ - count = of_count_phandle_with_args(child, "cooling-device", "#cooling-cells"); - if (count <= 0) - pr_err("Add a cooling_device property with at least one device\n"); - - for (i = 0; i < count; i++) { - result = thermal_of_get_cooling_spec(child, i, cdev, c); - if (result) - break; - } - - of_node_put(child); - break; - } + result = thermal_of_cm_lookup(cm_np, trip, cdev, c); of_node_put(cm_np); out: diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 5e9ca4376d686..94fa981081fdb 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -486,7 +486,8 @@ static int do_output_char(u8 c, struct tty_struct *tty, int space) static int process_output(u8 c, struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; - int space, retval; + unsigned int space; + int retval; mutex_lock(&ldata->output_lock); @@ -522,16 +523,16 @@ static ssize_t process_output_block(struct tty_struct *tty, const u8 *buf, unsigned int nr) { struct n_tty_data *ldata = tty->disc_data; - int space; - int i; + unsigned int space; + int i; const u8 *cp; mutex_lock(&ldata->output_lock); space = tty_write_room(tty); - if (space <= 0) { + if (space == 0) { mutex_unlock(&ldata->output_lock); - return space; + return 0; } if (nr > space) nr = space; @@ -696,7 +697,7 @@ static int n_tty_process_echo_ops(struct tty_struct *tty, size_t *tail, static size_t __process_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; - int space, old_space; + unsigned int space, old_space; size_t tail; u8 c; diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index f245a84f4a508..bdd26c9f34bdf 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -162,7 +162,7 @@ void serial8250_tx_dma_flush(struct uart_8250_port *p) */ dma->tx_size = 0; - dmaengine_terminate_async(dma->rxchan); + dmaengine_terminate_async(dma->txchan); } int serial8250_rx_dma(struct uart_8250_port *p) diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index de6d90bf0d70a..b3c19ba777c68 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -2687,6 +2687,22 @@ static struct pci_serial_quirk pci_serial_quirks[] = { .init = pci_oxsemi_tornado_init, .setup = pci_oxsemi_tornado_setup, }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4026, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4021, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, { .vendor = PCI_VENDOR_ID_INTEL, .device = 0x8811, @@ -5213,6 +5229,14 @@ static const struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0BA2, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0BA3, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, /* * Brainboxes UC-235/246 */ @@ -5333,6 +5357,14 @@ static const struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_4_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0C42, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0C43, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, /* * Brainboxes UC-420 */ @@ -5559,6 +5591,20 @@ static const struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
pbn_oxsemi_1_15625000 }, + /* + * Brainboxes XC-235 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x4026, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_1_15625000 }, + /* + * Brainboxes XC-475 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x4021, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_1_15625000 }, /* * Perle PCI-RAS cards diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 77efa7ee6eda2..951c3cdac3b94 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -440,7 +440,7 @@ static unsigned int lpuart_get_baud_clk_rate(struct lpuart_port *sport) static void lpuart_stop_tx(struct uart_port *port) { - unsigned char temp; + u8 temp; temp = readb(port->membase + UARTCR2); temp &= ~(UARTCR2_TIE | UARTCR2_TCIE); @@ -449,7 +449,7 @@ static void lpuart_stop_tx(struct uart_port *port) static void lpuart32_stop_tx(struct uart_port *port) { - unsigned long temp; + u32 temp; temp = lpuart32_read(port, UARTCTRL); temp &= ~(UARTCTRL_TIE | UARTCTRL_TCIE); @@ -458,7 +458,7 @@ static void lpuart32_stop_tx(struct uart_port *port) static void lpuart_stop_rx(struct uart_port *port) { - unsigned char temp; + u8 temp; temp = readb(port->membase + UARTCR2); writeb(temp & ~UARTCR2_RE, port->membase + UARTCR2); @@ -466,7 +466,7 @@ static void lpuart_stop_rx(struct uart_port *port) static void lpuart32_stop_rx(struct uart_port *port) { - unsigned long temp; + u32 temp; temp = lpuart32_read(port, UARTCTRL); lpuart32_write(port, temp & ~UARTCTRL_RE, UARTCTRL); @@ -580,7 +580,7 @@ static int lpuart_dma_tx_request(struct uart_port *port) ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig); if (ret) { - dev_err(sport->port.dev, + dev_err(port->dev, "DMA slave config failed, err = %d\n", ret); return ret; } @@ -610,13 +610,13 @@ static void lpuart_flush_buffer(struct uart_port *port) } if (lpuart_is_32(sport)) { - val = lpuart32_read(&sport->port, UARTFIFO); + val = lpuart32_read(port, UARTFIFO); val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH; - lpuart32_write(&sport->port, val, UARTFIFO); + lpuart32_write(port, val, UARTFIFO); } else { - val = readb(sport->port.membase + UARTCFIFO); + val = readb(port->membase + UARTCFIFO); val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH; - writeb(val, sport->port.membase + UARTCFIFO); + writeb(val, port->membase + UARTCFIFO); } } @@ -638,38 +638,36 @@ static void lpuart32_wait_bit_set(struct uart_port *port, unsigned int offset, static int lpuart_poll_init(struct uart_port *port) { - struct lpuart_port *sport = container_of(port, - struct lpuart_port, port); unsigned long flags; - unsigned char temp; + u8 temp; - sport->port.fifosize = 0; + port->fifosize = 0; - uart_port_lock_irqsave(&sport->port, &flags); + uart_port_lock_irqsave(port, &flags); /* Disable Rx & Tx */ - writeb(0, sport->port.membase + UARTCR2); + writeb(0, port->membase + UARTCR2); - temp = readb(sport->port.membase + UARTPFIFO); + temp = readb(port->membase + UARTPFIFO); /* Enable Rx and Tx FIFO */ writeb(temp | UARTPFIFO_RXFE | UARTPFIFO_TXFE, - sport->port.membase + UARTPFIFO); + port->membase + UARTPFIFO); /* flush Tx and Rx FIFO */ writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH, - sport->port.membase + UARTCFIFO); + port->membase + UARTCFIFO); /* explicitly clear RDRF */ - if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) { - readb(sport->port.membase + UARTDR); - writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO); + if (readb(port->membase + UARTSR1) & UARTSR1_RDRF) { + readb(port->membase + UARTDR); + writeb(UARTSFIFO_RXUF, port->membase + 
UARTSFIFO); } - writeb(0, sport->port.membase + UARTTWFIFO); - writeb(1, sport->port.membase + UARTRWFIFO); + writeb(0, port->membase + UARTTWFIFO); + writeb(1, port->membase + UARTRWFIFO); /* Enable Rx and Tx */ - writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2); - uart_port_unlock_irqrestore(&sport->port, flags); + writeb(UARTCR2_RE | UARTCR2_TE, port->membase + UARTCR2); + uart_port_unlock_irqrestore(port, flags); return 0; } @@ -692,33 +690,32 @@ static int lpuart_poll_get_char(struct uart_port *port) static int lpuart32_poll_init(struct uart_port *port) { unsigned long flags; - struct lpuart_port *sport = container_of(port, struct lpuart_port, port); u32 temp; - sport->port.fifosize = 0; + port->fifosize = 0; - uart_port_lock_irqsave(&sport->port, &flags); + uart_port_lock_irqsave(port, &flags); /* Disable Rx & Tx */ - lpuart32_write(&sport->port, 0, UARTCTRL); + lpuart32_write(port, 0, UARTCTRL); - temp = lpuart32_read(&sport->port, UARTFIFO); + temp = lpuart32_read(port, UARTFIFO); /* Enable Rx and Tx FIFO */ - lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO); + lpuart32_write(port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO); /* flush Tx and Rx FIFO */ - lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO); + lpuart32_write(port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO); /* explicitly clear RDRF */ - if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) { - lpuart32_read(&sport->port, UARTDATA); - lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO); + if (lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF) { + lpuart32_read(port, UARTDATA); + lpuart32_write(port, UARTFIFO_RXUF, UARTFIFO); } /* Enable Rx and Tx */ - lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL); - uart_port_unlock_irqrestore(&sport->port, flags); + lpuart32_write(port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL); + uart_port_unlock_irqrestore(port, flags); return 0; } @@ -751,7 +748,7 @@ static inline void lpuart_transmit_buffer(struct lpuart_port *sport) static inline void lpuart32_transmit_buffer(struct lpuart_port *sport) { struct tty_port *tport = &sport->port.state->port; - unsigned long txcnt; + u32 txcnt; unsigned char c; if (sport->port.x_char) { @@ -788,7 +785,7 @@ static void lpuart_start_tx(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned char temp; + u8 temp; temp = readb(port->membase + UARTCR2); writeb(temp | UARTCR2_TIE, port->membase + UARTCR2); @@ -805,7 +802,7 @@ static void lpuart_start_tx(struct uart_port *port) static void lpuart32_start_tx(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned long temp; + u32 temp; if (sport->lpuart_dma_tx_use) { if (!lpuart_stopped_or_empty(port)) @@ -838,8 +835,8 @@ static unsigned int lpuart_tx_empty(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned char sr1 = readb(port->membase + UARTSR1); - unsigned char sfifo = readb(port->membase + UARTSFIFO); + u8 sr1 = readb(port->membase + UARTSR1); + u8 sfifo = readb(port->membase + UARTSFIFO); if (sport->dma_tx_in_progress) return 0; @@ -854,9 +851,9 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned long stat = lpuart32_read(port, UARTSTAT); - unsigned long sfifo = lpuart32_read(port, UARTFIFO); - unsigned long ctrl = lpuart32_read(port, UARTCTRL); + 
u32 stat = lpuart32_read(port, UARTSTAT); + u32 sfifo = lpuart32_read(port, UARTFIFO); + u32 ctrl = lpuart32_read(port, UARTCTRL); if (sport->dma_tx_in_progress) return 0; @@ -883,7 +880,7 @@ static void lpuart_rxint(struct lpuart_port *sport) { unsigned int flg, ignored = 0, overrun = 0; struct tty_port *port = &sport->port.state->port; - unsigned char rx, sr; + u8 rx, sr; uart_port_lock(&sport->port); @@ -960,7 +957,7 @@ static void lpuart32_rxint(struct lpuart_port *sport) { unsigned int flg, ignored = 0; struct tty_port *port = &sport->port.state->port; - unsigned long rx, sr; + u32 rx, sr; bool is_break; uart_port_lock(&sport->port); @@ -1038,7 +1035,7 @@ static void lpuart32_rxint(struct lpuart_port *sport) static irqreturn_t lpuart_int(int irq, void *dev_id) { struct lpuart_port *sport = dev_id; - unsigned char sts; + u8 sts; sts = readb(sport->port.membase + UARTSR1); @@ -1112,7 +1109,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) int count, copied; if (lpuart_is_32(sport)) { - unsigned long sr = lpuart32_read(&sport->port, UARTSTAT); + u32 sr = lpuart32_read(&sport->port, UARTSTAT); if (sr & (UARTSTAT_PE | UARTSTAT_FE)) { /* Clear the error flags */ @@ -1124,10 +1121,10 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) sport->port.icount.frame++; } } else { - unsigned char sr = readb(sport->port.membase + UARTSR1); + u8 sr = readb(sport->port.membase + UARTSR1); if (sr & (UARTSR1_PE | UARTSR1_FE)) { - unsigned char cr2; + u8 cr2; /* Disable receiver during this operation... */ cr2 = readb(sport->port.membase + UARTCR2); @@ -1278,7 +1275,7 @@ static void lpuart32_dma_idleint(struct lpuart_port *sport) static irqreturn_t lpuart32_int(int irq, void *dev_id) { struct lpuart_port *sport = dev_id; - unsigned long sts, rxcount; + u32 sts, rxcount; sts = lpuart32_read(&sport->port, UARTSTAT); rxcount = lpuart32_read(&sport->port, UARTWATER); @@ -1410,12 +1407,12 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport) dma_async_issue_pending(chan); if (lpuart_is_32(sport)) { - unsigned long temp = lpuart32_read(&sport->port, UARTBAUD); + u32 temp = lpuart32_read(&sport->port, UARTBAUD); lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD); if (sport->dma_idle_int) { - unsigned long ctrl = lpuart32_read(&sport->port, UARTCTRL); + u32 ctrl = lpuart32_read(&sport->port, UARTCTRL); lpuart32_write(&sport->port, ctrl | UARTCTRL_ILIE, UARTCTRL); } @@ -1448,12 +1445,9 @@ static void lpuart_dma_rx_free(struct uart_port *port) static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios, struct serial_rs485 *rs485) { - struct lpuart_port *sport = container_of(port, - struct lpuart_port, port); - - u8 modem = readb(sport->port.membase + UARTMODEM) & + u8 modem = readb(port->membase + UARTMODEM) & ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE); - writeb(modem, sport->port.membase + UARTMODEM); + writeb(modem, port->membase + UARTMODEM); if (rs485->flags & SER_RS485_ENABLED) { /* Enable auto RS-485 RTS mode */ @@ -1471,19 +1465,29 @@ static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios, modem &= ~UARTMODEM_TXRTSPOL; } - writeb(modem, sport->port.membase + UARTMODEM); + writeb(modem, port->membase + UARTMODEM); return 0; } static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termios, struct serial_rs485 *rs485) { - struct lpuart_port *sport = container_of(port, - struct lpuart_port, port); - - unsigned long modem = lpuart32_read(&sport->port, UARTMODIR) + u32 modem = lpuart32_read(port, 
UARTMODIR) & ~(UARTMODIR_TXRTSPOL | UARTMODIR_TXRTSE); - lpuart32_write(&sport->port, modem, UARTMODIR); + u32 ctrl; + + /* TXRTSE and TXRTSPOL only can be changed when transmitter is disabled. */ + ctrl = lpuart32_read(port, UARTCTRL); + if (ctrl & UARTCTRL_TE) { + /* wait for the transmit engine to complete */ + lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TC); + lpuart32_write(port, ctrl & ~UARTCTRL_TE, UARTCTRL); + + while (lpuart32_read(port, UARTCTRL) & UARTCTRL_TE) + cpu_relax(); + } + + lpuart32_write(port, modem, UARTMODIR); if (rs485->flags & SER_RS485_ENABLED) { /* Enable auto RS-485 RTS mode */ @@ -1501,7 +1505,11 @@ static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termio modem &= ~UARTMODIR_TXRTSPOL; } - lpuart32_write(&sport->port, modem, UARTMODIR); + lpuart32_write(port, modem, UARTMODIR); + + if (ctrl & UARTCTRL_TE) + lpuart32_write(port, ctrl, UARTCTRL); + return 0; } @@ -1559,7 +1567,7 @@ static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl) static void lpuart_break_ctl(struct uart_port *port, int break_state) { - unsigned char temp; + u8 temp; temp = readb(port->membase + UARTCR2) & ~UARTCR2_SBK; @@ -1571,7 +1579,7 @@ static void lpuart_break_ctl(struct uart_port *port, int break_state) static void lpuart32_break_ctl(struct uart_port *port, int break_state) { - unsigned long temp; + u32 temp; temp = lpuart32_read(port, UARTCTRL); @@ -1605,8 +1613,7 @@ static void lpuart32_break_ctl(struct uart_port *port, int break_state) static void lpuart_setup_watermark(struct lpuart_port *sport) { - unsigned char val, cr2; - unsigned char cr2_saved; + u8 val, cr2, cr2_saved; cr2 = readb(sport->port.membase + UARTCR2); cr2_saved = cr2; @@ -1639,7 +1646,7 @@ static void lpuart_setup_watermark(struct lpuart_port *sport) static void lpuart_setup_watermark_enable(struct lpuart_port *sport) { - unsigned char cr2; + u8 cr2; lpuart_setup_watermark(sport); @@ -1650,8 +1657,7 @@ static void lpuart_setup_watermark_enable(struct lpuart_port *sport) static void lpuart32_setup_watermark(struct lpuart_port *sport) { - unsigned long val, ctrl; - unsigned long ctrl_saved; + u32 val, ctrl, ctrl_saved; ctrl = lpuart32_read(&sport->port, UARTCTRL); ctrl_saved = ctrl; @@ -1760,7 +1766,7 @@ static void lpuart_tx_dma_startup(struct lpuart_port *sport) static void lpuart_rx_dma_startup(struct lpuart_port *sport) { int ret; - unsigned char cr3; + u8 cr3; if (uart_console(&sport->port)) goto err; @@ -1810,14 +1816,14 @@ static void lpuart_hw_setup(struct lpuart_port *sport) static int lpuart_startup(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned char temp; + u8 temp; /* determine FIFO size and enable FIFO mode */ - temp = readb(sport->port.membase + UARTPFIFO); + temp = readb(port->membase + UARTPFIFO); sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_TXSIZE_OFF) & UARTPFIFO_FIFOSIZE_MASK); - sport->port.fifosize = sport->txfifo_size; + port->fifosize = sport->txfifo_size; sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_RXSIZE_OFF) & UARTPFIFO_FIFOSIZE_MASK); @@ -1830,7 +1836,7 @@ static int lpuart_startup(struct uart_port *port) static void lpuart32_hw_disable(struct lpuart_port *sport) { - unsigned long temp; + u32 temp; temp = lpuart32_read(&sport->port, UARTCTRL); temp &= ~(UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE | @@ -1840,7 +1846,7 @@ static void lpuart32_hw_disable(struct lpuart_port *sport) static void lpuart32_configure(struct lpuart_port *sport) { - unsigned long temp; + 
u32 temp; temp = lpuart32_read(&sport->port, UARTCTRL); if (!sport->lpuart_dma_rx_use) @@ -1870,14 +1876,14 @@ static void lpuart32_hw_setup(struct lpuart_port *sport) static int lpuart32_startup(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned long temp; + u32 temp; /* determine FIFO size */ - temp = lpuart32_read(&sport->port, UARTFIFO); + temp = lpuart32_read(port, UARTFIFO); sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_TXSIZE_OFF) & UARTFIFO_FIFOSIZE_MASK); - sport->port.fifosize = sport->txfifo_size; + port->fifosize = sport->txfifo_size; sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_RXSIZE_OFF) & UARTFIFO_FIFOSIZE_MASK); @@ -1890,7 +1896,7 @@ static int lpuart32_startup(struct uart_port *port) if (is_layerscape_lpuart(sport)) { sport->rxfifo_size = 16; sport->txfifo_size = 16; - sport->port.fifosize = sport->txfifo_size; + port->fifosize = sport->txfifo_size; } lpuart_request_dma(sport); @@ -1924,7 +1930,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport) static void lpuart_shutdown(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned char temp; + u8 temp; unsigned long flags; uart_port_lock_irqsave(port, &flags); @@ -1944,14 +1950,14 @@ static void lpuart32_shutdown(struct uart_port *port) { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); - unsigned long temp; + u32 temp; unsigned long flags; uart_port_lock_irqsave(port, &flags); /* clear status */ - temp = lpuart32_read(&sport->port, UARTSTAT); - lpuart32_write(&sport->port, temp, UARTSTAT); + temp = lpuart32_read(port, UARTSTAT); + lpuart32_write(port, temp, UARTSTAT); /* disable Rx/Tx DMA */ temp = lpuart32_read(port, UARTBAUD); @@ -1975,17 +1981,17 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios, { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); unsigned long flags; - unsigned char cr1, old_cr1, old_cr2, cr3, cr4, bdh, modem; + u8 cr1, old_cr1, old_cr2, cr3, cr4, bdh, modem; unsigned int baud; unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8; unsigned int sbr, brfa; - cr1 = old_cr1 = readb(sport->port.membase + UARTCR1); - old_cr2 = readb(sport->port.membase + UARTCR2); - cr3 = readb(sport->port.membase + UARTCR3); - cr4 = readb(sport->port.membase + UARTCR4); - bdh = readb(sport->port.membase + UARTBDH); - modem = readb(sport->port.membase + UARTMODEM); + cr1 = old_cr1 = readb(port->membase + UARTCR1); + old_cr2 = readb(port->membase + UARTCR2); + cr3 = readb(port->membase + UARTCR3); + cr4 = readb(port->membase + UARTCR4); + bdh = readb(port->membase + UARTBDH); + modem = readb(port->membase + UARTMODEM); /* * only support CS8 and CS7, and for CS7 must enable PE. * supported mode: @@ -2017,7 +2023,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios, * When auto RS-485 RTS mode is enabled, * hardware flow control need to be disabled. */ - if (sport->port.rs485.flags & SER_RS485_ENABLED) + if (port->rs485.flags & SER_RS485_ENABLED) termios->c_cflag &= ~CRTSCTS; if (termios->c_cflag & CRTSCTS) @@ -2058,59 +2064,59 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios, * Need to update the Ring buffer length according to the selected * baud rate and restart Rx DMA path. 
* - * Since timer function acquires sport->port.lock, need to stop before + * Since timer function acquires port->lock, need to stop before * acquiring same lock because otherwise del_timer_sync() can deadlock. */ if (old && sport->lpuart_dma_rx_use) - lpuart_dma_rx_free(&sport->port); + lpuart_dma_rx_free(port); - uart_port_lock_irqsave(&sport->port, &flags); + uart_port_lock_irqsave(port, &flags); - sport->port.read_status_mask = 0; + port->read_status_mask = 0; if (termios->c_iflag & INPCK) - sport->port.read_status_mask |= UARTSR1_FE | UARTSR1_PE; + port->read_status_mask |= UARTSR1_FE | UARTSR1_PE; if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) - sport->port.read_status_mask |= UARTSR1_FE; + port->read_status_mask |= UARTSR1_FE; /* characters to ignore */ - sport->port.ignore_status_mask = 0; + port->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) - sport->port.ignore_status_mask |= UARTSR1_PE; + port->ignore_status_mask |= UARTSR1_PE; if (termios->c_iflag & IGNBRK) { - sport->port.ignore_status_mask |= UARTSR1_FE; + port->ignore_status_mask |= UARTSR1_FE; /* * if we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) - sport->port.ignore_status_mask |= UARTSR1_OR; + port->ignore_status_mask |= UARTSR1_OR; } /* update the per-port timeout */ uart_update_timeout(port, termios->c_cflag, baud); /* wait for the transmit engine to complete */ - lpuart_wait_bit_set(&sport->port, UARTSR1, UARTSR1_TC); + lpuart_wait_bit_set(port, UARTSR1, UARTSR1_TC); /* disable transmit and receive */ writeb(old_cr2 & ~(UARTCR2_TE | UARTCR2_RE), - sport->port.membase + UARTCR2); + port->membase + UARTCR2); - sbr = sport->port.uartclk / (16 * baud); - brfa = ((sport->port.uartclk - (16 * sbr * baud)) * 2) / baud; + sbr = port->uartclk / (16 * baud); + brfa = ((port->uartclk - (16 * sbr * baud)) * 2) / baud; bdh &= ~UARTBDH_SBR_MASK; bdh |= (sbr >> 8) & 0x1F; cr4 &= ~UARTCR4_BRFA_MASK; brfa &= UARTCR4_BRFA_MASK; - writeb(cr4 | brfa, sport->port.membase + UARTCR4); - writeb(bdh, sport->port.membase + UARTBDH); - writeb(sbr & 0xFF, sport->port.membase + UARTBDL); - writeb(cr3, sport->port.membase + UARTCR3); - writeb(cr1, sport->port.membase + UARTCR1); - writeb(modem, sport->port.membase + UARTMODEM); + writeb(cr4 | brfa, port->membase + UARTCR4); + writeb(bdh, port->membase + UARTBDH); + writeb(sbr & 0xFF, port->membase + UARTBDL); + writeb(cr3, port->membase + UARTCR3); + writeb(cr1, port->membase + UARTCR1); + writeb(modem, port->membase + UARTMODEM); /* restore control register */ - writeb(old_cr2, sport->port.membase + UARTCR2); + writeb(old_cr2, port->membase + UARTCR2); if (old && sport->lpuart_dma_rx_use) { if (!lpuart_start_rx_dma(sport)) @@ -2119,7 +2125,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios, sport->lpuart_dma_rx_use = false; } - uart_port_unlock_irqrestore(&sport->port, flags); + uart_port_unlock_irqrestore(port, flags); } static void __lpuart32_serial_setbrg(struct uart_port *port, @@ -2213,13 +2219,13 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, { struct lpuart_port *sport = container_of(port, struct lpuart_port, port); unsigned long flags; - unsigned long ctrl, old_ctrl, bd, modem; + u32 ctrl, old_ctrl, bd, modem; unsigned int baud; unsigned int old_csize = old ?
old->c_cflag & CSIZE : CS8; - ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL); - bd = lpuart32_read(&sport->port, UARTBAUD); - modem = lpuart32_read(&sport->port, UARTMODIR); + ctrl = old_ctrl = lpuart32_read(port, UARTCTRL); + bd = lpuart32_read(port, UARTBAUD); + modem = lpuart32_read(port, UARTMODIR); sport->is_cs7 = false; /* * only support CS8 and CS7, and for CS7 must enable PE. @@ -2252,7 +2258,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, * When auto RS-485 RTS mode is enabled, * hardware flow control need to be disabled. */ - if (sport->port.rs485.flags & SER_RS485_ENABLED) + if (port->rs485.flags & SER_RS485_ENABLED) termios->c_cflag &= ~CRTSCTS; if (termios->c_cflag & CRTSCTS) @@ -2293,59 +2299,61 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, * Need to update the Ring buffer length according to the selected * baud rate and restart Rx DMA path. * - * Since timer function acquires sport->port.lock, need to stop before + * Since timer function acquires port->lock, need to stop before * acquiring same lock because otherwise del_timer_sync() can deadlock. */ if (old && sport->lpuart_dma_rx_use) - lpuart_dma_rx_free(&sport->port); + lpuart_dma_rx_free(port); - uart_port_lock_irqsave(&sport->port, &flags); + uart_port_lock_irqsave(port, &flags); - sport->port.read_status_mask = 0; + port->read_status_mask = 0; if (termios->c_iflag & INPCK) - sport->port.read_status_mask |= UARTSTAT_FE | UARTSTAT_PE; + port->read_status_mask |= UARTSTAT_FE | UARTSTAT_PE; if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) - sport->port.read_status_mask |= UARTSTAT_FE; + port->read_status_mask |= UARTSTAT_FE; /* characters to ignore */ - sport->port.ignore_status_mask = 0; + port->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) - sport->port.ignore_status_mask |= UARTSTAT_PE; + port->ignore_status_mask |= UARTSTAT_PE; if (termios->c_iflag & IGNBRK) { - sport->port.ignore_status_mask |= UARTSTAT_FE; + port->ignore_status_mask |= UARTSTAT_FE; /* * if we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) - sport->port.ignore_status_mask |= UARTSTAT_OR; + port->ignore_status_mask |= UARTSTAT_OR; } /* update the per-port timeout */ uart_update_timeout(port, termios->c_cflag, baud); + /* + * disable CTS to ensure the transmit engine is not blocked by the flow + * control when there is dirty data in TX FIFO + */ + lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR); + /* * LPUART Transmission Complete Flag may never be set while queuing a break * character, so skip waiting for transmission complete when UARTCTRL_SBK is * asserted.
*/ - if (!(old_ctrl & UARTCTRL_SBK)) { - lpuart32_write(&sport->port, 0, UARTMODIR); - lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC); - } + if (!(old_ctrl & UARTCTRL_SBK)) + lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TC); /* disable transmit and receive */ - lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE), + lpuart32_write(port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE), UARTCTRL); - lpuart32_write(&sport->port, bd, UARTBAUD); + lpuart32_write(port, bd, UARTBAUD); lpuart32_serial_setbrg(sport, baud); - /* disable CTS before enabling UARTCTRL_TE to avoid pending idle preamble */ - lpuart32_write(&sport->port, modem & ~UARTMODIR_TXCTSE, UARTMODIR); /* restore control register */ - lpuart32_write(&sport->port, ctrl, UARTCTRL); + lpuart32_write(port, ctrl, UARTCTRL); /* re-enable the CTS if needed */ - lpuart32_write(&sport->port, modem, UARTMODIR); + lpuart32_write(port, modem, UARTMODIR); if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE) sport->is_cs7 = true; @@ -2357,7 +2365,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, sport->lpuart_dma_rx_use = false; } - uart_port_unlock_irqrestore(&sport->port, flags); + uart_port_unlock_irqrestore(port, flags); } static const char *lpuart_type(struct uart_port *port) @@ -2470,7 +2478,7 @@ static void lpuart_console_write(struct console *co, const char *s, unsigned int count) { struct lpuart_port *sport = lpuart_ports[co->index]; - unsigned char old_cr2, cr2; + u8 old_cr2, cr2; unsigned long flags; int locked = 1; @@ -2500,7 +2508,7 @@ static void lpuart32_console_write(struct console *co, const char *s, unsigned int count) { struct lpuart_port *sport = lpuart_ports[co->index]; - unsigned long old_cr, cr; + u32 old_cr, cr; unsigned long flags; int locked = 1; @@ -2534,7 +2542,7 @@ static void __init lpuart_console_get_options(struct lpuart_port *sport, int *baud, int *parity, int *bits) { - unsigned char cr, bdh, bdl, brfa; + u8 cr, bdh, bdl, brfa; unsigned int sbr, uartclk, baud_raw; cr = readb(sport->port.membase + UARTCR2); @@ -2583,7 +2591,7 @@ static void __init lpuart32_console_get_options(struct lpuart_port *sport, int *baud, int *parity, int *bits) { - unsigned long cr, bd; + u32 cr, bd; unsigned int sbr, uartclk, baud_raw; cr = lpuart32_read(&sport->port, UARTCTRL); @@ -2789,13 +2797,13 @@ static int lpuart_global_reset(struct lpuart_port *sport) { struct uart_port *port = &sport->port; void __iomem *global_addr; - unsigned long ctrl, bd; + u32 ctrl, bd; unsigned int val = 0; int ret; ret = clk_prepare_enable(sport->ipg_clk); if (ret) { - dev_err(sport->port.dev, "failed to enable uart ipg clk: %d\n", ret); + dev_err(port->dev, "failed to enable uart ipg clk: %d\n", ret); return ret; } @@ -2806,10 +2814,10 @@ static int lpuart_global_reset(struct lpuart_port *sport) */ ctrl = lpuart32_read(port, UARTCTRL); if (ctrl & UARTCTRL_TE) { - bd = lpuart32_read(&sport->port, UARTBAUD); + bd = lpuart32_read(port, UARTBAUD); if (read_poll_timeout(lpuart32_tx_empty, val, val, 1, 100000, false, port)) { - dev_warn(sport->port.dev, + dev_warn(port->dev, "timeout waiting for transmit engine to complete\n"); clk_disable_unprepare(sport->ipg_clk); return 0; @@ -2995,7 +3003,7 @@ static int lpuart_runtime_resume(struct device *dev) static void serial_lpuart_enable_wakeup(struct lpuart_port *sport, bool on) { - unsigned int val, baud; + u32 val, baud; if (lpuart_is_32(sport)) { val = lpuart32_read(&sport->port, UARTCTRL); @@ -3060,7 +3068,7 @@ static int lpuart_suspend_noirq(struct device 
*dev) static int lpuart_resume_noirq(struct device *dev) { struct lpuart_port *sport = dev_get_drvdata(dev); - unsigned int val; + u32 val; pinctrl_pm_select_default_state(dev); @@ -3080,7 +3088,8 @@ static int lpuart_resume_noirq(struct device *dev) static int lpuart_suspend(struct device *dev) { struct lpuart_port *sport = dev_get_drvdata(dev); - unsigned long temp, flags; + u32 temp; + unsigned long flags; uart_suspend_port(&lpuart_reg, &sport->port); @@ -3160,7 +3169,7 @@ static void lpuart_console_fixup(struct lpuart_port *sport) * in VLLS mode, or restore console setting here. */ if (is_imx7ulp_lpuart(sport) && lpuart_uport_is_active(sport) && - console_suspend_enabled && uart_console(&sport->port)) { + console_suspend_enabled && uart_console(uport)) { mutex_lock(&port->mutex); memset(&termios, 0, sizeof(struct ktermios)); diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index f5199fdecff27..9b9981352b1e1 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -965,10 +965,8 @@ static void stm32_usart_start_tx(struct uart_port *port) { struct tty_port *tport = &port->state->port; - if (kfifo_is_empty(&tport->xmit_fifo) && !port->x_char) { - stm32_usart_rs485_rts_disable(port); + if (kfifo_is_empty(&tport->xmit_fifo) && !port->x_char) return; - } stm32_usart_rs485_rts_enable(port); diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c index 8d4ad0a3f2cf0..252186124669a 100644 --- a/drivers/ufs/core/ufs_bsg.c +++ b/drivers/ufs/core/ufs_bsg.c @@ -194,10 +194,12 @@ static int ufs_bsg_request(struct bsg_job *job) ufshcd_rpm_put_sync(hba); kfree(buff); bsg_reply->result = ret; - job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply); /* complete the job here only if no error */ - if (ret == 0) + if (ret == 0) { + job->reply_len = rpmb ? 
sizeof(struct ufs_rpmb_reply) : + sizeof(struct ufs_bsg_reply); bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len); + } return ret; } diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 67410c4cebee6..89fc0b5662919 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -266,7 +266,7 @@ static bool ufshcd_has_pending_tasks(struct ufs_hba *hba) static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba) { - return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba); + return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba); } static const struct ufs_dev_quirk ufs_fixups[] = { @@ -639,8 +639,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba) const struct scsi_device *sdev_ufs = hba->ufs_device_wlun; dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); - dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", - hba->outstanding_reqs, hba->outstanding_tasks); + dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n", + scsi_host_busy(hba->host), hba->outstanding_tasks); dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", hba->saved_err, hba->saved_uic_err); dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", @@ -3138,8 +3138,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) case UPIU_TRANSACTION_QUERY_RSP: { u8 response = lrbp->ucd_rsp_ptr->header.response; - if (response == 0) + if (response == 0) { err = ufshcd_copy_query_response(hba, lrbp); + } else { + err = -EINVAL; + dev_err(hba->dev, "%s: unexpected response in Query RSP: %x\n", + __func__, response); + } break; } case UPIU_TRANSACTION_REJECT_UPIU: @@ -8975,7 +8980,7 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd) dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n", __func__, hba->outstanding_tasks); - return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE; + return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE; } static const struct attribute_group *ufshcd_driver_groups[] = { @@ -10457,6 +10462,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) */ spin_lock_init(&hba->clk_gating.lock); + /* + * Set the default power management level for runtime and system PM. + * Host controller drivers can override them in their + * 'ufs_hba_variant_ops::init' callback. + * + * Default power saving mode is to keep UFS link in Hibern8 state + * and UFS device in sleep state. + */ + hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( + UFS_SLEEP_PWR_MODE, + UIC_LINK_HIBERN8_STATE); + hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( + UFS_SLEEP_PWR_MODE, + UIC_LINK_HIBERN8_STATE); + err = ufshcd_hba_init(hba); if (err) goto out_error; @@ -10606,21 +10626,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) goto free_tmf_queue; } - /* - * Set the default power management level for runtime and system PM if - * not set by the host controller drivers. - * Default power saving mode is to keep UFS link in Hibern8 state - * and UFS device in sleep state. 
- */ - if (!hba->rpm_lvl) - hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( - UFS_SLEEP_PWR_MODE, - UIC_LINK_HIBERN8_STATE); - if (!hba->spm_lvl) - hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( - UFS_SLEEP_PWR_MODE, - UIC_LINK_HIBERN8_STATE); - INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work); INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work); diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index 98505c68103d0..f2cbfc2d399cd 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -915,6 +915,12 @@ static int exynos_ufs_phy_init(struct exynos_ufs *ufs) } phy_set_bus_width(generic_phy, ufs->avail_ln_rx); + + if (generic_phy->power_count) { + phy_power_off(generic_phy); + phy_exit(generic_phy); + } + ret = phy_init(generic_phy); if (ret) { dev_err(hba->dev, "%s: phy init failed, ret = %d\n", diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 0dd85d2635b99..47d06af33747d 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -1131,7 +1131,10 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, struct cxacru_data *instance; struct usb_device *usb_dev = interface_to_usbdev(intf); struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD]; - struct usb_endpoint_descriptor *in, *out; + static const u8 ep_addrs[] = { + CXACRU_EP_CMD + USB_DIR_IN, + CXACRU_EP_CMD + USB_DIR_OUT, + 0}; int ret; /* instance init */ @@ -1179,13 +1182,11 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, } if (usb_endpoint_xfer_int(&cmd_ep->desc)) - ret = usb_find_common_endpoints(intf->cur_altsetting, - NULL, NULL, &in, &out); + ret = usb_check_int_endpoints(intf, ep_addrs); else - ret = usb_find_common_endpoints(intf->cur_altsetting, - &in, &out, NULL, NULL); + ret = usb_check_bulk_endpoints(intf, ep_addrs); - if (ret) { + if (!ret) { usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n"); ret = -ENODEV; goto fail; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 906daf423cb02..145787c424e0c 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -6065,6 +6065,36 @@ void usb_hub_cleanup(void) usb_deregister(&hub_driver); } /* usb_hub_cleanup() */ +/** + * hub_hc_release_resources - clear resources used by host controller + * @udev: pointer to device being released + * + * Context: task context, might sleep + * + * This function releases the host controller resources in the correct order + * before any operation is made on a resuming usb device. The host controller + * resources allocated for the devices in the tree should be released starting + * from the last usb device in the tree toward the root hub. This function is + * used only when resuming a usb device that requires reinitialization, that + * is, when the udev->reset_resume flag is set. + * + * This call is synchronous, and may not be used in an interrupt context.
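+ *
+ * For example, with a topology of root hub -> hub A -> device B, a call
+ * for hub A releases the controller resources of device B first and only
+ * then those of hub A itself, so resources are always dropped leaf to
+ * root.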
+ */ +static void hub_hc_release_resources(struct usb_device *udev) +{ + struct usb_hub *hub = usb_hub_to_struct_hub(udev); + struct usb_hcd *hcd = bus_to_hcd(udev->bus); + int i; + + /* Release resources for all children before this device */ + for (i = 0; i < udev->maxchild; i++) + if (hub->ports[i]->child) + hub_hc_release_resources(hub->ports[i]->child); + + if (hcd->driver->reset_device) + hcd->driver->reset_device(hcd, udev); +} + /** * usb_reset_and_verify_device - perform a USB port reset to reinitialize a device * @udev: device to reset (not in SUSPENDED or NOTATTACHED state) @@ -6129,6 +6159,9 @@ static int usb_reset_and_verify_device(struct usb_device *udev) bos = udev->bos; udev->bos = NULL; + if (udev->reset_resume) + hub_hc_release_resources(udev); + mutex_lock(hcd->address0_mutex); for (i = 0; i < PORT_INIT_TRIES; ++i) { diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 027479179f09e..6926bd639ec6f 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -341,6 +341,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0638, 0x0a13), .driver_info = USB_QUIRK_STRING_FETCH_255 }, + /* Prolific Single-LUN Mass Storage Card Reader */ + { USB_DEVICE(0x067b, 0x2731), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_NO_LPM }, + /* Saitek Cyborg Gold Joystick */ { USB_DEVICE(0x06a3, 0x0006), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 5658517715238..abcd7f85ab50c 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -131,11 +131,24 @@ void dwc3_enable_susphy(struct dwc3 *dwc, bool enable) } } -void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) +void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy) { + unsigned int hw_mode; u32 reg; reg = dwc3_readl(dwc->regs, DWC3_GCTL); + + /* + * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and + * GUSB2PHYCFG.SUSPHY should be cleared during mode switching, + * and they can be set after core initialization. + */ + hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); + if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) { + if (DWC3_GCTL_PRTCAP(reg) != mode) + dwc3_enable_susphy(dwc, false); + } + reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); reg |= DWC3_GCTL_PRTCAPDIR(mode); dwc3_writel(dwc->regs, DWC3_GCTL, reg); @@ -216,7 +229,7 @@ static void __dwc3_set_mode(struct work_struct *work) spin_lock_irqsave(&dwc->lock, flags); - dwc3_set_prtcap(dwc, desired_dr_role); + dwc3_set_prtcap(dwc, desired_dr_role, false); spin_unlock_irqrestore(&dwc->lock, flags); @@ -695,16 +708,7 @@ static int dwc3_ss_phy_setup(struct dwc3 *dwc, int index) */ reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX; - /* - * Above DWC_usb3.0 1.94a, it is recommended to set - * DWC3_GUSB3PIPECTL_SUSPHY to '0' during coreConsultant configuration. - * So default value will be '0' when the core is reset. Application - * needs to set it to '1' after the core initialization is completed. - * - * Similarly for DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be - * cleared after power-on reset, and it can be set after core - * initialization. - */ + /* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init. */ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; if (dwc->u2ss_inp3_quirk) @@ -784,15 +788,7 @@ static int dwc3_hs_phy_setup(struct dwc3 *dwc, int index) break; } - /* - * Above DWC_usb3.0 1.94a, it is recommended to set - * DWC3_GUSB2PHYCFG_SUSPHY to '0' during coreConsultant configuration.
- * So default value will be '0' when the core is reset. Application - * needs to set it to '1' after the core initialization is completed. - * - * Similarly for DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared - * after power-on reset, and it can be set after core initialization. - */ + /* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; if (dwc->dis_enblslpm_quirk) @@ -867,6 +863,25 @@ static int dwc3_phy_init(struct dwc3 *dwc) goto err_exit_usb3_phy; } + /* + * Above DWC_usb3.0 1.94a, it is recommended to set + * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during + * coreConsultant configuration. So default value will be '0' when the + * core is reset. Application needs to set it to '1' after the core + * initialization is completed. + * + * Certain phy requires to be in P0 power state during initialization. + * Make sure GUSB3PIPECTL.SUSPENDENABLE and GUSB2PHYCFG.SUSPHY are clear + * prior to phy init to maintain in the P0 state. + * + * After phy initialization, some phy operations can only be executed + * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and + * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid + * blocking phy ops. + */ + if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) + dwc3_enable_susphy(dwc, true); + return 0; err_exit_usb3_phy: @@ -1601,7 +1616,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) switch (dwc->dr_mode) { case USB_DR_MODE_PERIPHERAL: - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false); if (dwc->usb2_phy) otg_set_vbus(dwc->usb2_phy->otg, false); @@ -1613,7 +1628,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) return dev_err_probe(dev, ret, "failed to initialize gadget\n"); break; case USB_DR_MODE_HOST: - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false); if (dwc->usb2_phy) otg_set_vbus(dwc->usb2_phy->otg, true); @@ -1658,7 +1673,7 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc) } /* de-assert DRVVBUS for HOST and OTG mode */ - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true); } static void dwc3_get_software_properties(struct dwc3 *dwc) @@ -1851,8 +1866,6 @@ static void dwc3_get_properties(struct dwc3 *dwc) dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd; dwc->tx_max_burst_prd = tx_max_burst_prd; - dwc->imod_interval = 0; - dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num; } @@ -1870,21 +1883,19 @@ static void dwc3_check_params(struct dwc3 *dwc) unsigned int hwparam_gen = DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3); - /* Check for proper value of imod_interval */ - if (dwc->imod_interval && !dwc3_has_imod(dwc)) { - dev_warn(dwc->dev, "Interrupt moderation not supported\n"); - dwc->imod_interval = 0; - } - /* + * Enable IMOD for all supporting controllers. + * + * Particularly, DWC_usb3 v3.00a must enable this feature for + * the following reason: + * * Workaround for STAR 9000961433 which affects only version * 3.00a of the DWC_usb3 core. This prevents the controller * interrupt from being masked while handling events. IMOD * allows us to work around this issue. Enable it for the * affected version. 
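 *
 * For example, once dwc->imod_interval is set to 1 below, the interrupt
 * handler in dwc3_process_event_buf() re-arms the event buffer by writing
 * DWC3_GEVNTCOUNT_EHB to DWC3_GEVNTCOUNT(0) and then programs
 * DWC3_DEV_IMOD(0) with this minimum moderation interval.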
*/ - if (!dwc->imod_interval && - DWC3_VER_IS(DWC3, 300A)) + if (dwc3_has_imod((dwc))) dwc->imod_interval = 1; /* Check the maximum_speed parameter */ @@ -2473,7 +2484,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) if (ret) return ret; - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true); dwc3_gadget_resume(dwc); break; case DWC3_GCTL_PRTCAP_HOST: @@ -2481,7 +2492,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) ret = dwc3_core_init_for_resume(dwc); if (ret) return ret; - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, true); break; } /* Restore GUSB2PHYCFG bits that were modified in suspend */ @@ -2510,7 +2521,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) if (ret) return ret; - dwc3_set_prtcap(dwc, dwc->current_dr_role); + dwc3_set_prtcap(dwc, dwc->current_dr_role, true); dwc3_otg_init(dwc); if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) { diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 01e55828de2d7..b91bccaaeae73 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1565,7 +1565,7 @@ struct dwc3_gadget_ep_cmd_params { #define DWC3_HAS_OTG BIT(3) /* prototypes */ -void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode); +void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy); void dwc3_set_mode(struct dwc3 *dwc, u32 mode); u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type); diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c index d76ae676783cf..7977860932b14 100644 --- a/drivers/usb/dwc3/drd.c +++ b/drivers/usb/dwc3/drd.c @@ -173,7 +173,7 @@ void dwc3_otg_init(struct dwc3 *dwc) * block "Initialize GCTL for OTG operation". */ /* GCTL.PrtCapDir=2'b11 */ - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true); /* GUSB2PHYCFG0.SusPHY=0 */ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; @@ -556,7 +556,7 @@ int dwc3_drd_init(struct dwc3 *dwc) dwc3_drd_update(dwc); } else { - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true); /* use OTG block to get ID event */ irq = dwc3_otg_get_irq(dwc); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 994a434947ece..9bd4f7e448392 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -4515,14 +4515,18 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_SIZE(evt->length)); + evt->flags &= ~DWC3_EVENT_PENDING; + /* + * Add an explicit write memory barrier to make sure that the update of + * clearing DWC3_EVENT_PENDING is observed in dwc3_check_event_buf() + */ + wmb(); + if (dwc->imod_interval) { dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); } - /* Keep the clearing of DWC3_EVENT_PENDING at the end */ - evt->flags &= ~DWC3_EVENT_PENDING; - return ret; } diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index cec86c0c6369c..8402a86176f48 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1050,10 +1050,11 @@ static int set_config(struct usb_composite_dev *cdev, else usb_gadget_set_remote_wakeup(gadget, 0); done: - if (power <= USB_SELF_POWER_VBUS_MAX_DRAW) - usb_gadget_set_selfpowered(gadget); - else + if (power > USB_SELF_POWER_VBUS_MAX_DRAW || + (c && 
!(c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))) usb_gadget_clear_selfpowered(gadget); + else + usb_gadget_set_selfpowered(gadget); usb_gadget_vbus_draw(gadget, power); if (result >= 0 && cdev->delayed_status) @@ -2615,7 +2616,10 @@ void composite_suspend(struct usb_gadget *gadget) cdev->suspended = 1; - usb_gadget_set_selfpowered(gadget); + if (cdev->config && + cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER) + usb_gadget_set_selfpowered(gadget); + usb_gadget_vbus_draw(gadget, 2); } @@ -2649,8 +2653,11 @@ void composite_resume(struct usb_gadget *gadget) else maxpower = min(maxpower, 900U); - if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW) + if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW || + !(cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER)) usb_gadget_clear_selfpowered(gadget); + else + usb_gadget_set_selfpowered(gadget); usb_gadget_vbus_draw(gadget, maxpower); } else { diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 4153643c67dce..1f18f15dba277 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -283,7 +283,7 @@ f_midi_complete(struct usb_ep *ep, struct usb_request *req) /* Our transmit completed. See if there's more to go. * f_midi_transmit eats req, don't queue it again. */ req->length = 0; - f_midi_transmit(midi); + queue_work(system_highpri_wq, &midi->work); return; } break; diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 09e2838917e29..f58590bf5e02f 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -1052,8 +1052,8 @@ void gether_suspend(struct gether *link) * There is a transfer in progress. So we trigger a remote * wakeup to inform the host. */ - ether_wakeup_host(dev->port_usb); - return; + if (!ether_wakeup_host(dev->port_usb)) + return; } spin_lock_irqsave(&dev->lock, flags); link->is_suspend = true; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 8d774f19271e6..2fe3a92978fa2 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "xhci.h" #include "xhci-trace.h" @@ -770,9 +771,16 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci) enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci, struct xhci_port *port) { + struct usb_hcd *hcd; void __iomem *base; u32 offset; + /* Don't try and probe this capability for non-Intel hosts */ + hcd = xhci_to_hcd(xhci); + if (!dev_is_pci(hcd->self.controller) || + to_pci_dev(hcd->self.controller)->vendor != PCI_VENDOR_ID_INTEL) + return USB_LINK_UNKNOWN; + base = &xhci->cap_regs->hc_capbase; offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_INTEL_SPR_SHADOW); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index d2900197a49e7..8c26275696df9 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2397,10 +2397,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) page_size = readl(&xhci->op_regs->page_size); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Supported page size register = 0x%x", page_size); - i = ffs(page_size); - if (i < 16) + val = ffs(page_size) - 1; + if (val < 16) xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Supported page size of %iK", (1 << (i+12)) / 1024); + "Supported page size of %iK", (1 << (val + 12)) / 1024); else xhci_warn(xhci, "WARN: no supported page size\n"); /* Use 4K pages, since that's common and the minimum the HC supports */ @@ -2443,7 
+2443,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * and our use of dma addresses in the trb_address_map radix tree needs * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need. */ - if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH) + if (xhci->quirks & XHCI_TRB_OVERFETCH) + /* Buggy HC prefetches beyond segment bounds - allocate dummy space at the end */ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2); else diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index deb3c98c9beaf..1b033c8ce188e 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -28,8 +28,8 @@ #define SPARSE_CNTL_ENABLE 0xC12C /* Device for a quirk */ -#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 -#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 +#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 +#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 0x1100 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 @@ -38,8 +38,10 @@ #define PCI_DEVICE_ID_EJ168 0x7023 #define PCI_DEVICE_ID_EJ188 0x7052 -#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 -#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 +#define PCI_DEVICE_ID_VIA_VL805 0x3483 + +#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 +#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f @@ -424,8 +426,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == 0x3432) xhci->quirks |= XHCI_BROKEN_STREAMS; - if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) + if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == PCI_DEVICE_ID_VIA_VL805) { xhci->quirks |= XHCI_LPM_SUPPORT; + xhci->quirks |= XHCI_TRB_OVERFETCH; + } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) { @@ -473,11 +477,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->device == 0x9202) { xhci->quirks |= XHCI_RESET_ON_RESUME; - xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; + xhci->quirks |= XHCI_TRB_OVERFETCH; } if (pdev->device == 0x9203) - xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; + xhci->quirks |= XHCI_TRB_OVERFETCH; } if (pdev->vendor == PCI_DEVICE_ID_CADENCE && diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 4384b86ea7b66..2fad9563dca40 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2866,6 +2866,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, if (!ep_seg) { if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { + /* this event is unlikely to match any TD, don't skip them all */ + if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID) + return 0; + skip_isoc_td(xhci, td, ep, status); if (!list_empty(&ep_ring->td_list)) continue; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 673179047eb82..71588e4db0e34 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1621,7 +1621,7 @@ struct xhci_hcd { #define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42) #define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43) #define XHCI_RESET_TO_DEFAULT BIT_ULL(44) -#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45) +#define XHCI_TRB_OVERFETCH BIT_ULL(45) #define XHCI_ZHAOXIN_HOST BIT_ULL(46) #define XHCI_WRITE_64_HI_LO BIT_ULL(47) #define XHCI_CDNS_SCTX_QUIRK 
BIT_ULL(48) @@ -1748,11 +1748,20 @@ static inline void xhci_write_64(struct xhci_hcd *xhci, } -/* Link TRB chain should always be set on 0.95 hosts, and AMD 0.96 ISOC rings */ +/* + * Reportedly, some chapters of v0.95 spec said that Link TRB always has its chain bit set. + * Other chapters and later specs say that it should only be set if the link is inside a TD + * which continues from the end of one segment to the next segment. + * + * Some 0.95 hardware was found to misbehave if any link TRB doesn't have the chain bit set. + * + * 0.96 hardware from AMD and NEC was found to ignore unchained isochronous link TRBs when + * "resynchronizing the pipe" after a Missed Service Error. + */ static inline bool xhci_link_chain_quirk(struct xhci_hcd *xhci, enum xhci_ring_type type) { return (xhci->quirks & XHCI_LINK_TRB_QUIRK) || - (type == TYPE_ISOC && (xhci->quirks & XHCI_AMD_0x96_HOST)); + (type == TYPE_ISOC && (xhci->quirks & (XHCI_AMD_0x96_HOST | XHCI_NEC_HOST))); } /* xHCI debugging */ diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c index e7d50e0a16123..aadf98f65c608 100644 --- a/drivers/usb/phy/phy-generic.c +++ b/drivers/usb/phy/phy-generic.c @@ -212,7 +212,7 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop) if (of_property_read_u32(node, "clock-frequency", &clk_rate)) clk_rate = 0; - needs_clk = of_property_read_bool(node, "clocks"); + needs_clk = of_property_present(node, "clocks"); } nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS); diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index edc43f169d493..7324de52d9505 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -312,8 +312,10 @@ static int usbhsc_clk_get(struct device *dev, struct usbhs_priv *priv) priv->clks[1] = of_clk_get(dev_of_node(dev), 1); if (PTR_ERR(priv->clks[1]) == -ENOENT) priv->clks[1] = NULL; - else if (IS_ERR(priv->clks[1])) + else if (IS_ERR(priv->clks[1])) { + clk_put(priv->clks[0]); return PTR_ERR(priv->clks[1]); + } return 0; } @@ -779,6 +781,8 @@ static void usbhs_remove(struct platform_device *pdev) dev_dbg(&pdev->dev, "usb remove\n"); + flush_delayed_work(&priv->notify_hotplug_work); + /* power off */ if (!usbhs_get_dparam(priv, runtime_pwctrl)) usbhsc_power_ctrl(priv, 0); diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 105132ae87acb..e8e5723f54122 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -1094,7 +1094,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) goto usbhs_mod_gadget_probe_err_gpriv; } - gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED); + gpriv->transceiver = devm_usb_get_phy(dev, USB_PHY_TYPE_UNDEFINED); dev_info(dev, "%stransceiver found\n", !IS_ERR(gpriv->transceiver) ? 
"" : "no "); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index c6f17d732b958..236205ce35003 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1079,6 +1079,20 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, /* GMC devices */ { USB_DEVICE(GMC_VID, GMC_Z216C_PID) }, + /* Altera USB Blaster 3 */ + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6022_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6025_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 3) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6029_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 3) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602C_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 3) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 5ee60ba2a73cd..52be47d684ea6 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1612,3 +1612,16 @@ */ #define GMC_VID 0x1cd7 #define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */ + +/* + * Altera USB Blaster 3 (http://www.altera.com). + */ +#define ALTERA_VID 0x09fb +#define ALTERA_UB3_6022_PID 0x6022 +#define ALTERA_UB3_6025_PID 0x6025 +#define ALTERA_UB3_6026_PID 0x6026 +#define ALTERA_UB3_6029_PID 0x6029 +#define ALTERA_UB3_602A_PID 0x602a +#define ALTERA_UB3_602C_PID 0x602c +#define ALTERA_UB3_602D_PID 0x602d +#define ALTERA_UB3_602E_PID 0x602e diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 58bd54e8c483a..5cd26dac2069f 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1368,13 +1368,13 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990A (PCIe) */ .driver_info = RSVD(0) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990A (rmnet) */ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990 (MBIM) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990A (MBIM) */ .driver_info = NCTRL(0) | RSVD(1) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990 (RNDIS) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990A (RNDIS) */ .driver_info = NCTRL(2) | RSVD(3) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990A (ECM) */ .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(3) }, @@ -1388,28 +1388,44 @@ static const struct usb_device_id option_ids[] = { .driver_info = 
RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */ .driver_info = NCTRL(3) | RSVD(4) | RSVD(5) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x30), /* Telit FE990B (rmnet) */ + .driver_info = NCTRL(5) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x30), /* Telit FE990B (MBIM) */ + .driver_info = NCTRL(6) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x30), /* Telit FE990B (RNDIS) */ + .driver_info = NCTRL(6) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x30), /* Telit FE990B (ECM) */ + .driver_info = NCTRL(6) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x60) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c0, 0xff), /* Telit FE910C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c4, 0xff), /* Telit FE910C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff), /* Telit FE910C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x60) }, /* Telit FN990B (rmnet) */ - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x40) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x30), + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x30), /* Telit FN990B (rmnet) */ .driver_info = NCTRL(5) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x60) }, /* Telit FN990B (MBIM) */ - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x40) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x30), + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x30), /* Telit FN990B (MBIM) */ .driver_info = NCTRL(6) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x60) }, /* Telit FN990B (RNDIS) */ - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x40) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x30), + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x30), /* Telit FN990B (RNDIS) */ .driver_info = NCTRL(6) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x60) }, /* Telit FN990B (ECM) */ - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x40) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x30), + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x60) }, + { 
USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x30), /* Telit FN990B (ECM) */ .driver_info = NCTRL(6) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x60) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c index 64f6dd0dc6609..88c50b984e8a3 100644 --- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c +++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c @@ -334,6 +334,11 @@ static int rt1711h_probe(struct i2c_client *client) { int ret; struct rt1711h_chip *chip; + const u16 alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED | + TCPC_ALERT_TX_FAILED | TCPC_ALERT_RX_HARD_RST | + TCPC_ALERT_RX_STATUS | TCPC_ALERT_POWER_STATUS | + TCPC_ALERT_CC_STATUS | TCPC_ALERT_RX_BUF_OVF | + TCPC_ALERT_FAULT; chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); if (!chip) @@ -382,6 +387,12 @@ static int rt1711h_probe(struct i2c_client *client) dev_name(chip->dev), chip); if (ret < 0) return ret; + + /* Enable alert interrupts */ + ret = rt1711h_write16(chip, TCPC_ALERT_MASK, alert_mask); + if (ret < 0) + return ret; + enable_irq_wake(client->irq); return 0; diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 7a3f0f5af38fd..3f2bc13efa486 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -25,7 +25,7 @@ * difficult to estimate the time it takes for the system to process the command * before it is actually passed to the PPM. */ -#define UCSI_TIMEOUT_MS 5000 +#define UCSI_TIMEOUT_MS 10000 /* * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests @@ -1330,7 +1330,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi) mutex_lock(&ucsi->ppm_lock); - ret = ucsi->ops->read_cci(ucsi, &cci); + ret = ucsi->ops->poll_cci(ucsi, &cci); if (ret < 0) goto out; @@ -1348,7 +1348,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi) tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS); do { - ret = ucsi->ops->read_cci(ucsi, &cci); + ret = ucsi->ops->poll_cci(ucsi, &cci); if (ret < 0) goto out; if (cci & UCSI_CCI_COMMAND_COMPLETE) @@ -1377,7 +1377,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi) /* Give the PPM time to process a reset before reading CCI */ msleep(20); - ret = ucsi->ops->read_cci(ucsi, &cci); + ret = ucsi->ops->poll_cci(ucsi, &cci); if (ret) goto out; @@ -1809,11 +1809,11 @@ static int ucsi_init(struct ucsi *ucsi) err_unregister: for (con = connector; con->port; con++) { + if (con->wq) + destroy_workqueue(con->wq); ucsi_unregister_partner(con); ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON); ucsi_unregister_port_psy(con); - if (con->wq) - destroy_workqueue(con->wq); usb_power_delivery_unregister_capabilities(con->port_sink_caps); con->port_sink_caps = NULL; @@ -1913,8 +1913,8 @@ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops) struct ucsi *ucsi; if (!ops || - !ops->read_version || !ops->read_cci || !ops->read_message_in || - !ops->sync_control || !ops->async_control) + !ops->read_version || !ops->read_cci || !ops->poll_cci || + !ops->read_message_in || !ops->sync_control || !ops->async_control) return ERR_PTR(-EINVAL); ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL); @@ -1997,10 +1997,6 @@ void ucsi_unregister(struct ucsi *ucsi) for (i = 0; i < ucsi->cap.num_connectors; i++) { 
cancel_work_sync(&ucsi->connector[i].work); - ucsi_unregister_partner(&ucsi->connector[i]); - ucsi_unregister_altmodes(&ucsi->connector[i], - UCSI_RECIPIENT_CON); - ucsi_unregister_port_psy(&ucsi->connector[i]); if (ucsi->connector[i].wq) { struct ucsi_work *uwork; @@ -2016,6 +2012,11 @@ void ucsi_unregister(struct ucsi *ucsi) destroy_workqueue(ucsi->connector[i].wq); } + ucsi_unregister_partner(&ucsi->connector[i]); + ucsi_unregister_altmodes(&ucsi->connector[i], + UCSI_RECIPIENT_CON); + ucsi_unregister_port_psy(&ucsi->connector[i]); + usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_sink_caps); ucsi->connector[i].port_sink_caps = NULL; usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_source_caps); diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index 1cf5aad4c23a9..a333006d3496a 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -60,6 +60,7 @@ struct dentry; * struct ucsi_operations - UCSI I/O operations * @read_version: Read implemented UCSI version * @read_cci: Read CCI register + * @poll_cci: Read CCI register while polling with notifications disabled * @read_message_in: Read message data from UCSI * @sync_control: Blocking control operation * @async_control: Non-blocking control operation @@ -74,6 +75,7 @@ struct dentry; struct ucsi_operations { int (*read_version)(struct ucsi *ucsi, u16 *version); int (*read_cci)(struct ucsi *ucsi, u32 *cci); + int (*poll_cci)(struct ucsi *ucsi, u32 *cci); int (*read_message_in)(struct ucsi *ucsi, void *val, size_t val_len); int (*sync_control)(struct ucsi *ucsi, u64 command); int (*async_control)(struct ucsi *ucsi, u64 command); diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index accf15ff1306a..8de2961718cdd 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -59,19 +59,24 @@ static int ucsi_acpi_read_version(struct ucsi *ucsi, u16 *version) static int ucsi_acpi_read_cci(struct ucsi *ucsi, u32 *cci) { struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi); - int ret; - - if (UCSI_COMMAND(ua->cmd) == UCSI_PPM_RESET) { - ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ); - if (ret) - return ret; - } memcpy(cci, ua->base + UCSI_CCI, sizeof(*cci)); return 0; } +static int ucsi_acpi_poll_cci(struct ucsi *ucsi, u32 *cci) +{ + struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi); + int ret; + + ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ); + if (ret) + return ret; + + return ucsi_acpi_read_cci(ucsi, cci); +} + static int ucsi_acpi_read_message_in(struct ucsi *ucsi, void *val, size_t val_len) { struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi); @@ -94,6 +99,7 @@ static int ucsi_acpi_async_control(struct ucsi *ucsi, u64 command) static const struct ucsi_operations ucsi_acpi_ops = { .read_version = ucsi_acpi_read_version, .read_cci = ucsi_acpi_read_cci, + .poll_cci = ucsi_acpi_poll_cci, .read_message_in = ucsi_acpi_read_message_in, .sync_control = ucsi_sync_control_common, .async_control = ucsi_acpi_async_control @@ -145,6 +151,7 @@ static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command) static const struct ucsi_operations ucsi_gram_ops = { .read_version = ucsi_acpi_read_version, .read_cci = ucsi_acpi_read_cci, + .poll_cci = ucsi_acpi_poll_cci, .read_message_in = ucsi_gram_read_message_in, .sync_control = ucsi_gram_sync_control, .async_control = ucsi_acpi_async_control diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index 740171f24ef9f..511dd1b224ae5 100644 --- 
a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -664,6 +664,7 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command) static const struct ucsi_operations ucsi_ccg_ops = { .read_version = ucsi_ccg_read_version, .read_cci = ucsi_ccg_read_cci, + .poll_cci = ucsi_ccg_read_cci, .read_message_in = ucsi_ccg_read_message_in, .sync_control = ucsi_ccg_sync_control, .async_control = ucsi_ccg_async_control, @@ -1432,11 +1433,10 @@ static int ucsi_ccg_probe(struct i2c_client *client) uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA; else if (!strcmp(fw_name, "nvidia,gpu")) uc->fw_build = CCG_FW_BUILD_NVIDIA; + if (!uc->fw_build) + dev_err(uc->dev, "failed to get FW build information\n"); } - if (!uc->fw_build) - dev_err(uc->dev, "failed to get FW build information\n"); - /* reset ccg device and initialize ucsi */ status = ucsi_ccg_init(uc); if (status < 0) { diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c index 9b6cb76e63280..75c0e54c37fa0 100644 --- a/drivers/usb/typec/ucsi/ucsi_glink.c +++ b/drivers/usb/typec/ucsi/ucsi_glink.c @@ -201,6 +201,7 @@ static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con) static const struct ucsi_operations pmic_glink_ucsi_ops = { .read_version = pmic_glink_ucsi_read_version, .read_cci = pmic_glink_ucsi_read_cci, + .poll_cci = pmic_glink_ucsi_read_cci, .read_message_in = pmic_glink_ucsi_read_message_in, .sync_control = ucsi_sync_control_common, .async_control = pmic_glink_ucsi_async_control, diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c index 6923fad31d795..57ef7d83a4121 100644 --- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c +++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c @@ -424,6 +424,7 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data) static const struct ucsi_operations ucsi_stm32g0_ops = { .read_version = ucsi_stm32g0_read_version, .read_cci = ucsi_stm32g0_read_cci, + .poll_cci = ucsi_stm32g0_read_cci, .read_message_in = ucsi_stm32g0_read_message_in, .sync_control = ucsi_sync_control_common, .async_control = ucsi_stm32g0_async_control, diff --git a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c index f3a5e24ea84d5..40e5da4fd2a45 100644 --- a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c +++ b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c @@ -74,6 +74,7 @@ static int yoga_c630_ucsi_async_control(struct ucsi *ucsi, u64 command) const struct ucsi_operations yoga_c630_ucsi_ops = { .read_version = yoga_c630_ucsi_read_version, .read_cci = yoga_c630_ucsi_read_cci, + .poll_cci = yoga_c630_ucsi_read_cci, .read_message_in = yoga_c630_ucsi_read_message_in, .sync_control = ucsi_sync_control_common, .async_control = yoga_c630_ucsi_async_control, diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index 8455f08f5d406..61424342c0964 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -190,9 +190,12 @@ static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, v klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start)); preve = dmr->end; } else { + u64 bcount = min_t(u64, dmr->start - preve, MAX_KLM_SIZE); + klm->key = cpu_to_be32(mvdev->res.null_mkey); - klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve)); - preve = dmr->start; + klm->bcount = cpu_to_be32(klm_bcount(bcount)); + preve += bcount; + goto again; } } diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 718fa4e0b31ec..7aeff435c1d87 100644 --- a/drivers/vhost/scsi.c +++ 
b/drivers/vhost/scsi.c @@ -1699,14 +1699,19 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, } } + if (vs->vs_tpg) { + pr_err("vhost-scsi endpoint already set for %s.\n", + vs->vs_vhost_wwpn); + ret = -EEXIST; + goto out; + } + len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET; vs_tpg = kzalloc(len, GFP_KERNEL); if (!vs_tpg) { ret = -ENOMEM; goto out; } - if (vs->vs_tpg) - memcpy(vs_tpg, vs->vs_tpg, len); mutex_lock(&vhost_scsi_mutex); list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) { @@ -1722,12 +1727,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, tv_tport = tpg->tport; if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { - if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) { - mutex_unlock(&tpg->tv_tpg_mutex); - mutex_unlock(&vhost_scsi_mutex); - ret = -EEXIST; - goto undepend; - } /* * In order to ensure individual vhost-scsi configfs * groups cannot be removed while in use by vhost ioctl, @@ -1774,15 +1773,15 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, } ret = 0; } else { - ret = -EEXIST; + ret = -ENODEV; + goto free_tpg; } /* - * Act as synchronize_rcu to make sure access to - * old vs->vs_tpg is finished. + * Act as synchronize_rcu to make sure requests after this point + * see a fully setup device. */ vhost_scsi_flush(vs); - kfree(vs->vs_tpg); vs->vs_tpg = vs_tpg; goto out; @@ -1802,6 +1801,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, target_undepend_item(&tpg->se_tpg.tpg_group.cg_item); } } +free_tpg: kfree(vs_tpg); out: mutex_unlock(&vs->dev.mutex); @@ -1904,6 +1904,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, vhost_scsi_flush(vs); kfree(vs->vs_tpg); vs->vs_tpg = NULL; + memset(vs->vs_vhost_wwpn, 0, sizeof(vs->vs_vhost_wwpn)); WARN_ON(vs->vs_events_nr); mutex_unlock(&vs->dev.mutex); return 0; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 9ac25d08f473e..63612faeab727 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -666,7 +666,7 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev) vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed, worker, name); - if (!vtsk) + if (IS_ERR(vtsk)) goto free_worker; mutex_init(&worker->mutex); diff --git a/drivers/video/backlight/led_bl.c b/drivers/video/backlight/led_bl.c index c7aefcd6e4e3e..7826006018457 100644 --- a/drivers/video/backlight/led_bl.c +++ b/drivers/video/backlight/led_bl.c @@ -229,8 +229,11 @@ static void led_bl_remove(struct platform_device *pdev) backlight_device_unregister(bl); led_bl_power_off(priv); - for (i = 0; i < priv->nb_leds; i++) + for (i = 0; i < priv->nb_leds; i++) { + mutex_lock(&priv->leds[i]->led_access); led_sysfs_enable(priv->leds[i]); + mutex_unlock(&priv->leds[i]->led_access); + } } static const struct of_device_id led_bl_of_match[] = { diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index bc31db6ef7d26..3e9f2bda67027 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -24,7 +24,7 @@ config VGA_CONSOLE Say Y. 
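Editor's note on the vhost_worker_create() hunk above: the failure check switches from a NULL test to IS_ERR() because vhost_task_create() reports failure through an ERR_PTR-encoded pointer rather than NULL, so the old `if (!vtsk)` could never fire. The following is a minimal, self-contained userspace sketch of that encoding convention, re-implementing stand-ins for the helpers the kernel provides in <linux/err.h>; it is illustrative only and not part of the patch.

    #include <stdio.h>
    #include <errno.h>

    /* Illustrative stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers. */
    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Hypothetical constructor: on failure it returns an encoded errno, never NULL. */
    static void *create_task(int fail)
    {
        return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
    }

    int main(void)
    {
        void *vtsk = create_task(1);

        if (!vtsk)            /* wrong: ERR_PTR() values are non-NULL, so this never fires */
            printf("create_task failed (NULL check)\n");
        if (IS_ERR(vtsk))     /* right: detect the encoded error and decode the errno */
            printf("create_task failed: %ld\n", PTR_ERR(vtsk));
        return 0;
    }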
config MDA_CONSOLE - depends on !M68K && !PARISC && ISA + depends on VGA_CONSOLE && ISA tristate "MDA text console (dual-headed)" help Say Y here if you have an old MDA or monochrome Hercules graphics @@ -52,7 +52,7 @@ config DUMMY_CONSOLE config DUMMY_CONSOLE_COLUMNS int "Initial number of console screen columns" - depends on DUMMY_CONSOLE && !ARCH_FOOTBRIDGE + depends on DUMMY_CONSOLE && !(ARCH_FOOTBRIDGE && VGA_CONSOLE) default 160 if PARISC default 80 help @@ -62,7 +62,7 @@ config DUMMY_CONSOLE_COLUMNS config DUMMY_CONSOLE_ROWS int "Initial number of console screen rows" - depends on DUMMY_CONSOLE && !ARCH_FOOTBRIDGE + depends on DUMMY_CONSOLE && !(ARCH_FOOTBRIDGE && VGA_CONSOLE) default 64 if PARISC default 30 if ARM default 25 diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c index 840f221607635..6251a6b07b3a1 100644 --- a/drivers/video/fbdev/au1100fb.c +++ b/drivers/video/fbdev/au1100fb.c @@ -137,13 +137,15 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi) */ int au1100fb_setmode(struct au1100fb_device *fbdev) { - struct fb_info *info = &fbdev->info; + struct fb_info *info; u32 words; int index; if (!fbdev) return -EINVAL; + info = &fbdev->info; + /* Update var-dependent FB info */ if (panel_is_active(fbdev->panel) || panel_is_color(fbdev->panel)) { if (info->var.bits_per_pixel <= 8) { diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index 7fdb5edd7e2e8..75338ffc703fb 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -282,6 +282,8 @@ static uint screen_depth; static uint screen_fb_size; static uint dio_fb_size; /* FB size for deferred IO */ +static void hvfb_putmem(struct fb_info *info); + /* Send message to Hyper-V host */ static inline int synthvid_send(struct hv_device *hdev, struct synthvid_msg *msg) @@ -862,6 +864,17 @@ static void hvfb_ops_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, hvfb_ondemand_refresh_throttle(par, x, y, width, height); } +/* + * fb_ops.fb_destroy is called by the last put_fb_info() call at the end + * of unregister_framebuffer() or fb_release(). Do any cleanup related to + * framebuffer here. + */ +static void hvfb_destroy(struct fb_info *info) +{ + hvfb_putmem(info); + framebuffer_release(info); +} + /* * TODO: GEN1 codepaths allocate from system or DMA-able memory. Fix the * driver to use the _SYSMEM_ or _DMAMEM_ helpers in these cases. 
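Editor's note on the au1100fb_setmode() hunk above: it is the classic dereference-before-check pattern, where `info = &fbdev->info` was computed in the initializer before the `if (!fbdev)` guard could run; the fix simply moves the assignment after the check. A small sketch of the two shapes, using a hypothetical struct for illustration only:

    #include <stddef.h>

    struct fb_dev {
        int info;
    };

    /*
     * Buggy shape: the member address is taken in the initializer, before the
     * NULL check runs.  With a NULL pointer this is undefined behaviour, and a
     * compiler may treat the later "if (!fbdev)" test as provably dead code.
     */
    static int setmode_buggy(struct fb_dev *fbdev)
    {
        int *info = &fbdev->info;

        if (!fbdev)
            return -1;
        return *info;
    }

    /* Fixed shape: validate the pointer first, only then touch its members. */
    static int setmode_fixed(struct fb_dev *fbdev)
    {
        int *info;

        if (!fbdev)
            return -1;

        info = &fbdev->info;
        return *info;
    }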
@@ -877,6 +890,7 @@ static const struct fb_ops hvfb_ops = { .fb_set_par = hvfb_set_par, .fb_setcolreg = hvfb_setcolreg, .fb_blank = hvfb_blank, + .fb_destroy = hvfb_destroy, }; /* Get options from kernel paramenter "video=" */ @@ -952,7 +966,7 @@ static phys_addr_t hvfb_get_phymem(struct hv_device *hdev, } /* Release contiguous physical memory */ -static void hvfb_release_phymem(struct hv_device *hdev, +static void hvfb_release_phymem(struct device *device, phys_addr_t paddr, unsigned int size) { unsigned int order = get_order(size); @@ -960,7 +974,7 @@ static void hvfb_release_phymem(struct hv_device *hdev, if (order <= MAX_PAGE_ORDER) __free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order); else - dma_free_coherent(&hdev->device, + dma_free_coherent(device, round_up(size, PAGE_SIZE), phys_to_virt(paddr), paddr); @@ -989,6 +1003,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) base = pci_resource_start(pdev, 0); size = pci_resource_len(pdev, 0); + aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME); /* * For Gen 1 VM, we can directly use the contiguous memory @@ -1010,11 +1025,21 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) goto getmem_done; } pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n"); + } else { + aperture_remove_all_conflicting_devices(KBUILD_MODNAME); } /* - * Cannot use the contiguous physical memory. - * Allocate mmio space for framebuffer. + * Cannot use contiguous physical memory, so allocate MMIO space for + * the framebuffer. At this point in the function, conflicting devices + * that might have claimed the framebuffer MMIO space based on + * screen_info.lfb_base must have already been removed so that + * vmbus_allocate_mmio() does not allocate different MMIO space. If the + * kdump image were to be loaded using kexec_file_load(), the + * framebuffer location in the kdump image would be set from + * screen_info.lfb_base at the time that kdump is enabled. If the + * framebuffer has moved elsewhere, this could be the wrong location, + * causing kdump to hang when efifb (for example) loads. 
*/ dio_fb_size = screen_width * screen_height * screen_depth / 8; @@ -1051,11 +1076,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) info->screen_size = dio_fb_size; getmem_done: - if (base && size) - aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME); - else - aperture_remove_all_conflicting_devices(KBUILD_MODNAME); - if (!gen2vm) pci_dev_put(pdev); @@ -1074,16 +1094,16 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) } /* Release the framebuffer */ -static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info) +static void hvfb_putmem(struct fb_info *info) { struct hvfb_par *par = info->par; if (par->need_docopy) { vfree(par->dio_vp); - iounmap(info->screen_base); + iounmap(par->mmio_vp); vmbus_free_mmio(par->mem->start, screen_fb_size); } else { - hvfb_release_phymem(hdev, info->fix.smem_start, + hvfb_release_phymem(info->device, info->fix.smem_start, screen_fb_size); } @@ -1172,7 +1192,7 @@ static int hvfb_probe(struct hv_device *hdev, if (ret) goto error; - ret = register_framebuffer(info); + ret = devm_register_framebuffer(&hdev->device, info); if (ret) { pr_err("Unable to register framebuffer\n"); goto error; @@ -1197,7 +1217,7 @@ static int hvfb_probe(struct hv_device *hdev, error: fb_deferred_io_cleanup(info); - hvfb_putmem(hdev, info); + hvfb_putmem(info); error2: vmbus_close(hdev->channel); error1: @@ -1220,14 +1240,10 @@ static void hvfb_remove(struct hv_device *hdev) fb_deferred_io_cleanup(info); - unregister_framebuffer(info); cancel_delayed_work_sync(&par->dwork); vmbus_close(hdev->channel); hv_set_drvdata(hdev, NULL); - - hvfb_putmem(hdev, info); - framebuffer_release(info); } static int hvfb_suspend(struct hv_device *hdev) diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c index 5832485ab998c..c29b6236952b3 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c @@ -2749,9 +2749,13 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi, bool mem_to_mem) { int r; - enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane); + enum omap_overlay_caps caps; enum omap_channel channel; + if (plane == OMAP_DSS_WB) + return -EINVAL; + + caps = dss_feat_get_overlay_caps(plane); channel = dispc_ovl_get_channel_out(plane); DSSDBG("dispc_ovl_setup %d, pa %pad, pa_uv %pad, sw %d, %d,%d, %dx%d ->" diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c index 86ecbb2d86db8..2eb27ebf822e8 100644 --- a/drivers/video/fbdev/sm501fb.c +++ b/drivers/video/fbdev/sm501fb.c @@ -326,6 +326,13 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var, if (var->xres_virtual > 4096 || var->yres_virtual > 2048) return -EINVAL; + /* geometry sanity checks */ + if (var->xres + var->xoffset > var->xres_virtual) + return -EINVAL; + + if (var->yres + var->yoffset > var->yres_virtual) + return -EINVAL; + /* can cope with 8,16 or 32bpp */ if (var->bits_per_pixel <= 8) diff --git a/drivers/virt/acrn/hsm.c b/drivers/virt/acrn/hsm.c index c24036c4e51ec..e4e196abdaac9 100644 --- a/drivers/virt/acrn/hsm.c +++ b/drivers/virt/acrn/hsm.c @@ -49,7 +49,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr) switch (cmd & PMCMD_TYPE_MASK) { case ACRN_PMCMD_GET_PX_CNT: case ACRN_PMCMD_GET_CX_CNT: - pm_info = kmalloc(sizeof(u64), GFP_KERNEL); + pm_info = kzalloc(sizeof(u64), GFP_KERNEL); if (!pm_info) return -ENOMEM; @@ -64,7 +64,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr) 
kfree(pm_info); break; case ACRN_PMCMD_GET_PX_DATA: - px_data = kmalloc(sizeof(*px_data), GFP_KERNEL); + px_data = kzalloc(sizeof(*px_data), GFP_KERNEL); if (!px_data) return -ENOMEM; @@ -79,7 +79,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr) kfree(px_data); break; case ACRN_PMCMD_GET_CX_DATA: - cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL); + cx_data = kzalloc(sizeof(*cx_data), GFP_KERNEL); if (!cx_data) return -ENOMEM; diff --git a/drivers/w1/masters/w1-uart.c b/drivers/w1/masters/w1-uart.c index a31782e56ba75..c87eea3478067 100644 --- a/drivers/w1/masters/w1-uart.c +++ b/drivers/w1/masters/w1-uart.c @@ -372,11 +372,11 @@ static int w1_uart_probe(struct serdev_device *serdev) init_completion(&w1dev->rx_byte_received); mutex_init(&w1dev->rx_mutex); + serdev_device_set_drvdata(serdev, w1dev); + serdev_device_set_client_ops(serdev, &w1_uart_serdev_ops); ret = w1_uart_serdev_open(w1dev); if (ret < 0) return ret; - serdev_device_set_drvdata(serdev, w1dev); - serdev_device_set_client_ops(serdev, &w1_uart_serdev_ops); return w1_add_master_device(&w1dev->bus); } diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 528395133b4f8..4bd31242bd773 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -675,7 +675,7 @@ void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages) } EXPORT_SYMBOL(xen_free_ballooned_pages); -static void __init balloon_add_regions(void) +static int __init balloon_add_regions(void) { unsigned long start_pfn, pages; unsigned long pfn, extra_pfn_end; @@ -698,26 +698,38 @@ static void __init balloon_add_regions(void) for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) balloon_append(pfn_to_page(pfn)); - balloon_stats.total_pages += extra_pfn_end - start_pfn; + /* + * Extra regions are accounted for in the physmap, but need + * decreasing from current_pages to balloon down the initial + * allocation, because they are already accounted for in + * total_pages. + */ + if (extra_pfn_end - start_pfn >= balloon_stats.current_pages) { + WARN(1, "Extra pages underflow current target"); + return -ERANGE; + } + balloon_stats.current_pages -= extra_pfn_end - start_pfn; } + + return 0; } static int __init balloon_init(void) { struct task_struct *task; + int rc; if (!xen_domain()) return -ENODEV; pr_info("Initialising balloon driver\n"); -#ifdef CONFIG_XEN_PV - balloon_stats.current_pages = xen_pv_domain() - ? 
min(xen_start_info->nr_pages - xen_released_pages, max_pfn) - : get_num_physpages(); -#else - balloon_stats.current_pages = get_num_physpages(); -#endif + if (xen_released_pages >= get_num_physpages()) { + WARN(1, "Released pages underflow current target"); + return -ERANGE; + } + + balloon_stats.current_pages = get_num_physpages() - xen_released_pages; balloon_stats.target_pages = balloon_stats.current_pages; balloon_stats.balloon_low = 0; balloon_stats.balloon_high = 0; @@ -734,7 +746,9 @@ static int __init balloon_init(void) register_sysctl_init("xen/balloon", balloon_table); #endif - balloon_add_regions(); + rc = balloon_add_regions(); + if (rc) + return rc; task = kthread_run(balloon_thread, NULL, "xen-balloon"); if (IS_ERR(task)) { diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 26c62e0d34e98..1f65795cf5d7a 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -113,7 +113,7 @@ static struct io_tlb_pool *xen_swiotlb_find_pool(struct device *dev, } #ifdef CONFIG_X86 -int xen_swiotlb_fixup(void *buf, unsigned long nslabs) +int __init xen_swiotlb_fixup(void *buf, unsigned long nslabs) { int rc; unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT); diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c index b799bc759c15f..088b7f02c3586 100644 --- a/drivers/xen/xenfs/xensyms.c +++ b/drivers/xen/xenfs/xensyms.c @@ -48,7 +48,7 @@ static int xensyms_next_sym(struct xensyms *xs) return -ENOMEM; set_xen_guest_handle(symdata->name, xs->name); - symdata->symnum--; /* Rewind */ + symdata->symnum = symnum; /* Rewind */ ret = HYPERVISOR_platform_op(&xs->op); if (ret < 0) @@ -78,7 +78,7 @@ static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos) { struct xensyms *xs = m->private; - xs->op.u.symdata.symnum = ++(*pos); + *pos = xs->op.u.symdata.symnum; if (xensyms_next_sym(xs)) return NULL; diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index 143ac03b7425c..3397939fd2d5a 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -407,8 +407,8 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap, err); goto error; } - v9fs_fid_add(dentry, &fid); v9fs_set_create_acl(inode, fid, dacl, pacl); + v9fs_fid_add(dentry, &fid); d_instantiate(dentry, inode); err = 0; inc_nlink(dir); diff --git a/fs/Kconfig b/fs/Kconfig index aae170fc27952..3117304676331 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -369,6 +369,7 @@ config GRACE_PERIOD config LOCKD tristate depends on FILE_LOCKING + select CRC32 select GRACE_PERIOD config LOCKD_V4 diff --git a/fs/affs/file.c b/fs/affs/file.c index a5a861dd52230..7a71018e3f675 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -596,7 +596,7 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize) BUG_ON(tmp > bsize); AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); - AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); + AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp); affs_fix_checksum(sb, bh); bh->b_state &= ~(1UL << BH_New); @@ -724,7 +724,8 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, tmp = min(bsize - boff, to - from); BUG_ON(boff + tmp > bsize || tmp > bsize); memcpy(AFFS_DATA(bh) + boff, data + from, tmp); - be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp); + AFFS_DATA_HEAD(bh)->size = cpu_to_be32( + max(boff + tmp, be32_to_cpu(AFFS_DATA_HEAD(bh)->size))); affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); written += tmp; 
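Editor's note on the affs_write_end_ofs() hunk above: instead of unconditionally adding `tmp` to the stored block size on every write, the size is now kept as a high-water mark (`max(boff + tmp, old size)`), so rewriting bytes that already exist inside an OFS data block no longer inflates the recorded size. A small, self-contained sketch of the difference, with hypothetical helper names:

    #include <stdio.h>

    static unsigned int size_accumulate(unsigned int old_size, unsigned int boff,
                                        unsigned int tmp)
    {
        (void)boff;            /* offset is ignored by the old accumulation */
        /* Old behaviour: every write grows the size, even for overwrites. */
        return old_size + tmp;
    }

    static unsigned int size_high_water(unsigned int old_size, unsigned int boff,
                                        unsigned int tmp)
    {
        /* New behaviour: the size only tracks the furthest byte ever written. */
        unsigned int end = boff + tmp;

        return end > old_size ? end : old_size;
    }

    int main(void)
    {
        /* Block already holds 100 bytes; rewrite 50 bytes at offset 20. */
        printf("accumulate: %u\n", size_accumulate(100, 20, 50));  /* 150 - overcounts */
        printf("high-water: %u\n", size_high_water(100, 20, 50));  /* 100 - correct */
        return 0;
    }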
@@ -746,7 +747,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, if (buffer_new(bh)) { AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); - AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); + AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize); AFFS_DATA_HEAD(bh)->next = 0; bh->b_state &= ~(1UL << BH_New); @@ -780,7 +781,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, if (buffer_new(bh)) { AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); - AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); + AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp); AFFS_DATA_HEAD(bh)->next = 0; bh->b_state &= ~(1UL << BH_New); diff --git a/fs/afs/server.c b/fs/afs/server.c index 038f9d0ae3af8..4504e16b458cc 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -163,6 +163,8 @@ static struct afs_server *afs_install_server(struct afs_cell *cell, rb_insert_color(&server->uuid_rb, &net->fs_servers); hlist_add_head_rcu(&server->proc_link, &net->fs_proc); + afs_get_cell(cell, afs_cell_trace_get_server); + added_dup: write_seqlock(&net->fs_addr_lock); estate = rcu_dereference_protected(server->endpoint_state, @@ -442,6 +444,7 @@ static void afs_server_rcu(struct rcu_head *rcu) atomic_read(&server->active), afs_server_trace_free); afs_put_endpoint_state(rcu_access_pointer(server->endpoint_state), afs_estate_trace_put_server); + afs_put_cell(server->cell, afs_cell_trace_put_server); kfree(server); } diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c index 7e7e567a7f8a2..d20cd902ef949 100644 --- a/fs/afs/server_list.c +++ b/fs/afs/server_list.c @@ -97,8 +97,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume, break; if (j < slist->nr_servers) { if (slist->servers[j].server == server) { - afs_put_server(volume->cell->net, server, - afs_server_trace_put_slist_isort); + afs_unuse_server(volume->cell->net, server, + afs_server_trace_put_slist_isort); continue; } diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c index 405cf08bda347..e599d5ac6e4d2 100644 --- a/fs/bcachefs/fs-ioctl.c +++ b/fs/bcachefs/fs-ioctl.c @@ -520,10 +520,12 @@ static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp, ret = -ENOENT; goto err; } - ret = __bch2_unlink(dir, victim, true); + + ret = inode_permission(file_mnt_idmap(filp), d_inode(victim), MAY_WRITE) ?: + __bch2_unlink(dir, victim, true); if (!ret) { fsnotify_rmdir(dir, victim); - d_delete(victim); + d_invalidate(victim); } err: inode_unlock(dir); diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 90aef2627ca27..40332ab62f101 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -545,8 +545,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, * subpage::readers and to unlock the page. 
*/ if (fs_info->sectorsize < PAGE_SIZE) - btrfs_subpage_start_reader(fs_info, folio, cur, - add_size); + btrfs_folio_set_lock(fs_info, folio, cur, add_size); folio_put(folio); cur += add_size; } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 563f106774e59..19e5f8eaae772 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -4274,6 +4274,18 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) */ btrfs_flush_workqueue(fs_info->delalloc_workers); + /* + * When finishing a compressed write bio we schedule a work queue item + * to finish an ordered extent - btrfs_finish_compressed_write_work() + * calls btrfs_finish_ordered_extent() which in turns does a call to + * btrfs_queue_ordered_fn(), and that queues the ordered extent + * completion either in the endio_write_workers work queue or in the + * fs_info->endio_freespace_worker work queue. We flush those queues + * below, so before we flush them we must flush this queue for the + * workers of compressed writes. + */ + flush_workqueue(fs_info->compressed_write_workers); + /* * After we parked the cleaner kthread, ordered extents may have * completed and created new delayed iputs. If one of the async reclaim diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f3e93ba7ec97f..4ceffbef32987 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2897,7 +2897,15 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) block_group->length, &trimmed); + /* + * Not strictly necessary to lock, as the block_group should be + * read-only from btrfs_delete_unused_bgs(). + */ + ASSERT(block_group->ro); + spin_lock(&fs_info->unused_bgs_lock); list_del_init(&block_group->bg_list); + spin_unlock(&fs_info->unused_bgs_lock); + btrfs_unfreeze_block_group(block_group); btrfs_put_block_group(block_group); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index fe08c983d5bb4..6551fb003eed2 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -190,7 +190,7 @@ static void process_one_folio(struct btrfs_fs_info *fs_info, btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len); if (folio != locked_folio && (page_ops & PAGE_UNLOCK)) - btrfs_folio_end_writer_lock(fs_info, folio, start, len); + btrfs_folio_end_lock(fs_info, folio, start, len); } static void __process_folios_contig(struct address_space *mapping, @@ -276,7 +276,7 @@ static noinline int lock_delalloc_folios(struct inode *inode, range_start = max_t(u64, folio_pos(folio), start); range_len = min_t(u64, folio_pos(folio) + folio_size(folio), end + 1) - range_start; - btrfs_folio_set_writer_lock(fs_info, folio, range_start, range_len); + btrfs_folio_set_lock(fs_info, folio, range_start, range_len); processed_end = range_start + range_len - 1; } @@ -438,7 +438,7 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 le if (!btrfs_is_subpage(fs_info, folio->mapping)) folio_unlock(folio); else - btrfs_subpage_end_reader(fs_info, folio, start, len); + btrfs_folio_end_lock(fs_info, folio, start, len); } /* @@ -495,7 +495,7 @@ static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio) return; ASSERT(folio_test_private(folio)); - btrfs_subpage_start_reader(fs_info, folio, folio_pos(folio), PAGE_SIZE); + btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE); } /* @@ -526,8 +526,6 @@ static void end_bbio_data_read(struct btrfs_bio *bbio) u64 end; u32 len; - /* For now only order 0 folios are supported for data. 
*/ - ASSERT(folio_order(folio) == 0); btrfs_debug(fs_info, "%s: bi_sector=%llu, err=%d, mirror=%u", __func__, bio->bi_iter.bi_sector, bio->bi_status, @@ -555,7 +553,6 @@ static void end_bbio_data_read(struct btrfs_bio *bbio) if (likely(uptodate)) { loff_t i_size = i_size_read(inode); - pgoff_t end_index = i_size >> folio_shift(folio); /* * Zero out the remaining part if this range straddles @@ -564,9 +561,11 @@ static void end_bbio_data_read(struct btrfs_bio *bbio) * Here we should only zero the range inside the folio, * not touch anything else. * - * NOTE: i_size is exclusive while end is inclusive. + * NOTE: i_size is exclusive while end is inclusive and + * folio_contains() takes PAGE_SIZE units. */ - if (folio_index(folio) == end_index && i_size <= end) { + if (folio_contains(folio, i_size >> PAGE_SHIFT) && + i_size <= end) { u32 zero_start = max(offset_in_folio(folio, i_size), offset_in_folio(folio, start)); u32 zero_len = offset_in_folio(folio, end) + 1 - @@ -960,7 +959,7 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached, return ret; } - if (folio->index == last_byte >> folio_shift(folio)) { + if (folio_contains(folio, last_byte >> PAGE_SHIFT)) { size_t zero_offset = offset_in_folio(folio, last_byte); if (zero_offset) { @@ -1105,15 +1104,59 @@ int btrfs_read_folio(struct file *file, struct folio *folio) return ret; } +static void set_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bitmap, + u64 start, u32 len) +{ + struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); + const u64 folio_start = folio_pos(folio); + unsigned int start_bit; + unsigned int nbits; + + ASSERT(start >= folio_start && start + len <= folio_start + PAGE_SIZE); + start_bit = (start - folio_start) >> fs_info->sectorsize_bits; + nbits = len >> fs_info->sectorsize_bits; + ASSERT(bitmap_test_range_all_zero(delalloc_bitmap, start_bit, nbits)); + bitmap_set(delalloc_bitmap, start_bit, nbits); +} + +static bool find_next_delalloc_bitmap(struct folio *folio, + unsigned long *delalloc_bitmap, u64 start, + u64 *found_start, u32 *found_len) +{ + struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); + const u64 folio_start = folio_pos(folio); + const unsigned int bitmap_size = fs_info->sectors_per_page; + unsigned int start_bit; + unsigned int first_zero; + unsigned int first_set; + + ASSERT(start >= folio_start && start < folio_start + PAGE_SIZE); + + start_bit = (start - folio_start) >> fs_info->sectorsize_bits; + first_set = find_next_bit(delalloc_bitmap, bitmap_size, start_bit); + if (first_set >= bitmap_size) + return false; + + *found_start = folio_start + (first_set << fs_info->sectorsize_bits); + first_zero = find_next_zero_bit(delalloc_bitmap, bitmap_size, first_set); + *found_len = (first_zero - first_set) << fs_info->sectorsize_bits; + return true; +} + /* - * helper for extent_writepage(), doing all of the delayed allocation setup. + * Do all of the delayed allocation setup. + * + * Return >0 if all the dirty blocks are submitted async (compression) or inlined. + * The @folio should no longer be touched (treat it as already unlocked). * - * This returns 1 if btrfs_run_delalloc_range function did all the work required - * to write the page (copy into inline extent). In this case the IO has - * been started and the page is already unlocked. + * Return 0 if there is still dirty block that needs to be submitted through + * extent_writepage_io(). 
+ * bio_ctrl->submit_bitmap will indicate which blocks of the folio should be + * submitted, and @folio is still kept locked. * - * This returns 0 if all went well (page still locked) - * This returns < 0 if there were errors (page still locked) + * Return <0 if there is any error hit. + * Any allocated ordered extent range covering this folio will be marked + * finished (IOERR), and @folio is still kept locked. */ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode, struct folio *folio, @@ -1124,16 +1167,28 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode, const bool is_subpage = btrfs_is_subpage(fs_info, folio->mapping); const u64 page_start = folio_pos(folio); const u64 page_end = page_start + folio_size(folio) - 1; + unsigned long delalloc_bitmap = 0; /* * Save the last found delalloc end. As the delalloc end can go beyond * page boundary, thus we cannot rely on subpage bitmap to locate the * last delalloc end. */ u64 last_delalloc_end = 0; + /* + * The range end (exclusive) of the last successfully finished delalloc + * range. + * Any range covered by ordered extent must either be manually marked + * finished (error handling), or has IO submitted (and finish the + * ordered extent normally). + * + * This records the end of ordered extent cleanup if we hit an error. + */ + u64 last_finished_delalloc_end = page_start; u64 delalloc_start = page_start; u64 delalloc_end = page_end; u64 delalloc_to_write = 0; int ret = 0; + int bit; /* Save the dirty bitmap as our submission bitmap will be a subset of it. */ if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) { @@ -1143,6 +1198,12 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode, bio_ctrl->submit_bitmap = 1; } + for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) { + u64 start = page_start + (bit << fs_info->sectorsize_bits); + + btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize); + } + /* Lock all (subpage) delalloc ranges inside the folio first. */ while (delalloc_start < page_end) { delalloc_end = page_end; @@ -1151,9 +1212,8 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode, delalloc_start = delalloc_end + 1; continue; } - btrfs_folio_set_writer_lock(fs_info, folio, delalloc_start, - min(delalloc_end, page_end) + 1 - - delalloc_start); + set_delalloc_bitmap(folio, &delalloc_bitmap, delalloc_start, + min(delalloc_end, page_end) + 1 - delalloc_start); last_delalloc_end = delalloc_end; delalloc_start = delalloc_end + 1; } @@ -1178,7 +1238,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode, found_len = last_delalloc_end + 1 - found_start; found = true; } else { - found = btrfs_subpage_find_writer_locked(fs_info, folio, + found = find_next_delalloc_bitmap(folio, &delalloc_bitmap, delalloc_start, &found_start, &found_len); } if (!found) @@ -1192,11 +1252,19 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode, found_len = last_delalloc_end + 1 - found_start; if (ret >= 0) { + /* + * Some delalloc range may be created by previous folios. + * Thus we still need to clean up this range during error + * handling. + */ + last_finished_delalloc_end = found_start; /* No errors hit so far, run the current delalloc range. 
*/ ret = btrfs_run_delalloc_range(inode, folio, found_start, found_start + found_len - 1, wbc); + if (ret >= 0) + last_finished_delalloc_end = found_start + found_len; } else { /* * We've hit an error during previous delalloc range, @@ -1231,8 +1299,22 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode, delalloc_start = found_start + found_len; } - if (ret < 0) + /* + * It's possible we had some ordered extents created before we hit + * an error, cleanup non-async successfully created delalloc ranges. + */ + if (unlikely(ret < 0)) { + unsigned int bitmap_size = min( + (last_finished_delalloc_end - page_start) >> + fs_info->sectorsize_bits, + fs_info->sectors_per_page); + + for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size) + btrfs_mark_ordered_io_finished(inode, folio, + page_start + (bit << fs_info->sectorsize_bits), + fs_info->sectorsize, false); return ret; + } out: if (last_delalloc_end) delalloc_end = last_delalloc_end; @@ -1348,6 +1430,7 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode, struct btrfs_fs_info *fs_info = inode->root->fs_info; unsigned long range_bitmap = 0; bool submitted_io = false; + bool error = false; const u64 folio_start = folio_pos(folio); u64 cur; int bit; @@ -1390,13 +1473,26 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode, break; } ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size); - if (ret < 0) - goto out; + if (unlikely(ret < 0)) { + /* + * bio_ctrl may contain a bio crossing several folios. + * Submit it immediately so that the bio has a chance + * to finish normally, other than marked as error. + */ + submit_one_bio(bio_ctrl); + /* + * Failed to grab the extent map which should be very rare. + * Since there is no bio submitted to finish the ordered + * extent, we have to manually finish this sector. + */ + btrfs_mark_ordered_io_finished(inode, folio, cur, + fs_info->sectorsize, false); + error = true; + continue; + } submitted_io = true; } - btrfs_folio_assert_not_dirty(fs_info, folio, start, len); -out: /* * If we didn't submitted any sector (>= i_size), folio dirty get * cleared but PAGECACHE_TAG_DIRTY is not cleared (only cleared @@ -1404,8 +1500,11 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode, * * Here we set writeback and clear for the range. If the full folio * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag. + * + * If we hit any error, the corresponding sector will still be dirty + * thus no need to clear PAGECACHE_TAG_DIRTY. 
*/ - if (!submitted_io) { + if (!submitted_io && !error) { btrfs_folio_set_writeback(fs_info, folio, start, len); btrfs_folio_clear_writeback(fs_info, folio, start, len); } @@ -1423,15 +1522,14 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode, */ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl) { - struct inode *inode = folio->mapping->host; - struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); - const u64 page_start = folio_pos(folio); + struct btrfs_inode *inode = BTRFS_I(folio->mapping->host); + struct btrfs_fs_info *fs_info = inode->root->fs_info; int ret; size_t pg_offset; - loff_t i_size = i_size_read(inode); + loff_t i_size = i_size_read(&inode->vfs_inode); unsigned long end_index = i_size >> PAGE_SHIFT; - trace_extent_writepage(folio, inode, bio_ctrl->wbc); + trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc); WARN_ON(!folio_test_locked(folio)); @@ -1455,13 +1553,13 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl if (ret < 0) goto done; - ret = writepage_delalloc(BTRFS_I(inode), folio, bio_ctrl); + ret = writepage_delalloc(inode, folio, bio_ctrl); if (ret == 1) return 0; if (ret) goto done; - ret = extent_writepage_io(BTRFS_I(inode), folio, folio_pos(folio), + ret = extent_writepage_io(inode, folio, folio_pos(folio), PAGE_SIZE, bio_ctrl, i_size); if (ret == 1) return 0; @@ -1469,17 +1567,13 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl bio_ctrl->wbc->nr_to_write--; done: - if (ret) { - btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio, - page_start, PAGE_SIZE, !ret); + if (ret < 0) mapping_set_error(folio->mapping, ret); - } - /* * Only unlock ranges that are submitted. As there can be some async * submitted ranges inside the folio. */ - btrfs_folio_end_writer_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap); + btrfs_folio_end_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap); ASSERT(ret <= 0); return ret; } @@ -2231,12 +2325,9 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f if (ret == 1) goto next_page; - if (ret) { - btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio, - cur, cur_len, !ret); + if (ret) mapping_set_error(mapping, ret); - } - btrfs_folio_end_writer_lock(fs_info, folio, cur, cur_len); + btrfs_folio_end_lock(fs_info, folio, cur, cur_len); if (ret < 0) found_error = true; next_page: @@ -2463,12 +2554,6 @@ static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *foli subpage = folio_get_private(folio); if (atomic_read(&subpage->eb_refs)) return true; - /* - * Even there is no eb refs here, we may still have - * end_folio_read() call relying on page::private. - */ - if (atomic_read(&subpage->readers)) - return true; } return false; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 848cb2c3d9dde..78c4a3765002e 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1200,7 +1200,7 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i) ssize_t ret; bool only_release_metadata = false; bool force_page_uptodate = false; - loff_t old_isize = i_size_read(inode); + loff_t old_isize; unsigned int ilock_flags = 0; const bool nowait = (iocb->ki_flags & IOCB_NOWAIT); unsigned int bdp_flags = (nowait ? 
BDP_ASYNC : 0); @@ -1212,6 +1212,13 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i) if (ret < 0) return ret; + /* + * We can only trust the isize with inode lock held, or it can race with + * other buffered writes and cause incorrect call of + * pagecache_isize_extended() to overwrite existing data. + */ + old_isize = i_size_read(inode); + ret = generic_write_checks(iocb, i); if (ret <= 0) goto out; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f7e7d864f4144..5b842276573e8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2419,8 +2419,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_fol out: if (ret < 0) - btrfs_cleanup_ordered_extents(inode, locked_folio, start, - end - start + 1); + btrfs_cleanup_ordered_extents(inode, NULL, start, end - start + 1); return ret; } diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index fa9025c05d4e2..e9f58cdeeb5f3 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1899,11 +1899,7 @@ int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 su * Commit current transaction to make sure all the rfer/excl numbers * get updated. */ - trans = btrfs_start_transaction(fs_info->quota_root, 0); - if (IS_ERR(trans)) - return PTR_ERR(trans); - - ret = btrfs_commit_transaction(trans); + ret = btrfs_commit_current_transaction(fs_info->quota_root); if (ret < 0) return ret; diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c index ec7328a6bfd75..88a01d51ab11f 100644 --- a/fs/btrfs/subpage.c +++ b/fs/btrfs/subpage.c @@ -140,12 +140,10 @@ struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info, return ERR_PTR(-ENOMEM); spin_lock_init(&ret->lock); - if (type == BTRFS_SUBPAGE_METADATA) { + if (type == BTRFS_SUBPAGE_METADATA) atomic_set(&ret->eb_refs, 0); - } else { - atomic_set(&ret->readers, 0); - atomic_set(&ret->writers, 0); - } + else + atomic_set(&ret->nr_locked, 0); return ret; } @@ -221,62 +219,6 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info, __start_bit; \ }) -void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info, - struct folio *folio, u64 start, u32 len) -{ - struct btrfs_subpage *subpage = folio_get_private(folio); - const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); - const int nbits = len >> fs_info->sectorsize_bits; - unsigned long flags; - - - btrfs_subpage_assert(fs_info, folio, start, len); - - spin_lock_irqsave(&subpage->lock, flags); - /* - * Even though it's just for reading the page, no one should have - * locked the subpage range. - */ - ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits)); - bitmap_set(subpage->bitmaps, start_bit, nbits); - atomic_add(nbits, &subpage->readers); - spin_unlock_irqrestore(&subpage->lock, flags); -} - -void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info, - struct folio *folio, u64 start, u32 len) -{ - struct btrfs_subpage *subpage = folio_get_private(folio); - const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); - const int nbits = len >> fs_info->sectorsize_bits; - unsigned long flags; - bool is_data; - bool last; - - btrfs_subpage_assert(fs_info, folio, start, len); - is_data = is_data_inode(BTRFS_I(folio->mapping->host)); - - spin_lock_irqsave(&subpage->lock, flags); - - /* The range should have already been locked. 
*/ - ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits)); - ASSERT(atomic_read(&subpage->readers) >= nbits); - - bitmap_clear(subpage->bitmaps, start_bit, nbits); - last = atomic_sub_and_test(nbits, &subpage->readers); - - /* - * For data we need to unlock the page if the last read has finished. - * - * And please don't replace @last with atomic_sub_and_test() call - * inside if () condition. - * As we want the atomic_sub_and_test() to be always executed. - */ - if (is_data && last) - folio_unlock(folio); - spin_unlock_irqrestore(&subpage->lock, flags); -} - static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len) { u64 orig_start = *start; @@ -295,28 +237,8 @@ static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len) orig_start + orig_len) - *start; } -static void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info, - struct folio *folio, u64 start, u32 len) -{ - struct btrfs_subpage *subpage = folio_get_private(folio); - const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); - const int nbits = (len >> fs_info->sectorsize_bits); - unsigned long flags; - int ret; - - btrfs_subpage_assert(fs_info, folio, start, len); - - spin_lock_irqsave(&subpage->lock, flags); - ASSERT(atomic_read(&subpage->readers) == 0); - ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits)); - bitmap_set(subpage->bitmaps, start_bit, nbits); - ret = atomic_add_return(nbits, &subpage->writers); - ASSERT(ret == nbits); - spin_unlock_irqrestore(&subpage->lock, flags); -} - -static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info, - struct folio *folio, u64 start, u32 len) +static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info, + struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); @@ -334,9 +256,9 @@ static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_inf * extent_clear_unlock_delalloc() for compression path. * * This @locked_page is locked by plain lock_page(), thus its - * subpage::writers is 0. Handle them in a special way. + * subpage::locked is 0. Handle them in a special way. */ - if (atomic_read(&subpage->writers) == 0) { + if (atomic_read(&subpage->nr_locked) == 0) { spin_unlock_irqrestore(&subpage->lock, flags); return true; } @@ -345,39 +267,12 @@ static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_inf clear_bit(bit, subpage->bitmaps); cleared++; } - ASSERT(atomic_read(&subpage->writers) >= cleared); - last = atomic_sub_and_test(cleared, &subpage->writers); + ASSERT(atomic_read(&subpage->nr_locked) >= cleared); + last = atomic_sub_and_test(cleared, &subpage->nr_locked); spin_unlock_irqrestore(&subpage->lock, flags); return last; } -/* - * Lock a folio for delalloc page writeback. - * - * Return -EAGAIN if the page is not properly initialized. - * Return 0 with the page locked, and writer counter updated. - * - * Even with 0 returned, the page still need extra check to make sure - * it's really the correct page, as the caller is using - * filemap_get_folios_contig(), which can race with page invalidating. 
- */ -int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info, - struct folio *folio, u64 start, u32 len) -{ - if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) { - folio_lock(folio); - return 0; - } - folio_lock(folio); - if (!folio_test_private(folio) || !folio_get_private(folio)) { - folio_unlock(folio); - return -EAGAIN; - } - btrfs_subpage_clamp_range(folio, &start, &len); - btrfs_subpage_start_writer(fs_info, folio, start, len); - return 0; -} - /* * Handle different locked folios: * @@ -394,8 +289,8 @@ int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info, * bitmap, reduce the writer lock number, and unlock the page if that's * the last locked range. */ -void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info, - struct folio *folio, u64 start, u32 len) +void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info, + struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); @@ -408,24 +303,24 @@ void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info, /* * For subpage case, there are two types of locked page. With or - * without writers number. + * without locked number. * - * Since we own the page lock, no one else could touch subpage::writers + * Since we own the page lock, no one else could touch subpage::locked * and we are safe to do several atomic operations without spinlock. */ - if (atomic_read(&subpage->writers) == 0) { - /* No writers, locked by plain lock_page(). */ + if (atomic_read(&subpage->nr_locked) == 0) { + /* No subpage lock, locked by plain lock_page(). */ folio_unlock(folio); return; } btrfs_subpage_clamp_range(folio, &start, &len); - if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len)) + if (btrfs_subpage_end_and_test_lock(fs_info, folio, start, len)) folio_unlock(folio); } -void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info, - struct folio *folio, unsigned long bitmap) +void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info, + struct folio *folio, unsigned long bitmap) { struct btrfs_subpage *subpage = folio_get_private(folio); const int start_bit = fs_info->sectors_per_page * btrfs_bitmap_nr_locked; @@ -439,8 +334,8 @@ void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info, return; } - if (atomic_read(&subpage->writers) == 0) { - /* No writers, locked by plain lock_page(). */ + if (atomic_read(&subpage->nr_locked) == 0) { + /* No subpage lock, locked by plain lock_page(). */ folio_unlock(folio); return; } @@ -450,8 +345,8 @@ void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info, if (test_and_clear_bit(bit + start_bit, subpage->bitmaps)) cleared++; } - ASSERT(atomic_read(&subpage->writers) >= cleared); - last = atomic_sub_and_test(cleared, &subpage->writers); + ASSERT(atomic_read(&subpage->nr_locked) >= cleared); + last = atomic_sub_and_test(cleared, &subpage->nr_locked); spin_unlock_irqrestore(&subpage->lock, flags); if (last) folio_unlock(folio); @@ -776,8 +671,8 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, * This populates the involved subpage ranges so that subpage helpers can * properly unlock them. 
*/ -void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info, - struct folio *folio, u64 start, u32 len) +void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info, + struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage; unsigned long flags; @@ -796,58 +691,11 @@ void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info, /* Target range should not yet be locked. */ ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits)); bitmap_set(subpage->bitmaps, start_bit, nbits); - ret = atomic_add_return(nbits, &subpage->writers); + ret = atomic_add_return(nbits, &subpage->nr_locked); ASSERT(ret <= fs_info->sectors_per_page); spin_unlock_irqrestore(&subpage->lock, flags); } -/* - * Find any subpage writer locked range inside @folio, starting at file offset - * @search_start. The caller should ensure the folio is locked. - * - * Return true and update @found_start_ret and @found_len_ret to the first - * writer locked range. - * Return false if there is no writer locked range. - */ -bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info, - struct folio *folio, u64 search_start, - u64 *found_start_ret, u32 *found_len_ret) -{ - struct btrfs_subpage *subpage = folio_get_private(folio); - const u32 sectors_per_page = fs_info->sectors_per_page; - const unsigned int len = PAGE_SIZE - offset_in_page(search_start); - const unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, - locked, search_start, len); - const unsigned int locked_bitmap_start = sectors_per_page * btrfs_bitmap_nr_locked; - const unsigned int locked_bitmap_end = locked_bitmap_start + sectors_per_page; - unsigned long flags; - int first_zero; - int first_set; - bool found = false; - - ASSERT(folio_test_locked(folio)); - spin_lock_irqsave(&subpage->lock, flags); - first_set = find_next_bit(subpage->bitmaps, locked_bitmap_end, start_bit); - if (first_set >= locked_bitmap_end) - goto out; - - found = true; - - *found_start_ret = folio_pos(folio) + - ((first_set - locked_bitmap_start) << fs_info->sectorsize_bits); - /* - * Since @first_set is ensured to be smaller than locked_bitmap_end - * here, @found_start_ret should be inside the folio. - */ - ASSERT(*found_start_ret < folio_pos(folio) + PAGE_SIZE); - - first_zero = find_next_zero_bit(subpage->bitmaps, locked_bitmap_end, first_set); - *found_len_ret = (first_zero - first_set) << fs_info->sectorsize_bits; -out: - spin_unlock_irqrestore(&subpage->lock, flags); - return found; -} - #define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst) \ { \ const int sectors_per_page = fs_info->sectors_per_page; \ diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h index cdb554e0d215e..44fff1f4eac48 100644 --- a/fs/btrfs/subpage.h +++ b/fs/btrfs/subpage.h @@ -45,14 +45,6 @@ enum { struct btrfs_subpage { /* Common members for both data and metadata pages */ spinlock_t lock; - /* - * Both data and metadata needs to track how many readers are for the - * page. - * Data relies on @readers to unlock the page when last reader finished. - * While metadata doesn't need page unlock, it needs to prevent - * page::private get cleared before the last end_page_read(). - */ - atomic_t readers; union { /* * Structures only used by metadata @@ -62,8 +54,12 @@ struct btrfs_subpage { */ atomic_t eb_refs; - /* Structures only used by data */ - atomic_t writers; + /* + * Structures only used by data, + * + * How many sectors inside the page is locked. 
+ */ + atomic_t nr_locked; }; unsigned long bitmaps[]; }; @@ -95,23 +91,12 @@ void btrfs_free_subpage(struct btrfs_subpage *subpage); void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio); void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio); -void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info, - struct folio *folio, u64 start, u32 len); -void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info, - struct folio *folio, u64 start, u32 len); - -int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info, - struct folio *folio, u64 start, u32 len); -void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info, - struct folio *folio, u64 start, u32 len); -void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info, - struct folio *folio, u64 start, u32 len); -void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info, - struct folio *folio, unsigned long bitmap); -bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info, - struct folio *folio, u64 search_start, - u64 *found_start_ret, u32 *found_len_ret); - +void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info, + struct folio *folio, u64 start, u32 len); +void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info, + struct folio *folio, u64 start, u32 len); +void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info, + struct folio *folio, unsigned long bitmap); /* * Template for subpage related operations. * diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 73343503ea60e..08ccf5d5e1440 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1140,8 +1140,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) subvol_name = btrfs_get_subvol_name_from_objectid(info, btrfs_root_id(BTRFS_I(d_inode(dentry))->root)); if (!IS_ERR(subvol_name)) { - seq_puts(seq, ",subvol="); - seq_escape(seq, subvol_name, " \t\n\\"); + seq_show_option(seq, "subvol", subvol_name); kfree(subvol_name); } return 0; diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c index 56e61ac1cc64c..609bb6c9c0873 100644 --- a/fs/btrfs/tests/extent-map-tests.c +++ b/fs/btrfs/tests/extent-map-tests.c @@ -1045,6 +1045,7 @@ static int test_rmap_block(struct btrfs_fs_info *fs_info, ret = btrfs_add_chunk_map(fs_info, map); if (ret) { test_err("error adding chunk map to mapping tree"); + btrfs_free_chunk_map(map); goto out_free; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 82dd9ee89fbc5..24806e19c7c41 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -161,7 +161,13 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction) cache = list_first_entry(&transaction->deleted_bgs, struct btrfs_block_group, bg_list); + /* + * Not strictly necessary to lock, as no other task will be using a + * block_group on the deleted_bgs list during a transaction abort. 
+ */ + spin_lock(&transaction->fs_info->unused_bgs_lock); list_del_init(&cache->bg_list); + spin_unlock(&transaction->fs_info->unused_bgs_lock); btrfs_unfreeze_block_group(cache); btrfs_put_block_group(cache); } @@ -2099,7 +2105,13 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans) list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info); + /* + * Not strictly necessary to lock, as no other task will be using a + * block_group on the new_bgs list during a transaction abort. + */ + spin_lock(&fs_info->unused_bgs_lock); list_del_init(&block_group->bg_list); + spin_unlock(&fs_info->unused_bgs_lock); } } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 395b8b880ce78..587ac07cd1941 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -7094,6 +7094,7 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, btrfs_err(fs_info, "failed to add chunk map, start=%llu len=%llu: %d", map->start, map->chunk_len, ret); + btrfs_free_chunk_map(map); } return ret; diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 69d03feea4e0e..2bb7e32ad9458 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -2107,6 +2107,9 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group) physical = map->stripes[i].physical; zinfo = device->zone_info; + if (!device->bdev) + continue; + if (zinfo->max_active_zones == 0) continue; @@ -2268,6 +2271,9 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ struct btrfs_zoned_device_info *zinfo = device->zone_info; unsigned int nofs_flags; + if (!device->bdev) + continue; + if (zinfo->max_active_zones == 0) continue; diff --git a/fs/coredump.c b/fs/coredump.c index 45737b43dda5c..2b8c36c9660c5 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -63,6 +63,7 @@ static void free_vma_snapshot(struct coredump_params *cprm); static int core_uses_pid; static unsigned int core_pipe_limit; +static unsigned int core_sort_vma; static char core_pattern[CORENAME_MAX_SIZE] = "core"; static int core_name_size = CORENAME_MAX_SIZE; unsigned int core_file_note_size_limit = CORE_FILE_NOTE_SIZE_DEFAULT; @@ -1025,6 +1026,15 @@ static struct ctl_table coredump_sysctls[] = { .extra1 = (unsigned int *)&core_file_note_size_min, .extra2 = (unsigned int *)&core_file_note_size_max, }, + { + .procname = "core_sort_vma", + .data = &core_sort_vma, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_douintvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, }; static int __init init_fs_coredump_sysctls(void) @@ -1255,8 +1265,9 @@ static bool dump_vma_snapshot(struct coredump_params *cprm) cprm->vma_data_size += m->dump_size; } - sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta), - cmp_vma_size, NULL); + if (core_sort_vma) + sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta), + cmp_vma_size, NULL); return true; } diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 0c01e4423ee2a..0ad496ceb638d 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -741,6 +741,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, read_lock_bh(&ls->ls_rsbtbl_lock); if (!rsb_flag(r, RSB_HASHED)) { read_unlock_bh(&ls->ls_rsbtbl_lock); + error = -EBADR; goto do_new; } @@ -784,6 +785,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, } } else { write_unlock_bh(&ls->ls_rsbtbl_lock); + error = -EBADR; goto do_new; } diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c index 
33f8539dda4ae..17aed5f6c5490 100644 --- a/fs/erofs/fileio.c +++ b/fs/erofs/fileio.c @@ -32,6 +32,8 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret) ret = 0; } if (rq->bio.bi_end_io) { + if (ret < 0 && !rq->bio.bi_status) + rq->bio.bi_status = errno_to_blk_status(ret); rq->bio.bi_end_io(&rq->bio); } else { bio_for_each_folio_all(fi, &rq->bio) { diff --git a/fs/exec.c b/fs/exec.c index 67513bd606c24..d607943729638 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1246,13 +1246,12 @@ int begin_new_exec(struct linux_binprm * bprm) */ bprm->point_of_no_return = true; - /* - * Make this the only thread in the thread group. - */ + /* Make this the only thread in the thread group */ retval = de_thread(me); if (retval) goto out; - + /* see the comment in check_unsafe_exec() */ + current->fs->in_exec = 0; /* * Cancel any io_uring activity across execve */ @@ -1514,6 +1513,8 @@ static void free_bprm(struct linux_binprm *bprm) } free_arg_pages(bprm); if (bprm->cred) { + /* in case exec fails before de_thread() succeeds */ + current->fs->in_exec = 0; mutex_unlock(¤t->signal->cred_guard_mutex); abort_creds(bprm->cred); } @@ -1620,6 +1621,10 @@ static void check_unsafe_exec(struct linux_binprm *bprm) * suid exec because the differently privileged task * will be able to manipulate the current directory, etc. * It would be nice to force an unshare instead... + * + * Otherwise we set fs->in_exec = 1 to deny clone(CLONE_FS) + * from another sub-thread until de_thread() succeeds, this + * state is protected by cred_guard_mutex we hold. */ n_fs = 1; spin_lock(&p->fs->lock); @@ -1878,7 +1883,6 @@ static int bprm_execve(struct linux_binprm *bprm) sched_mm_cid_after_execve(current); /* execve succeeded */ - current->fs->in_exec = 0; current->in_execve = 0; rseq_execve(current); user_events_execve(current); @@ -1897,7 +1901,6 @@ static int bprm_execve(struct linux_binprm *bprm) force_fatal_sig(SIGSEGV); sched_mm_cid_after_execve(current); - current->fs->in_exec = 0; current->in_execve = 0; return retval; diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c index ce9be95c9172f..9ff825f1502d5 100644 --- a/fs/exfat/balloc.c +++ b/fs/exfat/balloc.c @@ -141,7 +141,7 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync) return 0; } -void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync) +int exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync) { int i, b; unsigned int ent_idx; @@ -150,13 +150,17 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync) struct exfat_mount_options *opts = &sbi->options; if (!is_valid_cluster(sbi, clu)) - return; + return -EIO; ent_idx = CLUSTER_TO_BITMAP_ENT(clu); i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx); b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx); + if (!test_bit_le(b, sbi->vol_amap[i]->b_data)) + return -EIO; + clear_bit_le(b, sbi->vol_amap[i]->b_data); + exfat_update_bh(sbi->vol_amap[i], sync); if (opts->discard) { @@ -171,6 +175,8 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync) opts->discard = 0; } } + + return 0; } /* diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index 3cdc1de362a94..d2ba8e2d0c398 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -452,7 +452,7 @@ int exfat_count_num_clusters(struct super_block *sb, int exfat_load_bitmap(struct super_block *sb); void exfat_free_bitmap(struct exfat_sb_info *sbi); int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync); -void exfat_clear_bitmap(struct inode *inode, unsigned int clu, 
bool sync); +int exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync); unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu); int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count); int exfat_trim_fs(struct inode *inode, struct fstrim_range *range); diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c index 9e5492ac409b0..8df5ad6ebb10c 100644 --- a/fs/exfat/fatent.c +++ b/fs/exfat/fatent.c @@ -175,6 +175,7 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain BITMAP_OFFSET_SECTOR_INDEX(sb, CLUSTER_TO_BITMAP_ENT(clu)); if (p_chain->flags == ALLOC_NO_FAT_CHAIN) { + int err; unsigned int last_cluster = p_chain->dir + p_chain->size - 1; do { bool sync = false; @@ -189,7 +190,9 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain cur_cmap_i = next_cmap_i; } - exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode))); + err = exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode))); + if (err) + break; clu++; num_clusters++; } while (num_clusters < p_chain->size); @@ -210,12 +213,13 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain cur_cmap_i = next_cmap_i; } - exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode))); + if (exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)))) + break; clu = n_clu; num_clusters++; if (err) - goto dec_used_clus; + break; if (num_clusters >= sbi->num_clusters - EXFAT_FIRST_CLUSTER) { /* @@ -229,7 +233,6 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain } while (clu != EXFAT_EOF_CLUSTER); } -dec_used_clus: sbi->used_clusters -= num_clusters; return 0; } @@ -262,7 +265,7 @@ int exfat_find_last_cluster(struct super_block *sb, struct exfat_chain *p_chain, clu = next; if (exfat_ent_get(sb, clu, &next)) return -EIO; - } while (next != EXFAT_EOF_CLUSTER); + } while (next != EXFAT_EOF_CLUSTER && count <= p_chain->size); if (p_chain->size != count) { exfat_fs_error(sb, diff --git a/fs/exfat/file.c b/fs/exfat/file.c index 05b51e7217838..841a5b18e3dfd 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -582,12 +582,15 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter) loff_t pos = iocb->ki_pos; loff_t valid_size; + if (unlikely(exfat_forced_shutdown(inode->i_sb))) + return -EIO; + inode_lock(inode); valid_size = ei->valid_size; ret = generic_write_checks(iocb, iter); - if (ret < 0) + if (ret <= 0) goto unlock; if (iocb->ki_flags & IOCB_DIRECT) { @@ -635,6 +638,16 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter) return ret; } +static ssize_t exfat_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + struct inode *inode = file_inode(iocb->ki_filp); + + if (unlikely(exfat_forced_shutdown(inode->i_sb))) + return -EIO; + + return generic_file_read_iter(iocb, iter); +} + static vm_fault_t exfat_page_mkwrite(struct vm_fault *vmf) { int err; @@ -672,14 +685,26 @@ static const struct vm_operations_struct exfat_file_vm_ops = { static int exfat_file_mmap(struct file *file, struct vm_area_struct *vma) { + if (unlikely(exfat_forced_shutdown(file_inode(file)->i_sb))) + return -EIO; + file_accessed(file); vma->vm_ops = &exfat_file_vm_ops; return 0; } +static ssize_t exfat_splice_read(struct file *in, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, unsigned int flags) +{ + if (unlikely(exfat_forced_shutdown(file_inode(in)->i_sb))) + return -EIO; + + return filemap_splice_read(in, ppos, pipe, len, 
flags); +} + const struct file_operations exfat_file_operations = { .llseek = generic_file_llseek, - .read_iter = generic_file_read_iter, + .read_iter = exfat_file_read_iter, .write_iter = exfat_file_write_iter, .unlocked_ioctl = exfat_ioctl, #ifdef CONFIG_COMPAT @@ -687,7 +712,7 @@ const struct file_operations exfat_file_operations = { #endif .mmap = exfat_file_mmap, .fsync = exfat_file_fsync, - .splice_read = filemap_splice_read, + .splice_read = exfat_splice_read, .splice_write = iter_file_splice_write, }; diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index d724de8f57bf9..3801516ac5071 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -344,7 +344,8 @@ static int exfat_get_block(struct inode *inode, sector_t iblock, * The block has been partially written, * zero the unwritten part and map the block. */ - loff_t size, off, pos; + loff_t size, pos; + void *addr; max_blocks = 1; @@ -355,17 +356,43 @@ static int exfat_get_block(struct inode *inode, sector_t iblock, if (!bh_result->b_folio) goto done; + /* + * No buffer_head is allocated. + * (1) bmap: It's enough to fill bh_result without I/O. + * (2) read: The unwritten part should be filled with 0 + * If a folio does not have any buffers, + * let's returns -EAGAIN to fallback to + * per-bh IO like block_read_full_folio(). + */ + if (!folio_buffers(bh_result->b_folio)) { + err = -EAGAIN; + goto done; + } + pos = EXFAT_BLK_TO_B(iblock, sb); size = ei->valid_size - pos; - off = pos & (PAGE_SIZE - 1); + addr = folio_address(bh_result->b_folio) + + offset_in_folio(bh_result->b_folio, pos); + + /* Check if bh->b_data points to proper addr in folio */ + if (bh_result->b_data != addr) { + exfat_fs_error_ratelimit(sb, + "b_data(%p) != folio_addr(%p)", + bh_result->b_data, addr); + err = -EINVAL; + goto done; + } - folio_set_bh(bh_result, bh_result->b_folio, off); + /* Read a block */ err = bh_read(bh_result, 0); if (err < 0) - goto unlock_ret; + goto done; + + /* Zero unwritten part of a block */ + memset(bh_result->b_data + size, 0, + bh_result->b_size - size); - folio_zero_segment(bh_result->b_folio, off + size, - off + sb->s_blocksize); + err = 0; } else { /* * The range has not been written, clear the mapped flag @@ -376,6 +403,8 @@ static int exfat_get_block(struct inode *inode, sector_t iblock, } done: bh_result->b_size = EXFAT_BLK_TO_B(max_blocks, sb); + if (err < 0) + clear_buffer_mapped(bh_result); unlock_ret: mutex_unlock(&sbi->s_lock); return err; diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index 337197ece5995..7b3951951f8af 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -237,7 +237,7 @@ static int exfat_search_empty_slot(struct super_block *sb, dentry = 0; } - while (dentry + num_entries < total_entries && + while (dentry + num_entries <= total_entries && clu.dir != EXFAT_EOF_CLUSTER) { i = dentry & (dentries_per_clu - 1); @@ -639,6 +639,11 @@ static int exfat_find(struct inode *dir, struct qstr *qname, info->valid_size = le64_to_cpu(ep2->dentry.stream.valid_size); info->size = le64_to_cpu(ep2->dentry.stream.size); + if (unlikely(EXFAT_B_TO_CLU_ROUND_UP(info->size, sbi) > sbi->used_clusters)) { + exfat_fs_error(sb, "data size is invalid(%lld)", info->size); + return -EIO; + } + info->start_clu = le32_to_cpu(ep2->dentry.stream.start_clu); if (!is_valid_cluster(sbi, info->start_clu) && info->size) { exfat_warn(sb, "start_clu is invalid cluster(0x%x)", diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index ef6a3c8f3a9a0..b278b5703c197 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -104,6 +104,9 @@ int 
__ext4_check_dir_entry(const char *function, unsigned int line, else if (unlikely(le32_to_cpu(de->inode) > le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))) error_msg = "inode out of bounds"; + else if (unlikely(next_offset == size && de->name_len == 1 && + de->name[0] == '.')) + error_msg = "'.' directory cannot be the last in data block"; else return 0; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 67a5b937f5a92..ffa6aa55a1a7a 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4681,22 +4681,43 @@ static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val) inode_set_iversion_queried(inode, val); } -static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags) - +static int check_igot_inode(struct inode *inode, ext4_iget_flags flags, + const char *function, unsigned int line) { + const char *err_str; + if (flags & EXT4_IGET_EA_INODE) { - if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) - return "missing EA_INODE flag"; + if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) { + err_str = "missing EA_INODE flag"; + goto error; + } if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) || - EXT4_I(inode)->i_file_acl) - return "ea_inode with extended attributes"; + EXT4_I(inode)->i_file_acl) { + err_str = "ea_inode with extended attributes"; + goto error; + } } else { - if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) - return "unexpected EA_INODE flag"; + if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) { + /* + * open_by_handle_at() could provide an old inode number + * that has since been reused for an ea_inode; this does + * not indicate filesystem corruption + */ + if (flags & EXT4_IGET_HANDLE) + return -ESTALE; + err_str = "unexpected EA_INODE flag"; + goto error; + } + } + if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) { + err_str = "unexpected bad inode w/o EXT4_IGET_BAD"; + goto error; } - if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) - return "unexpected bad inode w/o EXT4_IGET_BAD"; - return NULL; + return 0; + +error: + ext4_error_inode(inode, function, line, 0, err_str); + return -EFSCORRUPTED; } struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, @@ -4708,7 +4729,6 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, struct ext4_inode_info *ei; struct ext4_super_block *es = EXT4_SB(sb)->s_es; struct inode *inode; - const char *err_str; journal_t *journal = EXT4_SB(sb)->s_journal; long ret; loff_t size; @@ -4737,10 +4757,10 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) { - if ((err_str = check_igot_inode(inode, flags)) != NULL) { - ext4_error_inode(inode, function, line, 0, err_str); + ret = check_igot_inode(inode, flags, function, line); + if (ret) { iput(inode); - return ERR_PTR(-EFSCORRUPTED); + return ERR_PTR(ret); } return inode; } @@ -5012,13 +5032,21 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, ret = -EFSCORRUPTED; goto bad_inode; } - if ((err_str = check_igot_inode(inode, flags)) != NULL) { - ext4_error_inode(inode, function, line, 0, err_str); - ret = -EFSCORRUPTED; - goto bad_inode; + ret = check_igot_inode(inode, flags, function, line); + /* + * -ESTALE here means there is nothing inherently wrong with the inode, + * it's just not an inode we can return for an fhandle lookup. 
+ */ + if (ret == -ESTALE) { + brelse(iloc.bh); + unlock_new_inode(inode); + iput(inode); + return ERR_PTR(-ESTALE); } - + if (ret) + goto bad_inode; brelse(iloc.bh); + unlock_new_inode(inode); return inode; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 790db7eac6c2a..286f8fcb74cc9 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1995,7 +1995,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, * split it in half by count; each resulting block will have at least * half the space free. */ - if (i > 0) + if (i >= 0) split = count - move; else split = count/2; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 940ac1a49b729..4291ab3c20be6 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -6781,22 +6781,29 @@ static int ext4_statfs_project(struct super_block *sb, dquot->dq_dqb.dqb_bhardlimit); limit >>= sb->s_blocksize_bits; - if (limit && buf->f_blocks > limit) { + if (limit) { + uint64_t remaining = 0; + curblock = (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; - buf->f_blocks = limit; - buf->f_bfree = buf->f_bavail = - (buf->f_blocks > curblock) ? - (buf->f_blocks - curblock) : 0; + if (limit > curblock) + remaining = limit - curblock; + + buf->f_blocks = min(buf->f_blocks, limit); + buf->f_bfree = min(buf->f_bfree, remaining); + buf->f_bavail = min(buf->f_bavail, remaining); } limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit, dquot->dq_dqb.dqb_ihardlimit); - if (limit && buf->f_files > limit) { - buf->f_files = limit; - buf->f_ffree = - (buf->f_files > dquot->dq_dqb.dqb_curinodes) ? - (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0; + if (limit) { + uint64_t remaining = 0; + + if (limit > dquot->dq_dqb.dqb_curinodes) + remaining = limit - dquot->dq_dqb.dqb_curinodes; + + buf->f_files = min(buf->f_files, limit); + buf->f_ffree = min(buf->f_ffree, remaining); } spin_unlock(&dquot->dq_dqb_lock); @@ -6899,12 +6906,25 @@ static int ext4_release_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; + bool freeze_protected = false; + + /* + * Trying to sb_start_intwrite() in a running transaction + * can result in a deadlock. Further, running transactions + * are already protected from freezing. 
+ */ + if (!ext4_journal_current_handle()) { + sb_start_intwrite(dquot->dq_sb); + freeze_protected = true; + } handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA, EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) { /* Release dquot anyway to avoid endless cycle in dqput() */ dquot_release(dquot); + if (freeze_protected) + sb_end_intwrite(dquot->dq_sb); return PTR_ERR(handle); } ret = dquot_release(dquot); @@ -6915,6 +6935,10 @@ static int ext4_release_dquot(struct dquot *dquot) err = ext4_journal_stop(handle); if (!ret) ret = err; + + if (freeze_protected) + sb_end_intwrite(dquot->dq_sb); + return ret; } diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 7647e9f6e1903..6ff94cdf1515c 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1176,15 +1176,24 @@ ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent, { struct inode *ea_inode; struct ext4_xattr_entry *entry; + struct ext4_iloc iloc; bool dirty = false; unsigned int ea_ino; int err; int credits; + void *end; + + if (block_csum) + end = (void *)bh->b_data + bh->b_size; + else { + ext4_get_inode_loc(parent, &iloc); + end = (void *)ext4_raw_inode(&iloc) + EXT4_SB(parent->i_sb)->s_inode_size; + } /* One credit for dec ref on ea_inode, one for orphan list addition, */ credits = 2 + extra_credits; - for (entry = first; !IS_LAST_ENTRY(entry); + for (entry = first; (void *)entry < end && !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { if (!entry->e_value_inum) continue; diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index efda9a0229816..86228f82f54d0 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1344,21 +1344,13 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); unsigned long flags; - if (cpc->reason & CP_UMOUNT) { - if (le32_to_cpu(ckpt->cp_pack_total_block_count) + - NM_I(sbi)->nat_bits_blocks > BLKS_PER_SEG(sbi)) { - clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG); - f2fs_notice(sbi, "Disable nat_bits due to no space"); - } else if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG) && - f2fs_nat_bitmap_enabled(sbi)) { - f2fs_enable_nat_bits(sbi); - set_ckpt_flags(sbi, CP_NAT_BITS_FLAG); - f2fs_notice(sbi, "Rebuild and enable nat_bits"); - } - } - spin_lock_irqsave(&sbi->cp_lock, flags); + if ((cpc->reason & CP_UMOUNT) && + le32_to_cpu(ckpt->cp_pack_total_block_count) > + sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks) + disable_nat_bits(sbi, false); + if (cpc->reason & CP_TRIMMED) __set_ckpt_flags(ckpt, CP_TRIMMED_FLAG); else @@ -1541,8 +1533,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) start_blk = __start_cp_next_addr(sbi); /* write nat bits */ - if ((cpc->reason & CP_UMOUNT) && - is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) { + if (enabled_nat_bits(sbi, cpc)) { __u64 cp_ver = cur_cp_version(ckpt); block_t blk; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index b52df8aa95350..1c783c2e4902a 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -2231,6 +2231,36 @@ static inline void f2fs_up_write(struct f2fs_rwsem *sem) #endif } +static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock) +{ + unsigned long flags; + unsigned char *nat_bits; + + /* + * In order to re-enable nat_bits we need to call fsck.f2fs by + * set_sbi_flag(sbi, SBI_NEED_FSCK). But it may give huge cost, + * so let's rely on regular fsck or unclean shutdown. 
+ */ + + if (lock) + spin_lock_irqsave(&sbi->cp_lock, flags); + __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG); + nat_bits = NM_I(sbi)->nat_bits; + NM_I(sbi)->nat_bits = NULL; + if (lock) + spin_unlock_irqrestore(&sbi->cp_lock, flags); + + kvfree(nat_bits); +} + +static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi, + struct cp_control *cpc) +{ + bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG); + + return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set; +} + static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) { f2fs_down_read(&sbi->cp_rwsem); @@ -3671,7 +3701,6 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); int f2fs_truncate_xattr_node(struct inode *inode); int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, unsigned int seq_id); -bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi); int f2fs_remove_inode_page(struct inode *inode); struct page *f2fs_new_inode_page(struct inode *inode); struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs); @@ -3696,7 +3725,6 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page); int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, unsigned int segno, struct f2fs_summary_block *sum); -void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi); int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); int f2fs_build_node_manager(struct f2fs_sb_info *sbi); void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi); diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 10780e37fc7b6..a60db5e795a4c 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -34,10 +34,8 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync) if (f2fs_inode_dirtied(inode, sync)) return; - if (f2fs_is_atomic_file(inode)) { - set_inode_flag(inode, FI_ATOMIC_DIRTIED); + if (f2fs_is_atomic_file(inode)) return; - } mark_inode_dirty_sync(inode); } @@ -751,8 +749,12 @@ void f2fs_update_inode_page(struct inode *inode) if (err == -ENOENT) return; + if (err == -EFSCORRUPTED) + goto stop_checkpoint; + if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT) goto retry; +stop_checkpoint: f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE); return; } diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 4d7b9fd6ef31a..12c76e3d1cd49 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1134,7 +1134,14 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from) trace_f2fs_truncate_inode_blocks_enter(inode, from); level = get_node_path(inode, from, offset, noffset); - if (level < 0) { + if (level <= 0) { + if (!level) { + level = -EFSCORRUPTED; + f2fs_err(sbi, "%s: inode ino=%lx has corrupted node block, from:%lu addrs:%u", + __func__, inode->i_ino, + from, ADDRS_PER_INODE(inode)); + set_sbi_flag(sbi, SBI_NEED_FSCK); + } trace_f2fs_truncate_inode_blocks_exit(inode, level); return level; } @@ -2270,24 +2277,6 @@ static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i, } } -bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi) -{ - struct f2fs_nm_info *nm_i = NM_I(sbi); - unsigned int i; - bool ret = true; - - f2fs_down_read(&nm_i->nat_tree_lock); - for (i = 0; i < nm_i->nat_blocks; i++) { - if (!test_bit_le(i, nm_i->nat_block_bitmap)) { - ret = false; - break; - } - } - f2fs_up_read(&nm_i->nat_tree_lock); - - return ret; -} - static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set, bool build) { @@ -2966,23 +2955,7 @@ static void 
__adjust_nat_entry_set(struct nat_entry_set *nes, list_add_tail(&nes->set_list, head); } -static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs, - unsigned int valid) -{ - if (valid == 0) { - __set_bit_le(nat_ofs, nm_i->empty_nat_bits); - __clear_bit_le(nat_ofs, nm_i->full_nat_bits); - return; - } - - __clear_bit_le(nat_ofs, nm_i->empty_nat_bits); - if (valid == NAT_ENTRY_PER_BLOCK) - __set_bit_le(nat_ofs, nm_i->full_nat_bits); - else - __clear_bit_le(nat_ofs, nm_i->full_nat_bits); -} - -static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, +static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, struct page *page) { struct f2fs_nm_info *nm_i = NM_I(sbi); @@ -2991,7 +2964,7 @@ static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, int valid = 0; int i = 0; - if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) + if (!enabled_nat_bits(sbi, NULL)) return; if (nat_index == 0) { @@ -3002,36 +2975,17 @@ static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR) valid++; } - - __update_nat_bits(nm_i, nat_index, valid); -} - -void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi) -{ - struct f2fs_nm_info *nm_i = NM_I(sbi); - unsigned int nat_ofs; - - f2fs_down_read(&nm_i->nat_tree_lock); - - for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) { - unsigned int valid = 0, nid_ofs = 0; - - /* handle nid zero due to it should never be used */ - if (unlikely(nat_ofs == 0)) { - valid = 1; - nid_ofs = 1; - } - - for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) { - if (!test_bit_le(nid_ofs, - nm_i->free_nid_bitmap[nat_ofs])) - valid++; - } - - __update_nat_bits(nm_i, nat_ofs, valid); + if (valid == 0) { + __set_bit_le(nat_index, nm_i->empty_nat_bits); + __clear_bit_le(nat_index, nm_i->full_nat_bits); + return; } - f2fs_up_read(&nm_i->nat_tree_lock); + __clear_bit_le(nat_index, nm_i->empty_nat_bits); + if (valid == NAT_ENTRY_PER_BLOCK) + __set_bit_le(nat_index, nm_i->full_nat_bits); + else + __clear_bit_le(nat_index, nm_i->full_nat_bits); } static int __flush_nat_entry_set(struct f2fs_sb_info *sbi, @@ -3050,7 +3004,7 @@ static int __flush_nat_entry_set(struct f2fs_sb_info *sbi, * #1, flush nat entries to journal in current hot data summary block. * #2, flush nat entries to nat page. */ - if ((cpc->reason & CP_UMOUNT) || + if (enabled_nat_bits(sbi, cpc) || !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL)) to_journal = false; @@ -3097,7 +3051,7 @@ static int __flush_nat_entry_set(struct f2fs_sb_info *sbi, if (to_journal) { up_write(&curseg->journal_rwsem); } else { - update_nat_bits(sbi, start_nid, page); + __update_nat_bits(sbi, start_nid, page); f2fs_put_page(page, 1); } @@ -3128,7 +3082,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) * during unmount, let's flush nat_bits before checking * nat_cnt[DIRTY_NAT]. */ - if (cpc->reason & CP_UMOUNT) { + if (enabled_nat_bits(sbi, cpc)) { f2fs_down_write(&nm_i->nat_tree_lock); remove_nats_in_journal(sbi); f2fs_up_write(&nm_i->nat_tree_lock); @@ -3144,7 +3098,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) * entries, remove all entries from journal and merge them * into nat entry set. 
*/ - if (cpc->reason & CP_UMOUNT || + if (enabled_nat_bits(sbi, cpc) || !__has_cursum_space(journal, nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL)) remove_nats_in_journal(sbi); @@ -3181,18 +3135,15 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) __u64 cp_ver = cur_cp_version(ckpt); block_t nat_bits_addr; + if (!enabled_nat_bits(sbi, NULL)) + return 0; + nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8); nm_i->nat_bits = f2fs_kvzalloc(sbi, F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL); if (!nm_i->nat_bits) return -ENOMEM; - nm_i->full_nat_bits = nm_i->nat_bits + 8; - nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes; - - if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) - return 0; - nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) - nm_i->nat_bits_blocks; for (i = 0; i < nm_i->nat_bits_blocks; i++) { @@ -3209,12 +3160,13 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) cp_ver |= (cur_cp_crc(ckpt) << 32); if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) { - clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG); - f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)", - cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits)); + disable_nat_bits(sbi, true); return 0; } + nm_i->full_nat_bits = nm_i->nat_bits + 8; + nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes; + f2fs_notice(sbi, "Found nat_bits in checkpoint"); return 0; } @@ -3225,7 +3177,7 @@ static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) unsigned int i = 0; nid_t nid, last_nid; - if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) + if (!enabled_nat_bits(sbi, NULL)) return; for (i = 0; i < nm_i->nat_blocks; i++) { diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index a622056f27f3a..573cc4725e2e8 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1515,6 +1515,10 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync) inc_page_count(sbi, F2FS_DIRTY_IMETA); } spin_unlock(&sbi->inode_lock[DIRTY_META]); + + if (!ret && f2fs_is_atomic_file(inode)) + set_inode_flag(inode, FI_ATOMIC_DIRTIED); + return ret; } diff --git a/fs/file.c b/fs/file.c index 4cb952541dd03..b6fb6d18ac3b9 100644 --- a/fs/file.c +++ b/fs/file.c @@ -367,17 +367,25 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho old_fds = old_fdt->fd; new_fds = new_fdt->fd; + /* + * We may be racing against fd allocation from other threads using this + * files_struct, despite holding ->file_lock. + * + * alloc_fd() might have already claimed a slot, while fd_install() + * did not populate it yet. Note the latter operates locklessly, so + * the file can show up as we are walking the array below. + * + * At the same time we know no files will disappear as all other + * operations take the lock. + * + * Instead of trying to placate userspace racing with itself, we + * ref the file if we see it and mark the fd slot as unused otherwise. + */ for (i = open_files; i != 0; i--) { - struct file *f = *old_fds++; + struct file *f = rcu_dereference_raw(*old_fds++); if (f) { get_file(f); } else { - /* - * The fd may be claimed in the fd bitmap but not yet - * instantiated in the files array if a sibling thread - * is partway through open(). So make sure that this - * fd is available to the new process. 
- */ __clear_open_fd(open_files - i, new_fdt); } rcu_assign_pointer(*new_fds++, f); @@ -637,7 +645,7 @@ struct file *file_close_fd_locked(struct files_struct *files, unsigned fd) return NULL; fd = array_index_nospec(fd, fdt->max_fds); - file = fdt->fd[fd]; + file = rcu_dereference_raw(fdt->fd[fd]); if (file) { rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); @@ -1219,7 +1227,7 @@ __releases(&files->file_lock) */ fdt = files_fdtable(files); fd = array_index_nospec(fd, fdt->max_fds); - tofree = fdt->fd[fd]; + tofree = rcu_dereference_raw(fdt->fd[fd]); if (!tofree && fd_is_open(fd, fdt)) goto Ebusy; get_file(file); diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index 12ef91d170bb3..7faf1af59d5d8 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -681,7 +681,6 @@ static int __fuse_dax_break_layouts(struct inode *inode, bool *retry, 0, 0, fuse_wait_dax_page(inode)); } -/* dmap_end == 0 leads to unmapping of whole file */ int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start, u64 dmap_end) { diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 2e62e62c07f83..a1e86ec07c38b 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1632,7 +1632,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, goto out_err; if (fc->cache_symlinks) - return page_get_link(dentry, inode, callback); + return page_get_link_raw(dentry, inode, callback); err = -ECHILD; if (!dentry) @@ -1936,7 +1936,7 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry, if (FUSE_IS_DAX(inode) && is_truncate) { filemap_invalidate_lock(mapping); fault_blocked = true; - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) { filemap_invalidate_unlock(mapping); return err; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index e20d91d0ae558..f597f7e68e501 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -253,7 +253,7 @@ static int fuse_open(struct inode *inode, struct file *file) if (dax_truncate) { filemap_invalidate_lock(inode->i_mapping); - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) goto out_inode_unlock; } @@ -3146,7 +3146,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, inode_lock(inode); if (block_faults) { filemap_invalidate_lock(inode->i_mapping); - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) goto out; } diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index d220e28e755fe..749c9f66d74c6 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -1663,6 +1663,9 @@ static int virtio_fs_get_tree(struct fs_context *fsc) unsigned int virtqueue_size; int err = -EIO; + if (!fsc->source) + return invalf(fsc, "No source specified"); + /* This gets a reference on virtio_fs object. This ptr gets installed * in fc->iq->priv. Once fuse_conn is going away, it calls ->put() * to drop the reference to this object. 
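Illustrative aside (not part of the patch series): the hostfs conversion further below replaces fstat64()/lstat64() with statx() so the birth time can also be retrieved. A minimal userspace sketch of that call pattern, assuming a Linux system with glibc >= 2.28 — an empty pathname plus AT_EMPTY_PATH operates on the already-open descriptor, and stx_btime is only meaningful when the filesystem sets STATX_BTIME in stx_mask:

/* Hypothetical standalone demo; not kernel code and not part of the diff. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;
	int fd = open(".", O_RDONLY);

	if (fd < 0)
		return 1;

	/* Empty path + AT_EMPTY_PATH: stat the fd itself, as stat_file() now does. */
	if (statx(fd, "", AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW,
		  STATX_BASIC_STATS | STATX_BTIME, &stx) < 0)
		return 1;

	/* Birth time is optional; trust it only if the kernel reported it. */
	if (stx.stx_mask & STATX_BTIME)
		printf("btime: %lld.%09u\n",
		       (long long)stx.stx_btime.tv_sec, stx.stx_btime.tv_nsec);
	else
		printf("birth time not reported by this filesystem\n");
	return 0;
}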
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c index 6add6ebfef896..cb823a8a6ba96 100644 --- a/fs/hfs/bnode.c +++ b/fs/hfs/bnode.c @@ -67,6 +67,12 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off) else key_len = tree->max_key_len + 1; + if (key_len > sizeof(hfs_btree_key) || key_len < 1) { + memset(key, 0, sizeof(hfs_btree_key)); + pr_err("hfs: Invalid key length: %d\n", key_len); + return; + } + hfs_bnode_read(node, key, off, key_len); } diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c index 87974d5e67915..079ea80534f7d 100644 --- a/fs/hfsplus/bnode.c +++ b/fs/hfsplus/bnode.c @@ -67,6 +67,12 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off) else key_len = tree->max_key_len + 2; + if (key_len > sizeof(hfsplus_btree_key) || key_len < 1) { + memset(key, 0, sizeof(hfsplus_btree_key)); + pr_err("hfsplus: Invalid key length: %d\n", key_len); + return; + } + hfs_bnode_read(node, key, off, key_len); } diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h index 8b39c15c408cc..15b2f094d36ef 100644 --- a/fs/hostfs/hostfs.h +++ b/fs/hostfs/hostfs.h @@ -60,7 +60,7 @@ struct hostfs_stat { unsigned int uid; unsigned int gid; unsigned long long size; - struct hostfs_timespec atime, mtime, ctime; + struct hostfs_timespec atime, mtime, ctime, btime; unsigned int blksize; unsigned long long blocks; struct { diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 94f3cc42c7403..a16a7df0766cd 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -33,6 +33,7 @@ struct hostfs_inode_info { struct inode vfs_inode; struct mutex open_mutex; dev_t dev; + struct hostfs_timespec btime; }; static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode) @@ -550,6 +551,7 @@ static int hostfs_inode_set(struct inode *ino, void *data) } HOSTFS_I(ino)->dev = dev; + HOSTFS_I(ino)->btime = st->btime; ino->i_ino = st->ino; ino->i_mode = st->mode; return hostfs_inode_update(ino, st); @@ -560,7 +562,10 @@ static int hostfs_inode_test(struct inode *inode, void *data) const struct hostfs_stat *st = data; dev_t dev = MKDEV(st->dev.maj, st->dev.min); - return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev; + return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev && + (inode->i_mode & S_IFMT) == (st->mode & S_IFMT) && + HOSTFS_I(inode)->btime.tv_sec == st->btime.tv_sec && + HOSTFS_I(inode)->btime.tv_nsec == st->btime.tv_nsec; } static struct inode *hostfs_iget(struct super_block *sb, char *name) diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c index 97e9c40a94488..3bcd9f35e70b2 100644 --- a/fs/hostfs/hostfs_user.c +++ b/fs/hostfs/hostfs_user.c @@ -18,39 +18,48 @@ #include "hostfs.h" #include -static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p) +static void statx_to_hostfs(const struct statx *buf, struct hostfs_stat *p) { - p->ino = buf->st_ino; - p->mode = buf->st_mode; - p->nlink = buf->st_nlink; - p->uid = buf->st_uid; - p->gid = buf->st_gid; - p->size = buf->st_size; - p->atime.tv_sec = buf->st_atime; - p->atime.tv_nsec = 0; - p->ctime.tv_sec = buf->st_ctime; - p->ctime.tv_nsec = 0; - p->mtime.tv_sec = buf->st_mtime; - p->mtime.tv_nsec = 0; - p->blksize = buf->st_blksize; - p->blocks = buf->st_blocks; - p->rdev.maj = os_major(buf->st_rdev); - p->rdev.min = os_minor(buf->st_rdev); - p->dev.maj = os_major(buf->st_dev); - p->dev.min = os_minor(buf->st_dev); + p->ino = buf->stx_ino; + p->mode = buf->stx_mode; + p->nlink = buf->stx_nlink; + p->uid = buf->stx_uid; + p->gid = buf->stx_gid; + p->size = 
buf->stx_size; + p->atime.tv_sec = buf->stx_atime.tv_sec; + p->atime.tv_nsec = buf->stx_atime.tv_nsec; + p->ctime.tv_sec = buf->stx_ctime.tv_sec; + p->ctime.tv_nsec = buf->stx_ctime.tv_nsec; + p->mtime.tv_sec = buf->stx_mtime.tv_sec; + p->mtime.tv_nsec = buf->stx_mtime.tv_nsec; + if (buf->stx_mask & STATX_BTIME) { + p->btime.tv_sec = buf->stx_btime.tv_sec; + p->btime.tv_nsec = buf->stx_btime.tv_nsec; + } else { + memset(&p->btime, 0, sizeof(p->btime)); + } + p->blksize = buf->stx_blksize; + p->blocks = buf->stx_blocks; + p->rdev.maj = buf->stx_rdev_major; + p->rdev.min = buf->stx_rdev_minor; + p->dev.maj = buf->stx_dev_major; + p->dev.min = buf->stx_dev_minor; } int stat_file(const char *path, struct hostfs_stat *p, int fd) { - struct stat64 buf; + struct statx buf; + int flags = AT_SYMLINK_NOFOLLOW; if (fd >= 0) { - if (fstat64(fd, &buf) < 0) - return -errno; - } else if (lstat64(path, &buf) < 0) { - return -errno; + flags |= AT_EMPTY_PATH; + path = ""; } - stat64_to_hostfs(&buf, p); + + if ((statx(fd, path, flags, STATX_BASIC_STATS | STATX_BTIME, &buf)) < 0) + return -errno; + + statx_to_hostfs(&buf, p); return 0; } diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c index eb2f8273e6f15..09df40b612fbf 100644 --- a/fs/isofs/dir.c +++ b/fs/isofs/dir.c @@ -147,7 +147,8 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, de = tmpde; } /* Basic sanity check, whether name doesn't exceed dir entry */ - if (de_len < de->name_len[0] + + if (de_len < sizeof(struct iso_directory_record) || + de_len < de->name_len[0] + sizeof(struct iso_directory_record)) { printk(KERN_NOTICE "iso9660: Corrupted directory entry" " in block %lu of inode %lu\n", block, diff --git a/fs/isofs/export.c b/fs/isofs/export.c index 35768a63fb1d2..421d247fae523 100644 --- a/fs/isofs/export.c +++ b/fs/isofs/export.c @@ -180,7 +180,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb, return NULL; return isofs_export_iget(sb, - fh_len > 2 ? ifid->parent_block : 0, + fh_len > 3 ? ifid->parent_block : 0, ifid->parent_offset, fh_len > 4 ? ifid->parent_generation : 0); } diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 97f487c3d8fcf..c073f5fb98594 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1884,7 +1884,6 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid, /* Log is no longer empty */ write_lock(&journal->j_state_lock); - WARN_ON(!sb->s_sequence); journal->j_flags &= ~JBD2_FLUSHED; write_unlock(&journal->j_state_lock); diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index f9009e4f9ffd8..0e1019382cf51 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -204,6 +204,10 @@ int dbMount(struct inode *ipbmap) bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel); bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight); bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); + if (!bmp->db_agwidth) { + err = -EINVAL; + goto err_release_metapage; + } bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG || @@ -3403,7 +3407,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks) oldl2agsize = bmp->db_agl2size; bmp->db_agl2size = l2agsize; - bmp->db_agsize = 1 << l2agsize; + bmp->db_agsize = (s64)1 << l2agsize; /* compute new number of AG */ agno = bmp->db_numag; @@ -3666,8 +3670,8 @@ void dbFinalizeBmap(struct inode *ipbmap) * system size is not a multiple of the group size). */ inactfree = (inactags && ag_rem) ? 
- ((inactags - 1) << bmp->db_agl2size) + ag_rem - : inactags << bmp->db_agl2size; + (((s64)inactags - 1) << bmp->db_agl2size) + ag_rem + : ((s64)inactags << bmp->db_agl2size); /* determine how many free blocks are in the active * allocation groups plus the average number of free blocks diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index 8f85177f284b5..93db6eec44655 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c @@ -117,7 +117,8 @@ do { \ if (!(RC)) { \ if (((P)->header.nextindex > \ (((BN) == 0) ? DTROOTMAXSLOT : (P)->header.maxslot)) || \ - ((BN) && ((P)->header.maxslot > DTPAGEMAXSLOT))) { \ + ((BN) && (((P)->header.maxslot > DTPAGEMAXSLOT) || \ + ((P)->header.stblindex >= DTPAGEMAXSLOT)))) { \ BT_PUTPAGE(MP); \ jfs_error((IP)->i_sb, \ "DT_GETPAGE: dtree page corrupt\n"); \ diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index a360b24ed320c..8ddc14c56501a 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c @@ -102,7 +102,7 @@ int diMount(struct inode *ipimap) * allocate/initialize the in-memory inode map control structure */ /* allocate the in-memory inode map control structure. */ - imap = kmalloc(sizeof(struct inomap), GFP_KERNEL); + imap = kzalloc(sizeof(struct inomap), GFP_KERNEL); if (imap == NULL) return -ENOMEM; @@ -456,7 +456,7 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary) dp += inum % 8; /* 8 inodes per 4K page */ /* copy on-disk inode to in-memory inode */ - if ((copy_from_dinode(dp, ip)) != 0) { + if ((copy_from_dinode(dp, ip) != 0) || (ip->i_nlink == 0)) { /* handle bad return by returning NULL for ip */ set_nlink(ip, 1); /* Don't want iput() deleting it */ iput(ip); diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index 24afbae87225a..11d7f74d207be 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c @@ -559,11 +559,16 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size) size_check: if (EALIST_SIZE(ea_buf->xattr) != ea_size) { - int size = clamp_t(int, ea_size, 0, EALIST_SIZE(ea_buf->xattr)); - - printk(KERN_ERR "ea_get: invalid extended attribute\n"); - print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, - ea_buf->xattr, size, 1); + if (unlikely(EALIST_SIZE(ea_buf->xattr) > INT_MAX)) { + printk(KERN_ERR "ea_get: extended attribute size too large: %u > INT_MAX\n", + EALIST_SIZE(ea_buf->xattr)); + } else { + int size = clamp_t(int, ea_size, 0, EALIST_SIZE(ea_buf->xattr)); + + printk(KERN_ERR "ea_get: invalid extended attribute\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, + ea_buf->xattr, size, 1); + } ea_release(inode, ea_buf); rc = -EIO; goto clean_up; diff --git a/fs/libfs.c b/fs/libfs.c index b0f262223b535..3cb49463a8496 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -492,7 +492,7 @@ offset_dir_lookup(struct dentry *parent, loff_t offset) found = find_positive_dentry(parent, NULL, false); else { rcu_read_lock(); - child = mas_find(&mas, DIR_OFFSET_MAX); + child = mas_find_rev(&mas, DIR_OFFSET_MIN); found = find_positive_dentry(parent, child, false); rcu_read_unlock(); } diff --git a/fs/namei.c b/fs/namei.c index 4a4a22a08ac20..6795600c5738a 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -5300,10 +5300,9 @@ const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done) EXPORT_SYMBOL(vfs_get_link); /* get the link contents into pagecache */ -const char *page_get_link(struct dentry *dentry, struct inode *inode, - struct delayed_call *callback) +static char *__page_get_link(struct dentry *dentry, struct inode *inode, + struct delayed_call *callback) { - char *kaddr; 
struct page *page; struct address_space *mapping = inode->i_mapping; @@ -5322,8 +5321,23 @@ const char *page_get_link(struct dentry *dentry, struct inode *inode, } set_delayed_call(callback, page_put_link, page); BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM); - kaddr = page_address(page); - nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1); + return page_address(page); +} + +const char *page_get_link_raw(struct dentry *dentry, struct inode *inode, + struct delayed_call *callback) +{ + return __page_get_link(dentry, inode, callback); +} +EXPORT_SYMBOL_GPL(page_get_link_raw); + +const char *page_get_link(struct dentry *dentry, struct inode *inode, + struct delayed_call *callback) +{ + char *kaddr = __page_get_link(dentry, inode, callback); + + if (!IS_ERR(kaddr)) + nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1); return kaddr; } diff --git a/fs/namespace.c b/fs/namespace.c index 73da51ac5a034..f898de3a6f705 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1986,6 +1986,7 @@ static void warn_mandlock(void) static int can_umount(const struct path *path, int flags) { struct mount *mnt = real_mount(path->mnt); + struct super_block *sb = path->dentry->d_sb; if (!may_mount()) return -EPERM; @@ -1995,7 +1996,7 @@ static int can_umount(const struct path *path, int flags) return -EINVAL; if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */ return -EINVAL; - if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN)) + if (flags & MNT_FORCE && !ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) return -EPERM; return 0; } diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c index b1a66a6e6bc2d..917b7edc34ef5 100644 --- a/fs/netfs/direct_read.c +++ b/fs/netfs/direct_read.c @@ -108,9 +108,9 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq) * Perform a read to an application buffer, bypassing the pagecache and the * local disk cache. 
*/ -static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync) +static ssize_t netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync) { - int ret; + ssize_t ret; _enter("R=%x %llx-%llx", rreq->debug_id, rreq->start, rreq->start + rreq->len - 1); @@ -149,7 +149,7 @@ static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync) } out: - _leave(" = %d", ret); + _leave(" = %zd", ret); return ret; } diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c index a44132c986538..eae415efae24a 100644 --- a/fs/netfs/read_collect.c +++ b/fs/netfs/read_collect.c @@ -258,17 +258,18 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was */ if (!subreq->consumed && !prev_donated && - !list_is_first(&subreq->rreq_link, &rreq->subrequests) && - subreq->start == prev->start + prev->len) { + !list_is_first(&subreq->rreq_link, &rreq->subrequests)) { prev = list_prev_entry(subreq, rreq_link); - WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len); - subreq->start += subreq->len; - subreq->len = 0; - subreq->transferred = 0; - trace_netfs_donate(rreq, subreq, prev, subreq->len, - netfs_trace_donate_to_prev); - trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev); - goto remove_subreq_locked; + if (subreq->start == prev->start + prev->len) { + WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len); + subreq->start += subreq->len; + subreq->len = 0; + subreq->transferred = 0; + trace_netfs_donate(rreq, subreq, prev, subreq->len, + netfs_trace_donate_to_prev); + trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev); + goto remove_subreq_locked; + } } /* If we can't donate down the chain, donate up the chain instead. */ @@ -283,7 +284,7 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was netfs_trace_donate_to_deferred_next); } else { next = list_next_entry(subreq, rreq_link); - WRITE_ONCE(next->prev_donated, excess); + WRITE_ONCE(next->prev_donated, next->prev_donated + excess); trace_netfs_donate(rreq, subreq, next, excess, netfs_trace_donate_to_next); } diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c index 54d5004fec182..e72f5e6748342 100644 --- a/fs/netfs/read_pgpriv2.c +++ b/fs/netfs/read_pgpriv2.c @@ -181,16 +181,17 @@ void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq) break; folioq_unmark3(folioq, slot); - if (!folioq->marks3) { + while (!folioq->marks3) { folioq = folioq->next; if (!folioq) - break; + goto end_of_queue; } slot = __ffs(folioq->marks3); folio = folioq_folio(folioq, slot); } +end_of_queue: netfs_issue_write(wreq, &wreq->io_streams[1]); smp_wmb(); /* Write lists before ALL_QUEUED. */ set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c index 82290c92ba7a2..412d4da742270 100644 --- a/fs/netfs/write_collect.c +++ b/fs/netfs/write_collect.c @@ -576,7 +576,8 @@ void netfs_write_collection_worker(struct work_struct *work) trace_netfs_rreq(wreq, netfs_rreq_trace_write_done); if (wreq->io_streams[1].active && - wreq->io_streams[1].failed) { + wreq->io_streams[1].failed && + ictx->ops->invalidate_cache) { /* Cache write failure doesn't prevent writeback completion * unless we're in disconnected mode. 
*/ diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index d3f76101ad4b9..07932ce9246c1 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -2,6 +2,7 @@ config NFS_FS tristate "NFS client support" depends on INET && FILE_LOCKING && MULTIUSER + select CRC32 select LOCKD select SUNRPC select NFS_COMMON @@ -196,7 +197,6 @@ config NFS_USE_KERNEL_DNS config NFS_DEBUG bool depends on NFS_FS && SUNRPC_DEBUG - select CRC32 default y config NFS_DISABLE_UDP_SUPPORT diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 035ba52742a50..325ba0663a6de 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -79,6 +79,7 @@ static void nfs_mark_return_delegation(struct nfs_server *server, struct nfs_delegation *delegation) { set_bit(NFS_DELEGATION_RETURN, &delegation->flags); + set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags); set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); } @@ -330,14 +331,16 @@ nfs_start_delegation_return(struct nfs_inode *nfsi) } static void nfs_abort_delegation_return(struct nfs_delegation *delegation, - struct nfs_client *clp, int err) + struct nfs_server *server, int err) { - spin_lock(&delegation->lock); clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags); if (err == -EAGAIN) { set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags); - set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state); + set_bit(NFS4SERV_DELEGRETURN_DELAYED, + &server->delegation_flags); + set_bit(NFS4CLNT_DELEGRETURN_DELAYED, + &server->nfs_client->cl_state); } spin_unlock(&delegation->lock); } @@ -547,7 +550,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred, */ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync) { - struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; + struct nfs_server *server = NFS_SERVER(inode); unsigned int mode = O_WRONLY | O_RDWR; int err = 0; @@ -569,11 +572,11 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation /* * Guard against state recovery */ - err = nfs4_wait_clnt_recover(clp); + err = nfs4_wait_clnt_recover(server->nfs_client); } if (err) { - nfs_abort_delegation_return(delegation, clp, err); + nfs_abort_delegation_return(delegation, server, err); goto out; } @@ -590,17 +593,6 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation) if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) ret = true; - else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) { - struct inode *inode; - - spin_lock(&delegation->lock); - inode = delegation->inode; - if (inode && list_empty(&NFS_I(inode)->open_files)) - ret = true; - spin_unlock(&delegation->lock); - } - if (ret) - clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) || test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) || test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) @@ -619,6 +611,9 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server, struct nfs_delegation *place_holder_deleg = NULL; int err = 0; + if (!test_and_clear_bit(NFS4SERV_DELEGRETURN, + &server->delegation_flags)) + return 0; restart: /* * To avoid quadratic looping we hold a reference @@ -670,6 +665,7 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server, cond_resched(); if (!err) goto restart; + set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags); set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); goto out; } @@ -684,6 +680,9 
@@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server) struct nfs_delegation *d; bool ret = false; + if (!test_and_clear_bit(NFS4SERV_DELEGRETURN_DELAYED, + &server->delegation_flags)) + goto out; list_for_each_entry_rcu (d, &server->delegations, super_list) { if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags)) continue; @@ -691,6 +690,7 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server) clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags); ret = true; } +out: return ret; } @@ -780,6 +780,43 @@ int nfs4_inode_return_delegation(struct inode *inode) return 0; } +/** + * nfs4_inode_set_return_delegation_on_close - asynchronously return a delegation + * @inode: inode to process + * + * This routine is called to request that the delegation be returned as soon + * as the file is closed. If the file is already closed, the delegation is + * immediately returned. + */ +void nfs4_inode_set_return_delegation_on_close(struct inode *inode) +{ + struct nfs_delegation *delegation; + struct nfs_delegation *ret = NULL; + + if (!inode) + return; + rcu_read_lock(); + delegation = nfs4_get_valid_delegation(inode); + if (!delegation) + goto out; + spin_lock(&delegation->lock); + if (!delegation->inode) + goto out_unlock; + if (list_empty(&NFS_I(inode)->open_files) && + !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { + /* Refcount matched in nfs_end_delegation_return() */ + ret = nfs_get_delegation(delegation); + } else + set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); +out_unlock: + spin_unlock(&delegation->lock); + if (ret) + nfs_clear_verifier_delegated(inode); +out: + rcu_read_unlock(); + nfs_end_delegation_return(inode, ret, 0); +} + /** * nfs4_inode_return_delegation_on_close - asynchronously return a delegation * @inode: inode to process @@ -841,11 +878,25 @@ int nfs4_inode_make_writeable(struct inode *inode) return nfs4_inode_return_delegation(inode); } -static void nfs_mark_return_if_closed_delegation(struct nfs_server *server, - struct nfs_delegation *delegation) +static void +nfs_mark_return_if_closed_delegation(struct nfs_server *server, + struct nfs_delegation *delegation) { - set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); - set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); + struct inode *inode; + + if (test_bit(NFS_DELEGATION_RETURN, &delegation->flags) || + test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) + return; + spin_lock(&delegation->lock); + inode = delegation->inode; + if (!inode) + goto out; + if (list_empty(&NFS_I(inode)->open_files)) + nfs_mark_return_delegation(server, delegation); + else + set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); +out: + spin_unlock(&delegation->lock); } static bool nfs_server_mark_return_all_delegations(struct nfs_server *server) @@ -1239,6 +1290,7 @@ static void nfs_mark_test_expired_delegation(struct nfs_server *server, return; clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags); + set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags); set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state); } @@ -1317,6 +1369,9 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server, nfs4_stateid stateid; unsigned long gen = ++server->delegation_gen; + if (!test_and_clear_bit(NFS4SERV_DELEGATION_EXPIRED, + &server->delegation_flags)) + return 0; restart: rcu_read_lock(); list_for_each_entry_rcu(delegation, 
&server->delegations, super_list) { @@ -1346,6 +1401,9 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server, goto restart; } nfs_inode_mark_test_expired_delegation(server,inode); + set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags); + set_bit(NFS4CLNT_DELEGATION_EXPIRED, + &server->nfs_client->cl_state); iput(inode); return -EAGAIN; } diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 71524d34ed207..8ff5ab9c5c256 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -49,6 +49,7 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred, unsigned long pagemod_limit, u32 deleg_type); int nfs4_inode_return_delegation(struct inode *inode); void nfs4_inode_return_delegation_on_close(struct inode *inode); +void nfs4_inode_set_return_delegation_on_close(struct inode *inode); int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid); void nfs_inode_evict_delegation(struct inode *inode); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 90079ca134dd3..c1f1b826888c9 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -56,6 +56,7 @@ #include #include +#include "delegation.h" #include "internal.h" #include "iostat.h" #include "pnfs.h" @@ -130,6 +131,20 @@ static void nfs_direct_truncate_request(struct nfs_direct_req *dreq, dreq->count = req_start; } +static void nfs_direct_file_adjust_size_locked(struct inode *inode, + loff_t offset, size_t count) +{ + loff_t newsize = offset + (loff_t)count; + loff_t oldsize = i_size_read(inode); + + if (newsize > oldsize) { + i_size_write(inode, newsize); + NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE; + trace_nfs_size_grow(inode, newsize); + nfs_inc_stats(inode, NFSIOS_EXTENDWRITE); + } +} + /** * nfs_swap_rw - NFS address space operation for swap I/O * @iocb: target I/O control block @@ -272,6 +287,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr) nfs_direct_count_bytes(dreq, hdr); spin_unlock(&dreq->lock); + nfs_update_delegated_atime(dreq->inode); + while (!list_empty(&hdr->pages)) { struct nfs_page *req = nfs_list_entry(hdr->pages.next); struct page *page = req->wb_page; @@ -732,6 +749,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) struct nfs_direct_req *dreq = hdr->dreq; struct nfs_commit_info cinfo; struct nfs_page *req = nfs_list_entry(hdr->pages.next); + struct inode *inode = dreq->inode; int flags = NFS_ODIRECT_DONE; trace_nfs_direct_write_completion(dreq); @@ -753,6 +771,11 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) } spin_unlock(&dreq->lock); + spin_lock(&inode->i_lock); + nfs_direct_file_adjust_size_locked(inode, dreq->io_start, dreq->count); + nfs_update_delegated_mtime_locked(dreq->inode); + spin_unlock(&inode->i_lock); + while (!list_empty(&hdr->pages)) { req = nfs_list_entry(hdr->pages.next); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 6800ee92d742a..153d25d4b810c 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -451,7 +452,7 @@ static bool nfs_release_folio(struct folio *folio, gfp_t gfp) /* If the private flag is set, then the folio is not freeable */ if (folio_test_private(folio)) { if ((current_gfp_context(gfp) & GFP_KERNEL) != GFP_KERNEL || - current_is_kswapd()) + current_is_kswapd() || current_is_kcompactd()) return false; if (nfs_wb_folio(folio->mapping->host, folio) < 0) return false; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 
6bcc4b0e00ab7..8b568a514fd1c 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -895,18 +895,11 @@ u64 nfs_timespec_to_change_attr(const struct timespec64 *ts) return ((u64)ts->tv_sec << 30) + ts->tv_nsec; } -#ifdef CONFIG_CRC32 static inline u32 nfs_stateid_hash(const nfs4_stateid *stateid) { return ~crc32_le(0xFFFFFFFF, &stateid->other[0], NFS4_STATEID_OTHER_SIZE); } -#else -static inline u32 nfs_stateid_hash(nfs4_stateid *stateid) -{ - return 0; -} -#endif static inline bool nfs_error_is_fatal(int err) { diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 405f17e6e0b45..e7bc99c69743c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3898,8 +3898,11 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) { + struct dentry *dentry = ctx->dentry; if (ctx->state == NULL) return; + if (dentry->d_flags & DCACHE_NFSFS_RENAMED) + nfs4_inode_set_return_delegation_on_close(d_inode(dentry)); if (is_sync) nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx)); else diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h index 351616c61df54..f9c291e2165cd 100644 --- a/fs/nfs/nfs4session.h +++ b/fs/nfs/nfs4session.h @@ -148,16 +148,12 @@ static inline void nfs4_copy_sessionid(struct nfs4_sessionid *dst, memcpy(dst->data, src->data, NFS4_MAX_SESSIONID_LEN); } -#ifdef CONFIG_CRC32 /* * nfs_session_id_hash - calculate the crc32 hash for the session id * @session - pointer to session */ #define nfs_session_id_hash(sess_id) \ (~crc32_le(0xFFFFFFFF, &(sess_id)->data[0], sizeof((sess_id)->data))) -#else -#define nfs_session_id_hash(session) (0) -#endif #else /* defined(CONFIG_NFS_V4_1) */ static inline int nfs4_init_session(struct nfs_client *clp) diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index e8ac3f615f932..71f45cc0ca74d 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -82,9 +82,8 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2) */ #define pagepad_maxsz (1) -#define open_owner_id_maxsz (1 + 2 + 1 + 1 + 2) -#define lock_owner_id_maxsz (1 + 1 + 4) -#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) +#define open_owner_id_maxsz (2 + 1 + 2 + 2) +#define lock_owner_id_maxsz (2 + 1 + 2) #define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) #define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) #define op_encode_hdr_maxsz (1) @@ -185,7 +184,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, #define encode_claim_null_maxsz (1 + nfs4_name_maxsz) #define encode_open_maxsz (op_encode_hdr_maxsz + \ 2 + encode_share_access_maxsz + 2 + \ - open_owner_id_maxsz + \ + 1 + open_owner_id_maxsz + \ encode_opentype_maxsz + \ encode_claim_null_maxsz) #define decode_space_limit_maxsz (3) @@ -255,13 +254,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, #define encode_link_maxsz (op_encode_hdr_maxsz + \ nfs4_name_maxsz) #define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz) -#define encode_lockowner_maxsz (7) +#define encode_lockowner_maxsz (2 + 1 + lock_owner_id_maxsz) + #define encode_lock_maxsz (op_encode_hdr_maxsz + \ 7 + \ 1 + encode_stateid_maxsz + 1 + \ encode_lockowner_maxsz) #define decode_lock_denied_maxsz \ - (8 + decode_lockowner_maxsz) + (2 + 2 + 1 + 2 + 1 + lock_owner_id_maxsz) #define decode_lock_maxsz (op_decode_hdr_maxsz + \ decode_lock_denied_maxsz) #define encode_lockt_maxsz (op_encode_hdr_maxsz 
+ 5 + \ @@ -617,7 +617,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, encode_lockowner_maxsz) #define NFS4_dec_release_lockowner_sz \ (compound_decode_hdr_maxsz + \ - decode_lockowner_maxsz) + decode_release_lockowner_maxsz) #define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ @@ -1412,7 +1412,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena __be32 *p; /* * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4, - * owner 4 = 32 + * owner 28 */ encode_nfs4_seqid(xdr, arg->seqid); encode_share_access(xdr, arg->share_access); @@ -5077,7 +5077,7 @@ static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) /* * We create the owner, so we know a proper owner.id length is 4. */ -static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl) +static int decode_lock_denied(struct xdr_stream *xdr, struct file_lock *fl) { uint64_t offset, length, clientid; __be32 *p; diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c index 7b59a40d40c06..784f7c1d003bf 100644 --- a/fs/nfs/sysfs.c +++ b/fs/nfs/sysfs.c @@ -14,6 +14,7 @@ #include #include +#include "internal.h" #include "nfs4_fs.h" #include "netns.h" #include "sysfs.h" @@ -228,6 +229,25 @@ static void shutdown_client(struct rpc_clnt *clnt) rpc_cancel_tasks(clnt, -EIO, shutdown_match_client, NULL); } +/* + * Shut down the nfs_client only once all the superblocks + * have been shut down. + */ +static void shutdown_nfs_client(struct nfs_client *clp) +{ + struct nfs_server *server; + rcu_read_lock(); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { + if (!(server->flags & NFS_MOUNT_SHUTDOWN)) { + rcu_read_unlock(); + return; + } + } + rcu_read_unlock(); + nfs_mark_client_ready(clp, -EIO); + shutdown_client(clp->cl_rpcclient); +} + static ssize_t shutdown_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -259,7 +279,6 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr, server->flags |= NFS_MOUNT_SHUTDOWN; shutdown_client(server->client); - shutdown_client(server->nfs_client->cl_rpcclient); if (!IS_ERR(server->client_acl)) shutdown_client(server->client_acl); @@ -267,6 +286,7 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr, if (server->nlm_host) shutdown_client(server->nlm_host->h_rpcclnt); out: + shutdown_nfs_client(server->nfs_client); return count; } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 82ae2b85d393c..8ff8db09a1e06 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -579,8 +579,10 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio) while (!nfs_lock_request(head)) { ret = nfs_wait_on_request(head); - if (ret < 0) + if (ret < 0) { + nfs_release_request(head); return ERR_PTR(ret); + } } /* Ensure that nobody removed the request before we locked it */ diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig index c0bd1509ccd48..9eb2e795c43c4 100644 --- a/fs/nfsd/Kconfig +++ b/fs/nfsd/Kconfig @@ -4,6 +4,7 @@ config NFSD depends on INET depends on FILE_LOCKING depends on FSNOTIFY + select CRC32 select LOCKD select SUNRPC select EXPORTFS diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 88c03e1825732..127626aba7a23 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -605,7 +605,7 @@ static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp, return status; status = decode_cb_op_status(xdr, OP_CB_GETATTR, &cb->cb_status); - if (status) + if (unlikely(status || 
cb->cb_status)) return status; if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0) return -NFSERR_BAD_XDR; diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 4a765555bf845..1c8fcb04b3cde 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -2052,7 +2052,6 @@ static inline int check_for_legacy_methods(int status, struct net *net) path_put(&path); if (status) return -ENOTDIR; - status = nn->client_tracking_ops->init(net); } return status; } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 57f8818aa47c5..c50839a015e94 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1057,6 +1057,12 @@ static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) return openlockstateid(stid); } +/* + * As the sc_free callback of deleg, this may be called by nfs4_put_stid + * in nfsd_break_one_deleg. + * Considering nfsd_break_one_deleg is called with the flc->flc_lock held, + * this function mustn't ever sleep. + */ static void nfs4_free_deleg(struct nfs4_stid *stid) { struct nfs4_delegation *dp = delegstateid(stid); @@ -5269,6 +5275,7 @@ static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = { static void nfsd_break_one_deleg(struct nfs4_delegation *dp) { + bool queued; /* * We're assuming the state code never drops its reference * without first removing the lease. Since we're in this lease @@ -5277,7 +5284,10 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp) * we know it's safe to take a reference. */ refcount_inc(&dp->dl_stid.sc_count); - WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall)); + queued = nfsd4_run_cb(&dp->dl_recall); + WARN_ON_ONCE(!queued); + if (!queued) + refcount_dec(&dp->dl_stid.sc_count); } /* Called from break_lease() with flc_lock held. */ @@ -6689,14 +6699,19 @@ deleg_reaper(struct nfsd_net *nn) spin_lock(&nn->client_lock); list_for_each_safe(pos, next, &nn->client_lru) { clp = list_entry(pos, struct nfs4_client, cl_lru); - if (clp->cl_state != NFSD4_ACTIVE || - list_empty(&clp->cl_delegations) || - atomic_read(&clp->cl_delegs_in_recall) || - test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) || - (ktime_get_boottime_seconds() - - clp->cl_ra_time < 5)) { + + if (clp->cl_state != NFSD4_ACTIVE) + continue; + if (list_empty(&clp->cl_delegations)) + continue; + if (atomic_read(&clp->cl_delegs_in_recall)) + continue; + if (test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags)) + continue; + if (ktime_get_boottime_seconds() - clp->cl_ra_time < 5) + continue; + if (clp->cl_cb_state != NFSD4_CB_UP) continue; - } list_add(&clp->cl_ra_cblist, &cblist); /* release in nfsd4_cb_recall_any_release */ @@ -6880,7 +6895,7 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, */ statusmask |= SC_STATUS_REVOKED; - statusmask |= SC_STATUS_ADMIN_REVOKED; + statusmask |= SC_STATUS_ADMIN_REVOKED | SC_STATUS_FREEABLE; if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || CLOSE_STATEID(stateid)) @@ -7535,9 +7550,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) return status; - status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG, - SC_STATUS_REVOKED | SC_STATUS_FREEABLE, - &s, nn); + status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG, SC_STATUS_REVOKED, &s, nn); if (status) goto out; dp = delegstateid(s); diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 3adbc05ebaac4..2e835e7c107ee 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1959,6 +1959,7 @@ int nfsd_nl_listener_set_doit(struct 
sk_buff *skb, struct genl_info *info) struct svc_serv *serv; LIST_HEAD(permsocks); struct nfsd_net *nn; + bool delete = false; int err, rem; mutex_lock(&nfsd_mutex); @@ -2019,34 +2020,28 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info) } } - /* For now, no removing old sockets while server is running */ - if (serv->sv_nrthreads && !list_empty(&permsocks)) { + /* + * If there are listener transports remaining on the permsocks list, + * it means we were asked to remove a listener. + */ + if (!list_empty(&permsocks)) { list_splice_init(&permsocks, &serv->sv_permsocks); - spin_unlock_bh(&serv->sv_lock); - err = -EBUSY; - goto out_unlock_mtx; + delete = true; } + spin_unlock_bh(&serv->sv_lock); - /* Close the remaining sockets on the permsocks list */ - while (!list_empty(&permsocks)) { - xprt = list_first_entry(&permsocks, struct svc_xprt, xpt_list); - list_move(&xprt->xpt_list, &serv->sv_permsocks); - - /* - * Newly-created sockets are born with the BUSY bit set. Clear - * it if there are no threads, since nothing can pick it up - * in that case. - */ - if (!serv->sv_nrthreads) - clear_bit(XPT_BUSY, &xprt->xpt_flags); - - set_bit(XPT_CLOSE, &xprt->xpt_flags); - spin_unlock_bh(&serv->sv_lock); - svc_xprt_close(xprt); - spin_lock_bh(&serv->sv_lock); + /* Do not remove listeners while there are active threads. */ + if (serv->sv_nrthreads) { + err = -EBUSY; + goto out_unlock_mtx; } - spin_unlock_bh(&serv->sv_lock); + /* + * Since we can't delete an arbitrary llist entry, destroy the + * remaining listeners and recreate the list. + */ + if (delete) + svc_xprt_destroy_all(serv, net); /* walk list of addrs again, open any that still don't exist */ nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) { @@ -2073,6 +2068,9 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info) xprt = svc_find_listener(serv, xcl_name, net, sa); if (xprt) { + if (delete) + WARN_ONCE(1, "Transport type=%s already exists\n", + xcl_name); svc_xprt_put(xprt); continue; } @@ -2246,8 +2244,14 @@ static __net_init int nfsd_net_init(struct net *net) NFSD_STATS_COUNTERS_NUM); if (retval) goto out_repcache_error; + memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats)); nn->nfsd_svcstats.program = &nfsd_programs[0]; + if (!nfsd_proc_stat_init(net)) { + retval = -ENOMEM; + goto out_proc_error; + } + for (i = 0; i < sizeof(nn->nfsd_versions); i++) nn->nfsd_versions[i] = nfsd_support_version(i); for (i = 0; i < sizeof(nn->nfsd4_minorversions); i++) @@ -2257,12 +2261,13 @@ static __net_init int nfsd_net_init(struct net *net) nfsd4_init_leases_net(nn); get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key)); seqlock_init(&nn->writeverf_lock); - nfsd_proc_stat_init(net); #if IS_ENABLED(CONFIG_NFS_LOCALIO) INIT_LIST_HEAD(&nn->local_clients); #endif return 0; +out_proc_error: + percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM); out_repcache_error: nfsd_idmap_shutdown(net); out_idmap_error: diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h index 876152a91f122..5103c2f4d2253 100644 --- a/fs/nfsd/nfsfh.h +++ b/fs/nfsd/nfsfh.h @@ -267,7 +267,6 @@ static inline bool fh_fsid_match(const struct knfsd_fh *fh1, return true; } -#ifdef CONFIG_CRC32 /** * knfsd_fh_hash - calculate the crc32 hash for the filehandle * @fh - pointer to filehandle @@ -279,12 +278,6 @@ static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh) { return ~crc32_le(0xFFFFFFFF, fh->fh_raw, fh->fh_size); } -#else -static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh) -{ - return 0; -} -#endif /** * 
fh_clear_pre_post_attrs - Reset pre/post attributes diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c index bb22893f1157e..f7eaf95e20fc8 100644 --- a/fs/nfsd/stats.c +++ b/fs/nfsd/stats.c @@ -73,11 +73,11 @@ static int nfsd_show(struct seq_file *seq, void *v) DEFINE_PROC_SHOW_ATTRIBUTE(nfsd); -void nfsd_proc_stat_init(struct net *net) +struct proc_dir_entry *nfsd_proc_stat_init(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); - svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops); + return svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops); } void nfsd_proc_stat_shutdown(struct net *net) diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h index 04aacb6c36e25..e4efb0e4e56d4 100644 --- a/fs/nfsd/stats.h +++ b/fs/nfsd/stats.h @@ -10,7 +10,7 @@ #include #include -void nfsd_proc_stat_init(struct net *net); +struct proc_dir_entry *nfsd_proc_stat_init(struct net *net); void nfsd_proc_stat_shutdown(struct net *net); static inline void nfsd_stats_rc_hits_inc(struct nfsd_net *nn) diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index d6d4f2a0e8982..ca29a5e1600fd 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1935,9 +1935,17 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, return err; } -/* - * Unlink a file or directory - * N.B. After this call fhp needs an fh_put +/** + * nfsd_unlink - remove a directory entry + * @rqstp: RPC transaction context + * @fhp: the file handle of the parent directory to be modified + * @type: enforced file type of the object to be removed + * @fname: the name of directory entry to be removed + * @flen: length of @fname in octets + * + * After this call fhp needs an fh_put. + * + * Returns a generic NFS status code in network byte-order. */ __be32 nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, @@ -2011,15 +2019,17 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, fh_drop_write(fhp); out_nfserr: if (host_err == -EBUSY) { - /* name is mounted-on. There is no perfect - * error status. + /* + * See RFC 8881 Section 18.25.4 para 4: NFSv4 REMOVE + * wants a status unique to the object type. */ - err = nfserr_file_open; - } else { - err = nfserrno(host_err); + if (type != S_IFDIR) + err = nfserr_file_open; + else + err = nfserr_acces; } out: - return err; + return err != nfs_ok ? err : nfserrno(host_err); out_unlock: inode_unlock(dirp); goto out_drop_write; diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c index da1a9312e61a0..dd459316529e8 100644 --- a/fs/ntfs3/attrib.c +++ b/fs/ntfs3/attrib.c @@ -2663,8 +2663,9 @@ int attr_set_compress(struct ntfs_inode *ni, bool compr) attr->nres.run_off = cpu_to_le16(run_off); } - /* Update data attribute flags. */ + /* Update attribute flags. */ if (compr) { + attr->flags &= ~ATTR_FLAG_SPARSED; attr->flags |= ATTR_FLAG_COMPRESSED; attr->nres.c_unit = NTFS_LZNT_CUNIT; } else { diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c index f704ceef95394..7976ac4611c8d 100644 --- a/fs/ntfs3/file.c +++ b/fs/ntfs3/file.c @@ -101,8 +101,26 @@ int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, /* Allowed to change compression for empty files and for directories only. */ if (!is_dedup(ni) && !is_encrypted(ni) && (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { - /* Change compress state. */ - int err = ni_set_compress(inode, flags & FS_COMPR_FL); + int err = 0; + struct address_space *mapping = inode->i_mapping; + + /* write out all data and wait. 
*/ + filemap_invalidate_lock(mapping); + err = filemap_write_and_wait(mapping); + + if (err >= 0) { + /* Change compress state. */ + bool compr = flags & FS_COMPR_FL; + err = ni_set_compress(inode, compr); + + /* For files change a_ops too. */ + if (!err) + mapping->a_ops = compr ? &ntfs_aops_cmpr : + &ntfs_aops; + } + + filemap_invalidate_unlock(mapping); + if (err) return err; } diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c index 175662acd5eaf..608634361a302 100644 --- a/fs/ntfs3/frecord.c +++ b/fs/ntfs3/frecord.c @@ -3431,10 +3431,12 @@ int ni_set_compress(struct inode *inode, bool compr) } ni->std_fa = std->fa; - if (compr) + if (compr) { + std->fa &= ~FILE_ATTRIBUTE_SPARSE_FILE; std->fa |= FILE_ATTRIBUTE_COMPRESSED; - else + } else { std->fa &= ~FILE_ATTRIBUTE_COMPRESSED; + } if (ni->std_fa != std->fa) { ni->std_fa = std->fa; diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c index 7eb9fae22f8da..78d20e4baa2c9 100644 --- a/fs/ntfs3/index.c +++ b/fs/ntfs3/index.c @@ -618,7 +618,7 @@ static bool index_hdr_check(const struct INDEX_HDR *hdr, u32 bytes) u32 off = le32_to_cpu(hdr->de_off); if (!IS_ALIGNED(off, 8) || tot > bytes || end > tot || - off + sizeof(struct NTFS_DE) > end) { + size_add(off, sizeof(struct NTFS_DE)) > end) { /* incorrect index buffer. */ return false; } @@ -736,7 +736,7 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx, if (end > total) return NULL; - if (off + sizeof(struct NTFS_DE) > end) + if (size_add(off, sizeof(struct NTFS_DE)) > end) return NULL; e = Add2Ptr(hdr, off); diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h index 241f2ffdd9201..1ff13b6f96132 100644 --- a/fs/ntfs3/ntfs.h +++ b/fs/ntfs3/ntfs.h @@ -717,7 +717,7 @@ static inline struct NTFS_DE *hdr_first_de(const struct INDEX_HDR *hdr) struct NTFS_DE *e; u16 esize; - if (de_off >= used || de_off + sizeof(struct NTFS_DE) > used ) + if (de_off >= used || size_add(de_off, sizeof(struct NTFS_DE)) > used) return NULL; e = Add2Ptr(hdr, de_off); diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index ea9127ba32084..5d9388b44e5be 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -1803,6 +1803,14 @@ static int __ocfs2_find_path(struct ocfs2_caching_info *ci, el = root_el; while (el->l_tree_depth) { + if (unlikely(le16_to_cpu(el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH)) { + ocfs2_error(ocfs2_metadata_cache_get_super(ci), + "Owner %llu has invalid tree depth %u in extent list\n", + (unsigned long long)ocfs2_metadata_cache_owner(ci), + le16_to_cpu(el->l_tree_depth)); + ret = -EROFS; + goto out; + } if (le16_to_cpu(el->l_next_free_rec) == 0) { ocfs2_error(ocfs2_metadata_cache_get_super(ci), "Owner %llu has empty extent list at depth %u\n", diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index b2c78621da44a..4388004a319d0 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -619,7 +619,6 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c) err = PTR_ERR(upper); if (!IS_ERR(upper)) { err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper); - dput(upper); if (!err) { /* Restore timestamps on parent (best effort) */ @@ -627,6 +626,7 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c) ovl_dentry_set_upper_alias(c->dentry); ovl_dentry_update_reval(c->dentry, upper); } + dput(upper); } inode_unlock(udir); if (err) diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index 844874b4a91a9..500a9634ad533 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -547,8 +547,6 @@ int ovl_set_metacopy_xattr(struct ovl_fs *ofs, struct dentry *d, 
bool ovl_is_metacopy_dentry(struct dentry *dentry); char *ovl_get_redirect_xattr(struct ovl_fs *ofs, const struct path *path, int padding); int ovl_ensure_verity_loaded(struct path *path); -int ovl_get_verity_xattr(struct ovl_fs *ofs, const struct path *path, - u8 *digest_buf, int *buf_length); int ovl_validate_verity(struct ovl_fs *ofs, struct path *metapath, struct path *datapath); diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index fe511192f83cb..87a36c6eea5f3 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -1119,6 +1119,11 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb, return ERR_PTR(-EINVAL); } + if (ctx->nr == ctx->nr_data) { + pr_err("at least one non-data lowerdir is required\n"); + return ERR_PTR(-EINVAL); + } + err = -EINVAL; for (i = 0; i < ctx->nr; i++) { l = &ctx->lower[i]; diff --git a/fs/proc/base.c b/fs/proc/base.c index b31283d81c52e..a2541f5204af0 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -417,7 +417,7 @@ static const struct file_operations proc_pid_cmdline_ops = { #ifdef CONFIG_KALLSYMS /* * Provides a wchan file via kallsyms in a proper one-value-per-file format. - * Returns the resolved symbol. If that fails, simply return the address. + * Returns the resolved symbol to user space. */ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) diff --git a/fs/proc/generic.c b/fs/proc/generic.c index dbe82cf23ee49..3431b083f7d05 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -557,10 +557,16 @@ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode, return p; } -static inline void pde_set_flags(struct proc_dir_entry *pde) +static void pde_set_flags(struct proc_dir_entry *pde) { if (pde->proc_ops->proc_flags & PROC_ENTRY_PERMANENT) pde->flags |= PROC_ENTRY_PERMANENT; + if (pde->proc_ops->proc_read_iter) + pde->flags |= PROC_ENTRY_proc_read_iter; +#ifdef CONFIG_COMPAT + if (pde->proc_ops->proc_compat_ioctl) + pde->flags |= PROC_ENTRY_proc_compat_ioctl; +#endif } struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, @@ -624,6 +630,7 @@ struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode, p->proc_ops = &proc_seq_ops; p->seq_ops = ops; p->state_size = state_size; + pde_set_flags(p); return proc_register(parent, p); } EXPORT_SYMBOL(proc_create_seq_private); @@ -654,6 +661,7 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode, return NULL; p->proc_ops = &proc_single_ops; p->single_show = show; + pde_set_flags(p); return proc_register(parent, p); } EXPORT_SYMBOL(proc_create_single_data); diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 626ad7bd94f24..a3eb3b740f766 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -656,13 +656,13 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) if (S_ISREG(inode->i_mode)) { inode->i_op = de->proc_iops; - if (de->proc_ops->proc_read_iter) + if (pde_has_proc_read_iter(de)) inode->i_fop = &proc_iter_file_ops; else inode->i_fop = &proc_reg_file_ops; #ifdef CONFIG_COMPAT - if (de->proc_ops->proc_compat_ioctl) { - if (de->proc_ops->proc_read_iter) + if (pde_has_proc_compat_ioctl(de)) { + if (pde_has_proc_read_iter(de)) inode->i_fop = &proc_iter_file_ops_compat; else inode->i_fop = &proc_reg_file_ops_compat; diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 87e4d62820252..4e0c5b57ffdbb 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -85,6 +85,20 @@ static inline void 
pde_make_permanent(struct proc_dir_entry *pde) pde->flags |= PROC_ENTRY_PERMANENT; } +static inline bool pde_has_proc_read_iter(const struct proc_dir_entry *pde) +{ + return pde->flags & PROC_ENTRY_proc_read_iter; +} + +static inline bool pde_has_proc_compat_ioctl(const struct proc_dir_entry *pde) +{ +#ifdef CONFIG_COMPAT + return pde->flags & PROC_ENTRY_proc_compat_ioctl; +#else + return false; +#endif +} + extern struct kmem_cache *proc_dir_entry_cache; void pde_free(struct proc_dir_entry *pde); diff --git a/fs/smb/client/asn1.c b/fs/smb/client/asn1.c index b5724ef9f182f..214a44509e7b9 100644 --- a/fs/smb/client/asn1.c +++ b/fs/smb/client/asn1.c @@ -52,6 +52,8 @@ int cifs_neg_token_init_mech_type(void *context, size_t hdrlen, server->sec_kerberos = true; else if (oid == OID_ntlmssp) server->sec_ntlmssp = true; + else if (oid == OID_IAKerb) + server->sec_iakerb = true; else { char buf[50]; diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c index af7849e5974ff..2ad067886ec3f 100644 --- a/fs/smb/client/cifs_spnego.c +++ b/fs/smb/client/cifs_spnego.c @@ -130,11 +130,13 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo, dp = description + strlen(description); - /* for now, only sec=krb5 and sec=mskrb5 are valid */ + /* for now, only sec=krb5 and sec=mskrb5 and iakerb are valid */ if (server->sec_kerberos) sprintf(dp, ";sec=krb5"); else if (server->sec_mskerberos) sprintf(dp, ";sec=mskrb5"); + else if (server->sec_iakerb) + sprintf(dp, ";sec=iakerb"); else { cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n"); sprintf(dp, ";sec=krb5"); diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c index ebe9a7d7c70e8..e36f0e2d7d21e 100644 --- a/fs/smb/client/cifsacl.c +++ b/fs/smb/client/cifsacl.c @@ -763,7 +763,7 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl, struct cifs_fattr *fattr, bool mode_from_special_sid) { int i; - int num_aces = 0; + u16 num_aces = 0; int acl_size; char *acl_base; struct smb_ace **ppace; @@ -778,14 +778,15 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl, } /* validate that we do not go past end of acl */ - if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) { + if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) || + end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) { cifs_dbg(VFS, "ACL too small to parse DACL\n"); return; } cifs_dbg(NOISY, "DACL revision %d size %d num aces %d\n", le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size), - le32_to_cpu(pdacl->num_aces)); + le16_to_cpu(pdacl->num_aces)); /* reset rwx permissions for user/group/other. Also, if num_aces is 0 i.e. 
DACL has no ACEs, @@ -795,12 +796,15 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl, acl_base = (char *)pdacl; acl_size = sizeof(struct smb_acl); - num_aces = le32_to_cpu(pdacl->num_aces); + num_aces = le16_to_cpu(pdacl->num_aces); if (num_aces > 0) { umode_t denied_mode = 0; - if (num_aces > ULONG_MAX / sizeof(struct smb_ace *)) + if (num_aces > (le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) / + (offsetof(struct smb_ace, sid) + + offsetof(struct smb_sid, sub_auth) + sizeof(__le16))) return; + ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL); if (!ppace) @@ -937,12 +941,12 @@ unsigned int setup_special_user_owner_ACE(struct smb_ace *pntace) static void populate_new_aces(char *nacl_base, struct smb_sid *pownersid, struct smb_sid *pgrpsid, - __u64 *pnmode, u32 *pnum_aces, u16 *pnsize, + __u64 *pnmode, u16 *pnum_aces, u16 *pnsize, bool modefromsid, bool posix) { __u64 nmode; - u32 num_aces = 0; + u16 num_aces = 0; u16 nsize = 0; __u64 user_mode; __u64 group_mode; @@ -1050,7 +1054,7 @@ static __u16 replace_sids_and_copy_aces(struct smb_acl *pdacl, struct smb_acl *p u16 size = 0; struct smb_ace *pntace = NULL; char *acl_base = NULL; - u32 src_num_aces = 0; + u16 src_num_aces = 0; u16 nsize = 0; struct smb_ace *pnntace = NULL; char *nacl_base = NULL; @@ -1058,7 +1062,7 @@ static __u16 replace_sids_and_copy_aces(struct smb_acl *pdacl, struct smb_acl *p acl_base = (char *)pdacl; size = sizeof(struct smb_acl); - src_num_aces = le32_to_cpu(pdacl->num_aces); + src_num_aces = le16_to_cpu(pdacl->num_aces); nacl_base = (char *)pndacl; nsize = sizeof(struct smb_acl); @@ -1090,11 +1094,11 @@ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl, u16 size = 0; struct smb_ace *pntace = NULL; char *acl_base = NULL; - u32 src_num_aces = 0; + u16 src_num_aces = 0; u16 nsize = 0; struct smb_ace *pnntace = NULL; char *nacl_base = NULL; - u32 num_aces = 0; + u16 num_aces = 0; bool new_aces_set = false; /* Assuming that pndacl and pnmode are never NULL */ @@ -1112,7 +1116,7 @@ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl, acl_base = (char *)pdacl; size = sizeof(struct smb_acl); - src_num_aces = le32_to_cpu(pdacl->num_aces); + src_num_aces = le16_to_cpu(pdacl->num_aces); /* Retain old ACEs which we can retain */ for (i = 0; i < src_num_aces; ++i) { @@ -1158,7 +1162,7 @@ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl, } finalize_dacl: - pndacl->num_aces = cpu_to_le32(num_aces); + pndacl->num_aces = cpu_to_le16(num_aces); pndacl->size = cpu_to_le16(nsize); return 0; @@ -1293,7 +1297,7 @@ static int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *pnntsd, dacloffset ? 
dacl_ptr->revision : cpu_to_le16(ACL_REVISION); ndacl_ptr->size = cpu_to_le16(0); - ndacl_ptr->num_aces = cpu_to_le32(0); + ndacl_ptr->num_aces = cpu_to_le16(0); rc = set_chmod_dacl(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr, pnmode, mode_from_sid, posix); @@ -1651,7 +1655,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode, dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset); if (mode_from_sid) nsecdesclen += - le32_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace); + le16_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace); else /* cifsacl */ nsecdesclen += le16_to_cpu(dacl_ptr->size); } diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c index 7a43daacc8159..7c61c1e944c7a 100644 --- a/fs/smb/client/cifsencrypt.c +++ b/fs/smb/client/cifsencrypt.c @@ -702,18 +702,12 @@ cifs_crypto_secmech_release(struct TCP_Server_Info *server) cifs_free_hash(&server->secmech.md5); cifs_free_hash(&server->secmech.sha512); - if (!SERVER_IS_CHAN(server)) { - if (server->secmech.enc) { - crypto_free_aead(server->secmech.enc); - server->secmech.enc = NULL; - } - - if (server->secmech.dec) { - crypto_free_aead(server->secmech.dec); - server->secmech.dec = NULL; - } - } else { + if (server->secmech.enc) { + crypto_free_aead(server->secmech.enc); server->secmech.enc = NULL; + } + if (server->secmech.dec) { + crypto_free_aead(server->secmech.dec); server->secmech.dec = NULL; } } diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h index 05274121e46f0..a8484af7a2fbc 100644 --- a/fs/smb/client/cifsglob.h +++ b/fs/smb/client/cifsglob.h @@ -151,6 +151,7 @@ enum securityEnum { NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */ RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */ Kerberos, /* Kerberos via SPNEGO */ + IAKerb, /* Kerberos proxy */ }; enum cifs_reparse_type { @@ -209,10 +210,8 @@ struct cifs_cred { struct cifs_open_info_data { bool adjust_tz; - union { - bool reparse_point; - bool symlink; - }; + bool reparse_point; + bool contains_posix_file_info; struct { /* ioctl response buffer */ struct { @@ -745,6 +744,7 @@ struct TCP_Server_Info { bool sec_kerberosu2u; /* supports U2U Kerberos */ bool sec_kerberos; /* supports plain Kerberos */ bool sec_mskerberos; /* supports legacy MS Kerberos */ + bool sec_iakerb; /* supports pass-through auth for Kerberos (krb5 proxy) */ bool large_buf; /* is current buffer large? 
*/ /* use SMBD connection instead of socket */ bool rdma; @@ -2117,6 +2117,8 @@ static inline char *get_security_type_str(enum securityEnum sectype) return "Kerberos"; case NTLMv2: return "NTLMv2"; + case IAKerb: + return "IAKerb"; default: return "Unknown"; } diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h index 907af3cbffd1b..90b7b30abfbd8 100644 --- a/fs/smb/client/cifsproto.h +++ b/fs/smb/client/cifsproto.h @@ -160,6 +160,8 @@ extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool); extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name, struct cifsFileInfo **ret_file); +extern int cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode, + struct file *file); extern unsigned int smbCalcSize(void *buf); extern int decode_negTokenInit(unsigned char *security_blob, int length, struct TCP_Server_Info *server); diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index fb51cdf552061..d5549e06a533d 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -987,13 +987,9 @@ clean_demultiplex_info(struct TCP_Server_Info *server) msleep(125); if (cifs_rdma_enabled(server)) smbd_destroy(server); - if (server->ssocket) { sock_release(server->ssocket); server->ssocket = NULL; - - /* Release netns reference for the socket. */ - put_net(cifs_net_ns(server)); } if (!list_empty(&server->pending_mid_q)) { @@ -1041,7 +1037,6 @@ clean_demultiplex_info(struct TCP_Server_Info *server) */ } - /* Release netns reference for this server. */ put_net(cifs_net_ns(server)); kfree(server->leaf_fullpath); kfree(server->hostname); @@ -1717,10 +1712,9 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx, tcp_ses->ops = ctx->ops; tcp_ses->vals = ctx->vals; - - /* Grab netns reference for this server. */ cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns)); + tcp_ses->sign = ctx->sign; tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId); tcp_ses->noblockcnt = ctx->rootfs; tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs; @@ -1850,7 +1844,6 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx, out_err_crypto_release: cifs_crypto_secmech_release(tcp_ses); - /* Release netns reference for this server. 
*/ put_net(cifs_net_ns(tcp_ses)); out_err: @@ -1859,10 +1852,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx, cifs_put_tcp_session(tcp_ses->primary_server, false); kfree(tcp_ses->hostname); kfree(tcp_ses->leaf_fullpath); - if (tcp_ses->ssocket) { + if (tcp_ses->ssocket) sock_release(tcp_ses->ssocket); - put_net(cifs_net_ns(tcp_ses)); - } kfree(tcp_ses); } return ERR_PTR(rc); @@ -1873,9 +1864,8 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx, bool match_super) { - if (ctx->sectype != Unspecified && - ctx->sectype != ses->sectype) - return 0; + struct TCP_Server_Info *server = ses->server; + enum securityEnum ctx_sec, ses_sec; if (!match_super && ctx->dfs_root_ses != ses->dfs_root_ses) return 0; @@ -1887,11 +1877,20 @@ static int match_session(struct cifs_ses *ses, if (ses->chan_max < ctx->max_channels) return 0; - switch (ses->sectype) { + ctx_sec = server->ops->select_sectype(server, ctx->sectype); + ses_sec = server->ops->select_sectype(server, ses->sectype); + + if (ctx_sec != ses_sec) + return 0; + + switch (ctx_sec) { + case IAKerb: case Kerberos: if (!uid_eq(ctx->cred_uid, ses->cred_uid)) return 0; break; + case NTLMv2: + case RawNTLMSSP: default: /* NULL username means anonymous session */ if (ses->user_name == NULL) { @@ -2465,6 +2464,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) return 0; if (tcon->nodelete != ctx->nodelete) return 0; + if (tcon->posix_extensions != ctx->linux_ext) + return 0; return 1; } @@ -3120,20 +3121,20 @@ generic_ip_connect(struct TCP_Server_Info *server) socket = server->ssocket; } else { struct net *net = cifs_net_ns(server); + struct sock *sk; - rc = sock_create_kern(net, sfamily, SOCK_STREAM, IPPROTO_TCP, &server->ssocket); + rc = __sock_create(net, sfamily, SOCK_STREAM, + IPPROTO_TCP, &server->ssocket, 1); if (rc < 0) { cifs_server_dbg(VFS, "Error %d creating socket\n", rc); return rc; } - /* - * Grab netns reference for the socket. - * - * It'll be released here, on error, or in clean_demultiplex_info() upon server - * teardown. - */ - get_net(net); + sk = server->ssocket->sk; + __netns_tracker_free(net, &sk->ns_tracker, false); + sk->sk_net_refcnt = 1; + get_net_track(net, &sk->ns_tracker, GFP_KERNEL); + sock_inuse_add(net, 1); /* BB other socket options to set KEEPALIVE, NODELAY? 
*/ cifs_dbg(FYI, "Socket created\n"); @@ -3147,10 +3148,8 @@ generic_ip_connect(struct TCP_Server_Info *server) } rc = bind_socket(server); - if (rc < 0) { - put_net(cifs_net_ns(server)); + if (rc < 0) return rc; - } /* * Eventually check for other socket options to change from @@ -3187,7 +3186,6 @@ generic_ip_connect(struct TCP_Server_Info *server) if (rc < 0) { cifs_dbg(FYI, "Error %d connecting to server\n", rc); trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc); - put_net(cifs_net_ns(server)); sock_release(socket); server->ssocket = NULL; return rc; @@ -3196,9 +3194,6 @@ generic_ip_connect(struct TCP_Server_Info *server) if (sport == htons(RFC1001_PORT)) rc = ip_rfc1001_connect(server); - if (rc < 0) - put_net(cifs_net_ns(server)); - return rc; } diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index 313c851fc1c12..0f6fec042f6a0 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -1002,6 +1002,11 @@ int cifs_open(struct inode *inode, struct file *file) } else { _cifsFileInfo_put(cfile, true, false); } + } else { + /* hard link on the deferred close file */ + rc = cifs_get_hardlink_path(tcon, inode, file); + if (rc) + cifs_close_deferred_file(CIFS_I(inode)); } if (server->oplocks) @@ -2066,6 +2071,29 @@ cifs_move_llist(struct list_head *source, struct list_head *dest) list_move(li, dest); } +int +cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode, + struct file *file) +{ + struct cifsFileInfo *open_file = NULL; + struct cifsInodeInfo *cinode = CIFS_I(inode); + int rc = 0; + + spin_lock(&tcon->open_file_lock); + spin_lock(&cinode->open_file_lock); + + list_for_each_entry(open_file, &cinode->openFileList, flist) { + if (file->f_flags == open_file->f_flags) { + rc = -EINVAL; + break; + } + } + + spin_unlock(&cinode->open_file_lock); + spin_unlock(&tcon->open_file_lock); + return rc; +} + void cifs_free_llist(struct list_head *llist) { diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c index 48606e2ddffdc..1f1f4586673a7 100644 --- a/fs/smb/client/fs_context.c +++ b/fs/smb/client/fs_context.c @@ -164,6 +164,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = { fsparam_string("username", Opt_user), fsparam_string("pass", Opt_pass), fsparam_string("password", Opt_pass), + fsparam_string("pass2", Opt_pass2), fsparam_string("password2", Opt_pass2), fsparam_string("ip", Opt_ip), fsparam_string("addr", Opt_ip), @@ -1041,6 +1042,9 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, } else if (!strcmp("user", param->key) || !strcmp("username", param->key)) { skip_parsing = true; opt = Opt_user; + } else if (!strcmp("pass2", param->key) || !strcmp("password2", param->key)) { + skip_parsing = true; + opt = Opt_pass2; } } @@ -1250,21 +1254,21 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, } break; case Opt_acregmax: - ctx->acregmax = HZ * result.uint_32; - if (ctx->acregmax > CIFS_MAX_ACTIMEO) { + if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) { cifs_errorf(fc, "acregmax too large\n"); goto cifs_parse_mount_err; } + ctx->acregmax = HZ * result.uint_32; break; case Opt_acdirmax: - ctx->acdirmax = HZ * result.uint_32; - if (ctx->acdirmax > CIFS_MAX_ACTIMEO) { + if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) { cifs_errorf(fc, "acdirmax too large\n"); goto cifs_parse_mount_err; } + ctx->acdirmax = HZ * result.uint_32; break; case Opt_actimeo: - if (HZ * result.uint_32 > CIFS_MAX_ACTIMEO) { + if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) { cifs_errorf(fc, "timeout too large\n"); goto
cifs_parse_mount_err; } @@ -1276,13 +1280,18 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, ctx->acdirmax = ctx->acregmax = HZ * result.uint_32; break; case Opt_closetimeo: - ctx->closetimeo = HZ * result.uint_32; - if (ctx->closetimeo > SMB3_MAX_DCLOSETIMEO) { + if (result.uint_32 > SMB3_MAX_DCLOSETIMEO / HZ) { cifs_errorf(fc, "closetimeo too large\n"); goto cifs_parse_mount_err; } + ctx->closetimeo = HZ * result.uint_32; break; case Opt_echo_interval: + if (result.uint_32 < SMB_ECHO_INTERVAL_MIN || + result.uint_32 > SMB_ECHO_INTERVAL_MAX) { + cifs_errorf(fc, "echo interval is out of bounds\n"); + goto cifs_parse_mount_err; + } ctx->echo_interval = result.uint_32; break; case Opt_snapshot: diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c index fafc07e38663c..31fce0a1b5719 100644 --- a/fs/smb/client/inode.c +++ b/fs/smb/client/inode.c @@ -968,7 +968,7 @@ cifs_get_file_info(struct file *filp) /* TODO: add support to query reparse tag */ data.adjust_tz = false; if (data.symlink_target) { - data.symlink = true; + data.reparse_point = true; data.reparse.tag = IO_REPARSE_TAG_SYMLINK; } path = build_path_from_dentry(dentry, page); @@ -1193,6 +1193,29 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data, rc = server->ops->parse_reparse_point(cifs_sb, full_path, iov, data); + /* + * If the reparse point was not handled but it is a + * name surrogate which points to a directory, then treat + * it as a new mount point. A name surrogate reparse point + * represents another named entity in the system. + */ + if (rc == -EOPNOTSUPP && + IS_REPARSE_TAG_NAME_SURROGATE(data->reparse.tag) && + (le32_to_cpu(data->fi.Attributes) & ATTR_DIRECTORY)) { + rc = 0; + cifs_create_junction_fattr(fattr, sb); + goto out; + } + /* + * If the reparse point is unsupported by the Linux SMB + * client, then let the SMB server process it. So mask + * the -EOPNOTSUPP error code. This allows the Linux SMB + * client to send an SMB OPEN request to the server. If the + * server does not support this reparse point either, it + * will return an error when the path is opened.
+ */ + if (rc == -EOPNOTSUPP) + rc = 0; } break; } @@ -1381,7 +1404,7 @@ int cifs_get_inode_info(struct inode **inode, struct cifs_fattr fattr = {}; int rc; - if (is_inode_cache_good(*inode)) { + if (!data && is_inode_cache_good(*inode)) { cifs_dbg(FYI, "No need to revalidate cached inode sizes\n"); return 0; } @@ -1480,7 +1503,7 @@ int smb311_posix_get_inode_info(struct inode **inode, struct cifs_fattr fattr = {}; int rc; - if (is_inode_cache_good(*inode)) { + if (!data && is_inode_cache_good(*inode)) { cifs_dbg(FYI, "No need to revalidate cached inode sizes\n"); return 0; } diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c index e56a8df23fec9..b6556fe3dfa11 100644 --- a/fs/smb/client/reparse.c +++ b/fs/smb/client/reparse.c @@ -633,8 +633,6 @@ int parse_reparse_point(struct reparse_data_buffer *buf, const char *full_path, bool unicode, struct cifs_open_info_data *data) { - struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); - data->reparse.buf = buf; /* See MS-FSCC 2.1.2 */ @@ -651,13 +649,15 @@ int parse_reparse_point(struct reparse_data_buffer *buf, case IO_REPARSE_TAG_LX_FIFO: case IO_REPARSE_TAG_LX_CHR: case IO_REPARSE_TAG_LX_BLK: - break; + if (le16_to_cpu(buf->ReparseDataLength) != 0) { + cifs_dbg(VFS, "srv returned malformed buffer for reparse point: 0x%08x\n", + le32_to_cpu(buf->ReparseTag)); + return -EIO; + } + return 0; default: - cifs_tcon_dbg(VFS | ONCE, "unhandled reparse tag: 0x%08x\n", - le32_to_cpu(buf->ReparseTag)); - break; + return -EOPNOTSUPP; } - return 0; } int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb, diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h index ff05b0e75c928..f080f92cb1e74 100644 --- a/fs/smb/client/reparse.h +++ b/fs/smb/client/reparse.h @@ -97,14 +97,30 @@ static inline bool reparse_inode_match(struct inode *inode, static inline bool cifs_open_data_reparse(struct cifs_open_info_data *data) { - struct smb2_file_all_info *fi = &data->fi; - u32 attrs = le32_to_cpu(fi->Attributes); + u32 attrs; bool ret; - ret = data->reparse_point || (attrs & ATTR_REPARSE); - if (ret) - attrs |= ATTR_REPARSE; - fi->Attributes = cpu_to_le32(attrs); + if (data->contains_posix_file_info) { + struct smb311_posix_qinfo *fi = &data->posix_fi; + + attrs = le32_to_cpu(fi->DosAttributes); + if (data->reparse_point) { + attrs |= ATTR_REPARSE; + fi->DosAttributes = cpu_to_le32(attrs); + } + + } else { + struct smb2_file_all_info *fi = &data->fi; + + attrs = le32_to_cpu(fi->Attributes); + if (data->reparse_point) { + attrs |= ATTR_REPARSE; + fi->Attributes = cpu_to_le32(attrs); + } + } + + ret = attrs & ATTR_REPARSE; + return ret; } diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c index c88e9657f47a8..2426fa7405173 100644 --- a/fs/smb/client/sess.c +++ b/fs/smb/client/sess.c @@ -550,6 +550,13 @@ cifs_ses_add_channel(struct cifs_ses *ses, ctx->sockopt_tcp_nodelay = ses->server->tcp_nodelay; ctx->echo_interval = ses->server->echo_interval / HZ; ctx->max_credits = ses->server->max_credits; + ctx->min_offload = ses->server->min_offload; + ctx->compress = ses->server->compression.requested; + ctx->dfs_conn = ses->server->dfs_conn; + ctx->ignore_signature = ses->server->ignore_signature; + ctx->leaf_fullpath = ses->server->leaf_fullpath; + ctx->rootfs = ses->server->noblockcnt; + ctx->retrans = ses->server->retrans; /* * This will be used for encoding/decoding user/domain/pw @@ -1263,12 +1270,13 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) switch (requested) { case Kerberos: case RawNTLMSSP: + case 
IAKerb: return requested; case Unspecified: if (server->sec_ntlmssp && (global_secflags & CIFSSEC_MAY_NTLMSSP)) return RawNTLMSSP; - if ((server->sec_kerberos || server->sec_mskerberos) && + if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; fallthrough; diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c index c70f4961c4eb7..bd791aa54681f 100644 --- a/fs/smb/client/smb1ops.c +++ b/fs/smb/client/smb1ops.c @@ -551,7 +551,7 @@ static int cifs_query_path_info(const unsigned int xid, int rc; FILE_ALL_INFO fi = {}; - data->symlink = false; + data->reparse_point = false; data->adjust_tz = false; /* could do find first instead but this returns more info */ @@ -592,7 +592,7 @@ static int cifs_query_path_info(const unsigned int xid, /* Need to check if this is a symbolic link or not */ tmprc = CIFS_open(xid, &oparms, &oplock, NULL); if (tmprc == -EOPNOTSUPP) - data->symlink = true; + data->reparse_point = true; else if (tmprc == 0) CIFSSMBClose(xid, tcon, fid.netfid); } diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c index 7dfd3eb3847b3..6048b3fed3e78 100644 --- a/fs/smb/client/smb2inode.c +++ b/fs/smb/client/smb2inode.c @@ -648,6 +648,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon, switch (cmds[i]) { case SMB2_OP_QUERY_INFO: idata = in_iov[i].iov_base; + idata->contains_posix_file_info = false; if (rc == 0 && cfile && cfile->symlink_target) { idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL); if (!idata->symlink_target) @@ -671,6 +672,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon, break; case SMB2_OP_POSIX_QUERY_INFO: idata = in_iov[i].iov_base; + idata->contains_posix_file_info = true; if (rc == 0 && cfile && cfile->symlink_target) { idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL); if (!idata->symlink_target) @@ -768,6 +770,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon, idata = in_iov[i].iov_base; idata->reparse.io.iov = *iov; idata->reparse.io.buftype = resp_buftype[i + 1]; + idata->contains_posix_file_info = false; /* BB VERIFY */ rbuf = reparse_buf_ptr(iov); if (IS_ERR(rbuf)) { rc = PTR_ERR(rbuf); @@ -789,6 +792,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon, case SMB2_OP_QUERY_WSL_EA: if (!rc) { idata = in_iov[i].iov_base; + idata->contains_posix_file_info = false; qi_rsp = rsp_iov[i + 1].iov_base; data[0] = (u8 *)qi_rsp + le16_to_cpu(qi_rsp->OutputBufferOffset); size[0] = le32_to_cpu(qi_rsp->OutputBufferLength); diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c index f3c4b70b77b94..cddf273c14aed 100644 --- a/fs/smb/client/smb2misc.c +++ b/fs/smb/client/smb2misc.c @@ -816,11 +816,12 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid, WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative"); spin_unlock(&cifs_tcp_ses_lock); - if (tcon->ses) + if (tcon->ses) { server = tcon->ses->server; - - cifs_server_dbg(FYI, "tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n", - tcon->tid, persistent_fid, volatile_fid); + cifs_server_dbg(FYI, + "tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n", + tcon->tid, persistent_fid, volatile_fid); + } return 0; } diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index 44952727fef9e..590b70d71694b 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -1001,6 +1001,7 @@ 
static int smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon, if (!data->symlink_target) return -ENOMEM; } + data->contains_posix_file_info = false; return SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid, &data->fi); } @@ -4575,9 +4576,9 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf, return rc; } } else { - if (unlikely(!server->secmech.dec)) - return -EIO; - + rc = smb3_crypto_aead_allocate(server); + if (unlikely(rc)) + return rc; tfm = server->secmech.dec; } @@ -4991,6 +4992,10 @@ receive_encrypted_standard(struct TCP_Server_Info *server, next_buffer = (char *)cifs_buf_get(); else next_buffer = (char *)cifs_small_buf_get(); + if (!next_buffer) { + cifs_server_dbg(VFS, "No memory for (large) SMB response\n"); + return -1; + } memcpy(next_buffer, buf + next_cmd, pdu_length - next_cmd); } @@ -5173,7 +5178,7 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode, FILE_CREATE, CREATE_NOT_DIR | CREATE_OPTION_SPECIAL, ACL_NO_MODE); oparms.fid = &fid; - + idata.contains_posix_file_info = false; rc = server->ops->open(xid, &oparms, &oplock, &idata); if (rc) goto out; diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index 2e3f78fe9210f..1a7b82664255a 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -1269,15 +1269,8 @@ SMB2_negotiate(const unsigned int xid, cifs_server_dbg(VFS, "Missing expected negotiate contexts\n"); } - if (server->cipher_type && !rc) { - if (!SERVER_IS_CHAN(server)) { - rc = smb3_crypto_aead_allocate(server); - } else { - /* For channels, just reuse the primary server crypto secmech. */ - server->secmech.enc = server->primary_server->secmech.enc; - server->secmech.dec = server->primary_server->secmech.dec; - } - } + if (server->cipher_type && !rc) + rc = smb3_crypto_aead_allocate(server); neg_exit: free_rsp_buf(resp_buftype, rsp); return rc; @@ -1435,7 +1428,7 @@ smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) if (server->sec_ntlmssp && (global_secflags & CIFSSEC_MAY_NTLMSSP)) return RawNTLMSSP; - if ((server->sec_kerberos || server->sec_mskerberos) && + if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; fallthrough; @@ -2175,7 +2168,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, tcon_error_exit: if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) - cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); + cifs_dbg(VFS | ONCE, "BAD_NETWORK_NAME: %s\n", tree); goto tcon_exit; } diff --git a/fs/smb/common/smbacl.h b/fs/smb/common/smbacl.h index 6a60698fc6f0f..a624ec9e4a144 100644 --- a/fs/smb/common/smbacl.h +++ b/fs/smb/common/smbacl.h @@ -107,7 +107,8 @@ struct smb_sid { struct smb_acl { __le16 revision; /* revision level */ __le16 size; - __le32 num_aces; + __le16 num_aces; + __le16 reserved; } __attribute__((packed)); struct smb_ace { diff --git a/fs/smb/common/smbfsctl.h b/fs/smb/common/smbfsctl.h index 4b379e84c46b9..3253a18ecb5cb 100644 --- a/fs/smb/common/smbfsctl.h +++ b/fs/smb/common/smbfsctl.h @@ -159,6 +159,9 @@ #define IO_REPARSE_TAG_LX_CHR 0x80000025 #define IO_REPARSE_TAG_LX_BLK 0x80000026 +/* If Name Surrogate Bit is set, the file or directory represents another named entity in the system. 
*/ +#define IS_REPARSE_TAG_NAME_SURROGATE(tag) (!!((tag) & 0x20000000)) + /* fsctl flags */ /* If Flags is set to this value, the request is an FSCTL not ioctl request */ #define SMB2_0_IOCTL_IS_FSCTL 0x00000001 diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c index 8892177e500f1..9544975136831 100644 --- a/fs/smb/server/auth.c +++ b/fs/smb/server/auth.c @@ -1016,9 +1016,9 @@ static int ksmbd_get_encryption_key(struct ksmbd_work *work, __u64 ses_id, ses_enc_key = enc ? sess->smb3encryptionkey : sess->smb3decryptionkey; - if (enc) - ksmbd_user_session_get(sess); memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE); + if (!enc) + ksmbd_user_session_put(sess); return 0; } @@ -1217,7 +1217,7 @@ int ksmbd_crypt_message(struct ksmbd_work *work, struct kvec *iov, free_sg: kfree(sg); free_req: - kfree(req); + aead_request_free(req); free_ctx: ksmbd_release_crypto_ctx(ctx); return rc; diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c index bf45822db5d58..ab11246ccd8a0 100644 --- a/fs/smb/server/connection.c +++ b/fs/smb/server/connection.c @@ -432,6 +432,26 @@ void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops) default_conn_ops.terminate_fn = ops->terminate_fn; } +void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn) +{ + atomic_inc(&conn->r_count); +} + +void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn) +{ + /* + * Checking waitqueue to dropping pending requests on + * disconnection. waitqueue_active is safe because it + * uses atomic operation for condition. + */ + atomic_inc(&conn->refcnt); + if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q)) + wake_up(&conn->r_count_q); + + if (atomic_dec_and_test(&conn->refcnt)) + kfree(conn); +} + int ksmbd_conn_transport_init(void) { int ret; diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h index b379ae4fdcdff..14620e147dda5 100644 --- a/fs/smb/server/connection.h +++ b/fs/smb/server/connection.h @@ -27,6 +27,7 @@ enum { KSMBD_SESS_EXITING, KSMBD_SESS_NEED_RECONNECT, KSMBD_SESS_NEED_NEGOTIATE, + KSMBD_SESS_NEED_SETUP, KSMBD_SESS_RELEASING }; @@ -168,6 +169,8 @@ int ksmbd_conn_transport_init(void); void ksmbd_conn_transport_destroy(void); void ksmbd_conn_lock(struct ksmbd_conn *conn); void ksmbd_conn_unlock(struct ksmbd_conn *conn); +void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn); +void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn); /* * WARNING @@ -185,6 +188,11 @@ static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn) return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE; } +static inline bool ksmbd_conn_need_setup(struct ksmbd_conn *conn) +{ + return READ_ONCE(conn->status) == KSMBD_SESS_NEED_SETUP; +} + static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn) { return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT; @@ -215,6 +223,11 @@ static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn) WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE); } +static inline void ksmbd_conn_set_need_setup(struct ksmbd_conn *conn) +{ + WRITE_ONCE(conn->status, KSMBD_SESS_NEED_SETUP); +} + static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn) { WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT); diff --git a/fs/smb/server/ksmbd_work.c b/fs/smb/server/ksmbd_work.c index d7c676c151e20..544d8ccd29b0a 100644 --- a/fs/smb/server/ksmbd_work.c +++ b/fs/smb/server/ksmbd_work.c @@ -26,7 +26,6 @@ struct ksmbd_work *ksmbd_alloc_work_struct(void) INIT_LIST_HEAD(&work->request_entry); 
INIT_LIST_HEAD(&work->async_request_entry); INIT_LIST_HEAD(&work->fp_entry); - INIT_LIST_HEAD(&work->interim_entry); INIT_LIST_HEAD(&work->aux_read_list); work->iov_alloc_cnt = 4; work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec), @@ -56,8 +55,6 @@ void ksmbd_free_work_struct(struct ksmbd_work *work) kfree(work->tr_buf); kvfree(work->request_buf); kfree(work->iov); - if (!list_empty(&work->interim_entry)) - list_del(&work->interim_entry); if (work->async_id) ksmbd_release_id(&work->conn->async_ida, work->async_id); diff --git a/fs/smb/server/ksmbd_work.h b/fs/smb/server/ksmbd_work.h index 8ca2c813246e6..d36393ff8310c 100644 --- a/fs/smb/server/ksmbd_work.h +++ b/fs/smb/server/ksmbd_work.h @@ -89,7 +89,6 @@ struct ksmbd_work { /* List head at conn->async_requests */ struct list_head async_request_entry; struct list_head fp_entry; - struct list_head interim_entry; }; /** diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c index d960ddcbba165..f83daf72f877e 100644 --- a/fs/smb/server/mgmt/user_session.c +++ b/fs/smb/server/mgmt/user_session.c @@ -181,7 +181,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn) down_write(&sessions_table_lock); down_write(&conn->session_lock); xa_for_each(&conn->sessions, id, sess) { - if (atomic_read(&sess->refcnt) == 0 && + if (atomic_read(&sess->refcnt) <= 1 && (sess->state != SMB2_SESSION_VALID || time_after(jiffies, sess->last_active + SMB2_SESSION_TIMEOUT))) { @@ -230,7 +230,11 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn) if (!ksmbd_chann_del(conn, sess) && xa_empty(&sess->ksmbd_chann_list)) { hash_del(&sess->hlist); - ksmbd_session_destroy(sess); + down_write(&conn->session_lock); + xa_erase(&conn->sessions, sess->id); + up_write(&conn->session_lock); + if (atomic_dec_and_test(&sess->refcnt)) + ksmbd_session_destroy(sess); } } } @@ -249,13 +253,30 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn) if (xa_empty(&sess->ksmbd_chann_list)) { xa_erase(&conn->sessions, sess->id); hash_del(&sess->hlist); - ksmbd_session_destroy(sess); + if (atomic_dec_and_test(&sess->refcnt)) + ksmbd_session_destroy(sess); } } up_write(&conn->session_lock); up_write(&sessions_table_lock); } +bool is_ksmbd_session_in_connection(struct ksmbd_conn *conn, + unsigned long long id) +{ + struct ksmbd_session *sess; + + down_read(&conn->session_lock); + sess = xa_load(&conn->sessions, id); + if (sess) { + up_read(&conn->session_lock); + return true; + } + up_read(&conn->session_lock); + + return false; +} + struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn, unsigned long long id) { @@ -309,8 +330,8 @@ void ksmbd_user_session_put(struct ksmbd_session *sess) if (atomic_read(&sess->refcnt) <= 0) WARN_ON(1); - else - atomic_dec(&sess->refcnt); + else if (atomic_dec_and_test(&sess->refcnt)) + ksmbd_session_destroy(sess); } struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn, @@ -353,13 +374,13 @@ void destroy_previous_session(struct ksmbd_conn *conn, ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_RECONNECT); err = ksmbd_conn_wait_idle_sess_id(conn, id); if (err) { - ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE); + ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP); goto out; } ksmbd_destroy_file_table(&prev_sess->file_table); prev_sess->state = SMB2_SESSION_EXPIRED; - ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE); + ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP); ksmbd_launch_ksmbd_durable_scavenger(); out: up_write(&conn->session_lock); @@ -417,7 
+438,7 @@ static struct ksmbd_session *__session_create(int protocol) xa_init(&sess->rpc_handle_list); sess->sequence_number = 1; rwlock_init(&sess->tree_conns_lock); - atomic_set(&sess->refcnt, 1); + atomic_set(&sess->refcnt, 2); ret = __init_smb2_session(sess); if (ret) diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h index c1c4b20bd5c6c..f21348381d598 100644 --- a/fs/smb/server/mgmt/user_session.h +++ b/fs/smb/server/mgmt/user_session.h @@ -87,6 +87,8 @@ void ksmbd_session_destroy(struct ksmbd_session *sess); struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id); struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn, unsigned long long id); +bool is_ksmbd_session_in_connection(struct ksmbd_conn *conn, + unsigned long long id); int ksmbd_session_register(struct ksmbd_conn *conn, struct ksmbd_session *sess); void ksmbd_sessions_deregister(struct ksmbd_conn *conn); diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c index 4142c7ad5fa91..e2ba0dadb5fbf 100644 --- a/fs/smb/server/oplock.c +++ b/fs/smb/server/oplock.c @@ -46,7 +46,6 @@ static struct oplock_info *alloc_opinfo(struct ksmbd_work *work, opinfo->fid = id; opinfo->Tid = Tid; INIT_LIST_HEAD(&opinfo->op_entry); - INIT_LIST_HEAD(&opinfo->interim_list); init_waitqueue_head(&opinfo->oplock_q); init_waitqueue_head(&opinfo->oplock_brk); atomic_set(&opinfo->refcount, 1); @@ -130,14 +129,6 @@ static void free_opinfo(struct oplock_info *opinfo) kfree(opinfo); } -static inline void opinfo_free_rcu(struct rcu_head *rcu_head) -{ - struct oplock_info *opinfo; - - opinfo = container_of(rcu_head, struct oplock_info, rcu_head); - free_opinfo(opinfo); -} - struct oplock_info *opinfo_get(struct ksmbd_file *fp) { struct oplock_info *opinfo; @@ -158,8 +149,8 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci) if (list_empty(&ci->m_op_list)) return NULL; - rcu_read_lock(); - opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info, + down_read(&ci->m_lock); + opinfo = list_first_entry(&ci->m_op_list, struct oplock_info, op_entry); if (opinfo) { if (opinfo->conn == NULL || @@ -172,8 +163,7 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci) } } } - - rcu_read_unlock(); + up_read(&ci->m_lock); return opinfo; } @@ -186,7 +176,7 @@ void opinfo_put(struct oplock_info *opinfo) if (!atomic_dec_and_test(&opinfo->refcount)) return; - call_rcu(&opinfo->rcu_head, opinfo_free_rcu); + free_opinfo(opinfo); } static void opinfo_add(struct oplock_info *opinfo) @@ -194,7 +184,7 @@ static void opinfo_add(struct oplock_info *opinfo) struct ksmbd_inode *ci = opinfo->o_fp->f_ci; down_write(&ci->m_lock); - list_add_rcu(&opinfo->op_entry, &ci->m_op_list); + list_add(&opinfo->op_entry, &ci->m_op_list); up_write(&ci->m_lock); } @@ -208,7 +198,7 @@ static void opinfo_del(struct oplock_info *opinfo) write_unlock(&lease_list_lock); } down_write(&ci->m_lock); - list_del_rcu(&opinfo->op_entry); + list_del(&opinfo->op_entry); up_write(&ci->m_lock); } @@ -635,6 +625,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk) { struct smb2_oplock_break *rsp = NULL; struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work); + struct ksmbd_conn *conn = work->conn; struct oplock_break_info *br_info = work->request_buf; struct smb2_hdr *rsp_hdr; struct ksmbd_file *fp; @@ -690,6 +681,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk) out: ksmbd_free_work_struct(work); + ksmbd_conn_r_count_dec(conn); } /** @@ -723,6 +715,7 @@ static int 
smb2_oplock_break_noti(struct oplock_info *opinfo) work->conn = conn; work->sess = opinfo->sess; + ksmbd_conn_r_count_inc(conn); if (opinfo->op_state == OPLOCK_ACK_WAIT) { INIT_WORK(&work->work, __smb2_oplock_break_noti); ksmbd_queue_work(work); @@ -745,6 +738,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk) { struct smb2_lease_break *rsp = NULL; struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work); + struct ksmbd_conn *conn = work->conn; struct lease_break_info *br_info = work->request_buf; struct smb2_hdr *rsp_hdr; @@ -791,6 +785,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk) out: ksmbd_free_work_struct(work); + ksmbd_conn_r_count_dec(conn); } /** @@ -803,7 +798,6 @@ static void __smb2_lease_break_noti(struct work_struct *wk) static int smb2_lease_break_noti(struct oplock_info *opinfo) { struct ksmbd_conn *conn = opinfo->conn; - struct list_head *tmp, *t; struct ksmbd_work *work; struct lease_break_info *br_info; struct lease *lease = opinfo->o_lease; @@ -830,17 +824,8 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo) work->conn = conn; work->sess = opinfo->sess; + ksmbd_conn_r_count_inc(conn); if (opinfo->op_state == OPLOCK_ACK_WAIT) { - list_for_each_safe(tmp, t, &opinfo->interim_list) { - struct ksmbd_work *in_work; - - in_work = list_entry(tmp, struct ksmbd_work, - interim_entry); - setup_async_work(in_work, NULL, NULL); - smb2_send_interim_resp(in_work, STATUS_PENDING); - list_del_init(&in_work->interim_entry); - release_async_work(in_work); - } INIT_WORK(&work->work, __smb2_lease_break_noti); ksmbd_queue_work(work); wait_for_break_ack(opinfo); @@ -871,7 +856,8 @@ static void wait_lease_breaking(struct oplock_info *opinfo) } } -static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level) +static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level, + struct ksmbd_work *in_work) { int err = 0; @@ -914,9 +900,15 @@ static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level) } if (lease->state & (SMB2_LEASE_WRITE_CACHING_LE | - SMB2_LEASE_HANDLE_CACHING_LE)) + SMB2_LEASE_HANDLE_CACHING_LE)) { + if (in_work) { + setup_async_work(in_work, NULL, NULL); + smb2_send_interim_resp(in_work, STATUS_PENDING); + release_async_work(in_work); + } + brk_opinfo->op_state = OPLOCK_ACK_WAIT; - else + } else atomic_dec(&brk_opinfo->breaking_cnt); } else { err = oplock_break_pending(brk_opinfo, req_op_level); @@ -1116,7 +1108,7 @@ void smb_send_parent_lease_break_noti(struct ksmbd_file *fp, if (ksmbd_conn_releasing(opinfo->conn)) continue; - oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE); + oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL); opinfo_put(opinfo); } } @@ -1152,7 +1144,7 @@ void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp) if (ksmbd_conn_releasing(opinfo->conn)) continue; - oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE); + oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL); opinfo_put(opinfo); } } @@ -1252,8 +1244,7 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid, goto op_break_not_needed; } - list_add(&work->interim_entry, &prev_opinfo->interim_list); - err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II); + err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II, work); opinfo_put(prev_opinfo); if (err == -ENOENT) goto set_lev; @@ -1322,8 +1313,7 @@ static void smb_break_all_write_oplock(struct ksmbd_work *work, } brk_opinfo->open_trunc = is_trunc; - list_add(&work->interim_entry, &brk_opinfo->interim_list); - oplock_break(brk_opinfo, 
SMB2_OPLOCK_LEVEL_II); + oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II, work); opinfo_put(brk_opinfo); } @@ -1348,8 +1338,8 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp, ci = fp->f_ci; op = opinfo_get(fp); - rcu_read_lock(); - list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) { + down_read(&ci->m_lock); + list_for_each_entry(brk_op, &ci->m_op_list, op_entry) { if (brk_op->conn == NULL) continue; @@ -1359,7 +1349,6 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp, if (ksmbd_conn_releasing(brk_op->conn)) continue; - rcu_read_unlock(); if (brk_op->is_lease && (brk_op->o_lease->state & (~(SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE)))) { @@ -1386,12 +1375,11 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp, SMB2_LEASE_KEY_SIZE)) goto next; brk_op->open_trunc = is_trunc; - oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE); + oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE, NULL); next: opinfo_put(brk_op); - rcu_read_lock(); } - rcu_read_unlock(); + up_read(&ci->m_lock); if (op) opinfo_put(op); @@ -1506,6 +1494,10 @@ struct lease_ctx_info *parse_lease_state(void *open_req) if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) { struct create_lease_v2 *lc = (struct create_lease_v2 *)cc; + if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) < + sizeof(struct create_lease_v2) - 4) + return NULL; + memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); lreq->req_state = lc->lcontext.LeaseState; lreq->flags = lc->lcontext.LeaseFlags; @@ -1518,6 +1510,10 @@ struct lease_ctx_info *parse_lease_state(void *open_req) } else { struct create_lease *lc = (struct create_lease *)cc; + if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) < + sizeof(struct create_lease)) + return NULL; + memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); lreq->req_state = lc->lcontext.LeaseState; lreq->flags = lc->lcontext.LeaseFlags; diff --git a/fs/smb/server/oplock.h b/fs/smb/server/oplock.h index 72bc88a63a408..9a56eaadd0dd8 100644 --- a/fs/smb/server/oplock.h +++ b/fs/smb/server/oplock.h @@ -67,12 +67,10 @@ struct oplock_info { bool is_lease; bool open_trunc; /* truncate on open */ struct lease *o_lease; - struct list_head interim_list; struct list_head op_entry; struct list_head lease_entry; wait_queue_head_t oplock_q; /* Other server threads */ wait_queue_head_t oplock_brk; /* oplock breaking wait */ - struct rcu_head rcu_head; }; struct lease_break_info { diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c index d146b0e7c3a9d..d523b860236ab 100644 --- a/fs/smb/server/server.c +++ b/fs/smb/server/server.c @@ -270,17 +270,7 @@ static void handle_ksmbd_work(struct work_struct *wk) ksmbd_conn_try_dequeue_request(work); ksmbd_free_work_struct(work); - /* - * Checking waitqueue to dropping pending requests on - * disconnection. waitqueue_active is safe because it - * uses atomic operation for condition. 
- */ - atomic_inc(&conn->refcnt); - if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q)) - wake_up(&conn->r_count_q); - - if (atomic_dec_and_test(&conn->refcnt)) - kfree(conn); + ksmbd_conn_r_count_dec(conn); } /** @@ -310,7 +300,7 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn) conn->request_buf = NULL; ksmbd_conn_enqueue_request(work); - atomic_inc(&conn->r_count); + ksmbd_conn_r_count_inc(conn); /* update activity on connection */ conn->last_active = jiffies; INIT_WORK(&work->work, handle_ksmbd_work); diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index c763a2f7df664..129517a0c5c73 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -1247,7 +1247,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work) } conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode); - ksmbd_conn_set_need_negotiate(conn); + ksmbd_conn_set_need_setup(conn); err_out: if (rc) @@ -1268,6 +1268,9 @@ static int alloc_preauth_hash(struct ksmbd_session *sess, if (sess->Preauth_HashValue) return 0; + if (!conn->preauth_info) + return -ENOMEM; + sess->Preauth_HashValue = kmemdup(conn->preauth_info->Preauth_HashValue, PREAUTH_HASHVALUE_SIZE, GFP_KERNEL); if (!sess->Preauth_HashValue) @@ -1596,8 +1599,10 @@ static int krb5_authenticate(struct ksmbd_work *work, if (prev_sess_id && prev_sess_id != sess->id) destroy_previous_session(conn, sess->user, prev_sess_id); - if (sess->state == SMB2_SESSION_VALID) + if (sess->state == SMB2_SESSION_VALID) { ksmbd_free_user(sess->user); + sess->user = NULL; + } retval = ksmbd_krb5_authenticate(sess, in_blob, in_len, out_blob, &out_len); @@ -1671,6 +1676,11 @@ int smb2_sess_setup(struct ksmbd_work *work) ksmbd_debug(SMB, "Received request for session setup\n"); + if (!ksmbd_conn_need_setup(conn) && !ksmbd_conn_good(conn)) { + work->send_no_response = 1; + return rc; + } + WORK_BUFFERS(work, req, rsp); rsp->StructureSize = cpu_to_le16(9); @@ -1704,44 +1714,38 @@ int smb2_sess_setup(struct ksmbd_work *work) if (conn->dialect != sess->dialect) { rc = -EINVAL; - ksmbd_user_session_put(sess); goto out_err; } if (!(req->hdr.Flags & SMB2_FLAGS_SIGNED)) { rc = -EINVAL; - ksmbd_user_session_put(sess); goto out_err; } if (strncmp(conn->ClientGUID, sess->ClientGUID, SMB2_CLIENT_GUID_SIZE)) { rc = -ENOENT; - ksmbd_user_session_put(sess); goto out_err; } if (sess->state == SMB2_SESSION_IN_PROGRESS) { rc = -EACCES; - ksmbd_user_session_put(sess); goto out_err; } if (sess->state == SMB2_SESSION_EXPIRED) { rc = -EFAULT; - ksmbd_user_session_put(sess); goto out_err; } - ksmbd_user_session_put(sess); if (ksmbd_conn_need_reconnect(conn)) { rc = -EFAULT; + ksmbd_user_session_put(sess); sess = NULL; goto out_err; } - sess = ksmbd_session_lookup(conn, sess_id); - if (!sess) { + if (is_ksmbd_session_in_connection(conn, sess_id)) { rc = -EACCES; goto out_err; } @@ -1907,10 +1911,12 @@ int smb2_sess_setup(struct ksmbd_work *work) sess->last_active = jiffies; sess->state = SMB2_SESSION_EXPIRED; + ksmbd_user_session_put(sess); + work->sess = NULL; if (try_delay) { ksmbd_conn_set_need_reconnect(conn); ssleep(5); - ksmbd_conn_set_need_negotiate(conn); + ksmbd_conn_set_need_setup(conn); } } smb2_set_err_rsp(work); @@ -2234,14 +2240,15 @@ int smb2_session_logoff(struct ksmbd_work *work) return -ENOENT; } - ksmbd_destroy_file_table(&sess->file_table); down_write(&conn->session_lock); sess->state = SMB2_SESSION_EXPIRED; up_write(&conn->session_lock); - ksmbd_free_user(sess->user); - sess->user = NULL; - ksmbd_all_conn_set_status(sess_id, 
KSMBD_SESS_NEED_NEGOTIATE); + if (sess->user) { + ksmbd_free_user(sess->user); + sess->user = NULL; + } + ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_SETUP); rsp->StructureSize = cpu_to_le16(4); err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_logoff_rsp)); @@ -2703,6 +2710,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work, goto out; } + if (le16_to_cpu(context->DataOffset) + + le32_to_cpu(context->DataLength) < + sizeof(struct create_durable_reconn_v2_req)) { + err = -EINVAL; + goto out; + } + recon_v2 = (struct create_durable_reconn_v2_req *)context; persistent_id = recon_v2->Fid.PersistentFileId; dh_info->fp = ksmbd_lookup_durable_fd(persistent_id); @@ -2736,6 +2750,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work, goto out; } + if (le16_to_cpu(context->DataOffset) + + le32_to_cpu(context->DataLength) < + sizeof(struct create_durable_reconn_req)) { + err = -EINVAL; + goto out; + } + recon = (struct create_durable_reconn_req *)context; persistent_id = recon->Data.Fid.PersistentFileId; dh_info->fp = ksmbd_lookup_durable_fd(persistent_id); @@ -2761,6 +2782,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work, goto out; } + if (le16_to_cpu(context->DataOffset) + + le32_to_cpu(context->DataLength) < + sizeof(struct create_durable_req_v2)) { + err = -EINVAL; + goto out; + } + durable_v2_blob = (struct create_durable_req_v2 *)context; ksmbd_debug(SMB, "Request for durable v2 open\n"); @@ -7441,17 +7469,17 @@ int smb2_lock(struct ksmbd_work *work) } no_check_cl: + flock = smb_lock->fl; + list_del(&smb_lock->llist); + if (smb_lock->zero_len) { err = 0; goto skip; } - - flock = smb_lock->fl; - list_del(&smb_lock->llist); retry: rc = vfs_lock_file(filp, smb_lock->cmd, flock, NULL); skip: - if (flags & SMB2_LOCKFLAG_UNLOCK) { + if (smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) { if (!rc) { ksmbd_debug(SMB, "File unlocked\n"); } else if (rc == -ENOENT) { diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c index 1c9775f1efa56..376ae68144afa 100644 --- a/fs/smb/server/smbacl.c +++ b/fs/smb/server/smbacl.c @@ -270,6 +270,11 @@ static int sid_to_id(struct mnt_idmap *idmap, return -EIO; } + if (psid->num_subauth == 0) { + pr_err("%s: zero subauthorities!\n", __func__); + return -EIO; + } + if (sidtype == SIDOWNER) { kuid_t uid; uid_t id; @@ -333,7 +338,7 @@ void posix_state_to_acl(struct posix_acl_state *state, pace->e_perm = state->other.allow; } -int init_acl_state(struct posix_acl_state *state, int cnt) +int init_acl_state(struct posix_acl_state *state, u16 cnt) { int alloc; @@ -368,7 +373,7 @@ static void parse_dacl(struct mnt_idmap *idmap, struct smb_fattr *fattr) { int i, ret; - int num_aces = 0; + u16 num_aces = 0; unsigned int acl_size; char *acl_base; struct smb_ace **ppace; @@ -389,16 +394,18 @@ static void parse_dacl(struct mnt_idmap *idmap, ksmbd_debug(SMB, "DACL revision %d size %d num aces %d\n", le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size), - le32_to_cpu(pdacl->num_aces)); + le16_to_cpu(pdacl->num_aces)); acl_base = (char *)pdacl; acl_size = sizeof(struct smb_acl); - num_aces = le32_to_cpu(pdacl->num_aces); + num_aces = le16_to_cpu(pdacl->num_aces); if (num_aces <= 0) return; - if (num_aces > ULONG_MAX / sizeof(struct smb_ace *)) + if (num_aces > (le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) / + (offsetof(struct smb_ace, sid) + + offsetof(struct smb_sid, sub_auth) + sizeof(__le16))) return; ret = init_acl_state(&acl_state, num_aces); @@ -432,6 +439,7 @@ static void parse_dacl(struct mnt_idmap *idmap, 
offsetof(struct smb_sid, sub_auth); if (end_of_acl - acl_base < acl_size || + ppace[i]->sid.num_subauth == 0 || ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES || (end_of_acl - acl_base < acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) || @@ -580,7 +588,7 @@ static void parse_dacl(struct mnt_idmap *idmap, static void set_posix_acl_entries_dacl(struct mnt_idmap *idmap, struct smb_ace *pndace, - struct smb_fattr *fattr, u32 *num_aces, + struct smb_fattr *fattr, u16 *num_aces, u16 *size, u32 nt_aces_num) { struct posix_acl_entry *pace; @@ -701,7 +709,7 @@ static void set_ntacl_dacl(struct mnt_idmap *idmap, struct smb_fattr *fattr) { struct smb_ace *ntace, *pndace; - int nt_num_aces = le32_to_cpu(nt_dacl->num_aces), num_aces = 0; + u16 nt_num_aces = le16_to_cpu(nt_dacl->num_aces), num_aces = 0; unsigned short size = 0; int i; @@ -728,7 +736,7 @@ static void set_ntacl_dacl(struct mnt_idmap *idmap, set_posix_acl_entries_dacl(idmap, pndace, fattr, &num_aces, &size, nt_num_aces); - pndacl->num_aces = cpu_to_le32(num_aces); + pndacl->num_aces = cpu_to_le16(num_aces); pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size); } @@ -736,7 +744,7 @@ static void set_mode_dacl(struct mnt_idmap *idmap, struct smb_acl *pndacl, struct smb_fattr *fattr) { struct smb_ace *pace, *pndace; - u32 num_aces = 0; + u16 num_aces = 0; u16 size = 0, ace_size = 0; uid_t uid; const struct smb_sid *sid; @@ -792,7 +800,7 @@ static void set_mode_dacl(struct mnt_idmap *idmap, fattr->cf_mode, 0007); out: - pndacl->num_aces = cpu_to_le32(num_aces); + pndacl->num_aces = cpu_to_le16(num_aces); pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size); } @@ -807,6 +815,13 @@ static int parse_sid(struct smb_sid *psid, char *end_of_acl) return -EINVAL; } + if (!psid->num_subauth) + return 0; + + if (psid->num_subauth > SID_MAX_SUB_AUTHORITIES || + end_of_acl < (char *)psid + 8 + sizeof(__le32) * psid->num_subauth) + return -EINVAL; + return 0; } @@ -848,6 +863,9 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd, pntsd->type = cpu_to_le16(DACL_PRESENT); if (pntsd->osidoffset) { + if (le32_to_cpu(pntsd->osidoffset) < sizeof(struct smb_ntsd)) + return -EINVAL; + rc = parse_sid(owner_sid_ptr, end_of_acl); if (rc) { pr_err("%s: Error %d parsing Owner SID\n", __func__, rc); @@ -863,6 +881,9 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd, } if (pntsd->gsidoffset) { + if (le32_to_cpu(pntsd->gsidoffset) < sizeof(struct smb_ntsd)) + return -EINVAL; + rc = parse_sid(group_sid_ptr, end_of_acl); if (rc) { pr_err("%s: Error %d mapping Owner SID to gid\n", @@ -884,6 +905,9 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd, pntsd->type |= cpu_to_le16(DACL_PROTECTED); if (dacloffset) { + if (dacloffset < sizeof(struct smb_ntsd)) + return -EINVAL; + parse_dacl(idmap, dacl_ptr, end_of_acl, owner_sid_ptr, group_sid_ptr, fattr); } @@ -1006,8 +1030,11 @@ int smb_inherit_dacl(struct ksmbd_conn *conn, struct smb_sid owner_sid, group_sid; struct dentry *parent = path->dentry->d_parent; struct mnt_idmap *idmap = mnt_idmap(path->mnt); - int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0, pdacl_size; - int rc = 0, num_aces, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size; + int inherited_flags = 0, flags = 0, i, nt_size = 0, pdacl_size; + int rc = 0, pntsd_type, pntsd_size, acl_len, aces_size; + unsigned int dacloffset; + size_t dacl_struct_end; + u16 num_aces, ace_cnt = 0; char *aces_base; bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode); @@ -1015,15 
+1042,18 @@ int smb_inherit_dacl(struct ksmbd_conn *conn, parent, &parent_pntsd); if (pntsd_size <= 0) return -ENOENT; + dacloffset = le32_to_cpu(parent_pntsd->dacloffset); - if (!dacloffset || (dacloffset + sizeof(struct smb_acl) > pntsd_size)) { + if (!dacloffset || + check_add_overflow(dacloffset, sizeof(struct smb_acl), &dacl_struct_end) || + dacl_struct_end > (size_t)pntsd_size) { rc = -EINVAL; goto free_parent_pntsd; } parent_pdacl = (struct smb_acl *)((char *)parent_pntsd + dacloffset); acl_len = pntsd_size - dacloffset; - num_aces = le32_to_cpu(parent_pdacl->num_aces); + num_aces = le16_to_cpu(parent_pdacl->num_aces); pntsd_type = le16_to_cpu(parent_pntsd->type); pdacl_size = le16_to_cpu(parent_pdacl->size); @@ -1182,7 +1212,7 @@ int smb_inherit_dacl(struct ksmbd_conn *conn, pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset)); pdacl->revision = cpu_to_le16(2); pdacl->size = cpu_to_le16(sizeof(struct smb_acl) + nt_size); - pdacl->num_aces = cpu_to_le32(ace_cnt); + pdacl->num_aces = cpu_to_le16(ace_cnt); pace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl)); memcpy(pace, aces_base, nt_size); pntsd_size += sizeof(struct smb_acl) + nt_size; @@ -1219,7 +1249,9 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path, struct smb_ntsd *pntsd = NULL; struct smb_acl *pdacl; struct posix_acl *posix_acls; - int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size, dacl_offset; + int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size; + unsigned int dacl_offset; + size_t dacl_struct_end; struct smb_sid sid; int granted = le32_to_cpu(*pdaccess & ~FILE_MAXIMAL_ACCESS_LE); struct smb_ace *ace; @@ -1238,7 +1270,8 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path, dacl_offset = le32_to_cpu(pntsd->dacloffset); if (!dacl_offset || - (dacl_offset + sizeof(struct smb_acl) > pntsd_size)) + check_add_overflow(dacl_offset, sizeof(struct smb_acl), &dacl_struct_end) || + dacl_struct_end > (size_t)pntsd_size) goto err_out; pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset)); @@ -1263,7 +1296,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path, ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl)); aces_size = acl_size - sizeof(struct smb_acl); - for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) { + for (i = 0; i < le16_to_cpu(pdacl->num_aces); i++) { if (offsetof(struct smb_ace, access_req) > aces_size) break; ace_size = le16_to_cpu(ace->size); @@ -1284,7 +1317,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path, ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl)); aces_size = acl_size - sizeof(struct smb_acl); - for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) { + for (i = 0; i < le16_to_cpu(pdacl->num_aces); i++) { if (offsetof(struct smb_ace, access_req) > aces_size) break; ace_size = le16_to_cpu(ace->size); diff --git a/fs/smb/server/smbacl.h b/fs/smb/server/smbacl.h index 24ce576fc2924..355adaee39b87 100644 --- a/fs/smb/server/smbacl.h +++ b/fs/smb/server/smbacl.h @@ -86,7 +86,7 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd, int build_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd, struct smb_ntsd *ppntsd, int ppntsd_size, int addition_info, __u32 *secdesclen, struct smb_fattr *fattr); -int init_acl_state(struct posix_acl_state *state, int cnt); +int init_acl_state(struct posix_acl_state *state, u16 cnt); void free_acl_state(struct posix_acl_state *state); void posix_state_to_acl(struct 
posix_acl_state *state, struct posix_acl_entry *pace); diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c index 69bac122adbe0..9b3c68014aee2 100644 --- a/fs/smb/server/transport_ipc.c +++ b/fs/smb/server/transport_ipc.c @@ -281,6 +281,7 @@ static int handle_response(int type, void *payload, size_t sz) if (entry->type + 1 != type) { pr_err("Waiting for IPC type %d, got %d. Ignore.\n", entry->type + 1, type); + continue; } entry->response = kvzalloc(sz, GFP_KERNEL); @@ -309,7 +310,11 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req) server_conf.signing = req->signing; server_conf.tcp_port = req->tcp_port; server_conf.ipc_timeout = req->ipc_timeout * HZ; - server_conf.deadtime = req->deadtime * SMB_ECHO_INTERVAL; + if (check_mul_overflow(req->deadtime, SMB_ECHO_INTERVAL, + &server_conf.deadtime)) { + ret = -EINVAL; + goto out; + } server_conf.share_fake_fscaps = req->share_fake_fscaps; ksmbd_init_domain(req->sub_auth); @@ -335,6 +340,7 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req) ret |= ksmbd_set_work_group(req->work_group); ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req), req->ifc_list_sz); +out: if (ret) { pr_err("Server configuration error: %s %s %s\n", req->netbios_name, req->server_string, diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index ee825971abd9a..8fd070e31fa7d 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -496,7 +496,8 @@ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp, int err = 0; if (work->conn->connection_type) { - if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE))) { + if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE)) || + S_ISDIR(file_inode(fp->filp)->i_mode)) { pr_err("no right to write(%pD)\n", fp->filp); err = -EACCES; goto out; diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 70c907fe8af9e..4386dd845e400 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -810,6 +810,7 @@ static int inode_getblk(struct inode *inode, struct udf_map_rq *map) } map->oflags = UDF_BLK_MAPPED; map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc, offset); + ret = 0; goto out_free; } diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 7c0bd0b55f880..199ec6d10b62a 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -395,32 +395,6 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY)) goto out; - /* - * If it's already released don't get it. This avoids to loop - * in __get_user_pages if userfaultfd_release waits on the - * caller of handle_userfault to release the mmap_lock. - */ - if (unlikely(READ_ONCE(ctx->released))) { - /* - * Don't return VM_FAULT_SIGBUS in this case, so a non - * cooperative manager can close the uffd after the - * last UFFDIO_COPY, without risking to trigger an - * involuntary SIGBUS if the process was starting the - * userfaultfd while the userfaultfd was still armed - * (but after the last UFFDIO_COPY). If the uffd - * wasn't already closed when the userfault reached - * this point, that would normally be solved by - * userfaultfd_must_wait returning 'false'. - * - * If we were to return VM_FAULT_SIGBUS here, the non - * cooperative manager would be instead forced to - * always call UFFDIO_UNREGISTER before it can safely - * close the uffd. - */ - ret = VM_FAULT_NOPAGE; - goto out; - } - /* * Check that we can return VM_FAULT_RETRY. 
* @@ -457,6 +431,31 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) goto out; + if (unlikely(READ_ONCE(ctx->released))) { + /* + * If a concurrent release is detected, do not return + * VM_FAULT_SIGBUS or VM_FAULT_NOPAGE, but instead always + * return VM_FAULT_RETRY with lock released proactively. + * + * If we were to return VM_FAULT_SIGBUS here, the non + * cooperative manager would be instead forced to + * always call UFFDIO_UNREGISTER before it can safely + * close the uffd, to avoid involuntary SIGBUS triggered. + * + * If we were to return VM_FAULT_NOPAGE, it would work for + * the fault path, in which the lock will be released + * later. However for GUP, faultin_page() does nothing + * special on NOPAGE, so GUP would spin retrying without + * releasing the mmap read lock, causing possible livelock. + * + * Here only VM_FAULT_RETRY would make sure the mmap lock + * be released immediately, so that the thread concurrently + * releasing the userfault would always make progress. + */ + release_fault_lock(vmf); + goto out; + } + /* take the reference before dropping the mmap_lock */ userfaultfd_ctx_get(ctx); diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c index e95b8a48d8a02..1d94bb7841081 100644 --- a/fs/vboxsf/super.c +++ b/fs/vboxsf/super.c @@ -21,7 +21,8 @@ #define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */ -static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375"; +static const unsigned char VBSF_MOUNT_SIGNATURE[4] = { '\000', '\377', '\376', + '\375' }; static int follow_symlinks; module_param(follow_symlinks, int, 0444); diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h index 47148cc4a833e..eb00d48590f20 100644 --- a/fs/xfs/scrub/common.h +++ b/fs/xfs/scrub/common.h @@ -179,7 +179,6 @@ static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm) bool xchk_dir_looks_zapped(struct xfs_inode *dp); bool xchk_pptr_looks_zapped(struct xfs_inode *ip); -#ifdef CONFIG_XFS_ONLINE_REPAIR /* Decide if a repair is required. */ static inline bool xchk_needs_repair(const struct xfs_scrub_metadata *sm) { @@ -199,10 +198,6 @@ static inline bool xchk_could_repair(const struct xfs_scrub *sc) return (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) && !(sc->flags & XREP_ALREADY_FIXED); } -#else -# define xchk_needs_repair(sc) (false) -# define xchk_could_repair(sc) (false) -#endif /* CONFIG_XFS_ONLINE_REPAIR */ int xchk_metadata_inode_forks(struct xfs_scrub *sc); diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h index 0e0dc2bf985c2..96180176c582f 100644 --- a/fs/xfs/scrub/repair.h +++ b/fs/xfs/scrub/repair.h @@ -163,7 +163,16 @@ bool xrep_buf_verify_struct(struct xfs_buf *bp, const struct xfs_buf_ops *ops); #else #define xrep_ino_dqattach(sc) (0) -#define xrep_will_attempt(sc) (false) + +/* + * When online repair is not built into the kernel, we still want to attempt + * the repair so that the stub xrep_attempt below will return EOPNOTSUPP. 
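The vboxsf hunk above changes only the initialization style: a four-character string literal carries an implicit trailing NUL, which recent compilers warn about when it is squeezed into a 4-byte array, while an explicit byte list initializes exactly the same four bytes with no extra NUL. In miniature:

	static const unsigned char sig_str[4]   = "\000\377\376\375";			/* literal is 5 bytes incl. NUL; may warn */
	static const unsigned char sig_bytes[4] = { '\000', '\377', '\376', '\375' };	/* exactly 4 bytes */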
+ */ +static inline bool xrep_will_attempt(const struct xfs_scrub *sc) +{ + return (sc->sm->sm_flags & XFS_SCRUB_IFLAG_FORCE_REBUILD) || + xchk_needs_repair(sc->sm); +} static inline int xrep_attempt( diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c index 4cbcf7a86dbec..5c266d2842dbe 100644 --- a/fs/xfs/scrub/scrub.c +++ b/fs/xfs/scrub/scrub.c @@ -149,6 +149,18 @@ xchk_probe( if (xchk_should_terminate(sc, &error)) return error; + /* + * If the caller is probing to see if repair works but repair isn't + * built into the kernel, return EOPNOTSUPP because that's the signal + * that userspace expects. If online repair is built in, set the + * CORRUPT flag (without any of the usual tracing/logging) to force us + * into xrep_probe. + */ + if (xchk_could_repair(sc)) { + if (!IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR)) + return -EOPNOTSUPP; + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; + } return 0; } diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h index 594d5905f6151..215bf9f317cbf 100644 --- a/include/asm-generic/hugetlb.h +++ b/include/asm-generic/hugetlb.h @@ -84,7 +84,7 @@ static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, #ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) + unsigned long addr, pte_t *ptep, unsigned long sz) { return ptep_get_and_clear(mm, addr, ptep); } diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index fa284b64b2de2..23b358a1271cd 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -450,7 +450,7 @@ . = ALIGN((align)); \ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ __start_rodata = .; \ - *(.rodata) *(.rodata.*) \ + *(.rodata) *(.rodata.*) *(.data.rel.ro*) \ SCHED_DATA \ RO_AFTER_INIT_DATA /* Read only after init */ \ . = ALIGN(8); \ diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index a80ba457a858f..6398a6b50bd1b 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -222,6 +222,13 @@ struct drm_dp_mst_branch { */ struct list_head destroy_next; + /** + * @rad: Relative Address of the MST branch. + * For &drm_dp_mst_topology_mgr.mst_primary, it's rad[8] are all 0, + * unset and unused. For MST branches connected after mst_primary, + * in each element of rad[] the nibbles are ordered by the most + * signifcant 4 bits first and the least significant 4 bits second. 
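The new kernel-doc spells out how relative addresses are packed: one nibble per hop, with the most significant nibble of each byte used first. An illustrative decoder (not part of the patch) that recovers the port number for a given hop below the primary branch device:

	static u8 rad_port_at_hop(const u8 rad[8], int hop)
	{
		int idx = (hop - 1) / 2;
		int shift = (hop & 1) ? 4 : 0;	/* odd hops sit in the high nibble */

		return (rad[idx] >> shift) & 0xf;
	}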
+ */ u8 rad[8]; u8 lct; int num_ports; diff --git a/include/drm/drm_client_setup.h b/include/drm/drm_client_setup.h new file mode 100644 index 0000000000000..46aab3fb46be5 --- /dev/null +++ b/include/drm/drm_client_setup.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_CLIENT_SETUP_H +#define DRM_CLIENT_SETUP_H + +#include + +struct drm_device; +struct drm_format_info; + +#if defined(CONFIG_DRM_CLIENT_SETUP) +void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format); +void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc); +void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color_mode); +#else +static inline void drm_client_setup(struct drm_device *dev, + const struct drm_format_info *format) +{ } +static inline void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc) +{ } +static inline void drm_client_setup_with_color_mode(struct drm_device *dev, + unsigned int color_mode) +{ } +#endif + +#endif diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 02ea4e3248fdf..36a606af4ba1d 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -34,6 +34,8 @@ #include +struct drm_fb_helper; +struct drm_fb_helper_surface_size; struct drm_file; struct drm_gem_object; struct drm_master; @@ -366,6 +368,22 @@ struct drm_driver { struct drm_device *dev, uint32_t handle, uint64_t *offset); + /** + * @fbdev_probe + * + * Allocates and initialize the fb_info structure for fbdev emulation. + * Furthermore it also needs to allocate the DRM framebuffer used to + * back the fbdev. + * + * This callback is mandatory for fbdev support. + * + * Returns: + * + * 0 on success ot a negative error code otherwise. + */ + int (*fbdev_probe)(struct drm_fb_helper *fbdev_helper, + struct drm_fb_helper_surface_size *sizes); + /** * @show_fdinfo: * diff --git a/include/drm/drm_fbdev_client.h b/include/drm/drm_fbdev_client.h new file mode 100644 index 0000000000000..e11a5614f127c --- /dev/null +++ b/include/drm/drm_fbdev_client.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_FBDEV_CLIENT_H +#define DRM_FBDEV_CLIENT_H + +struct drm_device; +struct drm_format_info; + +#ifdef CONFIG_DRM_FBDEV_EMULATION +int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format); +#else +static inline int drm_fbdev_client_setup(struct drm_device *dev, + const struct drm_format_info *format) +{ + return 0; +} +#endif + +#endif diff --git a/include/drm/drm_fbdev_ttm.h b/include/drm/drm_fbdev_ttm.h index 9e6c3bdf35376..243685d02eb13 100644 --- a/include/drm/drm_fbdev_ttm.h +++ b/include/drm/drm_fbdev_ttm.h @@ -3,11 +3,24 @@ #ifndef DRM_FBDEV_TTM_H #define DRM_FBDEV_TTM_H +#include + struct drm_device; +struct drm_fb_helper; +struct drm_fb_helper_surface_size; #ifdef CONFIG_DRM_FBDEV_EMULATION +int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); + +#define DRM_FBDEV_TTM_DRIVER_OPS \ + .fbdev_probe = drm_fbdev_ttm_driver_fbdev_probe + void drm_fbdev_ttm_setup(struct drm_device *dev, unsigned int preferred_bpp); #else +#define DRM_FBDEV_TTM_DRIVER_OPS \ + .fbdev_probe = NULL + static inline void drm_fbdev_ttm_setup(struct drm_device *dev, unsigned int preferred_bpp) { } #endif diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index ccf91daa43070..c3f4405d66629 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -313,6 +313,7 @@ drm_get_format_info(struct drm_device *dev, uint32_t 
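Taken together, the headers added above let a driver hand fbdev emulation to the generic client: the driver supplies an fbdev_probe callback (here via the TTM helper macro) and calls drm_client_setup() once the device is registered. A sketch of the expected wiring, assuming a TTM-based driver; my_driver and my_probe are illustrative names:

	static const struct drm_driver my_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		/* expands to .fbdev_probe = drm_fbdev_ttm_driver_fbdev_probe */
		DRM_FBDEV_TTM_DRIVER_OPS,
		/* ... fops, name, etc. ... */
	};

	static int my_probe(struct drm_device *dev)
	{
		int ret = drm_dev_register(dev, 0);

		if (ret)
			return ret;
		/* NULL format lets the client pick a default */
		drm_client_setup(dev, NULL);
		return 0;
	}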
drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth); uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, uint32_t bpp, uint32_t depth); +uint32_t drm_driver_color_mode_format(struct drm_device *dev, unsigned int color_mode); unsigned int drm_format_info_block_width(const struct drm_format_info *info, int plane); unsigned int drm_format_info_block_height(const struct drm_format_info *info, diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h index afdd46ef04f70..c835f113055dc 100644 --- a/include/drm/drm_kunit_helpers.h +++ b/include/drm/drm_kunit_helpers.h @@ -120,6 +120,9 @@ drm_kunit_helper_create_crtc(struct kunit *test, const struct drm_crtc_funcs *funcs, const struct drm_crtc_helper_funcs *helper_funcs); +int drm_kunit_add_mode_destroy_action(struct kunit *test, + struct drm_display_mode *mode); + struct drm_display_mode * drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev, u8 video_code); diff --git a/include/drm/intel/i915_pciids.h b/include/drm/intel/i915_pciids.h index f35534522d333..dacea289acaf5 100644 --- a/include/drm/intel/i915_pciids.h +++ b/include/drm/intel/i915_pciids.h @@ -809,6 +809,9 @@ MACRO__(0xE20B, ## __VA_ARGS__), \ MACRO__(0xE20C, ## __VA_ARGS__), \ MACRO__(0xE20D, ## __VA_ARGS__), \ - MACRO__(0xE212, ## __VA_ARGS__) + MACRO__(0xE210, ## __VA_ARGS__), \ + MACRO__(0xE212, ## __VA_ARGS__), \ + MACRO__(0xE215, ## __VA_ARGS__), \ + MACRO__(0xE216, ## __VA_ARGS__) #endif /* _I915_PCIIDS_H */ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 8e7af9a03b41d..e721148c95d07 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -249,6 +249,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(debug_locks && + (inode->i_sb->s_iflags & SB_I_CGROUPWB) && (!lockdep_is_held(&inode->i_lock) && !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && !lockdep_is_held(&inode->i_wb->list_lock))); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index a53cbe2569104..318245b4e38fb 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -230,62 +230,61 @@ static inline unsigned short req_get_ioprio(struct request *req) #define rq_dma_dir(rq) \ (op_is_write(req_op(rq)) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE) -#define rq_list_add(listptr, rq) do { \ - (rq)->rq_next = *(listptr); \ - *(listptr) = rq; \ -} while (0) - -#define rq_list_add_tail(lastpptr, rq) do { \ - (rq)->rq_next = NULL; \ - **(lastpptr) = rq; \ - *(lastpptr) = &rq->rq_next; \ -} while (0) - -#define rq_list_pop(listptr) \ -({ \ - struct request *__req = NULL; \ - if ((listptr) && *(listptr)) { \ - __req = *(listptr); \ - *(listptr) = __req->rq_next; \ - } \ - __req; \ -}) +static inline int rq_list_empty(const struct rq_list *rl) +{ + return rl->head == NULL; +} -#define rq_list_peek(listptr) \ -({ \ - struct request *__req = NULL; \ - if ((listptr) && *(listptr)) \ - __req = *(listptr); \ - __req; \ -}) +static inline void rq_list_init(struct rq_list *rl) +{ + rl->head = NULL; + rl->tail = NULL; +} -#define rq_list_for_each(listptr, pos) \ - for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos)) +static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq) +{ + rq->rq_next = NULL; + if (rl->tail) + rl->tail->rq_next = rq; + else + rl->head = rq; + rl->tail = rq; +} -#define rq_list_for_each_safe(listptr, pos, nxt) \ - for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \ - pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL) +static inline void rq_list_add_head(struct rq_list *rl, struct request *rq) +{ + rq->rq_next = rl->head; + rl->head = rq; + if (!rl->tail) + rl->tail = rq; +} -#define rq_list_next(rq) (rq)->rq_next -#define rq_list_empty(list) ((list) == (struct request *) NULL) +static inline struct request *rq_list_pop(struct rq_list *rl) +{ + struct request *rq = rl->head; -/** - * rq_list_move() - move a struct request from one list to another - * @src: The source list @rq is currently in - * @dst: The destination list that @rq will be appended to - * @rq: The request to move - * @prev: The request preceding @rq in @src (NULL if @rq is the head) - */ -static inline void rq_list_move(struct request **src, struct request **dst, - struct request *rq, struct request *prev) + if (rq) { + rl->head = rl->head->rq_next; + if (!rl->head) + rl->tail = NULL; + rq->rq_next = NULL; + } + + return rq; +} + +static inline struct request *rq_list_peek(struct rq_list *rl) { - if (prev) - prev->rq_next = rq->rq_next; - else - *src = rq->rq_next; - rq_list_add(dst, rq); + return rl->head; } +#define rq_list_for_each(rl, pos) \ + for (pos = rq_list_peek((rl)); (pos); pos = pos->rq_next) + +#define rq_list_for_each_safe(rl, pos, nxt) \ + for (pos = rq_list_peek((rl)), nxt = pos->rq_next; \ + pos; pos = nxt, nxt = pos ? pos->rq_next : NULL) + /** * enum blk_eh_timer_return - How the timeout handler should proceed * @BLK_EH_DONE: The block driver completed the command or will complete it at @@ -577,7 +576,7 @@ struct blk_mq_ops { * empty the @rqlist completely, then the rest will be queued * individually by the block layer upon return. 
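With the open-coded list macros replaced by struct rq_list and the inline helpers above, a driver's ->queue_rqs() walk becomes a plain pop loop; whatever it cannot submit is handed back on the list and is dispatched individually by the block layer on return. Sketch, with my_queue_one() standing in for the driver's fast-path submit:

	static void my_queue_rqs(struct rq_list *rqlist)
	{
		struct rq_list requeue = { };
		struct request *rq;

		while ((rq = rq_list_pop(rqlist))) {
			if (!my_queue_one(rq))			/* hypothetical submit helper */
				rq_list_add_tail(&requeue, rq);
		}

		/* leftovers are dispatched one by one by the block layer */
		*rqlist = requeue;
	}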
*/ - void (*queue_rqs)(struct request **rqlist); + void (*queue_rqs)(struct rq_list *rqlist); /** * @get_budget: Reserve budget before queue request, once .queue_rq is @@ -871,12 +870,20 @@ static inline bool blk_mq_is_reserved_rq(struct request *rq) return rq->rq_flags & RQF_RESV; } -/* +/** + * blk_mq_add_to_batch() - add a request to the completion batch + * @req: The request to add to batch + * @iob: The batch to add the request + * @is_error: Specify true if the request failed with an error + * @complete: The completaion handler for the request + * * Batched completions only work when there is no I/O error and no special * ->end_io handler. + * + * Return: true when the request was added to the batch, otherwise false */ static inline bool blk_mq_add_to_batch(struct request *req, - struct io_comp_batch *iob, int ioerror, + struct io_comp_batch *iob, bool is_error, void (*complete)(struct io_comp_batch *)) { /* @@ -884,7 +891,7 @@ static inline bool blk_mq_add_to_batch(struct request *req, * 1) No batch container * 2) Has scheduler data attached * 3) Not a passthrough request and end_io set - * 4) Not a passthrough request and an ioerror + * 4) Not a passthrough request and failed with an error */ if (!iob) return false; @@ -893,7 +900,7 @@ static inline bool blk_mq_add_to_batch(struct request *req, if (!blk_rq_is_passthrough(req)) { if (req->end_io) return false; - if (ioerror < 0) + if (is_error) return false; } @@ -902,7 +909,7 @@ static inline bool blk_mq_add_to_batch(struct request *req, else if (iob->complete != complete) return false; iob->need_ts |= blk_mq_need_time_stamp(req); - rq_list_add(&iob->req_list, req); + rq_list_add_head(&iob->req_list, req); return true; } @@ -939,6 +946,8 @@ void blk_mq_unfreeze_queue_non_owner(struct request_queue *q); void blk_freeze_queue_start_non_owner(struct request_queue *q); void blk_mq_map_queues(struct blk_mq_queue_map *qmap); +void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap, + struct device *dev, unsigned int offset); void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); void blk_mq_quiesce_queue_nowait(struct request_queue *q); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b7f327ce797e5..b94dc4b796f5a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -196,10 +196,11 @@ struct gendisk { unsigned int zone_capacity; unsigned int last_zone_capacity; unsigned long __rcu *conv_zones_bitmap; - unsigned int zone_wplugs_hash_bits; - spinlock_t zone_wplugs_lock; + unsigned int zone_wplugs_hash_bits; + atomic_t nr_zone_wplugs; + spinlock_t zone_wplugs_lock; struct mempool_s *zone_wplugs_pool; - struct hlist_head *zone_wplugs_hash; + struct hlist_head *zone_wplugs_hash; struct workqueue_struct *zone_wplugs_wq; #endif /* CONFIG_BLK_DEV_ZONED */ @@ -994,6 +995,11 @@ extern void blk_put_queue(struct request_queue *); void blk_mark_disk_dead(struct gendisk *disk); +struct rq_list { + struct request *head; + struct request *tail; +}; + #ifdef CONFIG_BLOCK /* * blk_plug permits building a queue of related requests by holding the I/O @@ -1007,10 +1013,10 @@ void blk_mark_disk_dead(struct gendisk *disk); * blk_flush_plug() is called. 
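blk_mq_add_to_batch() now takes a plain bool instead of a raw errno. A typical completion path (nvme-style; my_status, my_unmap_data and my_complete_batch are illustrative) batches the request when possible and falls back to an individual completion otherwise:

	if (!blk_mq_add_to_batch(req, iob, my_status != 0, my_complete_batch))
		blk_mq_complete_request(req);

	static void my_complete_batch(struct io_comp_batch *iob)
	{
		struct request *req;

		rq_list_for_each(&iob->req_list, req)
			my_unmap_data(req);		/* per-request teardown */
		blk_mq_end_request_batch(iob);
	}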
*/ struct blk_plug { - struct request *mq_list; /* blk-mq requests */ + struct rq_list mq_list; /* blk-mq requests */ /* if ios_left is > 1, we can batch tag/rq allocations */ - struct request *cached_rq; + struct rq_list cached_rqs; u64 cur_ktime; unsigned short nr_ios; @@ -1659,7 +1665,7 @@ int bdev_thaw(struct block_device *bdev); void bdev_fput(struct file *bdev_file); struct io_comp_batch { - struct request *req_list; + struct rq_list req_list; bool need_ts; void (*complete)(struct io_comp_batch *); }; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index a7af13f550e0d..1150a595aa54c 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1499,6 +1499,7 @@ struct bpf_prog_aux { bool exception_cb; bool exception_boundary; bool is_extended; /* true if extended by freplace program */ + bool changes_pkt_data; u64 prog_array_member_cnt; /* counts how many times as member of prog_array */ struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */ struct bpf_arena *arena; diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 4513372c5bc8e..50eeb5b86ed70 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -668,6 +668,7 @@ struct bpf_subprog_info { bool args_cached: 1; /* true if bpf_fastcall stack region is used by functions that can't be inlined */ bool keep_fastcall_stack: 1; + bool changes_pkt_data: 1; u8 arg_cnt; struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS]; diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index a32eebcd23da4..252eed781a6e9 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -324,6 +324,7 @@ struct cgroup_base_stat { #ifdef CONFIG_SCHED_CORE u64 forceidle_sum; #endif + u64 ntime; }; /* @@ -710,6 +711,7 @@ struct cgroup_subsys { void (*css_released)(struct cgroup_subsys_state *css); void (*css_free)(struct cgroup_subsys_state *css); void (*css_reset)(struct cgroup_subsys_state *css); + void (*css_killed)(struct cgroup_subsys_state *css); void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu); int (*css_extra_stat_show)(struct seq_file *seq, struct cgroup_subsys_state *css); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index f8ef47f8a634d..fc1324ed597d6 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -343,7 +343,7 @@ static inline u64 cgroup_id(const struct cgroup *cgrp) */ static inline bool css_is_dying(struct cgroup_subsys_state *css) { - return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt); + return css->flags & CSS_DYING; } static inline void cgroup_get(struct cgroup *cgrp) diff --git a/include/linux/compaction.h b/include/linux/compaction.h index e947764960496..7bf0c521db634 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -80,6 +80,11 @@ static inline unsigned long compact_gap(unsigned int order) return 2UL << order; } +static inline int current_is_kcompactd(void) +{ + return current->flags & PF_KCOMPACTD; +} + #ifdef CONFIG_COMPACTION extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order); diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index cd6f9aae311fc..070b3b680209c 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -52,18 +52,6 @@ */ #define barrier_before_unreachable() asm volatile("") -/* - * Mark a position in code as unreachable. This can be used to - * suppress control flow warnings after asm blocks that transfer - * control elsewhere. 
- */ -#define unreachable() \ - do { \ - annotate_unreachable(); \ - barrier_before_unreachable(); \ - __builtin_unreachable(); \ - } while (0) - #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 2d962dade9fae..b15911e201bf9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -109,44 +109,21 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, /* Unreachable code */ #ifdef CONFIG_OBJTOOL -/* - * These macros help objtool understand GCC code flow for unreachable code. - * The __COUNTER__ based labels are a hack to make each instance of the macros - * unique, to convince GCC not to merge duplicate inline asm statements. - */ -#define __stringify_label(n) #n - -#define __annotate_reachable(c) ({ \ - asm volatile(__stringify_label(c) ":\n\t" \ - ".pushsection .discard.reachable\n\t" \ - ".long " __stringify_label(c) "b - .\n\t" \ - ".popsection\n\t"); \ -}) -#define annotate_reachable() __annotate_reachable(__COUNTER__) - -#define __annotate_unreachable(c) ({ \ - asm volatile(__stringify_label(c) ":\n\t" \ - ".pushsection .discard.unreachable\n\t" \ - ".long " __stringify_label(c) "b - .\n\t" \ - ".popsection\n\t" : : "i" (c)); \ -}) -#define annotate_unreachable() __annotate_unreachable(__COUNTER__) - /* Annotate a C jump table to allow objtool to follow the code flow */ -#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #") - +#define __annotate_jump_table __section(".data.rel.ro.c_jump_table") #else /* !CONFIG_OBJTOOL */ -#define annotate_reachable() -#define annotate_unreachable() #define __annotate_jump_table #endif /* CONFIG_OBJTOOL */ -#ifndef unreachable -# define unreachable() do { \ - annotate_unreachable(); \ +/* + * Mark a position in code as unreachable. This can be used to + * suppress control flow warnings after asm blocks that transfer + * control elsewhere. 
+ */ +#define unreachable() do { \ + barrier_before_unreachable(); \ __builtin_unreachable(); \ } while (0) -#endif /* * KENTRY - kernel entry point diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h index c50b5670c4a52..197916ee91a4b 100644 --- a/include/linux/context_tracking_irq.h +++ b/include/linux/context_tracking_irq.h @@ -10,12 +10,12 @@ void ct_irq_exit_irqson(void); void ct_nmi_enter(void); void ct_nmi_exit(void); #else -static inline void ct_irq_enter(void) { } -static inline void ct_irq_exit(void) { } +static __always_inline void ct_irq_enter(void) { } +static __always_inline void ct_irq_exit(void) { } static inline void ct_irq_enter_irqson(void) { } static inline void ct_irq_exit_irqson(void) { } -static inline void ct_nmi_enter(void) { } -static inline void ct_nmi_exit(void) { } +static __always_inline void ct_nmi_enter(void) { } +static __always_inline void ct_nmi_exit(void) { } #endif #endif diff --git a/include/linux/coresight.h b/include/linux/coresight.h index c133425942785..f106b10251118 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -639,6 +639,10 @@ extern int coresight_enable_sysfs(struct coresight_device *csdev); extern void coresight_disable_sysfs(struct coresight_device *csdev); extern int coresight_timeout(struct csdev_access *csa, u32 offset, int position, int value); +typedef void (*coresight_timeout_cb_t) (struct csdev_access *, u32, int, int); +extern int coresight_timeout_action(struct csdev_access *csa, u32 offset, + int position, int value, + coresight_timeout_cb_t cb); extern int coresight_claim_device(struct coresight_device *csdev); extern int coresight_claim_device_unlocked(struct coresight_device *csdev); diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h index cdc4757217f9b..b18658bce2c38 100644 --- a/include/linux/device/bus.h +++ b/include/linux/device/bus.h @@ -48,6 +48,7 @@ struct fwnode_handle; * will never get called until they do. * @remove: Called when a device removed from this bus. * @shutdown: Called at shut-down time to quiesce the device. + * @irq_get_affinity: Get IRQ affinity mask for the device on this bus. * * @online: Called to put the device back online (after offlining it). * @offline: Called to put the device offline for hot-removal. May fail. @@ -87,6 +88,8 @@ struct bus_type { void (*sync_state)(struct device *dev); void (*remove)(struct device *dev); void (*shutdown)(struct device *dev); + const struct cpumask *(*irq_get_affinity)(struct device *dev, + unsigned int irq_vec); int (*online)(struct device *dev); int (*offline)(struct device *dev); diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index b8b935b526033..b0ed740ca749b 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -412,6 +412,29 @@ struct ethtool_eth_phy_stats { ); }; +/** + * struct ethtool_phy_stats - PHY-level statistics counters + * @rx_packets: Total successfully received frames + * @rx_bytes: Total successfully received bytes + * @rx_errors: Total received frames with errors (e.g., CRC errors) + * @tx_packets: Total successfully transmitted frames + * @tx_bytes: Total successfully transmitted bytes + * @tx_errors: Total transmitted frames with errors + * + * This structure provides a standardized interface for reporting + * PHY-level statistics counters. It is designed to expose statistics + * commonly provided by PHYs but not explicitly defined in the IEEE + * 802.3 standard. 
+ */ +struct ethtool_phy_stats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_errors; + u64 tx_packets; + u64 tx_bytes; + u64 tx_errors; +}; + /* Basic IEEE 802.3 MAC Ctrl statistics (30.3.3.*), not otherwise exposed * via a more targeted API. */ diff --git a/include/linux/fs.h b/include/linux/fs.h index fc3de42d9d764..b98f128c9afa7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3320,6 +3320,8 @@ extern const struct file_operations generic_ro_fops; extern int readlink_copy(char __user *, int, const char *); extern int page_readlink(struct dentry *, char __user *, int); +extern const char *page_get_link_raw(struct dentry *, struct inode *, + struct delayed_call *); extern const char *page_get_link(struct dentry *, struct inode *, struct delayed_call *); extern void page_put_link(void *); diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 0d79070c5a70f..487d4bd9b0c99 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -91,7 +91,7 @@ struct fwnode_endpoint { #define SWNODE_GRAPH_PORT_NAME_FMT "port@%u" #define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u" -#define NR_FWNODE_REFERENCE_ARGS 8 +#define NR_FWNODE_REFERENCE_ARGS 16 /** * struct fwnode_reference_args - Fwnode reference with additional arguments diff --git a/include/linux/hid.h b/include/linux/hid.h index dd33423012538..018de72505b07 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -1221,12 +1221,6 @@ unsigned long hid_lookup_quirk(const struct hid_device *hdev); int hid_quirks_init(char **quirks_param, __u16 bus, int count); void hid_quirks_exit(__u16 bus); -#ifdef CONFIG_HID_PID -int hid_pidff_init(struct hid_device *hid); -#else -#define hid_pidff_init NULL -#endif - #define dbg_hid(fmt, ...) pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__) #define hid_err(hid, fmt, ...) 
\ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index e4697539b665a..12f7a7b9c06e9 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -687,6 +687,7 @@ struct huge_bootmem_page { }; int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); +void wait_for_freed_hugetlb_folios(void); struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, @@ -1009,7 +1010,9 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm) static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { - return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + unsigned long psize = huge_page_size(hstate_vma(vma)); + + return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize); } #endif @@ -1055,6 +1058,10 @@ static inline int isolate_or_dissolve_huge_page(struct page *page, return -ENOMEM; } +static inline void wait_for_freed_hugetlb_folios(void) +{ +} + static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 235fb5e45fc39..e6cebad0d6c0e 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -450,7 +450,7 @@ irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, static inline void disable_irq_nosync_lockdep(unsigned int irq) { disable_irq_nosync(irq); -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_disable(); #endif } @@ -458,7 +458,7 @@ static inline void disable_irq_nosync_lockdep(unsigned int irq) static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) { disable_irq_nosync(irq); -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_save(*flags); #endif } @@ -473,7 +473,7 @@ static inline void disable_irq_lockdep(unsigned int irq) static inline void enable_irq_lockdep(unsigned int irq) { -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_enable(); #endif enable_irq(irq); @@ -481,7 +481,7 @@ static inline void enable_irq_lockdep(unsigned int irq) static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) { -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_restore(*flags); #endif enable_irq(irq); diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 4b9ba523978d2..5ce332fc6ff50 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -457,6 +457,7 @@ enum { REQ_F_SKIP_LINK_CQES_BIT, REQ_F_SINGLE_POLL_BIT, REQ_F_DOUBLE_POLL_BIT, + REQ_F_MULTISHOT_BIT, REQ_F_APOLL_MULTISHOT_BIT, REQ_F_CLEAR_POLLIN_BIT, REQ_F_HASH_LOCKED_BIT, @@ -530,6 +531,8 @@ enum { REQ_F_SINGLE_POLL = IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT), /* double poll may active */ REQ_F_DOUBLE_POLL = IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT), + /* request posts multiple completions, should be set at prep time */ + REQ_F_MULTISHOT = IO_REQ_FLAG(REQ_F_MULTISHOT_BIT), /* fast poll multishot mode */ REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT), /* recvmsg special flag, clear EPOLLIN */ diff --git a/include/linux/key.h b/include/linux/key.h index 074dca3222b96..ba05de8579ecc 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -236,6 +236,7 @@ struct key { 
#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */ #define KEY_FLAG_KEEP 8 /* set if key should not be removed */ #define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */ +#define KEY_FLAG_FINAL_PUT 10 /* set if final put has happened on key */ /* the key type and key description string * - the desc is used to match a key against search criteria diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2c66ca21801c1..15206450929d5 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -2330,7 +2330,7 @@ static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot) struct kvm_vcpu *kvm_get_running_vcpu(void); struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); -#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS +#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS) bool kvm_arch_has_irq_bypass(void); int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); diff --git a/include/linux/libata.h b/include/linux/libata.h index 9b4a6ff03235b..79974a99265fc 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -88,6 +88,7 @@ enum ata_quirks { __ATA_QUIRK_MAX_SEC_1024, /* Limit max sects to 1024 */ __ATA_QUIRK_MAX_TRIM_128M, /* Limit max trim size to 128M */ __ATA_QUIRK_NO_NCQ_ON_ATI, /* Disable NCQ on ATI chipset */ + __ATA_QUIRK_NO_LPM_ON_ATI, /* Disable LPM on ATI chipset */ __ATA_QUIRK_NO_ID_DEV_LOG, /* Identify device log missing */ __ATA_QUIRK_NO_LOG_DIR, /* Do not read log directory */ __ATA_QUIRK_NO_FUA, /* Do not use FUA */ @@ -434,6 +435,7 @@ enum { ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024), ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M), ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI), + ATA_QUIRK_NO_LPM_ON_ATI = (1U << __ATA_QUIRK_NO_LPM_ON_ATI), ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG), ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR), ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA), diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 4f17b786828af..35b886385f329 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3064,6 +3064,8 @@ static inline struct net_device *first_net_device_rcu(struct net *net) } int netdev_boot_setup_check(struct net_device *dev); +struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, + const char *hwaddr); struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, const char *hwaddr); struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); diff --git a/include/linux/nfs.h b/include/linux/nfs.h index 9ad727ddfedb3..0906a0b40c6aa 100644 --- a/include/linux/nfs.h +++ b/include/linux/nfs.h @@ -55,7 +55,6 @@ enum nfs3_stable_how { NFS_INVALID_STABLE_HOW = -1 }; -#ifdef CONFIG_CRC32 /** * nfs_fhandle_hash - calculate the crc32 hash for the filehandle * @fh - pointer to filehandle @@ -67,10 +66,4 @@ static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh) { return ~crc32_le(0xFFFFFFFF, &fh->data[0], fh->size); } -#else /* CONFIG_CRC32 */ -static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh) -{ - return 0; -} -#endif /* CONFIG_CRC32 */ #endif /* _LINUX_NFS_H */ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index b804346a97419..81ab18658d72d 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -251,6 +251,10 @@ struct nfs_server { struct list_head ss_copies; struct list_head ss_src_copies; + unsigned long 
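The kvm_host.h hunk above swaps #ifdef for IS_ENABLED(), which evaluates true for both =y and =m, whereas #ifdef CONFIG_X only matches a built-in option. The pattern in isolation, using a hypothetical CONFIG_FOO:

	#if IS_ENABLED(CONFIG_FOO)		/* true for CONFIG_FOO=y and CONFIG_FOO=m */
	int foo_attach(struct foo_dev *dev);
	#else
	static inline int foo_attach(struct foo_dev *dev) { return -ENODEV; }
	#endif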
delegation_flags; +#define NFS4SERV_DELEGRETURN (1) +#define NFS4SERV_DELEGATION_EXPIRED (2) +#define NFS4SERV_DELEGRETURN_DELAYED (3) unsigned long delegation_gen; unsigned long mig_gen; unsigned long mig_status; diff --git a/include/linux/nmi.h b/include/linux/nmi.h index a8dfb38c9bb6f..e78fa535f61dd 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -17,7 +17,6 @@ void lockup_detector_init(void); void lockup_detector_retry_init(void); void lockup_detector_soft_poweroff(void); -void lockup_detector_cleanup(void); extern int watchdog_user_enabled; extern int watchdog_thresh; @@ -37,7 +36,6 @@ extern int sysctl_hardlockup_all_cpu_backtrace; static inline void lockup_detector_init(void) { } static inline void lockup_detector_retry_init(void) { } static inline void lockup_detector_soft_poweroff(void) { } -static inline void lockup_detector_cleanup(void) { } #endif /* !CONFIG_LOCKUP_DETECTOR */ #ifdef CONFIG_SOFTLOCKUP_DETECTOR @@ -104,12 +102,10 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs); #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) extern void hardlockup_detector_perf_stop(void); extern void hardlockup_detector_perf_restart(void); -extern void hardlockup_detector_perf_cleanup(void); extern void hardlockup_config_perf_event(const char *str); #else static inline void hardlockup_detector_perf_stop(void) { } static inline void hardlockup_detector_perf_restart(void) { } -static inline void hardlockup_detector_perf_cleanup(void) { } static inline void hardlockup_config_perf_event(const char *str) { } #endif diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h index e07e8978d691b..e435250fcb4d0 100644 --- a/include/linux/nvme-tcp.h +++ b/include/linux/nvme-tcp.h @@ -13,6 +13,8 @@ #define NVME_TCP_ADMIN_CCSZ SZ_8K #define NVME_TCP_DIGEST_LENGTH 4 #define NVME_TCP_MIN_MAXH2CDATA 4096 +#define NVME_TCP_MIN_C2HTERM_PLEN 24 +#define NVME_TCP_MAX_C2HTERM_PLEN 152 enum nvme_tcp_pfv { NVME_TCP_PFV_1_0 = 0x0, diff --git a/include/linux/nvme.h b/include/linux/nvme.h index b58d9405d65e0..1c101f6fad2f3 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -388,6 +388,7 @@ enum { NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5, NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7, NVME_CTRL_CTRATT_UUID_LIST = 1 << 9, + NVME_CTRL_SGLS_MSDS = 1 << 19, }; struct nvme_lbaf { diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 48c66b8466828..a9244291f5067 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -1111,6 +1111,12 @@ static inline bool is_page_hwpoison(const struct page *page) return folio_test_hugetlb(folio) && PageHWPoison(&folio->page); } +static inline bool folio_contain_hwpoisoned_page(struct folio *folio) +{ + return folio_test_hwpoison(folio) || + (folio_test_large(folio) && folio_test_has_hwpoisoned(folio)); +} + bool is_free_buddy_page(const struct page *page); PAGEFLAG(Isolated, isolated, PF_ANY); diff --git a/include/linux/pci.h b/include/linux/pci.h index 4e77c4230c0a1..74114acbb07fb 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -2293,6 +2293,8 @@ static inline void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) { } #endif +int pcim_intx(struct pci_dev *pdev, int enabled); +int pcim_request_all_regions(struct pci_dev *pdev, const char *name); void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar, const char *name); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h 
index 22f6b018cff8d..c395b3c5c05cf 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2605,6 +2605,8 @@ #define PCI_VENDOR_ID_ZHAOXIN 0x1d17 +#define PCI_VENDOR_ID_ROCKCHIP 0x1d87 + #define PCI_VENDOR_ID_HYGON 0x1d94 #define PCI_VENDOR_ID_META 0x1d9b @@ -3133,6 +3135,7 @@ #define PCI_DEVICE_ID_INTEL_HDA_LNL_P 0xa828 #define PCI_DEVICE_ID_INTEL_S21152BB 0xb152 #define PCI_DEVICE_ID_INTEL_HDA_BMG 0xe2f7 +#define PCI_DEVICE_ID_INTEL_HDA_PTL_H 0xe328 #define PCI_DEVICE_ID_INTEL_HDA_PTL 0xe428 #define PCI_DEVICE_ID_INTEL_HDA_CML_R 0xf0c8 #define PCI_DEVICE_ID_INTEL_HDA_RKL_S 0xf1c8 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 347901525a46a..0997077bcc52a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -170,6 +170,12 @@ struct hw_perf_event { }; struct { /* aux / Intel-PT */ u64 aux_config; + /* + * For AUX area events, aux_paused cannot be a state + * flag because it can be updated asynchronously to + * state. + */ + unsigned int aux_paused; }; struct { /* software */ struct hrtimer hrtimer; @@ -294,6 +300,7 @@ struct perf_event_pmu_context; #define PERF_PMU_CAP_NO_EXCLUDE 0x0040 #define PERF_PMU_CAP_AUX_OUTPUT 0x0080 #define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100 +#define PERF_PMU_CAP_AUX_PAUSE 0x0200 /** * pmu::scope @@ -384,6 +391,8 @@ struct pmu { #define PERF_EF_START 0x01 /* start the counter when adding */ #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ +#define PERF_EF_PAUSE 0x08 /* AUX area event, pause tracing */ +#define PERF_EF_RESUME 0x10 /* AUX area event, resume tracing */ /* * Adds/Removes a counter to/from the PMU, can be done inside a @@ -423,6 +432,18 @@ struct pmu { * * ->start() with PERF_EF_RELOAD will reprogram the counter * value, must be preceded by a ->stop() with PERF_EF_UPDATE. + * + * ->stop() with PERF_EF_PAUSE will stop as simply as possible. Will not + * overlap another ->stop() with PERF_EF_PAUSE nor ->start() with + * PERF_EF_RESUME. + * + * ->start() with PERF_EF_RESUME will start as simply as possible but + * only if the counter is not otherwise stopped. Will not overlap + * another ->start() with PERF_EF_RESUME nor ->stop() with + * PERF_EF_PAUSE. + * + * Notably, PERF_EF_PAUSE/PERF_EF_RESUME *can* be concurrent with other + * ->stop()/->start() invocations, just not itself. 
*/ void (*start) (struct perf_event *event, int flags); void (*stop) (struct perf_event *event, int flags); @@ -652,13 +673,15 @@ struct swevent_hlist { struct rcu_head rcu_head; }; -#define PERF_ATTACH_CONTEXT 0x01 -#define PERF_ATTACH_GROUP 0x02 -#define PERF_ATTACH_TASK 0x04 -#define PERF_ATTACH_TASK_DATA 0x08 -#define PERF_ATTACH_ITRACE 0x10 -#define PERF_ATTACH_SCHED_CB 0x20 -#define PERF_ATTACH_CHILD 0x40 +#define PERF_ATTACH_CONTEXT 0x0001 +#define PERF_ATTACH_GROUP 0x0002 +#define PERF_ATTACH_TASK 0x0004 +#define PERF_ATTACH_TASK_DATA 0x0008 +#define PERF_ATTACH_ITRACE 0x0010 +#define PERF_ATTACH_SCHED_CB 0x0020 +#define PERF_ATTACH_CHILD 0x0040 +#define PERF_ATTACH_EXCLUSIVE 0x0080 +#define PERF_ATTACH_CALLCHAIN 0x0100 struct bpf_prog; struct perf_cgroup; @@ -810,7 +833,6 @@ struct perf_event { struct irq_work pending_disable_irq; struct callback_head pending_task; unsigned int pending_work; - struct rcuwait pending_work_wait; atomic_t event_limit; @@ -1685,6 +1707,13 @@ static inline bool has_aux(struct perf_event *event) return event->pmu->setup_aux; } +static inline bool has_aux_action(struct perf_event *event) +{ + return event->attr.aux_sample_size || + event->attr.aux_pause || + event->attr.aux_resume; +} + static inline bool is_write_backward(struct perf_event *event) { return !!event->attr.write_backward; diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index e8b2ac6bd2ae3..be6ca84db4d85 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -201,10 +201,14 @@ static inline int pmd_dirty(pmd_t pmd) * hazard could result in the direct mode hypervisor case, since the actual * write to the page tables may not yet have taken place, so reads though * a raw PTE pointer after it has been modified are not guaranteed to be - * up to date. This mode can only be entered and left under the protection of - * the page table locks for all page tables which may be modified. In the UP - * case, this is required so that preemption is disabled, and in the SMP case, - * it must synchronize the delayed page table writes properly on other CPUs. + * up to date. + * + * In the general case, no lock is guaranteed to be held between entry and exit + * of the lazy mode. So the implementation must assume preemption may be enabled + * and cpu migration is possible; it must take steps to be robust against this. + * (In practice, for user PTE updates, the appropriate page table lock(s) are + * held, but for kernel PTE updates, no lock is held). Nesting is not permitted + * and the mode cannot be used in interrupt context. */ #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE #define arch_enter_lazy_mmu_mode() do {} while (0) @@ -266,7 +270,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, { page_table_check_ptes_set(mm, ptep, pte, nr); - arch_enter_lazy_mmu_mode(); for (;;) { set_pte(ptep, pte); if (--nr == 0) @@ -274,7 +277,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, ptep++; pte = pte_next_pfn(pte); } - arch_leave_lazy_mmu_mode(); } #endif #define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1) @@ -1518,14 +1520,25 @@ static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, } /* - * track_pfn_copy is called when vma that is covering the pfnmap gets - * copied through copy_page_range(). + * track_pfn_copy is called when a VM_PFNMAP VMA is about to get the page + * tables copied during copy_page_range(). On success, stores the pfn to be + * passed to untrack_pfn_copy(). 
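Per the rewritten comment above, lazy MMU mode no longer guarantees that a lock is held or that preemption is off, nesting is not permitted, and set_ptes() no longer enters the mode on the caller's behalf. A caller that wants batched page-table updates therefore brackets its own loop, roughly as in this sketch (page-table locking is assumed to be handled by the caller as usual, and ptes[] is a hypothetical array prepared earlier):

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < nr; i++, addr += PAGE_SIZE, ptep++)
		set_pte_at(mm, addr, ptep, ptes[i]);
	arch_leave_lazy_mmu_mode();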
*/ -static inline int track_pfn_copy(struct vm_area_struct *vma) +static inline int track_pfn_copy(struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma, unsigned long *pfn) { return 0; } +/* + * untrack_pfn_copy is called when a VM_PFNMAP VMA failed to copy during + * copy_page_range(), but after track_pfn_copy() was already called. + */ +static inline void untrack_pfn_copy(struct vm_area_struct *dst_vma, + unsigned long pfn) +{ +} + /* * untrack_pfn is called while unmapping a pfnmap for a region. * untrack can be called for a specific region indicated by pfn and size or @@ -1538,8 +1551,10 @@ static inline void untrack_pfn(struct vm_area_struct *vma, } /* - * untrack_pfn_clear is called while mremapping a pfnmap for a new region - * or fails to copy pgtable during duplicate vm area. + * untrack_pfn_clear is called in the following cases on a VM_PFNMAP VMA: + * + * 1) During mremap() on the src VMA after the page tables were moved. + * 2) During fork() on the dst VMA, immediately after duplicating the src VMA. */ static inline void untrack_pfn_clear(struct vm_area_struct *vma) { @@ -1550,7 +1565,10 @@ extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, unsigned long size); extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn); -extern int track_pfn_copy(struct vm_area_struct *vma); +extern int track_pfn_copy(struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma, unsigned long *pfn); +extern void untrack_pfn_copy(struct vm_area_struct *dst_vma, + unsigned long pfn); extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, unsigned long size, bool mm_wr_locked); extern void untrack_pfn_clear(struct vm_area_struct *vma); diff --git a/include/linux/phy.h b/include/linux/phy.h index a98bc91a0cde9..945264f457d8a 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1090,6 +1090,35 @@ struct phy_driver { int (*cable_test_get_status)(struct phy_device *dev, bool *finished); /* Get statistics from the PHY using ethtool */ + /** + * @get_phy_stats: Retrieve PHY statistics. + * @dev: The PHY device for which the statistics are retrieved. + * @eth_stats: structure where Ethernet PHY stats will be stored. + * @stats: structure where additional PHY-specific stats will be stored. + * + * Retrieves the supported PHY statistics and populates the provided + * structures. The input structures are pre-initialized with + * `ETHTOOL_STAT_NOT_SET`, and the driver must only modify members + * corresponding to supported statistics. Unmodified members will remain + * set to `ETHTOOL_STAT_NOT_SET` and will not be returned to userspace. + */ + void (*get_phy_stats)(struct phy_device *dev, + struct ethtool_eth_phy_stats *eth_stats, + struct ethtool_phy_stats *stats); + + /** + * @get_link_stats: Retrieve link statistics. + * @dev: The PHY device for which the statistics are retrieved. + * @link_stats: structure where link-specific stats will be stored. + * + * Retrieves link-related statistics for the given PHY device. The input + * structure is pre-initialized with `ETHTOOL_STAT_NOT_SET`, and the + * driver must only modify members corresponding to supported + * statistics. Unmodified members will remain set to + * `ETHTOOL_STAT_NOT_SET` and will not be returned to userspace. 
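As the kernel-doc above stresses, both new callbacks receive structures pre-filled with ETHTOOL_STAT_NOT_SET and must only write counters the hardware actually supports. A driver-side sketch; my_phy_read_counter() and the register names are hypothetical:

	static void my_phy_get_phy_stats(struct phy_device *phydev,
					 struct ethtool_eth_phy_stats *eth_stats,
					 struct ethtool_phy_stats *stats)
	{
		/* untouched members stay at ETHTOOL_STAT_NOT_SET and are
		 * filtered out before reaching userspace */
		stats->rx_packets = my_phy_read_counter(phydev, MY_RX_PKTS);
		stats->rx_errors  = my_phy_read_counter(phydev, MY_RX_ERRS);
	}

	static struct phy_driver my_phy_driver = {
		/* ... */
		.get_phy_stats = my_phy_get_phy_stats,
	};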
+ */ + void (*get_link_stats)(struct phy_device *dev, + struct ethtool_link_ext_stats *link_stats); /** @get_sset_count: Number of statistic counters */ int (*get_sset_count)(struct phy_device *dev); /** @get_strings: Names of the statistic counters */ @@ -2055,6 +2084,13 @@ int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data); int phy_ethtool_get_sset_count(struct phy_device *phydev); int phy_ethtool_get_stats(struct phy_device *phydev, struct ethtool_stats *stats, u64 *data); + +void __phy_ethtool_get_phy_stats(struct phy_device *phydev, + struct ethtool_eth_phy_stats *phy_stats, + struct ethtool_phy_stats *phydev_stats); +void __phy_ethtool_get_link_ext_stats(struct phy_device *phydev, + struct ethtool_link_ext_stats *link_stats); + int phy_ethtool_get_plca_cfg(struct phy_device *phydev, struct phy_plca_cfg *plca_cfg); int phy_ethtool_set_plca_cfg(struct phy_device *phydev, diff --git a/include/linux/phylib_stubs.h b/include/linux/phylib_stubs.h index 1279f48c8a707..9d2d6090c86d1 100644 --- a/include/linux/phylib_stubs.h +++ b/include/linux/phylib_stubs.h @@ -5,6 +5,9 @@ #include +struct ethtool_eth_phy_stats; +struct ethtool_link_ext_stats; +struct ethtool_phy_stats; struct kernel_hwtstamp_config; struct netlink_ext_ack; struct phy_device; @@ -19,6 +22,11 @@ struct phylib_stubs { int (*hwtstamp_set)(struct phy_device *phydev, struct kernel_hwtstamp_config *config, struct netlink_ext_ack *extack); + void (*get_phy_stats)(struct phy_device *phydev, + struct ethtool_eth_phy_stats *phy_stats, + struct ethtool_phy_stats *phydev_stats); + void (*get_link_ext_stats)(struct phy_device *phydev, + struct ethtool_link_ext_stats *link_stats); }; static inline int phy_hwtstamp_get(struct phy_device *phydev, @@ -50,6 +58,29 @@ static inline int phy_hwtstamp_set(struct phy_device *phydev, return phylib_stubs->hwtstamp_set(phydev, config, extack); } +static inline void phy_ethtool_get_phy_stats(struct phy_device *phydev, + struct ethtool_eth_phy_stats *phy_stats, + struct ethtool_phy_stats *phydev_stats) +{ + ASSERT_RTNL(); + + if (!phylib_stubs) + return; + + phylib_stubs->get_phy_stats(phydev, phy_stats, phydev_stats); +} + +static inline void phy_ethtool_get_link_ext_stats(struct phy_device *phydev, + struct ethtool_link_ext_stats *link_stats) +{ + ASSERT_RTNL(); + + if (!phylib_stubs) + return; + + phylib_stubs->get_link_ext_stats(phydev, link_stats); +} + #else static inline int phy_hwtstamp_get(struct phy_device *phydev, @@ -65,4 +96,15 @@ static inline int phy_hwtstamp_set(struct phy_device *phydev, return -EOPNOTSUPP; } +static inline void phy_ethtool_get_phy_stats(struct phy_device *phydev, + struct ethtool_eth_phy_stats *phy_stats, + struct ethtool_phy_stats *phydev_stats) +{ +} + +static inline void phy_ethtool_get_link_ext_stats(struct phy_device *phydev, + struct ethtool_link_ext_stats *link_stats) +{ +} + #endif diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index d39dc863f612f..d0b29cd1fd204 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -66,6 +66,7 @@ static inline bool queue_pm_work(struct work_struct *work) extern int pm_generic_runtime_suspend(struct device *dev); extern int pm_generic_runtime_resume(struct device *dev); +extern bool pm_runtime_need_not_resume(struct device *dev); extern int pm_runtime_force_suspend(struct device *dev); extern int pm_runtime_force_resume(struct device *dev); @@ -241,6 +242,7 @@ static inline bool queue_pm_work(struct work_struct *work) { return false; } static inline int 
pm_generic_runtime_suspend(struct device *dev) { return 0; } static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } +static inline bool pm_runtime_need_not_resume(struct device *dev) {return true; } static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } static inline int pm_runtime_force_resume(struct device *dev) { return 0; } diff --git a/include/linux/printk.h b/include/linux/printk.h index eca9bb2ee637b..0cb647ecd77f5 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -204,6 +204,7 @@ void printk_legacy_allow_panic_sync(void); extern bool nbcon_device_try_acquire(struct console *con); extern void nbcon_device_release(struct console *con); void nbcon_atomic_flush_unsafe(void); +bool pr_flush(int timeout_ms, bool reset_on_progress); #else static inline __printf(1, 0) int vprintk(const char *s, va_list args) @@ -304,6 +305,11 @@ static inline void nbcon_atomic_flush_unsafe(void) { } +static inline bool pr_flush(int timeout_ms, bool reset_on_progress) +{ + return true; +} + #endif bool this_cpu_in_panic(void); diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 0b2a898544409..ea62201c74c40 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -20,10 +20,13 @@ enum { * If in doubt, ignore this flag. */ #ifdef MODULE - PROC_ENTRY_PERMANENT = 0U, + PROC_ENTRY_PERMANENT = 0U, #else - PROC_ENTRY_PERMANENT = 1U << 0, + PROC_ENTRY_PERMANENT = 1U << 0, #endif + + PROC_ENTRY_proc_read_iter = 1U << 1, + PROC_ENTRY_proc_compat_ioctl = 1U << 2, }; struct proc_ops { diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h index 591a53e082e65..df1592022d938 100644 --- a/include/linux/pse-pd/pse.h +++ b/include/linux/pse-pd/pse.h @@ -75,12 +75,8 @@ struct pse_control_status { * @pi_disable: Configure the PSE PI as disabled. * @pi_get_voltage: Return voltage similarly to get_voltage regulator * callback. - * @pi_get_current_limit: Get the configured current limit similarly to - * get_current_limit regulator callback. - * @pi_set_current_limit: Configure the current limit similarly to - * set_current_limit regulator callback. - * Should not return an error in case of MAX_PI_CURRENT - * current value set. + * @pi_get_pw_limit: Get the configured power limit of the PSE PI. + * @pi_set_pw_limit: Configure the power limit of the PSE PI. 
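The PSE PI callbacks documented above now deal in power limits (milliwatts) rather than current limits (microamps). A controller driver implementing the new pair might look like this sketch, where my_read_port_mw() and my_write_port_mw() are hypothetical register accessors:

	static int my_pi_get_pw_limit(struct pse_controller_dev *pcdev, int id)
	{
		return my_read_port_mw(pcdev, id);	/* mW, or -errno */
	}

	static int my_pi_set_pw_limit(struct pse_controller_dev *pcdev, int id,
				      int max_mW)
	{
		return my_write_port_mw(pcdev, id, max_mW);
	}

	static const struct pse_controller_ops my_pse_ops = {
		.pi_get_pw_limit = my_pi_get_pw_limit,
		.pi_set_pw_limit = my_pi_set_pw_limit,
		/* ... remaining callbacks ... */
	};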
*/ struct pse_controller_ops { int (*ethtool_get_status)(struct pse_controller_dev *pcdev, @@ -91,10 +87,10 @@ struct pse_controller_ops { int (*pi_enable)(struct pse_controller_dev *pcdev, int id); int (*pi_disable)(struct pse_controller_dev *pcdev, int id); int (*pi_get_voltage)(struct pse_controller_dev *pcdev, int id); - int (*pi_get_current_limit)(struct pse_controller_dev *pcdev, - int id); - int (*pi_set_current_limit)(struct pse_controller_dev *pcdev, - int id, int max_uA); + int (*pi_get_pw_limit)(struct pse_controller_dev *pcdev, + int id); + int (*pi_set_pw_limit)(struct pse_controller_dev *pcdev, + int id, int max_mW); }; struct module; diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 48e5c03df1dd8..bd69ddc102fbc 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -138,7 +138,7 @@ static inline void rcu_sysrq_end(void) { } #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) void rcu_irq_work_resched(void); #else -static inline void rcu_irq_work_resched(void) { } +static __always_inline void rcu_irq_work_resched(void) { } #endif #ifdef CONFIG_RCU_NOCB_CPU diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h index 2c8bfd0f1b6b3..6322d8c1c6b42 100644 --- a/include/linux/rcuref.h +++ b/include/linux/rcuref.h @@ -71,27 +71,30 @@ static inline __must_check bool rcuref_get(rcuref_t *ref) return rcuref_get_slowpath(ref); } -extern __must_check bool rcuref_put_slowpath(rcuref_t *ref); +extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt); /* * Internal helper. Do not invoke directly. */ static __always_inline __must_check bool __rcuref_put(rcuref_t *ref) { + int cnt; + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(), "suspicious rcuref_put_rcusafe() usage"); /* * Unconditionally decrease the reference count. The saturation and * dead zones provide enough tolerance for this. */ - if (likely(!atomic_add_negative_release(-1, &ref->refcnt))) + cnt = atomic_sub_return_release(1, &ref->refcnt); + if (likely(cnt >= 0)) return false; /* * Handle the last reference drop and cases inside the saturation * and dead zones. */ - return rcuref_put_slowpath(ref); + return rcuref_put_slowpath(ref, cnt); } /** diff --git a/include/linux/sched.h b/include/linux/sched.h index c547dedac7b18..f62d6f59c653e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1682,7 +1682,7 @@ extern struct pid *cad_pid; #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ #define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */ #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ -#define PF__HOLE__00010000 0x00010000 +#define PF_KCOMPACTD 0x00010000 /* I am kcompactd */ #define PF_KSWAPD 0x00020000 /* I am kswapd */ #define PF_MEMALLOC_NOFS 0x00040000 /* All allocations inherit GFP_NOFS. See memalloc_nfs_save() */ #define PF_MEMALLOC_NOIO 0x00080000 /* All allocations inherit GFP_NOIO. 
See memalloc_noio_save() */ diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h index fb1e295e7e63e..166b19af956f8 100644 --- a/include/linux/sched/smt.h +++ b/include/linux/sched/smt.h @@ -12,7 +12,7 @@ static __always_inline bool sched_smt_active(void) return static_branch_likely(&sched_smt_present); } #else -static inline bool sched_smt_active(void) { return false; } +static __always_inline bool sched_smt_active(void) { return false; } #endif void arch_smt_update(void); diff --git a/include/linux/serio.h b/include/linux/serio.h index bf2191f253509..69a47674af653 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h @@ -6,6 +6,7 @@ #define _SERIO_H +#include #include #include #include @@ -161,4 +162,6 @@ static inline void serio_continue_rx(struct serio *serio) spin_unlock_irq(&serio->lock); } +DEFINE_GUARD(serio_pause_rx, struct serio *, serio_pause_rx(_T), serio_continue_rx(_T)) + #endif diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 2cbe0c22a32f3..0b9095a281b89 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -91,6 +91,8 @@ struct sk_psock { struct sk_psock_progs progs; #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) struct strparser strp; + u32 copied_seq; + u32 ingress_bytes; #endif struct sk_buff_head ingress_skb; struct list_head ingress_msg; diff --git a/include/linux/socket.h b/include/linux/socket.h index d18cc47e89bd0..c3322eb3d6865 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -392,6 +392,8 @@ struct ucred { extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); +extern int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len, + void *data); struct timespec64; struct __kernel_timespec; diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index fec1e8a1570c3..eac57914dcf32 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -158,7 +158,6 @@ enum { RPC_TASK_NEED_XMIT, RPC_TASK_NEED_RECV, RPC_TASK_MSG_PIN_WAIT, - RPC_TASK_SIGNALLED, }; #define rpc_test_and_set_running(t) \ @@ -171,7 +170,7 @@ enum { #define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate) -#define RPC_SIGNALLED(t) test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate) +#define RPC_SIGNALLED(t) (READ_ONCE(task->tk_rpc_status) == -ERESTARTSYS) /* * Task priorities. 
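For context on the DEFINE_GUARD() added to <linux/serio.h> above: a guard defined this way lets a caller pause the serio RX path for a lexical scope and have it resumed automatically when the scope ends, instead of pairing serio_pause_rx()/serio_continue_rx() by hand. A minimal illustrative sketch follows (hypothetical driver helper, not part of this patch):

	/* Illustrative only: the guard constructor runs serio_pause_rx(serio),
	 * and serio_continue_rx(serio) runs automatically when the variable
	 * goes out of scope, including on early returns.
	 */
	static void example_sync_state(struct serio *serio)
	{
		guard(serio_pause_rx)(serio);
		/* ... update state shared with the RX/interrupt handler ... */
	}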
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 25ea8fe2313e6..0da2c257e32cf 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -83,8 +83,6 @@ struct thermal_trip { #define THERMAL_TRIP_PRIV_TO_INT(_val_) (uintptr_t)(_val_) #define THERMAL_INT_TO_TRIP_PRIV(_val_) (void *)(uintptr_t)(_val_) -struct thermal_zone_device; - struct cooling_spec { unsigned long upper; /* Highest cooling state */ unsigned long lower; /* Lowest cooling state */ diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 20a40ade80308..6c3125300c009 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -335,6 +335,7 @@ enum tpm2_cc_attrs { #define TPM_VID_WINBOND 0x1050 #define TPM_VID_STM 0x104A #define TPM_VID_ATML 0x1114 +#define TPM_VID_IFX 0x15D1 enum tpm_chip_flags { TPM_CHIP_FLAG_BOOTSTRAPPED = BIT(0), diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 6d7adf2767d6b..46bf241113246 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -677,6 +677,20 @@ struct trace_event_file { atomic_t tm_ref; /* trigger-mode reference counter */ }; +#ifdef CONFIG_HIST_TRIGGERS +extern struct irq_work hist_poll_work; +extern wait_queue_head_t hist_poll_wq; + +static inline void hist_poll_wakeup(void) +{ + if (wq_has_sleeper(&hist_poll_wq)) + irq_work_queue(&hist_poll_work); +} + +#define hist_poll_wait(file, wait) \ + poll_wait(file, &hist_poll_wq, wait) +#endif + #define __TRACE_EVENT_FLAGS(name, value) \ static int __init trace_init_flags_##name(void) \ { \ diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 2b294bf1881fe..d0cb0e02cd6ae 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -28,6 +28,8 @@ struct page; #define MAX_URETPROBE_DEPTH 64 +#define UPROBE_NO_TRAMPOLINE_VADDR (~0UL) + struct uprobe_consumer { /* * handler() can return UPROBE_HANDLER_REMOVE to signal the need to diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 5bb4eaa52e14c..71d2432876406 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -353,6 +353,22 @@ enum { * during the hdev->setup vendor callback. */ HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY, + + /* When this quirk is set, the HCI_OP_READ_VOICE_SETTING command is + * skipped. This is required for a subset of the CSR controller clones + * which erroneously claim to support it. + * + * This quirk must be set before hci_register_dev is called. + */ + HCI_QUIRK_BROKEN_READ_VOICE_SETTING, + + /* When this quirk is set, the HCI_OP_READ_PAGE_SCAN_TYPE command is + * skipped. This is required for a subset of the CSR controller clones + * which erroneously claim to support it. + * + * This quirk must be set before hci_register_dev is called. 
+ */ + HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, }; /* HCI device flags */ @@ -683,7 +699,7 @@ enum { #define HCI_ERROR_REMOTE_POWER_OFF 0x15 #define HCI_ERROR_LOCAL_HOST_TERM 0x16 #define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18 -#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1e +#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1a #define HCI_ERROR_INVALID_LL_PARAMS 0x1e #define HCI_ERROR_UNSPECIFIED 0x1f #define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index ba7b52584770d..4245910ffc4a2 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -804,6 +804,7 @@ struct hci_conn_params { extern struct list_head hci_dev_list; extern struct list_head hci_cb_list; extern rwlock_t hci_dev_list_lock; +extern struct mutex hci_cb_list_lock; #define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags) #define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags) @@ -1920,6 +1921,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn); ((dev)->commands[20] & 0x10 && \ !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks)) +#define read_voice_setting_capable(dev) \ + ((dev)->commands[9] & 0x04 && \ + !test_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &(dev)->quirks)) + /* Use enhanced synchronous connection if command is supported and its quirk * has not been set. */ @@ -2006,47 +2011,24 @@ struct hci_cb { char *name; - bool (*match) (struct hci_conn *conn); void (*connect_cfm) (struct hci_conn *conn, __u8 status); void (*disconn_cfm) (struct hci_conn *conn, __u8 status); void (*security_cfm) (struct hci_conn *conn, __u8 status, - __u8 encrypt); + __u8 encrypt); void (*key_change_cfm) (struct hci_conn *conn, __u8 status); void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); }; -static inline void hci_cb_lookup(struct hci_conn *conn, struct list_head *list) -{ - struct hci_cb *cb, *cpy; - - rcu_read_lock(); - list_for_each_entry_rcu(cb, &hci_cb_list, list) { - if (cb->match && cb->match(conn)) { - cpy = kmalloc(sizeof(*cpy), GFP_ATOMIC); - if (!cpy) - break; - - *cpy = *cb; - INIT_LIST_HEAD(&cpy->list); - list_add_rcu(&cpy->list, list); - } - } - rcu_read_unlock(); -} - static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status) { - struct list_head list; - struct hci_cb *cb, *tmp; - - INIT_LIST_HEAD(&list); - hci_cb_lookup(conn, &list); + struct hci_cb *cb; - list_for_each_entry_safe(cb, tmp, &list, list) { + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { if (cb->connect_cfm) cb->connect_cfm(conn, status); - kfree(cb); } + mutex_unlock(&hci_cb_list_lock); if (conn->connect_cfm_cb) conn->connect_cfm_cb(conn, status); @@ -2054,43 +2036,22 @@ static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status) static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason) { - struct list_head list; - struct hci_cb *cb, *tmp; - - INIT_LIST_HEAD(&list); - hci_cb_lookup(conn, &list); + struct hci_cb *cb; - list_for_each_entry_safe(cb, tmp, &list, list) { + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { if (cb->disconn_cfm) cb->disconn_cfm(conn, reason); - kfree(cb); } + mutex_unlock(&hci_cb_list_lock); if (conn->disconn_cfm_cb) conn->disconn_cfm_cb(conn, reason); } -static inline void hci_security_cfm(struct hci_conn *conn, __u8 status, - __u8 encrypt) -{ - struct list_head list; - struct hci_cb *cb, *tmp; - - INIT_LIST_HEAD(&list); - hci_cb_lookup(conn, &list); - - list_for_each_entry_safe(cb, 
tmp, &list, list) { - if (cb->security_cfm) - cb->security_cfm(conn, status, encrypt); - kfree(cb); - } - - if (conn->security_cfm_cb) - conn->security_cfm_cb(conn, status); -} - static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) { + struct hci_cb *cb; __u8 encrypt; if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) @@ -2098,11 +2059,20 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00; - hci_security_cfm(conn, status, encrypt); + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { + if (cb->security_cfm) + cb->security_cfm(conn, status, encrypt); + } + mutex_unlock(&hci_cb_list_lock); + + if (conn->security_cfm_cb) + conn->security_cfm_cb(conn, status); } static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) { + struct hci_cb *cb; __u8 encrypt; if (conn->state == BT_CONFIG) { @@ -2129,38 +2099,40 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) conn->sec_level = conn->pending_sec_level; } - hci_security_cfm(conn, status, encrypt); + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { + if (cb->security_cfm) + cb->security_cfm(conn, status, encrypt); + } + mutex_unlock(&hci_cb_list_lock); + + if (conn->security_cfm_cb) + conn->security_cfm_cb(conn, status); } static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) { - struct list_head list; - struct hci_cb *cb, *tmp; - - INIT_LIST_HEAD(&list); - hci_cb_lookup(conn, &list); + struct hci_cb *cb; - list_for_each_entry_safe(cb, tmp, &list, list) { + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { if (cb->key_change_cfm) cb->key_change_cfm(conn, status); - kfree(cb); } + mutex_unlock(&hci_cb_list_lock); } static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role) { - struct list_head list; - struct hci_cb *cb, *tmp; - - INIT_LIST_HEAD(&list); - hci_cb_lookup(conn, &list); + struct hci_cb *cb; - list_for_each_entry_safe(cb, tmp, &list, list) { + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { if (cb->role_switch_cfm) cb->role_switch_cfm(conn, status, role); - kfree(cb); } + mutex_unlock(&hci_cb_list_lock); } static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type) diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index d9c767cf773de..9189354c568f4 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -668,7 +668,7 @@ struct l2cap_conn { struct l2cap_chan *smp; struct list_head chan_l; - struct mutex chan_lock; + struct mutex lock; struct kref ref; struct list_head users; }; @@ -970,6 +970,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err); void l2cap_send_conn_req(struct l2cap_chan *chan); struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn); +struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *conn); void l2cap_conn_put(struct l2cap_conn *conn); int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user); diff --git a/include/net/gro.h b/include/net/gro.h index b9b58c1f8d190..7b548f91754bf 100644 --- a/include/net/gro.h +++ b/include/net/gro.h @@ -11,6 +11,9 @@ #include #include +/* This should be increased if a protocol with a bigger head is added. 
*/ +#define GRO_MAX_HEAD (MAX_HEADER + 128) + struct napi_gro_cb { union { struct { diff --git a/include/net/ip.h b/include/net/ip.h index fe4f854381143..bd201278c55a5 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -424,6 +424,11 @@ int ip_decrease_ttl(struct iphdr *iph) return --iph->ttl; } +static inline dscp_t ip4h_dscp(const struct iphdr *ip4h) +{ + return inet_dsfield_to_dscp(ip4h->tos); +} + static inline int ip_mtu_locked(const struct dst_entry *dst) { const struct rtable *rt = dst_rtable(dst); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 5b712582f9a9c..3b964f8834e71 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -2826,6 +2826,11 @@ struct ieee80211_txq { * implements MLO, so operation can continue on other links when one * link is switching. * + * @IEEE80211_HW_STRICT: strictly enforce certain things mandated by the spec + * but otherwise ignored/worked around for interoperability. This is a + * HW flag so drivers can opt in according to their own control, e.g. in + * testing. + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -2885,6 +2890,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_DISALLOW_PUNCTURING, IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ, IEEE80211_HW_HANDLES_QUIET_CSA, + IEEE80211_HW_STRICT, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index de47fa533b150..6a0e83ac0fdb4 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -406,8 +406,6 @@ struct gdma_context { struct gdma_dev mana_ib; }; -#define MAX_NUM_GDMA_DEVICES 4 - static inline bool mana_gd_is_mana(struct gdma_dev *gd) { return gd->dev_id.type == GDMA_DEVICE_MANA; @@ -554,11 +552,15 @@ enum { #define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3) #define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5) +/* Driver can handle holes (zeros) in the device list */ +#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11) + #define GDMA_DRV_CAP_FLAGS1 \ (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \ GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \ GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \ - GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT) + GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \ + GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP) #define GDMA_DRV_CAP_FLAGS2 0 @@ -619,11 +621,12 @@ struct gdma_query_max_resources_resp { }; /* HW DATA */ /* GDMA_LIST_DEVICES */ +#define GDMA_DEV_LIST_SIZE 64 struct gdma_list_devices_resp { struct gdma_resp_hdr hdr; u32 num_of_devs; u32 reserved; - struct gdma_dev_id devs[64]; + struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE]; }; /* HW DATA */ /* GDMA_REGISTER_DEVICE */ diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 788513cc384b7..757abcb54d117 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1889,7 +1889,7 @@ void nft_chain_filter_fini(void); void __init nft_chain_route_init(void); void nft_chain_route_fini(void); -void nf_tables_trans_destroy_flush_work(void); +void nf_tables_trans_destroy_flush_work(struct net *net); int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result); __be64 nf_jiffies64_to_msecs(u64 input); @@ -1903,6 +1903,7 @@ static inline int nft_request_module(struct net *net, const char *fmt, ...) 
{ re struct nftables_pernet { struct list_head tables; struct list_head commit_list; + struct list_head destroy_list; struct list_head commit_set_list; struct list_head binding_list; struct list_head module_list; @@ -1913,6 +1914,7 @@ struct nftables_pernet { unsigned int base_seq; unsigned int gc_seq; u8 validate_state; + struct work_struct destroy_work; }; extern unsigned int nf_tables_net_id; diff --git a/include/net/route.h b/include/net/route.h index da34b6fa9862d..8a11d19f897bb 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -208,12 +208,13 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src, const struct sk_buff *hint); static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, - u8 tos, struct net_device *devin) + dscp_t dscp, struct net_device *devin) { int err; rcu_read_lock(); - err = ip_route_input_noref(skb, dst, src, tos, devin); + err = ip_route_input_noref(skb, dst, src, inet_dscp_to_dsfield(dscp), + devin); if (!err) { skb_dst_force(skb); if (!skb_dst(skb)) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 31248cfdfb235..dcd288fa1bb6f 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -775,6 +775,7 @@ struct sctp_transport { /* Reference counting. */ refcount_t refcnt; + __u32 dead:1, /* RTO-Pending : A flag used to track if one of the DATA * chunks sent to this address is currently being * used to compute a RTT. If this flag is 0, @@ -784,7 +785,7 @@ struct sctp_transport { * calculation completes (i.e. the DATA chunk * is SACK'd) clear this flag. */ - __u32 rto_pending:1, + rto_pending:1, /* * hb_sent : a flag that signals that we have a pending diff --git a/include/net/sock.h b/include/net/sock.h index fa055cf1785ef..fa9b9dadbe170 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -338,6 +338,8 @@ struct sk_filter; * @sk_txtime_unused: unused txtime flags * @ns_tracker: tracker for netns reference * @sk_user_frags: xarray of pages the user is holding a reference on. + * @sk_owner: reference to the real owner of the socket that calls + * sock_lock_init_class_and_name(). */ struct sock { /* @@ -544,6 +546,10 @@ struct sock { struct rcu_head sk_rcu; netns_tracker ns_tracker; struct xarray sk_user_frags; + +#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES) + struct module *sk_owner; +#endif }; struct sock_bh_locked { @@ -1585,6 +1591,35 @@ static inline void sk_mem_uncharge(struct sock *sk, int size) sk_mem_reclaim(sk); } +#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES) +static inline void sk_owner_set(struct sock *sk, struct module *owner) +{ + __module_get(owner); + sk->sk_owner = owner; +} + +static inline void sk_owner_clear(struct sock *sk) +{ + sk->sk_owner = NULL; +} + +static inline void sk_owner_put(struct sock *sk) +{ + module_put(sk->sk_owner); +} +#else +static inline void sk_owner_set(struct sock *sk, struct module *owner) +{ +} + +static inline void sk_owner_clear(struct sock *sk) +{ +} + +static inline void sk_owner_put(struct sock *sk) +{ +} +#endif /* * Macro so as to not evaluate some arguments when * lockdep is not enabled. 
@@ -1594,13 +1629,14 @@ static inline void sk_mem_uncharge(struct sock *sk, int size) */ #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ do { \ + sk_owner_set(sk, THIS_MODULE); \ sk->sk_lock.owned = 0; \ init_waitqueue_head(&sk->sk_lock.wq); \ spin_lock_init(&(sk)->sk_lock.slock); \ debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ - sizeof((sk)->sk_lock)); \ + sizeof((sk)->sk_lock)); \ lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ - (skey), (sname)); \ + (skey), (sname)); \ lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ } while (0) diff --git a/include/net/strparser.h b/include/net/strparser.h index 41e2ce9e9e10f..0a83010b3a64a 100644 --- a/include/net/strparser.h +++ b/include/net/strparser.h @@ -43,6 +43,8 @@ struct strparser; struct strp_callbacks { int (*parse_msg)(struct strparser *strp, struct sk_buff *skb); void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb); + int (*read_sock)(struct strparser *strp, read_descriptor_t *desc, + sk_read_actor_t recv_actor); int (*read_sock_done)(struct strparser *strp, int err); void (*abort_parser)(struct strparser *strp, int err); void (*lock)(struct strparser *strp); diff --git a/include/net/tcp.h b/include/net/tcp.h index d1948d357dade..3255a199ef60d 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -683,6 +684,19 @@ void tcp_fin(struct sock *sk); void tcp_check_space(struct sock *sk); void tcp_sack_compress_send_ack(struct sock *sk); +static inline void tcp_cleanup_skb(struct sk_buff *skb) +{ + skb_dst_drop(skb); + secpath_reset(skb); +} + +static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb) +{ + DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); + DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb)); + __skb_queue_tail(&sk->sk_receive_queue, skb); +} + /* tcp_timer.c */ void tcp_init_xmit_timers(struct sock *); static inline void tcp_clear_xmit_timers(struct sock *sk) @@ -729,6 +743,9 @@ void tcp_get_info(struct sock *, struct tcp_info *); /* Read 'sendfile()'-style from a TCP socket */ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, sk_read_actor_t recv_actor); +int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc, + sk_read_actor_t recv_actor, bool noack, + u32 *copied_seq); int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor); struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off); void tcp_read_done(struct sock *sk, size_t len); @@ -2595,6 +2612,11 @@ struct sk_psock; #ifdef CONFIG_BPF_SYSCALL int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); void tcp_bpf_clone(const struct sock *sk, struct sock *newsk); +#ifdef CONFIG_BPF_STREAM_PARSER +struct strparser; +int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc, + sk_read_actor_t recv_actor); +#endif /* CONFIG_BPF_STREAM_PARSER */ #endif /* CONFIG_BPF_SYSCALL */ #ifdef CONFIG_INET diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 67551133b5228..c2b5de75daf25 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2737,6 +2737,7 @@ struct ib_device { * It is a NULL terminated array. 
*/ const struct attribute_group *groups[4]; + u8 hw_stats_attr_index; u64 uverbs_cmd_mask; diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h index 3dc7a1551ac35..5d653a3491d07 100644 --- a/include/sound/cs35l56.h +++ b/include/sound/cs35l56.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #define CS35L56_DEVID 0x0000000 @@ -61,6 +62,7 @@ #define CS35L56_IRQ1_MASK_8 0x000E0AC #define CS35L56_IRQ1_MASK_18 0x000E0D4 #define CS35L56_IRQ1_MASK_20 0x000E0DC +#define CS35L56_DSP_MBOX_1_RAW 0x0011000 #define CS35L56_DSP_VIRTUAL1_MBOX_1 0x0011020 #define CS35L56_DSP_VIRTUAL1_MBOX_2 0x0011024 #define CS35L56_DSP_VIRTUAL1_MBOX_3 0x0011028 @@ -224,6 +226,7 @@ #define CS35L56_HALO_STATE_SHUTDOWN 1 #define CS35L56_HALO_STATE_BOOT_DONE 2 +#define CS35L56_MBOX_CMD_PING 0x0A000000 #define CS35L56_MBOX_CMD_AUDIO_PLAY 0x0B000001 #define CS35L56_MBOX_CMD_AUDIO_PAUSE 0x0B000002 #define CS35L56_MBOX_CMD_AUDIO_REINIT 0x0B000003 @@ -254,6 +257,16 @@ #define CS35L56_NUM_BULK_SUPPLIES 3 #define CS35L56_NUM_DSP_REGIONS 5 +/* Additional margin for SYSTEM_RESET to control port ready on SPI */ +#define CS35L56_SPI_RESET_TO_PORT_READY_US (CS35L56_CONTROL_PORT_READY_US + 2500) + +struct cs35l56_spi_payload { + __be32 addr; + __be16 pad; + __be32 value; +} __packed; +static_assert(sizeof(struct cs35l56_spi_payload) == 10); + struct cs35l56_base { struct device *dev; struct regmap *regmap; @@ -269,6 +282,7 @@ struct cs35l56_base { s8 cal_index; struct cirrus_amp_cal_data cal_data; struct gpio_desc *reset_gpio; + struct cs35l56_spi_payload *spi_payload_buf; }; static inline bool cs35l56_is_otp_register(unsigned int reg) @@ -276,6 +290,23 @@ static inline bool cs35l56_is_otp_register(unsigned int reg) return (reg >> 16) == 3; } +static inline int cs35l56_init_config_for_spi(struct cs35l56_base *cs35l56, + struct spi_device *spi) +{ + cs35l56->spi_payload_buf = devm_kzalloc(&spi->dev, + sizeof(*cs35l56->spi_payload_buf), + GFP_KERNEL | GFP_DMA); + if (!cs35l56->spi_payload_buf) + return -ENOMEM; + + return 0; +} + +static inline bool cs35l56_is_spi(struct cs35l56_base *cs35l56) +{ + return IS_ENABLED(CONFIG_SPI_MASTER) && !!cs35l56->spi_payload_buf; +} + extern const struct regmap_config cs35l56_regmap_i2c; extern const struct regmap_config cs35l56_regmap_spi; extern const struct regmap_config cs35l56_regmap_sdw; diff --git a/include/sound/soc.h b/include/sound/soc.h index e6e359c1a2ac4..db3b464a91c7b 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -1251,7 +1251,10 @@ void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd); /* mixer control */ struct soc_mixer_control { - int min, max, platform_max; + /* Minimum and maximum specified as written to the hardware */ + int min, max; + /* Limited maximum value specified as presented through the control */ + int platform_max; int reg, rreg; unsigned int shift, rshift; unsigned int sign_bit; diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 9a75590227f26..3dddfc6abf0ee 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -173,6 +173,7 @@ enum yfs_cm_operation { EM(afs_cell_trace_get_queue_dns, "GET q-dns ") \ EM(afs_cell_trace_get_queue_manage, "GET q-mng ") \ EM(afs_cell_trace_get_queue_new, "GET q-new ") \ + EM(afs_cell_trace_get_server, "GET server") \ EM(afs_cell_trace_get_vol, "GET vol ") \ EM(afs_cell_trace_insert, "INSERT ") \ EM(afs_cell_trace_manage, "MANAGE ") \ @@ -180,6 +181,7 @@ enum yfs_cm_operation { EM(afs_cell_trace_put_destroy, "PUT destry") \ 
EM(afs_cell_trace_put_queue_work, "PUT q-work") \ EM(afs_cell_trace_put_queue_fail, "PUT q-fail") \ + EM(afs_cell_trace_put_server, "PUT server") \ EM(afs_cell_trace_put_vol, "PUT vol ") \ EM(afs_cell_trace_see_source, "SEE source") \ EM(afs_cell_trace_see_ws, "SEE ws ") \ diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 5e84952166895..5fe852bd31abc 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -360,8 +360,7 @@ TRACE_EVENT(rpc_request, { (1UL << RPC_TASK_ACTIVE), "ACTIVE" }, \ { (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" }, \ { (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" }, \ - { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" }, \ - { (1UL << RPC_TASK_SIGNALLED), "SIGNALLED" }) + { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" }) DECLARE_EVENT_CLASS(rpc_task_running, diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h index c4182e95a6195..4a8a4a63e99ca 100644 --- a/include/uapi/drm/xe_drm.h +++ b/include/uapi/drm/xe_drm.h @@ -1485,6 +1485,7 @@ struct drm_xe_oa_unit { /** @capabilities: OA capabilities bit-mask */ __u64 capabilities; #define DRM_XE_OA_CAPS_BASE (1 << 0) +#define DRM_XE_OA_CAPS_SYNCS (1 << 1) /** @oa_timestamp_freq: OA timestamp freq */ __u64 oa_timestamp_freq; @@ -1634,6 +1635,22 @@ enum drm_xe_oa_property_id { * to be disabled for the stream exec queue. */ DRM_XE_OA_PROPERTY_NO_PREEMPT, + + /** + * @DRM_XE_OA_PROPERTY_NUM_SYNCS: Number of syncs in the sync array + * specified in @DRM_XE_OA_PROPERTY_SYNCS + */ + DRM_XE_OA_PROPERTY_NUM_SYNCS, + + /** + * @DRM_XE_OA_PROPERTY_SYNCS: Pointer to struct @drm_xe_sync array + * with array size specified via @DRM_XE_OA_PROPERTY_NUM_SYNCS. OA + * configuration will wait till input fences signal. Output fences + * will signal after the new OA configuration takes effect. For + * @DRM_XE_SYNC_TYPE_USER_FENCE, @addr is a user pointer, similar + * to the VM bind case. + */ + DRM_XE_OA_PROPERTY_SYNCS, }; /** diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 717307d6b5b74..3e1c11d9d9808 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -62,6 +62,8 @@ struct kfd_ioctl_get_version_args { #define KFD_MAX_QUEUE_PERCENTAGE 100 #define KFD_MAX_QUEUE_PRIORITY 15 +#define KFD_MIN_QUEUE_RING_SIZE 1024 + struct kfd_ioctl_create_queue_args { __u64 ring_base_address; /* to KFD */ __u64 write_pointer_address; /* from KFD */ diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h index 33745642f7875..c223572f82296 100644 --- a/include/uapi/linux/landlock.h +++ b/include/uapi/linux/landlock.h @@ -57,9 +57,11 @@ struct landlock_ruleset_attr { * * - %LANDLOCK_CREATE_RULESET_VERSION: Get the highest supported Landlock ABI * version. + * - %LANDLOCK_CREATE_RULESET_ERRATA: Get a bitmask of fixed issues. 
*/ /* clang-format off */ #define LANDLOCK_CREATE_RULESET_VERSION (1U << 0) +#define LANDLOCK_CREATE_RULESET_ERRATA (1U << 1) /* clang-format on */ /** diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 4842c36fdf801..0524d541d4e3d 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -511,7 +511,16 @@ struct perf_event_attr { __u16 sample_max_stack; __u16 __reserved_2; __u32 aux_sample_size; - __u32 __reserved_3; + + union { + __u32 aux_action; + struct { + __u32 aux_start_paused : 1, /* start AUX area tracing paused */ + aux_pause : 1, /* on overflow, pause AUX area tracing */ + aux_resume : 1, /* on overflow, resume AUX area tracing */ + __reserved_3 : 29; + }; + }; /* * User provided data if sigtrap=1, passed back to user via diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 832c15d9155bd..eeb20dfb1fdaa 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -73,13 +73,20 @@ typedef enum { SEV_RET_INVALID_PARAM, SEV_RET_RESOURCE_LIMIT, SEV_RET_SECURE_DATA_INVALID, - SEV_RET_INVALID_KEY = 0x27, - SEV_RET_INVALID_PAGE_SIZE, - SEV_RET_INVALID_PAGE_STATE, - SEV_RET_INVALID_MDATA_ENTRY, - SEV_RET_INVALID_PAGE_OWNER, - SEV_RET_INVALID_PAGE_AEAD_OFLOW, - SEV_RET_RMP_INIT_REQUIRED, + SEV_RET_INVALID_PAGE_SIZE = 0x0019, + SEV_RET_INVALID_PAGE_STATE = 0x001A, + SEV_RET_INVALID_MDATA_ENTRY = 0x001B, + SEV_RET_INVALID_PAGE_OWNER = 0x001C, + SEV_RET_AEAD_OFLOW = 0x001D, + SEV_RET_EXIT_RING_BUFFER = 0x001F, + SEV_RET_RMP_INIT_REQUIRED = 0x0020, + SEV_RET_BAD_SVN = 0x0021, + SEV_RET_BAD_VERSION = 0x0022, + SEV_RET_SHUTDOWN_REQUIRED = 0x0023, + SEV_RET_UPDATE_FAILED = 0x0024, + SEV_RET_RESTORE_REQUIRED = 0x0025, + SEV_RET_RMP_INITIALIZATION_FAILED = 0x0026, + SEV_RET_INVALID_KEY = 0x0027, SEV_RET_MAX, } sev_ret_code; diff --git a/include/uapi/linux/rkisp1-config.h b/include/uapi/linux/rkisp1-config.h index 430daceafac70..2d995f3c1ca37 100644 --- a/include/uapi/linux/rkisp1-config.h +++ b/include/uapi/linux/rkisp1-config.h @@ -1528,7 +1528,7 @@ enum rksip1_ext_param_buffer_version { * The expected memory layout of the parameters buffer is:: * * +-------------------- struct rkisp1_ext_params_cfg -------------------+ - * | version = RKISP_EXT_PARAMS_BUFFER_V1; | + * | version = RKISP1_EXT_PARAM_BUFFER_V1; | * | data_size = sizeof(struct rkisp1_ext_params_bls_config) | * | + sizeof(struct rkisp1_ext_params_dpcc_config); | * | +------------------------- data ---------------------------------+ | diff --git a/include/xen/interface/xen-mca.h b/include/xen/interface/xen-mca.h index 464aa6b3a5f92..1c9afbe8cc260 100644 --- a/include/xen/interface/xen-mca.h +++ b/include/xen/interface/xen-mca.h @@ -372,7 +372,7 @@ struct xen_mce { #define XEN_MCE_LOG_LEN 32 struct xen_mce_log { - char signature[12]; /* "MACHINECHECK" */ + char signature[12] __nonstring; /* "MACHINECHECK" */ unsigned len; /* = XEN_MCE_LOG_LEN */ unsigned next; unsigned flags; diff --git a/init/Kconfig b/init/Kconfig index 7256fa127530f..243d0087f9445 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -129,6 +129,11 @@ config CC_HAS_COUNTED_BY # https://github.com/llvm/llvm-project/pull/112636 depends on !(CC_IS_CLANG && CLANG_VERSION < 190103) +config LD_CAN_USE_KEEP_IN_OVERLAY + # ld.lld prior to 21.0.0 did not support KEEP within an overlay description + # https://github.com/llvm/llvm-project/pull/130661 + def_bool LD_IS_BFD || LLD_VERSION >= 210000 + config PAHOLE_VERSION int default $(shell,$(srctree)/scripts/pahole-version.sh 
$(PAHOLE)) @@ -1958,7 +1963,7 @@ config RUST depends on !MODVERSIONS depends on !GCC_PLUGIN_RANDSTRUCT depends on !RANDSTRUCT - depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE + depends on !DEBUG_INFO_BTF || (PAHOLE_HAS_LANG_EXCLUDE && !LTO) depends on !CFI_CLANG || HAVE_CFI_ICALL_NORMALIZE_INTEGERS_RUSTC select CFI_ICALL_NORMALIZE_INTEGERS if CFI_CLANG depends on !CALL_PADDING || RUSTC_VERSION >= 108100 diff --git a/io_uring/futex.c b/io_uring/futex.c index 914848f46beb2..01f044f89f8fa 100644 --- a/io_uring/futex.c +++ b/io_uring/futex.c @@ -349,7 +349,7 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags) hlist_add_head(&req->hash_node, &ctx->futex_list); io_ring_submit_unlock(ctx, issue_flags); - futex_queue(&ifd->q, hb); + futex_queue(&ifd->q, hb, NULL); return IOU_ISSUE_SKIP_COMPLETE; } diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index a38f36b680604..a2d577b099308 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -64,7 +64,7 @@ struct io_worker { union { struct rcu_head rcu; - struct work_struct work; + struct delayed_work work; }; }; @@ -770,6 +770,18 @@ static inline bool io_should_retry_thread(struct io_worker *worker, long err) } } +static void queue_create_worker_retry(struct io_worker *worker) +{ + /* + * We only bother retrying because there's a chance that the + * failure to create a worker is due to some temporary condition + * in the forking task (e.g. outstanding signal); give the task + * some time to clear that condition. + */ + schedule_delayed_work(&worker->work, + msecs_to_jiffies(worker->init_retries * 5)); +} + static void create_worker_cont(struct callback_head *cb) { struct io_worker *worker; @@ -809,12 +821,13 @@ static void create_worker_cont(struct callback_head *cb) /* re-create attempts grab a new worker ref, drop the existing one */ io_worker_release(worker); - schedule_work(&worker->work); + queue_create_worker_retry(worker); } static void io_workqueue_create(struct work_struct *work) { - struct io_worker *worker = container_of(work, struct io_worker, work); + struct io_worker *worker = container_of(work, struct io_worker, + work.work); struct io_wq_acct *acct = io_wq_get_acct(worker); if (!io_queue_worker_create(worker, acct, create_worker_cont)) @@ -855,8 +868,8 @@ static bool create_io_worker(struct io_wq *wq, int index) kfree(worker); goto fail; } else { - INIT_WORK(&worker->work, io_workqueue_create); - schedule_work(&worker->work); + INIT_DELAYED_WORK(&worker->work, io_workqueue_create); + queue_create_worker_retry(worker); } return true; diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 21f1bcba2f52b..19de7129ae0b3 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -1821,7 +1821,7 @@ void io_wq_submit_work(struct io_wq_work *work) * Don't allow any multishot execution from io-wq. It's more restrictive * than necessary and also cleaner. 
*/ - if (req->flags & REQ_F_APOLL_MULTISHOT) { + if (req->flags & (REQ_F_MULTISHOT|REQ_F_APOLL_MULTISHOT)) { err = -EBADFD; if (!io_file_can_poll(req)) goto fail; @@ -1832,7 +1832,7 @@ void io_wq_submit_work(struct io_wq_work *work) goto fail; return; } else { - req->flags &= ~REQ_F_APOLL_MULTISHOT; + req->flags &= ~(REQ_F_APOLL_MULTISHOT|REQ_F_MULTISHOT); } } @@ -2053,6 +2053,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, req->opcode = 0; return io_init_fail_req(req, -EINVAL); } + opcode = array_index_nospec(opcode, IORING_OP_LAST); + def = &io_issue_defs[opcode]; if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) { /* enforce forwards compatibility on users */ diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c index e1895952066ee..7a8c3a004800e 100644 --- a/io_uring/kbuf.c +++ b/io_uring/kbuf.c @@ -484,6 +484,8 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe p->nbufs = tmp; p->addr = READ_ONCE(sqe->addr); p->len = READ_ONCE(sqe->len); + if (!p->len) + return -EINVAL; if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs, &size)) diff --git a/io_uring/net.c b/io_uring/net.c index 3974c417fe264..384915d931b72 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -152,7 +152,7 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags) if (iov) kasan_mempool_poison_object(iov); req->async_data = NULL; - req->flags &= ~REQ_F_ASYNC_DATA; + req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP); } } @@ -334,7 +334,9 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req, if (unlikely(ret)) return ret; - return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL); + ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL); + sr->msg_control = iomsg->msg.msg_control_user; + return ret; } #endif @@ -433,6 +435,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) sr->msg_flags |= MSG_WAITALL; sr->buf_group = req->buf_index; req->buf_list = NULL; + req->flags |= REQ_F_MULTISHOT; } #ifdef CONFIG_COMPAT @@ -445,7 +448,6 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) static void io_req_msg_cleanup(struct io_kiocb *req, unsigned int issue_flags) { - req->flags &= ~REQ_F_NEED_CLEANUP; io_netmsg_recycle(req, issue_flags); } @@ -1426,6 +1428,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags) */ if (!(issue_flags & IO_URING_F_UNLOCKED)) { io_notif_flush(zc->notif); + zc->notif = NULL; io_req_msg_cleanup(req, 0); } io_req_set_res(req, ret, IORING_CQE_F_MORE); @@ -1486,6 +1489,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags) */ if (!(issue_flags & IO_URING_F_UNLOCKED)) { io_notif_flush(sr->notif); + sr->notif = NULL; io_req_msg_cleanup(req, 0); } io_req_set_res(req, ret, IORING_CQE_F_MORE); @@ -1613,6 +1617,8 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags) } io_req_set_res(req, ret, cflags); + if (!(issue_flags & IO_URING_F_MULTISHOT)) + return IOU_OK; return IOU_STOP_MULTISHOT; } diff --git a/io_uring/rw.c b/io_uring/rw.c index 39ad25d16ed40..a1ed64760eba2 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -862,7 +862,15 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags) if (unlikely(ret)) return ret; - ret = io_iter_do_read(rw, &io->iter); + if (unlikely(req->opcode == IORING_OP_READ_MULTISHOT)) { + void *cb_copy = rw->kiocb.ki_complete; + + rw->kiocb.ki_complete = NULL; + ret = io_iter_do_read(rw, &io->iter); + rw->kiocb.ki_complete = cb_copy; + } else { + ret = io_iter_do_read(rw, &io->iter); + } /* * 
Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT @@ -887,7 +895,8 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags) } else if (ret == -EIOCBQUEUED) { return IOU_ISSUE_SKIP_COMPLETE; } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock || - (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) { + (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) || + (issue_flags & IO_URING_F_MULTISHOT)) { /* read all, failed, already did sync or don't want to retry */ goto done; } @@ -1181,12 +1190,12 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) poll_flags |= BLK_POLL_ONESHOT; /* iopoll may have completed current req */ - if (!rq_list_empty(iob.req_list) || + if (!rq_list_empty(&iob.req_list) || READ_ONCE(req->iopoll_completed)) break; } - if (!rq_list_empty(iob.req_list)) + if (!rq_list_empty(&iob.req_list)) iob.complete(&iob); else if (!pos) return 0; diff --git a/kernel/acct.c b/kernel/acct.c index 179848ad33e97..d9d55fa4d01a7 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -103,48 +103,50 @@ struct bsd_acct_struct { atomic_long_t count; struct rcu_head rcu; struct mutex lock; - int active; + bool active; + bool check_space; unsigned long needcheck; struct file *file; struct pid_namespace *ns; struct work_struct work; struct completion done; + acct_t ac; }; -static void do_acct_process(struct bsd_acct_struct *acct); +static void fill_ac(struct bsd_acct_struct *acct); +static void acct_write_process(struct bsd_acct_struct *acct); /* * Check the amount of free space and suspend/resume accordingly. */ -static int check_free_space(struct bsd_acct_struct *acct) +static bool check_free_space(struct bsd_acct_struct *acct) { struct kstatfs sbuf; - if (time_is_after_jiffies(acct->needcheck)) - goto out; + if (!acct->check_space) + return acct->active; /* May block */ if (vfs_statfs(&acct->file->f_path, &sbuf)) - goto out; + return acct->active; if (acct->active) { u64 suspend = sbuf.f_blocks * SUSPEND; do_div(suspend, 100); if (sbuf.f_bavail <= suspend) { - acct->active = 0; + acct->active = false; pr_info("Process accounting paused\n"); } } else { u64 resume = sbuf.f_blocks * RESUME; do_div(resume, 100); if (sbuf.f_bavail >= resume) { - acct->active = 1; + acct->active = true; pr_info("Process accounting resumed\n"); } } acct->needcheck = jiffies + ACCT_TIMEOUT*HZ; -out: return acct->active; } @@ -189,7 +191,11 @@ static void acct_pin_kill(struct fs_pin *pin) { struct bsd_acct_struct *acct = to_acct(pin); mutex_lock(&acct->lock); - do_acct_process(acct); + /* + * Fill the accounting struct with the exiting task's info + * before punting to the workqueue. + */ + fill_ac(acct); schedule_work(&acct->work); wait_for_completion(&acct->done); cmpxchg(&acct->ns->bacct, pin, NULL); @@ -202,6 +208,9 @@ static void close_work(struct work_struct *work) { struct bsd_acct_struct *acct = container_of(work, struct bsd_acct_struct, work); struct file *file = acct->file; + + /* We were fired by acct_pin_kill() which holds acct->lock. */ + acct_write_process(acct); if (file->f_op->flush) file->f_op->flush(file, NULL); __fput_sync(file); @@ -234,6 +243,20 @@ static int acct_on(struct filename *pathname) return -EACCES; } + /* Exclude kernel kernel internal filesystems. */ + if (file_inode(file)->i_sb->s_flags & (SB_NOUSER | SB_KERNMOUNT)) { + kfree(acct); + filp_close(file, NULL); + return -EINVAL; + } + + /* Exclude procfs and sysfs. 
*/ + if (file_inode(file)->i_sb->s_iflags & SB_I_USERNS_VISIBLE) { + kfree(acct); + filp_close(file, NULL); + return -EINVAL; + } + if (!(file->f_mode & FMODE_CAN_WRITE)) { kfree(acct); filp_close(file, NULL); @@ -430,13 +453,27 @@ static u32 encode_float(u64 value) * do_exit() or when switching to a different output file. */ -static void fill_ac(acct_t *ac) +static void fill_ac(struct bsd_acct_struct *acct) { struct pacct_struct *pacct = ¤t->signal->pacct; + struct file *file = acct->file; + acct_t *ac = &acct->ac; u64 elapsed, run_time; time64_t btime; struct tty_struct *tty; + lockdep_assert_held(&acct->lock); + + if (time_is_after_jiffies(acct->needcheck)) { + acct->check_space = false; + + /* Don't fill in @ac if nothing will be written. */ + if (!acct->active) + return; + } else { + acct->check_space = true; + } + /* * Fill the accounting struct with the needed info as recorded * by the different kernel functions. @@ -484,64 +521,61 @@ static void fill_ac(acct_t *ac) ac->ac_majflt = encode_comp_t(pacct->ac_majflt); ac->ac_exitcode = pacct->ac_exitcode; spin_unlock_irq(¤t->sighand->siglock); -} -/* - * do_acct_process does all actual work. Caller holds the reference to file. - */ -static void do_acct_process(struct bsd_acct_struct *acct) -{ - acct_t ac; - unsigned long flim; - const struct cred *orig_cred; - struct file *file = acct->file; - /* - * Accounting records are not subject to resource limits. - */ - flim = rlimit(RLIMIT_FSIZE); - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; - /* Perform file operations on behalf of whoever enabled accounting */ - orig_cred = override_creds(file->f_cred); - - /* - * First check to see if there is enough free_space to continue - * the process accounting system. - */ - if (!check_free_space(acct)) - goto out; - - fill_ac(&ac); /* we really need to bite the bullet and change layout */ - ac.ac_uid = from_kuid_munged(file->f_cred->user_ns, orig_cred->uid); - ac.ac_gid = from_kgid_munged(file->f_cred->user_ns, orig_cred->gid); + ac->ac_uid = from_kuid_munged(file->f_cred->user_ns, current_uid()); + ac->ac_gid = from_kgid_munged(file->f_cred->user_ns, current_gid()); #if ACCT_VERSION == 1 || ACCT_VERSION == 2 /* backward-compatible 16 bit fields */ - ac.ac_uid16 = ac.ac_uid; - ac.ac_gid16 = ac.ac_gid; + ac->ac_uid16 = ac->ac_uid; + ac->ac_gid16 = ac->ac_gid; #elif ACCT_VERSION == 3 { struct pid_namespace *ns = acct->ns; - ac.ac_pid = task_tgid_nr_ns(current, ns); + ac->ac_pid = task_tgid_nr_ns(current, ns); rcu_read_lock(); - ac.ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), - ns); + ac->ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), ns); rcu_read_unlock(); } #endif +} + +static void acct_write_process(struct bsd_acct_struct *acct) +{ + struct file *file = acct->file; + const struct cred *cred; + acct_t *ac = &acct->ac; + + /* Perform file operations on behalf of whoever enabled accounting */ + cred = override_creds(file->f_cred); + /* - * Get freeze protection. If the fs is frozen, just skip the write - * as we could deadlock the system otherwise. + * First check to see if there is enough free_space to continue + * the process accounting system. Then get freeze protection. If + * the fs is frozen, just skip the write as we could deadlock + * the system otherwise. 
*/ - if (file_start_write_trylock(file)) { + if (check_free_space(acct) && file_start_write_trylock(file)) { /* it's been opened O_APPEND, so position is irrelevant */ loff_t pos = 0; - __kernel_write(file, &ac, sizeof(acct_t), &pos); + __kernel_write(file, ac, sizeof(acct_t), &pos); file_end_write(file); } -out: + + revert_creds(cred); +} + +static void do_acct_process(struct bsd_acct_struct *acct) +{ + unsigned long flim; + + /* Accounting records are not subject to resource limits. */ + flim = rlimit(RLIMIT_FSIZE); + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; + fill_ac(acct); + acct_write_process(acct); current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim; - revert_creds(orig_cred); } /** diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c index 93e48c7cad4ef..8c775a1401d3e 100644 --- a/kernel/bpf/arena.c +++ b/kernel/bpf/arena.c @@ -37,7 +37,7 @@ */ /* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */ -#define GUARD_SZ (1ull << sizeof_field(struct bpf_insn, off) * 8) +#define GUARD_SZ round_up(1ull << sizeof_field(struct bpf_insn, off) * 8, PAGE_SIZE << 1) #define KERN_VM_SZ (SZ_4G + GUARD_SZ) struct bpf_arena { diff --git a/kernel/bpf/bpf_cgrp_storage.c b/kernel/bpf/bpf_cgrp_storage.c index 28efd0a3f2200..6547fb7ac0dcb 100644 --- a/kernel/bpf/bpf_cgrp_storage.c +++ b/kernel/bpf/bpf_cgrp_storage.c @@ -154,7 +154,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) static void cgroup_storage_map_free(struct bpf_map *map) { - bpf_local_storage_map_free(map, &cgroup_cache, NULL); + bpf_local_storage_map_free(map, &cgroup_cache, &bpf_cgrp_storage_busy); } /* *gfp_flags* is a hidden argument provided by the verifier */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index a44f4be592be7..2c54c148a94f3 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6483,6 +6483,8 @@ static const struct bpf_raw_tp_null_args raw_tp_null_args[] = { /* rxrpc */ { "rxrpc_recvdata", 0x1 }, { "rxrpc_resend", 0x10 }, + /* skb */ + {"kfree_skb", 0x1000}, /* sunrpc */ { "xs_stream_read_data", 0x1 }, /* ... from xprt_cong_event event class */ diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 2b9c8c168a0ba..a60a6a2ce0d7f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2290,17 +2290,18 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth) insn->code = BPF_JMP | BPF_CALL_ARGS; } #endif -#else +#endif + static unsigned int __bpf_prog_ret0_warn(const void *ctx, const struct bpf_insn *insn) { /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON - * is not working properly, so warn about it! + * is not working properly, or interpreter is being used when + * prog->jit_requested is not 0, so warn about it! */ WARN_ON_ONCE(1); return 0; } -#endif bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp) @@ -2380,8 +2381,18 @@ static void bpf_prog_select_func(struct bpf_prog *fp) { #ifndef CONFIG_BPF_JIT_ALWAYS_ON u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); + u32 idx = (round_up(stack_depth, 32) / 32) - 1; - fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; + /* may_goto may cause stack size > 512, leading to idx out-of-bounds. + * But for non-JITed programs, we don't need bpf_func, so no bounds + * check needed. 
+ */ + if (!fp->jit_requested && + !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) { + fp->bpf_func = interpreters[idx]; + } else { + fp->bpf_func = __bpf_prog_ret0_warn; + } #else fp->bpf_func = __bpf_prog_ret0_warn; #endif diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c index e1cfe890e0be6..1499d8caa9a35 100644 --- a/kernel/bpf/ringbuf.c +++ b/kernel/bpf/ringbuf.c @@ -268,8 +268,6 @@ static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma /* allow writable mapping for the consumer_pos only */ if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE) return -EPERM; - } else { - vm_flags_clear(vma, VM_MAYWRITE); } /* remap_vmalloc_range() checks size and offset constraints */ return remap_vmalloc_range(vma, rb_map->rb, @@ -289,8 +287,6 @@ static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma * position, and the ring buffer data itself. */ return -EPERM; - } else { - vm_flags_clear(vma, VM_MAYWRITE); } /* remap_vmalloc_range() checks size and offset constraints */ return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 368ae8d231d41..696e5a2cbea2e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -936,7 +936,7 @@ static const struct vm_operations_struct bpf_map_default_vmops = { static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma) { struct bpf_map *map = filp->private_data; - int err; + int err = 0; if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record)) return -ENOTSUPP; @@ -960,24 +960,33 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma) err = -EACCES; goto out; } + bpf_map_write_active_inc(map); } +out: + mutex_unlock(&map->freeze_mutex); + if (err) + return err; /* set default open/close callbacks */ vma->vm_ops = &bpf_map_default_vmops; vma->vm_private_data = map; vm_flags_clear(vma, VM_MAYEXEC); + /* If mapping is read-only, then disallow potentially re-mapping with + * PROT_WRITE by dropping VM_MAYWRITE flag. 
This VM_MAYWRITE clearing + * means that as far as BPF map's memory-mapped VMAs are concerned, + * VM_WRITE and VM_MAYWRITE and equivalent, if one of them is set, + * both should be set, so we can forget about VM_MAYWRITE and always + * check just VM_WRITE + */ if (!(vma->vm_flags & VM_WRITE)) - /* disallow re-mapping with PROT_WRITE */ vm_flags_clear(vma, VM_MAYWRITE); err = map->ops->map_mmap(map, vma); - if (err) - goto out; + if (err) { + if (vma->vm_flags & VM_WRITE) + bpf_map_write_active_dec(map); + } - if (vma->vm_flags & VM_MAYWRITE) - bpf_map_write_active_inc(map); -out: - mutex_unlock(&map->freeze_mutex); return err; } @@ -1863,8 +1872,6 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file, return err; } -#define MAP_LOOKUP_RETRIES 3 - int generic_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr) @@ -1874,8 +1881,8 @@ int generic_map_lookup_batch(struct bpf_map *map, void __user *values = u64_to_user_ptr(attr->batch.values); void __user *keys = u64_to_user_ptr(attr->batch.keys); void *buf, *buf_prevkey, *prev_key, *key, *value; - int err, retry = MAP_LOOKUP_RETRIES; u32 value_size, cp, max_count; + int err; if (attr->batch.elem_flags & ~BPF_F_LOCK) return -EINVAL; @@ -1921,14 +1928,8 @@ int generic_map_lookup_batch(struct bpf_map *map, err = bpf_map_copy_value(map, key, value, attr->batch.elem_flags); - if (err == -ENOENT) { - if (retry) { - retry--; - continue; - } - err = -EINTR; - break; - } + if (err == -ENOENT) + goto next_key; if (err) goto free_buf; @@ -1943,12 +1944,12 @@ int generic_map_lookup_batch(struct bpf_map *map, goto free_buf; } + cp++; +next_key: if (!prev_key) prev_key = buf_prevkey; swap(prev_key, key); - retry = MAP_LOOKUP_RETRIES; - cp++; cond_resched(); } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a0cab0d0252fa..d2ef289993f20 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2528,16 +2528,36 @@ static int cmp_subprogs(const void *a, const void *b) ((struct bpf_subprog_info *)b)->start; } +/* Find subprogram that contains instruction at 'off' */ +static struct bpf_subprog_info *find_containing_subprog(struct bpf_verifier_env *env, int off) +{ + struct bpf_subprog_info *vals = env->subprog_info; + int l, r, m; + + if (off >= env->prog->len || off < 0 || env->subprog_cnt == 0) + return NULL; + + l = 0; + r = env->subprog_cnt - 1; + while (l < r) { + m = l + (r - l + 1) / 2; + if (vals[m].start <= off) + l = m; + else + r = m - 1; + } + return &vals[l]; +} + +/* Find subprogram that starts exactly at 'off' */ static int find_subprog(struct bpf_verifier_env *env, int off) { struct bpf_subprog_info *p; - p = bsearch(&off, env->subprog_info, env->subprog_cnt, - sizeof(env->subprog_info[0]), cmp_subprogs); - if (!p) + p = find_containing_subprog(env, off); + if (!p || p->start != off) return -ENOENT; return p - env->subprog_info; - } static int add_subprog(struct bpf_verifier_env *env, int off) @@ -9811,6 +9831,8 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, verbose(env, "Func#%d ('%s') is global and assumed valid.\n", subprog, sub_name); + if (env->subprog_info[subprog].changes_pkt_data) + clear_all_pkt_pointers(env); /* mark global subprog for verifying after main prog */ subprog_aux(env, subprog)->called = true; clear_caller_saved_regs(env, caller->regs); @@ -16001,6 +16023,29 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char return 0; } +static void mark_subprog_changes_pkt_data(struct 
bpf_verifier_env *env, int off) +{ + struct bpf_subprog_info *subprog; + + subprog = find_containing_subprog(env, off); + subprog->changes_pkt_data = true; +} + +/* 't' is an index of a call-site. + * 'w' is a callee entry point. + * Eventually this function would be called when env->cfg.insn_state[w] == EXPLORED. + * Rely on DFS traversal order and absence of recursive calls to guarantee that + * callee's change_pkt_data marks would be correct at that moment. + */ +static void merge_callee_effects(struct bpf_verifier_env *env, int t, int w) +{ + struct bpf_subprog_info *caller, *callee; + + caller = find_containing_subprog(env, t); + callee = find_containing_subprog(env, w); + caller->changes_pkt_data |= callee->changes_pkt_data; +} + /* non-recursive DFS pseudo code * 1 procedure DFS-iterative(G,v): * 2 label v as discovered @@ -16134,6 +16179,7 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns, bool visit_callee) { int ret, insn_sz; + int w; insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1; ret = push_insn(t, t + insn_sz, FALLTHROUGH, env); @@ -16145,8 +16191,10 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns, mark_jmp_point(env, t + insn_sz); if (visit_callee) { + w = t + insns[t].imm + 1; mark_prune_point(env, t); - ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env); + merge_callee_effects(env, t, w); + ret = push_insn(t, w, BRANCH, env); } return ret; } @@ -16466,6 +16514,8 @@ static int visit_insn(int t, struct bpf_verifier_env *env) mark_prune_point(env, t); mark_jmp_point(env, t); } + if (bpf_helper_call(insn) && bpf_helper_changes_pkt_data(insn->imm)) + mark_subprog_changes_pkt_data(env, t); if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { struct bpf_kfunc_call_arg_meta meta; @@ -16600,6 +16650,7 @@ static int check_cfg(struct bpf_verifier_env *env) } } ret = 0; /* cfg looks good */ + env->prog->aux->changes_pkt_data = env->subprog_info[0].changes_pkt_data; err_free: kvfree(insn_state); @@ -20102,6 +20153,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) func[i]->aux->num_exentries = num_exentries; func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; func[i]->aux->exception_cb = env->subprog_info[i].is_exception_cb; + func[i]->aux->changes_pkt_data = env->subprog_info[i].changes_pkt_data; if (!i) func[i]->aux->exception_boundary = env->seen_exception; func[i] = bpf_int_jit_compile(func[i]); @@ -21276,6 +21328,13 @@ static int do_misc_fixups(struct bpf_verifier_env *env) if (subprogs[cur_subprog + 1].start == i + delta + 1) { subprogs[cur_subprog].stack_depth += stack_depth_extra; subprogs[cur_subprog].stack_extra = stack_depth_extra; + + stack_depth = subprogs[cur_subprog].stack_depth; + if (stack_depth > MAX_BPF_STACK && !prog->jit_requested) { + verbose(env, "stack size %d(extra %d) is too large\n", + stack_depth, stack_depth_extra); + return -EINVAL; + } cur_subprog++; stack_depth = subprogs[cur_subprog].stack_depth; stack_depth_extra = 0; @@ -21931,6 +21990,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, } if (tgt_prog) { struct bpf_prog_aux *aux = tgt_prog->aux; + bool tgt_changes_pkt_data; if (bpf_prog_is_dev_bound(prog->aux) && !bpf_prog_dev_bound_match(prog, tgt_prog)) { @@ -21965,6 +22025,14 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, "Extension programs should be JITed\n"); return -EINVAL; } + tgt_changes_pkt_data = aux->func + ? 
aux->func[subprog]->aux->changes_pkt_data + : aux->changes_pkt_data; + if (prog->aux->changes_pkt_data && !tgt_changes_pkt_data) { + bpf_log(log, + "Extension program changes packet data, while original does not\n"); + return -EINVAL; + } } if (!tgt_prog->jited) { bpf_log(log, "Can attach to only JITed progs\n"); @@ -22430,10 +22498,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 if (ret < 0) goto skip_full_check; - ret = check_attach_btf_id(env); - if (ret) - goto skip_full_check; - ret = resolve_pseudo_ldimm64(env); if (ret < 0) goto skip_full_check; @@ -22448,6 +22512,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 if (ret < 0) goto skip_full_check; + ret = check_attach_btf_id(env); + if (ret) + goto skip_full_check; + ret = mark_fastcall_patterns(env); if (ret < 0) goto skip_full_check; diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 216535e055e11..4378f3eff25d2 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -5909,6 +5909,12 @@ static void kill_css(struct cgroup_subsys_state *css) if (css->flags & CSS_DYING) return; + /* + * Call css_killed(), if defined, before setting the CSS_DYING flag + */ + if (css->ss->css_killed) + css->ss->css_killed(css); + css->flags |= CSS_DYING; /*
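kill_css() now invokes the new optional css_killed() callback exactly once, before CSS_DYING becomes visible, giving a controller a point to unwind state while the css is still formally alive; cpuset wires it up below to reset partitions. A minimal sketch of a controller using the hook (the demo_* names are hypothetical, not from the patch):

	static void demo_css_killed(struct cgroup_subsys_state *css)
	{
		/* runs once per css, before CSS_DYING is set */
		pr_info("css %p is being killed\n", css);
	}

	struct cgroup_subsys demo_cgrp_subsys = {
		.css_alloc	= demo_css_alloc,	/* hypothetical */
		.css_killed	= demo_css_killed,	/* new optional callback */
		.css_free	= demo_css_free,	/* hypothetical */
	};

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 24ece85fd3b12..839f88ba17f7d 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -84,9 +84,19 @@ static bool have_boot_isolcpus; static struct list_head remote_children; /* - * A flag to force sched domain rebuild at the end of an operation while - * inhibiting it in the intermediate stages when set. Currently it is only - * set in hotplug code. + * A flag to force sched domain rebuild at the end of an operation. + * It can be set in + * - update_partition_sd_lb() + * - remote_partition_check() + * - update_cpumasks_hier() + * - cpuset_update_flag() + * - cpuset_hotplug_update_tasks() + * - cpuset_handle_hotplug() + * + * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock. + * + * Note that update_relax_domain_level() in cpuset-v1.c can still call + * rebuild_sched_domains_locked() directly without using this flag.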
*/ static bool force_sd_rebuild; @@ -283,6 +293,12 @@ static inline void dec_attach_in_progress(struct cpuset *cs) mutex_unlock(&cpuset_mutex); } +static inline bool cpuset_v2(void) +{ + return !IS_ENABLED(CONFIG_CPUSETS_V1) || + cgroup_subsys_on_dfl(cpuset_cgrp_subsys); +} + /* * Cgroup v2 behavior is used on the "cpus" and "mems" control files when * on default hierarchy or when the cpuset_v2_mode flag is set by mounting @@ -293,7 +309,7 @@ static inline void dec_attach_in_progress(struct cpuset *cs) */ static inline bool is_in_v2_mode(void) { - return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || + return cpuset_v2() || (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE); } @@ -728,7 +744,7 @@ static int generate_sched_domains(cpumask_var_t **domains, int nslot; /* next empty doms[] struct cpumask slot */ struct cgroup_subsys_state *pos_css; bool root_load_balance = is_sched_load_balance(&top_cpuset); - bool cgrpv2 = cgroup_subsys_on_dfl(cpuset_cgrp_subsys); + bool cgrpv2 = cpuset_v2(); int nslot_update; doms = NULL; @@ -998,6 +1014,7 @@ void rebuild_sched_domains_locked(void) lockdep_assert_cpus_held(); lockdep_assert_held(&cpuset_mutex); + force_sd_rebuild = false; /* * If we have raced with CPU hotplug, return early to avoid @@ -1172,8 +1189,8 @@ static void update_partition_sd_lb(struct cpuset *cs, int old_prs) clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); } - if (rebuild_domains && !force_sd_rebuild) - rebuild_sched_domains_locked(); + if (rebuild_domains) + cpuset_force_rebuild(); } /* @@ -1195,7 +1212,7 @@ static void reset_partition_data(struct cpuset *cs) { struct cpuset *parent = parent_cs(cs); - if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) + if (!cpuset_v2()) return; lockdep_assert_held(&callback_lock); @@ -1383,6 +1400,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs, list_add(&cs->remote_sibling, &remote_children); spin_unlock_irq(&callback_lock); update_unbound_workqueue_cpumask(isolcpus_updated); + cs->prs_err = 0; /* * Propagate changes in top_cpuset's effective_cpus down the hierarchy. @@ -1413,9 +1431,11 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp) list_del_init(&cs->remote_sibling); isolcpus_updated = partition_xcpus_del(cs->partition_root_state, NULL, tmp->new_cpus); - cs->partition_root_state = -cs->partition_root_state; - if (!cs->prs_err) - cs->prs_err = PERR_INVCPUS; + if (cs->prs_err) + cs->partition_root_state = -cs->partition_root_state; + else + cs->partition_root_state = PRS_MEMBER; + reset_partition_data(cs); spin_unlock_irq(&callback_lock); update_unbound_workqueue_cpumask(isolcpus_updated); @@ -1448,8 +1468,10 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask, WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus)); - if (cpumask_empty(newmask)) + if (cpumask_empty(newmask)) { + cs->prs_err = PERR_CPUSEMPTY; goto invalidate; + } adding = cpumask_andnot(tmp->addmask, newmask, cs->effective_xcpus); deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, newmask); @@ -1459,10 +1481,15 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask, * not allocated to other partitions and there are effective_cpus * left in the top cpuset.
*/ - if (adding && (!capable(CAP_SYS_ADMIN) || - cpumask_intersects(tmp->addmask, subpartitions_cpus) || - cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))) - goto invalidate; + if (adding) { + if (!capable(CAP_SYS_ADMIN)) + cs->prs_err = PERR_ACCESS; + else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) || + cpumask_subset(top_cpuset.effective_cpus, tmp->addmask)) + cs->prs_err = PERR_NOCPUS; + if (cs->prs_err) + goto invalidate; + } spin_lock_irq(&callback_lock); if (adding) @@ -1520,8 +1547,8 @@ static void remote_partition_check(struct cpuset *cs, struct cpumask *newmask, remote_partition_disable(child, tmp); disable_cnt++; } - if (disable_cnt && !force_sd_rebuild) - rebuild_sched_domains_locked(); + if (disable_cnt) + cpuset_force_rebuild(); } /* @@ -1578,7 +1605,7 @@ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus) * The partcmd_update command is used by update_cpumasks_hier() with newmask * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used * by update_cpumask() with NULL newmask. In both cases, the callers won't - * check for error and so partition_root_state and prs_error will be updated + * check for error and so partition_root_state and prs_err will be updated * directly. */ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd, @@ -1656,9 +1683,9 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd, if (nocpu) return PERR_NOCPUS; - cpumask_copy(tmp->delmask, xcpus); - deleting = true; - subparts_delta++; + deleting = cpumask_and(tmp->delmask, xcpus, parent->effective_xcpus); + if (deleting) + subparts_delta++; new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED; } else if (cmd == partcmd_disable) { /* @@ -1930,12 +1957,6 @@ static void compute_partition_effective_cpumask(struct cpuset *cs, rcu_read_unlock(); } -/* - * update_cpumasks_hier() flags - */ -#define HIER_CHECKALL 0x01 /* Check all cpusets with no skipping */ -#define HIER_NO_SD_REBUILD 0x02 /* Don't rebuild sched domains */ - /* * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree * @cs: the cpuset to consider @@ -1950,7 +1971,7 @@ static void compute_partition_effective_cpumask(struct cpuset *cs, * Called with cpuset_mutex held */ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, - int flags) + bool force) { struct cpuset *cp; struct cgroup_subsys_state *pos_css; @@ -2015,12 +2036,12 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, * Skip the whole subtree if * 1) the cpumask remains the same, * 2) has no partition root state, - * 3) HIER_CHECKALL flag not set, and + * 3) force flag not set, and * 4) for v2 load balance state same as its parent. */ - if (!cp->partition_root_state && !(flags & HIER_CHECKALL) && + if (!cp->partition_root_state && !force && cpumask_equal(tmp->new_cpus, cp->effective_cpus) && - (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || + (!cpuset_v2() || (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) { pos_css = css_rightmost_descendant(pos_css); continue; @@ -2094,8 +2115,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, * from parent if current cpuset isn't a valid partition root * and their load balance states differ. 
*/ - if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && - !is_partition_valid(cp) && + if (cpuset_v2() && !is_partition_valid(cp) && (is_sched_load_balance(parent) != is_sched_load_balance(cp))) { if (is_sched_load_balance(parent)) set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags); @@ -2111,8 +2131,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, */ if (!cpumask_empty(cp->cpus_allowed) && is_sched_load_balance(cp) && - (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || - is_partition_valid(cp))) + (!cpuset_v2() || is_partition_valid(cp))) need_rebuild_sched_domains = true; rcu_read_lock(); @@ -2120,9 +2139,8 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, } rcu_read_unlock(); - if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD) && - !force_sd_rebuild) - rebuild_sched_domains_locked(); + if (need_rebuild_sched_domains) + cpuset_force_rebuild(); } /** @@ -2149,9 +2167,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, * directly. * * The update_cpumasks_hier() function may sleep. So we have to - * release the RCU read lock before calling it. HIER_NO_SD_REBUILD - * flag is used to suppress rebuild of sched domains as the callers - * will take care of that. + * release the RCU read lock before calling it. */ rcu_read_lock(); cpuset_for_each_child(sibling, pos_css, parent) { @@ -2167,7 +2183,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, continue; rcu_read_unlock(); - update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD); + update_cpumasks_hier(sibling, tmp, false); rcu_read_lock(); css_put(&sibling->css); } @@ -2187,7 +2203,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, struct tmpmasks tmp; struct cpuset *parent = parent_cs(cs); bool invalidate = false; - int hier_flags = 0; + bool force = false; int old_prs = cs->partition_root_state; /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ @@ -2248,12 +2264,11 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, * Check all the descendants in update_cpumasks_hier() if * effective_xcpus is to be changed. */ - if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus)) - hier_flags = HIER_CHECKALL; + force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus); retval = validate_change(cs, trialcs); - if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { + if ((retval == -EINVAL) && cpuset_v2()) { struct cgroup_subsys_state *css; struct cpuset *cp; @@ -2317,7 +2332,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, spin_unlock_irq(&callback_lock); /* effective_cpus/effective_xcpus will be updated here */ - update_cpumasks_hier(cs, &tmp, hier_flags); + update_cpumasks_hier(cs, &tmp, force); /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */ if (cs->partition_root_state) @@ -2342,7 +2357,7 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, struct tmpmasks tmp; struct cpuset *parent = parent_cs(cs); bool invalidate = false; - int hier_flags = 0; + bool force = false; int old_prs = cs->partition_root_state; if (!*buf) { @@ -2365,8 +2380,7 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, * Check all the descendants in update_cpumasks_hier() if * effective_xcpus is to be changed. 
*/ - if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus)) - hier_flags = HIER_CHECKALL; + force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus); retval = validate_change(cs, trialcs); if (retval) @@ -2419,8 +2433,8 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, * of the subtree when it is a valid partition root or effective_xcpus * is updated. */ - if (is_partition_valid(cs) || hier_flags) - update_cpumasks_hier(cs, &tmp, hier_flags); + if (is_partition_valid(cs) || force) + update_cpumasks_hier(cs, &tmp, force); /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */ if (cs->partition_root_state) @@ -2745,9 +2759,12 @@ int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, cs->flags = trialcs->flags; spin_unlock_irq(&callback_lock); - if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed && - !force_sd_rebuild) - rebuild_sched_domains_locked(); + if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) { + if (cpuset_v2()) + cpuset_force_rebuild(); + else + rebuild_sched_domains_locked(); + } if (spread_flag_changed) cpuset1_update_tasks_flags(cs); @@ -2861,12 +2878,14 @@ static int update_prstate(struct cpuset *cs, int new_prs) update_unbound_workqueue_cpumask(new_xcpus_state); /* Force update if switching back to member */ - update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0); + update_cpumasks_hier(cs, &tmpmask, !new_prs); /* Update sched domains and load balance flag */ update_partition_sd_lb(cs, old_prs); notify_partition_change(cs, old_prs); + if (force_sd_rebuild) + rebuild_sched_domains_locked(); free_cpumasks(NULL, &tmpmask); return 0; } @@ -2927,8 +2946,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset) * migration permission derives from hierarchy ownership in * cgroup_procs_write_permission()). */ - if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || - (cpus_updated || mems_updated)) { + if (!cpuset_v2() || (cpus_updated || mems_updated)) { ret = security_task_setscheduler(task); if (ret) goto out_unlock; @@ -3042,8 +3060,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) * in effective cpus and mems. In that case, we can optimize out * by skipping the task iteration and update. 
*/ - if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && - !cpus_updated && !mems_updated) { + if (cpuset_v2() && !cpus_updated && !mems_updated) { cpuset_attach_nodemask_to = cs->effective_mems; goto out; } @@ -3137,6 +3154,8 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of, } free_cpuset(trialcs); + if (force_sd_rebuild) + rebuild_sched_domains_locked(); out_unlock: mutex_unlock(&cpuset_mutex); cpus_read_unlock(); @@ -3366,7 +3385,7 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css) INIT_LIST_HEAD(&cs->remote_sibling); /* Set CS_MEMORY_MIGRATE for default hierarchy */ - if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) + if (cpuset_v2()) __set_bit(CS_MEMORY_MIGRATE, &cs->flags); return &cs->css; @@ -3393,8 +3412,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) /* * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated */ - if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && - !is_sched_load_balance(parent)) + if (cpuset_v2() && !is_sched_load_balance(parent)) clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); cpuset_inc(); @@ -3461,11 +3479,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css) cpus_read_lock(); mutex_lock(&cpuset_mutex); - if (is_partition_valid(cs)) - update_prstate(cs, 0); - - if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && - is_sched_load_balance(cs)) + if (!cpuset_v2() && is_sched_load_balance(cs)) cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); cpuset_dec(); @@ -3475,6 +3489,22 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css) cpus_read_unlock(); } +static void cpuset_css_killed(struct cgroup_subsys_state *css) +{ + struct cpuset *cs = css_cs(css); + + cpus_read_lock(); + mutex_lock(&cpuset_mutex); + + /* Reset valid partition back to member */ + if (is_partition_valid(cs)) + update_prstate(cs, PRS_MEMBER); + + mutex_unlock(&cpuset_mutex); + cpus_read_unlock(); + +} + static void cpuset_css_free(struct cgroup_subsys_state *css) { struct cpuset *cs = css_cs(css); @@ -3596,6 +3626,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = { .css_alloc = cpuset_css_alloc, .css_online = cpuset_css_online, .css_offline = cpuset_css_offline, + .css_killed = cpuset_css_killed, .css_free = cpuset_css_free, .can_attach = cpuset_can_attach, .cancel_attach = cpuset_cancel_attach, @@ -3726,6 +3757,7 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) if (remote && cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)) { + cs->prs_err = PERR_HOTPLUG; remote_partition_disable(cs, tmp); compute_effective_cpumask(&new_cpus, cs, parent); remote = false; @@ -3879,11 +3911,9 @@ static void cpuset_handle_hotplug(void) rcu_read_unlock(); } - /* rebuild sched domains if cpus_allowed has changed */ - if (force_sd_rebuild) { - force_sd_rebuild = false; + /* rebuild sched domains if necessary */ + if (force_sd_rebuild) rebuild_sched_domains_cpuslocked(); - } free_cpumasks(NULL, ptmp); } diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index ce295b73c0a36..3e01781aeb7bd 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -444,6 +444,7 @@ static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat, #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum += src_bstat->forceidle_sum; #endif + dst_bstat->ntime += src_bstat->ntime; } static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat, @@ -455,6 +456,7 @@ static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat, #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum -= src_bstat->forceidle_sum; #endif + 
dst_bstat->ntime -= src_bstat->ntime; } static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu) @@ -534,8 +536,10 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp, rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags); switch (index) { - case CPUTIME_USER: case CPUTIME_NICE: + rstatc->bstat.ntime += delta_exec; + fallthrough; + case CPUTIME_USER: rstatc->bstat.cputime.utime += delta_exec; break; case CPUTIME_SYSTEM: @@ -590,6 +594,7 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat) #ifdef CONFIG_SCHED_CORE bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE]; #endif + bstat->ntime += cpustat[CPUTIME_NICE]; } } @@ -607,32 +612,33 @@ static void cgroup_force_idle_show(struct seq_file *seq, struct cgroup_base_stat void cgroup_base_stat_cputime_show(struct seq_file *seq) { struct cgroup *cgrp = seq_css(seq)->cgroup; - u64 usage, utime, stime; + struct cgroup_base_stat bstat; if (cgroup_parent(cgrp)) { cgroup_rstat_flush_hold(cgrp); - usage = cgrp->bstat.cputime.sum_exec_runtime; + bstat = cgrp->bstat; cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime, - &utime, &stime); + &bstat.cputime.utime, &bstat.cputime.stime); cgroup_rstat_flush_release(cgrp); } else { - /* cgrp->bstat of root is not actually used, reuse it */ - root_cgroup_cputime(&cgrp->bstat); - usage = cgrp->bstat.cputime.sum_exec_runtime; - utime = cgrp->bstat.cputime.utime; - stime = cgrp->bstat.cputime.stime; + root_cgroup_cputime(&bstat); } - do_div(usage, NSEC_PER_USEC); - do_div(utime, NSEC_PER_USEC); - do_div(stime, NSEC_PER_USEC); + do_div(bstat.cputime.sum_exec_runtime, NSEC_PER_USEC); + do_div(bstat.cputime.utime, NSEC_PER_USEC); + do_div(bstat.cputime.stime, NSEC_PER_USEC); + do_div(bstat.ntime, NSEC_PER_USEC); seq_printf(seq, "usage_usec %llu\n" - "user_usec %llu\n" - "system_usec %llu\n", - usage, utime, stime); - - cgroup_force_idle_show(seq, &cgrp->bstat); + "user_usec %llu\n" + "system_usec %llu\n" + "nice_usec %llu\n", + bstat.cputime.sum_exec_runtime, + bstat.cputime.utime, + bstat.cputime.stime, + bstat.ntime); + + cgroup_force_idle_show(seq, &bstat); } /* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */ diff --git a/kernel/cpu.c b/kernel/cpu.c index 9ee6c9145b1df..cf02a629f9902 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1452,11 +1452,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, out: cpus_write_unlock(); - /* - * Do post unplug cleanup. This is still protected against - * concurrent CPU hotplug via cpu_add_remove_lock. 
- */ - lockup_detector_cleanup(); arch_smt_update(); return ret; } diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 5b4e6d3bf7bcc..b8fe0b3d0ffb6 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -584,6 +584,22 @@ int dma_direct_supported(struct device *dev, u64 mask) return mask >= phys_to_dma_unencrypted(dev, min_mask); } +static const struct bus_dma_region *dma_find_range(struct device *dev, + unsigned long start_pfn) +{ + const struct bus_dma_region *m; + + for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) { + unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start); + + if (start_pfn >= cpu_start_pfn && + start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) + return m; + } + + return NULL; +} + /* * To check whether all ram resource ranges are covered by dma range map * Returns 0 when further check is needed */ @@ -593,20 +609,12 @@ static int check_ram_in_range_map(unsigned long start_pfn, unsigned long nr_pages, void *data) { unsigned long end_pfn = start_pfn + nr_pages; - const struct bus_dma_region *bdr = NULL; - const struct bus_dma_region *m; struct device *dev = data; while (start_pfn < end_pfn) { - for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) { - unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start); + const struct bus_dma_region *bdr; - if (start_pfn >= cpu_start_pfn && - start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) { - bdr = m; - break; - } - } + bdr = dma_find_range(dev, start_pfn); if (!bdr) return 1;
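dma_find_range() factors the inner scan out of check_ram_in_range_map(); note that the loop bounds on the sentinel entry terminating the map (an entry whose size is zero) rather than on an element count. A stand-alone sketch of the same walk over a sentinel-terminated table (simplified types, not the kernel structures):

	struct range_entry { unsigned long start_pfn, nr_pages; };

	/* Sketch: return the entry covering pfn, or NULL; the table is
	 * assumed to end with a zero-size entry, like dma_range_map. */
	static const struct range_entry *find_range(const struct range_entry *map,
						    unsigned long pfn)
	{
		const struct range_entry *m;

		for (m = map; m->nr_pages; m++)	/* zero-size entry terminates */
			if (pfn >= m->start_pfn && pfn - m->start_pfn < m->nr_pages)
				return m;
		return NULL;	/* pfn not covered by any range */
	}

diff --git a/kernel/events/core.c b/kernel/events/core.c index 501d8c2fedff4..97af53c43608e 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2146,7 +2146,7 @@ static void perf_put_aux_event(struct perf_event *event) static bool perf_need_aux_event(struct perf_event *event) { - return !!event->attr.aux_output || !!event->attr.aux_sample_size; + return event->attr.aux_output || has_aux_action(event); } static int perf_get_aux_event(struct perf_event *event, @@ -2171,6 +2171,10 @@ static int perf_get_aux_event(struct perf_event *event, !perf_aux_output_match(event, group_leader)) return 0; + if ((event->attr.aux_pause || event->attr.aux_resume) && + !(group_leader->pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE)) + return 0; + if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) return 0; @@ -2407,6 +2411,7 @@ ctx_time_update_event(struct perf_event_context *ctx, struct perf_event *event) #define DETACH_GROUP 0x01UL #define DETACH_CHILD 0x02UL #define DETACH_DEAD 0x04UL +#define DETACH_EXIT 0x08UL /* * Cross CPU call to remove a performance event @@ -2421,6 +2426,7 @@ __perf_remove_from_context(struct perf_event *event, void *info) { struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx; + enum perf_event_state state = PERF_EVENT_STATE_OFF; unsigned long flags = (unsigned long)info; ctx_time_update(cpuctx, ctx); @@ -2429,16 +2435,19 @@ __perf_remove_from_context(struct perf_event *event, * Ensure event_sched_out() switches to OFF, at the very least * this avoids raising perf_pending_task() at this time.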
*/ - if (flags & DETACH_DEAD) + if (flags & DETACH_EXIT) + state = PERF_EVENT_STATE_EXIT; + if (flags & DETACH_DEAD) { event->pending_disable = 1; + state = PERF_EVENT_STATE_DEAD; + } event_sched_out(event, ctx); + perf_event_set_state(event, min(event->state, state)); if (flags & DETACH_GROUP) perf_group_detach(event); if (flags & DETACH_CHILD) perf_child_detach(event); list_del_event(event, ctx); - if (flags & DETACH_DEAD) - event->state = PERF_EVENT_STATE_DEAD; if (!pmu_ctx->nr_events) { pmu_ctx->rotate_necessary = 0; @@ -4957,7 +4966,7 @@ static struct perf_event_pmu_context * find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx, struct perf_event *event) { - struct perf_event_pmu_context *new = NULL, *epc; + struct perf_event_pmu_context *new = NULL, *pos = NULL, *epc; void *task_ctx_data = NULL; if (!ctx->task) { @@ -5014,12 +5023,19 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx, atomic_inc(&epc->refcount); goto found_epc; } + /* Make sure the pmu_ctx_list is sorted by PMU type: */ + if (!pos && epc->pmu->type > pmu->type) + pos = epc; } epc = new; new = NULL; - list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list); + if (!pos) + list_add_tail(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list); + else + list_add(&epc->pmu_ctx_entry, pos->pmu_ctx_entry.prev); + epc->ctx = ctx; found_epc: @@ -5246,6 +5262,8 @@ static int exclusive_event_init(struct perf_event *event) return -EBUSY; } + event->attach_state |= PERF_ATTACH_EXCLUSIVE; + return 0; } @@ -5253,14 +5271,13 @@ static void exclusive_event_destroy(struct perf_event *event) { struct pmu *pmu = event->pmu; - if (!is_exclusive_pmu(pmu)) - return; - /* see comment in exclusive_event_init() */ if (event->attach_state & PERF_ATTACH_TASK) atomic_dec(&pmu->exclusive_cnt); else atomic_inc(&pmu->exclusive_cnt); + + event->attach_state &= ~PERF_ATTACH_EXCLUSIVE; } static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) @@ -5295,35 +5312,58 @@ static bool exclusive_event_installable(struct perf_event *event, static void perf_addr_filters_splice(struct perf_event *event, struct list_head *head); -static void perf_pending_task_sync(struct perf_event *event) +/* vs perf_event_alloc() error */ +static void __free_event(struct perf_event *event) { - struct callback_head *head = &event->pending_task; + if (event->attach_state & PERF_ATTACH_CALLCHAIN) + put_callchain_buffers(); + + kfree(event->addr_filter_ranges); + + if (event->attach_state & PERF_ATTACH_EXCLUSIVE) + exclusive_event_destroy(event); + + if (is_cgroup_event(event)) + perf_detach_cgroup(event); + + if (event->destroy) + event->destroy(event); - if (!event->pending_work) - return; /* - * If the task is queued to the current task's queue, we - * obviously can't wait for it to complete. Simply cancel it. + * Must be after ->destroy(), due to uprobe_perf_close() using + * hw.target. */ - if (task_work_cancel(current, head)) { - event->pending_work = 0; - local_dec(&event->ctx->nr_no_switch_fast); - return; + if (event->hw.target) + put_task_struct(event->hw.target); + + if (event->pmu_ctx) { + /* + * put_pmu_ctx() needs an event->ctx reference, because of + * epc->ctx. + */ + WARN_ON_ONCE(!event->ctx); + WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx); + put_pmu_ctx(event->pmu_ctx); } /* - * All accesses related to the event are within the same RCU section in - * perf_pending_task(). The RCU grace period before the event is freed - * will make sure all those accesses are complete by then. 
+ * perf_event_free_task() relies on put_ctx() being 'last', in + * particular all task references must be cleaned up. */ - rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE); + if (event->ctx) + put_ctx(event->ctx); + + if (event->pmu) + module_put(event->pmu->module); + + call_rcu(&event->rcu_head, free_event_rcu); } +/* vs perf_event_alloc() success */ static void _free_event(struct perf_event *event) { irq_work_sync(&event->pending_irq); irq_work_sync(&event->pending_disable_irq); - perf_pending_task_sync(event); unaccount_event(event); @@ -5341,42 +5381,10 @@ static void _free_event(struct perf_event *event) mutex_unlock(&event->mmap_mutex); } - if (is_cgroup_event(event)) - perf_detach_cgroup(event); - - if (!event->parent) { - if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) - put_callchain_buffers(); - } - perf_event_free_bpf_prog(event); perf_addr_filters_splice(event, NULL); - kfree(event->addr_filter_ranges); - if (event->destroy) - event->destroy(event); - - /* - * Must be after ->destroy(), due to uprobe_perf_close() using - * hw.target. - */ - if (event->hw.target) - put_task_struct(event->hw.target); - - if (event->pmu_ctx) - put_pmu_ctx(event->pmu_ctx); - - /* - * perf_event_free_task() relies on put_ctx() being 'last', in particular - * all task references must be cleaned up. - */ - if (event->ctx) - put_ctx(event->ctx); - - exclusive_event_destroy(event); - module_put(event->pmu->module); - - call_rcu(&event->rcu_head, free_event_rcu); + __free_event(event); } /* @@ -5448,10 +5456,17 @@ static void perf_remove_from_owner(struct perf_event *event) static void put_event(struct perf_event *event) { + struct perf_event *parent; + if (!atomic_long_dec_and_test(&event->refcount)) return; + parent = event->parent; _free_event(event); + + /* Matches the refcount bump in inherit_event() */ + if (parent) + put_event(parent); } /* @@ -5535,11 +5550,6 @@ int perf_event_release_kernel(struct perf_event *event) if (tmp == child) { perf_remove_from_context(child, DETACH_GROUP); list_move(&child->child_list, &free_list); - /* - * This matches the refcount bump in inherit_event(); - * this can't be the last reference. - */ - put_event(event); } else { var = &ctx->refcount; } @@ -5565,7 +5575,8 @@ int perf_event_release_kernel(struct perf_event *event) void *var = &child->ctx->refcount; list_del(&child->child_list); - free_event(child); + /* Last reference unless ->pending_task work is pending */ + put_event(child); /* * Wake any perf_event_free_task() waiting for this event to be @@ -5576,7 +5587,11 @@ int perf_event_release_kernel(struct perf_event *event) } no_ctx: - put_event(event); /* Must be the 'last' reference */ + /* + * Last reference unless ->pending_task work is pending on this event + * or any of its children. 
+ */ + put_event(event); return 0; } EXPORT_SYMBOL_GPL(perf_event_release_kernel); @@ -5969,14 +5984,15 @@ static int _perf_event_period(struct perf_event *event, u64 value) if (!value) return -EINVAL; - if (event->attr.freq && value > sysctl_perf_event_sample_rate) - return -EINVAL; - - if (perf_event_check_period(event, value)) - return -EINVAL; - - if (!event->attr.freq && (value & (1ULL << 63))) - return -EINVAL; + if (event->attr.freq) { + if (value > sysctl_perf_event_sample_rate) + return -EINVAL; + } else { + if (perf_event_check_period(event, value)) + return -EINVAL; + if (value & (1ULL << 63)) + return -EINVAL; + } event_function_call(event, __perf_event_period, &value); @@ -6960,12 +6976,6 @@ static void perf_pending_task(struct callback_head *head) struct perf_event *event = container_of(head, struct perf_event, pending_task); int rctx; - /* - * All accesses to the event must belong to the same implicit RCU read-side - * critical section as the ->pending_work reset. See comment in - * perf_pending_task_sync(). - */ - rcu_read_lock(); /* * If we 'fail' here, that's OK, it means recursion is already disabled • and we won't recurse 'further'. @@ -6976,9 +6986,8 @@ static void perf_pending_task(struct callback_head *head) event->pending_work = 0; perf_sigtrap(event); local_dec(&event->ctx->nr_no_switch_fast); - rcuwait_wake_up(&event->pending_work_wait); } - rcu_read_unlock(); + put_event(event); if (rctx >= 0) perf_swevent_put_recursion_context(rctx); @@ -8016,6 +8025,49 @@ void perf_prepare_header(struct perf_event_header *header, WARN_ON_ONCE(header->size & 7); }
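The helpers below back the new aux_pause/aux_resume/aux_start_paused bits in perf_event_attr: an overflowing sampling event can stop and restart the AUX (e.g. instruction trace) leader of its group, so the trace buffer only covers regions of interest. A hedged user-space sketch of configuring the pausing event; the group setup with the AUX leader (which must advertise PERF_PMU_CAP_AUX_PAUSE) is omitted:

	#include <linux/perf_event.h>

	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_INSTRUCTIONS,
		.sample_period	= 100000,
		.aux_pause	= 1,	/* pause the group's AUX leader on overflow */
	};
	/* attr would be passed to perf_event_open() with the AUX event
	 * (e.g. an instruction-trace PMU) as group leader; a sibling with
	 * .aux_resume = 1 would restart tracing. */

+static void __perf_event_aux_pause(struct perf_event *event, bool pause) +{ + if (pause) { + if (!event->hw.aux_paused) { + event->hw.aux_paused = 1; + event->pmu->stop(event, PERF_EF_PAUSE); + } + } else { + if (event->hw.aux_paused) { + event->hw.aux_paused = 0; + event->pmu->start(event, PERF_EF_RESUME); + } + } +} + +static void perf_event_aux_pause(struct perf_event *event, bool pause) +{ + struct perf_buffer *rb; + + if (WARN_ON_ONCE(!event)) + return; + + rb = ring_buffer_get(event); + if (!rb) + return; + + scoped_guard (irqsave) { + /* + * Guard against self-recursion here. Another event could trip + * this same path from NMI context.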
+ */ + if (READ_ONCE(rb->aux_in_pause_resume)) + break; + + WRITE_ONCE(rb->aux_in_pause_resume, 1); + barrier(); + __perf_event_aux_pause(event, pause); + barrier(); + WRITE_ONCE(rb->aux_in_pause_resume, 0); + } + ring_buffer_put(rb); +} + static __always_inline int __perf_event_output(struct perf_event *event, struct perf_sample_data *data, @@ -8233,7 +8285,8 @@ void perf_event_exec(void) perf_event_enable_on_exec(ctx); perf_event_remove_on_exec(ctx); - perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true); + scoped_guard(rcu) + perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true); perf_unpin_context(ctx); put_ctx(ctx); @@ -9818,9 +9871,12 @@ static int __perf_event_overflow(struct perf_event *event, ret = __perf_event_account_interrupt(event, throttle); + if (event->attr.aux_pause) + perf_event_aux_pause(event->aux_event, true); + if (event->prog && event->prog->type == BPF_PROG_TYPE_PERF_EVENT && !bpf_overflow_handler(event, data, regs)) - return ret; + goto out; /* * XXX event_limit might not quite work as expected on inherited @@ -9854,6 +9910,7 @@ static int __perf_event_overflow(struct perf_event *event, !task_work_add(current, &event->pending_task, notify_mode)) { event->pending_work = pending_id; local_inc(&event->ctx->nr_no_switch_fast); + WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount)); event->pending_addr = 0; if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR)) @@ -9882,6 +9939,9 @@ static int __perf_event_overflow(struct perf_event *event, event->pending_wakeup = 1; irq_work_queue(&event->pending_irq); } +out: + if (event->attr.aux_resume) + perf_event_aux_pause(event->aux_event, false); return ret; } @@ -11728,6 +11788,21 @@ static int pmu_dev_alloc(struct pmu *pmu) static struct lock_class_key cpuctx_mutex; static struct lock_class_key cpuctx_lock; +static bool idr_cmpxchg(struct idr *idr, unsigned long id, void *old, void *new) +{ + void *tmp, *val = idr_find(idr, id); + + if (val != old) + return false; + + tmp = idr_replace(idr, new, id); + if (IS_ERR(tmp)) + return false; + + WARN_ON_ONCE(tmp != val); + return true; +} + int perf_pmu_register(struct pmu *pmu, const char *name, int type) { int cpu, ret, max = PERF_TYPE_MAX; @@ -11754,7 +11829,7 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type) if (type >= 0) max = type; - ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL); + ret = idr_alloc(&pmu_idr, NULL, max, 0, GFP_KERNEL); if (ret < 0) goto free_pdc; @@ -11762,6 +11837,7 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type) type = ret; pmu->type = type; + atomic_set(&pmu->exclusive_cnt, 0); if (pmu_bus_running && !pmu->dev) { ret = pmu_dev_alloc(pmu); @@ -11810,14 +11886,22 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type) if (!pmu->event_idx) pmu->event_idx = perf_event_idx_default; + /* + * Now that the PMU is complete, make it visible to perf_try_init_event(). 
+ */ + if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu)) + goto free_context; list_add_rcu(&pmu->entry, &pmus); - atomic_set(&pmu->exclusive_cnt, 0); + ret = 0; unlock: mutex_unlock(&pmus_lock); return ret; +free_context: + free_percpu(pmu->cpu_pmu_context); + free_dev: if (pmu->dev && pmu->dev != PMU_NULL_DEV) { device_del(pmu->dev); @@ -11837,6 +11921,8 @@ void perf_pmu_unregister(struct pmu *pmu) { mutex_lock(&pmus_lock); list_del_rcu(&pmu->entry); + idr_remove(&pmu_idr, pmu->type); + mutex_unlock(&pmus_lock); /* * We dereference the pmu list under both SRCU and regular RCU, so @@ -11846,7 +11932,6 @@ void perf_pmu_unregister(struct pmu *pmu) synchronize_rcu(); free_percpu(pmu->pmu_disable_count); - idr_remove(&pmu_idr, pmu->type); if (pmu_bus_running && pmu->dev && pmu->dev != PMU_NULL_DEV) { if (pmu->nr_addr_filters) device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); @@ -11854,7 +11939,6 @@ void perf_pmu_unregister(struct pmu *pmu) put_device(pmu->dev); } free_pmu_context(pmu); - mutex_unlock(&pmus_lock); } EXPORT_SYMBOL_GPL(perf_pmu_unregister); @@ -11923,8 +12007,10 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) event->destroy(event); } - if (ret) + if (ret) { + event->pmu = NULL; module_put(pmu->module); + } return ret; } @@ -12173,7 +12259,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, init_irq_work(&event->pending_irq, perf_pending_irq); event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable); init_task_work(&event->pending_task, perf_pending_task); - rcuwait_init(&event->pending_work_wait); mutex_init(&event->mmap_mutex); raw_spin_lock_init(&event->addr_filters.lock); @@ -12252,7 +12337,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, * See perf_output_read(). */ if (has_inherit_and_sample_read(attr) && !(attr->sample_type & PERF_SAMPLE_TID)) - goto err_ns; + goto err; if (!has_branch_stack(event)) event->attr.branch_sample_type = 0; @@ -12260,7 +12345,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, pmu = perf_init_event(event); if (IS_ERR(pmu)) { err = PTR_ERR(pmu); - goto err_ns; + goto err; } /* @@ -12270,24 +12355,38 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, */ if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) { err = -EINVAL; - goto err_pmu; + goto err; } if (event->attr.aux_output && - !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) { + (!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT) || + event->attr.aux_pause || event->attr.aux_resume)) { err = -EOPNOTSUPP; - goto err_pmu; + goto err; + } + + if (event->attr.aux_pause && event->attr.aux_resume) { + err = -EINVAL; + goto err; + } + + if (event->attr.aux_start_paused) { + if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE)) { + err = -EOPNOTSUPP; + goto err; + } + event->hw.aux_paused = 1; } if (cgroup_fd != -1) { err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); if (err) - goto err_pmu; + goto err; } err = exclusive_event_init(event); if (err) - goto err_pmu; + goto err; if (has_addr_filter(event)) { event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, @@ -12295,7 +12394,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, GFP_KERNEL); if (!event->addr_filter_ranges) { err = -ENOMEM; - goto err_per_task; + goto err; } /* @@ -12320,41 +12419,22 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { err = get_callchain_buffers(attr->sample_max_stack); if (err) - goto err_addr_filters; + goto err; + 
event->attach_state |= PERF_ATTACH_CALLCHAIN; } } err = security_perf_event_alloc(event); if (err) - goto err_callchain_buffer; + goto err; /* symmetric to unaccount_event() in _free_event() */ account_event(event); return event; -err_callchain_buffer: - if (!event->parent) { - if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) - put_callchain_buffers(); - } -err_addr_filters: - kfree(event->addr_filter_ranges); - -err_per_task: - exclusive_event_destroy(event); - -err_pmu: - if (is_cgroup_event(event)) - perf_detach_cgroup(event); - if (event->destroy) - event->destroy(event); - module_put(pmu->module); -err_ns: - if (event->hw.target) - put_task_struct(event->hw.target); - call_rcu(&event->rcu_head, free_event_rcu); - +err: + __free_event(event); return ERR_PTR(err); } @@ -13074,7 +13154,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, * Grouping is not supported for kernel events, neither is 'AUX', * make sure the caller's intentions are adjusted. */ - if (attr->aux_output) + if (attr->aux_output || attr->aux_action) return ERR_PTR(-EINVAL); event = perf_event_alloc(attr, cpu, task, NULL, NULL, @@ -13310,12 +13390,7 @@ perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) mutex_lock(&parent_event->child_mutex); } - perf_remove_from_context(event, detach_flags); - - raw_spin_lock_irq(&ctx->lock); - if (event->state > PERF_EVENT_STATE_EXIT) - perf_event_set_state(event, PERF_EVENT_STATE_EXIT); - raw_spin_unlock_irq(&ctx->lock); + perf_remove_from_context(event, detach_flags | DETACH_EXIT); /* * Child events can be freed. @@ -13326,8 +13401,7 @@ perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) * Kick perf_poll() for is_event_hup(); */ perf_event_wakeup(parent_event); - free_event(event); - put_event(parent_event); + put_event(event); return; } @@ -13445,13 +13519,11 @@ static void perf_free_event(struct perf_event *event, list_del_init(&event->child_list); mutex_unlock(&parent->child_mutex); - put_event(parent); - raw_spin_lock_irq(&ctx->lock); perf_group_detach(event); list_del_event(event, ctx); raw_spin_unlock_irq(&ctx->lock); - free_event(event); + put_event(event); } /* diff --git a/kernel/events/internal.h b/kernel/events/internal.h index e072d995d670f..249288d82b8dc 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -52,6 +52,7 @@ struct perf_buffer { void (*free_aux)(void *); refcount_t aux_refcount; int aux_in_sampling; + int aux_in_pause_resume; void **aux_pages; void *aux_priv; diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 4f46f688d0d49..bbfa22c0a1597 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -19,7 +19,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle) { - atomic_set(&handle->rb->poll, EPOLLIN); + atomic_set(&handle->rb->poll, EPOLLIN | EPOLLRDNORM); handle->event->pending_wakeup = 1; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 4b52cb2ae6d62..e60f5e71e35df 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -167,6 +167,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0); int err; struct mmu_notifier_range range; + pte_t pte; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, addr + PAGE_SIZE); @@ -186,6 +187,16 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, if (!page_vma_mapped_walk(&pvmw)) goto unlock; VM_BUG_ON_PAGE(addr != 
pvmw.address, old_page); + pte = ptep_get(pvmw.pte); + + /* + * Handle PFN swap PTEs, such as device-exclusive ones, that actually + * map pages: simply trigger GUP again to fix it up. + */ + if (unlikely(!pte_present(pte))) { + page_vma_mapped_walk_done(&pvmw); + goto unlock; + } if (new_page) { folio_get(new_folio); @@ -200,7 +211,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, inc_mm_counter(mm, MM_ANONPAGES); } - flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte))); + flush_cache_page(vma, addr, pte_pfn(pte)); ptep_clear_flush(vma, addr, pvmw.pte); if (new_page) set_pte_at(mm, addr, pvmw.pte, @@ -489,6 +500,11 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, if (ret <= 0) goto put_old; + if (is_zero_page(old_page)) { + ret = -EINVAL; + goto put_old; + } + if (WARN(!is_register && PageCompound(old_page), "uprobe unregister should never work on compound page\n")) { ret = -EINVAL; @@ -1770,6 +1786,7 @@ void uprobe_free_utask(struct task_struct *t) if (!utask) return; + t->utask = NULL; if (utask->active_uprobe) put_uprobe(utask->active_uprobe); @@ -1779,7 +1796,6 @@ void uprobe_free_utask(struct task_struct *t) xol_free_insn_slot(t); kfree(utask); - t->utask = NULL; } /* @@ -1882,8 +1898,8 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags) */ unsigned long uprobe_get_trampoline_vaddr(void) { + unsigned long trampoline_vaddr = UPROBE_NO_TRAMPOLINE_VADDR; struct xol_area *area; - unsigned long trampoline_vaddr = -1; /* Pairs with xol_add_vma() smp_store_release() */ area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */ diff --git a/kernel/fork.c b/kernel/fork.c index e192bdbc9adeb..12decadff468f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -505,6 +505,10 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) vma_numab_state_init(new); dup_anon_vma_name(orig, new); + /* track_pfn_copy() will later take care of copying internal state. */ + if (unlikely(new->vm_flags & VM_PFNMAP)) + untrack_pfn_clear(new); + return new; } diff --git a/kernel/futex/core.c b/kernel/futex/core.c index 136768ae26375..010607a991949 100644 --- a/kernel/futex/core.c +++ b/kernel/futex/core.c @@ -554,7 +554,8 @@ void futex_q_unlock(struct futex_hash_bucket *hb) futex_hb_waiters_dec(hb); } -void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb) +void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb, + struct task_struct *task) { int prio; @@ -570,7 +571,7 @@ void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb) plist_node_init(&q->list, prio); plist_add(&q->list, &hb->chain); - q->task = current; + q->task = task; } /**
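The extra parameter makes the queued task explicit instead of implicitly using current, which is what allows a future async caller to queue a futex_q with no sleeping task behind it (the kernel-doc note below spells this out). A hedged sketch of the two call patterns; the async case is purely illustrative:

	/* Synchronous wait, as in futex_wait_queue(): the waiter queues
	 * itself and will later be woken through q->task. */
	futex_queue(q, hb, current);

	/* Hypothetical async user: nothing sleeps on the futex_q, so no
	 * task is recorded; completion is signalled by other means. */
	futex_queue(q, hb, NULL);

diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h index 8b195d06f4e8e..12e47386232ed 100644 --- a/kernel/futex/futex.h +++ b/kernel/futex/futex.h @@ -230,13 +230,15 @@ extern int futex_get_value_locked(u32 *dest, u32 __user *from); extern struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key); extern void __futex_unqueue(struct futex_q *q); -extern void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb); +extern void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb, + struct task_struct *task); extern int futex_unqueue(struct futex_q *q); /** * futex_queue() - Enqueue the futex_q on the futex_hash_bucket * @q: The futex_q to enqueue * @hb: The destination hash bucket + * @task: Task queueing this futex * * The hb->lock must be held by the caller, and is released here.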
A call to * futex_queue() is typically paired with exactly one call to futex_unqueue(). The @@ -244,11 +246,14 @@ extern int futex_unqueue(struct futex_q *q); * or nothing if the unqueue is done as part of the wake process and the unqueue * state is implicit in the state of woken task (see futex_wait_requeue_pi() for * an example). + * + * Note that @task may be NULL, for async usage of futexes. */ -static inline void futex_queue(struct futex_q *q, struct futex_hash_bucket *hb) +static inline void futex_queue(struct futex_q *q, struct futex_hash_bucket *hb, + struct task_struct *task) __releases(&hb->lock) { - __futex_queue(q, hb); + __futex_queue(q, hb, task); spin_unlock(&hb->lock); } diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c index 5722467f27379..8ec12f1aff83b 100644 --- a/kernel/futex/pi.c +++ b/kernel/futex/pi.c @@ -981,7 +981,7 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl /* * Only actually queue now that the atomic ops are done: */ - __futex_queue(&q, hb); + __futex_queue(&q, hb, current); if (trylock) { ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex); diff --git a/kernel/futex/waitwake.c b/kernel/futex/waitwake.c index 3a10375d95218..a9056acb75eef 100644 --- a/kernel/futex/waitwake.c +++ b/kernel/futex/waitwake.c @@ -350,7 +350,7 @@ void futex_wait_queue(struct futex_hash_bucket *hb, struct futex_q *q, * access to the hash list and forcing another memory barrier. */ set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); - futex_queue(q, hb); + futex_queue(q, hb, current); /* Arm the timer */ if (timeout) @@ -461,7 +461,7 @@ int futex_wait_multiple_setup(struct futex_vector *vs, int count, int *woken) * next futex. Queue each futex at this moment so hb can * be unlocked. */ - futex_queue(q, hb); + futex_queue(q, hb, current); continue; } diff --git a/kernel/kexec_elf.c b/kernel/kexec_elf.c index d3689632e8b90..3a5c25b2adc94 100644 --- a/kernel/kexec_elf.c +++ b/kernel/kexec_elf.c @@ -390,7 +390,7 @@ int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, struct kexec_buf *kbuf, unsigned long *lowest_load_addr) { - unsigned long lowest_addr = UINT_MAX; + unsigned long lowest_addr = ULONG_MAX; int ret; size_t i; diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 536bd471557f5..53c76dc71f3f5 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -6223,6 +6223,9 @@ static void zap_class(struct pending_free *pf, struct lock_class *class) hlist_del_rcu(&class->hash_entry); WRITE_ONCE(class->key, NULL); WRITE_ONCE(class->name, NULL); + /* Class allocated but not used, -1 in nr_unused_locks */ + if (class->usage_mask == 0) + debug_atomic_dec(nr_unused_locks); nr_lock_classes--; __clear_bit(class - lock_classes, lock_classes_in_use); if (class - lock_classes == max_lock_class_idx) diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c index 34bfae72f2952..de9117c0e671e 100644 --- a/kernel/locking/semaphore.c +++ b/kernel/locking/semaphore.c @@ -29,6 +29,7 @@ #include <linux/export.h> #include <linux/sched.h> #include <linux/sched/debug.h> +#include <linux/sched/wake_q.h> #include <linux/semaphore.h> #include <linux/spinlock.h> #include <linux/ftrace.h> @@ -38,7 +39,7 @@ static noinline void __down(struct semaphore *sem); static noinline int __down_interruptible(struct semaphore *sem); static noinline int __down_killable(struct semaphore *sem); static noinline int __down_timeout(struct semaphore *sem, long timeout); -static noinline void __up(struct semaphore *sem); +static noinline void __up(struct semaphore *sem, struct wake_q_head *wake_q); /** * down - acquire the semaphore @@ -183,13 +184,16 @@
EXPORT_SYMBOL(down_timeout); void __sched up(struct semaphore *sem) { unsigned long flags; + DEFINE_WAKE_Q(wake_q); raw_spin_lock_irqsave(&sem->lock, flags); if (likely(list_empty(&sem->wait_list))) sem->count++; else - __up(sem); + __up(sem, &wake_q); raw_spin_unlock_irqrestore(&sem->lock, flags); + if (!wake_q_empty(&wake_q)) + wake_up_q(&wake_q); } EXPORT_SYMBOL(up); @@ -269,11 +273,12 @@ static noinline int __sched __down_timeout(struct semaphore *sem, long timeout) return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout); } -static noinline void __sched __up(struct semaphore *sem) +static noinline void __sched __up(struct semaphore *sem, + struct wake_q_head *wake_q) { struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, struct semaphore_waiter, list); list_del(&waiter->list); waiter->up = true; - wake_up_process(waiter->task); + wake_q_add(wake_q, waiter->task); } diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index b483fcea811b1..d8bad1eeedd3e 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -1443,10 +1443,10 @@ static const char * const comp_alg_enabled[] = { static int hibernate_compressor_param_set(const char *compressor, const struct kernel_param *kp) { - unsigned int sleep_flags; int index, ret; - sleep_flags = lock_system_sleep(); + if (!mutex_trylock(&system_transition_mutex)) + return -EBUSY; index = sysfs_match_string(comp_alg_enabled, compressor); if (index >= 0) { @@ -1458,7 +1458,7 @@ static int hibernate_compressor_param_set(const char *compressor, ret = index; } - unlock_system_sleep(sleep_flags); + mutex_unlock(&system_transition_mutex); if (ret) pr_debug("Cannot set specified compressor %s\n", diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 3b75f6e8410b9..881a26e18c658 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -2436,7 +2436,6 @@ asmlinkage __visible int _printk(const char *fmt, ...) } EXPORT_SYMBOL(_printk); -static bool pr_flush(int timeout_ms, bool reset_on_progress); static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress); #else /* CONFIG_PRINTK */ @@ -2449,7 +2448,6 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre static u64 syslog_seq; -static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; } static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; } #endif /* CONFIG_PRINTK */ @@ -4436,7 +4434,7 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre * Context: Process context. May sleep while acquiring console lock. * Return: true if all usable printers are caught up. */ -static bool pr_flush(int timeout_ms, bool reset_on_progress) +bool pr_flush(int timeout_ms, bool reset_on_progress) { return __pr_flush(NULL, timeout_ms, reset_on_progress); }
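With the static declaration dropped, pr_flush() becomes usable outside printk.c; kernel_power_off() below calls pr_flush(1000, true) so pending console output is drained before machine_power_off(). A hedged usage sketch for a similar last-gasp path:

	pr_emerg("entering unrecoverable state\n");
	/* Give consoles up to 1s, restarting the budget whenever any
	 * console makes progress; returns true once all are caught up. */
	if (!pr_flush(1000, true))
		pr_warn("some console output may be lost\n");

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 3e486ccaa4ca3..8e52c1dd06284 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3191,6 +3191,8 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func) } EXPORT_SYMBOL_GPL(call_rcu); +static struct workqueue_struct *rcu_reclaim_wq; + /* Maximum number of jiffies to wait before draining a batch.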
*/ #define KFREE_DRAIN_JIFFIES (5 * HZ) #define KFREE_N_BATCHES 2 @@ -3519,10 +3521,10 @@ __schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp) if (delayed_work_pending(&krcp->monitor_work)) { delay_left = krcp->monitor_work.timer.expires - jiffies; if (delay < delay_left) - mod_delayed_work(system_unbound_wq, &krcp->monitor_work, delay); + mod_delayed_work(rcu_reclaim_wq, &krcp->monitor_work, delay); return; } - queue_delayed_work(system_unbound_wq, &krcp->monitor_work, delay); + queue_delayed_work(rcu_reclaim_wq, &krcp->monitor_work, delay); } static void @@ -3620,7 +3622,7 @@ kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp) // "free channels", the batch can handle. Break // the loop since it is done with this CPU thus // queuing an RCU work is _always_ success here. - queued = queue_rcu_work(system_unbound_wq, &krwp->rcu_work); + queued = queue_rcu_work(rcu_reclaim_wq, &krwp->rcu_work); WARN_ON_ONCE(!queued); break; } @@ -3708,7 +3710,7 @@ run_page_cache_worker(struct kfree_rcu_cpu *krcp) if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING && !atomic_xchg(&krcp->work_in_progress, 1)) { if (atomic_read(&krcp->backoff_page_cache_fill)) { - queue_delayed_work(system_unbound_wq, + queue_delayed_work(rcu_reclaim_wq, &krcp->page_cache_work, msecs_to_jiffies(rcu_delay_page_cache_fill_msec)); } else { @@ -5662,6 +5664,10 @@ static void __init kfree_rcu_batch_init(void) int i, j; struct shrinker *kfree_rcu_shrinker; + rcu_reclaim_wq = alloc_workqueue("kvfree_rcu_reclaim", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0); + WARN_ON(!rcu_reclaim_wq); + /* Clamp it to [0:100] seconds interval. */ if (rcu_delay_page_cache_fill_msec < 0 || rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) { diff --git a/kernel/reboot.c b/kernel/reboot.c index f05dbde2c93fe..d6ee090eda943 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -697,6 +697,7 @@ void kernel_power_off(void) migrate_to_reboot_cpu(); syscore_shutdown(); pr_emerg("Power down\n"); + pr_flush(1000, true); kmsg_dump(KMSG_DUMP_SHUTDOWN); machine_power_off(); }
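The semaphore rework earlier and the wake_up_q() hunk below both follow the standard wake_q discipline: wakees are collected while the spinlock is held, and the actual wakeups run after it is dropped, keeping lock hold times short. A condensed sketch of the pattern (lock and waiter are stand-ins):

	DEFINE_WAKE_Q(wake_q);
	unsigned long flags;

	raw_spin_lock_irqsave(&lock, flags);
	/* choose the wakee(s) while the wait list is stable */
	wake_q_add(&wake_q, waiter->task);
	raw_spin_unlock_irqrestore(&lock, flags);

	/* the wake_up_process() calls happen without the lock held */
	wake_up_q(&wake_q);

diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4c446c8d30907..8ba19aa660f66 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1057,9 +1057,10 @@ void wake_up_q(struct wake_q_head *head) struct task_struct *task; task = container_of(node, struct task_struct, wake_q); - /* Task can safely be re-inserted now: */ node = node->next; - task->wake_q.next = NULL; + /* pairs with cmpxchg_relaxed() in __wake_q_add() */ + WRITE_ONCE(task->wake_q.next, NULL); + /* Task can safely be re-inserted now.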
*/ /* * wake_up_process() executes a full barrier, which pairs with @@ -7279,7 +7280,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) int __sched __cond_resched(void) { - if (should_resched(0)) { + if (should_resched(0) && !irqs_disabled()) { preempt_schedule_common(); return 1; } @@ -9047,7 +9048,7 @@ void sched_release_group(struct task_group *tg) spin_unlock_irqrestore(&task_group_lock, flags); } -static struct task_group *sched_get_task_group(struct task_struct *tsk) +static void sched_change_group(struct task_struct *tsk) { struct task_group *tg; @@ -9059,13 +9060,7 @@ static struct task_group *sched_get_task_group(struct task_struct *tsk) tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), struct task_group, css); tg = autogroup_task_group(tsk, tg); - - return tg; -} - -static void sched_change_group(struct task_struct *tsk, struct task_group *group) -{ - tsk->sched_task_group = group; + tsk->sched_task_group = tg; #ifdef CONFIG_FAIR_GROUP_SCHED if (tsk->sched_class->task_change_group) @@ -9086,20 +9081,11 @@ void sched_move_task(struct task_struct *tsk, bool for_autogroup) { int queued, running, queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; - struct task_group *group; struct rq *rq; CLASS(task_rq_lock, rq_guard)(tsk); rq = rq_guard.rq; - /* - * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous - * group changes. - */ - group = sched_get_task_group(tsk); - if (group == tsk->sched_task_group) - return; - update_rq_clock(rq); running = task_current(rq, tsk); @@ -9110,7 +9096,7 @@ void sched_move_task(struct task_struct *tsk, bool for_autogroup) if (running) put_prev_task(rq, tsk); - sched_change_group(tsk, group); + sched_change_group(tsk); if (!for_autogroup) scx_cgroup_move_task(tsk); diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index e51d5ce730be1..e5ced97d9681c 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -81,9 +81,20 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) if (!cpufreq_this_cpu_can_update(sg_policy->policy)) return false; - if (unlikely(sg_policy->limits_changed)) { - sg_policy->limits_changed = false; - sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS); + if (unlikely(READ_ONCE(sg_policy->limits_changed))) { + WRITE_ONCE(sg_policy->limits_changed, false); + sg_policy->need_freq_update = true; + + /* + * The above limits_changed update must occur before the reads + * of policy limits in cpufreq_driver_resolve_freq() or a policy + * limits update might be missed, so use a memory barrier to + * ensure it. + * + * This pairs with the write memory barrier in sugov_limits(). + */ + smp_mb(); + return true; } @@ -95,10 +106,22 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time, unsigned int next_freq) { - if (sg_policy->need_freq_update) + if (sg_policy->need_freq_update) { sg_policy->need_freq_update = false; - else if (sg_policy->next_freq == next_freq) + /* + * The policy limits have changed, but if the return value of + * cpufreq_driver_resolve_freq() after applying the new limits + * is still equal to the previously selected frequency, the + * driver callback need not be invoked unless the driver + * specifically wants that to happen on every update of the + * policy limits. 
+ */ + if (sg_policy->next_freq == next_freq && + !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS)) + return false; + } else if (sg_policy->next_freq == next_freq) { return false; + } sg_policy->next_freq = next_freq; sg_policy->last_freq_update_time = time; @@ -365,7 +388,7 @@ static inline bool sugov_hold_freq(struct sugov_cpu *sg_cpu) { return false; } static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu) { if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min) - sg_cpu->sg_policy->limits_changed = true; + WRITE_ONCE(sg_cpu->sg_policy->limits_changed, true); } static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu, @@ -888,7 +911,16 @@ static void sugov_limits(struct cpufreq_policy *policy) mutex_unlock(&sg_policy->work_lock); } - sg_policy->limits_changed = true; + /* + * The limits_changed update below must take place before the updates + * of policy limits in cpufreq_set_policy() or a policy limits update + * might be missed, so use a memory barrier to ensure it. + * + * This pairs with the memory barrier in sugov_should_update_freq(). + */ + smp_wmb(); + + WRITE_ONCE(sg_policy->limits_changed, true); } struct cpufreq_governor schedutil_gov = { diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index a17c23b53049c..5e7ae404c8d2a 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -3179,7 +3179,7 @@ int sched_dl_global_validate(void) * value smaller than the currently allocated bandwidth in * any of the root_domains. */ - for_each_possible_cpu(cpu) { + for_each_online_cpu(cpu) { rcu_read_lock_sched(); if (dl_bw_visited(cpu, gen)) diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index bded84e7d5e71..2437b05297771 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -1265,6 +1265,8 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, if (task_has_dl_policy(p)) { P(dl.runtime); P(dl.deadline); + } else if (fair_policy(p->policy)) { + P(se.slice); } #ifdef CONFIG_SCHED_CLASS_EXT __PS("ext.enabled", task_on_scx(p)); diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 689f7e8f69f54..fcf968490308b 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -2300,12 +2300,35 @@ static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, * * - The BPF scheduler is bypassed while the rq is offline and we can always say * no to the BPF scheduler initiated migrations while offline. + * + * The caller must ensure that @p and @rq are on different CPUs. */ static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool trigger_error) { int cpu = cpu_of(rq); + SCHED_WARN_ON(task_cpu(p) == cpu); + + /* + * If @p has migration disabled, @p->cpus_ptr is updated to contain only + * the pinned CPU in migrate_disable_switch() while @p is being switched + * out. However, put_prev_task_scx() is called before @p->cpus_ptr is + * updated and thus another CPU may see @p on a DSQ inbetween leading to + * @p passing the below task_allowed_on_cpu() check while migration is + * disabled. + * + * Test the migration disabled state first as the race window is narrow + * and the BPF scheduler failing to check migration disabled state can + * easily be masked if task_allowed_on_cpu() is done first. 
+ */ + if (unlikely(is_migration_disabled(p))) { + if (trigger_error) + scx_ops_error("SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d", + p->comm, p->pid, task_cpu(p), cpu); + return false; + } + /* * We don't require the BPF scheduler to avoid dispatching to offline * CPUs mostly for convenience but also because CPUs can go offline @@ -2314,14 +2337,11 @@ static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, */ if (!task_allowed_on_cpu(p, cpu)) { if (trigger_error) - scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]", - cpu_of(rq), p->comm, p->pid); + scx_ops_error("SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]", + cpu, p->comm, p->pid); return false; } - if (unlikely(is_migration_disabled(p))) - return false; - if (!scx_rq_online(rq)) return false; @@ -2397,6 +2417,74 @@ static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *r static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; } #endif /* CONFIG_SMP */ +/** + * move_task_between_dsqs() - Move a task from one DSQ to another + * @p: target task + * @enq_flags: %SCX_ENQ_* + * @src_dsq: DSQ @p is currently on, must not be a local DSQ + * @dst_dsq: DSQ @p is being moved to, can be any DSQ + * + * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local + * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq + * will change. As @p's task_rq is locked, this function doesn't need to use the + * holding_cpu mechanism. + * + * On return, @src_dsq is unlocked and only @p's new task_rq, which is the + * return value, is locked. + */ +static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags, + struct scx_dispatch_q *src_dsq, + struct scx_dispatch_q *dst_dsq) +{ + struct rq *src_rq = task_rq(p), *dst_rq; + + BUG_ON(src_dsq->id == SCX_DSQ_LOCAL); + lockdep_assert_held(&src_dsq->lock); + lockdep_assert_rq_held(src_rq); + + if (dst_dsq->id == SCX_DSQ_LOCAL) { + dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq); + if (src_rq != dst_rq && + unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) { + dst_dsq = find_global_dsq(p); + dst_rq = src_rq; + } + } else { + /* no need to migrate if destination is a non-local DSQ */ + dst_rq = src_rq; + } + + /* + * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different + * CPU, @p will be migrated. + */ + if (dst_dsq->id == SCX_DSQ_LOCAL) { + /* @p is going from a non-local DSQ to a local DSQ */ + if (src_rq == dst_rq) { + task_unlink_from_dsq(p, src_dsq); + move_local_task_to_local_dsq(p, enq_flags, + src_dsq, dst_rq); + raw_spin_unlock(&src_dsq->lock); + } else { + raw_spin_unlock(&src_dsq->lock); + move_remote_task_to_local_dsq(p, enq_flags, + src_rq, dst_rq); + } + } else { + /* + * @p is going from a non-local DSQ to a non-local DSQ. As + * $src_dsq is already locked, do an abbreviated dequeue. 
+ */ + task_unlink_from_dsq(p, src_dsq); + p->scx.dsq = NULL; + raw_spin_unlock(&src_dsq->lock); + + dispatch_enqueue(dst_dsq, p, enq_flags); + } + + return dst_rq; +} + static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq) { struct task_struct *p; @@ -2474,7 +2562,8 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq, } #ifdef CONFIG_SMP - if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) { + if (src_rq != dst_rq && + unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) { dispatch_enqueue(find_global_dsq(p), p, enq_flags | SCX_ENQ_CLEAR_OPSS); return; @@ -2958,7 +3047,6 @@ static struct task_struct *pick_task_scx(struct rq *rq) { struct task_struct *prev = rq->curr; struct task_struct *p; - bool prev_on_scx = prev->sched_class == &ext_sched_class; bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP; bool kick_idle = false; @@ -2978,14 +3066,18 @@ static struct task_struct *pick_task_scx(struct rq *rq) * if pick_task_scx() is called without preceding balance_scx(). */ if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) { - if (prev_on_scx) { + if (prev->scx.flags & SCX_TASK_QUEUED) { keep_prev = true; } else { keep_prev = false; kick_idle = true; } - } else if (unlikely(keep_prev && !prev_on_scx)) { - /* only allowed during transitions */ + } else if (unlikely(keep_prev && + prev->sched_class != &ext_sched_class)) { + /* + * Can happen while enabling as SCX_RQ_BAL_PENDING assertion is + * conditional on scx_enabled() and may have been skipped. + */ WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED); keep_prev = false; } @@ -4068,8 +4160,8 @@ static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node) init_dsq(dsq, dsq_id); - ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node, - dsq_hash_params); + ret = rhashtable_lookup_insert_fast(&dsq_hash, &dsq->hash_node, + dsq_hash_params); if (ret) { kfree(dsq); return ERR_PTR(ret); @@ -5960,6 +6052,9 @@ __bpf_kfunc_start_defs(); __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) { + if (!ops_cpu_valid(prev_cpu, NULL)) + goto prev_cpu; + if (!static_branch_likely(&scx_builtin_idle_enabled)) { scx_ops_error("built-in idle tracking is disabled"); goto prev_cpu; @@ -6134,7 +6229,7 @@ static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit, u64 enq_flags) { struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq; - struct rq *this_rq, *src_rq, *dst_rq, *locked_rq; + struct rq *this_rq, *src_rq, *locked_rq; bool dispatched = false; bool in_balance; unsigned long flags; @@ -6180,51 +6275,18 @@ static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit, /* @p is still on $src_dsq and stable, determine the destination */ dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p); - if (dst_dsq->id == SCX_DSQ_LOCAL) { - dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq); - if (!task_can_run_on_remote_rq(p, dst_rq, true)) { - dst_dsq = find_global_dsq(p); - dst_rq = src_rq; - } - } else { - /* no need to migrate if destination is a non-local DSQ */ - dst_rq = src_rq; - } - /* - * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different - * CPU, @p will be migrated. + * Apply vtime and slice updates before moving so that the new time is + * visible before inserting into $dst_dsq. @p is still on $src_dsq but + * this is safe as we're locking it. 
*/ - if (dst_dsq->id == SCX_DSQ_LOCAL) { - /* @p is going from a non-local DSQ to a local DSQ */ - if (src_rq == dst_rq) { - task_unlink_from_dsq(p, src_dsq); - move_local_task_to_local_dsq(p, enq_flags, - src_dsq, dst_rq); - raw_spin_unlock(&src_dsq->lock); - } else { - raw_spin_unlock(&src_dsq->lock); - move_remote_task_to_local_dsq(p, enq_flags, - src_rq, dst_rq); - locked_rq = dst_rq; - } - } else { - /* - * @p is going from a non-local DSQ to a non-local DSQ. As - * $src_dsq is already locked, do an abbreviated dequeue. - */ - task_unlink_from_dsq(p, src_dsq); - p->scx.dsq = NULL; - raw_spin_unlock(&src_dsq->lock); - - if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME) - p->scx.dsq_vtime = kit->vtime; - dispatch_enqueue(dst_dsq, p, enq_flags); - } - + if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME) + p->scx.dsq_vtime = kit->vtime; if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE) p->scx.slice = kit->slice; + /* execute move */ + locked_rq = move_task_between_dsqs(p, enq_flags, src_dsq, dst_dsq); dispatched = true; out: if (in_balance) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6f1d3426691db..fee716c1bb775 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -885,6 +885,26 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) return __node_2_se(left); } +/* + * HACK, stash a copy of deadline at the point of pick in vlag, + * which isn't used until dequeue. + */ +static inline void set_protect_slice(struct sched_entity *se) +{ + se->vlag = se->deadline; +} + +static inline bool protect_slice(struct sched_entity *se) +{ + return se->vlag == se->deadline; +} + +static inline void cancel_protect_slice(struct sched_entity *se) +{ + if (protect_slice(se)) + se->vlag = se->deadline + 1; +} + /* * Earliest Eligible Virtual Deadline First * @@ -921,11 +941,7 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq) if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr))) curr = NULL; - /* - * Once selected, run a task until it either becomes non-eligible or - * until it gets a new slice. See the HACK in set_next_entity(). - */ - if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline) + if (sched_feat(RUN_TO_PARITY) && curr && protect_slice(curr)) return curr; /* Pick the leftmost entity if it's eligible */ @@ -4155,15 +4171,17 @@ static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq) { struct cfs_rq *prev_cfs_rq; struct list_head *prev; + struct rq *rq = rq_of(cfs_rq); if (cfs_rq->on_list) { prev = cfs_rq->leaf_cfs_rq_list.prev; } else { - struct rq *rq = rq_of(cfs_rq); - prev = rq->tmp_alone_branch; } + if (prev == &rq->leaf_cfs_rq_list) + return false; + prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list); return (prev_cfs_rq->tg->parent == cfs_rq->tg); @@ -5624,11 +5642,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_end_fair(cfs_rq, se); __dequeue_entity(cfs_rq, se); update_load_avg(cfs_rq, se, UPDATE_TG); - /* - * HACK, stash a copy of deadline at the point of pick in vlag, - * which isn't used until dequeue. 
- */ - se->vlag = se->deadline; + + set_protect_slice(se); } update_stats_curr_start(cfs_rq, se); @@ -7088,6 +7103,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) update_cfs_group(se); se->slice = slice; + if (se != cfs_rq->curr) + min_vruntime_cb_propagate(&se->run_node, NULL); slice = cfs_rq_min_slice(cfs_rq); cfs_rq->h_nr_running++; @@ -7217,6 +7234,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) update_cfs_group(se); se->slice = slice; + if (se != cfs_rq->curr) + min_vruntime_cb_propagate(&se->run_node, NULL); slice = cfs_rq_min_slice(cfs_rq); cfs_rq->h_nr_running -= h_nr_running; @@ -8880,8 +8899,15 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int * Preempt an idle entity in favor of a non-idle entity (and don't preempt * in the inverse case). */ - if (cse_is_idle && !pse_is_idle) + if (cse_is_idle && !pse_is_idle) { + /* + * When non-idle entity preempt an idle entity, + * don't give idle entity slice protection. + */ + cancel_protect_slice(se); goto preempt; + } + if (cse_is_idle != pse_is_idle) return; @@ -8900,8 +8926,8 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int * Note that even if @p does not turn out to be the most eligible * task at this moment, current's slice protection will be lost. */ - if (do_preempt_short(cfs_rq, pse, se) && se->vlag == se->deadline) - se->vlag = se->deadline + 1; + if (do_preempt_short(cfs_rq, pse, se)) + cancel_protect_slice(se); /* * If @p has become the most eligible task, force preemption. diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 15922c463e34b..b1bc3ce6a0cef 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -156,11 +156,6 @@ static struct hrtimer_cpu_base migration_cpu_base = { #define migration_base migration_cpu_base.clock_base[0] -static inline bool is_migration_base(struct hrtimer_clock_base *base) -{ - return base == &migration_base; -} - /* * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock * means that all timers which are tied to this base via timer->base are @@ -312,11 +307,6 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, #else /* CONFIG_SMP */ -static inline bool is_migration_base(struct hrtimer_clock_base *base) -{ - return false; -} - static inline struct hrtimer_clock_base * lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) __acquires(&timer->base->cpu_base->lock) @@ -1441,6 +1431,18 @@ static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base, } } +#ifdef CONFIG_SMP +static __always_inline bool is_migration_base(struct hrtimer_clock_base *base) +{ + return base == &migration_base; +} +#else +static __always_inline bool is_migration_base(struct hrtimer_clock_base *base) +{ + return false; +} +#endif + /* * This function is called on PREEMPT_RT kernels when the fast path * deletion of a timer failed because the timer callback function was diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 449efaaa387a6..55f279ddfd63d 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -833,7 +833,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type) if (unlikely(is_global_init(current))) return -EPERM; - if (!preemptible()) { + if (preempt_count() != 0 || irqs_disabled()) { /* Do an early check on signal validity. Otherwise, * the error is lost in deferred irq_work. 
*/ diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index cd9dbfb303833..e67d67f7b9065 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -541,6 +541,7 @@ static int function_stat_show(struct seq_file *m, void *v) static struct trace_seq s; unsigned long long avg; unsigned long long stddev; + unsigned long long stddev_denom; #endif mutex_lock(&ftrace_profile_lock); @@ -562,23 +563,19 @@ static int function_stat_show(struct seq_file *m, void *v) #ifdef CONFIG_FUNCTION_GRAPH_TRACER seq_puts(m, " "); - /* Sample standard deviation (s^2) */ - if (rec->counter <= 1) - stddev = 0; - else { - /* - * Apply Welford's method: - * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) - */ + /* + * Variance formula: + * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) + * Maybe Welford's method is better here? + * Divide only by 1000 for ns^2 -> us^2 conversion. + * trace_print_graph_duration will divide by 1000 again. + */ + stddev = 0; + stddev_denom = rec->counter * (rec->counter - 1) * 1000; + if (stddev_denom) { stddev = rec->counter * rec->time_squared - rec->time * rec->time; - - /* - * Divide only 1000 for ns^2 -> us^2 conversion. - * trace_print_graph_duration will divide 1000 again. - */ - stddev = div64_ul(stddev, - rec->counter * (rec->counter - 1) * 1000); + stddev = div64_ul(stddev, stddev_denom); } trace_seq_init(&s); @@ -3219,15 +3216,22 @@ static struct ftrace_hash *copy_hash(struct ftrace_hash *src) * The filter_hash updates uses just the append_hash() function * and the notrace_hash does not. */ -static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash) +static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash, + int size_bits) { struct ftrace_func_entry *entry; int size; int i; - /* An empty hash does everything */ - if (ftrace_hash_empty(*hash)) - return 0; + if (*hash) { + /* An empty hash does everything */ + if (ftrace_hash_empty(*hash)) + return 0; + } else { + *hash = alloc_ftrace_hash(size_bits); + if (!*hash) + return -ENOMEM; + } /* If new_hash has everything make hash have everything */ if (ftrace_hash_empty(new_hash)) { @@ -3291,16 +3295,18 @@ static int intersect_hash(struct ftrace_hash **hash, struct ftrace_hash *new_has /* Return a new hash that has a union of all @ops->filter_hash entries */ static struct ftrace_hash *append_hashes(struct ftrace_ops *ops) { - struct ftrace_hash *new_hash; + struct ftrace_hash *new_hash = NULL; struct ftrace_ops *subops; + int size_bits; int ret; - new_hash = alloc_ftrace_hash(ops->func_hash->filter_hash->size_bits); - if (!new_hash) - return NULL; + if (ops->func_hash->filter_hash) + size_bits = ops->func_hash->filter_hash->size_bits; + else + size_bits = FTRACE_HASH_DEFAULT_BITS; list_for_each_entry(subops, &ops->subop_list, list) { - ret = append_hash(&new_hash, subops->func_hash->filter_hash); + ret = append_hash(&new_hash, subops->func_hash->filter_hash, size_bits); if (ret < 0) { free_ftrace_hash(new_hash); return NULL; @@ -3309,7 +3315,8 @@ static struct ftrace_hash *append_hashes(struct ftrace_ops *ops) if (ftrace_hash_empty(new_hash)) break; } - return new_hash; + /* Can't return NULL as that means this failed */ + return new_hash ? 
: EMPTY_HASH; } /* Make @ops trace evenything except what all its subops do not trace */ @@ -3504,7 +3511,8 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int filter_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->filter_hash); if (!filter_hash) return -ENOMEM; - ret = append_hash(&filter_hash, subops->func_hash->filter_hash); + ret = append_hash(&filter_hash, subops->func_hash->filter_hash, + size_bits); if (ret < 0) { free_ftrace_hash(filter_hash); return ret; @@ -3515,16 +3523,16 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int ftrace_hash_empty(subops->func_hash->notrace_hash)) { notrace_hash = EMPTY_HASH; } else { - size_bits = max(ops->func_hash->filter_hash->size_bits, - subops->func_hash->filter_hash->size_bits); + size_bits = max(ops->func_hash->notrace_hash->size_bits, + subops->func_hash->notrace_hash->size_bits); notrace_hash = alloc_ftrace_hash(size_bits); if (!notrace_hash) { free_ftrace_hash(filter_hash); return -ENOMEM; } - ret = intersect_hash(¬race_hash, ops->func_hash->filter_hash, - subops->func_hash->filter_hash); + ret = intersect_hash(¬race_hash, ops->func_hash->notrace_hash, + subops->func_hash->notrace_hash); if (ret < 0) { free_ftrace_hash(filter_hash); free_ftrace_hash(notrace_hash); @@ -5747,6 +5755,9 @@ __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) return -ENOENT; free_hash_entry(hash, entry); return 0; + } else if (__ftrace_lookup_ip(hash, ip) != NULL) { + /* Already exists */ + return 0; } entry = add_hash_entry(hash, ip); @@ -5933,9 +5944,10 @@ int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) /* Make a copy hash to place the new and the old entries in */ size = hash->count + direct_functions->count; - if (size > 32) - size = 32; - new_hash = alloc_ftrace_hash(fls(size)); + size = fls(size); + if (size > FTRACE_HASH_MAX_BITS) + size = FTRACE_HASH_MAX_BITS; + new_hash = alloc_ftrace_hash(size); if (!new_hash) goto out_unlock; @@ -6837,6 +6849,7 @@ ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) } } } + cond_resched(); } while_for_each_ftrace_rec(); out: mutex_unlock(&ftrace_lock); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index ea8ad5480e286..e1ffbed8cc5eb 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -5994,7 +5994,7 @@ static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer) meta->read = cpu_buffer->read; /* Some archs do not have data cache coherency between kernel and user-space */ - flush_dcache_folio(virt_to_folio(cpu_buffer->meta_page)); + flush_kernel_vmap_range(cpu_buffer->meta_page, PAGE_SIZE); } static void @@ -7309,7 +7309,8 @@ int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu) out: /* Some archs do not have data cache coherency between kernel and user-space */ - flush_dcache_folio(virt_to_folio(cpu_buffer->reader_page->page)); + flush_kernel_vmap_range(cpu_buffer->reader_page->page, + buffer->subbuf_size + BUF_PAGE_HDR_SIZE); rb_update_meta_page(cpu_buffer); @@ -7442,9 +7443,9 @@ static __init int rb_write_something(struct rb_test_data *data, bool nested) /* Ignore dropped events before test starts. 
*/ if (started) { if (nested) - data->bytes_dropped += len; - else data->bytes_dropped_nested += len; + else + data->bytes_dropped += len; } return len; } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 2229d6a5b117a..42c9a1bd8a0a4 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -535,19 +536,16 @@ LIST_HEAD(ftrace_trace_arrays); int trace_array_get(struct trace_array *this_tr) { struct trace_array *tr; - int ret = -ENODEV; - mutex_lock(&trace_types_lock); + guard(mutex)(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr == this_tr) { tr->ref++; - ret = 0; - break; + return 0; } } - mutex_unlock(&trace_types_lock); - return ret; + return -ENODEV; } static void __trace_array_put(struct trace_array *this_tr) @@ -1456,22 +1454,20 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) { - struct cond_snapshot *cond_snapshot; - int ret = 0; + struct cond_snapshot *cond_snapshot __free(kfree) = + kzalloc(sizeof(*cond_snapshot), GFP_KERNEL); + int ret; - cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL); if (!cond_snapshot) return -ENOMEM; cond_snapshot->cond_data = cond_data; cond_snapshot->update = update; - mutex_lock(&trace_types_lock); + guard(mutex)(&trace_types_lock); - if (tr->current_trace->use_max_tr) { - ret = -EBUSY; - goto fail_unlock; - } + if (tr->current_trace->use_max_tr) + return -EBUSY; /* * The cond_snapshot can only change to NULL without the @@ -1481,29 +1477,20 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, * do safely with only holding the trace_types_lock and not * having to take the max_lock. 
*/ - if (tr->cond_snapshot) { - ret = -EBUSY; - goto fail_unlock; - } + if (tr->cond_snapshot) + return -EBUSY; ret = tracing_arm_snapshot_locked(tr); if (ret) - goto fail_unlock; + return ret; local_irq_disable(); arch_spin_lock(&tr->max_lock); - tr->cond_snapshot = cond_snapshot; + tr->cond_snapshot = no_free_ptr(cond_snapshot); arch_spin_unlock(&tr->max_lock); local_irq_enable(); - mutex_unlock(&trace_types_lock); - - return ret; - - fail_unlock: - mutex_unlock(&trace_types_lock); - kfree(cond_snapshot); - return ret; + return 0; } EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); @@ -2216,10 +2203,10 @@ static __init int init_trace_selftests(void) selftests_can_run = true; - mutex_lock(&trace_types_lock); + guard(mutex)(&trace_types_lock); if (list_empty(&postponed_selftests)) - goto out; + return 0; pr_info("Running postponed tracer tests:\n"); @@ -2248,9 +2235,6 @@ static __init int init_trace_selftests(void) } tracing_selftest_running = false; - out: - mutex_unlock(&trace_types_lock); - return 0; } core_initcall(init_trace_selftests); @@ -2820,7 +2804,7 @@ int tracepoint_printk_sysctl(const struct ctl_table *table, int write, int save_tracepoint_printk; int ret; - mutex_lock(&tracepoint_printk_mutex); + guard(mutex)(&tracepoint_printk_mutex); save_tracepoint_printk = tracepoint_printk; ret = proc_dointvec(table, write, buffer, lenp, ppos); @@ -2833,16 +2817,13 @@ int tracepoint_printk_sysctl(const struct ctl_table *table, int write, tracepoint_printk = 0; if (save_tracepoint_printk == tracepoint_printk) - goto out; + return ret; if (tracepoint_printk) static_key_enable(&tracepoint_printk_key.key); else static_key_disable(&tracepoint_printk_key.key); - out: - mutex_unlock(&tracepoint_printk_mutex); - return ret; } @@ -5152,7 +5133,8 @@ static int tracing_trace_options_show(struct seq_file *m, void *v) u32 tracer_flags; int i; - mutex_lock(&trace_types_lock); + guard(mutex)(&trace_types_lock); + tracer_flags = tr->current_trace->flags->val; trace_opts = tr->current_trace->flags->opts; @@ -5169,7 +5151,6 @@ static int tracing_trace_options_show(struct seq_file *m, void *v) else seq_printf(m, "no%s\n", trace_opts[i].name); } - mutex_unlock(&trace_types_lock); return 0; } @@ -5834,7 +5815,7 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, return; } - mutex_lock(&trace_eval_mutex); + guard(mutex)(&trace_eval_mutex); if (!trace_eval_maps) trace_eval_maps = map_array; @@ -5858,8 +5839,6 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, map_array++; } memset(map_array, 0, sizeof(*map_array)); - - mutex_unlock(&trace_eval_mutex); } static void trace_create_eval_file(struct dentry *d_tracer) @@ -6021,26 +6000,15 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr, ssize_t tracing_resize_ring_buffer(struct trace_array *tr, unsigned long size, int cpu_id) { - int ret; - - mutex_lock(&trace_types_lock); + guard(mutex)(&trace_types_lock); if (cpu_id != RING_BUFFER_ALL_CPUS) { /* make sure, this cpu is enabled in the mask */ - if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) { - ret = -EINVAL; - goto out; - } + if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) + return -EINVAL; } - ret = __tracing_resize_ring_buffer(tr, size, cpu_id); - if (ret < 0) - ret = -ENOMEM; - -out: - mutex_unlock(&trace_types_lock); - - return ret; + return __tracing_resize_ring_buffer(tr, size, cpu_id); } static void update_last_data(struct trace_array *tr) @@ -6131,9 +6099,9 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf) 
#ifdef CONFIG_TRACER_MAX_TRACE bool had_max_tr; #endif - int ret = 0; + int ret; - mutex_lock(&trace_types_lock); + guard(mutex)(&trace_types_lock); update_last_data(tr); @@ -6141,7 +6109,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf) ret = __tracing_resize_ring_buffer(tr, trace_buf_size, RING_BUFFER_ALL_CPUS); if (ret < 0) - goto out; + return ret; ret = 0; } @@ -6149,43 +6117,37 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf) if (strcmp(t->name, buf) == 0) break; } - if (!t) { - ret = -EINVAL; - goto out; - } + if (!t) + return -EINVAL; + if (t == tr->current_trace) - goto out; + return 0; #ifdef CONFIG_TRACER_SNAPSHOT if (t->use_max_tr) { local_irq_disable(); arch_spin_lock(&tr->max_lock); - if (tr->cond_snapshot) - ret = -EBUSY; + ret = tr->cond_snapshot ? -EBUSY : 0; arch_spin_unlock(&tr->max_lock); local_irq_enable(); if (ret) - goto out; + return ret; } #endif /* Some tracers won't work on kernel command line */ if (system_state < SYSTEM_RUNNING && t->noboot) { pr_warn("Tracer '%s' is not allowed on command line, ignored\n", t->name); - goto out; + return 0; } /* Some tracers are only allowed for the top level buffer */ - if (!trace_ok_for_array(t, tr)) { - ret = -EINVAL; - goto out; - } + if (!trace_ok_for_array(t, tr)) + return -EINVAL; /* If trace pipe files are being read, we can't change the tracer */ - if (tr->trace_ref) { - ret = -EBUSY; - goto out; - } + if (tr->trace_ref) + return -EBUSY; trace_branch_disable(); @@ -6216,7 +6178,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf) if (!had_max_tr && t->use_max_tr) { ret = tracing_arm_snapshot_locked(tr); if (ret) - goto out; + return ret; } #else tr->current_trace = &nop_trace; @@ -6229,17 +6191,15 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf) if (t->use_max_tr) tracing_disarm_snapshot(tr); #endif - goto out; + return ret; } } tr->current_trace = t; tr->current_trace->enabled++; trace_branch_enable(tr); - out: - mutex_unlock(&trace_types_lock); - return ret; + return 0; } static ssize_t @@ -6317,22 +6277,18 @@ tracing_thresh_write(struct file *filp, const char __user *ubuf, struct trace_array *tr = filp->private_data; int ret; - mutex_lock(&trace_types_lock); + guard(mutex)(&trace_types_lock); ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); if (ret < 0) - goto out; + return ret; if (tr->current_trace->update_thresh) { ret = tr->current_trace->update_thresh(tr); if (ret < 0) - goto out; + return ret; } - ret = cnt; -out: - mutex_unlock(&trace_types_lock); - - return ret; + return cnt; } #ifdef CONFIG_TRACER_MAX_TRACE @@ -6551,31 +6507,29 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, * This is just a matter of traces coherency, the ring buffer itself * is protected. 
*/ - mutex_lock(&iter->mutex); + guard(mutex)(&iter->mutex); /* return any leftover data */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (sret != -EBUSY) - goto out; + return sret; trace_seq_init(&iter->seq); if (iter->trace->read) { sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); if (sret) - goto out; + return sret; } waitagain: sret = tracing_wait_pipe(filp); if (sret <= 0) - goto out; + return sret; /* stop when tracing is finished */ - if (trace_empty(iter)) { - sret = 0; - goto out; - } + if (trace_empty(iter)) + return 0; if (cnt >= TRACE_SEQ_BUFFER_SIZE) cnt = TRACE_SEQ_BUFFER_SIZE - 1; @@ -6639,9 +6593,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, if (sret == -EBUSY) goto waitagain; -out: - mutex_unlock(&iter->mutex); - return sret; } @@ -7316,25 +7267,19 @@ u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_eve */ int tracing_set_filter_buffering(struct trace_array *tr, bool set) { - int ret = 0; - - mutex_lock(&trace_types_lock); + guard(mutex)(&trace_types_lock); if (set && tr->no_filter_buffering_ref++) - goto out; + return 0; if (!set) { - if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) { - ret = -EINVAL; - goto out; - } + if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) + return -EINVAL; --tr->no_filter_buffering_ref; } - out: - mutex_unlock(&trace_types_lock); - return ret; + return 0; } struct ftrace_buffer_info { @@ -7410,12 +7355,10 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, if (ret) return ret; - mutex_lock(&trace_types_lock); + guard(mutex)(&trace_types_lock); - if (tr->current_trace->use_max_tr) { - ret = -EBUSY; - goto out; - } + if (tr->current_trace->use_max_tr) + return -EBUSY; local_irq_disable(); arch_spin_lock(&tr->max_lock); @@ -7424,24 +7367,20 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, arch_spin_unlock(&tr->max_lock); local_irq_enable(); if (ret) - goto out; + return ret; switch (val) { case 0: - if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { - ret = -EINVAL; - break; - } + if (iter->cpu_file != RING_BUFFER_ALL_CPUS) + return -EINVAL; if (tr->allocated_snapshot) free_snapshot(tr); break; case 1: /* Only allow per-cpu swap if the ring buffer supports it */ #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP - if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { - ret = -EINVAL; - break; - } + if (iter->cpu_file != RING_BUFFER_ALL_CPUS) + return -EINVAL; #endif if (tr->allocated_snapshot) ret = resize_buffer_duplicate_size(&tr->max_buffer, @@ -7449,7 +7388,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, ret = tracing_arm_snapshot_locked(tr); if (ret) - break; + return ret; /* Now, we're going to swap */ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { @@ -7476,8 +7415,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, *ppos += cnt; ret = cnt; } -out: - mutex_unlock(&trace_types_lock); + return ret; } @@ -7869,12 +7807,11 @@ void tracing_log_err(struct trace_array *tr, len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1; - mutex_lock(&tracing_err_log_lock); + guard(mutex)(&tracing_err_log_lock); + err = get_tracing_log_err(tr, len); - if (PTR_ERR(err) == -ENOMEM) { - mutex_unlock(&tracing_err_log_lock); + if (PTR_ERR(err) == -ENOMEM) return; - } snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc); snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd); @@ -7885,7 +7822,6 @@ void tracing_log_err(struct trace_array *tr, err->info.ts = local_clock(); list_add_tail(&err->list, 
&tr->err_log); - mutex_unlock(&tracing_err_log_lock); } static void clear_tracing_err_log(struct trace_array *tr) @@ -9626,20 +9562,17 @@ static int instance_mkdir(const char *name) struct trace_array *tr; int ret; - mutex_lock(&event_mutex); - mutex_lock(&trace_types_lock); + guard(mutex)(&event_mutex); + guard(mutex)(&trace_types_lock); ret = -EEXIST; if (trace_array_find(name)) - goto out_unlock; + return -EEXIST; tr = trace_array_create(name); ret = PTR_ERR_OR_ZERO(tr); -out_unlock: - mutex_unlock(&trace_types_lock); - mutex_unlock(&event_mutex); return ret; } @@ -9689,24 +9622,23 @@ struct trace_array *trace_array_get_by_name(const char *name, const char *system { struct trace_array *tr; - mutex_lock(&event_mutex); - mutex_lock(&trace_types_lock); + guard(mutex)(&event_mutex); + guard(mutex)(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { - if (tr->name && strcmp(tr->name, name) == 0) - goto out_unlock; + if (tr->name && strcmp(tr->name, name) == 0) { + tr->ref++; + return tr; + } } tr = trace_array_create_systems(name, systems, 0, 0); if (IS_ERR(tr)) tr = NULL; -out_unlock: - if (tr) + else tr->ref++; - mutex_unlock(&trace_types_lock); - mutex_unlock(&event_mutex); return tr; } EXPORT_SYMBOL_GPL(trace_array_get_by_name); @@ -9757,48 +9689,36 @@ static int __remove_instance(struct trace_array *tr) int trace_array_destroy(struct trace_array *this_tr) { struct trace_array *tr; - int ret; if (!this_tr) return -EINVAL; - mutex_lock(&event_mutex); - mutex_lock(&trace_types_lock); + guard(mutex)(&event_mutex); + guard(mutex)(&trace_types_lock); - ret = -ENODEV; /* Making sure trace array exists before destroying it. */ list_for_each_entry(tr, &ftrace_trace_arrays, list) { - if (tr == this_tr) { - ret = __remove_instance(tr); - break; - } + if (tr == this_tr) + return __remove_instance(tr); } - mutex_unlock(&trace_types_lock); - mutex_unlock(&event_mutex); - - return ret; + return -ENODEV; } EXPORT_SYMBOL_GPL(trace_array_destroy); static int instance_rmdir(const char *name) { struct trace_array *tr; - int ret; - mutex_lock(&event_mutex); - mutex_lock(&trace_types_lock); + guard(mutex)(&event_mutex); + guard(mutex)(&trace_types_lock); - ret = -ENODEV; tr = trace_array_find(name); - if (tr) - ret = __remove_instance(tr); - - mutex_unlock(&trace_types_lock); - mutex_unlock(&event_mutex); + if (!tr) + return -ENODEV; - return ret; + return __remove_instance(tr); } static __init void create_trace_instances(struct dentry *d_tracer) @@ -9811,19 +9731,16 @@ static __init void create_trace_instances(struct dentry *d_tracer) if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n")) return; - mutex_lock(&event_mutex); - mutex_lock(&trace_types_lock); + guard(mutex)(&event_mutex); + guard(mutex)(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (!tr->name) continue; if (MEM_FAIL(trace_array_create_dir(tr) < 0, "Failed to create instance directory\n")) - break; + return; } - - mutex_unlock(&trace_types_lock); - mutex_unlock(&event_mutex); } static void @@ -10037,7 +9954,7 @@ static void trace_module_remove_evals(struct module *mod) if (!mod->num_trace_evals) return; - mutex_lock(&trace_eval_mutex); + guard(mutex)(&trace_eval_mutex); map = trace_eval_maps; @@ -10049,12 +9966,10 @@ static void trace_module_remove_evals(struct module *mod) map = map->tail.next; } if (!map) - goto out; + return; *last = trace_eval_jmp_to_tail(map)->tail.next; kfree(map); - out: - mutex_unlock(&trace_eval_mutex); } #else static inline void 
trace_module_remove_evals(struct module *mod) { } diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index ea9b44847ce6b..11dea25ef880a 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -790,7 +790,9 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file, clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); } - call->class->reg(call, TRACE_REG_UNREGISTER, file); + ret = call->class->reg(call, TRACE_REG_UNREGISTER, file); + + WARN_ON_ONCE(ret); } /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ if (file->flags & EVENT_FILE_FL_SOFT_MODE) @@ -3111,6 +3113,20 @@ static bool event_in_systems(struct trace_event_call *call, return !*p || isspace(*p) || *p == ','; } +#ifdef CONFIG_HIST_TRIGGERS +/* + * Wake up waiter on the hist_poll_wq from irq_work because the hist trigger + * may happen in any context. + */ +static void hist_poll_event_irq_work(struct irq_work *work) +{ + wake_up_all(&hist_poll_wq); +} + +DEFINE_IRQ_WORK(hist_poll_work, hist_poll_event_irq_work); +DECLARE_WAIT_QUEUE_HEAD(hist_poll_wq); +#endif + static struct trace_event_file * trace_create_new_event(struct trace_event_call *call, struct trace_array *tr) diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 0c611b281a5b5..f50c2ad43f3d8 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -808,7 +808,7 @@ static __always_inline char *test_string(char *str) kstr = ubuf->buffer; /* For safety, do not trust the string pointer */ - if (!strncpy_from_kernel_nofault(kstr, str, USTRING_BUF_SIZE)) + if (strncpy_from_kernel_nofault(kstr, str, USTRING_BUF_SIZE) < 0) return NULL; return kstr; } @@ -827,7 +827,7 @@ static __always_inline char *test_ustring(char *str) /* user space address? 
*/ ustr = (char __user *)str; - if (!strncpy_from_user_nofault(kstr, ustr, USTRING_BUF_SIZE)) + if (strncpy_from_user_nofault(kstr, ustr, USTRING_BUF_SIZE) < 0) return NULL; return kstr; diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 5f9119eb7c67f..4ebafc655223a 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -5314,6 +5314,8 @@ static void event_hist_trigger(struct event_trigger_data *data, if (resolve_var_refs(hist_data, key, var_ref_vals, true)) hist_trigger_actions(hist_data, elt, buffer, rec, rbe, key, var_ref_vals); + + hist_poll_wakeup(); } static void hist_trigger_stacktrace_print(struct seq_file *m, @@ -5593,49 +5595,137 @@ static void hist_trigger_show(struct seq_file *m, n_entries, (u64)atomic64_read(&hist_data->map->drops)); } +struct hist_file_data { + struct file *file; + u64 last_read; + u64 last_act; +}; + +static u64 get_hist_hit_count(struct trace_event_file *event_file) +{ + struct hist_trigger_data *hist_data; + struct event_trigger_data *data; + u64 ret = 0; + + list_for_each_entry(data, &event_file->triggers, list) { + if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) { + hist_data = data->private_data; + ret += atomic64_read(&hist_data->map->hits); + } + } + return ret; +} + static int hist_show(struct seq_file *m, void *v) { + struct hist_file_data *hist_file = m->private; struct event_trigger_data *data; struct trace_event_file *event_file; - int n = 0, ret = 0; + int n = 0; - mutex_lock(&event_mutex); + guard(mutex)(&event_mutex); - event_file = event_file_file(m->private); - if (unlikely(!event_file)) { - ret = -ENODEV; - goto out_unlock; - } + event_file = event_file_file(hist_file->file); + if (unlikely(!event_file)) + return -ENODEV; list_for_each_entry(data, &event_file->triggers, list) { if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) hist_trigger_show(m, data, n++); } + hist_file->last_read = get_hist_hit_count(event_file); + /* + * Update last_act too so that poll()/POLLPRI can wait for the next + * event after any syscall on hist file. 
+ */ + hist_file->last_act = hist_file->last_read; - out_unlock: - mutex_unlock(&event_mutex); + return 0; +} + +static __poll_t event_hist_poll(struct file *file, struct poll_table_struct *wait) +{ + struct trace_event_file *event_file; + struct seq_file *m = file->private_data; + struct hist_file_data *hist_file = m->private; + __poll_t ret = 0; + u64 cnt; + + guard(mutex)(&event_mutex); + + event_file = event_file_data(file); + if (!event_file) + return EPOLLERR; + + hist_poll_wait(file, wait); + + cnt = get_hist_hit_count(event_file); + if (hist_file->last_read != cnt) + ret |= EPOLLIN | EPOLLRDNORM; + if (hist_file->last_act != cnt) { + hist_file->last_act = cnt; + ret |= EPOLLPRI; + } return ret; } +static int event_hist_release(struct inode *inode, struct file *file) +{ + struct seq_file *m = file->private_data; + struct hist_file_data *hist_file = m->private; + + kfree(hist_file); + return tracing_single_release_file_tr(inode, file); +} + static int event_hist_open(struct inode *inode, struct file *file) { + struct trace_event_file *event_file; + struct hist_file_data *hist_file; int ret; ret = tracing_open_file_tr(inode, file); if (ret) return ret; + guard(mutex)(&event_mutex); + + event_file = event_file_data(file); + if (!event_file) { + ret = -ENODEV; + goto err; + } + + hist_file = kzalloc(sizeof(*hist_file), GFP_KERNEL); + if (!hist_file) { + ret = -ENOMEM; + goto err; + } + + hist_file->file = file; + hist_file->last_act = get_hist_hit_count(event_file); + /* Clear private_data to avoid warning in single_open() */ file->private_data = NULL; - return single_open(file, hist_show, file); + ret = single_open(file, hist_show, hist_file); + if (ret) { + kfree(hist_file); + goto err; + } + + return 0; +err: + tracing_release_file_tr(inode, file); + return ret; } const struct file_operations event_hist_fops = { .open = event_hist_open, .read = seq_read, .llseek = seq_lseek, - .release = tracing_single_release_file_tr, + .release = event_hist_release, + .poll = event_hist_poll, }; #ifdef CONFIG_HIST_TRIGGERS_DEBUG @@ -5876,25 +5966,19 @@ static int hist_debug_show(struct seq_file *m, void *v) { struct event_trigger_data *data; struct trace_event_file *event_file; - int n = 0, ret = 0; + int n = 0; - mutex_lock(&event_mutex); + guard(mutex)(&event_mutex); event_file = event_file_file(m->private); - if (unlikely(!event_file)) { - ret = -ENODEV; - goto out_unlock; - } + if (unlikely(!event_file)) + return -ENODEV; list_for_each_entry(data, &event_file->triggers, list) { if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) hist_trigger_debug_show(m, data, n++); } - - out_unlock: - mutex_unlock(&event_mutex); - - return ret; + return 0; } static int event_hist_debug_open(struct inode *inode, struct file *file) @@ -5907,7 +5991,10 @@ static int event_hist_debug_open(struct inode *inode, struct file *file) /* Clear private_data to avoid warning in single_open() */ file->private_data = NULL; - return single_open(file, hist_debug_show, file); + ret = single_open(file, hist_debug_show, file); + if (ret) + tracing_release_file_tr(inode, file); + return ret; } const struct file_operations event_hist_debug_fops = { @@ -6652,27 +6739,27 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops, if (existing_hist_update_only(glob, trigger_data, file)) goto out_free; - ret = event_trigger_register(cmd_ops, file, glob, trigger_data); - if (ret < 0) - goto out_free; + if (!get_named_trigger_data(trigger_data)) { - if (get_named_trigger_data(trigger_data)) - goto enable; + ret = 
create_actions(hist_data); + if (ret) + goto out_free; - ret = create_actions(hist_data); - if (ret) - goto out_unreg; + if (has_hist_vars(hist_data) || hist_data->n_var_refs) { + ret = save_hist_vars(hist_data); + if (ret) + goto out_free; + } - if (has_hist_vars(hist_data) || hist_data->n_var_refs) { - ret = save_hist_vars(hist_data); + ret = tracing_map_init(hist_data->map); if (ret) - goto out_unreg; + goto out_free; } - ret = tracing_map_init(hist_data->map); - if (ret) - goto out_unreg; -enable: + ret = event_trigger_register(cmd_ops, file, glob, trigger_data); + if (ret < 0) + goto out_free; + ret = hist_trigger_enable(trigger_data, file); if (ret) goto out_unreg; diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index c82b401a294d9..1b9e32f6442fb 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -312,7 +312,7 @@ static const char *synth_field_fmt(char *type) else if (strcmp(type, "gfp_t") == 0) fmt = "%x"; else if (synth_field_is_string(type)) - fmt = "%.*s"; + fmt = "%s"; else if (synth_field_is_stack(type)) fmt = "%s"; @@ -377,7 +377,6 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter, union trace_synth_field *data = &entry->fields[n_u64]; trace_seq_printf(s, print_fmt, se->fields[i]->name, - STR_VAR_LEN_MAX, (char *)entry + data->as_dynamic.offset, i == se->n_fields - 1 ? "" : " "); n_u64++; @@ -859,6 +858,38 @@ static struct trace_event_fields synth_event_fields_array[] = { {} }; +static int synth_event_reg(struct trace_event_call *call, + enum trace_reg type, void *data) +{ + struct synth_event *event = container_of(call, struct synth_event, call); + + switch (type) { +#ifdef CONFIG_PERF_EVENTS + case TRACE_REG_PERF_REGISTER: +#endif + case TRACE_REG_REGISTER: + if (!try_module_get(event->mod)) + return -EBUSY; + break; + default: + break; + } + + int ret = trace_event_reg(call, type, data); + + switch (type) { +#ifdef CONFIG_PERF_EVENTS + case TRACE_REG_PERF_UNREGISTER: +#endif + case TRACE_REG_UNREGISTER: + module_put(event->mod); + break; + default: + break; + } + return ret; +} + static int register_synth_event(struct synth_event *event) { struct trace_event_call *call = &event->call; @@ -888,7 +919,7 @@ static int register_synth_event(struct synth_event *event) goto out; } call->flags = TRACE_EVENT_FL_TRACEPOINT; - call->class->reg = trace_event_reg; + call->class->reg = synth_event_reg; call->class->probe = trace_event_raw_event_synth; call->data = event; call->tp = event->tp; diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c index c62d1629cffec..af7d6e2060d9d 100644 --- a/kernel/trace/trace_fprobe.c +++ b/kernel/trace/trace_fprobe.c @@ -888,14 +888,15 @@ static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mo struct __find_tracepoint_cb_data *data = priv; if (!data->tpoint && !strcmp(data->tp_name, tp->name)) { - data->tpoint = tp; - if (!data->mod) { + /* If module is not specified, try getting module refcount. */ + if (!data->mod && mod) { + /* If failed to get refcount, ignore this tracepoint. */ + if (!try_module_get(mod)) + return; + data->mod = mod; - if (!try_module_get(data->mod)) { - data->tpoint = NULL; - data->mod = NULL; - } } + data->tpoint = tp; } } @@ -908,11 +909,9 @@ static void __find_tracepoint_cb(struct tracepoint *tp, void *priv) } /* - * Find a tracepoint from kernel and module. 
If the tracepoint is in a module, - * this increments the module refcount to prevent unloading until the - * trace_fprobe is registered to the list. After registering the trace_fprobe - * on the trace_fprobe list, the module refcount is decremented because - * tracepoint_probe_module_cb will handle it. + * Find a tracepoint from kernel and module. If the tracepoint is on the module, + * the module's refcount is incremented and returned as *@tp_mod. Thus, if it is + * not NULL, caller must call module_put(*tp_mod) after used the tracepoint. */ static struct tracepoint *find_tracepoint(const char *tp_name, struct module **tp_mod) @@ -942,6 +941,10 @@ static void reenable_trace_fprobe(struct trace_fprobe *tf) } } +/* + * Find a tracepoint from specified module. In this case, this does not get the + * module's refcount. The caller must ensure the module is not freed. + */ static struct tracepoint *find_tracepoint_in_module(struct module *mod, const char *tp_name) { @@ -977,10 +980,13 @@ static int __tracepoint_probe_module_cb(struct notifier_block *self, reenable_trace_fprobe(tf); } } else if (val == MODULE_STATE_GOING && tp_mod->mod == tf->mod) { - tracepoint_probe_unregister(tf->tpoint, + unregister_fprobe(&tf->fp); + if (trace_fprobe_is_tracepoint(tf)) { + tracepoint_probe_unregister(tf->tpoint, tf->tpoint->probestub, NULL); - tf->tpoint = NULL; - tf->mod = NULL; + tf->tpoint = TRACEPOINT_STUB; + tf->mod = NULL; + } } } mutex_unlock(&event_mutex); @@ -1018,6 +1024,19 @@ static int parse_symbol_and_return(int argc, const char *argv[], if (*is_return) return 0; + if (is_tracepoint) { + tmp = *symbol; + while (*tmp && (isalnum(*tmp) || *tmp == '_')) + tmp++; + if (*tmp) { + /* find a wrong character. */ + trace_probe_log_err(tmp - *symbol, BAD_TP_NAME); + kfree(*symbol); + *symbol = NULL; + return -EINVAL; + } + } + /* If there is $retval, this should be a return fprobe. 
*/ for (i = 2; i < argc; i++) { tmp = strstr(argv[i], "$retval"); @@ -1025,6 +1044,8 @@ static int parse_symbol_and_return(int argc, const char *argv[], if (is_tracepoint) { trace_probe_log_set_index(i); trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE); + kfree(*symbol); + *symbol = NULL; return -EINVAL; } *is_return = true; diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 3b0cea37e0297..fbbc3c719d2f6 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -193,7 +193,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, if (bit < 0) return; - trace_ctx = tracing_gen_ctx(); + trace_ctx = tracing_gen_ctx_dec(); cpu = smp_processor_id(); data = per_cpu_ptr(tr->array_buffer.data, cpu); @@ -298,7 +298,6 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, struct trace_array *tr = op->private; struct trace_array_cpu *data; unsigned int trace_ctx; - unsigned long flags; int bit; int cpu; @@ -325,8 +324,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, if (is_repeat_check(tr, last_info, ip, parent_ip)) goto out; - local_save_flags(flags); - trace_ctx = tracing_gen_ctx_flags(flags); + trace_ctx = tracing_gen_ctx_dec(); process_repeats(tr, ip, parent_ip, last_info, trace_ctx); trace_function(tr, ip, parent_ip, trace_ctx); diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index ebb61ddca749d..655246a9bec34 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -1353,6 +1353,7 @@ void graph_trace_close(struct trace_iterator *iter) if (data) { free_percpu(data->cpu_data); kfree(data); + iter->private = NULL; } } diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index fce064e205706..10ea6d0a35e85 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -233,8 +233,6 @@ static void irqsoff_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); - else - iter->private = NULL; } static void irqsoff_trace_close(struct trace_iterator *iter) diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index 032fdeba37d35..a94790f5cda72 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -2038,7 +2038,6 @@ static int start_kthread(unsigned int cpu) if (IS_ERR(kthread)) { pr_err(BANNER "could not start sampling thread\n"); - stop_per_cpu_kthreads(); return -ENOMEM; } diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 16a5e368e7b77..578919962e5df 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -770,6 +770,10 @@ static int check_prepare_btf_string_fetch(char *typename, #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API +/* + * Add the entry code to store the 'argnum'th parameter and return the offset + * in the entry data buffer where the data will be stored. + */ static int __store_entry_arg(struct trace_probe *tp, int argnum) { struct probe_entry_arg *earg = tp->entry_arg; @@ -793,6 +797,20 @@ static int __store_entry_arg(struct trace_probe *tp, int argnum) tp->entry_arg = earg; } + /* + * The entry code array is repeating the pair of + * [FETCH_OP_ARG(argnum)][FETCH_OP_ST_EDATA(offset of entry data buffer)] + * and the rest of entries are filled with [FETCH_OP_END]. + * + * To reduce the redundant function parameter fetching, we scan the entry + * code array to find the FETCH_OP_ARG which already fetches the 'argnum' + * parameter. 
If it doesn't match, update 'offset' to find the last + * offset. + * If we find the FETCH_OP_END without matching FETCH_OP_ARG entry, we + * will save the entry with FETCH_OP_ARG and FETCH_OP_ST_EDATA, and + * return data offset so that caller can find the data offset in the entry + * data buffer. + */ offset = 0; for (i = 0; i < earg->size - 1; i++) { switch (earg->code[i].op) { @@ -826,6 +844,16 @@ int traceprobe_get_entry_data_size(struct trace_probe *tp) if (!earg) return 0; + /* + * earg->code[] array has an operation sequence which is run in + * the entry handler. + * The sequence stopped by FETCH_OP_END and each data stored in + * the entry data buffer by FETCH_OP_ST_EDATA. The FETCH_OP_ST_EDATA + * stores the data at the data buffer + its offset, and all data are + * "unsigned long" size. The offset must be increased when a data is + * stored. Thus we need to find the last FETCH_OP_ST_EDATA in the + * code array. + */ for (i = 0; i < earg->size; i++) { switch (earg->code[i].op) { case FETCH_OP_END: diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 5803e6a415705..8a6797c2278d9 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -36,7 +36,6 @@ #define MAX_BTF_ARGS_LEN 128 #define MAX_DENTRY_ARGS_LEN 256 #define MAX_STRING_SIZE PATH_MAX -#define MAX_ARG_BUF_LEN (MAX_TRACE_ARGS * MAX_ARG_NAME_LEN) /* Reserved field names */ #define FIELD_STRING_IP "__probe_ip" @@ -481,6 +480,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call, C(NON_UNIQ_SYMBOL, "The symbol is not unique"), \ C(BAD_RETPROBE, "Retprobe address must be an function entry"), \ C(NO_TRACEPOINT, "Tracepoint is not found"), \ + C(BAD_TP_NAME, "Invalid character in tracepoint name"),\ C(BAD_ADDR_SUFFIX, "Invalid probed address suffix"), \ C(NO_GROUP_NAME, "Group name is not specified"), \ C(GROUP_TOO_LONG, "Group name is too long"), \ diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index ae2ace5e515af..039382576bc16 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -170,8 +170,6 @@ static void wakeup_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); - else - iter->private = NULL; } static void wakeup_trace_close(struct trace_iterator *iter) diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c index 8800f5acc0071..2ef2e1b800916 100644 --- a/kernel/vhost_task.c +++ b/kernel/vhost_task.c @@ -133,7 +133,7 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *), vtsk = kzalloc(sizeof(*vtsk), GFP_KERNEL); if (!vtsk) - return NULL; + return ERR_PTR(-ENOMEM); init_completion(&vtsk->exited); mutex_init(&vtsk->exit_mutex); vtsk->data = arg; @@ -145,7 +145,7 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *), tsk = copy_process(NULL, 0, NUMA_NO_NODE, &args); if (IS_ERR(tsk)) { kfree(vtsk); - return NULL; + return ERR_PTR(PTR_ERR(tsk)); } vtsk->task = tsk; diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index d36242fd49364..e55f9810b91ad 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -269,6 +269,15 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) if (ret < 0) goto error; + /* + * pipe_resize_ring() does not update nr_accounted for watch_queue + * pipes, because the above vastly overprovisions. Set nr_accounted on + * and max_usage this pipe to the number that was actually charged to + * the user above via account_pipe_buffers. 
+ */ + pipe->max_usage = nr_pages; + pipe->nr_accounted = nr_pages; + ret = -ENOMEM; pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 262691ba62b7a..4dc72540c3b0f 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -347,8 +347,6 @@ static int __init watchdog_thresh_setup(char *str) } __setup("watchdog_thresh=", watchdog_thresh_setup); -static void __lockup_detector_cleanup(void); - #ifdef CONFIG_SOFTLOCKUP_DETECTOR_INTR_STORM enum stats_per_group { STATS_SYSTEM, @@ -878,11 +876,6 @@ static void __lockup_detector_reconfigure(void) watchdog_hardlockup_start(); cpus_read_unlock(); - /* - * Must be called outside the cpus locked section to prevent - * recursive locking in the perf code. - */ - __lockup_detector_cleanup(); } void lockup_detector_reconfigure(void) @@ -932,24 +925,6 @@ static inline void lockup_detector_setup(void) } #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */ -static void __lockup_detector_cleanup(void) -{ - lockdep_assert_held(&watchdog_mutex); - hardlockup_detector_perf_cleanup(); -} - -/** - * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes - * - * Caller must not hold the cpu hotplug rwsem. - */ -void lockup_detector_cleanup(void) -{ - mutex_lock(&watchdog_mutex); - __lockup_detector_cleanup(); - mutex_unlock(&watchdog_mutex); -} - /** * lockup_detector_soft_poweroff - Interface to stop lockup detector(s) * diff --git a/kernel/watchdog_perf.c b/kernel/watchdog_perf.c index 59c1d86a73a24..2fdb96eaf4933 100644 --- a/kernel/watchdog_perf.c +++ b/kernel/watchdog_perf.c @@ -21,8 +21,6 @@ #include static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); -static DEFINE_PER_CPU(struct perf_event *, dead_event); -static struct cpumask dead_events_mask; static atomic_t watchdog_cpus = ATOMIC_INIT(0); @@ -181,36 +179,12 @@ void watchdog_hardlockup_disable(unsigned int cpu) if (event) { perf_event_disable(event); + perf_event_release_kernel(event); this_cpu_write(watchdog_ev, NULL); - this_cpu_write(dead_event, event); - cpumask_set_cpu(smp_processor_id(), &dead_events_mask); atomic_dec(&watchdog_cpus); } } -/** - * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them - * - * Called from lockup_detector_cleanup(). Serialized by the caller. - */ -void hardlockup_detector_perf_cleanup(void) -{ - int cpu; - - for_each_cpu(cpu, &dead_events_mask) { - struct perf_event *event = per_cpu(dead_event, cpu); - - /* - * Required because for_each_cpu() reports unconditionally - * CPU0 as set on UP kernels. Sigh. 
- */ - if (event) - perf_event_release_kernel(event); - per_cpu(dead_event, cpu) = NULL; - } - cpumask_clear(&dead_events_mask); -} - /** * hardlockup_detector_perf_stop - Globally stop watchdog events * diff --git a/lib/842/842_compress.c b/lib/842/842_compress.c index c02baa4168e16..055356508d97c 100644 --- a/lib/842/842_compress.c +++ b/lib/842/842_compress.c @@ -532,6 +532,8 @@ int sw842_compress(const u8 *in, unsigned int ilen, } if (repeat_count) { ret = add_repeat_template(p, repeat_count); + if (ret) + return ret; repeat_count = 0; if (next == last) /* reached max repeat bits */ goto repeat; diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 908e75a28d90b..bdb37d572e97c 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1428,6 +1428,8 @@ static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec, struct iovec *iov = *iovp; ssize_t ret; + *iovp = NULL; + if (compat) ret = copy_compat_iovec_from_user(iov, uvec, 1); else @@ -1438,7 +1440,6 @@ static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec, ret = import_ubuf(type, iov->iov_base, iov->iov_len, i); if (unlikely(ret)) return ret; - *iovp = NULL; return i->count; } diff --git a/lib/rcuref.c b/lib/rcuref.c index 97f300eca927c..5bd726b71e393 100644 --- a/lib/rcuref.c +++ b/lib/rcuref.c @@ -220,6 +220,7 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath); /** * rcuref_put_slowpath - Slowpath of __rcuref_put() * @ref: Pointer to the reference count + * @cnt: The resulting value of the fastpath decrement * * Invoked when the reference count is outside of the valid zone. * @@ -233,10 +234,8 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath); * with a concurrent get()/put() pair. Caller is not allowed to * deconstruct the protected object. */ -bool rcuref_put_slowpath(rcuref_t *ref) +bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt) { - unsigned int cnt = atomic_read(&ref->refcnt); - /* Did this drop the last reference? */ if (likely(cnt == RCUREF_NOREF)) { /* diff --git a/lib/sg_split.c b/lib/sg_split.c index 60a0babebf2ef..0f89aab5c6715 100644 --- a/lib/sg_split.c +++ b/lib/sg_split.c @@ -88,8 +88,6 @@ static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits) if (!j) { out_sg->offset += split->skip_sg0; out_sg->length -= split->skip_sg0; - } else { - out_sg->offset = 0; } sg_dma_address(out_sg) = 0; sg_dma_len(out_sg) = 0; diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c index c40818ec9c180..49d32e43d06ef 100644 --- a/lib/stackinit_kunit.c +++ b/lib/stackinit_kunit.c @@ -146,6 +146,15 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size, #define INIT_STRUCT_assigned_copy(var_type) \ ; var = *(arg) +/* + * The "did we actually fill the stack?" check value needs + * to be neither 0 nor any of the "pattern" bytes. The + * pattern bytes are compiler, architecture, and type based, + * so we have to pick a value that never appears for those + * combinations. Use 0x99 which is not 0xFF, 0xFE, nor 0xAA. + */ +#define FILL_BYTE 0x99 + /* * @name: unique string name for the test * @var_type: type to be tested for zeroing initialization @@ -168,12 +177,12 @@ static noinline void test_ ## name (struct kunit *test) \ ZERO_CLONE_ ## which(zero); \ /* Clear entire check buffer for 0xFF overlap test. */ \ memset(check_buf, 0x00, sizeof(check_buf)); \ - /* Fill stack with 0xFF. */ \ + /* Fill stack with FILL_BYTE. */ \ ignored = leaf_ ##name((unsigned long)&ignored, 1, \ FETCH_ARG_ ## which(zero)); \ - /* Verify all bytes overwritten with 0xFF. 
*/ \ + /* Verify all bytes overwritten with FILL_BYTE. */ \ for (sum = 0, i = 0; i < target_size; i++) \ - sum += (check_buf[i] != 0xFF); \ + sum += (check_buf[i] != FILL_BYTE); \ /* Clear entire check buffer for later bit tests. */ \ memset(check_buf, 0x00, sizeof(check_buf)); \ /* Extract stack-defined variable contents. */ \ @@ -184,7 +193,8 @@ static noinline void test_ ## name (struct kunit *test) \ * possible between the two leaf function calls. \ */ \ KUNIT_ASSERT_EQ_MSG(test, sum, 0, \ - "leaf fill was not 0xFF!?\n"); \ + "leaf fill was not 0x%02X!?\n", \ + FILL_BYTE); \ \ /* Validate that compiler lined up fill and target. */ \ KUNIT_ASSERT_TRUE_MSG(test, \ @@ -196,9 +206,9 @@ static noinline void test_ ## name (struct kunit *test) \ (int)((ssize_t)(uintptr_t)fill_start - \ (ssize_t)(uintptr_t)target_start)); \ \ - /* Look for any bytes still 0xFF in check region. */ \ + /* Validate check region has no FILL_BYTE bytes. */ \ for (sum = 0, i = 0; i < target_size; i++) \ - sum += (check_buf[i] == 0xFF); \ + sum += (check_buf[i] == FILL_BYTE); \ \ if (sum != 0 && xfail) \ kunit_skip(test, \ @@ -233,12 +243,12 @@ static noinline int leaf_ ## name(unsigned long sp, bool fill, \ * stack frame of SOME kind... \ */ \ memset(buf, (char)(sp & 0xff), sizeof(buf)); \ - /* Fill variable with 0xFF. */ \ + /* Fill variable with FILL_BYTE. */ \ if (fill) { \ fill_start = &var; \ fill_size = sizeof(var); \ memset(fill_start, \ - (char)((sp & 0xff) | forced_mask), \ + FILL_BYTE & forced_mask, \ fill_size); \ } \ \ @@ -380,7 +390,7 @@ static int noinline __leaf_switch_none(int path, bool fill) fill_start = &var; fill_size = sizeof(var); - memset(fill_start, forced_mask | 0x55, fill_size); + memset(fill_start, (forced_mask | 0x55) & FILL_BYTE, fill_size); } memcpy(check_buf, target_start, target_size); break; @@ -391,7 +401,7 @@ static int noinline __leaf_switch_none(int path, bool fill) fill_start = &var; fill_size = sizeof(var); - memset(fill_start, forced_mask | 0xaa, fill_size); + memset(fill_start, (forced_mask | 0xaa) & FILL_BYTE, fill_size); } memcpy(check_buf, target_start, target_size); break; diff --git a/lib/string.c b/lib/string.c index 76327b51e36f2..e657809fa7189 100644 --- a/lib/string.c +++ b/lib/string.c @@ -113,6 +113,7 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count) if (count == 0 || WARN_ON_ONCE(count > INT_MAX)) return -E2BIG; +#ifndef CONFIG_DCACHE_WORD_ACCESS #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS /* * If src is unaligned, don't cross a page boundary, @@ -127,12 +128,14 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count) /* If src or dest is unaligned, don't do word-at-a-time. */ if (((long) dest | (long) src) & (sizeof(long) - 1)) max = 0; +#endif #endif /* - * read_word_at_a_time() below may read uninitialized bytes after the - * trailing zero and use them in comparisons. Disable this optimization - * under KMSAN to prevent false positive reports. + * load_unaligned_zeropad() or read_word_at_a_time() below may read + * uninitialized bytes after the trailing zero and use them in + * comparisons. Disable this optimization under KMSAN to prevent + * false positive reports. 
*/ if (IS_ENABLED(CONFIG_KMSAN)) max = 0; @@ -140,7 +143,11 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count) while (max >= sizeof(unsigned long)) { unsigned long c, data; +#ifdef CONFIG_DCACHE_WORD_ACCESS + c = load_unaligned_zeropad(src+res); +#else c = read_word_at_a_time(src+res); +#endif if (has_zero(c, &data, &constants)) { data = prep_zero_mask(c, data, &constants); data = create_zero_mask(data); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index c5e2ec9303c5d..a69e71a1ca55e 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -2255,7 +2255,7 @@ int __init no_hash_pointers_enable(char *str) early_param("no_hash_pointers", no_hash_pointers_enable); /* Used for Rust formatting ('%pA'). */ -char *rust_fmt_argument(char *buf, char *end, void *ptr); +char *rust_fmt_argument(char *buf, char *end, const void *ptr); /* * Show a '%p' thing. A kernel extension is that the '%p' is followed diff --git a/lib/zstd/common/portability_macros.h b/lib/zstd/common/portability_macros.h index 0e3b2c0a527db..0dde8bf56595e 100644 --- a/lib/zstd/common/portability_macros.h +++ b/lib/zstd/common/portability_macros.h @@ -55,7 +55,7 @@ #ifndef DYNAMIC_BMI2 #if ((defined(__clang__) && __has_attribute(__target__)) \ || (defined(__GNUC__) \ - && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \ + && (__GNUC__ >= 11))) \ && (defined(__x86_64__) || defined(_M_X64)) \ && !defined(__BMI2__) # define DYNAMIC_BMI2 1 diff --git a/mm/compaction.c b/mm/compaction.c index 384e4672998e5..eb5474dea04d9 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -980,13 +980,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, } if (PageHuge(page)) { + const unsigned int order = compound_order(page); /* * skip hugetlbfs if we are not compacting for pages * bigger than its order. THPs and other compound pages * are handled below. 
*/ if (!cc->alloc_contig) { - const unsigned int order = compound_order(page); if (order <= MAX_PAGE_ORDER) { low_pfn += (1UL << order) - 1; @@ -1010,8 +1010,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, /* Do not report -EBUSY down the chain */ if (ret == -EBUSY) ret = 0; - low_pfn += compound_nr(page) - 1; - nr_scanned += compound_nr(page) - 1; + low_pfn += (1UL << order) - 1; + nr_scanned += (1UL << order) - 1; goto isolate_fail; } @@ -3164,6 +3164,7 @@ static int kcompactd(void *p) if (!cpumask_empty(cpumask)) set_cpus_allowed_ptr(tsk, cpumask); + current->flags |= PF_KCOMPACTD; set_freezable(); pgdat->kcompactd_max_order = 0; @@ -3220,6 +3221,8 @@ static int kcompactd(void *p) pgdat->proactive_compact_trigger = false; } + current->flags &= ~PF_KCOMPACTD; + return 0; } diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c index d25d99cb5f2bb..d511be201c4c9 100644 --- a/mm/damon/ops-common.c +++ b/mm/damon/ops-common.c @@ -24,7 +24,7 @@ struct folio *damon_get_folio(unsigned long pfn) struct page *page = pfn_to_online_page(pfn); struct folio *folio; - if (!page || PageTail(page)) + if (!page) return NULL; folio = page_folio(page); diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index a9ff35341d65d..8813038abc6fb 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -264,11 +264,14 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s) damos_add_filter(s, filter); } - for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) { + addr = r->ar.start; + while (addr < r->ar.end) { struct folio *folio = damon_get_folio(PHYS_PFN(addr)); - if (!folio) + if (!folio) { + addr += PAGE_SIZE; continue; + } if (damos_pa_filter_out(s, folio)) goto put_folio; @@ -282,6 +285,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s) else list_add(&folio->lru, &folio_list); put_folio: + addr += folio_size(folio); folio_put(folio); } if (install_young_filter) @@ -296,11 +300,14 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate( { unsigned long addr, applied = 0; - for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) { + addr = r->ar.start; + while (addr < r->ar.end) { struct folio *folio = damon_get_folio(PHYS_PFN(addr)); - if (!folio) + if (!folio) { + addr += PAGE_SIZE; continue; + } if (damos_pa_filter_out(s, folio)) goto put_folio; @@ -311,6 +318,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate( folio_deactivate(folio); applied += folio_nr_pages(folio); put_folio: + addr += folio_size(folio); folio_put(folio); } return applied * PAGE_SIZE; @@ -454,11 +462,14 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s) unsigned long addr, applied; LIST_HEAD(folio_list); - for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) { + addr = r->ar.start; + while (addr < r->ar.end) { struct folio *folio = damon_get_folio(PHYS_PFN(addr)); - if (!folio) + if (!folio) { + addr += PAGE_SIZE; continue; + } if (damos_pa_filter_out(s, folio)) goto put_folio; @@ -467,6 +478,7 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s) goto put_folio; list_add(&folio->lru, &folio_list); put_folio: + addr += folio_size(folio); folio_put(folio); } applied = damon_pa_migrate_pages(&folio_list, s->target_nid); diff --git a/mm/filemap.c b/mm/filemap.c index 05adf0392625d..fa18e71f9c889 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1966,8 +1966,19 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t 
index, if (err == -EEXIST) goto repeat; - if (err) + if (err) { + /* + * When NOWAIT I/O fails to allocate folios this could + * be due to a nonblocking memory allocation and not + * because the system actually is out of memory. + * Return -EAGAIN so that there caller retries in a + * blocking fashion instead of propagating -ENOMEM + * to the application. + */ + if ((fgp_flags & FGP_NOWAIT) && err == -ENOMEM) + err = -EAGAIN; return ERR_PTR(err); + } /* * filemap_add_folio locks the page, and for mmap * we expect an unlocked page. @@ -2211,6 +2222,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping, *start = folio->index + nr; goto out; } + xas_advance(&xas, folio_next_index(folio) - 1); continue; put_folio: folio_put(folio); diff --git a/mm/gup.c b/mm/gup.c index 44c536904a83b..90866b827b60f 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1283,6 +1283,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma)) return -EOPNOTSUPP; + if ((gup_flags & FOLL_SPLIT_PMD) && is_vm_hugetlb_page(vma)) + return -EOPNOTSUPP; + if (vma_is_secretmem(vma)) return -EFAULT; @@ -2210,8 +2213,8 @@ size_t fault_in_safe_writeable(const char __user *uaddr, size_t size) } while (start != end); mmap_read_unlock(mm); - if (size > (unsigned long)uaddr - start) - return size - ((unsigned long)uaddr - start); + if (size > start - (unsigned long)uaddr) + return size - (start - (unsigned long)uaddr); return 0; } EXPORT_SYMBOL(fault_in_safe_writeable); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index f127b61f04a82..40ac11e294231 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3224,7 +3224,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, folio_account_cleaned(tail, inode_to_wb(folio->mapping->host)); __filemap_remove_folio(tail, NULL); - folio_put(tail); + folio_put_refs(tail, folio_nr_pages(tail)); } else if (!PageAnon(page)) { __xa_store(&folio->mapping->i_pages, head[i].index, head + i, 0); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index bdee6d3ab0e7e..ad646fe6688a4 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2955,6 +2955,14 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) return ret; } +void wait_for_freed_hugetlb_folios(void) +{ + if (llist_empty(&hpage_freelist)) + return; + + flush_work(&free_hpage_work); +} + struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) { @@ -4855,7 +4863,7 @@ static struct ctl_table hugetlb_table[] = { }, }; -static void hugetlb_sysctl_init(void) +static void __init hugetlb_sysctl_init(void) { register_sysctl_init("vm", hugetlb_table); } @@ -5395,7 +5403,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, if (src_ptl != dst_ptl) spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); - pte = huge_ptep_get_and_clear(mm, old_addr, src_pte); + pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz); if (need_clear_uffd_wp && pte_marker_uffd_wp(pte)) huge_pte_clear(mm, new_addr, dst_pte, sz); @@ -5570,7 +5578,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); } - pte = huge_ptep_get_and_clear(mm, address, ptep); + pte = huge_ptep_get_and_clear(mm, address, ptep, sz); tlb_remove_huge_tlb_entry(h, tlb, ptep, address); if (huge_pte_dirty(pte)) set_page_dirty(page); diff --git a/mm/internal.h b/mm/internal.h index 9bb098e78f155..398633d6b6c9f 100644 --- a/mm/internal.h +++ 
b/mm/internal.h @@ -1101,7 +1101,7 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask) * mm/memory-failure.c */ #ifdef CONFIG_MEMORY_FAILURE -void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu); +int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill); void shake_folio(struct folio *folio); extern int hwpoison_filter(struct page *p); @@ -1123,8 +1123,9 @@ void add_to_kill_ksm(struct task_struct *tsk, struct page *p, unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); #else -static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu) +static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill) { + return -EBUSY; } #endif diff --git a/mm/kasan/kasan_test_rust.rs b/mm/kasan/kasan_test_rust.rs index caa7175964ef6..5b34edf30e724 100644 --- a/mm/kasan/kasan_test_rust.rs +++ b/mm/kasan/kasan_test_rust.rs @@ -11,11 +11,12 @@ use kernel::prelude::*; /// drop the vector, and touch it. #[no_mangle] pub extern "C" fn kasan_test_rust_uaf() -> u8 { - let mut v: Vec = Vec::new(); + let mut v: KVec = KVec::new(); for _ in 0..4096 { v.push(0x42, GFP_KERNEL).unwrap(); } let ptr: *mut u8 = addr_of_mut!(v[2048]); drop(v); + // SAFETY: Incorrect, on purpose. unsafe { *ptr } } diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c index 3ea50f09311fd..3df45c25c1f62 100644 --- a/mm/kmsan/hooks.c +++ b/mm/kmsan/hooks.c @@ -357,6 +357,7 @@ void kmsan_handle_dma(struct page *page, size_t offset, size_t size, size -= to_go; } } +EXPORT_SYMBOL_GPL(kmsan_handle_dma); void kmsan_handle_dma_sg(struct scatterlist *sg, int nents, enum dma_data_direction dir) diff --git a/mm/madvise.c b/mm/madvise.c index ff139e57cca29..c211e8fa4e49b 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -920,7 +920,16 @@ static long madvise_dontneed_free(struct vm_area_struct *vma, */ end = vma->vm_end; } - VM_WARN_ON(start >= end); + /* + * If the memory region between start and end was + * originally backed by 4kB pages and then remapped to + * be backed by hugepages while mmap_lock was dropped, + * the adjustment for hugetlb vma above may have rounded + * end down to the start address. + */ + if (start == end) + return 0; + VM_WARN_ON(start > end); } if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ae1d184d035a4..2d1e402f06f22 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1882,9 +1882,18 @@ void drain_all_stock(struct mem_cgroup *root_memcg) static int memcg_hotplug_cpu_dead(unsigned int cpu) { struct memcg_stock_pcp *stock; + struct obj_cgroup *old; + unsigned long flags; stock = &per_cpu(memcg_stock, cpu); + + /* drain_obj_stock requires stock_lock */ + local_lock_irqsave(&memcg_stock.stock_lock, flags); + old = drain_obj_stock(stock); + local_unlock_irqrestore(&memcg_stock.stock_lock, flags); + drain_stock(stock); + obj_cgroup_put(old); return 0; } diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 96ce31e5a203b..ec1c71abe88df 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -879,12 +879,17 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn, mmap_read_lock(p->mm); ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops, (void *)&priv); + /* + * ret = 1 when CMCI wins, regardless of whether try_to_unmap() + * succeeds or fails, then kill the process with SIGBUS. + * ret = 0 when poison page is a clean page and it's dropped, no + * SIGBUS is needed. 
+ */ if (ret == 1 && priv.tk.addr) kill_proc(&priv.tk, pfn, flags); - else - ret = 0; mmap_read_unlock(p->mm); - return ret > 0 ? -EHWPOISON : -EFAULT; + + return ret > 0 ? -EHWPOISON : 0; } /* @@ -1554,11 +1559,35 @@ static int get_hwpoison_page(struct page *p, unsigned long flags) return ret; } -void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu) +int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill) { - if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { - struct address_space *mapping; + enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON; + struct address_space *mapping; + + if (folio_test_swapcache(folio)) { + pr_err("%#lx: keeping poisoned page in swap cache\n", pfn); + ttu &= ~TTU_HWPOISON; + } + + /* + * Propagate the dirty bit from PTEs to struct page first, because we + * need this to decide if we should kill or just drop the page. + * XXX: the dirty test could be racy: set_page_dirty() may not always + * be called inside page lock (it's recommended but not enforced). + */ + mapping = folio_mapping(folio); + if (!must_kill && !folio_test_dirty(folio) && mapping && + mapping_can_writeback(mapping)) { + if (folio_mkclean(folio)) { + folio_set_dirty(folio); + } else { + ttu &= ~TTU_HWPOISON; + pr_info("%#lx: corrupted page was clean: dropped without side effects\n", + pfn); + } + } + if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { /* * For hugetlb folios in shared mappings, try_to_unmap * could potentially call huge_pmd_unshare. Because of @@ -1570,7 +1599,7 @@ void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu) if (!mapping) { pr_info("%#lx: could not lock mapping for mapped hugetlb folio\n", folio_pfn(folio)); - return; + return -EBUSY; } try_to_unmap(folio, ttu|TTU_RMAP_LOCKED); @@ -1578,6 +1607,8 @@ void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu) } else { try_to_unmap(folio, ttu); } + + return folio_mapped(folio) ? -EBUSY : 0; } /* @@ -1587,8 +1618,6 @@ void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu) static bool hwpoison_user_mappings(struct folio *folio, struct page *p, unsigned long pfn, int flags) { - enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON; - struct address_space *mapping; LIST_HEAD(tokill); bool unmap_success; int forcekill; @@ -1611,29 +1640,6 @@ static bool hwpoison_user_mappings(struct folio *folio, struct page *p, if (!folio_mapped(folio)) return true; - if (folio_test_swapcache(folio)) { - pr_err("%#lx: keeping poisoned page in swap cache\n", pfn); - ttu &= ~TTU_HWPOISON; - } - - /* - * Propagate the dirty bit from PTEs to struct page first, because we - * need this to decide if we should kill or just drop the page. - * XXX: the dirty test could be racy: set_page_dirty() may not always - * be called inside page lock (it's recommended but not enforced). - */ - mapping = folio_mapping(folio); - if (!(flags & MF_MUST_KILL) && !folio_test_dirty(folio) && mapping && - mapping_can_writeback(mapping)) { - if (folio_mkclean(folio)) { - folio_set_dirty(folio); - } else { - ttu &= ~TTU_HWPOISON; - pr_info("%#lx: corrupted page was clean: dropped without side effects\n", - pfn); - } - } - /* * First collect all the processes that have the page * mapped in dirty form. 
This has to be done before try_to_unmap, @@ -1641,9 +1647,7 @@ static bool hwpoison_user_mappings(struct folio *folio, struct page *p, */ collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED); - unmap_poisoned_folio(folio, ttu); - - unmap_success = !folio_mapped(folio); + unmap_success = !unmap_poisoned_folio(folio, pfn, flags & MF_MUST_KILL); if (!unmap_success) pr_err("%#lx: failed to unmap page (folio mapcount=%d)\n", pfn, folio_mapcount(folio)); diff --git a/mm/memory.c b/mm/memory.c index d322ddfe67916..b6daa0e673a54 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1356,12 +1356,12 @@ int copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) { pgd_t *src_pgd, *dst_pgd; - unsigned long next; unsigned long addr = src_vma->vm_start; unsigned long end = src_vma->vm_end; struct mm_struct *dst_mm = dst_vma->vm_mm; struct mm_struct *src_mm = src_vma->vm_mm; struct mmu_notifier_range range; + unsigned long next, pfn; bool is_cow; int ret; @@ -1372,11 +1372,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma); if (unlikely(src_vma->vm_flags & VM_PFNMAP)) { - /* - * We do not free on error cases below as remove_vma - * gets called on error from higher level routine - */ - ret = track_pfn_copy(src_vma); + ret = track_pfn_copy(dst_vma, src_vma, &pfn); if (ret) return ret; } @@ -1413,7 +1409,6 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) continue; if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd, addr, next))) { - untrack_pfn_clear(dst_vma); ret = -ENOMEM; break; } @@ -1423,6 +1418,8 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) raw_write_seqcount_end(&src_mm->write_protect_seq); mmu_notifier_invalidate_range_end(&range); } + if (ret && unlikely(src_vma->vm_flags & VM_PFNMAP)) + untrack_pfn_copy(dst_vma, pfn); return ret; } @@ -2814,11 +2811,11 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, if (fn) { do { if (create || !pte_none(ptep_get(pte))) { - err = fn(pte++, addr, data); + err = fn(pte, addr, data); if (err) break; } - } while (addr += PAGE_SIZE, addr != end); + } while (pte++, addr += PAGE_SIZE, addr != end); } *mask |= PGTBL_PTE_MODIFIED; @@ -2957,8 +2954,10 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, next = pgd_addr_end(addr, end); if (pgd_none(*pgd) && !create) continue; - if (WARN_ON_ONCE(pgd_leaf(*pgd))) - return -EINVAL; + if (WARN_ON_ONCE(pgd_leaf(*pgd))) { + err = -EINVAL; + break; + } if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) { if (!create) continue; @@ -5077,7 +5076,11 @@ vm_fault_t finish_fault(struct vm_fault *vmf) bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED); int type, nr_pages; - unsigned long addr = vmf->address; + unsigned long addr; + bool needs_fallback = false; + +fallback: + addr = vmf->address; /* Did we COW the page? */ if (is_cow) @@ -5116,7 +5119,8 @@ vm_fault_t finish_fault(struct vm_fault *vmf) * approach also applies to non-anonymous-shmem faults to avoid * inflating the RSS of the process. 
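* A fault that already found part of the PTE range populated retries
* with needs_fallback set and then maps only the single faulting page.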
*/ - if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) { + if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma)) || + unlikely(needs_fallback)) { nr_pages = 1; } else if (nr_pages > 1) { pgoff_t idx = folio_page_idx(folio, page); @@ -5152,9 +5156,9 @@ vm_fault_t finish_fault(struct vm_fault *vmf) ret = VM_FAULT_NOPAGE; goto unlock; } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { - update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); - ret = VM_FAULT_NOPAGE; - goto unlock; + needs_fallback = true; + pte_unmap_unlock(vmf->pte, vmf->ptl); + goto fallback; } folio_ref_add(folio, nr_pages - 1); @@ -6711,10 +6715,8 @@ void __might_fault(const char *file, int line) if (pagefault_disabled()) return; __might_sleep(file, line); -#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) if (current->mm) might_lock_read(¤t->mm->mmap_lock); -#endif } EXPORT_SYMBOL(__might_fault); #endif diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 621ae1015106c..0a42e9a8caba2 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1795,27 +1795,24 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) if (folio_test_large(folio)) pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1; - /* - * HWPoison pages have elevated reference counts so the migration would - * fail on them. It also doesn't make any sense to migrate them in the - * first place. Still try to unmap such a page in case it is still mapped - * (keep the unmap as the catch all safety net). - */ - if (folio_test_hwpoison(folio) || - (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) { - if (WARN_ON(folio_test_lru(folio))) - folio_isolate_lru(folio); - if (folio_mapped(folio)) - unmap_poisoned_folio(folio, TTU_IGNORE_MLOCK); - continue; - } - if (!folio_try_get(folio)) continue; if (unlikely(page_folio(page) != folio)) goto put_folio; + if (folio_contain_hwpoisoned_page(folio)) { + if (WARN_ON(folio_test_lru(folio))) + folio_isolate_lru(folio); + if (folio_mapped(folio)) { + folio_lock(folio); + unmap_poisoned_folio(folio, pfn, false); + folio_unlock(folio); + } + + goto put_folio; + } + if (!isolate_folio_to_list(folio, &source)) { if (__ratelimit(&migrate_rs)) { pr_warn("failed to isolate pfn %lx\n", diff --git a/mm/migrate.c b/mm/migrate.c index dfa24e41e8f95..25e7438af968a 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -526,15 +526,13 @@ static int __folio_migrate_mapping(struct address_space *mapping, if (folio_test_anon(folio) && folio_test_large(folio)) mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); folio_ref_add(newfolio, nr); /* add cache reference */ - if (folio_test_swapbacked(folio)) { + if (folio_test_swapbacked(folio)) __folio_set_swapbacked(newfolio); - if (folio_test_swapcache(folio)) { - folio_set_swapcache(newfolio); - newfolio->private = folio_get_private(folio); - } + if (folio_test_swapcache(folio)) { + folio_set_swapcache(newfolio); + newfolio->private = folio_get_private(folio); entries = nr; } else { - VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); entries = 1; } diff --git a/mm/migrate_device.c b/mm/migrate_device.c index 9cf26592ac934..5bd888223cc8b 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -840,20 +840,15 @@ void migrate_device_finalize(unsigned long *src_pfns, dst = src; } + if (!folio_is_zone_device(dst)) + folio_add_lru(dst); remove_migration_ptes(src, dst, 0); folio_unlock(src); - - if (folio_is_zone_device(src)) - folio_put(src); - else - folio_putback_lru(src); + folio_put(src); if (dst != src) { 
folio_unlock(dst); - if (folio_is_zone_device(dst)) - folio_put(dst); - else - folio_putback_lru(dst); + folio_put(dst); } } } diff --git a/mm/mremap.c b/mm/mremap.c index 1b2edd65c2a17..12af89b4342a7 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -696,8 +696,8 @@ static unsigned long move_vma(struct vm_area_struct *vma, unsigned long vm_flags = vma->vm_flags; unsigned long new_pgoff; unsigned long moved_len; - unsigned long account_start = 0; - unsigned long account_end = 0; + bool account_start = false; + bool account_end = false; unsigned long hiwater_vm; int err = 0; bool need_rmap_locks; @@ -781,9 +781,9 @@ static unsigned long move_vma(struct vm_area_struct *vma, if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) { vm_flags_clear(vma, VM_ACCOUNT); if (vma->vm_start < old_addr) - account_start = vma->vm_start; + account_start = true; if (vma->vm_end > old_addr + old_len) - account_end = vma->vm_end; + account_end = true; } /* @@ -823,7 +823,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, /* OOM: unable to split vma, just get accounts right */ if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) vm_acct_memory(old_len >> PAGE_SHIFT); - account_start = account_end = 0; + account_start = account_end = false; } if (vm_flags & VM_LOCKED) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index de65e8b4f75f2..fd4e0e1cd65e4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4243,6 +4243,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, restart: compaction_retries = 0; no_progress_loops = 0; + compact_result = COMPACT_SKIPPED; compact_priority = DEF_COMPACT_PRIORITY; cpuset_mems_cookie = read_mems_allowed_begin(); zonelist_iter_cookie = zonelist_iter_begin(); @@ -5991,11 +5992,10 @@ static void setup_per_zone_lowmem_reserve(void) for (j = i + 1; j < MAX_NR_ZONES; j++) { struct zone *upper_zone = &pgdat->node_zones[j]; - bool empty = !zone_managed_pages(upper_zone); managed_pages += zone_managed_pages(upper_zone); - if (clear || empty) + if (clear) zone->lowmem_reserve[j] = 0; else zone->lowmem_reserve[j] = managed_pages / ratio; @@ -7094,7 +7094,7 @@ static inline bool has_unaccepted_memory(void) static bool cond_accept_memory(struct zone *zone, unsigned int order) { - long to_accept; + long to_accept, wmark; bool ret = false; if (!has_unaccepted_memory()) @@ -7103,8 +7103,18 @@ static bool cond_accept_memory(struct zone *zone, unsigned int order) if (list_empty(&zone->unaccepted_pages)) return false; + wmark = promo_wmark_pages(zone); + + /* + * Watermarks have not been initialized yet. + * + * Accepting one MAX_ORDER page to ensure progress. + */ + if (!wmark) + return try_to_accept_memory_one(zone); + /* How much to accept to get to promo watermark? */ - to_accept = promo_wmark_pages(zone) - + to_accept = wmark - (zone_page_state(zone, NR_FREE_PAGES) - __zone_watermark_unusable_free(zone, order, 0) - zone_page_state(zone, NR_UNACCEPTED)); diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 7e04047977cfe..6989c5ffd4741 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -611,6 +611,16 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, struct zone *zone; int ret; + /* + * Due to the deferred freeing of hugetlb folios, the hugepage folios may + * not immediately release to the buddy system. This can cause PageBuddy() + * to fail in __test_page_isolated_in_pageblock(). 
To ensure that the + * hugetlb folios are properly released back to the buddy system, we + * invoke the wait_for_freed_hugetlb_folios() function to wait for the + * release to complete. + */ + wait_for_freed_hugetlb_folios(); + /* * Note: pageblock_nr_pages != MAX_PAGE_ORDER. Then, chunks of free * pages are not aligned to pageblock_nr_pages. diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index ae5cc42aa2087..585a53f7b06f0 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -77,6 +77,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp) * mapped at the @pvmw->pte * @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range * for checking + * @pte_nr: the number of small pages described by @pvmw->pte. * * page_vma_mapped_walk() found a place where pfn range is *potentially* * mapped. check_pte() has to validate this. @@ -93,7 +94,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp) * Otherwise, return false. * */ -static bool check_pte(struct page_vma_mapped_walk *pvmw) +static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr) { unsigned long pfn; pte_t ptent = ptep_get(pvmw->pte); @@ -126,7 +127,11 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw) pfn = pte_pfn(ptent); } - return (pfn - pvmw->pfn) < pvmw->nr_pages; + if ((pfn + pte_nr - 1) < pvmw->pfn) + return false; + if (pfn > (pvmw->pfn + pvmw->nr_pages - 1)) + return false; + return true; } /* Returns true if the two ranges overlap. Careful to not overflow. */ @@ -201,7 +206,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) return false; pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte); - if (!check_pte(pvmw)) + if (!check_pte(pvmw, pages_per_huge_page(hstate))) return not_found(pvmw); return true; } @@ -284,7 +289,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) goto next_pte; } this_pte: - if (check_pte(pvmw)) + if (check_pte(pvmw, 1)) return true; next_pte: do { diff --git a/mm/rmap.c b/mm/rmap.c index 73d5998677d40..674362de029d2 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2488,7 +2488,7 @@ static bool folio_make_device_exclusive(struct folio *folio, * Restrict to anonymous folios for now to avoid potential writeback * issues. 
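* Hugetlb folios are rejected as well, since device-exclusive entries
* are not supported for hugetlb mappings.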
*/ - if (!folio_test_anon(folio)) + if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) return false; rmap_walk(folio, &rwc); diff --git a/mm/shmem.c b/mm/shmem.c index 5960e5035f983..88fd6e2a2dcf8 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3042,8 +3042,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping, if (ret) return ret; - if (folio_test_hwpoison(folio) || - (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) { + if (folio_contain_hwpoisoned_page(folio)) { folio_unlock(folio); folio_put(folio); return -EIO; diff --git a/mm/slub.c b/mm/slub.c index b9447a955f611..c26d9cd107ccb 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1960,6 +1960,11 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts, #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \ __GFP_ACCOUNT | __GFP_NOFAIL) +static inline void init_slab_obj_exts(struct slab *slab) +{ + slab->obj_exts = 0; +} + int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, gfp_t gfp, bool new_slab) { @@ -2044,6 +2049,10 @@ static inline bool need_slab_obj_ext(void) #else /* CONFIG_SLAB_OBJ_EXT */ +static inline void init_slab_obj_exts(struct slab *slab) +{ +} + static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, gfp_t gfp, bool new_slab) { @@ -2613,6 +2622,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) slab->objects = oo_objects(oo); slab->inuse = 0; slab->frozen = 0; + init_slab_obj_exts(slab); account_slab(slab, oo_order(oo), s, flags); diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index ce13c40626472..748b52ce85675 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -18,6 +18,7 @@ #include #include #include "internal.h" +#include "swap.h" static __always_inline bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end) @@ -1067,15 +1068,13 @@ static int move_present_pte(struct mm_struct *mm, return err; } -static int move_swap_pte(struct mm_struct *mm, +static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, pte_t *dst_pte, pte_t *src_pte, pte_t orig_dst_pte, pte_t orig_src_pte, - spinlock_t *dst_ptl, spinlock_t *src_ptl) + spinlock_t *dst_ptl, spinlock_t *src_ptl, + struct folio *src_folio) { - if (!pte_swp_exclusive(orig_src_pte)) - return -EBUSY; - double_pt_lock(dst_ptl, src_ptl); if (!pte_same(ptep_get(src_pte), orig_src_pte) || @@ -1084,6 +1083,16 @@ static int move_swap_pte(struct mm_struct *mm, return -EAGAIN; } + /* + * The src_folio resides in the swapcache, requiring an update to its + * index and mapping to align with the dst_vma, where a swap-in may + * occur and hit the swapcache after moving the PTE. 
+ */ + if (src_folio) { + folio_move_anon_rmap(src_folio, dst_vma); + src_folio->index = linear_page_index(dst_vma, dst_addr); + } + orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte); set_pte_at(mm, dst_addr, dst_pte, orig_src_pte); double_pt_unlock(dst_ptl, src_ptl); @@ -1130,6 +1139,7 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, __u64 mode) { swp_entry_t entry; + struct swap_info_struct *si = NULL; pte_t orig_src_pte, orig_dst_pte; pte_t src_folio_pte; spinlock_t *src_ptl, *dst_ptl; @@ -1215,6 +1225,7 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, */ if (!src_folio) { struct folio *folio; + bool locked; /* * Pin the page while holding the lock to be sure the @@ -1234,14 +1245,28 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, goto out; } + locked = folio_trylock(folio); + /* + * We avoid waiting for folio lock with a raised + * refcount for large folios because extra refcounts + * will result in split_folio() failing later and + * retrying. If multiple tasks are trying to move a + * large folio we can end up livelocking. + */ + if (!locked && folio_test_large(folio)) { + spin_unlock(src_ptl); + err = -EAGAIN; + goto out; + } + folio_get(folio); src_folio = folio; src_folio_pte = orig_src_pte; spin_unlock(src_ptl); - if (!folio_trylock(src_folio)) { - pte_unmap(&orig_src_pte); - pte_unmap(&orig_dst_pte); + if (!locked) { + pte_unmap(src_pte); + pte_unmap(dst_pte); src_pte = dst_pte = NULL; /* now we can block and wait */ folio_lock(src_folio); @@ -1257,8 +1282,8 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, /* at this point we have src_folio locked */ if (folio_test_large(src_folio)) { /* split_folio() can block */ - pte_unmap(&orig_src_pte); - pte_unmap(&orig_dst_pte); + pte_unmap(src_pte); + pte_unmap(dst_pte); src_pte = dst_pte = NULL; err = split_folio(src_folio); if (err) @@ -1283,8 +1308,8 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, goto out; } if (!anon_vma_trylock_write(src_anon_vma)) { - pte_unmap(&orig_src_pte); - pte_unmap(&orig_dst_pte); + pte_unmap(src_pte); + pte_unmap(dst_pte); src_pte = dst_pte = NULL; /* now we can block and wait */ anon_vma_lock_write(src_anon_vma); @@ -1297,11 +1322,13 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, orig_dst_pte, orig_src_pte, dst_ptl, src_ptl, src_folio); } else { + struct folio *folio = NULL; + entry = pte_to_swp_entry(orig_src_pte); if (non_swap_entry(entry)) { if (is_migration_entry(entry)) { - pte_unmap(&orig_src_pte); - pte_unmap(&orig_dst_pte); + pte_unmap(src_pte); + pte_unmap(dst_pte); src_pte = dst_pte = NULL; migration_entry_wait(mm, src_pmd, src_addr); err = -EAGAIN; @@ -1310,10 +1337,53 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, goto out; } - err = move_swap_pte(mm, dst_addr, src_addr, - dst_pte, src_pte, - orig_dst_pte, orig_src_pte, - dst_ptl, src_ptl); + if (!pte_swp_exclusive(orig_src_pte)) { + err = -EBUSY; + goto out; + } + + si = get_swap_device(entry); + if (unlikely(!si)) { + err = -EAGAIN; + goto out; + } + /* + * Verify the existence of the swapcache. If present, the folio's + * index and mapping must be updated even when the PTE is a swap + * entry. The anon_vma lock is not taken during this process since + * the folio has already been unmapped, and the swap entry is + * exclusive, preventing rmap walks. 
+ * + * For large folios, return -EBUSY immediately, as split_folio() + * also returns -EBUSY when attempting to split unmapped large + * folios in the swapcache. This issue needs to be resolved + * separately to allow proper handling. + */ + if (!src_folio) + folio = filemap_get_folio(swap_address_space(entry), + swap_cache_index(entry)); + if (!IS_ERR_OR_NULL(folio)) { + if (folio_test_large(folio)) { + err = -EBUSY; + folio_put(folio); + goto out; + } + src_folio = folio; + src_folio_pte = orig_src_pte; + if (!folio_trylock(src_folio)) { + pte_unmap(src_pte); + pte_unmap(dst_pte); + src_pte = dst_pte = NULL; + put_swap_device(si); + si = NULL; + /* now we can block and wait */ + folio_lock(src_folio); + goto retry; + } + } + err = move_swap_pte(mm, dst_vma, dst_addr, src_addr, dst_pte, src_pte, + orig_dst_pte, orig_src_pte, + dst_ptl, src_ptl, src_folio); } out: @@ -1330,6 +1400,8 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, if (src_pte) pte_unmap(src_pte); mmu_notifier_invalidate_range_end(&range); + if (si) + put_swap_device(si); return err; } @@ -1801,6 +1873,14 @@ struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi, unsigned long end) { struct vm_area_struct *ret; + bool give_up_on_oom = false; + + /* + * If we are modifying only and not splitting, just give up on the merge + * if OOM prevents us from merging successfully. + */ + if (start == vma->vm_start && end == vma->vm_end) + give_up_on_oom = true; /* Reset ptes for the whole vma range if wr-protected */ if (userfaultfd_wp(vma)) @@ -1808,7 +1888,7 @@ struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi, ret = vma_modify_flags_uffd(vmi, prev, vma, start, end, vma->vm_flags & ~__VM_UFFD_FLAGS, - NULL_VM_UFFD_CTX); + NULL_VM_UFFD_CTX, give_up_on_oom); /* * In the vma_merge() successful mprotect-like case 8: @@ -1859,7 +1939,8 @@ int userfaultfd_register_range(struct userfaultfd_ctx *ctx, new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags; vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end, new_flags, - (struct vm_userfaultfd_ctx){ctx}); + (struct vm_userfaultfd_ctx){ctx}, + /* give_up_on_oom = */false); if (IS_ERR(vma)) return PTR_ERR(vma); diff --git a/mm/vma.c b/mm/vma.c index 7621384d64cf5..9b4517944901d 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -846,7 +846,13 @@ static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct * if (anon_dup) unlink_anon_vmas(anon_dup); - vmg->state = VMA_MERGE_ERROR_NOMEM; + /* + * We've cleaned up any cloned anon_vma's, no VMAs have been + * modified, no harm no foul if the user requests that we not + * report this and just give up, leaving the VMAs unmerged. + */ + if (!vmg->give_up_on_oom) + vmg->state = VMA_MERGE_ERROR_NOMEM; return NULL; } @@ -859,7 +865,15 @@ static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct * abort: vma_iter_set(vmg->vmi, start); vma_iter_load(vmg->vmi); - vmg->state = VMA_MERGE_ERROR_NOMEM; + + /* + * This means we have failed to clone anon_vma's correctly, but no + * actual changes to VMAs have occurred, so no harm no foul - if the + * user doesn't want this reported and instead just wants to give up on + * the merge, allow it. 
+ */ + if (!vmg->give_up_on_oom) + vmg->state = VMA_MERGE_ERROR_NOMEM; return NULL; } @@ -1033,9 +1047,15 @@ int vma_expand(struct vma_merge_struct *vmg) return 0; nomem: - vmg->state = VMA_MERGE_ERROR_NOMEM; if (anon_dup) unlink_anon_vmas(anon_dup); + /* + * If the user requests that we just give upon OOM, we are safe to do so + * here, as commit merge provides this contract to us. Nothing has been + * changed - no harm no foul, just don't report it. + */ + if (!vmg->give_up_on_oom) + vmg->state = VMA_MERGE_ERROR_NOMEM; return -ENOMEM; } @@ -1417,24 +1437,35 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg) { struct vm_area_struct *vma = vmg->vma; + unsigned long start = vmg->start; + unsigned long end = vmg->end; struct vm_area_struct *merged; /* First, try to merge. */ merged = vma_merge_existing_range(vmg); if (merged) return merged; + if (vmg_nomem(vmg)) + return ERR_PTR(-ENOMEM); + + /* + * Split can fail for reasons other than OOM, so if the user requests + * this it's probably a mistake. + */ + VM_WARN_ON(vmg->give_up_on_oom && + (vma->vm_start != start || vma->vm_end != end)); /* Split any preceding portion of the VMA. */ - if (vma->vm_start < vmg->start) { - int err = split_vma(vmg->vmi, vma, vmg->start, 1); + if (vma->vm_start < start) { + int err = split_vma(vmg->vmi, vma, start, 1); if (err) return ERR_PTR(err); } /* Split any trailing portion of the VMA. */ - if (vma->vm_end > vmg->end) { - int err = split_vma(vmg->vmi, vma, vmg->end, 0); + if (vma->vm_end > end) { + int err = split_vma(vmg->vmi, vma, end, 0); if (err) return ERR_PTR(err); @@ -1492,12 +1523,15 @@ struct vm_area_struct struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long new_flags, - struct vm_userfaultfd_ctx new_ctx) + struct vm_userfaultfd_ctx new_ctx, + bool give_up_on_oom) { VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); vmg.flags = new_flags; vmg.uffd_ctx = new_ctx; + if (give_up_on_oom) + vmg.give_up_on_oom = true; return vma_modify(&vmg); } diff --git a/mm/vma.h b/mm/vma.h index d58068c0ff2ea..729fe3741e897 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -87,6 +87,12 @@ struct vma_merge_struct { struct anon_vma_name *anon_name; enum vma_merge_flags merge_flags; enum vma_merge_state state; + + /* + * If a merge is possible, but an OOM error occurs, give up and don't + * execute the merge, returning NULL. 
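+ *
+ * Set via vma_modify_flags_uffd(), e.g. by userfaultfd_clear_vma() when
+ * a userfaultfd context is removed from an entire VMA:
+ *
+ *	vma_modify_flags_uffd(vmi, prev, vma, start, end, new_flags,
+ *			      NULL_VM_UFFD_CTX, true);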
+ */ + bool give_up_on_oom :1; }; static inline bool vmg_nomem(struct vma_merge_struct *vmg) @@ -303,7 +309,8 @@ struct vm_area_struct struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long new_flags, - struct vm_userfaultfd_ctx new_ctx); + struct vm_userfaultfd_ctx new_ctx, + bool give_up_on_oom); struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 3f9255dfacb0c..fd70a7cd1c8fa 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -586,13 +586,13 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, mask |= PGTBL_PGD_MODIFIED; err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask); if (err) - return err; + break; } while (pgd++, addr = next, addr != end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, end); - return 0; + return err; } /* diff --git a/mm/vmscan.c b/mm/vmscan.c index 77d015d5db0c5..39b3c7f35ea85 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -7557,7 +7557,7 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) return NODE_RECLAIM_NOSCAN; ret = __node_reclaim(pgdat, gfp_mask, order); - clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); + clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags); if (ret) count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS); diff --git a/mm/zswap.c b/mm/zswap.c index 7fefb2eb3fcd8..00d51d0137574 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -876,18 +876,32 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node) { struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node); struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu); + struct acomp_req *req; + struct crypto_acomp *acomp; + u8 *buffer; + + if (IS_ERR_OR_NULL(acomp_ctx)) + return 0; mutex_lock(&acomp_ctx->mutex); - if (!IS_ERR_OR_NULL(acomp_ctx)) { - if (!IS_ERR_OR_NULL(acomp_ctx->req)) - acomp_request_free(acomp_ctx->req); - acomp_ctx->req = NULL; - if (!IS_ERR_OR_NULL(acomp_ctx->acomp)) - crypto_free_acomp(acomp_ctx->acomp); - kfree(acomp_ctx->buffer); - } + req = acomp_ctx->req; + acomp = acomp_ctx->acomp; + buffer = acomp_ctx->buffer; + acomp_ctx->req = NULL; + acomp_ctx->acomp = NULL; + acomp_ctx->buffer = NULL; mutex_unlock(&acomp_ctx->mutex); + /* + * Do the actual freeing after releasing the mutex to avoid subtle + * locking dependencies causing deadlocks. 
+ */ + if (!IS_ERR_OR_NULL(req)) + acomp_request_free(req); + if (!IS_ERR_OR_NULL(acomp)) + crypto_free_acomp(acomp); + kfree(buffer); + return 0; } diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index e45187b882206..41be38264493d 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -131,7 +131,8 @@ int vlan_check_real_dev(struct net_device *real_dev, { const char *name = real_dev->name; - if (real_dev->features & NETIF_F_VLAN_CHALLENGED) { + if (real_dev->features & NETIF_F_VLAN_CHALLENGED || + real_dev->type != ARPHRD_ETHER) { pr_info("VLANs not supported on %s\n", name); NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device"); return -EOPNOTSUPP; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 458040e8a0e0b..9184cf7eb1286 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -273,17 +273,6 @@ static int vlan_dev_open(struct net_device *dev) goto out; } - if (dev->flags & IFF_ALLMULTI) { - err = dev_set_allmulti(real_dev, 1); - if (err < 0) - goto del_unicast; - } - if (dev->flags & IFF_PROMISC) { - err = dev_set_promiscuity(real_dev, 1); - if (err < 0) - goto clear_allmulti; - } - ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr); if (vlan->flags & VLAN_FLAG_GVRP) @@ -297,12 +286,6 @@ static int vlan_dev_open(struct net_device *dev) netif_carrier_on(dev); return 0; -clear_allmulti: - if (dev->flags & IFF_ALLMULTI) - dev_set_allmulti(real_dev, -1); -del_unicast: - if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) - dev_uc_del(real_dev, dev->dev_addr); out: netif_carrier_off(dev); return err; @@ -315,10 +298,6 @@ static int vlan_dev_stop(struct net_device *dev) dev_mc_unsync(real_dev, dev); dev_uc_unsync(real_dev, dev); - if (dev->flags & IFF_ALLMULTI) - dev_set_allmulti(real_dev, -1); - if (dev->flags & IFF_PROMISC) - dev_set_promiscuity(real_dev, -1); if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); @@ -490,12 +469,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; - if (dev->flags & IFF_UP) { - if (change & IFF_ALLMULTI) - dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); - if (change & IFF_PROMISC) - dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1); - } + if (change & IFF_ALLMULTI) + dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); + if (change & IFF_PROMISC) + dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 
1 : -1); } static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) diff --git a/net/atm/lec.c b/net/atm/lec.c index ffef658862db1..a948dd47c3f34 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -181,6 +181,7 @@ static void lec_send(struct atm_vcc *vcc, struct sk_buff *skb) { struct net_device *dev = skb->dev; + unsigned int len = skb->len; ATM_SKB(skb)->vcc = vcc; atm_account_tx(vcc, skb); @@ -191,7 +192,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb) } dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += len; } static void lec_tx_timeout(struct net_device *dev, unsigned int txqueue) diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 324e3ab96bb39..12da0269275c5 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -1314,6 +1314,8 @@ static void MPOA_cache_impos_rcvd(struct k_message *msg, holding_time = msg->content.eg_info.holding_time; dprintk("(%s) entry = %p, holding_time = %u\n", mpc->dev->name, entry, holding_time); + if (entry == NULL && !holding_time) + return; if (entry == NULL && holding_time) { entry = mpc->eg_ops->add_entry(msg, mpc); mpc->eg_ops->put(entry); diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 74b49c35ddc14..209180b4c2681 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -324,8 +324,7 @@ batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, /* check if there is enough space for the optional TVLV */ next_buff_pos += ntohs(ogm_packet->tvlv_len); - return (next_buff_pos <= packet_len) && - (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); + return next_buff_pos <= packet_len; } /* send a batman ogm to a given interface */ diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index e503ee0d896bd..8f89ffe6020ce 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -839,8 +839,7 @@ batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, /* check if there is enough space for the optional TVLV */ next_buff_pos += ntohs(ogm2_packet->tvlv_len); - return (next_buff_pos <= packet_len) && - (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); + return next_buff_pos <= packet_len; } /** diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 50cfec8ccac4f..3c29778171c58 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -825,11 +825,16 @@ static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb) { + struct sk_buff *skb; + /* Note that we must allocate using GFP_ATOMIC here as * this function is called originally from netdev hard xmit * function in atomic context. 
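* The L2CAP core checks this callback's return value with IS_ERR(), so
* return an ERR_PTR() instead of NULL when the allocation fails.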
*/ - return bt_skb_alloc(hdr_len + len, GFP_ATOMIC); + skb = bt_skb_alloc(hdr_len + len, GFP_ATOMIC); + if (!skb) + return ERR_PTR(-ENOMEM); + return skb; } static void chan_suspend_cb(struct l2cap_chan *chan) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index b5553c08e7316..72439764186ed 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -57,6 +57,7 @@ DEFINE_RWLOCK(hci_dev_list_lock); /* HCI callback list */ LIST_HEAD(hci_cb_list); +DEFINE_MUTEX(hci_cb_list_lock); /* HCI ID Numbering */ static DEFINE_IDA(hci_index_ida); @@ -2992,7 +2993,9 @@ int hci_register_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); - list_add_tail_rcu(&cb->list, &hci_cb_list); + mutex_lock(&hci_cb_list_lock); + list_add_tail(&cb->list, &hci_cb_list); + mutex_unlock(&hci_cb_list_lock); return 0; } @@ -3002,8 +3005,9 @@ int hci_unregister_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); - list_del_rcu(&cb->list); - synchronize_rcu(); + mutex_lock(&hci_cb_list_lock); + list_del(&cb->list); + mutex_unlock(&hci_cb_list_lock); return 0; } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 388d46c6a043d..96ad1b75d1c4d 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -3393,23 +3393,30 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, hci_update_scan(hdev); } - params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); - if (params) { - switch (params->auto_connect) { - case HCI_AUTO_CONN_LINK_LOSS: - if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) + /* Re-enable passive scanning if disconnected device is marked + * as auto-connectable. + */ + if (conn->type == LE_LINK) { + params = hci_conn_params_lookup(hdev, &conn->dst, + conn->dst_type); + if (params) { + switch (params->auto_connect) { + case HCI_AUTO_CONN_LINK_LOSS: + if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) + break; + fallthrough; + + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + hci_pend_le_list_del_init(params); + hci_pend_le_list_add(params, + &hdev->pend_le_conns); + hci_update_passive_scan(hdev); break; - fallthrough; - case HCI_AUTO_CONN_DIRECT: - case HCI_AUTO_CONN_ALWAYS: - hci_pend_le_list_del_init(params); - hci_pend_le_list_add(params, &hdev->pend_le_conns); - hci_update_passive_scan(hdev); - break; - - default: - break; + default: + break; + } } } @@ -6143,11 +6150,12 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, * event or send an immediate device found event if the data * should not be stored for later. */ - if (!ext_adv && !has_pending_adv_report(hdev)) { + if (!has_pending_adv_report(hdev)) { /* If the report will trigger a SCAN_REQ store it for * later merging. 
*/ - if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { + if (!ext_adv && (type == LE_ADV_IND || + type == LE_ADV_SCAN_IND)) { store_pending_adv_report(hdev, bdaddr, bdaddr_type, rssi, flags, data, len); return; diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 7b2b04d6b8563..cb4d47ae129e8 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -3720,6 +3720,9 @@ static int hci_read_local_name_sync(struct hci_dev *hdev) /* Read Voice Setting */ static int hci_read_voice_setting_sync(struct hci_dev *hdev) { + if (!read_voice_setting_capable(hdev)) + return 0; + return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL, HCI_CMD_TIMEOUT); } @@ -4153,7 +4156,8 @@ static int hci_read_page_scan_type_sync(struct hci_dev *hdev) * support the Read Page Scan Type command. Check support for * this command in the bit mask of supported commands. */ - if (!(hdev->commands[13] & 0x01)) + if (!(hdev->commands[13] & 0x01) || + test_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE, diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index bda2f2da7d731..644b606743e21 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -2137,11 +2137,6 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) return HCI_LM_ACCEPT; } -static bool iso_match(struct hci_conn *hcon) -{ - return hcon->type == ISO_LINK || hcon->type == LE_LINK; -} - static void iso_connect_cfm(struct hci_conn *hcon, __u8 status) { if (hcon->type != ISO_LINK) { @@ -2323,7 +2318,6 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) static struct hci_cb iso_cb = { .name = "ISO", - .match = iso_match, .connect_cfm = iso_connect_cfm, .disconn_cfm = iso_disconn_cfm, }; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 27b4c4a2ba1fd..a55388fbf07c8 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -119,7 +119,6 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, { struct l2cap_chan *c; - mutex_lock(&conn->chan_lock); c = __l2cap_get_chan_by_scid(conn, cid); if (c) { /* Only lock if chan reference is not 0 */ @@ -127,7 +126,6 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, if (c) l2cap_chan_lock(c); } - mutex_unlock(&conn->chan_lock); return c; } @@ -140,7 +138,6 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn, { struct l2cap_chan *c; - mutex_lock(&conn->chan_lock); c = __l2cap_get_chan_by_dcid(conn, cid); if (c) { /* Only lock if chan reference is not 0 */ @@ -148,7 +145,6 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn, if (c) l2cap_chan_lock(c); } - mutex_unlock(&conn->chan_lock); return c; } @@ -418,7 +414,7 @@ static void l2cap_chan_timeout(struct work_struct *work) if (!conn) return; - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling * this work. No need to call l2cap_chan_hold(chan) here again. 
*/ @@ -439,7 +435,7 @@ static void l2cap_chan_timeout(struct work_struct *work) l2cap_chan_unlock(chan); l2cap_chan_put(chan); - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); } struct l2cap_chan *l2cap_chan_create(void) @@ -636,14 +632,15 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) hci_conn_hold(conn->hcon); - list_add(&chan->list, &conn->chan_l); + /* Append to the list since the order matters for ECRED */ + list_add_tail(&chan->list, &conn->chan_l); } void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) { - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); __l2cap_chan_add(conn, chan); - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); } void l2cap_chan_del(struct l2cap_chan *chan, int err) @@ -731,9 +728,9 @@ void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, if (!conn) return; - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); __l2cap_chan_list(conn, func, data); - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); } EXPORT_SYMBOL_GPL(l2cap_chan_list); @@ -745,7 +742,7 @@ static void l2cap_conn_update_id_addr(struct work_struct *work) struct hci_conn *hcon = conn->hcon; struct l2cap_chan *chan; - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); list_for_each_entry(chan, &conn->chan_l, list) { l2cap_chan_lock(chan); @@ -754,7 +751,7 @@ static void l2cap_conn_update_id_addr(struct work_struct *work) l2cap_chan_unlock(chan); } - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); } static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan) @@ -948,6 +945,16 @@ static u8 l2cap_get_ident(struct l2cap_conn *conn) return id; } +static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb, + u8 flags) +{ + /* Check if the hcon still valid before attempting to send */ + if (hci_conn_valid(conn->hcon->hdev, conn->hcon)) + hci_send_acl(conn->hchan, skb, flags); + else + kfree_skb(skb); +} + static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) { @@ -970,7 +977,7 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON; skb->priority = HCI_PRIO_MAX; - hci_send_acl(conn->hchan, skb, flags); + l2cap_send_acl(conn, skb, flags); } static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) @@ -1497,8 +1504,6 @@ static void l2cap_conn_start(struct l2cap_conn *conn) BT_DBG("conn %p", conn); - mutex_lock(&conn->chan_lock); - list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { l2cap_chan_lock(chan); @@ -1567,8 +1572,6 @@ static void l2cap_conn_start(struct l2cap_conn *conn) l2cap_chan_unlock(chan); } - - mutex_unlock(&conn->chan_lock); } static void l2cap_le_conn_ready(struct l2cap_conn *conn) @@ -1614,7 +1617,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) if (hcon->type == ACL_LINK) l2cap_request_info(conn); - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); list_for_each_entry(chan, &conn->chan_l, list) { @@ -1632,7 +1635,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) l2cap_chan_unlock(chan); } - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); if (hcon->type == LE_LINK) l2cap_le_conn_ready(conn); @@ -1647,14 +1650,10 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) BT_DBG("conn %p", conn); - mutex_lock(&conn->chan_lock); - list_for_each_entry(chan, &conn->chan_l, list) { if (test_bit(FLAG_FORCE_RELIABLE, 
&chan->flags)) l2cap_chan_set_err(chan, err); } - - mutex_unlock(&conn->chan_lock); } static void l2cap_info_timeout(struct work_struct *work) @@ -1665,7 +1664,9 @@ static void l2cap_info_timeout(struct work_struct *work) conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 0; + mutex_lock(&conn->lock); l2cap_conn_start(conn); + mutex_unlock(&conn->lock); } /* @@ -1757,6 +1758,8 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); + mutex_lock(&conn->lock); + kfree_skb(conn->rx_skb); skb_queue_purge(&conn->pending_rx); @@ -1775,8 +1778,6 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) /* Force the connection to be immediately dropped */ hcon->disc_timeout = 0; - mutex_lock(&conn->chan_lock); - /* Kill channels */ list_for_each_entry_safe(chan, l, &conn->chan_l, list) { l2cap_chan_hold(chan); @@ -1790,15 +1791,14 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) l2cap_chan_put(chan); } - mutex_unlock(&conn->chan_lock); - - hci_chan_del(conn->hchan); - if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) cancel_delayed_work_sync(&conn->info_timer); - hcon->l2cap_data = NULL; + hci_chan_del(conn->hchan); conn->hchan = NULL; + + hcon->l2cap_data = NULL; + mutex_unlock(&conn->lock); l2cap_conn_put(conn); } @@ -2916,8 +2916,6 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) BT_DBG("conn %p", conn); - mutex_lock(&conn->chan_lock); - list_for_each_entry(chan, &conn->chan_l, list) { if (chan->chan_type != L2CAP_CHAN_RAW) continue; @@ -2932,8 +2930,6 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) if (chan->ops->recv(chan, nskb)) kfree_skb(nskb); } - - mutex_unlock(&conn->chan_lock); } /* ---- L2CAP signalling commands ---- */ @@ -3776,7 +3772,11 @@ static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data) struct l2cap_ecred_conn_rsp *rsp_flex = container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr); - if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) + /* Check if channel for outgoing connection or if it wasn't deferred + * since in those cases it must be skipped. 
+ */ + if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) || + !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags)) return; /* Reset ident so only one response is sent */ @@ -3952,12 +3952,12 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, goto response; } - mutex_lock(&conn->chan_lock); l2cap_chan_lock(pchan); /* Check if the ACL is secure enough (if not SDP) */ if (psm != cpu_to_le16(L2CAP_PSM_SDP) && - !hci_conn_check_link_mode(conn->hcon)) { + (!hci_conn_check_link_mode(conn->hcon) || + !l2cap_check_enc_key_size(conn->hcon))) { conn->disc_reason = HCI_ERROR_AUTH_FAILURE; result = L2CAP_CR_SEC_BLOCK; goto response; @@ -4059,7 +4059,6 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, } l2cap_chan_unlock(pchan); - mutex_unlock(&conn->chan_lock); l2cap_chan_put(pchan); } @@ -4098,27 +4097,19 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn, BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status); - mutex_lock(&conn->chan_lock); - if (scid) { chan = __l2cap_get_chan_by_scid(conn, scid); - if (!chan) { - err = -EBADSLT; - goto unlock; - } + if (!chan) + return -EBADSLT; } else { chan = __l2cap_get_chan_by_ident(conn, cmd->ident); - if (!chan) { - err = -EBADSLT; - goto unlock; - } + if (!chan) + return -EBADSLT; } chan = l2cap_chan_hold_unless_zero(chan); - if (!chan) { - err = -EBADSLT; - goto unlock; - } + if (!chan) + return -EBADSLT; err = 0; @@ -4156,9 +4147,6 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn, l2cap_chan_unlock(chan); l2cap_chan_put(chan); -unlock: - mutex_unlock(&conn->chan_lock); - return err; } @@ -4446,11 +4434,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, chan->ops->set_shutdown(chan); - l2cap_chan_unlock(chan); - mutex_lock(&conn->chan_lock); - l2cap_chan_lock(chan); l2cap_chan_del(chan, ECONNRESET); - mutex_unlock(&conn->chan_lock); chan->ops->close(chan); @@ -4487,11 +4471,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, return 0; } - l2cap_chan_unlock(chan); - mutex_lock(&conn->chan_lock); - l2cap_chan_lock(chan); l2cap_chan_del(chan, 0); - mutex_unlock(&conn->chan_lock); chan->ops->close(chan); @@ -4689,13 +4669,9 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn, BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x", dcid, mtu, mps, credits, result); - mutex_lock(&conn->chan_lock); - chan = __l2cap_get_chan_by_ident(conn, cmd->ident); - if (!chan) { - err = -EBADSLT; - goto unlock; - } + if (!chan) + return -EBADSLT; err = 0; @@ -4743,9 +4719,6 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn, l2cap_chan_unlock(chan); -unlock: - mutex_unlock(&conn->chan_lock); - return err; } @@ -4857,7 +4830,6 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, goto response; } - mutex_lock(&conn->chan_lock); l2cap_chan_lock(pchan); if (!smp_sufficient_security(conn->hcon, pchan->sec_level, @@ -4923,7 +4895,6 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, response_unlock: l2cap_chan_unlock(pchan); - mutex_unlock(&conn->chan_lock); l2cap_chan_put(pchan); if (result == L2CAP_CR_PEND) @@ -5057,7 +5028,6 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, goto response; } - mutex_lock(&conn->chan_lock); l2cap_chan_lock(pchan); if (!smp_sufficient_security(conn->hcon, pchan->sec_level, @@ -5132,7 +5102,6 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, unlock: l2cap_chan_unlock(pchan); - mutex_unlock(&conn->chan_lock); 
l2cap_chan_put(pchan); response: @@ -5169,8 +5138,6 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn, BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits, result); - mutex_lock(&conn->chan_lock); - cmd_len -= sizeof(*rsp); list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { @@ -5256,8 +5223,6 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn, l2cap_chan_unlock(chan); } - mutex_unlock(&conn->chan_lock); - return err; } @@ -5370,8 +5335,6 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn, if (cmd_len < sizeof(*rej)) return -EPROTO; - mutex_lock(&conn->chan_lock); - chan = __l2cap_get_chan_by_ident(conn, cmd->ident); if (!chan) goto done; @@ -5386,7 +5349,6 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn, l2cap_chan_put(chan); done: - mutex_unlock(&conn->chan_lock); return 0; } @@ -6841,8 +6803,12 @@ static void process_pending_rx(struct work_struct *work) BT_DBG(""); + mutex_lock(&conn->lock); + while ((skb = skb_dequeue(&conn->pending_rx))) l2cap_recv_frame(conn, skb); + + mutex_unlock(&conn->lock); } static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) @@ -6881,7 +6847,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR; mutex_init(&conn->ident_lock); - mutex_init(&conn->chan_lock); + mutex_init(&conn->lock); INIT_LIST_HEAD(&conn->chan_l); INIT_LIST_HEAD(&conn->users); @@ -7072,7 +7038,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, } } - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); l2cap_chan_lock(chan); if (cid && __l2cap_get_chan_by_dcid(conn, cid)) { @@ -7113,7 +7079,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, chan_unlock: l2cap_chan_unlock(chan); - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); done: hci_dev_unlock(hdev); hci_dev_put(hdev); @@ -7217,11 +7183,6 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c, return NULL; } -static bool l2cap_match(struct hci_conn *hcon) -{ - return hcon->type == ACL_LINK || hcon->type == LE_LINK; -} - static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) { struct hci_dev *hdev = hcon->hdev; @@ -7229,6 +7190,9 @@ static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) struct l2cap_chan *pchan; u8 dst_type; + if (hcon->type != ACL_LINK && hcon->type != LE_LINK) + return; + BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); if (status) { @@ -7293,6 +7257,9 @@ int l2cap_disconn_ind(struct hci_conn *hcon) static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) { + if (hcon->type != ACL_LINK && hcon->type != LE_LINK) + return; + BT_DBG("hcon %p reason %d", hcon, reason); l2cap_conn_del(hcon, bt_to_errno(reason)); @@ -7325,7 +7292,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt); - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); list_for_each_entry(chan, &conn->chan_l, list) { l2cap_chan_lock(chan); @@ -7399,7 +7366,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) l2cap_chan_unlock(chan); } - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); } /* Append fragment into frame respecting the maximum len of rx_skb */ @@ -7466,19 +7433,45 @@ static void l2cap_recv_reset(struct l2cap_conn *conn) conn->rx_len = 0; } +struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c) +{ + if (!c) + return 
NULL; + + BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref)); + + if (!kref_get_unless_zero(&c->ref)) + return NULL; + + return c; +} + void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) { - struct l2cap_conn *conn = hcon->l2cap_data; + struct l2cap_conn *conn; int len; + /* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */ + hci_dev_lock(hcon->hdev); + + conn = hcon->l2cap_data; + if (!conn) conn = l2cap_conn_add(hcon); - if (!conn) - goto drop; + conn = l2cap_conn_hold_unless_zero(conn); + + hci_dev_unlock(hcon->hdev); + + if (!conn) { + kfree_skb(skb); + return; + } BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags); + mutex_lock(&conn->lock); + switch (flags) { case ACL_START: case ACL_START_NO_FLUSH: @@ -7503,7 +7496,7 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) if (len == skb->len) { /* Complete frame received */ l2cap_recv_frame(conn, skb); - return; + goto unlock; } BT_DBG("Start: total len %d, frag len %u", len, skb->len); @@ -7511,8 +7504,24 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) if (skb->len > len) { BT_ERR("Frame is too long (len %u, expected len %d)", skb->len, len); + /* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C + * (Multiple Signaling Command in one PDU, Data + * Truncated, BR/EDR) send a C-frame to the IUT with + * PDU Length set to 8 and Channel ID set to the + * correct signaling channel for the logical link. + * The Information payload contains one L2CAP_ECHO_REQ + * packet with Data Length set to 0 with 0 octets of + * echo data and one invalid command packet due to + * data truncated in PDU but present in HCI packet. + * + * Shorter the socket buffer to the PDU length to + * allow to process valid commands from the PDU before + * setting the socket unreliable. 
+ */ + skb->len = len; + l2cap_recv_frame(conn, skb); l2cap_conn_unreliable(conn, ECOMM); - goto drop; + goto unlock; } /* Append fragment into frame (with header) */ @@ -7567,11 +7576,13 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) drop: kfree_skb(skb); +unlock: + mutex_unlock(&conn->lock); + l2cap_conn_put(conn); } static struct hci_cb l2cap_cb = { .name = "L2CAP", - .match = l2cap_match, .connect_cfm = l2cap_connect_cfm, .disconn_cfm = l2cap_disconn_cfm, .security_cfm = l2cap_security_cfm, diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 46ea0bee2259f..acd11b268b98a 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1326,9 +1326,10 @@ static int l2cap_sock_shutdown(struct socket *sock, int how) /* prevent sk structure from being freed whilst unlocked */ sock_hold(sk); - chan = l2cap_pi(sk)->chan; /* prevent chan structure from being freed whilst unlocked */ - l2cap_chan_hold(chan); + chan = l2cap_chan_hold_unless_zero(l2cap_pi(sk)->chan); + if (!chan) + goto shutdown_already; BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); @@ -1358,22 +1359,20 @@ static int l2cap_sock_shutdown(struct socket *sock, int how) release_sock(sk); l2cap_chan_lock(chan); - conn = chan->conn; - if (conn) - /* prevent conn structure from being freed */ - l2cap_conn_get(conn); + /* prevent conn structure from being freed */ + conn = l2cap_conn_hold_unless_zero(chan->conn); l2cap_chan_unlock(chan); if (conn) /* mutex lock must be taken before l2cap_chan_lock() */ - mutex_lock(&conn->chan_lock); + mutex_lock(&conn->lock); l2cap_chan_lock(chan); l2cap_chan_close(chan, 0); l2cap_chan_unlock(chan); if (conn) { - mutex_unlock(&conn->chan_lock); + mutex_unlock(&conn->lock); l2cap_conn_put(conn); } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 90c21b3edcd80..c019f69c59395 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -9731,6 +9731,9 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) + eir_precalc_len(sizeof(conn->dev_class))); + if (!skb) + return; + ev = skb_put(skb, sizeof(*ev)); bacpy(&ev->addr.bdaddr, &conn->dst); ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type); @@ -10484,6 +10487,8 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, sizeof(*ev) + (name ? 
eir_precalc_len(name_len) : 0)); + if (!skb) + return; ev = skb_put(skb, sizeof(*ev)); bacpy(&ev->addr.bdaddr, bdaddr); diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 4c56ca5a216c6..ad5177e3a69b7 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -2134,11 +2134,6 @@ static int rfcomm_run(void *unused) return 0; } -static bool rfcomm_match(struct hci_conn *hcon) -{ - return hcon->type == ACL_LINK; -} - static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt) { struct rfcomm_session *s; @@ -2185,7 +2180,6 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt) static struct hci_cb rfcomm_cb = { .name = "RFCOMM", - .match = rfcomm_match, .security_cfm = rfcomm_security_cfm }; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 071c404c790af..b872a2ca3ff38 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -1355,13 +1355,11 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) return lm; } -static bool sco_match(struct hci_conn *hcon) -{ - return hcon->type == SCO_LINK || hcon->type == ESCO_LINK; -} - static void sco_connect_cfm(struct hci_conn *hcon, __u8 status) { + if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) + return; + BT_DBG("hcon %p bdaddr %pMR status %u", hcon, &hcon->dst, status); if (!status) { @@ -1376,6 +1374,9 @@ static void sco_connect_cfm(struct hci_conn *hcon, __u8 status) static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) { + if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) + return; + BT_DBG("hcon %p reason %d", hcon, reason); sco_conn_del(hcon, bt_to_errno(reason)); @@ -1401,7 +1402,6 @@ void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) static struct hci_cb sco_cb = { .name = "SCO", - .match = sco_match, .connect_cfm = sco_connect_cfm, .disconn_cfm = sco_disconn_cfm, }; diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 501ec4249fedc..8612023bec60d 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -660,12 +660,9 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size, void __user *data_in = u64_to_user_ptr(kattr->test.data_in); void *data; - if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom) + if (user_size < ETH_HLEN || user_size > PAGE_SIZE - headroom - tailroom) return ERR_PTR(-EINVAL); - if (user_size > size) - return ERR_PTR(-EMSGSIZE); - size = SKB_DATA_ALIGN(size); data = kzalloc(size + headroom + tailroom, GFP_USER); if (!data) diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 1d458e9da660c..17a5f5923d615 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -370,9 +370,9 @@ br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb, */ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct net_device *dev = skb->dev, *br_indev; - struct iphdr *iph = ip_hdr(skb); struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); + struct net_device *dev = skb->dev, *br_indev; + const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; int err; @@ -390,7 +390,9 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_ } nf_bridge->in_prerouting = 0; if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) { - if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { + err = ip_route_input(skb, iph->daddr, iph->saddr, + ip4h_dscp(iph), dev); + if (err) { struct in_device *in_dev = 
__in_dev_get_rcu(dev); /* If err equals -EHOSTUNREACH the error is due to a diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 89f51ea4cabec..f2efb58d152bc 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -715,8 +715,8 @@ static int br_vlan_add_existing(struct net_bridge *br, u16 flags, bool *changed, struct netlink_ext_ack *extack) { - bool would_change = __vlan_flags_would_change(vlan, flags); bool becomes_brentry = false; + bool would_change = false; int err; if (!br_vlan_is_brentry(vlan)) { @@ -725,6 +725,8 @@ static int br_vlan_add_existing(struct net_bridge *br, return -EINVAL; becomes_brentry = true; + } else { + would_change = __vlan_flags_would_change(vlan, flags); } /* Master VLANs that aren't brentries weren't notified before, diff --git a/net/can/af_can.c b/net/can/af_can.c index 01f3fbb3b67dc..65230e81fa08c 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -287,8 +287,8 @@ int can_send(struct sk_buff *skb, int loop) netif_rx(newskb); /* update statistics */ - pkg_stats->tx_frames++; - pkg_stats->tx_frames_delta++; + atomic_long_inc(&pkg_stats->tx_frames); + atomic_long_inc(&pkg_stats->tx_frames_delta); return 0; @@ -647,8 +647,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev) int matches; /* update statistics */ - pkg_stats->rx_frames++; - pkg_stats->rx_frames_delta++; + atomic_long_inc(&pkg_stats->rx_frames); + atomic_long_inc(&pkg_stats->rx_frames_delta); /* create non-zero unique skb identifier together with *skb */ while (!(can_skb_prv(skb)->skbcnt)) @@ -669,8 +669,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev) consume_skb(skb); if (matches > 0) { - pkg_stats->matches++; - pkg_stats->matches_delta++; + atomic_long_inc(&pkg_stats->matches); + atomic_long_inc(&pkg_stats->matches_delta); } } diff --git a/net/can/af_can.h b/net/can/af_can.h index 7c2d9161e2245..22f3352c77fec 100644 --- a/net/can/af_can.h +++ b/net/can/af_can.h @@ -66,9 +66,9 @@ struct receiver { struct can_pkg_stats { unsigned long jiffies_init; - unsigned long rx_frames; - unsigned long tx_frames; - unsigned long matches; + atomic_long_t rx_frames; + atomic_long_t tx_frames; + atomic_long_t matches; unsigned long total_rx_rate; unsigned long total_tx_rate; @@ -82,9 +82,9 @@ struct can_pkg_stats { unsigned long max_tx_rate; unsigned long max_rx_match_ratio; - unsigned long rx_frames_delta; - unsigned long tx_frames_delta; - unsigned long matches_delta; + atomic_long_t rx_frames_delta; + atomic_long_t tx_frames_delta; + atomic_long_t matches_delta; }; /* persistent statistics */ diff --git a/net/can/proc.c b/net/can/proc.c index bbce97825f13f..25fdf060e30d0 100644 --- a/net/can/proc.c +++ b/net/can/proc.c @@ -118,6 +118,13 @@ void can_stat_update(struct timer_list *t) struct can_pkg_stats *pkg_stats = net->can.pkg_stats; unsigned long j = jiffies; /* snapshot */ + long rx_frames = atomic_long_read(&pkg_stats->rx_frames); + long tx_frames = atomic_long_read(&pkg_stats->tx_frames); + long matches = atomic_long_read(&pkg_stats->matches); + long rx_frames_delta = atomic_long_read(&pkg_stats->rx_frames_delta); + long tx_frames_delta = atomic_long_read(&pkg_stats->tx_frames_delta); + long matches_delta = atomic_long_read(&pkg_stats->matches_delta); + /* restart counting in timer context on user request */ if (user_reset) can_init_stats(net); @@ -127,35 +134,33 @@ void can_stat_update(struct timer_list *t) can_init_stats(net); /* prevent overflow in calc_rate() */ - if (pkg_stats->rx_frames > (ULONG_MAX / HZ)) + if (rx_frames > 
(LONG_MAX / HZ)) can_init_stats(net); /* prevent overflow in calc_rate() */ - if (pkg_stats->tx_frames > (ULONG_MAX / HZ)) + if (tx_frames > (LONG_MAX / HZ)) can_init_stats(net); /* matches overflow - very improbable */ - if (pkg_stats->matches > (ULONG_MAX / 100)) + if (matches > (LONG_MAX / 100)) can_init_stats(net); /* calc total values */ - if (pkg_stats->rx_frames) - pkg_stats->total_rx_match_ratio = (pkg_stats->matches * 100) / - pkg_stats->rx_frames; + if (rx_frames) + pkg_stats->total_rx_match_ratio = (matches * 100) / rx_frames; pkg_stats->total_tx_rate = calc_rate(pkg_stats->jiffies_init, j, - pkg_stats->tx_frames); + tx_frames); pkg_stats->total_rx_rate = calc_rate(pkg_stats->jiffies_init, j, - pkg_stats->rx_frames); + rx_frames); /* calc current values */ - if (pkg_stats->rx_frames_delta) + if (rx_frames_delta) pkg_stats->current_rx_match_ratio = - (pkg_stats->matches_delta * 100) / - pkg_stats->rx_frames_delta; + (matches_delta * 100) / rx_frames_delta; - pkg_stats->current_tx_rate = calc_rate(0, HZ, pkg_stats->tx_frames_delta); - pkg_stats->current_rx_rate = calc_rate(0, HZ, pkg_stats->rx_frames_delta); + pkg_stats->current_tx_rate = calc_rate(0, HZ, tx_frames_delta); + pkg_stats->current_rx_rate = calc_rate(0, HZ, rx_frames_delta); /* check / update maximum values */ if (pkg_stats->max_tx_rate < pkg_stats->current_tx_rate) @@ -168,9 +173,9 @@ void can_stat_update(struct timer_list *t) pkg_stats->max_rx_match_ratio = pkg_stats->current_rx_match_ratio; /* clear values for 'current rate' calculation */ - pkg_stats->tx_frames_delta = 0; - pkg_stats->rx_frames_delta = 0; - pkg_stats->matches_delta = 0; + atomic_long_set(&pkg_stats->tx_frames_delta, 0); + atomic_long_set(&pkg_stats->rx_frames_delta, 0); + atomic_long_set(&pkg_stats->matches_delta, 0); /* restart timer (one second) */ mod_timer(&net->can.stattimer, round_jiffies(jiffies + HZ)); @@ -214,9 +219,12 @@ static int can_stats_proc_show(struct seq_file *m, void *v) struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats; seq_putc(m, '\n'); - seq_printf(m, " %8ld transmitted frames (TXF)\n", pkg_stats->tx_frames); - seq_printf(m, " %8ld received frames (RXF)\n", pkg_stats->rx_frames); - seq_printf(m, " %8ld matched frames (RXMF)\n", pkg_stats->matches); + seq_printf(m, " %8ld transmitted frames (TXF)\n", + atomic_long_read(&pkg_stats->tx_frames)); + seq_printf(m, " %8ld received frames (RXF)\n", + atomic_long_read(&pkg_stats->rx_frames)); + seq_printf(m, " %8ld matched frames (RXMF)\n", + atomic_long_read(&pkg_stats->matches)); seq_putc(m, '\n'); diff --git a/net/core/dev.c b/net/core/dev.c index 2e0fe38d0e877..7b7b36c43c82c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1012,6 +1012,12 @@ int netdev_get_name(struct net *net, char *name, int ifindex) return ret; } +static bool dev_addr_cmp(struct net_device *dev, unsigned short type, + const char *ha) +{ + return dev->type == type && !memcmp(dev->dev_addr, ha, dev->addr_len); +} + /** * dev_getbyhwaddr_rcu - find a device by its hardware address * @net: the applicable net namespace @@ -1020,7 +1026,7 @@ int netdev_get_name(struct net *net, char *name, int ifindex) * * Search for an interface by MAC address. Returns NULL if the device * is not found or a pointer to the device. - * The caller must hold RCU or RTNL. + * The caller must hold RCU. 
* The returned device has not had its ref count increased * and the caller must therefore be careful about locking * @@ -1032,14 +1038,39 @@ struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, struct net_device *dev; for_each_netdev_rcu(net, dev) - if (dev->type == type && - !memcmp(dev->dev_addr, ha, dev->addr_len)) + if (dev_addr_cmp(dev, type, ha)) return dev; return NULL; } EXPORT_SYMBOL(dev_getbyhwaddr_rcu); +/** + * dev_getbyhwaddr() - find a device by its hardware address + * @net: the applicable net namespace + * @type: media type of device + * @ha: hardware address + * + * Similar to dev_getbyhwaddr_rcu(), but the owner needs to hold + * rtnl_lock. + * + * Context: rtnl_lock() must be held. + * Return: pointer to the net_device, or NULL if not found + */ +struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, + const char *ha) +{ + struct net_device *dev; + + ASSERT_RTNL(); + for_each_netdev(net, dev) + if (dev_addr_cmp(dev, type, ha)) + return dev; + + return NULL; +} +EXPORT_SYMBOL(dev_getbyhwaddr); + struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) { struct net_device *dev, *ret = NULL; @@ -3692,6 +3723,9 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device { netdev_features_t features; + if (!skb_frags_readable(skb)) + goto out_kfree_skb; + features = netif_skb_features(skb); skb = validate_xmit_vlan(skb, features); if (unlikely(!skb)) @@ -4577,7 +4611,7 @@ static inline void ____napi_schedule(struct softnet_data *sd, * we have to raise NET_RX_SOFTIRQ. */ if (!sd->in_net_rx_action) - __raise_softirq_irqoff(NET_RX_SOFTIRQ); + raise_softirq_irqoff(NET_RX_SOFTIRQ); } #ifdef CONFIG_RPS diff --git a/net/core/devmem.c b/net/core/devmem.c index 11b91c12ee113..17f8a83a5ee74 100644 --- a/net/core/devmem.c +++ b/net/core/devmem.c @@ -108,6 +108,7 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding) struct netdev_rx_queue *rxq; unsigned long xa_idx; unsigned int rxq_idx; + int err; if (binding->list.next) list_del(&binding->list); @@ -119,7 +120,8 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding) rxq_idx = get_netdev_rx_queue_index(rxq); - WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx)); + err = netdev_rx_queue_restart(binding->dev, rxq_idx); + WARN_ON(err && err != -ENETDOWN); } xa_erase(&net_devmem_dmabuf_bindings, binding->id); diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 6efd4cccc9ddd..212f0a048cab6 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -1734,30 +1734,30 @@ static int __init init_net_drop_monitor(void) return -ENOSPC; } - rc = genl_register_family(&net_drop_monitor_family); - if (rc) { - pr_err("Could not create drop monitor netlink family\n"); - return rc; + for_each_possible_cpu(cpu) { + net_dm_cpu_data_init(cpu); + net_dm_hw_cpu_data_init(cpu); } - WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT); rc = register_netdevice_notifier(&dropmon_net_notifier); if (rc < 0) { pr_crit("Failed to register netdevice notifier\n"); + return rc; + } + + rc = genl_register_family(&net_drop_monitor_family); + if (rc) { + pr_err("Could not create drop monitor netlink family\n"); goto out_unreg; } + WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT); rc = 0; - for_each_possible_cpu(cpu) { - net_dm_cpu_data_init(cpu); - net_dm_hw_cpu_data_init(cpu); - } - goto out; out_unreg: - genl_unregister_family(&net_drop_monitor_family); + 
WARN_ON(unregister_netdevice_notifier(&dropmon_net_notifier)); out: return rc; } @@ -1766,19 +1766,18 @@ static void exit_net_drop_monitor(void) { int cpu; - BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier)); - /* * Because of the module_get/put we do in the trace state change path * we are guaranteed not to have any current users when we get here */ + BUG_ON(genl_unregister_family(&net_drop_monitor_family)); + + BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier)); for_each_possible_cpu(cpu) { net_dm_hw_cpu_data_fini(cpu); net_dm_cpu_data_fini(cpu); } - - BUG_ON(genl_unregister_family(&net_drop_monitor_family)); } module_init(init_net_drop_monitor); diff --git a/net/core/dst.c b/net/core/dst.c index 9552a90d4772d..6d76b799ce645 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -165,6 +165,14 @@ static void dst_count_dec(struct dst_entry *dst) void dst_release(struct dst_entry *dst) { if (dst && rcuref_put(&dst->__rcuref)) { +#ifdef CONFIG_DST_CACHE + if (dst->flags & DST_METADATA) { + struct metadata_dst *md_dst = (struct metadata_dst *)dst; + + if (md_dst->type == METADATA_IP_TUNNEL) + dst_cache_reset_now(&md_dst->u.tun_info.dst_cache); + } +#endif dst_count_dec(dst); call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu); } diff --git a/net/core/filter.c b/net/core/filter.c index a2f990bf51e5e..790345c2546b7 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -218,24 +218,36 @@ BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x) return 0; } +static int bpf_skb_load_helper_convert_offset(const struct sk_buff *skb, int offset) +{ + if (likely(offset >= 0)) + return offset; + + if (offset >= SKF_NET_OFF) + return offset - SKF_NET_OFF + skb_network_offset(skb); + + if (offset >= SKF_LL_OFF && skb_mac_header_was_set(skb)) + return offset - SKF_LL_OFF + skb_mac_offset(skb); + + return INT_MIN; +} + BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *, data, int, headlen, int, offset) { - u8 tmp, *ptr; + u8 tmp; const int len = sizeof(tmp); - if (offset >= 0) { - if (headlen - offset >= len) - return *(u8 *)(data + offset); - if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) - return tmp; - } else { - ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); - if (likely(ptr)) - return *(u8 *)ptr; - } + offset = bpf_skb_load_helper_convert_offset(skb, offset); + if (offset == INT_MIN) + return -EFAULT; - return -EFAULT; + if (headlen - offset >= len) + return *(u8 *)(data + offset); + if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) + return tmp; + else + return -EFAULT; } BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb, @@ -248,21 +260,19 @@ BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb, BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *, data, int, headlen, int, offset) { - __be16 tmp, *ptr; + __be16 tmp; const int len = sizeof(tmp); - if (offset >= 0) { - if (headlen - offset >= len) - return get_unaligned_be16(data + offset); - if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) - return be16_to_cpu(tmp); - } else { - ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); - if (likely(ptr)) - return get_unaligned_be16(ptr); - } + offset = bpf_skb_load_helper_convert_offset(skb, offset); + if (offset == INT_MIN) + return -EFAULT; - return -EFAULT; + if (headlen - offset >= len) + return get_unaligned_be16(data + offset); + if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) + return be16_to_cpu(tmp); + else + return -EFAULT; } 
BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb, @@ -275,21 +285,19 @@ BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb, BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *, data, int, headlen, int, offset) { - __be32 tmp, *ptr; + __be32 tmp; const int len = sizeof(tmp); - if (likely(offset >= 0)) { - if (headlen - offset >= len) - return get_unaligned_be32(data + offset); - if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) - return be32_to_cpu(tmp); - } else { - ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); - if (likely(ptr)) - return get_unaligned_be32(ptr); - } + offset = bpf_skb_load_helper_convert_offset(skb, offset); + if (offset == INT_MIN) + return -EFAULT; - return -EFAULT; + if (headlen - offset >= len) + return get_unaligned_be32(data + offset); + if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) + return be32_to_cpu(tmp); + else + return -EFAULT; } BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb, diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 5db41bf2ed93e..9cd8de6bebb54 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -853,23 +853,30 @@ __skb_flow_dissect_ports(const struct sk_buff *skb, void *target_container, const void *data, int nhoff, u8 ip_proto, int hlen) { - enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX; - struct flow_dissector_key_ports *key_ports; + struct flow_dissector_key_ports_range *key_ports_range = NULL; + struct flow_dissector_key_ports *key_ports = NULL; + __be32 ports; if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) - dissector_ports = FLOW_DISSECTOR_KEY_PORTS; - else if (dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_PORTS_RANGE)) - dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE; + key_ports = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_PORTS, + target_container); - if (dissector_ports == FLOW_DISSECTOR_KEY_MAX) + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS_RANGE)) + key_ports_range = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_PORTS_RANGE, + target_container); + + if (!key_ports && !key_ports_range) return; - key_ports = skb_flow_dissector_target(flow_dissector, - dissector_ports, - target_container); - key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, - data, hlen); + ports = __skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen); + + if (key_ports) + key_ports->ports = ports; + + if (key_ports_range) + key_ports_range->tp.ports = ports; } static void @@ -924,6 +931,7 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys, struct flow_dissector *flow_dissector, void *target_container) { + struct flow_dissector_key_ports_range *key_ports_range = NULL; struct flow_dissector_key_ports *key_ports = NULL; struct flow_dissector_key_control *key_control; struct flow_dissector_key_basic *key_basic; @@ -968,20 +976,21 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys, key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; } - if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) { key_ports = skb_flow_dissector_target(flow_dissector, FLOW_DISSECTOR_KEY_PORTS, target_container); - else if (dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_PORTS_RANGE)) - key_ports = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_PORTS_RANGE, - 
target_container); - - if (key_ports) { key_ports->src = flow_keys->sport; key_ports->dst = flow_keys->dport; } + if (dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_PORTS_RANGE)) { + key_ports_range = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_PORTS_RANGE, + target_container); + key_ports_range->tp.src = flow_keys->sport; + key_ports_range->tp.dst = flow_keys->dport; + } if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_FLOW_LABEL)) { diff --git a/net/core/gro.c b/net/core/gro.c index d1f44084e978f..0ad549b07e039 100644 --- a/net/core/gro.c +++ b/net/core/gro.c @@ -7,9 +7,6 @@ #define MAX_GRO_SKBS 8 -/* This should be increased if a protocol with a bigger head is added. */ -#define GRO_MAX_HEAD (MAX_HEADER + 128) - static DEFINE_SPINLOCK(offload_lock); /** @@ -656,6 +653,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) skb->pkt_type = PACKET_HOST; skb->encapsulation = 0; + skb->ip_summed = CHECKSUM_NONE; skb_shinfo(skb)->gso_type = 0; skb_shinfo(skb)->gso_size = 0; if (unlikely(skb->slow_gro)) { diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c index 711cd3b4347a7..4417a18b3e951 100644 --- a/net/core/lwtunnel.c +++ b/net/core/lwtunnel.c @@ -23,6 +23,8 @@ #include #include +#include "dev.h" + DEFINE_STATIC_KEY_FALSE(nf_hooks_lwtunnel_enabled); EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_enabled); @@ -325,13 +327,23 @@ EXPORT_SYMBOL_GPL(lwtunnel_cmp_encap); int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct dst_entry *dst = skb_dst(skb); const struct lwtunnel_encap_ops *ops; struct lwtunnel_state *lwtstate; - int ret = -EINVAL; + struct dst_entry *dst; + int ret; + + if (dev_xmit_recursion()) { + net_crit_ratelimited("%s(): recursion limit reached on datapath\n", + __func__); + ret = -ENETDOWN; + goto drop; + } - if (!dst) + dst = skb_dst(skb); + if (!dst) { + ret = -EINVAL; goto drop; + } lwtstate = dst->lwtstate; if (lwtstate->type == LWTUNNEL_ENCAP_NONE || @@ -341,8 +353,11 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb) ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); - if (likely(ops && ops->output)) + if (likely(ops && ops->output)) { + dev_xmit_recursion_inc(); ret = ops->output(net, sk, skb); + dev_xmit_recursion_dec(); + } rcu_read_unlock(); if (ret == -EOPNOTSUPP) @@ -359,13 +374,23 @@ EXPORT_SYMBOL_GPL(lwtunnel_output); int lwtunnel_xmit(struct sk_buff *skb) { - struct dst_entry *dst = skb_dst(skb); const struct lwtunnel_encap_ops *ops; struct lwtunnel_state *lwtstate; - int ret = -EINVAL; + struct dst_entry *dst; + int ret; + + if (dev_xmit_recursion()) { + net_crit_ratelimited("%s(): recursion limit reached on datapath\n", + __func__); + ret = -ENETDOWN; + goto drop; + } - if (!dst) + dst = skb_dst(skb); + if (!dst) { + ret = -EINVAL; goto drop; + } lwtstate = dst->lwtstate; @@ -376,8 +401,11 @@ int lwtunnel_xmit(struct sk_buff *skb) ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); - if (likely(ops && ops->xmit)) + if (likely(ops && ops->xmit)) { + dev_xmit_recursion_inc(); ret = ops->xmit(skb); + dev_xmit_recursion_dec(); + } rcu_read_unlock(); if (ret == -EOPNOTSUPP) @@ -394,13 +422,23 @@ EXPORT_SYMBOL_GPL(lwtunnel_xmit); int lwtunnel_input(struct sk_buff *skb) { - struct dst_entry *dst = skb_dst(skb); const struct lwtunnel_encap_ops *ops; struct lwtunnel_state *lwtstate; - int ret = -EINVAL; + struct dst_entry *dst; + int ret; - if (!dst) + if (dev_xmit_recursion()) { + 
net_crit_ratelimited("%s(): recursion limit reached on datapath\n", + __func__); + ret = -ENETDOWN; goto drop; + } + + dst = skb_dst(skb); + if (!dst) { + ret = -EINVAL; + goto drop; + } lwtstate = dst->lwtstate; if (lwtstate->type == LWTUNNEL_ENCAP_NONE || @@ -410,8 +448,11 @@ int lwtunnel_input(struct sk_buff *skb) ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); - if (likely(ops && ops->input)) + if (likely(ops && ops->input)) { + dev_xmit_recursion_inc(); ret = ops->input(skb); + dev_xmit_recursion_dec(); + } rcu_read_unlock(); if (ret == -EOPNOTSUPP) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index c7f7ea61b524a..8082cc6be4fc1 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2301,6 +2301,7 @@ static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = { static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = { [NDTPA_IFINDEX] = { .type = NLA_U32 }, [NDTPA_QUEUE_LEN] = { .type = NLA_U32 }, + [NDTPA_QUEUE_LENBYTES] = { .type = NLA_U32 }, [NDTPA_PROXY_QLEN] = { .type = NLA_U32 }, [NDTPA_APP_PROBES] = { .type = NLA_U32 }, [NDTPA_UCAST_PROBES] = { .type = NLA_U32 }, diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 45fb60bc48039..e95c2933756df 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -319,6 +319,7 @@ static int netpoll_owner_active(struct net_device *dev) static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) { netdev_tx_t status = NETDEV_TX_BUSY; + netdev_tx_t ret = NET_XMIT_DROP; struct net_device *dev; unsigned long tries; /* It is up to the caller to keep npinfo alive. */ @@ -327,11 +328,12 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) lockdep_assert_irqs_disabled(); dev = np->dev; + rcu_read_lock(); npinfo = rcu_dereference_bh(dev->npinfo); if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { dev_kfree_skb_irq(skb); - return NET_XMIT_DROP; + goto out; } /* don't get messages out of order, and no recursion */ @@ -370,7 +372,10 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) skb_queue_tail(&npinfo->txq, skb); schedule_delayed_work(&npinfo->tx_work,0); } - return NETDEV_TX_OK; + ret = NETDEV_TX_OK; +out: + rcu_read_unlock(); + return ret; } netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) diff --git a/net/core/page_pool.c b/net/core/page_pool.c index a813d30d21353..7b20f6fcb82c0 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -1066,7 +1066,13 @@ static void page_pool_release_retry(struct work_struct *wq) int inflight; inflight = page_pool_release(pool); - if (!inflight) + /* In rare cases, a driver bug may cause inflight to go negative. + * Don't reschedule release if inflight is 0 or negative. + * - If 0, the page_pool has been destroyed + * - if negative, we will never recover + * in both cases no reschedule is necessary. 
+ */ + if (inflight <= 0) return; /* Periodic warning for page pools the user can't see */ diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c index 48335766c1bfd..8d31c71bea1a3 100644 --- a/net/core/page_pool_user.c +++ b/net/core/page_pool_user.c @@ -353,7 +353,7 @@ void page_pool_unlist(struct page_pool *pool) int page_pool_check_memory_provider(struct net_device *dev, struct netdev_rx_queue *rxq) { - struct net_devmem_dmabuf_binding *binding = rxq->mp_params.mp_priv; + void *binding = rxq->mp_params.mp_priv; struct page_pool *pool; struct hlist_node *n; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 2ba5cd965d3fa..4d0ee1c9002aa 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1005,6 +1005,9 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, /* IFLA_VF_STATS_TX_DROPPED */ nla_total_size_64bit(sizeof(__u64))); } + if (dev->netdev_ops->ndo_get_vf_guid) + size += num_vfs * 2 * + nla_total_size(sizeof(struct ifla_vf_guid)); return size; } else return 0; diff --git a/net/core/scm.c b/net/core/scm.c index 4f6a14babe5ae..733c0cbd393d2 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -282,6 +282,16 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) } EXPORT_SYMBOL(put_cmsg); +int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len, + void *data) +{ + /* Don't produce truncated CMSGs */ + if (!msg->msg_control || msg->msg_controllen < CMSG_LEN(len)) + return -ETOOSMALL; + + return put_cmsg(msg, level, type, len, data); +} + void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss_internal) { struct scm_timestamping64 tss; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 74149dc4ee318..f220306731dac 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -69,6 +69,7 @@ #include #include #include +#include #include #include #include @@ -95,7 +96,9 @@ static struct kmem_cache *skbuff_ext_cache __ro_after_init; #endif -#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER) +#define GRO_MAX_HEAD_PAD (GRO_MAX_HEAD + NET_SKB_PAD + NET_IP_ALIGN) +#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(max(MAX_TCP_HEADER, \ + GRO_MAX_HEAD_PAD)) /* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two. * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique @@ -736,7 +739,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, /* If requested length is either too small or too big, * we use kmalloc() for skb->head allocation. 
*/ - if (len <= SKB_WITH_OVERHEAD(1024) || + if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) || len > SKB_WITH_OVERHEAD(PAGE_SIZE) || (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); @@ -816,7 +819,8 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len) * When the small frag allocator is available, prefer it over kmalloc * for small fragments */ - if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) || + if ((!NAPI_HAS_SMALL_PAGE_FRAG && + len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)) || len > SKB_WITH_OVERHEAD(PAGE_SIZE) || (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI, @@ -6123,11 +6127,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) skb->offload_fwd_mark = 0; skb->offload_l3_fwd_mark = 0; #endif + ipvs_reset(skb); if (!xnet) return; - ipvs_reset(skb); skb->mark = 0; skb_clear_tstamp(skb); } diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 8ad7e6755fd64..f76cbf49c68c8 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -548,6 +548,9 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, return num_sge; } +#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) + psock->ingress_bytes += len; +#endif copied = len; msg->sg.start = 0; msg->sg.size = copied; @@ -1143,6 +1146,10 @@ int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock) if (!ret) sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED); + if (sk_is_tcp(sk)) { + psock->strp.cb.read_sock = tcp_bpf_strp_read_sock; + psock->copied_seq = tcp_sk(sk)->copied_seq; + } return ret; } diff --git a/net/core/sock.c b/net/core/sock.c index a83f64a1d96a2..0842dc9189bf8 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2107,6 +2107,8 @@ int sk_getsockopt(struct sock *sk, int level, int optname, */ static inline void sock_lock_init(struct sock *sk) { + sk_owner_clear(sk); + if (sk->sk_kern_sock) sock_lock_init_class_and_name( sk, @@ -2203,6 +2205,9 @@ static void sk_prot_free(struct proto *prot, struct sock *sk) cgroup_sk_free(&sk->sk_cgrp_data); mem_cgroup_sk_free(sk); security_sk_free(sk); + + sk_owner_put(sk); + if (slab != NULL) kmem_cache_free(slab, sk); else diff --git a/net/core/sock_map.c b/net/core/sock_map.c index f1b9b3958792c..82a14f131d00c 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -303,7 +303,10 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk) write_lock_bh(&sk->sk_callback_lock); if (stream_parser && stream_verdict && !psock->saved_data_ready) { - ret = sk_psock_init_strp(sk, psock); + if (sk_is_tcp(sk)) + ret = sk_psock_init_strp(sk, psock); + else + ret = -EOPNOTSUPP; if (ret) { write_unlock_bh(&sk->sk_callback_lock); sk_psock_put(sk, psock); @@ -541,6 +544,9 @@ static bool sock_map_sk_state_allowed(const struct sock *sk) return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN); if (sk_is_stream_unix(sk)) return (1 << sk->sk_state) & TCPF_ESTABLISHED; + if (sk_is_vsock(sk) && + (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) + return (1 << sk->sk_state) & TCPF_ESTABLISHED; return true; } diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 5dd54a8133980..47e2743ffe228 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -34,6 +34,7 @@ static int min_sndbuf = SOCK_MIN_SNDBUF; static int min_rcvbuf = SOCK_MIN_RCVBUF; static int max_skb_frags = MAX_SKB_FRAGS; static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE; +static int 
netdev_budget_usecs_min = 2 * USEC_PER_SEC / HZ; static int net_msg_warn; /* Unused, but still a sysctl */ @@ -580,7 +581,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, + .extra1 = &netdev_budget_usecs_min, }, { .procname = "fb_tunnels_only_for_init_net", diff --git a/net/devlink/core.c b/net/devlink/core.c index f49cd83f1955f..7203c39532fcc 100644 --- a/net/devlink/core.c +++ b/net/devlink/core.c @@ -117,7 +117,7 @@ static struct devlink_rel *devlink_rel_alloc(void) err = xa_alloc_cyclic(&devlink_rels, &rel->index, rel, xa_limit_32b, &next, GFP_KERNEL); - if (err) { + if (err < 0) { kfree(rel); return ERR_PTR(err); } diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 1664547deffd0..ac3a252969cb6 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -862,6 +862,16 @@ static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst) kfree(dst->lags); } +static void dsa_tree_teardown_routing_table(struct dsa_switch_tree *dst) +{ + struct dsa_link *dl, *next; + + list_for_each_entry_safe(dl, next, &dst->rtable, list) { + list_del(&dl->list); + kfree(dl); + } +} + static int dsa_tree_setup(struct dsa_switch_tree *dst) { bool complete; @@ -879,7 +889,7 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst) err = dsa_tree_setup_cpu_ports(dst); if (err) - return err; + goto teardown_rtable; err = dsa_tree_setup_switches(dst); if (err) @@ -911,14 +921,14 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst) dsa_tree_teardown_switches(dst); teardown_cpu_ports: dsa_tree_teardown_cpu_ports(dst); +teardown_rtable: + dsa_tree_teardown_routing_table(dst); return err; } static void dsa_tree_teardown(struct dsa_switch_tree *dst) { - struct dsa_link *dl, *next; - if (!dst->setup) return; @@ -932,10 +942,7 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst) dsa_tree_teardown_cpu_ports(dst); - list_for_each_entry_safe(dl, next, &dst->rtable, list) { - list_del(&dl->list); - kfree(dl); - } + dsa_tree_teardown_routing_table(dst); pr_info("DSA: tree %d torn down\n", dst->index); @@ -1478,12 +1485,44 @@ static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd) static void dsa_switch_release_ports(struct dsa_switch *ds) { + struct dsa_mac_addr *a, *tmp; struct dsa_port *dp, *next; + struct dsa_vlan *v, *n; dsa_switch_for_each_port_safe(dp, next, ds) { - WARN_ON(!list_empty(&dp->fdbs)); - WARN_ON(!list_empty(&dp->mdbs)); - WARN_ON(!list_empty(&dp->vlans)); + /* These are either entries that upper layers lost track of + * (probably due to bugs), or installed through interfaces + * where one does not necessarily have to remove them, like + * ndo_dflt_fdb_add(). + */ + list_for_each_entry_safe(a, tmp, &dp->fdbs, list) { + dev_info(ds->dev, + "Cleaning up unicast address %pM vid %u from port %d\n", + a->addr, a->vid, dp->index); + list_del(&a->list); + kfree(a); + } + + list_for_each_entry_safe(a, tmp, &dp->mdbs, list) { + dev_info(ds->dev, + "Cleaning up multicast address %pM vid %u from port %d\n", + a->addr, a->vid, dp->index); + list_del(&a->list); + kfree(a); + } + + /* These are entries that upper layers have lost track of, + * probably due to bugs, but also due to dsa_port_do_vlan_del() + * having failed and the VLAN entry still lingering on. 
+ */ + list_for_each_entry_safe(v, n, &dp->vlans, list) { + dev_info(ds->dev, + "Cleaning up vid %u from port %d\n", + v->vid, dp->index); + list_del(&v->list); + kfree(v); + } + list_del(&dp->list); kfree(dp); } diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index 3ee53e28ec2e9..53e03fd8071b4 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -197,7 +197,7 @@ static int dsa_port_do_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid) err = ds->ops->tag_8021q_vlan_del(ds, port, vid); if (err) { - refcount_inc(&v->refcount); + refcount_set(&v->refcount, 1); return err; } diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c index f22051f33868a..84096f6b0236e 100644 --- a/net/ethtool/cabletest.c +++ b/net/ethtool/cabletest.c @@ -72,8 +72,8 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info) dev = req_info.dev; rtnl_lock(); - phydev = ethnl_req_get_phydev(&req_info, - tb[ETHTOOL_A_CABLE_TEST_HEADER], + phydev = ethnl_req_get_phydev(&req_info, tb, + ETHTOOL_A_CABLE_TEST_HEADER, info->extack); if (IS_ERR_OR_NULL(phydev)) { ret = -EOPNOTSUPP; @@ -339,8 +339,8 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info) goto out_dev_put; rtnl_lock(); - phydev = ethnl_req_get_phydev(&req_info, - tb[ETHTOOL_A_CABLE_TEST_TDR_HEADER], + phydev = ethnl_req_get_phydev(&req_info, tb, + ETHTOOL_A_CABLE_TEST_TDR_HEADER, info->extack); if (IS_ERR_OR_NULL(phydev)) { ret = -EOPNOTSUPP; diff --git a/net/ethtool/cmis_cdb.c b/net/ethtool/cmis_cdb.c index 4d55811479520..8bf99295bfbe9 100644 --- a/net/ethtool/cmis_cdb.c +++ b/net/ethtool/cmis_cdb.c @@ -346,7 +346,7 @@ ethtool_cmis_module_poll(struct net_device *dev, struct netlink_ext_ack extack = {}; int err; - ethtool_cmis_page_init(&page_data, 0, offset, sizeof(rpl)); + ethtool_cmis_page_init(&page_data, 0, offset, sizeof(*rpl)); page_data.data = (u8 *)rpl; err = ops->get_module_eeprom_by_page(dev, &page_data, &extack); diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c index 34d76e87847d0..05a5f72c99fab 100644 --- a/net/ethtool/linkstate.c +++ b/net/ethtool/linkstate.c @@ -3,6 +3,7 @@ #include "netlink.h" #include "common.h" #include +#include struct linkstate_req_info { struct ethnl_req_info base; @@ -26,9 +27,8 @@ const struct nla_policy ethnl_linkstate_get_policy[] = { NLA_POLICY_NESTED(ethnl_header_policy_stats), }; -static int linkstate_get_sqi(struct net_device *dev) +static int linkstate_get_sqi(struct phy_device *phydev) { - struct phy_device *phydev = dev->phydev; int ret; if (!phydev) @@ -46,9 +46,8 @@ static int linkstate_get_sqi(struct net_device *dev) return ret; } -static int linkstate_get_sqi_max(struct net_device *dev) +static int linkstate_get_sqi_max(struct phy_device *phydev) { - struct phy_device *phydev = dev->phydev; int ret; if (!phydev) @@ -100,19 +99,28 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base, { struct linkstate_reply_data *data = LINKSTATE_REPDATA(reply_base); struct net_device *dev = reply_base->dev; + struct nlattr **tb = info->attrs; + struct phy_device *phydev; int ret; + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_LINKSTATE_HEADER, + info->extack); + if (IS_ERR(phydev)) { + ret = PTR_ERR(phydev); + goto out; + } + ret = ethnl_ops_begin(dev); if (ret < 0) return ret; data->link = __ethtool_get_link(dev); - ret = linkstate_get_sqi(dev); + ret = linkstate_get_sqi(phydev); if (linkstate_sqi_critical_error(ret)) goto out; data->sqi = ret; - ret = linkstate_get_sqi_max(dev); + ret = linkstate_get_sqi_max(phydev); if 
(linkstate_sqi_critical_error(ret)) goto out; data->sqi_max = ret; @@ -127,9 +135,9 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base, sizeof(data->link_stats) / 8); if (req_base->flags & ETHTOOL_FLAG_STATS) { - if (dev->phydev) - data->link_stats.link_down_events = - READ_ONCE(dev->phydev->link_down_events); + if (phydev) + phy_ethtool_get_link_ext_stats(phydev, + &data->link_stats); if (dev->ethtool_ops->get_link_ext_stats) dev->ethtool_ops->get_link_ext_stats(dev, diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index 4d18dc29b3043..a52be67139d0a 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -210,7 +210,7 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info, } struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info, - const struct nlattr *header, + struct nlattr **tb, unsigned int header, struct netlink_ext_ack *extack) { struct phy_device *phydev; @@ -224,8 +224,8 @@ struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info, return req_info->dev->phydev; phydev = phy_link_topo_get_phy(req_info->dev, req_info->phy_index); - if (!phydev) { - NL_SET_ERR_MSG_ATTR(extack, header, + if (!phydev && tb) { + NL_SET_ERR_MSG_ATTR(extack, tb[header], "no phy matching phyindex"); return ERR_PTR(-ENODEV); } @@ -490,7 +490,7 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info) ret = ops->prepare_data(req_info, reply_data, info); rtnl_unlock(); if (ret < 0) - goto err_cleanup; + goto err_dev; ret = ops->reply_size(req_info, reply_data); if (ret < 0) goto err_cleanup; @@ -548,7 +548,7 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev, ret = ctx->ops->prepare_data(ctx->req_info, ctx->reply_data, info); rtnl_unlock(); if (ret < 0) - goto out; + goto out_cancel; ret = ethnl_fill_reply_header(skb, dev, ctx->ops->hdr_attr); if (ret < 0) goto out; @@ -557,6 +557,7 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev, out: if (ctx->ops->cleanup_data) ctx->ops->cleanup_data(ctx->reply_data); +out_cancel: ctx->reply_data->dev = NULL; if (ret < 0) genlmsg_cancel(skb, ehdr); @@ -760,7 +761,7 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd, ethnl_init_reply_data(reply_data, ops, dev); ret = ops->prepare_data(req_info, reply_data, &info); if (ret < 0) - goto err_cleanup; + goto err_rep; ret = ops->reply_size(req_info, reply_data); if (ret < 0) goto err_cleanup; @@ -795,6 +796,7 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd, err_cleanup: if (ops->cleanup_data) ops->cleanup_data(reply_data); +err_rep: kfree(reply_data); kfree(req_info); return; diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index 203b08eb6c6f6..5e176938d6d22 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -275,7 +275,8 @@ static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info) * ethnl_req_get_phydev() - Gets the phy_device targeted by this request, * if any. Must be called under rntl_lock(). * @req_info: The ethnl request to get the phy from. - * @header: The netlink header, used for error reporting. + * @tb: The netlink attributes array, for error reporting. + * @header: The netlink header index, used for error reporting. * @extack: The netlink extended ACK, for error reporting. 
* * The caller must hold RTNL, until it's done interacting with the returned @@ -289,7 +290,7 @@ static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info) * is returned. */ struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info, - const struct nlattr *header, + struct nlattr **tb, unsigned int header, struct netlink_ext_ack *extack); /** diff --git a/net/ethtool/phy.c b/net/ethtool/phy.c index ed8f690f6bac8..e067cc234419d 100644 --- a/net/ethtool/phy.c +++ b/net/ethtool/phy.c @@ -125,7 +125,7 @@ static int ethnl_phy_parse_request(struct ethnl_req_info *req_base, struct phy_req_info *req_info = PHY_REQINFO(req_base); struct phy_device *phydev; - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PHY_HEADER], + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PHY_HEADER, extack); if (!phydev) return 0; diff --git a/net/ethtool/plca.c b/net/ethtool/plca.c index d95d92f173a6d..e1f7820a6158f 100644 --- a/net/ethtool/plca.c +++ b/net/ethtool/plca.c @@ -62,7 +62,7 @@ static int plca_get_cfg_prepare_data(const struct ethnl_req_info *req_base, struct phy_device *phydev; int ret; - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PLCA_HEADER], + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PLCA_HEADER, info->extack); // check that the PHY device is available and connected if (IS_ERR_OR_NULL(phydev)) { @@ -152,7 +152,7 @@ ethnl_set_plca(struct ethnl_req_info *req_info, struct genl_info *info) bool mod = false; int ret; - phydev = ethnl_req_get_phydev(req_info, tb[ETHTOOL_A_PLCA_HEADER], + phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PLCA_HEADER, info->extack); // check that the PHY device is available and connected if (IS_ERR_OR_NULL(phydev)) @@ -211,7 +211,7 @@ static int plca_get_status_prepare_data(const struct ethnl_req_info *req_base, struct phy_device *phydev; int ret; - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PLCA_HEADER], + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PLCA_HEADER, info->extack); // check that the PHY device is available and connected if (IS_ERR_OR_NULL(phydev)) { diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c index a0705edca22a1..71843de832cca 100644 --- a/net/ethtool/pse-pd.c +++ b/net/ethtool/pse-pd.c @@ -64,7 +64,7 @@ static int pse_prepare_data(const struct ethnl_req_info *req_base, if (ret < 0) return ret; - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PSE_HEADER], + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PSE_HEADER, info->extack); if (IS_ERR(phydev)) return -ENODEV; @@ -261,7 +261,7 @@ ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info) struct phy_device *phydev; int ret; - phydev = ethnl_req_get_phydev(req_info, tb[ETHTOOL_A_PSE_HEADER], + phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PSE_HEADER, info->extack); ret = ethnl_set_pse_validate(phydev, info); if (ret) diff --git a/net/ethtool/stats.c b/net/ethtool/stats.c index 912f0c4fff2fb..273ae4ff343fe 100644 --- a/net/ethtool/stats.c +++ b/net/ethtool/stats.c @@ -1,5 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only +#include +#include + #include "netlink.h" #include "common.h" #include "bitset.h" @@ -20,6 +23,7 @@ struct stats_reply_data { struct ethtool_eth_mac_stats mac_stats; struct ethtool_eth_ctrl_stats ctrl_stats; struct ethtool_rmon_stats rmon_stats; + struct ethtool_phy_stats phydev_stats; ); const struct ethtool_rmon_hist_range *rmon_ranges; }; @@ -120,8 +124,15 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base, struct stats_reply_data 
*data = STATS_REPDATA(reply_base); enum ethtool_mac_stats_src src = req_info->src; struct net_device *dev = reply_base->dev; + struct nlattr **tb = info->attrs; + struct phy_device *phydev; int ret; + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_STATS_HEADER, + info->extack); + if (IS_ERR(phydev)) + return PTR_ERR(phydev); + ret = ethnl_ops_begin(dev); if (ret < 0) return ret; @@ -145,6 +156,13 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base, data->ctrl_stats.src = src; data->rmon_stats.src = src; + if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) && + src == ETHTOOL_MAC_STATS_SRC_AGGREGATE) { + if (phydev) + phy_ethtool_get_phy_stats(phydev, &data->phy_stats, + &data->phydev_stats); + } + if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) && dev->ethtool_ops->get_eth_phy_stats) dev->ethtool_ops->get_eth_phy_stats(dev, &data->phy_stats); diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c index b3382b3cf325c..b9400d18f01d5 100644 --- a/net/ethtool/strset.c +++ b/net/ethtool/strset.c @@ -299,7 +299,7 @@ static int strset_prepare_data(const struct ethnl_req_info *req_base, return 0; } - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_HEADER_FLAGS], + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_HEADER_FLAGS, info->extack); /* phydev can be NULL, check for errors only */ diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 59ffaa89d7b05..8fb48f42581ce 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1077,7 +1077,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r, __be32 mask = ((struct sockaddr_in *)&r->arp_netmask)->sin_addr.s_addr; if (!dev && (r->arp_flags & ATF_COM)) { - dev = dev_getbyhwaddr_rcu(net, r->arp_ha.sa_family, + dev = dev_getbyhwaddr(net, r->arp_ha.sa_family, r->arp_ha.sa_data); if (!dev) return -ENODEV; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index f45bc187a92a7..b8111ec651b54 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -477,13 +477,11 @@ static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb) return route_lookup_dev; } -static struct rtable *icmp_route_lookup(struct net *net, - struct flowi4 *fl4, +static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4, struct sk_buff *skb_in, - const struct iphdr *iph, - __be32 saddr, u8 tos, u32 mark, - int type, int code, - struct icmp_bxm *param) + const struct iphdr *iph, __be32 saddr, + dscp_t dscp, u32 mark, int type, + int code, struct icmp_bxm *param) { struct net_device *route_lookup_dev; struct dst_entry *dst, *dst2; @@ -497,7 +495,7 @@ static struct rtable *icmp_route_lookup(struct net *net, fl4->saddr = saddr; fl4->flowi4_mark = mark; fl4->flowi4_uid = sock_net_uid(net, NULL); - fl4->flowi4_tos = tos & INET_DSCP_MASK; + fl4->flowi4_tos = inet_dscp_to_dsfield(dscp); fl4->flowi4_proto = IPPROTO_ICMP; fl4->fl4_icmp_type = type; fl4->fl4_icmp_code = code; @@ -549,7 +547,7 @@ static struct rtable *icmp_route_lookup(struct net *net, orefdst = skb_in->_skb_refdst; /* save old refdst */ skb_dst_set(skb_in, NULL); err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr, - tos, rt2->dst.dev); + dscp, rt2->dst.dev); dst_release(&rt2->dst); rt2 = skb_rtable(skb_in); @@ -745,8 +743,9 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, ipc.opt = &icmp_param.replyopts.opt; ipc.sockc.mark = mark; - rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark, - type, code, &icmp_param); + rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, + inet_dsfield_to_dscp(tos), mark, 
type, code, + &icmp_param); if (IS_ERR(rt)) goto out_unlock; diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 68aedb8877b9f..81e86e5defee6 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -617,7 +617,8 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev) orefdst = skb->_skb_refdst; skb_dst_set(skb, NULL); - err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev); + err = ip_route_input(skb, nexthop, iph->saddr, ip4h_dscp(iph), + dev); rt2 = skb_rtable(skb); if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { skb_dst_drop(skb); diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index a3676155be78b..f65d2f7273813 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -416,7 +416,7 @@ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst, skb_dst_update_pmtu_no_confirm(skb, mtu); - if (!reply || skb->pkt_type == PACKET_HOST) + if (!reply) return 0; if (skb->protocol == htons(ETH_P_IP)) @@ -451,7 +451,7 @@ static const struct nla_policy geneve_opt_policy[LWTUNNEL_IP_OPT_GENEVE_MAX + 1] = { [LWTUNNEL_IP_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, [LWTUNNEL_IP_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, - [LWTUNNEL_IP_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 }, + [LWTUNNEL_IP_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 127 }, }; static const struct nla_policy diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4f77bd862e957..b731a4a8f2b0d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1564,12 +1564,13 @@ EXPORT_SYMBOL(tcp_recv_skb); * or for 'peeking' the socket using this routine * (although both would be easy to implement). */ -int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, - sk_read_actor_t recv_actor) +static int __tcp_read_sock(struct sock *sk, read_descriptor_t *desc, + sk_read_actor_t recv_actor, bool noack, + u32 *copied_seq) { struct sk_buff *skb; struct tcp_sock *tp = tcp_sk(sk); - u32 seq = tp->copied_seq; + u32 seq = *copied_seq; u32 offset; int copied = 0; @@ -1623,9 +1624,12 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, tcp_eat_recv_skb(sk, skb); if (!desc->count) break; - WRITE_ONCE(tp->copied_seq, seq); + WRITE_ONCE(*copied_seq, seq); } - WRITE_ONCE(tp->copied_seq, seq); + WRITE_ONCE(*copied_seq, seq); + + if (noack) + goto out; tcp_rcv_space_adjust(sk); @@ -1634,10 +1638,25 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, tcp_recv_skb(sk, seq, &offset); tcp_cleanup_rbuf(sk, copied); } +out: return copied; } + +int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, + sk_read_actor_t recv_actor) +{ + return __tcp_read_sock(sk, desc, recv_actor, false, + &tcp_sk(sk)->copied_seq); +} EXPORT_SYMBOL(tcp_read_sock); +int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc, + sk_read_actor_t recv_actor, bool noack, + u32 *copied_seq) +{ + return __tcp_read_sock(sk, desc, recv_actor, noack, copied_seq); +} + int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) { struct sk_buff *skb; @@ -2437,14 +2456,12 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb, */ memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg)); dmabuf_cmsg.frag_size = copy; - err = put_cmsg(msg, SOL_SOCKET, SO_DEVMEM_LINEAR, - sizeof(dmabuf_cmsg), &dmabuf_cmsg); - if (err || msg->msg_flags & MSG_CTRUNC) { - msg->msg_flags &= ~MSG_CTRUNC; - if (!err) - err = -ETOOSMALL; + err = put_cmsg_notrunc(msg, SOL_SOCKET, + SO_DEVMEM_LINEAR, + sizeof(dmabuf_cmsg), + &dmabuf_cmsg); + if (err) goto out; - } 
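Both dmabuf cmsg hunks in this tcp.c diff swap the open-coded put_cmsg() + MSG_CTRUNC handling for put_cmsg_notrunc(). The sketch below only reconstructs the behaviour those callers previously open-coded (visible in the removed lines); the function name put_cmsg_notrunc_sketch() is illustrative and the in-tree helper may differ in detail.

/*
 * Sketch, not the kernel implementation: a put_cmsg() wrapper that refuses
 * to hand a truncated control message to user space, returning -ETOOSMALL
 * instead of silently setting MSG_CTRUNC.
 */
static int put_cmsg_notrunc_sketch(struct msghdr *msg, int level, int type,
				   int len, void *data)
{
	int err = put_cmsg(msg, level, type, len, data);

	if (err || (msg->msg_flags & MSG_CTRUNC)) {
		/* drop the truncation flag and report a hard error instead */
		msg->msg_flags &= ~MSG_CTRUNC;
		return err ?: -ETOOSMALL;
	}
	return 0;
}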
sent += copy; @@ -2498,16 +2515,12 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb, offset += copy; remaining_len -= copy; - err = put_cmsg(msg, SOL_SOCKET, - SO_DEVMEM_DMABUF, - sizeof(dmabuf_cmsg), - &dmabuf_cmsg); - if (err || msg->msg_flags & MSG_CTRUNC) { - msg->msg_flags &= ~MSG_CTRUNC; - if (!err) - err = -ETOOSMALL; + err = put_cmsg_notrunc(msg, SOL_SOCKET, + SO_DEVMEM_DMABUF, + sizeof(dmabuf_cmsg), + &dmabuf_cmsg); + if (err) goto out; - } atomic_long_inc(&niov->pp_ref_count); tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag); diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 392678ae80f4e..22e8a2af5dd8b 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -646,6 +646,42 @@ static int tcp_bpf_assert_proto_ops(struct proto *ops) ops->sendmsg == tcp_sendmsg ? 0 : -ENOTSUPP; } +#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) +int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc, + sk_read_actor_t recv_actor) +{ + struct sock *sk = strp->sk; + struct sk_psock *psock; + struct tcp_sock *tp; + int copied = 0; + + tp = tcp_sk(sk); + rcu_read_lock(); + psock = sk_psock(sk); + if (WARN_ON_ONCE(!psock)) { + desc->error = -EINVAL; + goto out; + } + + psock->ingress_bytes = 0; + copied = tcp_read_sock_noack(sk, desc, recv_actor, true, + &psock->copied_seq); + if (copied < 0) + goto out; + /* recv_actor may redirect skb to another socket (SK_REDIRECT) or + * just put skb into ingress queue of current socket (SK_PASS). + * For SK_REDIRECT, we need to ack the frame immediately but for + * SK_PASS, we want to delay the ack until tcp_bpf_recvmsg_parser(). + */ + tp->copied_seq = psock->copied_seq - psock->ingress_bytes; + tcp_rcv_space_adjust(sk); + __tcp_cleanup_rbuf(sk, copied - psock->ingress_bytes); +out: + rcu_read_unlock(); + return copied; +} +#endif /* CONFIG_BPF_STREAM_PARSER */ + int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore) { int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4; diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 0f523cbfe329e..32b28fc21b63c 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -178,7 +178,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb) if (!skb) return; - skb_dst_drop(skb); + tcp_cleanup_skb(skb); /* segs_in has been initialized to 1 in tcp_create_openreq_child(). * Hence, reset segs_in to 0 before calling tcp_segs_in() * to avoid double counting. Also, tcp_segs_in() expects @@ -195,7 +195,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb) TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN; tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; - __skb_queue_tail(&sk->sk_receive_queue, skb); + tcp_add_receive_queue(sk, skb); tp->syn_data_acked = 1; /* u64_stats_update_begin(&tp->syncp) not needed here, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2d43b29da15e2..d93a5a89c5692 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -243,9 +243,15 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) do_div(val, skb->truesize); tcp_sk(sk)->scaling_ratio = val ? 
val : 1; - if (old_ratio != tcp_sk(sk)->scaling_ratio) - WRITE_ONCE(tcp_sk(sk)->window_clamp, - tcp_win_from_space(sk, sk->sk_rcvbuf)); + if (old_ratio != tcp_sk(sk)->scaling_ratio) { + struct tcp_sock *tp = tcp_sk(sk); + + val = tcp_win_from_space(sk, sk->sk_rcvbuf); + tcp_set_window_clamp(sk, val); + + if (tp->window_clamp < tp->rcvq_space.space) + tp->rcvq_space.space = tp->window_clamp; + } } icsk->icsk_ack.rcv_mss = min_t(unsigned int, len, tcp_sk(sk)->advmss); @@ -4964,7 +4970,7 @@ static void tcp_ofo_queue(struct sock *sk) tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; if (!eaten) - __skb_queue_tail(&sk->sk_receive_queue, skb); + tcp_add_receive_queue(sk, skb); else kfree_skb_partial(skb, fragstolen); @@ -5156,7 +5162,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, skb, fragstolen)) ? 1 : 0; tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); if (!eaten) { - __skb_queue_tail(&sk->sk_receive_queue, skb); + tcp_add_receive_queue(sk, skb); skb_set_owner_r(skb, sk); } return eaten; @@ -5239,7 +5245,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) __kfree_skb(skb); return; } - skb_dst_drop(skb); + tcp_cleanup_skb(skb); __skb_pull(skb, tcp_hdr(skb)->doff * 4); reason = SKB_DROP_REASON_NOT_SPECIFIED; @@ -6208,7 +6214,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); /* Bulk data transfer: receiver */ - skb_dst_drop(skb); + tcp_cleanup_skb(skb); __skb_pull(skb, tcp_header_len); eaten = tcp_queue_rcv(sk, skb, &fragstolen); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index bcc2f1e090c7d..824048679e1b8 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2025,7 +2025,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb, */ skb_condense(skb); - skb_dst_drop(skb); + tcp_cleanup_skb(skb); if (unlikely(tcp_checksum_complete(skb))) { bh_unlock_sock(sk); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index bb1fe1ba867ac..f3e4fc9572196 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -806,12 +806,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, /* In sequence, PAWS is OK. */ - /* TODO: We probably should defer ts_recent change once - * we take ownership of @req. - */ - if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt)) - WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval); - if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) { /* Truncate SYN, it is out of window starting at tcp_rsk(req)->rcv_isn + 1. 
*/ @@ -860,6 +854,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, if (!child) goto listen_overflow; + if (own_req && tmp_opt.saw_tstamp && + !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt)) + tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval; + if (own_req && rsk_drop_req(req)) { reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req); inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req); diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 2308665b51c53..2dfac79dc78b8 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -13,12 +13,15 @@ #include #include -static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, +static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb, unsigned int seq, unsigned int mss) { + u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP; + u32 ts_seq = skb_shinfo(gso_skb)->tskey; + while (skb) { if (before(ts_seq, seq + mss)) { - skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP; + skb_shinfo(skb)->tx_flags |= flags; skb_shinfo(skb)->tskey = ts_seq; return; } @@ -193,8 +196,8 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, th = tcp_hdr(skb); seq = ntohl(th->seq); - if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP)) - tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss); + if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP)) + tcp_gso_tstamp(segs, gso_skb, seq, mss); newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta)); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 8da74dc63061c..f4e24fc878fab 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1470,12 +1470,12 @@ static bool udp_skb_has_head_state(struct sk_buff *skb) } /* fully reclaim rmem/fwd memory allocated for skb */ -static void udp_rmem_release(struct sock *sk, int size, int partial, - bool rx_queue_lock_held) +static void udp_rmem_release(struct sock *sk, unsigned int size, + int partial, bool rx_queue_lock_held) { struct udp_sock *up = udp_sk(sk); struct sk_buff_head *sk_queue; - int amt; + unsigned int amt; if (likely(partial)) { up->forward_deficit += size; @@ -1495,10 +1495,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial, if (!rx_queue_lock_held) spin_lock(&sk_queue->lock); - - sk_forward_alloc_add(sk, size); - amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1); - sk_forward_alloc_add(sk, -amt); + amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1); + sk_forward_alloc_add(sk, size - amt); if (amt) __sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT); @@ -1570,17 +1568,25 @@ static int udp_rmem_schedule(struct sock *sk, int size) int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) { struct sk_buff_head *list = &sk->sk_receive_queue; - int rmem, err = -ENOMEM; + unsigned int rmem, rcvbuf; spinlock_t *busy = NULL; - int size, rcvbuf; + int size, err = -ENOMEM; - /* Immediately drop when the receive queue is full. - * Always allow at least one packet. - */ rmem = atomic_read(&sk->sk_rmem_alloc); rcvbuf = READ_ONCE(sk->sk_rcvbuf); - if (rmem > rcvbuf) - goto drop; + size = skb->truesize; + + /* Immediately drop when the receive queue is full. + * Cast to unsigned int performs the boundary check for INT_MAX. + */ + if (rmem + size > rcvbuf) { + if (rcvbuf > INT_MAX >> 1) + goto drop; + + /* Always allow at least one packet for small buffer. 
*/ + if (rmem > rcvbuf) + goto drop; + } /* Under mem pressure, it might be helpful to help udp_recvmsg() * having linear skbs : @@ -1590,10 +1596,10 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) */ if (rmem > (rcvbuf >> 1)) { skb_condense(skb); - + size = skb->truesize; busy = busylock_acquire(sk); } - size = skb->truesize; + udp_set_dev_scratch(skb); atomic_add(size, &sk->sk_rmem_alloc); @@ -1680,7 +1686,7 @@ EXPORT_SYMBOL_GPL(skb_consume_udp); static struct sk_buff *__first_packet_length(struct sock *sk, struct sk_buff_head *rcvq, - int *total) + unsigned int *total) { struct sk_buff *skb; @@ -1713,8 +1719,8 @@ static int first_packet_length(struct sock *sk) { struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue; struct sk_buff_head *sk_queue = &sk->sk_receive_queue; + unsigned int total = 0; struct sk_buff *skb; - int total = 0; int res; spin_lock_bh(&rcvq->lock); diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index a5be6e4ed326f..ecfca59f31f13 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -321,13 +321,17 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, /* clear destructor to avoid skb_segment assigning it to tail */ copy_dtor = gso_skb->destructor == sock_wfree; - if (copy_dtor) + if (copy_dtor) { gso_skb->destructor = NULL; + gso_skb->sk = NULL; + } segs = skb_segment(gso_skb, features); if (IS_ERR_OR_NULL(segs)) { - if (copy_dtor) + if (copy_dtor) { gso_skb->destructor = sock_wfree; + gso_skb->sk = sk; + } return segs; } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f7c17388ff6aa..f5d49162f7983 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -5807,6 +5807,27 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, } } +static int inet6_fill_ifla6_stats_attrs(struct sk_buff *skb, + struct inet6_dev *idev) +{ + struct nlattr *nla; + + nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64)); + if (!nla) + goto nla_put_failure; + snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla)); + + nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64)); + if (!nla) + goto nla_put_failure; + snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla)); + + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev, u32 ext_filter_mask) { @@ -5829,18 +5850,10 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev, /* XXX - MC not implemented */ - if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS) - return 0; - - nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64)); - if (!nla) - goto nla_put_failure; - snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla)); - - nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64)); - if (!nla) - goto nla_put_failure; - snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla)); + if (!(ext_filter_mask & RTEXT_FILTER_SKIP_STATS)) { + if (inet6_fill_ifla6_stats_attrs(skb, idev) < 0) + goto nla_put_failure; + } nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr)); if (!nla) diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index dbcea9fee6262..62618a058b8fa 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c @@ -1072,8 +1072,13 @@ static int calipso_sock_getattr(struct sock *sk, struct ipv6_opt_hdr *hop; int opt_len, len, ret_val = -ENOMSG, offset; unsigned char *opt; - struct ipv6_txoptions 
*txopts = txopt_get(inet6_sk(sk)); + struct ipv6_pinfo *pinfo = inet6_sk(sk); + struct ipv6_txoptions *txopts; + + if (!pinfo) + return -EAFNOSUPPORT; + txopts = txopt_get(pinfo); if (!txopts || !txopts->hopopt) goto done; @@ -1125,8 +1130,13 @@ static int calipso_sock_setattr(struct sock *sk, { int ret_val; struct ipv6_opt_hdr *old, *new; - struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); + struct ipv6_pinfo *pinfo = inet6_sk(sk); + struct ipv6_txoptions *txopts; + + if (!pinfo) + return -EAFNOSUPPORT; + txopts = txopt_get(pinfo); old = NULL; if (txopts) old = txopts->hopopt; @@ -1153,8 +1163,13 @@ static int calipso_sock_setattr(struct sock *sk, static void calipso_sock_delattr(struct sock *sk) { struct ipv6_opt_hdr *new_hop; - struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); + struct ipv6_pinfo *pinfo = inet6_sk(sk); + struct ipv6_txoptions *txopts; + + if (!pinfo) + return; + txopts = txopt_get(pinfo); if (!txopts || !txopts->hopopt) goto done; diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c index ff7e734e335b0..7d574f5132e2f 100644 --- a/net/ipv6/ila/ila_lwt.c +++ b/net/ipv6/ila/ila_lwt.c @@ -88,13 +88,15 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb) goto drop; } - if (ilwt->connected) { + /* cache only if we don't create a dst reference loop */ + if (ilwt->connected && orig_dst->lwtstate != dst->lwtstate) { local_bh_disable(); dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr); local_bh_enable(); } } + skb_dst_drop(skb); skb_dst_set(skb, dst); return dst_output(net, sk, skb); diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c index 4215cebe7d85a..647dd8417c6cf 100644 --- a/net/ipv6/ioam6_iptunnel.c +++ b/net/ipv6/ioam6_iptunnel.c @@ -339,7 +339,6 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb, static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb), *cache_dst = NULL; - struct in6_addr orig_daddr; struct ioam6_lwt *ilwt; int err = -EINVAL; u32 pkt_cnt; @@ -354,8 +353,6 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) if (pkt_cnt % ilwt->freq.n >= ilwt->freq.k) goto out; - orig_daddr = ipv6_hdr(skb)->daddr; - local_bh_disable(); cache_dst = dst_cache_get(&ilwt->cache); local_bh_enable(); @@ -424,7 +421,10 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) goto drop; } - if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) { + /* avoid lwtunnel_output() reentry loop when destination is the same + * after transformation (e.g., with the inline mode) + */ + if (dst->lwtstate != cache_dst->lwtstate) { skb_dst_drop(skb); skb_dst_set(skb, cache_dst); return dst_output(net, sk, skb); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index b60e13c42bcac..48fd53b989726 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -630,8 +630,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, } skb_dst_set(skb2, &rt->dst); } else { - if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, - skb2->dev) || + if (ip_route_input(skb2, eiph->daddr, eiph->saddr, + ip4h_dscp(eiph), skb2->dev) || skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6) goto out; } diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c index a7690ec623259..9ea5ef56cb270 100644 --- a/net/ipv6/netfilter/nf_socket_ipv6.c +++ b/net/ipv6/netfilter/nf_socket_ipv6.c @@ -103,6 +103,10 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct 
sk_buff *skb, struct sk_buff *data_skb = NULL; int doff = 0; int thoff = 0, tproto; +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + enum ip_conntrack_info ctinfo; + struct nf_conn const *ct; +#endif tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL); if (tproto < 0) { @@ -136,6 +140,25 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb, return NULL; } +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + /* Do the lookup with the original socket address in + * case this is a reply packet of an established + * SNAT-ted connection. + */ + ct = nf_ct_get(skb, &ctinfo); + if (ct && + ((tproto != IPPROTO_ICMPV6 && + ctinfo == IP_CT_ESTABLISHED_REPLY) || + (tproto == IPPROTO_ICMPV6 && + ctinfo == IP_CT_RELATED_REPLY)) && + (ct->status & IPS_SRC_NAT_DONE)) { + daddr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.in6; + dport = (tproto == IPPROTO_TCP) ? + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.tcp.port : + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; + } +#endif + return nf_socket_get_sock_v6(net, data_skb, doff, tproto, saddr, daddr, sport, dport, indev); } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 2736dea77575b..d9ab070e78e05 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -412,12 +412,37 @@ static bool rt6_check_expired(const struct rt6_info *rt) return false; } +static struct fib6_info * +rt6_multipath_first_sibling_rcu(const struct fib6_info *rt) +{ + struct fib6_info *iter; + struct fib6_node *fn; + + fn = rcu_dereference(rt->fib6_node); + if (!fn) + goto out; + iter = rcu_dereference(fn->leaf); + if (!iter) + goto out; + + while (iter) { + if (iter->fib6_metric == rt->fib6_metric && + rt6_qualify_for_ecmp(iter)) + return iter; + iter = rcu_dereference(iter->fib6_next); + } + +out: + return NULL; +} + void fib6_select_path(const struct net *net, struct fib6_result *res, struct flowi6 *fl6, int oif, bool have_oif_match, const struct sk_buff *skb, int strict) { - struct fib6_info *match = res->f6i; + struct fib6_info *first, *match = res->f6i; struct fib6_info *sibling; + int hash; if (!match->nh && (!match->fib6_nsiblings || have_oif_match)) goto out; @@ -440,16 +465,25 @@ void fib6_select_path(const struct net *net, struct fib6_result *res, return; } - if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound)) + first = rt6_multipath_first_sibling_rcu(match); + if (!first) goto out; - list_for_each_entry_rcu(sibling, &match->fib6_siblings, + hash = fl6->mp_hash; + if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound)) { + if (rt6_score_route(first->fib6_nh, first->fib6_flags, oif, + strict) >= 0) + match = first; + goto out; + } + + list_for_each_entry_rcu(sibling, &first->fib6_siblings, fib6_siblings) { const struct fib6_nh *nh = sibling->fib6_nh; int nh_upper_bound; nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound); - if (fl6->mp_hash > nh_upper_bound) + if (hash > nh_upper_bound) continue; if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0) break; @@ -1737,6 +1771,7 @@ static int rt6_insert_exception(struct rt6_info *nrt, if (!err) { spin_lock_bh(&f6i->fib6_table->tb6_lock); fib6_update_sernum(net, f6i); + fib6_add_gc_list(f6i); spin_unlock_bh(&f6i->fib6_table->tb6_lock); fib6_force_start_gc(net); } @@ -3644,7 +3679,8 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, in6_dev_put(idev); if (err) { - lwtstate_put(fib6_nh->fib_nh_lws); + fib_nh_common_release(&fib6_nh->nh_common); + fib6_nh->nh_common.nhc_pcpu_rth_output = NULL; fib6_nh->fib_nh_lws = NULL; netdev_put(dev, dev_tracker); } @@ -3802,10 
+3838,12 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, if (nh) { if (rt->fib6_src.plen) { NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing"); + err = -EINVAL; goto out_free; } if (!nexthop_get(nh)) { NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); + err = -ENOENT; goto out_free; } rt->nh = nh; diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c index 0ac4283acdf20..7c05ac846646f 100644 --- a/net/ipv6/rpl_iptunnel.c +++ b/net/ipv6/rpl_iptunnel.c @@ -262,10 +262,18 @@ static int rpl_input(struct sk_buff *skb) { struct dst_entry *orig_dst = skb_dst(skb); struct dst_entry *dst = NULL; + struct lwtunnel_state *lwtst; struct rpl_lwt *rlwt; int err; - rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate); + /* We cannot dereference "orig_dst" once ip6_route_input() or + * skb_dst_drop() is called. However, in order to detect a dst loop, we + * need the address of its lwtstate. So, save the address of lwtstate + * now and use it later as a comparison. + */ + lwtst = orig_dst->lwtstate; + + rlwt = rpl_lwt_lwtunnel(lwtst); local_bh_disable(); dst = dst_cache_get(&rlwt->cache); @@ -280,7 +288,9 @@ static int rpl_input(struct sk_buff *skb) if (!dst) { ip6_route_input(skb); dst = skb_dst(skb); - if (!dst->error) { + + /* cache only if we don't create a dst reference loop */ + if (!dst->error && lwtst != dst->lwtstate) { local_bh_disable(); dst_cache_set_ip6(&rlwt->cache, dst, &ipv6_hdr(skb)->saddr); diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 33833b2064c07..51583461ae29b 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -472,10 +472,18 @@ static int seg6_input_core(struct net *net, struct sock *sk, { struct dst_entry *orig_dst = skb_dst(skb); struct dst_entry *dst = NULL; + struct lwtunnel_state *lwtst; struct seg6_lwt *slwt; int err; - slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate); + /* We cannot dereference "orig_dst" once ip6_route_input() or + * skb_dst_drop() is called. However, in order to detect a dst loop, we + * need the address of its lwtstate. So, save the address of lwtstate + * now and use it later as a comparison. 
+ */ + lwtst = orig_dst->lwtstate; + + slwt = seg6_lwt_lwtunnel(lwtst); local_bh_disable(); dst = dst_cache_get(&slwt->cache); @@ -490,7 +498,9 @@ static int seg6_input_core(struct net *net, struct sock *sk, if (!dst) { ip6_route_input(skb); dst = skb_dst(skb); - if (!dst->error) { + + /* cache only if we don't create a dst reference loop */ + if (!dst->error && lwtst != dst->lwtstate) { local_bh_disable(); dst_cache_set_ip6(&slwt->cache, dst, &ipv6_hdr(skb)->saddr); diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index a45bf17cb2a17..ae2da28f9dfb1 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -94,14 +94,23 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff) } static void __tcpv6_gso_segment_csum(struct sk_buff *seg, + struct in6_addr *oldip, + const struct in6_addr *newip, __be16 *oldport, __be16 newport) { - struct tcphdr *th; + struct tcphdr *th = tcp_hdr(seg); + + if (!ipv6_addr_equal(oldip, newip)) { + inet_proto_csum_replace16(&th->check, seg, + oldip->s6_addr32, + newip->s6_addr32, + true); + *oldip = *newip; + } if (*oldport == newport) return; - th = tcp_hdr(seg); inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false); *oldport = newport; } @@ -129,10 +138,10 @@ static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs) th2 = tcp_hdr(seg); iph2 = ipv6_hdr(seg); - iph2->saddr = iph->saddr; - iph2->daddr = iph->daddr; - __tcpv6_gso_segment_csum(seg, &th2->source, th->source); - __tcpv6_gso_segment_csum(seg, &th2->dest, th->dest); + __tcpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr, + &th2->source, th->source); + __tcpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr, + &th2->dest, th->dest); } return segs; diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c index 06fb8e6944b06..7a0cae9a81114 100644 --- a/net/llc/llc_s_ac.c +++ b/net/llc/llc_s_ac.c @@ -24,7 +24,7 @@ #include #include #include - +#include /** * llc_sap_action_unitdata_ind - forward UI PDU to network layer @@ -40,6 +40,26 @@ int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb) return 0; } +static int llc_prepare_and_xmit(struct sk_buff *skb) +{ + struct llc_sap_state_ev *ev = llc_sap_ev(skb); + struct sk_buff *nskb; + int rc; + + rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); + if (rc) + return rc; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + return -ENOMEM; + + if (skb->sk) + skb_set_owner_w(nskb, skb->sk); + + return dev_queue_xmit(nskb); +} + /** * llc_sap_action_send_ui - sends UI PDU resp to UNITDATA REQ to MAC layer * @sap: SAP @@ -52,17 +72,12 @@ int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb) int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); - int rc; llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_ui_cmd(skb); - rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); - if (likely(!rc)) { - skb_get(skb); - rc = dev_queue_xmit(skb); - } - return rc; + + return llc_prepare_and_xmit(skb); } /** @@ -77,17 +92,12 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb) int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); - int rc; llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap, ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0); - rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); - if 
(likely(!rc)) { - skb_get(skb); - rc = dev_queue_xmit(skb); - } - return rc; + + return llc_prepare_and_xmit(skb); } /** @@ -133,17 +143,12 @@ int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb) int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); - int rc; llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_test_cmd(skb); - rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); - if (likely(!rc)) { - skb_get(skb); - rc = dev_queue_xmit(skb); - } - return rc; + + return llc_prepare_and_xmit(skb); } int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb) diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 02b5476a4376c..a0710ae0e7a49 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -499,6 +499,7 @@ static const char *hw_flag_names[] = { FLAG(DISALLOW_PUNCTURING), FLAG(DISALLOW_PUNCTURING_5GHZ), FLAG(HANDLES_QUIET_CSA), + FLAG(STRICT), #undef FLAG }; @@ -531,6 +532,46 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf, return rv; } +static ssize_t hwflags_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + char buf[100]; + int val; + + if (count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + if (count && buf[count - 1] == '\n') + buf[count - 1] = '\0'; + else + buf[count] = '\0'; + + if (sscanf(buf, "strict=%d", &val) == 1) { + switch (val) { + case 0: + ieee80211_hw_set(&local->hw, STRICT); + return count; + case 1: + __clear_bit(IEEE80211_HW_STRICT, local->hw.flags); + return count; + default: + return -EINVAL; + } + } + + return -EINVAL; +} + +static const struct file_operations hwflags_ops = { + .open = simple_open, + .read = hwflags_read, + .write = hwflags_write, +}; + static ssize_t misc_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -581,7 +622,6 @@ static ssize_t queues_read(struct file *file, char __user *user_buf, return simple_read_from_buffer(user_buf, count, ppos, buf, res); } -DEBUGFS_READONLY_FILE_OPS(hwflags); DEBUGFS_READONLY_FILE_OPS(queues); DEBUGFS_READONLY_FILE_OPS(misc); @@ -659,7 +699,7 @@ void debugfs_hw_add(struct ieee80211_local *local) #ifdef CONFIG_PM DEBUGFS_ADD_MODE(reset, 0200); #endif - DEBUGFS_ADD(hwflags); + DEBUGFS_ADD_MODE(hwflags, 0600); DEBUGFS_ADD(user_power); DEBUGFS_ADD(power); DEBUGFS_ADD(hw_conf); diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c index fe868b5216220..4243f8ee5ab6b 100644 --- a/net/mac80211/driver-ops.c +++ b/net/mac80211/driver-ops.c @@ -115,8 +115,14 @@ void drv_remove_interface(struct ieee80211_local *local, sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER; - /* Remove driver debugfs entries */ - ieee80211_debugfs_recreate_netdev(sdata, sdata->vif.valid_links); + /* + * Remove driver debugfs entries. + * The virtual monitor interface doesn't get a debugfs + * entry, so it's exempt here. 
+ */ + if (sdata != rcu_access_pointer(local->monitor_sdata)) + ieee80211_debugfs_recreate_netdev(sdata, + sdata->vif.valid_links); trace_drv_remove_interface(local, sdata); local->ops->remove_interface(&local->hw, &sdata->vif); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 7a0242e937d36..bfe0514efca37 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1751,6 +1751,7 @@ struct ieee802_11_elems { const struct ieee80211_eht_operation *eht_operation; const struct ieee80211_multi_link_elem *ml_basic; const struct ieee80211_multi_link_elem *ml_reconf; + const struct ieee80211_multi_link_elem *ml_epcs; const struct ieee80211_bandwidth_indication *bandwidth_indication; const struct ieee80211_ttlm_elem *ttlm[IEEE80211_TTLM_MAX_CNT]; @@ -1781,6 +1782,7 @@ struct ieee802_11_elems { /* mult-link element can be de-fragmented and thus u8 is not sufficient */ size_t ml_basic_len; size_t ml_reconf_len; + size_t ml_epcs_len; u8 ttlm_num; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index af9055252e6df..7e1e561ef76c1 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -8,7 +8,7 @@ * Copyright 2008, Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (c) 2016 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #include #include @@ -667,6 +667,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) ieee80211_txq_remove_vlan(local, sdata); + if (sdata->vif.txq) + ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq)); + sdata->bss = NULL; if (local->open_count == 0) @@ -812,6 +815,9 @@ static void ieee80211_set_multicast_list(struct net_device *dev) */ static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata) { + if (WARN_ON(!list_empty(&sdata->work.entry))) + wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->work); + /* free extra data */ ieee80211_free_keys(sdata, false); @@ -1205,16 +1211,17 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local) return; } - RCU_INIT_POINTER(local->monitor_sdata, NULL); - mutex_unlock(&local->iflist_mtx); - - synchronize_net(); - + clear_bit(SDATA_STATE_RUNNING, &sdata->state); ieee80211_link_release_channel(&sdata->deflink); if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) drv_remove_interface(local, sdata); + RCU_INIT_POINTER(local->monitor_sdata, NULL); + mutex_unlock(&local->iflist_mtx); + + synchronize_net(); + kfree(sdata); } diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 579d0f24ac9d6..2922a9fec950d 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -367,6 +367,12 @@ u32 airtime_link_metric_get(struct ieee80211_local *local, return (u32)result; } +/* Check that the first metric is at least 10% better than the second one */ +static bool is_metric_better(u32 x, u32 y) +{ + return (x < y) && (x < (y - x / 10)); +} + /** * hwmp_route_info_get - Update routing info to originator and transmitter * @@ -458,8 +464,8 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, (mpath->sn == orig_sn && (rcu_access_pointer(mpath->next_hop) != sta ? 
- mult_frac(new_metric, 10, 9) : - new_metric) >= mpath->metric)) { + !is_metric_better(new_metric, mpath->metric) : + new_metric >= mpath->metric))) { process = false; fresh_info = false; } @@ -533,8 +539,8 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, if ((mpath->flags & MESH_PATH_FIXED) || ((mpath->flags & MESH_PATH_ACTIVE) && ((rcu_access_pointer(mpath->next_hop) != sta ? - mult_frac(last_hop_metric, 10, 9) : - last_hop_metric) > mpath->metric))) + !is_metric_better(last_hop_metric, mpath->metric) : + last_hop_metric > mpath->metric)))) fresh_info = false; } else { mpath = mesh_path_add(sdata, ta); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 111066928b963..ad0d040569dcd 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -166,6 +166,9 @@ ieee80211_determine_ap_chan(struct ieee80211_sub_if_data *sdata, bool no_vht = false; u32 ht_cfreq; + if (ieee80211_hw_check(&sdata->local->hw, STRICT)) + ignore_ht_channel_mismatch = false; + *chandef = (struct cfg80211_chan_def) { .chan = channel, .width = NL80211_CHAN_WIDTH_20_NOHT, @@ -385,7 +388,7 @@ ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata, * zeroes, which is nonsense, and completely inconsistent with itself * (it doesn't have 8 streams). Accept the settings in this case anyway. */ - if (!ap_min_req_set) + if (!ieee80211_hw_check(&sdata->local->hw, STRICT) && !ap_min_req_set) return true; /* make sure the AP is consistent with itself @@ -445,7 +448,7 @@ ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata, * zeroes, which is nonsense, and completely inconsistent with itself * (it doesn't have 8 streams). Accept the settings in this case anyway. */ - if (!ap_min_req_set) + if (!ieee80211_hw_check(&sdata->local->hw, STRICT) && !ap_min_req_set) return true; /* Need to go over for 80MHz, 160MHz and for 80+80 */ @@ -1212,13 +1215,15 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, * Some APs apparently get confused if our capabilities are better * than theirs, so restrict what we advertise in the assoc request. 
*/ - if (!(ap_vht_cap->vht_cap_info & - cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE))) - cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | - IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); - else if (!(ap_vht_cap->vht_cap_info & - cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) - cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + if (!ieee80211_hw_check(&local->hw, STRICT)) { + if (!(ap_vht_cap->vht_cap_info & + cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE))) + cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); + else if (!(ap_vht_cap->vht_cap_info & + cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) + cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + } /* * If some other vif is using the MU-MIMO capability we cannot associate @@ -1260,14 +1265,16 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, return mu_mimo_owner; } -static void ieee80211_assoc_add_rates(struct sk_buff *skb, +static void ieee80211_assoc_add_rates(struct ieee80211_local *local, + struct sk_buff *skb, enum nl80211_chan_width width, struct ieee80211_supported_band *sband, struct ieee80211_mgd_assoc_data *assoc_data) { u32 rates; - if (assoc_data->supp_rates_len) { + if (assoc_data->supp_rates_len && + !ieee80211_hw_check(&local->hw, STRICT)) { /* * Get all rates supported by the device and the AP as * some APs don't like getting a superset of their rates @@ -1481,7 +1488,7 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata, *capab |= WLAN_CAPABILITY_SPECTRUM_MGMT; if (sband->band != NL80211_BAND_S1GHZ) - ieee80211_assoc_add_rates(skb, width, sband, assoc_data); + ieee80211_assoc_add_rates(local, skb, width, sband, assoc_data); if (*capab & WLAN_CAPABILITY_SPECTRUM_MGMT || *capab & WLAN_CAPABILITY_RADIO_MEASURE) { @@ -1925,7 +1932,8 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) * for some reason check it and want it to be set, set the bit for all * pre-EHT connections as we used to do. */ - if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT) + if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT && + !ieee80211_hw_check(&local->hw, STRICT)) capab |= WLAN_CAPABILITY_ESS; /* add the elements for the assoc (main) link */ @@ -4710,7 +4718,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, * 2G/3G/4G wifi routers, reported models include the "Onda PN51T", * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device. 
*/ - if (!is_6ghz && + if (!ieee80211_hw_check(&local->hw, STRICT) && !is_6ghz && ((assoc_data->wmm && !elems->wmm_param) || (link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT && (!elems->ht_cap_elem || !elems->ht_operation)) || @@ -4733,6 +4741,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, parse_params.start = bss_ies->data; parse_params.len = bss_ies->len; parse_params.bss = cbss; + parse_params.link_id = -1; bss_elems = ieee802_11_parse_elems_full(&parse_params); if (!bss_elems) { ret = false; @@ -4845,6 +4854,15 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, bss_vht_cap = (const void *)elem->data; } + if (ieee80211_hw_check(&local->hw, STRICT) && + (!bss_vht_cap || memcmp(bss_vht_cap, elems->vht_cap_elem, + sizeof(*bss_vht_cap)))) { + rcu_read_unlock(); + ret = false; + link_info(link, "VHT capabilities mismatch\n"); + goto out; + } + ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, elems->vht_cap_elem, bss_vht_cap, link_sta); diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c index 279c5143b3356..6da39c864f45b 100644 --- a/net/mac80211/parse.c +++ b/net/mac80211/parse.c @@ -44,6 +44,12 @@ struct ieee80211_elems_parse { /* The reconfiguration Multi-Link element in the original elements */ const struct element *ml_reconf_elem; + /* The EPCS Multi-Link element in the original elements */ + const struct element *ml_epcs_elem; + + bool multi_link_inner; + bool skip_vendor; + /* * scratch buffer that can be used for various element parsing related * tasks, e.g., element de-fragmentation etc. @@ -149,16 +155,18 @@ ieee80211_parse_extension_element(u32 *crc, switch (le16_get_bits(mle->control, IEEE80211_ML_CONTROL_TYPE)) { case IEEE80211_ML_CONTROL_TYPE_BASIC: - if (elems_parse->ml_basic_elem) { + if (elems_parse->multi_link_inner) { elems->parse_error |= IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC; break; } - elems_parse->ml_basic_elem = elem; break; case IEEE80211_ML_CONTROL_TYPE_RECONF: elems_parse->ml_reconf_elem = elem; break; + case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS: + elems_parse->ml_epcs_elem = elem; + break; default: break; } @@ -393,6 +401,9 @@ _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params, IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_VENDOR_SPECIFIC: + if (elems_parse->skip_vendor) + break; + if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && pos[2] == 0xf2) { /* Microsoft OUI (00:50:F2) */ @@ -860,21 +871,36 @@ ieee80211_mle_get_sta_prof(struct ieee80211_elems_parse *elems_parse, } } -static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse, - struct ieee80211_elems_parse_params *params) +static const struct element * +ieee80211_prep_mle_link_parse(struct ieee80211_elems_parse *elems_parse, + struct ieee80211_elems_parse_params *params, + struct ieee80211_elems_parse_params *sub) { struct ieee802_11_elems *elems = &elems_parse->elems; struct ieee80211_mle_per_sta_profile *prof; - struct ieee80211_elems_parse_params sub = { - .mode = params->mode, - .action = params->action, - .from_ap = params->from_ap, - .link_id = -1, - }; - ssize_t ml_len = elems->ml_basic_len; - const struct element *non_inherit = NULL; + const struct element *tmp; + ssize_t ml_len; const u8 *end; + if (params->mode < IEEE80211_CONN_MODE_EHT) + return NULL; + + for_each_element_extid(tmp, WLAN_EID_EXT_EHT_MULTI_LINK, + elems->ie_start, elems->total_len) { + const struct ieee80211_multi_link_elem *mle = + (void *)tmp->data + 1; + + if (!ieee80211_mle_size_ok(tmp->data + 1, tmp->datalen 
- 1)) + continue; + + if (le16_get_bits(mle->control, IEEE80211_ML_CONTROL_TYPE) != + IEEE80211_ML_CONTROL_TYPE_BASIC) + continue; + + elems_parse->ml_basic_elem = tmp; + break; + } + ml_len = cfg80211_defragment_element(elems_parse->ml_basic_elem, elems->ie_start, elems->total_len, @@ -885,26 +911,26 @@ static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse, WLAN_EID_FRAGMENT); if (ml_len < 0) - return; + return NULL; elems->ml_basic = (const void *)elems_parse->scratch_pos; elems->ml_basic_len = ml_len; elems_parse->scratch_pos += ml_len; if (params->link_id == -1) - return; + return NULL; ieee80211_mle_get_sta_prof(elems_parse, params->link_id); prof = elems->prof; if (!prof) - return; + return NULL; /* check if we have the 4 bytes for the fixed part in assoc response */ if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) { elems->prof = NULL; elems->sta_prof_len = 0; - return; + return NULL; } /* @@ -913,13 +939,17 @@ static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse, * the -1 is because the 'sta_info_len' is accounted to as part of the * per-STA profile, but not part of the 'u8 variable[]' portion. */ - sub.start = prof->variable + prof->sta_info_len - 1 + 4; + sub->start = prof->variable + prof->sta_info_len - 1 + 4; end = (const u8 *)prof + elems->sta_prof_len; - sub.len = end - sub.start; + sub->len = end - sub->start; + + sub->mode = params->mode; + sub->action = params->action; + sub->from_ap = params->from_ap; + sub->link_id = -1; - non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, - sub.start, sub.len); - _ieee802_11_parse_elems_full(&sub, elems_parse, non_inherit); + return cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, + sub->start, sub->len); } static void @@ -943,18 +973,43 @@ ieee80211_mle_defrag_reconf(struct ieee80211_elems_parse *elems_parse) elems_parse->scratch_pos += ml_len; } +static void +ieee80211_mle_defrag_epcs(struct ieee80211_elems_parse *elems_parse) +{ + struct ieee802_11_elems *elems = &elems_parse->elems; + ssize_t ml_len; + + ml_len = cfg80211_defragment_element(elems_parse->ml_epcs_elem, + elems->ie_start, + elems->total_len, + elems_parse->scratch_pos, + elems_parse->scratch + + elems_parse->scratch_len - + elems_parse->scratch_pos, + WLAN_EID_FRAGMENT); + if (ml_len < 0) + return; + elems->ml_epcs = (void *)elems_parse->scratch_pos; + elems->ml_epcs_len = ml_len; + elems_parse->scratch_pos += ml_len; +} + struct ieee802_11_elems * ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params) { + struct ieee80211_elems_parse_params sub = {}; struct ieee80211_elems_parse *elems_parse; - struct ieee802_11_elems *elems; const struct element *non_inherit = NULL; - u8 *nontransmitted_profile; - int nontransmitted_profile_len = 0; + struct ieee802_11_elems *elems; size_t scratch_len = 3 * params->len; + bool multi_link_inner = false; BUILD_BUG_ON(offsetof(typeof(*elems_parse), elems) != 0); + /* cannot parse for both a specific link and non-transmitted BSS */ + if (WARN_ON(params->link_id >= 0 && params->bss)) + return NULL; + elems_parse = kzalloc(struct_size(elems_parse, scratch, scratch_len), GFP_ATOMIC); if (!elems_parse) @@ -971,36 +1026,55 @@ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params) ieee80211_clear_tpe(&elems->tpe); ieee80211_clear_tpe(&elems->csa_tpe); - nontransmitted_profile = elems_parse->scratch_pos; - nontransmitted_profile_len = - ieee802_11_find_bssid_profile(params->start, params->len, - elems, params->bss, - 
nontransmitted_profile); - elems_parse->scratch_pos += nontransmitted_profile_len; - non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, - nontransmitted_profile, - nontransmitted_profile_len); + /* + * If we're looking for a non-transmitted BSS then we cannot at + * the same time be looking for a second link as the two can only + * appear in the same frame carrying info for different BSSes. + * + * In any case, we only look for one at a time, as encoded by + * the WARN_ON above. + */ + if (params->bss) { + int nontx_len = + ieee802_11_find_bssid_profile(params->start, + params->len, + elems, params->bss, + elems_parse->scratch_pos); + sub.start = elems_parse->scratch_pos; + sub.mode = params->mode; + sub.len = nontx_len; + sub.action = params->action; + sub.link_id = params->link_id; + + /* consume the space used for non-transmitted profile */ + elems_parse->scratch_pos += nontx_len; + + non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, + sub.start, nontx_len); + } else { + /* must always parse to get elems_parse->ml_basic_elem */ + non_inherit = ieee80211_prep_mle_link_parse(elems_parse, params, + &sub); + multi_link_inner = true; + } + elems_parse->skip_vendor = + cfg80211_find_elem(WLAN_EID_VENDOR_SPECIFIC, + sub.start, sub.len); elems->crc = _ieee802_11_parse_elems_full(params, elems_parse, non_inherit); - /* Override with nontransmitted profile, if found */ - if (nontransmitted_profile_len) { - struct ieee80211_elems_parse_params sub = { - .mode = params->mode, - .start = nontransmitted_profile, - .len = nontransmitted_profile_len, - .action = params->action, - .link_id = params->link_id, - }; - + /* Override with nontransmitted/per-STA profile if found */ + if (sub.len) { + elems_parse->multi_link_inner = multi_link_inner; + elems_parse->skip_vendor = false; _ieee802_11_parse_elems_full(&sub, elems_parse, NULL); } - ieee80211_mle_parse_link(elems_parse, params); - ieee80211_mle_defrag_reconf(elems_parse); + ieee80211_mle_defrag_epcs(elems_parse); + if (elems->tim && !elems->parse_error) { const struct ieee80211_tim_ie *tim_ie = elems->tim; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 6f3a86040cfcd..8e1fbdd3bff10 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -6,7 +6,7 @@ * Copyright 2007-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #include @@ -3323,8 +3323,8 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, return; } - if (!ether_addr_equal(mgmt->sa, sdata->deflink.u.mgd.bssid) || - !ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid)) { + if (!ether_addr_equal(mgmt->sa, sdata->vif.cfg.ap_addr) || + !ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) { /* Not from the current AP or not associated yet. 
*/ return; } @@ -3340,9 +3340,9 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, skb_reserve(skb, local->hw.extra_tx_headroom); resp = skb_put_zero(skb, 24); - memcpy(resp->da, mgmt->sa, ETH_ALEN); + memcpy(resp->da, sdata->vif.cfg.ap_addr, ETH_ALEN); memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); - memcpy(resp->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN); + memcpy(resp->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index aa22f09e6d145..49095f19a0f22 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -4,7 +4,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #include @@ -1317,9 +1317,13 @@ static int _sta_info_move_state(struct sta_info *sta, sta->sta.addr, new_state); /* notify the driver before the actual changes so it can - * fail the transition + * fail the transition if the state is increasing. + * The driver is required not to fail when the transition + * is decreasing the state, so first, do all the preparation + * work and only then, notify the driver. */ - if (test_sta_flag(sta, WLAN_STA_INSERTED)) { + if (new_state > sta->sta_state && + test_sta_flag(sta, WLAN_STA_INSERTED)) { int err = drv_sta_state(sta->local, sta->sdata, sta, sta->sta_state, new_state); if (err) @@ -1395,6 +1399,16 @@ static int _sta_info_move_state(struct sta_info *sta, break; } + if (new_state < sta->sta_state && + test_sta_flag(sta, WLAN_STA_INSERTED)) { + int err = drv_sta_state(sta->local, sta->sdata, sta, + sta->sta_state, new_state); + + WARN_ONCE(err, + "Driver is not allowed to fail if the sta_state is transitioning down the list: %d\n", + err); + } + sta->sta_state = new_state; return 0; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 38c30e4ddda98..a98ae563613c0 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -6,7 +6,7 @@ * Copyright 2007 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation * * utilities for mac80211 */ @@ -685,7 +685,7 @@ void __ieee80211_flush_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, unsigned int queues, bool drop) { - if (!local->ops->flush) + if (!local->ops->flush && !drop) return; /* @@ -712,7 +712,8 @@ void __ieee80211_flush_queues(struct ieee80211_local *local, } } - drv_flush(local, sdata, queues, drop); + if (local->ops->flush) + drv_flush(local, sdata, queues, drop); ieee80211_wake_queues_by_reason(&local->hw, queues, IEEE80211_QUEUE_STOP_REASON_FLUSH, @@ -2184,8 +2185,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) ieee80211_reconfig_roc(local); /* Requeue all works */ - list_for_each_entry(sdata, &local->interfaces, list) - wiphy_work_queue(local->hw.wiphy, &sdata->work); + list_for_each_entry(sdata, &local->interfaces, list) { + if (ieee80211_sdata_running(sdata)) + wiphy_work_queue(local->hw.wiphy, &sdata->work); + } } ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c index f6de136008f6f..57850d4dac5db 100644 --- a/net/mctp/af_mctp.c +++ 
b/net/mctp/af_mctp.c @@ -630,6 +630,9 @@ static int mctp_sk_hash(struct sock *sk) { struct net *net = sock_net(sk); + /* Bind lookup runs under RCU, remain live during that. */ + sock_set_flag(sk, SOCK_RCU_FREE); + mutex_lock(&net->mctp.bind_lock); sk_add_node_rcu(sk, &net->mctp.binds); mutex_unlock(&net->mctp.bind_lock); diff --git a/net/mctp/route.c b/net/mctp/route.c index 3f2bd65ff5e3c..4c460160914f0 100644 --- a/net/mctp/route.c +++ b/net/mctp/route.c @@ -332,8 +332,14 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb) & MCTP_HDR_SEQ_MASK; if (!key->reasm_head) { - key->reasm_head = skb; - key->reasm_tailp = &(skb_shinfo(skb)->frag_list); + /* Since we're manipulating the shared frag_list, ensure it isn't + * shared with any other SKBs. + */ + key->reasm_head = skb_unshare(skb, GFP_ATOMIC); + if (!key->reasm_head) + return -ENOMEM; + + key->reasm_tailp = &(skb_shinfo(key->reasm_head)->frag_list); key->last_seq = this_seq; return 0; } diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c index 17165b86ce22d..06c1897b685a8 100644 --- a/net/mctp/test/route-test.c +++ b/net/mctp/test/route-test.c @@ -921,6 +921,114 @@ static void mctp_test_route_input_sk_fail_frag(struct kunit *test) __mctp_route_test_fini(test, dev, rt, sock); } +/* Input route to socket, using a fragmented message created from clones. + */ +static void mctp_test_route_input_cloned_frag(struct kunit *test) +{ + /* 5 packet fragments, forming 2 complete messages */ + const struct mctp_hdr hdrs[5] = { + RX_FRAG(FL_S, 0), + RX_FRAG(0, 1), + RX_FRAG(FL_E, 2), + RX_FRAG(FL_S, 0), + RX_FRAG(FL_E, 1), + }; + struct mctp_test_route *rt; + struct mctp_test_dev *dev; + struct sk_buff *skb[5]; + struct sk_buff *rx_skb; + struct socket *sock; + size_t data_len; + u8 compare[100]; + u8 flat[100]; + size_t total; + void *p; + int rc; + + /* Arbitrary length */ + data_len = 3; + total = data_len + sizeof(struct mctp_hdr); + + __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY); + + /* Create a single skb initially with concatenated packets */ + skb[0] = mctp_test_create_skb(&hdrs[0], 5 * total); + mctp_test_skb_set_dev(skb[0], dev); + memset(skb[0]->data, 0 * 0x11, skb[0]->len); + memcpy(skb[0]->data, &hdrs[0], sizeof(struct mctp_hdr)); + + /* Extract and populate packets */ + for (int i = 1; i < 5; i++) { + skb[i] = skb_clone(skb[i - 1], GFP_ATOMIC); + KUNIT_ASSERT_TRUE(test, skb[i]); + p = skb_pull(skb[i], total); + KUNIT_ASSERT_TRUE(test, p); + skb_reset_network_header(skb[i]); + memcpy(skb[i]->data, &hdrs[i], sizeof(struct mctp_hdr)); + memset(&skb[i]->data[sizeof(struct mctp_hdr)], i * 0x11, data_len); + } + for (int i = 0; i < 5; i++) + skb_trim(skb[i], total); + + /* SOM packets have a type byte to match the socket */ + skb[0]->data[4] = 0; + skb[3]->data[4] = 0; + + skb_dump("pkt1 ", skb[0], false); + skb_dump("pkt2 ", skb[1], false); + skb_dump("pkt3 ", skb[2], false); + skb_dump("pkt4 ", skb[3], false); + skb_dump("pkt5 ", skb[4], false); + + for (int i = 0; i < 5; i++) { + KUNIT_EXPECT_EQ(test, refcount_read(&skb[i]->users), 1); + /* Take a reference so we can check refcounts at the end */ + skb_get(skb[i]); + } + + /* Feed the fragments into MCTP core */ + for (int i = 0; i < 5; i++) { + rc = mctp_route_input(&rt->rt, skb[i]); + KUNIT_EXPECT_EQ(test, rc, 0); + } + + /* Receive first reassembled message */ + rx_skb = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc); + KUNIT_EXPECT_EQ(test, rc, 0); + KUNIT_EXPECT_EQ(test, rx_skb->len, 3 * data_len); + rc = skb_copy_bits(rx_skb, 
0, flat, rx_skb->len); + for (int i = 0; i < rx_skb->len; i++) + compare[i] = (i / data_len) * 0x11; + /* Set type byte */ + compare[0] = 0; + + KUNIT_EXPECT_MEMEQ(test, flat, compare, rx_skb->len); + KUNIT_EXPECT_EQ(test, refcount_read(&rx_skb->users), 1); + kfree_skb(rx_skb); + + /* Receive second reassembled message */ + rx_skb = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc); + KUNIT_EXPECT_EQ(test, rc, 0); + KUNIT_EXPECT_EQ(test, rx_skb->len, 2 * data_len); + rc = skb_copy_bits(rx_skb, 0, flat, rx_skb->len); + for (int i = 0; i < rx_skb->len; i++) + compare[i] = (i / data_len + 3) * 0x11; + /* Set type byte */ + compare[0] = 0; + + KUNIT_EXPECT_MEMEQ(test, flat, compare, rx_skb->len); + KUNIT_EXPECT_EQ(test, refcount_read(&rx_skb->users), 1); + kfree_skb(rx_skb); + + /* Check input skb refcounts */ + for (int i = 0; i < 5; i++) { + KUNIT_EXPECT_EQ(test, refcount_read(&skb[i]->users), 1); + kfree_skb(skb[i]); + } + + __mctp_route_test_fini(test, dev, rt, sock); +} + #if IS_ENABLED(CONFIG_MCTP_FLOWS) static void mctp_test_flow_init(struct kunit *test, @@ -1144,6 +1252,7 @@ static struct kunit_case mctp_test_cases[] = { KUNIT_CASE(mctp_test_packet_flow), KUNIT_CASE(mctp_test_fragment_flow), KUNIT_CASE(mctp_test_route_output_key_create), + KUNIT_CASE(mctp_test_route_input_cloned_frag), {} }; diff --git a/net/mptcp/options.c b/net/mptcp/options.c index fd2de185bc939..23949ae2a3a8d 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -651,6 +651,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * struct mptcp_sock *msk = mptcp_sk(subflow->conn); bool drop_other_suboptions = false; unsigned int opt_size = *size; + struct mptcp_addr_info addr; bool echo; int len; @@ -659,7 +660,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * */ if (!mptcp_pm_should_add_signal(msk) || (opts->suboptions & (OPTION_MPTCP_MPJ_ACK | OPTION_MPTCP_MPC_ACK)) || - !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &opts->addr, + !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &addr, &echo, &drop_other_suboptions)) return false; @@ -672,7 +673,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * else if (opts->suboptions & OPTION_MPTCP_DSS) return false; - len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port); + len = mptcp_add_addr_len(addr.family, echo, !!addr.port); if (remaining < len) return false; @@ -689,6 +690,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * opts->ahmac = 0; *size -= opt_size; } + opts->addr = addr; opts->suboptions |= OPTION_MPTCP_ADD_ADDR; if (!echo) { MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDRTX); diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 8c4f934d198cc..2a085ec5bfd09 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -968,7 +968,7 @@ static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry) static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, struct mptcp_pm_addr_entry *entry, - bool needs_id) + bool needs_id, bool replace) { struct mptcp_pm_addr_entry *cur, *del_entry = NULL; unsigned int addr_max; @@ -1008,6 +1008,17 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, if (entry->addr.id) goto out; + /* allow callers that only need to look up the local + * addr's id to skip replacement. This allows them to + * avoid calling synchronize_rcu in the packet recv + * path. 
+ */ + if (!replace) { + kfree(entry); + ret = cur->addr.id; + goto out; + } + pernet->addrs--; entry->addr.id = cur->addr.id; list_del_rcu(&cur->list); @@ -1160,7 +1171,7 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc entry->ifindex = 0; entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT; entry->lsk = NULL; - ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true); + ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true, false); if (ret < 0) kfree(entry); @@ -1432,7 +1443,8 @@ int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info) } } ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, - !mptcp_pm_has_addr_attr_id(attr, info)); + !mptcp_pm_has_addr_attr_id(attr, info), + true); if (ret < 0) { GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret); goto out_free; @@ -1513,11 +1525,6 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, if (mptcp_pm_is_userspace(msk)) goto next; - if (list_empty(&msk->conn_list)) { - mptcp_pm_remove_anno_addr(msk, addr, false); - goto next; - } - lock_sock(sk); remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr); mptcp_pm_remove_anno_addr(msk, addr, remove_subflow && diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index b70a303e08287..7e2f70f22b05b 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -1194,6 +1194,8 @@ static inline void __mptcp_do_fallback(struct mptcp_sock *msk) pr_debug("TCP fallback already done (msk=%p)\n", msk); return; } + if (WARN_ON_ONCE(!READ_ONCE(msk->allow_infinite_fallback))) + return; set_bit(MPTCP_FALLBACK_DONE, &msk->flags); } diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index 505445a9598fa..3caa0a9d3b388 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -1419,6 +1419,12 @@ static int mptcp_getsockopt_v4(struct mptcp_sock *msk, int optname, switch (optname) { case IP_TOS: return mptcp_put_int_option(msk, optval, optlen, READ_ONCE(inet_sk(sk)->tos)); + case IP_FREEBIND: + return mptcp_put_int_option(msk, optval, optlen, + inet_test_bit(FREEBIND, sk)); + case IP_TRANSPARENT: + return mptcp_put_int_option(msk, optval, optlen, + inet_test_bit(TRANSPARENT, sk)); case IP_BIND_ADDRESS_NO_PORT: return mptcp_put_int_option(msk, optval, optlen, inet_test_bit(BIND_ADDRESS_NO_PORT, sk)); @@ -1430,6 +1436,26 @@ static int mptcp_getsockopt_v4(struct mptcp_sock *msk, int optname, return -EOPNOTSUPP; } +static int mptcp_getsockopt_v6(struct mptcp_sock *msk, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = (void *)msk; + + switch (optname) { + case IPV6_V6ONLY: + return mptcp_put_int_option(msk, optval, optlen, + sk->sk_ipv6only); + case IPV6_TRANSPARENT: + return mptcp_put_int_option(msk, optval, optlen, + inet_test_bit(TRANSPARENT, sk)); + case IPV6_FREEBIND: + return mptcp_put_int_option(msk, optval, optlen, + inet_test_bit(FREEBIND, sk)); + } + + return -EOPNOTSUPP; +} + static int mptcp_getsockopt_sol_mptcp(struct mptcp_sock *msk, int optname, char __user *optval, int __user *optlen) { @@ -1469,6 +1495,8 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname, if (level == SOL_IP) return mptcp_getsockopt_v4(msk, optname, optval, option); + if (level == SOL_IPV6) + return mptcp_getsockopt_v6(msk, optname, optval, option); if (level == SOL_TCP) return mptcp_getsockopt_sol_tcp(msk, optname, optval, option); if (level == SOL_MPTCP) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 860903e064225..4c2aa45c466d9 100644 --- a/net/mptcp/subflow.c 
+++ b/net/mptcp/subflow.c @@ -754,8 +754,6 @@ static bool subflow_hmac_valid(const struct request_sock *req, subflow_req = mptcp_subflow_rsk(req); msk = subflow_req->msk; - if (!msk) - return false; subflow_generate_hmac(READ_ONCE(msk->remote_key), READ_ONCE(msk->local_key), @@ -853,12 +851,8 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, } else if (subflow_req->mp_join) { mptcp_get_options(skb, &mp_opt); - if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) || - !subflow_hmac_valid(req, &mp_opt) || - !mptcp_can_accept_new_subflow(subflow_req->msk)) { - SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC); + if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK)) fallback = true; - } } create_child: @@ -908,6 +902,17 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, goto dispose_child; } + if (!subflow_hmac_valid(req, &mp_opt)) { + SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC); + subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); + goto dispose_child; + } + + if (!mptcp_can_accept_new_subflow(owner)) { + subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); + goto dispose_child; + } + /* move the msk reference ownership to the subflow */ subflow_req->msk = NULL; ctx->conn = (struct sock *)owner; @@ -1140,7 +1145,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk, if (data_len == 0) { pr_debug("infinite mapping received\n"); MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX); - subflow->map_data_len = 0; return MAPPING_INVALID; } @@ -1284,18 +1288,6 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss mptcp_schedule_work(sk); } -static bool subflow_can_fallback(struct mptcp_subflow_context *subflow) -{ - struct mptcp_sock *msk = mptcp_sk(subflow->conn); - - if (subflow->mp_join) - return false; - else if (READ_ONCE(msk->csum_enabled)) - return !subflow->valid_csum_seen; - else - return READ_ONCE(msk->allow_infinite_fallback); -} - static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); @@ -1391,7 +1383,7 @@ static bool subflow_check_data_avail(struct sock *ssk) return true; } - if (!subflow_can_fallback(subflow) && subflow->map_data_len) { + if (!READ_ONCE(msk->allow_infinite_fallback)) { /* fatal protocol error, close the socket. 
* subflow_error_report() will introduce the appropriate barriers */ diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index dc6ddc4abbe21..3224f6e17e736 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -3091,12 +3091,12 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) case IP_VS_SO_GET_SERVICES: { struct ip_vs_get_services *get; - int size; + size_t size; get = (struct ip_vs_get_services *)arg; size = struct_size(get, entrytable, get->num_services); if (*len != size) { - pr_err("length: %u != %u\n", *len, size); + pr_err("length: %u != %zu\n", *len, size); ret = -EINVAL; goto out; } @@ -3132,12 +3132,12 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) case IP_VS_SO_GET_DESTS: { struct ip_vs_get_dests *get; - int size; + size_t size; get = (struct ip_vs_get_dests *)arg; size = struct_size(get, entrytable, get->num_dests); if (*len != size) { - pr_err("length: %u != %u\n", *len, size); + pr_err("length: %u != %zu\n", *len, size); ret = -EINVAL; goto out; } diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index 4890af4dc263f..913ede2f57f9a 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c @@ -132,7 +132,7 @@ static int __nf_conncount_add(struct net *net, struct nf_conn *found_ct; unsigned int collect = 0; - if (time_is_after_eq_jiffies((unsigned long)list->last_gc)) + if ((u32)jiffies == list->last_gc) goto add_new_node; /* check the saved connections */ @@ -234,7 +234,7 @@ bool nf_conncount_gc_list(struct net *net, bool ret = false; /* don't bother if we just did GC */ - if (time_is_after_eq_jiffies((unsigned long)READ_ONCE(list->last_gc))) + if ((u32)jiffies == READ_ONCE(list->last_gc)) return false; /* don't bother if other cpu is already doing GC */ @@ -377,6 +377,8 @@ insert_tree(struct net *net, conn->tuple = *tuple; conn->zone = *zone; + conn->cpu = raw_smp_processor_id(); + conn->jiffies32 = (u32)jiffies; memcpy(rbconn->key, key, sizeof(u32) * data->keylen); nf_conncount_list_init(&rbconn->list); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 939510247ef5a..bdee187bc5dd4 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -31,7 +31,6 @@ unsigned int nf_tables_net_id __read_mostly; static LIST_HEAD(nf_tables_expressions); static LIST_HEAD(nf_tables_objects); static LIST_HEAD(nf_tables_flowtables); -static LIST_HEAD(nf_tables_destroy_list); static LIST_HEAD(nf_tables_gc_list); static DEFINE_SPINLOCK(nf_tables_destroy_list_lock); static DEFINE_SPINLOCK(nf_tables_gc_list_lock); @@ -122,7 +121,6 @@ static void nft_validate_state_update(struct nft_table *table, u8 new_validate_s table->validate_state = new_validate_state; } static void nf_tables_trans_destroy_work(struct work_struct *w); -static DECLARE_WORK(trans_destroy_work, nf_tables_trans_destroy_work); static void nft_trans_gc_work(struct work_struct *work); static DECLARE_WORK(trans_gc_work, nft_trans_gc_work); @@ -2734,11 +2732,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, err = nft_netdev_register_hooks(ctx->net, &hook.list); if (err < 0) goto err_hooks; + + unregister = true; } } - unregister = true; - if (nla[NFTA_CHAIN_COUNTERS]) { if (!nft_is_base_chain(chain)) { err = -EOPNOTSUPP; @@ -9748,11 +9746,12 @@ static void nft_commit_release(struct nft_trans *trans) static void nf_tables_trans_destroy_work(struct work_struct *w) { + struct nftables_pernet *nft_net = 
container_of(w, struct nftables_pernet, destroy_work); struct nft_trans *trans, *next; LIST_HEAD(head); spin_lock(&nf_tables_destroy_list_lock); - list_splice_init(&nf_tables_destroy_list, &head); + list_splice_init(&nft_net->destroy_list, &head); spin_unlock(&nf_tables_destroy_list_lock); if (list_empty(&head)) @@ -9766,9 +9765,11 @@ static void nf_tables_trans_destroy_work(struct work_struct *w) } } -void nf_tables_trans_destroy_flush_work(void) +void nf_tables_trans_destroy_flush_work(struct net *net) { - flush_work(&trans_destroy_work); + struct nftables_pernet *nft_net = nft_pernet(net); + + flush_work(&nft_net->destroy_work); } EXPORT_SYMBOL_GPL(nf_tables_trans_destroy_flush_work); @@ -10226,11 +10227,11 @@ static void nf_tables_commit_release(struct net *net) trans->put_net = true; spin_lock(&nf_tables_destroy_list_lock); - list_splice_tail_init(&nft_net->commit_list, &nf_tables_destroy_list); + list_splice_tail_init(&nft_net->commit_list, &nft_net->destroy_list); spin_unlock(&nf_tables_destroy_list_lock); nf_tables_module_autoload_cleanup(net); - schedule_work(&trans_destroy_work); + schedule_work(&nft_net->destroy_work); mutex_unlock(&nft_net->commit_mutex); } @@ -11653,7 +11654,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event, gc_seq = nft_gc_seq_begin(nft_net); - nf_tables_trans_destroy_flush_work(); + nf_tables_trans_destroy_flush_work(net); again: list_for_each_entry(table, &nft_net->tables, list) { if (nft_table_has_owner(table) && @@ -11695,6 +11696,7 @@ static int __net_init nf_tables_init_net(struct net *net) INIT_LIST_HEAD(&nft_net->tables); INIT_LIST_HEAD(&nft_net->commit_list); + INIT_LIST_HEAD(&nft_net->destroy_list); INIT_LIST_HEAD(&nft_net->commit_set_list); INIT_LIST_HEAD(&nft_net->binding_list); INIT_LIST_HEAD(&nft_net->module_list); @@ -11703,6 +11705,7 @@ static int __net_init nf_tables_init_net(struct net *net) nft_net->base_seq = 1; nft_net->gc_seq = 0; nft_net->validate_state = NFT_VALIDATE_SKIP; + INIT_WORK(&nft_net->destroy_work, nf_tables_trans_destroy_work); return 0; } @@ -11731,14 +11734,17 @@ static void __net_exit nf_tables_exit_net(struct net *net) if (!list_empty(&nft_net->module_list)) nf_tables_module_autoload_cleanup(net); + cancel_work_sync(&nft_net->destroy_work); __nft_release_tables(net); nft_gc_seq_end(nft_net, gc_seq); mutex_unlock(&nft_net->commit_mutex); + WARN_ON_ONCE(!list_empty(&nft_net->tables)); WARN_ON_ONCE(!list_empty(&nft_net->module_list)); WARN_ON_ONCE(!list_empty(&nft_net->notify_list)); + WARN_ON_ONCE(!list_empty(&nft_net->destroy_list)); } static void nf_tables_exit_batch(struct list_head *net_exit_list) @@ -11829,10 +11835,8 @@ static void __exit nf_tables_module_exit(void) unregister_netdevice_notifier(&nf_tables_flowtable_notifier); nft_chain_filter_fini(); nft_chain_route_fini(); - nf_tables_trans_destroy_flush_work(); unregister_pernet_subsys(&nf_tables_net_ops); cancel_work_sync(&trans_gc_work); - cancel_work_sync(&trans_destroy_work); rcu_barrier(); rhltable_destroy(&nft_objname_ht); nf_tables_core_module_exit(); diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 7ca4f0d21fe2a..72711d62fddfa 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -228,7 +228,7 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv) return 0; } -static void nft_compat_wait_for_destructors(void) +static void nft_compat_wait_for_destructors(struct net *net) { /* xtables matches or targets can have side effects, e.g. 
* creation/destruction of /proc files. @@ -236,7 +236,7 @@ static void nft_compat_wait_for_destructors(void) * work queue. If we have pending invocations we thus * need to wait for those to finish. */ - nf_tables_trans_destroy_flush_work(); + nf_tables_trans_destroy_flush_work(net); } static int @@ -262,7 +262,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv); - nft_compat_wait_for_destructors(); + nft_compat_wait_for_destructors(ctx->net); ret = xt_check_target(&par, size, proto, inv); if (ret < 0) { @@ -515,7 +515,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv); - nft_compat_wait_for_destructors(); + nft_compat_wait_for_destructors(ctx->net); return xt_check_match(&par, size, proto, inv); } diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 67a41cd2baaff..a1b373b99f7b8 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -230,6 +230,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr, enum ip_conntrack_info ctinfo; u16 value = nft_reg_load16(®s->data[priv->sreg]); struct nf_conn *ct; + int oldcnt; ct = nf_ct_get(skb, &ctinfo); if (ct) /* already tracked */ @@ -250,10 +251,11 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr, ct = this_cpu_read(nft_ct_pcpu_template); - if (likely(refcount_read(&ct->ct_general.use) == 1)) { - refcount_inc(&ct->ct_general.use); + __refcount_inc(&ct->ct_general.use, &oldcnt); + if (likely(oldcnt == 1)) { nf_ct_zone_add(ct, &zone); } else { + refcount_dec(&ct->ct_general.use); /* previous skb got queued to userspace, allocate temporary * one until percpu template can be reused. */ diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index b8d03364566c1..c74012c991255 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -85,7 +85,6 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb, unsigned char optbuf[sizeof(struct ip_options) + 40]; struct ip_options *opt = (struct ip_options *)optbuf; struct iphdr *iph, _iph; - unsigned int start; bool found = false; __be32 info; int optlen; @@ -93,7 +92,6 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb, iph = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); if (!iph) return -EBADMSG; - start = sizeof(struct iphdr); optlen = iph->ihl * 4 - (int)sizeof(struct iphdr); if (optlen <= 0) @@ -103,7 +101,7 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb, /* Copy the options since __ip_options_compile() modifies * the options. */ - if (skb_copy_bits(skb, start, opt->__data, optlen)) + if (skb_copy_bits(skb, sizeof(struct iphdr), opt->__data, optlen)) return -EBADMSG; opt->optlen = optlen; @@ -118,18 +116,18 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb, found = target == IPOPT_SSRR ? 
opt->is_strictroute : !opt->is_strictroute; if (found) - *offset = opt->srr + start; + *offset = opt->srr; break; case IPOPT_RR: if (!opt->rr) break; - *offset = opt->rr + start; + *offset = opt->rr; found = true; break; case IPOPT_RA: if (!opt->router_alert) break; - *offset = opt->router_alert + start; + *offset = opt->router_alert; found = true; break; default: diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index b93f046ac7d1e..4b3452dff2ec0 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -309,7 +309,8 @@ static bool nft_rhash_expr_needs_gc_run(const struct nft_set *set, nft_setelem_expr_foreach(expr, elem_expr, size) { if (expr->ops->gc && - expr->ops->gc(read_pnet(&set->net), expr)) + expr->ops->gc(read_pnet(&set->net), expr) && + set->flags & NFT_SET_EVAL) return true; } diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c index b8d3c3213efee..c15db28c5ebc4 100644 --- a/net/netfilter/nft_set_pipapo_avx2.c +++ b/net/netfilter/nft_set_pipapo_avx2.c @@ -994,8 +994,9 @@ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill, NFT_PIPAPO_AVX2_BUCKET_LOAD8(5, lt, 8, pkt[8], bsize); NFT_PIPAPO_AVX2_AND(6, 2, 3); + NFT_PIPAPO_AVX2_AND(3, 4, 7); NFT_PIPAPO_AVX2_BUCKET_LOAD8(7, lt, 9, pkt[9], bsize); - NFT_PIPAPO_AVX2_AND(0, 4, 5); + NFT_PIPAPO_AVX2_AND(0, 3, 5); NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 10, pkt[10], bsize); NFT_PIPAPO_AVX2_AND(2, 6, 7); NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 11, pkt[11], bsize); diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c index 5c6ed68cc6e05..0d99786c322e8 100644 --- a/net/netfilter/nft_tunnel.c +++ b/net/netfilter/nft_tunnel.c @@ -335,13 +335,13 @@ static int nft_tunnel_obj_erspan_init(const struct nlattr *attr, static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = { [NFTA_TUNNEL_KEY_GENEVE_CLASS] = { .type = NLA_U16 }, [NFTA_TUNNEL_KEY_GENEVE_TYPE] = { .type = NLA_U8 }, - [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 }, + [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 127 }, }; static int nft_tunnel_obj_geneve_init(const struct nlattr *attr, struct nft_tunnel_opts *opts) { - struct geneve_opt *opt = (struct geneve_opt *)opts->u.data + opts->len; + struct geneve_opt *opt = (struct geneve_opt *)(opts->u.data + opts->len); struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1]; int err, data_len; @@ -628,7 +628,7 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb, if (!inner) goto failure; while (opts->len > offset) { - opt = (struct geneve_opt *)opts->u.data + offset; + opt = (struct geneve_opt *)(opts->u.data + offset); if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS, opt->opt_class) || nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE, diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 704c858cf2093..61fea7baae5d5 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -947,12 +947,6 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port, pskb_trim(skb, ovs_mac_header_len(key)); } - /* Need to set the pkt_type to involve the routing layer. The - * packet movement through the OVS datapath doesn't generally - * use routing, but this is needed for tunnel cases. 
- */ - skb->pkt_type = PACKET_OUTGOING; - if (likely(!mru || (skb->len <= mru + vport->dev->hard_header_len))) { ovs_vport_send(vport, skb, ovs_key_mac_proto(key)); diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 3bb4810234aac..e573e92213029 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -1368,8 +1368,11 @@ bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr) attr == OVS_KEY_ATTR_CT_MARK) return true; if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && - attr == OVS_KEY_ATTR_CT_LABELS) - return true; + attr == OVS_KEY_ATTR_CT_LABELS) { + struct ovs_net *ovs_net = net_generic(net, ovs_net_id); + + return ovs_net->xt_label; + } return false; } @@ -1378,7 +1381,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr, const struct sw_flow_key *key, struct sw_flow_actions **sfa, bool log) { - unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE; struct ovs_conntrack_info ct_info; const char *helper = NULL; u16 family; @@ -1407,12 +1409,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr, return -ENOMEM; } - if (nf_connlabels_get(net, n_bits - 1)) { - nf_ct_tmpl_free(ct_info.ct); - OVS_NLERR(log, "Failed to set connlabel length"); - return -EOPNOTSUPP; - } - if (ct_info.timeout[0]) { if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto, ct_info.timeout)) @@ -1581,7 +1577,6 @@ static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info) if (ct_info->ct) { if (ct_info->timeout[0]) nf_ct_destroy_timeout(ct_info->ct); - nf_connlabels_put(nf_ct_net(ct_info->ct)); nf_ct_tmpl_free(ct_info->ct); } } @@ -2006,9 +2001,17 @@ struct genl_family dp_ct_limit_genl_family __ro_after_init = { int ovs_ct_init(struct net *net) { -#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT) + unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE; struct ovs_net *ovs_net = net_generic(net, ovs_net_id); + if (nf_connlabels_get(net, n_bits - 1)) { + ovs_net->xt_label = false; + OVS_NLERR(true, "Failed to set connlabel length"); + } else { + ovs_net->xt_label = true; + } + +#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT) return ovs_ct_limit_init(net, ovs_net); #else return 0; @@ -2017,9 +2020,12 @@ int ovs_ct_init(struct net *net) void ovs_ct_exit(struct net *net) { -#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT) struct ovs_net *ovs_net = net_generic(net, ovs_net_id); +#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT) ovs_ct_limit_exit(net, ovs_net); #endif + + if (ovs_net->xt_label) + nf_connlabels_put(net); } diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 365b9bb7f546e..9ca6231ea6470 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -160,6 +160,9 @@ struct ovs_net { #if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT) struct ovs_ct_limit_info *ct_limit_info; #endif + + /* Module reference for configuring conntrack. 
*/ + bool xt_label; }; /** diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 729ef582a3a8b..305daf57a4f9d 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -2317,14 +2317,10 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb) OVS_FLOW_ATTR_MASK, true, skb); } -#define MAX_ACTIONS_BUFSIZE (32 * 1024) - static struct sw_flow_actions *nla_alloc_flow_actions(int size) { struct sw_flow_actions *sfa; - WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE); - sfa = kmalloc(kmalloc_size_roundup(sizeof(*sfa) + size), GFP_KERNEL); if (!sfa) return ERR_PTR(-ENOMEM); @@ -2480,15 +2476,6 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2); - if (new_acts_size > MAX_ACTIONS_BUFSIZE) { - if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) { - OVS_NLERR(log, "Flow action size exceeds max %u", - MAX_ACTIONS_BUFSIZE); - return ERR_PTR(-EMSGSIZE); - } - new_acts_size = MAX_ACTIONS_BUFSIZE; - } - acts = nla_alloc_flow_actions(new_acts_size); if (IS_ERR(acts)) return ERR_CAST(acts); @@ -2889,7 +2876,8 @@ static int validate_set(const struct nlattr *a, size_t key_len; /* There can be only one key in a action */ - if (nla_total_size(nla_len(ovs_key)) != nla_len(a)) + if (!nla_ok(ovs_key, nla_len(a)) || + nla_total_size(nla_len(ovs_key)) != nla_len(a)) return -EINVAL; key_len = nla_len(ovs_key); @@ -3545,7 +3533,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, int err; u32 mpls_label_count = 0; - *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE)); + *sfa = nla_alloc_flow_actions(nla_len(attr)); if (IS_ERR(*sfa)) return PTR_ERR(*sfa); diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c index 085e7892d3104..b1536da2246b8 100644 --- a/net/rxrpc/rxperf.c +++ b/net/rxrpc/rxperf.c @@ -478,6 +478,18 @@ static int rxperf_deliver_request(struct rxperf_call *call) call->unmarshal++; fallthrough; case 2: + ret = rxperf_extract_data(call, true); + if (ret < 0) + return ret; + + /* Deal with the terminal magic cookie. 
*/ + call->iov_len = 4; + call->kvec[0].iov_len = call->iov_len; + call->kvec[0].iov_base = call->tmp; + iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len); + call->unmarshal++; + fallthrough; + case 3: ret = rxperf_extract_data(call, false); if (ret < 0) return ret; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index af7c998459488..e296714803dc0 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -68,7 +68,7 @@ geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = { [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, - .len = 128 }, + .len = 127 }, }; static const struct nla_policy diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index dfa3067084948..a3bab5e27e71b 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -97,7 +97,7 @@ tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp, err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base, n, xa_limit_32b, &next, GFP_KERNEL); - if (err) + if (err < 0) goto err_xa_alloc; exts->miss_cookie_node = n; @@ -2051,6 +2051,7 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb, struct tcmsg *tcm; struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); + int ret = -EMSGSIZE; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); if (!nlh) @@ -2095,11 +2096,45 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb, return skb->len; +cls_op_not_supp: + ret = -EOPNOTSUPP; out_nlmsg_trim: nla_put_failure: -cls_op_not_supp: nlmsg_trim(skb, b); - return -1; + return ret; +} + +static struct sk_buff *tfilter_notify_prep(struct net *net, + struct sk_buff *oskb, + struct nlmsghdr *n, + struct tcf_proto *tp, + struct tcf_block *block, + struct Qdisc *q, u32 parent, + void *fh, int event, + u32 portid, bool rtnl_held, + struct netlink_ext_ack *extack) +{ + unsigned int size = oskb ? 
max(NLMSG_GOODSIZE, oskb->len) : NLMSG_GOODSIZE; + struct sk_buff *skb; + int ret; + +retry: + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return ERR_PTR(-ENOBUFS); + + ret = tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, + n->nlmsg_seq, n->nlmsg_flags, event, false, + rtnl_held, extack); + if (ret <= 0) { + kfree_skb(skb); + if (ret == -EMSGSIZE) { + size += NLMSG_GOODSIZE; + goto retry; + } + return ERR_PTR(-EINVAL); + } + return skb; } static int tfilter_notify(struct net *net, struct sk_buff *oskb, @@ -2115,16 +2150,10 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb, if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) return 0; - skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); - if (!skb) - return -ENOBUFS; - - if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, - n->nlmsg_seq, n->nlmsg_flags, event, - false, rtnl_held, extack) <= 0) { - kfree_skb(skb); - return -EINVAL; - } + skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, event, + portid, rtnl_held, extack); + if (IS_ERR(skb)) + return PTR_ERR(skb); if (unicast) err = rtnl_unicast(skb, net, portid); @@ -2147,16 +2176,11 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) return tp->ops->delete(tp, fh, last, rtnl_held, extack); - skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); - if (!skb) - return -ENOBUFS; - - if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, - n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER, - false, rtnl_held, extack) <= 0) { + skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, + RTM_DELTFILTER, portid, rtnl_held, extack); + if (IS_ERR(skb)) { NL_SET_ERR_MSG(extack, "Failed to build del event notification"); - kfree_skb(skb); - return -EINVAL; + return PTR_ERR(skb); } err = tp->ops->delete(tp, fh, last, rtnl_held, extack); diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 03505673d5234..099ff6a3e1f51 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -766,7 +766,7 @@ geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = { [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, - .len = 128 }, + .len = 127 }, }; static const struct nla_policy diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index d26ac6bd9b108..518f52f65a49d 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -2254,6 +2254,12 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, return -EOPNOTSUPP; } + /* Prevent creation of traffic classes with classid TC_H_ROOT */ + if (clid == TC_H_ROOT) { + NL_SET_ERR_MSG(extack, "Cannot create traffic class with classid TC_H_ROOT"); + return -EINVAL; + } + new_cl = cl; err = -EOPNOTSUPP; if (cops->change) diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index 3e8d4fe4d91e3..e1f6e7618debd 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -65,10 +65,7 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) &q->stats, qdisc_pkt_len, codel_get_enqueue_time, drop_func, dequeue_func); - /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0, - * or HTB crashes. Defer it for next round. 
- */ - if (q->stats.drop_count && sch->q.qlen) { + if (q->stats.drop_count) { qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len); q->stats.drop_count = 0; q->stats.drop_len = 0; diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 4f908c11ba952..778f6e5966be8 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -314,10 +314,8 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch) } qdisc_bstats_update(sch, skb); flow->deficit -= qdisc_pkt_len(skb); - /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0, - * or HTB crashes. Defer it for next round. - */ - if (q->cstats.drop_count && sch->q.qlen) { + + if (q->cstats.drop_count) { qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len); q->cstats.drop_count = 0; diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 79ba9dc702541..43b0343a7cd0c 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -913,7 +913,8 @@ static void gred_destroy(struct Qdisc *sch) for (i = 0; i < table->DPs; i++) gred_destroy_vq(table->tab[i]); - gred_offload(sch, TC_GRED_DESTROY); + if (table->opt) + gred_offload(sch, TC_GRED_DESTROY); kfree(table->opt); } diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 65d5b59da5830..58b42dcf8f201 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -631,6 +631,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt, struct red_parms *p = NULL; struct sk_buff *to_free = NULL; struct sk_buff *tail = NULL; + unsigned int maxflows; + unsigned int quantum; + unsigned int divisor; + int perturb_period; + u8 headdrop; + u8 maxdepth; + int limit; + u8 flags; + if (opt->nla_len < nla_attr_size(sizeof(*ctl))) return -EINVAL; @@ -652,39 +661,64 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt, if (!p) return -ENOMEM; } - if (ctl->limit == 1) { - NL_SET_ERR_MSG_MOD(extack, "invalid limit"); - return -EINVAL; - } + sch_tree_lock(sch); + + limit = q->limit; + divisor = q->divisor; + headdrop = q->headdrop; + maxdepth = q->maxdepth; + maxflows = q->maxflows; + perturb_period = q->perturb_period; + quantum = q->quantum; + flags = q->flags; + + /* update and validate configuration */ if (ctl->quantum) - q->quantum = ctl->quantum; - WRITE_ONCE(q->perturb_period, ctl->perturb_period * HZ); + quantum = ctl->quantum; + perturb_period = ctl->perturb_period * HZ; if (ctl->flows) - q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS); + maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS); if (ctl->divisor) { - q->divisor = ctl->divisor; - q->maxflows = min_t(u32, q->maxflows, q->divisor); + divisor = ctl->divisor; + maxflows = min_t(u32, maxflows, divisor); } if (ctl_v1) { if (ctl_v1->depth) - q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH); + maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH); if (p) { - swap(q->red_parms, p); - red_set_parms(q->red_parms, + red_set_parms(p, ctl_v1->qth_min, ctl_v1->qth_max, ctl_v1->Wlog, ctl_v1->Plog, ctl_v1->Scell_log, NULL, ctl_v1->max_P); } - q->flags = ctl_v1->flags; - q->headdrop = ctl_v1->headdrop; + flags = ctl_v1->flags; + headdrop = ctl_v1->headdrop; } if (ctl->limit) { - q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows); - q->maxflows = min_t(u32, q->maxflows, q->limit); + limit = min_t(u32, ctl->limit, maxdepth * maxflows); + maxflows = min_t(u32, maxflows, limit); } + if (limit == 1) { + sch_tree_unlock(sch); + kfree(p); + NL_SET_ERR_MSG_MOD(extack, "invalid limit"); + return -EINVAL; + } + + /* commit configuration */ + q->limit = limit; + 
q->divisor = divisor; + q->headdrop = headdrop; + q->maxdepth = maxdepth; + q->maxflows = maxflows; + WRITE_ONCE(q->perturb_period, perturb_period); + q->quantum = quantum; + q->flags = flags; + if (p) + swap(q->red_parms, p); qlen = sch->q.qlen; while (sch->q.qlen > q->limit) { diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c index 20ff7386b74bd..f485f62ab721a 100644 --- a/net/sched/sch_skbprio.c +++ b/net/sched/sch_skbprio.c @@ -123,8 +123,6 @@ static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, /* Check to update highest and lowest priorities. */ if (skb_queue_empty(lp_qdisc)) { if (q->lowest_prio == q->highest_prio) { - /* The incoming packet is the only packet in queue. */ - BUG_ON(sch->q.qlen != 1); q->lowest_prio = prio; q->highest_prio = prio; } else { @@ -156,7 +154,6 @@ static struct sk_buff *skbprio_dequeue(struct Qdisc *sch) /* Update highest priority field. */ if (skb_queue_empty(hpq)) { if (q->lowest_prio == q->highest_prio) { - BUG_ON(sch->q.qlen); q->highest_prio = 0; q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; } else { diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 36ee34f483d70..53725ee7ba06d 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -72,8 +72,9 @@ /* Forward declarations for internal helper functions. */ static bool sctp_writeable(const struct sock *sk); static void sctp_wfree(struct sk_buff *skb); -static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, - size_t msg_len); +static int sctp_wait_for_sndbuf(struct sctp_association *asoc, + struct sctp_transport *transport, + long *timeo_p, size_t msg_len); static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); static int sctp_wait_for_accept(struct sock *sk, long timeo); @@ -1828,7 +1829,7 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc, if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) { timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); - err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); + err = sctp_wait_for_sndbuf(asoc, transport, &timeo, msg_len); if (err) goto err; if (unlikely(sinfo->sinfo_stream >= asoc->stream.outcnt)) { @@ -9214,8 +9215,9 @@ void sctp_sock_rfree(struct sk_buff *skb) /* Helper function to wait for space in the sndbuf. */ -static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, - size_t msg_len) +static int sctp_wait_for_sndbuf(struct sctp_association *asoc, + struct sctp_transport *transport, + long *timeo_p, size_t msg_len) { struct sock *sk = asoc->base.sk; long current_timeo = *timeo_p; @@ -9225,7 +9227,9 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, *timeo_p, msg_len); - /* Increment the association's refcnt. */ + /* Increment the transport and association's refcnt. */ + if (transport) + sctp_transport_hold(transport); sctp_association_hold(asoc); /* Wait on the association specific sndbuf space. 
*/ @@ -9234,7 +9238,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, TASK_INTERRUPTIBLE); if (asoc->base.dead) goto do_dead; - if (!*timeo_p) + if ((!*timeo_p) || (transport && transport->dead)) goto do_nonblock; if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING) goto do_error; @@ -9259,7 +9263,9 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, out: finish_wait(&asoc->wait, &wait); - /* Release the association's refcnt. */ + /* Release the transport and association's refcnt. */ + if (transport) + sctp_transport_put(transport); sctp_association_put(asoc); return err; diff --git a/net/sctp/stream.c b/net/sctp/stream.c index c241cc552e8d5..bfcff6d6a4386 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -735,7 +735,7 @@ struct sctp_chunk *sctp_process_strreset_tsnreq( * value SHOULD be the smallest TSN not acknowledged by the * receiver of the request plus 2^31. */ - init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1 << 31); + init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1U << 31); sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, init_tsn, GFP_ATOMIC); diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 8e1e97be4df79..ee3eac338a9de 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -525,6 +525,8 @@ static int proc_sctp_do_auth(const struct ctl_table *ctl, int write, return ret; } +static DEFINE_MUTEX(sctp_sysctl_mutex); + static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { @@ -549,6 +551,7 @@ static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write, if (new_value > max || new_value < min) return -EINVAL; + mutex_lock(&sctp_sysctl_mutex); net->sctp.udp_port = new_value; sctp_udp_sock_stop(net); if (new_value) { @@ -561,6 +564,7 @@ static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write, lock_sock(sk); sctp_sk(sk)->udp_port = htons(net->sctp.udp_port); release_sock(sk); + mutex_unlock(&sctp_sysctl_mutex); } return ret; diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 2abe45af98e7c..31eca29b6cfbf 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -117,6 +117,8 @@ struct sctp_transport *sctp_transport_new(struct net *net, */ void sctp_transport_free(struct sctp_transport *transport) { + transport->dead = 1; + /* Try to delete the heartbeat timer. 
*/ if (del_timer(&transport->hb_timer)) sctp_transport_put(transport); diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index ebc41a7b13dbe..78b0e6dba0a2b 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -362,6 +362,9 @@ static void smc_destruct(struct sock *sk) return; } +static struct lock_class_key smc_key; +static struct lock_class_key smc_slock_key; + void smc_sk_init(struct net *net, struct sock *sk, int protocol) { struct smc_sock *smc = smc_sk(sk); @@ -375,6 +378,8 @@ void smc_sk_init(struct net *net, struct sock *sk, int protocol) INIT_WORK(&smc->connect_work, smc_connect_work); INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work); INIT_LIST_HEAD(&smc->accept_q); + sock_lock_init_class_and_name(sk, "slock-AF_SMC", &smc_slock_key, + "sk_lock-AF_SMC", &smc_key); spin_lock_init(&smc->accept_q_lock); spin_lock_init(&smc->conn.send_lock); sk->sk_prot->hash(sk); diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 8299ceb3e3739..95696f42647ec 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -347,7 +347,10 @@ static int strp_read_sock(struct strparser *strp) struct socket *sock = strp->sk->sk_socket; read_descriptor_t desc; - if (unlikely(!sock || !sock->ops || !sock->ops->read_sock)) + if (unlikely(!sock || !sock->ops)) + return -EBUSY; + + if (unlikely(!strp->cb.read_sock && !sock->ops->read_sock)) return -EBUSY; desc.arg.data = strp; @@ -355,7 +358,10 @@ static int strp_read_sock(struct strparser *strp) desc.count = 1; /* give more than one skb per call */ /* sk should be locked here, so okay to do read_sock */ - sock->ops->read_sock(strp->sk, &desc, strp_recv); + if (strp->cb.read_sock) + strp->cb.read_sock(strp, &desc, strp_recv); + else + sock->ops->read_sock(strp->sk, &desc, strp_recv); desc.error = strp->cb.read_sock_done(strp, desc.error); @@ -468,6 +474,7 @@ int strp_init(struct strparser *strp, struct sock *sk, strp->cb.unlock = cb->unlock ? : strp_sock_unlock; strp->cb.rcv_msg = cb->rcv_msg; strp->cb.parse_msg = cb->parse_msg; + strp->cb.read_sock = cb->read_sock; strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done; strp->cb.abort_parser = cb->abort_parser ? 
: strp_abort_strp; diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 059f6ef1ad189..7fcb0574fc79e 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -1669,12 +1669,14 @@ static void remove_cache_proc_entries(struct cache_detail *cd) } } -#ifdef CONFIG_PROC_FS static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) { struct proc_dir_entry *p; struct sunrpc_net *sn; + if (!IS_ENABLED(CONFIG_PROC_FS)) + return 0; + sn = net_generic(net, sunrpc_net_id); cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc); if (cd->procfs == NULL) @@ -1702,12 +1704,6 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) remove_cache_proc_entries(cd); return -ENOMEM; } -#else /* CONFIG_PROC_FS */ -static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) -{ - return 0; -} -#endif void __init cache_initialize(void) { diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index cef623ea15060..9b45fbdc90cab 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -864,8 +864,6 @@ void rpc_signal_task(struct rpc_task *task) if (!rpc_task_set_rpc_status(task, -ERESTARTSYS)) return; trace_rpc_task_signalled(task, task->tk_action); - set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate); - smp_mb__after_atomic(); queue = READ_ONCE(task->tk_waitqueue); if (queue) rpc_wake_up_queued_task(queue, task); diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index c3fbf0779d4ab..aca8bdf65d729 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -621,7 +621,8 @@ static void __svc_rdma_free(struct work_struct *work) /* Destroy the CM ID */ rdma_destroy_id(rdma->sc_cm_id); - rpcrdma_rn_unregister(device, &rdma->sc_rn); + if (!test_bit(XPT_LISTENER, &rdma->sc_xprt.xpt_flags)) + rpcrdma_rn_unregister(device, &rdma->sc_rn); kfree(rdma); } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index b69e6290acfab..171ad4e2523f1 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2580,7 +2580,15 @@ static void xs_tls_handshake_done(void *data, int status, key_serial_t peerid) struct sock_xprt *lower_transport = container_of(lower_xprt, struct sock_xprt, xprt); - lower_transport->xprt_err = status ? 
-EACCES : 0; + switch (status) { + case 0: + case -EACCES: + case -ETIMEDOUT: + lower_transport->xprt_err = status; + break; + default: + lower_transport->xprt_err = -EACCES; + } complete(&lower_transport->handshake_done); xprt_put(lower_xprt); } diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 6488ead9e4645..4d5fbacef496f 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -472,7 +472,7 @@ bool switchdev_port_obj_act_is_deferred(struct net_device *dev, EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred); static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain); -static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain); +static RAW_NOTIFIER_HEAD(switchdev_blocking_notif_chain); /** * register_switchdev_notifier - Register notifier @@ -518,17 +518,27 @@ EXPORT_SYMBOL_GPL(call_switchdev_notifiers); int register_switchdev_blocking_notifier(struct notifier_block *nb) { - struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain; + struct raw_notifier_head *chain = &switchdev_blocking_notif_chain; + int err; + + rtnl_lock(); + err = raw_notifier_chain_register(chain, nb); + rtnl_unlock(); - return blocking_notifier_chain_register(chain, nb); + return err; } EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier); int unregister_switchdev_blocking_notifier(struct notifier_block *nb) { - struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain; + struct raw_notifier_head *chain = &switchdev_blocking_notif_chain; + int err; - return blocking_notifier_chain_unregister(chain, nb); + rtnl_lock(); + err = raw_notifier_chain_unregister(chain, nb); + rtnl_unlock(); + + return err; } EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier); @@ -536,10 +546,11 @@ int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev, struct switchdev_notifier_info *info, struct netlink_ext_ack *extack) { + ASSERT_RTNL(); info->dev = dev; info->extack = extack; - return blocking_notifier_call_chain(&switchdev_blocking_notif_chain, - val, info); + return raw_notifier_call_chain(&switchdev_blocking_notif_chain, + val, info); } EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers); diff --git a/net/tipc/link.c b/net/tipc/link.c index 5c2088a469cea..5689e1f485479 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1046,6 +1046,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) { if (imp == TIPC_SYSTEM_IMPORTANCE) { pr_warn("%s<%s>, link overflow", link_rst_msg, l->name); + __skb_queue_purge(list); return -ENOBUFS; } rc = link_schedule_user(l, hdr); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 6b4b9f2749a6f..0acf313deb01f 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -809,6 +809,11 @@ static int tls_setsockopt(struct sock *sk, int level, int optname, return do_tls_setsockopt(sk, optname, optval, optlen); } +static int tls_disconnect(struct sock *sk, int flags) +{ + return -EOPNOTSUPP; +} + struct tls_context *tls_ctx_create(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -904,6 +909,7 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG], prot[TLS_BASE][TLS_BASE] = *base; prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt; prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt; + prot[TLS_BASE][TLS_BASE].disconnect = tls_disconnect; prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close; prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE]; diff --git a/net/vmw_vsock/af_vsock.c 
b/net/vmw_vsock/af_vsock.c index 37299a7ca1876..d08f205b33dcc 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1189,6 +1189,9 @@ static int vsock_read_skb(struct sock *sk, skb_read_actor_t read_actor) { struct vsock_sock *vsk = vsock_sk(sk); + if (WARN_ON_ONCE(!vsk->transport)) + return -ENODEV; + return vsk->transport->read_skb(vsk, read_actor); } @@ -1548,7 +1551,11 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr, timeout = vsk->connect_timeout; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); - while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) { + /* If the socket is already closing or it is in an error state, there + * is no point in waiting. + */ + while (sk->sk_state != TCP_ESTABLISHED && + sk->sk_state != TCP_CLOSING && sk->sk_err == 0) { if (flags & O_NONBLOCK) { /* If we're not going to block, we schedule a timeout * function to generate a timeout on the connection diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index b58c3818f284f..f0e48e6911fc4 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -670,6 +670,13 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock) }; int ret; + mutex_lock(&vsock->rx_lock); + vsock->rx_buf_nr = 0; + vsock->rx_buf_max_nr = 0; + mutex_unlock(&vsock->rx_lock); + + atomic_set(&vsock->queued_replies, 0); + ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, vqs_info, NULL); if (ret < 0) return ret; @@ -779,9 +786,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev) vsock->vdev = vdev; - vsock->rx_buf_nr = 0; - vsock->rx_buf_max_nr = 0; - atomic_set(&vsock->queued_replies, 0); mutex_init(&vsock->tx_lock); mutex_init(&vsock->rx_lock); diff --git a/net/vmw_vsock/vsock_bpf.c b/net/vmw_vsock/vsock_bpf.c index f201d9eca1df2..07b96d56f3a57 100644 --- a/net/vmw_vsock/vsock_bpf.c +++ b/net/vmw_vsock/vsock_bpf.c @@ -87,7 +87,7 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg, lock_sock(sk); vsk = vsock_sk(sk); - if (!vsk->transport) { + if (WARN_ON_ONCE(!vsk->transport)) { copied = -ENODEV; goto out; } diff --git a/net/wireless/core.c b/net/wireless/core.c index 7d313fb66d76b..1ce8fff2a28a4 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1198,6 +1198,13 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev) { struct cfg80211_internal_bss *scan, *tmp; struct cfg80211_beacon_registration *reg, *treg; + unsigned long flags; + + spin_lock_irqsave(&rdev->wiphy_work_lock, flags); + WARN_ON(!list_empty(&rdev->wiphy_work_list)); + spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); + cancel_work_sync(&rdev->wiphy_work); + rfkill_destroy(rdev->wiphy.rfkill); list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) { list_del(®->list); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 1e78f575fb563..ecfceddce00fc 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4218,6 +4218,11 @@ static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags) if (flags[flag]) *mntrflags |= (1<unaligned) pool->free_heads[i] = xskb; else - xp_init_xskb_addr(xskb, pool, i * pool->chunk_size); + xp_init_xskb_addr(xskb, pool, (u64)i * pool->chunk_size); } return pool; diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index e5722c95b8bb3..a30538a980cc7 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -610,6 +610,40 @@ int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err) } 
EXPORT_SYMBOL_GPL(xfrm_output_resume); +static int xfrm_dev_direct_output(struct sock *sk, struct xfrm_state *x, + struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + struct net *net = xs_net(x); + int err; + + dst = skb_dst_pop(skb); + if (!dst) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); + kfree_skb(skb); + return -EHOSTUNREACH; + } + skb_dst_set(skb, dst); + nf_reset_ct(skb); + + err = skb_dst(skb)->ops->local_out(net, sk, skb); + if (unlikely(err != 1)) { + kfree_skb(skb); + return err; + } + + /* In transport mode, network destination is + * directly reachable, while in tunnel mode, + * inner packet network may not be. In packet + * offload type, HW is responsible for hard + * header packet mangling so directly xmit skb + * to netdevice. + */ + skb->dev = x->xso.dev; + __skb_push(skb, skb->dev->hard_header_len); + return dev_queue_xmit(skb); +} + static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb) { return xfrm_output_resume(sk, skb, 1); @@ -729,6 +763,13 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb) return -EHOSTUNREACH; } + /* Exclusive direct xmit for tunnel mode, as + * some filtering or matching rules may apply + * in transport mode. + */ + if (x->props.mode == XFRM_MODE_TUNNEL) + return xfrm_dev_direct_output(sk, x, skb); + return xfrm_output_resume(sk, skb, 0); } @@ -752,7 +793,7 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb) skb->encapsulation = 1; if (skb_is_gso(skb)) { - if (skb->inner_protocol) + if (skb->inner_protocol && x->props.mode == XFRM_MODE_TUNNEL) return xfrm_output_gso(net, sk, skb); skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; diff --git a/rust/Makefile b/rust/Makefile index 45779a064fa4f..1b00e16951eeb 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -3,7 +3,7 @@ # Where to place rustdoc generated documentation rustdoc_output := $(objtree)/Documentation/output/rust/rustdoc -obj-$(CONFIG_RUST) += core.o compiler_builtins.o +obj-$(CONFIG_RUST) += core.o compiler_builtins.o ffi.o always-$(CONFIG_RUST) += exports_core_generated.h # Missing prototypes are expected in the helpers since these are exported @@ -15,8 +15,8 @@ always-$(CONFIG_RUST) += libmacros.so no-clean-files += libmacros.so always-$(CONFIG_RUST) += bindings/bindings_generated.rs bindings/bindings_helpers_generated.rs -obj-$(CONFIG_RUST) += alloc.o bindings.o kernel.o -always-$(CONFIG_RUST) += exports_alloc_generated.h exports_helpers_generated.h \ +obj-$(CONFIG_RUST) += bindings.o kernel.o +always-$(CONFIG_RUST) += exports_helpers_generated.h \ exports_bindings_generated.h exports_kernel_generated.h always-$(CONFIG_RUST) += uapi/uapi_generated.rs @@ -53,15 +53,10 @@ endif core-cfgs = \ --cfg no_fp_fmt_parse -alloc-cfgs = \ - --cfg no_global_oom_handling \ - --cfg no_rc \ - --cfg no_sync - quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $< cmd_rustdoc = \ OBJTREE=$(abspath $(objtree)) \ - $(RUSTDOC) $(if $(rustdoc_host),$(rust_common_flags),$(rust_flags)) \ + $(RUSTDOC) $(filter-out $(skip_flags),$(if $(rustdoc_host),$(rust_common_flags),$(rust_flags))) \ $(rustc_target_flags) -L$(objtree)/$(obj) \ -Zunstable-options --generate-link-to-definition \ --output $(rustdoc_output) \ @@ -81,7 +76,7 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $< # command-like flags to solve the issue. Meanwhile, we use the non-custom case # and then retouch the generated files. 
rustdoc: rustdoc-core rustdoc-macros rustdoc-compiler_builtins \ - rustdoc-alloc rustdoc-kernel + rustdoc-kernel $(Q)cp $(srctree)/Documentation/images/logo.svg $(rustdoc_output)/static.files/ $(Q)cp $(srctree)/Documentation/images/COPYING-logo $(rustdoc_output)/static.files/ $(Q)find $(rustdoc_output) -name '*.html' -type f -print0 | xargs -0 sed -Ei \ @@ -98,6 +93,9 @@ rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \ rustdoc-macros: $(src)/macros/lib.rs FORCE +$(call if_changed,rustdoc) +# Starting with Rust 1.82.0, skipping `-Wrustdoc::unescaped_backticks` should +# not be needed -- see https://github.com/rust-lang/rust/pull/128307. +rustdoc-core: private skip_flags = -Wrustdoc::unescaped_backticks rustdoc-core: private rustc_target_flags = $(core-cfgs) rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE +$(call if_changed,rustdoc) @@ -105,20 +103,14 @@ rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE rustdoc-compiler_builtins: $(src)/compiler_builtins.rs rustdoc-core FORCE +$(call if_changed,rustdoc) -# We need to allow `rustdoc::broken_intra_doc_links` because some -# `no_global_oom_handling` functions refer to non-`no_global_oom_handling` -# functions. Ideally `rustdoc` would have a way to distinguish broken links -# due to things that are "configured out" vs. entirely non-existing ones. -rustdoc-alloc: private rustc_target_flags = $(alloc-cfgs) \ - -Arustdoc::broken_intra_doc_links -rustdoc-alloc: $(RUST_LIB_SRC)/alloc/src/lib.rs rustdoc-core rustdoc-compiler_builtins FORCE +rustdoc-ffi: $(src)/ffi.rs rustdoc-core FORCE +$(call if_changed,rustdoc) -rustdoc-kernel: private rustc_target_flags = --extern alloc \ +rustdoc-kernel: private rustc_target_flags = --extern ffi \ --extern build_error --extern macros=$(objtree)/$(obj)/libmacros.so \ --extern bindings --extern uapi -rustdoc-kernel: $(src)/kernel/lib.rs rustdoc-core rustdoc-macros \ - rustdoc-compiler_builtins rustdoc-alloc $(obj)/libmacros.so \ +rustdoc-kernel: $(src)/kernel/lib.rs rustdoc-core rustdoc-ffi rustdoc-macros \ + rustdoc-compiler_builtins $(obj)/libmacros.so \ $(obj)/bindings.o FORCE +$(call if_changed,rustdoc) @@ -135,15 +127,28 @@ quiet_cmd_rustc_test_library = RUSTC TL $< rusttestlib-build_error: $(src)/build_error.rs FORCE +$(call if_changed,rustc_test_library) +rusttestlib-ffi: $(src)/ffi.rs FORCE + +$(call if_changed,rustc_test_library) + rusttestlib-macros: private rustc_target_flags = --extern proc_macro rusttestlib-macros: private rustc_test_library_proc = yes rusttestlib-macros: $(src)/macros/lib.rs FORCE +$(call if_changed,rustc_test_library) -rusttestlib-bindings: $(src)/bindings/lib.rs FORCE +rusttestlib-kernel: private rustc_target_flags = --extern ffi \ + --extern build_error --extern macros \ + --extern bindings --extern uapi +rusttestlib-kernel: $(src)/kernel/lib.rs \ + rusttestlib-bindings rusttestlib-uapi rusttestlib-build_error \ + $(obj)/libmacros.so $(obj)/bindings.o FORCE + +$(call if_changed,rustc_test_library) + +rusttestlib-bindings: private rustc_target_flags = --extern ffi +rusttestlib-bindings: $(src)/bindings/lib.rs rusttestlib-ffi FORCE +$(call if_changed,rustc_test_library) -rusttestlib-uapi: $(src)/uapi/lib.rs FORCE +rusttestlib-uapi: private rustc_target_flags = --extern ffi +rusttestlib-uapi: $(src)/uapi/lib.rs rusttestlib-ffi FORCE +$(call if_changed,rustc_test_library) quiet_cmd_rustdoc_test = RUSTDOC T $< @@ -162,7 +167,7 @@ quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $< mkdir -p $(objtree)/$(obj)/test/doctests/kernel; \ OBJTREE=$(abspath $(objtree)) \ 
$(RUSTDOC) --test $(rust_flags) \ - -L$(objtree)/$(obj) --extern alloc --extern kernel \ + -L$(objtree)/$(obj) --extern ffi --extern kernel \ --extern build_error --extern macros \ --extern bindings --extern uapi \ --no-run --crate-name kernel -Zunstable-options \ @@ -192,19 +197,20 @@ quiet_cmd_rustc_test = RUSTC T $< rusttest: rusttest-macros rusttest-kernel -rusttest-macros: private rustc_target_flags = --extern proc_macro +rusttest-macros: private rustc_target_flags = --extern proc_macro \ + --extern macros --extern kernel rusttest-macros: private rustdoc_test_target_flags = --crate-type proc-macro -rusttest-macros: $(src)/macros/lib.rs FORCE +rusttest-macros: $(src)/macros/lib.rs \ + rusttestlib-macros rusttestlib-kernel FORCE +$(call if_changed,rustc_test) +$(call if_changed,rustdoc_test) -rusttest-kernel: private rustc_target_flags = --extern alloc \ +rusttest-kernel: private rustc_target_flags = --extern ffi \ --extern build_error --extern macros --extern bindings --extern uapi -rusttest-kernel: $(src)/kernel/lib.rs \ +rusttest-kernel: $(src)/kernel/lib.rs rusttestlib-ffi rusttestlib-kernel \ rusttestlib-build_error rusttestlib-macros rusttestlib-bindings \ rusttestlib-uapi FORCE +$(call if_changed,rustc_test) - +$(call if_changed,rustc_test_library) ifdef CONFIG_CC_IS_CLANG bindgen_c_flags = $(c_flags) @@ -221,7 +227,8 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \ -mfunction-return=thunk-extern -mrecord-mcount -mabi=lp64 \ -mindirect-branch-cs-prefix -mstack-protector-guard% -mtraceback=no \ -mno-pointers-to-nested-functions -mno-string \ - -mno-strict-align -mstrict-align \ + -mno-strict-align -mstrict-align -mdirect-extern-access \ + -mexplicit-relocs -mno-check-zero-division \ -fconserve-stack -falign-jumps=% -falign-loops=% \ -femit-struct-debug-baseonly -fno-ipa-cp-clone -fno-ipa-sra \ -fno-partial-inlining -fplugin-arg-arm_ssp_per_task_plugin-% \ @@ -235,6 +242,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \ # Derived from `scripts/Makefile.clang`. BINDGEN_TARGET_x86 := x86_64-linux-gnu BINDGEN_TARGET_arm64 := aarch64-linux-gnu +BINDGEN_TARGET_loongarch := loongarch64-linux-gnusf BINDGEN_TARGET := $(BINDGEN_TARGET_$(SRCARCH)) # All warnings are inhibited since GCC builds are very experimental, @@ -266,7 +274,11 @@ else bindgen_c_flags_lto = $(bindgen_c_flags) endif -bindgen_c_flags_final = $(bindgen_c_flags_lto) -D__BINDGEN__ +# `-fno-builtin` is passed to avoid `bindgen` from using `clang` builtin +# prototypes for functions like `memcpy` -- if this flag is not passed, +# `bindgen`-generated prototypes use `c_ulong` or `c_uint` depending on +# architecture instead of generating `usize`. +bindgen_c_flags_final = $(bindgen_c_flags_lto) -fno-builtin -D__BINDGEN__ # Each `bindgen` release may upgrade the list of Rust target versions. By # default, the highest stable release in their list is used. 
Thus we need to set @@ -284,7 +296,7 @@ bindgen_c_flags_final = $(bindgen_c_flags_lto) -D__BINDGEN__ quiet_cmd_bindgen = BINDGEN $@ cmd_bindgen = \ $(BINDGEN) $< $(bindgen_target_flags) --rust-target 1.68 \ - --use-core --with-derive-default --ctypes-prefix core::ffi --no-layout-tests \ + --use-core --with-derive-default --ctypes-prefix ffi --no-layout-tests \ --no-debug '.*' --enable-function-attribute-detection \ -o $@ -- $(bindgen_c_flags_final) -DMODULE \ $(bindgen_target_cflags) $(bindgen_target_extra) @@ -325,9 +337,6 @@ quiet_cmd_exports = EXPORTS $@ $(obj)/exports_core_generated.h: $(obj)/core.o FORCE $(call if_changed,exports) -$(obj)/exports_alloc_generated.h: $(obj)/alloc.o FORCE - $(call if_changed,exports) - # Even though Rust kernel modules should never use the bindings directly, # symbols from the `bindings` crate and the C helpers need to be exported # because Rust generics and inlined functions may not get their code generated @@ -374,7 +383,7 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L rust-analyzer: $(Q)$(srctree)/scripts/generate_rust_analyzer.py \ - --cfgs='core=$(core-cfgs)' --cfgs='alloc=$(alloc-cfgs)' \ + --cfgs='core=$(core-cfgs)' \ $(realpath $(srctree)) $(realpath $(objtree)) \ $(rustc_sysroot) $(RUST_LIB_SRC) $(KBUILD_EXTMOD) > \ $(if $(KBUILD_EXTMOD),$(extmod_prefix),$(objtree))/rust-project.json @@ -412,29 +421,28 @@ $(obj)/compiler_builtins.o: private rustc_objcopy = -w -W '__*' $(obj)/compiler_builtins.o: $(src)/compiler_builtins.rs $(obj)/core.o FORCE +$(call if_changed_rule,rustc_library) -$(obj)/alloc.o: private skip_clippy = 1 -$(obj)/alloc.o: private skip_flags = -Wunreachable_pub -$(obj)/alloc.o: private rustc_target_flags = $(alloc-cfgs) -$(obj)/alloc.o: $(RUST_LIB_SRC)/alloc/src/lib.rs $(obj)/compiler_builtins.o FORCE +$(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE +$(call if_changed_rule,rustc_library) -$(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE +$(obj)/ffi.o: $(src)/ffi.rs $(obj)/compiler_builtins.o FORCE +$(call if_changed_rule,rustc_library) +$(obj)/bindings.o: private rustc_target_flags = --extern ffi $(obj)/bindings.o: $(src)/bindings/lib.rs \ - $(obj)/compiler_builtins.o \ + $(obj)/ffi.o \ $(obj)/bindings/bindings_generated.rs \ $(obj)/bindings/bindings_helpers_generated.rs FORCE +$(call if_changed_rule,rustc_library) +$(obj)/uapi.o: private rustc_target_flags = --extern ffi $(obj)/uapi.o: $(src)/uapi/lib.rs \ - $(obj)/compiler_builtins.o \ + $(obj)/ffi.o \ $(obj)/uapi/uapi_generated.rs FORCE +$(call if_changed_rule,rustc_library) -$(obj)/kernel.o: private rustc_target_flags = --extern alloc \ +$(obj)/kernel.o: private rustc_target_flags = --extern ffi \ --extern build_error --extern macros --extern bindings --extern uapi -$(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/alloc.o $(obj)/build_error.o \ +$(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/build_error.o \ $(obj)/libmacros.so $(obj)/bindings.o $(obj)/uapi.o FORCE +$(call if_changed_rule,rustc_library) diff --git a/rust/bindgen_parameters b/rust/bindgen_parameters index b7c7483123b7a..0f96af8b9a7fe 100644 --- a/rust/bindgen_parameters +++ b/rust/bindgen_parameters @@ -1,5 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 +# We want to map these types to `isize`/`usize` manually, instead of +# define them as `int`/`long` depending on platform bitwidth. 
+--blocklist-type __kernel_s?size_t +--blocklist-type __kernel_ptrdiff_t + --opaque-type xregs_state --opaque-type desc_struct --opaque-type arch_lbr_state diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h index ae82e9c941afa..a80783fcbe042 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -31,4 +31,5 @@ const gfp_t RUST_CONST_HELPER_GFP_KERNEL_ACCOUNT = GFP_KERNEL_ACCOUNT; const gfp_t RUST_CONST_HELPER_GFP_NOWAIT = GFP_NOWAIT; const gfp_t RUST_CONST_HELPER___GFP_ZERO = __GFP_ZERO; const gfp_t RUST_CONST_HELPER___GFP_HIGHMEM = ___GFP_HIGHMEM; +const gfp_t RUST_CONST_HELPER___GFP_NOWARN = ___GFP_NOWARN; const blk_features_t RUST_CONST_HELPER_BLK_FEAT_ROTATIONAL = BLK_FEAT_ROTATIONAL; diff --git a/rust/bindings/lib.rs b/rust/bindings/lib.rs index 93a1a3fc97bc9..014af0d1fc70c 100644 --- a/rust/bindings/lib.rs +++ b/rust/bindings/lib.rs @@ -25,7 +25,13 @@ )] #[allow(dead_code)] +#[allow(clippy::undocumented_unsafe_blocks)] mod bindings_raw { + // Manual definition for blocklisted types. + type __kernel_size_t = usize; + type __kernel_ssize_t = isize; + type __kernel_ptrdiff_t = isize; + // Use glob import here to expose all helpers. // Symbols defined within the module will take precedence to the glob import. pub use super::bindings_helper::*; diff --git a/rust/exports.c b/rust/exports.c index e5695f3b45b7a..82a037381798d 100644 --- a/rust/exports.c +++ b/rust/exports.c @@ -16,7 +16,6 @@ #define EXPORT_SYMBOL_RUST_GPL(sym) extern int sym; EXPORT_SYMBOL_GPL(sym) #include "exports_core_generated.h" -#include "exports_alloc_generated.h" #include "exports_helpers_generated.h" #include "exports_bindings_generated.h" #include "exports_kernel_generated.h" diff --git a/rust/ffi.rs b/rust/ffi.rs new file mode 100644 index 0000000000000..584f75b49862b --- /dev/null +++ b/rust/ffi.rs @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Foreign function interface (FFI) types. +//! +//! This crate provides mapping from C primitive types to Rust ones. +//! +//! The Rust [`core`] crate provides [`core::ffi`], which maps integer types to the platform default +//! C ABI. The kernel does not use [`core::ffi`], so it can customise the mapping that deviates from +//! the platform default. + +#![no_std] + +macro_rules! alias { + ($($name:ident = $ty:ty;)*) => {$( + #[allow(non_camel_case_types, missing_docs)] + pub type $name = $ty; + + // Check size compatibility with `core`. + const _: () = assert!( + core::mem::size_of::<$name>() == core::mem::size_of::() + ); + )*} +} + +alias! { + // `core::ffi::c_char` is either `i8` or `u8` depending on architecture. In the kernel, we use + // `-funsigned-char` so it's always mapped to `u8`. + c_char = u8; + + c_schar = i8; + c_uchar = u8; + + c_short = i16; + c_ushort = u16; + + c_int = i32; + c_uint = u32; + + // In the kernel, `intptr_t` is defined to be `long` in all platforms, so we can map the type to + // `isize`. 
+ c_long = isize; + c_ulong = usize; + + c_longlong = i64; + c_ulonglong = u64; +} + +pub use core::ffi::c_void; diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c index 30f40149f3a96..20a0c69d5cc7b 100644 --- a/rust/helpers/helpers.c +++ b/rust/helpers/helpers.c @@ -22,5 +22,6 @@ #include "spinlock.c" #include "task.c" #include "uaccess.c" +#include "vmalloc.c" #include "wait.c" #include "workqueue.c" diff --git a/rust/helpers/slab.c b/rust/helpers/slab.c index f043e087f9d66..a842bfbddcba9 100644 --- a/rust/helpers/slab.c +++ b/rust/helpers/slab.c @@ -7,3 +7,9 @@ rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags) { return krealloc(objp, new_size, flags); } + +void * __must_check __realloc_size(2) +rust_helper_kvrealloc(const void *p, size_t size, gfp_t flags) +{ + return kvrealloc(p, size, flags); +} diff --git a/rust/helpers/vmalloc.c b/rust/helpers/vmalloc.c new file mode 100644 index 0000000000000..80d34501bbc01 --- /dev/null +++ b/rust/helpers/vmalloc.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +void * __must_check __realloc_size(2) +rust_helper_vrealloc(const void *p, size_t size, gfp_t flags) +{ + return vrealloc(p, size, flags); +} diff --git a/rust/kernel/alloc.rs b/rust/kernel/alloc.rs index 1966bd4070174..f2f7f3a53d298 100644 --- a/rust/kernel/alloc.rs +++ b/rust/kernel/alloc.rs @@ -1,23 +1,41 @@ // SPDX-License-Identifier: GPL-2.0 -//! Extensions to the [`alloc`] crate. +//! Implementation of the kernel's memory allocation infrastructure. -#[cfg(not(test))] -#[cfg(not(testlib))] -mod allocator; -pub mod box_ext; -pub mod vec_ext; +#[cfg(not(any(test, testlib)))] +pub mod allocator; +pub mod kbox; +pub mod kvec; +pub mod layout; + +#[cfg(any(test, testlib))] +pub mod allocator_test; + +#[cfg(any(test, testlib))] +pub use self::allocator_test as allocator; + +pub use self::kbox::Box; +pub use self::kbox::KBox; +pub use self::kbox::KVBox; +pub use self::kbox::VBox; + +pub use self::kvec::IntoIter; +pub use self::kvec::KVVec; +pub use self::kvec::KVec; +pub use self::kvec::VVec; +pub use self::kvec::Vec; /// Indicates an allocation error. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct AllocError; +use core::{alloc::Layout, ptr::NonNull}; /// Flags to be used when allocating memory. /// /// They can be combined with the operators `|`, `&`, and `!`. /// /// Values can be used from the [`flags`] module. -#[derive(Clone, Copy)] +#[derive(Clone, Copy, PartialEq)] pub struct Flags(u32); impl Flags { @@ -25,6 +43,11 @@ impl Flags { pub(crate) fn as_raw(self) -> u32 { self.0 } + + /// Check whether `flags` is contained in `self`. + pub fn contains(self, flags: Flags) -> bool { + (self & flags) == flags + } } impl core::ops::BitOr for Flags { @@ -85,4 +108,117 @@ pub mod flags { /// use any filesystem callback. It is very likely to fail to allocate memory, even for very /// small allocations. pub const GFP_NOWAIT: Flags = Flags(bindings::GFP_NOWAIT); + + /// Suppresses allocation failure reports. + /// + /// This is normally or'd with other flags. + pub const __GFP_NOWARN: Flags = Flags(bindings::__GFP_NOWARN); +} + +/// The kernel's [`Allocator`] trait. +/// +/// An implementation of [`Allocator`] can allocate, re-allocate and free memory buffers described +/// via [`Layout`]. +/// +/// [`Allocator`] is designed to be implemented as a ZST; [`Allocator`] functions do not operate on +/// an object instance. 
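Since an `Allocator` here is a ZST, callers reach it through associated functions rather than through an allocator instance. A minimal sketch of that calling convention, using the `Kmalloc` allocator and `GFP_KERNEL` flag defined elsewhere in this series (the wrapper function name is purely illustrative):

```
use core::alloc::Layout;
use kernel::alloc::{allocator::Kmalloc, flags::GFP_KERNEL, AllocError, Allocator};

fn alloc_free_example() -> Result<(), AllocError> {
    let layout = Layout::new::<u64>();

    // `Kmalloc` is a ZST, so allocation goes through an associated function.
    let ptr = Kmalloc::alloc(layout, GFP_KERNEL)?;

    // SAFETY: `ptr` was returned by `Kmalloc::alloc` above with this exact
    // `layout` and has not been freed yet.
    unsafe { Kmalloc::free(ptr.cast(), layout) };

    Ok(())
}
```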
+/// +/// In order to be able to support `#[derive(SmartPointer)]` later on, we need to avoid a design +/// that requires an `Allocator` to be instantiated, hence its functions must not contain any kind +/// of `self` parameter. +/// +/// # Safety +/// +/// - A memory allocation returned from an allocator must remain valid until it is explicitly freed. +/// +/// - Any pointer to a valid memory allocation must be valid to be passed to any other [`Allocator`] +/// function of the same type. +/// +/// - Implementers must ensure that all trait functions abide by the guarantees documented in the +/// `# Guarantees` sections. +pub unsafe trait Allocator { + /// Allocate memory based on `layout` and `flags`. + /// + /// On success, returns a buffer represented as `NonNull<[u8]>` that satisfies the layout + /// constraints (i.e. minimum size and alignment as specified by `layout`). + /// + /// This function is equivalent to `realloc` when called with `None`. + /// + /// # Guarantees + /// + /// When the return value is `Ok(ptr)`, then `ptr` is + /// - valid for reads and writes for `layout.size()` bytes, until it is passed to + /// [`Allocator::free`] or [`Allocator::realloc`], + /// - aligned to `layout.align()`, + /// + /// Additionally, `Flags` are honored as documented in + /// . + fn alloc(layout: Layout, flags: Flags) -> Result, AllocError> { + // SAFETY: Passing `None` to `realloc` is valid by its safety requirements and asks for a + // new memory allocation. + unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags) } + } + + /// Re-allocate an existing memory allocation to satisfy the requested `layout`. + /// + /// If the requested size is zero, `realloc` behaves equivalent to `free`. + /// + /// If the requested size is larger than the size of the existing allocation, a successful call + /// to `realloc` guarantees that the new or grown buffer has at least `Layout::size` bytes, but + /// may also be larger. + /// + /// If the requested size is smaller than the size of the existing allocation, `realloc` may or + /// may not shrink the buffer; this is implementation specific to the allocator. + /// + /// On allocation failure, the existing buffer, if any, remains valid. + /// + /// The buffer is represented as `NonNull<[u8]>`. + /// + /// # Safety + /// + /// - If `ptr == Some(p)`, then `p` must point to an existing and valid memory allocation + /// created by this [`Allocator`]; if `old_layout` is zero-sized `p` does not need to be a + /// pointer returned by this [`Allocator`]. + /// - `ptr` is allowed to be `None`; in this case a new memory allocation is created and + /// `old_layout` is ignored. + /// - `old_layout` must match the `Layout` the allocation has been created with. + /// + /// # Guarantees + /// + /// This function has the same guarantees as [`Allocator::alloc`]. When `ptr == Some(p)`, then + /// it additionally guarantees that: + /// - the contents of the memory pointed to by `p` are preserved up to the lesser of the new + /// and old size, i.e. `ret_ptr[0..min(layout.size(), old_layout.size())] == + /// p[0..min(layout.size(), old_layout.size())]`. + /// - when the return value is `Err(AllocError)`, then `ptr` is still valid. + unsafe fn realloc( + ptr: Option>, + layout: Layout, + old_layout: Layout, + flags: Flags, + ) -> Result, AllocError>; + + /// Free an existing memory allocation. 
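A small sketch of the grow path described above, again using the `Kmalloc` implementation from this series; `grow_example` is a hypothetical helper, not part of the patch:

```
use core::alloc::Layout;
use kernel::alloc::{allocator::Kmalloc, flags::GFP_KERNEL, AllocError, Allocator};

fn grow_example() -> Result<(), AllocError> {
    let old_layout = Layout::array::<u8>(64).map_err(|_| AllocError)?;
    let buf = Kmalloc::alloc(old_layout, GFP_KERNEL)?;

    // Grow the existing allocation; the first 64 bytes are preserved.
    let new_layout = Layout::array::<u8>(128).map_err(|_| AllocError)?;
    // SAFETY: `buf` was allocated by `Kmalloc` with `old_layout` and is still valid.
    let buf = unsafe { Kmalloc::realloc(Some(buf.cast()), new_layout, old_layout, GFP_KERNEL)? };

    // SAFETY: `buf` stems from the `realloc` call above with `new_layout`.
    unsafe { Kmalloc::free(buf.cast(), new_layout) };
    Ok(())
}
```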
+ /// + /// # Safety + /// + /// - `ptr` must point to an existing and valid memory allocation created by this [`Allocator`]; + /// if `old_layout` is zero-sized `p` does not need to be a pointer returned by this + /// [`Allocator`]. + /// - `layout` must match the `Layout` the allocation has been created with. + /// - The memory allocation at `ptr` must never again be read from or written to. + unsafe fn free(ptr: NonNull, layout: Layout) { + // SAFETY: The caller guarantees that `ptr` points at a valid allocation created by this + // allocator. We are passing a `Layout` with the smallest possible alignment, so it is + // smaller than or equal to the alignment previously used with this allocation. + let _ = unsafe { Self::realloc(Some(ptr), Layout::new::<()>(), layout, Flags(0)) }; + } +} + +/// Returns a properly aligned dangling pointer from the given `layout`. +pub(crate) fn dangling_from_layout(layout: Layout) -> NonNull { + let ptr = layout.align() as *mut u8; + + // SAFETY: `layout.align()` (and hence `ptr`) is guaranteed to be non-zero. + unsafe { NonNull::new_unchecked(ptr) } } diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs index e6ea601f38c6d..439985e29fbc0 100644 --- a/rust/kernel/alloc/allocator.rs +++ b/rust/kernel/alloc/allocator.rs @@ -1,74 +1,188 @@ // SPDX-License-Identifier: GPL-2.0 //! Allocator support. +//! +//! Documentation for the kernel's memory allocators can found in the "Memory Allocation Guide" +//! linked below. For instance, this includes the concept of "get free page" (GFP) flags and the +//! typical application of the different kernel allocators. +//! +//! Reference: -use super::{flags::*, Flags}; -use core::alloc::{GlobalAlloc, Layout}; +use super::Flags; +use core::alloc::Layout; use core::ptr; +use core::ptr::NonNull; -struct KernelAllocator; +use crate::alloc::{AllocError, Allocator}; +use crate::bindings; +use crate::pr_warn; -/// Calls `krealloc` with a proper size to alloc a new object aligned to `new_layout`'s alignment. +/// The contiguous kernel allocator. /// -/// # Safety +/// `Kmalloc` is typically used for physically contiguous allocations up to page size, but also +/// supports larger allocations up to `bindings::KMALLOC_MAX_SIZE`, which is hardware specific. /// -/// - `ptr` can be either null or a pointer which has been allocated by this allocator. -/// - `new_layout` must have a non-zero size. -pub(crate) unsafe fn krealloc_aligned(ptr: *mut u8, new_layout: Layout, flags: Flags) -> *mut u8 { +/// For more details see [self]. +pub struct Kmalloc; + +/// The virtually contiguous kernel allocator. +/// +/// `Vmalloc` allocates pages from the page level allocator and maps them into the contiguous kernel +/// virtual space. It is typically used for large allocations. The memory allocated with this +/// allocator is not physically contiguous. +/// +/// For more details see [self]. +pub struct Vmalloc; + +/// The kvmalloc kernel allocator. +/// +/// `KVmalloc` attempts to allocate memory with `Kmalloc` first, but falls back to `Vmalloc` upon +/// failure. This allocator is typically used when the size for the requested allocation is not +/// known and may exceed the capabilities of `Kmalloc`. +/// +/// For more details see [self]. +pub struct KVmalloc; + +/// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment. +fn aligned_size(new_layout: Layout) -> usize { // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first. 
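To make the padding comment above concrete, a tiny core-only illustration of the size-smaller-than-alignment case:

```
use core::alloc::Layout;

// A caller may legitimately build a layout whose size is smaller than its alignment.
let layout = Layout::from_size_align(1, 8).unwrap();
assert_eq!(layout.size(), 1);

// Padding rounds the size up to a multiple of the alignment, which is what
// `aligned_size()` relies on before handing the size to `krealloc()`.
assert_eq!(layout.pad_to_align().size(), 8);
```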
let layout = new_layout.pad_to_align(); // Note that `layout.size()` (after padding) is guaranteed to be a multiple of `layout.align()` // which together with the slab guarantees means the `krealloc` will return a properly aligned // object (see comments in `kmalloc()` for more information). - let size = layout.size(); - - // SAFETY: - // - `ptr` is either null or a pointer returned from a previous `k{re}alloc()` by the - // function safety requirement. - // - `size` is greater than 0 since it's from `layout.size()` (which cannot be zero according - // to the function safety requirement) - unsafe { bindings::krealloc(ptr as *const core::ffi::c_void, size, flags.0) as *mut u8 } + layout.size() } -unsafe impl GlobalAlloc for KernelAllocator { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety - // requirement. - unsafe { krealloc_aligned(ptr::null_mut(), layout, GFP_KERNEL) } - } +/// # Invariants +/// +/// One of the following: `krealloc`, `vrealloc`, `kvrealloc`. +struct ReallocFunc( + unsafe extern "C" fn(*const crate::ffi::c_void, usize, u32) -> *mut crate::ffi::c_void, +); - unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { - unsafe { - bindings::kfree(ptr as *const core::ffi::c_void); - } - } +impl ReallocFunc { + // INVARIANT: `krealloc` satisfies the type invariants. + const KREALLOC: Self = Self(bindings::krealloc); - unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { - // SAFETY: - // - `new_size`, when rounded up to the nearest multiple of `layout.align()`, will not - // overflow `isize` by the function safety requirement. - // - `layout.align()` is a proper alignment (i.e. not zero and must be a power of two). - let layout = unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) }; + // INVARIANT: `vrealloc` satisfies the type invariants. + const VREALLOC: Self = Self(bindings::vrealloc); + + // INVARIANT: `kvrealloc` satisfies the type invariants. + const KVREALLOC: Self = Self(bindings::kvrealloc); + + /// # Safety + /// + /// This method has the same safety requirements as [`Allocator::realloc`]. + /// + /// # Guarantees + /// + /// This method has the same guarantees as `Allocator::realloc`. Additionally + /// - it accepts any pointer to a valid memory allocation allocated by this function. + /// - memory allocated by this function remains valid until it is passed to this function. + unsafe fn call( + &self, + ptr: Option>, + layout: Layout, + old_layout: Layout, + flags: Flags, + ) -> Result, AllocError> { + let size = aligned_size(layout); + let ptr = match ptr { + Some(ptr) => { + if old_layout.size() == 0 { + ptr::null() + } else { + ptr.as_ptr() + } + } + None => ptr::null(), + }; // SAFETY: - // - `ptr` is either null or a pointer allocated by this allocator by the function safety - // requirement. - // - the size of `layout` is not zero because `new_size` is not zero by the function safety - // requirement. - unsafe { krealloc_aligned(ptr, layout, GFP_KERNEL) } + // - `self.0` is one of `krealloc`, `vrealloc`, `kvrealloc` and thus only requires that + // `ptr` is NULL or valid. + // - `ptr` is either NULL or valid by the safety requirements of this function. + // + // GUARANTEE: + // - `self.0` is one of `krealloc`, `vrealloc`, `kvrealloc`. + // - Those functions provide the guarantees of this function. + let raw_ptr = unsafe { + // If `size == 0` and `ptr != NULL` the memory behind the pointer is freed. 
+ self.0(ptr.cast(), size, flags.0).cast() + }; + + let ptr = if size == 0 { + crate::alloc::dangling_from_layout(layout) + } else { + NonNull::new(raw_ptr).ok_or(AllocError)? + }; + + Ok(NonNull::slice_from_raw_parts(ptr, size)) + } +} + +// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that +// - memory remains valid until it is explicitly freed, +// - passing a pointer to a valid memory allocation is OK, +// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same. +unsafe impl Allocator for Kmalloc { + #[inline] + unsafe fn realloc( + ptr: Option>, + layout: Layout, + old_layout: Layout, + flags: Flags, + ) -> Result, AllocError> { + // SAFETY: `ReallocFunc::call` has the same safety requirements as `Allocator::realloc`. + unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags) } } +} + +// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that +// - memory remains valid until it is explicitly freed, +// - passing a pointer to a valid memory allocation is OK, +// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same. +unsafe impl Allocator for Vmalloc { + #[inline] + unsafe fn realloc( + ptr: Option>, + layout: Layout, + old_layout: Layout, + flags: Flags, + ) -> Result, AllocError> { + // TODO: Support alignments larger than PAGE_SIZE. + if layout.align() > bindings::PAGE_SIZE { + pr_warn!("Vmalloc does not support alignments larger than PAGE_SIZE yet.\n"); + return Err(AllocError); + } - unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { - // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety - // requirement. - unsafe { krealloc_aligned(ptr::null_mut(), layout, GFP_KERNEL | __GFP_ZERO) } + // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously + // allocated with this `Allocator`. + unsafe { ReallocFunc::VREALLOC.call(ptr, layout, old_layout, flags) } } } -#[global_allocator] -static ALLOCATOR: KernelAllocator = KernelAllocator; +// SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that +// - memory remains valid until it is explicitly freed, +// - passing a pointer to a valid memory allocation is OK, +// - `realloc` satisfies the guarantees, since `ReallocFunc::call` has the same. +unsafe impl Allocator for KVmalloc { + #[inline] + unsafe fn realloc( + ptr: Option>, + layout: Layout, + old_layout: Layout, + flags: Flags, + ) -> Result, AllocError> { + // TODO: Support alignments larger than PAGE_SIZE. + if layout.align() > bindings::PAGE_SIZE { + pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n"); + return Err(AllocError); + } -// See . -#[no_mangle] -static __rust_no_alloc_shim_is_unstable: u8 = 0; + // SAFETY: If not `None`, `ptr` is guaranteed to point to valid memory, which was previously + // allocated with this `Allocator`. + unsafe { ReallocFunc::KVREALLOC.call(ptr, layout, old_layout, flags) } + } +} diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs new file mode 100644 index 0000000000000..c37d4c0c64e9f --- /dev/null +++ b/rust/kernel/alloc/allocator_test.rs @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! So far the kernel's `Box` and `Vec` types can't be used by userspace test cases, since all users +//! of those types (e.g. `CString`) use kernel allocators for instantiation. +//! +//! In order to allow userspace test cases to make use of such types as well, implement the +//! 
`Cmalloc` allocator within the allocator_test module and type alias all kernel allocators to +//! `Cmalloc`. The `Cmalloc` allocator uses libc's `realloc()` function as allocator backend. + +#![allow(missing_docs)] + +use super::{flags::*, AllocError, Allocator, Flags}; +use core::alloc::Layout; +use core::cmp; +use core::ptr; +use core::ptr::NonNull; + +/// The userspace allocator based on libc. +pub struct Cmalloc; + +pub type Kmalloc = Cmalloc; +pub type Vmalloc = Kmalloc; +pub type KVmalloc = Kmalloc; + +extern "C" { + #[link_name = "aligned_alloc"] + fn libc_aligned_alloc(align: usize, size: usize) -> *mut crate::ffi::c_void; + + #[link_name = "free"] + fn libc_free(ptr: *mut crate::ffi::c_void); +} + +// SAFETY: +// - memory remains valid until it is explicitly freed, +// - passing a pointer to a valid memory allocation created by this `Allocator` is always OK, +// - `realloc` provides the guarantees as provided in the `# Guarantees` section. +unsafe impl Allocator for Cmalloc { + unsafe fn realloc( + ptr: Option>, + layout: Layout, + old_layout: Layout, + flags: Flags, + ) -> Result, AllocError> { + let src = match ptr { + Some(src) => { + if old_layout.size() == 0 { + ptr::null_mut() + } else { + src.as_ptr() + } + } + None => ptr::null_mut(), + }; + + if layout.size() == 0 { + // SAFETY: `src` is either NULL or was previously allocated with this `Allocator` + unsafe { libc_free(src.cast()) }; + + return Ok(NonNull::slice_from_raw_parts( + crate::alloc::dangling_from_layout(layout), + 0, + )); + } + + // ISO C (ISO/IEC 9899:2011) defines `aligned_alloc`: + // + // > The value of alignment shall be a valid alignment supported by the implementation + // [...]. + // + // As an example of the "supported by the implementation" requirement, POSIX.1-2001 (IEEE + // 1003.1-2001) defines `posix_memalign`: + // + // > The value of alignment shall be a power of two multiple of sizeof (void *). + // + // and POSIX-based implementations of `aligned_alloc` inherit this requirement. At the time + // of writing, this is known to be the case on macOS (but not in glibc). + // + // Satisfy the stricter requirement to avoid spurious test failures on some platforms. + let min_align = core::mem::size_of::<*const crate::ffi::c_void>(); + let layout = layout.align_to(min_align).map_err(|_| AllocError)?; + let layout = layout.pad_to_align(); + + // SAFETY: Returns either NULL or a pointer to a memory allocation that satisfies or + // exceeds the given size and alignment requirements. + let dst = unsafe { libc_aligned_alloc(layout.align(), layout.size()) } as *mut u8; + let dst = NonNull::new(dst).ok_or(AllocError)?; + + if flags.contains(__GFP_ZERO) { + // SAFETY: The preceding calls to `libc_aligned_alloc` and `NonNull::new` + // guarantee that `dst` points to memory of at least `layout.size()` bytes. + unsafe { dst.as_ptr().write_bytes(0, layout.size()) }; + } + + if !src.is_null() { + // SAFETY: + // - `src` has previously been allocated with this `Allocator`; `dst` has just been + // newly allocated, hence the memory regions do not overlap. 
+ // - both` src` and `dst` are properly aligned and valid for reads and writes + unsafe { + ptr::copy_nonoverlapping( + src, + dst.as_ptr(), + cmp::min(layout.size(), old_layout.size()), + ) + }; + } + + // SAFETY: `src` is either NULL or was previously allocated with this `Allocator` + unsafe { libc_free(src.cast()) }; + + Ok(NonNull::slice_from_raw_parts(dst, layout.size())) + } +} diff --git a/rust/kernel/alloc/box_ext.rs b/rust/kernel/alloc/box_ext.rs deleted file mode 100644 index 7009ad78d4e08..0000000000000 --- a/rust/kernel/alloc/box_ext.rs +++ /dev/null @@ -1,89 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -//! Extensions to [`Box`] for fallible allocations. - -use super::{AllocError, Flags}; -use alloc::boxed::Box; -use core::{mem::MaybeUninit, ptr, result::Result}; - -/// Extensions to [`Box`]. -pub trait BoxExt: Sized { - /// Allocates a new box. - /// - /// The allocation may fail, in which case an error is returned. - fn new(x: T, flags: Flags) -> Result; - - /// Allocates a new uninitialised box. - /// - /// The allocation may fail, in which case an error is returned. - fn new_uninit(flags: Flags) -> Result>, AllocError>; - - /// Drops the contents, but keeps the allocation. - /// - /// # Examples - /// - /// ``` - /// use kernel::alloc::{flags, box_ext::BoxExt}; - /// let value = Box::new([0; 32], flags::GFP_KERNEL)?; - /// assert_eq!(*value, [0; 32]); - /// let mut value = Box::drop_contents(value); - /// // Now we can re-use `value`: - /// value.write([1; 32]); - /// // SAFETY: We just wrote to it. - /// let value = unsafe { value.assume_init() }; - /// assert_eq!(*value, [1; 32]); - /// # Ok::<(), Error>(()) - /// ``` - fn drop_contents(this: Self) -> Box>; -} - -impl BoxExt for Box { - fn new(x: T, flags: Flags) -> Result { - let mut b = >::new_uninit(flags)?; - b.write(x); - // SAFETY: We just wrote to it. - Ok(unsafe { b.assume_init() }) - } - - #[cfg(any(test, testlib))] - fn new_uninit(_flags: Flags) -> Result>, AllocError> { - Ok(Box::new_uninit()) - } - - #[cfg(not(any(test, testlib)))] - fn new_uninit(flags: Flags) -> Result>, AllocError> { - let ptr = if core::mem::size_of::>() == 0 { - core::ptr::NonNull::<_>::dangling().as_ptr() - } else { - let layout = core::alloc::Layout::new::>(); - - // SAFETY: Memory is being allocated (first arg is null). The only other source of - // safety issues is sleeping on atomic context, which is addressed by klint. Lastly, - // the type is not a SZT (checked above). - let ptr = - unsafe { super::allocator::krealloc_aligned(core::ptr::null_mut(), layout, flags) }; - if ptr.is_null() { - return Err(AllocError); - } - - ptr.cast::>() - }; - - // SAFETY: For non-zero-sized types, we allocate above using the global allocator. For - // zero-sized types, we use `NonNull::dangling`. - Ok(unsafe { Box::from_raw(ptr) }) - } - - fn drop_contents(this: Self) -> Box> { - let ptr = Box::into_raw(this); - // SAFETY: `ptr` is valid, because it came from `Box::into_raw`. - unsafe { ptr::drop_in_place(ptr) }; - - // CAST: `MaybeUninit` is a transparent wrapper of `T`. - let ptr = ptr.cast::>(); - - // SAFETY: `ptr` is valid for writes, because it came from `Box::into_raw` and it is valid for - // reads, since the pointer came from `Box::into_raw` and the type is `MaybeUninit`. - unsafe { Box::from_raw(ptr) } - } -} diff --git a/rust/kernel/alloc/kbox.rs b/rust/kernel/alloc/kbox.rs new file mode 100644 index 0000000000000..9ce414361c2c6 --- /dev/null +++ b/rust/kernel/alloc/kbox.rs @@ -0,0 +1,456 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! 
Implementation of [`Box`]. + +#[allow(unused_imports)] // Used in doc comments. +use super::allocator::{KVmalloc, Kmalloc, Vmalloc}; +use super::{AllocError, Allocator, Flags}; +use core::alloc::Layout; +use core::fmt; +use core::marker::PhantomData; +use core::mem::ManuallyDrop; +use core::mem::MaybeUninit; +use core::ops::{Deref, DerefMut}; +use core::pin::Pin; +use core::ptr::NonNull; +use core::result::Result; + +use crate::init::{InPlaceInit, InPlaceWrite, Init, PinInit}; +use crate::types::ForeignOwnable; + +/// The kernel's [`Box`] type -- a heap allocation for a single value of type `T`. +/// +/// This is the kernel's version of the Rust stdlib's `Box`. There are several differences, +/// for example no `noalias` attribute is emitted and partially moving out of a `Box` is not +/// supported. There are also several API differences, e.g. `Box` always requires an [`Allocator`] +/// implementation to be passed as generic, page [`Flags`] when allocating memory and all functions +/// that may allocate memory are fallible. +/// +/// `Box` works with any of the kernel's allocators, e.g. [`Kmalloc`], [`Vmalloc`] or [`KVmalloc`]. +/// There are aliases for `Box` with these allocators ([`KBox`], [`VBox`], [`KVBox`]). +/// +/// When dropping a [`Box`], the value is also dropped and the heap memory is automatically freed. +/// +/// # Examples +/// +/// ``` +/// let b = KBox::::new(24_u64, GFP_KERNEL)?; +/// +/// assert_eq!(*b, 24_u64); +/// # Ok::<(), Error>(()) +/// ``` +/// +/// ``` +/// # use kernel::bindings; +/// const SIZE: usize = bindings::KMALLOC_MAX_SIZE as usize + 1; +/// struct Huge([u8; SIZE]); +/// +/// assert!(KBox::::new_uninit(GFP_KERNEL | __GFP_NOWARN).is_err()); +/// ``` +/// +/// ``` +/// # use kernel::bindings; +/// const SIZE: usize = bindings::KMALLOC_MAX_SIZE as usize + 1; +/// struct Huge([u8; SIZE]); +/// +/// assert!(KVBox::::new_uninit(GFP_KERNEL).is_ok()); +/// ``` +/// +/// # Invariants +/// +/// `self.0` is always properly aligned and either points to memory allocated with `A` or, for +/// zero-sized types, is a dangling, well aligned pointer. +#[repr(transparent)] +pub struct Box(NonNull, PhantomData); + +/// Type alias for [`Box`] with a [`Kmalloc`] allocator. +/// +/// # Examples +/// +/// ``` +/// let b = KBox::new(24_u64, GFP_KERNEL)?; +/// +/// assert_eq!(*b, 24_u64); +/// # Ok::<(), Error>(()) +/// ``` +pub type KBox = Box; + +/// Type alias for [`Box`] with a [`Vmalloc`] allocator. +/// +/// # Examples +/// +/// ``` +/// let b = VBox::new(24_u64, GFP_KERNEL)?; +/// +/// assert_eq!(*b, 24_u64); +/// # Ok::<(), Error>(()) +/// ``` +pub type VBox = Box; + +/// Type alias for [`Box`] with a [`KVmalloc`] allocator. +/// +/// # Examples +/// +/// ``` +/// let b = KVBox::new(24_u64, GFP_KERNEL)?; +/// +/// assert_eq!(*b, 24_u64); +/// # Ok::<(), Error>(()) +/// ``` +pub type KVBox = Box; + +// SAFETY: `Box` is `Send` if `T` is `Send` because the `Box` owns a `T`. +unsafe impl Send for Box +where + T: Send + ?Sized, + A: Allocator, +{ +} + +// SAFETY: `Box` is `Sync` if `T` is `Sync` because the `Box` owns a `T`. +unsafe impl Sync for Box +where + T: Sync + ?Sized, + A: Allocator, +{ +} + +impl Box +where + T: ?Sized, + A: Allocator, +{ + /// Creates a new `Box` from a raw pointer. + /// + /// # Safety + /// + /// For non-ZSTs, `raw` must point at an allocation allocated with `A` that is sufficiently + /// aligned for and holds a valid `T`. The caller passes ownership of the allocation to the + /// `Box`. 
+ /// + /// For ZSTs, `raw` must be a dangling, well aligned pointer. + #[inline] + pub const unsafe fn from_raw(raw: *mut T) -> Self { + // INVARIANT: Validity of `raw` is guaranteed by the safety preconditions of this function. + // SAFETY: By the safety preconditions of this function, `raw` is not a NULL pointer. + Self(unsafe { NonNull::new_unchecked(raw) }, PhantomData) + } + + /// Consumes the `Box` and returns a raw pointer. + /// + /// This will not run the destructor of `T` and for non-ZSTs the allocation will stay alive + /// indefinitely. Use [`Box::from_raw`] to recover the [`Box`], drop the value and free the + /// allocation, if any. + /// + /// # Examples + /// + /// ``` + /// let x = KBox::new(24, GFP_KERNEL)?; + /// let ptr = KBox::into_raw(x); + /// // SAFETY: `ptr` comes from a previous call to `KBox::into_raw`. + /// let x = unsafe { KBox::from_raw(ptr) }; + /// + /// assert_eq!(*x, 24); + /// # Ok::<(), Error>(()) + /// ``` + #[inline] + pub fn into_raw(b: Self) -> *mut T { + ManuallyDrop::new(b).0.as_ptr() + } + + /// Consumes and leaks the `Box` and returns a mutable reference. + /// + /// See [`Box::into_raw`] for more details. + #[inline] + pub fn leak<'a>(b: Self) -> &'a mut T { + // SAFETY: `Box::into_raw` always returns a properly aligned and dereferenceable pointer + // which points to an initialized instance of `T`. + unsafe { &mut *Box::into_raw(b) } + } +} + +impl Box, A> +where + A: Allocator, +{ + /// Converts a `Box, A>` to a `Box`. + /// + /// It is undefined behavior to call this function while the value inside of `b` is not yet + /// fully initialized. + /// + /// # Safety + /// + /// Callers must ensure that the value inside of `b` is in an initialized state. + pub unsafe fn assume_init(self) -> Box { + let raw = Self::into_raw(self); + + // SAFETY: `raw` comes from a previous call to `Box::into_raw`. By the safety requirements + // of this function, the value inside the `Box` is in an initialized state. Hence, it is + // safe to reconstruct the `Box` as `Box`. + unsafe { Box::from_raw(raw.cast()) } + } + + /// Writes the value and converts to `Box`. + pub fn write(mut self, value: T) -> Box { + (*self).write(value); + + // SAFETY: We've just initialized `b`'s value. + unsafe { self.assume_init() } + } +} + +impl Box +where + A: Allocator, +{ + /// Creates a new `Box` and initializes its contents with `x`. + /// + /// New memory is allocated with `A`. The allocation may fail, in which case an error is + /// returned. For ZSTs no memory is allocated. + pub fn new(x: T, flags: Flags) -> Result { + let b = Self::new_uninit(flags)?; + Ok(Box::write(b, x)) + } + + /// Creates a new `Box` with uninitialized contents. + /// + /// New memory is allocated with `A`. The allocation may fail, in which case an error is + /// returned. For ZSTs no memory is allocated. + /// + /// # Examples + /// + /// ``` + /// let b = KBox::::new_uninit(GFP_KERNEL)?; + /// let b = KBox::write(b, 24); + /// + /// assert_eq!(*b, 24_u64); + /// # Ok::<(), Error>(()) + /// ``` + pub fn new_uninit(flags: Flags) -> Result, A>, AllocError> { + let layout = Layout::new::>(); + let ptr = A::alloc(layout, flags)?; + + // INVARIANT: `ptr` is either a dangling pointer or points to memory allocated with `A`, + // which is sufficient in size and alignment for storing a `T`. + Ok(Box(ptr.cast(), PhantomData)) + } + + /// Constructs a new `Pin>`. If `T` does not implement [`Unpin`], then `x` will be + /// pinned in memory and can't be moved. 
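A short usage sketch for the pinning constructor documented above, in the same style as the surrounding doc-tests (assuming `KBox` and `GFP_KERNEL` from this series; the wrapper function is illustrative only):

```
use core::pin::Pin;
use kernel::alloc::{flags::GFP_KERNEL, AllocError, KBox};

fn pin_example() -> Result<(), AllocError> {
    // The value lives on the heap and, for `!Unpin` types, can no longer be moved.
    let p: Pin<KBox<u32>> = KBox::pin(5, GFP_KERNEL)?;
    assert_eq!(*p, 5);
    Ok(())
}
```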
+ #[inline] + pub fn pin(x: T, flags: Flags) -> Result>, AllocError> + where + A: 'static, + { + Ok(Self::new(x, flags)?.into()) + } + + /// Forgets the contents (does not run the destructor), but keeps the allocation. + fn forget_contents(this: Self) -> Box, A> { + let ptr = Self::into_raw(this); + + // SAFETY: `ptr` is valid, because it came from `Box::into_raw`. + unsafe { Box::from_raw(ptr.cast()) } + } + + /// Drops the contents, but keeps the allocation. + /// + /// # Examples + /// + /// ``` + /// let value = KBox::new([0; 32], GFP_KERNEL)?; + /// assert_eq!(*value, [0; 32]); + /// let value = KBox::drop_contents(value); + /// // Now we can re-use `value`: + /// let value = KBox::write(value, [1; 32]); + /// assert_eq!(*value, [1; 32]); + /// # Ok::<(), Error>(()) + /// ``` + pub fn drop_contents(this: Self) -> Box, A> { + let ptr = this.0.as_ptr(); + + // SAFETY: `ptr` is valid, because it came from `this`. After this call we never access the + // value stored in `this` again. + unsafe { core::ptr::drop_in_place(ptr) }; + + Self::forget_contents(this) + } + + /// Moves the `Box`'s value out of the `Box` and consumes the `Box`. + pub fn into_inner(b: Self) -> T { + // SAFETY: By the type invariant `&*b` is valid for `read`. + let value = unsafe { core::ptr::read(&*b) }; + let _ = Self::forget_contents(b); + value + } +} + +impl From> for Pin> +where + T: ?Sized, + A: Allocator, +{ + /// Converts a `Box` into a `Pin>`. If `T` does not implement [`Unpin`], then + /// `*b` will be pinned in memory and can't be moved. + /// + /// This moves `b` into `Pin` without moving `*b` or allocating and copying any memory. + fn from(b: Box) -> Self { + // SAFETY: The value wrapped inside a `Pin>` cannot be moved or replaced as long + // as `T` does not implement `Unpin`. + unsafe { Pin::new_unchecked(b) } + } +} + +impl InPlaceWrite for Box, A> +where + A: Allocator + 'static, +{ + type Initialized = Box; + + fn write_init(mut self, init: impl Init) -> Result { + let slot = self.as_mut_ptr(); + // SAFETY: When init errors/panics, slot will get deallocated but not dropped, + // slot is valid. + unsafe { init.__init(slot)? }; + // SAFETY: All fields have been initialized. + Ok(unsafe { Box::assume_init(self) }) + } + + fn write_pin_init(mut self, init: impl PinInit) -> Result, E> { + let slot = self.as_mut_ptr(); + // SAFETY: When init errors/panics, slot will get deallocated but not dropped, + // slot is valid and will not be moved, because we pin it later. + unsafe { init.__pinned_init(slot)? }; + // SAFETY: All fields have been initialized. + Ok(unsafe { Box::assume_init(self) }.into()) + } +} + +impl InPlaceInit for Box +where + A: Allocator + 'static, +{ + type PinnedSelf = Pin; + + #[inline] + fn try_pin_init(init: impl PinInit, flags: Flags) -> Result, E> + where + E: From, + { + Box::<_, A>::new_uninit(flags)?.write_pin_init(init) + } + + #[inline] + fn try_init(init: impl Init, flags: Flags) -> Result + where + E: From, + { + Box::<_, A>::new_uninit(flags)?.write_init(init) + } +} + +impl ForeignOwnable for Box +where + A: Allocator, +{ + type Borrowed<'a> = &'a T; + + fn into_foreign(self) -> *const crate::ffi::c_void { + Box::into_raw(self) as _ + } + + unsafe fn from_foreign(ptr: *const crate::ffi::c_void) -> Self { + // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous + // call to `Self::into_foreign`. 
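A sketch of the `ForeignOwnable` round trip enabled by the implementation above, i.e. handing a `KBox` to C as an opaque pointer and reclaiming it later; the helper name is illustrative, and the `kernel::types::ForeignOwnable` path is the one imported by this file:

```
use kernel::alloc::{flags::GFP_KERNEL, AllocError, KBox};
use kernel::types::ForeignOwnable;

fn foreign_roundtrip() -> Result<(), AllocError> {
    let b = KBox::new(42u32, GFP_KERNEL)?;

    // Hand ownership to the C side as an opaque pointer.
    let raw = b.into_foreign();

    // ... `raw` would normally travel through C code here ...

    // SAFETY: `raw` came from `into_foreign()` above and is not used again afterwards.
    let b = unsafe { KBox::<u32>::from_foreign(raw) };
    assert_eq!(*b, 42);
    Ok(())
}
```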
+ unsafe { Box::from_raw(ptr as _) } + } + + unsafe fn borrow<'a>(ptr: *const crate::ffi::c_void) -> &'a T { + // SAFETY: The safety requirements of this method ensure that the object remains alive and + // immutable for the duration of 'a. + unsafe { &*ptr.cast() } + } +} + +impl ForeignOwnable for Pin> +where + A: Allocator, +{ + type Borrowed<'a> = Pin<&'a T>; + + fn into_foreign(self) -> *const crate::ffi::c_void { + // SAFETY: We are still treating the box as pinned. + Box::into_raw(unsafe { Pin::into_inner_unchecked(self) }) as _ + } + + unsafe fn from_foreign(ptr: *const crate::ffi::c_void) -> Self { + // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous + // call to `Self::into_foreign`. + unsafe { Pin::new_unchecked(Box::from_raw(ptr as _)) } + } + + unsafe fn borrow<'a>(ptr: *const crate::ffi::c_void) -> Pin<&'a T> { + // SAFETY: The safety requirements for this function ensure that the object is still alive, + // so it is safe to dereference the raw pointer. + // The safety requirements of `from_foreign` also ensure that the object remains alive for + // the lifetime of the returned value. + let r = unsafe { &*ptr.cast() }; + + // SAFETY: This pointer originates from a `Pin>`. + unsafe { Pin::new_unchecked(r) } + } +} + +impl Deref for Box +where + T: ?Sized, + A: Allocator, +{ + type Target = T; + + fn deref(&self) -> &T { + // SAFETY: `self.0` is always properly aligned, dereferenceable and points to an initialized + // instance of `T`. + unsafe { self.0.as_ref() } + } +} + +impl DerefMut for Box +where + T: ?Sized, + A: Allocator, +{ + fn deref_mut(&mut self) -> &mut T { + // SAFETY: `self.0` is always properly aligned, dereferenceable and points to an initialized + // instance of `T`. + unsafe { self.0.as_mut() } + } +} + +impl fmt::Debug for Box +where + T: ?Sized + fmt::Debug, + A: Allocator, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +impl Drop for Box +where + T: ?Sized, + A: Allocator, +{ + fn drop(&mut self) { + let layout = Layout::for_value::(self); + + // SAFETY: The pointer in `self.0` is guaranteed to be valid by the type invariant. + unsafe { core::ptr::drop_in_place::(self.deref_mut()) }; + + // SAFETY: + // - `self.0` was previously allocated with `A`. + // - `layout` is equal to the `Layout´ `self.0` was allocated with. + unsafe { A::free(self.0.cast(), layout) }; + } +} diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs new file mode 100644 index 0000000000000..ae9d072741ced --- /dev/null +++ b/rust/kernel/alloc/kvec.rs @@ -0,0 +1,913 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Implementation of [`Vec`]. + +use super::{ + allocator::{KVmalloc, Kmalloc, Vmalloc}, + layout::ArrayLayout, + AllocError, Allocator, Box, Flags, +}; +use core::{ + fmt, + marker::PhantomData, + mem::{ManuallyDrop, MaybeUninit}, + ops::Deref, + ops::DerefMut, + ops::Index, + ops::IndexMut, + ptr, + ptr::NonNull, + slice, + slice::SliceIndex, +}; + +/// Create a [`KVec`] containing the arguments. +/// +/// New memory is allocated with `GFP_KERNEL`. +/// +/// # Examples +/// +/// ``` +/// let mut v = kernel::kvec![]; +/// v.push(1, GFP_KERNEL)?; +/// assert_eq!(v, [1]); +/// +/// let mut v = kernel::kvec![1; 3]?; +/// v.push(4, GFP_KERNEL)?; +/// assert_eq!(v, [1, 1, 1, 4]); +/// +/// let mut v = kernel::kvec![1, 2, 3]?; +/// v.push(4, GFP_KERNEL)?; +/// assert_eq!(v, [1, 2, 3, 4]); +/// +/// # Ok::<(), Error>(()) +/// ``` +#[macro_export] +macro_rules! 
kvec { + () => ( + $crate::alloc::KVec::new() + ); + ($elem:expr; $n:expr) => ( + $crate::alloc::KVec::from_elem($elem, $n, GFP_KERNEL) + ); + ($($x:expr),+ $(,)?) => ( + match $crate::alloc::KBox::new_uninit(GFP_KERNEL) { + Ok(b) => Ok($crate::alloc::KVec::from($crate::alloc::KBox::write(b, [$($x),+]))), + Err(e) => Err(e), + } + ); +} + +/// The kernel's [`Vec`] type. +/// +/// A contiguous growable array type with contents allocated with the kernel's allocators (e.g. +/// [`Kmalloc`], [`Vmalloc`] or [`KVmalloc`]), written `Vec`. +/// +/// For non-zero-sized values, a [`Vec`] will use the given allocator `A` for its allocation. For +/// the most common allocators the type aliases [`KVec`], [`VVec`] and [`KVVec`] exist. +/// +/// For zero-sized types the [`Vec`]'s pointer must be `dangling_mut::`; no memory is allocated. +/// +/// Generally, [`Vec`] consists of a pointer that represents the vector's backing buffer, the +/// capacity of the vector (the number of elements that currently fit into the vector), its length +/// (the number of elements that are currently stored in the vector) and the `Allocator` type used +/// to allocate (and free) the backing buffer. +/// +/// A [`Vec`] can be deconstructed into and (re-)constructed from its previously named raw parts +/// and manually modified. +/// +/// [`Vec`]'s backing buffer gets, if required, automatically increased (re-allocated) when elements +/// are added to the vector. +/// +/// # Invariants +/// +/// - `self.ptr` is always properly aligned and either points to memory allocated with `A` or, for +/// zero-sized types, is a dangling, well aligned pointer. +/// +/// - `self.len` always represents the exact number of elements stored in the vector. +/// +/// - `self.layout` represents the absolute number of elements that can be stored within the vector +/// without re-allocation. For ZSTs `self.layout`'s capacity is zero. However, it is legal for the +/// backing buffer to be larger than `layout`. +/// +/// - The `Allocator` type `A` of the vector is the exact same `Allocator` type the backing buffer +/// was allocated with (and must be freed with). +pub struct Vec { + ptr: NonNull, + /// Represents the actual buffer size as `cap` times `size_of::` bytes. + /// + /// Note: This isn't quite the same as `Self::capacity`, which in contrast returns the number of + /// elements we can still store without reallocating. + layout: ArrayLayout, + len: usize, + _p: PhantomData, +} + +/// Type alias for [`Vec`] with a [`Kmalloc`] allocator. +/// +/// # Examples +/// +/// ``` +/// let mut v = KVec::new(); +/// v.push(1, GFP_KERNEL)?; +/// assert_eq!(&v, &[1]); +/// +/// # Ok::<(), Error>(()) +/// ``` +pub type KVec = Vec; + +/// Type alias for [`Vec`] with a [`Vmalloc`] allocator. +/// +/// # Examples +/// +/// ``` +/// let mut v = VVec::new(); +/// v.push(1, GFP_KERNEL)?; +/// assert_eq!(&v, &[1]); +/// +/// # Ok::<(), Error>(()) +/// ``` +pub type VVec = Vec; + +/// Type alias for [`Vec`] with a [`KVmalloc`] allocator. +/// +/// # Examples +/// +/// ``` +/// let mut v = KVVec::new(); +/// v.push(1, GFP_KERNEL)?; +/// assert_eq!(&v, &[1]); +/// +/// # Ok::<(), Error>(()) +/// ``` +pub type KVVec = Vec; + +// SAFETY: `Vec` is `Send` if `T` is `Send` because `Vec` owns its elements. +unsafe impl Send for Vec +where + T: Send, + A: Allocator, +{ +} + +// SAFETY: `Vec` is `Sync` if `T` is `Sync` because `Vec` owns its elements. 
+unsafe impl Sync for Vec +where + T: Sync, + A: Allocator, +{ +} + +impl Vec +where + A: Allocator, +{ + #[inline] + const fn is_zst() -> bool { + core::mem::size_of::() == 0 + } + + /// Returns the number of elements that can be stored within the vector without allocating + /// additional memory. + pub fn capacity(&self) -> usize { + if const { Self::is_zst() } { + usize::MAX + } else { + self.layout.len() + } + } + + /// Returns the number of elements stored within the vector. + #[inline] + pub fn len(&self) -> usize { + self.len + } + + /// Forcefully sets `self.len` to `new_len`. + /// + /// # Safety + /// + /// - `new_len` must be less than or equal to [`Self::capacity`]. + /// - If `new_len` is greater than `self.len`, all elements within the interval + /// [`self.len`,`new_len`) must be initialized. + #[inline] + pub unsafe fn set_len(&mut self, new_len: usize) { + debug_assert!(new_len <= self.capacity()); + self.len = new_len; + } + + /// Returns a slice of the entire vector. + #[inline] + pub fn as_slice(&self) -> &[T] { + self + } + + /// Returns a mutable slice of the entire vector. + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [T] { + self + } + + /// Returns a mutable raw pointer to the vector's backing buffer, or, if `T` is a ZST, a + /// dangling raw pointer. + #[inline] + pub fn as_mut_ptr(&mut self) -> *mut T { + self.ptr.as_ptr() + } + + /// Returns a raw pointer to the vector's backing buffer, or, if `T` is a ZST, a dangling raw + /// pointer. + #[inline] + pub fn as_ptr(&self) -> *const T { + self.ptr.as_ptr() + } + + /// Returns `true` if the vector contains no elements, `false` otherwise. + /// + /// # Examples + /// + /// ``` + /// let mut v = KVec::new(); + /// assert!(v.is_empty()); + /// + /// v.push(1, GFP_KERNEL); + /// assert!(!v.is_empty()); + /// ``` + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Creates a new, empty `Vec`. + /// + /// This method does not allocate by itself. + #[inline] + pub const fn new() -> Self { + // INVARIANT: Since this is a new, empty `Vec` with no backing memory yet, + // - `ptr` is a properly aligned dangling pointer for type `T`, + // - `layout` is an empty `ArrayLayout` (zero capacity) + // - `len` is zero, since no elements can be or have been stored, + // - `A` is always valid. + Self { + ptr: NonNull::dangling(), + layout: ArrayLayout::empty(), + len: 0, + _p: PhantomData::, + } + } + + /// Returns a slice of `MaybeUninit` for the remaining spare capacity of the vector. + pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit] { + // SAFETY: + // - `self.len` is smaller than `self.capacity` and hence, the resulting pointer is + // guaranteed to be part of the same allocated object. + // - `self.len` can not overflow `isize`. + let ptr = unsafe { self.as_mut_ptr().add(self.len) } as *mut MaybeUninit; + + // SAFETY: The memory between `self.len` and `self.capacity` is guaranteed to be allocated + // and valid, but uninitialized. + unsafe { slice::from_raw_parts_mut(ptr, self.capacity() - self.len) } + } + + /// Appends an element to the back of the [`Vec`] instance. 
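The `spare_capacity_mut`/`set_len` pair above is the low-level pattern that `push` and `extend_from_slice` build on. A hedged doctest-style sketch of using it directly (illustration only, not part of the patch; it uses `with_capacity`, which the patch adds a little further down in this file):

```rust
let mut v = KVec::<u8>::with_capacity(3, GFP_KERNEL)?;

// Write directly into the uninitialized tail...
for (i, slot) in v.spare_capacity_mut().iter_mut().take(3).enumerate() {
    slot.write(i as u8);
}

// ...then publish the new length.
// SAFETY: The first three spare entries were just initialized and `3 <= capacity`.
unsafe { v.set_len(3) };

assert_eq!(&v, &[0u8, 1, 2]);
# Ok::<(), Error>(())
```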
+ /// + /// # Examples + /// + /// ``` + /// let mut v = KVec::new(); + /// v.push(1, GFP_KERNEL)?; + /// assert_eq!(&v, &[1]); + /// + /// v.push(2, GFP_KERNEL)?; + /// assert_eq!(&v, &[1, 2]); + /// # Ok::<(), Error>(()) + /// ``` + pub fn push(&mut self, v: T, flags: Flags) -> Result<(), AllocError> { + self.reserve(1, flags)?; + + // SAFETY: + // - `self.len` is smaller than `self.capacity` and hence, the resulting pointer is + // guaranteed to be part of the same allocated object. + // - `self.len` can not overflow `isize`. + let ptr = unsafe { self.as_mut_ptr().add(self.len) }; + + // SAFETY: + // - `ptr` is properly aligned and valid for writes. + unsafe { core::ptr::write(ptr, v) }; + + // SAFETY: We just initialised the first spare entry, so it is safe to increase the length + // by 1. We also know that the new length is <= capacity because of the previous call to + // `reserve` above. + unsafe { self.set_len(self.len() + 1) }; + Ok(()) + } + + /// Creates a new [`Vec`] instance with at least the given capacity. + /// + /// # Examples + /// + /// ``` + /// let v = KVec::::with_capacity(20, GFP_KERNEL)?; + /// + /// assert!(v.capacity() >= 20); + /// # Ok::<(), Error>(()) + /// ``` + pub fn with_capacity(capacity: usize, flags: Flags) -> Result { + let mut v = Vec::new(); + + v.reserve(capacity, flags)?; + + Ok(v) + } + + /// Creates a `Vec` from a pointer, a length and a capacity using the allocator `A`. + /// + /// # Examples + /// + /// ``` + /// let mut v = kernel::kvec![1, 2, 3]?; + /// v.reserve(1, GFP_KERNEL)?; + /// + /// let (mut ptr, mut len, cap) = v.into_raw_parts(); + /// + /// // SAFETY: We've just reserved memory for another element. + /// unsafe { ptr.add(len).write(4) }; + /// len += 1; + /// + /// // SAFETY: We only wrote an additional element at the end of the `KVec`'s buffer and + /// // correspondingly increased the length of the `KVec` by one. Otherwise, we construct it + /// // from the exact same raw parts. + /// let v = unsafe { KVec::from_raw_parts(ptr, len, cap) }; + /// + /// assert_eq!(v, [1, 2, 3, 4]); + /// + /// # Ok::<(), Error>(()) + /// ``` + /// + /// # Safety + /// + /// If `T` is a ZST: + /// + /// - `ptr` must be a dangling, well aligned pointer. + /// + /// Otherwise: + /// + /// - `ptr` must have been allocated with the allocator `A`. + /// - `ptr` must satisfy or exceed the alignment requirements of `T`. + /// - `ptr` must point to memory with a size of at least `size_of::() * capacity` bytes. + /// - The allocated size in bytes must not be larger than `isize::MAX`. + /// - `length` must be less than or equal to `capacity`. + /// - The first `length` elements must be initialized values of type `T`. + /// + /// It is also valid to create an empty `Vec` passing a dangling pointer for `ptr` and zero for + /// `cap` and `len`. + pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self { + let layout = if Self::is_zst() { + ArrayLayout::empty() + } else { + // SAFETY: By the safety requirements of this function, `capacity * size_of::()` is + // smaller than `isize::MAX`. + unsafe { ArrayLayout::new_unchecked(capacity) } + }; + + // INVARIANT: For ZSTs, we store an empty `ArrayLayout`, all other type invariants are + // covered by the safety requirements of this function. + Self { + // SAFETY: By the safety requirements, `ptr` is either dangling or pointing to a valid + // memory allocation, allocated with `A`. 
+ ptr: unsafe { NonNull::new_unchecked(ptr) }, + layout, + len: length, + _p: PhantomData::, + } + } + + /// Consumes the `Vec` and returns its raw components `pointer`, `length` and `capacity`. + /// + /// This will not run the destructor of the contained elements and for non-ZSTs the allocation + /// will stay alive indefinitely. Use [`Vec::from_raw_parts`] to recover the [`Vec`], drop the + /// elements and free the allocation, if any. + pub fn into_raw_parts(self) -> (*mut T, usize, usize) { + let mut me = ManuallyDrop::new(self); + let len = me.len(); + let capacity = me.capacity(); + let ptr = me.as_mut_ptr(); + (ptr, len, capacity) + } + + /// Ensures that the capacity exceeds the length by at least `additional` elements. + /// + /// # Examples + /// + /// ``` + /// let mut v = KVec::new(); + /// v.push(1, GFP_KERNEL)?; + /// + /// v.reserve(10, GFP_KERNEL)?; + /// let cap = v.capacity(); + /// assert!(cap >= 10); + /// + /// v.reserve(10, GFP_KERNEL)?; + /// let new_cap = v.capacity(); + /// assert_eq!(new_cap, cap); + /// + /// # Ok::<(), Error>(()) + /// ``` + pub fn reserve(&mut self, additional: usize, flags: Flags) -> Result<(), AllocError> { + let len = self.len(); + let cap = self.capacity(); + + if cap - len >= additional { + return Ok(()); + } + + if Self::is_zst() { + // The capacity is already `usize::MAX` for ZSTs, we can't go higher. + return Err(AllocError); + } + + // We know that `cap <= isize::MAX` because of the type invariants of `Self`. So the + // multiplication by two won't overflow. + let new_cap = core::cmp::max(cap * 2, len.checked_add(additional).ok_or(AllocError)?); + let layout = ArrayLayout::new(new_cap).map_err(|_| AllocError)?; + + // SAFETY: + // - `ptr` is valid because it's either `None` or comes from a previous call to + // `A::realloc`. + // - `self.layout` matches the `ArrayLayout` of the preceding allocation. + let ptr = unsafe { + A::realloc( + Some(self.ptr.cast()), + layout.into(), + self.layout.into(), + flags, + )? + }; + + // INVARIANT: + // - `layout` is some `ArrayLayout::`, + // - `ptr` has been created by `A::realloc` from `layout`. + self.ptr = ptr.cast(); + self.layout = layout; + + Ok(()) + } +} + +impl Vec { + /// Extend the vector by `n` clones of `value`. + pub fn extend_with(&mut self, n: usize, value: T, flags: Flags) -> Result<(), AllocError> { + if n == 0 { + return Ok(()); + } + + self.reserve(n, flags)?; + + let spare = self.spare_capacity_mut(); + + for item in spare.iter_mut().take(n - 1) { + item.write(value.clone()); + } + + // We can write the last element directly without cloning needlessly. + spare[n - 1].write(value); + + // SAFETY: + // - `self.len() + n < self.capacity()` due to the call to reserve above, + // - the loop and the line above initialized the next `n` elements. + unsafe { self.set_len(self.len() + n) }; + + Ok(()) + } + + /// Pushes clones of the elements of slice into the [`Vec`] instance. 
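`extend_with` backs `from_elem` (added just below) and therefore the `kvec![elem; n]` arm of the macro shown earlier; a small doctest-style illustration (not part of the patch):

```rust
// `kvec![0u8; 4]` expands to `KVec::from_elem(0u8, 4, GFP_KERNEL)`.
let v = kernel::kvec![0u8; 4]?;
assert_eq!(v, [0u8, 0, 0, 0]);

// The same vector, spelled out via `from_elem`.
let w = KVec::from_elem(0u8, 4, GFP_KERNEL)?;
assert_eq!(w, v);
# Ok::<(), Error>(())
```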
+ /// + /// # Examples + /// + /// ``` + /// let mut v = KVec::new(); + /// v.push(1, GFP_KERNEL)?; + /// + /// v.extend_from_slice(&[20, 30, 40], GFP_KERNEL)?; + /// assert_eq!(&v, &[1, 20, 30, 40]); + /// + /// v.extend_from_slice(&[50, 60], GFP_KERNEL)?; + /// assert_eq!(&v, &[1, 20, 30, 40, 50, 60]); + /// # Ok::<(), Error>(()) + /// ``` + pub fn extend_from_slice(&mut self, other: &[T], flags: Flags) -> Result<(), AllocError> { + self.reserve(other.len(), flags)?; + for (slot, item) in core::iter::zip(self.spare_capacity_mut(), other) { + slot.write(item.clone()); + } + + // SAFETY: + // - `other.len()` spare entries have just been initialized, so it is safe to increase + // the length by the same number. + // - `self.len() + other.len() <= self.capacity()` is guaranteed by the preceding `reserve` + // call. + unsafe { self.set_len(self.len() + other.len()) }; + Ok(()) + } + + /// Create a new `Vec` and extend it by `n` clones of `value`. + pub fn from_elem(value: T, n: usize, flags: Flags) -> Result { + let mut v = Self::with_capacity(n, flags)?; + + v.extend_with(n, value, flags)?; + + Ok(v) + } +} + +impl Drop for Vec +where + A: Allocator, +{ + fn drop(&mut self) { + // SAFETY: `self.as_mut_ptr` is guaranteed to be valid by the type invariant. + unsafe { + ptr::drop_in_place(core::ptr::slice_from_raw_parts_mut( + self.as_mut_ptr(), + self.len, + )) + }; + + // SAFETY: + // - `self.ptr` was previously allocated with `A`. + // - `self.layout` matches the `ArrayLayout` of the preceding allocation. + unsafe { A::free(self.ptr.cast(), self.layout.into()) }; + } +} + +impl From> for Vec +where + A: Allocator, +{ + fn from(b: Box<[T; N], A>) -> Vec { + let len = b.len(); + let ptr = Box::into_raw(b); + + // SAFETY: + // - `b` has been allocated with `A`, + // - `ptr` fulfills the alignment requirements for `T`, + // - `ptr` points to memory with at least a size of `size_of::() * len`, + // - all elements within `b` are initialized values of `T`, + // - `len` does not exceed `isize::MAX`. + unsafe { Vec::from_raw_parts(ptr as _, len, len) } + } +} + +impl Default for KVec { + #[inline] + fn default() -> Self { + Self::new() + } +} + +impl fmt::Debug for Vec { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +impl Deref for Vec +where + A: Allocator, +{ + type Target = [T]; + + #[inline] + fn deref(&self) -> &[T] { + // SAFETY: The memory behind `self.as_ptr()` is guaranteed to contain `self.len` + // initialized elements of type `T`. + unsafe { slice::from_raw_parts(self.as_ptr(), self.len) } + } +} + +impl DerefMut for Vec +where + A: Allocator, +{ + #[inline] + fn deref_mut(&mut self) -> &mut [T] { + // SAFETY: The memory behind `self.as_ptr()` is guaranteed to contain `self.len` + // initialized elements of type `T`. + unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) } + } +} + +impl Eq for Vec where A: Allocator {} + +impl, A> Index for Vec +where + A: Allocator, +{ + type Output = I::Output; + + #[inline] + fn index(&self, index: I) -> &Self::Output { + Index::index(&**self, index) + } +} + +impl, A> IndexMut for Vec +where + A: Allocator, +{ + #[inline] + fn index_mut(&mut self, index: I) -> &mut Self::Output { + IndexMut::index_mut(&mut **self, index) + } +} + +macro_rules! impl_slice_eq { + ($([$($vars:tt)*] $lhs:ty, $rhs:ty,)*) => { + $( + impl PartialEq<$rhs> for $lhs + where + T: PartialEq, + { + #[inline] + fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] } + } + )* + } +} + +impl_slice_eq! 
{ + [A1: Allocator, A2: Allocator] Vec, Vec, + [A: Allocator] Vec, &[U], + [A: Allocator] Vec, &mut [U], + [A: Allocator] &[T], Vec, + [A: Allocator] &mut [T], Vec, + [A: Allocator] Vec, [U], + [A: Allocator] [T], Vec, + [A: Allocator, const N: usize] Vec, [U; N], + [A: Allocator, const N: usize] Vec, &[U; N], +} + +impl<'a, T, A> IntoIterator for &'a Vec +where + A: Allocator, +{ + type Item = &'a T; + type IntoIter = slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec +where + A: Allocator, +{ + type Item = &'a mut T; + type IntoIter = slice::IterMut<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +/// An [`Iterator`] implementation for [`Vec`] that moves elements out of a vector. +/// +/// This structure is created by the [`Vec::into_iter`] method on [`Vec`] (provided by the +/// [`IntoIterator`] trait). +/// +/// # Examples +/// +/// ``` +/// let v = kernel::kvec![0, 1, 2]?; +/// let iter = v.into_iter(); +/// +/// # Ok::<(), Error>(()) +/// ``` +pub struct IntoIter { + ptr: *mut T, + buf: NonNull, + len: usize, + layout: ArrayLayout, + _p: PhantomData, +} + +impl IntoIter +where + A: Allocator, +{ + fn into_raw_parts(self) -> (*mut T, NonNull, usize, usize) { + let me = ManuallyDrop::new(self); + let ptr = me.ptr; + let buf = me.buf; + let len = me.len; + let cap = me.layout.len(); + (ptr, buf, len, cap) + } + + /// Same as `Iterator::collect` but specialized for `Vec`'s `IntoIter`. + /// + /// # Examples + /// + /// ``` + /// let v = kernel::kvec![1, 2, 3]?; + /// let mut it = v.into_iter(); + /// + /// assert_eq!(it.next(), Some(1)); + /// + /// let v = it.collect(GFP_KERNEL); + /// assert_eq!(v, [2, 3]); + /// + /// # Ok::<(), Error>(()) + /// ``` + /// + /// # Implementation details + /// + /// Currently, we can't implement `FromIterator`. There are a couple of issues with this trait + /// in the kernel, namely: + /// + /// - Rust's specialization feature is unstable. This prevents us to optimize for the special + /// case where `I::IntoIter` equals `Vec`'s `IntoIter` type. + /// - We also can't use `I::IntoIter`'s type ID either to work around this, since `FromIterator` + /// doesn't require this type to be `'static`. + /// - `FromIterator::from_iter` does return `Self` instead of `Result`, hence + /// we can't properly handle allocation failures. + /// - Neither `Iterator::collect` nor `FromIterator::from_iter` can handle additional allocation + /// flags. + /// + /// Instead, provide `IntoIter::collect`, such that we can at least convert a `IntoIter` into a + /// `Vec` again. + /// + /// Note that `IntoIter::collect` doesn't require `Flags`, since it re-uses the existing backing + /// buffer. However, this backing buffer may be shrunk to the actual count of elements. + pub fn collect(self, flags: Flags) -> Vec { + let old_layout = self.layout; + let (mut ptr, buf, len, mut cap) = self.into_raw_parts(); + let has_advanced = ptr != buf.as_ptr(); + + if has_advanced { + // Copy the contents we have advanced to at the beginning of the buffer. + // + // SAFETY: + // - `ptr` is valid for reads of `len * size_of::()` bytes, + // - `buf.as_ptr()` is valid for writes of `len * size_of::()` bytes, + // - `ptr` and `buf.as_ptr()` are not be subject to aliasing restrictions relative to + // each other, + // - both `ptr` and `buf.ptr()` are properly aligned. 
+ unsafe { ptr::copy(ptr, buf.as_ptr(), len) }; + ptr = buf.as_ptr(); + + // SAFETY: `len` is guaranteed to be smaller than `self.layout.len()`. + let layout = unsafe { ArrayLayout::::new_unchecked(len) }; + + // SAFETY: `buf` points to the start of the backing buffer and `len` is guaranteed to be + // smaller than `cap`. Depending on `alloc` this operation may shrink the buffer or leaves + // it as it is. + ptr = match unsafe { + A::realloc(Some(buf.cast()), layout.into(), old_layout.into(), flags) + } { + // If we fail to shrink, which likely can't even happen, continue with the existing + // buffer. + Err(_) => ptr, + Ok(ptr) => { + cap = len; + ptr.as_ptr().cast() + } + }; + } + + // SAFETY: If the iterator has been advanced, the advanced elements have been copied to + // the beginning of the buffer and `len` has been adjusted accordingly. + // + // - `ptr` is guaranteed to point to the start of the backing buffer. + // - `cap` is either the original capacity or, after shrinking the buffer, equal to `len`. + // - `alloc` is guaranteed to be unchanged since `into_iter` has been called on the original + // `Vec`. + unsafe { Vec::from_raw_parts(ptr, len, cap) } + } +} + +impl Iterator for IntoIter +where + A: Allocator, +{ + type Item = T; + + /// # Examples + /// + /// ``` + /// let v = kernel::kvec![1, 2, 3]?; + /// let mut it = v.into_iter(); + /// + /// assert_eq!(it.next(), Some(1)); + /// assert_eq!(it.next(), Some(2)); + /// assert_eq!(it.next(), Some(3)); + /// assert_eq!(it.next(), None); + /// + /// # Ok::<(), Error>(()) + /// ``` + fn next(&mut self) -> Option { + if self.len == 0 { + return None; + } + + let current = self.ptr; + + // SAFETY: We can't overflow; decreasing `self.len` by one every time we advance `self.ptr` + // by one guarantees that. + unsafe { self.ptr = self.ptr.add(1) }; + + self.len -= 1; + + // SAFETY: `current` is guaranteed to point at a valid element within the buffer. + Some(unsafe { current.read() }) + } + + /// # Examples + /// + /// ``` + /// let v: KVec = kernel::kvec![1, 2, 3]?; + /// let mut iter = v.into_iter(); + /// let size = iter.size_hint().0; + /// + /// iter.next(); + /// assert_eq!(iter.size_hint().0, size - 1); + /// + /// iter.next(); + /// assert_eq!(iter.size_hint().0, size - 2); + /// + /// iter.next(); + /// assert_eq!(iter.size_hint().0, size - 3); + /// + /// # Ok::<(), Error>(()) + /// ``` + fn size_hint(&self) -> (usize, Option) { + (self.len, Some(self.len)) + } +} + +impl Drop for IntoIter +where + A: Allocator, +{ + fn drop(&mut self) { + // SAFETY: `self.ptr` is guaranteed to be valid by the type invariant. + unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(self.ptr, self.len)) }; + + // SAFETY: + // - `self.buf` was previously allocated with `A`. + // - `self.layout` matches the `ArrayLayout` of the preceding allocation. + unsafe { A::free(self.buf.cast(), self.layout.into()) }; + } +} + +impl IntoIterator for Vec +where + A: Allocator, +{ + type Item = T; + type IntoIter = IntoIter; + + /// Consumes the `Vec` and creates an `Iterator`, which moves each value out of the + /// vector (from start to end). 
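With the by-reference, by-mutable-reference and by-value `IntoIterator` implementations above, a `KVec` supports the usual traversal forms; an illustrative sketch (not part of the patch):

```rust
let mut v = kernel::kvec![1, 2, 3]?;

// Borrowing iteration via `IntoIterator for &Vec<T, A>`.
let mut sum = 0;
for x in &v {
    sum += *x;
}
assert_eq!(sum, 6);

// Mutable iteration via `IntoIterator for &mut Vec<T, A>`.
for x in &mut v {
    *x *= 2;
}
assert_eq!(v, [2, 4, 6]);

// Consuming iteration via `IntoIterator for Vec<T, A>`: elements are moved out.
let mut it = v.into_iter();
assert_eq!(it.next(), Some(2));
# Ok::<(), Error>(())
```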
+ /// + /// # Examples + /// + /// ``` + /// let v = kernel::kvec![1, 2]?; + /// let mut v_iter = v.into_iter(); + /// + /// let first_element: Option = v_iter.next(); + /// + /// assert_eq!(first_element, Some(1)); + /// assert_eq!(v_iter.next(), Some(2)); + /// assert_eq!(v_iter.next(), None); + /// + /// # Ok::<(), Error>(()) + /// ``` + /// + /// ``` + /// let v = kernel::kvec![]; + /// let mut v_iter = v.into_iter(); + /// + /// let first_element: Option = v_iter.next(); + /// + /// assert_eq!(first_element, None); + /// + /// # Ok::<(), Error>(()) + /// ``` + #[inline] + fn into_iter(self) -> Self::IntoIter { + let buf = self.ptr; + let layout = self.layout; + let (ptr, len, _) = self.into_raw_parts(); + + IntoIter { + ptr, + buf, + len, + layout, + _p: PhantomData::, + } + } +} diff --git a/rust/kernel/alloc/layout.rs b/rust/kernel/alloc/layout.rs new file mode 100644 index 0000000000000..4b3cd7fdc816c --- /dev/null +++ b/rust/kernel/alloc/layout.rs @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Memory layout. +//! +//! Custom layout types extending or improving [`Layout`]. + +use core::{alloc::Layout, marker::PhantomData}; + +/// Error when constructing an [`ArrayLayout`]. +pub struct LayoutError; + +/// A layout for an array `[T; n]`. +/// +/// # Invariants +/// +/// - `len * size_of::() <= isize::MAX`. +pub struct ArrayLayout { + len: usize, + _phantom: PhantomData T>, +} + +impl Clone for ArrayLayout { + fn clone(&self) -> Self { + *self + } +} +impl Copy for ArrayLayout {} + +const ISIZE_MAX: usize = isize::MAX as usize; + +impl ArrayLayout { + /// Creates a new layout for `[T; 0]`. + pub const fn empty() -> Self { + // INVARIANT: `0 * size_of::() <= isize::MAX`. + Self { + len: 0, + _phantom: PhantomData, + } + } + + /// Creates a new layout for `[T; len]`. + /// + /// # Errors + /// + /// When `len * size_of::()` overflows or when `len * size_of::() > isize::MAX`. + pub const fn new(len: usize) -> Result { + match len.checked_mul(core::mem::size_of::()) { + Some(size) if size <= ISIZE_MAX => { + // INVARIANT: We checked above that `len * size_of::() <= isize::MAX`. + Ok(Self { + len, + _phantom: PhantomData, + }) + } + _ => Err(LayoutError), + } + } + + /// Creates a new layout for `[T; len]`. + /// + /// # Safety + /// + /// `len` must be a value, for which `len * size_of::() <= isize::MAX` is true. + pub unsafe fn new_unchecked(len: usize) -> Self { + // INVARIANT: By the safety requirements of this function + // `len * size_of::() <= isize::MAX`. + Self { + len, + _phantom: PhantomData, + } + } + + /// Returns the number of array elements represented by this layout. + pub const fn len(&self) -> usize { + self.len + } + + /// Returns `true` when no array elements are represented by this layout. + pub const fn is_empty(&self) -> bool { + self.len == 0 + } +} + +impl From> for Layout { + fn from(value: ArrayLayout) -> Self { + let res = Layout::array::(value.len); + // SAFETY: By the type invariant of `ArrayLayout` we have + // `len * size_of::() <= isize::MAX` and thus the result must be `Ok`. + unsafe { res.unwrap_unchecked() } + } +} diff --git a/rust/kernel/alloc/vec_ext.rs b/rust/kernel/alloc/vec_ext.rs deleted file mode 100644 index 1297a4be32e8c..0000000000000 --- a/rust/kernel/alloc/vec_ext.rs +++ /dev/null @@ -1,185 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -//! Extensions to [`Vec`] for fallible allocations. - -use super::{AllocError, Flags}; -use alloc::vec::Vec; - -/// Extensions to [`Vec`]. 
-pub trait VecExt: Sized { - /// Creates a new [`Vec`] instance with at least the given capacity. - /// - /// # Examples - /// - /// ``` - /// let v = Vec::::with_capacity(20, GFP_KERNEL)?; - /// - /// assert!(v.capacity() >= 20); - /// # Ok::<(), Error>(()) - /// ``` - fn with_capacity(capacity: usize, flags: Flags) -> Result; - - /// Appends an element to the back of the [`Vec`] instance. - /// - /// # Examples - /// - /// ``` - /// let mut v = Vec::new(); - /// v.push(1, GFP_KERNEL)?; - /// assert_eq!(&v, &[1]); - /// - /// v.push(2, GFP_KERNEL)?; - /// assert_eq!(&v, &[1, 2]); - /// # Ok::<(), Error>(()) - /// ``` - fn push(&mut self, v: T, flags: Flags) -> Result<(), AllocError>; - - /// Pushes clones of the elements of slice into the [`Vec`] instance. - /// - /// # Examples - /// - /// ``` - /// let mut v = Vec::new(); - /// v.push(1, GFP_KERNEL)?; - /// - /// v.extend_from_slice(&[20, 30, 40], GFP_KERNEL)?; - /// assert_eq!(&v, &[1, 20, 30, 40]); - /// - /// v.extend_from_slice(&[50, 60], GFP_KERNEL)?; - /// assert_eq!(&v, &[1, 20, 30, 40, 50, 60]); - /// # Ok::<(), Error>(()) - /// ``` - fn extend_from_slice(&mut self, other: &[T], flags: Flags) -> Result<(), AllocError> - where - T: Clone; - - /// Ensures that the capacity exceeds the length by at least `additional` elements. - /// - /// # Examples - /// - /// ``` - /// let mut v = Vec::new(); - /// v.push(1, GFP_KERNEL)?; - /// - /// v.reserve(10, GFP_KERNEL)?; - /// let cap = v.capacity(); - /// assert!(cap >= 10); - /// - /// v.reserve(10, GFP_KERNEL)?; - /// let new_cap = v.capacity(); - /// assert_eq!(new_cap, cap); - /// - /// # Ok::<(), Error>(()) - /// ``` - fn reserve(&mut self, additional: usize, flags: Flags) -> Result<(), AllocError>; -} - -impl VecExt for Vec { - fn with_capacity(capacity: usize, flags: Flags) -> Result { - let mut v = Vec::new(); - >::reserve(&mut v, capacity, flags)?; - Ok(v) - } - - fn push(&mut self, v: T, flags: Flags) -> Result<(), AllocError> { - >::reserve(self, 1, flags)?; - let s = self.spare_capacity_mut(); - s[0].write(v); - - // SAFETY: We just initialised the first spare entry, so it is safe to increase the length - // by 1. We also know that the new length is <= capacity because of the previous call to - // `reserve` above. - unsafe { self.set_len(self.len() + 1) }; - Ok(()) - } - - fn extend_from_slice(&mut self, other: &[T], flags: Flags) -> Result<(), AllocError> - where - T: Clone, - { - >::reserve(self, other.len(), flags)?; - for (slot, item) in core::iter::zip(self.spare_capacity_mut(), other) { - slot.write(item.clone()); - } - - // SAFETY: We just initialised the `other.len()` spare entries, so it is safe to increase - // the length by the same amount. We also know that the new length is <= capacity because - // of the previous call to `reserve` above. - unsafe { self.set_len(self.len() + other.len()) }; - Ok(()) - } - - #[cfg(any(test, testlib))] - fn reserve(&mut self, additional: usize, _flags: Flags) -> Result<(), AllocError> { - Vec::reserve(self, additional); - Ok(()) - } - - #[cfg(not(any(test, testlib)))] - fn reserve(&mut self, additional: usize, flags: Flags) -> Result<(), AllocError> { - let len = self.len(); - let cap = self.capacity(); - - if cap - len >= additional { - return Ok(()); - } - - if core::mem::size_of::() == 0 { - // The capacity is already `usize::MAX` for SZTs, we can't go higher. - return Err(AllocError); - } - - // We know cap is <= `isize::MAX` because `Layout::array` fails if the resulting byte size - // is greater than `isize::MAX`. 
So the multiplication by two won't overflow. - let new_cap = core::cmp::max(cap * 2, len.checked_add(additional).ok_or(AllocError)?); - let layout = core::alloc::Layout::array::(new_cap).map_err(|_| AllocError)?; - - let (old_ptr, len, cap) = destructure(self); - - // We need to make sure that `ptr` is either NULL or comes from a previous call to - // `krealloc_aligned`. A `Vec`'s `ptr` value is not guaranteed to be NULL and might be - // dangling after being created with `Vec::new`. Instead, we can rely on `Vec`'s capacity - // to be zero if no memory has been allocated yet. - let ptr = if cap == 0 { - core::ptr::null_mut() - } else { - old_ptr - }; - - // SAFETY: `ptr` is valid because it's either NULL or comes from a previous call to - // `krealloc_aligned`. We also verified that the type is not a ZST. - let new_ptr = unsafe { super::allocator::krealloc_aligned(ptr.cast(), layout, flags) }; - if new_ptr.is_null() { - // SAFETY: We are just rebuilding the existing `Vec` with no changes. - unsafe { rebuild(self, old_ptr, len, cap) }; - Err(AllocError) - } else { - // SAFETY: `ptr` has been reallocated with the layout for `new_cap` elements. New cap - // is greater than `cap`, so it continues to be >= `len`. - unsafe { rebuild(self, new_ptr.cast::(), len, new_cap) }; - Ok(()) - } - } -} - -#[cfg(not(any(test, testlib)))] -fn destructure(v: &mut Vec) -> (*mut T, usize, usize) { - let mut tmp = Vec::new(); - core::mem::swap(&mut tmp, v); - let mut tmp = core::mem::ManuallyDrop::new(tmp); - let len = tmp.len(); - let cap = tmp.capacity(); - (tmp.as_mut_ptr(), len, cap) -} - -/// Rebuilds a `Vec` from a pointer, length, and capacity. -/// -/// # Safety -/// -/// The same as [`Vec::from_raw_parts`]. -#[cfg(not(any(test, testlib)))] -unsafe fn rebuild(v: &mut Vec, ptr: *mut T, len: usize, cap: usize) { - // SAFETY: The safety requirements from this function satisfy those of `from_raw_parts`. - let mut tmp = unsafe { Vec::from_raw_parts(ptr, len, cap) }; - core::mem::swap(&mut tmp, v); -} diff --git a/rust/kernel/block/mq/gen_disk.rs b/rust/kernel/block/mq/gen_disk.rs index 708125dce96a9..c6df153ebb886 100644 --- a/rust/kernel/block/mq/gen_disk.rs +++ b/rust/kernel/block/mq/gen_disk.rs @@ -174,9 +174,9 @@ impl GenDiskBuilder { /// /// # Invariants /// -/// - `gendisk` must always point to an initialized and valid `struct gendisk`. -/// - `gendisk` was added to the VFS through a call to -/// `bindings::device_add_disk`. +/// - `gendisk` must always point to an initialized and valid `struct gendisk`. +/// - `gendisk` was added to the VFS through a call to +/// `bindings::device_add_disk`. pub struct GenDisk { _tagset: Arc>, gendisk: *mut bindings::gendisk, diff --git a/rust/kernel/block/mq/operations.rs b/rust/kernel/block/mq/operations.rs index 9ba7fdfeb4b22..c8646d0d98669 100644 --- a/rust/kernel/block/mq/operations.rs +++ b/rust/kernel/block/mq/operations.rs @@ -131,7 +131,7 @@ impl OperationsVTable { unsafe extern "C" fn poll_callback( _hctx: *mut bindings::blk_mq_hw_ctx, _iob: *mut bindings::io_comp_batch, - ) -> core::ffi::c_int { + ) -> crate::ffi::c_int { T::poll().into() } @@ -145,9 +145,9 @@ impl OperationsVTable { /// for the same context. 
unsafe extern "C" fn init_hctx_callback( _hctx: *mut bindings::blk_mq_hw_ctx, - _tagset_data: *mut core::ffi::c_void, - _hctx_idx: core::ffi::c_uint, - ) -> core::ffi::c_int { + _tagset_data: *mut crate::ffi::c_void, + _hctx_idx: crate::ffi::c_uint, + ) -> crate::ffi::c_int { from_result(|| Ok(0)) } @@ -159,7 +159,7 @@ impl OperationsVTable { /// This function may only be called by blk-mq C infrastructure. unsafe extern "C" fn exit_hctx_callback( _hctx: *mut bindings::blk_mq_hw_ctx, - _hctx_idx: core::ffi::c_uint, + _hctx_idx: crate::ffi::c_uint, ) { } @@ -176,9 +176,9 @@ impl OperationsVTable { unsafe extern "C" fn init_request_callback( _set: *mut bindings::blk_mq_tag_set, rq: *mut bindings::request, - _hctx_idx: core::ffi::c_uint, - _numa_node: core::ffi::c_uint, - ) -> core::ffi::c_int { + _hctx_idx: crate::ffi::c_uint, + _numa_node: crate::ffi::c_uint, + ) -> crate::ffi::c_int { from_result(|| { // SAFETY: By the safety requirements of this function, `rq` points // to a valid allocation. @@ -203,7 +203,7 @@ impl OperationsVTable { unsafe extern "C" fn exit_request_callback( _set: *mut bindings::blk_mq_tag_set, rq: *mut bindings::request, - _hctx_idx: core::ffi::c_uint, + _hctx_idx: crate::ffi::c_uint, ) { // SAFETY: The tagset invariants guarantee that all requests are allocated with extra memory // for the request data. diff --git a/rust/kernel/block/mq/raw_writer.rs b/rust/kernel/block/mq/raw_writer.rs index 9222465d670bf..7e2159e4f6a6f 100644 --- a/rust/kernel/block/mq/raw_writer.rs +++ b/rust/kernel/block/mq/raw_writer.rs @@ -25,7 +25,7 @@ impl<'a> RawWriter<'a> { } pub(crate) fn from_array( - a: &'a mut [core::ffi::c_char; N], + a: &'a mut [crate::ffi::c_char; N], ) -> Result> { Self::new( // SAFETY: the buffer of `a` is valid for read and write as `u8` for diff --git a/rust/kernel/block/mq/tag_set.rs b/rust/kernel/block/mq/tag_set.rs index f9a1ca655a35b..d7f175a05d992 100644 --- a/rust/kernel/block/mq/tag_set.rs +++ b/rust/kernel/block/mq/tag_set.rs @@ -53,7 +53,7 @@ impl TagSet { queue_depth: num_tags, cmd_size, flags: bindings::BLK_MQ_F_SHOULD_MERGE, - driver_data: core::ptr::null_mut::(), + driver_data: core::ptr::null_mut::(), nr_maps: num_maps, ..tag_set } diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs index 6f1587a2524e8..4911b294bfe66 100644 --- a/rust/kernel/error.rs +++ b/rust/kernel/error.rs @@ -6,9 +6,10 @@ use crate::{alloc::AllocError, str::CStr}; -use alloc::alloc::LayoutError; +use core::alloc::LayoutError; use core::fmt; +use core::num::NonZeroI32; use core::num::TryFromIntError; use core::str::Utf8Error; @@ -20,7 +21,11 @@ pub mod code { $( #[doc = $doc] )* - pub const $err: super::Error = super::Error(-(crate::bindings::$err as i32)); + pub const $err: super::Error = + match super::Error::try_from_errno(-(crate::bindings::$err as i32)) { + Some(err) => err, + None => panic!("Invalid errno in `declare_err!`"), + }; }; } @@ -88,18 +93,18 @@ pub mod code { /// /// The value is a valid `errno` (i.e. `>= -MAX_ERRNO && < 0`). #[derive(Clone, Copy, PartialEq, Eq)] -pub struct Error(core::ffi::c_int); +pub struct Error(NonZeroI32); impl Error { /// Creates an [`Error`] from a kernel error code. /// /// It is a bug to pass an out-of-range `errno`. `EINVAL` would /// be returned in such a case. - pub(crate) fn from_errno(errno: core::ffi::c_int) -> Error { + pub fn from_errno(errno: crate::ffi::c_int) -> Error { if errno < -(bindings::MAX_ERRNO as i32) || errno >= 0 { // TODO: Make it a `WARN_ONCE` once available. 
crate::pr_warn!( - "attempted to create `Error` with out of range `errno`: {}", + "attempted to create `Error` with out of range `errno`: {}\n", errno ); return code::EINVAL; @@ -107,7 +112,20 @@ impl Error { // INVARIANT: The check above ensures the type invariant // will hold. - Error(errno) + // SAFETY: `errno` is checked above to be in a valid range. + unsafe { Error::from_errno_unchecked(errno) } + } + + /// Creates an [`Error`] from a kernel error code. + /// + /// Returns [`None`] if `errno` is out-of-range. + const fn try_from_errno(errno: crate::ffi::c_int) -> Option { + if errno < -(bindings::MAX_ERRNO as i32) || errno >= 0 { + return None; + } + + // SAFETY: `errno` is checked above to be in a valid range. + Some(unsafe { Error::from_errno_unchecked(errno) }) } /// Creates an [`Error`] from a kernel error code. @@ -115,38 +133,35 @@ impl Error { /// # Safety /// /// `errno` must be within error code range (i.e. `>= -MAX_ERRNO && < 0`). - unsafe fn from_errno_unchecked(errno: core::ffi::c_int) -> Error { + const unsafe fn from_errno_unchecked(errno: crate::ffi::c_int) -> Error { // INVARIANT: The contract ensures the type invariant // will hold. - Error(errno) + // SAFETY: The caller guarantees `errno` is non-zero. + Error(unsafe { NonZeroI32::new_unchecked(errno) }) } /// Returns the kernel error code. - pub fn to_errno(self) -> core::ffi::c_int { - self.0 + pub fn to_errno(self) -> crate::ffi::c_int { + self.0.get() } #[cfg(CONFIG_BLOCK)] pub(crate) fn to_blk_status(self) -> bindings::blk_status_t { // SAFETY: `self.0` is a valid error due to its invariant. - unsafe { bindings::errno_to_blk_status(self.0) } + unsafe { bindings::errno_to_blk_status(self.0.get()) } } /// Returns the error encoded as a pointer. - #[allow(dead_code)] - pub(crate) fn to_ptr(self) -> *mut T { - #[cfg_attr(target_pointer_width = "32", allow(clippy::useless_conversion))] + pub fn to_ptr(self) -> *mut T { // SAFETY: `self.0` is a valid error due to its invariant. - unsafe { - bindings::ERR_PTR(self.0.into()) as *mut _ - } + unsafe { bindings::ERR_PTR(self.0.get() as _) as *mut _ } } /// Returns a string representing the error, if one exists. - #[cfg(not(testlib))] + #[cfg(not(any(test, testlib)))] pub fn name(&self) -> Option<&'static CStr> { // SAFETY: Just an FFI call, there are no extra safety requirements. - let ptr = unsafe { bindings::errname(-self.0) }; + let ptr = unsafe { bindings::errname(-self.0.get()) }; if ptr.is_null() { None } else { @@ -160,7 +175,7 @@ impl Error { /// When `testlib` is configured, this always returns `None` to avoid the dependency on a /// kernel function so that tests that use this (e.g., by calling [`Result::unwrap`]) can still /// run in userspace. - #[cfg(testlib)] + #[cfg(any(test, testlib))] pub fn name(&self) -> Option<&'static CStr> { None } @@ -171,9 +186,11 @@ impl fmt::Debug for Error { match self.name() { // Print out number if no name can be found. None => f.debug_tuple("Error").field(&-self.0).finish(), - // SAFETY: These strings are ASCII-only. Some(name) => f - .debug_tuple(unsafe { core::str::from_utf8_unchecked(name) }) + .debug_tuple( + // SAFETY: These strings are ASCII-only. + unsafe { core::str::from_utf8_unchecked(name) }, + ) .finish(), } } @@ -239,7 +256,7 @@ pub type Result = core::result::Result; /// Converts an integer as returned by a C kernel function to an error if it's negative, and /// `Ok(())` otherwise. 
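A doctest-style sketch of the `Error` round trip and the `to_result` helper documented just above (illustration only, not part of the patch; it assumes `EIO` is errno 5, as usual):

```rust
use kernel::error::{code, to_result, Error};

// A valid errno survives the round trip and matches its named constant.
let e = Error::from_errno(-5);
assert_eq!(e, code::EIO);
assert_eq!(e.to_errno(), -5);

// `to_result` maps negative C return values to `Err` and everything else to `Ok(())`.
assert!(to_result(0).is_ok());
assert_eq!(to_result(-5), Err(code::EIO));
```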
-pub fn to_result(err: core::ffi::c_int) -> Result { +pub fn to_result(err: crate::ffi::c_int) -> Result { if err < 0 { Err(Error::from_errno(err)) } else { @@ -262,21 +279,21 @@ pub fn to_result(err: core::ffi::c_int) -> Result { /// fn devm_platform_ioremap_resource( /// pdev: &mut PlatformDevice, /// index: u32, -/// ) -> Result<*mut core::ffi::c_void> { +/// ) -> Result<*mut kernel::ffi::c_void> { /// // SAFETY: `pdev` points to a valid platform device. There are no safety requirements /// // on `index`. /// from_err_ptr(unsafe { bindings::devm_platform_ioremap_resource(pdev.to_ptr(), index) }) /// } /// ``` -// TODO: Remove `dead_code` marker once an in-kernel client is available. -#[allow(dead_code)] -pub(crate) fn from_err_ptr(ptr: *mut T) -> Result<*mut T> { - // CAST: Casting a pointer to `*const core::ffi::c_void` is always valid. - let const_ptr: *const core::ffi::c_void = ptr.cast(); +pub fn from_err_ptr(ptr: *mut T) -> Result<*mut T> { + // CAST: Casting a pointer to `*const crate::ffi::c_void` is always valid. + let const_ptr: *const crate::ffi::c_void = ptr.cast(); // SAFETY: The FFI function does not deref the pointer. if unsafe { bindings::IS_ERR(const_ptr) } { // SAFETY: The FFI function does not deref the pointer. let err = unsafe { bindings::PTR_ERR(const_ptr) }; + + #[allow(clippy::unnecessary_cast)] // CAST: If `IS_ERR()` returns `true`, // then `PTR_ERR()` is guaranteed to return a // negative value greater-or-equal to `-bindings::MAX_ERRNO`, @@ -286,8 +303,7 @@ pub(crate) fn from_err_ptr(ptr: *mut T) -> Result<*mut T> { // // SAFETY: `IS_ERR()` ensures `err` is a // negative value greater-or-equal to `-bindings::MAX_ERRNO`. - #[allow(clippy::unnecessary_cast)] - return Err(unsafe { Error::from_errno_unchecked(err as core::ffi::c_int) }); + return Err(unsafe { Error::from_errno_unchecked(err as crate::ffi::c_int) }); } Ok(ptr) } @@ -307,7 +323,7 @@ pub(crate) fn from_err_ptr(ptr: *mut T) -> Result<*mut T> { /// # use kernel::bindings; /// unsafe extern "C" fn probe_callback( /// pdev: *mut bindings::platform_device, -/// ) -> core::ffi::c_int { +/// ) -> kernel::ffi::c_int { /// from_result(|| { /// let ptr = devm_alloc(pdev)?; /// bindings::platform_set_drvdata(pdev, ptr); @@ -315,9 +331,7 @@ pub(crate) fn from_err_ptr(ptr: *mut T) -> Result<*mut T> { /// }) /// } /// ``` -// TODO: Remove `dead_code` marker once an in-kernel client is available. -#[allow(dead_code)] -pub(crate) fn from_result(f: F) -> T +pub fn from_result(f: F) -> T where T: From, F: FnOnce() -> Result, diff --git a/rust/kernel/firmware.rs b/rust/kernel/firmware.rs index 13a374a5cdb74..c5162fdc95ff0 100644 --- a/rust/kernel/firmware.rs +++ b/rust/kernel/firmware.rs @@ -12,7 +12,7 @@ use core::ptr::NonNull; /// One of the following: `bindings::request_firmware`, `bindings::firmware_request_nowarn`, /// `bindings::firmware_request_platform`, `bindings::request_firmware_direct`. struct FwFunc( - unsafe extern "C" fn(*mut *const bindings::firmware, *const i8, *mut bindings::device) -> i32, + unsafe extern "C" fn(*mut *const bindings::firmware, *const u8, *mut bindings::device) -> i32, ); impl FwFunc { diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs index 789f80f71ca7e..90bfb5cb26cd7 100644 --- a/rust/kernel/init.rs +++ b/rust/kernel/init.rs @@ -13,7 +13,7 @@ //! To initialize a `struct` with an in-place constructor you will need two things: //! - an in-place constructor, //! - a memory location that can hold your `struct` (this can be the [stack], an [`Arc`], -//! 
[`UniqueArc`], [`Box`] or any other smart pointer that implements [`InPlaceInit`]). +//! [`UniqueArc`], [`KBox`] or any other smart pointer that implements [`InPlaceInit`]). //! //! To get an in-place constructor there are generally three options: //! - directly creating an in-place constructor using the [`pin_init!`] macro, @@ -35,7 +35,7 @@ //! that you need to write `<-` instead of `:` for fields that you want to initialize in-place. //! //! ```rust -//! # #![allow(clippy::disallowed_names)] +//! # #![expect(clippy::disallowed_names)] //! use kernel::sync::{new_mutex, Mutex}; //! # use core::pin::Pin; //! #[pin_data] @@ -55,7 +55,7 @@ //! (or just the stack) to actually initialize a `Foo`: //! //! ```rust -//! # #![allow(clippy::disallowed_names)] +//! # #![expect(clippy::disallowed_names)] //! # use kernel::sync::{new_mutex, Mutex}; //! # use core::pin::Pin; //! # #[pin_data] @@ -68,7 +68,7 @@ //! # a <- new_mutex!(42, "Foo::a"), //! # b: 24, //! # }); -//! let foo: Result>> = Box::pin_init(foo, GFP_KERNEL); +//! let foo: Result>> = KBox::pin_init(foo, GFP_KERNEL); //! ``` //! //! For more information see the [`pin_init!`] macro. @@ -87,20 +87,19 @@ //! To declare an init macro/function you just return an [`impl PinInit`]: //! //! ```rust -//! # #![allow(clippy::disallowed_names)] //! # use kernel::{sync::Mutex, new_mutex, init::PinInit, try_pin_init}; //! #[pin_data] //! struct DriverData { //! #[pin] //! status: Mutex, -//! buffer: Box<[u8; 1_000_000]>, +//! buffer: KBox<[u8; 1_000_000]>, //! } //! //! impl DriverData { //! fn new() -> impl PinInit { //! try_pin_init!(Self { //! status <- new_mutex!(0, "DriverData::status"), -//! buffer: Box::init(kernel::init::zeroed(), GFP_KERNEL)?, +//! buffer: KBox::init(kernel::init::zeroed(), GFP_KERNEL)?, //! }) //! } //! } @@ -121,11 +120,12 @@ //! `slot` gets called. //! //! ```rust -//! # #![allow(unreachable_pub, clippy::disallowed_names)] +//! # #![expect(unreachable_pub, clippy::disallowed_names)] //! use kernel::{init, types::Opaque}; //! use core::{ptr::addr_of_mut, marker::PhantomPinned, pin::Pin}; //! # mod bindings { -//! # #![allow(non_camel_case_types)] +//! # #![expect(non_camel_case_types)] +//! # #![expect(clippy::missing_safety_doc)] //! # pub struct foo; //! # pub unsafe fn init_foo(_ptr: *mut foo) {} //! # pub unsafe fn destroy_foo(_ptr: *mut foo) {} @@ -133,7 +133,7 @@ //! # } //! # // `Error::from_errno` is `pub(crate)` in the `kernel` crate, thus provide a workaround. //! # trait FromErrno { -//! # fn from_errno(errno: core::ffi::c_int) -> Error { +//! # fn from_errno(errno: kernel::ffi::c_int) -> Error { //! # // Dummy error that can be constructed outside the `kernel` crate. //! # Error::from(core::fmt::Error) //! # } @@ -211,13 +211,12 @@ //! [`pin_init!`]: crate::pin_init! 
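The memory locations listed above can all consume the same in-place constructor; a hedged sketch in the style of the doctests in this file, contrasting the stack and a `KBox` as targets (not part of the patch; the `Counter` type is made up purely for illustration):

```rust
use kernel::{new_mutex, pin_init, stack_pin_init, sync::Mutex};
use kernel::init::PinInit;
use kernel::macros::pin_data;
use core::pin::Pin;

#[pin_data]
struct Counter {
    #[pin]
    value: Mutex<u32>,
}

impl Counter {
    fn new() -> impl PinInit<Self> {
        pin_init!(Self {
            value <- new_mutex!(0, "Counter::value"),
        })
    }
}

// Same initializer, two different memory locations.
stack_pin_init!(let on_stack = Counter::new());
let on_heap: Pin<KBox<Counter>> = KBox::pin_init(Counter::new(), GFP_KERNEL)?;

*on_stack.value.lock() += 1;
*on_heap.value.lock() += 1;
# Ok::<(), Error>(())
```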
use crate::{ - alloc::{box_ext::BoxExt, AllocError, Flags}, + alloc::{AllocError, Flags, KBox}, error::{self, Error}, sync::Arc, sync::UniqueArc, types::{Opaque, ScopeGuard}, }; -use alloc::boxed::Box; use core::{ cell::UnsafeCell, convert::Infallible, @@ -238,7 +237,7 @@ pub mod macros; /// # Examples /// /// ```rust -/// # #![allow(clippy::disallowed_names)] +/// # #![expect(clippy::disallowed_names)] /// # use kernel::{init, macros::pin_data, pin_init, stack_pin_init, init::*, sync::Mutex, new_mutex}; /// # use core::pin::Pin; /// #[pin_data] @@ -260,7 +259,7 @@ pub mod macros; /// }, /// })); /// let foo: Pin<&mut Foo> = foo; -/// pr_info!("a: {}", &*foo.a.lock()); +/// pr_info!("a: {}\n", &*foo.a.lock()); /// ``` /// /// # Syntax @@ -290,7 +289,7 @@ macro_rules! stack_pin_init { /// # Examples /// /// ```rust,ignore -/// # #![allow(clippy::disallowed_names)] +/// # #![expect(clippy::disallowed_names)] /// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex}; /// # use macros::pin_data; /// # use core::{alloc::AllocError, pin::Pin}; @@ -298,7 +297,7 @@ macro_rules! stack_pin_init { /// struct Foo { /// #[pin] /// a: Mutex, -/// b: Box, +/// b: KBox, /// } /// /// struct Bar { @@ -307,16 +306,16 @@ macro_rules! stack_pin_init { /// /// stack_try_pin_init!(let foo: Result, AllocError> = pin_init!(Foo { /// a <- new_mutex!(42), -/// b: Box::new(Bar { +/// b: KBox::new(Bar { /// x: 64, /// }, GFP_KERNEL)?, /// })); /// let foo = foo.unwrap(); -/// pr_info!("a: {}", &*foo.a.lock()); +/// pr_info!("a: {}\n", &*foo.a.lock()); /// ``` /// /// ```rust,ignore -/// # #![allow(clippy::disallowed_names)] +/// # #![expect(clippy::disallowed_names)] /// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex}; /// # use macros::pin_data; /// # use core::{alloc::AllocError, pin::Pin}; @@ -324,7 +323,7 @@ macro_rules! stack_pin_init { /// struct Foo { /// #[pin] /// a: Mutex, -/// b: Box, +/// b: KBox, /// } /// /// struct Bar { @@ -333,11 +332,11 @@ macro_rules! stack_pin_init { /// /// stack_try_pin_init!(let foo: Pin<&mut Foo> =? pin_init!(Foo { /// a <- new_mutex!(42), -/// b: Box::new(Bar { +/// b: KBox::new(Bar { /// x: 64, /// }, GFP_KERNEL)?, /// })); -/// pr_info!("a: {}", &*foo.a.lock()); +/// pr_info!("a: {}\n", &*foo.a.lock()); /// # Ok::<_, AllocError>(()) /// ``` /// @@ -368,7 +367,6 @@ macro_rules! stack_try_pin_init { /// The syntax is almost identical to that of a normal `struct` initializer: /// /// ```rust -/// # #![allow(clippy::disallowed_names)] /// # use kernel::{init, pin_init, macros::pin_data, init::*}; /// # use core::pin::Pin; /// #[pin_data] @@ -392,7 +390,7 @@ macro_rules! stack_try_pin_init { /// }, /// }); /// # initializer } -/// # Box::pin_init(demo(), GFP_KERNEL).unwrap(); +/// # KBox::pin_init(demo(), GFP_KERNEL).unwrap(); /// ``` /// /// Arbitrary Rust expressions can be used to set the value of a variable. @@ -413,7 +411,6 @@ macro_rules! stack_try_pin_init { /// To create an initializer function, simply declare it like this: /// /// ```rust -/// # #![allow(clippy::disallowed_names)] /// # use kernel::{init, pin_init, init::*}; /// # use core::pin::Pin; /// # #[pin_data] @@ -440,7 +437,7 @@ macro_rules! 
stack_try_pin_init { /// Users of `Foo` can now create it like this: /// /// ```rust -/// # #![allow(clippy::disallowed_names)] +/// # #![expect(clippy::disallowed_names)] /// # use kernel::{init, pin_init, macros::pin_data, init::*}; /// # use core::pin::Pin; /// # #[pin_data] @@ -462,13 +459,12 @@ macro_rules! stack_try_pin_init { /// # }) /// # } /// # } -/// let foo = Box::pin_init(Foo::new(), GFP_KERNEL); +/// let foo = KBox::pin_init(Foo::new(), GFP_KERNEL); /// ``` /// /// They can also easily embed it into their own `struct`s: /// /// ```rust -/// # #![allow(clippy::disallowed_names)] /// # use kernel::{init, pin_init, macros::pin_data, init::*}; /// # use core::pin::Pin; /// # #[pin_data] @@ -541,6 +537,7 @@ macro_rules! stack_try_pin_init { /// } /// pin_init!(&this in Buf { /// buf: [0; 64], +/// // SAFETY: TODO. /// ptr: unsafe { addr_of_mut!((*this.as_ptr()).buf).cast() }, /// pin: PhantomPinned, /// }); @@ -590,11 +587,10 @@ macro_rules! pin_init { /// # Examples /// /// ```rust -/// # #![feature(new_uninit)] /// use kernel::{init::{self, PinInit}, error::Error}; /// #[pin_data] /// struct BigBuf { -/// big: Box<[u8; 1024 * 1024 * 1024]>, +/// big: KBox<[u8; 1024 * 1024 * 1024]>, /// small: [u8; 1024 * 1024], /// ptr: *mut u8, /// } @@ -602,7 +598,7 @@ macro_rules! pin_init { /// impl BigBuf { /// fn new() -> impl PinInit { /// try_pin_init!(Self { -/// big: Box::init(init::zeroed(), GFP_KERNEL)?, +/// big: KBox::init(init::zeroed(), GFP_KERNEL)?, /// small: [0; 1024 * 1024], /// ptr: core::ptr::null_mut(), /// }? Error) @@ -694,16 +690,16 @@ macro_rules! init { /// # Examples /// /// ```rust -/// use kernel::{init::{PinInit, zeroed}, error::Error}; +/// use kernel::{alloc::KBox, init::{PinInit, zeroed}, error::Error}; /// struct BigBuf { -/// big: Box<[u8; 1024 * 1024 * 1024]>, +/// big: KBox<[u8; 1024 * 1024 * 1024]>, /// small: [u8; 1024 * 1024], /// } /// /// impl BigBuf { /// fn new() -> impl Init { /// try_init!(Self { -/// big: Box::init(zeroed(), GFP_KERNEL)?, +/// big: KBox::init(zeroed(), GFP_KERNEL)?, /// small: [0; 1024 * 1024], /// }? Error) /// } @@ -814,8 +810,8 @@ macro_rules! assert_pinned { /// A pin-initializer for the type `T`. /// /// To use this initializer, you will need a suitable memory location that can hold a `T`. This can -/// be [`Box`], [`Arc`], [`UniqueArc`] or even the stack (see [`stack_pin_init!`]). Use the -/// [`InPlaceInit::pin_init`] function of a smart pointer like [`Arc`] on this. +/// be [`KBox`], [`Arc`], [`UniqueArc`] or even the stack (see [`stack_pin_init!`]). Use +/// the [`InPlaceInit::pin_init`] function of a smart pointer like [`Arc`] on this. /// /// Also see the [module description](self). /// @@ -854,7 +850,7 @@ pub unsafe trait PinInit: Sized { /// # Examples /// /// ```rust - /// # #![allow(clippy::disallowed_names)] + /// # #![expect(clippy::disallowed_names)] /// use kernel::{types::Opaque, init::pin_init_from_closure}; /// #[repr(C)] /// struct RawFoo([u8; 16]); @@ -870,11 +866,12 @@ pub unsafe trait PinInit: Sized { /// /// impl Foo { /// fn setup(self: Pin<&mut Self>) { - /// pr_info!("Setting up foo"); + /// pr_info!("Setting up foo\n"); /// } /// } /// /// let foo = pin_init!(Foo { + /// // SAFETY: TODO. /// raw <- unsafe { /// Opaque::ffi_init(|s| { /// init_foo(s); @@ -894,7 +891,7 @@ pub unsafe trait PinInit: Sized { } /// An initializer returned by [`PinInit::pin_chain`]. 
-pub struct ChainPinInit(I, F, __internal::Invariant<(E, Box)>); +pub struct ChainPinInit(I, F, __internal::Invariant<(E, KBox)>); // SAFETY: The `__pinned_init` function is implemented such that it // - returns `Ok(())` on successful initialization, @@ -920,8 +917,8 @@ where /// An initializer for `T`. /// /// To use this initializer, you will need a suitable memory location that can hold a `T`. This can -/// be [`Box`], [`Arc`], [`UniqueArc`] or even the stack (see [`stack_pin_init!`]). Use the -/// [`InPlaceInit::init`] function of a smart pointer like [`Arc`] on this. Because +/// be [`KBox`], [`Arc`], [`UniqueArc`] or even the stack (see [`stack_pin_init!`]). Use +/// the [`InPlaceInit::init`] function of a smart pointer like [`Arc`] on this. Because /// [`PinInit`] is a super trait, you can use every function that takes it as well. /// /// Also see the [module description](self). @@ -965,7 +962,7 @@ pub unsafe trait Init: PinInit { /// # Examples /// /// ```rust - /// # #![allow(clippy::disallowed_names)] + /// # #![expect(clippy::disallowed_names)] /// use kernel::{types::Opaque, init::{self, init_from_closure}}; /// struct Foo { /// buf: [u8; 1_000_000], @@ -973,7 +970,7 @@ pub unsafe trait Init: PinInit { /// /// impl Foo { /// fn setup(&mut self) { - /// pr_info!("Setting up foo"); + /// pr_info!("Setting up foo\n"); /// } /// } /// @@ -993,7 +990,7 @@ pub unsafe trait Init: PinInit { } /// An initializer returned by [`Init::chain`]. -pub struct ChainInit(I, F, __internal::Invariant<(E, Box)>); +pub struct ChainInit(I, F, __internal::Invariant<(E, KBox)>); // SAFETY: The `__init` function is implemented such that it // - returns `Ok(())` on successful initialization, @@ -1077,8 +1074,9 @@ pub fn uninit() -> impl Init, E> { /// # Examples /// /// ```rust -/// use kernel::{error::Error, init::init_array_from_fn}; -/// let array: Box<[usize; 1_000]> = Box::init::(init_array_from_fn(|i| i), GFP_KERNEL).unwrap(); +/// use kernel::{alloc::KBox, error::Error, init::init_array_from_fn}; +/// let array: KBox<[usize; 1_000]> = +/// KBox::init::(init_array_from_fn(|i| i), GFP_KERNEL).unwrap(); /// assert_eq!(array.len(), 1_000); /// ``` pub fn init_array_from_fn( @@ -1162,6 +1160,7 @@ where // SAFETY: Every type can be initialized by-value. unsafe impl Init for T { unsafe fn __init(self, slot: *mut T) -> Result<(), E> { + // SAFETY: TODO. unsafe { slot.write(self) }; Ok(()) } @@ -1170,6 +1169,7 @@ unsafe impl Init for T { // SAFETY: Every type can be initialized by-value. `__pinned_init` calls `__init`. unsafe impl PinInit for T { unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> { + // SAFETY: TODO. unsafe { self.__init(slot) } } } @@ -1243,26 +1243,6 @@ impl InPlaceInit for Arc { } } -impl InPlaceInit for Box { - type PinnedSelf = Pin; - - #[inline] - fn try_pin_init(init: impl PinInit, flags: Flags) -> Result - where - E: From, - { - as BoxExt<_>>::new_uninit(flags)?.write_pin_init(init) - } - - #[inline] - fn try_init(init: impl Init, flags: Flags) -> Result - where - E: From, - { - as BoxExt<_>>::new_uninit(flags)?.write_init(init) - } -} - impl InPlaceInit for UniqueArc { type PinnedSelf = Pin; @@ -1299,28 +1279,6 @@ pub trait InPlaceWrite { fn write_pin_init(self, init: impl PinInit) -> Result, E>; } -impl InPlaceWrite for Box> { - type Initialized = Box; - - fn write_init(mut self, init: impl Init) -> Result { - let slot = self.as_mut_ptr(); - // SAFETY: When init errors/panics, slot will get deallocated but not dropped, - // slot is valid. - unsafe { init.__init(slot)? 
}; - // SAFETY: All fields have been initialized. - Ok(unsafe { self.assume_init() }) - } - - fn write_pin_init(mut self, init: impl PinInit) -> Result, E> { - let slot = self.as_mut_ptr(); - // SAFETY: When init errors/panics, slot will get deallocated but not dropped, - // slot is valid and will not be moved, because we pin it later. - unsafe { init.__pinned_init(slot)? }; - // SAFETY: All fields have been initialized. - Ok(unsafe { self.assume_init() }.into()) - } -} - impl InPlaceWrite for UniqueArc> { type Initialized = UniqueArc; @@ -1360,7 +1318,7 @@ impl InPlaceWrite for UniqueArc> { /// #[pinned_drop] /// impl PinnedDrop for Foo { /// fn drop(self: Pin<&mut Self>) { -/// pr_info!("Foo is being dropped!"); +/// pr_info!("Foo is being dropped!\n"); /// } /// } /// ``` @@ -1411,6 +1369,7 @@ pub fn zeroed() -> impl Init { macro_rules! impl_zeroable { ($($({$($generics:tt)*})? $t:ty, )*) => { + // SAFETY: Safety comments written in the macro invocation. $(unsafe impl$($($generics)*)? Zeroable for $t {})* }; } @@ -1441,17 +1400,14 @@ impl_zeroable! { // SAFETY: `T: Zeroable` and `UnsafeCell` is `repr(transparent)`. {} UnsafeCell, - // SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee). + // SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee: + // https://doc.rust-lang.org/stable/std/option/index.html#representation). Option, Option, Option, Option, Option, Option, Option, Option, Option, Option, Option, Option, - - // SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee). - // - // In this case we are allowed to use `T: ?Sized`, since all zeros is the `None` variant. - {} Option>, - {} Option>, + {} Option>, + {} Option>, // SAFETY: `null` pointer is valid. // diff --git a/rust/kernel/init/__internal.rs b/rust/kernel/init/__internal.rs index 13cefd37512f8..74329cc3262c0 100644 --- a/rust/kernel/init/__internal.rs +++ b/rust/kernel/init/__internal.rs @@ -15,9 +15,10 @@ use super::*; /// [this table]: https://doc.rust-lang.org/nomicon/phantom-data.html#table-of-phantomdata-patterns pub(super) type Invariant = PhantomData *mut T>; -/// This is the module-internal type implementing `PinInit` and `Init`. It is unsafe to create this -/// type, since the closure needs to fulfill the same safety requirement as the -/// `__pinned_init`/`__init` functions. +/// Module-internal type implementing `PinInit` and `Init`. +/// +/// It is unsafe to create this type, since the closure needs to fulfill the same safety +/// requirement as the `__pinned_init`/`__init` functions. pub(crate) struct InitClosure(pub(crate) F, pub(crate) Invariant<(E, T)>); // SAFETY: While constructing the `InitClosure`, the user promised that it upholds the @@ -53,6 +54,7 @@ where pub unsafe trait HasPinData { type PinData: PinData; + #[expect(clippy::missing_safety_doc)] unsafe fn __pin_data() -> Self::PinData; } @@ -82,6 +84,7 @@ pub unsafe trait PinData: Copy { pub unsafe trait HasInitData { type InitData: InitData; + #[expect(clippy::missing_safety_doc)] unsafe fn __init_data() -> Self::InitData; } @@ -102,7 +105,7 @@ pub unsafe trait InitData: Copy { } } -pub struct AllData(PhantomData) -> Box>); +pub struct AllData(PhantomData) -> KBox>); impl Clone for AllData { fn clone(&self) -> Self { @@ -112,10 +115,12 @@ impl Clone for AllData { impl Copy for AllData {} +// SAFETY: TODO. unsafe impl InitData for AllData { type Datee = T; } +// SAFETY: TODO. 
unsafe impl HasInitData for T { type InitData = AllData; diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs index 9a0c4650ef676..b7213962a6a5a 100644 --- a/rust/kernel/init/macros.rs +++ b/rust/kernel/init/macros.rs @@ -45,7 +45,7 @@ //! #[pinned_drop] //! impl PinnedDrop for Foo { //! fn drop(self: Pin<&mut Self>) { -//! pr_info!("{self:p} is getting dropped."); +//! pr_info!("{self:p} is getting dropped.\n"); //! } //! } //! @@ -182,13 +182,13 @@ //! // Normally `Drop` bounds do not have the correct semantics, but for this purpose they do //! // (normally people want to know if a type has any kind of drop glue at all, here we want //! // to know if it has any kind of custom drop glue, which is exactly what this bound does). -//! #[allow(drop_bounds)] +//! #[expect(drop_bounds)] //! impl MustNotImplDrop for T {} //! impl MustNotImplDrop for Bar {} //! // Here comes a convenience check, if one implemented `PinnedDrop`, but forgot to add it to //! // `#[pin_data]`, then this will error with the same mechanic as above, this is not needed //! // for safety, but a good sanity check, since no normal code calls `PinnedDrop::drop`. -//! #[allow(non_camel_case_types)] +//! #[expect(non_camel_case_types)] //! trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {} //! impl< //! T: ::kernel::init::PinnedDrop, @@ -412,7 +412,7 @@ //! #[pinned_drop] //! impl PinnedDrop for Foo { //! fn drop(self: Pin<&mut Self>) { -//! pr_info!("{self:p} is getting dropped."); +//! pr_info!("{self:p} is getting dropped.\n"); //! } //! } //! ``` @@ -423,7 +423,7 @@ //! // `unsafe`, full path and the token parameter are added, everything else stays the same. //! unsafe impl ::kernel::init::PinnedDrop for Foo { //! fn drop(self: Pin<&mut Self>, _: ::kernel::init::__internal::OnlyCallFromDrop) { -//! pr_info!("{self:p} is getting dropped."); +//! pr_info!("{self:p} is getting dropped.\n"); //! } //! } //! ``` @@ -513,6 +513,7 @@ macro_rules! __pinned_drop { } ), ) => { + // SAFETY: TODO. unsafe $($impl_sig)* { // Inherit all attributes and the type/ident tokens for the signature. $(#[$($attr)*])* @@ -872,6 +873,7 @@ macro_rules! __pin_data { } } + // SAFETY: TODO. unsafe impl<$($impl_generics)*> $crate::init::__internal::PinData for __ThePinData<$($ty_generics)*> where $($whr)* @@ -923,14 +925,14 @@ macro_rules! __pin_data { // `Drop`. Additionally we will implement this trait for the struct leading to a conflict, // if it also implements `Drop` trait MustNotImplDrop {} - #[allow(drop_bounds)] + #[expect(drop_bounds)] impl MustNotImplDrop for T {} impl<$($impl_generics)*> MustNotImplDrop for $name<$($ty_generics)*> where $($whr)* {} // We also take care to prevent users from writing a useless `PinnedDrop` implementation. // They might implement `PinnedDrop` correctly for the struct, but forget to give // `PinnedDrop` as the parameter to `#[pin_data]`. - #[allow(non_camel_case_types)] + #[expect(non_camel_case_types)] trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {} impl UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for T {} @@ -987,6 +989,7 @@ macro_rules! __pin_data { // // The functions are `unsafe` to prevent accidentally calling them. #[allow(dead_code)] + #[expect(clippy::missing_safety_doc)] impl<$($impl_generics)*> $pin_data<$($ty_generics)*> where $($whr)* { @@ -997,6 +1000,7 @@ macro_rules! __pin_data { slot: *mut $p_type, init: impl $crate::init::PinInit<$p_type, E>, ) -> ::core::result::Result<(), E> { + // SAFETY: TODO. 
unsafe { $crate::init::PinInit::__pinned_init(init, slot) } } )* @@ -1007,6 +1011,7 @@ macro_rules! __pin_data { slot: *mut $type, init: impl $crate::init::Init<$type, E>, ) -> ::core::result::Result<(), E> { + // SAFETY: TODO. unsafe { $crate::init::Init::__init(init, slot) } } )* @@ -1121,6 +1126,8 @@ macro_rules! __init_internal { // no possibility of returning without `unsafe`. struct __InitOk; // Get the data about fields from the supplied type. + // + // SAFETY: TODO. let data = unsafe { use $crate::init::__internal::$has_data; // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal @@ -1176,6 +1183,7 @@ macro_rules! __init_internal { let init = move |slot| -> ::core::result::Result<(), $err> { init(slot).map(|__InitOk| ()) }; + // SAFETY: TODO. let init = unsafe { $crate::init::$construct_closure::<_, $err>(init) }; init }}; @@ -1324,6 +1332,8 @@ macro_rules! __init_internal { // Endpoint, nothing more to munch, create the initializer. // Since we are in the closure that is never called, this will never get executed. // We abuse `slot` to get the correct type inference here: + // + // SAFETY: TODO. unsafe { // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal // information that is associated to already parsed fragments, so a path fragment diff --git a/rust/kernel/ioctl.rs b/rust/kernel/ioctl.rs index cfa7d080b5319..2fc7662339e54 100644 --- a/rust/kernel/ioctl.rs +++ b/rust/kernel/ioctl.rs @@ -4,7 +4,7 @@ //! //! C header: [`include/asm-generic/ioctl.h`](srctree/include/asm-generic/ioctl.h) -#![allow(non_snake_case)] +#![expect(non_snake_case)] use crate::build_assert; diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index e936254531fd0..904d241604db9 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -6,7 +6,7 @@ //! usage by Rust code in the kernel and is shared by all of them. //! //! In other words, all the rest of the Rust code in the kernel (e.g. kernel -//! modules written in Rust) depends on [`core`], [`alloc`] and this crate. +//! modules written in Rust) depends on [`core`] and this crate. //! //! If you need a kernel C API that is not ported or wrapped yet here, then //! do so first instead of bypassing this crate. @@ -15,7 +15,8 @@ #![feature(arbitrary_self_types)] #![feature(coerce_unsized)] #![feature(dispatch_from_dyn)] -#![feature(new_uninit)] +#![feature(inline_const)] +#![feature(lint_reasons)] #![feature(unsize)] // Ensure conditional compilation based on the kernel configuration works; @@ -26,6 +27,8 @@ compile_error!("Missing kernel configuration for conditional compilation"); // Allow proc-macros to refer to `::kernel` inside the `kernel` crate (this crate). extern crate self as kernel; +pub use ffi; + pub mod alloc; #[cfg(CONFIG_BLOCK)] pub mod block; diff --git a/rust/kernel/list.rs b/rust/kernel/list.rs index 5b4aec29eb675..fb93330f4af48 100644 --- a/rust/kernel/list.rs +++ b/rust/kernel/list.rs @@ -354,6 +354,7 @@ impl, const ID: u64> List { /// /// `item` must not be in a different linked list (with the same id). pub unsafe fn remove(&mut self, item: &T) -> Option> { + // SAFETY: TODO. let mut item = unsafe { ListLinks::fields(T::view_links(item)) }; // SAFETY: The user provided a reference, and reference are never dangling. 
// diff --git a/rust/kernel/list/arc_field.rs b/rust/kernel/list/arc_field.rs index 2330f673427ab..c4b9dd5039826 100644 --- a/rust/kernel/list/arc_field.rs +++ b/rust/kernel/list/arc_field.rs @@ -56,7 +56,7 @@ impl ListArcField { /// /// The caller must have mutable access to the `ListArc` containing the struct with this /// field for the duration of the returned reference. - #[allow(clippy::mut_from_ref)] + #[expect(clippy::mut_from_ref)] pub unsafe fn assert_mut(&self) -> &mut T { // SAFETY: The caller has exclusive access to the `ListArc`, so they also have exclusive // access to this field. diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs index 910ce867480a8..beb62ec712c37 100644 --- a/rust/kernel/net/phy.rs +++ b/rust/kernel/net/phy.rs @@ -314,7 +314,7 @@ impl Adapter { /// `phydev` must be passed by the corresponding callback in `phy_driver`. unsafe extern "C" fn soft_reset_callback( phydev: *mut bindings::phy_device, - ) -> core::ffi::c_int { + ) -> crate::ffi::c_int { from_result(|| { // SAFETY: This callback is called only in contexts // where we hold `phy_device->lock`, so the accessors on @@ -328,7 +328,7 @@ impl Adapter { /// # Safety /// /// `phydev` must be passed by the corresponding callback in `phy_driver`. - unsafe extern "C" fn probe_callback(phydev: *mut bindings::phy_device) -> core::ffi::c_int { + unsafe extern "C" fn probe_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int { from_result(|| { // SAFETY: This callback is called only in contexts // where we can exclusively access `phy_device` because @@ -345,7 +345,7 @@ impl Adapter { /// `phydev` must be passed by the corresponding callback in `phy_driver`. unsafe extern "C" fn get_features_callback( phydev: *mut bindings::phy_device, - ) -> core::ffi::c_int { + ) -> crate::ffi::c_int { from_result(|| { // SAFETY: This callback is called only in contexts // where we hold `phy_device->lock`, so the accessors on @@ -359,7 +359,7 @@ impl Adapter { /// # Safety /// /// `phydev` must be passed by the corresponding callback in `phy_driver`. - unsafe extern "C" fn suspend_callback(phydev: *mut bindings::phy_device) -> core::ffi::c_int { + unsafe extern "C" fn suspend_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int { from_result(|| { // SAFETY: The C core code ensures that the accessors on // `Device` are okay to call even though `phy_device->lock` @@ -373,7 +373,7 @@ impl Adapter { /// # Safety /// /// `phydev` must be passed by the corresponding callback in `phy_driver`. - unsafe extern "C" fn resume_callback(phydev: *mut bindings::phy_device) -> core::ffi::c_int { + unsafe extern "C" fn resume_callback(phydev: *mut bindings::phy_device) -> crate::ffi::c_int { from_result(|| { // SAFETY: The C core code ensures that the accessors on // `Device` are okay to call even though `phy_device->lock` @@ -389,7 +389,7 @@ impl Adapter { /// `phydev` must be passed by the corresponding callback in `phy_driver`. unsafe extern "C" fn config_aneg_callback( phydev: *mut bindings::phy_device, - ) -> core::ffi::c_int { + ) -> crate::ffi::c_int { from_result(|| { // SAFETY: This callback is called only in contexts // where we hold `phy_device->lock`, so the accessors on @@ -405,7 +405,7 @@ impl Adapter { /// `phydev` must be passed by the corresponding callback in `phy_driver`. 
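These `phy_driver` callbacks all share one shape: an `unsafe extern "C"` entry point that returns the kernel's `ffi::c_int` and funnels a Rust `Result` through `from_result`, so errors become negative errno values for the C caller. A minimal sketch of that shape, written as if inside the `kernel` crate (where `from_result` is visible); the callback name and argument are illustrative only:

```rust
use crate::error::from_result;

/// # Safety
///
/// `arg` must be valid for the duration of the call (illustrative requirement).
unsafe extern "C" fn example_callback(arg: *mut crate::ffi::c_void) -> crate::ffi::c_int {
    from_result(|| {
        // Do the real work here; `Ok(0)` means success, while returning
        // `Err(e)` makes `from_result` hand the corresponding negative errno
        // back to the C caller.
        let _ = arg;
        Ok(0)
    })
}
```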
unsafe extern "C" fn read_status_callback( phydev: *mut bindings::phy_device, - ) -> core::ffi::c_int { + ) -> crate::ffi::c_int { from_result(|| { // SAFETY: This callback is called only in contexts // where we hold `phy_device->lock`, so the accessors on @@ -421,7 +421,7 @@ impl Adapter { /// `phydev` must be passed by the corresponding callback in `phy_driver`. unsafe extern "C" fn match_phy_device_callback( phydev: *mut bindings::phy_device, - ) -> core::ffi::c_int { + ) -> crate::ffi::c_int { // SAFETY: This callback is called only in contexts // where we hold `phy_device->lock`, so the accessors on // `Device` are okay to call. diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs index 4571daec0961b..8bdab9aa0d16b 100644 --- a/rust/kernel/prelude.rs +++ b/rust/kernel/prelude.rs @@ -14,10 +14,7 @@ #[doc(no_inline)] pub use core::pin::Pin; -pub use crate::alloc::{box_ext::BoxExt, flags::*, vec_ext::VecExt}; - -#[doc(no_inline)] -pub use alloc::{boxed::Box, vec::Vec}; +pub use crate::alloc::{flags::*, Box, KBox, KVBox, KVVec, KVec, VBox, VVec, Vec}; #[doc(no_inline)] pub use macros::{module, pin_data, pinned_drop, vtable, Zeroable}; diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs index 508b0221256c9..e52cd64333bcc 100644 --- a/rust/kernel/print.rs +++ b/rust/kernel/print.rs @@ -6,14 +6,14 @@ //! //! Reference: -use core::{ +use crate::{ ffi::{c_char, c_void}, - fmt, + str::RawFormatter, }; - -use crate::str::RawFormatter; +use core::fmt; // Called from `vsprintf` with format specifier `%pA`. +#[expect(clippy::missing_safety_doc)] #[no_mangle] unsafe extern "C" fn rust_fmt_argument( buf: *mut c_char, @@ -23,6 +23,7 @@ unsafe extern "C" fn rust_fmt_argument( use fmt::Write; // SAFETY: The C contract guarantees that `buf` is valid if it's less than `end`. let mut w = unsafe { RawFormatter::from_ptrs(buf.cast(), end.cast()) }; + // SAFETY: TODO. let _ = w.write_fmt(unsafe { *(ptr as *const fmt::Arguments<'_>) }); w.pos().cast() } @@ -102,6 +103,7 @@ pub unsafe fn call_printk( ) { // `_printk` does not seem to fail in any path. #[cfg(CONFIG_PRINTK)] + // SAFETY: TODO. unsafe { bindings::_printk( format_string.as_ptr() as _, @@ -137,7 +139,7 @@ pub fn call_printk_cont(args: fmt::Arguments<'_>) { #[doc(hidden)] #[cfg(not(testlib))] #[macro_export] -#[allow(clippy::crate_in_macro_def)] +#[expect(clippy::crate_in_macro_def)] macro_rules! print_macro ( // The non-continuation cases (most of them, e.g. `INFO`). ($format_string:path, false, $($arg:tt)+) => ( diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs index 7543378d37292..571e27efe5448 100644 --- a/rust/kernel/rbtree.rs +++ b/rust/kernel/rbtree.rs @@ -7,7 +7,6 @@ //! Reference: use crate::{alloc::Flags, bindings, container_of, error::Result, prelude::*}; -use alloc::boxed::Box; use core::{ cmp::{Ord, Ordering}, marker::PhantomData, @@ -497,7 +496,7 @@ impl Drop for RBTree { // but it is not observable. The loop invariant is still maintained. // SAFETY: `this` is valid per the loop invariant. - unsafe { drop(Box::from_raw(this.cast_mut())) }; + unsafe { drop(KBox::from_raw(this.cast_mut())) }; } } } @@ -764,7 +763,7 @@ impl<'a, K, V> Cursor<'a, K, V> { // point to the links field of `Node` objects. let this = unsafe { container_of!(self.current.as_ptr(), Node, links) }.cast_mut(); // SAFETY: `this` is valid by the type invariants as described above. 
- let node = unsafe { Box::from_raw(this) }; + let node = unsafe { KBox::from_raw(this) }; let node = RBTreeNode { node }; // SAFETY: The reference to the tree used to create the cursor outlives the cursor, so // the tree cannot change. By the tree invariant, all nodes are valid. @@ -809,7 +808,7 @@ impl<'a, K, V> Cursor<'a, K, V> { // point to the links field of `Node` objects. let this = unsafe { container_of!(neighbor, Node, links) }.cast_mut(); // SAFETY: `this` is valid by the type invariants as described above. - let node = unsafe { Box::from_raw(this) }; + let node = unsafe { KBox::from_raw(this) }; return Some(RBTreeNode { node }); } None @@ -1038,7 +1037,7 @@ impl Iterator for IterRaw { /// It contains the memory needed to hold a node that can be inserted into a red-black tree. One /// can be obtained by directly allocating it ([`RBTreeNodeReservation::new`]). pub struct RBTreeNodeReservation { - node: Box>>, + node: KBox>>, } impl RBTreeNodeReservation { @@ -1046,7 +1045,7 @@ impl RBTreeNodeReservation { /// call to [`RBTree::insert`]. pub fn new(flags: Flags) -> Result> { Ok(RBTreeNodeReservation { - node: as BoxExt<_>>::new_uninit(flags)?, + node: KBox::new_uninit(flags)?, }) } } @@ -1062,14 +1061,15 @@ impl RBTreeNodeReservation { /// Initialises a node reservation. /// /// It then becomes an [`RBTreeNode`] that can be inserted into a tree. - pub fn into_node(mut self, key: K, value: V) -> RBTreeNode { - self.node.write(Node { - key, - value, - links: bindings::rb_node::default(), - }); - // SAFETY: We just wrote to it. - let node = unsafe { self.node.assume_init() }; + pub fn into_node(self, key: K, value: V) -> RBTreeNode { + let node = KBox::write( + self.node, + Node { + key, + value, + links: bindings::rb_node::default(), + }, + ); RBTreeNode { node } } } @@ -1079,7 +1079,7 @@ impl RBTreeNodeReservation { /// The node is fully initialised (with key and value) and can be inserted into a tree without any /// extra allocations or failure paths. pub struct RBTreeNode { - node: Box>, + node: KBox>, } impl RBTreeNode { @@ -1091,7 +1091,9 @@ impl RBTreeNode { /// Get the key and value from inside the node. pub fn to_key_value(self) -> (K, V) { - (self.node.key, self.node.value) + let node = KBox::into_inner(self.node); + + (node.key, node.value) } } @@ -1113,7 +1115,7 @@ impl RBTreeNode { /// may be freed (but only for the key/value; memory for the node itself is kept for reuse). pub fn into_reservation(self) -> RBTreeNodeReservation { RBTreeNodeReservation { - node: Box::drop_contents(self.node), + node: KBox::drop_contents(self.node), } } } @@ -1164,7 +1166,7 @@ impl<'a, K, V> RawVacantEntry<'a, K, V> { /// The `node` must have a key such that inserting it here does not break the ordering of this /// [`RBTree`]. fn insert(self, node: RBTreeNode) -> &'a mut V { - let node = Box::into_raw(node.node); + let node = KBox::into_raw(node.node); // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when // the node is removed or replaced. @@ -1238,21 +1240,24 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { // SAFETY: The node was a node in the tree, but we removed it, so we can convert it // back into a box. node: unsafe { - Box::from_raw(container_of!(self.node_links, Node, links).cast_mut()) + KBox::from_raw(container_of!(self.node_links, Node, links).cast_mut()) }, } } /// Takes the value of the entry out of the map, and returns it. 
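For orientation, a small sketch (illustrative, in-kernel context assumed) of how the reservation and node types above combine in practice, using `i32` keys and values:

```rust
use kernel::prelude::*;
use kernel::rbtree::{RBTree, RBTreeNodeReservation};

fn insert_ten(tree: &mut RBTree<i32, i32>) -> Result {
    // Allocate the node memory up front; this is the only step that can fail.
    let reservation = RBTreeNodeReservation::new(GFP_KERNEL)?;
    // Turning the reservation into a keyed node and inserting it cannot fail,
    // which is what makes preallocation useful when the insertion itself must
    // happen in a context that may not allocate.
    let node = reservation.into_node(10, 100);
    // `insert` returns the displaced node if the key already existed.
    let _old = tree.insert(node);
    Ok(())
}
```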
pub fn remove(self) -> V { - self.remove_node().node.value + let rb_node = self.remove_node(); + let node = KBox::into_inner(rb_node.node); + + node.value } /// Swap the current node for the provided node. /// /// The key of both nodes must be equal. fn replace(self, node: RBTreeNode) -> RBTreeNode { - let node = Box::into_raw(node.node); + let node = KBox::into_raw(node.node); // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when // the node is removed or replaced. @@ -1268,7 +1273,7 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { // - `self.node_ptr` produces a valid pointer to a node in the tree. // - Now that we removed this entry from the tree, we can convert the node to a box. let old_node = - unsafe { Box::from_raw(container_of!(self.node_links, Node, links).cast_mut()) }; + unsafe { KBox::from_raw(container_of!(self.node_links, Node, links).cast_mut()) }; RBTreeNode { node: old_node } } diff --git a/rust/kernel/std_vendor.rs b/rust/kernel/std_vendor.rs index 67bf9d37ddb55..8b4872b48e977 100644 --- a/rust/kernel/std_vendor.rs +++ b/rust/kernel/std_vendor.rs @@ -1,5 +1,7 @@ // SPDX-License-Identifier: Apache-2.0 OR MIT +//! Rust standard library vendored code. +//! //! The contents of this file come from the Rust standard library, hosted in //! the repository, licensed under //! "Apache-2.0 OR MIT" and adapted for kernel use. For copyright details, @@ -14,7 +16,7 @@ /// /// ```rust /// let a = 2; -/// # #[allow(clippy::dbg_macro)] +/// # #[expect(clippy::disallowed_macros)] /// let b = dbg!(a * 2) + 1; /// // ^-- prints: [src/main.rs:2] a * 2 = 4 /// assert_eq!(b, 5); @@ -52,7 +54,7 @@ /// With a method call: /// /// ```rust -/// # #[allow(clippy::dbg_macro)] +/// # #[expect(clippy::disallowed_macros)] /// fn foo(n: usize) { /// if dbg!(n.checked_sub(4)).is_some() { /// // ... @@ -71,7 +73,7 @@ /// Naive factorial implementation: /// /// ```rust -/// # #[allow(clippy::dbg_macro)] +/// # #[expect(clippy::disallowed_macros)] /// # { /// fn factorial(n: u32) -> u32 { /// if dbg!(n <= 1) { @@ -118,7 +120,7 @@ /// a tuple (and return it, too): /// /// ``` -/// # #[allow(clippy::dbg_macro)] +/// # #![expect(clippy::disallowed_macros)] /// assert_eq!(dbg!(1usize, 2u32), (1, 2)); /// ``` /// @@ -127,7 +129,7 @@ /// invocations. You can use a 1-tuple directly if you need one: /// /// ``` -/// # #[allow(clippy::dbg_macro)] +/// # #[expect(clippy::disallowed_macros)] /// # { /// assert_eq!(1, dbg!(1u32,)); // trailing comma ignored /// assert_eq!((1,), dbg!((1u32,))); // 1-tuple diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs index bb8d4f41475b5..d04c12a1426d1 100644 --- a/rust/kernel/str.rs +++ b/rust/kernel/str.rs @@ -2,8 +2,7 @@ //! String representations. -use crate::alloc::{flags::*, vec_ext::VecExt, AllocError}; -use alloc::vec::Vec; +use crate::alloc::{flags::*, AllocError, KVec}; use core::fmt::{self, Write}; use core::ops::{self, Deref, DerefMut, Index}; @@ -162,10 +161,10 @@ impl CStr { /// Returns the length of this string with `NUL`. #[inline] pub const fn len_with_nul(&self) -> usize { - // SAFETY: This is one of the invariant of `CStr`. - // We add a `unreachable_unchecked` here to hint the optimizer that - // the value returned from this function is non-zero. if self.0.is_empty() { + // SAFETY: This is one of the invariant of `CStr`. + // We add a `unreachable_unchecked` here to hint the optimizer that + // the value returned from this function is non-zero. 
unsafe { core::hint::unreachable_unchecked() }; } self.0.len() @@ -185,7 +184,7 @@ impl CStr { /// last at least `'a`. When `CStr` is alive, the memory pointed by `ptr` /// must not be mutated. #[inline] - pub unsafe fn from_char_ptr<'a>(ptr: *const core::ffi::c_char) -> &'a Self { + pub unsafe fn from_char_ptr<'a>(ptr: *const crate::ffi::c_char) -> &'a Self { // SAFETY: The safety precondition guarantees `ptr` is a valid pointer // to a `NUL`-terminated C string. let len = unsafe { bindings::strlen(ptr) } + 1; @@ -248,7 +247,7 @@ impl CStr { /// Returns a C pointer to the string. #[inline] - pub const fn as_char_ptr(&self) -> *const core::ffi::c_char { + pub const fn as_char_ptr(&self) -> *const crate::ffi::c_char { self.0.as_ptr() as _ } @@ -301,6 +300,7 @@ impl CStr { /// ``` #[inline] pub unsafe fn as_str_unchecked(&self) -> &str { + // SAFETY: TODO. unsafe { core::str::from_utf8_unchecked(self.as_bytes()) } } @@ -524,7 +524,28 @@ macro_rules! c_str { #[cfg(test)] mod tests { use super::*; - use alloc::format; + + struct String(CString); + + impl String { + fn from_fmt(args: fmt::Arguments<'_>) -> Self { + String(CString::try_from_fmt(args).unwrap()) + } + } + + impl Deref for String { + type Target = str; + + fn deref(&self) -> &str { + self.0.to_str().unwrap() + } + } + + macro_rules! format { + ($($f:tt)*) => ({ + &*String::from_fmt(kernel::fmt!($($f)*)) + }) + } const ALL_ASCII_CHARS: &'static str = "\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0a\\x0b\\x0c\\x0d\\x0e\\x0f\ @@ -790,7 +811,7 @@ impl fmt::Write for Formatter { /// assert_eq!(s.is_ok(), false); /// ``` pub struct CString { - buf: Vec, + buf: KVec, } impl CString { @@ -803,7 +824,7 @@ impl CString { let size = f.bytes_written(); // Allocate a vector with the required number of bytes, and write to it. - let mut buf = as VecExt<_>>::with_capacity(size, GFP_KERNEL)?; + let mut buf = KVec::with_capacity(size, GFP_KERNEL)?; // SAFETY: The buffer stored in `buf` is at least of size `size` and is valid for writes. let mut f = unsafe { Formatter::from_buffer(buf.as_mut_ptr(), size) }; f.write_fmt(args)?; @@ -850,10 +871,9 @@ impl<'a> TryFrom<&'a CStr> for CString { type Error = AllocError; fn try_from(cstr: &'a CStr) -> Result { - let mut buf = Vec::new(); + let mut buf = KVec::new(); - as VecExt<_>>::extend_from_slice(&mut buf, cstr.as_bytes_with_nul(), GFP_KERNEL) - .map_err(|_| AllocError)?; + buf.extend_from_slice(cstr.as_bytes_with_nul(), GFP_KERNEL)?; // INVARIANT: The `CStr` and `CString` types have the same invariants for // the string data, and we copied it over without changes. diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs index 0ab20975a3b5d..697649ddef72e 100644 --- a/rust/kernel/sync.rs +++ b/rust/kernel/sync.rs @@ -27,28 +27,20 @@ pub struct LockClassKey(Opaque); unsafe impl Sync for LockClassKey {} impl LockClassKey { - /// Creates a new lock class key. - pub const fn new() -> Self { - Self(Opaque::uninit()) - } - pub(crate) fn as_ptr(&self) -> *mut bindings::lock_class_key { self.0.get() } } -impl Default for LockClassKey { - fn default() -> Self { - Self::new() - } -} - /// Defines a new static lock class and returns a pointer to it. #[doc(hidden)] #[macro_export] macro_rules! 
static_lock_class { () => {{ - static CLASS: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new(); + static CLASS: $crate::sync::LockClassKey = + // SAFETY: lockdep expects uninitialized memory when it's handed a statically allocated + // lock_class_key + unsafe { ::core::mem::MaybeUninit::uninit().assume_init() }; &CLASS }}; } diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs index 28743a7c74a84..fa4509406ee90 100644 --- a/rust/kernel/sync/arc.rs +++ b/rust/kernel/sync/arc.rs @@ -17,13 +17,12 @@ //! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html use crate::{ - alloc::{box_ext::BoxExt, AllocError, Flags}, + alloc::{AllocError, Flags, KBox}, bindings, init::{self, InPlaceInit, Init, PinInit}, try_init, types::{ForeignOwnable, Opaque}, }; -use alloc::boxed::Box; use core::{ alloc::Layout, fmt, @@ -201,11 +200,11 @@ impl Arc { data: contents, }; - let inner = as BoxExt<_>>::new(value, flags)?; + let inner = KBox::new(value, flags)?; // SAFETY: We just created `inner` with a reference count of 1, which is owned by the new // `Arc` object. - Ok(unsafe { Self::from_inner(Box::leak(inner).into()) }) + Ok(unsafe { Self::from_inner(KBox::leak(inner).into()) }) } } @@ -333,12 +332,12 @@ impl Arc { impl ForeignOwnable for Arc { type Borrowed<'a> = ArcBorrow<'a, T>; - fn into_foreign(self) -> *const core::ffi::c_void { + fn into_foreign(self) -> *const crate::ffi::c_void { ManuallyDrop::new(self).ptr.as_ptr() as _ } - unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> ArcBorrow<'a, T> { - // SAFETY: By the safety requirement of this function, we know that `ptr` came from + unsafe fn borrow<'a>(ptr: *const crate::ffi::c_void) -> ArcBorrow<'a, T> { + // By the safety requirement of this function, we know that `ptr` came from // a previous call to `Arc::into_foreign`. let inner = NonNull::new(ptr as *mut ArcInner).unwrap(); @@ -347,7 +346,7 @@ impl ForeignOwnable for Arc { unsafe { ArcBorrow::new(inner) } } - unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self { + unsafe fn from_foreign(ptr: *const crate::ffi::c_void) -> Self { // SAFETY: By the safety requirement of this function, we know that `ptr` came from // a previous call to `Arc::into_foreign`, which guarantees that `ptr` is valid and // holds a reference count increment that is transferrable to us. @@ -398,8 +397,8 @@ impl Drop for Arc { if is_zero { // The count reached zero, we must free the memory. // - // SAFETY: The pointer was initialised from the result of `Box::leak`. - unsafe { drop(Box::from_raw(self.ptr.as_ptr())) }; + // SAFETY: The pointer was initialised from the result of `KBox::leak`. + unsafe { drop(KBox::from_raw(self.ptr.as_ptr())) }; } } } @@ -641,7 +640,7 @@ impl UniqueArc { /// Tries to allocate a new [`UniqueArc`] instance whose contents are not initialised yet. pub fn new_uninit(flags: Flags) -> Result>, AllocError> { // INVARIANT: The refcount is initialised to a non-zero value. - let inner = Box::try_init::( + let inner = KBox::try_init::( try_init!(ArcInner { // SAFETY: There are no safety requirements for this FFI call. refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }), @@ -651,8 +650,8 @@ impl UniqueArc { )?; Ok(UniqueArc { // INVARIANT: The newly-created object has a refcount of 1. - // SAFETY: The pointer from the `Box` is valid. - inner: unsafe { Arc::from_inner(Box::leak(inner).into()) }, + // SAFETY: The pointer from the `KBox` is valid. 
+ inner: unsafe { Arc::from_inner(KBox::leak(inner).into()) }, }) } } diff --git a/rust/kernel/sync/arc/std_vendor.rs b/rust/kernel/sync/arc/std_vendor.rs index a66a0c2831b3e..11b3f4ecca5f7 100644 --- a/rust/kernel/sync/arc/std_vendor.rs +++ b/rust/kernel/sync/arc/std_vendor.rs @@ -1,5 +1,7 @@ // SPDX-License-Identifier: Apache-2.0 OR MIT +//! Rust standard library vendored code. +//! //! The contents of this file come from the Rust standard library, hosted in //! the repository, licensed under //! "Apache-2.0 OR MIT" and adapted for kernel use. For copyright details, diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs index 2b306afbe56d9..7df565038d7d0 100644 --- a/rust/kernel/sync/condvar.rs +++ b/rust/kernel/sync/condvar.rs @@ -7,6 +7,7 @@ use super::{lock::Backend, lock::Guard, LockClassKey}; use crate::{ + ffi::{c_int, c_long}, init::PinInit, pin_init, str::CStr, @@ -14,7 +15,6 @@ use crate::{ time::Jiffies, types::Opaque, }; -use core::ffi::{c_int, c_long}; use core::marker::PhantomPinned; use core::ptr; use macros::pin_data; @@ -70,8 +70,8 @@ pub use new_condvar; /// } /// /// /// Allocates a new boxed `Example`. -/// fn new_example() -> Result>> { -/// Box::pin_init(pin_init!(Example { +/// fn new_example() -> Result>> { +/// KBox::pin_init(pin_init!(Example { /// value <- new_mutex!(0), /// value_changed <- new_condvar!(), /// }), GFP_KERNEL) @@ -93,7 +93,6 @@ pub struct CondVar { } // SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on any thread. -#[allow(clippy::non_send_fields_in_send_ty)] unsafe impl Send for CondVar {} // SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on multiple threads diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs index f6c34ca4d819f..528eb69072312 100644 --- a/rust/kernel/sync/lock.rs +++ b/rust/kernel/sync/lock.rs @@ -46,7 +46,7 @@ pub unsafe trait Backend { /// remain valid for read indefinitely. unsafe fn init( ptr: *mut Self::State, - name: *const core::ffi::c_char, + name: *const crate::ffi::c_char, key: *mut bindings::lock_class_key, ); @@ -150,9 +150,9 @@ impl Guard<'_, T, B> { // SAFETY: The caller owns the lock, so it is safe to unlock it. unsafe { B::unlock(self.lock.state.get(), &self.state) }; - // SAFETY: The lock was just unlocked above and is being relocked now. - let _relock = - ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) }); + let _relock = ScopeGuard::new(|| + // SAFETY: The lock was just unlocked above and is being relocked now. + unsafe { B::relock(self.lock.state.get(), &mut self.state) }); cb() } diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs index 30632070ee670..59a872cbcac64 100644 --- a/rust/kernel/sync/lock/mutex.rs +++ b/rust/kernel/sync/lock/mutex.rs @@ -58,7 +58,7 @@ pub use new_mutex; /// } /// /// // Allocate a boxed `Example`. 
-/// let e = Box::pin_init(Example::new(), GFP_KERNEL)?; +/// let e = KBox::pin_init(Example::new(), GFP_KERNEL)?; /// assert_eq!(e.c, 10); /// assert_eq!(e.d.lock().a, 20); /// assert_eq!(e.d.lock().b, 30); @@ -96,7 +96,7 @@ unsafe impl super::Backend for MutexBackend { unsafe fn init( ptr: *mut Self::State, - name: *const core::ffi::c_char, + name: *const crate::ffi::c_char, key: *mut bindings::lock_class_key, ) { // SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs index ea5c5bc1ce12e..b77eed1789ad0 100644 --- a/rust/kernel/sync/lock/spinlock.rs +++ b/rust/kernel/sync/lock/spinlock.rs @@ -56,7 +56,7 @@ pub use new_spinlock; /// } /// /// // Allocate a boxed `Example`. -/// let e = Box::pin_init(Example::new(), GFP_KERNEL)?; +/// let e = KBox::pin_init(Example::new(), GFP_KERNEL)?; /// assert_eq!(e.c, 10); /// assert_eq!(e.d.lock().a, 20); /// assert_eq!(e.d.lock().b, 30); @@ -95,7 +95,7 @@ unsafe impl super::Backend for SpinLockBackend { unsafe fn init( ptr: *mut Self::State, - name: *const core::ffi::c_char, + name: *const crate::ffi::c_char, key: *mut bindings::lock_class_key, ) { // SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs index ce2ee8d878658..a7b244675c2b9 100644 --- a/rust/kernel/sync/locked_by.rs +++ b/rust/kernel/sync/locked_by.rs @@ -43,7 +43,7 @@ use core::{cell::UnsafeCell, mem::size_of, ptr}; /// struct InnerDirectory { /// /// The sum of the bytes used by all files. /// bytes_used: u64, -/// _files: Vec, +/// _files: KVec, /// } /// /// struct Directory { diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs index 55dff7e088bf5..5bce090a38697 100644 --- a/rust/kernel/task.rs +++ b/rust/kernel/task.rs @@ -4,13 +4,9 @@ //! //! C header: [`include/linux/sched.h`](srctree/include/linux/sched.h). +use crate::ffi::{c_int, c_long, c_uint}; use crate::types::Opaque; -use core::{ - ffi::{c_int, c_long, c_uint}, - marker::PhantomData, - ops::Deref, - ptr, -}; +use core::{marker::PhantomData, ops::Deref, ptr}; /// A sentinel value used for infinite timeouts. pub const MAX_SCHEDULE_TIMEOUT: c_long = c_long::MAX; diff --git a/rust/kernel/time.rs b/rust/kernel/time.rs index e3bb5e89f88da..379c0f5772e57 100644 --- a/rust/kernel/time.rs +++ b/rust/kernel/time.rs @@ -12,10 +12,10 @@ pub const NSEC_PER_MSEC: i64 = bindings::NSEC_PER_MSEC as i64; /// The time unit of Linux kernel. One jiffy equals (1/HZ) second. -pub type Jiffies = core::ffi::c_ulong; +pub type Jiffies = crate::ffi::c_ulong; /// The millisecond time unit. -pub type Msecs = core::ffi::c_uint; +pub type Msecs = crate::ffi::c_uint; /// Converts milliseconds to jiffies. #[inline] diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs index 9e7ca066355cd..7c8c531ef1909 100644 --- a/rust/kernel/types.rs +++ b/rust/kernel/types.rs @@ -3,13 +3,11 @@ //! Kernel types. use crate::init::{self, PinInit}; -use alloc::boxed::Box; use core::{ cell::UnsafeCell, marker::{PhantomData, PhantomPinned}, mem::{ManuallyDrop, MaybeUninit}, ops::{Deref, DerefMut}, - pin::Pin, ptr::NonNull, }; @@ -31,7 +29,7 @@ pub trait ForeignOwnable: Sized { /// For example, it might be invalid, dangling or pointing to uninitialized memory. Using it in /// any way except for [`ForeignOwnable::from_foreign`], [`ForeignOwnable::borrow`], /// [`ForeignOwnable::try_from_foreign`] can result in undefined behavior. 
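A short sketch of the ownership round trip these `ForeignOwnable` methods describe, using the `Arc` implementation shown earlier in this patch; the `stash`/`unstash` names are illustrative only:

```rust
use kernel::prelude::*;
use kernel::sync::Arc;
use kernel::types::ForeignOwnable;

/// Hands ownership of the refcount held by `value` to the C side.
fn stash(value: Arc<u32>) -> *const kernel::ffi::c_void {
    value.into_foreign()
}

/// Takes ownership back from the C side.
///
/// # Safety
///
/// `ptr` must come from a matching [`stash`] call and must not be used again afterwards.
unsafe fn unstash(ptr: *const kernel::ffi::c_void) -> Arc<u32> {
    // SAFETY: Per this function's safety requirements, `ptr` came from
    // `Arc::<u32>::into_foreign` and ownership has not been reclaimed yet.
    unsafe { <Arc<u32> as ForeignOwnable>::from_foreign(ptr) }
}
```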
- fn into_foreign(self) -> *const core::ffi::c_void; + fn into_foreign(self) -> *const crate::ffi::c_void; /// Borrows a foreign-owned object. /// @@ -39,7 +37,7 @@ pub trait ForeignOwnable: Sized { /// /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet. - unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Self::Borrowed<'a>; + unsafe fn borrow<'a>(ptr: *const crate::ffi::c_void) -> Self::Borrowed<'a>; /// Converts a foreign-owned object back to a Rust-owned one. /// @@ -49,7 +47,7 @@ pub trait ForeignOwnable: Sized { /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet. /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] for /// this object must have been dropped. - unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self; + unsafe fn from_foreign(ptr: *const crate::ffi::c_void) -> Self; /// Tries to convert a foreign-owned object back to a Rust-owned one. /// @@ -60,7 +58,7 @@ pub trait ForeignOwnable: Sized { /// /// `ptr` must either be null or satisfy the safety requirements for /// [`ForeignOwnable::from_foreign`]. - unsafe fn try_from_foreign(ptr: *const core::ffi::c_void) -> Option { + unsafe fn try_from_foreign(ptr: *const crate::ffi::c_void) -> Option { if ptr.is_null() { None } else { @@ -71,64 +69,16 @@ pub trait ForeignOwnable: Sized { } } -impl ForeignOwnable for Box { - type Borrowed<'a> = &'a T; - - fn into_foreign(self) -> *const core::ffi::c_void { - Box::into_raw(self) as _ - } - - unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> &'a T { - // SAFETY: The safety requirements for this function ensure that the object is still alive, - // so it is safe to dereference the raw pointer. - // The safety requirements of `from_foreign` also ensure that the object remains alive for - // the lifetime of the returned value. - unsafe { &*ptr.cast() } - } - - unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self { - // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous - // call to `Self::into_foreign`. - unsafe { Box::from_raw(ptr as _) } - } -} - -impl ForeignOwnable for Pin> { - type Borrowed<'a> = Pin<&'a T>; - - fn into_foreign(self) -> *const core::ffi::c_void { - // SAFETY: We are still treating the box as pinned. - Box::into_raw(unsafe { Pin::into_inner_unchecked(self) }) as _ - } - - unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Pin<&'a T> { - // SAFETY: The safety requirements for this function ensure that the object is still alive, - // so it is safe to dereference the raw pointer. - // The safety requirements of `from_foreign` also ensure that the object remains alive for - // the lifetime of the returned value. - let r = unsafe { &*ptr.cast() }; - - // SAFETY: This pointer originates from a `Pin>`. - unsafe { Pin::new_unchecked(r) } - } - - unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self { - // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous - // call to `Self::into_foreign`. 
- unsafe { Pin::new_unchecked(Box::from_raw(ptr as _)) } - } -} - impl ForeignOwnable for () { type Borrowed<'a> = (); - fn into_foreign(self) -> *const core::ffi::c_void { + fn into_foreign(self) -> *const crate::ffi::c_void { core::ptr::NonNull::dangling().as_ptr() } - unsafe fn borrow<'a>(_: *const core::ffi::c_void) -> Self::Borrowed<'a> {} + unsafe fn borrow<'a>(_: *const crate::ffi::c_void) -> Self::Borrowed<'a> {} - unsafe fn from_foreign(_: *const core::ffi::c_void) -> Self {} + unsafe fn from_foreign(_: *const crate::ffi::c_void) -> Self {} } /// Runs a cleanup function/closure when dropped. @@ -185,7 +135,7 @@ impl ForeignOwnable for () { /// # use kernel::types::ScopeGuard; /// fn example3(arg: bool) -> Result { /// let mut vec = -/// ScopeGuard::new_with_data(Vec::new(), |v| pr_info!("vec had {} elements\n", v.len())); +/// ScopeGuard::new_with_data(KVec::new(), |v| pr_info!("vec had {} elements\n", v.len())); /// /// vec.push(10u8, GFP_KERNEL)?; /// if arg { @@ -225,7 +175,7 @@ impl ScopeGuard { impl ScopeGuard<(), fn(())> { /// Creates a new guarded object with the given cleanup function. pub fn new(cleanup: impl FnOnce()) -> ScopeGuard<(), impl FnOnce(())> { - ScopeGuard::new_with_data((), move |_| cleanup()) + ScopeGuard::new_with_data((), move |()| cleanup()) } } @@ -410,6 +360,7 @@ impl ARef { /// /// struct Empty {} /// + /// # // SAFETY: TODO. /// unsafe impl AlwaysRefCounted for Empty { /// fn inc_ref(&self) {} /// unsafe fn dec_ref(_obj: NonNull) {} @@ -417,6 +368,7 @@ impl ARef { /// /// let mut data = Empty {}; /// let ptr = NonNull::::new(&mut data as *mut _).unwrap(); + /// # // SAFETY: TODO. /// let data_ref: ARef = unsafe { ARef::from_raw(ptr) }; /// let raw_ptr: NonNull = ARef::into_raw(data_ref); /// @@ -481,21 +433,23 @@ pub enum Either { /// All bit-patterns must be valid for this type. This type must not have interior mutability. pub unsafe trait FromBytes {} -// SAFETY: All bit patterns are acceptable values of the types below. -unsafe impl FromBytes for u8 {} -unsafe impl FromBytes for u16 {} -unsafe impl FromBytes for u32 {} -unsafe impl FromBytes for u64 {} -unsafe impl FromBytes for usize {} -unsafe impl FromBytes for i8 {} -unsafe impl FromBytes for i16 {} -unsafe impl FromBytes for i32 {} -unsafe impl FromBytes for i64 {} -unsafe impl FromBytes for isize {} -// SAFETY: If all bit patterns are acceptable for individual values in an array, then all bit -// patterns are also acceptable for arrays of that type. -unsafe impl FromBytes for [T] {} -unsafe impl FromBytes for [T; N] {} +macro_rules! impl_frombytes { + ($($({$($generics:tt)*})? $t:ty, )*) => { + // SAFETY: Safety comments written in the macro invocation. + $(unsafe impl$($($generics)*)? FromBytes for $t {})* + }; +} + +impl_frombytes! { + // SAFETY: All bit patterns are acceptable values of the types below. + u8, u16, u32, u64, usize, + i8, i16, i32, i64, isize, + + // SAFETY: If all bit patterns are acceptable for individual values in an array, then all bit + // patterns are also acceptable for arrays of that type. + {} [T], + {} [T; N], +} /// Types that can be viewed as an immutable slice of initialized bytes. /// @@ -514,21 +468,23 @@ unsafe impl FromBytes for [T; N] {} /// mutability. pub unsafe trait AsBytes {} -// SAFETY: Instances of the following types have no uninitialized portions. 
-unsafe impl AsBytes for u8 {} -unsafe impl AsBytes for u16 {} -unsafe impl AsBytes for u32 {} -unsafe impl AsBytes for u64 {} -unsafe impl AsBytes for usize {} -unsafe impl AsBytes for i8 {} -unsafe impl AsBytes for i16 {} -unsafe impl AsBytes for i32 {} -unsafe impl AsBytes for i64 {} -unsafe impl AsBytes for isize {} -unsafe impl AsBytes for bool {} -unsafe impl AsBytes for char {} -unsafe impl AsBytes for str {} -// SAFETY: If individual values in an array have no uninitialized portions, then the array itself -// does not have any uninitialized portions either. -unsafe impl AsBytes for [T] {} -unsafe impl AsBytes for [T; N] {} +macro_rules! impl_asbytes { + ($($({$($generics:tt)*})? $t:ty, )*) => { + // SAFETY: Safety comments written in the macro invocation. + $(unsafe impl$($($generics)*)? AsBytes for $t {})* + }; +} + +impl_asbytes! { + // SAFETY: Instances of the following types have no uninitialized portions. + u8, u16, u32, u64, usize, + i8, i16, i32, i64, isize, + bool, + char, + str, + + // SAFETY: If individual values in an array have no uninitialized portions, then the array + // itself does not have any uninitialized portions either. + {} [T], + {} [T; N], +} diff --git a/rust/kernel/uaccess.rs b/rust/kernel/uaccess.rs index e9347cff99ab2..5a3c2d4df65f8 100644 --- a/rust/kernel/uaccess.rs +++ b/rust/kernel/uaccess.rs @@ -8,11 +8,10 @@ use crate::{ alloc::Flags, bindings, error::Result, + ffi::c_void, prelude::*, types::{AsBytes, FromBytes}, }; -use alloc::vec::Vec; -use core::ffi::{c_ulong, c_void}; use core::mem::{size_of, MaybeUninit}; /// The type used for userspace addresses. @@ -46,15 +45,14 @@ pub type UserPtr = usize; /// every byte in the region. /// /// ```no_run -/// use alloc::vec::Vec; -/// use core::ffi::c_void; +/// use kernel::ffi::c_void; /// use kernel::error::Result; /// use kernel::uaccess::{UserPtr, UserSlice}; /// /// fn bytes_add_one(uptr: UserPtr, len: usize) -> Result<()> { /// let (read, mut write) = UserSlice::new(uptr, len).reader_writer(); /// -/// let mut buf = Vec::new(); +/// let mut buf = KVec::new(); /// read.read_all(&mut buf, GFP_KERNEL)?; /// /// for b in &mut buf { @@ -69,8 +67,7 @@ pub type UserPtr = usize; /// Example illustrating a TOCTOU (time-of-check to time-of-use) bug. /// /// ```no_run -/// use alloc::vec::Vec; -/// use core::ffi::c_void; +/// use kernel::ffi::c_void; /// use kernel::error::{code::EINVAL, Result}; /// use kernel::uaccess::{UserPtr, UserSlice}; /// @@ -78,21 +75,21 @@ pub type UserPtr = usize; /// fn is_valid(uptr: UserPtr, len: usize) -> Result { /// let read = UserSlice::new(uptr, len).reader(); /// -/// let mut buf = Vec::new(); +/// let mut buf = KVec::new(); /// read.read_all(&mut buf, GFP_KERNEL)?; /// /// todo!() /// } /// /// /// Returns the bytes behind this user pointer if they are valid. -/// fn get_bytes_if_valid(uptr: UserPtr, len: usize) -> Result> { +/// fn get_bytes_if_valid(uptr: UserPtr, len: usize) -> Result> { /// if !is_valid(uptr, len)? { /// return Err(EINVAL); /// } /// /// let read = UserSlice::new(uptr, len).reader(); /// -/// let mut buf = Vec::new(); +/// let mut buf = KVec::new(); /// read.read_all(&mut buf, GFP_KERNEL)?; /// /// // THIS IS A BUG! The bytes could have changed since we checked them. @@ -130,7 +127,7 @@ impl UserSlice { /// Reads the entirety of the user slice, appending it to the end of the provided buffer. /// /// Fails with [`EFAULT`] if the read happens on a bad address. 
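The writer half of `UserSlice` mirrors the read path documented above; a minimal sketch (illustrative, in-kernel context assumed) of copying a single value back out to userspace:

```rust
use kernel::prelude::*;
use kernel::uaccess::{UserPtr, UserSlice};

fn put_u64(uptr: UserPtr, value: u64) -> Result {
    let mut writer = UserSlice::new(uptr, core::mem::size_of::<u64>()).writer();
    // Fails with `EFAULT` if the user address range is not writable.
    writer.write(&value)
}
```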
- pub fn read_all(self, buf: &mut Vec, flags: Flags) -> Result { + pub fn read_all(self, buf: &mut KVec, flags: Flags) -> Result { self.reader().read_all(buf, flags) } @@ -227,13 +224,9 @@ impl UserSliceReader { if len > self.length { return Err(EFAULT); } - let Ok(len_ulong) = c_ulong::try_from(len) else { - return Err(EFAULT); - }; - // SAFETY: `out_ptr` points into a mutable slice of length `len_ulong`, so we may write + // SAFETY: `out_ptr` points into a mutable slice of length `len`, so we may write // that many bytes to it. - let res = - unsafe { bindings::copy_from_user(out_ptr, self.ptr as *const c_void, len_ulong) }; + let res = unsafe { bindings::copy_from_user(out_ptr, self.ptr as *const c_void, len) }; if res != 0 { return Err(EFAULT); } @@ -262,9 +255,6 @@ impl UserSliceReader { if len > self.length { return Err(EFAULT); } - let Ok(len_ulong) = c_ulong::try_from(len) else { - return Err(EFAULT); - }; let mut out: MaybeUninit = MaybeUninit::uninit(); // SAFETY: The local variable `out` is valid for writing `size_of::()` bytes. // @@ -275,7 +265,7 @@ impl UserSliceReader { bindings::_copy_from_user( out.as_mut_ptr().cast::(), self.ptr as *const c_void, - len_ulong, + len, ) }; if res != 0 { @@ -291,9 +281,9 @@ impl UserSliceReader { /// Reads the entirety of the user slice, appending it to the end of the provided buffer. /// /// Fails with [`EFAULT`] if the read happens on a bad address. - pub fn read_all(mut self, buf: &mut Vec, flags: Flags) -> Result { + pub fn read_all(mut self, buf: &mut KVec, flags: Flags) -> Result { let len = self.length; - VecExt::::reserve(buf, len, flags)?; + buf.reserve(len, flags)?; // The call to `try_reserve` was successful, so the spare capacity is at least `len` bytes // long. @@ -338,12 +328,9 @@ impl UserSliceWriter { if len > self.length { return Err(EFAULT); } - let Ok(len_ulong) = c_ulong::try_from(len) else { - return Err(EFAULT); - }; - // SAFETY: `data_ptr` points into an immutable slice of length `len_ulong`, so we may read + // SAFETY: `data_ptr` points into an immutable slice of length `len`, so we may read // that many bytes from it. - let res = unsafe { bindings::copy_to_user(self.ptr as *mut c_void, data_ptr, len_ulong) }; + let res = unsafe { bindings::copy_to_user(self.ptr as *mut c_void, data_ptr, len) }; if res != 0 { return Err(EFAULT); } @@ -362,9 +349,6 @@ impl UserSliceWriter { if len > self.length { return Err(EFAULT); } - let Ok(len_ulong) = c_ulong::try_from(len) else { - return Err(EFAULT); - }; // SAFETY: The reference points to a value of type `T`, so it is valid for reading // `size_of::()` bytes. // @@ -375,7 +359,7 @@ impl UserSliceWriter { bindings::_copy_to_user( self.ptr as *mut c_void, (value as *const T).cast::(), - len_ulong, + len, ) }; if res != 0 { diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs index 553a5cba2adcb..4d1d2062f6eba 100644 --- a/rust/kernel/workqueue.rs +++ b/rust/kernel/workqueue.rs @@ -216,7 +216,7 @@ impl Queue { func: Some(func), }); - self.enqueue(Box::pin_init(init, flags).map_err(|_| AllocError)?); + self.enqueue(KBox::pin_init(init, flags).map_err(|_| AllocError)?); Ok(()) } } @@ -239,9 +239,9 @@ impl ClosureWork { } impl WorkItem for ClosureWork { - type Pointer = Pin>; + type Pointer = Pin>; - fn run(mut this: Pin>) { + fn run(mut this: Pin>) { if let Some(func) = this.as_mut().project().take() { (func)() } @@ -297,7 +297,7 @@ pub unsafe trait RawWorkItem { /// Defines the method that should be called directly when a work item is executed. 
/// -/// This trait is implemented by `Pin>` and [`Arc`], and is mainly intended to be +/// This trait is implemented by `Pin>` and [`Arc`], and is mainly intended to be /// implemented for smart pointer types. For your own structs, you would implement [`WorkItem`] /// instead. The [`run`] method on this trait will usually just perform the appropriate /// `container_of` translation and then call into the [`run`][WorkItem::run] method from the @@ -329,7 +329,7 @@ pub unsafe trait WorkItemPointer: RawWorkItem { /// This trait is used when the `work_struct` field is defined using the [`Work`] helper. pub trait WorkItem { /// The pointer type that this struct is wrapped in. This will typically be `Arc` or - /// `Pin>`. + /// `Pin>`. type Pointer: WorkItemPointer; /// The method that should be called when this work item is executed. @@ -366,7 +366,6 @@ unsafe impl Sync for Work {} impl Work { /// Creates a new instance of [`Work`]. #[inline] - #[allow(clippy::new_ret_no_self)] pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit where T: WorkItem, @@ -520,13 +519,14 @@ impl_has_work! { impl{T} HasWork for ClosureWork { self.work } } +// SAFETY: TODO. unsafe impl WorkItemPointer for Arc where T: WorkItem, T: HasWork, { unsafe extern "C" fn run(ptr: *mut bindings::work_struct) { - // SAFETY: The `__enqueue` method always uses a `work_struct` stored in a `Work`. + // The `__enqueue` method always uses a `work_struct` stored in a `Work`. let ptr = ptr as *mut Work; // SAFETY: This computes the pointer that `__enqueue` got from `Arc::into_raw`. let ptr = unsafe { T::work_container_of(ptr) }; @@ -537,6 +537,7 @@ where } } +// SAFETY: TODO. unsafe impl RawWorkItem for Arc where T: WorkItem, @@ -565,18 +566,19 @@ where } } -unsafe impl WorkItemPointer for Pin> +// SAFETY: TODO. +unsafe impl WorkItemPointer for Pin> where T: WorkItem, T: HasWork, { unsafe extern "C" fn run(ptr: *mut bindings::work_struct) { - // SAFETY: The `__enqueue` method always uses a `work_struct` stored in a `Work`. + // The `__enqueue` method always uses a `work_struct` stored in a `Work`. let ptr = ptr as *mut Work; // SAFETY: This computes the pointer that `__enqueue` got from `Arc::into_raw`. let ptr = unsafe { T::work_container_of(ptr) }; // SAFETY: This pointer comes from `Arc::into_raw` and we've been given back ownership. - let boxed = unsafe { Box::from_raw(ptr) }; + let boxed = unsafe { KBox::from_raw(ptr) }; // SAFETY: The box was already pinned when it was enqueued. let pinned = unsafe { Pin::new_unchecked(boxed) }; @@ -584,7 +586,8 @@ where } } -unsafe impl RawWorkItem for Pin> +// SAFETY: TODO. +unsafe impl RawWorkItem for Pin> where T: WorkItem, T: HasWork, @@ -598,9 +601,9 @@ where // SAFETY: We're not going to move `self` or any of its fields, so its okay to temporarily // remove the `Pin` wrapper. let boxed = unsafe { Pin::into_inner_unchecked(self) }; - let ptr = Box::into_raw(boxed); + let ptr = KBox::into_raw(boxed); - // SAFETY: Pointers into a `Box` point at a valid value. + // SAFETY: Pointers into a `KBox` point at a valid value. let work_ptr = unsafe { T::raw_get_work(ptr) }; // SAFETY: `raw_get_work` returns a pointer to a valid value. 
let work_ptr = unsafe { Work::raw_get(work_ptr) }; diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs index 90e2202ba4d5a..b16402a16acd4 100644 --- a/rust/macros/lib.rs +++ b/rust/macros/lib.rs @@ -132,7 +132,7 @@ pub fn module(ts: TokenStream) -> TokenStream { /// calls to this function at compile time: /// /// ```compile_fail -/// # use kernel::error::VTABLE_DEFAULT_ERROR; +/// # // Intentionally missing `use`s to simplify `rusttest`. /// kernel::build_error(VTABLE_DEFAULT_ERROR) /// ``` /// @@ -242,8 +242,8 @@ pub fn concat_idents(ts: TokenStream) -> TokenStream { /// #[pin_data] /// struct DriverData { /// #[pin] -/// queue: Mutex>, -/// buf: Box<[u8; 1024 * 1024]>, +/// queue: Mutex>, +/// buf: KBox<[u8; 1024 * 1024]>, /// } /// ``` /// @@ -251,8 +251,8 @@ pub fn concat_idents(ts: TokenStream) -> TokenStream { /// #[pin_data(PinnedDrop)] /// struct DriverData { /// #[pin] -/// queue: Mutex>, -/// buf: Box<[u8; 1024 * 1024]>, +/// queue: Mutex>, +/// buf: KBox<[u8; 1024 * 1024]>, /// raw_info: *mut Info, /// } /// @@ -281,8 +281,8 @@ pub fn pin_data(inner: TokenStream, item: TokenStream) -> TokenStream { /// #[pin_data(PinnedDrop)] /// struct DriverData { /// #[pin] -/// queue: Mutex>, -/// buf: Box<[u8; 1024 * 1024]>, +/// queue: Mutex>, +/// buf: KBox<[u8; 1024 * 1024]>, /// raw_info: *mut Info, /// } /// diff --git a/rust/macros/module.rs b/rust/macros/module.rs index aef3b132f32b3..e7a087b7e8849 100644 --- a/rust/macros/module.rs +++ b/rust/macros/module.rs @@ -253,7 +253,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream { #[doc(hidden)] #[no_mangle] #[link_section = \".init.text\"] - pub unsafe extern \"C\" fn init_module() -> core::ffi::c_int {{ + pub unsafe extern \"C\" fn init_module() -> kernel::ffi::c_int {{ // SAFETY: This function is inaccessible to the outside due to the double // module wrapping it. It is called exactly once by the C side via its // unique name. @@ -292,7 +292,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream { #[doc(hidden)] #[link_section = \"{initcall_section}\"] #[used] - pub static __{name}_initcall: extern \"C\" fn() -> core::ffi::c_int = __{name}_init; + pub static __{name}_initcall: extern \"C\" fn() -> kernel::ffi::c_int = __{name}_init; #[cfg(not(MODULE))] #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)] @@ -307,7 +307,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream { #[cfg(not(MODULE))] #[doc(hidden)] #[no_mangle] - pub extern \"C\" fn __{name}_init() -> core::ffi::c_int {{ + pub extern \"C\" fn __{name}_init() -> kernel::ffi::c_int {{ // SAFETY: This function is inaccessible to the outside due to the double // module wrapping it. It is called exactly once by the C side via its // placement above in the initcall section. @@ -330,7 +330,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream { /// # Safety /// /// This function must only be called once. - unsafe fn __init() -> core::ffi::c_int {{ + unsafe fn __init() -> kernel::ffi::c_int {{ match <{type_} as kernel::Module>::init(&super::super::THIS_MODULE) {{ Ok(m) => {{ // SAFETY: No data race, since `__MOD` can only be accessed by this diff --git a/rust/uapi/lib.rs b/rust/uapi/lib.rs index 80a00260e3e7a..13495910271fa 100644 --- a/rust/uapi/lib.rs +++ b/rust/uapi/lib.rs @@ -14,6 +14,7 @@ #![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))] #![allow( clippy::all, + clippy::undocumented_unsafe_blocks, dead_code, missing_docs, non_camel_case_types, @@ -24,4 +25,9 @@ unsafe_op_in_unsafe_fn )] +// Manual definition of blocklisted types. 
+type __kernel_size_t = usize; +type __kernel_ssize_t = isize; +type __kernel_ptrdiff_t = isize; + include!(concat!(env!("OBJTREE"), "/rust/uapi/uapi_generated.rs")); diff --git a/samples/rust/rust_minimal.rs b/samples/rust/rust_minimal.rs index 2a9eaab62d1ca..4aaf117bf8e3c 100644 --- a/samples/rust/rust_minimal.rs +++ b/samples/rust/rust_minimal.rs @@ -13,7 +13,7 @@ module! { } struct RustMinimal { - numbers: Vec, + numbers: KVec, } impl kernel::Module for RustMinimal { @@ -21,7 +21,7 @@ impl kernel::Module for RustMinimal { pr_info!("Rust minimal sample (init)\n"); pr_info!("Am I built-in? {}\n", !cfg!(MODULE)); - let mut numbers = Vec::new(); + let mut numbers = KVec::new(); numbers.push(72, GFP_KERNEL)?; numbers.push(108, GFP_KERNEL)?; numbers.push(200, GFP_KERNEL)?; diff --git a/samples/rust/rust_print.rs b/samples/rust/rust_print.rs index 6eabb0d79ea3a..ba1606bdbd754 100644 --- a/samples/rust/rust_print.rs +++ b/samples/rust/rust_print.rs @@ -15,6 +15,7 @@ module! { struct RustPrint; +#[expect(clippy::disallowed_macros)] fn arc_print() -> Result { use kernel::sync::*; diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 880785b52c04a..2bba59e790b8a 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -248,7 +248,7 @@ $(obj)/%.lst: $(obj)/%.c FORCE # Compile Rust sources (.rs) # --------------------------------------------------------------------------- -rust_allowed_features := arbitrary_self_types,new_uninit +rust_allowed_features := arbitrary_self_types,lint_reasons # `--out-dir` is required to avoid temporaries being created by `rustc` in the # current working directory, which may be not accessible in the out-of-tree @@ -258,7 +258,7 @@ rust_common_cmd = \ -Zallow-features=$(rust_allowed_features) \ -Zcrate-attr=no_std \ -Zcrate-attr='feature($(rust_allowed_features))' \ - -Zunstable-options --extern force:alloc --extern kernel \ + -Zunstable-options --extern kernel \ --crate-type rlib -L $(objtree)/rust/ \ --crate-name $(basename $(notdir $@)) \ --sysroot=/dev/null \ diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler index e0842496d26ed..c6cd729b65cbf 100644 --- a/scripts/Makefile.compiler +++ b/scripts/Makefile.compiler @@ -75,8 +75,8 @@ ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3)) # Usage: MY_RUSTFLAGS += $(call __rustc-option,$(RUSTC),$(MY_RUSTFLAGS),-Cinstrument-coverage,-Zinstrument-coverage) # TODO: remove RUSTC_BOOTSTRAP=1 when we raise the minimum GNU Make version to 4.4 __rustc-option = $(call try-run,\ - echo '#![allow(missing_docs)]#![feature(no_core)]#![no_core]' | RUSTC_BOOTSTRAP=1\ - $(1) --sysroot=/dev/null $(filter-out --sysroot=/dev/null,$(2)) $(3)\ + echo '$(pound)![allow(missing_docs)]$(pound)![feature(no_core)]$(pound)![no_core]' | RUSTC_BOOTSTRAP=1\ + $(1) --sysroot=/dev/null $(filter-out --sysroot=/dev/null --target=%,$(2)) $(3)\ --crate-type=rlib --out-dir=$(TMPOUT) --emit=obj=- - >/dev/null,$(3),$(4)) # rustc-option diff --git a/scripts/generate_builtin_ranges.awk b/scripts/generate_builtin_ranges.awk index b9ec761b3befc..d4bd5c2b998ca 100755 --- a/scripts/generate_builtin_ranges.awk +++ b/scripts/generate_builtin_ranges.awk @@ -282,6 +282,11 @@ ARGIND == 2 && !anchor && NF == 2 && $1 ~ /^0x/ && $2 !~ /^0x/ { # section. # ARGIND == 2 && sect && NF == 4 && /^ [^ \*]/ && !($1 in sect_addend) { + # There are a few sections with constant data (without symbols) that + # can get resized during linking, so it is best to ignore them. 
+ if ($1 ~ /^\.rodata\.(cst|str)[0-9]/) + next; + if (!($1 in sect_base)) { sect_base[$1] = base; diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py index d2bc63cde8c6a..690f9830f0648 100755 --- a/scripts/generate_rust_analyzer.py +++ b/scripts/generate_rust_analyzer.py @@ -49,14 +49,26 @@ def append_crate(display_name, root_module, deps, cfg=[], is_workspace_member=Tr } }) - # First, the ones in `rust/` since they are a bit special. - append_crate( - "core", - sysroot_src / "core" / "src" / "lib.rs", - [], - cfg=crates_cfgs.get("core", []), - is_workspace_member=False, - ) + def append_sysroot_crate( + display_name, + deps, + cfg=[], + ): + append_crate( + display_name, + sysroot_src / display_name / "src" / "lib.rs", + deps, + cfg, + is_workspace_member=False, + ) + + # NB: sysroot crates reexport items from one another so setting up our transitive dependencies + # here is important for ensuring that rust-analyzer can resolve symbols. The sources of truth + # for this dependency graph are `(sysroot_src / crate / "Cargo.toml" for crate in crates)`. + append_sysroot_crate("core", [], cfg=crates_cfgs.get("core", [])) + append_sysroot_crate("alloc", ["core"]) + append_sysroot_crate("std", ["alloc", "core"]) + append_sysroot_crate("proc_macro", ["core", "std"]) append_crate( "compiler_builtins", @@ -64,17 +76,10 @@ def append_crate(display_name, root_module, deps, cfg=[], is_workspace_member=Tr [], ) - append_crate( - "alloc", - sysroot_src / "alloc" / "src" / "lib.rs", - ["core", "compiler_builtins"], - cfg=crates_cfgs.get("alloc", []), - ) - append_crate( "macros", srctree / "rust" / "macros" / "lib.rs", - [], + ["std", "proc_macro"], is_proc_macro=True, ) crates[-1]["proc_macro_dylib_path"] = f"{objtree}/rust/libmacros.so" @@ -86,26 +91,33 @@ def append_crate(display_name, root_module, deps, cfg=[], is_workspace_member=Tr ) append_crate( - "bindings", - srctree / "rust"/ "bindings" / "lib.rs", - ["core"], - cfg=cfg, + "ffi", + srctree / "rust" / "ffi.rs", + ["core", "compiler_builtins"], ) - crates[-1]["env"]["OBJTREE"] = str(objtree.resolve(True)) - append_crate( - "kernel", - srctree / "rust" / "kernel" / "lib.rs", - ["core", "alloc", "macros", "build_error", "bindings"], - cfg=cfg, - ) - crates[-1]["source"] = { - "include_dirs": [ - str(srctree / "rust" / "kernel"), - str(objtree / "rust") - ], - "exclude_dirs": [], - } + def append_crate_with_generated( + display_name, + deps, + ): + append_crate( + display_name, + srctree / "rust"/ display_name / "lib.rs", + deps, + cfg=cfg, + ) + crates[-1]["env"]["OBJTREE"] = str(objtree.resolve(True)) + crates[-1]["source"] = { + "include_dirs": [ + str(srctree / "rust" / display_name), + str(objtree / "rust") + ], + "exclude_dirs": [], + } + + append_crate_with_generated("bindings", ["core", "ffi"]) + append_crate_with_generated("uapi", ["core", "ffi"]) + append_crate_with_generated("kernel", ["core", "macros", "build_error", "ffi", "bindings", "uapi"]) def is_root_crate(build_file, target): try: @@ -133,7 +145,7 @@ def is_root_crate(build_file, target): append_crate( name, path, - ["core", "alloc", "kernel"], + ["core", "kernel"], cfg=cfg, ) diff --git a/scripts/package/debian/rules b/scripts/package/debian/rules index ca07243bd5cdf..2b3f9a0bd6c40 100755 --- a/scripts/package/debian/rules +++ b/scripts/package/debian/rules @@ -21,9 +21,11 @@ ifeq ($(origin KBUILD_VERBOSE),undefined) endif endif -revision = $(lastword $(subst -, ,$(shell dpkg-parsechangelog -S Version))) +revision = $(shell dpkg-parsechangelog -S 
Version | sed -n 's/.*-//p') CROSS_COMPILE ?= $(filter-out $(DEB_BUILD_GNU_TYPE)-, $(DEB_HOST_GNU_TYPE)-) -make-opts = ARCH=$(ARCH) KERNELRELEASE=$(KERNELRELEASE) KBUILD_BUILD_VERSION=$(revision) $(addprefix CROSS_COMPILE=,$(CROSS_COMPILE)) +make-opts = ARCH=$(ARCH) KERNELRELEASE=$(KERNELRELEASE) \ + $(addprefix KBUILD_BUILD_VERSION=,$(revision)) \ + $(addprefix CROSS_COMPILE=,$(CROSS_COMPILE)) binary-targets := $(addprefix binary-, image image-dbg headers libc-dev) diff --git a/scripts/rustdoc_test_gen.rs b/scripts/rustdoc_test_gen.rs index 5ebd42ae4a3fd..76aaa8329413d 100644 --- a/scripts/rustdoc_test_gen.rs +++ b/scripts/rustdoc_test_gen.rs @@ -15,8 +15,8 @@ //! - Test code should be able to define functions and call them, without having to carry //! the context. //! -//! - Later on, we may want to be able to test non-kernel code (e.g. `core`, `alloc` or -//! third-party crates) which likely use the standard library `assert*!` macros. +//! - Later on, we may want to be able to test non-kernel code (e.g. `core` or third-party +//! crates) which likely use the standard library `assert*!` macros. //! //! For this reason, instead of the passed context, `kunit_get_current_test()` is used instead //! (i.e. `current->kunit_test`). diff --git a/scripts/selinux/install_policy.sh b/scripts/selinux/install_policy.sh index 24086793b0d8d..db40237e60ce7 100755 --- a/scripts/selinux/install_policy.sh +++ b/scripts/selinux/install_policy.sh @@ -6,27 +6,24 @@ if [ `id -u` -ne 0 ]; then exit 1 fi -SF=`which setfiles` -if [ $? -eq 1 ]; then +SF=`which setfiles` || { echo "Could not find setfiles" echo "Do you have policycoreutils installed?" exit 1 -fi +} -CP=`which checkpolicy` -if [ $? -eq 1 ]; then +CP=`which checkpolicy` || { echo "Could not find checkpolicy" echo "Do you have checkpolicy installed?" exit 1 -fi +} VERS=`$CP -V | awk '{print $1}'` -ENABLED=`which selinuxenabled` -if [ $? -eq 1 ]; then +ENABLED=`which selinuxenabled` || { echo "Could not find selinuxenabled" echo "Do you have libselinux-utils installed?" exit 1 -fi +} if selinuxenabled; then echo "SELinux is already enabled" diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index 3c323ca213d42..56bf2f55d9387 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -149,6 +149,9 @@ struct ima_kexec_hdr { #define IMA_CHECK_BLACKLIST 0x40000000 #define IMA_VERITY_REQUIRED 0x80000000 +/* Exclude non-action flags which are not rule-specific. 
*/ +#define IMA_NONACTION_RULE_FLAGS (IMA_NONACTION_FLAGS & ~IMA_NEW_FILE) + #define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \ IMA_HASH | IMA_APPRAISE_SUBMASK) #define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \ @@ -178,7 +181,8 @@ struct ima_kexec_hdr { #define IMA_UPDATE_XATTR 1 #define IMA_CHANGE_ATTR 2 #define IMA_DIGSIG 3 -#define IMA_MUST_MEASURE 4 +#define IMA_MAY_EMIT_TOMTOU 4 +#define IMA_EMITTED_OPENWRITERS 5 /* IMA integrity metadata associated with an inode */ struct ima_iint_cache { diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 06132cf47016d..a9aab10bebcaa 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -129,16 +129,22 @@ static void ima_rdwr_violation_check(struct file *file, if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) { if (!iint) iint = ima_iint_find(inode); + /* IMA_MEASURE is set from reader side */ - if (iint && test_bit(IMA_MUST_MEASURE, - &iint->atomic_flags)) + if (iint && test_and_clear_bit(IMA_MAY_EMIT_TOMTOU, + &iint->atomic_flags)) send_tomtou = true; } } else { if (must_measure) - set_bit(IMA_MUST_MEASURE, &iint->atomic_flags); - if (inode_is_open_for_write(inode) && must_measure) - send_writers = true; + set_bit(IMA_MAY_EMIT_TOMTOU, &iint->atomic_flags); + + /* Limit number of open_writers violations */ + if (inode_is_open_for_write(inode) && must_measure) { + if (!test_and_set_bit(IMA_EMITTED_OPENWRITERS, + &iint->atomic_flags)) + send_writers = true; + } } if (!send_tomtou && !send_writers) @@ -167,6 +173,8 @@ static void ima_check_last_writer(struct ima_iint_cache *iint, if (atomic_read(&inode->i_writecount) == 1) { struct kstat stat; + clear_bit(IMA_EMITTED_OPENWRITERS, &iint->atomic_flags); + update = test_and_clear_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); if ((iint->flags & IMA_NEW_FILE) || @@ -269,10 +277,13 @@ static int process_measurement(struct file *file, const struct cred *cred, mutex_lock(&iint->mutex); if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags)) - /* reset appraisal flags if ima_inode_post_setattr was called */ + /* + * Reset appraisal flags (action and non-action rule-specific) + * if ima_inode_post_setattr was called. + */ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED | IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK | - IMA_NONACTION_FLAGS); + IMA_NONACTION_RULE_FLAGS); /* * Re-evaulate the file if either the xattr has changed or the diff --git a/security/keys/gc.c b/security/keys/gc.c index 7d687b0962b14..f27223ea4578f 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c @@ -218,8 +218,10 @@ static void key_garbage_collector(struct work_struct *work) key = rb_entry(cursor, struct key, serial_node); cursor = rb_next(cursor); - if (refcount_read(&key->usage) == 0) + if (test_bit(KEY_FLAG_FINAL_PUT, &key->flags)) { + smp_mb(); /* Clobber key->user after FINAL_PUT seen. */ goto found_unreferenced_key; + } if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) { if (key->type == key_gc_dead_keytype) { diff --git a/security/keys/key.c b/security/keys/key.c index 3d7d185019d30..7198cd2ac3a3a 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -658,6 +658,8 @@ void key_put(struct key *key) key->user->qnbytes -= key->quotalen; spin_unlock_irqrestore(&key->user->lock, flags); } + smp_mb(); /* key->user before FINAL_PUT set. 
*/ + set_bit(KEY_FLAG_FINAL_PUT, &key->flags); schedule_work(&key_gc_work); } } diff --git a/security/landlock/errata.h b/security/landlock/errata.h new file mode 100644 index 0000000000000..8e626accac101 --- /dev/null +++ b/security/landlock/errata.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Landlock - Errata information + * + * Copyright © 2025 Microsoft Corporation + */ + +#ifndef _SECURITY_LANDLOCK_ERRATA_H +#define _SECURITY_LANDLOCK_ERRATA_H + +#include + +struct landlock_erratum { + const int abi; + const u8 number; +}; + +/* clang-format off */ +#define LANDLOCK_ERRATUM(NUMBER) \ + { \ + .abi = LANDLOCK_ERRATA_ABI, \ + .number = NUMBER, \ + }, +/* clang-format on */ + +/* + * Some fixes may require user space to check if they are applied on the running + * kernel before using a specific feature. For instance, this applies when a + * restriction was previously too restrictive and is now getting relaxed (for + * compatibility or semantic reasons). However, non-visible changes for + * legitimate use (e.g. security fixes) do not require an erratum. + */ +static const struct landlock_erratum landlock_errata_init[] __initconst = { + +/* + * Only Sparse may not implement __has_include. If a compiler does not + * implement __has_include, a warning will be printed at boot time (see + * setup.c). + */ +#ifdef __has_include + +#define LANDLOCK_ERRATA_ABI 1 +#if __has_include("errata/abi-1.h") +#include "errata/abi-1.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 2 +#if __has_include("errata/abi-2.h") +#include "errata/abi-2.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 3 +#if __has_include("errata/abi-3.h") +#include "errata/abi-3.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 4 +#if __has_include("errata/abi-4.h") +#include "errata/abi-4.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 5 +#if __has_include("errata/abi-5.h") +#include "errata/abi-5.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 6 +#if __has_include("errata/abi-6.h") +#include "errata/abi-6.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +/* + * For each new erratum, we need to include all the ABI files up to the impacted + * ABI to make all potential future intermediate errata easy to backport. + * + * If such change involves more than one ABI addition, then it must be in a + * dedicated commit with the same Fixes tag as used for the actual fix. + * + * Each commit creating a new security/landlock/errata/abi-*.h file must have a + * Depends-on tag to reference the commit that previously added the line to + * include this new file, except if the original Fixes tag is enough. + * + * Each erratum must be documented in its related ABI file, and a dedicated + * commit must update Documentation/userspace-api/landlock.rst to include this + * erratum. This commit will not be backported. 
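Stepping back to the key_put()/key_garbage_collector() change above: the two smp_mb() calls pair so that the collector can never observe KEY_FLAG_FINAL_PUT without also observing the final key->user updates, and never clobbers the key before the flag has been seen. A minimal sketch of that publish/observe pattern with hypothetical names (not the actual key code):

#include <linux/bitops.h>
#include <linux/types.h>
#include <asm/barrier.h>

#define OBJ_FLAG_FINAL_PUT	0

struct obj {
	unsigned long flags;
	void *payload;
};

/* Releasing side: complete all payload stores, then publish the flag. */
static void obj_final_put(struct obj *o)
{
	o->payload = NULL;	/* last touch of the payload */
	smp_mb();		/* payload stores before FINAL_PUT is set */
	set_bit(OBJ_FLAG_FINAL_PUT, &o->flags);
}

/* Reaping side: only after the flag is seen may the payload be clobbered. */
static bool obj_may_reap(struct obj *o)
{
	if (!test_bit(OBJ_FLAG_FINAL_PUT, &o->flags))
		return false;
	smp_mb();		/* FINAL_PUT seen before clobbering the payload */
	return true;
}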
+ */ + +#endif + + {} +}; + +#endif /* _SECURITY_LANDLOCK_ERRATA_H */ diff --git a/security/landlock/errata/abi-4.h b/security/landlock/errata/abi-4.h new file mode 100644 index 0000000000000..c052ee54f89f6 --- /dev/null +++ b/security/landlock/errata/abi-4.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/** + * DOC: erratum_1 + * + * Erratum 1: TCP socket identification + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This fix addresses an issue where IPv4 and IPv6 stream sockets (e.g., SMC, + * MPTCP, or SCTP) were incorrectly restricted by TCP access rights during + * :manpage:`bind(2)` and :manpage:`connect(2)` operations. This change ensures + * that only TCP sockets are subject to TCP access rights, allowing other + * protocols to operate without unnecessary restrictions. + */ +LANDLOCK_ERRATUM(1) diff --git a/security/landlock/errata/abi-6.h b/security/landlock/errata/abi-6.h new file mode 100644 index 0000000000000..df7bc0e1fdf47 --- /dev/null +++ b/security/landlock/errata/abi-6.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/** + * DOC: erratum_2 + * + * Erratum 2: Scoped signal handling + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This fix addresses an issue where signal scoping was overly restrictive, + * preventing sandboxed threads from signaling other threads within the same + * process if they belonged to different domains. Because threads are not + * security boundaries, user space might assume that any thread within the same + * process can send signals between themselves (see :manpage:`nptl(7)` and + * :manpage:`libpsx(3)`). Consistent with :manpage:`ptrace(2)` behavior, direct + * interaction between threads of the same process should always be allowed. + * This change ensures that any thread is allowed to send signals to any other + * thread within the same process, regardless of their domain. + */ +LANDLOCK_ERRATUM(2) diff --git a/security/landlock/fs.c b/security/landlock/fs.c index 7adb25150488f..511e6ae8b79c9 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -27,7 +27,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -1623,21 +1625,46 @@ static int hook_file_ioctl_compat(struct file *file, unsigned int cmd, return -EACCES; } -static void hook_file_set_fowner(struct file *file) +/* + * Always allow sending signals between threads of the same process. This + * ensures consistency with hook_task_kill(). + */ +static bool control_current_fowner(struct fown_struct *const fown) { - struct landlock_ruleset *new_dom, *prev_dom; + struct task_struct *p; /* * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix * file_set_fowner LSM hook inconsistencies"). */ - lockdep_assert_held(&file_f_owner(file)->lock); - new_dom = landlock_get_current_domain(); - landlock_get_ruleset(new_dom); + lockdep_assert_held(&fown->lock); + + /* + * Some callers (e.g. fcntl_dirnotify) may not be in an RCU read-side + * critical section. + */ + guard(rcu)(); + p = pid_task(fown->pid, fown->pid_type); + if (!p) + return true; + + return !same_thread_group(p, current); +} + +static void hook_file_set_fowner(struct file *file) +{ + struct landlock_ruleset *prev_dom; + struct landlock_ruleset *new_dom = NULL; + + if (control_current_fowner(file_f_owner(file))) { + new_dom = landlock_get_current_domain(); + landlock_get_ruleset(new_dom); + } + prev_dom = landlock_file(file)->fown_domain; landlock_file(file)->fown_domain = new_dom; - /* Called in an RCU read-side critical section. 
*/ + /* May be called in an RCU read-side critical section. */ landlock_put_ruleset_deferred(prev_dom); } diff --git a/security/landlock/net.c b/security/landlock/net.c index d5dcc4407a197..104b6c01fe503 100644 --- a/security/landlock/net.c +++ b/security/landlock/net.c @@ -63,8 +63,7 @@ static int current_check_access_socket(struct socket *const sock, if (WARN_ON_ONCE(dom->num_layers < 1)) return -EACCES; - /* Checks if it's a (potential) TCP socket. */ - if (sock->type != SOCK_STREAM) + if (!sk_is_tcp(sock->sk)) return 0; /* Checks for minimal header length to safely read sa_family. */ diff --git a/security/landlock/setup.c b/security/landlock/setup.c index 28519a45b11ff..0c85ea27e4099 100644 --- a/security/landlock/setup.c +++ b/security/landlock/setup.c @@ -6,12 +6,14 @@ * Copyright © 2018-2020 ANSSI */ +#include #include #include #include #include "common.h" #include "cred.h" +#include "errata.h" #include "fs.h" #include "net.h" #include "setup.h" @@ -19,6 +21,11 @@ bool landlock_initialized __ro_after_init = false; +const struct lsm_id landlock_lsmid = { + .name = LANDLOCK_NAME, + .id = LSM_ID_LANDLOCK, +}; + struct lsm_blob_sizes landlock_blob_sizes __ro_after_init = { .lbs_cred = sizeof(struct landlock_cred_security), .lbs_file = sizeof(struct landlock_file_security), @@ -26,13 +33,36 @@ struct lsm_blob_sizes landlock_blob_sizes __ro_after_init = { .lbs_superblock = sizeof(struct landlock_superblock_security), }; -const struct lsm_id landlock_lsmid = { - .name = LANDLOCK_NAME, - .id = LSM_ID_LANDLOCK, -}; +int landlock_errata __ro_after_init; + +static void __init compute_errata(void) +{ + size_t i; + +#ifndef __has_include + /* + * This is a safeguard to make sure the compiler implements + * __has_include (see errata.h). + */ + WARN_ON_ONCE(1); + return; +#endif + + for (i = 0; landlock_errata_init[i].number; i++) { + const int prev_errata = landlock_errata; + + if (WARN_ON_ONCE(landlock_errata_init[i].abi > + landlock_abi_version)) + continue; + + landlock_errata |= BIT(landlock_errata_init[i].number - 1); + WARN_ON_ONCE(prev_errata == landlock_errata); + } +} static int __init landlock_init(void) { + compute_errata(); landlock_add_cred_hooks(); landlock_add_task_hooks(); landlock_add_fs_hooks(); diff --git a/security/landlock/setup.h b/security/landlock/setup.h index c4252d46d49d4..fca307c35fee5 100644 --- a/security/landlock/setup.h +++ b/security/landlock/setup.h @@ -11,7 +11,10 @@ #include +extern const int landlock_abi_version; + extern bool landlock_initialized; +extern int landlock_errata; extern struct lsm_blob_sizes landlock_blob_sizes; extern const struct lsm_id landlock_lsmid; diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index c097d356fa453..4fa2d09f657ae 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -159,7 +159,9 @@ static const struct file_operations ruleset_fops = { * the new ruleset. * @size: Size of the pointed &struct landlock_ruleset_attr (needed for * backward and forward compatibility). - * @flags: Supported value: %LANDLOCK_CREATE_RULESET_VERSION. + * @flags: Supported value: + * - %LANDLOCK_CREATE_RULESET_VERSION + * - %LANDLOCK_CREATE_RULESET_ERRATA * * This system call enables to create a new Landlock ruleset, and returns the * related file descriptor on success. @@ -168,6 +170,10 @@ static const struct file_operations ruleset_fops = { * 0, then the returned value is the highest supported Landlock ABI version * (starting at 1). 
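As a rough userspace illustration of the probing mode documented in this and the following paragraph (a sketch only; it assumes UAPI headers new enough to define LANDLOCK_CREATE_RULESET_ERRATA and a libc exposing SYS_landlock_create_ruleset):

#include <linux/landlock.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long abi, errata;

	/* @attr == NULL and @size == 0 select the probing mode. */
	abi = syscall(SYS_landlock_create_ruleset, NULL, 0,
		      LANDLOCK_CREATE_RULESET_VERSION);
	if (abi < 0) {
		perror("landlock_create_ruleset");
		return 1;
	}

	/* New flag added by this change: bitmask of fixed issues. */
	errata = syscall(SYS_landlock_create_ruleset, NULL, 0,
			 LANDLOCK_CREATE_RULESET_ERRATA);

	printf("Landlock ABI version: %ld\n", abi);
	printf("Landlock errata: %#lx\n",
	       errata < 0 ? 0UL : (unsigned long)errata);
	return 0;
}

On kernels without this change the second call fails with EINVAL, so callers should treat a negative return as "no errata reported".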
* + * If @flags is %LANDLOCK_CREATE_RULESET_ERRATA and @attr is NULL and @size is + * 0, then the returned value is a bitmask of fixed issues for the current + * Landlock ABI version. + * * Possible returned errors are: * * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; @@ -191,9 +197,15 @@ SYSCALL_DEFINE3(landlock_create_ruleset, return -EOPNOTSUPP; if (flags) { - if ((flags == LANDLOCK_CREATE_RULESET_VERSION) && !attr && - !size) - return LANDLOCK_ABI_VERSION; + if (attr || size) + return -EINVAL; + + if (flags == LANDLOCK_CREATE_RULESET_VERSION) + return landlock_abi_version; + + if (flags == LANDLOCK_CREATE_RULESET_ERRATA) + return landlock_errata; + return -EINVAL; } @@ -234,6 +246,8 @@ SYSCALL_DEFINE3(landlock_create_ruleset, return ruleset_fd; } +const int landlock_abi_version = LANDLOCK_ABI_VERSION; + /* * Returns an owned ruleset from a FD. It is thus needed to call * landlock_put_ruleset() on the return value. diff --git a/security/landlock/task.c b/security/landlock/task.c index dc7dab78392ed..4578ce6e319d8 100644 --- a/security/landlock/task.c +++ b/security/landlock/task.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -264,6 +265,17 @@ static int hook_task_kill(struct task_struct *const p, /* Dealing with USB IO. */ dom = landlock_cred(cred)->domain; } else { + /* + * Always allow sending signals between threads of the same process. + * This is required for process credential changes by the Native POSIX + * Threads Library and implemented by the set*id(2) wrappers and + * libcap(3) with tgkill(2). See nptl(7) and libpsx(3). + * + * This exception is similar to the __ptrace_may_access() one. + */ + if (same_thread_group(p, current)) + return 0; + dom = landlock_get_current_domain(); } dom = landlock_get_applicable_domain(dom, signal_scope); diff --git a/security/smack/smack.h b/security/smack/smack.h index dbf8d7226eb56..1c3656b5e3b91 100644 --- a/security/smack/smack.h +++ b/security/smack/smack.h @@ -152,6 +152,7 @@ struct smk_net4addr { struct smack_known *smk_label; /* label */ }; +#if IS_ENABLED(CONFIG_IPV6) /* * An entry in the table identifying IPv6 hosts. */ @@ -162,7 +163,9 @@ struct smk_net6addr { int smk_masks; /* mask size */ struct smack_known *smk_label; /* label */ }; +#endif /* CONFIG_IPV6 */ +#ifdef SMACK_IPV6_PORT_LABELING /* * An entry in the table identifying ports. 
*/ @@ -175,6 +178,7 @@ struct smk_port_label { short smk_sock_type; /* Socket type */ short smk_can_reuse; }; +#endif /* SMACK_IPV6_PORT_LABELING */ struct smack_known_list_elem { struct list_head list; @@ -314,7 +318,9 @@ extern struct smack_known smack_known_web; extern struct mutex smack_known_lock; extern struct list_head smack_known_list; extern struct list_head smk_net4addr_list; +#if IS_ENABLED(CONFIG_IPV6) extern struct list_head smk_net6addr_list; +#endif /* CONFIG_IPV6 */ extern struct mutex smack_onlycap_lock; extern struct list_head smack_onlycap_list; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 370fd594da125..9e13fd3920630 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -2498,6 +2498,7 @@ static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip) return NULL; } +#if IS_ENABLED(CONFIG_IPV6) /* * smk_ipv6_localhost - Check for local ipv6 host address * @sip: the address @@ -2565,6 +2566,7 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip) return NULL; } +#endif /* CONFIG_IPV6 */ /** * smack_netlbl_add - Set the secattr on a socket @@ -2669,6 +2671,7 @@ static int smk_ipv4_check(struct sock *sk, struct sockaddr_in *sap) return rc; } +#if IS_ENABLED(CONFIG_IPV6) /** * smk_ipv6_check - check Smack access * @subject: subject Smack label @@ -2701,6 +2704,7 @@ static int smk_ipv6_check(struct smack_known *subject, rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc); return rc; } +#endif /* CONFIG_IPV6 */ #ifdef SMACK_IPV6_PORT_LABELING /** @@ -3033,7 +3037,9 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, return 0; if (addrlen < offsetofend(struct sockaddr, sa_family)) return 0; - if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) { + +#if IS_ENABLED(CONFIG_IPV6) + if (sap->sa_family == AF_INET6) { struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap; struct smack_known *rsp = NULL; @@ -3053,6 +3059,8 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, return rc; } +#endif /* CONFIG_IPV6 */ + if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in)) return 0; rc = smk_ipv4_check(sock->sk, (struct sockaddr_in *)sap); @@ -4349,29 +4357,6 @@ static int smack_socket_getpeersec_dgram(struct socket *sock, return 0; } -/** - * smack_sock_graft - Initialize a newly created socket with an existing sock - * @sk: child sock - * @parent: parent socket - * - * Set the smk_{in,out} state of an existing sock based on the process that - * is creating the new socket. 
- */ -static void smack_sock_graft(struct sock *sk, struct socket *parent) -{ - struct socket_smack *ssp; - struct smack_known *skp = smk_of_current(); - - if (sk == NULL || - (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)) - return; - - ssp = smack_sock(sk); - ssp->smk_in = skp; - ssp->smk_out = skp; - /* cssp->smk_packet is already set in smack_inet_csk_clone() */ -} - /** * smack_inet_conn_request - Smack access check on connect * @sk: socket involved @@ -5160,7 +5145,6 @@ static struct security_hook_list smack_hooks[] __ro_after_init = { LSM_HOOK_INIT(sk_free_security, smack_sk_free_security), #endif LSM_HOOK_INIT(sk_clone_security, smack_sk_clone_security), - LSM_HOOK_INIT(sock_graft, smack_sock_graft), LSM_HOOK_INIT(inet_conn_request, smack_inet_conn_request), LSM_HOOK_INIT(inet_csk_clone, smack_inet_csk_clone), diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 77b6ac9b5c11b..b30faf731da72 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -106,7 +106,7 @@ static struct snd_seq_client *clientptr(int clientid) return clienttab[clientid]; } -struct snd_seq_client *snd_seq_client_use_ptr(int clientid) +static struct snd_seq_client *client_use_ptr(int clientid, bool load_module) { unsigned long flags; struct snd_seq_client *client; @@ -126,7 +126,7 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid) } spin_unlock_irqrestore(&clients_lock, flags); #ifdef CONFIG_MODULES - if (!in_interrupt()) { + if (load_module) { static DECLARE_BITMAP(client_requested, SNDRV_SEQ_GLOBAL_CLIENTS); static DECLARE_BITMAP(card_requested, SNDRV_CARDS); @@ -168,6 +168,20 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid) return client; } +/* get snd_seq_client object for the given id quickly */ +struct snd_seq_client *snd_seq_client_use_ptr(int clientid) +{ + return client_use_ptr(clientid, false); +} + +/* get snd_seq_client object for the given id; + * if not found, retry after loading the modules + */ +static struct snd_seq_client *client_load_and_use_ptr(int clientid) +{ + return client_use_ptr(clientid, IS_ENABLED(CONFIG_MODULES)); +} + /* Take refcount and perform ioctl_mutex lock on the given client; * used only for OSS sequencer * Unlock via snd_seq_client_ioctl_unlock() below @@ -176,7 +190,7 @@ bool snd_seq_client_ioctl_lock(int clientid) { struct snd_seq_client *client; - client = snd_seq_client_use_ptr(clientid); + client = client_load_and_use_ptr(clientid); if (!client) return false; mutex_lock(&client->ioctl_mutex); @@ -678,12 +692,18 @@ static int snd_seq_deliver_single_event(struct snd_seq_client *client, dest_port->time_real); #if IS_ENABLED(CONFIG_SND_SEQ_UMP) - if (!(dest->filter & SNDRV_SEQ_FILTER_NO_CONVERT)) { - if (snd_seq_ev_is_ump(event)) { + if (snd_seq_ev_is_ump(event)) { + if (!(dest->filter & SNDRV_SEQ_FILTER_NO_CONVERT)) { result = snd_seq_deliver_from_ump(client, dest, dest_port, event, atomic, hop); goto __skip; - } else if (snd_seq_client_is_ump(dest)) { + } else if (dest->type == USER_CLIENT && + !snd_seq_client_is_ump(dest)) { + result = 0; // drop the event + goto __skip; + } + } else if (snd_seq_client_is_ump(dest)) { + if (!(dest->filter & SNDRV_SEQ_FILTER_NO_CONVERT)) { result = snd_seq_deliver_to_ump(client, dest, dest_port, event, atomic, hop); goto __skip; @@ -1189,7 +1209,7 @@ static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void *arg) int err = 0; /* requested client number */ - cptr = snd_seq_client_use_ptr(info->client); + cptr = 
client_load_and_use_ptr(info->client); if (cptr == NULL) return -ENOENT; /* don't change !!! */ @@ -1251,7 +1271,7 @@ static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client, struct snd_seq_client *cptr; /* requested client number */ - cptr = snd_seq_client_use_ptr(client_info->client); + cptr = client_load_and_use_ptr(client_info->client); if (cptr == NULL) return -ENOENT; /* don't change !!! */ @@ -1386,7 +1406,7 @@ static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client, void *arg) struct snd_seq_client *cptr; struct snd_seq_client_port *port; - cptr = snd_seq_client_use_ptr(info->addr.client); + cptr = client_load_and_use_ptr(info->addr.client); if (cptr == NULL) return -ENXIO; @@ -1490,10 +1510,10 @@ static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client, struct snd_seq_client *receiver = NULL, *sender = NULL; struct snd_seq_client_port *sport = NULL, *dport = NULL; - receiver = snd_seq_client_use_ptr(subs->dest.client); + receiver = client_load_and_use_ptr(subs->dest.client); if (!receiver) goto __end; - sender = snd_seq_client_use_ptr(subs->sender.client); + sender = client_load_and_use_ptr(subs->sender.client); if (!sender) goto __end; sport = snd_seq_port_use_ptr(sender, subs->sender.port); @@ -1858,7 +1878,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client, struct snd_seq_client_pool *info = arg; struct snd_seq_client *cptr; - cptr = snd_seq_client_use_ptr(info->client); + cptr = client_load_and_use_ptr(info->client); if (cptr == NULL) return -ENOENT; memset(info, 0, sizeof(*info)); @@ -1962,7 +1982,7 @@ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client, struct snd_seq_client_port *sport = NULL; result = -EINVAL; - sender = snd_seq_client_use_ptr(subs->sender.client); + sender = client_load_and_use_ptr(subs->sender.client); if (!sender) goto __end; sport = snd_seq_port_use_ptr(sender, subs->sender.port); @@ -1993,7 +2013,7 @@ static int snd_seq_ioctl_query_subs(struct snd_seq_client *client, void *arg) struct list_head *p; int i; - cptr = snd_seq_client_use_ptr(subs->root.client); + cptr = client_load_and_use_ptr(subs->root.client); if (!cptr) goto __end; port = snd_seq_port_use_ptr(cptr, subs->root.port); @@ -2060,7 +2080,7 @@ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client, if (info->client < 0) info->client = 0; for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) { - cptr = snd_seq_client_use_ptr(info->client); + cptr = client_load_and_use_ptr(info->client); if (cptr) break; /* found */ } @@ -2083,7 +2103,7 @@ static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client, struct snd_seq_client *cptr; struct snd_seq_client_port *port = NULL; - cptr = snd_seq_client_use_ptr(info->addr.client); + cptr = client_load_and_use_ptr(info->addr.client); if (cptr == NULL) return -ENXIO; @@ -2180,7 +2200,7 @@ static int snd_seq_ioctl_client_ump_info(struct snd_seq_client *caller, size = sizeof(struct snd_ump_endpoint_info); else size = sizeof(struct snd_ump_block_info); - cptr = snd_seq_client_use_ptr(client); + cptr = client_load_and_use_ptr(client); if (!cptr) return -ENOENT; @@ -2452,7 +2472,7 @@ int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev, if (check_event_type_and_length(ev)) return -EINVAL; - cptr = snd_seq_client_use_ptr(client); + cptr = client_load_and_use_ptr(client); if (cptr == NULL) return -EINVAL; @@ -2684,7 +2704,7 @@ void snd_seq_info_clients_read(struct snd_info_entry *entry, /* list the client table */ for (c = 0; 
c < SNDRV_SEQ_MAX_CLIENTS; c++) { - client = snd_seq_client_use_ptr(c); + client = client_load_and_use_ptr(c); if (client == NULL) continue; if (client->type == NO_CLIENT) { diff --git a/sound/core/timer.c b/sound/core/timer.c index fbada79380f9e..d774b9b71ce23 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -1515,91 +1515,97 @@ static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *ti id->subdevice = timer->tmr_subdevice; } -static int snd_timer_user_next_device(struct snd_timer_id __user *_tid) +static void get_next_device(struct snd_timer_id *id) { - struct snd_timer_id id; struct snd_timer *timer; struct list_head *p; - if (copy_from_user(&id, _tid, sizeof(id))) - return -EFAULT; - guard(mutex)(®ister_mutex); - if (id.dev_class < 0) { /* first item */ + if (id->dev_class < 0) { /* first item */ if (list_empty(&snd_timer_list)) - snd_timer_user_zero_id(&id); + snd_timer_user_zero_id(id); else { timer = list_entry(snd_timer_list.next, struct snd_timer, device_list); - snd_timer_user_copy_id(&id, timer); + snd_timer_user_copy_id(id, timer); } } else { - switch (id.dev_class) { + switch (id->dev_class) { case SNDRV_TIMER_CLASS_GLOBAL: - id.device = id.device < 0 ? 0 : id.device + 1; + id->device = id->device < 0 ? 0 : id->device + 1; list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) { - snd_timer_user_copy_id(&id, timer); + snd_timer_user_copy_id(id, timer); break; } - if (timer->tmr_device >= id.device) { - snd_timer_user_copy_id(&id, timer); + if (timer->tmr_device >= id->device) { + snd_timer_user_copy_id(id, timer); break; } } if (p == &snd_timer_list) - snd_timer_user_zero_id(&id); + snd_timer_user_zero_id(id); break; case SNDRV_TIMER_CLASS_CARD: case SNDRV_TIMER_CLASS_PCM: - if (id.card < 0) { - id.card = 0; + if (id->card < 0) { + id->card = 0; } else { - if (id.device < 0) { - id.device = 0; + if (id->device < 0) { + id->device = 0; } else { - if (id.subdevice < 0) - id.subdevice = 0; - else if (id.subdevice < INT_MAX) - id.subdevice++; + if (id->subdevice < 0) + id->subdevice = 0; + else if (id->subdevice < INT_MAX) + id->subdevice++; } } list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); - if (timer->tmr_class > id.dev_class) { - snd_timer_user_copy_id(&id, timer); + if (timer->tmr_class > id->dev_class) { + snd_timer_user_copy_id(id, timer); break; } - if (timer->tmr_class < id.dev_class) + if (timer->tmr_class < id->dev_class) continue; - if (timer->card->number > id.card) { - snd_timer_user_copy_id(&id, timer); + if (timer->card->number > id->card) { + snd_timer_user_copy_id(id, timer); break; } - if (timer->card->number < id.card) + if (timer->card->number < id->card) continue; - if (timer->tmr_device > id.device) { - snd_timer_user_copy_id(&id, timer); + if (timer->tmr_device > id->device) { + snd_timer_user_copy_id(id, timer); break; } - if (timer->tmr_device < id.device) + if (timer->tmr_device < id->device) continue; - if (timer->tmr_subdevice > id.subdevice) { - snd_timer_user_copy_id(&id, timer); + if (timer->tmr_subdevice > id->subdevice) { + snd_timer_user_copy_id(id, timer); break; } - if (timer->tmr_subdevice < id.subdevice) + if (timer->tmr_subdevice < id->subdevice) continue; - snd_timer_user_copy_id(&id, timer); + snd_timer_user_copy_id(id, timer); break; } if (p == &snd_timer_list) - snd_timer_user_zero_id(&id); + snd_timer_user_zero_id(id); break; default: - snd_timer_user_zero_id(&id); + 
snd_timer_user_zero_id(id); } } +} + +static int snd_timer_user_next_device(struct snd_timer_id __user *_tid) +{ + struct snd_timer_id id; + + if (copy_from_user(&id, _tid, sizeof(id))) + return -EFAULT; + scoped_guard(mutex, ®ister_mutex) + get_next_device(&id); if (copy_to_user(_tid, &id, sizeof(*_tid))) return -EFAULT; return 0; @@ -1620,23 +1626,24 @@ static int snd_timer_user_ginfo(struct file *file, tid = ginfo->tid; memset(ginfo, 0, sizeof(*ginfo)); ginfo->tid = tid; - guard(mutex)(®ister_mutex); - t = snd_timer_find(&tid); - if (!t) - return -ENODEV; - ginfo->card = t->card ? t->card->number : -1; - if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) - ginfo->flags |= SNDRV_TIMER_FLG_SLAVE; - strscpy(ginfo->id, t->id, sizeof(ginfo->id)); - strscpy(ginfo->name, t->name, sizeof(ginfo->name)); - scoped_guard(spinlock_irq, &t->lock) - ginfo->resolution = snd_timer_hw_resolution(t); - if (t->hw.resolution_min > 0) { - ginfo->resolution_min = t->hw.resolution_min; - ginfo->resolution_max = t->hw.resolution_max; - } - list_for_each(p, &t->open_list_head) { - ginfo->clients++; + scoped_guard(mutex, ®ister_mutex) { + t = snd_timer_find(&tid); + if (!t) + return -ENODEV; + ginfo->card = t->card ? t->card->number : -1; + if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) + ginfo->flags |= SNDRV_TIMER_FLG_SLAVE; + strscpy(ginfo->id, t->id, sizeof(ginfo->id)); + strscpy(ginfo->name, t->name, sizeof(ginfo->name)); + scoped_guard(spinlock_irq, &t->lock) + ginfo->resolution = snd_timer_hw_resolution(t); + if (t->hw.resolution_min > 0) { + ginfo->resolution_min = t->hw.resolution_min; + ginfo->resolution_max = t->hw.resolution_max; + } + list_for_each(p, &t->open_list_head) { + ginfo->clients++; + } } if (copy_to_user(_ginfo, ginfo, sizeof(*ginfo))) return -EFAULT; @@ -1674,31 +1681,31 @@ static int snd_timer_user_gstatus(struct file *file, struct snd_timer_gstatus gstatus; struct snd_timer_id tid; struct snd_timer *t; - int err = 0; if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus))) return -EFAULT; tid = gstatus.tid; memset(&gstatus, 0, sizeof(gstatus)); gstatus.tid = tid; - guard(mutex)(®ister_mutex); - t = snd_timer_find(&tid); - if (t != NULL) { - guard(spinlock_irq)(&t->lock); - gstatus.resolution = snd_timer_hw_resolution(t); - if (t->hw.precise_resolution) { - t->hw.precise_resolution(t, &gstatus.resolution_num, - &gstatus.resolution_den); + scoped_guard(mutex, ®ister_mutex) { + t = snd_timer_find(&tid); + if (t != NULL) { + guard(spinlock_irq)(&t->lock); + gstatus.resolution = snd_timer_hw_resolution(t); + if (t->hw.precise_resolution) { + t->hw.precise_resolution(t, &gstatus.resolution_num, + &gstatus.resolution_den); + } else { + gstatus.resolution_num = gstatus.resolution; + gstatus.resolution_den = 1000000000uL; + } } else { - gstatus.resolution_num = gstatus.resolution; - gstatus.resolution_den = 1000000000uL; + return -ENODEV; } - } else { - err = -ENODEV; } - if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus))) - err = -EFAULT; - return err; + if (copy_to_user(_gstatus, &gstatus, sizeof(gstatus))) + return -EFAULT; + return 0; } static int snd_timer_user_tselect(struct file *file, diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c index 9f849e05ce79f..34825b2f3b108 100644 --- a/sound/hda/intel-dsp-config.c +++ b/sound/hda/intel-dsp-config.c @@ -539,6 +539,11 @@ static const struct config_entry config_table[] = { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_PTL, }, + { + .flags = FLAG_SOF | 
FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, + .device = PCI_DEVICE_ID_INTEL_HDA_PTL_H, + }, + #endif }; diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig index 68f1eee9e5c93..fd9391e61b3d9 100644 --- a/sound/pci/hda/Kconfig +++ b/sound/pci/hda/Kconfig @@ -96,9 +96,7 @@ config SND_HDA_CIRRUS_SCODEC config SND_HDA_CIRRUS_SCODEC_KUNIT_TEST tristate "KUnit test for Cirrus side-codec library" if !KUNIT_ALL_TESTS - select SND_HDA_CIRRUS_SCODEC - select GPIOLIB - depends on KUNIT + depends on SND_HDA_CIRRUS_SCODEC && GPIOLIB && KUNIT default KUNIT_ALL_TESTS help This builds KUnit tests for the cirrus side-codec library. @@ -208,6 +206,7 @@ comment "Set to Y if you want auto-loading the side codec driver" config SND_HDA_CODEC_REALTEK tristate "Build Realtek HD-audio codec support" + depends on INPUT select SND_HDA_GENERIC select SND_HDA_GENERIC_LEDS select SND_HDA_SCODEC_COMPONENT diff --git a/sound/pci/hda/cs35l56_hda_spi.c b/sound/pci/hda/cs35l56_hda_spi.c index 7f02155fe61e3..7c94110b6272a 100644 --- a/sound/pci/hda/cs35l56_hda_spi.c +++ b/sound/pci/hda/cs35l56_hda_spi.c @@ -22,6 +22,9 @@ static int cs35l56_hda_spi_probe(struct spi_device *spi) return -ENOMEM; cs35l56->base.dev = &spi->dev; + ret = cs35l56_init_config_for_spi(&cs35l56->base, spi); + if (ret) + return ret; #ifdef CS35L56_WAKE_HOLD_TIME_US cs35l56->base.can_hibernate = true; diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 14763c0f31ad9..46a2204049993 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2470,7 +2470,9 @@ int snd_hda_create_dig_out_ctls(struct hda_codec *codec, break; id = kctl->id; id.index = spdif_index; - snd_ctl_rename_id(codec->card, &kctl->id, &id); + err = snd_ctl_rename_id(codec->card, &kctl->id, &id); + if (err < 0) + return err; } bus->primary_dig_out_type = HDA_PCM_TYPE_HDMI; } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index b4540c5cd2a6f..25b1984898ab2 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -37,6 +37,7 @@ #include #include #include +#include #ifdef CONFIG_X86 /* for snoop control */ @@ -1360,8 +1361,21 @@ static void azx_free(struct azx *chip) if (use_vga_switcheroo(hda)) { if (chip->disabled && hda->probe_continued) snd_hda_unlock_devices(&chip->bus); - if (hda->vga_switcheroo_registered) + if (hda->vga_switcheroo_registered) { vga_switcheroo_unregister_client(chip->pci); + + /* Some GPUs don't have sound, and azx_first_init fails, + * leaving the device probed but non-functional. As long + * as it's probed, the PCI subsystem keeps its runtime + * PM status as active. Force it to suspended (as we + * actually stop the chip) to allow GPU to suspend via + * vga_switcheroo, and print a warning. + */ + dev_warn(&pci->dev, "GPU sound probed, but not operational: please add a quirk to driver_denylist\n"); + pm_runtime_disable(&pci->dev); + pm_runtime_set_suspended(&pci->dev); + pm_runtime_enable(&pci->dev); + } } if (bus->chip_init) { @@ -2071,6 +2085,27 @@ static const struct pci_device_id driver_denylist[] = { {} }; +static struct pci_device_id driver_denylist_ideapad_z570[] = { + { PCI_DEVICE_SUB(0x10de, 0x0bea, 0x0000, 0x0000) }, /* NVIDIA GF108 HDA */ + {} +}; + +/* DMI-based denylist, to be used when: + * - PCI subsystem IDs are zero, impossible to distinguish from valid sound cards. + * - Different modifications of the same laptop use different GPU models. + */ +static const struct dmi_system_id driver_denylist_dmi[] = { + { + /* No HDA in NVIDIA DGPU. 
BIOS disables it, but quirk_nvidia_hda() reenables. */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"), + }, + .driver_data = &driver_denylist_ideapad_z570, + }, + {} +}; + static const struct hda_controller_ops pci_hda_ops = { .disable_msi_reset_irq = disable_msi_reset_irq, .position_check = azx_position_check, @@ -2081,6 +2116,7 @@ static DECLARE_BITMAP(probed_devs, SNDRV_CARDS); static int azx_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { + const struct dmi_system_id *dmi; struct snd_card *card; struct hda_intel *hda; struct azx *chip; @@ -2093,6 +2129,12 @@ static int azx_probe(struct pci_dev *pci, return -ENODEV; } + dmi = dmi_first_match(driver_denylist_dmi); + if (dmi && pci_match_id(dmi->driver_data, pci)) { + dev_info(&pci->dev, "Skipping the device on the DMI denylist\n"); + return -ENODEV; + } + dev = find_first_zero_bit(probed_devs, SNDRV_CARDS); if (dev >= SNDRV_CARDS) return -ENODEV; @@ -2242,6 +2284,8 @@ static const struct snd_pci_quirk power_save_denylist[] = { SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0), /* KONTRON SinglePC may cause a stall at runtime resume */ SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0), + /* Dell ALC3271 */ + SND_PCI_QUIRK(0x1028, 0x0962, "Dell ALC3271", 0), {} }; @@ -2506,6 +2550,8 @@ static const struct pci_device_id azx_ids[] = { { PCI_DEVICE_DATA(INTEL, HDA_ARL, AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE) }, /* Panther Lake */ { PCI_DEVICE_DATA(INTEL, HDA_PTL, AZX_DRIVER_SKL | AZX_DCAPS_INTEL_LNL) }, + /* Panther Lake-H */ + { PCI_DEVICE_DATA(INTEL, HDA_PTL_H, AZX_DRIVER_SKL | AZX_DCAPS_INTEL_LNL) }, /* Apollolake (Broxton-P) */ { PCI_DEVICE_DATA(INTEL, HDA_APL, AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON) }, /* Gemini-Lake */ diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 538c37a78a56f..84ab357b840d6 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -1080,6 +1080,7 @@ static const struct hda_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x8231, "HP ProBook 450 G4", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), diff --git a/sound/pci/hda/patch_cs8409-tables.c b/sound/pci/hda/patch_cs8409-tables.c index 759f48038273d..621f947e38174 100644 --- a/sound/pci/hda/patch_cs8409-tables.c +++ b/sound/pci/hda/patch_cs8409-tables.c @@ -121,7 +121,7 @@ static const struct cs8409_i2c_param cs42l42_init_reg_seq[] = { { CS42L42_MIXER_CHA_VOL, 0x3F }, { CS42L42_MIXER_CHB_VOL, 0x3F }, { CS42L42_MIXER_ADC_VOL, 0x3f }, - { CS42L42_HP_CTL, 0x03 }, + { CS42L42_HP_CTL, 0x0D }, { CS42L42_MIC_DET_CTL1, 0xB6 }, { CS42L42_TIPSENSE_CTL, 0xC2 }, { CS42L42_HS_CLAMP_DISABLE, 0x01 }, @@ -315,7 +315,7 @@ static const struct cs8409_i2c_param dolphin_c0_init_reg_seq[] = { { CS42L42_ASP_TX_SZ_EN, 0x01 }, { CS42L42_PWR_CTL1, 0x0A }, { CS42L42_PWR_CTL2, 0x84 }, - { CS42L42_HP_CTL, 0x03 }, + { CS42L42_HP_CTL, 0x0D }, { CS42L42_MIXER_CHA_VOL, 0x3F }, { CS42L42_MIXER_CHB_VOL, 0x3F }, { CS42L42_MIXER_ADC_VOL, 0x3f }, @@ -371,7 +371,7 @@ static const struct cs8409_i2c_param 
dolphin_c1_init_reg_seq[] = { { CS42L42_ASP_TX_SZ_EN, 0x00 }, { CS42L42_PWR_CTL1, 0x0E }, { CS42L42_PWR_CTL2, 0x84 }, - { CS42L42_HP_CTL, 0x01 }, + { CS42L42_HP_CTL, 0x0D }, { CS42L42_MIXER_CHA_VOL, 0x3F }, { CS42L42_MIXER_CHB_VOL, 0x3F }, { CS42L42_MIXER_ADC_VOL, 0x3f }, diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c index 614327218634c..b760332a4e357 100644 --- a/sound/pci/hda/patch_cs8409.c +++ b/sound/pci/hda/patch_cs8409.c @@ -876,7 +876,7 @@ static void cs42l42_resume(struct sub_codec *cs42l42) { CS42L42_DET_INT_STATUS2, 0x00 }, { CS42L42_TSRS_PLUG_STATUS, 0x00 }, }; - int fsv_old, fsv_new; + unsigned int fsv; /* Bring CS42L42 out of Reset */ spec->gpio_data = snd_hda_codec_read(codec, CS8409_PIN_AFG, 0, AC_VERB_GET_GPIO_DATA, 0); @@ -893,13 +893,15 @@ static void cs42l42_resume(struct sub_codec *cs42l42) /* Clear interrupts, by reading interrupt status registers */ cs8409_i2c_bulk_read(cs42l42, irq_regs, ARRAY_SIZE(irq_regs)); - fsv_old = cs8409_i2c_read(cs42l42, CS42L42_HP_CTL); - if (cs42l42->full_scale_vol == CS42L42_FULL_SCALE_VOL_0DB) - fsv_new = fsv_old & ~CS42L42_FULL_SCALE_VOL_MASK; - else - fsv_new = fsv_old & CS42L42_FULL_SCALE_VOL_MASK; - if (fsv_new != fsv_old) - cs8409_i2c_write(cs42l42, CS42L42_HP_CTL, fsv_new); + fsv = cs8409_i2c_read(cs42l42, CS42L42_HP_CTL); + if (cs42l42->full_scale_vol) { + // Set the full scale volume bit + fsv |= CS42L42_FULL_SCALE_VOL_MASK; + cs8409_i2c_write(cs42l42, CS42L42_HP_CTL, fsv); + } + // Unmute analog channels A and B + fsv = (fsv & ~CS42L42_ANA_MUTE_AB); + cs8409_i2c_write(cs42l42, CS42L42_HP_CTL, fsv); /* we have to explicitly allow unsol event handling even during the * resume phase so that the jack event is processed properly @@ -920,7 +922,7 @@ static void cs42l42_suspend(struct sub_codec *cs42l42) { CS42L42_MIXER_CHA_VOL, 0x3F }, { CS42L42_MIXER_ADC_VOL, 0x3F }, { CS42L42_MIXER_CHB_VOL, 0x3F }, - { CS42L42_HP_CTL, 0x0F }, + { CS42L42_HP_CTL, 0x0D }, { CS42L42_ASP_RX_DAI0_EN, 0x00 }, { CS42L42_ASP_CLK_CFG, 0x00 }, { CS42L42_PWR_CTL1, 0xFE }, diff --git a/sound/pci/hda/patch_cs8409.h b/sound/pci/hda/patch_cs8409.h index 5e48115caf096..14645d25e70fd 100644 --- a/sound/pci/hda/patch_cs8409.h +++ b/sound/pci/hda/patch_cs8409.h @@ -230,9 +230,10 @@ enum cs8409_coefficient_index_registers { #define CS42L42_PDN_TIMEOUT_US (250000) #define CS42L42_PDN_SLEEP_US (2000) #define CS42L42_INIT_TIMEOUT_MS (45) +#define CS42L42_ANA_MUTE_AB (0x0C) #define CS42L42_FULL_SCALE_VOL_MASK (2) -#define CS42L42_FULL_SCALE_VOL_0DB (1) -#define CS42L42_FULL_SCALE_VOL_MINUS6DB (0) +#define CS42L42_FULL_SCALE_VOL_0DB (0) +#define CS42L42_FULL_SCALE_VOL_MINUS6DB (1) /* Dell BULLSEYE / WARLOCK / CYBORG Specific Definitions */ diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index f3f849b96402d..4171aa22747c3 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -586,6 +586,9 @@ static void alc_shutup_pins(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; + if (spec->no_shutup_pins) + return; + switch (codec->core.vendor_id) { case 0x10ec0236: case 0x10ec0256: @@ -601,8 +604,7 @@ static void alc_shutup_pins(struct hda_codec *codec) alc_headset_mic_no_shutup(codec); break; default: - if (!spec->no_shutup_pins) - snd_hda_shutup_pins(codec); + snd_hda_shutup_pins(codec); break; } } @@ -3790,6 +3792,7 @@ static void alc225_init(struct hda_codec *codec) AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); msleep(75); + alc_update_coef_idx(codec, 0x4a, 3 << 10, 0); 
alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */ } } @@ -3844,6 +3847,79 @@ static void alc225_shutup(struct hda_codec *codec) } } +static void alc222_init(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + hda_nid_t hp_pin = alc_get_hp_pin(spec); + bool hp1_pin_sense, hp2_pin_sense; + + if (!hp_pin) + return; + + msleep(30); + + hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin); + hp2_pin_sense = snd_hda_jack_detect(codec, 0x14); + + if (hp1_pin_sense || hp2_pin_sense) { + msleep(2); + + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x14, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); + msleep(75); + + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x14, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); + + msleep(75); + } +} + +static void alc222_shutup(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + hda_nid_t hp_pin = alc_get_hp_pin(spec); + bool hp1_pin_sense, hp2_pin_sense; + + if (!hp_pin) + hp_pin = 0x21; + + hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin); + hp2_pin_sense = snd_hda_jack_detect(codec, 0x14); + + if (hp1_pin_sense || hp2_pin_sense) { + msleep(2); + + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x14, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); + + msleep(75); + + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x14, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); + + msleep(75); + } + alc_auto_setup_eapd(codec, false); + alc_shutup_pins(codec); +} + static void alc_default_init(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; @@ -4668,6 +4744,22 @@ static void alc245_fixup_hp_mute_led_coefbit(struct hda_codec *codec, } } +static void alc245_fixup_hp_mute_led_v1_coefbit(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->mute_led_polarity = 0; + spec->mute_led_coef.idx = 0x0b; + spec->mute_led_coef.mask = 1 << 3; + spec->mute_led_coef.on = 1 << 3; + spec->mute_led_coef.off = 0; + snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set); + } +} + /* turn on/off mic-mute LED per capture hook by coef bit */ static int coef_micmute_led_set(struct led_classdev *led_cdev, enum led_brightness brightness) @@ -4718,6 +4810,21 @@ static void alc236_fixup_hp_coef_micmute_led(struct hda_codec *codec, } } +static void alc295_fixup_hp_mute_led_coefbit11(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->mute_led_polarity = 0; + spec->mute_led_coef.idx = 0xb; + spec->mute_led_coef.mask = 3 << 3; + spec->mute_led_coef.on = 1 << 3; + spec->mute_led_coef.off = 1 << 4; + snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set); + } +} + static void alc285_fixup_hp_mute_led(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -4928,7 +5035,6 @@ static void alc298_fixup_samsung_amp_v2_4_amps(struct hda_codec *codec, alc298_samsung_v2_init_amps(codec, 4); } -#if IS_REACHABLE(CONFIG_INPUT) static void gpio2_mic_hotkey_event(struct hda_codec *codec, struct 
hda_jack_callback *event) { @@ -5037,10 +5143,6 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec, spec->kb_dev = NULL; } } -#else /* INPUT */ -#define alc280_fixup_hp_gpio2_mic_hotkey NULL -#define alc233_fixup_lenovo_line2_mic_hotkey NULL -#endif /* INPUT */ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -5054,6 +5156,16 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, } } +static void alc233_fixup_lenovo_low_en_micmute_led(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) + spec->micmute_led_polarity = 1; + alc233_fixup_lenovo_line2_mic_hotkey(codec, fix, action); +} + static void alc_hp_mute_disable(struct hda_codec *codec, unsigned int delay) { if (delay <= 0) @@ -6491,6 +6603,16 @@ static void alc285_fixup_speaker2_to_dac1(struct hda_codec *codec, } } +/* disable DAC3 (0x06) selection on NID 0x15 - share Speaker/Bass Speaker DAC 0x03 */ +static void alc294_fixup_bass_speaker_15(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + static const hda_nid_t conn[] = { 0x02, 0x03 }; + snd_hda_override_conn_list(codec, 0x15, ARRAY_SIZE(conn), conn); + } +} + /* Hook to update amp GPIO4 for automute */ static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, struct hda_jack_callback *jack) @@ -7475,6 +7597,16 @@ static void alc287_fixup_lenovo_thinkpad_with_alc1318(struct hda_codec *codec, spec->gen.pcm_playback_hook = alc287_alc1318_playback_pcm_hook; } +/* + * Clear COEF 0x0d (PCBEEP passthrough) bit 0x40 where BIOS sets it wrongly + * at PM resume + */ +static void alc283_fixup_dell_hp_resume(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + if (action == HDA_FIXUP_ACT_INIT) + alc_write_coef_idx(codec, 0xd, 0x2800); +} enum { ALC269_FIXUP_GPIO2, @@ -7545,6 +7677,7 @@ enum { ALC290_FIXUP_MONO_SPEAKERS_HSJACK, ALC290_FIXUP_SUBWOOFER, ALC290_FIXUP_SUBWOOFER_HSJACK, + ALC295_FIXUP_HP_MUTE_LED_COEFBIT11, ALC269_FIXUP_THINKPAD_ACPI, ALC269_FIXUP_DMIC_THINKPAD_ACPI, ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13, @@ -7587,6 +7720,7 @@ enum { ALC275_FIXUP_DELL_XPS, ALC293_FIXUP_LENOVO_SPK_NOISE, ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, + ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED, ALC255_FIXUP_DELL_SPK_NOISE, ALC225_FIXUP_DISABLE_MIC_VREF, ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, @@ -7656,7 +7790,6 @@ enum { ALC285_FIXUP_THINKPAD_X1_GEN7, ALC285_FIXUP_THINKPAD_HEADSET_JACK, ALC294_FIXUP_ASUS_ALLY, - ALC294_FIXUP_ASUS_ALLY_X, ALC294_FIXUP_ASUS_ALLY_PINS, ALC294_FIXUP_ASUS_ALLY_VERBS, ALC294_FIXUP_ASUS_ALLY_SPEAKER, @@ -7754,6 +7887,7 @@ enum { ALC287_FIXUP_TAS2781_I2C, ALC287_FIXUP_YOGA7_14ARB7_I2C, ALC245_FIXUP_HP_MUTE_LED_COEFBIT, + ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT, ALC245_FIXUP_HP_X360_MUTE_LEDS, ALC287_FIXUP_THINKPAD_I2S_SPK, ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD, @@ -7774,6 +7908,9 @@ enum { ALC245_FIXUP_CLEVO_NOISY_MIC, ALC269_FIXUP_VAIO_VJFH52_MIC_NO_PRESENCE, ALC233_FIXUP_MEDION_MTL_SPK, + ALC294_FIXUP_BASS_SPEAKER_15, + ALC283_FIXUP_DELL_HP_RESUME, + ALC294_FIXUP_ASUS_CS35L41_SPI_2, }; /* A special fixup for Lenovo C940 and Yoga Duet 7; @@ -8573,6 +8710,10 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc233_fixup_lenovo_line2_mic_hotkey, }, + [ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED] = { + .type = HDA_FIXUP_FUNC, + .v.func = 
alc233_fixup_lenovo_low_en_micmute_led, + }, [ALC233_FIXUP_INTEL_NUC8_DMIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_inv_dmic, @@ -9095,12 +9236,6 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC294_FIXUP_ASUS_ALLY_PINS }, - [ALC294_FIXUP_ASUS_ALLY_X] = { - .type = HDA_FIXUP_FUNC, - .v.func = tas2781_fixup_i2c, - .chained = true, - .chain_id = ALC294_FIXUP_ASUS_ALLY_PINS - }, [ALC294_FIXUP_ASUS_ALLY_PINS] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -9282,6 +9417,10 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC283_FIXUP_INT_MIC, }, + [ALC295_FIXUP_HP_MUTE_LED_COEFBIT11] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc295_fixup_hp_mute_led_coefbit11, + }, [ALC298_FIXUP_SAMSUNG_AMP] = { .type = HDA_FIXUP_FUNC, .v.func = alc298_fixup_samsung_amp, @@ -9985,6 +10124,10 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc245_fixup_hp_mute_led_coefbit, }, + [ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc245_fixup_hp_mute_led_v1_coefbit, + }, [ALC245_FIXUP_HP_X360_MUTE_LEDS] = { .type = HDA_FIXUP_FUNC, .v.func = alc245_fixup_hp_mute_led_coefbit, @@ -10102,6 +10245,20 @@ static const struct hda_fixup alc269_fixups[] = { { } }, }, + [ALC294_FIXUP_BASS_SPEAKER_15] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc294_fixup_bass_speaker_15, + }, + [ALC283_FIXUP_DELL_HP_RESUME] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc283_fixup_dell_hp_resume, + }, + [ALC294_FIXUP_ASUS_CS35L41_SPI_2] = { + .type = HDA_FIXUP_FUNC, + .v.func = cs35l41_fixup_spi_two, + .chained = true, + .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC, + }, }; static const struct hda_quirk alc269_fixup_tbl[] = { @@ -10162,6 +10319,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0604, "Dell Venue 11 Pro 7130", ALC283_FIXUP_DELL_HP_RESUME), SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK), SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK), SND_PCI_QUIRK(0x1028, 0x062c, "Dell Latitude E5550", ALC292_FIXUP_DELL_E7X), @@ -10317,6 +10475,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360), SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x85c6, "HP Pavilion x360 Convertible 14-dy1xxx", ALC295_FIXUP_HP_MUTE_LED_COEFBIT11), SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360), SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT), @@ -10364,6 +10523,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x8812, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x881d, "HP 250 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), + SND_PCI_QUIRK(0x103c, 0x881e, "HP Laptop 15s-du3xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), 
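The HP mute-LED fixups added above (alc245_fixup_hp_mute_led_v1_coefbit, alc295_fixup_hp_mute_led_coefbit11) differ only in which COEF index and bit pattern they program; the LED class-device callback then performs a plain read-modify-write of that COEF register. A minimal standalone sketch of the pattern, with coef_read()/coef_write() as hypothetical stand-ins for the driver's alc_read_coef_idx()/alc_write_coef_idx() helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for alc_read_coef_idx()/alc_write_coef_idx(). */
static uint16_t coef_regs[0x100];
static uint16_t coef_read(unsigned int idx) { return coef_regs[idx]; }
static void coef_write(unsigned int idx, uint16_t val) { coef_regs[idx] = val; }

struct mute_led_coef {
	unsigned int idx;   /* COEF index, e.g. 0x0b */
	uint16_t mask;      /* bits owned by the LED, e.g. 3 << 3 */
	uint16_t on, off;   /* bit values for LED on/off within the mask */
};

/* Update only the LED bits, leaving the rest of the COEF word intact. */
static void mute_led_set(const struct mute_led_coef *c, bool on)
{
	uint16_t val = coef_read(c->idx);

	val &= ~c->mask;
	val |= on ? c->on : c->off;
	coef_write(c->idx, val);
}

int main(void)
{
	/* Values from ALC295_FIXUP_HP_MUTE_LED_COEFBIT11 above. */
	const struct mute_led_coef c = {
		.idx = 0x0b, .mask = 3 << 3, .on = 1 << 3, .off = 1 << 4,
	};

	mute_led_set(&c, true);
	printf("COEF 0x%02x = 0x%04x\n", c.idx, coef_read(c.idx));
	return 0;
}

The quirk entries that follow only bind these fixups to specific PCI subsystem IDs; the COEF programming itself is shared.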
SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), @@ -10468,6 +10628,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8bb3, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8bb4, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8bcd, "HP Omen 16-xd0xxx", ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT), SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8bde, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8bdf, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2), @@ -10540,7 +10701,9 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), + SND_PCI_QUIRK(0x1043, 0x1054, "ASUS G614FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x1074, "ASUS G614PH/PM/PP", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK), SND_PCI_QUIRK(0x1043, 0x10a4, "ASUS TP3407SA", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), @@ -10548,27 +10711,32 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK), SND_PCI_QUIRK(0x1043, 0x1154, "ASUS TP3607SH", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x1194, "ASUS UM3406KA", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1204, "ASUS Strix G615JHR_JMR_JPR", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x1214, "ASUS Strix G615LH_LM_LP", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x1294, "ASUS B3405CVA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x12a3, "Asus N7691ZM", ALC269_FIXUP_ASUS_N7601ZM), SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x12b4, "ASUS B3405CCA / P3405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x1460, "Asus VivoBook 15", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1463, "Asus 
GA402X/GA402N", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604VI/VC/VE/VG/VJ/VQ/VU/VV/VY/VZ", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603VQ/VU/VV/VJ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601VV/VU/VJ/VQ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x14d3, "ASUS G614JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x14e3, "ASUS G513PI/PU/PV", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x14f2, "ASUS VivoBook X515JA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1503, "ASUS G733PY/PZ/PZV/PYV", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA/XJ/XQ/XU/XV/XI", ALC287_FIXUP_CS35L41_I2C_2), @@ -10584,7 +10752,6 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS), SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally NR2301L/X", ALC294_FIXUP_ASUS_ALLY), - SND_PCI_QUIRK(0x1043, 0x1eb3, "ROG Ally X RC72LA", ALC294_FIXUP_ASUS_ALLY_X), SND_PCI_QUIRK(0x1043, 0x1863, "ASUS UX6404VI/VV", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS), SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), @@ -10596,7 +10763,6 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), - SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2), @@ -10610,6 +10776,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1c63, "ASUS GU605M", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1), + SND_PCI_QUIRK(0x1043, 0x1c80, "ASUS VivoBook TP401", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS), SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JU/JV/JI", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JY/JZ/JI/JG", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), @@ -10621,6 +10788,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x1df3, "ASUS UM5606WA", ALC294_FIXUP_BASS_SPEAKER_15), SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2), @@ -10638,14 +10806,28 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1f1f, "ASUS H7604JI/JV/J3D", 
ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x1f63, "ASUS P5405CSA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1fb3, "ASUS ROG Flow Z13 GZ302EA", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x3011, "ASUS B5605CVA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), + SND_PCI_QUIRK(0x1043, 0x3061, "ASUS B3405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x3071, "ASUS B5405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x30c1, "ASUS B3605CCA / P3605CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x30d1, "ASUS B5405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x30e1, "ASUS B5605CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x31d0, "ASUS Zen AIO 27 Z272SD_A272SD", ALC274_FIXUP_ASUS_ZEN_AIO_27), + SND_PCI_QUIRK(0x1043, 0x31e1, "ASUS B5605CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x31f1, "ASUS B3605CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), + SND_PCI_QUIRK(0x1043, 0x3d78, "ASUS GA603KH", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x3d88, "ASUS GA603KM", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x3e00, "ASUS G814FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x3e20, "ASUS G814PH/PM/PP", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x3e30, "ASUS TP3607SA", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x3ee0, "ASUS Strix G815_JHR_JMR_JPR", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x3ef0, "ASUS Strix G635LR_LW_LX", ALC287_FIXUP_TAS2781_I2C), @@ -10653,6 +10835,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x3f10, "ASUS Strix G835LR_LW_LX", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x3f20, "ASUS Strix G615LR_LW", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x3f30, "ASUS Strix G815LR_LW", ALC287_FIXUP_TAS2781_I2C), + SND_PCI_QUIRK(0x1043, 0x3fd0, "ASUS B3605CVA", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x3ff0, "ASUS B5405CVA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), @@ -10851,6 +11035,9 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340), SND_PCI_QUIRK(0x17aa, 0x334b, "Lenovo ThinkCentre M70 Gen5", ALC283_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x17aa, 0x3384, "ThinkCentre M90a PRO", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED), + SND_PCI_QUIRK(0x17aa, 0x3386, "ThinkCentre M90a Gen6", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED), + SND_PCI_QUIRK(0x17aa, 0x3387, "ThinkCentre M70a Gen6", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED), SND_PCI_QUIRK(0x17aa, 0x3801, "Lenovo Yoga9 14IAP7", 
ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), HDA_CODEC_QUIRK(0x17aa, 0x3802, "DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga Pro 9 14IRP8", ALC287_FIXUP_TAS2781_I2C), @@ -10985,6 +11172,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x2014, 0x800a, "Positivo ARN50", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13), SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO), @@ -11837,8 +12025,11 @@ static int patch_alc269(struct hda_codec *codec) spec->codec_variant = ALC269_TYPE_ALC300; spec->gen.mixer_nid = 0; /* no loopback on ALC300 */ break; + case 0x10ec0222: case 0x10ec0623: spec->codec_variant = ALC269_TYPE_ALC623; + spec->shutup = alc222_shutup; + spec->init_hook = alc222_init; break; case 0x10ec0700: case 0x10ec0701: diff --git a/sound/soc/amd/acp/acp-legacy-common.c b/sound/soc/amd/acp/acp-legacy-common.c index be01b178172e8..e4af2640feeb1 100644 --- a/sound/soc/amd/acp/acp-legacy-common.c +++ b/sound/soc/amd/acp/acp-legacy-common.c @@ -13,6 +13,7 @@ */ #include "amd.h" +#include #include #include @@ -445,7 +446,9 @@ void check_acp_config(struct pci_dev *pci, struct acp_chip_info *chip) { struct acpi_device *pdm_dev; const union acpi_object *obj; - u32 pdm_addr; + acpi_handle handle; + acpi_integer dmic_status; + u32 pdm_addr, ret; switch (chip->acp_rev) { case ACP3X_DEV: @@ -477,6 +480,11 @@ void check_acp_config(struct pci_dev *pci, struct acp_chip_info *chip) obj->integer.value == pdm_addr) chip->is_pdm_dev = true; } + + handle = ACPI_HANDLE(&pci->dev); + ret = acpi_evaluate_integer(handle, "_WOV", NULL, &dmic_status); + if (!ACPI_FAILURE(ret)) + chip->is_pdm_dev = dmic_status; } } EXPORT_SYMBOL_NS_GPL(check_acp_config, SND_SOC_ACP_COMMON); diff --git a/sound/soc/amd/ps/acp63.h b/sound/soc/amd/ps/acp63.h index 39208305dd6c3..f9759c9342cf3 100644 --- a/sound/soc/amd/ps/acp63.h +++ b/sound/soc/amd/ps/acp63.h @@ -11,6 +11,7 @@ #define ACP_DEVICE_ID 0x15E2 #define ACP63_REG_START 0x1240000 #define ACP63_REG_END 0x125C000 +#define ACP63_PCI_REV 0x63 #define ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK 0x00010001 #define ACP_PGFSM_CNTL_POWER_ON_MASK 1 diff --git a/sound/soc/amd/ps/pci-ps.c b/sound/soc/amd/ps/pci-ps.c index 5c4a0be7a7889..aec3150ecf581 100644 --- a/sound/soc/amd/ps/pci-ps.c +++ b/sound/soc/amd/ps/pci-ps.c @@ -559,7 +559,7 @@ static int snd_acp63_probe(struct pci_dev *pci, /* Pink Sardine device check */ switch (pci->revision) { - case 0x63: + case ACP63_PCI_REV: break; default: dev_dbg(&pci->dev, "acp63 pci device not found\n"); diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index b16587d8f97a8..e632f16c91025 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -248,6 +248,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "21M5"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21M6"), + } + }, { .driver_data = &acp6x_card, .matches = { @@ -332,6 +339,13 @@ static const struct dmi_system_id 
yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83J2"), + } + }, { .driver_data = &acp6x_card, .matches = { @@ -577,6 +591,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "pang13"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."), + DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 C7UCX"), + } + }, {} }; diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c index 402b9a2ff0240..68cdb1027d0c0 100644 --- a/sound/soc/codecs/arizona.c +++ b/sound/soc/codecs/arizona.c @@ -967,7 +967,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w, case ARIZONA_OUT3L_ENA_SHIFT: case ARIZONA_OUT3R_ENA_SHIFT: priv->out_up_pending++; - priv->out_up_delay += 17; + priv->out_up_delay += 17000; break; case ARIZONA_OUT4L_ENA_SHIFT: case ARIZONA_OUT4R_ENA_SHIFT: @@ -977,7 +977,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w, case WM8997: break; default: - priv->out_up_delay += 10; + priv->out_up_delay += 10000; break; } break; @@ -999,7 +999,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w, if (!priv->out_up_pending && priv->out_up_delay) { dev_dbg(component->dev, "Power up delay: %d\n", priv->out_up_delay); - msleep(priv->out_up_delay); + fsleep(priv->out_up_delay); priv->out_up_delay = 0; } break; @@ -1017,7 +1017,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w, case ARIZONA_OUT3L_ENA_SHIFT: case ARIZONA_OUT3R_ENA_SHIFT: priv->out_down_pending++; - priv->out_down_delay++; + priv->out_down_delay += 1000; break; case ARIZONA_OUT4L_ENA_SHIFT: case ARIZONA_OUT4R_ENA_SHIFT: @@ -1028,10 +1028,10 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w, break; case WM8998: case WM1814: - priv->out_down_delay += 5; + priv->out_down_delay += 5000; break; default: - priv->out_down_delay++; + priv->out_down_delay += 1000; break; } break; @@ -1053,7 +1053,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w, if (!priv->out_down_pending && priv->out_down_delay) { dev_dbg(component->dev, "Power down delay: %d\n", priv->out_down_delay); - msleep(priv->out_down_delay); + fsleep(priv->out_down_delay); priv->out_down_delay = 0; } break; diff --git a/sound/soc/codecs/cs35l41-spi.c b/sound/soc/codecs/cs35l41-spi.c index a6db44520c060..f9b6bf7bea9c9 100644 --- a/sound/soc/codecs/cs35l41-spi.c +++ b/sound/soc/codecs/cs35l41-spi.c @@ -32,13 +32,16 @@ static int cs35l41_spi_probe(struct spi_device *spi) const struct regmap_config *regmap_config = &cs35l41_regmap_spi; struct cs35l41_hw_cfg *hw_cfg = dev_get_platdata(&spi->dev); struct cs35l41_private *cs35l41; + int ret; cs35l41 = devm_kzalloc(&spi->dev, sizeof(struct cs35l41_private), GFP_KERNEL); if (!cs35l41) return -ENOMEM; spi->max_speed_hz = CS35L41_SPI_MAX_FREQ; - spi_setup(spi); + ret = spi_setup(spi); + if (ret < 0) + return ret; spi_set_drvdata(spi, cs35l41); cs35l41->regmap = devm_regmap_init_spi(spi, regmap_config); diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c index e45e9ae01bc66..195841a567c3d 100644 --- a/sound/soc/codecs/cs35l56-shared.c +++ b/sound/soc/codecs/cs35l56-shared.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -303,6 +304,79 @@ void cs35l56_wait_min_reset_pulse(void) } EXPORT_SYMBOL_NS_GPL(cs35l56_wait_min_reset_pulse, SND_SOC_CS35L56_SHARED); +static const struct { + u32 addr; + u32 value; +} 
cs35l56_spi_system_reset_stages[] = { + { .addr = CS35L56_DSP_VIRTUAL1_MBOX_1, .value = CS35L56_MBOX_CMD_SYSTEM_RESET }, + /* The next write is necessary to delimit the soft reset */ + { .addr = CS35L56_DSP_MBOX_1_RAW, .value = CS35L56_MBOX_CMD_PING }, +}; + +static void cs35l56_spi_issue_bus_locked_reset(struct cs35l56_base *cs35l56_base, + struct spi_device *spi) +{ + struct cs35l56_spi_payload *buf = cs35l56_base->spi_payload_buf; + struct spi_transfer t = { + .tx_buf = buf, + .len = sizeof(*buf), + }; + struct spi_message m; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(cs35l56_spi_system_reset_stages); i++) { + buf->addr = cpu_to_be32(cs35l56_spi_system_reset_stages[i].addr); + buf->value = cpu_to_be32(cs35l56_spi_system_reset_stages[i].value); + spi_message_init_with_transfers(&m, &t, 1); + ret = spi_sync_locked(spi, &m); + if (ret) + dev_warn(cs35l56_base->dev, "spi_sync failed: %d\n", ret); + + usleep_range(CS35L56_SPI_RESET_TO_PORT_READY_US, + 2 * CS35L56_SPI_RESET_TO_PORT_READY_US); + } +} + +static void cs35l56_spi_system_reset(struct cs35l56_base *cs35l56_base) +{ + struct spi_device *spi = to_spi_device(cs35l56_base->dev); + unsigned int val; + int read_ret, ret; + + /* + * There must not be any other SPI bus activity while the amp is + * soft-resetting. + */ + ret = spi_bus_lock(spi->controller); + if (ret) { + dev_warn(cs35l56_base->dev, "spi_bus_lock failed: %d\n", ret); + return; + } + + cs35l56_spi_issue_bus_locked_reset(cs35l56_base, spi); + spi_bus_unlock(spi->controller); + + /* + * Check firmware boot by testing for a response in MBOX_2. + * HALO_STATE cannot be trusted yet because the reset sequence + * can leave it with stale state. But MBOX is reset. + * The regmap must remain in cache-only until the chip has + * booted, so use a bypassed read. + */ + ret = read_poll_timeout(regmap_read_bypassed, read_ret, + (val > 0) && (val < 0xffffffff), + CS35L56_HALO_STATE_POLL_US, + CS35L56_HALO_STATE_TIMEOUT_US, + false, + cs35l56_base->regmap, + CS35L56_DSP_VIRTUAL1_MBOX_2, + &val); + if (ret) { + dev_err(cs35l56_base->dev, "SPI reboot timed out(%d): MBOX2=%#x\n", + read_ret, val); + } +} + static const struct reg_sequence cs35l56_system_reset_seq[] = { REG_SEQ0(CS35L56_DSP1_HALO_STATE, 0), REG_SEQ0(CS35L56_DSP_VIRTUAL1_MBOX_1, CS35L56_MBOX_CMD_SYSTEM_RESET), @@ -315,6 +389,12 @@ void cs35l56_system_reset(struct cs35l56_base *cs35l56_base, bool is_soundwire) * accesses other than the controlled system reset sequence below. 
*/ regcache_cache_only(cs35l56_base->regmap, true); + + if (cs35l56_is_spi(cs35l56_base)) { + cs35l56_spi_system_reset(cs35l56_base); + return; + } + regmap_multi_reg_write_bypassed(cs35l56_base->regmap, cs35l56_system_reset_seq, ARRAY_SIZE(cs35l56_system_reset_seq)); diff --git a/sound/soc/codecs/cs35l56-spi.c b/sound/soc/codecs/cs35l56-spi.c index b07b798b0b45d..568f554a8638b 100644 --- a/sound/soc/codecs/cs35l56-spi.c +++ b/sound/soc/codecs/cs35l56-spi.c @@ -33,6 +33,9 @@ static int cs35l56_spi_probe(struct spi_device *spi) cs35l56->base.dev = &spi->dev; cs35l56->base.can_hibernate = true; + ret = cs35l56_init_config_for_spi(&cs35l56->base, spi); + if (ret) + return ret; ret = cs35l56_common_probe(cs35l56); if (ret != 0) diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c index d9ab003e166bf..73d764fc85392 100644 --- a/sound/soc/codecs/cs42l43-jack.c +++ b/sound/soc/codecs/cs42l43-jack.c @@ -702,6 +702,9 @@ static void cs42l43_clear_jack(struct cs42l43_codec *priv) CS42L43_PGA_WIDESWING_MODE_EN_MASK, 0); regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CTRL, CS42L43_JACK_STEREO_CONFIG_MASK, 0); + regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CLAMP_CTRL, + CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_MASK, + CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_MASK); regmap_update_bits(cs42l43->regmap, CS42L43_HS2, CS42L43_HSDET_MODE_MASK | CS42L43_HSDET_MANUAL_MODE_MASK, 0x2 << CS42L43_HSDET_MODE_SHIFT); diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c index 8ec4083cd3b80..2c43e4a6751b1 100644 --- a/sound/soc/codecs/cs42l43.c +++ b/sound/soc/codecs/cs42l43.c @@ -1146,7 +1146,7 @@ static const struct snd_kcontrol_new cs42l43_controls[] = { SOC_DOUBLE_R_SX_TLV("ADC Volume", CS42L43_ADC_B_CTRL1, CS42L43_ADC_B_CTRL2, CS42L43_ADC_PGA_GAIN_SHIFT, - 0xF, 5, cs42l43_adc_tlv), + 0xF, 4, cs42l43_adc_tlv), SOC_DOUBLE("PDM1 Invert Switch", CS42L43_DMIC_PDM_CTRL, CS42L43_PDM1L_INV_SHIFT, CS42L43_PDM1R_INV_SHIFT, 1, 0), diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c index f3c97da798dc8..76159c45e6b52 100644 --- a/sound/soc/codecs/es8328.c +++ b/sound/soc/codecs/es8328.c @@ -233,7 +233,6 @@ static const struct snd_kcontrol_new es8328_right_line_controls = /* Left Mixer */ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = { - SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0), @@ -243,7 +242,6 @@ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = { static const struct snd_kcontrol_new es8328_right_mixer_controls[] = { SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0), - SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 7, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0), }; @@ -336,10 +334,10 @@ static const struct snd_soc_dapm_widget es8328_dapm_widgets[] = { SND_SOC_DAPM_DAC("Left DAC", "Left Playback", ES8328_DACPOWER, ES8328_DACPOWER_LDAC_OFF, 1), - SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0, + SND_SOC_DAPM_MIXER("Left Mixer", ES8328_DACCONTROL17, 7, 0, &es8328_left_mixer_controls[0], ARRAY_SIZE(es8328_left_mixer_controls)), - SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0, + SND_SOC_DAPM_MIXER("Right Mixer", ES8328_DACCONTROL20, 7, 0, 
&es8328_right_mixer_controls[0], ARRAY_SIZE(es8328_right_mixer_controls)), @@ -418,19 +416,14 @@ static const struct snd_soc_dapm_route es8328_dapm_routes[] = { { "Right Line Mux", "PGA", "Right PGA Mux" }, { "Right Line Mux", "Differential", "Differential Mux" }, - { "Left Out 1", NULL, "Left DAC" }, - { "Right Out 1", NULL, "Right DAC" }, - { "Left Out 2", NULL, "Left DAC" }, - { "Right Out 2", NULL, "Right DAC" }, - - { "Left Mixer", "Playback Switch", "Left DAC" }, + { "Left Mixer", NULL, "Left DAC" }, { "Left Mixer", "Left Bypass Switch", "Left Line Mux" }, { "Left Mixer", "Right Playback Switch", "Right DAC" }, { "Left Mixer", "Right Bypass Switch", "Right Line Mux" }, { "Right Mixer", "Left Playback Switch", "Left DAC" }, { "Right Mixer", "Left Bypass Switch", "Left Line Mux" }, - { "Right Mixer", "Playback Switch", "Right DAC" }, + { "Right Mixer", NULL, "Right DAC" }, { "Right Mixer", "Right Bypass Switch", "Right Line Mux" }, { "DAC DIG", NULL, "DAC STM" }, diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c index c989d82d1d3c1..81bab8299eae4 100644 --- a/sound/soc/codecs/lpass-wsa-macro.c +++ b/sound/soc/codecs/lpass-wsa-macro.c @@ -63,6 +63,10 @@ #define CDC_WSA_TX_SPKR_PROT_CLK_DISABLE 0 #define CDC_WSA_TX_SPKR_PROT_PCM_RATE_MASK GENMASK(3, 0) #define CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K 0 +#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_16K 1 +#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_24K 2 +#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_32K 3 +#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_48K 4 #define CDC_WSA_TX0_SPKR_PROT_PATH_CFG0 (0x0248) #define CDC_WSA_TX1_SPKR_PROT_PATH_CTL (0x0264) #define CDC_WSA_TX1_SPKR_PROT_PATH_CFG0 (0x0268) @@ -407,6 +411,7 @@ struct wsa_macro { int ear_spkr_gain; int spkr_gain_offset; int spkr_mode; + u32 pcm_rate_vi; int is_softclip_on[WSA_MACRO_SOFTCLIP_MAX]; int softclip_clk_users[WSA_MACRO_SOFTCLIP_MAX]; struct regmap *regmap; @@ -1280,6 +1285,7 @@ static int wsa_macro_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_component *component = dai->component; + struct wsa_macro *wsa = snd_soc_component_get_drvdata(component); int ret; switch (substream->stream) { @@ -1291,6 +1297,11 @@ static int wsa_macro_hw_params(struct snd_pcm_substream *substream, __func__, params_rate(params)); return ret; } + break; + case SNDRV_PCM_STREAM_CAPTURE: + if (dai->id == WSA_MACRO_AIF_VI) + wsa->pcm_rate_vi = params_rate(params); + break; default: break; @@ -1448,35 +1459,11 @@ static void wsa_macro_mclk_enable(struct wsa_macro *wsa, bool mclk_enable) } } -static int wsa_macro_mclk_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) +static void wsa_macro_enable_disable_vi_sense(struct snd_soc_component *component, bool enable, + u32 tx_reg0, u32 tx_reg1, u32 val) { - struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); - struct wsa_macro *wsa = snd_soc_component_get_drvdata(component); - - wsa_macro_mclk_enable(wsa, event == SND_SOC_DAPM_PRE_PMU); - return 0; -} - -static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, - int event) -{ - struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); - struct wsa_macro *wsa = snd_soc_component_get_drvdata(component); - u32 tx_reg0, tx_reg1; - - if (test_bit(WSA_MACRO_TX0, &wsa->active_ch_mask[WSA_MACRO_AIF_VI])) { - tx_reg0 = CDC_WSA_TX0_SPKR_PROT_PATH_CTL; - tx_reg1 = CDC_WSA_TX1_SPKR_PROT_PATH_CTL; - } else if (test_bit(WSA_MACRO_TX1, 
&wsa->active_ch_mask[WSA_MACRO_AIF_VI])) { - tx_reg0 = CDC_WSA_TX2_SPKR_PROT_PATH_CTL; - tx_reg1 = CDC_WSA_TX3_SPKR_PROT_PATH_CTL; - } - - switch (event) { - case SND_SOC_DAPM_POST_PMU: - /* Enable V&I sensing */ + if (enable) { + /* Enable V&I sensing */ snd_soc_component_update_bits(component, tx_reg0, CDC_WSA_TX_SPKR_PROT_RESET_MASK, CDC_WSA_TX_SPKR_PROT_RESET); @@ -1485,10 +1472,10 @@ static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w, CDC_WSA_TX_SPKR_PROT_RESET); snd_soc_component_update_bits(component, tx_reg0, CDC_WSA_TX_SPKR_PROT_PCM_RATE_MASK, - CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K); + val); snd_soc_component_update_bits(component, tx_reg1, CDC_WSA_TX_SPKR_PROT_PCM_RATE_MASK, - CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K); + val); snd_soc_component_update_bits(component, tx_reg0, CDC_WSA_TX_SPKR_PROT_CLK_EN_MASK, CDC_WSA_TX_SPKR_PROT_CLK_ENABLE); @@ -1501,9 +1488,7 @@ static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w, snd_soc_component_update_bits(component, tx_reg1, CDC_WSA_TX_SPKR_PROT_RESET_MASK, CDC_WSA_TX_SPKR_PROT_NO_RESET); - break; - case SND_SOC_DAPM_POST_PMD: - /* Disable V&I sensing */ + } else { snd_soc_component_update_bits(component, tx_reg0, CDC_WSA_TX_SPKR_PROT_RESET_MASK, CDC_WSA_TX_SPKR_PROT_RESET); @@ -1516,6 +1501,72 @@ static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w, snd_soc_component_update_bits(component, tx_reg1, CDC_WSA_TX_SPKR_PROT_CLK_EN_MASK, CDC_WSA_TX_SPKR_PROT_CLK_DISABLE); + } +} + +static void wsa_macro_enable_disable_vi_feedback(struct snd_soc_component *component, + bool enable, u32 rate) +{ + struct wsa_macro *wsa = snd_soc_component_get_drvdata(component); + + if (test_bit(WSA_MACRO_TX0, &wsa->active_ch_mask[WSA_MACRO_AIF_VI])) + wsa_macro_enable_disable_vi_sense(component, enable, + CDC_WSA_TX0_SPKR_PROT_PATH_CTL, + CDC_WSA_TX1_SPKR_PROT_PATH_CTL, rate); + + if (test_bit(WSA_MACRO_TX1, &wsa->active_ch_mask[WSA_MACRO_AIF_VI])) + wsa_macro_enable_disable_vi_sense(component, enable, + CDC_WSA_TX2_SPKR_PROT_PATH_CTL, + CDC_WSA_TX3_SPKR_PROT_PATH_CTL, rate); +} + +static int wsa_macro_mclk_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); + struct wsa_macro *wsa = snd_soc_component_get_drvdata(component); + + wsa_macro_mclk_enable(wsa, event == SND_SOC_DAPM_PRE_PMU); + return 0; +} + +static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, + int event) +{ + struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); + struct wsa_macro *wsa = snd_soc_component_get_drvdata(component); + u32 rate_val; + + switch (wsa->pcm_rate_vi) { + case 8000: + rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K; + break; + case 16000: + rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_16K; + break; + case 24000: + rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_24K; + break; + case 32000: + rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_32K; + break; + case 48000: + rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_48K; + break; + default: + rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K; + break; + } + + switch (event) { + case SND_SOC_DAPM_POST_PMU: + /* Enable V&I sensing */ + wsa_macro_enable_disable_vi_feedback(component, true, rate_val); + break; + case SND_SOC_DAPM_POST_PMD: + /* Disable V&I sensing */ + wsa_macro_enable_disable_vi_feedback(component, false, rate_val); break; } diff --git a/sound/soc/codecs/madera.c b/sound/soc/codecs/madera.c index b24d6472ad5fc..fbfd7fb7f1685 
100644 --- a/sound/soc/codecs/madera.c +++ b/sound/soc/codecs/madera.c @@ -2322,10 +2322,10 @@ int madera_out_ev(struct snd_soc_dapm_widget *w, case CS42L92: case CS47L92: case CS47L93: - out_up_delay = 6; + out_up_delay = 6000; break; default: - out_up_delay = 17; + out_up_delay = 17000; break; } @@ -2356,7 +2356,7 @@ int madera_out_ev(struct snd_soc_dapm_widget *w, case MADERA_OUT3R_ENA_SHIFT: priv->out_up_pending--; if (!priv->out_up_pending) { - msleep(priv->out_up_delay); + fsleep(priv->out_up_delay); priv->out_up_delay = 0; } break; @@ -2375,7 +2375,7 @@ int madera_out_ev(struct snd_soc_dapm_widget *w, case MADERA_OUT3L_ENA_SHIFT: case MADERA_OUT3R_ENA_SHIFT: priv->out_down_pending++; - priv->out_down_delay++; + priv->out_down_delay += 1000; break; default: break; @@ -2392,7 +2392,7 @@ int madera_out_ev(struct snd_soc_dapm_widget *w, case MADERA_OUT3R_ENA_SHIFT: priv->out_down_pending--; if (!priv->out_down_pending) { - msleep(priv->out_down_delay); + fsleep(priv->out_down_delay); priv->out_down_delay = 0; } break; diff --git a/sound/soc/codecs/rt1320-sdw.c b/sound/soc/codecs/rt1320-sdw.c index f4e1ea29c2651..f2d194e76a947 100644 --- a/sound/soc/codecs/rt1320-sdw.c +++ b/sound/soc/codecs/rt1320-sdw.c @@ -3705,6 +3705,9 @@ static int rt1320_read_prop(struct sdw_slave *slave) /* set the timeout values */ prop->clk_stop_timeout = 64; + /* BIOS may set wake_capable. Make sure it is 0 as wake events are disabled. */ + prop->wake_capable = 0; + return 0; } diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c index 47df14ba52784..4f0236b34a2d9 100644 --- a/sound/soc/codecs/rt5665.c +++ b/sound/soc/codecs/rt5665.c @@ -31,9 +31,7 @@ #include "rl6231.h" #include "rt5665.h" -#define RT5665_NUM_SUPPLIES 3 - -static const char *rt5665_supply_names[RT5665_NUM_SUPPLIES] = { +static const char * const rt5665_supply_names[] = { "AVDD", "MICVDD", "VBAT", @@ -46,7 +44,6 @@ struct rt5665_priv { struct gpio_desc *gpiod_ldo1_en; struct gpio_desc *gpiod_reset; struct snd_soc_jack *hs_jack; - struct regulator_bulk_data supplies[RT5665_NUM_SUPPLIES]; struct delayed_work jack_detect_work; struct delayed_work calibrate_work; struct delayed_work jd_check_work; @@ -4471,8 +4468,6 @@ static void rt5665_remove(struct snd_soc_component *component) struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component); regmap_write(rt5665->regmap, RT5665_RESET, 0); - - regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies); } #ifdef CONFIG_PM @@ -4758,7 +4753,7 @@ static int rt5665_i2c_probe(struct i2c_client *i2c) { struct rt5665_platform_data *pdata = dev_get_platdata(&i2c->dev); struct rt5665_priv *rt5665; - int i, ret; + int ret; unsigned int val; rt5665 = devm_kzalloc(&i2c->dev, sizeof(struct rt5665_priv), @@ -4774,24 +4769,13 @@ static int rt5665_i2c_probe(struct i2c_client *i2c) else rt5665_parse_dt(rt5665, &i2c->dev); - for (i = 0; i < ARRAY_SIZE(rt5665->supplies); i++) - rt5665->supplies[i].supply = rt5665_supply_names[i]; - - ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(rt5665->supplies), - rt5665->supplies); + ret = devm_regulator_bulk_get_enable(&i2c->dev, ARRAY_SIZE(rt5665_supply_names), + rt5665_supply_names); if (ret != 0) { dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret); return ret; } - ret = regulator_bulk_enable(ARRAY_SIZE(rt5665->supplies), - rt5665->supplies); - if (ret != 0) { - dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret); - return ret; - } - - rt5665->gpiod_ldo1_en = devm_gpiod_get_optional(&i2c->dev, "realtek,ldo1-en", 
GPIOD_OUT_HIGH); diff --git a/sound/soc/codecs/rt722-sdca-sdw.c b/sound/soc/codecs/rt722-sdca-sdw.c index d5c985ff5ac55..5449d6b5cf3d1 100644 --- a/sound/soc/codecs/rt722-sdca-sdw.c +++ b/sound/soc/codecs/rt722-sdca-sdw.c @@ -86,6 +86,10 @@ static bool rt722_sdca_mbq_readable_register(struct device *dev, unsigned int re case 0x6100067: case 0x6100070 ... 0x610007c: case 0x6100080: + case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_FU15, RT722_SDCA_CTL_FU_CH_GAIN, + CH_01) ... + SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_FU15, RT722_SDCA_CTL_FU_CH_GAIN, + CH_04): case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_USER_FU1E, RT722_SDCA_CTL_FU_VOLUME, CH_01): case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_USER_FU1E, RT722_SDCA_CTL_FU_VOLUME, diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c index d482cd194c08c..58315eab492a1 100644 --- a/sound/soc/codecs/tas2764.c +++ b/sound/soc/codecs/tas2764.c @@ -365,7 +365,7 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_component *component = dai->component; struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component); - u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0; + u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0, asi_cfg_4 = 0; int ret; switch (fmt & SND_SOC_DAIFMT_INV_MASK) { @@ -374,12 +374,14 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) fallthrough; case SND_SOC_DAIFMT_NB_NF: asi_cfg_1 = TAS2764_TDM_CFG1_RX_RISING; + asi_cfg_4 = TAS2764_TDM_CFG4_TX_FALLING; break; case SND_SOC_DAIFMT_IB_IF: asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START; fallthrough; case SND_SOC_DAIFMT_IB_NF: asi_cfg_1 = TAS2764_TDM_CFG1_RX_FALLING; + asi_cfg_4 = TAS2764_TDM_CFG4_TX_RISING; break; } @@ -389,6 +391,12 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) if (ret < 0) return ret; + ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG4, + TAS2764_TDM_CFG4_TX_MASK, + asi_cfg_4); + if (ret < 0) + return ret; + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START; diff --git a/sound/soc/codecs/tas2764.h b/sound/soc/codecs/tas2764.h index 168af772a898f..9490f2686e389 100644 --- a/sound/soc/codecs/tas2764.h +++ b/sound/soc/codecs/tas2764.h @@ -25,7 +25,7 @@ /* Power Control */ #define TAS2764_PWR_CTRL TAS2764_REG(0X0, 0x02) -#define TAS2764_PWR_CTRL_MASK GENMASK(1, 0) +#define TAS2764_PWR_CTRL_MASK GENMASK(2, 0) #define TAS2764_PWR_CTRL_ACTIVE 0x0 #define TAS2764_PWR_CTRL_MUTE BIT(0) #define TAS2764_PWR_CTRL_SHUTDOWN BIT(1) @@ -79,6 +79,12 @@ #define TAS2764_TDM_CFG3_RXS_SHIFT 0x4 #define TAS2764_TDM_CFG3_MASK GENMASK(3, 0) +/* TDM Configuration Reg4 */ +#define TAS2764_TDM_CFG4 TAS2764_REG(0X0, 0x0d) +#define TAS2764_TDM_CFG4_TX_MASK BIT(0) +#define TAS2764_TDM_CFG4_TX_RISING 0x0 +#define TAS2764_TDM_CFG4_TX_FALLING BIT(0) + /* TDM Configuration Reg5 */ #define TAS2764_TDM_CFG5 TAS2764_REG(0X0, 0x0e) #define TAS2764_TDM_CFG5_VSNS_MASK BIT(6) diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c index 9f93b230652a5..863c3f672ba98 100644 --- a/sound/soc/codecs/tas2770.c +++ b/sound/soc/codecs/tas2770.c @@ -506,7 +506,7 @@ static int tas2770_codec_probe(struct snd_soc_component *component) } static DECLARE_TLV_DB_SCALE(tas2770_digital_tlv, 1100, 50, 0); -static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -12750, 50, 0); +static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -10050, 50, 0); static const struct snd_kcontrol_new 
tas2770_snd_controls[] = { SOC_SINGLE_TLV("Speaker Playback Volume", TAS2770_PLAY_CFG_REG2, diff --git a/sound/soc/codecs/wcd937x.c b/sound/soc/codecs/wcd937x.c index 08fb13a334a4c..9c1997a42334d 100644 --- a/sound/soc/codecs/wcd937x.c +++ b/sound/soc/codecs/wcd937x.c @@ -2564,6 +2564,7 @@ static int wcd937x_soc_codec_probe(struct snd_soc_component *component) ARRAY_SIZE(wcd9375_dapm_widgets)); if (ret < 0) { dev_err(component->dev, "Failed to add snd_ctls\n"); + wcd_clsh_ctrl_free(wcd937x->clsh_info); return ret; } @@ -2571,6 +2572,7 @@ static int wcd937x_soc_codec_probe(struct snd_soc_component *component) ARRAY_SIZE(wcd9375_audio_map)); if (ret < 0) { dev_err(component->dev, "Failed to add routes\n"); + wcd_clsh_ctrl_free(wcd937x->clsh_info); return ret; } } diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c index edd2cb185c42c..9e67fbfc2ccaf 100644 --- a/sound/soc/codecs/wm0010.c +++ b/sound/soc/codecs/wm0010.c @@ -920,7 +920,7 @@ static int wm0010_spi_probe(struct spi_device *spi) if (ret) { dev_err(wm0010->dev, "Failed to set IRQ %d as wake source: %d\n", irq, ret); - return ret; + goto free_irq; } if (spi->max_speed_hz) @@ -932,9 +932,18 @@ static int wm0010_spi_probe(struct spi_device *spi) &soc_component_dev_wm0010, wm0010_dai, ARRAY_SIZE(wm0010_dai)); if (ret < 0) - return ret; + goto disable_irq_wake; return 0; + +disable_irq_wake: + irq_set_irq_wake(wm0010->irq, 0); + +free_irq: + if (wm0010->irq) + free_irq(wm0010->irq, wm0010); + + return ret; } static void wm0010_spi_remove(struct spi_device *spi) diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c index 502196253d42a..64eee0d2347da 100644 --- a/sound/soc/codecs/wm5110.c +++ b/sound/soc/codecs/wm5110.c @@ -302,7 +302,7 @@ static int wm5110_hp_pre_enable(struct snd_soc_dapm_widget *w) } else { wseq = wm5110_no_dre_left_enable; nregs = ARRAY_SIZE(wm5110_no_dre_left_enable); - priv->out_up_delay += 10; + priv->out_up_delay += 10000; } break; case ARIZONA_OUT1R_ENA_SHIFT: @@ -312,7 +312,7 @@ static int wm5110_hp_pre_enable(struct snd_soc_dapm_widget *w) } else { wseq = wm5110_no_dre_right_enable; nregs = ARRAY_SIZE(wm5110_no_dre_right_enable); - priv->out_up_delay += 10; + priv->out_up_delay += 10000; } break; default: @@ -338,7 +338,7 @@ static int wm5110_hp_pre_disable(struct snd_soc_dapm_widget *w) snd_soc_component_update_bits(component, ARIZONA_SPARE_TRIGGERS, ARIZONA_WS_TRG1, 0); - priv->out_down_delay += 27; + priv->out_down_delay += 27000; } break; case ARIZONA_OUT1R_ENA_SHIFT: @@ -350,7 +350,7 @@ static int wm5110_hp_pre_disable(struct snd_soc_dapm_widget *w) snd_soc_component_update_bits(component, ARIZONA_SPARE_TRIGGERS, ARIZONA_WS_TRG2, 0); - priv->out_down_delay += 27; + priv->out_down_delay += 27000; } break; default: diff --git a/sound/soc/codecs/wsa884x.c b/sound/soc/codecs/wsa884x.c index 86df5152c547b..560a2c04b6955 100644 --- a/sound/soc/codecs/wsa884x.c +++ b/sound/soc/codecs/wsa884x.c @@ -1875,7 +1875,7 @@ static int wsa884x_get_temp(struct wsa884x_priv *wsa884x, long *temp) * Reading temperature is possible only when Power Amplifier is * off. Report last cached data. 
*/ - *temp = wsa884x->temperature; + *temp = wsa884x->temperature * 1000; return 0; } @@ -1934,7 +1934,7 @@ static int wsa884x_get_temp(struct wsa884x_priv *wsa884x, long *temp) if ((val > WSA884X_LOW_TEMP_THRESHOLD) && (val < WSA884X_HIGH_TEMP_THRESHOLD)) { wsa884x->temperature = val; - *temp = val; + *temp = val * 1000; ret = 0; } else { ret = -EAGAIN; diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c index 57b789d7fbedd..5b4f20dbf7bba 100644 --- a/sound/soc/dwc/dwc-i2s.c +++ b/sound/soc/dwc/dwc-i2s.c @@ -199,12 +199,10 @@ static void i2s_start(struct dw_i2s_dev *dev, else i2s_write_reg(dev->i2s_base, IRER, 1); - /* I2S needs to enable IRQ to make a handshake with DMAC on the JH7110 SoC */ - if (dev->use_pio || dev->is_jh7110) - i2s_enable_irqs(dev, substream->stream, config->chan_nr); - else + if (!(dev->use_pio || dev->is_jh7110)) i2s_enable_dma(dev, substream->stream); + i2s_enable_irqs(dev, substream->stream, config->chan_nr); i2s_write_reg(dev->i2s_base, CER, 1); } @@ -218,11 +216,12 @@ static void i2s_stop(struct dw_i2s_dev *dev, else i2s_write_reg(dev->i2s_base, IRER, 0); - if (dev->use_pio || dev->is_jh7110) - i2s_disable_irqs(dev, substream->stream, 8); - else + if (!(dev->use_pio || dev->is_jh7110)) i2s_disable_dma(dev, substream->stream); + i2s_disable_irqs(dev, substream->stream, 8); + + if (!dev->active) { i2s_write_reg(dev->i2s_base, CER, 0); i2s_write_reg(dev->i2s_base, IER, 0); diff --git a/sound/soc/fsl/fsl_audmix.c b/sound/soc/fsl/fsl_audmix.c index 3cd9a66b70a15..7981d598ba139 100644 --- a/sound/soc/fsl/fsl_audmix.c +++ b/sound/soc/fsl/fsl_audmix.c @@ -488,11 +488,17 @@ static int fsl_audmix_probe(struct platform_device *pdev) goto err_disable_pm; } - priv->pdev = platform_device_register_data(dev, "imx-audmix", 0, NULL, 0); - if (IS_ERR(priv->pdev)) { - ret = PTR_ERR(priv->pdev); - dev_err(dev, "failed to register platform: %d\n", ret); - goto err_disable_pm; + /* + * If dais property exist, then register the imx-audmix card driver. + * otherwise, it should be linked by audio graph card. 
+ */ + if (of_find_property(pdev->dev.of_node, "dais", NULL)) { + priv->pdev = platform_device_register_data(dev, "imx-audmix", 0, NULL, 0); + if (IS_ERR(priv->pdev)) { + ret = PTR_ERR(priv->pdev); + dev_err(dev, "failed to register platform: %d\n", ret); + goto err_disable_pm; + } } return 0; diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c index 67c2d4cb0dea2..7cfe77b57b3c2 100644 --- a/sound/soc/fsl/fsl_micfil.c +++ b/sound/soc/fsl/fsl_micfil.c @@ -156,6 +156,8 @@ static int micfil_set_quality(struct fsl_micfil *micfil) case QUALITY_VLOW2: qsel = MICFIL_QSEL_VLOW2_QUALITY; break; + default: + return -EINVAL; } return regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL2, diff --git a/sound/soc/fsl/fsl_qmc_audio.c b/sound/soc/fsl/fsl_qmc_audio.c index 8668abd352080..d41cb6f3efcac 100644 --- a/sound/soc/fsl/fsl_qmc_audio.c +++ b/sound/soc/fsl/fsl_qmc_audio.c @@ -250,6 +250,9 @@ static int qmc_audio_pcm_trigger(struct snd_soc_component *component, switch (cmd) { case SNDRV_PCM_TRIGGER_START: bitmap_zero(prtd->chans_pending, 64); + prtd->buffer_ended = 0; + prtd->ch_dma_addr_current = prtd->ch_dma_addr_start; + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { for (i = 0; i < prtd->channels; i++) prtd->qmc_dai->chans[i].prtd_tx = prtd; diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index 634168d2bb6e5..c5efbceb06d1f 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -994,10 +994,10 @@ static struct snd_soc_dai_driver fsl_sai_dai_template[] = { { .name = "sai-tx", .playback = { - .stream_name = "CPU-Playback", + .stream_name = "SAI-Playback", .channels_min = 1, .channels_max = 32, - .rate_min = 8000, + .rate_min = 8000, .rate_max = 2822400, .rates = SNDRV_PCM_RATE_KNOT, .formats = FSL_SAI_FORMATS, @@ -1007,7 +1007,7 @@ static struct snd_soc_dai_driver fsl_sai_dai_template[] = { { .name = "sai-rx", .capture = { - .stream_name = "CPU-Capture", + .stream_name = "SAI-Capture", .channels_min = 1, .channels_max = 32, .rate_min = 8000, diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c index 8e7b75cf64db4..ca33ecad07521 100644 --- a/sound/soc/fsl/imx-audmix.c +++ b/sound/soc/fsl/imx-audmix.c @@ -23,7 +23,6 @@ struct imx_audmix { struct snd_soc_card card; struct platform_device *audmix_pdev; struct platform_device *out_pdev; - struct clk *cpu_mclk; int num_dai; struct snd_soc_dai_link *dai; int num_dai_conf; @@ -32,34 +31,11 @@ struct imx_audmix { struct snd_soc_dapm_route *dapm_routes; }; -static const u32 imx_audmix_rates[] = { - 8000, 12000, 16000, 24000, 32000, 48000, 64000, 96000, -}; - -static const struct snd_pcm_hw_constraint_list imx_audmix_rate_constraints = { - .count = ARRAY_SIZE(imx_audmix_rates), - .list = imx_audmix_rates, -}; - static int imx_audmix_fe_startup(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); - struct imx_audmix *priv = snd_soc_card_get_drvdata(rtd->card); struct snd_pcm_runtime *runtime = substream->runtime; - struct device *dev = rtd->card->dev; - unsigned long clk_rate = clk_get_rate(priv->cpu_mclk); int ret; - if (clk_rate % 24576000 == 0) { - ret = snd_pcm_hw_constraint_list(runtime, 0, - SNDRV_PCM_HW_PARAM_RATE, - &imx_audmix_rate_constraints); - if (ret < 0) - return ret; - } else { - dev_warn(dev, "mclk may be not supported %lu\n", clk_rate); - } - ret = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS, 1, 8); if (ret < 0) @@ -143,8 +119,8 @@ static const struct snd_soc_ops imx_audmix_be_ops = { static const 
char *name[][3] = { {"HiFi-AUDMIX-FE-0", "HiFi-AUDMIX-FE-1", "HiFi-AUDMIX-FE-2"}, {"sai-tx", "sai-tx", "sai-rx"}, - {"AUDMIX-Playback-0", "AUDMIX-Playback-1", "CPU-Capture"}, - {"CPU-Playback", "CPU-Playback", "AUDMIX-Capture-0"}, + {"AUDMIX-Playback-0", "AUDMIX-Playback-1", "SAI-Capture"}, + {"SAI-Playback", "SAI-Playback", "AUDMIX-Capture-0"}, }; static int imx_audmix_probe(struct platform_device *pdev) @@ -325,13 +301,6 @@ static int imx_audmix_probe(struct platform_device *pdev) } put_device(&cpu_pdev->dev); - priv->cpu_mclk = devm_clk_get(&cpu_pdev->dev, "mclk1"); - if (IS_ERR(priv->cpu_mclk)) { - ret = PTR_ERR(priv->cpu_mclk); - dev_err(&cpu_pdev->dev, "failed to get DAI mclk1: %d\n", ret); - return ret; - } - priv->audmix_pdev = audmix_pdev; priv->out_pdev = cpu_pdev; diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c index a7215bad64845..93dbe40008c00 100644 --- a/sound/soc/fsl/imx-card.c +++ b/sound/soc/fsl/imx-card.c @@ -738,6 +738,8 @@ static int imx_card_probe(struct platform_device *pdev) data->dapm_routes[i].sink = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s", i + 1, "Playback"); + if (!data->dapm_routes[i].sink) + return -ENOMEM; data->dapm_routes[i].source = "CPU-Playback"; } } @@ -755,6 +757,8 @@ static int imx_card_probe(struct platform_device *pdev) data->dapm_routes[i].source = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s", i + 1, "Capture"); + if (!data->dapm_routes[i].source) + return -ENOMEM; data->dapm_routes[i].sink = "CPU-Capture"; } } diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c index fedae7f6f70cc..975ffd2cad292 100644 --- a/sound/soc/generic/simple-card-utils.c +++ b/sound/soc/generic/simple-card-utils.c @@ -1097,6 +1097,7 @@ int graph_util_parse_dai(struct device *dev, struct device_node *ep, args.np = ep; dai = snd_soc_get_dai_via_args(&args); if (dai) { + dlc->of_node = node; dlc->dai_name = snd_soc_dai_name_get(dai); dlc->dai_args = snd_soc_copy_dai_args(dev, &args); if (!dlc->dai_args) diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c index 945f9c0a6a545..15defce0f3eb8 100644 --- a/sound/soc/intel/avs/pcm.c +++ b/sound/soc/intel/avs/pcm.c @@ -925,7 +925,8 @@ static int avs_component_probe(struct snd_soc_component *component) else mach->tplg_filename = devm_kasprintf(adev->dev, GFP_KERNEL, "hda-generic-tplg.bin"); - + if (!mach->tplg_filename) + return -ENOMEM; filename = kasprintf(GFP_KERNEL, "%s/%s", component->driver->topology_name_prefix, mach->tplg_filename); if (!filename) diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c index 84fc35d88b926..5911a05586516 100644 --- a/sound/soc/intel/boards/sof_sdw.c +++ b/sound/soc/intel/boards/sof_sdw.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include "sof_sdw_common.h" #include "../../codecs/rt711.h" @@ -685,6 +686,24 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = { {} }; +static const struct snd_pci_quirk sof_sdw_ssid_quirk_table[] = { + SND_PCI_QUIRK(0x1043, 0x1e13, "ASUS Zenbook S14", SOC_SDW_CODEC_MIC), + SND_PCI_QUIRK(0x1043, 0x1f43, "ASUS Zenbook S16", SOC_SDW_CODEC_MIC), + {} +}; + +static void sof_sdw_check_ssid_quirk(const struct snd_soc_acpi_mach *mach) +{ + const struct snd_pci_quirk *quirk_entry; + + quirk_entry = snd_pci_quirk_lookup_id(mach->mach_params.subsystem_vendor, + mach->mach_params.subsystem_device, + sof_sdw_ssid_quirk_table); + + if (quirk_entry) + sof_sdw_quirk = quirk_entry->value; +} + static struct snd_soc_dai_link_component 
platform_component[] = { { /* name might be overridden during probe */ @@ -853,7 +872,7 @@ static int create_sdw_dailinks(struct snd_soc_card *card, /* generate DAI links by each sdw link */ while (sof_dais->initialised) { - int current_be_id; + int current_be_id = 0; ret = create_sdw_dailink(card, sof_dais, dai_links, &current_be_id, codec_conf); @@ -1212,6 +1231,13 @@ static int mc_probe(struct platform_device *pdev) snd_soc_card_set_drvdata(card, ctx); + if (mach->mach_params.subsystem_id_set) { + snd_soc_card_set_pci_ssid(card, + mach->mach_params.subsystem_vendor, + mach->mach_params.subsystem_device); + sof_sdw_check_ssid_quirk(mach); + } + dmi_check_system(sof_sdw_quirk_table); if (quirk_override != -1) { @@ -1227,12 +1253,6 @@ static int mc_probe(struct platform_device *pdev) for (i = 0; i < ctx->codec_info_list_count; i++) codec_info_list[i].amp_num = 0; - if (mach->mach_params.subsystem_id_set) { - snd_soc_card_set_pci_ssid(card, - mach->mach_params.subsystem_vendor, - mach->mach_params.subsystem_device); - } - ret = sof_card_dai_links_create(card); if (ret < 0) return ret; diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c index bb1324fb588e9..a68efbe98948f 100644 --- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c @@ -214,6 +214,15 @@ static const struct snd_soc_acpi_adr_device rt1316_1_group2_adr[] = { } }; +static const struct snd_soc_acpi_adr_device rt1316_2_group2_adr[] = { + { + .adr = 0x000232025D131601ull, + .num_endpoints = 1, + .endpoints = &spk_r_endpoint, + .name_prefix = "rt1316-2" + } +}; + static const struct snd_soc_acpi_adr_device rt1316_1_single_adr[] = { { .adr = 0x000130025D131601ull, @@ -547,6 +556,20 @@ static const struct snd_soc_acpi_link_adr adl_chromebook_base[] = { {} }; +static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link02[] = { + { + .mask = BIT(0), + .num_adr = ARRAY_SIZE(rt1316_0_group2_adr), + .adr_d = rt1316_0_group2_adr, + }, + { + .mask = BIT(2), + .num_adr = ARRAY_SIZE(rt1316_2_group2_adr), + .adr_d = rt1316_2_group2_adr, + }, + {} +}; + static const struct snd_soc_acpi_codecs adl_max98357a_amp = { .num_codecs = 1, .codecs = {"MX98357A"} @@ -749,6 +772,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = { .drv_name = "sof_sdw", .sof_tplg_filename = "sof-adl-sdw-max98373-rt5682.tplg", }, + { + .link_mask = BIT(0) | BIT(2), + .links = adl_sdw_rt1316_link02, + .drv_name = "sof_sdw", + .sof_tplg_filename = "sof-adl-rt1316-l02.tplg", + }, {}, }; EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_adl_sdw_machines); diff --git a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c index fd02c864e25ef..a3f7917656371 100644 --- a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c @@ -297,7 +297,7 @@ static const struct snd_soc_acpi_adr_device rt1316_3_single_adr[] = { static const struct snd_soc_acpi_adr_device rt1318_1_single_adr[] = { { - .adr = 0x000130025D131801, + .adr = 0x000130025D131801ull, .num_endpoints = 1, .endpoints = &single_endpoint, .name_prefix = "rt1318-1" diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h index 27a2bf9a66139..de3ec6f594c11 100644 --- a/sound/soc/qcom/lpass.h +++ b/sound/soc/qcom/lpass.h @@ -13,10 +13,11 @@ #include #include #include +#include #include "lpass-hdmi.h" #define LPASS_AHBIX_CLOCK_FREQUENCY 131072000 -#define LPASS_MAX_PORTS (LPASS_CDC_DMA_VA_TX8 + 1) +#define
LPASS_MAX_PORTS (DISPLAY_PORT_RX_7 + 1) #define LPASS_MAX_MI2S_PORTS (8) #define LPASS_MAX_DMA_CHANNELS (8) #define LPASS_MAX_HDMI_DMA_CHANNELS (4) diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c index c9404b5934c7e..2cd522108221a 100644 --- a/sound/soc/qcom/qdsp6/q6apm-dai.c +++ b/sound/soc/qcom/qdsp6/q6apm-dai.c @@ -24,8 +24,8 @@ #define PLAYBACK_MIN_PERIOD_SIZE 128 #define CAPTURE_MIN_NUM_PERIODS 2 #define CAPTURE_MAX_NUM_PERIODS 8 -#define CAPTURE_MAX_PERIOD_SIZE 4096 -#define CAPTURE_MIN_PERIOD_SIZE 320 +#define CAPTURE_MAX_PERIOD_SIZE 65536 +#define CAPTURE_MIN_PERIOD_SIZE 6144 #define BUFFER_BYTES_MAX (PLAYBACK_MAX_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE) #define BUFFER_BYTES_MIN (PLAYBACK_MIN_NUM_PERIODS * PLAYBACK_MIN_PERIOD_SIZE) #define COMPR_PLAYBACK_MAX_FRAGMENT_SIZE (128 * 1024) @@ -64,12 +64,12 @@ struct q6apm_dai_rtd { phys_addr_t phys; unsigned int pcm_size; unsigned int pcm_count; - unsigned int pos; /* Buffer position */ unsigned int periods; unsigned int bytes_sent; unsigned int bytes_received; unsigned int copied_total; uint16_t bits_per_sample; + snd_pcm_uframes_t queue_ptr; bool next_track; enum stream_state state; struct q6apm_graph *graph; @@ -123,25 +123,16 @@ static void event_handler(uint32_t opcode, uint32_t token, void *payload, void * { struct q6apm_dai_rtd *prtd = priv; struct snd_pcm_substream *substream = prtd->substream; - unsigned long flags; switch (opcode) { case APM_CLIENT_EVENT_CMD_EOS_DONE: prtd->state = Q6APM_STREAM_STOPPED; break; case APM_CLIENT_EVENT_DATA_WRITE_DONE: - spin_lock_irqsave(&prtd->lock, flags); - prtd->pos += prtd->pcm_count; - spin_unlock_irqrestore(&prtd->lock, flags); snd_pcm_period_elapsed(substream); - if (prtd->state == Q6APM_STREAM_RUNNING) - q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0); break; case APM_CLIENT_EVENT_DATA_READ_DONE: - spin_lock_irqsave(&prtd->lock, flags); - prtd->pos += prtd->pcm_count; - spin_unlock_irqrestore(&prtd->lock, flags); snd_pcm_period_elapsed(substream); if (prtd->state == Q6APM_STREAM_RUNNING) q6apm_read(prtd->graph); @@ -248,7 +239,6 @@ static int q6apm_dai_prepare(struct snd_soc_component *component, } prtd->pcm_count = snd_pcm_lib_period_bytes(substream); - prtd->pos = 0; /* rate and channels are sent to audio driver */ ret = q6apm_graph_media_format_shmem(prtd->graph, &cfg); if (ret < 0) { @@ -294,6 +284,27 @@ static int q6apm_dai_prepare(struct snd_soc_component *component, return 0; } +static int q6apm_dai_ack(struct snd_soc_component *component, struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct q6apm_dai_rtd *prtd = runtime->private_data; + int i, ret = 0, avail_periods; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + avail_periods = (runtime->control->appl_ptr - prtd->queue_ptr)/runtime->period_size; + for (i = 0; i < avail_periods; i++) { + ret = q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, NO_TIMESTAMP); + if (ret < 0) { + dev_err(component->dev, "Error queuing playback buffer %d\n", ret); + return ret; + } + prtd->queue_ptr += runtime->period_size; + } + } + + return ret; +} + static int q6apm_dai_trigger(struct snd_soc_component *component, struct snd_pcm_substream *substream, int cmd) { @@ -305,9 +316,6 @@ static int q6apm_dai_trigger(struct snd_soc_component *component, case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - /* start writing buffers for playback only as we already queued capture buffers */ - if 
(substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - ret = q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0); break; case SNDRV_PCM_TRIGGER_STOP: /* TODO support be handled via SoftPause Module */ @@ -377,13 +385,14 @@ static int q6apm_dai_open(struct snd_soc_component *component, } } - ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32); + /* setup 10ms latency to accommodate DSP restrictions */ + ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 480); if (ret < 0) { dev_err(dev, "constraint for period bytes step ret = %d\n", ret); goto err; } - ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32); + ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 480); if (ret < 0) { dev_err(dev, "constraint for buffer bytes step ret = %d\n", ret); goto err; @@ -428,16 +437,12 @@ static snd_pcm_uframes_t q6apm_dai_pointer(struct snd_soc_component *component, struct snd_pcm_runtime *runtime = substream->runtime; struct q6apm_dai_rtd *prtd = runtime->private_data; snd_pcm_uframes_t ptr; - unsigned long flags; - spin_lock_irqsave(&prtd->lock, flags); - if (prtd->pos == prtd->pcm_size) - prtd->pos = 0; - - ptr = bytes_to_frames(runtime, prtd->pos); - spin_unlock_irqrestore(&prtd->lock, flags); + ptr = q6apm_get_hw_pointer(prtd->graph, substream->stream) * runtime->period_size; + if (ptr) + return ptr - 1; - return ptr; + return 0; } static int q6apm_dai_hw_params(struct snd_soc_component *component, @@ -652,8 +657,6 @@ static int q6apm_dai_compr_set_params(struct snd_soc_component *component, prtd->pcm_size = runtime->fragments * runtime->fragment_size; prtd->bits_per_sample = 16; - prtd->pos = 0; - if (prtd->next_track != true) { memcpy(&prtd->codec, codec, sizeof(*codec)); @@ -836,6 +839,7 @@ static const struct snd_soc_component_driver q6apm_fe_dai_component = { .hw_params = q6apm_dai_hw_params, .pointer = q6apm_dai_pointer, .trigger = q6apm_dai_trigger, + .ack = q6apm_dai_ack, .compress_ops = &q6apm_dai_compress_ops, .use_dai_pcm_id = true, }; diff --git a/sound/soc/qcom/qdsp6/q6apm.c b/sound/soc/qcom/qdsp6/q6apm.c index 2a2a5bd98110b..ca57413cb7847 100644 --- a/sound/soc/qcom/qdsp6/q6apm.c +++ b/sound/soc/qcom/qdsp6/q6apm.c @@ -494,6 +494,19 @@ int q6apm_read(struct q6apm_graph *graph) } EXPORT_SYMBOL_GPL(q6apm_read); +int q6apm_get_hw_pointer(struct q6apm_graph *graph, int dir) +{ + struct audioreach_graph_data *data; + + if (dir == SNDRV_PCM_STREAM_PLAYBACK) + data = &graph->rx_data; + else + data = &graph->tx_data; + + return (int)atomic_read(&data->hw_ptr); +} +EXPORT_SYMBOL_GPL(q6apm_get_hw_pointer); + static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op) { struct data_cmd_rsp_rd_sh_mem_ep_data_buffer_done_v2 *rd_done; @@ -520,7 +533,8 @@ static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op) done = data->payload; phys = graph->rx_data.buf[token].phys; mutex_unlock(&graph->lock); - + /* token numbering starts at 0 */ + atomic_set(&graph->rx_data.hw_ptr, token + 1); if (lower_32_bits(phys) == done->buf_addr_lsw && upper_32_bits(phys) == done->buf_addr_msw) { graph->result.opcode = hdr->opcode; @@ -553,6 +567,8 @@ static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op) rd_done = data->payload; phys = graph->tx_data.buf[hdr->token].phys; mutex_unlock(&graph->lock); + /* token numbering starts at 0 */ + atomic_set(&graph->tx_data.hw_ptr, hdr->token + 1); if (upper_32_bits(phys) == rd_done->buf_addr_msw && lower_32_bits(phys) == 
rd_done->buf_addr_lsw) { diff --git a/sound/soc/qcom/qdsp6/q6apm.h b/sound/soc/qcom/qdsp6/q6apm.h index c248c8d2b1ab7..7ce08b401e310 100644 --- a/sound/soc/qcom/qdsp6/q6apm.h +++ b/sound/soc/qcom/qdsp6/q6apm.h @@ -2,6 +2,7 @@ #ifndef __Q6APM_H__ #define __Q6APM_H__ #include +#include #include #include #include @@ -77,6 +78,7 @@ struct audioreach_graph_data { uint32_t num_periods; uint32_t dsp_buf; uint32_t mem_map_handle; + atomic_t hw_ptr; }; struct audioreach_graph { @@ -150,4 +152,5 @@ int q6apm_enable_compress_module(struct device *dev, struct q6apm_graph *graph, int q6apm_remove_initial_silence(struct device *dev, struct q6apm_graph *graph, uint32_t samples); int q6apm_remove_trailing_silence(struct device *dev, struct q6apm_graph *graph, uint32_t samples); int q6apm_set_real_module_id(struct device *dev, struct q6apm_graph *graph, uint32_t codec_id); +int q6apm_get_hw_pointer(struct q6apm_graph *graph, int dir); #endif /* __APM_GRAPH_ */ diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c index 045100c943527..a400c9a31fead 100644 --- a/sound/soc/qcom/qdsp6/q6asm-dai.c +++ b/sound/soc/qcom/qdsp6/q6asm-dai.c @@ -892,9 +892,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component, if (ret < 0) { dev_err(dev, "q6asm_open_write failed\n"); - q6asm_audio_client_free(prtd->audio_client); - prtd->audio_client = NULL; - return ret; + goto open_err; } } @@ -903,7 +901,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component, prtd->session_id, dir); if (ret) { dev_err(dev, "Stream reg failed ret:%d\n", ret); - return ret; + goto q6_err; } ret = __q6asm_dai_compr_set_codec_params(component, stream, @@ -911,7 +909,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component, prtd->stream_id); if (ret) { dev_err(dev, "codec param setup failed ret:%d\n", ret); - return ret; + goto q6_err; } ret = q6asm_map_memory_regions(dir, prtd->audio_client, prtd->phys, @@ -920,12 +918,21 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component, if (ret < 0) { dev_err(dev, "Buffer Mapping failed ret:%d\n", ret); - return -ENOMEM; + ret = -ENOMEM; + goto q6_err; } prtd->state = Q6ASM_STREAM_RUNNING; return 0; + +q6_err: + q6asm_cmd(prtd->audio_client, prtd->stream_id, CMD_CLOSE); + +open_err: + q6asm_audio_client_free(prtd->audio_client); + prtd->audio_client = NULL; + return ret; } static int q6asm_dai_compr_set_metadata(struct snd_soc_component *component, diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c index acd75e48851fc..7feefeb6b876d 100644 --- a/sound/soc/rockchip/rockchip_i2s_tdm.c +++ b/sound/soc/rockchip/rockchip_i2s_tdm.c @@ -451,11 +451,11 @@ static int rockchip_i2s_tdm_set_fmt(struct snd_soc_dai *cpu_dai, break; case SND_SOC_DAIFMT_DSP_A: val = I2S_TXCR_TFS_TDM_PCM; - tdm_val = TDM_SHIFT_CTRL(0); + tdm_val = TDM_SHIFT_CTRL(2); break; case SND_SOC_DAIFMT_DSP_B: val = I2S_TXCR_TFS_TDM_PCM; - tdm_val = TDM_SHIFT_CTRL(2); + tdm_val = TDM_SHIFT_CTRL(4); break; default: ret = -EINVAL; diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index eca5ce096e545..e3ef9104b411c 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -1758,20 +1758,6 @@ int rsnd_kctrl_accept_anytime(struct rsnd_dai_stream *io) return 1; } -int rsnd_kctrl_accept_runtime(struct rsnd_dai_stream *io) -{ - struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); - struct rsnd_priv *priv = rsnd_io_to_priv(io); - struct device *dev = rsnd_priv_to_dev(priv); 
- - if (!runtime) { - dev_warn(dev, "Can't update kctrl when idle\n"); - return 0; - } - - return 1; -} - struct rsnd_kctrl_cfg *rsnd_kctrl_init_m(struct rsnd_kctrl_cfg_m *cfg) { cfg->cfg.val = cfg->val; diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h index 3c164d8e3b16b..3f1100b98cdd3 100644 --- a/sound/soc/sh/rcar/rsnd.h +++ b/sound/soc/sh/rcar/rsnd.h @@ -742,7 +742,6 @@ struct rsnd_kctrl_cfg_s { #define rsnd_kctrl_vals(x) ((x).val) /* = (x).cfg.val[0] */ int rsnd_kctrl_accept_anytime(struct rsnd_dai_stream *io); -int rsnd_kctrl_accept_runtime(struct rsnd_dai_stream *io); struct rsnd_kctrl_cfg *rsnd_kctrl_init_m(struct rsnd_kctrl_cfg_m *cfg); struct rsnd_kctrl_cfg *rsnd_kctrl_init_s(struct rsnd_kctrl_cfg_s *cfg); int rsnd_kctrl_new(struct rsnd_mod *mod, diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c index e7f86db0d94c3..7d73b183bda68 100644 --- a/sound/soc/sh/rcar/src.c +++ b/sound/soc/sh/rcar/src.c @@ -35,6 +35,7 @@ struct rsnd_src { struct rsnd_mod *dma; struct rsnd_kctrl_cfg_s sen; /* sync convert enable */ struct rsnd_kctrl_cfg_s sync; /* sync convert */ + u32 current_sync_rate; int irq; }; @@ -100,7 +101,7 @@ static u32 rsnd_src_convert_rate(struct rsnd_dai_stream *io, if (!rsnd_src_sync_is_enabled(mod)) return rsnd_io_converted_rate(io); - convert_rate = src->sync.val; + convert_rate = src->current_sync_rate; if (!convert_rate) convert_rate = rsnd_io_converted_rate(io); @@ -201,13 +202,73 @@ static const u32 chan222222[] = { static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io, struct rsnd_mod *mod) { + struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); struct rsnd_priv *priv = rsnd_mod_to_priv(mod); - struct device *dev = rsnd_priv_to_dev(priv); + struct rsnd_src *src = rsnd_mod_to_src(mod); + u32 fin, fout, new_rate; + int inc, cnt, rate; + u64 base, val; + + if (!runtime) + return; + + if (!rsnd_src_sync_is_enabled(mod)) + return; + + fin = rsnd_src_get_in_rate(priv, io); + fout = rsnd_src_get_out_rate(priv, io); + + new_rate = src->sync.val; + + if (!new_rate) + new_rate = fout; + + /* Do nothing if no diff */ + if (new_rate == src->current_sync_rate) + return; + + /* + * SRCm_IFSVR::INTIFS can change within 1% + * see + * SRCm_IFSVR::INTIFS Note + */ + inc = fout / 100; + cnt = abs(new_rate - fout) / inc; + if (fout > new_rate) + inc *= -1; + + /* + * After start running SRC, we can update only SRC_IFSVR + * for Synchronous Mode + */ + base = (u64)0x0400000 * fin; + rate = fout; + for (int i = 0; i < cnt; i++) { + val = base; + rate += inc; + do_div(val, rate); + + rsnd_mod_write(mod, SRC_IFSVR, val); + } + val = base; + do_div(val, new_rate); + + rsnd_mod_write(mod, SRC_IFSVR, val); + + /* update current_sync_rate */ + src->current_sync_rate = new_rate; +} + +static void rsnd_src_init_convert_rate(struct rsnd_dai_stream *io, + struct rsnd_mod *mod) +{ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); + struct rsnd_priv *priv = rsnd_mod_to_priv(mod); + struct device *dev = rsnd_priv_to_dev(priv); int is_play = rsnd_io_is_play(io); int use_src = 0; u32 fin, fout; - u32 ifscr, fsrate, adinr; + u32 ifscr, adinr; u32 cr, route; u32 i_busif, o_busif, tmp; const u32 *bsdsr_table; @@ -245,26 +306,15 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io, adinr = rsnd_get_adinr_bit(mod, io) | chan; /* - * SRC_IFSCR / SRC_IFSVR - */ - ifscr = 0; - fsrate = 0; - if (use_src) { - u64 n; - - ifscr = 1; - n = (u64)0x0400000 * fin; - do_div(n, fout); - fsrate = n; - } - - /* + * SRC_IFSCR * SRC_SRCCR / SRC_ROUTE_MODE0 
*/ + ifscr = 0; cr = 0x00011110; route = 0x0; if (use_src) { route = 0x1; + ifscr = 0x1; if (rsnd_src_sync_is_enabled(mod)) { cr |= 0x1; @@ -335,7 +385,6 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io, rsnd_mod_write(mod, SRC_SRCIR, 1); /* initialize */ rsnd_mod_write(mod, SRC_ADINR, adinr); rsnd_mod_write(mod, SRC_IFSCR, ifscr); - rsnd_mod_write(mod, SRC_IFSVR, fsrate); rsnd_mod_write(mod, SRC_SRCCR, cr); rsnd_mod_write(mod, SRC_BSDSR, bsdsr_table[idx]); rsnd_mod_write(mod, SRC_BSISR, bsisr_table[idx]); @@ -348,6 +397,9 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io, rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout); + /* update SRC_IFSVR */ + rsnd_src_set_convert_rate(io, mod); + return; convert_rate_err: @@ -467,7 +519,8 @@ static int rsnd_src_init(struct rsnd_mod *mod, int ret; /* reset sync convert_rate */ - src->sync.val = 0; + src->sync.val = + src->current_sync_rate = 0; ret = rsnd_mod_power_on(mod); if (ret < 0) @@ -475,7 +528,7 @@ static int rsnd_src_init(struct rsnd_mod *mod, rsnd_src_activation(mod); - rsnd_src_set_convert_rate(io, mod); + rsnd_src_init_convert_rate(io, mod); rsnd_src_status_clear(mod); @@ -493,7 +546,8 @@ static int rsnd_src_quit(struct rsnd_mod *mod, rsnd_mod_power_off(mod); /* reset sync convert_rate */ - src->sync.val = 0; + src->sync.val = + src->current_sync_rate = 0; return 0; } @@ -531,6 +585,22 @@ static irqreturn_t rsnd_src_interrupt(int irq, void *data) return IRQ_HANDLED; } +static int rsnd_src_kctrl_accept_runtime(struct rsnd_dai_stream *io) +{ + struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); + + if (!runtime) { + struct rsnd_priv *priv = rsnd_io_to_priv(io); + struct device *dev = rsnd_priv_to_dev(priv); + + dev_warn(dev, "\"SRC Out Rate\" can use during running\n"); + + return 0; + } + + return 1; +} + static int rsnd_src_probe_(struct rsnd_mod *mod, struct rsnd_dai_stream *io, struct rsnd_priv *priv) @@ -585,7 +655,7 @@ static int rsnd_src_pcm_new(struct rsnd_mod *mod, "SRC Out Rate Switch" : "SRC In Rate Switch", rsnd_kctrl_accept_anytime, - rsnd_src_set_convert_rate, + rsnd_src_init_convert_rate, &src->sen, 1); if (ret < 0) return ret; @@ -594,7 +664,7 @@ static int rsnd_src_pcm_new(struct rsnd_mod *mod, rsnd_io_is_play(io) ? 
"SRC Out Rate" : "SRC In Rate", - rsnd_kctrl_accept_runtime, + rsnd_src_kctrl_accept_runtime, rsnd_src_set_convert_rate, &src->sync, 192000); diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index b3d4e8ae07eff..0c6424a1fcac0 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c @@ -336,7 +336,8 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod, return 0; rate_err: - dev_err(dev, "unsupported clock rate\n"); + dev_err(dev, "unsupported clock rate (%d)\n", rate); + return ret; } diff --git a/sound/soc/sh/rz-ssi.c b/sound/soc/sh/rz-ssi.c index 32db2cead8a4e..4f483bfa584f5 100644 --- a/sound/soc/sh/rz-ssi.c +++ b/sound/soc/sh/rz-ssi.c @@ -416,8 +416,12 @@ static int rz_ssi_stop(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm) rz_ssi_reg_mask_setl(ssi, SSICR, SSICR_TEN | SSICR_REN, 0); /* Cancel all remaining DMA transactions */ - if (rz_ssi_is_dma_enabled(ssi)) - dmaengine_terminate_async(strm->dma_ch); + if (rz_ssi_is_dma_enabled(ssi)) { + if (ssi->playback.dma_ch) + dmaengine_terminate_async(ssi->playback.dma_ch); + if (ssi->capture.dma_ch) + dmaengine_terminate_async(ssi->capture.dma_ch); + } rz_ssi_set_idle(ssi); @@ -524,6 +528,8 @@ static int rz_ssi_pio_send(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm) sample_space = strm->fifo_sample_size; ssifsr = rz_ssi_reg_readl(ssi, SSIFSR); sample_space -= (ssifsr >> SSIFSR_TDC_SHIFT) & SSIFSR_TDC_MASK; + if (sample_space < 0) + return -EINVAL; /* Only add full frames at a time */ while (frames_left && (sample_space >= runtime->channels)) { diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index 19928f098d8dc..b0e4e4168f38d 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -337,7 +337,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, if (ucontrol->value.integer.value[0] < 0) return -EINVAL; val = ucontrol->value.integer.value[0]; - if (mc->platform_max && ((int)val + min) > mc->platform_max) + if (mc->platform_max && val > mc->platform_max) return -EINVAL; if (val > max - min) return -EINVAL; @@ -350,7 +350,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, if (ucontrol->value.integer.value[1] < 0) return -EINVAL; val2 = ucontrol->value.integer.value[1]; - if (mc->platform_max && ((int)val2 + min) > mc->platform_max) + if (mc->platform_max && val2 > mc->platform_max) return -EINVAL; if (val2 > max - min) return -EINVAL; @@ -503,17 +503,16 @@ int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol, { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; - int platform_max; - int min = mc->min; + int max; - if (!mc->platform_max) - mc->platform_max = mc->max; - platform_max = mc->platform_max; + max = mc->max - mc->min; + if (mc->platform_max && mc->platform_max < max) + max = mc->platform_max; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = snd_soc_volsw_is_stereo(mc) ? 
2 : 1; uinfo->value.integer.min = 0; - uinfo->value.integer.max = platform_max - min; + uinfo->value.integer.max = max; return 0; } diff --git a/sound/soc/sof/amd/acp-ipc.c b/sound/soc/sof/amd/acp-ipc.c index b44b1b1adb6ed..cf3994a705f94 100644 --- a/sound/soc/sof/amd/acp-ipc.c +++ b/sound/soc/sof/amd/acp-ipc.c @@ -167,6 +167,7 @@ irqreturn_t acp_sof_ipc_irq_thread(int irq, void *context) if (sdev->first_boot && sdev->fw_state != SOF_FW_BOOT_COMPLETE) { acp_mailbox_read(sdev, sdev->dsp_box.offset, &status, sizeof(status)); + if ((status & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) { snd_sof_dsp_panic(sdev, sdev->dsp_box.offset + sizeof(status), true); @@ -188,13 +189,21 @@ irqreturn_t acp_sof_ipc_irq_thread(int irq, void *context) dsp_ack = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_ack_write); if (dsp_ack) { - spin_lock_irq(&sdev->ipc_lock); - /* handle immediate reply from DSP core */ - acp_dsp_ipc_get_reply(sdev); - snd_sof_ipc_reply(sdev, 0); - /* set the done bit */ - acp_dsp_ipc_dsp_done(sdev); - spin_unlock_irq(&sdev->ipc_lock); + if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) { + spin_lock_irq(&sdev->ipc_lock); + + /* handle immediate reply from DSP core */ + acp_dsp_ipc_get_reply(sdev); + snd_sof_ipc_reply(sdev, 0); + /* set the done bit */ + acp_dsp_ipc_dsp_done(sdev); + + spin_unlock_irq(&sdev->ipc_lock); + } else { + dev_dbg_ratelimited(sdev->dev, "IPC reply before FW_BOOT_COMPLETE: %#x\n", + dsp_ack); + } + ipc_irq = true; } diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c index 95d4762c9d939..35eb23d2a056d 100644 --- a/sound/soc/sof/amd/acp.c +++ b/sound/soc/sof/amd/acp.c @@ -27,6 +27,7 @@ MODULE_PARM_DESC(enable_fw_debug, "Enable Firmware debug"); static struct acp_quirk_entry quirk_valve_galileo = { .signed_fw_image = true, .skip_iram_dram_size_mod = true, + .post_fw_run_delay = true, }; const struct dmi_system_id acp_sof_quirk_table[] = { diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h index 800594440f739..2a19d82d62002 100644 --- a/sound/soc/sof/amd/acp.h +++ b/sound/soc/sof/amd/acp.h @@ -220,6 +220,7 @@ struct sof_amd_acp_desc { struct acp_quirk_entry { bool signed_fw_image; bool skip_iram_dram_size_mod; + bool post_fw_run_delay; }; /* Common device data struct for ACP devices */ diff --git a/sound/soc/sof/amd/vangogh.c b/sound/soc/sof/amd/vangogh.c index 61372958c09dc..436f58be3a9f9 100644 --- a/sound/soc/sof/amd/vangogh.c +++ b/sound/soc/sof/amd/vangogh.c @@ -11,6 +11,7 @@ * Hardware interface for Audio DSP on Vangogh platform */ +#include #include #include @@ -136,6 +137,20 @@ static struct snd_soc_dai_driver vangogh_sof_dai[] = { }, }; +static int sof_vangogh_post_fw_run_delay(struct snd_sof_dev *sdev) +{ + /* + * Resuming from suspend in some cases may cause the DSP firmware + * to enter an unrecoverable faulty state. Delaying a bit any host + * to DSP transmission right after firmware boot completion seems + * to resolve the issue. 
+ */ + if (!sdev->first_boot) + usleep_range(100, 150); + + return 0; +} + /* Vangogh ops */ struct snd_sof_dsp_ops sof_vangogh_ops; EXPORT_SYMBOL_NS(sof_vangogh_ops, SND_SOC_SOF_AMD_COMMON); @@ -157,6 +172,9 @@ int sof_vangogh_ops_init(struct snd_sof_dev *sdev) if (quirks->signed_fw_image) sof_vangogh_ops.load_firmware = acp_sof_load_signed_firmware; + + if (quirks->post_fw_run_delay) + sof_vangogh_ops.post_fw_run = sof_vangogh_post_fw_run_delay; } return 0; diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c index dc46888faa0dc..c0c58b4297155 100644 --- a/sound/soc/sof/intel/hda-codec.c +++ b/sound/soc/sof/intel/hda-codec.c @@ -454,6 +454,7 @@ int hda_codec_i915_exit(struct snd_sof_dev *sdev) } EXPORT_SYMBOL_NS_GPL(hda_codec_i915_exit, SND_SOC_SOF_HDA_AUDIO_CODEC_I915); +MODULE_SOFTDEP("pre: snd-hda-codec-hdmi"); #endif MODULE_LICENSE("Dual BSD/GPL"); diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index f10ed4d102501..c924a998d6f90 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -1305,22 +1305,8 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev) /* report to machine driver if any DMICs are found */ mach->mach_params.dmic_num = check_dmic_num(sdev); - if (sdw_mach_found) { - /* - * DMICs use up to 4 pins and are typically pin-muxed with SoundWire - * link 2 and 3, or link 1 and 2, thus we only try to enable dmics - * if all conditions are true: - * a) 2 or fewer links are used by SoundWire - * b) the NHLT table reports the presence of microphones - */ - if (hweight_long(mach->link_mask) <= 2) - dmic_fixup = true; - else - mach->mach_params.dmic_num = 0; - } else { - if (mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER) - dmic_fixup = true; - } + if (sdw_mach_found || mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER) + dmic_fixup = true; if (tplg_fixup && dmic_fixup && diff --git a/sound/soc/sof/intel/pci-ptl.c b/sound/soc/sof/intel/pci-ptl.c index 69195b5e7b1a9..f54d098d616f6 100644 --- a/sound/soc/sof/intel/pci-ptl.c +++ b/sound/soc/sof/intel/pci-ptl.c @@ -50,6 +50,7 @@ static const struct sof_dev_desc ptl_desc = { /* PCI IDs */ static const struct pci_device_id sof_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, HDA_PTL, &ptl_desc) }, /* PTL */ + { PCI_DEVICE_DATA(INTEL, HDA_PTL_H, &ptl_desc) }, /* PTL-H */ { 0, } }; MODULE_DEVICE_TABLE(pci, sof_pci_ids); diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c index 240fee2166d12..f82db7f2a6b7e 100644 --- a/sound/soc/sof/ipc4-topology.c +++ b/sound/soc/sof/ipc4-topology.c @@ -671,10 +671,16 @@ static int sof_ipc4_widget_setup_comp_dai(struct snd_sof_widget *swidget) } list_for_each_entry(w, &sdev->widget_list, list) { - if (w->widget->sname && + struct snd_sof_dai *alh_dai; + + if (!WIDGET_IS_DAI(w->id) || !w->widget->sname || strcmp(w->widget->sname, swidget->widget->sname)) continue; + alh_dai = w->private; + if (alh_dai->type != SOF_DAI_INTEL_ALH) + continue; + blob->alh_cfg.device_count++; } @@ -1973,11 +1979,13 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget, list_for_each_entry(w, &sdev->widget_list, list) { u32 node_type; - if (w->widget->sname && + if (!WIDGET_IS_DAI(w->id) || !w->widget->sname || strcmp(w->widget->sname, swidget->widget->sname)) continue; dai = w->private; + if (dai->type != SOF_DAI_INTEL_ALH) + continue; alh_copier = (struct sof_ipc4_copier *)dai->private; alh_data = &alh_copier->data; node_type = SOF_IPC4_GET_NODE_TYPE(alh_data->gtw_cfg.node_id); diff --git 
a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c index 35a7462d8b693..c5c6353f18cee 100644 --- a/sound/soc/sof/pcm.c +++ b/sound/soc/sof/pcm.c @@ -511,6 +511,8 @@ static int sof_pcm_close(struct snd_soc_component *component, */ } + spcm->stream[substream->stream].substream = NULL; + return 0; } diff --git a/sound/soc/sof/stream-ipc.c b/sound/soc/sof/stream-ipc.c index 794c7bbccbaf9..8262443ac89ad 100644 --- a/sound/soc/sof/stream-ipc.c +++ b/sound/soc/sof/stream-ipc.c @@ -43,7 +43,7 @@ int sof_ipc_msg_data(struct snd_sof_dev *sdev, return -ESTRPIPE; posn_offset = stream->posn_offset; - } else { + } else if (sps->cstream) { struct sof_compr_stream *sstream = sps->cstream->runtime->private_data; @@ -51,6 +51,10 @@ int sof_ipc_msg_data(struct snd_sof_dev *sdev, return -ESTRPIPE; posn_offset = sstream->posn_offset; + + } else { + dev_err(sdev->dev, "%s: No stream opened\n", __func__); + return -EINVAL; } snd_sof_dsp_mailbox_read(sdev, posn_offset, p, sz); diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c index b3fca5fd87d68..37ca15cc5728c 100644 --- a/sound/soc/sof/topology.c +++ b/sound/soc/sof/topology.c @@ -1269,8 +1269,8 @@ static int sof_widget_parse_tokens(struct snd_soc_component *scomp, struct snd_s struct snd_sof_tuple *new_tuples; num_tuples += token_list[object_token_list[i]].count * (num_sets - 1); - new_tuples = krealloc(swidget->tuples, - sizeof(*new_tuples) * num_tuples, GFP_KERNEL); + new_tuples = krealloc_array(swidget->tuples, + num_tuples, sizeof(*new_tuples), GFP_KERNEL); if (!new_tuples) { ret = -ENOMEM; goto err; diff --git a/sound/soc/ti/j721e-evm.c b/sound/soc/ti/j721e-evm.c index d9d1e021f5b2e..0f96cc45578d8 100644 --- a/sound/soc/ti/j721e-evm.c +++ b/sound/soc/ti/j721e-evm.c @@ -182,6 +182,8 @@ static int j721e_configure_refclk(struct j721e_priv *priv, clk_id = J721E_CLK_PARENT_48000; else if (!(rate % 11025) && priv->pll_rates[J721E_CLK_PARENT_44100]) clk_id = J721E_CLK_PARENT_44100; + else if (!(rate % 11025) && priv->pll_rates[J721E_CLK_PARENT_48000]) + clk_id = J721E_CLK_PARENT_48000; else return ret; diff --git a/sound/usb/midi.c b/sound/usb/midi.c index 737dd00e97b14..826ac870f2469 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -489,16 +489,84 @@ static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep, /* * CME protocol: like the standard protocol, but SysEx commands are sent as a - * single USB packet preceded by a 0x0F byte. + * single USB packet preceded by a 0x0F byte, as are system realtime + * messages and MIDI Active Sensing. + * Also, multiple messages can be sent in the same packet. */ static void snd_usbmidi_cme_input(struct snd_usb_midi_in_endpoint *ep, uint8_t *buffer, int buffer_length) { - if (buffer_length < 2 || (buffer[0] & 0x0f) != 0x0f) - snd_usbmidi_standard_input(ep, buffer, buffer_length); - else - snd_usbmidi_input_data(ep, buffer[0] >> 4, - &buffer[1], buffer_length - 1); + int remaining = buffer_length; + + /* + * CME send sysex, song position pointer, system realtime + * and active sensing using CIN 0x0f, which in the standard + * is only intended for single byte unparsed data. + * So we need to interpret these here before sending them on. + * By default, we assume single byte data, which is true + * for system realtime (midi clock, start, stop and continue) + * and active sensing, and handle the other (known) cases + * separately. + * In contrast to the standard, CME does not split sysex + * into multiple 4-byte packets, but lumps everything together + * into one. 
In addition, CME can string multiple messages + * together in the same packet; pressing the Record button + * on a UF6 sends a sysex message directly followed + * by a song position pointer in the same packet. + * For it to have any reasonable meaning, a sysex message + * needs to be at least 3 bytes in length (0xf0, id, 0xf7), + * corresponding to a packet size of 4 bytes, and the ones sent + * by CME devices are 6 or 7 bytes, making the packet fragments + * 7 or 8 bytes long (six or seven bytes plus preceding CN+CIN byte). + * For the other types, the packet size is always 4 bytes, + * as per the standard, with the data size being 3 for SPP + * and 1 for the others. + * Thus all packet fragments are at least 4 bytes long, so we can + * skip anything that is shorter; this also conveniently skips + * packets with size 0, which CME devices continuously send when + * they have nothing better to do. + * Another quirk is that sometimes multiple messages are sent + * in the same packet. This has been observed for midi clock + * and active sensing i.e. 0x0f 0xf8 0x00 0x00 0x0f 0xfe 0x00 0x00, + * but also multiple note ons/offs, and control change together + * with MIDI clock. Similarly, some sysex messages are followed by + * the song position pointer in the same packet, and occasionally + * additionally by a midi clock or active sensing. + * We handle this by looping over all data and parsing it along the way. + */ + while (remaining >= 4) { + int source_length = 4; /* default */ + + if ((buffer[0] & 0x0f) == 0x0f) { + int data_length = 1; /* default */ + + if (buffer[1] == 0xf0) { + /* Sysex: Find EOX and send on whole message. */ + /* To kick off the search, skip the first + * two bytes (CN+CIN and SYSEX (0xf0). + */ + uint8_t *tmp_buf = buffer + 2; + int tmp_length = remaining - 2; + + while (tmp_length > 1 && *tmp_buf != 0xf7) { + tmp_buf++; + tmp_length--; + } + data_length = tmp_buf - buffer; + source_length = data_length + 1; + } else if (buffer[1] == 0xf2) { + /* Three byte song position pointer */ + data_length = 3; + } + snd_usbmidi_input_data(ep, buffer[0] >> 4, + &buffer[1], data_length); + } else { + /* normal channel events */ + snd_usbmidi_standard_input(ep, buffer, source_length); + } + buffer += source_length; + remaining -= source_length; + } } /* @@ -1145,7 +1213,7 @@ static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream) { struct usbmidi_out_port *port = substream->runtime->private_data; - cancel_work_sync(&port->ep->work); + flush_work(&port->ep->work); return substream_open(substream, 0, 0); } diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index a95ebcf4e46e7..1e7192cb4693c 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -4156,6 +4156,52 @@ static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer, } } +/* + * Some Plantronics headsets have control names that don't meet ALSA naming + * standards. This function fixes nonstandard source names. By the time + * this function is called the control name should look like one of these: + * "source names Playback Volume" + * "source names Playback Switch" + * "source names Capture Volume" + * "source names Capture Switch" + * If any of the trigger words are found in the name then the name will + * be changed to: + * "Headset Playback Volume" + * "Headset Playback Switch" + * "Headset Capture Volume" + * "Headset Capture Switch" + * depending on the current suffix. 
+ */ +static void snd_fix_plt_name(struct snd_usb_audio *chip, + struct snd_ctl_elem_id *id) +{ + /* no variant of "Sidetone" should be added to this list */ + static const char * const trigger[] = { + "Earphone", "Microphone", "Receive", "Transmit" + }; + static const char * const suffix[] = { + " Playback Volume", " Playback Switch", + " Capture Volume", " Capture Switch" + }; + int i; + + for (i = 0; i < ARRAY_SIZE(trigger); i++) + if (strstr(id->name, trigger[i])) + goto triggered; + usb_audio_dbg(chip, "no change in %s\n", id->name); + return; + +triggered: + for (i = 0; i < ARRAY_SIZE(suffix); i++) + if (strstr(id->name, suffix[i])) { + usb_audio_dbg(chip, "fixing kctl name %s\n", id->name); + snprintf(id->name, sizeof(id->name), "Headset%s", + suffix[i]); + return; + } + usb_audio_dbg(chip, "something wrong in kctl name %s\n", id->name); +} + void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, struct usb_mixer_elem_info *cval, int unitid, struct snd_kcontrol *kctl) @@ -4173,5 +4219,10 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, cval->min_mute = 1; break; } + + /* ALSA-ify some Plantronics headset control names */ + if (USB_ID_VENDOR(mixer->chip->usb_id) == 0x047f && + (cval->control == UAC_FU_MUTE || cval->control == UAC_FU_VOLUME)) + snd_fix_plt_name(mixer->chip, &kctl->id); } diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index a97efb7b131ea..09210fb4ac60c 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1868,6 +1868,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs, case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */ subs->stream_offset_adj = 2; break; + case USB_ID(0x2b73, 0x000a): /* Pioneer DJM-900NXS2 */ case USB_ID(0x2b73, 0x0013): /* Pioneer DJM-450 */ pioneer_djm_set_format_quirk(subs, 0x0082); break; diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c index 5f81c68fd42b6..5756ff3528a2d 100644 --- a/sound/usb/usx2y/usbusx2y.c +++ b/sound/usb/usx2y/usbusx2y.c @@ -151,6 +151,12 @@ static int snd_usx2y_card_used[SNDRV_CARDS]; static void snd_usx2y_card_private_free(struct snd_card *card); static void usx2y_unlinkseq(struct snd_usx2y_async_seq *s); +#ifdef USX2Y_NRPACKS_VARIABLE +int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */ +module_param(nrpacks, int, 0444); +MODULE_PARM_DESC(nrpacks, "Number of packets per URB."); +#endif + /* * pipe 4 is used for switching the lamps, setting samplerate, volumes .... */ @@ -432,6 +438,11 @@ static int snd_usx2y_probe(struct usb_interface *intf, struct snd_card *card; int err; +#ifdef USX2Y_NRPACKS_VARIABLE + if (nrpacks < 0 || nrpacks > USX2Y_NRPACKS_MAX) + return -EINVAL; +#endif + if (le16_to_cpu(device->descriptor.idVendor) != 0x1604 || (le16_to_cpu(device->descriptor.idProduct) != USB_ID_US122 && le16_to_cpu(device->descriptor.idProduct) != USB_ID_US224 && diff --git a/sound/usb/usx2y/usbusx2y.h b/sound/usb/usx2y/usbusx2y.h index 391fd7b4ed5ef..6a76d04bf1c7d 100644 --- a/sound/usb/usx2y/usbusx2y.h +++ b/sound/usb/usx2y/usbusx2y.h @@ -7,6 +7,32 @@ #define NRURBS 2 +/* Default value used for nr of packs per urb. + * 1 to 4 have been tested ok on uhci. + * To use 3 on ohci, you'd need a patch: + * look for "0000425-linux-2.6.9-rc4-mm1_ohci-hcd.patch.gz" on + * "https://bugtrack.alsa-project.org/alsa-bug/bug_view_page.php?bug_id=0000425" + * + * 1, 2 and 4 work out of the box on ohci, if I recall correctly. + * Bigger is safer operation, smaller gives lower latencies. 
+ */ +#define USX2Y_NRPACKS 4 + +#define USX2Y_NRPACKS_MAX 1024 + +/* If your system works ok with this module's parameter + * nrpacks set to 1, you might as well comment + * this define out, and thereby produce smaller, faster code. + * You'd also set USX2Y_NRPACKS to 1 then. + */ +#define USX2Y_NRPACKS_VARIABLE 1 + +#ifdef USX2Y_NRPACKS_VARIABLE +extern int nrpacks; +#define nr_of_packs() nrpacks +#else +#define nr_of_packs() USX2Y_NRPACKS +#endif #define URBS_ASYNC_SEQ 10 #define URB_DATA_LEN_ASYNC_SEQ 32 diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c index f540f46a0b143..acca8bead82e5 100644 --- a/sound/usb/usx2y/usbusx2yaudio.c +++ b/sound/usb/usx2y/usbusx2yaudio.c @@ -28,33 +28,6 @@ #include "usx2y.h" #include "usbusx2y.h" -/* Default value used for nr of packs per urb. - * 1 to 4 have been tested ok on uhci. - * To use 3 on ohci, you'd need a patch: - * look for "0000425-linux-2.6.9-rc4-mm1_ohci-hcd.patch.gz" on - * "https://bugtrack.alsa-project.org/alsa-bug/bug_view_page.php?bug_id=0000425" - * - * 1, 2 and 4 work out of the box on ohci, if I recall correctly. - * Bigger is safer operation, smaller gives lower latencies. - */ -#define USX2Y_NRPACKS 4 - -/* If your system works ok with this module's parameter - * nrpacks set to 1, you might as well comment - * this define out, and thereby produce smaller, faster code. - * You'd also set USX2Y_NRPACKS to 1 then. - */ -#define USX2Y_NRPACKS_VARIABLE 1 - -#ifdef USX2Y_NRPACKS_VARIABLE -static int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */ -#define nr_of_packs() nrpacks -module_param(nrpacks, int, 0444); -MODULE_PARM_DESC(nrpacks, "Number of packets per URB."); -#else -#define nr_of_packs() USX2Y_NRPACKS -#endif - static int usx2y_urb_capt_retire(struct snd_usx2y_substream *subs) { struct urb *urb = subs->completed_urb; diff --git a/tools/arch/x86/lib/insn.c b/tools/arch/x86/lib/insn.c index ab5cdc3337dac..e91d4c4e1c162 100644 --- a/tools/arch/x86/lib/insn.c +++ b/tools/arch/x86/lib/insn.c @@ -13,7 +13,7 @@ #endif #include "../include/asm/inat.h" /* __ignore_sync_check__ */ #include "../include/asm/insn.h" /* __ignore_sync_check__ */ -#include "../include/linux/unaligned.h" /* __ignore_sync_check__ */ +#include /* __ignore_sync_check__ */ #include #include diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 0a7327541c17f..46cce18c83086 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -867,8 +867,8 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d, } pads[] = { {"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8} }; - int new_off, pad_bits, bits, i; - const char *pad_type; + int new_off = 0, pad_bits = 0, bits, i; + const char *pad_type = NULL; if (cur_off >= next_off) return; /* no gap */ diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index 777600822d8e4..179f6b31cbd6f 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -2007,7 +2007,7 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj, obj->sym_map[src_sym_idx] = dst_sym_idx; - if (sym_type == STT_SECTION && dst_sym) { + if (sym_type == STT_SECTION && dst_sec) { dst_sec->sec_sym_idx = dst_sym_idx; dst_sym->st_value = 0; } diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c index eb896d30545b6..555d617c1f502 100644 --- a/tools/lib/subcmd/parse-options.c +++ b/tools/lib/subcmd/parse-options.c @@ -807,7 +807,7 @@ static int option__cmp(const void *va, const void *vb) static struct 
option *options__order(const struct option *opts) { int nr_opts = 0, nr_group = 0, nr_parent = 0, len; - const struct option *o, *p = opts; + const struct option *o = NULL, *p = opts; struct option *opt, *ordered = NULL, *group; /* flatten the options that have parents */ diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 8e02db7e83323..ce3ea0c2de042 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -217,6 +217,7 @@ static bool is_rust_noreturn(const struct symbol *func) str_ends_with(func->name, "_4core9panicking14panic_nounwind") || str_ends_with(func->name, "_4core9panicking18panic_bounds_check") || str_ends_with(func->name, "_4core9panicking19assert_failed_inner") || + str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference") || str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") || strstr(func->name, "_4core9panicking13assert_failed") || strstr(func->name, "_4core9panicking11panic_const24panic_const_") || @@ -639,47 +640,8 @@ static int add_dead_ends(struct objtool_file *file) uint64_t offset; /* - * Check for manually annotated dead ends. - */ - rsec = find_section_by_name(file->elf, ".rela.discard.unreachable"); - if (!rsec) - goto reachable; - - for_each_reloc(rsec, reloc) { - if (reloc->sym->type == STT_SECTION) { - offset = reloc_addend(reloc); - } else if (reloc->sym->local_label) { - offset = reloc->sym->offset; - } else { - WARN("unexpected relocation symbol type in %s", rsec->name); - return -1; - } - - insn = find_insn(file, reloc->sym->sec, offset); - if (insn) - insn = prev_insn_same_sec(file, insn); - else if (offset == reloc->sym->sec->sh.sh_size) { - insn = find_last_insn(file, reloc->sym->sec); - if (!insn) { - WARN("can't find unreachable insn at %s+0x%" PRIx64, - reloc->sym->sec->name, offset); - return -1; - } - } else { - WARN("can't find unreachable insn at %s+0x%" PRIx64, - reloc->sym->sec->name, offset); - return -1; - } - - insn->dead_end = true; - } - -reachable: - /* - * These manually annotated reachable checks are needed for GCC 4.4, - * where the Linux unreachable() macro isn't supported. In that case - * GCC doesn't know the "ud2" is fatal, so it generates code as if it's - * not a dead end. + * UD2 defaults to being a dead-end, allow them to be annotated for + * non-fatal, eg WARN. */ rsec = find_section_by_name(file->elf, ".rela.discard.reachable"); if (!rsec) @@ -2100,6 +2062,14 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn, reloc_addend(reloc) == pfunc->offset) break; + /* + * Clang sometimes leaves dangling unused jump table entries + * which point to the end of the function. Ignore them. + */ + if (reloc->sym->sec == pfunc->sec && + reloc_addend(reloc) == pfunc->offset + pfunc->len) + goto next; + dest_insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc)); if (!dest_insn) break; @@ -2117,6 +2087,7 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn, alt->insn = dest_insn; alt->next = insn->alts; insn->alts = alt; +next: prev_offset = reloc_offset(reloc); } @@ -2628,13 +2599,14 @@ static void mark_rodata(struct objtool_file *file) * * - .rodata: can contain GCC switch tables * - .rodata.: same, if -fdata-sections is being used - * - .rodata..c_jump_table: contains C annotated jump tables + * - .data.rel.ro.c_jump_table: contains C annotated jump tables * * .rodata.str1.* sections are ignored; they don't contain jump tables. 
*/ for_each_sec(file, sec) { - if (!strncmp(sec->name, ".rodata", 7) && - !strstr(sec->name, ".str1.")) { + if ((!strncmp(sec->name, ".rodata", 7) && + !strstr(sec->name, ".str1.")) || + !strncmp(sec->name, ".data.rel.ro", 12)) { sec->rodata = true; found = true; } @@ -4019,6 +3991,11 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn) WARN_INSN(insn, "RET before UNTRAIN"); return 1; + case INSN_CONTEXT_SWITCH: + if (insn_func(insn)) + break; + return 0; + case INSN_NOP: if (insn->retpoline_safe) return 0; @@ -4182,7 +4159,7 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio * It may also insert a UD2 after calling a __noreturn function. */ prev_insn = prev_insn_same_sec(file, insn); - if (prev_insn->dead_end && + if (prev_insn && prev_insn->dead_end && (insn->type == INSN_BUG || (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && insn->jump_dest->type == INSN_BUG))) @@ -4604,35 +4581,6 @@ static int validate_sls(struct objtool_file *file) return warnings; } -static bool ignore_noreturn_call(struct instruction *insn) -{ - struct symbol *call_dest = insn_call_dest(insn); - - /* - * FIXME: hack, we need a real noreturn solution - * - * Problem is, exc_double_fault() may or may not return, depending on - * whether CONFIG_X86_ESPFIX64 is set. But objtool has no visibility - * to the kernel config. - * - * Other potential ways to fix it: - * - * - have compiler communicate __noreturn functions somehow - * - remove CONFIG_X86_ESPFIX64 - * - read the .config file - * - add a cmdline option - * - create a generic objtool annotation format (vs a bunch of custom - * formats) and annotate it - */ - if (!strcmp(call_dest->name, "exc_double_fault")) { - /* prevent further unreachable warnings for the caller */ - insn->sym->warned = 1; - return true; - } - - return false; -} - static int validate_reachable_instructions(struct objtool_file *file) { struct instruction *insn, *prev_insn; @@ -4649,7 +4597,7 @@ static int validate_reachable_instructions(struct objtool_file *file) prev_insn = prev_insn_same_sec(file, insn); if (prev_insn && prev_insn->dead_end) { call_dest = insn_call_dest(prev_insn); - if (call_dest && !ignore_noreturn_call(prev_insn)) { + if (call_dest) { WARN_INSN(insn, "%s() is missing a __noreturn annotation", call_dest->name); warnings++; @@ -4672,6 +4620,8 @@ static int disas_funcs(const char *funcs) char *cmd; cross_compile = getenv("CROSS_COMPILE"); + if (!cross_compile) + cross_compile = ""; objdump_str = "%sobjdump -wdr %s | gawk -M -v _funcs='%s' '" "BEGIN { split(_funcs, funcs); }" diff --git a/tools/objtool/include/objtool/special.h b/tools/objtool/include/objtool/special.h index 86d4af9c5aa9d..89ee12b1a1384 100644 --- a/tools/objtool/include/objtool/special.h +++ b/tools/objtool/include/objtool/special.h @@ -10,7 +10,7 @@ #include #include -#define C_JUMP_TABLE_SECTION ".rodata..c_jump_table" +#define C_JUMP_TABLE_SECTION ".data.rel.ro.c_jump_table" struct special_alt { struct list_head list; diff --git a/tools/perf/Documentation/intel-hybrid.txt b/tools/perf/Documentation/intel-hybrid.txt index e7a776ad25d71..0379903673a4a 100644 --- a/tools/perf/Documentation/intel-hybrid.txt +++ b/tools/perf/Documentation/intel-hybrid.txt @@ -8,15 +8,15 @@ Part of events are available on core cpu, part of events are available on atom cpu and even part of events are available on both. 
Kernel exports two new cpu pmus via sysfs: -/sys/devices/cpu_core -/sys/devices/cpu_atom +/sys/bus/event_source/devices/cpu_core +/sys/bus/event_source/devices/cpu_atom The 'cpus' files are created under the directories. For example, -cat /sys/devices/cpu_core/cpus +cat /sys/bus/event_source/devices/cpu_core/cpus 0-15 -cat /sys/devices/cpu_atom/cpus +cat /sys/bus/event_source/devices/cpu_atom/cpus 16-23 It indicates cpu0-cpu15 are core cpus and cpu16-cpu23 are atom cpus. @@ -60,8 +60,8 @@ can't carry pmu information. So now this type is extended to be PMU aware type. The PMU type ID is stored at attr.config[63:32]. PMU type ID is retrieved from sysfs. -/sys/devices/cpu_atom/type -/sys/devices/cpu_core/type +/sys/bus/event_source/devices/cpu_atom/type +/sys/bus/event_source/devices/cpu_core/type The new attr.config layout for PERF_TYPE_HARDWARE: diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt index dea005410ec02..ee5d333e2ca96 100644 --- a/tools/perf/Documentation/perf-list.txt +++ b/tools/perf/Documentation/perf-list.txt @@ -188,7 +188,7 @@ in the CPU vendor specific documentation. The available PMUs and their raw parameters can be listed with - ls /sys/devices/*/format + ls /sys/bus/event_source/devices/*/format For example the raw event "LSD.UOPS" core pmu event above could be specified as diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 2ce71d2e5fae0..b102a4c525e4b 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -513,13 +513,14 @@ ifeq ($(feature-setns), 1) $(call detected,CONFIG_SETNS) endif +ifeq ($(feature-reallocarray), 0) + CFLAGS += -DCOMPAT_NEED_REALLOCARRAY +endif + ifdef CORESIGHT $(call feature_check,libopencsd) ifeq ($(feature-libopencsd), 1) CFLAGS += -DHAVE_CSTRACE_SUPPORT $(LIBOPENCSD_CFLAGS) - ifeq ($(feature-reallocarray), 0) - CFLAGS += -DCOMPAT_NEED_REALLOCARRAY - endif LDFLAGS += $(LIBOPENCSD_LDFLAGS) EXTLIBS += $(OPENCSDLIBS) $(call detected,CONFIG_LIBOPENCSD) @@ -1135,9 +1136,6 @@ ifndef NO_AUXTRACE ifndef NO_AUXTRACE $(call detected,CONFIG_AUXTRACE) CFLAGS += -DHAVE_AUXTRACE_SUPPORT - ifeq ($(feature-reallocarray), 0) - CFLAGS += -DCOMPAT_NEED_REALLOCARRAY - endif endif endif diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 9dd2e8d3f3c9b..8ee59ecb14110 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -164,7 +164,7 @@ ifneq ($(OUTPUT),) VPATH += $(OUTPUT) export VPATH # create symlink to the original source -SOURCE := $(shell ln -sf $(srctree)/tools/perf $(OUTPUT)/source) +SOURCE := $(shell ln -sfn $(srctree)/tools/perf $(OUTPUT)/source) endif ifeq ($(V),1) diff --git a/tools/perf/arch/x86/util/iostat.c b/tools/perf/arch/x86/util/iostat.c index df7b5dfcc26a5..7ea882ef293a1 100644 --- a/tools/perf/arch/x86/util/iostat.c +++ b/tools/perf/arch/x86/util/iostat.c @@ -32,7 +32,7 @@ #define MAX_PATH 1024 #endif -#define UNCORE_IIO_PMU_PATH "devices/uncore_iio_%d" +#define UNCORE_IIO_PMU_PATH "bus/event_source/devices/uncore_iio_%d" #define SYSFS_UNCORE_PMU_PATH "%s/"UNCORE_IIO_PMU_PATH #define PLATFORM_MAPPING_PATH UNCORE_IIO_PMU_PATH"/die%d" diff --git a/tools/perf/bench/syscall.c b/tools/perf/bench/syscall.c index ea4dfc07cbd6b..e7dc216f717f5 100644 --- a/tools/perf/bench/syscall.c +++ b/tools/perf/bench/syscall.c @@ -22,8 +22,7 @@ #define __NR_fork -1 #endif -#define LOOPS_DEFAULT 10000000 -static int loops = LOOPS_DEFAULT; +static int loops; static const struct option options[] = { OPT_INTEGER('l', "loop", &loops, "Specify 
number of loops"), @@ -80,6 +79,18 @@ static int bench_syscall_common(int argc, const char **argv, int syscall) const char *name = NULL; int i; + switch (syscall) { + case __NR_fork: + case __NR_execve: + /* Limit default loop to 10000 times to save time */ + loops = 10000; + break; + default: + loops = 10000000; + break; + } + + /* Options -l and --loops override default above */ argc = parse_options(argc, argv, options, bench_syscall_usage, 0); gettimeofday(&start, NULL); @@ -94,16 +105,9 @@ static int bench_syscall_common(int argc, const char **argv, int syscall) break; case __NR_fork: test_fork(); - /* Only loop 10000 times to save time */ - if (i == 10000) - loops = 10000; break; case __NR_execve: test_execve(); - /* Only loop 10000 times to save time */ - if (i == 10000) - loops = 10000; - break; default: break; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 645deec294c84..8700c39680662 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -1551,12 +1551,12 @@ int cmd_report(int argc, const char **argv) input_name = "perf.data"; } +repeat: data.path = input_name; data.force = symbol_conf.force; symbol_conf.skip_empty = report.skip_empty; -repeat: perf_tool__init(&report.tool, ordered_events); report.tool.sample = process_sample_event; report.tool.mmap = perf_event__process_mmap; diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 4933efdfee76f..628c61397d2d3 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -96,7 +96,7 @@ #include #define DEFAULT_SEPARATOR " " -#define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi" +#define FREEZE_ON_SMI_PATH "bus/event_source/devices/cpu/freeze_on_smi" static void print_counters(struct timespec *ts, int argc, const char **argv); diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json index c5d1d22bd034b..5228f94a793f9 100644 --- a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json @@ -229,19 +229,19 @@ }, { "MetricName": "slots_lost_misspeculation_fraction", - "MetricExpr": "(OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots)", + "MetricExpr": "100 * (OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots)", "BriefDescription": "Fraction of slots lost due to misspeculation", "DefaultMetricgroupName": "TopdownL1", "MetricGroup": "Default;TopdownL1", - "ScaleUnit": "100percent of slots" + "ScaleUnit": "1percent of slots" }, { "MetricName": "retired_fraction", - "MetricExpr": "OP_RETIRED / (CPU_CYCLES * #slots)", + "MetricExpr": "100 * OP_RETIRED / (CPU_CYCLES * #slots)", "BriefDescription": "Fraction of slots retiring, useful work", "DefaultMetricgroupName": "TopdownL1", "MetricGroup": "Default;TopdownL1", - "ScaleUnit": "100percent of slots" + "ScaleUnit": "1percent of slots" }, { "MetricName": "backend_core", @@ -266,7 +266,7 @@ }, { "MetricName": "frontend_bandwidth", - "MetricExpr": "frontend_bound - frontend_latency", + "MetricExpr": "frontend_bound - 100 * frontend_latency", "BriefDescription": "Fraction of slots the CPU did not dispatch at full bandwidth - able to dispatch partial slots only (1, 2, or 3 uops)", "MetricGroup": "TopdownL2", "ScaleUnit": "1percent of slots" diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S index 75cf084a927d3..5777600467723 100644 --- 
a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S +++ b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S @@ -26,3 +26,5 @@ skip: mov x0, #0 mov x8, #93 // __NR_exit syscall svc #0 + +.section .note.GNU-stack, "", @progbits diff --git a/tools/perf/tests/shell/record_bpf_filter.sh b/tools/perf/tests/shell/record_bpf_filter.sh index 1b58ccc1fd882..4d6c3c1b7fb92 100755 --- a/tools/perf/tests/shell/record_bpf_filter.sh +++ b/tools/perf/tests/shell/record_bpf_filter.sh @@ -89,7 +89,7 @@ test_bpf_filter_fail() { test_bpf_filter_group() { echo "Group bpf-filter test" - if ! perf record -e task-clock --filter 'period > 1000 || ip > 0' \ + if ! perf record -e task-clock --filter 'period > 1000, ip > 0' \ -o /dev/null true 2>/dev/null then echo "Group bpf-filter test [Failed should succeed]" @@ -97,7 +97,7 @@ test_bpf_filter_group() { return fi - if ! perf record -e task-clock --filter 'cpu > 0 || ip > 0' \ + if ! perf record -e task-clock --filter 'period > 1000 , cpu > 0 || ip > 0' \ -o /dev/null true 2>&1 | grep -q PERF_SAMPLE_CPU then echo "Group bpf-filter test [Failed forbidden CPU]" diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c index 138ffc71b32dd..2c06f2a85400e 100644 --- a/tools/perf/util/arm-spe.c +++ b/tools/perf/util/arm-spe.c @@ -37,6 +37,8 @@ #include "../../arch/arm64/include/asm/cputype.h" #define MAX_TIMESTAMP (~0ULL) +#define is_ldst_op(op) (!!((op) & ARM_SPE_OP_LDST)) + struct arm_spe { struct auxtrace auxtrace; struct auxtrace_queues queues; @@ -520,6 +522,10 @@ static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 m union perf_mem_data_src data_src = { .mem_op = PERF_MEM_OP_NA }; bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe); + /* Only synthesize data source for LDST operations */ + if (!is_ldst_op(record->op)) + return 0; + if (record->op & ARM_SPE_OP_LD) data_src.mem_op = PERF_MEM_OP_LOAD; else if (record->op & ARM_SPE_OP_ST) @@ -619,7 +625,7 @@ static int arm_spe_sample(struct arm_spe_queue *speq) * When data_src is zero it means the record is not a memory operation, * skip to synthesize memory sample for this case. 
*/ - if (spe->sample_memory && data_src) { + if (spe->sample_memory && is_ldst_op(record->op)) { err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src); if (err) return err; diff --git a/tools/perf/util/bpf-filter.l b/tools/perf/util/bpf-filter.l index f313404f95a90..6aa65ade33851 100644 --- a/tools/perf/util/bpf-filter.l +++ b/tools/perf/util/bpf-filter.l @@ -76,7 +76,7 @@ static int path_or_error(void) num_dec [0-9]+ num_hex 0[Xx][0-9a-fA-F]+ space [ \t]+ -path [^ \t\n]+ +path [^ \t\n,]+ ident [_a-zA-Z][_a-zA-Z0-9]+ %% diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c index 49b79cf0c5cc5..8aa456d7c2cd2 100644 --- a/tools/perf/util/comm.c +++ b/tools/perf/util/comm.c @@ -5,6 +5,8 @@ #include #include #include +#include // reallocarray + #include "rwsem.h" DECLARE_RC_STRUCT(comm_str) { diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index d633d15329fa0..e56330c85fe7e 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -46,8 +46,8 @@ int debug_type_profile; FILE *debug_file(void) { if (!_debug_file) { - pr_warning_once("debug_file not set"); debug_set_file(stderr); + pr_warning_once("debug_file not set"); } return _debug_file; } diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index bb8e8f444054d..c0472a41147c3 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h @@ -808,7 +808,9 @@ static inline bool dso__is_kcore(const struct dso *dso) static inline bool dso__is_kallsyms(const struct dso *dso) { - return RC_CHK_ACCESS(dso)->kernel && RC_CHK_ACCESS(dso)->long_name[0] != '/'; + enum dso_binary_type bt = dso__binary_type(dso); + + return bt == DSO_BINARY_TYPE__KALLSYMS || bt == DSO_BINARY_TYPE__GUEST_KALLSYMS; } bool dso__is_object_file(const struct dso *dso); diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index a9df84692d4a8..dac87dccaaaa5 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -1434,19 +1434,18 @@ static int evlist__create_syswide_maps(struct evlist *evlist) */ cpus = perf_cpu_map__new_online_cpus(); if (!cpus) - goto out; + return -ENOMEM; threads = perf_thread_map__new_dummy(); - if (!threads) - goto out_put; + if (!threads) { + perf_cpu_map__put(cpus); + return -ENOMEM; + } perf_evlist__set_maps(&evlist->core, cpus, threads); - perf_thread_map__put(threads); -out_put: perf_cpu_map__put(cpus); -out: - return -ENOMEM; + return 0; } int evlist__open(struct evlist *evlist) diff --git a/tools/perf/util/intel-tpebs.c b/tools/perf/util/intel-tpebs.c index 50a3c3e071606..2c421b475b3b8 100644 --- a/tools/perf/util/intel-tpebs.c +++ b/tools/perf/util/intel-tpebs.c @@ -254,7 +254,7 @@ int tpebs_start(struct evlist *evsel_list) new = zalloc(sizeof(*new)); if (!new) { ret = -1; - zfree(name); + zfree(&name); goto err; } new->name = name; diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c index bf5090f5220bb..9c4adfb45f62b 100644 --- a/tools/perf/util/mem-events.c +++ b/tools/perf/util/mem-events.c @@ -189,7 +189,7 @@ static bool perf_pmu__mem_events_supported(const char *mnt, struct perf_pmu *pmu if (!e->event_name) return true; - scnprintf(path, PATH_MAX, "%s/devices/%s/events/%s", mnt, pmu->name, e->event_name); + scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/events/%s", mnt, pmu->name, e->event_name); return !stat(path, &st); } diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 61bdda01a05ac..8b4e346808b4c 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -33,12 +33,12 @@ #define UNIT_MAX_LEN 31 /* max length 
for event unit name */ enum event_source { - /* An event loaded from /sys/devices//events. */ + /* An event loaded from /sys/bus/event_source/devices//events. */ EVENT_SRC_SYSFS, /* An event loaded from a CPUID matched json file. */ EVENT_SRC_CPU_JSON, /* - * An event loaded from a /sys/devices//identifier matched json + * An event loaded from a /sys/bus/event_source/devices//identifier matched json * file. */ EVENT_SRC_SYS_JSON, @@ -593,7 +593,7 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name, }; if (pmu_events_table__find_event(pmu->events_table, pmu, name, update_alias, &data) == 0) - pmu->cpu_json_aliases++; + pmu->cpu_common_json_aliases++; } pmu->sysfs_aliases++; break; @@ -1807,9 +1807,10 @@ size_t perf_pmu__num_events(struct perf_pmu *pmu) if (pmu->cpu_aliases_added) nr += pmu->cpu_json_aliases; else if (pmu->events_table) - nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->cpu_json_aliases; + nr += pmu_events_table__num_events(pmu->events_table, pmu) - + pmu->cpu_common_json_aliases; else - assert(pmu->cpu_json_aliases == 0); + assert(pmu->cpu_json_aliases == 0 && pmu->cpu_common_json_aliases == 0); return pmu->selectable ? nr + 1 : nr; } diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 4397c48ad569a..bcd278b9b546f 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -131,6 +131,11 @@ struct perf_pmu { uint32_t cpu_json_aliases; /** @sys_json_aliases: Number of json event aliases loaded matching the PMU's identifier. */ uint32_t sys_json_aliases; + /** + * @cpu_common_json_aliases: Number of json events that overlapped with sysfs when + * loading all sysfs events. + */ + uint32_t cpu_common_json_aliases; /** @sysfs_aliases_loaded: Are sysfs aliases loaded from disk? */ bool sysfs_aliases_loaded; /** diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c index d7d67e09d759b..362596ed27294 100644 --- a/tools/perf/util/pmus.c +++ b/tools/perf/util/pmus.c @@ -701,11 +701,25 @@ char *perf_pmus__default_pmu_name(void) struct perf_pmu *evsel__find_pmu(const struct evsel *evsel) { struct perf_pmu *pmu = evsel->pmu; + bool legacy_core_type; - if (!pmu) { - pmu = perf_pmus__find_by_type(evsel->core.attr.type); - ((struct evsel *)evsel)->pmu = pmu; + if (pmu) + return pmu; + + pmu = perf_pmus__find_by_type(evsel->core.attr.type); + legacy_core_type = + evsel->core.attr.type == PERF_TYPE_HARDWARE || + evsel->core.attr.type == PERF_TYPE_HW_CACHE; + if (!pmu && legacy_core_type) { + if (perf_pmus__supports_extended_type()) { + u32 type = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT; + + pmu = perf_pmus__find_by_type(type); + } else { + pmu = perf_pmus__find_core_pmu(); + } } + ((struct evsel *)evsel)->pmu = pmu; return pmu; } diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index ee3d43a7ba457..e7f36ea9e2fa1 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -79,7 +79,7 @@ struct pyrf_event { }; #define sample_members \ - sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"), \ + sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"), \ sample_member_def(sample_pid, pid, T_INT, "event pid"), \ sample_member_def(sample_tid, tid, T_INT, "event tid"), \ sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \ @@ -512,6 +512,11 @@ static PyObject *pyrf_event__new(union perf_event *event) event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)) return NULL; + // FIXME this better be dynamic or we need to parse everything + // before calling perf_mmap__consume(), 
including tracepoint fields. + if (sizeof(pevent->event) < event->header.size) + return NULL; + ptype = pyrf_event__type[event->header.type]; pevent = PyObject_New(struct pyrf_event, ptype); if (pevent != NULL) @@ -1011,20 +1016,22 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, evsel = evlist__event2evsel(evlist, event); if (!evsel) { + Py_DECREF(pyevent); Py_INCREF(Py_None); return Py_None; } pevent->evsel = evsel; - err = evsel__parse_sample(evsel, event, &pevent->sample); - - /* Consume the even only after we parsed it out. */ perf_mmap__consume(&md->core); - if (err) + err = evsel__parse_sample(evsel, &pevent->event, &pevent->sample); + if (err) { + Py_DECREF(pyevent); return PyErr_Format(PyExc_OSError, "perf: can't parse sample, err=%d", err); + } + return pyevent; } end: diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c index 99376c12dd8ec..7c49997fab3a3 100644 --- a/tools/perf/util/stat-shadow.c +++ b/tools/perf/util/stat-shadow.c @@ -154,6 +154,7 @@ static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type { const struct evsel *cur; int evsel_ctx = evsel_context(evsel); + struct perf_pmu *evsel_pmu = evsel__find_pmu(evsel); evlist__for_each_entry(evsel->evlist, cur) { struct perf_stat_aggr *aggr; @@ -180,7 +181,7 @@ static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type * Except the SW CLOCK events, * ignore if not the PMU we're looking for. */ - if ((type != STAT_NSECS) && (evsel->pmu != cur->pmu)) + if ((type != STAT_NSECS) && (evsel_pmu != evsel__find_pmu(cur))) continue; aggr = &cur->stats->aggr[aggr_idx]; diff --git a/tools/perf/util/units.c b/tools/perf/util/units.c index 32c39cfe209b3..4c6a86e1cb54b 100644 --- a/tools/perf/util/units.c +++ b/tools/perf/util/units.c @@ -64,7 +64,7 @@ unsigned long convert_unit(unsigned long value, char *unit) int unit_number__scnprintf(char *buf, size_t size, u64 n) { - char unit[4] = "BKMG"; + char unit[] = "BKMG"; int i = 0; while (((n / 1024) > 1) && (i < 3)) { diff --git a/tools/power/cpupower/bench/parse.c b/tools/power/cpupower/bench/parse.c index e63dc11fa3a53..48e25be6e1635 100644 --- a/tools/power/cpupower/bench/parse.c +++ b/tools/power/cpupower/bench/parse.c @@ -120,6 +120,10 @@ FILE *prepare_output(const char *dirname) struct config *prepare_default_config() { struct config *config = malloc(sizeof(struct config)); + if (!config) { + perror("malloc"); + return NULL; + } dprintf("loading defaults\n"); diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index 56c7ff6efcdab..a3cf1d17163ae 100644 --- a/tools/power/x86/turbostat/turbostat.8 +++ b/tools/power/x86/turbostat/turbostat.8 @@ -168,6 +168,8 @@ The system configuration dump (if --quiet is not used) is followed by statistics .PP \fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. .PP +\fBCoreThr\fP Core Thermal Throttling events during the measurement interval. Note that events since boot can be find in /sys/devices/system/cpu/cpu*/thermal_throttle/* +.PP \fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms or /sys/class/drm/card0/gt/gt0/rc6_residency_ms or /sys/class/drm/card0/device/tile0/gtN/gtidle/idle_residency_ms depending on the graphics driver being used. .PP \fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. 
From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz or /sys/class/drm/card0/gt_cur_freq_mhz or /sys/class/drm/card0/gt/gt0/rps_cur_freq_mhz or /sys/class/drm/card0/device/tile0/gtN/freq0/cur_freq depending on the graphics driver being used. diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 235e82fe7d0a5..77ef60980ee58 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -3242,7 +3242,7 @@ void delta_core(struct core_data *new, struct core_data *old) old->c6 = new->c6 - old->c6; old->c7 = new->c7 - old->c7; old->core_temp_c = new->core_temp_c; - old->core_throt_cnt = new->core_throt_cnt; + old->core_throt_cnt = new->core_throt_cnt - old->core_throt_cnt; old->mc6_us = new->mc6_us - old->mc6_us; DELTA_WRAP32(new->core_energy.raw_value, old->core_energy.raw_value); diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h index f7206374a73dd..75d6d61279d66 100644 --- a/tools/sched_ext/include/scx/common.bpf.h +++ b/tools/sched_ext/include/scx/common.bpf.h @@ -333,6 +333,17 @@ static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask) return (const struct cpumask *)mask; } +/* + * Return true if task @p cannot migrate to a different CPU, false + * otherwise. + */ +static inline bool is_migration_disabled(const struct task_struct *p) +{ + if (bpf_core_field_exists(p->migration_disabled)) + return p->migration_disabled; + return false; +} + /* rcu */ void bpf_rcu_read_lock(void) __ksym; void bpf_rcu_read_unlock(void) __ksym; diff --git a/tools/sound/dapm-graph b/tools/sound/dapm-graph index f14bdfedee8f1..b6196ee5065a4 100755 --- a/tools/sound/dapm-graph +++ b/tools/sound/dapm-graph @@ -10,7 +10,7 @@ set -eu STYLE_COMPONENT_ON="color=dodgerblue;style=bold" STYLE_COMPONENT_OFF="color=gray40;style=filled;fillcolor=gray90" -STYLE_NODE_ON="shape=box,style=bold,color=green4" +STYLE_NODE_ON="shape=box,style=bold,color=green4,fillcolor=white" STYLE_NODE_OFF="shape=box,style=filled,color=gray30,fillcolor=gray95" # Print usage and exit diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index c76ad0be54e2e..7e524601e01ad 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -4303,6 +4303,14 @@ sub cancel_test { if ($opt{"CLEAR_LOG"}) { unlink $opt{"LOG_FILE"}; } + + if (! -e $opt{"LOG_FILE"} && $opt{"LOG_FILE"} =~ m,^(.*/),) { + my $dir = $1; + if (! 
-d $dir) { + mkpath($dir) or die "Failed to create directories '$dir': $!"; + print "\nThe log directory $dir did not exist, so it was created.\n"; + } + } open(LOG, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}"; LOG->autoflush(1); } diff --git a/tools/testing/kunit/qemu_configs/sh.py b/tools/testing/kunit/qemu_configs/sh.py index 78a474a5b95f3..f00cb89fdef6a 100644 --- a/tools/testing/kunit/qemu_configs/sh.py +++ b/tools/testing/kunit/qemu_configs/sh.py @@ -7,7 +7,9 @@ CONFIG_MEMORY_START=0x0c000000 CONFIG_SH_RTS7751R2D=y CONFIG_RTS7751R2D_PLUS=y -CONFIG_SERIAL_SH_SCI=y''', +CONFIG_SERIAL_SH_SCI=y +CONFIG_CMDLINE_EXTEND=y +''', qemu_arch='sh4', kernel_path='arch/sh/boot/zImage', kernel_command_line='console=ttySC1', diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c index 2ed0ef6f21eee..32e9f194d4497 100644 --- a/tools/testing/selftests/bpf/benchs/bench_trigger.c +++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c @@ -4,6 +4,7 @@ #include #include #include +#include "bpf_util.h" #include "bench.h" #include "trigger_bench.skel.h" #include "trace_helpers.h" @@ -72,7 +73,7 @@ static __always_inline void inc_counter(struct counter *counters) unsigned slot; if (unlikely(tid == 0)) - tid = syscall(SYS_gettid); + tid = sys_gettid(); /* multiplicative hashing, it's fast */ slot = 2654435769U * tid; diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h index 6c3b4d4f173ac..aeef86b3da747 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h @@ -40,6 +40,14 @@ DECLARE_TRACE(bpf_testmod_test_nullable_bare, TP_ARGS(ctx__nullable) ); +struct sk_buff; + +DECLARE_TRACE(bpf_testmod_test_raw_tp_null, + TP_PROTO(struct sk_buff *skb), + TP_ARGS(skb) +); + + #undef BPF_TESTMOD_DECLARE_TRACE #ifdef DECLARE_TRACE_WRITABLE #define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, size) \ diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 8835761d9a126..4e6a9e9c03687 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -380,6 +380,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, (void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2); + (void)trace_bpf_testmod_test_raw_tp_null(NULL); + struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) + sizeof(int)), GFP_KERNEL); if (struct_arg3 != NULL) { diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h index 10587a29b9674..feff92219e213 100644 --- a/tools/testing/selftests/bpf/bpf_util.h +++ b/tools/testing/selftests/bpf/bpf_util.h @@ -6,6 +6,7 @@ #include #include #include +#include #include /* libbpf_num_possible_cpus */ static inline unsigned int bpf_num_possible_cpus(void) @@ -59,4 +60,12 @@ static inline void bpf_strlcpy(char *dst, const char *src, size_t sz) (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) #endif +/* Availability of gettid across glibc versions is hit-and-miss, therefore + * fallback to syscall in this macro and use it everywhere. 
+ */ +#ifndef sys_gettid +#define sys_gettid() syscall(SYS_gettid) +#endif + + #endif /* __BPF_UTIL__ */ diff --git a/tools/testing/selftests/bpf/map_tests/task_storage_map.c b/tools/testing/selftests/bpf/map_tests/task_storage_map.c index 7d050364efca1..62971dbf29961 100644 --- a/tools/testing/selftests/bpf/map_tests/task_storage_map.c +++ b/tools/testing/selftests/bpf/map_tests/task_storage_map.c @@ -12,6 +12,7 @@ #include #include +#include "bpf_util.h" #include "test_maps.h" #include "task_local_storage_helpers.h" #include "read_bpf_task_storage_busy.skel.h" @@ -115,7 +116,7 @@ void test_task_storage_map_stress_lookup(void) CHECK(err, "attach", "error %d\n", err); /* Trigger program */ - syscall(SYS_gettid); + sys_gettid(); skel->bss->pid = 0; CHECK(skel->bss->busy != 0, "bad bpf_task_storage_busy", "got %d\n", skel->bss->busy); diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c index cc184e4420f6e..67557cda22083 100644 --- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c +++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c @@ -6,6 +6,10 @@ #include #include "bloom_filter_map.skel.h" +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE (-1) +#endif + static void test_fail_cases(void) { LIBBPF_OPTS(bpf_map_create_opts, opts); @@ -69,6 +73,7 @@ static void test_success_cases(void) /* Create a map */ opts.map_flags = BPF_F_ZERO_SEED | BPF_F_NUMA_NODE; + opts.numa_node = NUMA_NO_NODE; fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts); if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter success case")) return; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index 070c52c312e5f..6befa870434bc 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -690,7 +690,7 @@ void test_bpf_cookie(void) if (!ASSERT_OK_PTR(skel, "skel_open")) return; - skel->bss->my_tid = syscall(SYS_gettid); + skel->bss->my_tid = sys_gettid(); if (test__start_subtest("kprobe")) kprobe_subtest(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 9006549a12945..b8e1224cfd190 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -226,7 +226,7 @@ static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts, ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL), "pthread_create"); - skel->bss->tid = syscall(SYS_gettid); + skel->bss->tid = sys_gettid(); do_dummy_read_opts(skel->progs.dump_task, opts); @@ -255,10 +255,10 @@ static void *run_test_task_tid(void *arg) union bpf_iter_link_info linfo; int num_unknown_tid, num_known_tid; - ASSERT_NEQ(getpid(), syscall(SYS_gettid), "check_new_thread_id"); + ASSERT_NEQ(getpid(), sys_gettid(), "check_new_thread_id"); memset(&linfo, 0, sizeof(linfo)); - linfo.task.tid = syscall(SYS_gettid); + linfo.task.tid = sys_gettid(); opts.link_info = &linfo; opts.link_info_len = sizeof(linfo); test_task_common(&opts, 0, 1); diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c index 747761572098c..9015e2c2ab120 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c +++ b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c @@ -63,14 +63,14 @@ static void test_tp_btf(int 
cgroup_fd) if (!ASSERT_OK(err, "map_delete_elem")) goto out; - skel->bss->target_pid = syscall(SYS_gettid); + skel->bss->target_pid = sys_gettid(); err = cgrp_ls_tp_btf__attach(skel); if (!ASSERT_OK(err, "skel_attach")) goto out; - syscall(SYS_gettid); - syscall(SYS_gettid); + sys_gettid(); + sys_gettid(); skel->bss->target_pid = 0; @@ -154,7 +154,7 @@ static void test_recursion(int cgroup_fd) goto out; /* trigger sys_enter, make sure it does not cause deadlock */ - syscall(SYS_gettid); + sys_gettid(); out: cgrp_ls_recursion__destroy(skel); @@ -224,7 +224,7 @@ static void test_yes_rcu_lock(__u64 cgroup_id) return; CGROUP_MODE_SET(skel); - skel->bss->target_pid = syscall(SYS_gettid); + skel->bss->target_pid = sys_gettid(); bpf_program__set_autoload(skel->progs.yes_rcu_lock, true); err = cgrp_ls_sleepable__load(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/changes_pkt_data.c b/tools/testing/selftests/bpf/prog_tests/changes_pkt_data.c new file mode 100644 index 0000000000000..7526de3790814 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/changes_pkt_data.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "bpf/libbpf.h" +#include "changes_pkt_data_freplace.skel.h" +#include "changes_pkt_data.skel.h" +#include + +static void print_verifier_log(const char *log) +{ + if (env.verbosity >= VERBOSE_VERY) + fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log); +} + +static void test_aux(const char *main_prog_name, + const char *to_be_replaced, + const char *replacement, + bool expect_load) +{ + struct changes_pkt_data_freplace *freplace = NULL; + struct bpf_program *freplace_prog = NULL; + struct bpf_program *main_prog = NULL; + LIBBPF_OPTS(bpf_object_open_opts, opts); + struct changes_pkt_data *main = NULL; + char log[16*1024]; + int err; + + opts.kernel_log_buf = log; + opts.kernel_log_size = sizeof(log); + if (env.verbosity >= VERBOSE_SUPER) + opts.kernel_log_level = 1 | 2 | 4; + main = changes_pkt_data__open_opts(&opts); + if (!ASSERT_OK_PTR(main, "changes_pkt_data__open")) + goto out; + main_prog = bpf_object__find_program_by_name(main->obj, main_prog_name); + if (!ASSERT_OK_PTR(main_prog, "main_prog")) + goto out; + bpf_program__set_autoload(main_prog, true); + err = changes_pkt_data__load(main); + print_verifier_log(log); + if (!ASSERT_OK(err, "changes_pkt_data__load")) + goto out; + freplace = changes_pkt_data_freplace__open_opts(&opts); + if (!ASSERT_OK_PTR(freplace, "changes_pkt_data_freplace__open")) + goto out; + freplace_prog = bpf_object__find_program_by_name(freplace->obj, replacement); + if (!ASSERT_OK_PTR(freplace_prog, "freplace_prog")) + goto out; + bpf_program__set_autoload(freplace_prog, true); + bpf_program__set_autoattach(freplace_prog, true); + bpf_program__set_attach_target(freplace_prog, + bpf_program__fd(main_prog), + to_be_replaced); + err = changes_pkt_data_freplace__load(freplace); + print_verifier_log(log); + if (expect_load) { + ASSERT_OK(err, "changes_pkt_data_freplace__load"); + } else { + ASSERT_ERR(err, "changes_pkt_data_freplace__load"); + ASSERT_HAS_SUBSTR(log, "Extension program changes packet data", "error log"); + } + +out: + changes_pkt_data_freplace__destroy(freplace); + changes_pkt_data__destroy(main); +} + +/* There are two global subprograms in both changes_pkt_data.skel.h: + * - one changes packet data; + * - another does not. + * It is ok to freplace subprograms that change packet data with those + * that either do or do not. 
It is only ok to freplace subprograms + * that do not change packet data with those that do not as well. + * The below tests check outcomes for each combination of such freplace. + * Also test a case when main subprogram itself is replaced and is a single + * subprogram in a program. + */ +void test_changes_pkt_data_freplace(void) +{ + struct { + const char *main; + const char *to_be_replaced; + bool changes; + } mains[] = { + { "main_with_subprogs", "changes_pkt_data", true }, + { "main_with_subprogs", "does_not_change_pkt_data", false }, + { "main_changes", "main_changes", true }, + { "main_does_not_change", "main_does_not_change", false }, + }; + struct { + const char *func; + bool changes; + } replacements[] = { + { "changes_pkt_data", true }, + { "does_not_change_pkt_data", false } + }; + char buf[64]; + + for (int i = 0; i < ARRAY_SIZE(mains); ++i) { + for (int j = 0; j < ARRAY_SIZE(replacements); ++j) { + snprintf(buf, sizeof(buf), "%s_with_%s", + mains[i].to_be_replaced, replacements[j].func); + if (!test__start_subtest(buf)) + continue; + test_aux(mains[i].main, mains[i].to_be_replaced, replacements[j].func, + mains[i].changes || !replacements[j].changes); + } + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 26019313e1fc2..1c682550e0e7c 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -1010,7 +1010,7 @@ static void run_core_reloc_tests(bool use_btfgen) struct data *data; void *mmap_data = NULL; - my_pid_tgid = getpid() | ((uint64_t)syscall(SYS_gettid) << 32); + my_pid_tgid = getpid() | ((uint64_t)sys_gettid() << 32); for (i = 0; i < ARRAY_SIZE(test_cases); i++) { char btf_file[] = "/tmp/core_reloc.btf.XXXXXX"; diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c index cad6645469129..fa639b021f7ef 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c @@ -20,7 +20,7 @@ void test_linked_funcs(void) bpf_program__set_autoload(skel->progs.handler1, true); bpf_program__set_autoload(skel->progs.handler2, true); - skel->rodata->my_tid = syscall(SYS_gettid); + skel->rodata->my_tid = sys_gettid(); skel->bss->syscall_id = SYS_getpgid; err = linked_funcs__load(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c index c29787e092d66..761ce24bce38f 100644 --- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c +++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c @@ -23,7 +23,7 @@ static int get_pid_tgid(pid_t *pid, pid_t *tgid, struct stat st; int err; - *pid = syscall(SYS_gettid); + *pid = sys_gettid(); *tgid = getpid(); err = stat("/proc/self/ns/pid", &st); diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c new file mode 100644 index 0000000000000..6fa19449297e9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include "raw_tp_null.skel.h" + +void test_raw_tp_null(void) +{ + struct raw_tp_null *skel; + + skel = raw_tp_null__open_and_load(); + if (!ASSERT_OK_PTR(skel, "raw_tp_null__open_and_load")) + return; + + skel->bss->tid = sys_gettid(); + + if (!ASSERT_OK(raw_tp_null__attach(skel), "raw_tp_null__attach")) + goto end; + + ASSERT_OK(trigger_module_test_read(2), "trigger testmod read"); + ASSERT_EQ(skel->bss->i, 3, "invocations"); + +end: + raw_tp_null__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c index a1f7e7378a64c..ebe0c12b55363 100644 --- a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c @@ -21,7 +21,7 @@ static void test_success(void) if (!ASSERT_OK_PTR(skel, "skel_open")) return; - skel->bss->target_pid = syscall(SYS_gettid); + skel->bss->target_pid = sys_gettid(); bpf_program__set_autoload(skel->progs.get_cgroup_id, true); bpf_program__set_autoload(skel->progs.task_succ, true); @@ -58,7 +58,7 @@ static void test_rcuptr_acquire(void) if (!ASSERT_OK_PTR(skel, "skel_open")) return; - skel->bss->target_pid = syscall(SYS_gettid); + skel->bss->target_pid = sys_gettid(); bpf_program__set_autoload(skel->progs.task_acquire, true); err = rcu_read_lock__load(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c index 82bfb266741cf..fb08c565d6aad 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -492,8 +492,8 @@ static void test_sockmap_skb_verdict_shutdown(void) if (!ASSERT_EQ(err, 1, "epoll_wait(fd)")) goto out_close; - n = recv(c1, &b, 1, SOCK_NONBLOCK); - ASSERT_EQ(n, 0, "recv_timeout(fin)"); + n = recv(c1, &b, 1, MSG_DONTWAIT); + ASSERT_EQ(n, 0, "recv(fin)"); out_close: close(c1); close(p1); @@ -546,7 +546,7 @@ static void test_sockmap_skb_verdict_fionread(bool pass_prog) ASSERT_EQ(avail, expected, "ioctl(FIONREAD)"); /* On DROP test there will be no data to read */ if (pass_prog) { - recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC); + recvd = recv_timeout(c1, &buf, sizeof(buf), MSG_DONTWAIT, IO_TIMEOUT_SEC); ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)"); } diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c index 40f22454cf05b..1f0977742741f 100644 --- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -1599,6 +1599,7 @@ static void test_tailcall_bpf2bpf_freplace(void) goto out; err = bpf_link__destroy(freplace_link); + freplace_link = NULL; if (!ASSERT_OK(err, "destroy link")) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c index c33c05161a9ea..0d42ce00166f0 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c +++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c @@ -23,14 +23,14 @@ static void test_sys_enter_exit(void) if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) return; - skel->bss->target_pid = syscall(SYS_gettid); + skel->bss->target_pid = sys_gettid(); err = task_local_storage__attach(skel); if (!ASSERT_OK(err, "skel_attach")) goto out; - syscall(SYS_gettid); - syscall(SYS_gettid); + sys_gettid(); + sys_gettid(); /* 3x syscalls: 1x attach and 2x gettid */ 
ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt"); @@ -99,7 +99,7 @@ static void test_recursion(void) /* trigger sys_enter, make sure it does not cause deadlock */ skel->bss->test_pid = getpid(); - syscall(SYS_gettid); + sys_gettid(); skel->bss->test_pid = 0; task_ls_recursion__detach(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c index c1ac813ff9bae..02a484b22aa69 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -125,7 +125,7 @@ static void *child_thread(void *ctx) struct child *child = ctx; int c = 0, err; - child->tid = syscall(SYS_gettid); + child->tid = sys_gettid(); /* let parent know we are ready */ err = write(child->c2p[1], &c, 1); diff --git a/tools/testing/selftests/bpf/progs/changes_pkt_data.c b/tools/testing/selftests/bpf/progs/changes_pkt_data.c new file mode 100644 index 0000000000000..43cada48b28ad --- /dev/null +++ b/tools/testing/selftests/bpf/progs/changes_pkt_data.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +__noinline +long changes_pkt_data(struct __sk_buff *sk) +{ + return bpf_skb_pull_data(sk, 0); +} + +__noinline __weak +long does_not_change_pkt_data(struct __sk_buff *sk) +{ + return 0; +} + +SEC("?tc") +int main_with_subprogs(struct __sk_buff *sk) +{ + changes_pkt_data(sk); + does_not_change_pkt_data(sk); + return 0; +} + +SEC("?tc") +int main_changes(struct __sk_buff *sk) +{ + bpf_skb_pull_data(sk, 0); + return 0; +} + +SEC("?tc") +int main_does_not_change(struct __sk_buff *sk) +{ + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/changes_pkt_data_freplace.c b/tools/testing/selftests/bpf/progs/changes_pkt_data_freplace.c new file mode 100644 index 0000000000000..f9a622705f1b3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/changes_pkt_data_freplace.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +SEC("?freplace") +long changes_pkt_data(struct __sk_buff *sk) +{ + return bpf_skb_pull_data(sk, 0); +} + +SEC("?freplace") +long does_not_change_pkt_data(struct __sk_buff *sk) +{ + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/raw_tp_null.c b/tools/testing/selftests/bpf/progs/raw_tp_null.c new file mode 100644 index 0000000000000..5927054b6dd96 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/raw_tp_null.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +int tid; +int i; + +SEC("tp_btf/bpf_testmod_test_raw_tp_null") +int BPF_PROG(test_raw_tp_null, struct sk_buff *skb) +{ + struct task_struct *task = bpf_get_current_task_btf(); + + if (task->pid != tid) + return 0; + + /* If dead code elimination kicks in, the increment +=2 will be + * removed. For raw_tp programs attaching to tracepoints in kernel + * modules, we mark input arguments as PTR_MAYBE_NULL, so branch + * prediction should never kick in. 
+ */ + asm volatile ("%[i] += 1; if %[ctx] != 0 goto +1; %[i] += 2;" + : [i]"+r"(i) + : [ctx]"r"(skb) + : "memory"); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/strncmp_bench.c b/tools/testing/selftests/bpf/progs/strncmp_bench.c index 18373a7df76e6..f47bf88f8d2a7 100644 --- a/tools/testing/selftests/bpf/progs/strncmp_bench.c +++ b/tools/testing/selftests/bpf/progs/strncmp_bench.c @@ -35,7 +35,10 @@ static __always_inline int local_strncmp(const char *s1, unsigned int sz, SEC("tp/syscalls/sys_enter_getpgid") int strncmp_no_helper(void *ctx) { - if (local_strncmp(str, cmp_str_len + 1, target) < 0) + const char *target_str = target; + + barrier_var(target_str); + if (local_strncmp(str, cmp_str_len + 1, target_str) < 0) __sync_add_and_fetch(&hits, 1); return 0; } diff --git a/tools/testing/selftests/bpf/progs/verifier_sock.c b/tools/testing/selftests/bpf/progs/verifier_sock.c index ee76b51005abe..3c8f6646e33da 100644 --- a/tools/testing/selftests/bpf/progs/verifier_sock.c +++ b/tools/testing/selftests/bpf/progs/verifier_sock.c @@ -50,6 +50,13 @@ struct { __uint(map_flags, BPF_F_NO_PREALLOC); } sk_storage_map SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + SEC("cgroup/skb") __description("skb->sk: no NULL check") __failure __msg("invalid mem access 'sock_common_or_null'") @@ -977,4 +984,53 @@ l1_%=: r0 = *(u8*)(r7 + 0); \ : __clobber_all); } +__noinline +long skb_pull_data2(struct __sk_buff *sk, __u32 len) +{ + return bpf_skb_pull_data(sk, len); +} + +__noinline +long skb_pull_data1(struct __sk_buff *sk, __u32 len) +{ + return skb_pull_data2(sk, len); +} + +/* global function calls bpf_skb_pull_data(), which invalidates packet + * pointers established before global function call. + */ +SEC("tc") +__failure __msg("invalid mem access") +int invalidate_pkt_pointers_from_global_func(struct __sk_buff *sk) +{ + int *p = (void *)(long)sk->data; + + if ((void *)(p + 1) > (void *)(long)sk->data_end) + return TCX_DROP; + skb_pull_data1(sk, 0); + *p = 42; /* this is unsafe */ + return TCX_PASS; +} + +__noinline +int tail_call(struct __sk_buff *sk) +{ + bpf_tail_call_static(sk, &jmp_table, 0); + return 0; +} + +/* Tail calls invalidate packet pointers. 
*/ +SEC("tc") +__failure __msg("invalid mem access") +int invalidate_pkt_pointers_by_tail_call(struct __sk_buff *sk) +{ + int *p = (void *)(long)sk->data; + + if ((void *)(p + 1) > (void *)(long)sk->data_end) + return TCX_DROP; + tail_call(sk); + *p = 42; /* this is unsafe */ + return TCX_PASS; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/cgroup/test_cpuset_v1_hp.sh b/tools/testing/selftests/cgroup/test_cpuset_v1_hp.sh index 3f45512fb512e..7406c24be1ac9 100755 --- a/tools/testing/selftests/cgroup/test_cpuset_v1_hp.sh +++ b/tools/testing/selftests/cgroup/test_cpuset_v1_hp.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # SPDX-License-Identifier: GPL-2.0 # # Test the special cpuset v1 hotplug case where a cpuset become empty of diff --git a/tools/testing/selftests/damon/damon_nr_regions.py b/tools/testing/selftests/damon/damon_nr_regions.py index 2e8a74aff5431..58f3291fed12a 100755 --- a/tools/testing/selftests/damon/damon_nr_regions.py +++ b/tools/testing/selftests/damon/damon_nr_regions.py @@ -65,6 +65,7 @@ def test_nr_regions(real_nr_regions, min_nr_regions, max_nr_regions): test_name = 'nr_regions test with %d/%d/%d real/min/max nr_regions' % ( real_nr_regions, min_nr_regions, max_nr_regions) + collected_nr_regions.sort() if (collected_nr_regions[0] < min_nr_regions or collected_nr_regions[-1] > max_nr_regions): print('fail %s' % test_name) @@ -109,6 +110,7 @@ def main(): attrs = kdamonds.kdamonds[0].contexts[0].monitoring_attrs attrs.min_nr_regions = 3 attrs.max_nr_regions = 7 + attrs.update_us = 100000 err = kdamonds.kdamonds[0].commit() if err is not None: proc.terminate() diff --git a/tools/testing/selftests/damon/damos_quota.py b/tools/testing/selftests/damon/damos_quota.py index 7d4c6bb2e3cd2..57c4937aaed28 100755 --- a/tools/testing/selftests/damon/damos_quota.py +++ b/tools/testing/selftests/damon/damos_quota.py @@ -51,16 +51,19 @@ def main(): nr_quota_exceeds = scheme.stats.qt_exceeds wss_collected.sort() + nr_expected_quota_exceeds = 0 for wss in wss_collected: if wss > sz_quota: print('quota is not kept: %s > %s' % (wss, sz_quota)) print('collected samples are as below') print('\n'.join(['%d' % wss for wss in wss_collected])) exit(1) + if wss == sz_quota: + nr_expected_quota_exceeds += 1 - if nr_quota_exceeds < len(wss_collected): - print('quota is not always exceeded: %d > %d' % - (len(wss_collected), nr_quota_exceeds)) + if nr_quota_exceeds < nr_expected_quota_exceeds: + print('quota is exceeded less than expected: %d < %d' % + (nr_quota_exceeds, nr_expected_quota_exceeds)) exit(1) if __name__ == '__main__': diff --git a/tools/testing/selftests/damon/damos_quota_goal.py b/tools/testing/selftests/damon/damos_quota_goal.py index 18246f3b62f7e..f76e0412b564c 100755 --- a/tools/testing/selftests/damon/damos_quota_goal.py +++ b/tools/testing/selftests/damon/damos_quota_goal.py @@ -63,6 +63,9 @@ def main(): if last_effective_bytes != 0 else -1.0)) if last_effective_bytes == goal.effective_bytes: + # effective quota was already minimum that cannot be more reduced + if expect_increase is False and last_effective_bytes == 1: + continue print('efective bytes not changed: %d' % goal.effective_bytes) exit(1) diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh index edc56e2cc6069..7bc148889ca72 100755 --- a/tools/testing/selftests/drivers/net/bonding/bond_options.sh +++ b/tools/testing/selftests/drivers/net/bonding/bond_options.sh @@ -11,8 +11,8 @@ ALL_TESTS=" 
lib_dir=$(dirname "$0") source ${lib_dir}/bond_topo_3d1c.sh -c_maddr="33:33:00:00:00:10" -g_maddr="33:33:00:00:02:54" +c_maddr="33:33:ff:00:00:10" +g_maddr="33:33:ff:00:02:54" skip_prio() { diff --git a/tools/testing/selftests/drivers/net/queues.py b/tools/testing/selftests/drivers/net/queues.py index 30f29096e27c2..4868b514ae78d 100755 --- a/tools/testing/selftests/drivers/net/queues.py +++ b/tools/testing/selftests/drivers/net/queues.py @@ -40,10 +40,9 @@ def addremove_queues(cfg, nl) -> None: netnl = EthtoolFamily() channels = netnl.channels_get({'header': {'dev-index': cfg.ifindex}}) - if channels['combined-count'] == 0: - rx_type = 'rx' - else: - rx_type = 'combined' + rx_type = 'rx' + if channels.get('combined-count', 0) > 0: + rx_type = 'combined' expected = curr_queues - 1 cmd(f"ethtool -L {cfg.dev['ifname']} {rx_type} {expected}", timeout=10) diff --git a/tools/testing/selftests/filesystems/statmount/statmount_test.c b/tools/testing/selftests/filesystems/statmount/statmount_test.c index c773334bbcc95..550e5d762c23f 100644 --- a/tools/testing/selftests/filesystems/statmount/statmount_test.c +++ b/tools/testing/selftests/filesystems/statmount/statmount_test.c @@ -383,6 +383,10 @@ static void test_statmount_mnt_point(void) return; } + if (!(sm->mask & STATMOUNT_MNT_POINT)) { + ksft_test_result_fail("missing STATMOUNT_MNT_POINT in mask\n"); + return; + } if (strcmp(sm->str + sm->mnt_point, "/") != 0) { ksft_test_result_fail("unexpected mount point: '%s' != '/'\n", sm->str + sm->mnt_point); @@ -408,6 +412,10 @@ static void test_statmount_mnt_root(void) strerror(errno)); return; } + if (!(sm->mask & STATMOUNT_MNT_ROOT)) { + ksft_test_result_fail("missing STATMOUNT_MNT_ROOT in mask\n"); + return; + } mnt_root = sm->str + sm->mnt_root; last_root = strrchr(mnt_root, '/'); if (last_root) @@ -437,6 +445,10 @@ static void test_statmount_fs_type(void) strerror(errno)); return; } + if (!(sm->mask & STATMOUNT_FS_TYPE)) { + ksft_test_result_fail("missing STATMOUNT_FS_TYPE in mask\n"); + return; + } fs_type = sm->str + sm->fs_type; for (s = known_fs; s != NULL; s++) { if (strcmp(fs_type, *s) == 0) @@ -464,6 +476,11 @@ static void test_statmount_mnt_opts(void) return; } + if (!(sm->mask & STATMOUNT_MNT_BASIC)) { + ksft_test_result_fail("missing STATMOUNT_MNT_BASIC in mask\n"); + return; + } + while (getline(&line, &len, f_mountinfo) != -1) { int i; char *p, *p2; @@ -514,7 +531,10 @@ static void test_statmount_mnt_opts(void) if (p2) *p2 = '\0'; - statmount_opts = sm->str + sm->mnt_opts; + if (sm->mask & STATMOUNT_MNT_OPTS) + statmount_opts = sm->str + sm->mnt_opts; + else + statmount_opts = ""; if (strcmp(statmount_opts, p) != 0) ksft_test_result_fail( "unexpected mount options: '%s' != '%s'\n", diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c index 7d7a6a06cdb75..2d8230da90642 100644 --- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c +++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c @@ -98,7 +98,7 @@ int main(int argc, char *argv[]) info("Calling futex_waitv on f1: %u @ %p with val=%u\n", f1, &f1, f1+1); res = futex_waitv(&waitv, 1, 0, &to, CLOCK_MONOTONIC); if (!res || errno != EWOULDBLOCK) { - ksft_test_result_pass("futex_waitv returned: %d %s\n", + ksft_test_result_fail("futex_waitv returned: %d %s\n", res ? errno : res, res ? 
strerror(errno) : ""); ret = RET_FAIL; diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c index 1bc16fde2e8ae..4766f8fec9f60 100644 --- a/tools/testing/selftests/landlock/base_test.c +++ b/tools/testing/selftests/landlock/base_test.c @@ -98,10 +98,54 @@ TEST(abi_version) ASSERT_EQ(EINVAL, errno); } +/* + * Old source trees might not have the set of Kselftest fixes related to kernel + * UAPI headers. + */ +#ifndef LANDLOCK_CREATE_RULESET_ERRATA +#define LANDLOCK_CREATE_RULESET_ERRATA (1U << 1) +#endif + +TEST(errata) +{ + const struct landlock_ruleset_attr ruleset_attr = { + .handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE, + }; + int errata; + + errata = landlock_create_ruleset(NULL, 0, + LANDLOCK_CREATE_RULESET_ERRATA); + /* The errata bitmask will not be backported to tests. */ + ASSERT_LE(0, errata); + TH_LOG("errata: 0x%x", errata); + + ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0, + LANDLOCK_CREATE_RULESET_ERRATA)); + ASSERT_EQ(EINVAL, errno); + + ASSERT_EQ(-1, landlock_create_ruleset(NULL, sizeof(ruleset_attr), + LANDLOCK_CREATE_RULESET_ERRATA)); + ASSERT_EQ(EINVAL, errno); + + ASSERT_EQ(-1, + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), + LANDLOCK_CREATE_RULESET_ERRATA)); + ASSERT_EQ(EINVAL, errno); + + ASSERT_EQ(-1, landlock_create_ruleset( + NULL, 0, + LANDLOCK_CREATE_RULESET_VERSION | + LANDLOCK_CREATE_RULESET_ERRATA)); + ASSERT_EQ(-1, landlock_create_ruleset(NULL, 0, + LANDLOCK_CREATE_RULESET_ERRATA | + 1 << 31)); + ASSERT_EQ(EINVAL, errno); +} + /* Tests ordering of syscall argument checks. */ TEST(create_ruleset_checks_ordering) { - const int last_flag = LANDLOCK_CREATE_RULESET_VERSION; + const int last_flag = LANDLOCK_CREATE_RULESET_ERRATA; const int invalid_flag = last_flag << 1; int ruleset_fd; const struct landlock_ruleset_attr ruleset_attr = { diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h index 61056fa074bb2..60afc1ce11bcd 100644 --- a/tools/testing/selftests/landlock/common.h +++ b/tools/testing/selftests/landlock/common.h @@ -68,6 +68,7 @@ static void _init_caps(struct __test_metadata *const _metadata, bool drop_all) CAP_MKNOD, CAP_NET_ADMIN, CAP_NET_BIND_SERVICE, + CAP_SETUID, CAP_SYS_ADMIN, CAP_SYS_CHROOT, /* clang-format on */ @@ -234,6 +235,7 @@ enforce_ruleset(struct __test_metadata *const _metadata, const int ruleset_fd) struct protocol_variant { int domain; int type; + int protocol; }; struct service_fixture { diff --git a/tools/testing/selftests/landlock/config b/tools/testing/selftests/landlock/config index 29af19c4e9f98..a8982da4acbdc 100644 --- a/tools/testing/selftests/landlock/config +++ b/tools/testing/selftests/landlock/config @@ -3,6 +3,8 @@ CONFIG_CGROUP_SCHED=y CONFIG_INET=y CONFIG_IPV6=y CONFIG_KEYS=y +CONFIG_MPTCP=y +CONFIG_MPTCP_IPV6=y CONFIG_NET=y CONFIG_NET_NS=y CONFIG_OVERLAY_FS=y diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c index 4e0aeb53b225a..376079d70d3fc 100644 --- a/tools/testing/selftests/landlock/net_test.c +++ b/tools/testing/selftests/landlock/net_test.c @@ -85,18 +85,18 @@ static void setup_loopback(struct __test_metadata *const _metadata) clear_ambient_cap(_metadata, CAP_NET_ADMIN); } +static bool prot_is_tcp(const struct protocol_variant *const prot) +{ + return (prot->domain == AF_INET || prot->domain == AF_INET6) && + prot->type == SOCK_STREAM && + (prot->protocol == IPPROTO_TCP || prot->protocol == IPPROTO_IP); +} + static bool 
is_restricted(const struct protocol_variant *const prot, const enum sandbox_type sandbox) { - switch (prot->domain) { - case AF_INET: - case AF_INET6: - switch (prot->type) { - case SOCK_STREAM: - return sandbox == TCP_SANDBOX; - } - break; - } + if (sandbox == TCP_SANDBOX) + return prot_is_tcp(prot); return false; } @@ -105,7 +105,7 @@ static int socket_variant(const struct service_fixture *const srv) int ret; ret = socket(srv->protocol.domain, srv->protocol.type | SOCK_CLOEXEC, - 0); + srv->protocol.protocol); if (ret < 0) return -errno; return ret; @@ -290,22 +290,59 @@ FIXTURE_TEARDOWN(protocol) } /* clang-format off */ -FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp) { +FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp1) { /* clang-format on */ .sandbox = NO_SANDBOX, .prot = { .domain = AF_INET, .type = SOCK_STREAM, + /* IPPROTO_IP == 0 */ + .protocol = IPPROTO_IP, }, }; /* clang-format off */ -FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp) { +FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp2) { + /* clang-format on */ + .sandbox = NO_SANDBOX, + .prot = { + .domain = AF_INET, + .type = SOCK_STREAM, + .protocol = IPPROTO_TCP, + }, +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp1) { /* clang-format on */ .sandbox = NO_SANDBOX, .prot = { .domain = AF_INET6, .type = SOCK_STREAM, + /* IPPROTO_IP == 0 */ + .protocol = IPPROTO_IP, + }, +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp2) { + /* clang-format on */ + .sandbox = NO_SANDBOX, + .prot = { + .domain = AF_INET6, + .type = SOCK_STREAM, + .protocol = IPPROTO_TCP, + }, +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_mptcp) { + /* clang-format on */ + .sandbox = NO_SANDBOX, + .prot = { + .domain = AF_INET, + .type = SOCK_STREAM, + .protocol = IPPROTO_MPTCP, }, }; @@ -329,6 +366,17 @@ FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_udp) { }, }; +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_mptcp) { + /* clang-format on */ + .sandbox = NO_SANDBOX, + .prot = { + .domain = AF_INET6, + .type = SOCK_STREAM, + .protocol = IPPROTO_MPTCP, + }, +}; + /* clang-format off */ FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_unix_stream) { /* clang-format on */ @@ -350,22 +398,48 @@ FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_unix_datagram) { }; /* clang-format off */ -FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp) { +FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp1) { + /* clang-format on */ + .sandbox = TCP_SANDBOX, + .prot = { + .domain = AF_INET, + .type = SOCK_STREAM, + /* IPPROTO_IP == 0 */ + .protocol = IPPROTO_IP, + }, +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp2) { /* clang-format on */ .sandbox = TCP_SANDBOX, .prot = { .domain = AF_INET, .type = SOCK_STREAM, + .protocol = IPPROTO_TCP, + }, +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp1) { + /* clang-format on */ + .sandbox = TCP_SANDBOX, + .prot = { + .domain = AF_INET6, + .type = SOCK_STREAM, + /* IPPROTO_IP == 0 */ + .protocol = IPPROTO_IP, }, }; /* clang-format off */ -FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp) { +FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp2) { /* clang-format on */ .sandbox = TCP_SANDBOX, .prot = { .domain = AF_INET6, .type = SOCK_STREAM, + .protocol = IPPROTO_TCP, }, }; @@ -389,6 +463,17 @@ FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_udp) { }, }; +/* clang-format off */ 
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_mptcp) { + /* clang-format on */ + .sandbox = TCP_SANDBOX, + .prot = { + .domain = AF_INET, + .type = SOCK_STREAM, + .protocol = IPPROTO_MPTCP, + }, +}; + /* clang-format off */ FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_unix_stream) { /* clang-format on */ @@ -399,6 +484,17 @@ FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_unix_stream) { }, }; +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_mptcp) { + /* clang-format on */ + .sandbox = TCP_SANDBOX, + .prot = { + .domain = AF_INET6, + .type = SOCK_STREAM, + .protocol = IPPROTO_MPTCP, + }, +}; + /* clang-format off */ FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_unix_datagram) { /* clang-format on */ diff --git a/tools/testing/selftests/landlock/scoped_signal_test.c b/tools/testing/selftests/landlock/scoped_signal_test.c index 475ee62a832d6..d8bf33417619f 100644 --- a/tools/testing/selftests/landlock/scoped_signal_test.c +++ b/tools/testing/selftests/landlock/scoped_signal_test.c @@ -249,47 +249,67 @@ TEST_F(scoped_domains, check_access_signal) _metadata->exit_code = KSFT_FAIL; } -static int thread_pipe[2]; - enum thread_return { THREAD_INVALID = 0, THREAD_SUCCESS = 1, THREAD_ERROR = 2, + THREAD_TEST_FAILED = 3, }; -void *thread_func(void *arg) +static void *thread_sync(void *arg) { + const int pipe_read = *(int *)arg; char buf; - if (read(thread_pipe[0], &buf, 1) != 1) + if (read(pipe_read, &buf, 1) != 1) return (void *)THREAD_ERROR; return (void *)THREAD_SUCCESS; } -TEST(signal_scoping_threads) +TEST(signal_scoping_thread_before) { - pthread_t no_sandbox_thread, scoped_thread; + pthread_t no_sandbox_thread; enum thread_return ret = THREAD_INVALID; + int thread_pipe[2]; drop_caps(_metadata); ASSERT_EQ(0, pipe2(thread_pipe, O_CLOEXEC)); - ASSERT_EQ(0, - pthread_create(&no_sandbox_thread, NULL, thread_func, NULL)); + ASSERT_EQ(0, pthread_create(&no_sandbox_thread, NULL, thread_sync, + &thread_pipe[0])); - /* Restricts the domain after creating the first thread. */ + /* Enforces restriction after creating the thread. */ create_scoped_domain(_metadata, LANDLOCK_SCOPE_SIGNAL); - ASSERT_EQ(EPERM, pthread_kill(no_sandbox_thread, 0)); - ASSERT_EQ(1, write(thread_pipe[1], ".", 1)); - - ASSERT_EQ(0, pthread_create(&scoped_thread, NULL, thread_func, NULL)); - ASSERT_EQ(0, pthread_kill(scoped_thread, 0)); - ASSERT_EQ(1, write(thread_pipe[1], ".", 1)); + EXPECT_EQ(0, pthread_kill(no_sandbox_thread, 0)); + EXPECT_EQ(1, write(thread_pipe[1], ".", 1)); EXPECT_EQ(0, pthread_join(no_sandbox_thread, (void **)&ret)); EXPECT_EQ(THREAD_SUCCESS, ret); + + EXPECT_EQ(0, close(thread_pipe[0])); + EXPECT_EQ(0, close(thread_pipe[1])); +} + +TEST(signal_scoping_thread_after) +{ + pthread_t scoped_thread; + enum thread_return ret = THREAD_INVALID; + int thread_pipe[2]; + + drop_caps(_metadata); + ASSERT_EQ(0, pipe2(thread_pipe, O_CLOEXEC)); + + /* Enforces restriction before creating the thread. 
*/ + create_scoped_domain(_metadata, LANDLOCK_SCOPE_SIGNAL); + + ASSERT_EQ(0, pthread_create(&scoped_thread, NULL, thread_sync, + &thread_pipe[0])); + + EXPECT_EQ(0, pthread_kill(scoped_thread, 0)); + EXPECT_EQ(1, write(thread_pipe[1], ".", 1)); + EXPECT_EQ(0, pthread_join(scoped_thread, (void **)&ret)); EXPECT_EQ(THREAD_SUCCESS, ret); @@ -297,6 +317,64 @@ TEST(signal_scoping_threads) EXPECT_EQ(0, close(thread_pipe[1])); } +struct thread_setuid_args { + int pipe_read, new_uid; +}; + +void *thread_setuid(void *ptr) +{ + const struct thread_setuid_args *arg = ptr; + char buf; + + if (read(arg->pipe_read, &buf, 1) != 1) + return (void *)THREAD_ERROR; + + /* libc's setuid() should update all thread's credentials. */ + if (getuid() != arg->new_uid) + return (void *)THREAD_TEST_FAILED; + + return (void *)THREAD_SUCCESS; +} + +TEST(signal_scoping_thread_setuid) +{ + struct thread_setuid_args arg; + pthread_t no_sandbox_thread; + enum thread_return ret = THREAD_INVALID; + int pipe_parent[2]; + int prev_uid; + + disable_caps(_metadata); + + /* This test does not need to be run as root. */ + prev_uid = getuid(); + arg.new_uid = prev_uid + 1; + EXPECT_LT(0, arg.new_uid); + + ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC)); + arg.pipe_read = pipe_parent[0]; + + /* Capabilities must be set before creating a new thread. */ + set_cap(_metadata, CAP_SETUID); + ASSERT_EQ(0, pthread_create(&no_sandbox_thread, NULL, thread_setuid, + &arg)); + + /* Enforces restriction after creating the thread. */ + create_scoped_domain(_metadata, LANDLOCK_SCOPE_SIGNAL); + + EXPECT_NE(arg.new_uid, getuid()); + EXPECT_EQ(0, setuid(arg.new_uid)); + EXPECT_EQ(arg.new_uid, getuid()); + EXPECT_EQ(1, write(pipe_parent[1], ".", 1)); + + EXPECT_EQ(0, pthread_join(no_sandbox_thread, (void **)&ret)); + EXPECT_EQ(THREAD_SUCCESS, ret); + + clear_cap(_metadata, CAP_SETUID); + EXPECT_EQ(0, close(pipe_parent[0])); + EXPECT_EQ(0, close(pipe_parent[1])); +} + const short backlog = 10; static volatile sig_atomic_t signal_received; diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index 02e1204971b0a..c0138cb19705b 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -33,9 +33,16 @@ endif # LDLIBS. MAKEFLAGS += --no-builtin-rules -CFLAGS = -Wall -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES) $(TOOLS_INCLUDES) +CFLAGS = -Wall -O2 -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES) $(TOOLS_INCLUDES) LDLIBS = -lrt -lpthread -lm +# Some distributions (such as Ubuntu) configure GCC so that _FORTIFY_SOURCE is +# automatically enabled at -O1 or above. This triggers various unused-result +# warnings where functions such as read() or write() are called and their +# return value is not checked. Disable _FORTIFY_SOURCE to silence those +# warnings. 
+CFLAGS += -U_FORTIFY_SOURCE + TEST_GEN_FILES = cow TEST_GEN_FILES += compaction_test TEST_GEN_FILES += gup_longterm diff --git a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh index 67df7b47087f0..e1fe16bcbbe88 100755 --- a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh +++ b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh @@ -29,7 +29,7 @@ fi if [[ $cgroup2 ]]; then cgroup_path=$(mount -t cgroup2 | head -1 | awk '{print $3}') if [[ -z "$cgroup_path" ]]; then - cgroup_path=/dev/cgroup/memory + cgroup_path=$(mktemp -d) mount -t cgroup2 none $cgroup_path do_umount=1 fi @@ -37,7 +37,7 @@ if [[ $cgroup2 ]]; then else cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}') if [[ -z "$cgroup_path" ]]; then - cgroup_path=/dev/cgroup/memory + cgroup_path=$(mktemp -d) mount -t cgroup memory,hugetlb $cgroup_path do_umount=1 fi diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c index 1238e1c5aae15..d87c5b1763ff1 100644 --- a/tools/testing/selftests/mm/cow.c +++ b/tools/testing/selftests/mm/cow.c @@ -876,7 +876,7 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run, size_t thpsize) mremap_size = thpsize / 2; mremap_mem = mmap(NULL, mremap_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (mem == MAP_FAILED) { + if (mremap_mem == MAP_FAILED) { ksft_test_result_fail("mmap() failed\n"); goto munmap; } diff --git a/tools/testing/selftests/mm/hugepage-mremap.c b/tools/testing/selftests/mm/hugepage-mremap.c index ada9156cc497b..c463d1c09c9b4 100644 --- a/tools/testing/selftests/mm/hugepage-mremap.c +++ b/tools/testing/selftests/mm/hugepage-mremap.c @@ -15,7 +15,7 @@ #define _GNU_SOURCE #include #include -#include +#include #include #include #include /* Definition of O_* constants */ diff --git a/tools/testing/selftests/mm/hugetlb_reparenting_test.sh b/tools/testing/selftests/mm/hugetlb_reparenting_test.sh index 11f9bbe7dc222..0b0d4ba1af277 100755 --- a/tools/testing/selftests/mm/hugetlb_reparenting_test.sh +++ b/tools/testing/selftests/mm/hugetlb_reparenting_test.sh @@ -23,7 +23,7 @@ fi if [[ $cgroup2 ]]; then CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk '{print $3}') if [[ -z "$CGROUP_ROOT" ]]; then - CGROUP_ROOT=/dev/cgroup/memory + CGROUP_ROOT=$(mktemp -d) mount -t cgroup2 none $CGROUP_ROOT do_umount=1 fi diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c index 66b4e111b5a27..b61803e36d1cf 100644 --- a/tools/testing/selftests/mm/ksm_functional_tests.c +++ b/tools/testing/selftests/mm/ksm_functional_tests.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include @@ -369,6 +369,7 @@ static void test_unmerge_discarded(void) munmap(map, size); } +#ifdef __NR_userfaultfd static void test_unmerge_uffd_wp(void) { struct uffdio_writeprotect uffd_writeprotect; @@ -429,6 +430,7 @@ static void test_unmerge_uffd_wp(void) unmap: munmap(map, size); } +#endif /* Verify that KSM can be enabled / queried with prctl. 
*/ static void test_prctl(void) @@ -684,7 +686,9 @@ int main(int argc, char **argv) exit(test_child_ksm()); } +#ifdef __NR_userfaultfd tests++; +#endif ksft_print_header(); ksft_set_plan(tests); @@ -696,7 +700,9 @@ int main(int argc, char **argv) test_unmerge(); test_unmerge_zero_pages(); test_unmerge_discarded(); +#ifdef __NR_userfaultfd test_unmerge_uffd_wp(); +#endif test_prot_none(); diff --git a/tools/testing/selftests/mm/memfd_secret.c b/tools/testing/selftests/mm/memfd_secret.c index 74c911aa3aea9..9a0597310a765 100644 --- a/tools/testing/selftests/mm/memfd_secret.c +++ b/tools/testing/selftests/mm/memfd_secret.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include #include @@ -28,6 +28,8 @@ #define pass(fmt, ...) ksft_test_result_pass(fmt, ##__VA_ARGS__) #define skip(fmt, ...) ksft_test_result_skip(fmt, ##__VA_ARGS__) +#ifdef __NR_memfd_secret + #define PATTERN 0x55 static const int prot = PROT_READ | PROT_WRITE; @@ -332,3 +334,13 @@ int main(int argc, char *argv[]) ksft_finished(); } + +#else /* __NR_memfd_secret */ + +int main(int argc, char *argv[]) +{ + printf("skip: skipping memfd_secret test (missing __NR_memfd_secret)\n"); + return KSFT_SKIP; +} + +#endif /* __NR_memfd_secret */ diff --git a/tools/testing/selftests/mm/mkdirty.c b/tools/testing/selftests/mm/mkdirty.c index 1db134063c38c..b8a7efe9204ea 100644 --- a/tools/testing/selftests/mm/mkdirty.c +++ b/tools/testing/selftests/mm/mkdirty.c @@ -9,7 +9,7 @@ */ #include #include -#include +#include #include #include #include @@ -265,6 +265,7 @@ static void test_pte_mapped_thp(void) munmap(mmap_mem, mmap_size); } +#ifdef __NR_userfaultfd static void test_uffdio_copy(void) { struct uffdio_register uffdio_register; @@ -321,6 +322,7 @@ static void test_uffdio_copy(void) munmap(dst, pagesize); free(src); } +#endif /* __NR_userfaultfd */ int main(void) { @@ -333,7 +335,9 @@ int main(void) thpsize / 1024); tests += 3; } +#ifdef __NR_userfaultfd tests += 1; +#endif /* __NR_userfaultfd */ ksft_print_header(); ksft_set_plan(tests); @@ -363,7 +367,9 @@ int main(void) if (thpsize) test_pte_mapped_thp(); /* Placing a fresh page via userfaultfd may set the PTE dirty. */ +#ifdef __NR_userfaultfd test_uffdio_copy(); +#endif /* __NR_userfaultfd */ err = ksft_get_fail_cnt(); if (err) diff --git a/tools/testing/selftests/mm/mlock2.h b/tools/testing/selftests/mm/mlock2.h index 1e5731bab499a..4417eaa5cfb78 100644 --- a/tools/testing/selftests/mm/mlock2.h +++ b/tools/testing/selftests/mm/mlock2.h @@ -3,7 +3,6 @@ #include #include #include -#include static int mlock2_(void *start, size_t len, int flags) { diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c index 4990f7ab4cb72..4fcecfb7b189b 100644 --- a/tools/testing/selftests/mm/protection_keys.c +++ b/tools/testing/selftests/mm/protection_keys.c @@ -42,7 +42,7 @@ #include #include #include -#include +#include #include #include diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index c5797ad1d37b6..d86ca1554d6d0 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -300,7 +300,9 @@ uffd_stress_bin=./uffd-stress CATEGORY="userfaultfd" run_test ${uffd_stress_bin} anon 20 16 # Hugetlb tests require source and destination huge pages. Pass in half # the size of the free pages we have, which is used for *each*. 
-half_ufd_size_MB=$((freepgs / 2)) +# uffd-stress expects a region expressed in MiB, so we adjust +# half_ufd_size_MB accordingly. +half_ufd_size_MB=$(((freepgs * hpgsize_KB) / 1024 / 2)) CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 32 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16 diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c index 717539eddf987..7ad6ba660c7d6 100644 --- a/tools/testing/selftests/mm/uffd-common.c +++ b/tools/testing/selftests/mm/uffd-common.c @@ -673,7 +673,11 @@ int uffd_open_dev(unsigned int flags) int uffd_open_sys(unsigned int flags) { +#ifdef __NR_userfaultfd return syscall(__NR_userfaultfd, flags); +#else + return -1; +#endif } int uffd_open(unsigned int flags) diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c index a4b83280998ab..944d559ade21f 100644 --- a/tools/testing/selftests/mm/uffd-stress.c +++ b/tools/testing/selftests/mm/uffd-stress.c @@ -33,10 +33,11 @@ * pthread_mutex_lock will also verify the atomicity of the memory * transfer (UFFDIO_COPY). */ -#include + #include "uffd-common.h" uint64_t features; +#ifdef __NR_userfaultfd #define BOUNCE_RANDOM (1<<0) #define BOUNCE_RACINGFAULTS (1<<1) @@ -471,3 +472,15 @@ int main(int argc, char **argv) nr_pages, nr_pages_per_cpu); return userfaultfd_stress(); } + +#else /* __NR_userfaultfd */ + +#warning "missing __NR_userfaultfd definition" + +int main(void) +{ + printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n"); + return KSFT_SKIP; +} + +#endif /* __NR_userfaultfd */ diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c index a2e71b1636e7c..3ddbb0a71b9c1 100644 --- a/tools/testing/selftests/mm/uffd-unit-tests.c +++ b/tools/testing/selftests/mm/uffd-unit-tests.c @@ -5,11 +5,12 @@ * Copyright (C) 2015-2023 Red Hat, Inc. */ -#include #include "uffd-common.h" #include "../../../../mm/gup_test.h" +#ifdef __NR_userfaultfd + /* The unit test doesn't need a large or random size, make it 32MB for now */ #define UFFD_TEST_MEM_SIZE (32UL << 20) @@ -1558,3 +1559,14 @@ int main(int argc, char *argv[]) return ksft_get_fail_cnt() ? 
KSFT_FAIL : KSFT_PASS; } +#else /* __NR_userfaultfd */ + +#warning "missing __NR_userfaultfd definition" + +int main(void) +{ + printf("Skipping %s (missing __NR_userfaultfd)\n", __FILE__); + return KSFT_SKIP; +} + +#endif /* __NR_userfaultfd */ diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c index d240d02fa443a..c83a8b47bbdfa 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.c +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c @@ -1270,7 +1270,7 @@ int main_loop(void) if (cfg_input && cfg_sockopt_types.mptfo) { fd_in = open(cfg_input, O_RDONLY); - if (fd < 0) + if (fd_in < 0) xerror("can't open %s:%d", cfg_input, errno); } @@ -1293,13 +1293,13 @@ int main_loop(void) if (cfg_input && !cfg_sockopt_types.mptfo) { fd_in = open(cfg_input, O_RDONLY); - if (fd < 0) + if (fd_in < 0) xerror("can't open %s:%d", cfg_input, errno); } ret = copyfd_io(fd_in, fd, 1, 0, &winfo); if (ret) - return ret; + goto out; if (cfg_truncate > 0) { shutdown(fd, SHUT_WR); @@ -1320,7 +1320,10 @@ int main_loop(void) close(fd); } - return 0; +out: + if (cfg_input) + close(fd_in); + return ret; } int parse_proto(const char *proto) diff --git a/tools/testing/selftests/net/netfilter/br_netfilter.sh b/tools/testing/selftests/net/netfilter/br_netfilter.sh index c28379a965d83..1559ba275105e 100755 --- a/tools/testing/selftests/net/netfilter/br_netfilter.sh +++ b/tools/testing/selftests/net/netfilter/br_netfilter.sh @@ -13,6 +13,12 @@ source lib.sh checktool "nft --version" "run test without nft tool" +read t < /proc/sys/kernel/tainted +if [ "$t" -ne 0 ];then + echo SKIP: kernel is tainted + exit $ksft_skip +fi + cleanup() { cleanup_all_ns } @@ -165,6 +171,7 @@ if [ "$t" -eq 0 ];then echo PASS: kernel not tainted else echo ERROR: kernel is tainted + dmesg ret=1 fi diff --git a/tools/testing/selftests/net/netfilter/br_netfilter_queue.sh b/tools/testing/selftests/net/netfilter/br_netfilter_queue.sh index 6a764d70ab06f..4788641717d93 100755 --- a/tools/testing/selftests/net/netfilter/br_netfilter_queue.sh +++ b/tools/testing/selftests/net/netfilter/br_netfilter_queue.sh @@ -4,6 +4,12 @@ source lib.sh checktool "nft --version" "run test without nft tool" +read t < /proc/sys/kernel/tainted +if [ "$t" -ne 0 ];then + echo SKIP: kernel is tainted + exit $ksft_skip +fi + cleanup() { cleanup_all_ns } @@ -72,6 +78,7 @@ if [ "$t" -eq 0 ];then echo PASS: kernel not tainted else echo ERROR: kernel is tainted + dmesg exit 1 fi diff --git a/tools/testing/selftests/net/netfilter/nft_queue.sh b/tools/testing/selftests/net/netfilter/nft_queue.sh index a9d109fcc15c2..00fe1a6c1f30c 100755 --- a/tools/testing/selftests/net/netfilter/nft_queue.sh +++ b/tools/testing/selftests/net/netfilter/nft_queue.sh @@ -593,6 +593,7 @@ EOF echo "PASS: queue program exiting while packets queued" else echo "TAINT: queue program exiting while packets queued" + dmesg ret=1 fi } diff --git a/tools/testing/selftests/rseq/rseq-riscv-bits.h b/tools/testing/selftests/rseq/rseq-riscv-bits.h index de31a0143139b..f02f411d550d1 100644 --- a/tools/testing/selftests/rseq/rseq-riscv-bits.h +++ b/tools/testing/selftests/rseq/rseq-riscv-bits.h @@ -243,7 +243,7 @@ int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, i #ifdef RSEQ_COMPARE_TWICE RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]") #endif - RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, 3) + RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, 3) RSEQ_INJECT_ASM(4) RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow 
outputs */ @@ -251,8 +251,8 @@ int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, i [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD), [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [ptr] "r" (ptr), - [off] "er" (off), - [inc] "er" (inc) + [off] "r" (off), + [inc] "r" (inc) RSEQ_INJECT_INPUT : "memory", RSEQ_ASM_TMP_REG_1 RSEQ_INJECT_CLOBBER diff --git a/tools/testing/selftests/rseq/rseq-riscv.h b/tools/testing/selftests/rseq/rseq-riscv.h index 37e598d0a365e..67d544aaa9a3b 100644 --- a/tools/testing/selftests/rseq/rseq-riscv.h +++ b/tools/testing/selftests/rseq/rseq-riscv.h @@ -158,7 +158,7 @@ do { \ "bnez " RSEQ_ASM_TMP_REG_1 ", 222b\n" \ "333:\n" -#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, post_commit_label) \ +#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, post_commit_label) \ "mv " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(ptr) "]\n" \ RSEQ_ASM_OP_R_ADD(off) \ REG_L RSEQ_ASM_TMP_REG_1 ", 0(" RSEQ_ASM_TMP_REG_1 ")\n" \ diff --git a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c index c9a2da0575a0f..6dcf7e6104afb 100644 --- a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c +++ b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c @@ -43,7 +43,7 @@ void BPF_STRUCT_OPS(dsp_local_on_dispatch, s32 cpu, struct task_struct *prev) if (!p) return; - if (p->nr_cpus_allowed == nr_cpus) + if (p->nr_cpus_allowed == nr_cpus && !is_migration_disabled(p)) target = bpf_get_prandom_u32() % nr_cpus; else target = scx_bpf_task_cpu(p); diff --git a/tools/testing/shared/linux.c b/tools/testing/shared/linux.c index 17263696b5d88..61b3f571f7a70 100644 --- a/tools/testing/shared/linux.c +++ b/tools/testing/shared/linux.c @@ -147,7 +147,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list) { if (kmalloc_verbose) - pr_debug("Bulk free %p[0-%lu]\n", list, size - 1); + pr_debug("Bulk free %p[0-%zu]\n", list, size - 1); pthread_mutex_lock(&cachep->lock); for (int i = 0; i < size; i++) @@ -165,7 +165,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size, size_t i; if (kmalloc_verbose) - pr_debug("Bulk alloc %lu\n", size); + pr_debug("Bulk alloc %zu\n", size); pthread_mutex_lock(&cachep->lock); if (cachep->nr_objs >= size) { diff --git a/usr/include/Makefile b/usr/include/Makefile index 771e32872b2ab..58173cfe5ff17 100644 --- a/usr/include/Makefile +++ b/usr/include/Makefile @@ -10,7 +10,7 @@ UAPI_CFLAGS := -std=c90 -Wall -Werror=implicit-function-declaration # In theory, we do not care -m32 or -m64 for header compile tests. # It is here just because CONFIG_CC_CAN_LINK is tested with -m32 or -m64. -UAPI_CFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS)) +UAPI_CFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)) # USERCFLAGS might contain sysroot location for CC. 
UAPI_CFLAGS += $(USERCFLAGS) diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index fd6a3010afa83..1f51a4d906b87 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -75,7 +75,7 @@ config KVM_COMPAT depends on KVM && COMPAT && !(S390 || ARM64 || RISCV) config HAVE_KVM_IRQ_BYPASS - bool + tristate select IRQ_BYPASS_MANAGER config HAVE_KVM_VCPU_ASYNC_IOCTL diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 6b390b622b728..929c7980fda6a 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -149,7 +149,7 @@ irqfd_shutdown(struct work_struct *work) /* * It is now safe to release the object's resources */ -#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS +#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS) irq_bypass_unregister_consumer(&irqfd->consumer); #endif eventfd_ctx_put(irqfd->eventfd); @@ -274,7 +274,7 @@ static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd) write_seqcount_end(&irqfd->irq_entry_sc); } -#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS +#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS) void __attribute__((weak)) kvm_arch_irq_bypass_stop( struct irq_bypass_consumer *cons) { @@ -425,7 +425,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) if (events & EPOLLIN) schedule_work(&irqfd->inject); -#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS +#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS) if (kvm_arch_has_irq_bypass()) { irqfd->consumer.token = (void *)irqfd->eventfd; irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer; @@ -618,14 +618,14 @@ void kvm_irq_routing_update(struct kvm *kvm) spin_lock_irq(&kvm->irqfds.lock); list_for_each_entry(irqfd, &kvm->irqfds.items, list) { -#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS +#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS) /* Under irqfds.lock, so can read irq_entry safely */ struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry; #endif irqfd_update(kvm, irqfd); -#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS +#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS) if (irqfd->producer && kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) { int ret = kvm_arch_update_irqfd_routing(