719 files changed, 22447 insertions, 9246 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-dsa b/Documentation/ABI/testing/sysfs-bus-event_source-devices-dsa new file mode 100644 index 000000000000..3c7d132281b0 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-dsa @@ -0,0 +1,30 @@ +What: /sys/bus/event_source/devices/dsa*/format +Date: April 2021 +KernelVersion: 5.13 +Contact: Tom Zanussi <[email protected]> +Description: Read-only. Attribute group to describe the magic bits + that go into perf_event_attr.config or + perf_event_attr.config1 for the IDXD DSA pmu. (See also + ABI/testing/sysfs-bus-event_source-devices-format). + + Each attribute in this group defines a bit range in + perf_event_attr.config or perf_event_attr.config1. + All supported attributes are listed below (See the + IDXD DSA Spec for possible attribute values):: + + event_category = "config:0-3" - event category + event = "config:4-31" - event ID + + filter_wq = "config1:0-31" - workqueue filter + filter_tc = "config1:32-39" - traffic class filter + filter_pgsz = "config1:40-43" - page size filter + filter_sz = "config1:44-51" - transfer size filter + filter_eng = "config1:52-59" - engine filter + +What: /sys/bus/event_source/devices/dsa*/cpumask +Date: April 2021 +KernelVersion: 5.13 +Contact: Tom Zanussi <[email protected]> +Description: Read-only. This file always returns the cpu to which the + IDXD DSA pmu is bound for access to all dsa pmu + performance monitoring events. diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 0eee30b27ab6..fe13baa53c59 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -285,7 +285,7 @@ Description: Disable L3 cache indices All AMD processors with L3 caches provide this functionality. For details, see BKDGs at - http://developer.amd.com/documentation/guides/Pages/default.aspx + https://www.amd.com/en/support/tech-docs?keyword=bios+kernel What: /sys/devices/system/cpu/cpufreq/boost diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index cbeac1bebe2f..4849b8e84e42 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -276,7 +276,7 @@ Date April 2019 Contact: "Daniel Rosenberg" <[email protected]> Description: If checkpoint=disable, it displays the number of blocks that are unusable. - If checkpoint=enable it displays the enumber of blocks that + If checkpoint=enable it displays the number of blocks that would be unusable if checkpoint=disable were to be set. What: /sys/fs/f2fs/<disk>/encoding @@ -409,3 +409,32 @@ Description: Give a way to change checkpoint merge daemon's io priority. I/O priority "3". We can select the class between "rt" and "be", and set the I/O priority within valid range of it. "," delimiter is necessary in between I/O class and priority number. + +What: /sys/fs/f2fs/<disk>/ovp_segments +Date: March 2021 +Contact: "Jaegeuk Kim" <[email protected]> +Description: Shows the number of overprovision segments. + +What: /sys/fs/f2fs/<disk>/compr_written_block +Date: March 2021 +Contact: "Daeho Jeong" <[email protected]> +Description: Show the block count written after compression since mount. Note + that when the compressed blocks are deleted, this count doesn't + decrease. If you write "0" here, you can initialize + compr_written_block and compr_saved_block to "0". 
+ +What: /sys/fs/f2fs/<disk>/compr_saved_block +Date: March 2021 +Contact: "Daeho Jeong" <[email protected]> +Description: Show the saved block count with compression since mount. Note + that when the compressed blocks are deleted, this count doesn't + decrease. If you write "0" here, you can initialize + compr_written_block and compr_saved_block to "0". + +What: /sys/fs/f2fs/<disk>/compr_new_inode +Date: March 2021 +Contact: "Daeho Jeong" <[email protected]> +Description: Show the count of inodes newly enabled for compression since mount. + Note that when compression is disabled for the files, this count + doesn't decrease. If you write "0" here, you can initialize + compr_new_inode to "0". diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-cma b/Documentation/ABI/testing/sysfs-kernel-mm-cma new file mode 100644 index 000000000000..02b2bb60c296 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-kernel-mm-cma @@ -0,0 +1,25 @@ +What: /sys/kernel/mm/cma/ +Date: Feb 2021 +Contact: Minchan Kim <[email protected]> +Description: + /sys/kernel/mm/cma/ contains a subdirectory for each CMA + heap name (also sometimes called CMA areas). + + Each CMA heap subdirectory (that is, each + /sys/kernel/mm/cma/<cma-heap-name> directory) contains the + following items: + + alloc_pages_success + alloc_pages_fail + +What: /sys/kernel/mm/cma/<cma-heap-name>/alloc_pages_success +Date: Feb 2021 +Contact: Minchan Kim <[email protected]> +Description: + the number of pages the CMA API succeeded in allocating + +What: /sys/kernel/mm/cma/<cma-heap-name>/alloc_pages_fail +Date: Feb 2021 +Contact: Minchan Kim <[email protected]> +Description: + the number of pages the CMA API failed to allocate diff --git a/Documentation/admin-guide/gpio/gpio-mockup.rst b/Documentation/admin-guide/gpio/gpio-mockup.rst index 9fa1618b3adc..493071da1738 100644 --- a/Documentation/admin-guide/gpio/gpio-mockup.rst +++ b/Documentation/admin-guide/gpio/gpio-mockup.rst @@ -17,17 +17,18 @@ module. gpio_mockup_ranges This parameter takes an argument in the form of an array of integer - pairs. Each pair defines the base GPIO number (if any) and the number - of lines exposed by the chip. If the base GPIO is -1, the gpiolib - will assign it automatically. + pairs. Each pair defines the base GPIO number (a non-negative integer) + and the first number after the chip's last line. If the base GPIO + is -1, the gpiolib will assign it automatically; in that case the following + parameter is the number of lines exposed by the chip. - Example: gpio_mockup_ranges=-1,8,-1,16,405,4 + Example: gpio_mockup_ranges=-1,8,-1,16,405,409 The line above creates three chips. The first one will expose 8 lines, the second 16 and the third 4. The base GPIO for the third chip is set to 405 while for the first two chips it will be assigned automatically. - gpio_named_lines + gpio_mockup_named_lines This parameter doesn't take any arguments. It lets the driver know that GPIO lines exposed by it should be named. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 02470ba1fe6a..5bcc229d31e2 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1469,6 +1469,12 @@ Don't use this when you are not running on the android emulator + gpio-mockup.gpio_mockup_ranges + [HW] Sets the ranges of gpiochips for this device. + Format: <start1>,<end1>,<start2>,<end2>... + gpio-mockup.gpio_mockup_named_lines + [HW] Let the driver know GPIO lines should be named.
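The IDXD DSA PMU format attributes listed at the top of this section map directly onto perf_event_attr.config and perf_event_attr.config1. A minimal userspace sketch, assuming a dsa0 device, CPU 0 as the PMU's bound cpumask, and placeholder category/event values (real IDs come from the IDXD DSA spec)::

  /* Hypothetical sketch: count one IDXD DSA event with perf_event_open().
   * The category (0x1) and event ID (0x2) are placeholders; error handling
   * is trimmed.
   */
  #include <linux/perf_event.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int sysfs_read_int(const char *path)
  {
          FILE *f = fopen(path, "r");
          int val = -1;

          if (f) {
                  if (fscanf(f, "%d", &val) != 1)
                          val = -1;
                  fclose(f);
          }
          return val;
  }

  int main(void)
  {
          struct perf_event_attr attr = { 0 };
          uint64_t count;
          int fd, type;

          /* Dynamic PMU type registered for this device. */
          type = sysfs_read_int("/sys/bus/event_source/devices/dsa0/type");
          if (type < 0)
                  return 1;

          attr.size = sizeof(attr);
          attr.type = type;
          attr.config = 0x1 | (0x2 << 4);  /* event_category=config:0-3, event=config:4-31 */
          attr.config1 = 0;                /* no wq/tc/pgsz/sz/eng filters */

          /* System-wide event, bound to the CPU named in .../dsa0/cpumask
           * (CPU 0 is assumed here for brevity). */
          fd = syscall(SYS_perf_event_open, &attr, -1, 0, -1, 0);
          if (fd < 0)
                  return 1;

          ioctl(fd, PERF_EVENT_IOC_RESET, 0);
          ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
          /* ... submit DSA work here ... */
          ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
          if (read(fd, &count, sizeof(count)) == sizeof(count))
                  printf("count: %llu\n", (unsigned long long)count);
          close(fd);
          return 0;
  }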
+ gpt [EFI] Forces disk with valid GPT signature but invalid Protective MBR to be treated as GPT. If the primary GPT is corrupted, it enables the backup/alternate @@ -1492,10 +1498,6 @@ Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0. Default: 1024 - gpio-mockup.gpio_mockup_ranges - [HW] Sets the ranges of gpiochip of for this device. - Format: <start1>,<end1>,<start2>,<end2>... - hardlockup_all_cpu_backtrace= [KNL] Should the hard-lockup detector generate backtraces on all cpus. @@ -2802,7 +2804,24 @@ seconds. Use this parameter to check at some other rate. 0 disables periodic checking. - memtest= [KNL,X86,ARM,PPC] Enable memtest + memory_hotplug.memmap_on_memory + [KNL,X86,ARM] Boolean flag to enable this feature. + Format: {on | off (default)} + When enabled, runtime hotplugged memory will + allocate its internal metadata (struct pages) + from the hotadded memory itself, which allows + hotadding a lot of memory without requiring + additional memory to do so. + This feature is disabled by default because it + has some implications for large (e.g. GB) + allocations in some configurations (e.g. small + memory blocks). + The state of the flag can be read in + /sys/module/memory_hotplug/parameters/memmap_on_memory. + Note that even when enabled, there are a few cases where + the feature is not effective. + + memtest= [KNL,X86,ARM,PPC,RISCV] Enable memtest Format: <integer> default : 0 <disable> Specifies the number of memtest passes to be diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst index 5307f90738aa..05d51d2d8beb 100644 --- a/Documentation/admin-guide/mm/memory-hotplug.rst +++ b/Documentation/admin-guide/mm/memory-hotplug.rst @@ -357,6 +357,15 @@ creates ZONE_MOVABLE as following. Unfortunately, there is no information to show which memory block belongs to ZONE_MOVABLE. This is TBD. +.. note:: + Techniques that rely on long-term pinnings of memory (especially RDMA and + vfio) are fundamentally problematic with ZONE_MOVABLE and, therefore, memory + hot remove. Pinned pages cannot reside on ZONE_MOVABLE, so that + memory can still get hot removed; be aware that pinning can fail even if + there is plenty of free memory in ZONE_MOVABLE. In addition, using + ZONE_MOVABLE might make page pinning more expensive, because pages have to be + migrated off that zone first. + .. _memory_hotplug_how_to_offline_memory: How to offline memory diff --git a/Documentation/admin-guide/mm/userfaultfd.rst b/Documentation/admin-guide/mm/userfaultfd.rst index 65eefa66c0ba..3aa38e8b8361 100644 --- a/Documentation/admin-guide/mm/userfaultfd.rst +++ b/Documentation/admin-guide/mm/userfaultfd.rst @@ -63,36 +63,36 @@ the generic ioctl available. The ``uffdio_api.features`` bitmask returned by the ``UFFDIO_API`` ioctl defines what memory types are supported by the ``userfaultfd`` and what -events, except page fault notifications, may be generated. - -If the kernel supports registering ``userfaultfd`` ranges on hugetlbfs -virtual memory areas, ``UFFD_FEATURE_MISSING_HUGETLBFS`` will be set in -``uffdio_api.features``. Similarly, ``UFFD_FEATURE_MISSING_SHMEM`` will be -set if the kernel supports registering ``userfaultfd`` ranges on shared -memory (covering all shmem APIs, i.e. tmpfs, ``IPCSHM``, ``/dev/zero``, -``MAP_SHARED``, ``memfd_create``, etc). - -The userland application that wants to use ``userfaultfd`` with hugetlbfs -or shared memory need to set the corresponding flag in -``uffdio_api.features`` to enable those features.
- -If the userland desires to receive notifications for events other than -page faults, it has to verify that ``uffdio_api.features`` has appropriate -``UFFD_FEATURE_EVENT_*`` bits set. These events are described in more -detail below in `Non-cooperative userfaultfd`_ section. - -Once the ``userfaultfd`` has been enabled the ``UFFDIO_REGISTER`` ioctl should -be invoked (if present in the returned ``uffdio_api.ioctls`` bitmask) to -register a memory range in the ``userfaultfd`` by setting the +events, except page fault notifications, may be generated: + +- The ``UFFD_FEATURE_EVENT_*`` flags indicate that various other events + other than page faults are supported. These events are described in more + detail below in the `Non-cooperative userfaultfd`_ section. + +- ``UFFD_FEATURE_MISSING_HUGETLBFS`` and ``UFFD_FEATURE_MISSING_SHMEM`` + indicate that the kernel supports ``UFFDIO_REGISTER_MODE_MISSING`` + registrations for hugetlbfs and shared memory (covering all shmem APIs, + i.e. tmpfs, ``IPCSHM``, ``/dev/zero``, ``MAP_SHARED``, ``memfd_create``, + etc) virtual memory areas, respectively. + +- ``UFFD_FEATURE_MINOR_HUGETLBFS`` indicates that the kernel supports + ``UFFDIO_REGISTER_MODE_MINOR`` registration for hugetlbfs virtual memory + areas. + +The userland application should set the feature flags it intends to use +when invoking the ``UFFDIO_API`` ioctl, to request that those features be +enabled if supported. + +Once the ``userfaultfd`` API has been enabled the ``UFFDIO_REGISTER`` +ioctl should be invoked (if present in the returned ``uffdio_api.ioctls`` +bitmask) to register a memory range in the ``userfaultfd`` by setting the uffdio_register structure accordingly. The ``uffdio_register.mode`` bitmask will specify to the kernel which kind of faults to track for -the range (``UFFDIO_REGISTER_MODE_MISSING`` would track missing -pages). The ``UFFDIO_REGISTER`` ioctl will return the +the range. The ``UFFDIO_REGISTER`` ioctl will return the ``uffdio_register.ioctls`` bitmask of ioctls that are suitable to resolve userfaults on the range registered. Not all ioctls will necessarily be -supported for all memory types depending on the underlying virtual -memory backend (anonymous memory vs tmpfs vs real filebacked -mappings). +supported for all memory types (e.g. anonymous memory vs. shmem vs. +hugetlbfs), or all types of intercepted faults. Userland can use the ``uffdio_register.ioctls`` to manage the virtual address space in the background (to add or potentially also remove @@ -100,21 +100,46 @@ memory from the ``userfaultfd`` registered range). This means a userfault could be triggering just before userland maps in the background the user-faulted page. -The primary ioctl to resolve userfaults is ``UFFDIO_COPY``. That -atomically copies a page into the userfault registered range and wakes -up the blocked userfaults -(unless ``uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE`` is set). -Other ioctl works similarly to ``UFFDIO_COPY``. They're atomic as in -guaranteeing that nothing can see an half copied page since it'll -keep userfaulting until the copy has finished. +Resolving Userfaults +-------------------- + +There are three basic ways to resolve userfaults: + +- ``UFFDIO_COPY`` atomically copies some existing page contents from + userspace. + +- ``UFFDIO_ZEROPAGE`` atomically zeros the new page. + +- ``UFFDIO_CONTINUE`` maps an existing, previously-populated page. 
+ +These operations are atomic in the sense that they guarantee nothing can +see a half-populated page, since readers will keep userfaulting until the +operation has finished. + +By default, these wake up userfaults blocked on the range in question. +They support a ``UFFDIO_*_MODE_DONTWAKE`` ``mode`` flag, which indicates +that waking will be done separately at some later time. + +Which ioctl to choose depends on the kind of page fault, and what we'd +like to do to resolve it: + +- For ``UFFDIO_REGISTER_MODE_MISSING`` faults, the fault needs to be + resolved by either providing a new page (``UFFDIO_COPY``), or mapping + the zero page (``UFFDIO_ZEROPAGE``). By default, the kernel would map + the zero page for a missing fault. With userfaultfd, userspace can + decide what content to provide before the faulting thread continues. + +- For ``UFFDIO_REGISTER_MODE_MINOR`` faults, there is an existing page (in + the page cache). Userspace has the option of modifying the page's + contents before resolving the fault. Once the contents are correct + (modified or not), userspace asks the kernel to map the page and let the + faulting thread continue with ``UFFDIO_CONTINUE``. Notes: -- If you requested ``UFFDIO_REGISTER_MODE_MISSING`` when registering then - you must provide some kind of page in your thread after reading from - the uffd. You must provide either ``UFFDIO_COPY`` or ``UFFDIO_ZEROPAGE``. - The normal behavior of the OS automatically providing a zero page on - an anonymous mmaping is not in place. +- You can tell which kind of fault occurred by examining + ``pagefault.flags`` within the ``uffd_msg``, checking for the + ``UFFD_PAGEFAULT_FLAG_*`` flags. - None of the page-delivering ioctls default to the range that you registered with. You must fill in all fields for the appropriate @@ -122,9 +147,9 @@ Notes: - You get the address of the access that triggered the missing page event out of a struct uffd_msg that you read in the thread from the - uffd. You can supply as many pages as you want with ``UFFDIO_COPY`` or - ``UFFDIO_ZEROPAGE``. Keep in mind that unless you used DONTWAKE then - the first of any of those IOCTLs wakes up the faulting thread. + uffd. You can supply as many pages as you want with these IOCTLs. + Keep in mind that unless you used DONTWAKE then the first of any of + those IOCTLs wakes up the faulting thread. - Be sure to test for all errors including (``pollfd[0].revents & POLLERR``). This can happen, e.g. when ranges diff --git a/Documentation/admin-guide/reporting-issues.rst b/Documentation/admin-guide/reporting-issues.rst index 48b4d0ef2b09..18d8e25ba9df 100644 --- a/Documentation/admin-guide/reporting-issues.rst +++ b/Documentation/admin-guide/reporting-issues.rst @@ -24,7 +24,8 @@ longterm series? One still supported? Then search the `LKML you don't find any, install `the latest release from that series <https://kernel.org/>`_. If it still shows the issue, report it to the stable mailing list ([email protected]) and CC the regressions list +([email protected]); ideally also CC the maintainer and the mailing +list for the subsystem in question. In all other cases try your best guess which kernel part might be causing the issue. Check the :ref:`MAINTAINERS <maintainers>` file for how its developers @@ -48,8 +49,9 @@ before the issue occurs. If you are facing multiple issues with the Linux kernel at once, report each separately. While writing your report, include all information relevant to the issue, like the kernel and the distro used. 
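A minimal single-threaded sketch of the userfaultfd flow described in the section above: the ``UFFDIO_API`` handshake, a ``UFFDIO_REGISTER_MODE_MISSING`` registration, and resolving one missing fault with ``UFFDIO_COPY``. Error handling is trimmed, and the 16-page anonymous area is an illustrative assumption::

  #include <fcntl.h>
  #include <linux/userfaultfd.h>
  #include <poll.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int main(void)
  {
          long page = sysconf(_SC_PAGESIZE);
          size_t len = 16 * page;
          struct uffdio_api api = { .api = UFFD_API, .features = 0 };
          struct uffdio_register reg = { 0 };
          struct uffdio_copy copy = { 0 };
          struct uffd_msg msg;
          struct pollfd pfd;
          char *area, *src;
          int uffd;

          uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
          ioctl(uffd, UFFDIO_API, &api);          /* handshake; request no extra features */

          area = mmap(NULL, len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

          reg.range.start = (unsigned long)area;
          reg.range.len = len;
          reg.mode = UFFDIO_REGISTER_MODE_MISSING;
          ioctl(uffd, UFFDIO_REGISTER, &reg);     /* track missing-page faults */

          /* ... another thread now touches 'area' and blocks ... */

          pfd.fd = uffd;
          pfd.events = POLLIN;
          poll(&pfd, 1, -1);                      /* also test POLLERR in real code */
          read(uffd, &msg, sizeof(msg));

          if (msg.event == UFFD_EVENT_PAGEFAULT) {
                  /* Provide our own contents for the missing page. */
                  src = mmap(NULL, page, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                  memset(src, 0xaa, page);

                  copy.dst = msg.arg.pagefault.address & ~((unsigned long)page - 1);
                  copy.src = (unsigned long)src;
                  copy.len = page;
                  copy.mode = 0;                  /* wake the faulting thread */
                  ioctl(uffd, UFFDIO_COPY, &copy);
          }
          return 0;
  }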
In case of a regression, CC the -regressions mailing list ([email protected]) to your report; also try -to include the commit-id of the change causing it, which a bisection can find. +regressions mailing list ([email protected]) to your report. Also try +to pinpoint the culprit with a bisection; if you succeed, include its +commit-id and CC everyone in the signed-off-by chain. Once the report is out, answer any questions that come up and help where you can. That includes keeping the ball rolling by occasionally retesting with newer @@ -198,10 +200,11 @@ report them: * Send a short problem report to the Linux stable mailing list ([email protected]) and CC the Linux regressions mailing list - ([email protected]). Roughly describe the issue and ideally - explain how to reproduce it. Mention the first version that shows the - problem and the last version that's working fine. Then wait for further - instructions. + ([email protected]); if you suspect the cause in a particular + subsystem, CC its maintainer and its mailing list. Roughly describe the + issue and ideally explain how to reproduce it. Mention the first version + that shows the problem and the last version that's working fine. Then + wait for further instructions. The reference section below explains each of these steps in more detail. @@ -768,7 +771,9 @@ regular internet search engine and add something like the results to the archives at that URL. It's also wise to check the internet, LKML and maybe bugzilla.kernel.org again -at this point. +at this point. If your report needs to be filed in a bug tracker, you may want +to check the mailing list archives for the subsystem as well, as someone might +have reported it only there. For details how to search and what to do if you find matching reports see "Search for existing reports, first run" above. @@ -1249,9 +1254,10 @@ and the oldest where the issue occurs (say 5.8-rc1). When sending the report by mail, CC the Linux regressions mailing list ([email protected]). In case the report needs to be filed to some web -tracker, proceed to do so; once filed, forward the report by mail to the -regressions list. Make sure to inline the forwarded report, hence do not attach -it. Also add a short note at the top where you mention the URL to the ticket. +tracker, proceed to do so. Once filed, forward the report by mail to the +regressions list; CC the maintainer and the mailing list for the subsystem in +question. Make sure to inline the forwarded report, hence do not attach it. +Also add a short note at the top where you mention the URL to the ticket. When mailing or forwarding the report, in case of a successful bisection add the author of the culprit to the recipients; also CC everyone in the signed-off-by @@ -1536,17 +1542,20 @@ Report the regression *Send a short problem report to the Linux stable mailing list ([email protected]) and CC the Linux regressions mailing list - ([email protected]). Roughly describe the issue and ideally - explain how to reproduce it. Mention the first version that shows the - problem and the last version that's working fine. Then wait for further - instructions.* + ([email protected]); if you suspect the cause in a particular + subsystem, CC its maintainer and its mailing list. Roughly describe the + issue and ideally explain how to reproduce it. Mention the first version + that shows the problem and the last version that's working fine.
Then + wait for further instructions.* When reporting a regression that happens within a stable or longterm kernel line (say when updating from 5.10.4 to 5.10.5) a brief report is enough for -the start to get the issue reported quickly. Hence a rough description is all -it takes. +the start to get the issue reported quickly. Hence a rough description to the +stable and regressions mailing list is all it takes; but in case you suspect +the cause in a particular subsystem, CC its maintainers and its mailing list +as well, because that will speed things up. -But note, it helps developers a great deal if you can specify the exact version +And note, it helps developers a great deal if you can specify the exact version that introduced the problem. Hence if possible within a reasonable time frame, try to find that version using vanilla kernels. Let's assume something broke when your distributor released an update from Linux kernel 5.10.5 to 5.10.8. Then as @@ -1563,7 +1572,9 @@ pinpoint the exact change that causes the issue (which then can easily get reverted to fix the issue quickly). Hence consider doing a proper bisection right away if time permits. See the section 'Special care for regressions' and the document 'Documentation/admin-guide/bug-bisect.rst' for details how to -perform one. +perform one. In case of a successful bisection add the author of the culprit to +the recipients; also CC everyone in the signed-off-by chain, which you find at +the end of its commit message. Reference for "Reporting issues only occurring in older kernel version lines" diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst index e6d23f117308..00a1d4fa3f9e 100644 --- a/Documentation/core-api/dma-api.rst +++ b/Documentation/core-api/dma-api.rst @@ -565,6 +565,16 @@ dma_alloc_pages(). page must be the pointer returned by dma_alloc_pages(). :: + int + dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, + size_t size, struct page *page) + +Map an allocation returned from dma_alloc_pages() into a user address space. +dev and size must be the same as those passed into dma_alloc_pages(). +page must be the pointer returned by dma_alloc_pages(). + +:: + void * + dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, @@ -586,6 +596,84 @@ dma_alloc_noncoherent(). :: + struct sg_table * + dma_alloc_noncontiguous(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs); + +This routine allocates <size> bytes of non-coherent and possibly non-contiguous +memory. It returns a pointer to struct sg_table that describes the allocated +and DMA mapped memory, or NULL if the allocation failed. The resulting memory +can be used for anything a struct page mapped into a scatterlist is suitable for. + +The returned sg_table is guaranteed to have a single DMA-mapped segment as +indicated by sgt->nents, but it might have multiple CPU side segments as +indicated by sgt->orig_nents. + +The dir parameter specifies whether data is read and/or written by the device, +see dma_map_single() for details. + +The gfp parameter allows the caller to specify the ``GFP_`` flags (see +kmalloc()) for the allocation, but rejects flags used to specify a memory +zone such as GFP_DMA or GFP_HIGHMEM. + +The attrs argument must be either 0 or DMA_ATTR_ALLOC_SINGLE_PAGES.
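A sketch of how a driver might use this allocator together with the vmap, sync, and free helpers documented just below; the struct device pointer and buffer size are assumed to be supplied by the caller::

  /* Hypothetical driver helper: allocate a non-contiguous DMA buffer,
   * fill it through a kernel vmap, and hand ownership to the device
   * for a device-read transfer (DMA_TO_DEVICE).
   */
  #include <linux/dma-mapping.h>
  #include <linux/highmem.h>
  #include <linux/string.h>

  static int example_fill_and_send(struct device *dev, size_t size)
  {
          struct sg_table *sgt;
          void *vaddr;

          sgt = dma_alloc_noncontiguous(dev, size, DMA_TO_DEVICE,
                                        GFP_KERNEL, 0);
          if (!sgt)
                  return -ENOMEM;

          vaddr = dma_vmap_noncontiguous(dev, size, sgt);
          if (!vaddr) {
                  dma_free_noncontiguous(dev, size, sgt, DMA_TO_DEVICE);
                  return -ENOMEM;
          }

          memset(vaddr, 0xff, size);             /* CPU fills the buffer */
          flush_kernel_vmap_range(vaddr, size);  /* keep the vmap coherent */

          /* Hand ownership to the device before starting the transfer. */
          dma_sync_sgtable_for_device(dev, sgt, DMA_TO_DEVICE);
          /* ... program the device using the single segment at sgt->sgl ... */

          dma_vunmap_noncontiguous(dev, vaddr);
          dma_free_noncontiguous(dev, size, sgt, DMA_TO_DEVICE);
          return 0;
  }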
+ +Before giving the memory to the device, dma_sync_sgtable_for_device() needs +to be called, and before reading memory written by the device, +dma_sync_sgtable_for_cpu(), just like for streaming DMA mappings that are +reused. + +:: + + void + dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, + enum dma_data_direction dir) + +Free memory previously allocated using dma_alloc_noncontiguous(). dev, size, +and dir must all be the same as those passed into dma_alloc_noncontiguous(). +sgt must be the pointer returned by dma_alloc_noncontiguous(). + +:: + + void * + dma_vmap_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt) + +Return a contiguous kernel mapping for an allocation returned from +dma_alloc_noncontiguous(). dev and size must be the same as those passed into +dma_alloc_noncontiguous(). sgt must be the pointer returned by +dma_alloc_noncontiguous(). + +Once a non-contiguous allocation is mapped using this function, the +flush_kernel_vmap_range() and invalidate_kernel_vmap_range() APIs must be used +to manage the coherency between the kernel mapping, the device and user space +mappings (if any). + +:: + + void + dma_vunmap_noncontiguous(struct device *dev, void *vaddr) + +Unmap a kernel mapping returned by dma_vmap_noncontiguous(). dev must be the +same as the one passed into dma_alloc_noncontiguous(). vaddr must be the pointer +returned by dma_vmap_noncontiguous(). + + +:: + + int + dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, + size_t size, struct sg_table *sgt) + +Map an allocation returned from dma_alloc_noncontiguous() into a user address +space. dev and size must be the same as those passed into +dma_alloc_noncontiguous(). sgt must be the pointer returned by +dma_alloc_noncontiguous(). + +:: + int dma_get_cache_alignment(void) diff --git a/Documentation/core-api/irq/irq-domain.rst b/Documentation/core-api/irq/irq-domain.rst index a77c24c27f7b..8214e215a8bf 100644 --- a/Documentation/core-api/irq/irq-domain.rst +++ b/Documentation/core-api/irq/irq-domain.rst @@ -42,10 +42,10 @@ irq_domain usage ================ An interrupt controller driver creates and registers an irq_domain by -calling one of the irq_domain_add_*() functions (each mapping method -has a different allocator function, more on that later). The function -will return a pointer to the irq_domain on success. The caller must -provide the allocator function with an irq_domain_ops structure. +calling one of the irq_domain_add_*() or irq_domain_create_*() functions +(each mapping method has a different allocator function, more on that later). +The function will return a pointer to the irq_domain on success. The caller +must provide the allocator function with an irq_domain_ops structure. In most cases, the irq_domain will begin empty without any mappings between hwirq and IRQ numbers. Mappings are added to the irq_domain @@ -147,6 +147,7 @@ Legacy irq_domain_add_simple() irq_domain_add_legacy() irq_domain_add_legacy_isa() + irq_domain_create_simple() irq_domain_create_legacy() The Legacy mapping is a special case for drivers that already have a @@ -169,13 +170,13 @@ supported. For example, ISA controllers would use the legacy map for mapping Linux IRQs 0-15 so that existing ISA drivers get the correct IRQ numbers. -Most users of legacy mappings should use irq_domain_add_simple() which -will use a legacy domain only if an IRQ range is supplied by the -system and will otherwise use a linear domain mapping.
The semantics -of this call are such that if an IRQ range is specified then +Most users of legacy mappings should use irq_domain_add_simple() or +irq_domain_create_simple() which will use a legacy domain only if an IRQ range +is supplied by the system and will otherwise use a linear domain mapping. +The semantics of this call are such that if an IRQ range is specified then descriptors will be allocated on-the-fly for it, and if no range is -specified it will fall through to irq_domain_add_linear() which means -*no* irq descriptors will be allocated. +specified it will fall through to irq_domain_add_linear() or +irq_domain_create_linear() which means *no* irq descriptors will be allocated. A typical use case for simple domains is where an irqchip provider is supporting both dynamic and static IRQ assignments. @@ -186,6 +187,7 @@ that the driver using the simple domain call irq_create_mapping() before any irq_find_mapping() since the latter will actually work for the static IRQ assignment case. +irq_domain_add_simple() and irq_domain_create_simple() as well as irq_domain_add_legacy() and irq_domain_create_legacy() are functionally equivalent, except that the first argument is different - the former accepts an Open Firmware specific 'struct device_node', while the latter diff --git a/Documentation/core-api/symbol-namespaces.rst b/Documentation/core-api/symbol-namespaces.rst index 9b76337f6756..5ad9e0abe42c 100644 --- a/Documentation/core-api/symbol-namespaces.rst +++ b/Documentation/core-api/symbol-namespaces.rst @@ -43,14 +43,14 @@ exporting of kernel symbols to the kernel symbol table, variants of these are available to export symbols into a certain namespace: EXPORT_SYMBOL_NS() and EXPORT_SYMBOL_NS_GPL(). They take one additional argument: the namespace. Please note that due to macro expansion that argument needs to be a -preprocessor symbol. E.g. to export the symbol `usb_stor_suspend` into the -namespace `USB_STORAGE`, use:: +preprocessor symbol. E.g. to export the symbol ``usb_stor_suspend`` into the +namespace ``USB_STORAGE``, use:: EXPORT_SYMBOL_NS(usb_stor_suspend, USB_STORAGE); -The corresponding ksymtab entry struct `kernel_symbol` will have the member -`namespace` set accordingly. A symbol that is exported without a namespace will -refer to `NULL`. There is no default namespace if none is defined. `modpost` +The corresponding ksymtab entry struct ``kernel_symbol`` will have the member +``namespace`` set accordingly. A symbol that is exported without a namespace will +refer to ``NULL``. There is no default namespace if none is defined. ``modpost`` and kernel/module.c make use of the namespace at build time or module load time, respectively. @@ -64,7 +64,7 @@ and EXPORT_SYMBOL_GPL() macro expansions that do not specify a namespace. There are multiple ways of specifying this define and which one to use depends on the subsystem and the maintainer's preference. The first option -is to define the default namespace in the `Makefile` of the subsystem. E.g. to +is to define the default namespace in the ``Makefile`` of the subsystem. E.g. to export all symbols defined in usb-common into the namespace USB_COMMON, add a line like this to drivers/usb/common/Makefile:: @@ -96,7 +96,7 @@ using a statement like:: MODULE_IMPORT_NS(USB_STORAGE); -This will create a `modinfo` tag in the module for each imported namespace. +This will create a ``modinfo`` tag in the module for each imported namespace.
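A compact sketch of the two sides described in this section, one module exporting usb_stor_suspend into the USB_STORAGE namespace and another importing it; the trivial function body and module boilerplate are illustrative only::

  /* provider.c -- exports a symbol into the USB_STORAGE namespace.
   * The empty function body is a placeholder for illustration.
   */
  #include <linux/module.h>

  int usb_stor_suspend(void)
  {
          return 0;
  }
  EXPORT_SYMBOL_NS(usb_stor_suspend, USB_STORAGE);

  MODULE_LICENSE("GPL");

  /* consumer.c -- must import the namespace so the symbol resolves
   * at module load time.
   */
  #include <linux/module.h>

  extern int usb_stor_suspend(void);

  MODULE_IMPORT_NS(USB_STORAGE);

  static int __init consumer_init(void)
  {
          return usb_stor_suspend();
  }
  module_init(consumer_init);

  MODULE_LICENSE("GPL");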
This has the side effect, that the imported namespaces of a module can be inspected with modinfo:: @@ -113,7 +113,7 @@ metadata definitions like MODULE_AUTHOR() or MODULE_LICENSE(). Refer to section 4. Loading Modules that use namespaced Symbols ============================================== -At module loading time (e.g. `insmod`), the kernel will check each symbol +At module loading time (e.g. ``insmod``), the kernel will check each symbol referenced from the module for its availability and whether the namespace it might be exported to has been imported by the module. The default behaviour of the kernel is to reject loading modules that don't specify sufficient imports. @@ -138,19 +138,19 @@ missing imports. Fixing missing imports can be done with:: A typical scenario for module authors would be:: - write code that depends on a symbol from a not imported namespace - - `make` + - ``make`` - notice the warning of modpost telling about a missing import - - run `make nsdeps` to add the import to the correct code location + - run ``make nsdeps`` to add the import to the correct code location For subsystem maintainers introducing a namespace, the steps are very similar. -Again, `make nsdeps` will eventually add the missing namespace imports for +Again, ``make nsdeps`` will eventually add the missing namespace imports for in-tree modules:: - move or add symbols to a namespace (e.g. with EXPORT_SYMBOL_NS()) - - `make` (preferably with an allmodconfig to cover all in-kernel + - ``make`` (preferably with an allmodconfig to cover all in-kernel modules) - notice the warning of modpost telling about a missing import - - run `make nsdeps` to add the import to the correct code location + - run ``make nsdeps`` to add the import to the correct code location You can also run nsdeps for external module builds. 
A typical usage is:: diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml index 57324a5f0271..a1d5a32660e0 100644 --- a/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml +++ b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml @@ -109,7 +109,7 @@ required: - resets - ddc -unevaluatedProperties: false +additionalProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/display/renesas,du.yaml b/Documentation/devicetree/bindings/display/renesas,du.yaml index 552a99ce4f12..121596f106da 100644 --- a/Documentation/devicetree/bindings/display/renesas,du.yaml +++ b/Documentation/devicetree/bindings/display/renesas,du.yaml @@ -51,6 +51,9 @@ properties: resets: true reset-names: true + power-domains: + maxItems: 1 + ports: $ref: /schemas/graph.yaml#/properties/port description: | diff --git a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml index 2e66840a78fe..e302147e53c6 100644 --- a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml +++ b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml @@ -20,6 +20,7 @@ properties: compatible: enum: - qcom,sdm845-gpi-dma + - qcom,sm8150-gpi-dma reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml b/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml new file mode 100644 index 000000000000..5fe19fa5f67c --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/gpio/fairchild,74hc595.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Generic 8-bit shift register + +maintainers: + - Maxime Ripard <[email protected]> + +properties: + compatible: + enum: + - fairchild,74hc595 + - nxp,74lvc594 + + reg: + maxItems: 1 + + gpio-controller: true + + '#gpio-cells': + description: + The second cell is only used to specify the GPIO polarity. + const: 2 + + registers-number: + description: Number of daisy-chained shift registers + + enable-gpios: + description: GPIO connected to the OE (Output Enable) pin. + maxItems: 1 + + spi-max-frequency: true + +patternProperties: + "^(hog-[0-9]+|.+-hog(-[0-9]+)?)$": + type: object + + properties: + gpio-hog: true + gpios: true + output-high: true + output-low: true + line-name: true + + required: + - gpio-hog + - gpios + + additionalProperties: false + +required: + - compatible + - reg + - gpio-controller + - '#gpio-cells' + - registers-number + +additionalProperties: false + +examples: + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + + gpio5: gpio5@0 { + compatible = "fairchild,74hc595"; + reg = <0>; + gpio-controller; + #gpio-cells = <2>; + registers-number = <4>; + spi-max-frequency = <100000>; + }; + }; diff --git a/Documentation/devicetree/bindings/gpio/gpio-74x164.txt b/Documentation/devicetree/bindings/gpio/gpio-74x164.txt deleted file mode 100644 index 2a97553d8d76..000000000000 --- a/Documentation/devicetree/bindings/gpio/gpio-74x164.txt +++ /dev/null @@ -1,27 +0,0 @@ -* Generic 8-bits shift register GPIO driver - -Required properties: -- compatible: Should contain one of the following: - "fairchild,74hc595" - "nxp,74lvc594" -- reg : chip select number -- gpio-controller : Marks the device node as a gpio controller. -- #gpio-cells : Should be two. 
The first cell is the pin number and - the second cell is used to specify the gpio polarity: - 0 = active high - 1 = active low -- registers-number: Number of daisy-chained shift registers - -Optional properties: -- enable-gpios: GPIO connected to the OE (Output Enable) pin. - -Example: - -gpio5: gpio5@0 { - compatible = "fairchild,74hc595"; - reg = <0>; - gpio-controller; - #gpio-cells = <2>; - registers-number = <4>; - spi-max-frequency = <100000>; -}; diff --git a/Documentation/devicetree/bindings/gpio/realtek,otto-gpio.yaml b/Documentation/devicetree/bindings/gpio/realtek,otto-gpio.yaml new file mode 100644 index 000000000000..100f20cebd76 --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/realtek,otto-gpio.yaml @@ -0,0 +1,78 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/gpio/realtek,otto-gpio.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Realtek Otto GPIO controller + +maintainers: + - Sander Vanheule <[email protected]> + - Bert Vermeulen <[email protected]> + +description: | + Realtek's GPIO controller on their MIPS switch SoCs (Otto platform) consists + of two banks of 32 GPIOs. These GPIOs can generate edge-triggered interrupts. + Each bank's interrupts are cascaded into one interrupt line on the parent + interrupt controller, if provided. + This binding allows defining a single bank in the devicetree. The interrupt + controller is not supported on the fallback compatible name, which only + allows for GPIO port use. + +properties: + $nodename: + pattern: "^gpio@[0-9a-f]+$" + + compatible: + items: + - enum: + - realtek,rtl8380-gpio + - realtek,rtl8390-gpio + - const: realtek,otto-gpio + + reg: + maxItems: 1 + + "#gpio-cells": + const: 2 + + gpio-controller: true + + ngpios: + minimum: 1 + maximum: 32 + + interrupt-controller: true + + "#interrupt-cells": + const: 2 + + interrupts: + maxItems: 1 + +required: + - compatible + - reg + - "#gpio-cells" + - gpio-controller + +additionalProperties: false + +dependencies: + interrupt-controller: [ interrupts ] + +examples: + - | + gpio@3500 { + compatible = "realtek,rtl8380-gpio", "realtek,otto-gpio"; + reg = <0x3500 0x1c>; + gpio-controller; + #gpio-cells = <2>; + ngpios = <24>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&rtlintc>; + interrupts = <23>; + }; + +...
diff --git a/Documentation/devicetree/bindings/gpio/rockchip,gpio-bank.yaml b/Documentation/devicetree/bindings/gpio/rockchip,gpio-bank.yaml new file mode 100644 index 000000000000..d993e002cebe --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/rockchip,gpio-bank.yaml @@ -0,0 +1,82 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/gpio/rockchip,gpio-bank.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Rockchip GPIO bank + +maintainers: + - Heiko Stuebner <[email protected]> + +properties: + compatible: + enum: + - rockchip,gpio-bank + - rockchip,rk3188-gpio-bank0 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + maxItems: 1 + + gpio-controller: true + + "#gpio-cells": + const: 2 + + interrupt-controller: true + + "#interrupt-cells": + const: 2 + +required: + - compatible + - reg + - interrupts + - clocks + - gpio-controller + - "#gpio-cells" + - interrupt-controller + - "#interrupt-cells" + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + pinctrl: pinctrl { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + gpio0: gpio@2000a000 { + compatible = "rockchip,rk3188-gpio-bank0"; + reg = <0x2000a000 0x100>; + interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk_gates8 9>; + + gpio-controller; + #gpio-cells = <2>; + + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpio1: gpio@2003c000 { + compatible = "rockchip,gpio-bank"; + reg = <0x2003c000 0x100>; + interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk_gates8 10>; + + gpio-controller; + #gpio-cells = <2>; + + interrupt-controller; + #interrupt-cells = <2>; + }; + }; diff --git a/Documentation/devicetree/bindings/hwlock/sirf,hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/sirf,hwspinlock.txt deleted file mode 100644 index 9bb1240a68e0..000000000000 --- a/Documentation/devicetree/bindings/hwlock/sirf,hwspinlock.txt +++ /dev/null @@ -1,28 +0,0 @@ -SIRF Hardware spinlock device Binding ------------------------------------------------ - -Required properties : -- compatible : shall contain only one of the following: - "sirf,hwspinlock" - -- reg : the register address of hwspinlock - -- #hwlock-cells : hwlock users only use the hwlock id to represent a specific - hwlock, so the number of cells should be <1> here. - -Please look at the generic hwlock binding for usage information for consumers, -"Documentation/devicetree/bindings/hwlock/hwlock.txt" - -Example of hwlock provider: - hwlock { - compatible = "sirf,hwspinlock"; - reg = <0x13240000 0x00010000>; - #hwlock-cells = <1>; - }; - -Example of hwlock users: - node { - ... - hwlocks = <&hwlock 2>; - ... 
- }; diff --git a/Documentation/devicetree/bindings/interrupt-controller/idt,32434-pic.yaml b/Documentation/devicetree/bindings/interrupt-controller/idt,32434-pic.yaml index df5d8d1ead70..160ff4b07cac 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/idt,32434-pic.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/idt,32434-pic.yaml @@ -22,6 +22,9 @@ properties: reg: maxItems: 1 + interrupts: + maxItems: 1 + interrupt-controller: true required: @@ -29,6 +32,7 @@ required: - compatible - reg - interrupt-controller + - interrupts additionalProperties: false diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml index fe7c4cbfe4ba..dd1a5ce5896c 100644 --- a/Documentation/devicetree/bindings/media/renesas,vin.yaml +++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml @@ -193,23 +193,35 @@ required: - interrupts - clocks - power-domains - - resets - -if: - properties: - compatible: - contains: - enum: - - renesas,vin-r8a7778 - - renesas,vin-r8a7779 - - renesas,rcar-gen2-vin -then: - required: - - port -else: - required: - - renesas,id - - ports + +allOf: + - if: + not: + properties: + compatible: + contains: + enum: + - renesas,vin-r8a7778 + - renesas,vin-r8a7779 + then: + required: + - resets + + - if: + properties: + compatible: + contains: + enum: + - renesas,vin-r8a7778 + - renesas,vin-r8a7779 + - renesas,rcar-gen2-vin + then: + required: + - port + else: + required: + - renesas,id + - ports additionalProperties: false diff --git a/Documentation/devicetree/bindings/mtd/tango-nand.txt b/Documentation/devicetree/bindings/mtd/tango-nand.txt deleted file mode 100644 index 91c8420241af..000000000000 --- a/Documentation/devicetree/bindings/mtd/tango-nand.txt +++ /dev/null @@ -1,38 +0,0 @@ -Sigma Designs Tango4 NAND Flash Controller (NFC) - -Required properties: - -- compatible: "sigma,smp8758-nand" -- reg: address/size of nfc_reg, nfc_mem, and pbus_reg -- dmas: reference to the DMA channel used by the controller -- dma-names: "rxtx" -- clocks: reference to the system clock -- #address-cells: <1> -- #size-cells: <0> - -Children nodes represent the available NAND chips. -See Documentation/devicetree/bindings/mtd/nand-controller.yaml for generic bindings. 
- -Example: - - nandc: nand-controller@2c000 { - compatible = "sigma,smp8758-nand"; - reg = <0x2c000 0x30>, <0x2d000 0x800>, <0x20000 0x1000>; - dmas = <&dma0 3>; - dma-names = "rxtx"; - clocks = <&clkgen SYS_CLK>; - #address-cells = <1>; - #size-cells = <0>; - - nand@0 { - reg = <0>; /* CS0 */ - nand-ecc-strength = <14>; - nand-ecc-step-size = <1024>; - }; - - nand@1 { - reg = <1>; /* CS1 */ - nand-ecc-strength = <14>; - nand-ecc-step-size = <1024>; - }; - }; diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml index fe72a5598add..005868f703a6 100644 --- a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml +++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml @@ -51,12 +51,12 @@ properties: clocks: minItems: 1 - maxItems: 2 items: - description: AVB functional clock - description: Optional TXC reference clock clock-names: + minItems: 1 items: - const: fck - const: refclk diff --git a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt deleted file mode 100644 index d6796ef54ea1..000000000000 --- a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt +++ /dev/null @@ -1,43 +0,0 @@ -HiSilicon Hip05 and Hip06 PCIe host bridge DT description - -HiSilicon PCIe host controller is based on the Synopsys DesignWare PCI core. -It shares common functions with the PCIe DesignWare core driver and inherits -common properties defined in -Documentation/devicetree/bindings/pci/designware-pcie.txt. - -Additional properties are described here: - -Required properties -- compatible: Should contain "hisilicon,hip05-pcie" or "hisilicon,hip06-pcie". -- reg: Should contain rc_dbi, config registers location and length. -- reg-names: Must include the following entries: - "rc_dbi": controller configuration registers; - "config": PCIe configuration space registers. -- msi-parent: Should be its_pcie which is an ITS receiving MSI interrupts. -- port-id: Should be 0, 1, 2 or 3. - -Optional properties: -- status: Either "ok" or "disabled". -- dma-coherent: Present if DMA operations are coherent. 
- -Hip05 Example (note that Hip06 is the same except compatible): - pcie@b0080000 { - compatible = "hisilicon,hip05-pcie", "snps,dw-pcie"; - reg = <0 0xb0080000 0 0x10000>, <0x220 0x00000000 0 0x2000>; - reg-names = "rc_dbi", "config"; - bus-range = <0 15>; - msi-parent = <&its_pcie>; - #address-cells = <3>; - #size-cells = <2>; - device_type = "pci"; - dma-coherent; - ranges = <0x82000000 0 0x00000000 0x220 0x00000000 0 0x10000000>; - num-lanes = <8>; - port-id = <1>; - #interrupt-cells = <1>; - interrupt-map-mask = <0xf800 0 0 7>; - interrupt-map = <0x0 0 0 1 &mbigen_pcie 1 10 - 0x0 0 0 2 &mbigen_pcie 2 11 - 0x0 0 0 3 &mbigen_pcie 3 12 - 0x0 0 0 4 &mbigen_pcie 4 13>; - }; diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml new file mode 100644 index 000000000000..e7b1f9892da4 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml @@ -0,0 +1,181 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pci/mediatek-pcie-gen3.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Gen3 PCIe controller on MediaTek SoCs + +maintainers: + - Jianjun Wang <[email protected]> + +description: |+ + PCIe Gen3 MAC controller for MediaTek SoCs; it supports Gen3 speed + and is compatible with Gen2 and Gen1 speeds. + + This PCIe controller supports up to 256 MSI vectors; the MSI hardware + block diagram is as follows: + + +-----+ + | GIC | + +-----+ + ^ + | + port->irq + | + +-+-+-+-+-+-+-+-+ + |0|1|2|3|4|5|6|7| (PCIe intc) + +-+-+-+-+-+-+-+-+ + ^ ^ ^ + | | ... | + +-------+ +------+ +-----------+ + | | | + +-+-+---+--+--+ +-+-+---+--+--+ +-+-+---+--+--+ + |0|1|...|30|31| |0|1|...|30|31| |0|1|...|30|31| (MSI sets) + +-+-+---+--+--+ +-+-+---+--+--+ +-+-+---+--+--+ + ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ + | | | | | | | | | | | | (MSI vectors) + | | | | | | | | | | | | + + (MSI SET0) (MSI SET1) ... (MSI SET7) + + With 256 MSI vectors supported, the MSI vectors are composed of 8 sets, + each set has its own address for MSI messages, and supports 32 MSI vectors + to generate interrupts. + +allOf: + - $ref: /schemas/pci/pci-bus.yaml# + +properties: + compatible: + const: mediatek,mt8192-pcie + + reg: + maxItems: 1 + + reg-names: + items: + - const: pcie-mac + + interrupts: + maxItems: 1 + + ranges: + minItems: 1 + maxItems: 8 + + resets: + minItems: 1 + maxItems: 2 + + reset-names: + minItems: 1 + maxItems: 2 + items: + - const: phy + - const: mac + + clocks: + maxItems: 6 + + clock-names: + items: + - const: pl_250m + - const: tl_26m + - const: tl_96m + - const: tl_32k + - const: peri_26m + - const: top_133m + + assigned-clocks: + maxItems: 1 + + assigned-clock-parents: + maxItems: 1 + + phys: + maxItems: 1 + + '#interrupt-cells': + const: 1 + + interrupt-controller: + description: Interrupt controller node for handling legacy PCI interrupts.
+ type: object + properties: + '#address-cells': + const: 0 + '#interrupt-cells': + const: 1 + interrupt-controller: true + + required: + - '#address-cells' + - '#interrupt-cells' + - interrupt-controller + + additionalProperties: false + +required: + - compatible + - reg + - reg-names + - interrupts + - ranges + - clocks + - '#interrupt-cells' + - interrupt-controller + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/interrupt-controller/irq.h> + + bus { + #address-cells = <2>; + #size-cells = <2>; + + pcie: pcie@11230000 { + compatible = "mediatek,mt8192-pcie"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + reg = <0x00 0x11230000 0x00 0x4000>; + reg-names = "pcie-mac"; + interrupts = <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH 0>; + bus-range = <0x00 0xff>; + ranges = <0x82000000 0x00 0x12000000 0x00 + 0x12000000 0x00 0x1000000>; + clocks = <&infracfg 44>, + <&infracfg 40>, + <&infracfg 43>, + <&infracfg 97>, + <&infracfg 99>, + <&infracfg 111>; + clock-names = "pl_250m", "tl_26m", "tl_96m", + "tl_32k", "peri_26m", "top_133m"; + assigned-clocks = <&topckgen 50>; + assigned-clock-parents = <&topckgen 91>; + + phys = <&pciephy>; + phy-names = "pcie-phy"; + + resets = <&infracfg_rst 2>, + <&infracfg_rst 3>; + reset-names = "phy", "mac"; + + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0x7>; + interrupt-map = <0 0 0 1 &pcie_intc 0>, + <0 0 0 2 &pcie_intc 1>, + <0 0 0 3 &pcie_intc 2>, + <0 0 0 4 &pcie_intc 3>; + pcie_intc: interrupt-controller { + #address-cells = <0>; + #interrupt-cells = <1>; + interrupt-controller; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml index 4a2bcc0158e2..8fdfbc763d70 100644 --- a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml +++ b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml @@ -17,6 +17,7 @@ allOf: properties: compatible: oneOf: + - const: renesas,pcie-r8a7779 # R-Car H1 - items: - enum: - renesas,pcie-r8a7742 # RZ/G1H @@ -74,7 +75,16 @@ required: - clocks - clock-names - power-domains - - resets + +if: + not: + properties: + compatible: + contains: + const: renesas,pcie-r8a7779 +then: + required: + - resets unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml b/Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml new file mode 100644 index 000000000000..b03cbb9b6602 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml @@ -0,0 +1,113 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pci/sifive,fu740-pcie.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: SiFive FU740 PCIe host controller + +description: |+ + SiFive FU740 PCIe host controller is based on the Synopsys DesignWare + PCI core. It shares common features with the PCIe DesignWare core and + inherits common properties defined in + Documentation/devicetree/bindings/pci/designware-pcie.txt. 
+ +maintainers: + - Paul Walmsley <[email protected]> + - Greentime Hu <[email protected]> + +allOf: + - $ref: /schemas/pci/pci-bus.yaml# + +properties: + compatible: + const: sifive,fu740-pcie + + reg: + maxItems: 3 + + reg-names: + items: + - const: dbi + - const: config + - const: mgmt + + num-lanes: + const: 8 + + msi-parent: true + + interrupt-names: + items: + - const: msi + - const: inta + - const: intb + - const: intc + - const: intd + + resets: + description: A phandle to the PCIe power up reset line. + maxItems: 1 + + pwren-gpios: + description: Should specify the GPIO for controlling the PCI bus device power on. + maxItems: 1 + + reset-gpios: + maxItems: 1 + +required: + - dma-coherent + - num-lanes + - interrupts + - interrupt-names + - interrupt-parent + - interrupt-map-mask + - interrupt-map + - clock-names + - clocks + - resets + - pwren-gpios + - reset-gpios + +unevaluatedProperties: false + +examples: + - | + bus { + #address-cells = <2>; + #size-cells = <2>; + #include <dt-bindings/clock/sifive-fu740-prci.h> + + pcie@e00000000 { + compatible = "sifive,fu740-pcie"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0xe 0x00000000 0x0 0x80000000>, + <0xd 0xf0000000 0x0 0x10000000>, + <0x0 0x100d0000 0x0 0x1000>; + reg-names = "dbi", "config", "mgmt"; + device_type = "pci"; + dma-coherent; + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x60080000 0x0 0x60080000 0x0 0x10000>, /* I/O */ + <0x82000000 0x0 0x60090000 0x0 0x60090000 0x0 0xff70000>, /* mem */ + <0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x1000000>, /* mem */ + <0xc3000000 0x20 0x00000000 0x20 0x00000000 0x20 0x00000000>; /* mem prefetchable */ + num-lanes = <0x8>; + interrupts = <56>, <57>, <58>, <59>, <60>, <61>, <62>, <63>, <64>; + interrupt-names = "msi", "inta", "intb", "intc", "intd"; + interrupt-parent = <&plic0>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &plic0 57>, + <0x0 0x0 0x0 0x2 &plic0 58>, + <0x0 0x0 0x0 0x3 &plic0 59>, + <0x0 0x0 0x0 0x4 &plic0 60>; + clock-names = "pcie_aux"; + clocks = <&prci PRCI_CLK_PCIE_AUX>; + resets = <&prci 4>; + pwren-gpios = <&gpio 5 0>; + reset-gpios = <&gpio 8 0>; + }; + }; diff --git a/Documentation/devicetree/bindings/pci/tango-pcie.txt b/Documentation/devicetree/bindings/pci/tango-pcie.txt deleted file mode 100644 index 244683836a79..000000000000 --- a/Documentation/devicetree/bindings/pci/tango-pcie.txt +++ /dev/null @@ -1,29 +0,0 @@ -Sigma Designs Tango PCIe controller - -Required properties: - -- compatible: "sigma,smp8759-pcie" -- reg: address/size of PCI configuration space, address/size of register area -- bus-range: defined by size of PCI configuration space -- device_type: "pci" -- #size-cells: <2> -- #address-cells: <3> -- msi-controller -- ranges: translation from system to bus addresses -- interrupts: spec for misc interrupts, spec for MSI - -Example: - - pcie@2e000 { - compatible = "sigma,smp8759-pcie"; - reg = <0x50000000 0x400000>, <0x2e000 0x100>; - bus-range = <0 3>; - device_type = "pci"; - #size-cells = <2>; - #address-cells = <3>; - msi-controller; - ranges = <0x02000000 0x0 0x00400000 0x50400000 0x0 0x3c00000>; - interrupts = - <54 IRQ_TYPE_LEVEL_HIGH>, /* misc interrupts */ - <55 IRQ_TYPE_LEVEL_HIGH>; /* MSI */ - }; diff --git a/Documentation/devicetree/bindings/pci/ti,j721e-pci-ep.yaml b/Documentation/devicetree/bindings/pci/ti,j721e-pci-ep.yaml index d06f0c4464c6..aed437dac363 100644 --- a/Documentation/devicetree/bindings/pci/ti,j721e-pci-ep.yaml +++ 
b/Documentation/devicetree/bindings/pci/ti,j721e-pci-ep.yaml @@ -16,12 +16,14 @@ properties: compatible: oneOf: - - description: PCIe EP controller in J7200 + - const: ti,j721e-pcie-ep + - description: PCIe EP controller in AM64 items: - - const: ti,j7200-pcie-ep + - const: ti,am64-pcie-ep - const: ti,j721e-pcie-ep - - description: PCIe EP controller in J721E + - description: PCIe EP controller in J7200 items: + - const: ti,j7200-pcie-ep - const: ti,j721e-pcie-ep reg: @@ -66,7 +68,6 @@ required: - power-domains - clocks - clock-names - - dma-coherent - max-functions - phys - phy-names diff --git a/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml b/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml index 0880a613ece6..cc900202df29 100644 --- a/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml +++ b/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml @@ -16,12 +16,14 @@ properties: compatible: oneOf: - - description: PCIe controller in J7200 + - const: ti,j721e-pcie-host + - description: PCIe controller in AM64 items: - - const: ti,j7200-pcie-host + - const: ti,am64-pcie-host - const: ti,j721e-pcie-host - - description: PCIe controller in J721E + - description: PCIe controller in J7200 items: + - const: ti,j7200-pcie-host - const: ti,j721e-pcie-host reg: @@ -46,12 +48,17 @@ properties: maxItems: 1 clocks: - maxItems: 1 - description: clock-specifier to represent input to the PCIe + minItems: 1 + maxItems: 2 + description: |+ + clock-specifier to represent the input to the PCIe (one item). + A second item, if present, represents the reference clock to the connector. clock-names: + minItems: 1 items: - const: fck + - const: pcie_refclk vendor-id: const: 0x104c @@ -62,6 +69,8 @@ properties: - const: 0xb00d - items: - const: 0xb00f + - items: + - const: 0xb010 msi-map: true @@ -78,7 +87,6 @@ required: - vendor-id - device-id - msi-map - - dma-coherent - dma-ranges - ranges - reset-gpios diff --git a/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt b/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt index 01bf7fdf4c19..2d677e90a7e2 100644 --- a/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt +++ b/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt @@ -33,6 +33,8 @@ Required properties: - #address-cells: specifies the number of cells needed to encode an address. The value must be 0. +Optional properties: +- dma-coherent: present if DMA operations are coherent Example: ++++++++ diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt index 91fab614c381..84c4111293bd 100644 --- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt @@ -51,23 +51,7 @@ Deprecated properties for iomux controller: Use rockchip,grf and rockchip,pmu described above instead. Required properties for gpio sub nodes: - - compatible: "rockchip,gpio-bank" - - reg: register of the gpio bank (different than the iomux registerset) - - interrupts: base interrupt of the gpio bank in the interrupt controller - - clocks: clock that drives this bank - - gpio-controller: identifies the node as a gpio controller and pin bank. - - #gpio-cells: number of cells in GPIO specifier. Since the generic GPIO - binding is used, the amount of cells must be specified as 2. See generic - GPIO binding documentation for description of particular cells.
- - interrupt-controller: identifies the controller node as interrupt-parent. - - #interrupt-cells: the value of this property should be 2 and the interrupt - cells should use the standard two-cell scheme described in - bindings/interrupt-controller/interrupts.txt - -Deprecated properties for gpio sub nodes: - - compatible: "rockchip,rk3188-gpio-bank0" - - reg: second element: separate pull register for rk3188 bank0, use - rockchip,pmu described above instead +See rockchip,gpio-bank.yaml Required properties for pin configuration node: - rockchip,pins: 3 integers array, represents a group of pins mux and config @@ -128,43 +112,3 @@ uart2: serial@20064000 { pinctrl-names = "default"; pinctrl-0 = <&uart2_xfer>; }; - -Example for rk3188: - - pinctrl@20008000 { - compatible = "rockchip,rk3188-pinctrl"; - rockchip,grf = <&grf>; - rockchip,pmu = <&pmu>; - #address-cells = <1>; - #size-cells = <1>; - ranges; - - gpio0: gpio0@2000a000 { - compatible = "rockchip,rk3188-gpio-bank0"; - reg = <0x2000a000 0x100>; - interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&clk_gates8 9>; - - gpio-controller; - #gpio-cells = <2>; - - interrupt-controller; - #interrupt-cells = <2>; - }; - - gpio1: gpio1@2003c000 { - compatible = "rockchip,gpio-bank"; - reg = <0x2003c000 0x100>; - interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&clk_gates8 10>; - - gpio-controller; - #gpio-cells = <2>; - - interrupt-controller; - #interrupt-cells = <2>; - }; - - ... - - }; diff --git a/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt b/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt deleted file mode 100644 index f70956dea77b..000000000000 --- a/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt +++ /dev/null @@ -1,27 +0,0 @@ -Rockchip PWM controller - -Required properties: - - compatible: should be "rockchip,<name>-pwm" - "rockchip,rk2928-pwm": found on RK29XX,RK3066 and RK3188 SoCs - "rockchip,rk3288-pwm": found on RK3288 SOC - "rockchip,rv1108-pwm", "rockchip,rk3288-pwm": found on RV1108 SoC - "rockchip,vop-pwm": found integrated in VOP on RK3288 SoC - - reg: physical base address and length of the controller's registers - - clocks: See ../clock/clock-bindings.txt - - For older hardware (rk2928, rk3066, rk3188, rk3228, rk3288, rk3399): - - There is one clock that's used both to derive the functional clock - for the device and as the bus clock. - - For newer hardware (rk3328 and future socs): specified by name - - "pwm": This is used to derive the functional clock. - - "pclk": This is the APB bus clock. - - #pwm-cells: must be 2 (rk2928) or 3 (rk3288). See pwm.yaml in this directory - for a description of the cell format. 
- -Example: - - pwm0: pwm@20030000 { - compatible = "rockchip,rk2928-pwm"; - reg = <0x20030000 0x10>; - clocks = <&cru PCLK_PWM01>; - #pwm-cells = <2>; - }; diff --git a/Documentation/devicetree/bindings/pwm/pwm-rockchip.yaml b/Documentation/devicetree/bindings/pwm/pwm-rockchip.yaml new file mode 100644 index 000000000000..5596bee70509 --- /dev/null +++ b/Documentation/devicetree/bindings/pwm/pwm-rockchip.yaml @@ -0,0 +1,100 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pwm/pwm-rockchip.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Rockchip PWM controller + +maintainers: + - Heiko Stuebner <[email protected]> + +properties: + compatible: + oneOf: + - const: rockchip,rk2928-pwm + - const: rockchip,rk3288-pwm + - const: rockchip,rk3328-pwm + - const: rockchip,vop-pwm + - items: + - const: rockchip,rk3036-pwm + - const: rockchip,rk2928-pwm + - items: + - enum: + - rockchip,rk3368-pwm + - rockchip,rk3399-pwm + - rockchip,rv1108-pwm + - const: rockchip,rk3288-pwm + - items: + - enum: + - rockchip,px30-pwm + - rockchip,rk3308-pwm + - const: rockchip,rk3328-pwm + + reg: + maxItems: 1 + + clocks: + minItems: 1 + maxItems: 2 + + clock-names: + maxItems: 2 + + "#pwm-cells": + enum: [2, 3] + description: + Must be 2 (rk2928) or 3 (rk3288 and later). + See pwm.yaml for a description of the cell format. + +required: + - compatible + - reg + - "#pwm-cells" + +if: + properties: + compatible: + contains: + enum: + - rockchip,rk3328-pwm + - rockchip,rv1108-pwm + +then: + properties: + clocks: + items: + - description: Used to derive the functional clock for the device. + - description: Used as the APB bus clock. + + clock-names: + items: + - const: pwm + - const: pclk + + required: + - clocks + - clock-names + +else: + properties: + clocks: + maxItems: 1 + description: + Used both to derive the functional clock + for the device and as the bus clock. 
+ + required: + - clocks + +additionalProperties: false + +examples: + - | + #include <dt-bindings/clock/rk3188-cru-common.h> + pwm0: pwm@20030000 { + compatible = "rockchip,rk2928-pwm"; + reg = <0x20030000 0x10>; + clocks = <&cru PCLK_PWM01>; + #pwm-cells = <2>; + }; diff --git a/Documentation/devicetree/bindings/pwm/toshiba,pwm-visconti.yaml b/Documentation/devicetree/bindings/pwm/toshiba,pwm-visconti.yaml new file mode 100644 index 000000000000..d350f5edfb67 --- /dev/null +++ b/Documentation/devicetree/bindings/pwm/toshiba,pwm-visconti.yaml @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pwm/toshiba,pwm-visconti.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Toshiba Visconti PWM Controller + +maintainers: + - Nobuhiro Iwamatsu <[email protected]> + +properties: + compatible: + items: + - const: toshiba,visconti-pwm + + reg: + maxItems: 1 + + '#pwm-cells': + const: 2 + +required: + - compatible + - reg + - '#pwm-cells' + +additionalProperties: false + +examples: + - | + soc { + #address-cells = <2>; + #size-cells = <2>; + + pwm: pwm@241c0000 { + compatible = "toshiba,visconti-pwm"; + reg = <0 0x241c0000 0 0x1000>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm_mux>; + #pwm-cells = <2>; + }; + }; diff --git a/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml new file mode 100644 index 000000000000..208a628f8d6c --- /dev/null +++ b/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml @@ -0,0 +1,90 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/remoteproc/fsl,imx-rproc.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: NXP i.MX Co-Processor Bindings + +description: + This binding provides support for the ARM Cortex-M4 co-processor found on some NXP i.MX SoCs. + +maintainers: + - Peng Fan <[email protected]> + +properties: + compatible: + enum: + - fsl,imx8mq-cm4 + - fsl,imx8mm-cm4 + - fsl,imx7d-cm4 + - fsl,imx6sx-cm4 + + clocks: + maxItems: 1 + + syscon: + $ref: /schemas/types.yaml#/definitions/phandle + description: + Phandle to syscon block which provides access to the System Reset Controller + + mbox-names: + items: + - const: tx + - const: rx + - const: rxdb + + mboxes: + description: + This property is required only if the rpmsg/virtio functionality is used. + List of <&phandle type channel> - 1 channel for TX, 1 channel for RX, 1 channel for RXDB. + (see mailbox/fsl,mu.yaml) + minItems: 1 + maxItems: 3 + + memory-region: + description: + If present, a phandle for a reserved memory area that is used for the vdev buffer, + resource table, vring region and others used by the remote processor.
+ minItems: 1 + maxItems: 32 + +required: + - compatible + - clocks + - syscon + +additionalProperties: false + +examples: + - | + #include <dt-bindings/clock/imx7d-clock.h> + m4_reserved_sysmem1: cm4@80000000 { + reg = <0x80000000 0x80000>; + }; + + m4_reserved_sysmem2: cm4@81000000 { + reg = <0x81000000 0x80000>; + }; + + imx7d-cm4 { + compatible = "fsl,imx7d-cm4"; + memory-region = <&m4_reserved_sysmem1>, <&m4_reserved_sysmem2>; + syscon = <&src>; + clocks = <&clks IMX7D_ARM_M4_ROOT_CLK>; + }; + + - | + #include <dt-bindings/clock/imx8mm-clock.h> + + imx8mm-cm4 { + compatible = "fsl,imx8mm-cm4"; + clocks = <&clk IMX8MM_CLK_M4_DIV>; + mbox-names = "tx", "rx", "rxdb"; + mboxes = <&mu 0 1 + &mu 1 1 + &mu 3 1>; + memory-region = <&vdev0buffer>, <&vdev0vring0>, <&vdev0vring1>, <&rsc_table>; + syscon = <&src>; + }; +... diff --git a/Documentation/devicetree/bindings/remoteproc/imx-rproc.txt b/Documentation/devicetree/bindings/remoteproc/imx-rproc.txt deleted file mode 100644 index fbcefd965dc4..000000000000 --- a/Documentation/devicetree/bindings/remoteproc/imx-rproc.txt +++ /dev/null @@ -1,33 +0,0 @@ -NXP iMX6SX/iMX7D Co-Processor Bindings ----------------------------------------- - -This binding provides support for ARM Cortex M4 Co-processor found on some -NXP iMX SoCs. - -Required properties: -- compatible Should be one of: - "fsl,imx7d-cm4" - "fsl,imx6sx-cm4" -- clocks Clock for co-processor (See: ../clock/clock-bindings.txt) -- syscon Phandle to syscon block which provide access to - System Reset Controller - -Optional properties: -- memory-region list of phandels to the reserved memory regions. - (See: ../reserved-memory/reserved-memory.txt) - -Example: - m4_reserved_sysmem1: cm4@80000000 { - reg = <0x80000000 0x80000>; - }; - - m4_reserved_sysmem2: cm4@81000000 { - reg = <0x81000000 0x80000>; - }; - - imx7d-cm4 { - compatible = "fsl,imx7d-cm4"; - memory-region = <&m4_reserved_sysmem1>, <&m4_reserved_sysmem2>; - syscon = <&src>; - clocks = <&clks IMX7D_ARM_M4_ROOT_CLK>; - }; diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt index 1c330a8941f9..229f908fd831 100644 --- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt +++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt @@ -18,6 +18,7 @@ on the Qualcomm ADSP Hexagon core. "qcom,sc7180-mpss-pas" "qcom,sdm845-adsp-pas" "qcom,sdm845-cdsp-pas" + "qcom,sdx55-mpss-pas" "qcom,sm8150-adsp-pas" "qcom,sm8150-cdsp-pas" "qcom,sm8150-mpss-pas" @@ -61,6 +62,7 @@ on the Qualcomm ADSP Hexagon core. must be "wdog", "fatal", "ready", "handover", "stop-ack" qcom,qcs404-wcss-pas: qcom,sc7180-mpss-pas: + qcom,sdx55-mpss-pas: qcom,sm8150-mpss-pas: qcom,sm8350-mpss-pas: must be "wdog", "fatal", "ready", "handover", "stop-ack", @@ -128,6 +130,8 @@ on the Qualcomm ADSP Hexagon core. qcom,sm8150-mpss-pas: qcom,sm8350-mpss-pas: must be "cx", "load_state", "mss" + qcom,sdx55-mpss-pas: + must be "cx", "mss" qcom,sm8250-adsp-pas: qcom,sm8350-adsp-pas: qcom,sm8150-slpi-pas: diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt index 7ccd5534b0ae..69c49c7b2cff 100644 --- a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt +++ b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt @@ -9,6 +9,7 @@ on the Qualcomm Hexagon core. 
Definition: must be one of: "qcom,q6v5-pil", "qcom,ipq8074-wcss-pil" + "qcom,qcs404-wcss-pil" "qcom,msm8916-mss-pil", "qcom,msm8974-mss-pil" "qcom,msm8996-mss-pil" @@ -39,6 +40,7 @@ on the Qualcomm Hexagon core. string: qcom,q6v5-pil: qcom,ipq8074-wcss-pil: + qcom,qcs404-wcss-pil: qcom,msm8916-mss-pil: qcom,msm8974-mss-pil: must be "wdog", "fatal", "ready", "handover", "stop-ack" @@ -67,6 +69,11 @@ on the Qualcomm Hexagon core. Definition: The clocks needed depend on the compatible string: qcom,ipq8074-wcss-pil: no clock names required + qcom,qcs404-wcss-pil: + must be "xo", "gcc_abhs_cbcr", "gcc_abhs_cbcr", + "gcc_axim_cbcr", "lcc_ahbfabric_cbc", "tcsr_lcc_cbc", + "lcc_abhs_cbc", "lcc_tcm_slave_cbc", "lcc_abhm_cbc", + "lcc_axim_cbc", "lcc_bcr_sleep" qcom,q6v5-pil: qcom,msm8916-mss-pil: qcom,msm8974-mss-pil: @@ -133,6 +140,14 @@ For the compatible string below the following supplies are required: booting of the Hexagon core For the compatible string below the following supplies are required: + "qcom,qcs404-wcss-pil" +- cx-supply: + Usage: required + Value type: <phandle> + Definition: reference to the regulators to be held on behalf of the + booting of the Hexagon core + +For the compatible string below the following supplies are required: "qcom,msm8996-mss-pil" - pll-supply: Usage: required diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt b/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt index da09c0d79ac0..a83080b8905c 100644 --- a/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt +++ b/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt @@ -34,6 +34,12 @@ on the Qualcomm WCNSS core. Definition: should be "wdog", "fatal", optionally followed by "ready", "handover", "stop-ack" +- firmware-name: + Usage: optional + Value type: <string> + Definition: must list the relative firmware image path for the + WCNSS core. Defaults to "wcnss.mdt". + - vddmx-supply: (deprecated for qcom,pronto-v1/2-pil) - vddcx-supply: (deprecated for qcom,pronto-v1/2-pil) - vddpx-supply: diff --git a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml index a1171dfba024..64afdcfb613d 100644 --- a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml +++ b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml @@ -65,16 +65,23 @@ properties: Unidirectional channel: - from local to remote, where ACK from the remote means that it is ready for shutdown + - description: | + A channel (d) used by the local proc to notify the remote proc that it + has to stop interprocessor communication. + Unidirectional channel: + - from local to remote, where ACK from the remote means that communication + has been stopped on the remote side.
minItems: 1 - maxItems: 3 + maxItems: 4 mbox-names: items: - const: vq0 - const: vq1 - const: shutdown + - const: detach minItems: 1 - maxItems: 3 + maxItems: 4 memory-region: description: diff --git a/Documentation/devicetree/bindings/riscv/microchip.yaml b/Documentation/devicetree/bindings/riscv/microchip.yaml new file mode 100644 index 000000000000..3f981e897126 --- /dev/null +++ b/Documentation/devicetree/bindings/riscv/microchip.yaml @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/riscv/microchip.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Microchip PolarFire SoC-based boards device tree bindings + +maintainers: + - Cyril Jean <[email protected]> + - Lewis Hanly <[email protected]> + +description: + Microchip PolarFire SoC-based boards + +properties: + $nodename: + const: '/' + compatible: + items: + - enum: + - microchip,mpfs-icicle-kit + - const: microchip,mpfs + +additionalProperties: true + +... diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml index f0506a917793..41f57c448621 100644 --- a/Documentation/devicetree/bindings/serial/8250.yaml +++ b/Documentation/devicetree/bindings/serial/8250.yaml @@ -100,11 +100,6 @@ properties: - mediatek,mt7623-btif - const: mediatek,mtk-btif - items: - - enum: - - mediatek,mt7622-btif - - mediatek,mt7623-btif - - const: mediatek,mtk-btif - - items: - const: mrvl,mmp-uart - const: intel,xscale-uart - items: diff --git a/Documentation/devicetree/bindings/thermal/brcm,ns-thermal.txt b/Documentation/devicetree/bindings/thermal/brcm,ns-thermal.txt deleted file mode 100644 index 68e047170039..000000000000 --- a/Documentation/devicetree/bindings/thermal/brcm,ns-thermal.txt +++ /dev/null @@ -1,37 +0,0 @@ -* Broadcom Northstar Thermal - -This binding describes thermal sensor that is part of Northstar's DMU (Device -Management Unit). - -Required properties: -- compatible : Must be "brcm,ns-thermal" -- reg : iomem address range of PVTMON registers -- #thermal-sensor-cells : Should be <0> - -Example: - -thermal: thermal@1800c2c0 { - compatible = "brcm,ns-thermal"; - reg = <0x1800c2c0 0x10>; - #thermal-sensor-cells = <0>; -}; - -thermal-zones { - cpu_thermal: cpu-thermal { - polling-delay-passive = <0>; - polling-delay = <1000>; - coefficients = <(-556) 418000>; - thermal-sensors = <&thermal>; - - trips { - cpu-crit { - temperature = <125000>; - hysteresis = <0>; - type = "critical"; - }; - }; - - cooling-maps { - }; - }; -}; diff --git a/Documentation/devicetree/bindings/thermal/brcm,ns-thermal.yaml b/Documentation/devicetree/bindings/thermal/brcm,ns-thermal.yaml new file mode 100644 index 000000000000..fdeb333e010d --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/brcm,ns-thermal.yaml @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/thermal/brcm,ns-thermal.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Broadcom Northstar Thermal + +maintainers: + - Rafał Miłecki <[email protected]> + +description: + Thermal sensor that is part of Northstar's DMU (Device Management Unit). 
+ +allOf: + - $ref: thermal-sensor.yaml# + +properties: + compatible: + const: brcm,ns-thermal + + reg: + description: PVTMON registers range + maxItems: 1 + + "#thermal-sensor-cells": + const: 0 + +unevaluatedProperties: false + +required: + - reg + +examples: + - | + thermal: thermal@1800c2c0 { + compatible = "brcm,ns-thermal"; + reg = <0x1800c2c0 0x10>; + #thermal-sensor-cells = <0>; + }; + + thermal-zones { + cpu-thermal { + polling-delay-passive = <0>; + polling-delay = <1000>; + coefficients = <(-556) 418000>; + thermal-sensors = <&thermal>; + + trips { + cpu-crit { + temperature = <125000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + }; + }; + }; diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml index 95462e071ab4..0242fd91b622 100644 --- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml +++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml @@ -19,9 +19,15 @@ description: | properties: compatible: oneOf: + - description: msm8960 TSENS based + items: + - enum: + - qcom,ipq8064-tsens + - description: v0.1 of TSENS items: - enum: + - qcom,mdm9607-tsens - qcom,msm8916-tsens - qcom,msm8939-tsens - qcom,msm8974-tsens @@ -43,6 +49,7 @@ properties: - qcom,sdm845-tsens - qcom,sm8150-tsens - qcom,sm8250-tsens + - qcom,sm8350-tsens - const: qcom,tsens-v2 reg: @@ -73,7 +80,9 @@ properties: maxItems: 2 items: - const: calib - - const: calib_sel + - enum: + - calib_backup + - calib_sel "#qcom,sensors": description: @@ -88,12 +97,21 @@ properties: Number of cells required to uniquely identify the thermal sensors. Since we have multiple sensors this is set to 1 +required: + - compatible + - interrupts + - interrupt-names + - "#thermal-sensor-cells" + - "#qcom,sensors" + allOf: - if: properties: compatible: contains: enum: + - qcom,ipq8064-tsens + - qcom,mdm9607-tsens - qcom,msm8916-tsens - qcom,msm8974-tsens - qcom,msm8976-tsens @@ -114,19 +132,44 @@ allOf: interrupt-names: minItems: 2 -required: - - compatible - - reg - - "#qcom,sensors" - - interrupts - - interrupt-names - - "#thermal-sensor-cells" + - if: + properties: + compatible: + contains: + enum: + - qcom,tsens-v0_1 + - qcom,tsens-v1 + - qcom,tsens-v2 + + then: + required: + - reg additionalProperties: false examples: - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + // Example msm8960 based SoC (ipq8064): + gcc: clock-controller { + + /* ...
*/ + + tsens: thermal-sensor { + compatible = "qcom,ipq8064-tsens"; + + nvmem-cells = <&tsens_calib>, <&tsens_calib_backup>; + nvmem-cell-names = "calib", "calib_backup"; + interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "uplow"; + + #qcom,sensors = <11>; + #thermal-sensor-cells = <1>; + }; + }; + + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> // Example 1 (legacy: for pre v1 IP): tsens1: thermal-sensor@900000 { compatible = "qcom,msm8916-tsens", "qcom,tsens-v0_1"; diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml index b33a76eeac4e..f963204e0b16 100644 --- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml +++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml @@ -28,14 +28,7 @@ properties: - renesas,r8a77980-thermal # R-Car V3H - renesas,r8a779a0-thermal # R-Car V3U - reg: - minItems: 2 - maxItems: 4 - items: - - description: TSC1 registers - - description: TSC2 registers - - description: TSC3 registers - - description: TSC4 registers + reg: true interrupts: items: @@ -71,8 +64,25 @@ if: enum: - renesas,r8a779a0-thermal then: + properties: + reg: + minItems: 2 + maxItems: 3 + items: + - description: TSC1 registers + - description: TSC2 registers + - description: TSC3 registers required: - interrupts +else: + properties: + reg: + items: + - description: TSC0 registers + - description: TSC1 registers + - description: TSC2 registers + - description: TSC3 registers + - description: TSC4 registers additionalProperties: false @@ -111,3 +121,20 @@ examples: }; }; }; + - | + #include <dt-bindings/clock/r8a779a0-cpg-mssr.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/power/r8a779a0-sysc.h> + + tsc_r8a779a0: thermal@e6190000 { + compatible = "renesas,r8a779a0-thermal"; + reg = <0xe6190000 0x200>, + <0xe6198000 0x200>, + <0xe61a0000 0x200>, + <0xe61a8000 0x200>, + <0xe61b0000 0x200>; + clocks = <&cpg CPG_MOD 919>; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + resets = <&cpg 919>; + #thermal-sensor-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/thermal/thermal-sensor.yaml b/Documentation/devicetree/bindings/thermal/thermal-sensor.yaml index 9f747921e851..4bd345c71eb8 100644 --- a/Documentation/devicetree/bindings/thermal/thermal-sensor.yaml +++ b/Documentation/devicetree/bindings/thermal/thermal-sensor.yaml @@ -36,6 +36,9 @@ properties: containing several internal sensors. enum: [0, 1] +required: + - "#thermal-sensor-cells" + additionalProperties: true examples: diff --git a/Documentation/driver-api/gpio/consumer.rst b/Documentation/driver-api/gpio/consumer.rst index 22271c342d92..3366a991b4aa 100644 --- a/Documentation/driver-api/gpio/consumer.rst +++ b/Documentation/driver-api/gpio/consumer.rst @@ -12,7 +12,7 @@ Guidelines for GPIOs consumers Drivers that can't work without standard GPIO calls should have Kconfig entries that depend on GPIOLIB or select GPIOLIB. 
The functions that allow a driver to -obtain and use GPIOs are available by including the following file: +obtain and use GPIOs are available by including the following file:: #include <linux/gpio/consumer.h> diff --git a/Documentation/driver-api/gpio/drivers-on-gpio.rst b/Documentation/driver-api/gpio/drivers-on-gpio.rst index 41ec3cc72d32..af632d764ac6 100644 --- a/Documentation/driver-api/gpio/drivers-on-gpio.rst +++ b/Documentation/driver-api/gpio/drivers-on-gpio.rst @@ -96,6 +96,12 @@ hardware descriptions such as device tree or ACPI: way to pass the charging parameters from hardware descriptions such as the device tree. +- gpio-mux: drivers/mux/gpio.c is used for controlling a multiplexer using + n GPIO lines such that you can mux in 2^n different devices by activating + different GPIO lines. Often the GPIOs are on a SoC and the devices are + some SoC-external entities, such as different components on a PCB that + can be selectively enabled. + Apart from this there are special GPIO drivers in subsystems like MMC/SD to read card detect and write protect GPIO lines, and in the TTY serial subsystem to emulate MCTRL (modem control) signals CTS/RTS by using two GPIO lines. The diff --git a/Documentation/driver-api/pwm.rst b/Documentation/driver-api/pwm.rst index ab62f1bb0366..a7ca4f58305a 100644 --- a/Documentation/driver-api/pwm.rst +++ b/Documentation/driver-api/pwm.rst @@ -55,7 +55,11 @@ several parameter at once. For example, if you see pwm_config() and pwm_{enable,disable}() calls in the same function, this probably means you should switch to pwm_apply_state(). -The PWM user API also allows one to query the PWM state with pwm_get_state(). +The PWM user API also allows one to query the PWM state that was passed to the +last invocation of pwm_apply_state() using pwm_get_state(). Note this is +different to what the driver has actually implemented if the request cannot be +satisfied exactly with the hardware in use. There is currently no way for +consumers to get the actually implemented settings. In addition to the PWM state, the PWM API also exposes PWM arguments, which are the reference PWM config one should use on this PWM. diff --git a/Documentation/driver-api/thermal/sysfs-api.rst b/Documentation/driver-api/thermal/sysfs-api.rst index 29fdd817ddb0..4b638c14bc16 100644 --- a/Documentation/driver-api/thermal/sysfs-api.rst +++ b/Documentation/driver-api/thermal/sysfs-api.rst @@ -730,17 +730,7 @@ This function returns the thermal_instance corresponding to a given {thermal_zone, cooling_device, trip_point} combination. Returns NULL if such an instance does not exist. -4.3. thermal_notify_framework ------------------------------ - -This function handles the trip events from sensor drivers. It starts -throttling the cooling devices according to the policy configured. -For CRITICAL and HOT trip points, this notifies the respective drivers, -and does actual throttling for other trip points i.e ACTIVE and PASSIVE. -The throttling policy is based on the configured platform data; if no -platform data is provided, this uses the step_wise throttling policy. - -4.4. thermal_cdev_update +4.3. 
thermal_cdev_update ------------------------ This function serves as an arbitrator to set the state of a cooling diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst index 35ed01a5fbc9..992bf91eeec8 100644 --- a/Documentation/filesystems/f2fs.rst +++ b/Documentation/filesystems/f2fs.rst @@ -110,6 +110,12 @@ background_gc=%s Turn on/off cleaning operations, namely garbage on synchronous garbage collection running in background. Default value for this option is on. So garbage collection is on by default. +gc_merge When background_gc is on, this option can be enabled to + let the background GC thread handle foreground GC requests; + this can eliminate the sluggishness caused by slow foreground + GC operation when GC is triggered from a process with limited + I/O and CPU resources. +nogc_merge Disable GC merge feature. disable_roll_forward Disable the roll-forward recovery routine norecovery Disable the roll-forward recovery routine, mounted read- only (i.e., -o ro,disable_roll_forward) @@ -813,6 +819,14 @@ Compression implementation * chattr +c file * chattr +c dir; touch dir/file * mount w/ -o compress_extension=ext; touch file.ext + * mount w/ -o compress_extension=*; touch any_file + +- At this point, the compression feature doesn't expose compressed space to the user + directly, in order to guarantee potential data updates to that space later. + Instead, the main goal is to reduce data writes to the flash disk as much as + possible, extending the disk lifetime as well as relaxing I/O + congestion. Alternatively, an ioctl interface has been added to reclaim compressed + space and show it to the user after setting the immutable bit. Compress metadata layout:: diff --git a/Documentation/firmware-guide/acpi/gpio-properties.rst b/Documentation/firmware-guide/acpi/gpio-properties.rst index 4e264c16ddff..df4b711053ee 100644 --- a/Documentation/firmware-guide/acpi/gpio-properties.rst +++ b/Documentation/firmware-guide/acpi/gpio-properties.rst @@ -99,6 +99,12 @@ native:: } } +Note that historically ACPI has had no means of expressing GPIO polarity, and thus +the SPISerialBus() resource defines it on a per-chip basis. In order +to avoid a chain of negations, the GPIO polarity is considered to be +Active High. Even for the cases when _DSD() is involved (see the example +above), the GPIO CS polarity must be defined as Active High to avoid ambiguity. + Other supported properties ========================== diff --git a/Documentation/input/joydev/joystick-api.rst b/Documentation/input/joydev/joystick-api.rst index 95803e2e8cd0..af5934c10c1c 100644 --- a/Documentation/input/joydev/joystick-api.rst +++ b/Documentation/input/joydev/joystick-api.rst @@ -71,7 +71,7 @@ The possible values of ``type`` are:: #define JS_EVENT_INIT 0x80 /* initial state of device */ As mentioned above, the driver will issue synthetic JS_EVENT_INIT ORed -events on open. That is, if it's issuing a INIT BUTTON event, the +events on open. That is, if it's issuing an INIT BUTTON event, the current type value will be:: int type = JS_EVENT_BUTTON | JS_EVENT_INIT; /* 0x81 */ @@ -100,8 +100,8 @@ is, you have both an axis 0 and a button 0). Generally, =============== ======= Hats vary from one joystick type to another. Some can be moved in 8 -directions, some only in 4, The driver, however, always reports a hat as two -independent axis, even if the hardware doesn't allow independent movement. +directions, some only in 4.
The driver, however, always reports a hat as two +independent axes, even if the hardware doesn't allow independent movement. js_event.value @@ -188,10 +188,10 @@ One reason for emptying the queue is that if it gets full you'll start missing events since the queue is finite, and older events will get overwritten. -The other reason is that you want to know all what happened, and not +The other reason is that you want to know all that happened, and not delay the processing till later. -Why can get the queue full? Because you don't empty the queue as +Why can the queue get full? Because you don't empty the queue as mentioned, or because too much time elapses from one read to another and too many events to store in the queue get generated. Note that high system load may contribute to space those reads even more. @@ -277,7 +277,7 @@ to be in the stable part of the API, and therefore may change without warning in following releases of the driver. Both JSIOCSCORR and JSIOCGCORR expect &js_corr to be able to hold -information for all axis. That is, struct js_corr corr[MAX_AXIS]; +information for all axes. That is, struct js_corr corr[MAX_AXIS]; struct js_corr is defined as:: @@ -328,7 +328,7 @@ To test the state of the buttons, second_button_state = js.buttons & 2; The axis values do not have a defined range in the original 0.x driver, -except for that the values are non-negative. The 1.2.8+ drivers use a +except that the values are non-negative. The 1.2.8+ drivers use a fixed range for reporting the values, 1 being the minimum, 128 the center, and 255 maximum value. diff --git a/Documentation/input/joydev/joystick.rst b/Documentation/input/joydev/joystick.rst index 9746fd76cc58..f615906a0821 100644 --- a/Documentation/input/joydev/joystick.rst +++ b/Documentation/input/joydev/joystick.rst @@ -133,15 +133,15 @@ And add a line to your rc script executing that file:: This way, after the next reboot your joystick will remain calibrated. You can also add the ``jscal -p`` line to your shutdown script. -HW specific driver information -============================== +Hardware-specific driver information +==================================== In this section each of the separate hardware specific drivers is described. Analog joysticks ---------------- -The analog.c uses the standard analog inputs of the gameport, and thus +The analog.c driver uses the standard analog inputs of the gameport, and thus supports all standard joysticks and gamepads. It uses a very advanced routine for this, allowing for data precision that can't be found on any other system. @@ -266,7 +266,7 @@ to: * Logitech WingMan Extreme Digital 3D ADI devices are autodetected, and the driver supports up to two (any -combination of) devices on a single gameport, using an Y-cable or chained +combination of) devices on a single gameport, using a Y-cable or chained together. Logitech WingMan Joystick, Logitech WingMan Attack, Logitech WingMan @@ -288,7 +288,7 @@ supports: * Gravis Xterminator DualControl All these devices are autodetected, and you can even use any combination -of up to two of these pads either chained together or using an Y-cable on a +of up to two of these pads either chained together or using a Y-cable on a single gameport. GrIP MultiPort isn't supported yet. Gravis Stinger is a serial device and is @@ -311,7 +311,7 @@ allow connecting analog joysticks to them, you'll need to load the analog driver as well to handle the attached joysticks. The trackball should work with USB mousedev module as a normal mouse. 
See -the USB documentation for how to setup an USB mouse. +the USB documentation for how to setup a USB mouse. ThrustMaster DirectConnect (BSP) -------------------------------- @@ -332,7 +332,7 @@ If you have one of these, contact me. TMDC devices are autodetected, and thus no parameters to the module are needed. Up to two TMDC devices can be connected to one gameport, using -an Y-cable. +a Y-cable. Creative Labs Blaster --------------------- @@ -342,7 +342,7 @@ the: * Creative Blaster GamePad Cobra -Up to two of these can be used on a single gameport, using an Y-cable. +Up to two of these can be used on a single gameport, using a Y-cable. Genius Digital joysticks ------------------------ @@ -381,7 +381,7 @@ card, 16 in case you have two in your system. Trident 4DWave / Aureal Vortex ------------------------------ -Soundcards with a Trident 4DWave DX/NX or Aureal Vortex/Vortex2 chipsets +Soundcards with a Trident 4DWave DX/NX or Aureal Vortex/Vortex2 chipset provide an "Enhanced Game Port" mode where the soundcard handles polling the joystick. This mode is supported by the pcigame.c module. Once loaded the analog driver can use the enhanced features of these gameports.. @@ -454,7 +454,7 @@ Devices currently supported by spaceball.c are: * SpaceTec SpaceBall 4000 FLX In addition to having the spaceorb/spaceball and serport modules in the -kernel, you also need to attach a serial port to it. to do that, run the +kernel, you also need to attach a serial port to it. To do that, run the inputattach program:: inputattach --spaceorb /dev/tts/x & @@ -466,7 +466,7 @@ or:: where /dev/tts/x is the serial port which the device is connected to. After doing this, the device will be reported and will start working. -There is one caveat with the SpaceOrb. The button #6, the on the bottom +There is one caveat with the SpaceOrb. The button #6, the one on the bottom side of the orb, although reported as an ordinary button, causes internal recentering of the spaceorb, moving the zero point to the position in which the ball is at the moment of pressing the button. So, think first before @@ -500,7 +500,7 @@ joy-magellan module. It currently supports only the: * Magellan 3D * Space Mouse -models, the additional buttons on the 'Plus' versions are not supported yet. +models; the additional buttons on the 'Plus' versions are not supported yet. To use it, you need to attach the serial port to the driver using the:: @@ -575,7 +575,7 @@ FAQ :A: The device files don't exist. Create them (see section 2.2). :Q: Is it possible to connect my old Atari/Commodore/Amiga/console joystick - or pad that uses a 9-pin D-type cannon connector to the serial port of my + or pad that uses a 9-pin D-type Cannon connector to the serial port of my PC? :A: Yes, it is possible, but it'll burn your serial port or the pad. It won't work, of course. 
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index dac17711dc11..d3a8557b66a1 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -48,7 +48,6 @@ quota-tools 3.09 quota -V PPP 2.4.0 pppd --version nfs-utils 1.0.5 showmount --version procps 3.2.0 ps --version -oprofile 0.9 oprofiled --version udev 081 udevd --version grub 0.93 grub --version || grub-install --version mcelog 0.6 mcelog --version diff --git a/Documentation/riscv/index.rst b/Documentation/riscv/index.rst index 6e6e39482502..ea915c196048 100644 --- a/Documentation/riscv/index.rst +++ b/Documentation/riscv/index.rst @@ -6,6 +6,7 @@ RISC-V architecture :maxdepth: 1 boot-image-header + vm-layout pmu patch-acceptance diff --git a/Documentation/riscv/vm-layout.rst b/Documentation/riscv/vm-layout.rst new file mode 100644 index 000000000000..329d32098af4 --- /dev/null +++ b/Documentation/riscv/vm-layout.rst @@ -0,0 +1,63 @@ +.. SPDX-License-Identifier: GPL-2.0 + +===================================== +Virtual Memory Layout on RISC-V Linux +===================================== + +:Author: Alexandre Ghiti <[email protected]> +:Date: 12 February 2021 + +This document describes the virtual memory layout used by the RISC-V Linux +Kernel. + +RISC-V Linux Kernel 32bit +========================= + +RISC-V Linux Kernel SV32 +------------------------ + +TODO + +RISC-V Linux Kernel 64bit +========================= + +The RISC-V privileged architecture document states that the 64bit addresses +"must have bits 63–48 all equal to bit 47, or else a page-fault exception will +occur.": that splits the virtual address space into 2 halves separated by a very +big hole, the lower half is where the userspace resides, the upper half is where +the RISC-V Linux Kernel resides. + +RISC-V Linux Kernel SV39 +------------------------ + +:: + + ======================================================================================================================== + Start addr | Offset | End addr | Size | VM area description + ======================================================================================================================== + | | | | + 0000000000000000 | 0 | 0000003fffffffff | 256 GB | user-space virtual memory, different per mm + __________________|____________|__________________|_________|___________________________________________________________ + | | | | + 0000004000000000 | +256 GB | ffffffbfffffffff | ~16M TB | ... huge, almost 64 bits wide hole of non-canonical + | | | | virtual memory addresses up to the -256 GB + | | | | starting offset of kernel mappings. 
+ __________________|____________|__________________|_________|___________________________________________________________ + | + | Kernel-space virtual memory, shared between all processes: + ____________________________________________________________|___________________________________________________________ + | | | | + ffffffc000000000 | -256 GB | ffffffc7ffffffff | 32 GB | kasan + ffffffcefee00000 | -196 GB | ffffffcefeffffff | 2 MB | fixmap + ffffffceff000000 | -196 GB | ffffffceffffffff | 16 MB | PCI io + ffffffcf00000000 | -196 GB | ffffffcfffffffff | 4 GB | vmemmap + ffffffd000000000 | -192 GB | ffffffdfffffffff | 64 GB | vmalloc/ioremap space + ffffffe000000000 | -128 GB | ffffffff7fffffff | 124 GB | direct mapping of all physical memory + __________________|____________|__________________|_________|____________________________________________________________ + | + | + ____________________________________________________________|____________________________________________________________ + | | | | + ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules + ffffffff80000000 | -2 GB | ffffffffffffffff | 2 GB | kernel, BPF + __________________|____________|__________________|_________|____________________________________________________________ diff --git a/Documentation/translations/it_IT/process/changes.rst b/Documentation/translations/it_IT/process/changes.rst index cc883f8d96c4..87d081889bfc 100644 --- a/Documentation/translations/it_IT/process/changes.rst +++ b/Documentation/translations/it_IT/process/changes.rst @@ -51,7 +51,6 @@ quota-tools 3.09 quota -V PPP 2.4.0 pppd --version nfs-utils 1.0.5 showmount --version procps 3.2.0 ps --version -oprofile 0.9 oprofiled --version udev 081 udevd --version grub 0.93 grub --version || grub-install --version mcelog 0.6 mcelog --version diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst index ee6b20ca9080..d56d6b7092e6 100644 --- a/Documentation/translations/zh_CN/index.rst +++ b/Documentation/translations/zh_CN/index.rst @@ -1,36 +1,184 @@ +.. SPDX-License-Identifier: GPL-2.0 + .. raw:: latex \renewcommand\thesection* \renewcommand\thesubsection* +.. _linux_doc_zh: + 中文翻译 ======== -这些手册包含有关如何开发内核的整体信息。内核社区非常庞大,一年下来有数千名开发 -人员做出贡献。 与任何大型社区一样,知道如何完成任务将使得更改合并的过程变得更 -加容易。 -翻译计划: -内核中文文档欢迎任何翻译投稿,特别是关于内核用户和管理员指南部分。 +.. note:: + + **翻译计划:** + 内核中文文档欢迎任何翻译投稿,特别是关于内核用户和管理员指南部分。 + +许可证文档 +---------- + +下面的文档介绍了Linux内核源代码的许可证(GPLv2)、如何在源代码树中正确标记 +单个文件的许可证、以及指向完整许可证文本的链接。 + +* Documentation/translations/zh_CN/process/license-rules.rst + +用户文档 +-------- + +下面的手册是为内核用户编写的——即那些试图让它在给定系统上以最佳方式工作的 +用户。 .. toctree:: :maxdepth: 2 admin-guide/index + +TODOList: + +* kbuild/index + +固件相关文档 +------------ + +下列文档描述了内核需要的平台固件相关信息。 + +TODOList: + +* firmware-guide/index +* devicetree/index + +应用程序开发人员文档 +-------------------- + +用户空间API手册涵盖了描述应用程序开发人员可见内核接口方面的文档。 + +TODOlist: + +* userspace-api/index + +内核开发简介 +------------ + +这些手册包含有关如何开发内核的整体信息。内核社区非常庞大,一年下来有数千名 +开发人员做出贡献。与任何大型社区一样,知道如何完成任务将使得更改合并的过程 +变得更加容易。 + +.. toctree:: + :maxdepth: 2 + process/index dev-tools/index doc-guide/index kernel-hacking/index - filesystems/index - arm64/index - sound/index + +TODOList: + +* trace/index +* maintainer/index +* fault-injection/index +* livepatch/index +* rust/index + +内核API文档 +----------- + +以下手册从内核开发人员的角度详细介绍了特定的内核子系统是如何工作的。这里的 +大部分信息都是直接从内核源代码获取的,并根据需要添加补充材料(或者至少是在 +我们设法添加的时候——可能不是所有的都是有需要的)。 + +.. 
toctree:: + :maxdepth: 2 + + core-api/index cpu-freq/index - mips/index iio/index + sound/index + filesystems/index + +TODOList: + +* driver-api/index +* locking/index +* accounting/index +* block/index +* cdrom/index +* ide/index +* fb/index +* fpga/index +* hid/index +* i2c/index +* isdn/index +* infiniband/index +* leds/index +* netlabel/index +* networking/index +* pcmcia/index +* power/index +* target/index +* timers/index +* spi/index +* w1/index +* watchdog/index +* virt/index +* input/index +* hwmon/index +* gpu/index +* security/index +* crypto/index +* vm/index +* bpf/index +* usb/index +* PCI/index +* scsi/index +* misc-devices/index +* scheduler/index +* mhi/index + +体系结构无关文档 +---------------- + +TODOList: + +* asm-annotations + +特定体系结构文档 +---------------- + +.. toctree:: + :maxdepth: 2 + + mips/index + arm64/index riscv/index - core-api/index openrisc/index +TODOList: + +* arm/index +* ia64/index +* m68k/index +* nios2/index +* parisc/index +* powerpc/index +* s390/index +* sh/index +* sparc/index +* x86/index +* xtensa/index + +其他文档 +-------- + +有几份未排序的文档似乎不适合放在文档的其他部分,或者可能需要进行一些调整和/或 +转换为reStructureText格式,也有可能太旧。 + +TODOList: + +* staging/index +* watch_queue + 目录和表格 ---------- diff --git a/Documentation/x86/x86_64/5level-paging.rst b/Documentation/x86/x86_64/5level-paging.rst index 44856417e6a5..b792bbdc0b01 100644 --- a/Documentation/x86/x86_64/5level-paging.rst +++ b/Documentation/x86/x86_64/5level-paging.rst @@ -6,9 +6,9 @@ Overview ======== -Original x86-64 was limited by 4-level paing to 256 TiB of virtual address +Original x86-64 was limited by 4-level paging to 256 TiB of virtual address space and 64 TiB of physical address space. We are already bumping into -this limit: some vendors offers servers with 64 TiB of memory today. +this limit: some vendors offer servers with 64 TiB of memory today. To overcome the limitation upcoming hardware will introduce support for 5-level paging. 
It is a straight-forward extension of the current page diff --git a/MAINTAINERS b/MAINTAINERS index 121b1a12384a..65d200e6e68b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10170,6 +10170,12 @@ S: Maintained F: Documentation/devicetree/bindings/leds/backlight/kinetic,ktd253.yaml F: drivers/video/backlight/ktd253-backlight.c +KTEST +M: Steven Rostedt <[email protected]> +M: John Hawley <[email protected]> +S: Maintained +F: tools/testing/ktest + L3MDEV M: David Ahern <[email protected]> @@ -13977,6 +13983,14 @@ S: Maintained F: Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt F: drivers/pci/controller/dwc/*imx6* +PCI DRIVER FOR FU740 +M: Paul Walmsley <[email protected]> +M: Greentime Hu <[email protected]> +S: Maintained +F: Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml +F: drivers/pci/controller/dwc/pcie-fu740.c + PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD) M: Jonathan Derrick <[email protected]> @@ -14184,7 +14198,6 @@ PCIE DRIVER FOR HISILICON M: Zhou Wang <[email protected]> S: Maintained -F: Documentation/devicetree/bindings/pci/hisilicon-pcie.txt F: drivers/pci/controller/dwc/pcie-hisi.c PCIE DRIVER FOR HISILICON KIRIN @@ -14204,6 +14217,7 @@ F: drivers/pci/controller/dwc/pcie-histb.c PCIE DRIVER FOR MEDIATEK M: Ryder Lee <[email protected]> +M: Jianjun Wang <[email protected]> S: Supported @@ -15191,6 +15205,7 @@ F: include/linux/if_rmnet.h QUALCOMM TSENS THERMAL DRIVER M: Amit Kucheria <[email protected]> +M: Thara Gopinath <[email protected]> S: Maintained @@ -18095,7 +18110,7 @@ THERMAL/CPU_COOLING M: Amit Daniel Kachhap <[email protected]> M: Daniel Lezcano <[email protected]> M: Viresh Kumar <[email protected]> -M: Javi Merino <[email protected]> +R: Lukasz Luba <[email protected]> S: Supported F: Documentation/driver-api/thermal/cpu-cooling-api.rst diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index bc8d6aecfbbd..2d98501c0897 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -6,6 +6,7 @@ config ARC def_bool y select ARC_TIMERS + select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_PTE_SPECIAL @@ -28,6 +29,7 @@ config ARC select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4 select HAVE_DEBUG_STACKOVERFLOW select HAVE_DEBUG_KMEMLEAK select HAVE_FUTEX_CMPXCHG if FUTEX @@ -48,9 +50,6 @@ config ARC select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32 select SET_FS -config ARCH_HAS_CACHE_LINE_SIZE - def_bool y - config TRACE_IRQFLAGS_SUPPORT def_bool y @@ -86,10 +85,6 @@ config STACKTRACE_SUPPORT def_bool y select STACKTRACE -config HAVE_ARCH_TRANSPARENT_HUGEPAGE - def_bool y - depends on ARC_MMU_V4 - menu "ARC Architecture Configuration" menu "ARC Platform/SoC/Board" diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 085c830d344b..24804f11302d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -31,6 +31,7 @@ config ARM select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7 select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_MEMTEST @@ -77,6 +78,7 @@ config ARM select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT select HAVE_ARCH_THREAD_STRUCT_WHITELIST select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARM_LPAE select HAVE_ARM_SMCCC if CPU_V7 select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32 select 
HAVE_CONTEXT_TRACKING @@ -1511,14 +1513,6 @@ config HW_PERF_EVENTS def_bool y depends on ARM_PMU -config SYS_SUPPORTS_HUGETLBFS - def_bool y - depends on ARM_LPAE - -config HAVE_ARCH_TRANSPARENT_HUGEPAGE - def_bool y - depends on ARM_LPAE - config ARCH_WANT_GENERAL_HUGETLB def_bool y diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index fd94e27ba4fa..c1f804768621 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -118,8 +118,8 @@ asflags-y := -DZIMAGE # Supply kernel BSS size to the decompressor via a linker symbol. KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \ - sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \ - -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) ) + sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \ + -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) ) LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ) # Supply ZRELADDR to the decompressor via a linker symbol. ifneq ($(CONFIG_AUTO_ZRELADDR),y) diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi index 47a787a12e55..e24230d50a78 100644 --- a/arch/arm/boot/dts/rk3036.dtsi +++ b/arch/arm/boot/dts/rk3036.dtsi @@ -355,7 +355,6 @@ reg = <0x20050000 0x10>; #pwm-cells = <3>; clocks = <&cru PCLK_PWM>; - clock-names = "pwm"; pinctrl-names = "default"; pinctrl-0 = <&pwm0_pin>; status = "disabled"; @@ -366,7 +365,6 @@ reg = <0x20050010 0x10>; #pwm-cells = <3>; clocks = <&cru PCLK_PWM>; - clock-names = "pwm"; pinctrl-names = "default"; pinctrl-0 = <&pwm1_pin>; status = "disabled"; @@ -377,7 +375,6 @@ reg = <0x20050020 0x10>; #pwm-cells = <3>; clocks = <&cru PCLK_PWM>; - clock-names = "pwm"; pinctrl-names = "default"; pinctrl-0 = <&pwm2_pin>; status = "disabled"; @@ -388,7 +385,6 @@ reg = <0x20050030 0x10>; #pwm-cells = <2>; clocks = <&cru PCLK_PWM>; - clock-names = "pwm"; pinctrl-names = "default"; pinctrl-0 = <&pwm3_pin>; status = "disabled"; diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index ea7416c31f9b..05557ad02b33 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -679,7 +679,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm0_pin>; clocks = <&cru PCLK_RKPWM>; - clock-names = "pwm"; status = "disabled"; }; @@ -690,7 +689,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm1_pin>; clocks = <&cru PCLK_RKPWM>; - clock-names = "pwm"; status = "disabled"; }; @@ -701,7 +699,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm2_pin>; clocks = <&cru PCLK_RKPWM>; - clock-names = "pwm"; status = "disabled"; }; @@ -712,7 +709,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm3_pin>; clocks = <&cru PCLK_RKPWM>; - clock-names = "pwm"; status = "disabled"; }; diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig index 3a7938f244e5..2aa3ebeb89d7 100644 --- a/arch/arm/configs/footbridge_defconfig +++ b/arch/arm/configs/footbridge_defconfig @@ -7,7 +7,6 @@ CONFIG_EXPERT=y CONFIG_MODULES=y CONFIG_ARCH_FOOTBRIDGE=y CONFIG_ARCH_CATS=y -CONFIG_ARCH_PERSONAL_SERVER=y CONFIG_ARCH_EBSA285_HOST=y CONFIG_ARCH_NETWINDER=y CONFIG_LEDS=y diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h index 22751b5b5735..e62832dcba76 100644 --- a/arch/arm/include/asm/kexec.h +++ b/arch/arm/include/asm/kexec.h @@ -56,9 +56,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs, } } -/* Function pointer to optional machine-specific reinitialization */ -extern void (*kexec_reinit)(void); - static inline unsigned long 
phys_to_boot_phys(phys_addr_t phys) { return phys_to_idmap(phys); diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 2f841cb65c30..a711322d9f40 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -150,21 +150,6 @@ extern unsigned long vectors_base; */ #define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) -#ifdef CONFIG_XIP_KERNEL -/* - * When referencing data in RAM from the XIP region in a relative manner - * with the MMU off, we need the relative offset between the two physical - * addresses. The macro below achieves this, which is: - * __pa(v_data) - __xip_pa(v_text) - */ -#define PHYS_RELATIVE(v_data, v_text) \ - (((v_data) - PAGE_OFFSET + PLAT_PHYS_OFFSET) - \ - ((v_text) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + \ - CONFIG_XIP_PHYS_ADDR)) -#else -#define PHYS_RELATIVE(v_data, v_text) ((v_data) - (v_text)) -#endif - #ifndef __ASSEMBLY__ /* diff --git a/arch/arm/include/asm/set_memory.h b/arch/arm/include/asm/set_memory.h index a1ceff4295d3..ec17fc0fda7a 100644 --- a/arch/arm/include/asm/set_memory.h +++ b/arch/arm/include/asm/set_memory.h @@ -18,12 +18,4 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } #endif -#ifdef CONFIG_STRICT_KERNEL_RWX -void set_kernel_text_rw(void); -void set_kernel_text_ro(void); -#else -static inline void set_kernel_text_rw(void) { } -static inline void set_kernel_text_ro(void) { } -#endif - #endif diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild index ce8573157774..63748af8bc9d 100644 --- a/arch/arm/include/uapi/asm/Kbuild +++ b/arch/arm/include/uapi/asm/Kbuild @@ -1,6 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -generated-y += unistd-common.h generated-y += unistd-oabi.h generated-y += unistd-eabi.h generic-y += kvm_para.h diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index 93ecf8aa4fe5..ae7749e15726 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -24,7 +24,6 @@ #include <asm/unistd-oabi.h> #endif -#include <asm/unistd-common.h> #define __NR_sync_file_range2 __NR_arm_sync_file_range /* diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index be8050b0c3df..70993af22d80 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -24,6 +24,7 @@ #include <asm/vdso_datapage.h> #include <asm/hardware/cache-l2x0.h> #include <linux/kbuild.h> +#include <linux/arm-smccc.h> #include "signal.h" /* @@ -148,6 +149,8 @@ int main(void) DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); #endif + DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id)); + DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state)); BLANK(); DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index e0d7833a1827..7f0b7aba1498 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -344,20 +344,19 @@ ENTRY(\sym) .size \sym, . - \sym .endm -#define NATIVE(nr, func) syscall nr, func +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#define __SYSCALL(nr, func) syscall nr, func /* * This is the syscall table declaration for native ABI syscalls. 
* With EABI a couple syscalls are obsolete and defined as sys_ni_syscall. */ syscall_table_start sys_call_table -#define COMPAT(nr, native, compat) syscall nr, native #ifdef CONFIG_AEABI #include <calls-eabi.S> #else #include <calls-oabi.S> #endif -#undef COMPAT syscall_table_end sys_call_table /*============================================================================ @@ -455,7 +454,8 @@ ENDPROC(sys_oabi_readahead) * using the compatibility syscall entries. */ syscall_table_start sys_oabi_call_table -#define COMPAT(nr, native, compat) syscall nr, compat +#undef __SYSCALL_WITH_COMPAT +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) #include <calls-oabi.S> syscall_table_end sys_oabi_call_table diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 08660ae9dcbc..b1423fb130ea 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs) info->trigger = addr; pr_debug("breakpoint fired: address = 0x%x\n", addr); perf_bp_event(bp, regs); - if (!bp->overflow_handler) + if (is_default_overflow_handler(bp)) enable_single_step(bp, addr); goto unlock; } diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 2b09dad7935e..f567032a09c0 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -147,11 +147,6 @@ void machine_crash_shutdown(struct pt_regs *regs) pr_info("Loading crashdump kernel...\n"); } -/* - * Function pointer to optional machine-specific reinitialization - */ -void (*kexec_reinit)(void); - void machine_kexec(struct kimage *image) { unsigned long page_list, reboot_entry_phys; @@ -187,9 +182,6 @@ void machine_kexec(struct kimage *image) pr_info("Bye!\n"); - if (kexec_reinit) - kexec_reinit(); - soft_restart(reboot_entry_phys); } diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S index 00664c78faca..931df62a7831 100644 --- a/arch/arm/kernel/smccc-call.S +++ b/arch/arm/kernel/smccc-call.S @@ -3,7 +3,9 @@ * Copyright (c) 2015, Linaro Limited */ #include <linux/linkage.h> +#include <linux/arm-smccc.h> +#include <asm/asm-offsets.h> #include <asm/opcodes-sec.h> #include <asm/opcodes-virt.h> #include <asm/unwind.h> @@ -27,7 +29,14 @@ UNWIND( .fnstart) UNWIND( .save {r4-r7}) ldm r12, {r4-r7} \instr - pop {r4-r7} + ldr r4, [sp, #36] + cmp r4, #0 + beq 1f // No quirk structure + ldr r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS] + cmp r5, #ARM_SMCCC_QUIRK_QCOM_A6 + bne 1f // No quirk present + str r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS] +1: pop {r4-r7} ldr r12, [sp, #(4 * 4)] stm r12, {r0-r3} bx lr diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index 24bd20564be7..43f0a3ebf390 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/ftrace.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mm_types.h> @@ -26,12 +27,22 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) return -EINVAL; /* + * Function graph tracer state gets inconsistent when the kernel + * calls functions that never return (aka suspend finishers), hence + * disable graph tracing during their execution. + */ + pause_graph_tracing(); + + /* * Provide a temporary page table with an identity mapping for * the MMU-enable code, required for resuming. On successful * resume (indicated by a zero return code), we need to switch * back to the correct page tables.
*/ ret = __cpu_suspend(arg, fn, __mpidr); + + unpause_graph_tracing(); + if (ret == 0) { cpu_switch_mm(mm->pgd, mm); local_flush_bp_all(); @@ -45,7 +56,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) { u32 __mpidr = cpu_logical_map(smp_processor_id()); - return __cpu_suspend(arg, fn, __mpidr); + int ret; + + pause_graph_tracing(); + ret = __cpu_suspend(arg, fn, __mpidr); + unpause_graph_tracing(); + + return ret; } #define idmap_pgd NULL #endif diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig index 844aa585b966..728aff93fba9 100644 --- a/arch/arm/mach-footbridge/Kconfig +++ b/arch/arm/mach-footbridge/Kconfig @@ -16,27 +16,6 @@ config ARCH_CATS Saying N will reduce the size of the Footbridge kernel. -config ARCH_PERSONAL_SERVER - bool "Compaq Personal Server" - select FOOTBRIDGE_HOST - select ISA - select ISA_DMA - select FORCE_PCI - help - Say Y here if you intend to run this kernel on the Compaq - Personal Server. - - Saying N will reduce the size of the Footbridge kernel. - - The Compaq Personal Server is not available for purchase. - There are no product plans beyond the current research - prototypes at this time. Information is available at: - - <http://www.crl.hpl.hp.com/projects/personalserver/> - - If you have any questions or comments about the Compaq Personal - Server, send e-mail to <[email protected]>. - config ARCH_EBSA285_ADDIN bool "EBSA285 (addin mode)" select ARCH_EBSA285 diff --git a/arch/arm/mach-footbridge/Makefile b/arch/arm/mach-footbridge/Makefile index a09f1041f141..6262993c0555 100644 --- a/arch/arm/mach-footbridge/Makefile +++ b/arch/arm/mach-footbridge/Makefile @@ -11,12 +11,10 @@ pci-y += dc21285.o pci-$(CONFIG_ARCH_CATS) += cats-pci.o pci-$(CONFIG_ARCH_EBSA285_HOST) += ebsa285-pci.o pci-$(CONFIG_ARCH_NETWINDER) += netwinder-pci.o -pci-$(CONFIG_ARCH_PERSONAL_SERVER) += personal-pci.o obj-$(CONFIG_ARCH_CATS) += cats-hw.o isa-timer.o obj-$(CONFIG_ARCH_EBSA285) += ebsa285.o dc21285-timer.o obj-$(CONFIG_ARCH_NETWINDER) += netwinder-hw.o isa-timer.o -obj-$(CONFIG_ARCH_PERSONAL_SERVER) += personal.o dc21285-timer.o obj-$(CONFIG_PCI) +=$(pci-y) diff --git a/arch/arm/mach-footbridge/personal-pci.c b/arch/arm/mach-footbridge/personal-pci.c deleted file mode 100644 index 9d19aa98a663..000000000000 --- a/arch/arm/mach-footbridge/personal-pci.c +++ /dev/null @@ -1,57 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/arm/mach-footbridge/personal-pci.c - * - * PCI bios-type initialisation for PCI machines - * - * Bits taken from various places. - */ -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/init.h> - -#include <asm/irq.h> -#include <asm/mach/pci.h> -#include <asm/mach-types.h> - -static int irqmap_personal_server[] = { - IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0, - IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI -}; - -static int personal_server_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -{ - unsigned char line; - - pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line); - - if (line > 0x40 && line <= 0x5f) { - /* line corresponds to the bit controlling this interrupt - * in the footbridge. Ignore the first 8 interrupt bits, - * look up the rest in the map. 
IN0 is bit number 8 - */ - return irqmap_personal_server[(line & 0x1f) - 8]; - } else if (line == 0) { - /* no interrupt */ - return 0; - } else - return irqmap_personal_server[(line - 1) & 3]; -} - -static struct hw_pci personal_server_pci __initdata = { - .map_irq = personal_server_map_irq, - .nr_controllers = 1, - .ops = &dc21285_ops, - .setup = dc21285_setup, - .preinit = dc21285_preinit, - .postinit = dc21285_postinit, -}; - -static int __init personal_pci_init(void) -{ - if (machine_is_personal_server()) - pci_common_init(&personal_server_pci); - return 0; -} - -subsys_initcall(personal_pci_init); diff --git a/arch/arm/mach-footbridge/personal.c b/arch/arm/mach-footbridge/personal.c deleted file mode 100644 index ca715754fc00..000000000000 --- a/arch/arm/mach-footbridge/personal.c +++ /dev/null @@ -1,25 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/arm/mach-footbridge/personal.c - * - * Personal server (Skiff) machine fixup - */ -#include <linux/init.h> -#include <linux/spinlock.h> - -#include <asm/hardware/dec21285.h> -#include <asm/mach-types.h> - -#include <asm/mach/arch.h> - -#include "common.h" - -MACHINE_START(PERSONAL_SERVER, "Compaq-PersonalServer") - /* Maintainer: Jamey Hicks / George France */ - .atag_offset = 0x100, - .map_io = footbridge_map_io, - .init_irq = footbridge_init_irq, - .init_time = footbridge_timer_init, - .restart = footbridge_restart, -MACHINE_END - diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c index 78b9a5ee41c9..bf99e718f8b8 100644 --- a/arch/arm/mach-iop32x/n2100.c +++ b/arch/arm/mach-iop32x/n2100.c @@ -116,16 +116,16 @@ static struct hw_pci n2100_pci __initdata = { }; /* - * Both r8169 chips on the n2100 exhibit PCI parity problems. Set - * the ->broken_parity_status flag for both ports so that the r8169 - * driver knows it should ignore error interrupts. + * Both r8169 chips on the n2100 exhibit PCI parity problems. Turn + * off parity reporting for both ports so we don't get error interrupts + * for them. */ static void n2100_fixup_r8169(struct pci_dev *dev) { if (dev->bus->number == 0 && (dev->devfn == PCI_DEVFN(1, 0) || dev->devfn == PCI_DEVFN(2, 0))) - dev->broken_parity_status = 1; + pci_disable_parity(dev); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, PCI_ANY_ID, n2100_fixup_r8169); diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index dc8f152f3556..830bbfb26ca5 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -33,41 +33,41 @@ icache_size: * processor. We fix this by performing an invalidate, rather than a * clean + invalidate, before jumping into the kernel. * - * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs - * to be called for both secondary cores startup and primary core resume - * procedures. + * This function needs to be called for both secondary cores startup and + * primary core resume procedures. */ ENTRY(v7_invalidate_l1) - mov r0, #0 - mcr p15, 2, r0, c0, c0, 0 - mrc p15, 1, r0, c0, c0, 0 - - movw r1, #0x7fff - and r2, r1, r0, lsr #13 + mov r0, #0 + mcr p15, 2, r0, c0, c0, 0 @ select L1 data cache in CSSELR + isb + mrc p15, 1, r0, c0, c0, 0 @ read cache geometry from CCSIDR - movw r1, #0x3ff + movw r3, #0x3ff + and r3, r3, r0, lsr #3 @ 'Associativity' in CCSIDR[12:3] + clz r1, r3 @ WayShift + mov r2, #1 + mov r3, r3, lsl r1 @ NumWays-1 shifted into bits [31:...] 
+ movs r1, r2, lsl r1 @ #1 shifted left by same amount + moveq r1, #1 @ r1 needs value > 0 even if only 1 way - and r3, r1, r0, lsr #3 @ NumWays - 1 - add r2, r2, #1 @ NumSets + and r2, r0, #0x7 + add r2, r2, #4 @ SetShift - and r0, r0, #0x7 - add r0, r0, #4 @ SetShift +1: movw ip, #0x7fff + and r0, ip, r0, lsr #13 @ 'NumSets' in CCSIDR[27:13] - clz r1, r3 @ WayShift - add r4, r3, #1 @ NumWays -1: sub r2, r2, #1 @ NumSets-- - mov r3, r4 @ Temp = NumWays -2: subs r3, r3, #1 @ Temp-- - mov r5, r3, lsl r1 - mov r6, r2, lsl r0 - orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) - mcr p15, 0, r5, c7, c6, 2 - bgt 2b - cmp r2, #0 - bgt 1b - dsb st - isb - ret lr +2: mov ip, r0, lsl r2 @ NumSet << SetShift + orr ip, ip, r3 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) + mcr p15, 0, ip, c7, c6, 2 + subs r0, r0, #1 @ Set-- + bpl 2b + subs r3, r3, r1 @ Way-- + bcc 3f + mrc p15, 1, r0, c0, c0, 0 @ re-read cache geometry from CCSIDR + b 1b +3: dsb st + isb + ret lr ENDPROC(v7_invalidate_l1) /* diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index 93ff0097f00b..fb688003d156 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -420,7 +420,7 @@ void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) note_page(&st, 0, 0, 0, NULL); } -static void ptdump_initialize(void) +static void __init ptdump_initialize(void) { unsigned i, j; @@ -466,7 +466,7 @@ void ptdump_check_wx(void) pr_info("Checked W+X mappings: passed, no W+X pages found\n"); } -static int ptdump_init(void) +static int __init ptdump_init(void) { ptdump_initialize(); ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables"); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 7022b7b5c400..9d4744a632c6 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -301,7 +301,11 @@ static void __init free_highpages(void) void __init mem_init(void) { #ifdef CONFIG_ARM_LPAE - swiotlb_init(1); + if (swiotlb_force == SWIOTLB_FORCE || + max_pfn > arm_dma_pfn_limit) + swiotlb_init(1); + else + swiotlb_force = SWIOTLB_NO_FORCE; #endif set_max_mapnr(pfn_to_page(max_pfn) - mem_map); @@ -485,33 +489,12 @@ static int __mark_rodata_ro(void *unused) return 0; } -static int kernel_set_to_readonly __read_mostly; - void mark_rodata_ro(void) { - kernel_set_to_readonly = 1; stop_machine(__mark_rodata_ro, NULL, NULL); debug_checkwx(); } -void set_kernel_text_rw(void) -{ - if (!kernel_set_to_readonly) - return; - - set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false, - current->active_mm); -} - -void set_kernel_text_ro(void) -{ - if (!kernel_set_to_readonly) - return; - - set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true, - current->active_mm); -} - #else static inline void fix_kernmem_perms(void) { } #endif /* CONFIG_STRICT_KERNEL_RWX */ diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 28c9d32fa99a..26d726a08a34 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -256,6 +256,20 @@ ENDPROC(cpu_pj4b_do_resume) #endif + @ + @ Invoke the v7_invalidate_l1() function, which adheres to the AAPCS + @ rules, and so it may corrupt registers that we need to preserve. 
+ @ + .macro do_invalidate_l1 + mov r6, r1 + mov r7, r2 + mov r10, lr + bl v7_invalidate_l1 @ corrupts {r0-r3, ip, lr} + mov r1, r6 + mov r2, r7 + mov lr, r10 + .endm + /* * __v7_setup * @@ -277,6 +291,7 @@ __v7_ca5mp_setup: __v7_ca9mp_setup: __v7_cr7mp_setup: __v7_cr8mp_setup: + do_invalidate_l1 mov r10, #(1 << 0) @ Cache/TLB ops broadcasting b 1f __v7_ca7mp_setup: @@ -284,13 +299,9 @@ __v7_ca12mp_setup: __v7_ca15mp_setup: __v7_b15mp_setup: __v7_ca17mp_setup: + do_invalidate_l1 mov r10, #0 -1: adr r0, __v7_setup_stack_ptr - ldr r12, [r0] - add r12, r12, r0 @ the local stack - stmia r12, {r1-r6, lr} @ v7_invalidate_l1 touches r0-r6 - bl v7_invalidate_l1 - ldmia r12, {r1-r6, lr} +1: #ifdef CONFIG_SMP orr r10, r10, #(1 << 6) @ Enable SMP/nAMP mode ALT_SMP(mrc p15, 0, r0, c1, c0, 1) @@ -471,12 +482,7 @@ __v7_pj4b_setup: #endif /* CONFIG_CPU_PJ4B */ __v7_setup: - adr r0, __v7_setup_stack_ptr - ldr r12, [r0] - add r12, r12, r0 @ the local stack - stmia r12, {r1-r6, lr} @ v7_invalidate_l1 touches r0-r6 - bl v7_invalidate_l1 - ldmia r12, {r1-r6, lr} + do_invalidate_l1 __v7_setup_cont: and r0, r9, #0xff000000 @ ARM? @@ -548,17 +554,8 @@ __errata_finish: orr r0, r0, r6 @ set them THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions ret lr @ return to head.S:__ret - - .align 2 -__v7_setup_stack_ptr: - .word PHYS_RELATIVE(__v7_setup_stack, .) ENDPROC(__v7_setup) - .bss - .align 2 -__v7_setup_stack: - .space 4 * 7 @ 7 registers - __INITDATA .weak cpu_v7_bugs_init diff --git a/arch/arm/mm/ptdump_debugfs.c b/arch/arm/mm/ptdump_debugfs.c index 598b636615a2..318de969ae0f 100644 --- a/arch/arm/mm/ptdump_debugfs.c +++ b/arch/arm/mm/ptdump_debugfs.c @@ -11,20 +11,9 @@ static int ptdump_show(struct seq_file *m, void *v) ptdump_walk_pgd(m, info); return 0; } +DEFINE_SHOW_ATTRIBUTE(ptdump); -static int ptdump_open(struct inode *inode, struct file *file) -{ - return single_open(file, ptdump_show, inode->i_private); -} - -static const struct file_operations ptdump_fops = { - .open = ptdump_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -void ptdump_debugfs_register(struct ptdump_info *info, const char *name) +void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name) { debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); } diff --git a/arch/arm/probes/kprobes/test-arm.c b/arch/arm/probes/kprobes/test-arm.c index 977369f1aa48..a0dae35ffacd 100644 --- a/arch/arm/probes/kprobes/test-arm.c +++ b/arch/arm/probes/kprobes/test-arm.c @@ -55,25 +55,25 @@ void kprobe_arm_test_cases(void) TEST_GROUP("Data-processing (register), (register-shifted register), (immediate)") #define _DATA_PROCESSING_DNM(op,s,val) \ - TEST_RR( op "eq" s " r0, r",1, VAL1,", r",2, val, "") \ - TEST_RR( op "ne" s " r1, r",1, VAL1,", r",2, val, ", lsl #3") \ - TEST_RR( op "cs" s " r2, r",3, VAL1,", r",2, val, ", lsr #4") \ - TEST_RR( op "cc" s " r3, r",3, VAL1,", r",2, val, ", asr #5") \ - TEST_RR( op "mi" s " r4, r",5, VAL1,", r",2, N(val),", asr #6") \ - TEST_RR( op "pl" s " r5, r",5, VAL1,", r",2, val, ", ror #7") \ - TEST_RR( op "vs" s " r6, r",7, VAL1,", r",2, val, ", rrx") \ - TEST_R( op "vc" s " r6, r",7, VAL1,", pc, lsl #3") \ - TEST_R( op "vc" s " r6, r",7, VAL1,", sp, lsr #4") \ - TEST_R( op "vc" s " r6, pc, r",7, VAL1,", asr #5") \ - TEST_R( op "vc" s " r6, sp, r",7, VAL1,", ror #6") \ - TEST_RRR( op "hi" s " r8, r",9, VAL1,", r",14,val, ", lsl r",0, 3,"")\ - TEST_RRR( op "ls" s " r9, r",9, VAL1,", r",14,val, ", lsr r",7, 4,"")\ - TEST_RRR( op "ge" s " r10, r",11,VAL1,", 
r",14,val, ", asr r",7, 5,"")\ - TEST_RRR( op "lt" s " r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\ - TEST_RR( op "gt" s " r12, r13" ", r",14,val, ", ror r",14,7,"")\ - TEST_RR( op "le" s " r14, r",0, val, ", r13" ", lsl r",14,8,"")\ - TEST_R( op "eq" s " r0, r",11,VAL1,", #0xf5") \ - TEST_R( op "ne" s " r11, r",0, VAL1,", #0xf5000000") \ + TEST_RR( op s "eq r0, r",1, VAL1,", r",2, val, "") \ + TEST_RR( op s "ne r1, r",1, VAL1,", r",2, val, ", lsl #3") \ + TEST_RR( op s "cs r2, r",3, VAL1,", r",2, val, ", lsr #4") \ + TEST_RR( op s "cc r3, r",3, VAL1,", r",2, val, ", asr #5") \ + TEST_RR( op s "mi r4, r",5, VAL1,", r",2, N(val),", asr #6") \ + TEST_RR( op s "pl r5, r",5, VAL1,", r",2, val, ", ror #7") \ + TEST_RR( op s "vs r6, r",7, VAL1,", r",2, val, ", rrx") \ + TEST_R( op s "vc r6, r",7, VAL1,", pc, lsl #3") \ + TEST_R( op s "vc r6, r",7, VAL1,", sp, lsr #4") \ + TEST_R( op s "vc r6, pc, r",7, VAL1,", asr #5") \ + TEST_R( op s "vc r6, sp, r",7, VAL1,", ror #6") \ + TEST_RRR( op s "hi r8, r",9, VAL1,", r",14,val, ", lsl r",0, 3,"")\ + TEST_RRR( op s "ls r9, r",9, VAL1,", r",14,val, ", lsr r",7, 4,"")\ + TEST_RRR( op s "ge r10, r",11,VAL1,", r",14,val, ", asr r",7, 5,"")\ + TEST_RRR( op s "lt r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\ + TEST_RR( op s "gt r12, r13" ", r",14,val, ", ror r",14,7,"")\ + TEST_RR( op s "le r14, r",0, val, ", r13" ", lsl r",14,8,"")\ + TEST_R( op s "eq r0, r",11,VAL1,", #0xf5") \ + TEST_R( op s "ne r11, r",0, VAL1,", #0xf5000000") \ TEST_R( op s " r7, r",8, VAL2,", #0x000af000") \ TEST( op s " r4, pc" ", #0x00005a00") @@ -104,23 +104,23 @@ void kprobe_arm_test_cases(void) TEST_R( op " r",8, VAL2,", #0x000af000") #define _DATA_PROCESSING_DM(op,s,val) \ - TEST_R( op "eq" s " r0, r",1, val, "") \ - TEST_R( op "ne" s " r1, r",1, val, ", lsl #3") \ - TEST_R( op "cs" s " r2, r",3, val, ", lsr #4") \ - TEST_R( op "cc" s " r3, r",3, val, ", asr #5") \ - TEST_R( op "mi" s " r4, r",5, N(val),", asr #6") \ - TEST_R( op "pl" s " r5, r",5, val, ", ror #7") \ - TEST_R( op "vs" s " r6, r",10,val, ", rrx") \ - TEST( op "vs" s " r7, pc, lsl #3") \ - TEST( op "vs" s " r7, sp, lsr #4") \ - TEST_RR( op "vc" s " r8, r",7, val, ", lsl r",0, 3,"") \ - TEST_RR( op "hi" s " r9, r",9, val, ", lsr r",7, 4,"") \ - TEST_RR( op "ls" s " r10, r",9, val, ", asr r",7, 5,"") \ - TEST_RR( op "ge" s " r11, r",11,N(val),", asr r",7, 6,"") \ - TEST_RR( op "lt" s " r12, r",11,val, ", ror r",14,7,"") \ - TEST_R( op "gt" s " r14, r13" ", lsl r",14,8,"") \ - TEST( op "eq" s " r0, #0xf5") \ - TEST( op "ne" s " r11, #0xf5000000") \ + TEST_R( op s "eq r0, r",1, val, "") \ + TEST_R( op s "ne r1, r",1, val, ", lsl #3") \ + TEST_R( op s "cs r2, r",3, val, ", lsr #4") \ + TEST_R( op s "cc r3, r",3, val, ", asr #5") \ + TEST_R( op s "mi r4, r",5, N(val),", asr #6") \ + TEST_R( op s "pl r5, r",5, val, ", ror #7") \ + TEST_R( op s "vs r6, r",10,val, ", rrx") \ + TEST( op s "vs r7, pc, lsl #3") \ + TEST( op s "vs r7, sp, lsr #4") \ + TEST_RR( op s "vc r8, r",7, val, ", lsl r",0, 3,"") \ + TEST_RR( op s "hi r9, r",9, val, ", lsr r",7, 4,"") \ + TEST_RR( op s "ls r10, r",9, val, ", asr r",7, 5,"") \ + TEST_RR( op s "ge r11, r",11,N(val),", asr r",7, 6,"") \ + TEST_RR( op s "lt r12, r",11,val, ", ror r",14,7,"") \ + TEST_R( op s "gt r14, r13" ", lsl r",14,8,"") \ + TEST( op s "eq r0, #0xf5") \ + TEST( op s "ne r11, #0xf5000000") \ TEST( op s " r7, #0x000af000") \ TEST( op s " r4, #0x00005a00") @@ -166,10 +166,10 @@ void kprobe_arm_test_cases(void) /* Data-processing with PC as a target and status 
registers updated */ TEST_UNSUPPORTED("movs pc, r1") - TEST_UNSUPPORTED("movs pc, r1, lsl r2") + TEST_UNSUPPORTED(__inst_arm(0xe1b0f211) " @movs pc, r1, lsl r2") TEST_UNSUPPORTED("movs pc, #0x10000") TEST_UNSUPPORTED("adds pc, lr, r1") - TEST_UNSUPPORTED("adds pc, lr, r1, lsl r2") + TEST_UNSUPPORTED(__inst_arm(0xe09ef211) " @adds pc, lr, r1, lsl r2") TEST_UNSUPPORTED("adds pc, lr, #4") /* Data-processing with SP as target */ @@ -352,7 +352,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe000029f) " @ mul r0, pc, r2") TEST_UNSUPPORTED(__inst_arm(0xe0000f91) " @ mul r0, r1, pc") TEST_RR( "muls r0, r",1, VAL1,", r",2, VAL2,"") - TEST_RR( "mullss r7, r",8, VAL2,", r",9, VAL2,"") + TEST_RR( "mulsls r7, r",8, VAL2,", r",9, VAL2,"") TEST_R( "muls lr, r",4, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe01f0291) " @ muls pc, r1, r2") @@ -361,7 +361,7 @@ void kprobe_arm_test_cases(void) TEST_RR( "mla lr, r",1, VAL2,", r",2, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe02f3291) " @ mla pc, r1, r2, r3") TEST_RRR( "mlas r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") - TEST_RRR( "mlahis r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") + TEST_RRR( "mlashi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "mlas lr, r",1, VAL2,", r",2, VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe03f3291) " @ mlas pc, r1, r2, r3") @@ -394,7 +394,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe081f392) " @ umull pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe08f1392) " @ umull r1, pc, r2, r3") TEST_RR( "umulls r0, r1, r",2, VAL1,", r",3, VAL2,"") - TEST_RR( "umulllss r7, r8, r",9, VAL2,", r",10, VAL1,"") + TEST_RR( "umullsls r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_R( "umulls lr, r12, r",11,VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe091f392) " @ umulls pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe09f1392) " @ umulls r1, pc, r2, r3") @@ -405,7 +405,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0af1392) " @ umlal pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0a1f392) " @ umlal r1, pc, r2, r3") TEST_RRRR( "umlals r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) - TEST_RRRR( "umlalles r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) + TEST_RRRR( "umlalsle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRR( "umlals r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13") TEST_UNSUPPORTED(__inst_arm(0xe0bf1392) " @ umlals pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0b1f392) " @ umlals r1, pc, r2, r3") @@ -416,7 +416,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0c1f392) " @ smull pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0cf1392) " @ smull r1, pc, r2, r3") TEST_RR( "smulls r0, r1, r",2, VAL1,", r",3, VAL2,"") - TEST_RR( "smulllss r7, r8, r",9, VAL2,", r",10, VAL1,"") + TEST_RR( "smullsls r7, r8, r",9, VAL2,", r",10, VAL1,"") TEST_R( "smulls lr, r12, r",11,VAL3,", r13") TEST_UNSUPPORTED(__inst_arm(0xe0d1f392) " @ smulls pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0df1392) " @ smulls r1, pc, r2, r3") @@ -427,7 +427,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0ef1392) " @ smlal pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0e1f392) " @ smlal r1, pc, r2, r3") TEST_RRRR( "smlals r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4) - TEST_RRRR( "smlalles r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) + TEST_RRRR( "smlalsle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3) TEST_RRR( "smlals r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13") 
TEST_UNSUPPORTED(__inst_arm(0xe0ff1392) " @ smlals pc, r1, r2, r3") TEST_UNSUPPORTED(__inst_arm(0xe0f0f392) " @ smlals r0, pc, r2, r3") @@ -450,7 +450,7 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe10f0091) " @ swp r0, r1, [pc]") #if __LINUX_ARM_ARCH__ < 6 TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]") - TEST_R( "swpvsb r0, r",1,VAL1,", [sp]") + TEST_R( "swpbvs r0, r",1,VAL1,", [sp]") #else TEST_UNSUPPORTED(__inst_arm(0xe148e097) " @ swpb lr, r7, [r8]") TEST_UNSUPPORTED(__inst_arm(0x614d0091) " @ swpvsb r0, r1, [sp]") @@ -477,11 +477,11 @@ void kprobe_arm_test_cases(void) TEST_GROUP("Extra load/store instructions") TEST_RPR( "strh r",0, VAL1,", [r",1, 48,", -r",2, 24,"]") - TEST_RPR( "streqh r",14,VAL2,", [r",11,0, ", r",12, 48,"]") - TEST_UNSUPPORTED( "streqh r14, [r13, r12]") - TEST_UNSUPPORTED( "streqh r14, [r12, r13]") + TEST_RPR( "strheq r",14,VAL2,", [r",11,0, ", r",12, 48,"]") + TEST_UNSUPPORTED( "strheq r14, [r13, r12]") + TEST_UNSUPPORTED( "strheq r14, [r12, r13]") TEST_RPR( "strh r",1, VAL1,", [r",2, 24,", r",3, 48,"]!") - TEST_RPR( "strneh r",12,VAL2,", [r",11,48,", -r",10,24,"]!") + TEST_RPR( "strhne r",12,VAL2,", [r",11,48,", -r",10,24,"]!") TEST_RPR( "strh r",2, VAL1,", [r",3, 24,"], r",4, 48,"") TEST_RPR( "strh r",10,VAL2,", [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1afc0ba) " @ strh r12, [pc, r10]!") @@ -489,9 +489,9 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe089a0bf) " @ strh r10, [r9], pc") TEST_PR( "ldrh r0, [r",0, 48,", -r",2, 24,"]") - TEST_PR( "ldrcsh r14, [r",13,0, ", r",12, 48,"]") + TEST_PR( "ldrhcs r14, [r",13,0, ", r",12, 48,"]") TEST_PR( "ldrh r1, [r",2, 24,", r",3, 48,"]!") - TEST_PR( "ldrcch r12, [r",11,48,", -r",10,24,"]!") + TEST_PR( "ldrhcc r12, [r",11,48,", -r",10,24,"]!") TEST_PR( "ldrh r2, [r",3, 24,"], r",4, 48,"") TEST_PR( "ldrh r10, [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1bfc0ba) " @ ldrh r12, [pc, r10]!") @@ -499,9 +499,9 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe099a0bf) " @ ldrh r10, [r9], pc") TEST_RP( "strh r",0, VAL1,", [r",1, 24,", #-2]") - TEST_RP( "strmih r",14,VAL2,", [r",13,0, ", #2]") + TEST_RP( "strhmi r",14,VAL2,", [r",13,0, ", #2]") TEST_RP( "strh r",1, VAL1,", [r",2, 24,", #4]!") - TEST_RP( "strplh r",12,VAL2,", [r",11,24,", #-4]!") + TEST_RP( "strhpl r",12,VAL2,", [r",11,24,", #-4]!") TEST_RP( "strh r",2, VAL1,", [r",3, 24,"], #48") TEST_RP( "strh r",10,VAL2,", [r",9, 64,"], #-48") TEST_RP( "strh r",3, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") @@ -511,9 +511,9 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0c9f3b0) " @ strh pc, [r9], #48") TEST_P( "ldrh r0, [r",0, 24,", #-2]") - TEST_P( "ldrvsh r14, [r",13,0, ", #2]") + TEST_P( "ldrhvs r14, [r",13,0, ", #2]") TEST_P( "ldrh r1, [r",2, 24,", #4]!") - TEST_P( "ldrvch r12, [r",11,24,", #-4]!") + TEST_P( "ldrhvc r12, [r",11,24,", #-4]!") TEST_P( "ldrh r2, [r",3, 24,"], #48") TEST_P( "ldrh r10, [r",9, 64,"], #-48") TEST( "ldrh r0, [pc, #0]") @@ -521,18 +521,18 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0d9f3b0) " @ ldrh pc, [r9], #48") TEST_PR( "ldrsb r0, [r",0, 48,", -r",2, 24,"]") - TEST_PR( "ldrhisb r14, [r",13,0,", r",12, 48,"]") + TEST_PR( "ldrsbhi r14, [r",13,0,", r",12, 48,"]") TEST_PR( "ldrsb r1, [r",2, 24,", r",3, 48,"]!") - TEST_PR( "ldrlssb r12, [r",11,48,", -r",10,24,"]!") + TEST_PR( "ldrsbls r12, [r",11,48,", -r",10,24,"]!") TEST_PR( "ldrsb r2, [r",3, 24,"], r",4, 48,"") TEST_PR( "ldrsb r10, [r",9, 
48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1bfc0da) " @ ldrsb r12, [pc, r10]!") TEST_UNSUPPORTED(__inst_arm(0xe099f0db) " @ ldrsb pc, [r9], r11") TEST_P( "ldrsb r0, [r",0, 24,", #-1]") - TEST_P( "ldrgesb r14, [r",13,0, ", #1]") + TEST_P( "ldrsbge r14, [r",13,0, ", #1]") TEST_P( "ldrsb r1, [r",2, 24,", #4]!") - TEST_P( "ldrltsb r12, [r",11,24,", #-4]!") + TEST_P( "ldrsblt r12, [r",11,24,", #-4]!") TEST_P( "ldrsb r2, [r",3, 24,"], #48") TEST_P( "ldrsb r10, [r",9, 64,"], #-48") TEST( "ldrsb r0, [pc, #0]") @@ -540,18 +540,18 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe0d9f3d0) " @ ldrsb pc, [r9], #48") TEST_PR( "ldrsh r0, [r",0, 48,", -r",2, 24,"]") - TEST_PR( "ldrgtsh r14, [r",13,0, ", r",12, 48,"]") + TEST_PR( "ldrshgt r14, [r",13,0, ", r",12, 48,"]") TEST_PR( "ldrsh r1, [r",2, 24,", r",3, 48,"]!") - TEST_PR( "ldrlesh r12, [r",11,48,", -r",10,24,"]!") + TEST_PR( "ldrshle r12, [r",11,48,", -r",10,24,"]!") TEST_PR( "ldrsh r2, [r",3, 24,"], r",4, 48,"") TEST_PR( "ldrsh r10, [r",9, 48,"], -r",11,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1bfc0fa) " @ ldrsh r12, [pc, r10]!") TEST_UNSUPPORTED(__inst_arm(0xe099f0fb) " @ ldrsh pc, [r9], r11") TEST_P( "ldrsh r0, [r",0, 24,", #-1]") - TEST_P( "ldreqsh r14, [r",13,0 ,", #1]") + TEST_P( "ldrsheq r14, [r",13,0 ,", #1]") TEST_P( "ldrsh r1, [r",2, 24,", #4]!") - TEST_P( "ldrnesh r12, [r",11,24,", #-4]!") + TEST_P( "ldrshne r12, [r",11,24,", #-4]!") TEST_P( "ldrsh r2, [r",3, 24,"], #48") TEST_P( "ldrsh r10, [r",9, 64,"], #-48") TEST( "ldrsh r0, [pc, #0]") @@ -571,30 +571,30 @@ void kprobe_arm_test_cases(void) #if __LINUX_ARM_ARCH__ >= 5 TEST_RPR( "strd r",0, VAL1,", [r",1, 48,", -r",2,24,"]") - TEST_RPR( "strccd r",8, VAL2,", [r",11,0, ", r",12,48,"]") - TEST_UNSUPPORTED( "strccd r8, [r13, r12]") - TEST_UNSUPPORTED( "strccd r8, [r12, r13]") + TEST_RPR( "strdcc r",8, VAL2,", [r",11,0, ", r",12,48,"]") + TEST_UNSUPPORTED( "strdcc r8, [r13, r12]") + TEST_UNSUPPORTED( "strdcc r8, [r12, r13]") TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!") - TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!") - TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"") - TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"") + TEST_RPR( "strdcs r",12,VAL2,", r13, [r",11,48,", -r",10,24,"]!") + TEST_RPR( "strd r",2, VAL1,", r3, [r",5, 24,"], r",4,48,"") + TEST_RPR( "strd r",10,VAL2,", r11, [r",9, 48,"], -r",7,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1afc0fa) " @ strd r12, [pc, r10]!") TEST_PR( "ldrd r0, [r",0, 48,", -r",2,24,"]") - TEST_PR( "ldrmid r8, [r",13,0, ", r",12,48,"]") + TEST_PR( "ldrdmi r8, [r",13,0, ", r",12,48,"]") TEST_PR( "ldrd r4, [r",2, 24,", r",3, 48,"]!") - TEST_PR( "ldrpld r6, [r",11,48,", -r",10,24,"]!") - TEST_PR( "ldrd r2, [r",5, 24,"], r",4,48,"") - TEST_PR( "ldrd r10, [r",9,48,"], -r",7,24,"") + TEST_PR( "ldrdpl r6, [r",11,48,", -r",10,24,"]!") + TEST_PR( "ldrd r2, r3, [r",5, 24,"], r",4,48,"") + TEST_PR( "ldrd r10, r11, [r",9,48,"], -r",7,24,"") TEST_UNSUPPORTED(__inst_arm(0xe1afc0da) " @ ldrd r12, [pc, r10]!") TEST_UNSUPPORTED(__inst_arm(0xe089f0db) " @ ldrd pc, [r9], r11") TEST_UNSUPPORTED(__inst_arm(0xe089e0db) " @ ldrd lr, [r9], r11") TEST_UNSUPPORTED(__inst_arm(0xe089c0df) " @ ldrd r12, [r9], pc") TEST_RP( "strd r",0, VAL1,", [r",1, 24,", #-8]") - TEST_RP( "strvsd r",8, VAL2,", [r",13,0, ", #8]") + TEST_RP( "strdvs r",8, VAL2,", [r",13,0, ", #8]") TEST_RP( "strd r",4, VAL1,", [r",2, 24,", #16]!") - TEST_RP( "strvcd r",12,VAL2,", [r",11,24,", #-16]!") + TEST_RP( "strdvc r",12,VAL2,", r13, [r",11,24,", 
#-16]!") TEST_RP( "strd r",2, VAL1,", [r",4, 24,"], #48") TEST_RP( "strd r",10,VAL2,", [r",9, 64,"], #-48") TEST_RP( "strd r",6, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") @@ -603,9 +603,9 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(__inst_arm(0xe1efc3f0) " @ strd r12, [pc, #48]!") TEST_P( "ldrd r0, [r",0, 24,", #-8]") - TEST_P( "ldrhid r8, [r",13,0, ", #8]") + TEST_P( "ldrdhi r8, [r",13,0, ", #8]") TEST_P( "ldrd r4, [r",2, 24,", #16]!") - TEST_P( "ldrlsd r6, [r",11,24,", #-16]!") + TEST_P( "ldrdls r6, [r",11,24,", #-16]!") TEST_P( "ldrd r2, [r",5, 24,"], #48") TEST_P( "ldrd r10, [r",9,6,"], #-48") TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) " @ ldrd r12, [pc, #48]!") @@ -1084,63 +1084,63 @@ void kprobe_arm_test_cases(void) TEST_GROUP("Branch, branch with link, and block data transfer") TEST_P( "stmda r",0, 16*4,", {r0}") - TEST_P( "stmeqda r",4, 16*4,", {r0-r15}") - TEST_P( "stmneda r",8, 16*4,"!, {r8-r15}") + TEST_P( "stmdaeq r",4, 16*4,", {r0-r15}") + TEST_P( "stmdane r",8, 16*4,"!, {r8-r15}") TEST_P( "stmda r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmda r",13,0, "!, {pc}") TEST_P( "ldmda r",0, 16*4,", {r0}") - TEST_BF_P("ldmcsda r",4, 15*4,", {r0-r15}") - TEST_BF_P("ldmccda r",7, 15*4,"!, {r8-r15}") + TEST_BF_P("ldmdacs r",4, 15*4,", {r0-r15}") + TEST_BF_P("ldmdacc r",7, 15*4,"!, {r8-r15}") TEST_P( "ldmda r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmda r",14,15*4,"!, {pc}") TEST_P( "stmia r",0, 16*4,", {r0}") - TEST_P( "stmmiia r",4, 16*4,", {r0-r15}") - TEST_P( "stmplia r",8, 16*4,"!, {r8-r15}") + TEST_P( "stmiami r",4, 16*4,", {r0-r15}") + TEST_P( "stmiapl r",8, 16*4,"!, {r8-r15}") TEST_P( "stmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmia r",14,0, "!, {pc}") TEST_P( "ldmia r",0, 16*4,", {r0}") - TEST_BF_P("ldmvsia r",4, 0, ", {r0-r15}") - TEST_BF_P("ldmvcia r",7, 8*4, "!, {r8-r15}") + TEST_BF_P("ldmiavs r",4, 0, ", {r0-r15}") + TEST_BF_P("ldmiavc r",7, 8*4, "!, {r8-r15}") TEST_P( "ldmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmia r",14,15*4,"!, {pc}") TEST_P( "stmdb r",0, 16*4,", {r0}") - TEST_P( "stmhidb r",4, 16*4,", {r0-r15}") - TEST_P( "stmlsdb r",8, 16*4,"!, {r8-r15}") + TEST_P( "stmdbhi r",4, 16*4,", {r0-r15}") + TEST_P( "stmdbls r",8, 16*4,"!, {r8-r15}") TEST_P( "stmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmdb r",13,4, "!, {pc}") TEST_P( "ldmdb r",0, 16*4,", {r0}") - TEST_BF_P("ldmgedb r",4, 16*4,", {r0-r15}") - TEST_BF_P("ldmltdb r",7, 16*4,"!, {r8-r15}") + TEST_BF_P("ldmdbge r",4, 16*4,", {r0-r15}") + TEST_BF_P("ldmdblt r",7, 16*4,"!, {r8-r15}") TEST_P( "ldmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmdb r",14,16*4,"!, {pc}") TEST_P( "stmib r",0, 16*4,", {r0}") - TEST_P( "stmgtib r",4, 16*4,", {r0-r15}") - TEST_P( "stmleib r",8, 16*4,"!, {r8-r15}") + TEST_P( "stmibgt r",4, 16*4,", {r0-r15}") + TEST_P( "stmible r",8, 16*4,"!, {r8-r15}") TEST_P( "stmib r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_P( "stmib r",13,-4, "!, {pc}") TEST_P( "ldmib r",0, 16*4,", {r0}") - TEST_BF_P("ldmeqib r",4, -4,", {r0-r15}") - TEST_BF_P("ldmneib r",7, 7*4,"!, {r8-r15}") + TEST_BF_P("ldmibeq r",4, -4,", {r0-r15}") + TEST_BF_P("ldmibne r",7, 7*4,"!, {r8-r15}") TEST_P( "ldmib r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}") TEST_BF_P("ldmib r",14,14*4,"!, {pc}") TEST_P( "stmdb r",13,16*4,"!, {r3-r12,lr}") - TEST_P( "stmeqdb r",13,16*4,"!, {r3-r12}") - TEST_P( "stmnedb r",2, 16*4,", {r3-r12,lr}") + TEST_P( "stmdbeq r",13,16*4,"!, {r3-r12}") + TEST_P( "stmdbne r",2, 16*4,", {r3-r12,lr}") TEST_P( 
"stmdb r",13,16*4,"!, {r2-r12,lr}") TEST_P( "stmdb r",0, 16*4,", {r0-r12}") TEST_P( "stmdb r",0, 16*4,", {r0-r12,lr}") TEST_BF_P("ldmia r",13,5*4, "!, {r3-r12,pc}") - TEST_P( "ldmccia r",13,5*4, "!, {r3-r12}") - TEST_BF_P("ldmcsia r",2, 5*4, "!, {r3-r12,pc}") + TEST_P( "ldmiacc r",13,5*4, "!, {r3-r12}") + TEST_BF_P("ldmiacs r",2, 5*4, "!, {r3-r12,pc}") TEST_BF_P("ldmia r",13,4*4, "!, {r2-r12,pc}") TEST_P( "ldmia r",0, 16*4,", {r0-r12}") TEST_P( "ldmia r",0, 16*4,", {r0-r12,lr}") @@ -1174,80 +1174,80 @@ void kprobe_arm_test_cases(void) #define TEST_COPROCESSOR(code) TEST_UNSUPPORTED(code) #define COPROCESSOR_INSTRUCTIONS_ST_LD(two,cc) \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #4]") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #-4]") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #4]!") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #-4]!") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13], #4") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13], #-4") \ - TEST_COPROCESSOR("stc"two" 0, cr0, [r13], {1}") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #4]") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #-4]") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #4]!") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #-4]!") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], #4") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], #-4") \ - TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], {1}") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #4]") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #-4]") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #4]!") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #-4]!") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], #4") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], #-4") \ - TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], {1}") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #4]") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #-4]") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #4]!") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #-4]!") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], #4") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], #-4") \ - TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], {1}") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #4]") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #-4]") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #4]!") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #-4]!") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13], #4") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13], #-4") \ + TEST_COPROCESSOR("stc"two" p0, cr0, [r13], {1}") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #4]") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #-4]") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #4]!") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #-4]!") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], #4") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], #-4") \ + TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], {1}") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #4]") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #-4]") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #4]!") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #-4]!") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], #4") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], #-4") \ + TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], {1}") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #4]") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #-4]") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #4]!") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #-4]!") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], #4") \ + TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], #-4") \ + 
TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], {1}") \ \ - TEST_COPROCESSOR( "stc"two" 0, cr0, [r15, #4]") \ - TEST_COPROCESSOR( "stc"two" 0, cr0, [r15, #-4]") \ + TEST_COPROCESSOR( "stc"two" p0, cr0, [r15, #4]") \ + TEST_COPROCESSOR( "stc"two" p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##daf0001) " @ stc"two" 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d2f0001) " @ stc"two" 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##caf0001) " @ stc"two" 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c2f0001) " @ stc"two" 0, cr0, [r15], #-4") \ - TEST_COPROCESSOR( "stc"two" 0, cr0, [r15], {1}") \ - TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15, #4]") \ - TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15, #-4]") \ + TEST_COPROCESSOR( "stc"two" p0, cr0, [r15], {1}") \ + TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15, #4]") \ + TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##def0001) " @ stc"two"l 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d6f0001) " @ stc"two"l 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##cef0001) " @ stc"two"l 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c6f0001) " @ stc"two"l 0, cr0, [r15], #-4") \ - TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15], {1}") \ - TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15, #4]") \ - TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15, #-4]") \ + TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15], {1}") \ + TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15, #4]") \ + TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##dbf0001) " @ ldc"two" 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d3f0001) " @ ldc"two" 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##cbf0001) " @ ldc"two" 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c3f0001) " @ ldc"two" 0, cr0, [r15], #-4") \ - TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15], {1}") \ - TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15, #4]") \ - TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15, #-4]") \ + TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15], {1}") \ + TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15, #4]") \ + TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15, #-4]") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##dff0001) " @ ldc"two"l 0, cr0, [r15, #4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##d7f0001) " @ ldc"two"l 0, cr0, [r15, #-4]!") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##cff0001) " @ ldc"two"l 0, cr0, [r15], #4") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c7f0001) " @ ldc"two"l 0, cr0, [r15], #-4") \ - TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15], {1}") + TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15], {1}") #define COPROCESSOR_INSTRUCTIONS_MC_MR(two,cc) \ \ - TEST_COPROCESSOR( "mcrr"two" 0, 15, r0, r14, cr0") \ - TEST_COPROCESSOR( "mcrr"two" 15, 0, r14, r0, cr15") \ + TEST_COPROCESSOR( "mcrr"two" p0, 15, r0, r14, cr0") \ + TEST_COPROCESSOR( "mcrr"two" p15, 0, r14, r0, cr15") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c4f00f0) " @ mcrr"two" 0, 15, r0, r15, cr0") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c40ff0f) " @ mcrr"two" 15, 0, r15, r0, cr15") \ - TEST_COPROCESSOR( "mrrc"two" 0, 15, r0, r14, cr0") \ - TEST_COPROCESSOR( "mrrc"two" 15, 0, r14, r0, cr15") \ + TEST_COPROCESSOR( "mrrc"two" p0, 15, r0, r14, cr0") \ + TEST_COPROCESSOR( "mrrc"two" p15, 0, r14, r0, cr15") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c5f00f0) " @ mrrc"two" 0, 15, r0, r15, cr0") \ TEST_UNSUPPORTED(__inst_arm(0x##cc##c50ff0f) " @ mrrc"two" 15, 0, r15, r0, cr15") \ - TEST_COPROCESSOR( "cdp"two" 15, 15, cr15, cr15, cr15, 7") \ - 
TEST_COPROCESSOR( "cdp"two" 0, 0, cr0, cr0, cr0, 0") \ - TEST_COPROCESSOR( "mcr"two" 15, 7, r15, cr15, cr15, 7") \ - TEST_COPROCESSOR( "mcr"two" 0, 0, r0, cr0, cr0, 0") \ - TEST_COPROCESSOR( "mrc"two" 15, 7, r15, cr15, cr15, 7") \ - TEST_COPROCESSOR( "mrc"two" 0, 0, r0, cr0, cr0, 0") + TEST_COPROCESSOR( "cdp"two" p15, 15, cr15, cr15, cr15, 7") \ + TEST_COPROCESSOR( "cdp"two" p0, 0, cr0, cr0, cr0, 0") \ + TEST_COPROCESSOR( "mcr"two" p15, 7, r15, cr15, cr15, 7") \ + TEST_COPROCESSOR( "mcr"two" p0, 0, r0, cr0, cr0, 0") \ + TEST_COPROCESSOR( "mrc"two" p15, 7, r14, cr15, cr15, 7") \ + TEST_COPROCESSOR( "mrc"two" p0, 0, r0, cr0, cr0, 0") COPROCESSOR_INSTRUCTIONS_ST_LD("",e) #if __LINUX_ARM_ARCH__ >= 5 diff --git a/arch/arm/probes/kprobes/test-core.h b/arch/arm/probes/kprobes/test-core.h index 19a5b2add41e..f1d5583e7bbb 100644 --- a/arch/arm/probes/kprobes/test-core.h +++ b/arch/arm/probes/kprobes/test-core.h @@ -108,6 +108,7 @@ struct test_arg_end { #define TESTCASE_START(title) \ __asm__ __volatile__ ( \ + ".syntax unified \n\t" \ "bl __kprobes_test_case_start \n\t" \ ".pushsection .rodata \n\t" \ "10: \n\t" \ diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile index 3654f979851b..87de1f63f649 100644 --- a/arch/arm/tools/Makefile +++ b/arch/arm/tools/Makefile @@ -8,16 +8,15 @@ gen := arch/$(ARCH)/include/generated kapi := $(gen)/asm uapi := $(gen)/uapi/asm -syshdr := $(srctree)/$(src)/syscallhdr.sh +syshdr := $(srctree)/scripts/syscallhdr.sh sysnr := $(srctree)/$(src)/syscallnr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh syscall := $(src)/syscall.tbl gen-y := $(gen)/calls-oabi.S gen-y += $(gen)/calls-eabi.S kapi-hdrs-y := $(kapi)/unistd-nr.h kapi-hdrs-y += $(kapi)/mach-types.h -uapi-hdrs-y := $(uapi)/unistd-common.h uapi-hdrs-y += $(uapi)/unistd-oabi.h uapi-hdrs-y += $(uapi)/unistd-eabi.h @@ -41,28 +40,21 @@ $(kapi)/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE $(call if_changed,gen_mach) quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abi_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '__NR_SYSCALL_BASE' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis $(abis) \ + --offset __NR_SYSCALL_BASE $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abi_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@ quiet_cmd_sysnr = SYSNR $@ cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \ '$(syshdr_abi_$(basetarget))' -syshdr_abi_unistd-common := common -$(uapi)/unistd-common.h: $(syscall) $(syshdr) FORCE - $(call if_changed,syshdr) - -syshdr_abi_unistd-oabi := oabi +$(uapi)/unistd-oabi.h: abis := common,oabi $(uapi)/unistd-oabi.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -syshdr_abi_unistd-eabi := eabi +$(uapi)/unistd-eabi.h: abis := common,eabi $(uapi)/unistd-eabi.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) @@ -70,10 +62,10 @@ sysnr_abi_unistd-nr := common,oabi,eabi,compat $(kapi)/unistd-nr.h: $(syscall) $(sysnr) FORCE $(call if_changed,sysnr) -systbl_abi_calls-oabi := common,oabi +$(gen)/calls-oabi.S: abis := common,oabi $(gen)/calls-oabi.S: $(syscall) $(systbl) FORCE $(call if_changed,systbl) -systbl_abi_calls-eabi := common,eabi +$(gen)/calls-eabi.S: abis := common,eabi $(gen)/calls-eabi.S: $(syscall) $(systbl) FORCE $(call if_changed,systbl) diff --git a/arch/arm/tools/syscallhdr.sh b/arch/arm/tools/syscallhdr.sh deleted file mode 100644 index 6b2f25cdd721..000000000000 --- 
a/arch/arm/tools/syscallhdr.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_ASM_ARM_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -if echo $out | grep -q uapi; then - fileguard="_UAPI$fileguard" -fi -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - echo "#ifndef ${fileguard}" - echo "#define ${fileguard} 1" - echo "" - - while read nr abi name entry ; do - if [ -z "$offset" ]; then - echo "#define __NR_${prefix}${name} $nr" - else - echo "#define __NR_${prefix}${name} ($offset + $nr)" - fi - done - - echo "" - echo "#endif /* ${fileguard} */" -) > "$out" diff --git a/arch/arm/tools/syscalltbl.sh b/arch/arm/tools/syscalltbl.sh deleted file mode 100644 index ae7e93cfbfd3..000000000000 --- a/arch/arm/tools/syscalltbl.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - while read nr abi name entry compat; do - if [ "$abi" = "eabi" -a -n "$compat" ]; then - echo "$in: error: a compat entry for an EABI syscall ($name) makes no sense" >&2 - exit 1 - fi - - if [ -n "$entry" ]; then - if [ -z "$compat" ]; then - echo "NATIVE($nr, $entry)" - else - echo "COMPAT($nr, $entry, $compat)" - fi - fi - done -) > "$out" diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index e1b12b242a32..f8f07469d259 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -152,7 +152,7 @@ static int __init xen_mm_init(void) struct gnttab_cache_flush cflush; if (!xen_swiotlb_detect()) return 0; - xen_swiotlb_init(1, false); + xen_swiotlb_init(); cflush.op = 0; cflush.a.dev_bus_addr = 0; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7f2a80091337..f0b17d758912 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -11,6 +11,12 @@ config ARM64 select ACPI_PPTT if ACPI select ARCH_HAS_DEBUG_WX select ARCH_BINFMT_ELF_STATE + select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE + select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2 + select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE + select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DMA_PREP_COHERENT @@ -72,6 +78,7 @@ config ARM64 select ARCH_USE_QUEUED_SPINLOCKS select ARCH_USE_SYM_ANNOTATIONS select ARCH_SUPPORTS_DEBUG_PAGEALLOC + select ARCH_SUPPORTS_HUGETLBFS select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK select ARCH_SUPPORTS_LTO_CLANG if CPU_LITTLE_ENDIAN @@ -213,6 +220,7 @@ config ARM64 select SWIOTLB select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK + select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD help ARM 64-bit (AArch64) Linux support. 
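The HAVE_ARCH_USERFAULTFD_MINOR select in the hunk above is the piece that lets arm64 offer userfaultfd minor faults. For orientation only, here is a minimal userspace sketch of what that feature exposes; it assumes the 5.13-era uapi in <linux/userfaultfd.h> (UFFD_FEATURE_MINOR_HUGETLBFS, UFFDIO_REGISTER_MODE_MINOR, UFFDIO_CONTINUE) and a hugetlbfs-backed mapping, omits all error handling, and is not taken from this series:

/* Hedged sketch of userfaultfd minor-fault registration/resolution.
 * Assumes 5.13 uapi headers and a hugetlbfs-backed mapping at 'addr'.
 */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int uffd_minor_register(void *addr, unsigned long len)
{
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_MINOR_HUGETLBFS, /* 5.13: hugetlbfs only */
	};
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)addr, .len = len },
		.mode  = UFFDIO_REGISTER_MODE_MINOR,
	};

	ioctl(uffd, UFFDIO_API, &api);      /* error handling omitted */
	ioctl(uffd, UFFDIO_REGISTER, &reg);
	return uffd;
}

/* Once the monitor has made the page cache contents current for the
 * faulting range, UFFDIO_CONTINUE maps the already-present page and
 * wakes the faulting thread; no new page is allocated or copied.
 */
static void uffd_minor_resolve(int uffd, unsigned long start, unsigned long len)
{
	struct uffdio_continue cont = {
		.range = { .start = start, .len = len },
	};

	ioctl(uffd, UFFDIO_CONTINUE, &cont);
}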
@@ -308,10 +316,7 @@ config ZONE_DMA32 bool "Support DMA32 zone" if EXPERT default y -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - -config ARCH_ENABLE_MEMORY_HOTREMOVE +config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE def_bool y config SMP @@ -1070,18 +1075,9 @@ config HW_PERF_EVENTS def_bool y depends on ARM_PMU -config SYS_SUPPORTS_HUGETLBFS - def_bool y - -config ARCH_HAS_CACHE_LINE_SIZE - def_bool y - config ARCH_HAS_FILTER_PGPROT def_bool y -config ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y if PGTABLE_LEVELS > 2 - # Supported by clang >= 7.0 config CC_HAVE_SHADOW_CALL_STACK def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18) @@ -1923,14 +1919,6 @@ config SYSVIPC_COMPAT def_bool y depends on COMPAT && SYSVIPC -config ARCH_ENABLE_HUGEPAGE_MIGRATION - def_bool y - depends on HUGETLB_PAGE && MIGRATION - -config ARCH_ENABLE_THP_MIGRATION - def_bool y - depends on TRANSPARENT_HUGEPAGE - menu "Power management options" source "kernel/power/Kconfig" diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi index 242f821a90ba..dfc6376171d0 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi @@ -558,7 +558,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm0_pin>; clocks = <&cru PCLK_PWM1>; - clock-names = "pwm"; status = "disabled"; }; @@ -569,7 +568,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm1_pin>; clocks = <&cru PCLK_PWM1>; - clock-names = "pwm"; status = "disabled"; }; @@ -578,7 +576,6 @@ reg = <0x0 0xff680020 0x0 0x10>; #pwm-cells = <3>; clocks = <&cru PCLK_PWM1>; - clock-names = "pwm"; status = "disabled"; }; @@ -589,7 +586,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm3_pin>; clocks = <&cru PCLK_PWM1>; - clock-names = "pwm"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index 0f2879cc1a66..634a91af8e83 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi @@ -1182,7 +1182,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm0_pin>; clocks = <&pmucru PCLK_RKPWM_PMU>; - clock-names = "pwm"; status = "disabled"; }; @@ -1193,7 +1192,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm1_pin>; clocks = <&pmucru PCLK_RKPWM_PMU>; - clock-names = "pwm"; status = "disabled"; }; @@ -1204,7 +1202,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm2_pin>; clocks = <&pmucru PCLK_RKPWM_PMU>; - clock-names = "pwm"; status = "disabled"; }; @@ -1215,7 +1212,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pwm3a_pin>; clocks = <&pmucru PCLK_RKPWM_PMU>; - clock-names = "pwm"; status = "disabled"; }; diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 55ecf6de9ff7..58987a98e179 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -252,7 +252,7 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, set_pte(ptep, pte); } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgdp; @@ -284,9 +284,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, */ ptep = pte_alloc_map(mm, pmdp, addr); } else if (sz == PMD_SIZE) { - if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && - pud_none(READ_ONCE(*pudp))) - ptep = huge_pmd_share(mm, addr, pudp); + if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp))) + ptep = huge_pmd_share(mm, vma, addr, pudp); else ptep = (pte_t *)pmd_alloc(mm, pudp, addr); } else if (sz == (CONT_PMD_SIZE)) { diff 
--git a/arch/hexagon/Makefile b/arch/hexagon/Makefile index c168c6980d05..74b644ea8a00 100644 --- a/arch/hexagon/Makefile +++ b/arch/hexagon/Makefile @@ -10,6 +10,9 @@ LDFLAGS_vmlinux += -G0 # Do not use single-byte enums; these will overflow. KBUILD_CFLAGS += -fno-short-enums +# We must use long-calls: +KBUILD_CFLAGS += -mlong-calls + # Modules must use either long-calls, or use pic/plt. # Use long-calls for now, it's easier. And faster. # KBUILD_CFLAGS_MODULE += -fPIC @@ -30,9 +33,6 @@ TIR_NAME := r19 KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__ KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME) -LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name 2>/dev/null) -libs-y += $(LIBGCC) - head-y := arch/hexagon/kernel/head.o core-y += arch/hexagon/kernel/ \ diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig index f19ae2ab0aaa..f70daa038e85 100644 --- a/arch/hexagon/configs/comet_defconfig +++ b/arch/hexagon/configs/comet_defconfig @@ -81,4 +81,3 @@ CONFIG_FRAME_WARN=0 CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set -CONFIG_DEBUG_INFO=y diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h index 6b9c554aee78..9fb00a0ae89f 100644 --- a/arch/hexagon/include/asm/futex.h +++ b/arch/hexagon/include/asm/futex.h @@ -21,7 +21,7 @@ "3:\n" \ ".section .fixup,\"ax\"\n" \ "4: %1 = #%5;\n" \ - " jump 3b\n" \ + " jump ##3b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ ".long 1b,4b,2b,4b\n" \ @@ -90,7 +90,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, "3:\n" ".section .fixup,\"ax\"\n" "4: %0 = #%6\n" - " jump 3b\n" + " jump ##3b\n" ".previous\n" ".section __ex_table,\"a\"\n" ".long 1b,4b,2b,4b\n" diff --git a/arch/hexagon/include/asm/timex.h b/arch/hexagon/include/asm/timex.h index 78338d8ada83..8d4ec76fceb4 100644 --- a/arch/hexagon/include/asm/timex.h +++ b/arch/hexagon/include/asm/timex.h @@ -8,6 +8,7 @@ #include <asm-generic/timex.h> #include <asm/timer-regs.h> +#include <asm/hexagon_vm.h> /* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. 
*/ #define CLOCK_TICK_RATE TCX0_CLK_RATE @@ -16,7 +17,7 @@ static inline int read_current_timer(unsigned long *timer_val) { - *timer_val = (unsigned long) __vmgettime(); + *timer_val = __vmgettime(); return 0; } diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c index 6fb1aaab1c29..35545a7386a0 100644 --- a/arch/hexagon/kernel/hexagon_ksyms.c +++ b/arch/hexagon/kernel/hexagon_ksyms.c @@ -35,8 +35,8 @@ EXPORT_SYMBOL(_dflt_cache_att); DECLARE_EXPORT(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes); /* Additional functions */ -DECLARE_EXPORT(__divsi3); -DECLARE_EXPORT(__modsi3); -DECLARE_EXPORT(__udivsi3); -DECLARE_EXPORT(__umodsi3); +DECLARE_EXPORT(__hexagon_divsi3); +DECLARE_EXPORT(__hexagon_modsi3); +DECLARE_EXPORT(__hexagon_udivsi3); +DECLARE_EXPORT(__hexagon_umodsi3); DECLARE_EXPORT(csum_tcpudp_magic); diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c index a5a89e944257..8975f9b4cedf 100644 --- a/arch/hexagon/kernel/ptrace.c +++ b/arch/hexagon/kernel/ptrace.c @@ -35,7 +35,7 @@ void user_disable_single_step(struct task_struct *child) static int genregs_get(struct task_struct *target, const struct user_regset *regset, - srtuct membuf to) + struct membuf to) { struct pt_regs *regs = task_pt_regs(target); @@ -54,7 +54,7 @@ static int genregs_get(struct task_struct *target, membuf_store(&to, regs->m0); membuf_store(&to, regs->m1); membuf_store(&to, regs->usr); - membuf_store(&to, regs->p3_0); + membuf_store(&to, regs->preds); membuf_store(&to, regs->gp); membuf_store(&to, regs->ugp); membuf_store(&to, pt_elr(regs)); // pc diff --git a/arch/hexagon/lib/Makefile b/arch/hexagon/lib/Makefile index 54be529d17a2..a64641e89d5f 100644 --- a/arch/hexagon/lib/Makefile +++ b/arch/hexagon/lib/Makefile @@ -2,4 +2,5 @@ # # Makefile for hexagon-specific library files. # -obj-y = checksum.o io.o memcpy.o memset.o +obj-y = checksum.o io.o memcpy.o memset.o memcpy_likely_aligned.o \ + divsi3.o modsi3.o udivsi3.o umodsi3.o diff --git a/arch/hexagon/lib/divsi3.S b/arch/hexagon/lib/divsi3.S new file mode 100644 index 000000000000..783e09424c2c --- /dev/null +++ b/arch/hexagon/lib/divsi3.S @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_divsi3) + { + p0 = cmp.gt(r0,#-1) + p1 = cmp.gt(r1,#-1) + r3:2 = vabsw(r1:0) + } + { + p3 = xor(p0,p1) + r4 = sub(r2,r3) + r6 = cl0(r2) + p0 = cmp.gtu(r3,r2) + } + { + r0 = mux(p3,#-1,#1) + r7 = cl0(r3) + p1 = cmp.gtu(r3,r4) + } + { + r0 = mux(p0,#0,r0) + p0 = or(p0,p1) + if (p0.new) jumpr:nt r31 + r6 = sub(r7,r6) + } + { + r7 = r6 + r5:4 = combine(#1,r3) + r6 = add(#1,lsr(r6,#1)) + p0 = cmp.gtu(r6,#4) + } + { + r5:4 = vaslw(r5:4,r7) + if (!p0) r6 = #3 + } + { + loop0(1f,r6) + r7:6 = vlsrw(r5:4,#1) + r1:0 = #0 + } + .falign +1: + { + r5:4 = vlsrw(r5:4,#2) + if (!p0.new) r0 = add(r0,r5) + if (!p0.new) r2 = sub(r2,r4) + p0 = cmp.gtu(r4,r2) + } + { + r7:6 = vlsrw(r7:6,#2) + if (!p0.new) r0 = add(r0,r7) + if (!p0.new) r2 = sub(r2,r6) + p0 = cmp.gtu(r6,r2) + }:endloop0 + { + if (!p0) r0 = add(r0,r7) + } + { + if (p3) r0 = sub(r1,r0) + jumpr r31 + } +SYM_FUNC_END(__hexagon_divsi3) diff --git a/arch/hexagon/lib/memcpy_likely_aligned.S b/arch/hexagon/lib/memcpy_likely_aligned.S new file mode 100644 index 000000000000..6a541fb90a54 --- /dev/null +++ b/arch/hexagon/lib/memcpy_likely_aligned.S @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes) + { + p0 = bitsclr(r1,#7) + p0 = bitsclr(r0,#7) + if (p0.new) r5:4 = memd(r1) + if (p0.new) r7:6 = memd(r1+#8) + } + { + if (!p0) jump:nt .Lmemcpy_call + if (p0) r9:8 = memd(r1+#16) + if (p0) r11:10 = memd(r1+#24) + p0 = cmp.gtu(r2,#64) + } + { + if (p0) jump:nt .Lmemcpy_call + if (!p0) memd(r0) = r5:4 + if (!p0) memd(r0+#8) = r7:6 + p0 = cmp.gtu(r2,#32) + } + { + p1 = cmp.gtu(r2,#40) + p2 = cmp.gtu(r2,#48) + if (p0) r13:12 = memd(r1+#32) + if (p1.new) r15:14 = memd(r1+#40) + } + { + memd(r0+#16) = r9:8 + memd(r0+#24) = r11:10 + } + { + if (p0) memd(r0+#32) = r13:12 + if (p1) memd(r0+#40) = r15:14 + if (!p2) jumpr:t r31 + } + { + p0 = cmp.gtu(r2,#56) + r5:4 = memd(r1+#48) + if (p0.new) r7:6 = memd(r1+#56) + } + { + memd(r0+#48) = r5:4 + if (p0) memd(r0+#56) = r7:6 + jumpr r31 + } + +.Lmemcpy_call: + jump memcpy + +SYM_FUNC_END(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes) diff --git a/arch/hexagon/lib/modsi3.S b/arch/hexagon/lib/modsi3.S new file mode 100644 index 000000000000..9ea1c86efac2 --- /dev/null +++ b/arch/hexagon/lib/modsi3.S @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_modsi3) + { + p2 = cmp.ge(r0,#0) + r2 = abs(r0) + r1 = abs(r1) + } + { + r3 = cl0(r2) + r4 = cl0(r1) + p0 = cmp.gtu(r1,r2) + } + { + r3 = sub(r4,r3) + if (p0) jumpr r31 + } + { + p1 = cmp.eq(r3,#0) + loop0(1f,r3) + r0 = r2 + r2 = lsl(r1,r3) + } + .falign +1: + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r2) + r2 = lsr(r2,#1) + if (p1) r1 = #0 + }:endloop0 + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r1) + if (p2) jumpr r31 + } + { + r0 = neg(r0) + jumpr r31 + } +SYM_FUNC_END(__hexagon_modsi3) diff --git a/arch/hexagon/lib/udivsi3.S b/arch/hexagon/lib/udivsi3.S new file mode 100644 index 000000000000..477f27b9311c --- /dev/null +++ b/arch/hexagon/lib/udivsi3.S @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_udivsi3) + { + r2 = cl0(r0) + r3 = cl0(r1) + r5:4 = combine(#1,#0) + p0 = cmp.gtu(r1,r0) + } + { + r6 = sub(r3,r2) + r4 = r1 + r1:0 = combine(r0,r4) + if (p0) jumpr r31 + } + { + r3:2 = vlslw(r5:4,r6) + loop0(1f,r6) + } + .falign +1: + { + p0 = cmp.gtu(r2,r1) + if (!p0.new) r1 = sub(r1,r2) + if (!p0.new) r0 = add(r0,r3) + r3:2 = vlsrw(r3:2,#1) + }:endloop0 + { + p0 = cmp.gtu(r2,r1) + if (!p0.new) r0 = add(r0,r3) + jumpr r31 + } +SYM_FUNC_END(__hexagon_udivsi3) diff --git a/arch/hexagon/lib/umodsi3.S b/arch/hexagon/lib/umodsi3.S new file mode 100644 index 000000000000..280bf06a55e7 --- /dev/null +++ b/arch/hexagon/lib/umodsi3.S @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#include <linux/linkage.h> + +SYM_FUNC_START(__hexagon_umodsi3) + { + r2 = cl0(r0) + r3 = cl0(r1) + p0 = cmp.gtu(r1,r0) + } + { + r2 = sub(r3,r2) + if (p0) jumpr r31 + } + { + loop0(1f,r2) + p1 = cmp.eq(r2,#0) + r2 = lsl(r1,r2) + } + .falign +1: + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r2) + r2 = lsr(r2,#1) + if (p1) r1 = #0 + }:endloop0 + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r1) + jumpr r31 + } +SYM_FUNC_END(__hexagon_umodsi3) diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 81e2b893b1e7..279252e3e0f7 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -13,6 +13,8 @@ config IA64 select ARCH_MIGHT_HAVE_PC_SERIO select ACPI select ACPI_NUMA if NUMA + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_SUPPORTS_ACPI select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI @@ -32,6 +34,7 @@ config IA64 select TTY select HAVE_ARCH_TRACEHOOK select HAVE_VIRT_CPU_ACCOUNTING + select HUGETLB_PAGE_SIZE_VARIABLE if HUGETLB_PAGE select VIRT_TO_BUS select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP @@ -82,11 +85,6 @@ config STACKTRACE_SUPPORT config GENERIC_LOCKBREAK def_bool n -config HUGETLB_PAGE_SIZE_VARIABLE - bool - depends on HUGETLB_PAGE - default y - config GENERIC_CALIBRATE_DELAY bool default y @@ -250,12 +248,6 @@ config HOTPLUG_CPU can be controlled through /sys/devices/system/cpu/cpu#. Say N if you want to disable CPU hotplug. 
-config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - config SCHED_SMT bool "SMT scheduler support" depends on SMP diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index b331f94d20ac..f993cb36c062 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -25,7 +25,8 @@ unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT; EXPORT_SYMBOL(hpage_shift); pte_t * -huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) +huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { unsigned long taddr = htlbpage_to_page(addr); pgd_t *pgd; diff --git a/arch/m68k/coldfire/intc-simr.c b/arch/m68k/coldfire/intc-simr.c index 15c4b7a6e38f..f7c2c41b3156 100644 --- a/arch/m68k/coldfire/intc-simr.c +++ b/arch/m68k/coldfire/intc-simr.c @@ -68,9 +68,9 @@ static void intc_irq_mask(struct irq_data *d) { unsigned int irq = d->irq - MCFINT_VECBASE; - if (MCFINTC2_SIMR && (irq > 128)) + if (MCFINTC2_SIMR && (irq > 127)) __raw_writeb(irq - 128, MCFINTC2_SIMR); - else if (MCFINTC1_SIMR && (irq > 64)) + else if (MCFINTC1_SIMR && (irq > 63)) __raw_writeb(irq - 64, MCFINTC1_SIMR); else __raw_writeb(irq, MCFINTC0_SIMR); @@ -80,9 +80,9 @@ static void intc_irq_unmask(struct irq_data *d) { unsigned int irq = d->irq - MCFINT_VECBASE; - if (MCFINTC2_CIMR && (irq > 128)) + if (MCFINTC2_CIMR && (irq > 127)) __raw_writeb(irq - 128, MCFINTC2_CIMR); - else if (MCFINTC1_CIMR && (irq > 64)) + else if (MCFINTC1_CIMR && (irq > 63)) __raw_writeb(irq - 64, MCFINTC1_CIMR); else __raw_writeb(irq, MCFINTC0_CIMR); @@ -115,9 +115,9 @@ static unsigned int intc_irq_startup(struct irq_data *d) } irq -= MCFINT_VECBASE; - if (MCFINTC2_ICR0 && (irq > 128)) + if (MCFINTC2_ICR0 && (irq > 127)) __raw_writeb(5, MCFINTC2_ICR0 + irq - 128); - else if (MCFINTC1_ICR0 && (irq > 64)) + else if (MCFINTC1_ICR0 && (irq > 63)) __raw_writeb(5, MCFINTC1_ICR0 + irq - 64); else __raw_writeb(5, MCFINTC0_ICR0 + irq); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 49a3c9cd1cb2..ed51970c08e7 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -19,6 +19,7 @@ config MIPS select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS + select ARCH_SUPPORTS_HUGETLBFS if CPU_SUPPORTS_HUGEPAGES select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WANT_LD_ORPHAN_WARN @@ -1287,11 +1288,6 @@ config SYS_SUPPORTS_BIG_ENDIAN config SYS_SUPPORTS_LITTLE_ENDIAN bool -config SYS_SUPPORTS_HUGETLBFS - bool - depends on CPU_SUPPORTS_HUGEPAGES - default y - config MIPS_HUGE_TLB_SUPPORT def_bool HUGETLB_PAGE || TRANSPARENT_HUGEPAGE diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c index b9f76f433617..7eaff5b07873 100644 --- a/arch/mips/mm/hugetlbpage.c +++ b/arch/mips/mm/hugetlbpage.c @@ -21,8 +21,8 @@ #include <asm/tlb.h> #include <asm/tlbflush.h> -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, - unsigned long sz) +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { pgd_t *pgd; p4d_t *p4d; diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index afc3b8d03572..bde9907bc5b2 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -12,6 +12,7 @@ config PARISC select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_NO_SG_CHAIN + select ARCH_SUPPORTS_HUGETLBFS if PA20 select ARCH_SUPPORTS_MEMORY_FAILURE select DMA_OPS select RTC_CLASS 
@@ -138,10 +139,6 @@ config PGTABLE_LEVELS default 3 if 64BIT && PARISC_PAGE_SIZE_4KB default 2 -config SYS_SUPPORTS_HUGETLBFS - def_bool y if PA20 - - menu "Processor type and features" choice diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index 43652de5f139..d1d3990b83f6 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -44,7 +44,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 1e6230bea09d..cb2d44ee4e38 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -118,6 +118,8 @@ config PPC # Please keep this list sorted alphabetically. # select ARCH_32BIT_OFF_T if PPC32 + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEVMEM_IS_ALLOWED @@ -236,6 +238,7 @@ config PPC select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HUGETLB_PAGE_SIZE_VARIABLE if PPC_BOOK3S_64 && HUGETLB_PAGE select MMU_GATHER_RCU_TABLE_FREE select MMU_GATHER_PAGE_SIZE select HAVE_REGS_AND_STACK_ACCESS_API @@ -420,11 +423,6 @@ config HIGHMEM source "kernel/Kconfig.hz" -config HUGETLB_PAGE_SIZE_VARIABLE - bool - depends on HUGETLB_PAGE && PPC_BOOK3S_64 - default y - config MATH_EMULATION bool "Math emulation" depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE @@ -520,12 +518,6 @@ config ARCH_CPU_PROBE_RELEASE def_bool y depends on HOTPLUG_CPU -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - config PPC64_SUPPORTS_MEMORY_FAILURE bool "Add support for memory hwpoison" depends on PPC_BOOK3S_64 @@ -705,9 +697,6 @@ config ARCH_SPARSEMEM_DEFAULT def_bool y depends on PPC_BOOK3S_64 -config SYS_SUPPORTS_HUGETLBFS - bool - config ILLEGAL_POINTER_VALUE hex # This is roughly half way between the top of user space and the bottom diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index d142b76d507d..9a75ba078e1b 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -106,7 +106,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, * At this point we do the placement change only for BOOK3S 64. This would * possibly work on other subarchs. 
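 *
 * (Note: with this series, huge_pte_alloc() also takes the faulting
 * VMA. A generic caller then looks roughly like this sketch, cf.
 * hugetlb_fault() in mm/hugetlb.c:
 *
 *	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
 *
 * where haddr is the huge-page-aligned faulting address.)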
*/ -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { pgd_t *pg; p4d_t *p4; diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index e4b05667686e..f998e655b570 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -40,8 +40,8 @@ config PPC_85xx config PPC_8xx bool "Freescale 8xx" + select ARCH_SUPPORTS_HUGETLBFS select FSL_SOC - select SYS_SUPPORTS_HUGETLBFS select PPC_HAVE_KUEP select PPC_HAVE_KUAP select HAVE_ARCH_VMAP_STACK @@ -95,9 +95,11 @@ config PPC_BOOK3S_64 bool "Server processors" select PPC_FPU select PPC_HAVE_PMU_SUPPORT - select SYS_SUPPORTS_HUGETLBFS select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION + select ARCH_ENABLE_PMD_SPLIT_PTLOCK select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE + select ARCH_SUPPORTS_HUGETLBFS select ARCH_SUPPORTS_NUMA_BALANCING select IRQ_WORK select PPC_MM_SLICES @@ -280,9 +282,9 @@ config FSL_BOOKE # this is for common code between PPC32 & PPC64 FSL BOOKE config PPC_FSL_BOOK3E bool + select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 select FSL_EMB_PERFMON select PPC_SMP_MUXED_IPI - select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 select PPC_DOORBELL default y if FSL_BOOKE @@ -358,10 +360,6 @@ config SPE If in doubt, say Y here. -config ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y - depends on PPC_BOOK3S_64 - config PPC_RADIX_MMU bool "Radix MMU Support" depends on PPC_BOOK3S_64 @@ -421,10 +419,6 @@ config PPC_PKEY depends on PPC_BOOK3S_64 depends on PPC_MEM_KEYS || PPC_KUAP || PPC_KUEP -config ARCH_ENABLE_HUGEPAGE_MIGRATION - def_bool y - depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION - config PPC_MMU_NOHASH def_bool y diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c index 7b739cc7a8a9..1d829e257996 100644 --- a/arch/powerpc/platforms/pseries/svm.c +++ b/arch/powerpc/platforms/pseries/svm.c @@ -55,9 +55,9 @@ void __init svm_swiotlb_init(void) if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false)) return; - if (io_tlb_start) - memblock_free_early(io_tlb_start, - PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); + + memblock_free_early(__pa(vstart), + PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); panic("SVM: Cannot allocate SWIOTLB buffer"); } diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 4515a10c5d22..a8ad8eb76120 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -20,6 +20,7 @@ config RISCV select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEBUG_VIRTUAL if MMU select ARCH_HAS_DEBUG_WX + select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_KCOV @@ -27,18 +28,23 @@ config RISCV select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_DIRECT_MAP select ARCH_HAS_SET_MEMORY - select ARCH_HAS_STRICT_KERNEL_RWX if MMU + select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL + select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT + select ARCH_SUPPORTS_HUGETLBFS if MMU select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_HUGE_PMD_SHARE if 64BIT + select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU select CLONE_BACKWARDS select 
CLINT_TIMER if !MMU select COMMON_CLK select EDAC_SUPPORT select GENERIC_ARCH_TOPOLOGY if SMP select GENERIC_ATOMIC64 if !64BIT + select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_EARLY_IOREMAP select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO select GENERIC_IOREMAP
@@ -165,10 +171,6 @@ config ARCH_WANT_GENERAL_HUGETLB config ARCH_SUPPORTS_UPROBES def_bool y -config SYS_SUPPORTS_HUGETLBFS - depends on MMU - def_bool y - config STACKTRACE_SUPPORT def_bool y
@@ -204,6 +206,7 @@ config LOCKDEP_SUPPORT def_bool y source "arch/riscv/Kconfig.socs" +source "arch/riscv/Kconfig.erratas" menu "Platform type"
@@ -227,7 +230,7 @@ config ARCH_RV64I bool "RV64I" select 64BIT select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000 - select HAVE_DYNAMIC_FTRACE if MMU + select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8) select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_GRAPH_TRACER
@@ -386,6 +389,31 @@ config RISCV_SBI_V01 help This config allows kernel to use SBI v0.1 APIs. This will be deprecated in future once legacy M-mode software are no longer in use. + +config KEXEC + bool "Kexec system call" + select KEXEC_CORE + select HOTPLUG_CPU if SMP + depends on MMU + help + kexec is a system call that implements the ability to shut down your + current kernel and to start another kernel. It is like a reboot, + but it is independent of the system firmware. And like a reboot, + you can start any kernel with it, not just Linux. + + The name comes from the similarity to the exec system call. + +config CRASH_DUMP + bool "Build kdump crash kernel" + help + Generate a crash dump after being started by kexec. This should + normally only be set in special crash dump kernels which are + loaded in the main kernel with kexec-tools into a specially + reserved region and then later executed after a crash by + kdump/kexec. + + For more details see Documentation/admin-guide/kdump/kdump.rst + endmenu menu "Boot options"
@@ -438,7 +466,7 @@ config EFI_STUB config EFI bool "UEFI runtime support" - depends on OF + depends on OF && !XIP_KERNEL select LIBFDT select UCS2_STRING select EFI_PARAMS_FROM_FDT
@@ -462,11 +490,63 @@ config STACKPROTECTOR_PER_TASK def_bool y depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS +config PHYS_RAM_BASE_FIXED + bool "Explicitly specified physical RAM address" + default n + +config PHYS_RAM_BASE + hex "Platform Physical RAM address" + depends on PHYS_RAM_BASE_FIXED + default "0x80000000" + help + This is the physical address of RAM in the system. It has to be + explicitly specified to run early relocations of read-write data + from flash to RAM. + +config XIP_KERNEL + bool "Kernel Execute-In-Place from ROM" + depends on MMU && SPARSEMEM + # This prevents XIP from being enabled by all{yes,mod}config, which + # fail to build since XIP doesn't support large kernels. + depends on !COMPILE_TEST + select PHYS_RAM_BASE_FIXED + help + Execute-In-Place allows the kernel to run from non-volatile storage + directly addressable by the CPU, such as NOR flash. This saves RAM + space since the text section of the kernel is not loaded from flash + to RAM. Read-write sections, such as the data section and stack, + are still copied to RAM. The XIP kernel is not compressed since + it has to run directly from flash, so it will take more space to + store it. The flash address used to link the kernel object files, + and for storing it, is configuration dependent.
Therefore, if you + say Y here, you must know the proper physical address at which to + store the kernel image; it depends on your own flash memory usage. + + Also note that the make target becomes "make xipImage" rather than + "make zImage" or "make Image". The final kernel binary to put in + ROM memory will be arch/riscv/boot/xipImage. + + SPARSEMEM is required because the kernel text and rodata that are + flash resident are not backed by memmap, so any attempt to get + a struct page on those regions will trigger a fault. + + If unsure, say N. + +config XIP_PHYS_ADDR + hex "XIP Kernel Physical Location" + depends on XIP_KERNEL + default "0x21000000" + help + This is the physical address in your flash memory at which the + kernel will be linked and stored. This address depends on your + own flash usage. + + endmenu config BUILTIN_DTB - def_bool n + bool depends on OF + default y if XIP_KERNEL menu "Power management options"
diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas new file mode 100644 index 000000000000..d5d03ae8d685 --- /dev/null +++ b/arch/riscv/Kconfig.erratas @@ -0,0 +1,44 @@ +menu "CPU errata selection" + +config RISCV_ERRATA_ALTERNATIVE + bool "RISC-V alternative scheme" + default y + help + This Kconfig allows the kernel to automatically patch the + errata required by the execution platform at run time. The + code patching is performed once, during boot, so the overhead + of this mechanism is incurred only once. + +config ERRATA_SIFIVE + bool "SiFive errata" + depends on RISCV_ERRATA_ALTERNATIVE + help + All SiFive errata Kconfig options depend on this one. Disabling + it will disable all SiFive errata. Please say "Y" + here if your platform uses SiFive CPU cores. + + Otherwise, please say "N" here to avoid unnecessary overhead. + +config ERRATA_SIFIVE_CIP_453 + bool "Apply SiFive errata CIP-453" + depends on ERRATA_SIFIVE + default y + help + This will apply the SiFive CIP-453 erratum workaround, which adds + sign extension to $badaddr when the exception type is instruction + page fault or instruction access fault. + + If you don't know what to do here, say "Y". + +config ERRATA_SIFIVE_CIP_1200 + bool "Apply SiFive errata CIP-1200" + depends on ERRATA_SIFIVE + default y + help + This will apply the SiFive CIP-1200 erratum workaround, which + replaces every "sfence.vma addr" with a full "sfence.vma" to + ensure that the addr has been flushed from the TLB. + + If you don't know what to do here, say "Y". + +endmenu
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs index e1b2690b6e45..ed963761fbd2 100644 --- a/arch/riscv/Kconfig.socs +++ b/arch/riscv/Kconfig.socs @@ -1,5 +1,12 @@ menu "SoC selection" +config SOC_MICROCHIP_POLARFIRE + bool "Microchip PolarFire SoCs" + select MCHP_CLK_MPFS + select SIFIVE_PLIC + help + This enables support for Microchip PolarFire SoC platforms. + config SOC_SIFIVE bool "SiFive SoCs" select SERIAL_SIFIVE if TTY @@ -7,6 +14,7 @@ config SOC_SIFIVE select CLK_SIFIVE select CLK_SIFIVE_PRCI select SIFIVE_PLIC + select ERRATA_SIFIVE help This enables support for SiFive SoC platform hardware.
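(For reference: the CIP-1200 workaround above takes effect at individual call sites via the alternative-patching macros introduced later in this series. Below is a minimal sketch of such a call site, mirroring the ALT_FLUSH_TLB_PAGE wrapper in asm/errata_list.h further down in this diff; the function name here is illustrative only:

	static inline void flush_one_page(unsigned long addr)
	{
		/*
		 * Patched once at boot: on cores with the CIP-1200
		 * erratum, the per-address fence is replaced by a
		 * full "sfence.vma".
		 */
		asm volatile(ALTERNATIVE("sfence.vma %0", "sfence.vma",
					 SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_1200,
					 CONFIG_ERRATA_SIFIVE_CIP_1200)
			     : : "r" (addr) : "memory");
	}
)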
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 1368d943f1f3..3eb9590a0775 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -82,11 +82,16 @@ CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS) # Default target when executing plain make boot := arch/riscv/boot +ifeq ($(CONFIG_XIP_KERNEL),y) +KBUILD_IMAGE := $(boot)/xipImage +else KBUILD_IMAGE := $(boot)/Image.gz +endif head-y := arch/riscv/kernel/head.o core-y += arch/riscv/ +core-$(CONFIG_RISCV_ERRATA_ALTERNATIVE) += arch/riscv/errata/ libs-y += arch/riscv/lib/ libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a @@ -95,12 +100,14 @@ PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@ +ifneq ($(CONFIG_XIP_KERNEL),y) ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy) KBUILD_IMAGE := $(boot)/loader.bin else KBUILD_IMAGE := $(boot)/Image.gz endif -BOOT_TARGETS := Image Image.gz loader loader.bin +endif +BOOT_TARGETS := Image Image.gz loader loader.bin xipImage all: $(notdir $(KBUILD_IMAGE)) diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile index 03404c84f971..6bf299f70c27 100644 --- a/arch/riscv/boot/Makefile +++ b/arch/riscv/boot/Makefile @@ -17,8 +17,21 @@ KCOV_INSTRUMENT := n OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S +OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S targets := Image Image.* loader loader.o loader.lds loader.bin +targets := Image Image.* loader loader.o loader.lds loader.bin xipImage + +ifeq ($(CONFIG_XIP_KERNEL),y) + +quiet_cmd_mkxip = $(quiet_cmd_objcopy) +cmd_mkxip = $(cmd_objcopy) + +$(obj)/xipImage: vmlinux FORCE + $(call if_changed,mkxip) + @$(kecho) ' Physical Address of xipImage: $(CONFIG_XIP_PHYS_ADDR)' + +endif $(obj)/Image: vmlinux FORCE $(call if_changed,objcopy) diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile index 7ffd502e3e7b..fe996b88319e 100644 --- a/arch/riscv/boot/dts/Makefile +++ b/arch/riscv/boot/dts/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 subdir-y += sifive subdir-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += canaan +subdir-y += microchip obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y)) diff --git a/arch/riscv/boot/dts/microchip/Makefile b/arch/riscv/boot/dts/microchip/Makefile new file mode 100644 index 000000000000..622b12771fd3 --- /dev/null +++ b/arch/riscv/boot/dts/microchip/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += microchip-mpfs-icicle-kit.dtb diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts new file mode 100644 index 000000000000..ec79944065c9 --- /dev/null +++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright (c) 2020 Microchip Technology Inc */ + +/dts-v1/; + +#include "microchip-mpfs.dtsi" + +/* Clock frequency (in Hz) of the rtcclk */ +#define RTCCLK_FREQ 1000000 + +/ { + #address-cells = <2>; + #size-cells = <2>; + model = "Microchip PolarFire-SoC Icicle Kit"; + compatible = "microchip,mpfs-icicle-kit"; + + chosen { + stdout-path = &serial0; + }; + + cpus { + timebase-frequency = <RTCCLK_FREQ>; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x0 0x80000000 0x0 0x40000000>; + clocks = <&clkcfg 26>; + }; + + soc { + }; +}; + +&serial0 { + status = "okay"; +}; + +&serial1 { + status = "okay"; +}; + +&serial2 { + status = "okay"; +}; + +&serial3 { + 
status = "okay"; +}; + +&sdcard { + status = "okay"; +}; + +&emac0 { + phy-mode = "sgmii"; + phy-handle = <&phy0>; + phy0: ethernet-phy@8 { + reg = <8>; + ti,fifo-depth = <0x01>; + }; +}; + +&emac1 { + status = "okay"; + phy-mode = "sgmii"; + phy-handle = <&phy1>; + phy1: ethernet-phy@9 { + reg = <9>; + ti,fifo-depth = <0x01>; + }; +}; diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi new file mode 100644 index 000000000000..b9819570a7d1 --- /dev/null +++ b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright (c) 2020 Microchip Technology Inc */ + +/dts-v1/; + +/ { + #address-cells = <2>; + #size-cells = <2>; + model = "Microchip MPFS Icicle Kit"; + compatible = "microchip,mpfs-icicle-kit"; + + chosen { + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + clock-frequency = <0>; + compatible = "sifive,e51", "sifive,rocket0", "riscv"; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <128>; + i-cache-size = <16384>; + reg = <0>; + riscv,isa = "rv64imac"; + status = "disabled"; + + cpu0_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@1 { + clock-frequency = <0>; + compatible = "sifive,u54-mc", "sifive,rocket0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <64>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv39"; + reg = <1>; + riscv,isa = "rv64imafdc"; + tlb-split; + status = "okay"; + + cpu1_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@2 { + clock-frequency = <0>; + compatible = "sifive,u54-mc", "sifive,rocket0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <64>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv39"; + reg = <2>; + riscv,isa = "rv64imafdc"; + tlb-split; + status = "okay"; + + cpu2_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@3 { + clock-frequency = <0>; + compatible = "sifive,u54-mc", "sifive,rocket0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <64>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv39"; + reg = <3>; + riscv,isa = "rv64imafdc"; + tlb-split; + status = "okay"; + + cpu3_intc: interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + + cpu@4 { + clock-frequency = <0>; + compatible = "sifive,u54-mc", "sifive,rocket0", "riscv"; + d-cache-block-size = <64>; + d-cache-sets = <64>; + d-cache-size = <32768>; + d-tlb-sets = <1>; + d-tlb-size = <32>; + device_type = "cpu"; + i-cache-block-size = <64>; + i-cache-sets = <64>; + i-cache-size = <32768>; + i-tlb-sets = <1>; + i-tlb-size = <32>; + mmu-type = "riscv,sv39"; + reg = <4>; + riscv,isa = "rv64imafdc"; + tlb-split; + status = "okay"; + cpu4_intc: 
interrupt-controller { + #interrupt-cells = <1>; + compatible = "riscv,cpu-intc"; + interrupt-controller; + }; + }; + }; + + soc { + #address-cells = <2>; + #size-cells = <2>; + compatible = "simple-bus"; + ranges; + + cache-controller@2010000 { + compatible = "sifive,fu540-c000-ccache", "cache"; + cache-block-size = <64>; + cache-level = <2>; + cache-sets = <1024>; + cache-size = <2097152>; + cache-unified; + interrupt-parent = <&plic>; + interrupts = <1 2 3>; + reg = <0x0 0x2010000 0x0 0x1000>; + }; + + clint@2000000 { + compatible = "sifive,clint0"; + reg = <0x0 0x2000000 0x0 0xC000>; + interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7 + &cpu1_intc 3 &cpu1_intc 7 + &cpu2_intc 3 &cpu2_intc 7 + &cpu3_intc 3 &cpu3_intc 7 + &cpu4_intc 3 &cpu4_intc 7>; + }; + + plic: interrupt-controller@c000000 { + #interrupt-cells = <1>; + compatible = "sifive,plic-1.0.0"; + reg = <0x0 0xc000000 0x0 0x4000000>; + riscv,ndev = <186>; + interrupt-controller; + interrupts-extended = <&cpu0_intc 11 + &cpu1_intc 11 &cpu1_intc 9 + &cpu2_intc 11 &cpu2_intc 9 + &cpu3_intc 11 &cpu3_intc 9 + &cpu4_intc 11 &cpu4_intc 9>; + }; + + dma@3000000 { + compatible = "sifive,fu540-c000-pdma"; + reg = <0x0 0x3000000 0x0 0x8000>; + interrupt-parent = <&plic>; + interrupts = <23 24 25 26 27 28 29 30>; + #dma-cells = <1>; + }; + + refclk: refclk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <600000000>; + clock-output-names = "msspllclk"; + }; + + clkcfg: clkcfg@20002000 { + compatible = "microchip,mpfs-clkcfg"; + reg = <0x0 0x20002000 0x0 0x1000>; + reg-names = "mss_sysreg"; + clocks = <&refclk>; + #clock-cells = <1>; + clock-output-names = "cpu", "axi", "ahb", "envm", /* 0-3 */ + "mac0", "mac1", "mmc", "timer", /* 4-7 */ + "mmuart0", "mmuart1", "mmuart2", "mmuart3", /* 8-11 */ + "mmuart4", "spi0", "spi1", "i2c0", /* 12-15 */ + "i2c1", "can0", "can1", "usb", /* 16-19 */ + "rsvd", "rtc", "qspi", "gpio0", /* 20-23 */ + "gpio1", "gpio2", "ddrc", "fic0", /* 24-27 */ + "fic1", "fic2", "fic3", "athena", "cfm"; /* 28-32 */ + }; + + serial0: serial@20000000 { + compatible = "ns16550a"; + reg = <0x0 0x20000000 0x0 0x400>; + reg-io-width = <4>; + reg-shift = <2>; + interrupt-parent = <&plic>; + interrupts = <90>; + current-speed = <115200>; + clocks = <&clkcfg 8>; + status = "disabled"; + }; + + serial1: serial@20100000 { + compatible = "ns16550a"; + reg = <0x0 0x20100000 0x0 0x400>; + reg-io-width = <4>; + reg-shift = <2>; + interrupt-parent = <&plic>; + interrupts = <91>; + current-speed = <115200>; + clocks = <&clkcfg 9>; + status = "disabled"; + }; + + serial2: serial@20102000 { + compatible = "ns16550a"; + reg = <0x0 0x20102000 0x0 0x400>; + reg-io-width = <4>; + reg-shift = <2>; + interrupt-parent = <&plic>; + interrupts = <92>; + current-speed = <115200>; + clocks = <&clkcfg 10>; + status = "disabled"; + }; + + serial3: serial@20104000 { + compatible = "ns16550a"; + reg = <0x0 0x20104000 0x0 0x400>; + reg-io-width = <4>; + reg-shift = <2>; + interrupt-parent = <&plic>; + interrupts = <93>; + current-speed = <115200>; + clocks = <&clkcfg 11>; + status = "disabled"; + }; + + emmc: mmc@20008000 { + compatible = "cdns,sd4hc"; + reg = <0x0 0x20008000 0x0 0x1000>; + interrupt-parent = <&plic>; + interrupts = <88 89>; + pinctrl-names = "default"; + clocks = <&clkcfg 6>; + bus-width = <4>; + cap-mmc-highspeed; + mmc-ddr-3_3v; + max-frequency = <200000000>; + non-removable; + no-sd; + no-sdio; + voltage-ranges = <3300 3300>; + status = "disabled"; + }; + + sdcard: sdhc@20008000 { + compatible = "cdns,sd4hc"; + reg 
= <0x0 0x20008000 0x0 0x1000>; + interrupt-parent = <&plic>; + interrupts = <88>; + pinctrl-names = "default"; + clocks = <&clkcfg 6>; + bus-width = <4>; + disable-wp; + cap-sd-highspeed; + card-detect-delay = <200>; + sd-uhs-sdr12; + sd-uhs-sdr25; + sd-uhs-sdr50; + sd-uhs-sdr104; + max-frequency = <200000000>; + status = "disabled"; + }; + + emac0: ethernet@20110000 { + compatible = "cdns,macb"; + reg = <0x0 0x20110000 0x0 0x2000>; + interrupt-parent = <&plic>; + interrupts = <64 65 66 67>; + local-mac-address = [00 00 00 00 00 00]; + clocks = <&clkcfg 4>, <&clkcfg 2>; + clock-names = "pclk", "hclk"; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + emac1: ethernet@20112000 { + compatible = "cdns,macb"; + reg = <0x0 0x20112000 0x0 0x2000>; + interrupt-parent = <&plic>; + interrupts = <70 71 72 73>; + mac-address = [00 00 00 00 00 00]; + clocks = <&clkcfg 5>, <&clkcfg 2>; + status = "disabled"; + clock-names = "pclk", "hclk"; + #address-cells = <1>; + #size-cells = <0>; + }; + + }; +}; diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi index eeb4f8c3e0e7..8eef82e4199f 100644 --- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi +++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi @@ -159,6 +159,7 @@ reg = <0x0 0x10000000 0x0 0x1000>; clocks = <&hfclk>, <&rtcclk>; #clock-cells = <1>; + #reset-cells = <1>; }; uart0: serial@10010000 { compatible = "sifive,fu740-c000-uart", "sifive,uart0"; @@ -289,5 +290,37 @@ clocks = <&prci PRCI_CLK_PCLK>; status = "disabled"; }; + pcie@e00000000 { + compatible = "sifive,fu740-pcie"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0xe 0x00000000 0x0 0x80000000>, + <0xd 0xf0000000 0x0 0x10000000>, + <0x0 0x100d0000 0x0 0x1000>; + reg-names = "dbi", "config", "mgmt"; + device_type = "pci"; + dma-coherent; + bus-range = <0x0 0xff>; + ranges = <0x81000000 0x0 0x60080000 0x0 0x60080000 0x0 0x10000>, /* I/O */ + <0x82000000 0x0 0x60090000 0x0 0x60090000 0x0 0xff70000>, /* mem */ + <0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x1000000>, /* mem */ + <0xc3000000 0x20 0x00000000 0x20 0x00000000 0x20 0x00000000>; /* mem prefetchable */ + num-lanes = <0x8>; + interrupts = <56>, <57>, <58>, <59>, <60>, <61>, <62>, <63>, <64>; + interrupt-names = "msi", "inta", "intb", "intc", "intd"; + interrupt-parent = <&plic0>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &plic0 57>, + <0x0 0x0 0x0 0x2 &plic0 58>, + <0x0 0x0 0x0 0x3 &plic0 59>, + <0x0 0x0 0x0 0x4 &plic0 60>; + clock-names = "pcie_aux"; + clocks = <&prci PRCI_CLK_PCIE_AUX>; + pwren-gpios = <&gpio 5 0>; + reset-gpios = <&gpio 8 0>; + resets = <&prci 4>; + status = "okay"; + }; }; }; diff --git a/arch/riscv/boot/loader.lds.S b/arch/riscv/boot/loader.lds.S index 47a5003c2e28..62d94696a19c 100644 --- a/arch/riscv/boot/loader.lds.S +++ b/arch/riscv/boot/loader.lds.S @@ -1,13 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <asm/page.h> +#include <asm/pgtable.h> OUTPUT_ARCH(riscv) ENTRY(_start) SECTIONS { - . = PAGE_OFFSET; + . 
= KERNEL_LINK_ADDR; .payload : { *(.payload) diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 6c0625aa96c7..1f2be234b11c 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -16,6 +16,7 @@ CONFIG_EXPERT=y CONFIG_BPF_SYSCALL=y CONFIG_SOC_SIFIVE=y CONFIG_SOC_VIRT=y +CONFIG_SOC_MICROCHIP_POLARFIRE=y CONFIG_SMP=y CONFIG_HOTPLUG_CPU=y CONFIG_JUMP_LABEL=y @@ -82,6 +83,9 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=y CONFIG_USB_UAS=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_CADENCE=y CONFIG_MMC=y CONFIG_MMC_SPI=y CONFIG_RTC_CLASS=y diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile new file mode 100644 index 000000000000..b8f8740a3e44 --- /dev/null +++ b/arch/riscv/errata/Makefile @@ -0,0 +1,2 @@ +obj-y += alternative.o +obj-$(CONFIG_ERRATA_SIFIVE) += sifive/ diff --git a/arch/riscv/errata/alternative.c b/arch/riscv/errata/alternative.c new file mode 100644 index 000000000000..3b15885db70b --- /dev/null +++ b/arch/riscv/errata/alternative.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * alternative runtime patching + * inspired by the ARM64 and x86 version + * + * Copyright (C) 2021 Sifive. + */ + +#include <linux/init.h> +#include <linux/cpu.h> +#include <linux/uaccess.h> +#include <asm/alternative.h> +#include <asm/sections.h> +#include <asm/vendorid_list.h> +#include <asm/sbi.h> +#include <asm/csr.h> + +static struct cpu_manufacturer_info_t { + unsigned long vendor_id; + unsigned long arch_id; + unsigned long imp_id; +} cpu_mfr_info; + +static void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end, + unsigned long archid, unsigned long impid); + +static inline void __init riscv_fill_cpu_mfr_info(void) +{ +#ifdef CONFIG_RISCV_M_MODE + cpu_mfr_info.vendor_id = csr_read(CSR_MVENDORID); + cpu_mfr_info.arch_id = csr_read(CSR_MARCHID); + cpu_mfr_info.imp_id = csr_read(CSR_MIMPID); +#else + cpu_mfr_info.vendor_id = sbi_get_mvendorid(); + cpu_mfr_info.arch_id = sbi_get_marchid(); + cpu_mfr_info.imp_id = sbi_get_mimpid(); +#endif +} + +static void __init init_alternative(void) +{ + riscv_fill_cpu_mfr_info(); + + switch (cpu_mfr_info.vendor_id) { +#ifdef CONFIG_ERRATA_SIFIVE + case SIFIVE_VENDOR_ID: + vendor_patch_func = sifive_errata_patch_func; + break; +#endif + default: + vendor_patch_func = NULL; + } +} + +/* + * This is called very early in the boot process (directly after we run + * a feature detect on the boot CPU). No need to worry about other CPUs + * here. + */ +void __init apply_boot_alternatives(void) +{ + /* If called on non-boot cpu things could go wrong */ + WARN_ON(smp_processor_id() != 0); + + init_alternative(); + + if (!vendor_patch_func) + return; + + vendor_patch_func((struct alt_entry *)__alt_start, + (struct alt_entry *)__alt_end, + cpu_mfr_info.arch_id, cpu_mfr_info.imp_id); +} + diff --git a/arch/riscv/errata/sifive/Makefile b/arch/riscv/errata/sifive/Makefile new file mode 100644 index 000000000000..bdd5fc843b8e --- /dev/null +++ b/arch/riscv/errata/sifive/Makefile @@ -0,0 +1,2 @@ +obj-y += errata_cip_453.o +obj-y += errata.o diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c new file mode 100644 index 000000000000..f5e5ae70e829 --- /dev/null +++ b/arch/riscv/errata/sifive/errata.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Sifive. 
+ */ + +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/bug.h> +#include <asm/patch.h> +#include <asm/alternative.h> +#include <asm/vendorid_list.h> +#include <asm/errata_list.h> + +struct errata_info_t { + char name[ERRATA_STRING_LENGTH_MAX]; + bool (*check_func)(unsigned long arch_id, unsigned long impid); +}; + +static bool errata_cip_453_check_func(unsigned long arch_id, unsigned long impid) +{ + /* + * Affected cores: + * Architecture ID: 0x8000000000000007 + * Implementation ID: 0x20181004 <= impid <= 0x20191105 + */ + if (arch_id != 0x8000000000000007 || + (impid < 0x20181004 || impid > 0x20191105)) + return false; + return true; +} + +static bool errata_cip_1200_check_func(unsigned long arch_id, unsigned long impid) +{ + /* + * Affected cores: + * Architecture ID: 0x8000000000000007 or 0x1 + * Implementation ID: mimpid[23:0] <= 0x200630 and mimpid != 0x01200626 + */ + if (arch_id != 0x8000000000000007 && arch_id != 0x1) + return false; + if ((impid & 0xffffff) > 0x200630 || impid == 0x1200626) + return false; + return true; +} + +static struct errata_info_t errata_list[ERRATA_SIFIVE_NUMBER] = { + { + .name = "cip-453", + .check_func = errata_cip_453_check_func + }, + { + .name = "cip-1200", + .check_func = errata_cip_1200_check_func + }, +}; + +static u32 __init sifive_errata_probe(unsigned long archid, unsigned long impid) +{ + int idx; + u32 cpu_req_errata = 0; + + for (idx = 0; idx < ERRATA_SIFIVE_NUMBER; idx++) + if (errata_list[idx].check_func(archid, impid)) + cpu_req_errata |= (1U << idx); + + return cpu_req_errata; +} + +static void __init warn_miss_errata(u32 miss_errata) +{ + int i; + + pr_warn("----------------------------------------------------------------\n"); + pr_warn("WARNING: Missing the following errata may cause potential issues\n"); + for (i = 0; i < ERRATA_SIFIVE_NUMBER; i++) + if (miss_errata & 0x1 << i) + pr_warn("\tSiFive Errata[%d]:%s\n", i, errata_list[i].name); + pr_warn("Please enable the corresponding Kconfig to apply them\n"); + pr_warn("----------------------------------------------------------------\n"); +} + +void __init sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + unsigned long archid, unsigned long impid) +{ + struct alt_entry *alt; + u32 cpu_req_errata = sifive_errata_probe(archid, impid); + u32 cpu_apply_errata = 0; + u32 tmp; + + for (alt = begin; alt < end; alt++) { + if (alt->vendor_id != SIFIVE_VENDOR_ID) + continue; + if (alt->errata_id >= ERRATA_SIFIVE_NUMBER) { + WARN(1, "This errata id:%d is not in kernel errata list", alt->errata_id); + continue; + } + + tmp = (1U << alt->errata_id); + if (cpu_req_errata & tmp) { + patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len); + cpu_apply_errata |= tmp; + } + } + if (cpu_apply_errata != cpu_req_errata) + warn_miss_errata(cpu_req_errata - cpu_apply_errata); +}
diff --git a/arch/riscv/errata/sifive/errata_cip_453.S b/arch/riscv/errata/sifive/errata_cip_453.S new file mode 100644 index 000000000000..f1b9623fe1de --- /dev/null +++ b/arch/riscv/errata/sifive/errata_cip_453.S @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 SiFive + */ + +#include <linux/linkage.h> +#include <asm/asm.h> +#include <asm/asm-offsets.h> +#include <asm/alternative.h> + +.macro ADD_SIGN_EXT pt_reg badaddr tmp_reg + REG_L \badaddr, PT_BADADDR(\pt_reg) + li \tmp_reg,1 + slli \tmp_reg,\tmp_reg,0x26 + and \tmp_reg,\tmp_reg,\badaddr + beqz \tmp_reg, 1f + li \tmp_reg,-1 + slli \tmp_reg,\tmp_reg,0x27 + or \badaddr,\tmp_reg,\badaddr + REG_S
\badaddr, PT_BADADDR(\pt_reg) +1: +.endm + +ENTRY(sifive_cip_453_page_fault_trp) + ADD_SIGN_EXT a0, t0, t1 +#ifdef CONFIG_MMU + la t0, do_page_fault +#else + la t0, do_trap_unknown +#endif + jr t0 +END(sifive_cip_453_page_fault_trp) + +ENTRY(sifive_cip_453_insn_fault_trp) + ADD_SIGN_EXT a0, t0, t1 + la t0, do_trap_insn_fault + jr t0 +END(sifive_cip_453_insn_fault_trp)
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h new file mode 100644 index 000000000000..88c08705f64a --- /dev/null +++ b/arch/riscv/include/asm/alternative-macros.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ALTERNATIVE_MACROS_H +#define __ASM_ALTERNATIVE_MACROS_H + +#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE + +#ifdef __ASSEMBLY__ + +.macro ALT_ENTRY oldptr newptr vendor_id errata_id new_len + RISCV_PTR \oldptr + RISCV_PTR \newptr + REG_ASM \vendor_id + REG_ASM \new_len + .word \errata_id +.endm + +.macro ALT_NEW_CONTENT vendor_id, errata_id, enable = 1, new_c : vararg + .if \enable + .pushsection .alternative, "a" + ALT_ENTRY 886b, 888f, \vendor_id, \errata_id, 889f - 888f + .popsection + .subsection 1 +888 : + \new_c +889 : + .previous + .org . - (889b - 888b) + (887b - 886b) + .org . - (887b - 886b) + (889b - 888b) + .endif +.endm + +.macro __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable +886 : + \old_c +887 : + ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c +.endm + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k) + +#else /* !__ASSEMBLY__ */ + +#include <asm/asm.h> +#include <linux/stringify.h> + +#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen) \ + RISCV_PTR " " oldptr "\n" \ + RISCV_PTR " " newptr "\n" \ + REG_ASM " " vendor_id "\n" \ + REG_ASM " " newlen "\n" \ + ".word " errata_id "\n" + +#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \ + ".if " __stringify(enable) " == 1\n" \ + ".pushsection .alternative, \"a\"\n" \ + ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \ + ".popsection\n" \ + ".subsection 1\n" \ + "888 :\n" \ + new_c "\n" \ + "889 :\n" \ + ".previous\n" \ + ".org . - (887b - 886b) + (889b - 888b)\n" \ + ".org . - (889b - 888b) + (887b - 886b)\n" \ + ".endif\n" + +#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \ + "886 :\n" \ + old_c "\n" \ + "887 :\n" \ + ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)) + +#endif /* __ASSEMBLY__ */ + +#else /* !CONFIG_RISCV_ERRATA_ALTERNATIVE */ +#ifdef __ASSEMBLY__ + +.macro __ALTERNATIVE_CFG old_c + \old_c +.endm + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG old_c + +#else /* !__ASSEMBLY__ */ + +#define __ALTERNATIVE_CFG(old_c) \ + old_c "\n" + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG(old_c) + +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_RISCV_ERRATA_ALTERNATIVE */ +/* + * Usage: + * ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) + * in the assembly code. Otherwise, + * asm(ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k)); + * + * old_content: The old content which may be replaced with new content. + * new_content: The new content. + * vendor_id: The CPU vendor ID.
+ * errata_id: The errata ID. + * CONFIG_k: The Kconfig of this errata. When the Kconfig option is + * disabled, the old content will always be executed. + */ +#define ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) \ + _ALTERNATIVE_CFG(old_content, new_content, vendor_id, errata_id, CONFIG_k) + +/* + * A vendor may want to replace an old_content, but another vendor has used + * ALTERNATIVE() to patch its customized content at the same location. In + * this case, this vendor can create a new macro ALTERNATIVE_2() based + * on the following sample code and then replace ALTERNATIVE() with + * ALTERNATIVE_2() to append its customized content. + * + * .macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \ + * new_c_2, vendor_id_2, errata_id_2, enable_2 + * 886 : + * \old_c + * 887 : + * ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1 + * ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2 + * .endm + * + * #define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ + * new_c_2, vendor_id_2, errata_id_2, CONFIG_k_2) \ + * __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, IS_ENABLED(CONFIG_k_1), \ + * new_c_2, vendor_id_2, errata_id_2, IS_ENABLED(CONFIG_k_2) \ + * + * #define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ + * new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) \ + * _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \ + * new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) + * + */ +#endif
diff --git a/arch/riscv/include/asm/alternative.h b/arch/riscv/include/asm/alternative.h new file mode 100644 index 000000000000..e625d3cafbed --- /dev/null +++ b/arch/riscv/include/asm/alternative.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Sifive.
+ */ + +#ifndef __ASM_ALTERNATIVE_H +#define __ASM_ALTERNATIVE_H + +#define ERRATA_STRING_LENGTH_MAX 32 + +#include <asm/alternative-macros.h> + +#ifndef __ASSEMBLY__ + +#include <linux/init.h> +#include <linux/types.h> +#include <linux/stddef.h> +#include <asm/hwcap.h> + +void __init apply_boot_alternatives(void); + +struct alt_entry { + void *old_ptr; /* address of original instruction or data */ + void *alt_ptr; /* address of replacement instruction or data */ + unsigned long vendor_id; /* cpu vendor id */ + unsigned long alt_len; /* The replacement size */ + unsigned int errata_id; /* The errata id */ +} __packed; + +struct errata_checkfunc_id { + unsigned long vendor_id; + bool (*func)(struct alt_entry *alt); +}; + +void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + unsigned long archid, unsigned long impid); + +#endif +#endif
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h index 9c992a88d858..618d7c5af1a2 100644 --- a/arch/riscv/include/asm/asm.h +++ b/arch/riscv/include/asm/asm.h @@ -23,6 +23,7 @@ #define REG_L __REG_SEL(ld, lw) #define REG_S __REG_SEL(sd, sw) #define REG_SC __REG_SEL(sc.d, sc.w) +#define REG_ASM __REG_SEL(.dword, .word) #define SZREG __REG_SEL(8, 4) #define LGREG __REG_SEL(3, 2)
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index caadfc1d7487..87ac65696871 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -115,6 +115,9 @@ #define CSR_MIP 0x344 #define CSR_PMPCFG0 0x3a0 #define CSR_PMPADDR0 0x3b0 +#define CSR_MVENDORID 0xf11 +#define CSR_MARCHID 0xf12 +#define CSR_MIMPID 0xf13 #define CSR_MHARTID 0xf14 #ifdef CONFIG_RISCV_M_MODE
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h index 5c725e1df58b..f4b490cd0e5d 100644 --- a/arch/riscv/include/asm/elf.h +++ b/arch/riscv/include/asm/elf.h @@ -81,4 +81,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp); #endif /* CONFIG_MMU */ +#define ELF_CORE_COPY_REGS(dest, regs) \ +do { \ + *(struct user_regs_struct *)&(dest) = \ + *(struct user_regs_struct *)regs; \ +} while (0); + #endif /* _ASM_RISCV_ELF_H */
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h new file mode 100644 index 000000000000..5f1046e82d9f --- /dev/null +++ b/arch/riscv/include/asm/errata_list.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Sifive.
+ */ +#ifndef ASM_ERRATA_LIST_H +#define ASM_ERRATA_LIST_H + +#include <asm/alternative.h> +#include <asm/vendorid_list.h> + +#ifdef CONFIG_ERRATA_SIFIVE +#define ERRATA_SIFIVE_CIP_453 0 +#define ERRATA_SIFIVE_CIP_1200 1 +#define ERRATA_SIFIVE_NUMBER 2 +#endif + +#ifdef __ASSEMBLY__ + +#define ALT_INSN_FAULT(x) \ +ALTERNATIVE(__stringify(RISCV_PTR do_trap_insn_fault), \ + __stringify(RISCV_PTR sifive_cip_453_insn_fault_trp), \ + SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \ + CONFIG_ERRATA_SIFIVE_CIP_453) + +#define ALT_PAGE_FAULT(x) \ +ALTERNATIVE(__stringify(RISCV_PTR do_page_fault), \ + __stringify(RISCV_PTR sifive_cip_453_page_fault_trp), \ + SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \ + CONFIG_ERRATA_SIFIVE_CIP_453) +#else /* !__ASSEMBLY__ */ + +#define ALT_FLUSH_TLB_PAGE(x) \ +asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \ + ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \ + : : "r" (addr) : "memory") + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index 845002cc2e57..04dad3380041 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -13,9 +13,19 @@ #endif #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR +/* + * Clang prior to 13 had "mcount" instead of "_mcount": + * https://reviews.llvm.org/D98881 + */ +#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000 +#define MCOUNT_NAME _mcount +#else +#define MCOUNT_NAME mcount +#endif + #define ARCH_SUPPORTS_FTRACE_OPS 1 #ifndef __ASSEMBLY__ -void _mcount(void); +void MCOUNT_NAME(void); static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; @@ -36,7 +46,7 @@ struct dyn_arch_ftrace { * both auipc and jalr at the same time. */ -#define MCOUNT_ADDR ((unsigned long)_mcount) +#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME) #define JALR_SIGN_MASK (0x00000800) #define JALR_OFFSET_MASK (0x00000fff) #define AUIPC_OFFSET_MASK (0xfffff000) diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h new file mode 100644 index 000000000000..1e954101906a --- /dev/null +++ b/arch/riscv/include/asm/kexec.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 FORTH-ICS/CARV + * Nick Kossifidis <[email protected]> + */ + +#ifndef _RISCV_KEXEC_H +#define _RISCV_KEXEC_H + +#include <asm/page.h> /* For PAGE_SIZE */ + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) + +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +/* Reserve a page for the control code buffer */ +#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE + +#define KEXEC_ARCH KEXEC_ARCH_RISCV + +extern void riscv_crash_save_regs(struct pt_regs *newregs); + +static inline void +crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + if (oldregs) + memcpy(newregs, oldregs, sizeof(struct pt_regs)); + else + riscv_crash_save_regs(newregs); +} + + +#define ARCH_HAS_KIMAGE_ARCH + +struct kimage_arch { + unsigned long fdt_addr; +}; + +const extern unsigned char riscv_kexec_relocate[]; +const extern unsigned int riscv_kexec_relocate_size; + +typedef void (*riscv_kexec_method)(unsigned long first_ind_entry, + unsigned long jump_addr, + unsigned long fdt_addr, + unsigned long hartid, + unsigned long va_pa_off); + +extern riscv_kexec_method riscv_kexec_norelocate; + +#endif diff --git 
a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index adc9d26f3d75..6a7761c86ec2 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -90,15 +90,58 @@ typedef struct page *pgtable_t; #ifdef CONFIG_MMU extern unsigned long va_pa_offset; +#ifdef CONFIG_64BIT +extern unsigned long va_kernel_pa_offset; +#endif +#ifdef CONFIG_XIP_KERNEL +extern unsigned long va_kernel_xip_pa_offset; +#endif extern unsigned long pfn_base; #define ARCH_PFN_OFFSET (pfn_base) #else #define va_pa_offset 0 +#ifdef CONFIG_64BIT +#define va_kernel_pa_offset 0 +#endif #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) #endif /* CONFIG_MMU */ -#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset)) -#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset) +extern unsigned long kernel_virt_addr; + +#ifdef CONFIG_64BIT +#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset)) +#ifdef CONFIG_XIP_KERNEL +#define kernel_mapping_pa_to_va(y) ({ \ + unsigned long _y = y; \ + (_y >= CONFIG_PHYS_RAM_BASE) ? \ + (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \ + (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \ + }) +#else +#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset)) +#endif +#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x) + +#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset) +#ifdef CONFIG_XIP_KERNEL +#define kernel_mapping_va_to_pa(y) ({ \ + unsigned long _y = y; \ + (_y < kernel_virt_addr + XIP_OFFSET) ? \ + ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \ + ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \ + }) +#else +#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset) +#endif +#define __va_to_pa_nodebug(x) ({ \ + unsigned long _x = x; \ + (_x < kernel_virt_addr) ? 
\ + linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \ + }) +#else +#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset)) +#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset) +#endif #ifdef CONFIG_DEBUG_VIRTUAL extern phys_addr_t __virt_to_phys(unsigned long x); diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index ebf817c1bdf4..9469f464e71a 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -11,23 +11,38 @@ #include <asm/pgtable-bits.h> -#ifndef __ASSEMBLY__ +#ifndef CONFIG_MMU +#define KERNEL_LINK_ADDR PAGE_OFFSET +#else -/* Page Upper Directory not used in RISC-V */ -#include <asm-generic/pgtable-nopud.h> -#include <asm/page.h> -#include <asm/tlbflush.h> -#include <linux/mm_types.h> +#define ADDRESS_SPACE_END (UL(-1)) -#ifdef CONFIG_MMU +#ifdef CONFIG_64BIT +/* Leave 2GB for kernel and BPF at the end of the address space */ +#define KERNEL_LINK_ADDR (ADDRESS_SPACE_END - SZ_2G + 1) +#else +#define KERNEL_LINK_ADDR PAGE_OFFSET +#endif #define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) #define VMALLOC_END (PAGE_OFFSET - 1) #define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) #define BPF_JIT_REGION_SIZE (SZ_128M) +#ifdef CONFIG_64BIT +/* KASLR should leave at least 128MB for BPF after the kernel */ +#define BPF_JIT_REGION_START PFN_ALIGN((unsigned long)&_end) +#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE) +#else #define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE) #define BPF_JIT_REGION_END (VMALLOC_END) +#endif + +/* Modules always live before the kernel */ +#ifdef CONFIG_64BIT +#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G) +#define MODULES_END (PFN_ALIGN((unsigned long)&_start)) +#endif /* * Roughly size the vmemmap space to be large enough to fit enough @@ -60,12 +75,35 @@ #endif +#ifdef CONFIG_XIP_KERNEL +#define XIP_OFFSET SZ_8M +#endif + +#ifndef __ASSEMBLY__ + +/* Page Upper Directory not used in RISC-V */ +#include <asm-generic/pgtable-nopud.h> +#include <asm/page.h> +#include <asm/tlbflush.h> +#include <linux/mm_types.h> + #ifdef CONFIG_64BIT #include <asm/pgtable-64.h> #else #include <asm/pgtable-32.h> #endif /* CONFIG_64BIT */ +#ifdef CONFIG_XIP_KERNEL +#define XIP_FIXUP(addr) ({ \ + uintptr_t __a = (uintptr_t)(addr); \ + (__a >= CONFIG_XIP_PHYS_ADDR && __a < CONFIG_XIP_PHYS_ADDR + SZ_16M) ? 
\ + __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\ + __a; \ + }) +#else +#define XIP_FIXUP(addr) (addr) +#endif /* CONFIG_XIP_KERNEL */ + #ifdef CONFIG_MMU /* Number of entries in the page global directory */ #define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t)) @@ -484,8 +522,17 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, #define kern_addr_valid(addr) (1) /* FIXME */ -extern void *dtb_early_va; -extern uintptr_t dtb_early_pa; +extern char _start[]; +extern void *_dtb_early_va; +extern uintptr_t _dtb_early_pa; +#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU) +#define dtb_early_va (*(void **)XIP_FIXUP(&_dtb_early_va)) +#define dtb_early_pa (*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa)) +#else +#define dtb_early_va _dtb_early_va +#define dtb_early_pa _dtb_early_pa +#endif /* CONFIG_XIP_KERNEL */ + void setup_bootmem(void); void paging_init(void); void misc_mem_init(void); diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index d7027411dde8..0d42693cb65e 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -97,6 +97,9 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, void sbi_console_putchar(int ch); int sbi_console_getchar(void); +long sbi_get_mvendorid(void); +long sbi_get_marchid(void); +long sbi_get_mimpid(void); void sbi_set_timer(uint64_t stime_value); void sbi_shutdown(void); void sbi_clear_ipi(void); diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h index 1595c5b60cfd..8a303fb1ee3b 100644 --- a/arch/riscv/include/asm/sections.h +++ b/arch/riscv/include/asm/sections.h @@ -11,5 +11,6 @@ extern char _start[]; extern char _start_kernel[]; extern char __init_data_begin[], __init_data_end[]; extern char __init_text_begin[], __init_text_end[]; +extern char __alt_start[], __alt_end[]; #endif /* __ASM_SECTIONS_H */ diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h index 6887b3d9f371..a9c56776fa0e 100644 --- a/arch/riscv/include/asm/set_memory.h +++ b/arch/riscv/include/asm/set_memory.h @@ -17,6 +17,7 @@ int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); int set_memory_rw_nx(unsigned long addr, int numpages); void protect_kernel_text_data(void); +void protect_kernel_linear_mapping_text_rodata(void); #else static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h index df1f7c4cd433..a7d2811f3536 100644 --- a/arch/riscv/include/asm/smp.h +++ b/arch/riscv/include/asm/smp.h @@ -46,7 +46,7 @@ int riscv_hartid_to_cpuid(int hartid); void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out); /* Set custom IPI operations */ -void riscv_set_ipi_ops(struct riscv_ipi_ops *ops); +void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops); /* Clear IPI for current CPU */ void riscv_clear_ipi(void); @@ -92,7 +92,7 @@ static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in, cpumask_set_cpu(boot_cpu_hartid, out); } -static inline void riscv_set_ipi_ops(struct riscv_ipi_ops *ops) +static inline void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops) { } diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h index 5477e7ecb6e1..909049366555 100644 --- a/arch/riscv/include/asm/string.h +++ b/arch/riscv/include/asm/string.h @@ -23,5 +23,10 @@ extern asmlinkage 
void *__memmove(void *, const void *, size_t); #define memcpy(dst, src, len) __memcpy(dst, src, len) #define memset(s, c, n) __memset(s, c, n) #define memmove(dst, src, len) __memmove(dst, src, len) + +#ifndef __NO_FORTIFY +#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ +#endif + #endif #endif /* _ASM_RISCV_STRING_H */
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 49350c8bd7b0..b933b1583c9f 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -15,7 +15,7 @@ #include <linux/err.h> /* The array of function pointers for syscalls. */ -extern void *sys_call_table[]; +extern void * const sys_call_table[]; /* * Only the low 32 bits of orig_r0 are meaningful, so we return int.
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 394cfbccdcd9..c84218ad7afc 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -9,6 +9,7 @@ #include <linux/mm_types.h> #include <asm/smp.h> +#include <asm/errata_list.h> #ifdef CONFIG_MMU static inline void local_flush_tlb_all(void) @@ -19,7 +20,7 @@ static inline void local_flush_tlb_all(void) /* Flush one page from local TLB */ static inline void local_flush_tlb_page(unsigned long addr) { - __asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"); + ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory")); } #else /* CONFIG_MMU */ #define local_flush_tlb_all() do { } while (0)
diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h new file mode 100644 index 000000000000..9d934215b3c8 --- /dev/null +++ b/arch/riscv/include/asm/vendorid_list.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 SiFive + */ +#ifndef ASM_VENDOR_LIST_H +#define ASM_VENDOR_LIST_H + +#define SIFIVE_VENDOR_ID 0x489 + +#endif
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 647a47f5484a..d3081e4d9600 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -10,6 +10,10 @@ CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE) endif CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,) +ifdef CONFIG_KEXEC +AFLAGS_kexec_relocate.o := -mcmodel=medany -mno-relax +endif + extra-y += head.o extra-y += vmlinux.lds @@ -55,6 +59,8 @@ obj-$(CONFIG_SMP) += cpu_ops_sbi.o endif obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o obj-$(CONFIG_KGDB) += kgdb.o +obj-$(CONFIG_KEXEC) += kexec_relocate.o crash_save_regs.o machine_kexec.o +obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o
diff --git a/arch/riscv/kernel/crash_dump.c b/arch/riscv/kernel/crash_dump.c new file mode 100644 index 000000000000..86cc0ada5752 --- /dev/null +++ b/arch/riscv/kernel/crash_dump.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This code comes from arch/arm64/kernel/crash_dump.c + * Created by: AKASHI Takahiro <[email protected]> + * Copyright (C) 2017 Linaro Limited + */ + +#include <linux/crash_dump.h> +#include <linux/io.h> + +/** + * copy_oldmem_page() - copy one page from old kernel memory + * @pfn: page frame number to be copied + * @buf: buffer where the copied page is placed + * @csize: number of bytes to copy + * @offset: offset in bytes into the page + * @userbuf: if set, @buf is in a user address space + * + * This function copies one page from old kernel memory into the buffer + * pointed to by @buf. If @buf is in userspace, set @userbuf to %1.
Returns number of bytes + * copied or negative error in case of failure. */ +ssize_t copy_oldmem_page(unsigned long pfn, char *buf, + size_t csize, unsigned long offset, + int userbuf) +{ + void *vaddr; + + if (!csize) + return 0; + + vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB); + if (!vaddr) + return -ENOMEM; + + if (userbuf) { + if (copy_to_user((char __user *)buf, vaddr + offset, csize)) { + memunmap(vaddr); + return -EFAULT; + } + } else + memcpy(buf, vaddr + offset, csize); + + memunmap(vaddr); + return csize; +} diff --git a/arch/riscv/kernel/crash_save_regs.S b/arch/riscv/kernel/crash_save_regs.S new file mode 100644 index 000000000000..7832fb763aba --- /dev/null +++ b/arch/riscv/kernel/crash_save_regs.S @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 FORTH-ICS/CARV + * Nick Kossifidis <[email protected]> + */ + +#include <asm/asm.h> /* For RISCV_* and REG_* macros */ +#include <asm/csr.h> /* For CSR_* macros */ +#include <asm/asm-offsets.h> /* For offsets on pt_regs */ +#include <linux/linkage.h> /* For SYM_* macros */ + +.section ".text" +SYM_CODE_START(riscv_crash_save_regs) + REG_S ra, PT_RA(a0) /* x1 */ + REG_S sp, PT_SP(a0) /* x2 */ + REG_S gp, PT_GP(a0) /* x3 */ + REG_S tp, PT_TP(a0) /* x4 */ + REG_S t0, PT_T0(a0) /* x5 */ + REG_S t1, PT_T1(a0) /* x6 */ + REG_S t2, PT_T2(a0) /* x7 */ + REG_S s0, PT_S0(a0) /* x8/fp */ + REG_S s1, PT_S1(a0) /* x9 */ + REG_S a0, PT_A0(a0) /* x10 */ + REG_S a1, PT_A1(a0) /* x11 */ + REG_S a2, PT_A2(a0) /* x12 */ + REG_S a3, PT_A3(a0) /* x13 */ + REG_S a4, PT_A4(a0) /* x14 */ + REG_S a5, PT_A5(a0) /* x15 */ + REG_S a6, PT_A6(a0) /* x16 */ + REG_S a7, PT_A7(a0) /* x17 */ + REG_S s2, PT_S2(a0) /* x18 */ + REG_S s3, PT_S3(a0) /* x19 */ + REG_S s4, PT_S4(a0) /* x20 */ + REG_S s5, PT_S5(a0) /* x21 */ + REG_S s6, PT_S6(a0) /* x22 */ + REG_S s7, PT_S7(a0) /* x23 */ + REG_S s8, PT_S8(a0) /* x24 */ + REG_S s9, PT_S9(a0) /* x25 */ + REG_S s10, PT_S10(a0) /* x26 */ + REG_S s11, PT_S11(a0) /* x27 */ + REG_S t3, PT_T3(a0) /* x28 */ + REG_S t4, PT_T4(a0) /* x29 */ + REG_S t5, PT_T5(a0) /* x30 */ + REG_S t6, PT_T6(a0) /* x31 */ + + csrr t1, CSR_STATUS + csrr t2, CSR_EPC + csrr t3, CSR_TVAL + csrr t4, CSR_CAUSE + + REG_S t1, PT_STATUS(a0) + REG_S t2, PT_EPC(a0) + REG_S t3, PT_BADADDR(a0) + REG_S t4, PT_CAUSE(a0) + ret +SYM_CODE_END(riscv_crash_save_regs) diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 83095faa680e..80d5a9e017b0 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -12,6 +12,7 @@ #include <asm/unistd.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> +#include <asm/errata_list.h> #if !IS_ENABLED(CONFIG_PREEMPTION) .set resume_kernel, restore_all @@ -454,7 +455,7 @@ ENDPROC(__switch_to) /* Exception vector table */ ENTRY(excp_vect_table) RISCV_PTR do_trap_insn_misaligned - RISCV_PTR do_trap_insn_fault + ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault) RISCV_PTR do_trap_insn_illegal RISCV_PTR do_trap_break RISCV_PTR do_trap_load_misaligned @@ -465,7 +466,8 @@ ENTRY(excp_vect_table) RISCV_PTR do_trap_ecall_s RISCV_PTR do_trap_unknown RISCV_PTR do_trap_ecall_m - RISCV_PTR do_page_fault /* instruction page fault */ + /* instruction page fault */ + ALT_PAGE_FAULT(RISCV_PTR do_page_fault) RISCV_PTR do_page_fault /* load page fault */ RISCV_PTR do_trap_unknown RISCV_PTR do_page_fault /* store page fault */ diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index f5a9bad86e58..89cc58ab52b4 100644 --- a/arch/riscv/kernel/head.S +++
b/arch/riscv/kernel/head.S @@ -9,11 +9,23 @@ #include <linux/linkage.h> #include <asm/thread_info.h> #include <asm/page.h> +#include <asm/pgtable.h> #include <asm/csr.h> #include <asm/hwcap.h> #include <asm/image.h> #include "efi-header.S" +#ifdef CONFIG_XIP_KERNEL +.macro XIP_FIXUP_OFFSET reg + REG_L t0, _xip_fixup + add \reg, \reg, t0 +.endm +_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET +#else +.macro XIP_FIXUP_OFFSET reg +.endm +#endif /* CONFIG_XIP_KERNEL */ + __HEAD ENTRY(_start) /* @@ -69,7 +81,9 @@ pe_head_start: #ifdef CONFIG_MMU relocate: /* Relocate return address */ - li a1, PAGE_OFFSET + la a1, kernel_virt_addr + XIP_FIXUP_OFFSET a1 + REG_L a1, 0(a1) la a2, _start sub a1, a1, a2 add ra, ra, a1 @@ -91,6 +105,7 @@ relocate: * to ensure the new translations are in use. */ la a0, trampoline_pg_dir + XIP_FIXUP_OFFSET a0 srl a0, a0, PAGE_SHIFT or a0, a0, a1 sfence.vma @@ -144,7 +159,9 @@ secondary_start_sbi: slli a3, a0, LGREG la a4, __cpu_up_stack_pointer + XIP_FIXUP_OFFSET a4 la a5, __cpu_up_task_pointer + XIP_FIXUP_OFFSET a5 add a4, a3, a4 add a5, a3, a5 REG_L sp, (a4) @@ -156,6 +173,7 @@ secondary_start_common: #ifdef CONFIG_MMU /* Enable virtual memory and relocate to virtual address */ la a0, swapper_pg_dir + XIP_FIXUP_OFFSET a0 call relocate #endif call setup_trap_vector @@ -236,12 +254,33 @@ pmp_done: .Lgood_cores: #endif +#ifndef CONFIG_XIP_KERNEL /* Pick one hart to run the main boot sequence */ la a3, hart_lottery li a2, 1 amoadd.w a3, a2, (a3) bnez a3, .Lsecondary_start +#else + /* hart_lottery in flash contains a magic number */ + la a3, hart_lottery + mv a2, a3 + XIP_FIXUP_OFFSET a2 + lw t1, (a3) + amoswap.w t0, t1, (a2) + /* first time here if hart_lottery in RAM is not set */ + beq t0, t1, .Lsecondary_start + + la sp, _end + THREAD_SIZE + XIP_FIXUP_OFFSET sp + mv s0, a0 + call __copy_data + + /* Restore a0 copy */ + mv a0, s0 +#endif + +#ifndef CONFIG_XIP_KERNEL /* Clear BSS for flat non-ELF images */ la a3, __bss_start la a4, __bss_stop @@ -251,15 +290,18 @@ clear_bss: add a3, a3, RISCV_SZPTR blt a3, a4, clear_bss clear_bss_done: - +#endif /* Save hart ID and DTB physical address */ mv s0, a0 mv s1, a1 + la a2, boot_cpu_hartid + XIP_FIXUP_OFFSET a2 REG_S a0, (a2) /* Initialize page tables and relocate to virtual addresses */ la sp, init_thread_union + THREAD_SIZE + XIP_FIXUP_OFFSET sp #ifdef CONFIG_BUILTIN_DTB la a0, __dtb_start #else @@ -268,6 +310,7 @@ clear_bss_done: call setup_vm #ifdef CONFIG_MMU la a0, early_pg_dir + XIP_FIXUP_OFFSET a0 call relocate #endif /* CONFIG_MMU */ @@ -292,7 +335,9 @@ clear_bss_done: slli a3, a0, LGREG la a1, __cpu_up_stack_pointer + XIP_FIXUP_OFFSET a1 la a2, __cpu_up_task_pointer + XIP_FIXUP_OFFSET a2 add a1, a3, a1 add a2, a3, a2 diff --git a/arch/riscv/kernel/head.h b/arch/riscv/kernel/head.h index b48dda3d04f6..aabbc3ac3e48 100644 --- a/arch/riscv/kernel/head.h +++ b/arch/riscv/kernel/head.h @@ -12,6 +12,9 @@ extern atomic_t hart_lottery; asmlinkage void do_page_fault(struct pt_regs *regs); asmlinkage void __init setup_vm(uintptr_t dtb_pa); +#ifdef CONFIG_XIP_KERNEL +asmlinkage void __init __copy_data(void); +#endif extern void *__cpu_up_stack_pointer[]; extern void *__cpu_up_task_pointer[]; diff --git a/arch/riscv/kernel/kexec_relocate.S b/arch/riscv/kernel/kexec_relocate.S new file mode 100644 index 000000000000..88c3beabe9b4 --- /dev/null +++ b/arch/riscv/kernel/kexec_relocate.S @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 FORTH-ICS/CARV + * Nick Kossifidis 
<[email protected]> + */ + +#include <asm/asm.h> /* For RISCV_* and REG_* macros */ +#include <asm/csr.h> /* For CSR_* macros */ +#include <asm/page.h> /* For PAGE_SIZE */ +#include <linux/linkage.h> /* For SYM_* macros */ + +.section ".rodata" +SYM_CODE_START(riscv_kexec_relocate) + + /* + * s0: Pointer to the current entry + * s1: (const) Phys address to jump to after relocation + * s2: (const) Phys address of the FDT image + * s3: (const) The hartid of the current hart + * s4: Pointer to the destination address for the relocation + * s5: (const) Number of words per page + * s6: (const) 1, used for subtraction + * s7: (const) va_pa_offset, used when switching MMU off + * s8: (const) Physical address of the main loop + * s9: (debug) indirection page counter + * s10: (debug) entry counter + * s11: (debug) copied words counter + */ + mv s0, a0 + mv s1, a1 + mv s2, a2 + mv s3, a3 + mv s4, zero + li s5, (PAGE_SIZE / RISCV_SZPTR) + li s6, 1 + mv s7, a4 + mv s8, zero + mv s9, zero + mv s10, zero + mv s11, zero + + /* Disable / cleanup interrupts */ + csrw CSR_SIE, zero + csrw CSR_SIP, zero + + /* + * When we switch SATP.MODE to "Bare" we'll only + * play with physical addresses. However the first time + * we try to jump somewhere, the offset on the jump + * will be relative to pc which will still be on VA. To + * deal with this we set stvec to the physical address at + * the start of the loop below so that we jump there in + * any case. + */ + la s8, 1f + sub s8, s8, s7 + csrw CSR_STVEC, s8 + + /* Process entries in a loop */ +.align 2 +1: + addi s10, s10, 1 + REG_L t0, 0(s0) /* t0 = *image->entry */ + addi s0, s0, RISCV_SZPTR /* image->entry++ */ + + /* IND_DESTINATION entry ? -> save destination address */ + andi t1, t0, 0x1 + beqz t1, 2f + andi s4, t0, ~0x1 + j 1b + +2: + /* IND_INDIRECTION entry ? -> update next entry ptr (PA) */ + andi t1, t0, 0x2 + beqz t1, 2f + andi s0, t0, ~0x2 + addi s9, s9, 1 + csrw CSR_SATP, zero + jalr zero, s8, 0 + +2: + /* IND_DONE entry ? -> jump to done label */ + andi t1, t0, 0x4 + beqz t1, 2f + j 4f + +2: + /* + * IND_SOURCE entry ? -> copy page word by word to the + * destination address we got from IND_DESTINATION + */ + andi t1, t0, 0x8 + beqz t1, 1b /* Unknown entry type, ignore it */ + andi t0, t0, ~0x8 + mv t3, s5 /* i = num words per page */ +3: /* copy loop */ + REG_L t1, (t0) /* t1 = *src_ptr */ + REG_S t1, (s4) /* *dst_ptr = *src_ptr */ + addi t0, t0, RISCV_SZPTR /* src_ptr++ */ + addi s4, s4, RISCV_SZPTR /* dst_ptr++ */ + sub t3, t3, s6 /* i-- */ + addi s11, s11, 1 /* c++ */ + beqz t3, 1b /* copy done ?
*/ + j 3b + +4: + /* Pass the arguments to the next kernel / Cleanup */ + mv a0, s3 + mv a1, s2 + mv a2, s1 + + /* Cleanup */ + mv a3, zero + mv a4, zero + mv a5, zero + mv a6, zero + mv a7, zero + + mv s0, zero + mv s1, zero + mv s2, zero + mv s3, zero + mv s4, zero + mv s5, zero + mv s6, zero + mv s7, zero + mv s8, zero + mv s9, zero + mv s10, zero + mv s11, zero + + mv t0, zero + mv t1, zero + mv t2, zero + mv t3, zero + mv t4, zero + mv t5, zero + mv t6, zero + csrw CSR_SEPC, zero + csrw CSR_SCAUSE, zero + csrw CSR_SSCRATCH, zero + + /* + * Make sure the relocated code is visible + * and jump to the new kernel + */ + fence.i + + jalr zero, a2, 0 + +SYM_CODE_END(riscv_kexec_relocate) +riscv_kexec_relocate_end: + + +/* Used for jumping to crashkernel */ +.section ".text" +SYM_CODE_START(riscv_kexec_norelocate) + /* + * s0: (const) Phys address to jump to + * s1: (const) Phys address of the FDT image + * s2: (const) The hartid of the current hart + * s3: (const) va_pa_offset, used when switching MMU off + */ + mv s0, a1 + mv s1, a2 + mv s2, a3 + mv s3, a4 + + /* Disable / cleanup interrupts */ + csrw CSR_SIE, zero + csrw CSR_SIP, zero + + /* Switch to physical addressing */ + la s4, 1f + sub s4, s4, s3 + csrw CSR_STVEC, s4 + csrw CSR_SATP, zero + +.align 2 +1: + /* Pass the arguments to the next kernel / Cleanup */ + mv a0, s2 + mv a1, s1 + mv a2, s0 + + /* Cleanup */ + mv a3, zero + mv a4, zero + mv a5, zero + mv a6, zero + mv a7, zero + + mv s0, zero + mv s1, zero + mv s2, zero + mv s3, zero + mv s4, zero + mv s5, zero + mv s6, zero + mv s7, zero + mv s8, zero + mv s9, zero + mv s10, zero + mv s11, zero + + mv t0, zero + mv t1, zero + mv t2, zero + mv t3, zero + mv t4, zero + mv t5, zero + mv t6, zero + csrw CSR_SEPC, zero + csrw CSR_SCAUSE, zero + csrw CSR_SSCRATCH, zero + + jalr zero, a2, 0 +SYM_CODE_END(riscv_kexec_norelocate) + +.section ".rodata" +SYM_DATA(riscv_kexec_relocate_size, + .long riscv_kexec_relocate_end - riscv_kexec_relocate) + diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c new file mode 100644 index 000000000000..cc048143fba5 --- /dev/null +++ b/arch/riscv/kernel/machine_kexec.c @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 FORTH-ICS/CARV + * Nick Kossifidis <[email protected]> + */ + +#include <linux/kexec.h> +#include <asm/kexec.h> /* For riscv_kexec_* symbol defines */ +#include <linux/smp.h> /* For smp_send_stop() */ +#include <asm/cacheflush.h> /* For local_flush_icache_all() */ +#include <asm/barrier.h> /* For smp_wmb() */ +#include <asm/page.h> /* For PAGE_MASK */ +#include <linux/libfdt.h> /* For fdt_check_header() */ +#include <asm/set_memory.h> /* For set_memory_x() */ +#include <linux/compiler.h> /* For unreachable() */ +#include <linux/cpu.h> /* For cpu_down() */ + +/** + * kexec_image_info - Print received image details + */ +static void +kexec_image_info(const struct kimage *image) +{ + unsigned long i; + + pr_debug("Kexec image info:\n"); + pr_debug("\ttype: %d\n", image->type); + pr_debug("\tstart: %lx\n", image->start); + pr_debug("\thead: %lx\n", image->head); + pr_debug("\tnr_segments: %lu\n", image->nr_segments); + + for (i = 0; i < image->nr_segments; i++) { + pr_debug("\t segment[%lu]: %016lx - %016lx", i, + image->segment[i].mem, + image->segment[i].mem + image->segment[i].memsz); + pr_debug("\t\t0x%lx bytes, %lu pages\n", + (unsigned long) image->segment[i].memsz, + (unsigned long) image->segment[i].memsz / PAGE_SIZE); + } +} + +/** + * machine_kexec_prepare - Initialize
kexec + * + * This function is called from do_kexec_load, when the user has + * provided us with an image to be loaded. Its goal is to validate + * the image and prepare the control code buffer as needed. + * Note that kimage_alloc_init has already been called and the + * control buffer has already been allocated. + */ +int +machine_kexec_prepare(struct kimage *image) +{ + struct kimage_arch *internal = &image->arch; + struct fdt_header fdt = {0}; + void *control_code_buffer = NULL; + unsigned int control_code_buffer_sz = 0; + int i = 0; + + kexec_image_info(image); + + /* Find the Flattened Device Tree and save its physical address */ + for (i = 0; i < image->nr_segments; i++) { + if (image->segment[i].memsz <= sizeof(fdt)) + continue; + + if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt))) + continue; + + if (fdt_check_header(&fdt)) + continue; + + internal->fdt_addr = (unsigned long) image->segment[i].mem; + break; + } + + if (!internal->fdt_addr) { + pr_err("Device tree not included in the provided image\n"); + return -EINVAL; + } + + /* Copy the assembler code for relocation to the control page */ + if (image->type != KEXEC_TYPE_CRASH) { + control_code_buffer = page_address(image->control_code_page); + control_code_buffer_sz = page_size(image->control_code_page); + + if (unlikely(riscv_kexec_relocate_size > control_code_buffer_sz)) { + pr_err("Relocation code doesn't fit within a control page\n"); + return -EINVAL; + } + + memcpy(control_code_buffer, riscv_kexec_relocate, + riscv_kexec_relocate_size); + + /* Mark the control page executable */ + set_memory_x((unsigned long) control_code_buffer, 1); + } + + return 0; +} + + +/** + * machine_kexec_cleanup - Cleanup any leftovers from + * machine_kexec_prepare + * + * This function is called by kimage_free to handle any arch-specific + * allocations done in machine_kexec_prepare. Since we didn't do any + * allocations there, this is just an empty function. Note that the + * control buffer is freed by kimage_free. + */ +void +machine_kexec_cleanup(struct kimage *image) +{ +} + + +/* + * machine_shutdown - Prepare for a kexec reboot + * + * This function is called by kernel_kexec just before machine_kexec + * below. Its goal is to prepare the rest of the system (the other + * harts and possibly devices etc) for a kexec reboot. + */ +void machine_shutdown(void) +{ + /* + * No more interrupts on this hart + * until we are back up. + */ + local_irq_disable(); + +#if defined(CONFIG_HOTPLUG_CPU) + smp_shutdown_nonboot_cpus(smp_processor_id()); +#endif +} + +/** + * machine_crash_shutdown - Prepare to kexec after a kernel crash + * + * This function is called by crash_kexec just before machine_kexec + * below and its goal is similar to machine_shutdown, but in case of + * a kernel crash: it saves the registers of the crashing hart and + * shuts down the rest of the system before the crash kernel runs. + */ +void +machine_crash_shutdown(struct pt_regs *regs) +{ + crash_save_cpu(regs, smp_processor_id()); + machine_shutdown(); + pr_info("Starting crashdump kernel...\n"); +} + +/** + * machine_kexec - Jump to the loaded kimage + * + * This function is called by kernel_kexec which is called by the + * reboot system call when the reboot cmd is LINUX_REBOOT_CMD_KEXEC, + * or by crash_kexec which is called by the kernel's arch-specific + * trap handler in case of a kernel panic. It's the final stage of + * the kexec process where the pre-loaded kimage is ready to be + * executed. We assume at this point that all other harts are + * suspended and this hart will be the new boot hart.
+ */ +void __noreturn +machine_kexec(struct kimage *image) +{ + struct kimage_arch *internal = &image->arch; + unsigned long jump_addr = (unsigned long) image->start; + unsigned long first_ind_entry = (unsigned long) &image->head; + unsigned long this_hart_id = raw_smp_processor_id(); + unsigned long fdt_addr = internal->fdt_addr; + void *control_code_buffer = page_address(image->control_code_page); + riscv_kexec_method kexec_method = NULL; + + if (image->type != KEXEC_TYPE_CRASH) + kexec_method = control_code_buffer; + else + kexec_method = (riscv_kexec_method) &riscv_kexec_norelocate; + + pr_notice("Will call new kernel at %08lx from hart id %lx\n", + jump_addr, this_hart_id); + pr_notice("FDT image at %08lx\n", fdt_addr); + + /* Make sure the relocation code is visible to the hart */ + local_flush_icache_all(); + + /* Jump to the relocation code */ + pr_notice("Bye...\n"); + kexec_method(first_ind_entry, jump_addr, fdt_addr, + this_hart_id, va_pa_offset); + unreachable(); +} diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S index 8a5593ff9ff3..6d462681c9c0 100644 --- a/arch/riscv/kernel/mcount.S +++ b/arch/riscv/kernel/mcount.S @@ -47,8 +47,8 @@ ENTRY(ftrace_stub) #ifdef CONFIG_DYNAMIC_FTRACE - .global _mcount - .set _mcount, ftrace_stub + .global MCOUNT_NAME + .set MCOUNT_NAME, ftrace_stub #endif ret ENDPROC(ftrace_stub) @@ -78,7 +78,7 @@ ENDPROC(return_to_handler) #endif #ifndef CONFIG_DYNAMIC_FTRACE -ENTRY(_mcount) +ENTRY(MCOUNT_NAME) la t4, ftrace_stub #ifdef CONFIG_FUNCTION_GRAPH_TRACER la t0, ftrace_graph_return @@ -124,6 +124,6 @@ do_trace: jalr t5 RESTORE_ABI_STATE ret -ENDPROC(_mcount) +ENDPROC(MCOUNT_NAME) #endif -EXPORT_SYMBOL(_mcount) +EXPORT_SYMBOL(MCOUNT_NAME) diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 104fba889cf7..68a9e3d1fe16 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -408,13 +408,11 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, } #if defined(CONFIG_MMU) && defined(CONFIG_64BIT) -#define VMALLOC_MODULE_START \ - max(PFN_ALIGN((unsigned long)&_end - SZ_2G), VMALLOC_START) void *module_alloc(unsigned long size) { - return __vmalloc_node_range(size, 1, VMALLOC_MODULE_START, - VMALLOC_END, GFP_KERNEL, - PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, + return __vmalloc_node_range(size, 1, MODULES_VADDR, + MODULES_END, GFP_KERNEL, + PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0)); } #endif diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index 7e2c78e2ca6b..10b965c34536 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ b/arch/riscv/kernel/probes/kprobes.c @@ -84,6 +84,14 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return 0; } +void *alloc_insn_page(void) +{ + return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END, + GFP_KERNEL, PAGE_KERNEL_READ_EXEC, + VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, + __builtin_return_address(0)); +} + /* install breakpoint in text */ void __kprobes arch_arm_kprobe(struct kprobe *p) { @@ -260,8 +268,10 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr) if (kcb->kprobe_status == KPROBE_REENTER) restore_previous_kprobe(kcb); - else + else { + kprobes_restore_local_irqflag(kcb, regs); reset_current_kprobe(); + } break; case KPROBE_HIT_ACTIVE: diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index d3bf756321a5..7402a417f38e 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -11,14 +11,14 @@ #include <asm/smp.h> /* default SBI version 
is 0.1 */ -unsigned long sbi_spec_version = SBI_SPEC_VERSION_DEFAULT; +unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT; EXPORT_SYMBOL(sbi_spec_version); -static void (*__sbi_set_timer)(uint64_t stime); -static int (*__sbi_send_ipi)(const unsigned long *hart_mask); +static void (*__sbi_set_timer)(uint64_t stime) __ro_after_init; +static int (*__sbi_send_ipi)(const unsigned long *hart_mask) __ro_after_init; static int (*__sbi_rfence)(int fid, const unsigned long *hart_mask, unsigned long start, unsigned long size, - unsigned long arg4, unsigned long arg5); + unsigned long arg4, unsigned long arg5) __ro_after_init; struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, unsigned long arg1, unsigned long arg2, @@ -547,6 +547,21 @@ static inline long sbi_get_firmware_version(void) return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION); } +long sbi_get_mvendorid(void) +{ + return __sbi_base_ecall(SBI_EXT_BASE_GET_MVENDORID); +} + +long sbi_get_marchid(void) +{ + return __sbi_base_ecall(SBI_EXT_BASE_GET_MARCHID); +} + +long sbi_get_mimpid(void) +{ + return __sbi_base_ecall(SBI_EXT_BASE_GET_MIMPID); +} + static void sbi_send_cpumask_ipi(const struct cpumask *target) { struct cpumask hartid_mask; @@ -556,7 +571,7 @@ static void sbi_send_cpumask_ipi(const struct cpumask *target) sbi_send_ipi(cpumask_bits(&hartid_mask)); } -static struct riscv_ipi_ops sbi_ipi_ops = { +static const struct riscv_ipi_ops sbi_ipi_ops = { .ipi_inject = sbi_send_cpumask_ipi }; @@ -577,19 +592,19 @@ void __init sbi_init(void) sbi_get_firmware_id(), sbi_get_firmware_version()); if (sbi_probe_extension(SBI_EXT_TIME) > 0) { __sbi_set_timer = __sbi_set_timer_v02; - pr_info("SBI v0.2 TIME extension detected\n"); + pr_info("SBI TIME extension detected\n"); } else { __sbi_set_timer = __sbi_set_timer_v01; } if (sbi_probe_extension(SBI_EXT_IPI) > 0) { __sbi_send_ipi = __sbi_send_ipi_v02; - pr_info("SBI v0.2 IPI extension detected\n"); + pr_info("SBI IPI extension detected\n"); } else { __sbi_send_ipi = __sbi_send_ipi_v01; } if (sbi_probe_extension(SBI_EXT_RFENCE) > 0) { __sbi_rfence = __sbi_rfence_v02; - pr_info("SBI v0.2 RFENCE extension detected\n"); + pr_info("SBI RFENCE extension detected\n"); } else { __sbi_rfence = __sbi_rfence_v01; } diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index f8f15332caa2..7b31779101f6 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -20,9 +20,11 @@ #include <linux/swiotlb.h> #include <linux/smp.h> #include <linux/efi.h> +#include <linux/crash_dump.h> #include <asm/cpu_ops.h> #include <asm/early_ioremap.h> +#include <asm/pgtable.h> #include <asm/setup.h> #include <asm/set_memory.h> #include <asm/sections.h> @@ -50,7 +52,11 @@ struct screen_info screen_info __section(".data") = { * This is used before the kernel initializes the BSS so it can't be in the * BSS. */ -atomic_t hart_lottery __section(".sdata"); +atomic_t hart_lottery __section(".sdata") +#ifdef CONFIG_XIP_KERNEL += ATOMIC_INIT(0xC001BEEF) +#endif +; unsigned long boot_cpu_hartid; static DEFINE_PER_CPU(struct cpu, cpu_devices); @@ -60,10 +66,14 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); * also add "System RAM" regions for compatibility with other * archs, and the rest of the known regions for completeness. 
*/ +static struct resource kimage_res = { .name = "Kernel image", }; static struct resource code_res = { .name = "Kernel code", }; static struct resource data_res = { .name = "Kernel data", }; static struct resource rodata_res = { .name = "Kernel rodata", }; static struct resource bss_res = { .name = "Kernel bss", }; +#ifdef CONFIG_CRASH_DUMP +static struct resource elfcorehdr_res = { .name = "ELF Core hdr", }; +#endif static int __init add_resource(struct resource *parent, struct resource *res) @@ -80,45 +90,54 @@ static int __init add_resource(struct resource *parent, return 1; } -static int __init add_kernel_resources(struct resource *res) +static int __init add_kernel_resources(void) { int ret = 0; /* * The memory region of the kernel image is continuous and - * was reserved on setup_bootmem, find it here and register - * it as a resource, then register the various segments of - * the image as child nodes + * was reserved on setup_bootmem, register it here as a + * resource, with the various segments of the image as + * child nodes. */ - if (!(res->start <= code_res.start && res->end >= data_res.end)) - return 0; - res->name = "Kernel image"; - res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + code_res.start = __pa_symbol(_text); + code_res.end = __pa_symbol(_etext) - 1; + code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - /* - * We removed a part of this region on setup_bootmem so - * we need to expand the resource for the bss to fit in. - */ - res->end = bss_res.end; + rodata_res.start = __pa_symbol(__start_rodata); + rodata_res.end = __pa_symbol(__end_rodata) - 1; + rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + data_res.start = __pa_symbol(_data); + data_res.end = __pa_symbol(_edata) - 1; + data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + bss_res.start = __pa_symbol(__bss_start); + bss_res.end = __pa_symbol(__bss_stop) - 1; + bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + kimage_res.start = code_res.start; + kimage_res.end = bss_res.end; + kimage_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - ret = add_resource(&iomem_resource, res); + ret = add_resource(&iomem_resource, &kimage_res); if (ret < 0) return ret; - ret = add_resource(res, &code_res); + ret = add_resource(&kimage_res, &code_res); if (ret < 0) return ret; - ret = add_resource(res, &rodata_res); + ret = add_resource(&kimage_res, &rodata_res); if (ret < 0) return ret; - ret = add_resource(res, &data_res); + ret = add_resource(&kimage_res, &data_res); if (ret < 0) return ret; - ret = add_resource(res, &bss_res); + ret = add_resource(&kimage_res, &bss_res); return ret; } @@ -129,54 +148,59 @@ static void __init init_resources(void) struct resource *res = NULL; struct resource *mem_res = NULL; size_t mem_res_sz = 0; - int ret = 0, i = 0; - - code_res.start = __pa_symbol(_text); - code_res.end = __pa_symbol(_etext) - 1; - code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - - rodata_res.start = __pa_symbol(__start_rodata); - rodata_res.end = __pa_symbol(__end_rodata) - 1; - rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - - data_res.start = __pa_symbol(_data); - data_res.end = __pa_symbol(_edata) - 1; - data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - - bss_res.start = __pa_symbol(__bss_start); - bss_res.end = __pa_symbol(__bss_stop) - 1; - bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + int num_resources = 0, res_idx = 0; + int ret = 0; /* + 1 as memblock_alloc() might increase memblock.reserved.cnt */ - mem_res_sz = 
(memblock.memory.cnt + memblock.reserved.cnt + 1) * sizeof(*mem_res); + num_resources = memblock.memory.cnt + memblock.reserved.cnt + 1; + res_idx = num_resources - 1; + + mem_res_sz = num_resources * sizeof(*mem_res); mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES); if (!mem_res) panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz); + /* * Start by adding the reserved regions, if they overlap * with /memory regions, insert_resource later on will take * care of it. */ + ret = add_kernel_resources(); + if (ret < 0) + goto error; + +#ifdef CONFIG_KEXEC_CORE + if (crashk_res.start != crashk_res.end) { + ret = add_resource(&iomem_resource, &crashk_res); + if (ret < 0) + goto error; + } +#endif + +#ifdef CONFIG_CRASH_DUMP + if (elfcorehdr_size > 0) { + elfcorehdr_res.start = elfcorehdr_addr; + elfcorehdr_res.end = elfcorehdr_addr + elfcorehdr_size - 1; + elfcorehdr_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + add_resource(&iomem_resource, &elfcorehdr_res); + } +#endif + for_each_reserved_mem_region(region) { - res = &mem_res[i++]; + res = &mem_res[res_idx--]; res->name = "Reserved"; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region)); res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1; - ret = add_kernel_resources(res); - if (ret < 0) - goto error; - else if (ret) - continue; - /* * Ignore any other reserved regions within * system memory. */ if (memblock_is_memory(res->start)) { - memblock_free((phys_addr_t) res, sizeof(struct resource)); + /* Re-use this pre-allocated resource */ + res_idx++; continue; } @@ -187,7 +211,7 @@ static void __init init_resources(void) /* Add /memory regions to the resource tree */ for_each_mem_region(region) { - res = &mem_res[i++]; + res = &mem_res[res_idx--]; if (unlikely(memblock_is_nomap(region))) { res->name = "Reserved"; @@ -205,6 +229,9 @@ static void __init init_resources(void) goto error; } + /* Clean-up any unused pre-allocated resources */ + mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res); + memblock_free((phys_addr_t) mem_res, mem_res_sz); return; error: @@ -251,21 +278,26 @@ void __init setup_arch(char **cmdline_p) efi_init(); setup_bootmem(); paging_init(); - init_resources(); #if IS_ENABLED(CONFIG_BUILTIN_DTB) unflatten_and_copy_device_tree(); #else - if (early_init_dt_verify(__va(dtb_early_pa))) + if (early_init_dt_verify(__va(XIP_FIXUP(dtb_early_pa)))) unflatten_device_tree(); else pr_err("No DTB found in kernel mappings\n"); #endif misc_mem_init(); + init_resources(); sbi_init(); - if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) + if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) { protect_kernel_text_data(); +#if defined(CONFIG_64BIT) && defined(CONFIG_MMU) && !defined(CONFIG_XIP_KERNEL) + protect_kernel_linear_mapping_text_rodata(); +#endif + } + #ifdef CONFIG_SWIOTLB swiotlb_init(1); #endif diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index ea028d9e0d24..921d9d7df400 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -9,6 +9,7 @@ */ #include <linux/cpu.h> +#include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/profile.h> @@ -27,10 +28,11 @@ enum ipi_message_type { IPI_CALL_FUNC, IPI_CPU_STOP, IPI_IRQ_WORK, + IPI_TIMER, IPI_MAX }; -unsigned long __cpuid_to_hartid_map[NR_CPUS] = { +unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = { [0 ... 
NR_CPUS-1] = INVALID_HARTID }; @@ -54,7 +56,7 @@ int riscv_hartid_to_cpuid(int hartid) return i; pr_err("Couldn't find cpu id for hartid [%d]\n", hartid); - return i; + return -ENOENT; } void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out) @@ -85,9 +87,9 @@ static void ipi_stop(void) wait_for_interrupt(); } -static struct riscv_ipi_ops *ipi_ops; +static const struct riscv_ipi_ops *ipi_ops __ro_after_init; -void riscv_set_ipi_ops(struct riscv_ipi_ops *ops) +void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops) { ipi_ops = ops; } @@ -176,6 +178,12 @@ void handle_IPI(struct pt_regs *regs) irq_work_run(); } +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST + if (ops & (1 << IPI_TIMER)) { + stats[IPI_TIMER]++; + tick_receive_broadcast(); + } +#endif BUG_ON((ops >> IPI_MAX) != 0); /* Order data access and bit testing. */ @@ -192,6 +200,7 @@ static const char * const ipi_names[] = { [IPI_CALL_FUNC] = "Function call interrupts", [IPI_CPU_STOP] = "CPU stop interrupts", [IPI_IRQ_WORK] = "IRQ work interrupts", + [IPI_TIMER] = "Timer broadcast interrupts", }; void show_ipi_stats(struct seq_file *p, int prec) @@ -217,6 +226,13 @@ void arch_send_call_function_single_ipi(int cpu) send_ipi_single(cpu, IPI_CALL_FUNC); } +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +void tick_broadcast(const struct cpumask *mask) +{ + send_ipi_mask(mask, IPI_TIMER); +} +#endif + void smp_send_stop(void) { unsigned long timeout; diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 5e276c25646f..9a408e2942ac 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -32,6 +32,7 @@ #include <asm/sections.h> #include <asm/sbi.h> #include <asm/smp.h> +#include <asm/alternative.h> #include "head.h" @@ -40,6 +41,9 @@ static DECLARE_COMPLETION(cpu_running); void __init smp_prepare_boot_cpu(void) { init_cpu_topology(); +#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE + apply_boot_alternatives(); +#endif } void __init smp_prepare_cpus(unsigned int max_cpus) diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c index f1ead9df96ca..a63c667c27b3 100644 --- a/arch/riscv/kernel/syscall_table.c +++ b/arch/riscv/kernel/syscall_table.c @@ -13,7 +13,7 @@ #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), -void *sys_call_table[__NR_syscalls] = { +void * const sys_call_table[__NR_syscalls] = { [0 ... 
__NR_syscalls - 1] = sys_ni_syscall, #include <asm/unistd.h> }; diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c index 1b432264f7ef..8217b0f67c6c 100644 --- a/arch/riscv/kernel/time.c +++ b/arch/riscv/kernel/time.c @@ -11,7 +11,7 @@ #include <asm/processor.h> #include <asm/timex.h> -unsigned long riscv_timebase; +unsigned long riscv_timebase __ro_after_init; EXPORT_SYMBOL_GPL(riscv_timebase); void __init time_init(void) diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 1357abf79570..07fdded10c21 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -197,6 +197,6 @@ int is_valid_bugaddr(unsigned long pc) #endif /* CONFIG_GENERIC_BUG */ /* stvec & scratch is already set from head.S */ -void trap_init(void) +void __init trap_init(void) { } diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c index 3f1d35e7c98a..25a3b8849599 100644 --- a/arch/riscv/kernel/vdso.c +++ b/arch/riscv/kernel/vdso.c @@ -20,8 +20,8 @@ extern char vdso_start[], vdso_end[]; -static unsigned int vdso_pages; -static struct page **vdso_pagelist; +static unsigned int vdso_pages __ro_after_init; +static struct page **vdso_pagelist __ro_after_init; /* * The vDSO data page. diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 71a315e73cbe..24d936c147cd 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -23,7 +23,7 @@ ifneq ($(c-gettimeofday-y),) endif # Build rules -targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o +targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) obj-y += vdso.o vdso-syms.o @@ -41,11 +41,10 @@ KASAN_SANITIZE := n $(obj)/vdso.o: $(obj)/vdso.so # link rule for the .so file, .lds has to be first -SYSCFLAGS_vdso.so.dbg = $(c_flags) -$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE +$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE $(call if_changed,vdsold) -SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ - -Wl,--build-id=sha1 -Wl,--hash-style=both +LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \ + --build-id=sha1 --hash-style=both --eh-frame-hdr # We also create a special relocatable object that should mirror the symbol # table and layout of the linked DSO. With ld --just-symbols we can then @@ -60,13 +59,10 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE # actual build commands # The DSO images are built using a special linker script -# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions. # Make sure only to export the intended __vdso_xxx symbol offsets. 
quiet_cmd_vdsold = VDSOLD $@ - cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \ - -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \ - $(CROSS_COMPILE)objcopy \ - $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ + cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \ + $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ rm $@.tmp # Extracts symbol offsets from the VDSO, converting them into an assembly file diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S new file mode 100644 index 000000000000..4b29b9917f99 --- /dev/null +++ b/arch/riscv/kernel/vmlinux-xip.lds.S @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + * Copyright (C) 2017 SiFive + * Copyright (C) 2020 Vitaly Wool, Konsulko AB + */ + +#include <asm/pgtable.h> +#define LOAD_OFFSET KERNEL_LINK_ADDR +/* No __ro_after_init data in the .rodata section - which will always be ro */ +#define RO_AFTER_INIT_DATA + +#include <asm/vmlinux.lds.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/cache.h> +#include <asm/thread_info.h> + +OUTPUT_ARCH(riscv) +ENTRY(_start) + +jiffies = jiffies_64; + +SECTIONS +{ + /* Beginning of code and text segment */ + . = LOAD_OFFSET; + _xiprom = .; + _start = .; + HEAD_TEXT_SECTION + INIT_TEXT_SECTION(PAGE_SIZE) + /* we have to discard exit text and such at runtime, not link time */ + .exit.text : + { + EXIT_TEXT + } + + .text : { + _text = .; + _stext = .; + TEXT_TEXT + SCHED_TEXT + CPUIDLE_TEXT + LOCK_TEXT + KPROBES_TEXT + ENTRY_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + *(.fixup) + _etext = .; + } + RO_DATA(L1_CACHE_BYTES) + .srodata : { + *(.srodata*) + } + .init.rodata : { + INIT_SETUP(16) + INIT_CALLS + CON_INITCALL + INIT_RAM_FS + } + _exiprom = .; /* End of XIP ROM area */ + + +/* + * From this point, stuff is considered writable and will be copied to RAM + */ + __data_loc = ALIGN(16); /* location in file */ + . = LOAD_OFFSET + XIP_OFFSET; /* location in memory */ + + _sdata = .; /* Start of data section */ + _data = .; + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + _edata = .; + __start_ro_after_init = .; + .data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) { + *(.data..ro_after_init) + } + __end_ro_after_init = .; + + . = ALIGN(PAGE_SIZE); + __init_begin = .; + .init.data : { + INIT_DATA + } + .exit.data : { + EXIT_DATA + } + . = ALIGN(8); + __soc_early_init_table : { + __soc_early_init_table_start = .; + KEEP(*(__soc_early_init_table)) + __soc_early_init_table_end = .; + } + __soc_builtin_dtb_table : { + __soc_builtin_dtb_table_start = .; + KEEP(*(__soc_builtin_dtb_table)) + __soc_builtin_dtb_table_end = .; + } + PERCPU_SECTION(L1_CACHE_BYTES) + + . = ALIGN(PAGE_SIZE); + __init_end = .; + + .sdata : { + __global_pointer$ = . + 0x800; + *(.sdata*) + *(.sbss*) + } + + BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) + EXCEPTION_TABLE(0x10) + + .rel.dyn : AT(ADDR(.rel.dyn) - LOAD_OFFSET) { + *(.rel.dyn*) + } + + /* + * End of copied data. We need a dummy section to get its LMA. + * Also located before final ALIGN() as trailing padding is not stored + * in the resulting binary file and useless to copy. + */ + .data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { } + _edata_loc = LOADADDR(.data.endmark); + + .
= ALIGN(PAGE_SIZE); + _end = .; + + STABS_DEBUG + DWARF_DEBUG + + DISCARDS +} diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index de03cb22d0e9..891742ff75a7 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -4,7 +4,13 @@ * Copyright (C) 2017 SiFive */ -#define LOAD_OFFSET PAGE_OFFSET +#ifdef CONFIG_XIP_KERNEL +#include "vmlinux-xip.lds.S" +#else + +#include <asm/pgtable.h> +#define LOAD_OFFSET KERNEL_LINK_ADDR + #include <asm/vmlinux.lds.h> #include <asm/page.h> #include <asm/cache.h> @@ -90,6 +96,13 @@ SECTIONS } __init_data_end = .; + + . = ALIGN(8); + .alternative : { + __alt_start = .; + *(.alternative) + __alt_end = .; + } __init_end = .; /* Start of data section */ @@ -132,3 +145,4 @@ SECTIONS DISCARDS } +#endif /* CONFIG_XIP_KERNEL */ diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index c5dbd55cbf7c..096463cc6fff 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -231,6 +231,19 @@ asmlinkage void do_page_fault(struct pt_regs *regs) return; } +#ifdef CONFIG_64BIT + /* + * Modules in 64bit kernels lie in their own virtual region which is not + * in the vmalloc region, but dealing with page faults in this region + * or the vmalloc region amounts to doing the same thing: checking that + * the mapping exists in init_mm.pgd and updating user page table, so + * just use vmalloc_fault. + */ + if (unlikely(addr >= MODULES_VADDR && addr < MODULES_END)) { + vmalloc_fault(regs, code, addr); + return; + } +#endif /* Enable interrupts if they were enabled in the parent context. */ if (likely(regs->status & SR_PIE)) local_irq_enable(); diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 92e39cfa5227..dfb5e4f7a670 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -2,6 +2,8 @@ /* * Copyright (C) 2012 Regents of the University of California * Copyright (C) 2019 Western Digital Corporation or its affiliates. 
+ * Copyright (C) 2020 FORTH-ICS/CARV + * Nick Kossifidis <[email protected]> */ #include <linux/init.h> @@ -11,9 +13,11 @@ #include <linux/swap.h> #include <linux/sizes.h> #include <linux/of_fdt.h> +#include <linux/of_reserved_mem.h> #include <linux/libfdt.h> #include <linux/set_memory.h> #include <linux/dma-map-ops.h> +#include <linux/crash_dump.h> #include <asm/fixmap.h> #include <asm/tlbflush.h> @@ -25,14 +29,20 @@ #include "../kernel/head.h" +unsigned long kernel_virt_addr = KERNEL_LINK_ADDR; +EXPORT_SYMBOL(kernel_virt_addr); +#ifdef CONFIG_XIP_KERNEL +#define kernel_virt_addr (*((unsigned long *)XIP_FIXUP(&kernel_virt_addr))) +#endif + unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); extern char _start[]; #define DTB_EARLY_BASE_VA PGDIR_SIZE -void *dtb_early_va __initdata; -uintptr_t dtb_early_pa __initdata; +void *_dtb_early_va __initdata; +uintptr_t _dtb_early_pa __initdata; struct pt_alloc_ops { pte_t *(*get_pte_virt)(phys_addr_t pa); @@ -57,7 +67,7 @@ static void __init zone_sizes_init(void) free_area_init(max_zone_pfns); } -static void setup_zero_page(void) +static void __init setup_zero_page(void) { memset((void *)empty_zero_page, 0, PAGE_SIZE); } @@ -75,7 +85,7 @@ static inline void print_mlm(char *name, unsigned long b, unsigned long t) (((t) - (b)) >> 20)); } -static void print_vm_layout(void) +static void __init print_vm_layout(void) { pr_notice("Virtual kernel memory layout:\n"); print_mlk("fixmap", (unsigned long)FIXADDR_START, @@ -88,6 +98,10 @@ static void print_vm_layout(void) (unsigned long)VMALLOC_END); print_mlm("lowmem", (unsigned long)PAGE_OFFSET, (unsigned long)high_memory); +#ifdef CONFIG_64BIT + print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR, + (unsigned long)ADDRESS_SPACE_END); +#endif } #else static void print_vm_layout(void) { } @@ -112,11 +126,20 @@ void __init setup_bootmem(void) phys_addr_t dram_end = memblock_end_of_DRAM(); phys_addr_t max_mapped_addr = __pa(~(ulong)0); +#ifdef CONFIG_XIP_KERNEL + vmlinux_start = __pa_symbol(&_sdata); +#endif + /* The maximal physical memory size is -PAGE_OFFSET. */ memblock_enforce_memory_limit(-PAGE_OFFSET); - /* Reserve from the start of the kernel to the end of the kernel */ - memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); + /* + * Reserve from the start of the kernel to the end of the kernel + * and make sure we align the reservation on PMD_SIZE since we will + * map the kernel in the linear mapping as read-only: we do not want + * any allocation to happen between _end and the next pmd aligned page. 
+ */ + memblock_reserve(vmlinux_start, (vmlinux_end - vmlinux_start + PMD_SIZE - 1) & PMD_MASK); /* * memblock allocator is not aware of the fact that last 4K bytes of @@ -127,8 +150,9 @@ void __init setup_bootmem(void) if (max_mapped_addr == (dram_end - 1)) memblock_set_current_limit(max_mapped_addr - 4096); - max_pfn = PFN_DOWN(dram_end); - max_low_pfn = max_pfn; + min_low_pfn = PFN_UP(memblock_start_of_DRAM()); + max_low_pfn = max_pfn = PFN_DOWN(dram_end); + dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); @@ -147,12 +171,42 @@ void __init setup_bootmem(void) memblock_allow_resize(); } +#ifdef CONFIG_XIP_KERNEL + +extern char _xiprom[], _exiprom[]; +extern char _sdata[], _edata[]; + +#endif /* CONFIG_XIP_KERNEL */ + #ifdef CONFIG_MMU -static struct pt_alloc_ops pt_ops; +static struct pt_alloc_ops _pt_ops __ro_after_init; + +#ifdef CONFIG_XIP_KERNEL +#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops)) +#else +#define pt_ops _pt_ops +#endif -unsigned long va_pa_offset; +/* Offset between linear mapping virtual address and kernel load address */ +unsigned long va_pa_offset __ro_after_init; EXPORT_SYMBOL(va_pa_offset); -unsigned long pfn_base; +#ifdef CONFIG_XIP_KERNEL +#define va_pa_offset (*((unsigned long *)XIP_FIXUP(&va_pa_offset))) +#endif +/* Offset between kernel mapping virtual address and kernel load address */ +#ifdef CONFIG_64BIT +unsigned long va_kernel_pa_offset; +EXPORT_SYMBOL(va_kernel_pa_offset); +#endif +#ifdef CONFIG_XIP_KERNEL +#define va_kernel_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_pa_offset))) +#endif +unsigned long va_kernel_xip_pa_offset; +EXPORT_SYMBOL(va_kernel_xip_pa_offset); +#ifdef CONFIG_XIP_KERNEL +#define va_kernel_xip_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_xip_pa_offset))) +#endif +unsigned long pfn_base __ro_after_init; EXPORT_SYMBOL(pfn_base); pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; @@ -161,6 +215,12 @@ pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); +#ifdef CONFIG_XIP_KERNEL +#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir)) +#define fixmap_pte ((pte_t *)XIP_FIXUP(fixmap_pte)) +#define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir)) +#endif /* CONFIG_XIP_KERNEL */ + void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) { unsigned long addr = __fix_to_virt(idx); @@ -212,8 +272,8 @@ static phys_addr_t alloc_pte_late(uintptr_t va) unsigned long vaddr; vaddr = __get_free_page(GFP_KERNEL); - if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr))) - BUG(); + BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr))); + return __pa(vaddr); } @@ -236,6 +296,12 @@ pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss; pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); +#ifdef CONFIG_XIP_KERNEL +#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd)) +#define fixmap_pmd ((pmd_t *)XIP_FIXUP(fixmap_pmd)) +#define early_pmd ((pmd_t *)XIP_FIXUP(early_pmd)) +#endif /* CONFIG_XIP_KERNEL */ + static pmd_t *__init get_pmd_virt_early(phys_addr_t pa) { /* Before MMU is enabled */ @@ -255,7 +321,7 @@ static pmd_t *get_pmd_virt_late(phys_addr_t pa) static phys_addr_t __init alloc_pmd_early(uintptr_t va) { - BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT); + BUG_ON((va - kernel_virt_addr) >> PGDIR_SHIFT); return (uintptr_t)early_pmd; } @@ -352,6 +418,19 @@ static uintptr_t 
__init best_map_size(phys_addr_t base, phys_addr_t size) return PMD_SIZE; } +#ifdef CONFIG_XIP_KERNEL +/* called from head.S with MMU off */ +asmlinkage void __init __copy_data(void) +{ + void *from = (void *)(&_sdata); + void *end = (void *)(&_end); + void *to = (void *)CONFIG_PHYS_RAM_BASE; + size_t sz = (size_t)(end - from + 1); + + memcpy(to, from, sz); +} +#endif + /* * setup_vm() is called from head.S with MMU-off. * @@ -370,17 +449,74 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size) #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing." #endif +uintptr_t load_pa, load_sz; +#ifdef CONFIG_XIP_KERNEL +#define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa))) +#define load_sz (*((uintptr_t *)XIP_FIXUP(&load_sz))) +#endif + +#ifdef CONFIG_XIP_KERNEL +uintptr_t xiprom, xiprom_sz; +#define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz))) +#define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom))) + +static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size) +{ + uintptr_t va, end_va; + + /* Map the flash resident part */ + end_va = kernel_virt_addr + xiprom_sz; + for (va = kernel_virt_addr; va < end_va; va += map_size) + create_pgd_mapping(pgdir, va, + xiprom + (va - kernel_virt_addr), + map_size, PAGE_KERNEL_EXEC); + + /* Map the data in RAM */ + end_va = kernel_virt_addr + XIP_OFFSET + load_sz; + for (va = kernel_virt_addr + XIP_OFFSET; va < end_va; va += map_size) + create_pgd_mapping(pgdir, va, + load_pa + (va - (kernel_virt_addr + XIP_OFFSET)), + map_size, PAGE_KERNEL); +} +#else +static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size) +{ + uintptr_t va, end_va; + + end_va = kernel_virt_addr + load_sz; + for (va = kernel_virt_addr; va < end_va; va += map_size) + create_pgd_mapping(pgdir, va, + load_pa + (va - kernel_virt_addr), + map_size, PAGE_KERNEL_EXEC); +} +#endif + asmlinkage void __init setup_vm(uintptr_t dtb_pa) { - uintptr_t va, pa, end_va; - uintptr_t load_pa = (uintptr_t)(&_start); - uintptr_t load_sz = (uintptr_t)(&_end) - load_pa; + uintptr_t __maybe_unused pa; uintptr_t map_size; #ifndef __PAGETABLE_PMD_FOLDED pmd_t fix_bmap_spmd, fix_bmap_epmd; #endif +#ifdef CONFIG_XIP_KERNEL + xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR; + xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom); + + load_pa = (uintptr_t)CONFIG_PHYS_RAM_BASE; + load_sz = (uintptr_t)(&_end) - (uintptr_t)(&_sdata); + + va_kernel_xip_pa_offset = kernel_virt_addr - xiprom; +#else + load_pa = (uintptr_t)(&_start); + load_sz = (uintptr_t)(&_end) - load_pa; +#endif + va_pa_offset = PAGE_OFFSET - load_pa; +#ifdef CONFIG_64BIT + va_kernel_pa_offset = kernel_virt_addr - load_pa; +#endif + pfn_base = PFN_DOWN(load_pa); /* @@ -408,26 +544,27 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) create_pmd_mapping(fixmap_pmd, FIXADDR_START, (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE); /* Setup trampoline PGD and PMD */ - create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET, + create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr, (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE); - create_pmd_mapping(trampoline_pmd, PAGE_OFFSET, +#ifdef CONFIG_XIP_KERNEL + create_pmd_mapping(trampoline_pmd, kernel_virt_addr, + xiprom, PMD_SIZE, PAGE_KERNEL_EXEC); +#else + create_pmd_mapping(trampoline_pmd, kernel_virt_addr, load_pa, PMD_SIZE, PAGE_KERNEL_EXEC); +#endif #else /* Setup trampoline PGD */ - create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET, + create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr, load_pa, PGDIR_SIZE, 
PAGE_KERNEL_EXEC); #endif /* - * Setup early PGD covering entire kernel which will allows + * Setup early PGD covering entire kernel which will allow * us to reach paging_init(). We map all memory banks later * in setup_vm_final() below. */ - end_va = PAGE_OFFSET + load_sz; - for (va = PAGE_OFFSET; va < end_va; va += map_size) - create_pgd_mapping(early_pg_dir, va, - load_pa + (va - PAGE_OFFSET), - map_size, PAGE_KERNEL_EXEC); + create_kernel_page_table(early_pg_dir, map_size); #ifndef __PAGETABLE_PMD_FOLDED /* Setup early PMD for DTB */ @@ -442,7 +579,16 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL); dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1)); #else /* CONFIG_BUILTIN_DTB */ +#ifdef CONFIG_64BIT + /* + * __va can't be used since it would return a linear mapping address + * whereas dtb_early_va will be used before setup_vm_final installs + * the linear mapping. + */ + dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa)); +#else dtb_early_va = __va(dtb_pa); +#endif /* CONFIG_64BIT */ #endif /* CONFIG_BUILTIN_DTB */ #else #ifndef CONFIG_BUILTIN_DTB @@ -454,7 +600,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL); dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1)); #else /* CONFIG_BUILTIN_DTB */ +#ifdef CONFIG_64BIT + dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa)); +#else dtb_early_va = __va(dtb_pa); +#endif /* CONFIG_64BIT */ #endif /* CONFIG_BUILTIN_DTB */ #endif dtb_early_pa = dtb_pa; @@ -490,6 +640,22 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) #endif } +#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL) +void protect_kernel_linear_mapping_text_rodata(void) +{ + unsigned long text_start = (unsigned long)lm_alias(_start); + unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin); + unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata); + unsigned long data_start = (unsigned long)lm_alias(_data); + + set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT); + set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT); + + set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT); + set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT); +} +#endif + static void __init setup_vm_final(void) { uintptr_t va, map_size; @@ -511,7 +677,7 @@ static void __init setup_vm_final(void) __pa_symbol(fixmap_pgd_next), PGDIR_SIZE, PAGE_TABLE); - /* Map all memory banks */ + /* Map all memory banks in the linear mapping */ for_each_mem_range(i, &start, &end) { if (start >= end) break; @@ -523,10 +689,22 @@ static void __init setup_vm_final(void) for (pa = start; pa < end; pa += map_size) { va = (uintptr_t)__va(pa); create_pgd_mapping(swapper_pg_dir, va, pa, - map_size, PAGE_KERNEL_EXEC); + map_size, +#ifdef CONFIG_64BIT + PAGE_KERNEL +#else + PAGE_KERNEL_EXEC +#endif + ); + } } +#ifdef CONFIG_64BIT + /* Map the kernel */ + create_kernel_page_table(swapper_pg_dir, PMD_SIZE); +#endif + /* Clear fixmap PTE and PMD mappings */ clear_fixmap(FIX_PTE); clear_fixmap(FIX_PMD); @@ -556,7 +734,7 @@ static inline void setup_vm_final(void) #endif /* CONFIG_MMU */ #ifdef CONFIG_STRICT_KERNEL_RWX -void protect_kernel_text_data(void) +void __init protect_kernel_text_data(void) { unsigned long text_start = (unsigned long)_start; unsigned long init_text_start = (unsigned long)__init_text_begin; @@ -584,6 +762,103 @@ void mark_rodata_ro(void) } #endif +#ifdef 
CONFIG_KEXEC_CORE +/* + * reserve_crashkernel() - reserves memory for crash kernel + * + * This function reserves memory area given in "crashkernel=" kernel command + * line parameter. The memory reserved is used by dump capture kernel when + * primary kernel is crashing. + */ +static void __init reserve_crashkernel(void) +{ + unsigned long long crash_base = 0; + unsigned long long crash_size = 0; + unsigned long search_start = memblock_start_of_DRAM(); + unsigned long search_end = memblock_end_of_DRAM(); + + int ret = 0; + + /* + * Don't reserve a region for a crash kernel on a crash kernel + * since it doesn't make much sense and we have limited memory + * resources. + */ +#ifdef CONFIG_CRASH_DUMP + if (is_kdump_kernel()) { + pr_info("crashkernel: ignoring reservation request\n"); + return; + } +#endif + + ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), + &crash_size, &crash_base); + if (ret || !crash_size) + return; + + crash_size = PAGE_ALIGN(crash_size); + + if (crash_base == 0) { + /* + * Current riscv boot protocol requires 2MB alignment for + * RV64 and 4MB alignment for RV32 (hugepage size) + */ + crash_base = memblock_find_in_range(search_start, search_end, + crash_size, PMD_SIZE); + + if (crash_base == 0) { + pr_warn("crashkernel: couldn't allocate %lldKB\n", + crash_size >> 10); + return; + } + } else { + /* User specifies base address explicitly. */ + if (!memblock_is_region_memory(crash_base, crash_size)) { + pr_warn("crashkernel: requested region is not memory\n"); + return; + } + + if (memblock_is_region_reserved(crash_base, crash_size)) { + pr_warn("crashkernel: requested region is reserved\n"); + return; + } + + + if (!IS_ALIGNED(crash_base, PMD_SIZE)) { + pr_warn("crashkernel: requested region is misaligned\n"); + return; + } + } + memblock_reserve(crash_base, crash_size); + + pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n", + crash_base, crash_base + crash_size, crash_size >> 20); + + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; +} +#endif /* CONFIG_KEXEC_CORE */ + +#ifdef CONFIG_CRASH_DUMP +/* + * We keep track of the ELF core header of the crashed + * kernel with a reserved-memory region with compatible + * string "linux,elfcorehdr". Here we register a callback + * to populate elfcorehdr_addr/size when this region is + * present. Note that this region will be marked as + * reserved once we call early_init_fdt_scan_reserved_mem() + * later on. 
+ */ +static int elfcore_hdr_setup(struct reserved_mem *rmem) +{ + elfcorehdr_addr = rmem->base; + elfcorehdr_size = rmem->size; + return 0; +} + +RESERVEDMEM_OF_DECLARE(elfcorehdr, "linux,elfcorehdr", elfcore_hdr_setup); +#endif + void __init paging_init(void) { setup_vm_final(); @@ -592,9 +867,13 @@ void __init paging_init(void) void __init misc_mem_init(void) { + early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT); arch_numa_init(); sparse_init(); zone_sizes_init(); +#ifdef CONFIG_KEXEC_CORE + reserve_crashkernel(); +#endif memblock_dump_all(); } diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index 937d13ce9ab8..9daacae93e33 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -11,18 +11,6 @@ #include <asm/fixmap.h> #include <asm/pgalloc.h> -static __init void *early_alloc(size_t size, int node) -{ - void *ptr = memblock_alloc_try_nid(size, size, - __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node); - - if (!ptr) - panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n", - __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS)); - - return ptr; -} - extern pgd_t early_pg_dir[PTRS_PER_PGD]; asmlinkage void __init kasan_early_init(void) { @@ -60,7 +48,7 @@ asmlinkage void __init kasan_early_init(void) local_flush_tlb_all(); } -static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end) +static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end) { phys_addr_t phys_addr; pte_t *ptep, *base_pte; @@ -82,7 +70,7 @@ static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long en set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE)); } -static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end) +static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end) { phys_addr_t phys_addr; pmd_t *pmdp, *base_pmd; @@ -117,7 +105,7 @@ static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long en set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE)); } -static void kasan_populate_pgd(unsigned long vaddr, unsigned long end) +static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end) { phys_addr_t phys_addr; pgd_t *pgdp = pgd_offset_k(vaddr); @@ -155,39 +143,27 @@ static void __init kasan_populate(void *start, void *end) memset(start, KASAN_SHADOW_INIT, end - start); } +static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end) +{ + unsigned long next; + void *p; + pgd_t *pgd_k = pgd_offset_k(vaddr); + + do { + next = pgd_addr_end(vaddr, end); + if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) { + p = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE)); + } + } while (pgd_k++, vaddr = next, vaddr != end); +} + static void __init kasan_shallow_populate(void *start, void *end) { unsigned long vaddr = (unsigned long)start & PAGE_MASK; unsigned long vend = PAGE_ALIGN((unsigned long)end); - unsigned long pfn; - int index; - void *p; - pud_t *pud_dir, *pud_k; - pgd_t *pgd_dir, *pgd_k; - p4d_t *p4d_dir, *p4d_k; - - while (vaddr < vend) { - index = pgd_index(vaddr); - pfn = csr_read(CSR_SATP) & SATP_PPN; - pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index; - pgd_k = init_mm.pgd + index; - pgd_dir = pgd_offset_k(vaddr); - set_pgd(pgd_dir, *pgd_k); - - p4d_dir = p4d_offset(pgd_dir, vaddr); - p4d_k = p4d_offset(pgd_k, vaddr); - - vaddr = (vaddr + PUD_SIZE) & PUD_MASK; - pud_dir 
= pud_offset(p4d_dir, vaddr); - pud_k = pud_offset(p4d_k, vaddr); - - if (pud_present(*pud_dir)) { - p = early_alloc(PAGE_SIZE, NUMA_NO_NODE); - pud_populate(&init_mm, pud_dir, p); - } - vaddr += PAGE_SIZE; - } + kasan_shallow_populate_pgd(vaddr, vend); local_flush_tlb_all(); } @@ -196,6 +172,10 @@ void __init kasan_init(void) phys_addr_t _start, _end; u64 i; + /* + * Populate all kernel virtual address space with kasan_early_shadow_page + * except for the linear mapping and the modules/kernel/BPF mapping. + */ kasan_populate_early_shadow((void *)KASAN_SHADOW_START, (void *)kasan_mem_to_shadow((void *) VMEMMAP_END)); @@ -208,6 +188,7 @@ void __init kasan_init(void) (void *)kasan_mem_to_shadow((void *)VMALLOC_START), (void *)kasan_mem_to_shadow((void *)VMALLOC_END)); + /* Populate the linear mapping */ for_each_mem_range(i, &_start, &_end) { void *start = (void *)__va(_start); void *end = (void *)__va(_end); @@ -218,6 +199,10 @@ void __init kasan_init(void) kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end)); } + /* Populate kernel, BPF, modules mapping */ + kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR), + kasan_mem_to_shadow((const void *)BPF_JIT_REGION_END)); + for (i = 0; i < PTRS_PER_PTE; i++) set_pte(&kasan_early_shadow_pte[i], mk_pte(virt_to_page(kasan_early_shadow_page), diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c index e8e4dcd39fed..35703d5ef5fd 100644 --- a/arch/riscv/mm/physaddr.c +++ b/arch/riscv/mm/physaddr.c @@ -23,7 +23,7 @@ EXPORT_SYMBOL(__virt_to_phys); phys_addr_t __phys_addr_symbol(unsigned long x) { - unsigned long kernel_start = (unsigned long)PAGE_OFFSET; + unsigned long kernel_start = (unsigned long)kernel_virt_addr; unsigned long kernel_end = (unsigned long)_end; /* diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c index ace74dec7492..0536ac84b730 100644 --- a/arch/riscv/mm/ptdump.c +++ b/arch/riscv/mm/ptdump.c @@ -58,29 +58,56 @@ struct ptd_mm_info { unsigned long end; }; +enum address_markers_idx { +#ifdef CONFIG_KASAN + KASAN_SHADOW_START_NR, + KASAN_SHADOW_END_NR, +#endif + FIXMAP_START_NR, + FIXMAP_END_NR, + PCI_IO_START_NR, + PCI_IO_END_NR, +#ifdef CONFIG_SPARSEMEM_VMEMMAP + VMEMMAP_START_NR, + VMEMMAP_END_NR, +#endif + VMALLOC_START_NR, + VMALLOC_END_NR, + PAGE_OFFSET_NR, +#ifdef CONFIG_64BIT + MODULES_MAPPING_NR, + KERNEL_MAPPING_NR, +#endif + END_OF_SPACE_NR +}; + static struct addr_marker address_markers[] = { #ifdef CONFIG_KASAN - {KASAN_SHADOW_START, "Kasan shadow start"}, - {KASAN_SHADOW_END, "Kasan shadow end"}, + {0, "Kasan shadow start"}, + {0, "Kasan shadow end"}, #endif - {FIXADDR_START, "Fixmap start"}, - {FIXADDR_TOP, "Fixmap end"}, - {PCI_IO_START, "PCI I/O start"}, - {PCI_IO_END, "PCI I/O end"}, + {0, "Fixmap start"}, + {0, "Fixmap end"}, + {0, "PCI I/O start"}, + {0, "PCI I/O end"}, #ifdef CONFIG_SPARSEMEM_VMEMMAP - {VMEMMAP_START, "vmemmap start"}, - {VMEMMAP_END, "vmemmap end"}, + {0, "vmemmap start"}, + {0, "vmemmap end"}, +#endif + {0, "vmalloc() area"}, + {0, "vmalloc() end"}, + {0, "Linear mapping"}, +#ifdef CONFIG_64BIT + {0, "Modules mapping"}, + {0, "Kernel mapping (kernel, BPF)"}, #endif - {VMALLOC_START, "vmalloc() area"}, - {VMALLOC_END, "vmalloc() end"}, - {PAGE_OFFSET, "Linear mapping"}, {-1, NULL}, }; static struct ptd_mm_info kernel_ptd_info = { .mm = &init_mm, .markers = address_markers, - .base_addr = KERN_VIRT_START, + .base_addr = 0, .end = ULONG_MAX, }; @@ -331,10 +358,32 @@ static int ptdump_show(struct seq_file *m, void *v) DEFINE_SHOW_ATTRIBUTE(ptdump); 
-static int ptdump_init(void) +static int __init ptdump_init(void) { unsigned int i, j; +#ifdef CONFIG_KASAN + address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START; + address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END; +#endif + address_markers[FIXMAP_START_NR].start_address = FIXADDR_START; + address_markers[FIXMAP_END_NR].start_address = FIXADDR_TOP; + address_markers[PCI_IO_START_NR].start_address = PCI_IO_START; + address_markers[PCI_IO_END_NR].start_address = PCI_IO_END; +#ifdef CONFIG_SPARSEMEM_VMEMMAP + address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START; + address_markers[VMEMMAP_END_NR].start_address = VMEMMAP_END; +#endif + address_markers[VMALLOC_START_NR].start_address = VMALLOC_START; + address_markers[VMALLOC_END_NR].start_address = VMALLOC_END; + address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET; +#ifdef CONFIG_64BIT + address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR; + address_markers[KERNEL_MAPPING_NR].start_address = kernel_virt_addr; +#endif + + kernel_ptd_info.base_addr = KERN_VIRT_START; + for (i = 0; i < ARRAY_SIZE(pg_level); i++) for (j = 0; j < ARRAY_SIZE(pte_bits); j++) pg_level[i].mask |= pte_bits[j].mask; diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index b44ff52f84a6..87e3bf5b9086 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -1148,16 +1148,3 @@ void bpf_jit_build_epilogue(struct rv_jit_context *ctx) { __build_epilogue(false, ctx); } - -void *bpf_jit_alloc_exec(unsigned long size) -{ - return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START, - BPF_JIT_REGION_END, GFP_KERNEL, - PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, - __builtin_return_address(0)); -} - -void bpf_jit_free_exec(void *addr) -{ - return vfree(addr); -} diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c index 3630d447352c..fed86f42dfbe 100644 --- a/arch/riscv/net/bpf_jit_core.c +++ b/arch/riscv/net/bpf_jit_core.c @@ -152,6 +152,7 @@ skip_init_ctx: bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns); if (!prog->is_func || extra_pass) { + bpf_jit_binary_lock_ro(jit_data->header); out_offset: kfree(ctx->offset); kfree(jit_data); @@ -164,3 +165,16 @@ out: tmp : orig_prog); return prog; } + +void *bpf_jit_alloc_exec(unsigned long size) +{ + return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START, + BPF_JIT_REGION_END, GFP_KERNEL, + PAGE_KERNEL, 0, NUMA_NO_NODE, + __builtin_return_address(0)); +} + +void bpf_jit_free_exec(void *addr) +{ + return vfree(addr); +} diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c1ff874e6c2e..d72989591223 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -60,6 +60,9 @@ config S390 imply IMA_SECURE_AND_OR_TRUSTED_BOOT select ARCH_32BIT_USTAT_F_TINODE select ARCH_BINFMT_ELF_STATE + select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM + select ARCH_ENABLE_MEMORY_HOTREMOVE + select ARCH_ENABLE_SPLIT_PMD_PTLOCK select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEBUG_WX select ARCH_HAS_DEVMEM_IS_ALLOWED @@ -626,15 +629,6 @@ config ARCH_SPARSEMEM_ENABLE config ARCH_SPARSEMEM_DEFAULT def_bool y -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y if SPARSEMEM - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - -config ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y - config MAX_PHYSMEM_BITS int "Maximum size of supported physical memory in bits (42-53)" range 42 53 diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 3b5a4d25ca9b..da36d13ffc16 100644 --- 
a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -189,7 +189,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, return pte; } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgdp; diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index e798e55915c2..68129537e350 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -2,6 +2,8 @@ config SUPERH def_bool y select ARCH_32BIT_OFF_T + select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM && MMU + select ARCH_ENABLE_MEMORY_HOTREMOVE if SPARSEMEM && MMU select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A) select ARCH_HAS_BINFMT_FLAT if !MMU @@ -101,9 +103,6 @@ config SYS_SUPPORTS_APM_EMULATION bool select ARCH_SUSPEND_POSSIBLE -config SYS_SUPPORTS_HUGETLBFS - bool - config SYS_SUPPORTS_SMP bool @@ -175,12 +174,12 @@ config CPU_SH3 config CPU_SH4 bool + select ARCH_SUPPORTS_HUGETLBFS if MMU select CPU_HAS_INTEVT select CPU_HAS_SR_RB select CPU_HAS_FPU if !CPU_SH4AL_DSP select SH_INTC select SYS_SUPPORTS_SH_TMU - select SYS_SUPPORTS_HUGETLBFS if MMU config CPU_SH4A bool diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 77aa2f802d8d..d551a9cac41e 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -136,14 +136,6 @@ config ARCH_SPARSEMEM_DEFAULT config ARCH_SELECT_MEMORY_MODEL def_bool y -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - depends on SPARSEMEM && MMU - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - depends on SPARSEMEM && MMU - config ARCH_MEMORY_PROBE def_bool y depends on MEMORY_HOTPLUG diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index 220d7bc43d2b..999ab5916e69 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -21,7 +21,7 @@ #include <asm/tlbflush.h> #include <asm/cacheflush.h> -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index ad4b42f04988..04d8790f6c32 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -279,7 +279,7 @@ unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&p unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); } unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug index 315d368e63ad..1dfb2959c73b 100644 --- a/arch/um/Kconfig.debug +++ b/arch/um/Kconfig.debug @@ -17,6 +17,7 @@ config GCOV bool "Enable gcov support" depends on DEBUG_INFO depends on !KCOV + depends on !MODULES help This option allows developers to retrieve coverage data from a UML session. 
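The huge_pte_alloc() hunks above (s390, sh, sparc) all apply the same cross-architecture API change: the allocator now receives the VMA in addition to the mm, so the architecture code can consult the mapping itself when allocating a huge PTE. A minimal sketch of how a call site adapts, assuming kernel context; the wrapper name is hypothetical and not part of this series::

	/* Hypothetical caller: the added vma argument is the only change
	 * needed at huge_pte_alloc() call sites.
	 */
	static pte_t *example_huge_pte(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long addr, unsigned long sz)
	{
		/* Before this series: huge_pte_alloc(mm, addr, sz); */
		return huge_pte_alloc(mm, vma, addr, sz);
	}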
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c index d35d3f305a31..5b064d360cb7 100644 --- a/arch/um/drivers/hostaudio_kern.c +++ b/arch/um/drivers/hostaudio_kern.c @@ -122,13 +122,11 @@ static ssize_t hostaudio_write(struct file *file, const char __user *buffer, static __poll_t hostaudio_poll(struct file *file, struct poll_table_struct *wait) { - __poll_t mask = 0; - #ifdef DEBUG printk(KERN_DEBUG "hostaudio: poll called (unimplemented)\n"); #endif - return mask; + return 0; } static long hostaudio_ioctl(struct file *file, diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index 47a02e60898d..d27a2a9faf3e 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -8,7 +8,6 @@ * Copyright (C) 2001 by various other people who didn't put their name here. */ -#include <linux/version.h> #include <linux/memblock.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h index def376194dce..b9e20bbe2f75 100644 --- a/arch/um/include/asm/pgtable.h +++ b/arch/um/include/asm/pgtable.h @@ -302,7 +302,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) struct mm_struct; extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); -#define update_mmu_cache(vma,address,ptep) do ; while (0) +#define update_mmu_cache(vma,address,ptep) do {} while (0) /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 5) & 0x1f) diff --git a/arch/um/include/uapi/asm/Kbuild b/arch/um/include/uapi/asm/Kbuild new file mode 100644 index 000000000000..f66554cd5c45 --- /dev/null +++ b/arch/um/include/uapi/asm/Kbuild @@ -0,0 +1 @@ +# SPDX-License-Identifier: GPL-2.0 diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile index 5aa882011e04..e698e0c7dbdc 100644 --- a/arch/um/kernel/Makefile +++ b/arch/um/kernel/Makefile @@ -21,7 +21,6 @@ obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \ obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o obj-$(CONFIG_GPROF) += gprof_syms.o -obj-$(CONFIG_GCOV) += gmon_syms.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_STACKTRACE) += stacktrace.o diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S index dacbfabf66d8..2f2a8ce92f1e 100644 --- a/arch/um/kernel/dyn.lds.S +++ b/arch/um/kernel/dyn.lds.S @@ -6,6 +6,12 @@ OUTPUT_ARCH(ELF_ARCH) ENTRY(_start) jiffies = jiffies_64; +VERSION { + { + local: *; + }; +} + SECTIONS { PROVIDE (__executable_start = START); diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c deleted file mode 100644 index 9361a8eb9bf1..000000000000 --- a/arch/um/kernel/gmon_syms.c +++ /dev/null @@ -1,16 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) - */ - -#include <linux/module.h> - -extern void __bb_init_func(void *) __attribute__((weak)); -EXPORT_SYMBOL(__bb_init_func); - -extern void __gcov_init(void *) __attribute__((weak)); -EXPORT_SYMBOL(__gcov_init); -extern void __gcov_merge_add(void *, unsigned int) __attribute__((weak)); -EXPORT_SYMBOL(__gcov_merge_add); -extern void __gcov_exit(void) __attribute__((weak)); -EXPORT_SYMBOL(__gcov_exit); diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 9019ff5905b1..8e636ce02949 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -72,8 +72,7 @@ static void __init one_page_table_init(pmd_t *pmd) set_pmd(pmd, __pmd(_KERNPG_TABLE + (unsigned long) __pa(pte))); - if (pte != pte_offset_kernel(pmd, 0)) - 
BUG(); + BUG_ON(pte != pte_offset_kernel(pmd, 0)); } } diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S index 45d957d7004c..7a8e2b123e29 100644 --- a/arch/um/kernel/uml.lds.S +++ b/arch/um/kernel/uml.lds.S @@ -7,6 +7,12 @@ OUTPUT_ARCH(ELF_ARCH) ENTRY(_start) jiffies = jiffies_64; +VERSION { + { + local: *; + }; +} + SECTIONS { /* This must contain the right address - not quite the default ELF one.*/ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index dac15f646f79..0045e1b44190 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -60,7 +60,13 @@ config X86 select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI select ARCH_32BIT_OFF_T if X86_32 select ARCH_CLOCKSOURCE_INIT + select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION + select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64 || (X86_32 && HIGHMEM) + select ARCH_ENABLE_MEMORY_HOTREMOVE if MEMORY_HOTPLUG + select ARCH_ENABLE_SPLIT_PMD_PTLOCK if X86_64 || X86_PAE + select ARCH_ENABLE_THP_MIGRATION if X86_64 && TRANSPARENT_HUGEPAGE select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI + select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE select ARCH_HAS_DEVMEM_IS_ALLOWED @@ -165,6 +171,7 @@ config X86 select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64 select HAVE_ARCH_USERFAULTFD_WP if X86_64 && USERFAULTFD + select HAVE_ARCH_USERFAULTFD_MINOR if X86_64 && USERFAULTFD select HAVE_ARCH_VMAP_STACK if X86_64 select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET select HAVE_ARCH_WITHIN_STACK_FRAMES @@ -315,9 +322,6 @@ config GENERIC_CALIBRATE_DELAY config ARCH_HAS_CPU_RELAX def_bool y -config ARCH_HAS_CACHE_LINE_SIZE - def_bool y - config ARCH_HAS_FILTER_PGPROT def_bool y @@ -2428,30 +2432,13 @@ config ARCH_HAS_ADD_PAGES def_bool y depends on X86_64 && ARCH_ENABLE_MEMORY_HOTPLUG -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - depends on X86_64 || (X86_32 && HIGHMEM) - -config ARCH_ENABLE_MEMORY_HOTREMOVE +config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE def_bool y - depends on MEMORY_HOTPLUG config USE_PERCPU_NUMA_NODE_ID def_bool y depends on NUMA -config ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y - depends on X86_64 || X86_PAE - -config ARCH_ENABLE_HUGEPAGE_MIGRATION - def_bool y - depends on X86_64 && HUGETLB_PAGE && MIGRATION - -config ARCH_ENABLE_THP_MIGRATION - def_bool y - depends on X86_64 && TRANSPARENT_HUGEPAGE - menu "Power management and ACPI options" config ARCH_HIBERNATION_HEADER diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index 427980617557..156cd235659f 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -16,6 +16,8 @@ #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/libnvdimm.h> +#include <linux/vmstat.h> +#include <linux/kernel.h> #include <asm/e820/api.h> #include <asm/processor.h> @@ -91,6 +93,12 @@ static void split_page_count(int level) return; direct_pages_count[level]--; + if (system_state == SYSTEM_RUNNING) { + if (level == PG_LEVEL_2M) + count_vm_event(DIRECT_MAP_LEVEL2_SPLIT); + else if (level == PG_LEVEL_1G) + count_vm_event(DIRECT_MAP_LEVEL3_SPLIT); + } direct_pages_count[level - 1] += PTRS_PER_PTE; } diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index bfa50e65ef6c..ae744b6a0785 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c @@ -126,7 +126,7 @@ static int __init early_root_info_init(void) node = (reg >> 4) & 0x07; link = (reg >> 8) & 0x03; - info = alloc_pci_root_info(min_bus, max_bus, node, link); + 
alloc_pci_root_info(min_bus, max_bus, node, link); } /* diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile index 77f70b969d14..5ccb18290d71 100644 --- a/arch/x86/um/Makefile +++ b/arch/x86/um/Makefile @@ -21,6 +21,7 @@ obj-y += checksum_32.o syscalls_32.o obj-$(CONFIG_ELF_CORE) += elfcore.o subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o +subarch-y += ../lib/cmpxchg8b_emu.o ../lib/atomic64_386_32.o subarch-y += ../kernel/sys_ia32.o else diff --git a/arch/x86/um/asm/elf.h b/arch/x86/um/asm/elf.h index c907b20d4993..dcaf3b38a9e0 100644 --- a/arch/x86/um/asm/elf.h +++ b/arch/x86/um/asm/elf.h @@ -212,6 +212,6 @@ extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu); extern long elf_aux_hwcap; #define ELF_HWCAP (elf_aux_hwcap) -#define SET_PERSONALITY(ex) do ; while(0) +#define SET_PERSONALITY(ex) do {} while(0) #endif diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h index c3891c1ada26..b95db9daf0e8 100644 --- a/arch/x86/um/shared/sysdep/stub_32.h +++ b/arch/x86/um/shared/sysdep/stub_32.h @@ -77,7 +77,7 @@ static inline void trap_myself(void) __asm("int3"); } -static void inline remap_stack_and_trap(void) +static inline void remap_stack_and_trap(void) { __asm__ volatile ( "movl %%esp,%%ebx ;" diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c index 19ae3e4fe4e9..54f9aa7e8457 100644 --- a/arch/x86/xen/pci-swiotlb-xen.c +++ b/arch/x86/xen/pci-swiotlb-xen.c @@ -59,7 +59,7 @@ int __init pci_xen_swiotlb_detect(void) void __init pci_xen_swiotlb_init(void) { if (xen_swiotlb) { - xen_swiotlb_init(1, true /* early */); + xen_swiotlb_init_early(); dma_ops = &xen_swiotlb_dma_ops; #ifdef CONFIG_PCI @@ -76,7 +76,7 @@ int pci_xen_swiotlb_init_late(void) if (xen_swiotlb) return 0; - rc = xen_swiotlb_init(1, false /* late */); + rc = xen_swiotlb_init(); if (rc) return rc; diff --git a/drivers/Makefile b/drivers/Makefile index 8f3fee8281ad..5a6d613e868d 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_DMADEVICES) += dma/ obj-y += soc/ obj-$(CONFIG_VIRTIO) += virtio/ +obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio/ obj-$(CONFIG_VDPA) += vdpa/ obj-$(CONFIG_XEN) += xen/ diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index b02fd51e5589..8cc195c4c861 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c @@ -171,6 +171,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) acpi_handle handle = mem_device->device->handle; int result, num_enabled = 0; struct acpi_memory_info *info; + mhp_t mhp_flags = MHP_NONE; int node; node = acpi_get_node(handle); @@ -194,8 +195,10 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) if (node < 0) node = memory_add_physaddr_to_nid(info->start_addr); + if (mhp_supports_memmap_on_memory(info->length)) + mhp_flags |= MHP_MEMMAP_ON_MEMORY; result = __add_memory(node, info->start_addr, info->length, - MHP_NONE); + mhp_flags); /* * If the memory block has been used by the kernel, add_memory() diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c index 443fdf62dd22..d39a9b474727 100644 --- a/drivers/acpi/custom_method.c +++ b/drivers/acpi/custom_method.c @@ -42,6 +42,8 @@ static ssize_t cm_write(struct file *file, const char __user *user_buf, sizeof(struct acpi_table_header))) return -EFAULT; uncopied_bytes = max_size = table.length; + /* make sure the buf is not allocated */ + kfree(buf); buf = kzalloc(max_size, 
GFP_KERNEL); if (!buf) return -ENOMEM; @@ -55,6 +57,7 @@ static ssize_t cm_write(struct file *file, const char __user *user_buf, (*ppos + count < count) || (count > uncopied_bytes)) { kfree(buf); + buf = NULL; return -EINVAL; } @@ -76,7 +79,6 @@ static ssize_t cm_write(struct file *file, const char __user *user_buf, add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE); } - kfree(buf); return count; } diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index f973bbe90e5e..b852cff80287 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -142,7 +142,6 @@ int acpi_device_sleep_wake(struct acpi_device *dev, int acpi_power_get_inferred_state(struct acpi_device *device, int *state); int acpi_power_on_resources(struct acpi_device *device, int state); int acpi_power_transition(struct acpi_device *device, int state); -void acpi_turn_off_unused_power_resources(void); /* -------------------------------------------------------------------------- Device Power Management diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index 95f23acd5b80..53cab975f612 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -116,6 +116,13 @@ static struct mcfg_fixup mcfg_quirks[] = { THUNDER_ECAM_QUIRK(2, 12), THUNDER_ECAM_QUIRK(2, 13), + { "NVIDIA", "TEGRA194", 1, 0, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 1, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 2, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 3, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 4, MCFG_BUS_ANY, &tegra194_pcie_ops}, + { "NVIDIA", "TEGRA194", 1, 5, MCFG_BUS_ANY, &tegra194_pcie_ops}, + #define XGENE_V1_ECAM_MCFG(rev, seg) \ {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \ &xgene_v1_pcie_ecam_ops } diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 56102eaaa2da..32974b575e46 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -995,7 +995,6 @@ void acpi_resume_power_resources(void) mutex_unlock(&power_resource_list_lock); } -#endif void acpi_turn_off_unused_power_resources(void) { @@ -1016,3 +1015,4 @@ void acpi_turn_off_unused_power_resources(void) mutex_unlock(&power_resource_list_lock); } +#endif diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index bc973fbd70b2..a22778e880c2 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2359,8 +2359,6 @@ int __init acpi_scan_init(void) } } - acpi_turn_off_unused_power_resources(); - acpi_scan_initialized = true; out: diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h index 7fe41ee489d6..1856f76ac83f 100644 --- a/drivers/acpi/sleep.h +++ b/drivers/acpi/sleep.h @@ -8,6 +8,7 @@ extern struct list_head acpi_wakeup_device_list; extern struct mutex acpi_device_lock; extern void acpi_resume_power_resources(void); +extern void acpi_turn_off_unused_power_resources(void); static inline acpi_status acpi_set_waking_vector(u32 wakeup_address) { diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index 5b32df5d33ad..6e9c5ade4c2e 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -86,7 +86,8 @@ struct brcm_ahci_priv { u32 port_mask; u32 quirks; enum brcm_ahci_version version; - struct reset_control *rcdev; + struct reset_control *rcdev_rescal; + struct reset_control *rcdev_ahci; }; static inline u32 brcm_sata_readreg(void __iomem *addr) @@ -352,8 +353,8 @@ static int brcm_ahci_suspend(struct device *dev) else ret = 0; - if (priv->version != BRCM_SATA_BCM7216) - reset_control_assert(priv->rcdev); + 
reset_control_assert(priv->rcdev_ahci); + reset_control_rearm(priv->rcdev_rescal); return ret; } @@ -365,10 +366,10 @@ static int __maybe_unused brcm_ahci_resume(struct device *dev) struct brcm_ahci_priv *priv = hpriv->plat_data; int ret = 0; - if (priv->version == BRCM_SATA_BCM7216) - ret = reset_control_reset(priv->rcdev); - else - ret = reset_control_deassert(priv->rcdev); + ret = reset_control_deassert(priv->rcdev_ahci); + if (ret) + return ret; + ret = reset_control_reset(priv->rcdev_rescal); if (ret) return ret; @@ -434,7 +435,6 @@ static int brcm_ahci_probe(struct platform_device *pdev) { const struct of_device_id *of_id; struct device *dev = &pdev->dev; - const char *reset_name = NULL; struct brcm_ahci_priv *priv; struct ahci_host_priv *hpriv; struct resource *res; @@ -456,15 +456,15 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (IS_ERR(priv->top_ctrl)) return PTR_ERR(priv->top_ctrl); - /* Reset is optional depending on platform and named differently */ - if (priv->version == BRCM_SATA_BCM7216) - reset_name = "rescal"; - else - reset_name = "ahci"; - - priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name); - if (IS_ERR(priv->rcdev)) - return PTR_ERR(priv->rcdev); + if (priv->version == BRCM_SATA_BCM7216) { + priv->rcdev_rescal = devm_reset_control_get_optional_shared( + &pdev->dev, "rescal"); + if (IS_ERR(priv->rcdev_rescal)) + return PTR_ERR(priv->rcdev_rescal); + } + priv->rcdev_ahci = devm_reset_control_get_optional(&pdev->dev, "ahci"); + if (IS_ERR(priv->rcdev_ahci)) + return PTR_ERR(priv->rcdev_ahci); hpriv = ahci_platform_get_resources(pdev, 0); if (IS_ERR(hpriv)) @@ -485,10 +485,10 @@ static int brcm_ahci_probe(struct platform_device *pdev) break; } - if (priv->version == BRCM_SATA_BCM7216) - ret = reset_control_reset(priv->rcdev); - else - ret = reset_control_deassert(priv->rcdev); + ret = reset_control_reset(priv->rcdev_rescal); + if (ret) + return ret; + ret = reset_control_deassert(priv->rcdev_ahci); if (ret) return ret; @@ -539,8 +539,8 @@ out_disable_regulators: out_disable_clks: ahci_platform_disable_clks(hpriv); out_reset: - if (priv->version != BRCM_SATA_BCM7216) - reset_control_assert(priv->rcdev); + reset_control_assert(priv->rcdev_ahci); + reset_control_rearm(priv->rcdev_rescal); return ret; } diff --git a/drivers/base/memory.c b/drivers/base/memory.c index f35298425575..b31b3af5c490 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -169,30 +169,98 @@ int memory_notify(unsigned long val, void *v) return blocking_notifier_call_chain(&memory_chain, val, v); } +static int memory_block_online(struct memory_block *mem) +{ + unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); + unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; + unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages; + struct zone *zone; + int ret; + + zone = zone_for_pfn_range(mem->online_type, mem->nid, start_pfn, nr_pages); + + /* + * Although vmemmap pages have a different lifecycle than the pages + * they describe (they remain until the memory is unplugged), doing + * their initialization and accounting at the memory onlining/offlining + * stage keeps the accounting easier to follow - e.g. vmemmap pages + * belong to the same zone as the memory they describe. 
+ */ + if (nr_vmemmap_pages) { + ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone); + if (ret) + return ret; + } + + ret = online_pages(start_pfn + nr_vmemmap_pages, + nr_pages - nr_vmemmap_pages, zone); + if (ret) { + if (nr_vmemmap_pages) + mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages); + return ret; + } + + /* + * Account once onlining succeeded. If the zone was unpopulated, it is + * now already properly populated. + */ + if (nr_vmemmap_pages) + adjust_present_page_count(zone, nr_vmemmap_pages); + + return ret; +} + +static int memory_block_offline(struct memory_block *mem) +{ + unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); + unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; + unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages; + struct zone *zone; + int ret; + + zone = page_zone(pfn_to_page(start_pfn)); + + /* + * Unaccount before offlining, such that unpopulated zone and kthreads + * can properly be torn down in offline_pages(). + */ + if (nr_vmemmap_pages) + adjust_present_page_count(zone, -nr_vmemmap_pages); + + ret = offline_pages(start_pfn + nr_vmemmap_pages, + nr_pages - nr_vmemmap_pages); + if (ret) { + /* offline_pages() failed. Account back. */ + if (nr_vmemmap_pages) + adjust_present_page_count(zone, nr_vmemmap_pages); + return ret; + } + + if (nr_vmemmap_pages) + mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages); + + return ret; +} + /* * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is * OK to have direct references to sparsemem variables in here. */ static int -memory_block_action(unsigned long start_section_nr, unsigned long action, - int online_type, int nid) +memory_block_action(struct memory_block *mem, unsigned long action) { - unsigned long start_pfn; - unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; int ret; - start_pfn = section_nr_to_pfn(start_section_nr); - switch (action) { case MEM_ONLINE: - ret = online_pages(start_pfn, nr_pages, online_type, nid); + ret = memory_block_online(mem); break; case MEM_OFFLINE: - ret = offline_pages(start_pfn, nr_pages); + ret = memory_block_offline(mem); break; default: WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: " - "%ld\n", __func__, start_section_nr, action, action); + "%ld\n", __func__, mem->start_section_nr, action, action); ret = -EINVAL; } @@ -210,9 +278,7 @@ static int memory_block_change_state(struct memory_block *mem, if (to_state == MEM_OFFLINE) mem->state = MEM_GOING_OFFLINE; - ret = memory_block_action(mem->start_section_nr, to_state, - mem->online_type, mem->nid); - + ret = memory_block_action(mem, to_state); mem->state = ret ? 
from_state_req : to_state; return ret; @@ -567,7 +633,8 @@ int register_memory(struct memory_block *memory) return ret; } -static int init_memory_block(unsigned long block_id, unsigned long state) +static int init_memory_block(unsigned long block_id, unsigned long state, + unsigned long nr_vmemmap_pages) { struct memory_block *mem; int ret = 0; @@ -584,6 +651,7 @@ static int init_memory_block(unsigned long block_id, unsigned long state) mem->start_section_nr = block_id * sections_per_block; mem->state = state; mem->nid = NUMA_NO_NODE; + mem->nr_vmemmap_pages = nr_vmemmap_pages; ret = register_memory(mem); @@ -603,7 +671,7 @@ static int add_memory_block(unsigned long base_section_nr) if (section_count == 0) return 0; return init_memory_block(memory_block_id(base_section_nr), - MEM_ONLINE); + MEM_ONLINE, 0); } static void unregister_memory(struct memory_block *memory) @@ -625,7 +693,8 @@ static void unregister_memory(struct memory_block *memory) * * Called under device_hotplug_lock. */ -int create_memory_block_devices(unsigned long start, unsigned long size) +int create_memory_block_devices(unsigned long start, unsigned long size, + unsigned long vmemmap_pages) { const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start)); unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size)); @@ -638,7 +707,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size) return -EINVAL; for (block_id = start_block_id; block_id != end_block_id; block_id++) { - ret = init_memory_block(block_id, MEM_OFFLINE); + ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages); if (ret) break; } diff --git a/drivers/clk/sifive/Kconfig b/drivers/clk/sifive/Kconfig index 1c14eb20c066..9132c3c4aa86 100644 --- a/drivers/clk/sifive/Kconfig +++ b/drivers/clk/sifive/Kconfig @@ -10,6 +10,8 @@ if CLK_SIFIVE config CLK_SIFIVE_PRCI bool "PRCI driver for SiFive SoCs" + select RESET_CONTROLLER + select RESET_SIMPLE select CLK_ANALOGBITS_WRPLL_CLN28HPC help Supports the Power Reset Clock interface (PRCI) IP block found in diff --git a/drivers/clk/sifive/fu740-prci.c b/drivers/clk/sifive/fu740-prci.c index 764d1097aa51..53f6e00a03b9 100644 --- a/drivers/clk/sifive/fu740-prci.c +++ b/drivers/clk/sifive/fu740-prci.c @@ -72,6 +72,12 @@ static const struct clk_ops sifive_fu740_prci_hfpclkplldiv_clk_ops = { .recalc_rate = sifive_prci_hfpclkplldiv_recalc_rate, }; +static const struct clk_ops sifive_fu740_prci_pcie_aux_clk_ops = { + .enable = sifive_prci_pcie_aux_clock_enable, + .disable = sifive_prci_pcie_aux_clock_disable, + .is_enabled = sifive_prci_pcie_aux_clock_is_enabled, +}; + /* List of clock controls provided by the PRCI */ struct __prci_clock __prci_init_clocks_fu740[] = { [PRCI_CLK_COREPLL] = { @@ -120,4 +126,9 @@ struct __prci_clock __prci_init_clocks_fu740[] = { .parent_name = "hfpclkpll", .ops = &sifive_fu740_prci_hfpclkplldiv_clk_ops, }, + [PRCI_CLK_PCIE_AUX] = { + .name = "pcie_aux", + .parent_name = "hfclk", + .ops = &sifive_fu740_prci_pcie_aux_clk_ops, + }, }; diff --git a/drivers/clk/sifive/fu740-prci.h b/drivers/clk/sifive/fu740-prci.h index 13ef971f7764..511a0bf7ba2b 100644 --- a/drivers/clk/sifive/fu740-prci.h +++ b/drivers/clk/sifive/fu740-prci.h @@ -9,7 +9,7 @@ #include "sifive-prci.h" -#define NUM_CLOCK_FU740 8 +#define NUM_CLOCK_FU740 9 extern struct __prci_clock __prci_init_clocks_fu740[NUM_CLOCK_FU740]; diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c index 1490b01ce629..0d79ba31a793 100644 --- a/drivers/clk/sifive/sifive-prci.c +++ 
b/drivers/clk/sifive/sifive-prci.c @@ -453,6 +453,47 @@ void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd) r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */ } +/* PCIE AUX clock APIs for enable, disable. */ +int sifive_prci_pcie_aux_clock_is_enabled(struct clk_hw *hw) +{ + struct __prci_clock *pc = clk_hw_to_prci_clock(hw); + struct __prci_data *pd = pc->pd; + u32 r; + + r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); + + if (r & PRCI_PCIE_AUX_EN_MASK) + return 1; + else + return 0; +} + +int sifive_prci_pcie_aux_clock_enable(struct clk_hw *hw) +{ + struct __prci_clock *pc = clk_hw_to_prci_clock(hw); + struct __prci_data *pd = pc->pd; + u32 r __maybe_unused; + + if (sifive_prci_pcie_aux_clock_is_enabled(hw)) + return 0; + + __prci_writel(1, PRCI_PCIE_AUX_OFFSET, pd); + r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); /* barrier */ + + return 0; +} + +void sifive_prci_pcie_aux_clock_disable(struct clk_hw *hw) +{ + struct __prci_clock *pc = clk_hw_to_prci_clock(hw); + struct __prci_data *pd = pc->pd; + u32 r __maybe_unused; + + __prci_writel(0, PRCI_PCIE_AUX_OFFSET, pd); + r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); /* barrier */ + +} + /** * __prci_register_clocks() - register clock controls in the PRCI * @dev: Linux struct device @@ -547,6 +588,19 @@ static int sifive_prci_probe(struct platform_device *pdev) if (IS_ERR(pd->va)) return PTR_ERR(pd->va); + pd->reset.rcdev.owner = THIS_MODULE; + pd->reset.rcdev.nr_resets = PRCI_RST_NR; + pd->reset.rcdev.ops = &reset_simple_ops; + pd->reset.rcdev.of_node = pdev->dev.of_node; + pd->reset.active_low = true; + pd->reset.membase = pd->va + PRCI_DEVICESRESETREG_OFFSET; + spin_lock_init(&pd->reset.lock); + + r = devm_reset_controller_register(&pdev->dev, &pd->reset.rcdev); + if (r) { + dev_err(dev, "could not register reset controller: %d\n", r); + return r; + } r = __prci_register_clocks(dev, pd, desc); if (r) { dev_err(dev, "could not register clocks: %d\n", r); diff --git a/drivers/clk/sifive/sifive-prci.h b/drivers/clk/sifive/sifive-prci.h index dbdbd1722688..91658a88af4e 100644 --- a/drivers/clk/sifive/sifive-prci.h +++ b/drivers/clk/sifive/sifive-prci.h @@ -11,6 +11,7 @@ #include <linux/clk/analogbits-wrpll-cln28hpc.h> #include <linux/clk-provider.h> +#include <linux/reset/reset-simple.h> #include <linux/platform_device.h> /* @@ -67,6 +68,11 @@ #define PRCI_DDRPLLCFG1_CKE_SHIFT 31 #define PRCI_DDRPLLCFG1_CKE_MASK (0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT) +/* PCIEAUX */ +#define PRCI_PCIE_AUX_OFFSET 0x14 +#define PRCI_PCIE_AUX_EN_SHIFT 0 +#define PRCI_PCIE_AUX_EN_MASK (0x1 << PRCI_PCIE_AUX_EN_SHIFT) + /* GEMGXLPLLCFG0 */ #define PRCI_GEMGXLPLLCFG0_OFFSET 0x1c #define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT 0 @@ -116,6 +122,8 @@ #define PRCI_DEVICESRESETREG_CHIPLINK_RST_N_MASK \ (0x1 << PRCI_DEVICESRESETREG_CHIPLINK_RST_N_SHIFT) +#define PRCI_RST_NR 7 + /* CLKMUXSTATUSREG */ #define PRCI_CLKMUXSTATUSREG_OFFSET 0x2c #define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT 1 @@ -216,6 +224,7 @@ */ struct __prci_data { void __iomem *va; + struct reset_simple_data reset; struct clk_hw_onecell_data hw_clks; }; @@ -296,4 +305,8 @@ unsigned long sifive_prci_tlclksel_recalc_rate(struct clk_hw *hw, unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate); +int sifive_prci_pcie_aux_clock_is_enabled(struct clk_hw *hw); +int sifive_prci_pcie_aux_clock_enable(struct clk_hw *hw); +void sifive_prci_pcie_aux_clock_disable(struct clk_hw *hw); + #endif /* __SIFIVE_CLK_SIFIVE_PRCI_H */ diff --git a/drivers/dma/Kconfig 
b/drivers/dma/Kconfig index a0836ffc22e0..6ab9d9a488a6 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -300,6 +300,18 @@ config INTEL_IDXD_SVM depends on PCI_PASID depends on PCI_IOV +config INTEL_IDXD_PERFMON + bool "Intel Data Accelerators performance monitor support" + depends on INTEL_IDXD + help + Enable performance monitor (pmu) support for the Intel(R) + data accelerators present in Intel Xeon CPUs. With this + enabled, perf can be used to monitor the DSA (Intel Data + Streaming Accelerator) events described in the Intel DSA + spec. + + If unsure, say N. + config INTEL_IOATDMA tristate "Intel I/OAT DMA support" depends on PCI && X86_64 diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index fe45ad5d06c4..64a52bf4d737 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -344,17 +344,6 @@ static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan) return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); } -static inline int at_xdmac_csize(u32 maxburst) -{ - int csize; - - csize = ffs(maxburst) - 1; - if (csize > 4) - csize = -EINVAL; - - return csize; -}; - static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg) { return cfg & AT_XDMAC_CC_TYPE_PER_TRAN; } diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c index 08d71dafa001..53289927dd0d 100644 --- a/drivers/dma/dw-edma/dw-edma-core.c +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -81,8 +81,13 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc) * - Even chunks originate CB equal to 1 */ chunk->cb = !(desc->chunks_alloc % 2); - chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off; - chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off; + if (chan->dir == EDMA_DIR_WRITE) { + chunk->ll_region.paddr = dw->ll_region_wr[chan->id].paddr; + chunk->ll_region.vaddr = dw->ll_region_wr[chan->id].vaddr; + } else { + chunk->ll_region.paddr = dw->ll_region_rd[chan->id].paddr; + chunk->ll_region.vaddr = dw->ll_region_rd[chan->id].vaddr; + } if (desc->chunk) { /* Create and add new element into the linked list */ @@ -329,22 +334,22 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) struct dw_edma_chunk *chunk; struct dw_edma_burst *burst; struct dw_edma_desc *desc; - u32 cnt; + u32 cnt = 0; int i; if (!chan->configured) return NULL; switch (chan->config.direction) { - case DMA_DEV_TO_MEM: /* local dma */ + case DMA_DEV_TO_MEM: /* local DMA */ if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ) break; return NULL; - case DMA_MEM_TO_DEV: /* local dma */ + case DMA_MEM_TO_DEV: /* local DMA */ if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) break; return NULL; - default: /* remote dma */ + default: /* remote DMA */ if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ) break; if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE) @@ -352,12 +357,19 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) return NULL; } - if (xfer->cyclic) { + if (xfer->type == EDMA_XFER_CYCLIC) { if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt) return NULL; - } else { + } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { if (xfer->xfer.sg.len < 1) return NULL; + } else if (xfer->type == EDMA_XFER_INTERLEAVED) { + if (!xfer->xfer.il->numf) + return NULL; + if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0) + return NULL; + } else { + return NULL; } desc = dw_edma_alloc_desc(chan); @@ -368,18 +380,28 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) if (unlikely(!chunk)) goto err_alloc; - 
src_addr = chan->config.src_addr; - dst_addr = chan->config.dst_addr; + if (xfer->type == EDMA_XFER_INTERLEAVED) { + src_addr = xfer->xfer.il->src_start; + dst_addr = xfer->xfer.il->dst_start; + } else { + src_addr = chan->config.src_addr; + dst_addr = chan->config.dst_addr; + } - if (xfer->cyclic) { + if (xfer->type == EDMA_XFER_CYCLIC) { cnt = xfer->xfer.cyclic.cnt; - } else { + } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { cnt = xfer->xfer.sg.len; sg = xfer->xfer.sg.sgl; + } else if (xfer->type == EDMA_XFER_INTERLEAVED) { + if (xfer->xfer.il->numf > 0) + cnt = xfer->xfer.il->numf; + else + cnt = xfer->xfer.il->frame_size; } for (i = 0; i < cnt; i++) { - if (!xfer->cyclic && !sg) + if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg) break; if (chunk->bursts_alloc == chan->ll_max) { @@ -392,20 +414,23 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) if (unlikely(!burst)) goto err_alloc; - if (xfer->cyclic) + if (xfer->type == EDMA_XFER_CYCLIC) burst->sz = xfer->xfer.cyclic.len; - else + else if (xfer->type == EDMA_XFER_SCATTER_GATHER) burst->sz = sg_dma_len(sg); + else if (xfer->type == EDMA_XFER_INTERLEAVED) + burst->sz = xfer->xfer.il->sgl[i].size; chunk->ll_region.sz += burst->sz; desc->alloc_sz += burst->sz; if (chan->dir == EDMA_DIR_WRITE) { burst->sar = src_addr; - if (xfer->cyclic) { + if (xfer->type == EDMA_XFER_CYCLIC) { burst->dar = xfer->xfer.cyclic.paddr; - } else { - burst->dar = dst_addr; + } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { + src_addr += sg_dma_len(sg); + burst->dar = sg_dma_address(sg); /* Unlike the typical assumption by other * drivers/IPs the peripheral memory isn't * a FIFO memory, in this case, it's a @@ -416,10 +441,11 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) } } else { burst->dar = dst_addr; - if (xfer->cyclic) { + if (xfer->type == EDMA_XFER_CYCLIC) { burst->sar = xfer->xfer.cyclic.paddr; - } else { - burst->sar = src_addr; + } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { + dst_addr += sg_dma_len(sg); + burst->sar = sg_dma_address(sg); /* Unlike the typical assumption by other * drivers/IPs the peripheral memory isn't * a FIFO memory, in this case, it's a @@ -430,10 +456,22 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) } } - if (!xfer->cyclic) { - src_addr += sg_dma_len(sg); - dst_addr += sg_dma_len(sg); + if (xfer->type == EDMA_XFER_SCATTER_GATHER) { sg = sg_next(sg); + } else if (xfer->type == EDMA_XFER_INTERLEAVED && + xfer->xfer.il->frame_size > 0) { + struct dma_interleaved_template *il = xfer->xfer.il; + struct data_chunk *dc = &il->sgl[i]; + + if (il->src_sgl) { + src_addr += burst->sz; + src_addr += dmaengine_get_src_icg(il, dc); + } + + if (il->dst_sgl) { + dst_addr += burst->sz; + dst_addr += dmaengine_get_dst_icg(il, dc); + } } } @@ -459,7 +497,7 @@ dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, xfer.xfer.sg.sgl = sgl; xfer.xfer.sg.len = len; xfer.flags = flags; - xfer.cyclic = false; + xfer.type = EDMA_XFER_SCATTER_GATHER; return dw_edma_device_transfer(&xfer); } @@ -478,7 +516,23 @@ dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr, xfer.xfer.cyclic.len = len; xfer.xfer.cyclic.cnt = count; xfer.flags = flags; - xfer.cyclic = true; + xfer.type = EDMA_XFER_CYCLIC; + + return dw_edma_device_transfer(&xfer); +} + +static struct dma_async_tx_descriptor * +dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan, + struct dma_interleaved_template *ilt, + unsigned long flags) +{ + struct dw_edma_transfer xfer; + + xfer.dchan = dchan; + 
xfer.direction = ilt->dir; + xfer.xfer.il = ilt; + xfer.flags = flags; + xfer.type = EDMA_XFER_INTERLEAVED; return dw_edma_device_transfer(&xfer); } @@ -642,24 +696,13 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write, struct device *dev = chip->dev; struct dw_edma *dw = chip->dw; struct dw_edma_chan *chan; - size_t ll_chunk, dt_chunk; struct dw_edma_irq *irq; struct dma_device *dma; - u32 i, j, cnt, ch_cnt; u32 alloc, off_alloc; + u32 i, j, cnt; int err = 0; u32 pos; - ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; - ll_chunk = dw->ll_region.sz; - dt_chunk = dw->dt_region.sz; - - /* Calculate linked list chunk for each channel */ - ll_chunk /= roundup_pow_of_two(ch_cnt); - - /* Calculate linked list chunk for each channel */ - dt_chunk /= roundup_pow_of_two(ch_cnt); - if (write) { i = 0; cnt = dw->wr_ch_cnt; @@ -691,14 +734,14 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write, chan->request = EDMA_REQ_NONE; chan->status = EDMA_ST_IDLE; - chan->ll_off = (ll_chunk * i); - chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1; - - chan->dt_off = (dt_chunk * i); + if (write) + chan->ll_max = (dw->ll_region_wr[j].sz / EDMA_LL_SZ); + else + chan->ll_max = (dw->ll_region_rd[j].sz / EDMA_LL_SZ); + chan->ll_max -= 1; - dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n", - write ? "write" : "read", j, - chan->ll_off, chan->ll_max); + dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n", + write ? "write" : "read", j, chan->ll_max); if (dw->nr_irqs == 1) pos = 0; @@ -723,12 +766,15 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write, chan->vc.desc_free = vchan_free_desc; vchan_init(&chan->vc, dma); - dt_region->paddr = dw->dt_region.paddr + chan->dt_off; - dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off; - dt_region->sz = dt_chunk; - - dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n", - write ? "write" : "read", j, chan->dt_off); + if (write) { + dt_region->paddr = dw->dt_region_wr[j].paddr; + dt_region->vaddr = dw->dt_region_wr[j].vaddr; + dt_region->sz = dw->dt_region_wr[j].sz; + } else { + dt_region->paddr = dw->dt_region_rd[j].paddr; + dt_region->vaddr = dw->dt_region_rd[j].vaddr; + dt_region->sz = dw->dt_region_rd[j].sz; + } dw_edma_v0_core_device_config(chan); } @@ -738,6 +784,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write, dma_cap_set(DMA_SLAVE, dma->cap_mask); dma_cap_set(DMA_CYCLIC, dma->cap_mask); dma_cap_set(DMA_PRIVATE, dma->cap_mask); + dma_cap_set(DMA_INTERLEAVE, dma->cap_mask); dma->directions = BIT(write ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV); dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); @@ -756,6 +803,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write, dma->device_tx_status = dw_edma_device_tx_status; dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg; dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic; + dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma; dma_set_max_seg_size(dma->dev, U32_MAX); @@ -863,14 +911,15 @@ int dw_edma_probe(struct dw_edma_chip *chip) raw_spin_lock_init(&dw->lock); - /* Find out how many write channels are supported by hardware */ - dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE); - if (!dw->wr_ch_cnt) - return -EINVAL; + dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, + dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE)); + dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH); - /* Find out how many read channels are supported by hardware */ - dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ); - if (!dw->rd_ch_cnt) + dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, + dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ)); + dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH); + + if (!dw->wr_ch_cnt && !dw->rd_ch_cnt) return -EINVAL; dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n", @@ -937,24 +986,23 @@ int dw_edma_remove(struct dw_edma_chip *chip) /* Power management */ pm_runtime_disable(dev); + /* Deregister eDMA device */ + dma_async_device_unregister(&dw->wr_edma); list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels, vc.chan.device_node) { - list_del(&chan->vc.chan.device_node); tasklet_kill(&chan->vc.task); + list_del(&chan->vc.chan.device_node); } + dma_async_device_unregister(&dw->rd_edma); list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels, vc.chan.device_node) { - list_del(&chan->vc.chan.device_node); tasklet_kill(&chan->vc.task); + list_del(&chan->vc.chan.device_node); } - /* Deregister eDMA device */ - dma_async_device_unregister(&dw->wr_edma); - dma_async_device_unregister(&dw->rd_edma); - /* Turn debugfs off */ - dw_edma_v0_core_debugfs_off(); + dw_edma_v0_core_debugfs_off(chip); return 0; } diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h index 31fc50d31792..60316d408c3e 100644 --- a/drivers/dma/dw-edma/dw-edma-core.h +++ b/drivers/dma/dw-edma/dw-edma-core.h @@ -15,15 +15,18 @@ #include "../virt-dma.h" #define EDMA_LL_SZ 24 +#define EDMA_MAX_WR_CH 8 +#define EDMA_MAX_RD_CH 8 enum dw_edma_dir { EDMA_DIR_WRITE = 0, EDMA_DIR_READ }; -enum dw_edma_mode { - EDMA_MODE_LEGACY = 0, - EDMA_MODE_UNROLL +enum dw_edma_map_format { + EDMA_MF_EDMA_LEGACY = 0x0, + EDMA_MF_EDMA_UNROLL = 0x1, + EDMA_MF_HDMA_COMPAT = 0x5 }; enum dw_edma_request { @@ -38,6 +41,12 @@ enum dw_edma_status { EDMA_ST_BUSY }; +enum dw_edma_xfer_type { + EDMA_XFER_SCATTER_GATHER = 0, + EDMA_XFER_CYCLIC, + EDMA_XFER_INTERLEAVED +}; + struct dw_edma_chan; struct dw_edma_chunk; @@ -82,11 +91,8 @@ struct dw_edma_chan { int id; enum dw_edma_dir dir; - off_t ll_off; u32 ll_max; - off_t dt_off; - struct msi_msg msi; enum dw_edma_request request; @@ -117,19 +123,23 @@ struct dw_edma { u16 rd_ch_cnt; struct dw_edma_region rg_region; /* Registers */ - struct dw_edma_region ll_region; /* Linked list */ - struct dw_edma_region dt_region; /* Data */ + struct dw_edma_region ll_region_wr[EDMA_MAX_WR_CH]; + struct dw_edma_region ll_region_rd[EDMA_MAX_RD_CH]; + struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH]; + struct 
dw_edma_region dt_region_rd[EDMA_MAX_RD_CH]; struct dw_edma_irq *irq; int nr_irqs; - u32 version; - enum dw_edma_mode mode; + enum dw_edma_map_format mf; struct dw_edma_chan *chan; const struct dw_edma_core_ops *ops; raw_spinlock_t lock; /* Only for legacy */ +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs; +#endif /* CONFIG_DEBUG_FS */ }; struct dw_edma_sg { @@ -146,12 +156,13 @@ struct dw_edma_cyclic { struct dw_edma_transfer { struct dma_chan *dchan; union dw_edma_xfer { - struct dw_edma_sg sg; - struct dw_edma_cyclic cyclic; + struct dw_edma_sg sg; + struct dw_edma_cyclic cyclic; + struct dma_interleaved_template *il; } xfer; enum dma_transfer_direction direction; unsigned long flags; - bool cyclic; + enum dw_edma_xfer_type type; }; static inline diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c index 1eafc602e17e..44f6e09bdb53 100644 --- a/drivers/dma/dw-edma/dw-edma-pcie.c +++ b/drivers/dma/dw-edma/dw-edma-pcie.c @@ -13,45 +13,81 @@ #include <linux/dma/edma.h> #include <linux/pci-epf.h> #include <linux/msi.h> +#include <linux/bitfield.h> #include "dw-edma-core.h" +#define DW_PCIE_VSEC_DMA_ID 0x6 +#define DW_PCIE_VSEC_DMA_BAR GENMASK(10, 8) +#define DW_PCIE_VSEC_DMA_MAP GENMASK(2, 0) +#define DW_PCIE_VSEC_DMA_WR_CH GENMASK(9, 0) +#define DW_PCIE_VSEC_DMA_RD_CH GENMASK(25, 16) + +#define DW_BLOCK(a, b, c) \ + { \ + .bar = a, \ + .off = b, \ + .sz = c, \ + }, + +struct dw_edma_block { + enum pci_barno bar; + off_t off; + size_t sz; +}; + struct dw_edma_pcie_data { /* eDMA registers location */ - enum pci_barno rg_bar; - off_t rg_off; - size_t rg_sz; + struct dw_edma_block rg; /* eDMA memory linked list location */ - enum pci_barno ll_bar; - off_t ll_off; - size_t ll_sz; + struct dw_edma_block ll_wr[EDMA_MAX_WR_CH]; + struct dw_edma_block ll_rd[EDMA_MAX_RD_CH]; /* eDMA memory data location */ - enum pci_barno dt_bar; - off_t dt_off; - size_t dt_sz; + struct dw_edma_block dt_wr[EDMA_MAX_WR_CH]; + struct dw_edma_block dt_rd[EDMA_MAX_RD_CH]; /* Other */ - u32 version; - enum dw_edma_mode mode; + enum dw_edma_map_format mf; u8 irqs; + u16 wr_ch_cnt; + u16 rd_ch_cnt; }; static const struct dw_edma_pcie_data snps_edda_data = { /* eDMA registers location */ - .rg_bar = BAR_0, - .rg_off = 0x00001000, /* 4 Kbytes */ - .rg_sz = 0x00002000, /* 8 Kbytes */ + .rg.bar = BAR_0, + .rg.off = 0x00001000, /* 4 Kbytes */ + .rg.sz = 0x00002000, /* 8 Kbytes */ /* eDMA memory linked list location */ - .ll_bar = BAR_2, - .ll_off = 0x00000000, /* 0 Kbytes */ - .ll_sz = 0x00800000, /* 8 Mbytes */ + .ll_wr = { + /* Channel 0 - BAR 2, offset 0 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00000000, 0x00000800) + /* Channel 1 - BAR 2, offset 2 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00200000, 0x00000800) + }, + .ll_rd = { + /* Channel 0 - BAR 2, offset 4 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00400000, 0x00000800) + /* Channel 1 - BAR 2, offset 6 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00600000, 0x00000800) + }, /* eDMA memory data location */ - .dt_bar = BAR_2, - .dt_off = 0x00800000, /* 8 Mbytes */ - .dt_sz = 0x03800000, /* 56 Mbytes */ + .dt_wr = { + /* Channel 0 - BAR 2, offset 8 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00800000, 0x00000800) + /* Channel 1 - BAR 2, offset 9 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00900000, 0x00000800) + }, + .dt_rd = { + /* Channel 0 - BAR 2, offset 10 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00a00000, 0x00000800) + /* Channel 1 - BAR 2, offset 11 Mbytes, size 2 Kbytes */ + DW_BLOCK(BAR_2, 0x00b00000, 0x00000800) + 
}, /* Other */ - .version = 0, - .mode = EDMA_MODE_UNROLL, + .mf = EDMA_MF_EDMA_UNROLL, .irqs = 1, + .wr_ch_cnt = 2, + .rd_ch_cnt = 2, }; static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr) @@ -63,14 +99,58 @@ static const struct dw_edma_core_ops dw_edma_pcie_core_ops = { .irq_vector = dw_edma_pcie_irq_vector, }; +static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev, + struct dw_edma_pcie_data *pdata) +{ + u32 val, map; + u16 vsec; + u64 off; + + vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_SYNOPSYS, + DW_PCIE_VSEC_DMA_ID); + if (!vsec) + return; + + pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); + if (PCI_VNDR_HEADER_REV(val) != 0x00 || + PCI_VNDR_HEADER_LEN(val) != 0x18) + return; + + pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability DMA\n"); + pci_read_config_dword(pdev, vsec + 0x8, &val); + map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val); + if (map != EDMA_MF_EDMA_LEGACY && + map != EDMA_MF_EDMA_UNROLL && + map != EDMA_MF_HDMA_COMPAT) + return; + + pdata->mf = map; + pdata->rg.bar = FIELD_GET(DW_PCIE_VSEC_DMA_BAR, val); + + pci_read_config_dword(pdev, vsec + 0xc, &val); + pdata->wr_ch_cnt = min_t(u16, pdata->wr_ch_cnt, + FIELD_GET(DW_PCIE_VSEC_DMA_WR_CH, val)); + pdata->rd_ch_cnt = min_t(u16, pdata->rd_ch_cnt, + FIELD_GET(DW_PCIE_VSEC_DMA_RD_CH, val)); + + pci_read_config_dword(pdev, vsec + 0x14, &val); + off = val; + pci_read_config_dword(pdev, vsec + 0x10, &val); + off <<= 32; + off |= val; + pdata->rg.off = off; +} + static int dw_edma_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { - const struct dw_edma_pcie_data *pdata = (void *)pid->driver_data; + struct dw_edma_pcie_data *pdata = (void *)pid->driver_data; + struct dw_edma_pcie_data vsec_data; struct device *dev = &pdev->dev; struct dw_edma_chip *chip; - int err, nr_irqs; struct dw_edma *dw; + int err, nr_irqs; + int i, mask; /* Enable PCI device */ err = pcim_enable_device(pdev); @@ -79,11 +159,25 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, return err; } + memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data)); + + /* + * Try to find a PCIe Vendor-Specific Extended Capability for the DMA; + * if one exists, use it to reconfigure the default settings. 
+ */ + dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data); + /* Mapping PCI BAR regions */ - err = pcim_iomap_regions(pdev, BIT(pdata->rg_bar) | - BIT(pdata->ll_bar) | - BIT(pdata->dt_bar), - pci_name(pdev)); + mask = BIT(vsec_data.rg.bar); + for (i = 0; i < vsec_data.wr_ch_cnt; i++) { + mask |= BIT(vsec_data.ll_wr[i].bar); + mask |= BIT(vsec_data.dt_wr[i].bar); + } + for (i = 0; i < vsec_data.rd_ch_cnt; i++) { + mask |= BIT(vsec_data.ll_rd[i].bar); + mask |= BIT(vsec_data.dt_rd[i].bar); + } + err = pcim_iomap_regions(pdev, mask, pci_name(pdev)); if (err) { pci_err(pdev, "eDMA BAR I/O remapping failed\n"); return err; @@ -125,7 +219,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, return -ENOMEM; /* IRQs allocation */ - nr_irqs = pci_alloc_irq_vectors(pdev, 1, pdata->irqs, + nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs, PCI_IRQ_MSI | PCI_IRQ_MSIX); if (nr_irqs < 1) { pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n", @@ -139,46 +233,109 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, chip->id = pdev->devfn; chip->irq = pdev->irq; - dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar]; - dw->rg_region.vaddr += pdata->rg_off; - dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start; - dw->rg_region.paddr += pdata->rg_off; - dw->rg_region.sz = pdata->rg_sz; - - dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar]; - dw->ll_region.vaddr += pdata->ll_off; - dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start; - dw->ll_region.paddr += pdata->ll_off; - dw->ll_region.sz = pdata->ll_sz; - - dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar]; - dw->dt_region.vaddr += pdata->dt_off; - dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start; - dw->dt_region.paddr += pdata->dt_off; - dw->dt_region.sz = pdata->dt_sz; - - dw->version = pdata->version; - dw->mode = pdata->mode; + dw->mf = vsec_data.mf; dw->nr_irqs = nr_irqs; dw->ops = &dw_edma_pcie_core_ops; + dw->wr_ch_cnt = vsec_data.wr_ch_cnt; + dw->rd_ch_cnt = vsec_data.rd_ch_cnt; - /* Debug info */ - pci_dbg(pdev, "Version:\t%u\n", dw->version); + dw->rg_region.vaddr = pcim_iomap_table(pdev)[vsec_data.rg.bar]; + if (!dw->rg_region.vaddr) + return -ENOMEM; + + dw->rg_region.vaddr += vsec_data.rg.off; + dw->rg_region.paddr = pdev->resource[vsec_data.rg.bar].start; + dw->rg_region.paddr += vsec_data.rg.off; + dw->rg_region.sz = vsec_data.rg.sz; + + for (i = 0; i < dw->wr_ch_cnt; i++) { + struct dw_edma_region *ll_region = &dw->ll_region_wr[i]; + struct dw_edma_region *dt_region = &dw->dt_region_wr[i]; + struct dw_edma_block *ll_block = &vsec_data.ll_wr[i]; + struct dw_edma_block *dt_block = &vsec_data.dt_wr[i]; + + ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar]; + if (!ll_region->vaddr) + return -ENOMEM; + + ll_region->vaddr += ll_block->off; + ll_region->paddr = pdev->resource[ll_block->bar].start; + ll_region->paddr += ll_block->off; + ll_region->sz = ll_block->sz; + + dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar]; + if (!dt_region->vaddr) + return -ENOMEM; + + dt_region->vaddr += dt_block->off; + dt_region->paddr = pdev->resource[dt_block->bar].start; + dt_region->paddr += dt_block->off; + dt_region->sz = dt_block->sz; + } - pci_dbg(pdev, "Mode:\t%s\n", - dw->mode == EDMA_MODE_LEGACY ? 
"Legacy" : "Unroll"); + for (i = 0; i < dw->rd_ch_cnt; i++) { + struct dw_edma_region *ll_region = &dw->ll_region_rd[i]; + struct dw_edma_region *dt_region = &dw->dt_region_rd[i]; + struct dw_edma_block *ll_block = &vsec_data.ll_rd[i]; + struct dw_edma_block *dt_block = &vsec_data.dt_rd[i]; + + ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar]; + if (!ll_region->vaddr) + return -ENOMEM; + + ll_region->vaddr += ll_block->off; + ll_region->paddr = pdev->resource[ll_block->bar].start; + ll_region->paddr += ll_block->off; + ll_region->sz = ll_block->sz; + + dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar]; + if (!dt_region->vaddr) + return -ENOMEM; + + dt_region->vaddr += dt_block->off; + dt_region->paddr = pdev->resource[dt_block->bar].start; + dt_region->paddr += dt_block->off; + dt_region->sz = dt_block->sz; + } + + /* Debug info */ + if (dw->mf == EDMA_MF_EDMA_LEGACY) + pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", dw->mf); + else if (dw->mf == EDMA_MF_EDMA_UNROLL) + pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", dw->mf); + else if (dw->mf == EDMA_MF_HDMA_COMPAT) + pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", dw->mf); + else + pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", dw->mf); pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", - pdata->rg_bar, pdata->rg_off, pdata->rg_sz, + vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz, dw->rg_region.vaddr, &dw->rg_region.paddr); - pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", - pdata->ll_bar, pdata->ll_off, pdata->ll_sz, - dw->ll_region.vaddr, &dw->ll_region.paddr); - pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", - pdata->dt_bar, pdata->dt_off, pdata->dt_sz, - dw->dt_region.vaddr, &dw->dt_region.paddr); + for (i = 0; i < dw->wr_ch_cnt; i++) { + pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + i, vsec_data.ll_wr[i].bar, + vsec_data.ll_wr[i].off, dw->ll_region_wr[i].sz, + dw->ll_region_wr[i].vaddr, &dw->ll_region_wr[i].paddr); + + pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + i, vsec_data.dt_wr[i].bar, + vsec_data.dt_wr[i].off, dw->dt_region_wr[i].sz, + dw->dt_region_wr[i].vaddr, &dw->dt_region_wr[i].paddr); + } + + for (i = 0; i < dw->rd_ch_cnt; i++) { + pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + i, vsec_data.ll_rd[i].bar, + vsec_data.ll_rd[i].off, dw->ll_region_rd[i].sz, + dw->ll_region_rd[i].vaddr, &dw->ll_region_rd[i].paddr); + + pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", + i, vsec_data.dt_rd[i].bar, + vsec_data.dt_rd[i].off, dw->dt_region_rd[i].sz, + dw->dt_region_rd[i].vaddr, &dw->dt_region_rd[i].paddr); + } pci_dbg(pdev, "Nr. 
IRQs:\t%u\n", dw->nr_irqs); diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c index 692de47b1670..329fc2e57b70 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-core.c +++ b/drivers/dma/dw-edma/dw-edma-v0-core.c @@ -28,35 +28,75 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) return dw->rg_region.vaddr; } -#define SET(dw, name, value) \ +#define SET_32(dw, name, value) \ writel(value, &(__dw_regs(dw)->name)) -#define GET(dw, name) \ +#define GET_32(dw, name) \ readl(&(__dw_regs(dw)->name)) -#define SET_RW(dw, dir, name, value) \ +#define SET_RW_32(dw, dir, name, value) \ do { \ if ((dir) == EDMA_DIR_WRITE) \ - SET(dw, wr_##name, value); \ + SET_32(dw, wr_##name, value); \ else \ - SET(dw, rd_##name, value); \ + SET_32(dw, rd_##name, value); \ } while (0) -#define GET_RW(dw, dir, name) \ +#define GET_RW_32(dw, dir, name) \ ((dir) == EDMA_DIR_WRITE \ - ? GET(dw, wr_##name) \ - : GET(dw, rd_##name)) + ? GET_32(dw, wr_##name) \ + : GET_32(dw, rd_##name)) -#define SET_BOTH(dw, name, value) \ +#define SET_BOTH_32(dw, name, value) \ do { \ - SET(dw, wr_##name, value); \ - SET(dw, rd_##name, value); \ + SET_32(dw, wr_##name, value); \ + SET_32(dw, rd_##name, value); \ + } while (0) + +#ifdef CONFIG_64BIT + +#define SET_64(dw, name, value) \ + writeq(value, &(__dw_regs(dw)->name)) + +#define GET_64(dw, name) \ + readq(&(__dw_regs(dw)->name)) + +#define SET_RW_64(dw, dir, name, value) \ + do { \ + if ((dir) == EDMA_DIR_WRITE) \ + SET_64(dw, wr_##name, value); \ + else \ + SET_64(dw, rd_##name, value); \ + } while (0) + +#define GET_RW_64(dw, dir, name) \ + ((dir) == EDMA_DIR_WRITE \ + ? GET_64(dw, wr_##name) \ + : GET_64(dw, rd_##name)) + +#define SET_BOTH_64(dw, name, value) \ + do { \ + SET_64(dw, wr_##name, value); \ + SET_64(dw, rd_##name, value); \ + } while (0) + +#endif /* CONFIG_64BIT */ + +#define SET_COMPAT(dw, name, value) \ + writel(value, &(__dw_regs(dw)->type.unroll.name)) + +#define SET_RW_COMPAT(dw, dir, name, value) \ + do { \ + if ((dir) == EDMA_DIR_WRITE) \ + SET_COMPAT(dw, wr_##name, value); \ + else \ + SET_COMPAT(dw, rd_##name, value); \ } while (0) static inline struct dw_edma_v0_ch_regs __iomem * __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch) { - if (dw->mode == EDMA_MODE_LEGACY) + if (dw->mf == EDMA_MF_EDMA_LEGACY) return &(__dw_regs(dw)->type.legacy.ch); if (dir == EDMA_DIR_WRITE) @@ -68,7 +108,7 @@ __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch) static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, u32 value, void __iomem *addr) { - if (dw->mode == EDMA_MODE_LEGACY) { + if (dw->mf == EDMA_MF_EDMA_LEGACY) { u32 viewport_sel; unsigned long flags; @@ -93,7 +133,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, { u32 value; - if (dw->mode == EDMA_MODE_LEGACY) { + if (dw->mf == EDMA_MF_EDMA_LEGACY) { u32 viewport_sel; unsigned long flags; @@ -115,21 +155,86 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, return value; } -#define SET_CH(dw, dir, ch, name, value) \ +#define SET_CH_32(dw, dir, ch, name, value) \ writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name)) -#define GET_CH(dw, dir, ch, name) \ +#define GET_CH_32(dw, dir, ch, name) \ readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name)) -#define SET_LL(ll, value) \ +#define SET_LL_32(ll, value) \ writel(value, ll) +#ifdef CONFIG_64BIT + +static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, + u64 
value, void __iomem *addr) +{ + if (dw->mf == EDMA_MF_EDMA_LEGACY) { + u32 viewport_sel; + unsigned long flags; + + raw_spin_lock_irqsave(&dw->lock, flags); + + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + if (dir == EDMA_DIR_READ) + viewport_sel |= BIT(31); + + writel(viewport_sel, + &(__dw_regs(dw)->type.legacy.viewport_sel)); + writeq(value, addr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + writeq(value, addr); + } +} + +static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, + const void __iomem *addr) +{ + u64 value; + + if (dw->mf == EDMA_MF_EDMA_LEGACY) { + u32 viewport_sel; + unsigned long flags; + + raw_spin_lock_irqsave(&dw->lock, flags); + + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + if (dir == EDMA_DIR_READ) + viewport_sel |= BIT(31); + + writel(viewport_sel, + &(__dw_regs(dw)->type.legacy.viewport_sel)); + value = readq(addr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + value = readq(addr); + } + + return value; +} + +#define SET_CH_64(dw, dir, ch, name, value) \ + writeq_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name)) + +#define GET_CH_64(dw, dir, ch, name) \ + readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name)) + +#define SET_LL_64(ll, value) \ + writeq(value, ll) + +#endif /* CONFIG_64BIT */ + /* eDMA management callbacks */ void dw_edma_v0_core_off(struct dw_edma *dw) { - SET_BOTH(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); - SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); - SET_BOTH(dw, engine_en, 0); + SET_BOTH_32(dw, int_mask, + EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); + SET_BOTH_32(dw, int_clear, + EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); + SET_BOTH_32(dw, engine_en, 0); } u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) @@ -137,9 +242,11 @@ u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) u32 num_ch; if (dir == EDMA_DIR_WRITE) - num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl)); + num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, + GET_32(dw, ctrl)); else - num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl)); + num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, + GET_32(dw, ctrl)); if (num_ch > EDMA_V0_MAX_NR_CH) num_ch = EDMA_V0_MAX_NR_CH; @@ -153,7 +260,7 @@ enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan) u32 tmp; tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK, - GET_CH(dw, chan->dir, chan->id, ch_control1)); + GET_CH_32(dw, chan->dir, chan->id, ch_control1)); if (tmp == 1) return DMA_IN_PROGRESS; @@ -167,26 +274,28 @@ void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan) { struct dw_edma *dw = chan->chip->dw; - SET_RW(dw, chan->dir, int_clear, - FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id))); + SET_RW_32(dw, chan->dir, int_clear, + FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id))); } void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan) { struct dw_edma *dw = chan->chip->dw; - SET_RW(dw, chan->dir, int_clear, - FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id))); + SET_RW_32(dw, chan->dir, int_clear, + FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id))); } u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir) { - return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status)); + return FIELD_GET(EDMA_V0_DONE_INT_MASK, + GET_RW_32(dw, dir, int_status)); } u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir) { - return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, 
dir, int_status)); + return FIELD_GET(EDMA_V0_ABORT_INT_MASK, + GET_RW_32(dw, dir, int_status)); } static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) @@ -209,15 +318,23 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE); /* Channel control */ - SET_LL(&lli[i].control, control); + SET_LL_32(&lli[i].control, control); /* Transfer size */ - SET_LL(&lli[i].transfer_size, child->sz); - /* SAR - low, high */ - SET_LL(&lli[i].sar_low, lower_32_bits(child->sar)); - SET_LL(&lli[i].sar_high, upper_32_bits(child->sar)); - /* DAR - low, high */ - SET_LL(&lli[i].dar_low, lower_32_bits(child->dar)); - SET_LL(&lli[i].dar_high, upper_32_bits(child->dar)); + SET_LL_32(&lli[i].transfer_size, child->sz); + /* SAR */ + #ifdef CONFIG_64BIT + SET_LL_64(&lli[i].sar.reg, child->sar); + #else /* CONFIG_64BIT */ + SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar)); + SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar)); + #endif /* CONFIG_64BIT */ + /* DAR */ + #ifdef CONFIG_64BIT + SET_LL_64(&lli[i].dar.reg, child->dar); + #else /* CONFIG_64BIT */ + SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar)); + SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar)); + #endif /* CONFIG_64BIT */ i++; } @@ -227,10 +344,14 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) control |= DW_EDMA_V0_CB; /* Channel control */ - SET_LL(&llp->control, control); - /* Linked list - low, high */ - SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr)); - SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr)); + SET_LL_32(&llp->control, control); + /* Linked list */ + #ifdef CONFIG_64BIT + SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr); + #else /* CONFIG_64BIT */ + SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr)); + SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr)); + #endif /* CONFIG_64BIT */ } void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) @@ -243,28 +364,69 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) if (first) { /* Enable engine */ - SET_RW(dw, chan->dir, engine_en, BIT(0)); + SET_RW_32(dw, chan->dir, engine_en, BIT(0)); + if (dw->mf == EDMA_MF_HDMA_COMPAT) { + switch (chan->id) { + case 0: + SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en, + BIT(0)); + break; + case 1: + SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en, + BIT(0)); + break; + case 2: + SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en, + BIT(0)); + break; + case 3: + SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en, + BIT(0)); + break; + case 4: + SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en, + BIT(0)); + break; + case 5: + SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en, + BIT(0)); + break; + case 6: + SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en, + BIT(0)); + break; + case 7: + SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en, + BIT(0)); + break; + } + } /* Interrupt unmask - done, abort */ - tmp = GET_RW(dw, chan->dir, int_mask); + tmp = GET_RW_32(dw, chan->dir, int_mask); tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)); tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)); - SET_RW(dw, chan->dir, int_mask, tmp); + SET_RW_32(dw, chan->dir, int_mask, tmp); /* Linked list error */ - tmp = GET_RW(dw, chan->dir, linked_list_err_en); + tmp = GET_RW_32(dw, chan->dir, linked_list_err_en); tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id)); - SET_RW(dw, chan->dir, linked_list_err_en, tmp); + SET_RW_32(dw, chan->dir, linked_list_err_en, tmp); /* Channel control */ - SET_CH(dw, chan->dir, chan->id, ch_control1, 
- (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); - /* Linked list - low, high */ - SET_CH(dw, chan->dir, chan->id, llp_low, - lower_32_bits(chunk->ll_region.paddr)); - SET_CH(dw, chan->dir, chan->id, llp_high, - upper_32_bits(chunk->ll_region.paddr)); + SET_CH_32(dw, chan->dir, chan->id, ch_control1, + (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); + /* Linked list */ + #ifdef CONFIG_64BIT + SET_CH_64(dw, chan->dir, chan->id, llp.reg, + chunk->ll_region.paddr); + #else /* CONFIG_64BIT */ + SET_CH_32(dw, chan->dir, chan->id, llp.lsb, + lower_32_bits(chunk->ll_region.paddr)); + SET_CH_32(dw, chan->dir, chan->id, llp.msb, + upper_32_bits(chunk->ll_region.paddr)); + #endif /* CONFIG_64BIT */ } /* Doorbell */ - SET_RW(dw, chan->dir, doorbell, - FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id)); + SET_RW_32(dw, chan->dir, doorbell, + FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id)); } int dw_edma_v0_core_device_config(struct dw_edma_chan *chan) @@ -273,31 +435,31 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan) u32 tmp = 0; /* MSI done addr - low, high */ - SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo); - SET_RW(dw, chan->dir, done_imwr_high, chan->msi.address_hi); + SET_RW_32(dw, chan->dir, done_imwr.lsb, chan->msi.address_lo); + SET_RW_32(dw, chan->dir, done_imwr.msb, chan->msi.address_hi); /* MSI abort addr - low, high */ - SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo); - SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi); + SET_RW_32(dw, chan->dir, abort_imwr.lsb, chan->msi.address_lo); + SET_RW_32(dw, chan->dir, abort_imwr.msb, chan->msi.address_hi); /* MSI data - low, high */ switch (chan->id) { case 0: case 1: - tmp = GET_RW(dw, chan->dir, ch01_imwr_data); + tmp = GET_RW_32(dw, chan->dir, ch01_imwr_data); break; case 2: case 3: - tmp = GET_RW(dw, chan->dir, ch23_imwr_data); + tmp = GET_RW_32(dw, chan->dir, ch23_imwr_data); break; case 4: case 5: - tmp = GET_RW(dw, chan->dir, ch45_imwr_data); + tmp = GET_RW_32(dw, chan->dir, ch45_imwr_data); break; case 6: case 7: - tmp = GET_RW(dw, chan->dir, ch67_imwr_data); + tmp = GET_RW_32(dw, chan->dir, ch67_imwr_data); break; } @@ -316,22 +478,22 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan) switch (chan->id) { case 0: case 1: - SET_RW(dw, chan->dir, ch01_imwr_data, tmp); + SET_RW_32(dw, chan->dir, ch01_imwr_data, tmp); break; case 2: case 3: - SET_RW(dw, chan->dir, ch23_imwr_data, tmp); + SET_RW_32(dw, chan->dir, ch23_imwr_data, tmp); break; case 4: case 5: - SET_RW(dw, chan->dir, ch45_imwr_data, tmp); + SET_RW_32(dw, chan->dir, ch45_imwr_data, tmp); break; case 6: case 7: - SET_RW(dw, chan->dir, ch67_imwr_data, tmp); + SET_RW_32(dw, chan->dir, ch67_imwr_data, tmp); break; } @@ -344,7 +506,7 @@ void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip) dw_edma_v0_debugfs_on(chip); } -void dw_edma_v0_core_debugfs_off(void) +void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip) { - dw_edma_v0_debugfs_off(); + dw_edma_v0_debugfs_off(chip); } diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h index abae1527f1f9..2afa626b8300 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-core.h +++ b/drivers/dma/dw-edma/dw-edma-v0-core.h @@ -23,6 +23,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first); int dw_edma_v0_core_device_config(struct dw_edma_chan *chan); /* eDMA debug fs callbacks */ void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip); -void dw_edma_v0_core_debugfs_off(void); +void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip); #endif /* 
_DW_EDMA_V0_CORE_H */ diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c index 6f62711a4c94..4b3bcffd15ef 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c @@ -38,7 +38,6 @@ #define CHANNEL_STR "channel" #define REGISTERS_STR "registers" -static struct dentry *base_dir; static struct dw_edma *dw; static struct dw_edma_v0_regs __iomem *regs; @@ -55,7 +54,7 @@ struct debugfs_entries { static int dw_edma_debugfs_u32_get(void *data, u64 *val) { void __iomem *reg = (void __force __iomem *)data; - if (dw->mode == EDMA_MODE_LEGACY && + if (dw->mf == EDMA_MF_EDMA_LEGACY && reg >= (void __iomem *)®s->type.legacy.ch) { void __iomem *ptr = ®s->type.legacy.ch; u32 viewport_sel = 0; @@ -114,12 +113,12 @@ static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs, REGISTER(ch_control1), REGISTER(ch_control2), REGISTER(transfer_size), - REGISTER(sar_low), - REGISTER(sar_high), - REGISTER(dar_low), - REGISTER(dar_high), - REGISTER(llp_low), - REGISTER(llp_high), + REGISTER(sar.lsb), + REGISTER(sar.msb), + REGISTER(dar.lsb), + REGISTER(dar.msb), + REGISTER(llp.lsb), + REGISTER(llp.msb), }; nr_entries = ARRAY_SIZE(debugfs_regs); @@ -132,17 +131,17 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir) /* eDMA global registers */ WR_REGISTER(engine_en), WR_REGISTER(doorbell), - WR_REGISTER(ch_arb_weight_low), - WR_REGISTER(ch_arb_weight_high), + WR_REGISTER(ch_arb_weight.lsb), + WR_REGISTER(ch_arb_weight.msb), /* eDMA interrupts registers */ WR_REGISTER(int_status), WR_REGISTER(int_mask), WR_REGISTER(int_clear), WR_REGISTER(err_status), - WR_REGISTER(done_imwr_low), - WR_REGISTER(done_imwr_high), - WR_REGISTER(abort_imwr_low), - WR_REGISTER(abort_imwr_high), + WR_REGISTER(done_imwr.lsb), + WR_REGISTER(done_imwr.msb), + WR_REGISTER(abort_imwr.lsb), + WR_REGISTER(abort_imwr.msb), WR_REGISTER(ch01_imwr_data), WR_REGISTER(ch23_imwr_data), WR_REGISTER(ch45_imwr_data), @@ -152,8 +151,8 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir) const struct debugfs_entries debugfs_unroll_regs[] = { /* eDMA channel context grouping */ WR_REGISTER_UNROLL(engine_chgroup), - WR_REGISTER_UNROLL(engine_hshake_cnt_low), - WR_REGISTER_UNROLL(engine_hshake_cnt_high), + WR_REGISTER_UNROLL(engine_hshake_cnt.lsb), + WR_REGISTER_UNROLL(engine_hshake_cnt.msb), WR_REGISTER_UNROLL(ch0_pwr_en), WR_REGISTER_UNROLL(ch1_pwr_en), WR_REGISTER_UNROLL(ch2_pwr_en), @@ -174,7 +173,7 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir) nr_entries = ARRAY_SIZE(debugfs_regs); dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); - if (dw->mode == EDMA_MODE_UNROLL) { + if (dw->mf == EDMA_MF_HDMA_COMPAT) { nr_entries = ARRAY_SIZE(debugfs_unroll_regs); dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries, regs_dir); @@ -200,19 +199,19 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir) /* eDMA global registers */ RD_REGISTER(engine_en), RD_REGISTER(doorbell), - RD_REGISTER(ch_arb_weight_low), - RD_REGISTER(ch_arb_weight_high), + RD_REGISTER(ch_arb_weight.lsb), + RD_REGISTER(ch_arb_weight.msb), /* eDMA interrupts registers */ RD_REGISTER(int_status), RD_REGISTER(int_mask), RD_REGISTER(int_clear), - RD_REGISTER(err_status_low), - RD_REGISTER(err_status_high), + RD_REGISTER(err_status.lsb), + RD_REGISTER(err_status.msb), RD_REGISTER(linked_list_err_en), - RD_REGISTER(done_imwr_low), - RD_REGISTER(done_imwr_high), - RD_REGISTER(abort_imwr_low), - RD_REGISTER(abort_imwr_high), + RD_REGISTER(done_imwr.lsb), 
+ RD_REGISTER(done_imwr.msb), + RD_REGISTER(abort_imwr.lsb), + RD_REGISTER(abort_imwr.msb), RD_REGISTER(ch01_imwr_data), RD_REGISTER(ch23_imwr_data), RD_REGISTER(ch45_imwr_data), @@ -221,8 +220,8 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir) const struct debugfs_entries debugfs_unroll_regs[] = { /* eDMA channel context grouping */ RD_REGISTER_UNROLL(engine_chgroup), - RD_REGISTER_UNROLL(engine_hshake_cnt_low), - RD_REGISTER_UNROLL(engine_hshake_cnt_high), + RD_REGISTER_UNROLL(engine_hshake_cnt.lsb), + RD_REGISTER_UNROLL(engine_hshake_cnt.msb), RD_REGISTER_UNROLL(ch0_pwr_en), RD_REGISTER_UNROLL(ch1_pwr_en), RD_REGISTER_UNROLL(ch2_pwr_en), @@ -243,7 +242,7 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir) nr_entries = ARRAY_SIZE(debugfs_regs); dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); - if (dw->mode == EDMA_MODE_UNROLL) { + if (dw->mf == EDMA_MF_HDMA_COMPAT) { nr_entries = ARRAY_SIZE(debugfs_unroll_regs); dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries, regs_dir); @@ -272,7 +271,7 @@ static void dw_edma_debugfs_regs(void) struct dentry *regs_dir; int nr_entries; - regs_dir = debugfs_create_dir(REGISTERS_STR, base_dir); + regs_dir = debugfs_create_dir(REGISTERS_STR, dw->debugfs); if (!regs_dir) return; @@ -293,19 +292,23 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip) if (!regs) return; - base_dir = debugfs_create_dir(dw->name, NULL); - if (!base_dir) + dw->debugfs = debugfs_create_dir(dw->name, NULL); + if (!dw->debugfs) return; - debugfs_create_u32("version", 0444, base_dir, &dw->version); - debugfs_create_u32("mode", 0444, base_dir, &dw->mode); - debugfs_create_u16("wr_ch_cnt", 0444, base_dir, &dw->wr_ch_cnt); - debugfs_create_u16("rd_ch_cnt", 0444, base_dir, &dw->rd_ch_cnt); + debugfs_create_u32("mf", 0444, dw->debugfs, &dw->mf); + debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt); + debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt); dw_edma_debugfs_regs(); } -void dw_edma_v0_debugfs_off(void) +void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip) { - debugfs_remove_recursive(base_dir); + dw = chip->dw; + if (!dw) + return; + + debugfs_remove_recursive(dw->debugfs); + dw->debugfs = NULL; } diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h index 5450a0a94193..d0ff25a9ea5c 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h @@ -13,13 +13,13 @@ #ifdef CONFIG_DEBUG_FS void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip); -void dw_edma_v0_debugfs_off(void); +void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip); #else static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip) { } -static inline void dw_edma_v0_debugfs_off(void) +static inline void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip) { } #endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/dma/dw-edma/dw-edma-v0-regs.h b/drivers/dma/dw-edma/dw-edma-v0-regs.h index dfd70e223c2f..e175f7b20480 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-regs.h +++ b/drivers/dma/dw-edma/dw-edma-v0-regs.h @@ -25,134 +25,209 @@ #define EDMA_V0_CH_EVEN_MSI_DATA_MASK GENMASK(15, 0) struct dw_edma_v0_ch_regs { - u32 ch_control1; /* 0x000 */ - u32 ch_control2; /* 0x004 */ - u32 transfer_size; /* 0x008 */ - u32 sar_low; /* 0x00c */ - u32 sar_high; /* 0x010 */ - u32 dar_low; /* 0x014 */ - u32 dar_high; /* 0x018 */ - u32 llp_low; /* 0x01c */ - u32 llp_high; /* 0x020 */ -}; + u32 ch_control1; /* 0x0000 */ + u32 ch_control2; /* 0x0004 */ + u32 transfer_size; /* 
0x0008 */ + union { + u64 reg; /* 0x000c..0x0010 */ + struct { + u32 lsb; /* 0x000c */ + u32 msb; /* 0x0010 */ + }; + } sar; + union { + u64 reg; /* 0x0014..0x0018 */ + struct { + u32 lsb; /* 0x0014 */ + u32 msb; /* 0x0018 */ + }; + } dar; + union { + u64 reg; /* 0x001c..0x0020 */ + struct { + u32 lsb; /* 0x001c */ + u32 msb; /* 0x0020 */ + }; + } llp; +} __packed; struct dw_edma_v0_ch { - struct dw_edma_v0_ch_regs wr; /* 0x200 */ - u32 padding_1[55]; /* [0x224..0x2fc] */ - struct dw_edma_v0_ch_regs rd; /* 0x300 */ - u32 padding_2[55]; /* [0x324..0x3fc] */ -}; + struct dw_edma_v0_ch_regs wr; /* 0x0200 */ + u32 padding_1[55]; /* 0x0224..0x02fc */ + struct dw_edma_v0_ch_regs rd; /* 0x0300 */ + u32 padding_2[55]; /* 0x0324..0x03fc */ +} __packed; struct dw_edma_v0_unroll { - u32 padding_1; /* 0x0f8 */ - u32 wr_engine_chgroup; /* 0x100 */ - u32 rd_engine_chgroup; /* 0x104 */ - u32 wr_engine_hshake_cnt_low; /* 0x108 */ - u32 wr_engine_hshake_cnt_high; /* 0x10c */ - u32 padding_2[2]; /* [0x110..0x114] */ - u32 rd_engine_hshake_cnt_low; /* 0x118 */ - u32 rd_engine_hshake_cnt_high; /* 0x11c */ - u32 padding_3[2]; /* [0x120..0x124] */ - u32 wr_ch0_pwr_en; /* 0x128 */ - u32 wr_ch1_pwr_en; /* 0x12c */ - u32 wr_ch2_pwr_en; /* 0x130 */ - u32 wr_ch3_pwr_en; /* 0x134 */ - u32 wr_ch4_pwr_en; /* 0x138 */ - u32 wr_ch5_pwr_en; /* 0x13c */ - u32 wr_ch6_pwr_en; /* 0x140 */ - u32 wr_ch7_pwr_en; /* 0x144 */ - u32 padding_4[8]; /* [0x148..0x164] */ - u32 rd_ch0_pwr_en; /* 0x168 */ - u32 rd_ch1_pwr_en; /* 0x16c */ - u32 rd_ch2_pwr_en; /* 0x170 */ - u32 rd_ch3_pwr_en; /* 0x174 */ - u32 rd_ch4_pwr_en; /* 0x178 */ - u32 rd_ch5_pwr_en; /* 0x18c */ - u32 rd_ch6_pwr_en; /* 0x180 */ - u32 rd_ch7_pwr_en; /* 0x184 */ - u32 padding_5[30]; /* [0x188..0x1fc] */ - struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH]; /* [0x200..0x1120] */ -}; + u32 padding_1; /* 0x00f8 */ + u32 wr_engine_chgroup; /* 0x0100 */ + u32 rd_engine_chgroup; /* 0x0104 */ + union { + u64 reg; /* 0x0108..0x010c */ + struct { + u32 lsb; /* 0x0108 */ + u32 msb; /* 0x010c */ + }; + } wr_engine_hshake_cnt; + u32 padding_2[2]; /* 0x0110..0x0114 */ + union { + u64 reg; /* 0x0118..0x011c */ + struct { + u32 lsb; /* 0x0118 */ + u32 msb; /* 0x011c */ + }; + } rd_engine_hshake_cnt; + u32 padding_3[2]; /* 0x0120..0x0124 */ + u32 wr_ch0_pwr_en; /* 0x0128 */ + u32 wr_ch1_pwr_en; /* 0x012c */ + u32 wr_ch2_pwr_en; /* 0x0130 */ + u32 wr_ch3_pwr_en; /* 0x0134 */ + u32 wr_ch4_pwr_en; /* 0x0138 */ + u32 wr_ch5_pwr_en; /* 0x013c */ + u32 wr_ch6_pwr_en; /* 0x0140 */ + u32 wr_ch7_pwr_en; /* 0x0144 */ + u32 padding_4[8]; /* 0x0148..0x0164 */ + u32 rd_ch0_pwr_en; /* 0x0168 */ + u32 rd_ch1_pwr_en; /* 0x016c */ + u32 rd_ch2_pwr_en; /* 0x0170 */ + u32 rd_ch3_pwr_en; /* 0x0174 */ + u32 rd_ch4_pwr_en; /* 0x0178 */ + u32 rd_ch5_pwr_en; /* 0x017c */ + u32 rd_ch6_pwr_en; /* 0x0180 */ + u32 rd_ch7_pwr_en; /* 0x0184 */ + u32 padding_5[30]; /* 0x0188..0x01fc */ + struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH]; /* 0x0200..0x1120 */ +} __packed; struct dw_edma_v0_legacy { - u32 viewport_sel; /* 0x0f8 */ - struct dw_edma_v0_ch_regs ch; /* [0x100..0x120] */ -}; + u32 viewport_sel; /* 0x00f8 */ + struct dw_edma_v0_ch_regs ch; /* 0x0100..0x0120 */ +} __packed; struct dw_edma_v0_regs { /* eDMA global registers */ - u32 ctrl_data_arb_prior; /* 0x000 */ - u32 padding_1; /* 0x004 */ - u32 ctrl; /* 0x008 */ - u32 wr_engine_en; /* 0x00c */ - u32 wr_doorbell; /* 0x010 */ - u32 padding_2; /* 0x014 */ - u32 wr_ch_arb_weight_low; /* 0x018 */ - u32 wr_ch_arb_weight_high; /* 0x01c */ - u32 padding_3[3]; /* 
[0x020..0x028] */ - u32 rd_engine_en; /* 0x02c */ - u32 rd_doorbell; /* 0x030 */ - u32 padding_4; /* 0x034 */ - u32 rd_ch_arb_weight_low; /* 0x038 */ - u32 rd_ch_arb_weight_high; /* 0x03c */ - u32 padding_5[3]; /* [0x040..0x048] */ + u32 ctrl_data_arb_prior; /* 0x0000 */ + u32 padding_1; /* 0x0004 */ + u32 ctrl; /* 0x0008 */ + u32 wr_engine_en; /* 0x000c */ + u32 wr_doorbell; /* 0x0010 */ + u32 padding_2; /* 0x0014 */ + union { + u64 reg; /* 0x0018..0x001c */ + struct { + u32 lsb; /* 0x0018 */ + u32 msb; /* 0x001c */ + }; + } wr_ch_arb_weight; + u32 padding_3[3]; /* 0x0020..0x0028 */ + u32 rd_engine_en; /* 0x002c */ + u32 rd_doorbell; /* 0x0030 */ + u32 padding_4; /* 0x0034 */ + union { + u64 reg; /* 0x0038..0x003c */ + struct { + u32 lsb; /* 0x0038 */ + u32 msb; /* 0x003c */ + }; + } rd_ch_arb_weight; + u32 padding_5[3]; /* 0x0040..0x0048 */ /* eDMA interrupts registers */ - u32 wr_int_status; /* 0x04c */ - u32 padding_6; /* 0x050 */ - u32 wr_int_mask; /* 0x054 */ - u32 wr_int_clear; /* 0x058 */ - u32 wr_err_status; /* 0x05c */ - u32 wr_done_imwr_low; /* 0x060 */ - u32 wr_done_imwr_high; /* 0x064 */ - u32 wr_abort_imwr_low; /* 0x068 */ - u32 wr_abort_imwr_high; /* 0x06c */ - u32 wr_ch01_imwr_data; /* 0x070 */ - u32 wr_ch23_imwr_data; /* 0x074 */ - u32 wr_ch45_imwr_data; /* 0x078 */ - u32 wr_ch67_imwr_data; /* 0x07c */ - u32 padding_7[4]; /* [0x080..0x08c] */ - u32 wr_linked_list_err_en; /* 0x090 */ - u32 padding_8[3]; /* [0x094..0x09c] */ - u32 rd_int_status; /* 0x0a0 */ - u32 padding_9; /* 0x0a4 */ - u32 rd_int_mask; /* 0x0a8 */ - u32 rd_int_clear; /* 0x0ac */ - u32 padding_10; /* 0x0b0 */ - u32 rd_err_status_low; /* 0x0b4 */ - u32 rd_err_status_high; /* 0x0b8 */ - u32 padding_11[2]; /* [0x0bc..0x0c0] */ - u32 rd_linked_list_err_en; /* 0x0c4 */ - u32 padding_12; /* 0x0c8 */ - u32 rd_done_imwr_low; /* 0x0cc */ - u32 rd_done_imwr_high; /* 0x0d0 */ - u32 rd_abort_imwr_low; /* 0x0d4 */ - u32 rd_abort_imwr_high; /* 0x0d8 */ - u32 rd_ch01_imwr_data; /* 0x0dc */ - u32 rd_ch23_imwr_data; /* 0x0e0 */ - u32 rd_ch45_imwr_data; /* 0x0e4 */ - u32 rd_ch67_imwr_data; /* 0x0e8 */ - u32 padding_13[4]; /* [0x0ec..0x0f8] */ + u32 wr_int_status; /* 0x004c */ + u32 padding_6; /* 0x0050 */ + u32 wr_int_mask; /* 0x0054 */ + u32 wr_int_clear; /* 0x0058 */ + u32 wr_err_status; /* 0x005c */ + union { + u64 reg; /* 0x0060..0x0064 */ + struct { + u32 lsb; /* 0x0060 */ + u32 msb; /* 0x0064 */ + }; + } wr_done_imwr; + union { + u64 reg; /* 0x0068..0x006c */ + struct { + u32 lsb; /* 0x0068 */ + u32 msb; /* 0x006c */ + }; + } wr_abort_imwr; + u32 wr_ch01_imwr_data; /* 0x0070 */ + u32 wr_ch23_imwr_data; /* 0x0074 */ + u32 wr_ch45_imwr_data; /* 0x0078 */ + u32 wr_ch67_imwr_data; /* 0x007c */ + u32 padding_7[4]; /* 0x0080..0x008c */ + u32 wr_linked_list_err_en; /* 0x0090 */ + u32 padding_8[3]; /* 0x0094..0x009c */ + u32 rd_int_status; /* 0x00a0 */ + u32 padding_9; /* 0x00a4 */ + u32 rd_int_mask; /* 0x00a8 */ + u32 rd_int_clear; /* 0x00ac */ + u32 padding_10; /* 0x00b0 */ + union { + u64 reg; /* 0x00b4..0x00b8 */ + struct { + u32 lsb; /* 0x00b4 */ + u32 msb; /* 0x00b8 */ + }; + } rd_err_status; + u32 padding_11[2]; /* 0x00bc..0x00c0 */ + u32 rd_linked_list_err_en; /* 0x00c4 */ + u32 padding_12; /* 0x00c8 */ + union { + u64 reg; /* 0x00cc..0x00d0 */ + struct { + u32 lsb; /* 0x00cc */ + u32 msb; /* 0x00d0 */ + }; + } rd_done_imwr; + union { + u64 reg; /* 0x00d4..0x00d8 */ + struct { + u32 lsb; /* 0x00d4 */ + u32 msb; /* 0x00d8 */ + }; + } rd_abort_imwr; + u32 rd_ch01_imwr_data; /* 0x00dc */ + u32 rd_ch23_imwr_data; /* 0x00e0 
*/ + u32 rd_ch45_imwr_data; /* 0x00e4 */ + u32 rd_ch67_imwr_data; /* 0x00e8 */ + u32 padding_13[4]; /* 0x00ec..0x00f8 */ /* eDMA channel context grouping */ union dw_edma_v0_type { - struct dw_edma_v0_legacy legacy; /* [0x0f8..0x120] */ - struct dw_edma_v0_unroll unroll; /* [0x0f8..0x1120] */ + struct dw_edma_v0_legacy legacy; /* 0x00f8..0x0120 */ + struct dw_edma_v0_unroll unroll; /* 0x00f8..0x1120 */ } type; -}; +} __packed; struct dw_edma_v0_lli { u32 control; u32 transfer_size; - u32 sar_low; - u32 sar_high; - u32 dar_low; - u32 dar_high; -}; + union { + u64 reg; + struct { + u32 lsb; + u32 msb; + }; + } sar; + union { + u64 reg; + struct { + u32 lsb; + u32 msb; + }; + } dar; +} __packed; struct dw_edma_v0_llp { u32 control; u32 reserved; - u32 llp_low; - u32 llp_high; -}; + union { + u64 reg; + struct { + u32 lsb; + u32 msb; + }; + } llp; +} __packed; #endif /* _DW_EDMA_V0_REGS_H */ diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile index 8978b898d777..6d11558756f8 100644 --- a/drivers/dma/idxd/Makefile +++ b/drivers/dma/idxd/Makefile @@ -1,2 +1,4 @@ obj-$(CONFIG_INTEL_IDXD) += idxd.o idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o + +idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 0db9b82ed8cf..302cba5ff779 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -39,15 +39,15 @@ struct idxd_user_context { struct iommu_sva *sva; }; -enum idxd_cdev_cleanup { - CDEV_NORMAL = 0, - CDEV_FAILED, -}; - static void idxd_cdev_dev_release(struct device *dev) { - dev_dbg(dev, "releasing cdev device\n"); - kfree(dev); + struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev); + struct idxd_cdev_context *cdev_ctx; + struct idxd_wq *wq = idxd_cdev->wq; + + cdev_ctx = &ictx[wq->idxd->data->type]; + ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor); + kfree(idxd_cdev); } static struct device_type idxd_cdev_device_type = { @@ -62,14 +62,11 @@ static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode) return container_of(cdev, struct idxd_cdev, cdev); } -static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev) -{ - return container_of(idxd_cdev, struct idxd_wq, idxd_cdev); -} - static inline struct idxd_wq *inode_wq(struct inode *inode) { - return idxd_cdev_wq(inode_idxd_cdev(inode)); + struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode); + + return idxd_cdev->wq; } static int idxd_cdev_open(struct inode *inode, struct file *filp) @@ -220,11 +217,10 @@ static __poll_t idxd_cdev_poll(struct file *filp, struct idxd_user_context *ctx = filp->private_data; struct idxd_wq *wq = ctx->wq; struct idxd_device *idxd = wq->idxd; - struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; unsigned long flags; __poll_t out = 0; - poll_wait(filp, &idxd_cdev->err_queue, wait); + poll_wait(filp, &wq->err_queue, wait); spin_lock_irqsave(&idxd->dev_lock, flags); if (idxd->sw_err.valid) out = EPOLLIN | EPOLLRDNORM; @@ -243,101 +239,69 @@ static const struct file_operations idxd_cdev_fops = { int idxd_cdev_get_major(struct idxd_device *idxd) { - return MAJOR(ictx[idxd->type].devt); + return MAJOR(ictx[idxd->data->type].devt); } -static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq) +int idxd_wq_add_cdev(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; - struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; - struct idxd_cdev_context *cdev_ctx; + struct idxd_cdev *idxd_cdev; + struct cdev *cdev; struct device *dev; - int minor, rc; + struct idxd_cdev_context *cdev_ctx; + 
int rc, minor; - idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL); - if (!idxd_cdev->dev) + idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL); + if (!idxd_cdev) return -ENOMEM; - dev = idxd_cdev->dev; - dev->parent = &idxd->pdev->dev; - dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd), - idxd->id, wq->id); - dev->bus = idxd_get_bus_type(idxd); - - cdev_ctx = &ictx[wq->idxd->type]; + idxd_cdev->wq = wq; + cdev = &idxd_cdev->cdev; + dev = &idxd_cdev->dev; + cdev_ctx = &ictx[wq->idxd->data->type]; minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL); if (minor < 0) { - rc = minor; - kfree(dev); - goto ida_err; - } - - dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor); - dev->type = &idxd_cdev_device_type; - rc = device_register(dev); - if (rc < 0) { - dev_err(&idxd->pdev->dev, "device register failed\n"); - goto dev_reg_err; + kfree(idxd_cdev); + return minor; } idxd_cdev->minor = minor; - return 0; - - dev_reg_err: - ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt)); - put_device(dev); - ida_err: - idxd_cdev->dev = NULL; - return rc; -} - -static void idxd_wq_cdev_cleanup(struct idxd_wq *wq, - enum idxd_cdev_cleanup cdev_state) -{ - struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; - struct idxd_cdev_context *cdev_ctx; - - cdev_ctx = &ictx[wq->idxd->type]; - if (cdev_state == CDEV_NORMAL) - cdev_del(&idxd_cdev->cdev); - device_unregister(idxd_cdev->dev); - /* - * The device_type->release() will be called on the device and free - * the allocated struct device. We can just forget it. - */ - ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor); - idxd_cdev->dev = NULL; - idxd_cdev->minor = -1; -} - -int idxd_wq_add_cdev(struct idxd_wq *wq) -{ - struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; - struct cdev *cdev = &idxd_cdev->cdev; - struct device *dev; - int rc; + device_initialize(dev); + dev->parent = &wq->conf_dev; + dev->bus = &dsa_bus_type; + dev->type = &idxd_cdev_device_type; + dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor); - rc = idxd_wq_cdev_dev_setup(wq); + rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id); if (rc < 0) - return rc; + goto err; - dev = idxd_cdev->dev; + wq->idxd_cdev = idxd_cdev; cdev_init(cdev, &idxd_cdev_fops); - cdev_set_parent(cdev, &dev->kobj); - rc = cdev_add(cdev, dev->devt, 1); + rc = cdev_device_add(cdev, dev); if (rc) { dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc); - idxd_wq_cdev_cleanup(wq, CDEV_FAILED); - return rc; + goto err; } - init_waitqueue_head(&idxd_cdev->err_queue); return 0; + + err: + put_device(dev); + wq->idxd_cdev = NULL; + return rc; } void idxd_wq_del_cdev(struct idxd_wq *wq) { - idxd_wq_cdev_cleanup(wq, CDEV_NORMAL); + struct idxd_cdev *idxd_cdev; + struct idxd_cdev_context *cdev_ctx; + + cdev_ctx = &ictx[wq->idxd->data->type]; + idxd_cdev = wq->idxd_cdev; + wq->idxd_cdev = NULL; + cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev); + put_device(&idxd_cdev->dev); } int idxd_cdev_register(void) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 31c819544a22..420b93fe5feb 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -19,7 +19,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, /* Interrupt control bits */ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id) { - struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector); + struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector); pci_msi_mask_irq(data); } @@ -36,7 +36,7 @@ 
void idxd_mask_msix_vectors(struct idxd_device *idxd) void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id) { - struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector); + struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector); pci_msi_unmask_irq(data); } @@ -47,6 +47,7 @@ void idxd_unmask_error_interrupts(struct idxd_device *idxd) genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); genctrl.softerr_int_en = 1; + genctrl.halt_int_en = 1; iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); } @@ -56,6 +57,7 @@ void idxd_mask_error_interrupts(struct idxd_device *idxd) genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); genctrl.softerr_int_en = 0; + genctrl.halt_int_en = 0; iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); } @@ -144,14 +146,8 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq) if (rc < 0) return rc; - if (idxd->type == IDXD_TYPE_DSA) - align = 32; - else if (idxd->type == IDXD_TYPE_IAX) - align = 64; - else - return -ENODEV; - - wq->compls_size = num_descs * idxd->compl_size + align; + align = idxd->data->align; + wq->compls_size = num_descs * idxd->data->compl_size + align; wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr_raw, GFP_KERNEL); if (!wq->compls_raw) { @@ -178,16 +174,14 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq) struct idxd_desc *desc = wq->descs[i]; desc->hw = wq->hw_descs[i]; - if (idxd->type == IDXD_TYPE_DSA) + if (idxd->data->type == IDXD_TYPE_DSA) desc->completion = &wq->compls[i]; - else if (idxd->type == IDXD_TYPE_IAX) + else if (idxd->data->type == IDXD_TYPE_IAX) desc->iax_completion = &wq->iax_compls[i]; - desc->compl_dma = wq->compls_addr + idxd->compl_size * i; + desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i; desc->id = i; desc->wq = wq; desc->cpu = -1; - dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan); - desc->txd.tx_submit = idxd_dma_tx_submit; } return 0; @@ -320,6 +314,19 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq) struct device *dev = &wq->idxd->pdev->dev; devm_iounmap(dev, wq->portal); + wq->portal = NULL; +} + +void idxd_wqs_unmap_portal(struct idxd_device *idxd) +{ + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = idxd->wqs[i]; + + if (wq->portal) + idxd_wq_unmap_portal(wq); + } } int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid) @@ -392,6 +399,32 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq) memset(wq->name, 0, WQ_NAME_SIZE); } +static void idxd_wq_ref_release(struct percpu_ref *ref) +{ + struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active); + + complete(&wq->wq_dead); +} + +int idxd_wq_init_percpu_ref(struct idxd_wq *wq) +{ + int rc; + + memset(&wq->wq_active, 0, sizeof(wq->wq_active)); + rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, 0, GFP_KERNEL); + if (rc < 0) + return rc; + reinit_completion(&wq->wq_dead); + return 0; +} + +void idxd_wq_quiesce(struct idxd_wq *wq) +{ + percpu_ref_kill(&wq->wq_active); + wait_for_completion(&wq->wq_dead); + percpu_ref_exit(&wq->wq_active); +} + /* Device control bits */ static inline bool idxd_is_enabled(struct idxd_device *idxd) { @@ -432,13 +465,13 @@ int idxd_device_init_reset(struct idxd_device *idxd) memset(&cmd, 0, sizeof(cmd)); cmd.cmd = IDXD_CMD_RESET_DEVICE; dev_dbg(dev, "%s: sending reset for init.\n", __func__); - spin_lock_irqsave(&idxd->dev_lock, flags); + spin_lock_irqsave(&idxd->cmd_lock, flags); iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET); while 
(ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE) cpu_relax(); - spin_unlock_irqrestore(&idxd->dev_lock, flags); + spin_unlock_irqrestore(&idxd->cmd_lock, flags); return 0; } @@ -451,7 +484,8 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, if (idxd_device_is_halted(idxd)) { dev_warn(&idxd->pdev->dev, "Device is HALTED!\n"); - *status = IDXD_CMDSTS_HW_ERR; + if (status) + *status = IDXD_CMDSTS_HW_ERR; return; } @@ -460,10 +494,10 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, cmd.operand = operand; cmd.int_req = 1; - spin_lock_irqsave(&idxd->dev_lock, flags); + spin_lock_irqsave(&idxd->cmd_lock, flags); wait_event_lock_irq(idxd->cmd_waitq, !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags), - idxd->dev_lock); + idxd->cmd_lock); dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n", __func__, cmd_code, operand); @@ -477,9 +511,9 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, * After command submitted, release lock and go to sleep until * the command completes via interrupt. */ - spin_unlock_irqrestore(&idxd->dev_lock, flags); + spin_unlock_irqrestore(&idxd->cmd_lock, flags); wait_for_completion(&done); - spin_lock_irqsave(&idxd->dev_lock, flags); + spin_lock_irqsave(&idxd->cmd_lock, flags); if (status) { *status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); idxd->cmd_status = *status & GENMASK(7, 0); @@ -488,7 +522,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags); /* Wake up other pending commands */ wake_up(&idxd->cmd_waitq); - spin_unlock_irqrestore(&idxd->dev_lock, flags); + spin_unlock_irqrestore(&idxd->cmd_lock, flags); } int idxd_device_enable(struct idxd_device *idxd) @@ -521,7 +555,7 @@ void idxd_device_wqs_clear_state(struct idxd_device *idxd) lockdep_assert_held(&idxd->dev_lock); for (i = 0; i < idxd->max_wqs; i++) { - struct idxd_wq *wq = &idxd->wqs[i]; + struct idxd_wq *wq = idxd->wqs[i]; if (wq->state == IDXD_WQ_ENABLED) { idxd_wq_disable_cleanup(wq); @@ -579,6 +613,77 @@ void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid) dev_dbg(dev, "pasid %d drained\n", pasid); } +int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle, + enum idxd_interrupt_type irq_type) +{ + struct device *dev = &idxd->pdev->dev; + u32 operand, status; + + if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))) + return -EOPNOTSUPP; + + dev_dbg(dev, "get int handle, idx %d\n", idx); + + operand = idx & GENMASK(15, 0); + if (irq_type == IDXD_IRQ_IMS) + operand |= CMD_INT_HANDLE_IMS; + + dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand); + + idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status); + + if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) { + dev_dbg(dev, "request int handle failed: %#x\n", status); + return -ENXIO; + } + + *handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0); + + dev_dbg(dev, "int handle acquired: %u\n", *handle); + return 0; +} + +int idxd_device_release_int_handle(struct idxd_device *idxd, int handle, + enum idxd_interrupt_type irq_type) +{ + struct device *dev = &idxd->pdev->dev; + u32 operand, status; + union idxd_command_reg cmd; + unsigned long flags; + + if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))) + return -EOPNOTSUPP; + + dev_dbg(dev, "release int handle, handle %d\n", handle); + + memset(&cmd, 0, sizeof(cmd)); + operand = handle & GENMASK(15, 0); + 
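+ /* CMD_INT_HANDLE_IMS marks the handle as an IMS (not MSI-X) vector. */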
+ if (irq_type == IDXD_IRQ_IMS) + operand |= CMD_INT_HANDLE_IMS; + + cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE; + cmd.operand = operand; + + dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand); + + spin_lock_irqsave(&idxd->cmd_lock, flags); + iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET); + + while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE) + cpu_relax(); + status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); + spin_unlock_irqrestore(&idxd->cmd_lock, flags); + + if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) { + dev_dbg(dev, "release int handle failed: %#x\n", status); + return -ENXIO; + } + + dev_dbg(dev, "int handle released.\n"); + return 0; +} + /* Device configuration bits */ void idxd_msix_perm_setup(struct idxd_device *idxd) { @@ -660,7 +765,7 @@ static int idxd_groups_config_write(struct idxd_device *idxd) ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET)); for (i = 0; i < idxd->max_groups; i++) { - struct idxd_group *group = &idxd->groups[i]; + struct idxd_group *group = idxd->groups[i]; idxd_group_config_write(group); } @@ -739,7 +844,7 @@ static int idxd_wqs_config_write(struct idxd_device *idxd) int i, rc; for (i = 0; i < idxd->max_wqs; i++) { - struct idxd_wq *wq = &idxd->wqs[i]; + struct idxd_wq *wq = idxd->wqs[i]; rc = idxd_wq_config_write(wq); if (rc < 0) @@ -755,7 +860,7 @@ static void idxd_group_flags_setup(struct idxd_device *idxd) /* TC-A 0 and TC-B 1 should be defaults */ for (i = 0; i < idxd->max_groups; i++) { - struct idxd_group *group = &idxd->groups[i]; + struct idxd_group *group = idxd->groups[i]; if (group->tc_a == -1) group->tc_a = group->grpcfg.flags.tc_a = 0; @@ -782,12 +887,12 @@ static int idxd_engines_setup(struct idxd_device *idxd) struct idxd_group *group; for (i = 0; i < idxd->max_groups; i++) { - group = &idxd->groups[i]; + group = idxd->groups[i]; group->grpcfg.engines = 0; } for (i = 0; i < idxd->max_engines; i++) { - eng = &idxd->engines[i]; + eng = idxd->engines[i]; group = eng->group; if (!group) @@ -811,13 +916,13 @@ static int idxd_wqs_setup(struct idxd_device *idxd) struct device *dev = &idxd->pdev->dev; for (i = 0; i < idxd->max_groups; i++) { - group = &idxd->groups[i]; + group = idxd->groups[i]; for (j = 0; j < 4; j++) group->grpcfg.wqs[j] = 0; } for (i = 0; i < idxd->max_wqs; i++) { - wq = &idxd->wqs[i]; + wq = idxd->wqs[i]; group = wq->group; if (!wq->group) @@ -865,3 +970,119 @@ int idxd_device_config(struct idxd_device *idxd) return 0; } + +static int idxd_wq_load_config(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + int wqcfg_offset; + int i; + + wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0); + memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size); + + wq->size = wq->wqcfg->wq_size; + wq->threshold = wq->wqcfg->wq_thresh; + if (wq->wqcfg->priv) + wq->type = IDXD_WQT_KERNEL; + + /* The driver does not support shared WQ mode in read-only config yet */ + if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en) + return -EOPNOTSUPP; + + set_bit(WQ_FLAG_DEDICATED, &wq->flags); + + wq->priority = wq->wqcfg->priority; + + for (i = 0; i < WQCFG_STRIDES(idxd); i++) { + wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i); + dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]); + } + + return 0; +} + +static void idxd_group_load_config(struct idxd_group *group) +{ + struct idxd_device *idxd = group->idxd; + struct device *dev = &idxd->pdev->dev; + int i, j, grpcfg_offset; + + /* + * Load 
WQS bit fields + * Iterate through all 256 bits 64 bits at a time + */ + for (i = 0; i < GRPWQCFG_STRIDES; i++) { + struct idxd_wq *wq; + + grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i); + group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n", + group->id, i, grpcfg_offset, group->grpcfg.wqs[i]); + + if (i * 64 >= idxd->max_wqs) + break; + + /* Iterate through all 64 bits and check for wq set */ + for (j = 0; j < 64; j++) { + int id = i * 64 + j; + + /* No need to check beyond max wqs */ + if (id >= idxd->max_wqs) + break; + + /* Set group assignment for wq if wq bit is set */ + if (group->grpcfg.wqs[i] & BIT(j)) { + wq = idxd->wqs[id]; + wq->group = group; + } + } + } + + grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id); + group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id, + grpcfg_offset, group->grpcfg.engines); + + /* Iterate through all 64 bits to check engines set */ + for (i = 0; i < 64; i++) { + if (i >= idxd->max_engines) + break; + + if (group->grpcfg.engines & BIT(i)) { + struct idxd_engine *engine = idxd->engines[i]; + + engine->group = group; + } + } + + grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id); + group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n", + group->id, grpcfg_offset, group->grpcfg.flags.bits); +} + +int idxd_device_load_config(struct idxd_device *idxd) +{ + union gencfg_reg reg; + int i, rc; + + reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); + idxd->token_limit = reg.token_limit; + + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = idxd->groups[i]; + + idxd_group_load_config(group); + } + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = idxd->wqs[i]; + + rc = idxd_wq_load_config(wq); + if (rc < 0) + return rc; + } + + return 0; +} diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index a15e50126434..77439b645044 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -14,7 +14,10 @@ static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c) { - return container_of(c, struct idxd_wq, dma_chan); + struct idxd_dma_chan *idxd_chan; + + idxd_chan = container_of(c, struct idxd_dma_chan, chan); + return idxd_chan->wq; } void idxd_dma_complete_txd(struct idxd_desc *desc, @@ -135,7 +138,7 @@ static void idxd_dma_issue_pending(struct dma_chan *dma_chan) { } -dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx) +static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx) { struct dma_chan *c = tx->chan; struct idxd_wq *wq = to_idxd_wq(c); @@ -156,14 +159,25 @@ dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx) static void idxd_dma_release(struct dma_device *device) { + struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma); + + kfree(idxd_dma); } int idxd_register_dma_device(struct idxd_device *idxd) { - struct dma_device *dma = &idxd->dma_dev; + struct idxd_dma_dev *idxd_dma; + struct dma_device *dma; + struct device *dev = &idxd->pdev->dev; + int rc; + idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev)); + if (!idxd_dma) + return -ENOMEM; + + dma = &idxd_dma->dma; INIT_LIST_HEAD(&dma->channels); - dma->dev = &idxd->pdev->dev; + dma->dev = dev; dma_cap_set(DMA_PRIVATE, dma->cap_mask); dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask); @@ -179,35 +193,72 @@ int idxd_register_dma_device(struct idxd_device 
*idxd) dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources; dma->device_free_chan_resources = idxd_dma_free_chan_resources; - return dma_async_device_register(&idxd->dma_dev); + rc = dma_async_device_register(dma); + if (rc < 0) { + kfree(idxd_dma); + return rc; + } + + idxd_dma->idxd = idxd; + /* + * This pointer is protected by the refs taken by the dma_chan. It will remain valid + * as long as there are outstanding channels. + */ + idxd->idxd_dma = idxd_dma; + return 0; } void idxd_unregister_dma_device(struct idxd_device *idxd) { - dma_async_device_unregister(&idxd->dma_dev); + dma_async_device_unregister(&idxd->idxd_dma->dma); } int idxd_register_dma_channel(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; - struct dma_device *dma = &idxd->dma_dev; - struct dma_chan *chan = &wq->dma_chan; - int rc; + struct dma_device *dma = &idxd->idxd_dma->dma; + struct device *dev = &idxd->pdev->dev; + struct idxd_dma_chan *idxd_chan; + struct dma_chan *chan; + int rc, i; + + idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev)); + if (!idxd_chan) + return -ENOMEM; - memset(&wq->dma_chan, 0, sizeof(struct dma_chan)); + chan = &idxd_chan->chan; chan->device = dma; list_add_tail(&chan->device_node, &dma->channels); + + for (i = 0; i < wq->num_descs; i++) { + struct idxd_desc *desc = wq->descs[i]; + + dma_async_tx_descriptor_init(&desc->txd, chan); + desc->txd.tx_submit = idxd_dma_tx_submit; + } + rc = dma_async_device_channel_register(dma, chan); - if (rc < 0) + if (rc < 0) { + kfree(idxd_chan); return rc; + } + + wq->idxd_chan = idxd_chan; + idxd_chan->wq = wq; + get_device(&wq->conf_dev); return 0; } void idxd_unregister_dma_channel(struct idxd_wq *wq) { - struct dma_chan *chan = &wq->dma_chan; + struct idxd_dma_chan *idxd_chan = wq->idxd_chan; + struct dma_chan *chan = &idxd_chan->chan; + struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma; - dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan); + dma_async_device_channel_unregister(&idxd_dma->dma, chan); list_del(&chan->device_node); + kfree(wq->idxd_chan); + wq->idxd_chan = NULL; + put_device(&wq->conf_dev); } diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 76014c14f473..26482c7d4c3a 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -8,12 +8,18 @@ #include <linux/percpu-rwsem.h> #include <linux/wait.h> #include <linux/cdev.h> +#include <linux/idr.h> +#include <linux/pci.h> +#include <linux/perf_event.h> #include "registers.h" #define IDXD_DRIVER_VERSION "1.00" extern struct kmem_cache *idxd_desc_pool; +struct idxd_device; +struct idxd_wq; + #define IDXD_REG_TIMEOUT 50 #define IDXD_DRAIN_TIMEOUT 5000 @@ -25,6 +31,7 @@ enum idxd_type { }; #define IDXD_NAME_SIZE 128 +#define IDXD_PMU_EVENT_MAX 64 struct idxd_device_driver { struct device_driver drv; @@ -33,6 +40,7 @@ struct idxd_device_driver { struct idxd_irq_entry { struct idxd_device *idxd; int id; + int vector; struct llist_head pending_llist; struct list_head work_list; /* @@ -56,6 +64,31 @@ struct idxd_group { int tc_b; }; +struct idxd_pmu { + struct idxd_device *idxd; + + struct perf_event *event_list[IDXD_PMU_EVENT_MAX]; + int n_events; + + DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX); + + struct pmu pmu; + char name[IDXD_NAME_SIZE]; + int cpu; + + int n_counters; + int counter_width; + int n_event_categories; + + bool per_counter_caps_supported; + unsigned long supported_event_categories; + + unsigned long supported_filters; + int n_filters; + + struct hlist_node cpuhp_node; +}; + #define 
IDXD_MAX_PRIORITY 0xf enum idxd_wq_state { @@ -75,10 +108,10 @@ enum idxd_wq_type { }; struct idxd_cdev { + struct idxd_wq *wq; struct cdev cdev; - struct device *dev; + struct device dev; int minor; - struct wait_queue_head err_queue; }; #define IDXD_ALLOCATED_BATCH_SIZE 128U @@ -96,10 +129,18 @@ enum idxd_complete_type { IDXD_COMPLETE_DEV_FAIL, }; +struct idxd_dma_chan { + struct dma_chan chan; + struct idxd_wq *wq; +}; + struct idxd_wq { void __iomem *portal; + struct percpu_ref wq_active; + struct completion wq_dead; struct device conf_dev; - struct idxd_cdev idxd_cdev; + struct idxd_cdev *idxd_cdev; + struct wait_queue_head err_queue; struct idxd_device *idxd; int id; enum idxd_wq_type type; @@ -125,7 +166,7 @@ struct idxd_wq { int compls_size; struct idxd_desc **descs; struct sbitmap_queue sbq; - struct dma_chan dma_chan; + struct idxd_dma_chan *idxd_chan; char name[WQ_NAME_SIZE + 1]; u64 max_xfer_bytes; u32 max_batch_size; @@ -147,6 +188,7 @@ struct idxd_hw { union group_cap_reg group_cap; union engine_cap_reg engine_cap; struct opcap opcap; + u32 cmd_cap; }; enum idxd_device_state { @@ -162,9 +204,22 @@ enum idxd_device_flag { IDXD_FLAG_PASID_ENABLED, }; -struct idxd_device { +struct idxd_dma_dev { + struct idxd_device *idxd; + struct dma_device dma; +}; + +struct idxd_driver_data { + const char *name_prefix; enum idxd_type type; + struct device_type *dev_type; + int compl_size; + int align; +}; + +struct idxd_device { struct device conf_dev; + struct idxd_driver_data *data; struct list_head list; struct idxd_hw hw; enum idxd_device_state state; @@ -177,10 +232,11 @@ struct idxd_device { void __iomem *reg_base; spinlock_t dev_lock; /* spinlock for device */ + spinlock_t cmd_lock; /* spinlock for device commands */ struct completion *cmd_done; - struct idxd_group *groups; - struct idxd_wq *wqs; - struct idxd_engine *engines; + struct idxd_group **groups; + struct idxd_wq **wqs; + struct idxd_engine **engines; struct iommu_sva *sva; unsigned int pasid; @@ -202,17 +258,19 @@ struct idxd_device { int token_limit; int nr_tokens; /* non-reserved tokens */ unsigned int wqcfg_size; - int compl_size; union sw_err_reg sw_err; wait_queue_head_t cmd_waitq; - struct msix_entry *msix_entries; int num_wq_irqs; struct idxd_irq_entry *irq_entries; - struct dma_device dma_dev; + struct idxd_dma_dev *idxd_dma; struct workqueue_struct *wq; struct work_struct work; + + int *int_handles; + + struct idxd_pmu *idxd_pmu; }; /* IDXD software descriptor */ @@ -232,6 +290,7 @@ struct idxd_desc { struct list_head list; int id; int cpu; + unsigned int vector; struct idxd_wq *wq; }; @@ -242,6 +301,44 @@ extern struct bus_type dsa_bus_type; extern struct bus_type iax_bus_type; extern bool support_enqcmd; +extern struct ida idxd_ida; +extern struct device_type dsa_device_type; +extern struct device_type iax_device_type; +extern struct device_type idxd_wq_device_type; +extern struct device_type idxd_engine_device_type; +extern struct device_type idxd_group_device_type; + +static inline bool is_dsa_dev(struct device *dev) +{ + return dev->type == &dsa_device_type; +} + +static inline bool is_iax_dev(struct device *dev) +{ + return dev->type == &iax_device_type; +} + +static inline bool is_idxd_dev(struct device *dev) +{ + return is_dsa_dev(dev) || is_iax_dev(dev); +} + +static inline bool is_idxd_wq_dev(struct device *dev) +{ + return dev->type == &idxd_wq_device_type; +} + +static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq) +{ + if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0) + 
return true; + return false; +} + +static inline bool is_idxd_wq_cdev(struct idxd_wq *wq) +{ + return wq->type == IDXD_WQT_USER; +} static inline bool wq_dedicated(struct idxd_wq *wq) { @@ -268,6 +365,11 @@ enum idxd_portal_prot { IDXD_PORTAL_LIMITED, }; +enum idxd_interrupt_type { + IDXD_IRQ_MSIX = 0, + IDXD_IRQ_IMS, +}; + static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot) { return prot * 0x1000; @@ -279,18 +381,6 @@ static inline int idxd_get_wq_portal_full_offset(int wq_id, return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot); } -static inline void idxd_set_type(struct idxd_device *idxd) -{ - struct pci_dev *pdev = idxd->pdev; - - if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0) - idxd->type = IDXD_TYPE_DSA; - else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0) - idxd->type = IDXD_TYPE_IAX; - else - idxd->type = IDXD_TYPE_UNKNOWN; -} - static inline void idxd_wq_get(struct idxd_wq *wq) { wq->client_count++; @@ -306,19 +396,17 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq) return wq->client_count; }; -const char *idxd_get_dev_name(struct idxd_device *idxd); int idxd_register_bus_type(void); void idxd_unregister_bus_type(void); -int idxd_setup_sysfs(struct idxd_device *idxd); -void idxd_cleanup_sysfs(struct idxd_device *idxd); +int idxd_register_devices(struct idxd_device *idxd); +void idxd_unregister_devices(struct idxd_device *idxd); int idxd_register_driver(void); void idxd_unregister_driver(void); -struct bus_type *idxd_get_bus_type(struct idxd_device *idxd); +void idxd_wqs_quiesce(struct idxd_device *idxd); /* device interrupt control */ void idxd_msix_perm_setup(struct idxd_device *idxd); void idxd_msix_perm_clear(struct idxd_device *idxd); -irqreturn_t idxd_irq_handler(int vec, void *data); irqreturn_t idxd_misc_thread(int vec, void *data); irqreturn_t idxd_wq_thread(int irq, void *data); void idxd_mask_error_interrupts(struct idxd_device *idxd); @@ -336,8 +424,14 @@ void idxd_device_cleanup(struct idxd_device *idxd); int idxd_device_config(struct idxd_device *idxd); void idxd_device_wqs_clear_state(struct idxd_device *idxd); void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid); +int idxd_device_load_config(struct idxd_device *idxd); +int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle, + enum idxd_interrupt_type irq_type); +int idxd_device_release_int_handle(struct idxd_device *idxd, int handle, + enum idxd_interrupt_type irq_type); /* work queue control */ +void idxd_wqs_unmap_portal(struct idxd_device *idxd); int idxd_wq_alloc_resources(struct idxd_wq *wq); void idxd_wq_free_resources(struct idxd_wq *wq); int idxd_wq_enable(struct idxd_wq *wq); @@ -349,6 +443,8 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq); void idxd_wq_disable_cleanup(struct idxd_wq *wq); int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid); int idxd_wq_disable_pasid(struct idxd_wq *wq); +void idxd_wq_quiesce(struct idxd_wq *wq); +int idxd_wq_init_percpu_ref(struct idxd_wq *wq); /* submission */ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc); @@ -363,7 +459,6 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq); void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res); void idxd_dma_complete_txd(struct idxd_desc *desc, enum idxd_complete_type comp_type); -dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx); /* cdev */ int idxd_cdev_register(void); @@ -372,4 +467,19 @@ int idxd_cdev_get_major(struct idxd_device *idxd); int idxd_wq_add_cdev(struct idxd_wq *wq); 
void idxd_wq_del_cdev(struct idxd_wq *wq); +/* perfmon */ +#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON) +int perfmon_pmu_init(struct idxd_device *idxd); +void perfmon_pmu_remove(struct idxd_device *idxd); +void perfmon_counter_overflow(struct idxd_device *idxd); +void perfmon_init(void); +void perfmon_exit(void); +#else +static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; } +static inline void perfmon_pmu_remove(struct idxd_device *idxd) {} +static inline void perfmon_counter_overflow(struct idxd_device *idxd) {} +static inline void perfmon_init(void) {} +static inline void perfmon_exit(void) {} +#endif + #endif diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 6584b0ec07d5..2a926bef87f2 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -21,6 +21,7 @@ #include "../dmaengine.h" #include "registers.h" #include "idxd.h" +#include "perfmon.h" MODULE_VERSION(IDXD_DRIVER_VERSION); MODULE_LICENSE("GPL v2"); @@ -33,35 +34,39 @@ MODULE_PARM_DESC(sva, "Toggle SVA support on/off"); #define DRV_NAME "idxd" bool support_enqcmd; - -static struct idr idxd_idrs[IDXD_TYPE_MAX]; -static DEFINE_MUTEX(idxd_idr_lock); +DEFINE_IDA(idxd_ida); + +static struct idxd_driver_data idxd_driver_data[] = { + [IDXD_TYPE_DSA] = { + .name_prefix = "dsa", + .type = IDXD_TYPE_DSA, + .compl_size = sizeof(struct dsa_completion_record), + .align = 32, + .dev_type = &dsa_device_type, + }, + [IDXD_TYPE_IAX] = { + .name_prefix = "iax", + .type = IDXD_TYPE_IAX, + .compl_size = sizeof(struct iax_completion_record), + .align = 64, + .dev_type = &iax_device_type, + }, +}; static struct pci_device_id idxd_pci_tbl[] = { /* DSA ver 1.0 platforms */ - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) }, + { PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) }, /* IAX ver 1.0 platforms */ - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IAX_SPR0) }, + { PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) }, { 0, } }; MODULE_DEVICE_TABLE(pci, idxd_pci_tbl); -static char *idxd_name[] = { - "dsa", - "iax" -}; - -const char *idxd_get_dev_name(struct idxd_device *idxd) -{ - return idxd_name[idxd->type]; -} - static int idxd_setup_interrupts(struct idxd_device *idxd) { struct pci_dev *pdev = idxd->pdev; struct device *dev = &pdev->dev; - struct msix_entry *msix; struct idxd_irq_entry *irq_entry; int i, msixcnt; int rc = 0; @@ -69,23 +74,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd) msixcnt = pci_msix_vec_count(pdev); if (msixcnt < 0) { dev_err(dev, "Not MSI-X interrupt capable.\n"); - goto err_no_irq; + return -ENOSPC; } - idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) * - msixcnt, GFP_KERNEL); - if (!idxd->msix_entries) { - rc = -ENOMEM; - goto err_no_irq; - } - - for (i = 0; i < msixcnt; i++) - idxd->msix_entries[i].entry = i; - - rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt); - if (rc) { - dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt); - goto err_no_irq; + rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX); + if (rc != msixcnt) { + dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc); + return -ENOSPC; } dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt); @@ -93,119 +88,266 @@ static int idxd_setup_interrupts(struct idxd_device *idxd) * We implement 1 completion list per MSI-X entry except for * entry 0, which is for errors and others. 
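 * For example (illustrative numbers only), a device reporting
 * msixcnt == 9 ends up with one misc vector and eight per-wq
 * completion vectors (num_wq_irqs == 8).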
*/ - idxd->irq_entries = devm_kcalloc(dev, msixcnt, - sizeof(struct idxd_irq_entry), - GFP_KERNEL); + idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry), + GFP_KERNEL, dev_to_node(dev)); if (!idxd->irq_entries) { rc = -ENOMEM; - goto err_no_irq; + goto err_irq_entries; } for (i = 0; i < msixcnt; i++) { idxd->irq_entries[i].id = i; idxd->irq_entries[i].idxd = idxd; + idxd->irq_entries[i].vector = pci_irq_vector(pdev, i); spin_lock_init(&idxd->irq_entries[i].list_lock); } - msix = &idxd->msix_entries[0]; irq_entry = &idxd->irq_entries[0]; - rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler, - idxd_misc_thread, 0, "idxd-misc", - irq_entry); + rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread, + 0, "idxd-misc", irq_entry); if (rc < 0) { dev_err(dev, "Failed to allocate misc interrupt.\n"); - goto err_no_irq; + goto err_misc_irq; } - dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", - msix->vector); + dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector); /* first MSI-X entry is not for wq interrupts */ idxd->num_wq_irqs = msixcnt - 1; for (i = 1; i < msixcnt; i++) { - msix = &idxd->msix_entries[i]; irq_entry = &idxd->irq_entries[i]; init_llist_head(&idxd->irq_entries[i].pending_llist); INIT_LIST_HEAD(&idxd->irq_entries[i].work_list); - rc = devm_request_threaded_irq(dev, msix->vector, - idxd_irq_handler, - idxd_wq_thread, 0, - "idxd-portal", irq_entry); + rc = request_threaded_irq(irq_entry->vector, NULL, + idxd_wq_thread, 0, "idxd-portal", irq_entry); if (rc < 0) { - dev_err(dev, "Failed to allocate irq %d.\n", - msix->vector); - goto err_no_irq; + dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector); + goto err_wq_irqs; + } + + dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector); + if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) { + /* + * The MSIX vector enumeration starts at 1 with vector 0 being the + * misc interrupt that handles non I/O completion events. The + * interrupt handles are for IMS enumeration on guest. The misc + * interrupt vector does not require a handle and therefore we start + * the int_handles at index 0. Since 'i' starts at 1, the first + * int_handles index will be 0. 
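+ * For example, the first loop iteration (i == 1) requests a handle
+ * for MSIX vector 1 and stores it in int_handles[0].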
+ */ + rc = idxd_device_request_int_handle(idxd, i, &idxd->int_handles[i - 1], + IDXD_IRQ_MSIX); + if (rc < 0) { + free_irq(irq_entry->vector, irq_entry); + goto err_wq_irqs; + } + dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i - 1]); } - dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", - i, msix->vector); } idxd_unmask_error_interrupts(idxd); idxd_msix_perm_setup(idxd); return 0; - err_no_irq: + err_wq_irqs: + while (--i >= 0) { + irq_entry = &idxd->irq_entries[i]; + free_irq(irq_entry->vector, irq_entry); + if (i != 0) + idxd_device_release_int_handle(idxd, + idxd->int_handles[i], IDXD_IRQ_MSIX); + } + err_misc_irq: /* Disable error interrupt generation */ idxd_mask_error_interrupts(idxd); - pci_disable_msix(pdev); + err_irq_entries: + pci_free_irq_vectors(pdev); dev_err(dev, "No usable interrupts\n"); return rc; } -static int idxd_setup_internals(struct idxd_device *idxd) +static int idxd_setup_wqs(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; - int i; + struct idxd_wq *wq; + int i, rc; - init_waitqueue_head(&idxd->cmd_waitq); - idxd->groups = devm_kcalloc(dev, idxd->max_groups, - sizeof(struct idxd_group), GFP_KERNEL); - if (!idxd->groups) - return -ENOMEM; - - for (i = 0; i < idxd->max_groups; i++) { - idxd->groups[i].idxd = idxd; - idxd->groups[i].id = i; - idxd->groups[i].tc_a = -1; - idxd->groups[i].tc_b = -1; - } - - idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq), - GFP_KERNEL); + idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *), + GFP_KERNEL, dev_to_node(dev)); if (!idxd->wqs) return -ENOMEM; - idxd->engines = devm_kcalloc(dev, idxd->max_engines, - sizeof(struct idxd_engine), GFP_KERNEL); - if (!idxd->engines) - return -ENOMEM; - for (i = 0; i < idxd->max_wqs; i++) { - struct idxd_wq *wq = &idxd->wqs[i]; + wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev)); + if (!wq) { + rc = -ENOMEM; + goto err; + } wq->id = i; wq->idxd = idxd; + device_initialize(&wq->conf_dev); + wq->conf_dev.parent = &idxd->conf_dev; + wq->conf_dev.bus = &dsa_bus_type; + wq->conf_dev.type = &idxd_wq_device_type; + rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id); + if (rc < 0) { + put_device(&wq->conf_dev); + goto err; + } + mutex_init(&wq->wq_lock); - wq->idxd_cdev.minor = -1; + init_waitqueue_head(&wq->err_queue); + init_completion(&wq->wq_dead); wq->max_xfer_bytes = idxd->max_xfer_bytes; wq->max_batch_size = idxd->max_batch_size; - wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL); - if (!wq->wqcfg) - return -ENOMEM; + wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev)); + if (!wq->wqcfg) { + put_device(&wq->conf_dev); + rc = -ENOMEM; + goto err; + } + idxd->wqs[i] = wq; } + return 0; + + err: + while (--i >= 0) + put_device(&idxd->wqs[i]->conf_dev); + return rc; +} + +static int idxd_setup_engines(struct idxd_device *idxd) +{ + struct idxd_engine *engine; + struct device *dev = &idxd->pdev->dev; + int i, rc; + + idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *), + GFP_KERNEL, dev_to_node(dev)); + if (!idxd->engines) + return -ENOMEM; + for (i = 0; i < idxd->max_engines; i++) { - idxd->engines[i].idxd = idxd; - idxd->engines[i].id = i; + engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev)); + if (!engine) { + rc = -ENOMEM; + goto err; + } + + engine->id = i; + engine->idxd = idxd; + device_initialize(&engine->conf_dev); + engine->conf_dev.parent = &idxd->conf_dev; + engine->conf_dev.type = &idxd_engine_device_type; + rc = 
dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id); + if (rc < 0) { + put_device(&engine->conf_dev); + goto err; + } + + idxd->engines[i] = engine; } - idxd->wq = create_workqueue(dev_name(dev)); - if (!idxd->wq) + return 0; + + err: + while (--i >= 0) + put_device(&idxd->engines[i]->conf_dev); + return rc; +} + +static int idxd_setup_groups(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + struct idxd_group *group; + int i, rc; + + idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *), + GFP_KERNEL, dev_to_node(dev)); + if (!idxd->groups) return -ENOMEM; + for (i = 0; i < idxd->max_groups; i++) { + group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev)); + if (!group) { + rc = -ENOMEM; + goto err; + } + + group->id = i; + group->idxd = idxd; + device_initialize(&group->conf_dev); + group->conf_dev.parent = &idxd->conf_dev; + group->conf_dev.bus = &dsa_bus_type; + group->conf_dev.type = &idxd_group_device_type; + rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id); + if (rc < 0) { + put_device(&group->conf_dev); + goto err; + } + + idxd->groups[i] = group; + group->tc_a = -1; + group->tc_b = -1; + } + + return 0; + + err: + while (--i >= 0) + put_device(&idxd->groups[i]->conf_dev); + return rc; +} + +static int idxd_setup_internals(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc, i; + + init_waitqueue_head(&idxd->cmd_waitq); + + if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) { + idxd->int_handles = devm_kcalloc(dev, idxd->max_wqs, sizeof(int), GFP_KERNEL); + if (!idxd->int_handles) + return -ENOMEM; + } + + rc = idxd_setup_wqs(idxd); + if (rc < 0) + goto err_wqs; + + rc = idxd_setup_engines(idxd); + if (rc < 0) + goto err_engine; + + rc = idxd_setup_groups(idxd); + if (rc < 0) + goto err_group; + + idxd->wq = create_workqueue(dev_name(dev)); + if (!idxd->wq) { + rc = -ENOMEM; + goto err_wkq_create; + } + return 0; + + err_wkq_create: + for (i = 0; i < idxd->max_groups; i++) + put_device(&idxd->groups[i]->conf_dev); + err_group: + for (i = 0; i < idxd->max_engines; i++) + put_device(&idxd->engines[i]->conf_dev); + err_engine: + for (i = 0; i < idxd->max_wqs; i++) + put_device(&idxd->wqs[i]->conf_dev); + err_wqs: + kfree(idxd->int_handles); + return rc; } static void idxd_read_table_offsets(struct idxd_device *idxd) @@ -233,6 +375,12 @@ static void idxd_read_caps(struct idxd_device *idxd) /* reading generic capabilities */ idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET); dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits); + + if (idxd->hw.gen_cap.cmd_cap) { + idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET); + dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap); + } + idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift; dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes); idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift; @@ -275,17 +423,34 @@ static void idxd_read_caps(struct idxd_device *idxd) } } -static struct idxd_device *idxd_alloc(struct pci_dev *pdev) +static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data) { struct device *dev = &pdev->dev; struct idxd_device *idxd; + int rc; - idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL); + idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev)); if (!idxd) return NULL; idxd->pdev = pdev; + idxd->data = data; + idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL); + if (idxd->id < 0) + 
return NULL; + + device_initialize(&idxd->conf_dev); + idxd->conf_dev.parent = dev; + idxd->conf_dev.bus = &dsa_bus_type; + idxd->conf_dev.type = idxd->data->dev_type; + rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd->data->name_prefix, idxd->id); + if (rc < 0) { + put_device(&idxd->conf_dev); + return NULL; + } + spin_lock_init(&idxd->dev_lock); + spin_lock_init(&idxd->cmd_lock); return idxd; } @@ -338,11 +503,18 @@ static int idxd_probe(struct idxd_device *idxd) dev_dbg(dev, "IDXD reset complete\n"); if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) { - rc = idxd_enable_system_pasid(idxd); - if (rc < 0) - dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc); - else - set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); + rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA); + if (rc == 0) { + rc = idxd_enable_system_pasid(idxd); + if (rc < 0) { + iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); + dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc); + } else { + set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); + } + } else { + dev_warn(dev, "Unable to turn on SVA feature.\n"); + } } else if (!sva) { dev_warn(dev, "User forced SVA off via module param.\n"); } @@ -352,80 +524,75 @@ static int idxd_probe(struct idxd_device *idxd) rc = idxd_setup_internals(idxd); if (rc) - goto err_setup; + goto err; + + /* If the configs are readonly, then load them from device */ + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { + dev_dbg(dev, "Loading RO device config\n"); + rc = idxd_device_load_config(idxd); + if (rc < 0) + goto err; + } rc = idxd_setup_interrupts(idxd); if (rc) - goto err_setup; + goto err; dev_dbg(dev, "IDXD interrupt setup complete.\n"); - mutex_lock(&idxd_idr_lock); - idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL); - mutex_unlock(&idxd_idr_lock); - if (idxd->id < 0) { - rc = -ENOMEM; - goto err_idr_fail; - } - idxd->major = idxd_cdev_get_major(idxd); + rc = perfmon_pmu_init(idxd); + if (rc < 0) + dev_warn(dev, "Failed to initialize perfmon. 
No PMU support: %d\n", rc); + dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id); return 0; - err_idr_fail: - idxd_mask_error_interrupts(idxd); - idxd_mask_msix_vectors(idxd); - err_setup: + err: if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); + iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); return rc; } -static void idxd_type_init(struct idxd_device *idxd) -{ - if (idxd->type == IDXD_TYPE_DSA) - idxd->compl_size = sizeof(struct dsa_completion_record); - else if (idxd->type == IDXD_TYPE_IAX) - idxd->compl_size = sizeof(struct iax_completion_record); -} - static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; struct idxd_device *idxd; + struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data; int rc; - rc = pcim_enable_device(pdev); + rc = pci_enable_device(pdev); if (rc) return rc; dev_dbg(dev, "Alloc IDXD context\n"); - idxd = idxd_alloc(pdev); - if (!idxd) - return -ENOMEM; + idxd = idxd_alloc(pdev, data); + if (!idxd) { + rc = -ENOMEM; + goto err_idxd_alloc; + } dev_dbg(dev, "Mapping BARs\n"); - idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0); - if (!idxd->reg_base) - return -ENOMEM; + idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0); + if (!idxd->reg_base) { + rc = -ENOMEM; + goto err_iomap; + } dev_dbg(dev, "Set DMA masks\n"); rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (rc) rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) - return rc; + goto err; rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (rc) rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) - return rc; - - idxd_set_type(idxd); - - idxd_type_init(idxd); + goto err; dev_dbg(dev, "Set PCI master\n"); pci_set_master(pdev); @@ -435,13 +602,13 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) rc = idxd_probe(idxd); if (rc) { dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n"); - return -ENODEV; + goto err; } - rc = idxd_setup_sysfs(idxd); + rc = idxd_register_devices(idxd); if (rc) { dev_err(dev, "IDXD sysfs setup failed\n"); - return -ENODEV; + goto err; } idxd->state = IDXD_DEV_CONF_READY; @@ -450,6 +617,14 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) idxd->hw.version); return 0; + + err: + pci_iounmap(pdev, idxd->reg_base); + err_iomap: + put_device(&idxd->conf_dev); + err_idxd_alloc: + pci_disable_device(pdev); + return rc; } static void idxd_flush_pending_llist(struct idxd_irq_entry *ie) @@ -478,6 +653,36 @@ static void idxd_flush_work_list(struct idxd_irq_entry *ie) } } +void idxd_wqs_quiesce(struct idxd_device *idxd) +{ + struct idxd_wq *wq; + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + wq = idxd->wqs[i]; + if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL) + idxd_wq_quiesce(wq); + } +} + +static void idxd_release_int_handles(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i, rc; + + for (i = 0; i < idxd->num_wq_irqs; i++) { + if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) { + rc = idxd_device_release_int_handle(idxd, idxd->int_handles[i], + IDXD_IRQ_MSIX); + if (rc < 0) + dev_warn(dev, "irq handle %d release failed\n", + idxd->int_handles[i]); + else + dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i]); + } + } +} + static void idxd_shutdown(struct pci_dev *pdev) { struct idxd_device *idxd = pci_get_drvdata(pdev); @@ -495,7 +700,8 @@ static void idxd_shutdown(struct pci_dev *pdev) for (i = 0; i < msixcnt; i++) { irq_entry 
= &idxd->irq_entries[i]; - synchronize_irq(idxd->msix_entries[i].vector); + synchronize_irq(irq_entry->vector); + free_irq(irq_entry->vector, irq_entry); if (i == 0) continue; idxd_flush_pending_llist(irq_entry); @@ -503,6 +709,10 @@ static void idxd_shutdown(struct pci_dev *pdev) } idxd_msix_perm_clear(idxd); + idxd_release_int_handles(idxd); + pci_free_irq_vectors(pdev); + pci_iounmap(pdev, idxd->reg_base); + pci_disable_device(pdev); destroy_workqueue(idxd->wq); } @@ -511,13 +721,12 @@ static void idxd_remove(struct pci_dev *pdev) struct idxd_device *idxd = pci_get_drvdata(pdev); dev_dbg(&pdev->dev, "%s called\n", __func__); - idxd_cleanup_sysfs(idxd); idxd_shutdown(pdev); if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); - mutex_lock(&idxd_idr_lock); - idr_remove(&idxd_idrs[idxd->type], idxd->id); - mutex_unlock(&idxd_idr_lock); + idxd_unregister_devices(idxd); + perfmon_pmu_remove(idxd); + iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); } static struct pci_driver idxd_pci_driver = { @@ -530,7 +739,7 @@ static struct pci_driver idxd_pci_driver = { static int __init idxd_init_module(void) { - int err, i; + int err; /* * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in @@ -546,8 +755,7 @@ static int __init idxd_init_module(void) else support_enqcmd = true; - for (i = 0; i < IDXD_TYPE_MAX; i++) - idr_init(&idxd_idrs[i]); + perfmon_init(); err = idxd_register_bus_type(); if (err < 0) @@ -582,5 +790,6 @@ static void __exit idxd_exit_module(void) pci_unregister_driver(&idxd_pci_driver); idxd_cdev_remove(); idxd_unregister_bus_type(); + perfmon_exit(); } module_exit(idxd_exit_module); diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index f1463fc58112..ae68e1e5487a 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -45,7 +45,7 @@ static void idxd_device_reinit(struct work_struct *work) goto out; for (i = 0; i < idxd->max_wqs; i++) { - struct idxd_wq *wq = &idxd->wqs[i]; + struct idxd_wq *wq = idxd->wqs[i]; if (wq->state == IDXD_WQ_ENABLED) { rc = idxd_wq_enable(wq); @@ -102,15 +102,6 @@ static int idxd_device_schedule_fault_process(struct idxd_device *idxd, return 0; } -irqreturn_t idxd_irq_handler(int vec, void *data) -{ - struct idxd_irq_entry *irq_entry = data; - struct idxd_device *idxd = irq_entry->idxd; - - idxd_mask_msix_vector(idxd, irq_entry->id); - return IRQ_WAKE_THREAD; -} - static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) { struct device *dev = &idxd->pdev->dev; @@ -130,18 +121,18 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) { int id = idxd->sw_err.wq_idx; - struct idxd_wq *wq = &idxd->wqs[id]; + struct idxd_wq *wq = idxd->wqs[id]; if (wq->type == IDXD_WQT_USER) - wake_up_interruptible(&wq->idxd_cdev.err_queue); + wake_up_interruptible(&wq->err_queue); } else { int i; for (i = 0; i < idxd->max_wqs; i++) { - struct idxd_wq *wq = &idxd->wqs[i]; + struct idxd_wq *wq = idxd->wqs[i]; if (wq->type == IDXD_WQT_USER) - wake_up_interruptible(&wq->idxd_cdev.err_queue); + wake_up_interruptible(&wq->err_queue); } } @@ -165,11 +156,8 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) } if (cause & IDXD_INTC_PERFMON_OVFL) { - /* - * Driver does not utilize perfmon counter overflow interrupt - * yet. 
- */ val |= IDXD_INTC_PERFMON_OVFL; + perfmon_counter_overflow(idxd); } val ^= cause; @@ -202,6 +190,8 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) queue_work(idxd->wq, &idxd->work); } else { spin_lock_bh(&idxd->dev_lock); + idxd_wqs_quiesce(idxd); + idxd_wqs_unmap_portal(idxd); idxd_device_wqs_clear_state(idxd); dev_err(&idxd->pdev->dev, "idxd halted, need %s.\n", @@ -235,7 +225,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data) iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET); } - idxd_unmask_msix_vector(idxd, irq_entry->id); return IRQ_HANDLED; } @@ -392,8 +381,6 @@ irqreturn_t idxd_wq_thread(int irq, void *data) int processed; processed = idxd_desc_process(irq_entry); - idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id); - if (processed == 0) return IRQ_NONE; diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c new file mode 100644 index 000000000000..d73004f47cf4 --- /dev/null +++ b/drivers/dma/idxd/perfmon.c @@ -0,0 +1,662 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 Intel Corporation. All rights rsvd. */ + +#include <linux/sched/task.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include "idxd.h" +#include "perfmon.h" + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, + char *buf); + +static cpumask_t perfmon_dsa_cpu_mask; +static bool cpuhp_set_up; +static enum cpuhp_state cpuhp_slot; + +/* + * perf userspace reads this attribute to determine which cpus to open + * counters on. It's connected to perfmon_dsa_cpu_mask, which is + * maintained by the cpu hotplug handlers. + */ +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *perfmon_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group cpumask_attr_group = { + .attrs = perfmon_cpumask_attrs, +}; + +/* + * These attributes specify the bits in the config word that the perf + * syscall uses to pass the event ids and categories to perfmon. + */ +DEFINE_PERFMON_FORMAT_ATTR(event_category, "config:0-3"); +DEFINE_PERFMON_FORMAT_ATTR(event, "config:4-31"); + +/* + * These attributes specify the bits in the config1 word that the perf + * syscall uses to pass filter data to perfmon. 
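+ * A hypothetical invocation (device name and values illustrative):
+ * perf stat -e dsa0/event_category=0x1,event=0x2,filter_wq=0x1/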
+ */ +DEFINE_PERFMON_FORMAT_ATTR(filter_wq, "config1:0-31"); +DEFINE_PERFMON_FORMAT_ATTR(filter_tc, "config1:32-39"); +DEFINE_PERFMON_FORMAT_ATTR(filter_pgsz, "config1:40-43"); +DEFINE_PERFMON_FORMAT_ATTR(filter_sz, "config1:44-51"); +DEFINE_PERFMON_FORMAT_ATTR(filter_eng, "config1:52-59"); + +#define PERFMON_FILTERS_START 2 +#define PERFMON_FILTERS_MAX 5 + +static struct attribute *perfmon_format_attrs[] = { + &format_attr_idxd_event_category.attr, + &format_attr_idxd_event.attr, + &format_attr_idxd_filter_wq.attr, + &format_attr_idxd_filter_tc.attr, + &format_attr_idxd_filter_pgsz.attr, + &format_attr_idxd_filter_sz.attr, + &format_attr_idxd_filter_eng.attr, + NULL, +}; + +static struct attribute_group perfmon_format_attr_group = { + .name = "format", + .attrs = perfmon_format_attrs, +}; + +static const struct attribute_group *perfmon_attr_groups[] = { + &perfmon_format_attr_group, + &cpumask_attr_group, + NULL, +}; + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return cpumap_print_to_pagebuf(true, buf, &perfmon_dsa_cpu_mask); +} + +static bool is_idxd_event(struct idxd_pmu *idxd_pmu, struct perf_event *event) +{ + return &idxd_pmu->pmu == event->pmu; +} + +static int perfmon_collect_events(struct idxd_pmu *idxd_pmu, + struct perf_event *leader, + bool do_grp) +{ + struct perf_event *event; + int n, max_count; + + max_count = idxd_pmu->n_counters; + n = idxd_pmu->n_events; + + if (n >= max_count) + return -EINVAL; + + if (is_idxd_event(idxd_pmu, leader)) { + idxd_pmu->event_list[n] = leader; + idxd_pmu->event_list[n]->hw.idx = n; + n++; + } + + if (!do_grp) + return n; + + for_each_sibling_event(event, leader) { + if (!is_idxd_event(idxd_pmu, event) || + event->state <= PERF_EVENT_STATE_OFF) + continue; + + if (n >= max_count) + return -EINVAL; + + idxd_pmu->event_list[n] = event; + idxd_pmu->event_list[n]->hw.idx = n; + n++; + } + + return n; +} + +static void perfmon_assign_hw_event(struct idxd_pmu *idxd_pmu, + struct perf_event *event, int idx) +{ + struct idxd_device *idxd = idxd_pmu->idxd; + struct hw_perf_event *hwc = &event->hw; + + hwc->idx = idx; + hwc->config_base = ioread64(CNTRCFG_REG(idxd, idx)); + hwc->event_base = ioread64(CNTRCFG_REG(idxd, idx)); +} + +static int perfmon_assign_event(struct idxd_pmu *idxd_pmu, + struct perf_event *event) +{ + int i; + + for (i = 0; i < IDXD_PMU_EVENT_MAX; i++) + if (!test_and_set_bit(i, idxd_pmu->used_mask)) + return i; + + return -EINVAL; +} + +/* + * Check whether there are enough counters to satisfy that all the + * events in the group can actually be scheduled at the same time. + * + * To do this, create a fake idxd_pmu object so the event collection + * and assignment functions can be used without affecting the internal + * state of the real idxd_pmu object. 
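+ * Only pmu.name and n_counters are copied into the fake object; the
+ * event_list and used_mask it relies on start out zeroed from kzalloc().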
+ */ +static int perfmon_validate_group(struct idxd_pmu *pmu, + struct perf_event *event) +{ + struct perf_event *leader = event->group_leader; + struct idxd_pmu *fake_pmu; + int i, ret = 0, n, idx; + + fake_pmu = kzalloc(sizeof(*fake_pmu), GFP_KERNEL); + if (!fake_pmu) + return -ENOMEM; + + fake_pmu->pmu.name = pmu->pmu.name; + fake_pmu->n_counters = pmu->n_counters; + + n = perfmon_collect_events(fake_pmu, leader, true); + if (n < 0) { + ret = n; + goto out; + } + + fake_pmu->n_events = n; + n = perfmon_collect_events(fake_pmu, event, false); + if (n < 0) { + ret = n; + goto out; + } + + fake_pmu->n_events = n; + + for (i = 0; i < n; i++) { + event = fake_pmu->event_list[i]; + + idx = perfmon_assign_event(fake_pmu, event); + if (idx < 0) { + ret = idx; + goto out; + } + } +out: + kfree(fake_pmu); + + return ret; +} + +static int perfmon_pmu_event_init(struct perf_event *event) +{ + struct idxd_device *idxd; + int ret = 0; + + idxd = event_to_idxd(event); + event->hw.idx = -1; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* sampling not supported */ + if (event->attr.sample_period) + return -EINVAL; + + if (event->cpu < 0) + return -EINVAL; + + if (event->pmu != &idxd->idxd_pmu->pmu) + return -EINVAL; + + event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd)); + event->cpu = idxd->idxd_pmu->cpu; + event->hw.config = event->attr.config; + + if (event->group_leader != event) + /* non-group events have themselves as leader */ + ret = perfmon_validate_group(idxd->idxd_pmu, event); + + return ret; +} + +static inline u64 perfmon_pmu_read_counter(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct idxd_device *idxd; + int cntr = hwc->idx; + + idxd = event_to_idxd(event); + + return ioread64(CNTRDATA_REG(idxd, cntr)); +} + +static void perfmon_pmu_event_update(struct perf_event *event) +{ + struct idxd_device *idxd = event_to_idxd(event); + u64 prev_raw_count, new_raw_count, delta, p, n; + int shift = 64 - idxd->idxd_pmu->counter_width; + struct hw_perf_event *hwc = &event->hw; + + do { + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = perfmon_pmu_read_counter(event); + } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count); + + n = (new_raw_count << shift); + p = (prev_raw_count << shift); + + delta = ((n - p) >> shift); + + local64_add(delta, &event->count); +} + +void perfmon_counter_overflow(struct idxd_device *idxd) +{ + int i, n_counters, max_loop = OVERFLOW_SIZE; + struct perf_event *event; + unsigned long ovfstatus; + + n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE); + + ovfstatus = ioread32(OVFSTATUS_REG(idxd)); + + /* + * While updating overflowed counters, other counters behind + * them could overflow and be missed in a given pass. + * Normally this could happen at most n_counters times, but in + * theory a tiny counter width could result in continual + * overflows and endless looping. max_loop provides a + * failsafe in that highly unlikely case. + */ + while (ovfstatus && max_loop--) { + /* Figure out which counter(s) overflowed */ + for_each_set_bit(i, &ovfstatus, n_counters) { + unsigned long ovfstatus_clear = 0; + + /* Update event->count for overflowed counter */ + event = idxd->idxd_pmu->event_list[i]; + perfmon_pmu_event_update(event); + /* Writing 1 to OVFSTATUS bit clears it */ + set_bit(i, &ovfstatus_clear); + iowrite32(ovfstatus_clear, OVFSTATUS_REG(idxd)); + } + + ovfstatus = ioread32(OVFSTATUS_REG(idxd)); + } + + /* + * Should never happen. 
If so, it means a counter(s) looped + * around twice while this handler was running. + */ + WARN_ON_ONCE(ovfstatus); +} + +static inline void perfmon_reset_config(struct idxd_device *idxd) +{ + iowrite32(CONFIG_RESET, PERFRST_REG(idxd)); + iowrite32(0, OVFSTATUS_REG(idxd)); + iowrite32(0, PERFFRZ_REG(idxd)); +} + +static inline void perfmon_reset_counters(struct idxd_device *idxd) +{ + iowrite32(CNTR_RESET, PERFRST_REG(idxd)); +} + +static inline void perfmon_reset(struct idxd_device *idxd) +{ + perfmon_reset_config(idxd); + perfmon_reset_counters(idxd); +} + +static void perfmon_pmu_event_start(struct perf_event *event, int mode) +{ + u32 flt_wq, flt_tc, flt_pg_sz, flt_xfer_sz, flt_eng = 0; + u64 cntr_cfg, cntrdata, event_enc, event_cat = 0; + struct hw_perf_event *hwc = &event->hw; + union filter_cfg flt_cfg; + union event_cfg event_cfg; + struct idxd_device *idxd; + int cntr; + + idxd = event_to_idxd(event); + + event->hw.idx = hwc->idx; + cntr = hwc->idx; + + /* Obtain event category and event value from user space */ + event_cfg.val = event->attr.config; + flt_cfg.val = event->attr.config1; + event_cat = event_cfg.event_cat; + event_enc = event_cfg.event_enc; + + /* Obtain filter configuration from user space */ + flt_wq = flt_cfg.wq; + flt_tc = flt_cfg.tc; + flt_pg_sz = flt_cfg.pg_sz; + flt_xfer_sz = flt_cfg.xfer_sz; + flt_eng = flt_cfg.eng; + + if (flt_wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters)) + iowrite32(flt_wq, FLTCFG_REG(idxd, cntr, FLT_WQ)); + if (flt_tc && test_bit(FLT_TC, &idxd->idxd_pmu->supported_filters)) + iowrite32(flt_tc, FLTCFG_REG(idxd, cntr, FLT_TC)); + if (flt_pg_sz && test_bit(FLT_PG_SZ, &idxd->idxd_pmu->supported_filters)) + iowrite32(flt_pg_sz, FLTCFG_REG(idxd, cntr, FLT_PG_SZ)); + if (flt_xfer_sz && test_bit(FLT_XFER_SZ, &idxd->idxd_pmu->supported_filters)) + iowrite32(flt_xfer_sz, FLTCFG_REG(idxd, cntr, FLT_XFER_SZ)); + if (flt_eng && test_bit(FLT_ENG, &idxd->idxd_pmu->supported_filters)) + iowrite32(flt_eng, FLTCFG_REG(idxd, cntr, FLT_ENG)); + + /* Read the start value */ + cntrdata = ioread64(CNTRDATA_REG(idxd, cntr)); + local64_set(&event->hw.prev_count, cntrdata); + + /* Set counter to event/category */ + cntr_cfg = event_cat << CNTRCFG_CATEGORY_SHIFT; + cntr_cfg |= event_enc << CNTRCFG_EVENT_SHIFT; + /* Set interrupt on overflow and counter enable bits */ + cntr_cfg |= (CNTRCFG_IRQ_OVERFLOW | CNTRCFG_ENABLE); + + iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr)); +} + +static void perfmon_pmu_event_stop(struct perf_event *event, int mode) +{ + struct hw_perf_event *hwc = &event->hw; + struct idxd_device *idxd; + int i, cntr = hwc->idx; + u64 cntr_cfg; + + idxd = event_to_idxd(event); + + /* remove this event from event list */ + for (i = 0; i < idxd->idxd_pmu->n_events; i++) { + if (event != idxd->idxd_pmu->event_list[i]) + continue; + + for (++i; i < idxd->idxd_pmu->n_events; i++) + idxd->idxd_pmu->event_list[i - 1] = idxd->idxd_pmu->event_list[i]; + --idxd->idxd_pmu->n_events; + break; + } + + cntr_cfg = ioread64(CNTRCFG_REG(idxd, cntr)); + cntr_cfg &= ~CNTRCFG_ENABLE; + iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr)); + + if (mode == PERF_EF_UPDATE) + perfmon_pmu_event_update(event); + + event->hw.idx = -1; + clear_bit(cntr, idxd->idxd_pmu->used_mask); +} + +static void perfmon_pmu_event_del(struct perf_event *event, int mode) +{ + perfmon_pmu_event_stop(event, PERF_EF_UPDATE); +} + +static int perfmon_pmu_event_add(struct perf_event *event, int flags) +{ + struct idxd_device *idxd = event_to_idxd(event); + struct idxd_pmu *idxd_pmu = 
idxd->idxd_pmu; + struct hw_perf_event *hwc = &event->hw; + int idx, n; + + n = perfmon_collect_events(idxd_pmu, event, false); + if (n < 0) + return n; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + if (!(flags & PERF_EF_START)) + hwc->state |= PERF_HES_ARCH; + + idx = perfmon_assign_event(idxd_pmu, event); + if (idx < 0) + return idx; + + perfmon_assign_hw_event(idxd_pmu, event, idx); + + if (flags & PERF_EF_START) + perfmon_pmu_event_start(event, 0); + + idxd_pmu->n_events = n; + + return 0; +} + +static void enable_perfmon_pmu(struct idxd_device *idxd) +{ + iowrite32(COUNTER_UNFREEZE, PERFFRZ_REG(idxd)); +} + +static void disable_perfmon_pmu(struct idxd_device *idxd) +{ + iowrite32(COUNTER_FREEZE, PERFFRZ_REG(idxd)); +} + +static void perfmon_pmu_enable(struct pmu *pmu) +{ + struct idxd_device *idxd = pmu_to_idxd(pmu); + + enable_perfmon_pmu(idxd); +} + +static void perfmon_pmu_disable(struct pmu *pmu) +{ + struct idxd_device *idxd = pmu_to_idxd(pmu); + + disable_perfmon_pmu(idxd); +} + +static void skip_filter(int i) +{ + int j; + + for (j = i; j < PERFMON_FILTERS_MAX; j++) + perfmon_format_attrs[PERFMON_FILTERS_START + j] = + perfmon_format_attrs[PERFMON_FILTERS_START + j + 1]; +} + +static void idxd_pmu_init(struct idxd_pmu *idxd_pmu) +{ + int i; + + for (i = 0; i < PERFMON_FILTERS_MAX; i++) { + if (!test_bit(i, &idxd_pmu->supported_filters)) + skip_filter(i); + } + + idxd_pmu->pmu.name = idxd_pmu->name; + idxd_pmu->pmu.attr_groups = perfmon_attr_groups; + idxd_pmu->pmu.task_ctx_nr = perf_invalid_context; + idxd_pmu->pmu.event_init = perfmon_pmu_event_init; + idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable; + idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable; + idxd_pmu->pmu.add = perfmon_pmu_event_add; + idxd_pmu->pmu.del = perfmon_pmu_event_del; + idxd_pmu->pmu.start = perfmon_pmu_event_start; + idxd_pmu->pmu.stop = perfmon_pmu_event_stop; + idxd_pmu->pmu.read = perfmon_pmu_event_update; + idxd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; + idxd_pmu->pmu.module = THIS_MODULE; +} + +void perfmon_pmu_remove(struct idxd_device *idxd) +{ + if (!idxd->idxd_pmu) + return; + + cpuhp_state_remove_instance(cpuhp_slot, &idxd->idxd_pmu->cpuhp_node); + perf_pmu_unregister(&idxd->idxd_pmu->pmu); + kfree(idxd->idxd_pmu); + idxd->idxd_pmu = NULL; +} + +static int perf_event_cpu_online(unsigned int cpu, struct hlist_node *node) +{ + struct idxd_pmu *idxd_pmu; + + idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node); + + /* select the first online CPU as the designated reader */ + if (cpumask_empty(&perfmon_dsa_cpu_mask)) { + cpumask_set_cpu(cpu, &perfmon_dsa_cpu_mask); + idxd_pmu->cpu = cpu; + } + + return 0; +} + +static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node) +{ + struct idxd_pmu *idxd_pmu; + unsigned int target; + + idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node); + + if (!cpumask_test_and_clear_cpu(cpu, &perfmon_dsa_cpu_mask)) + return 0; + + target = cpumask_any_but(cpu_online_mask, cpu); + + /* migrate events if there is a valid target */ + if (target < nr_cpu_ids) + cpumask_set_cpu(target, &perfmon_dsa_cpu_mask); + else + target = -1; + + perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target); + + return 0; +} + +int perfmon_pmu_init(struct idxd_device *idxd) +{ + union idxd_perfcap perfcap; + struct idxd_pmu *idxd_pmu; + int rc = -ENODEV; + + /* + * perfmon module initialization failed; nothing to do + */ + if (!cpuhp_set_up) + return -ENODEV; + + /* + * If perfmon_offset or num_counters is 0, it means perfmon is + * not supported on this hardware. + */ + if (idxd->perfmon_offset == 0) + return -ENODEV; + + idxd_pmu = kzalloc(sizeof(*idxd_pmu), GFP_KERNEL); + if (!idxd_pmu) + return -ENOMEM; + + idxd_pmu->idxd = idxd; + idxd->idxd_pmu = idxd_pmu; + + if (idxd->data->type == IDXD_TYPE_DSA) { + rc = sprintf(idxd_pmu->name, "dsa%d", idxd->id); + if (rc < 0) + goto free; + } else if (idxd->data->type == IDXD_TYPE_IAX) { + rc = sprintf(idxd_pmu->name, "iax%d", idxd->id); + if (rc < 0) + goto free; + } else { + goto free; + } + + perfmon_reset(idxd); + + perfcap.bits = ioread64(PERFCAP_REG(idxd)); + + /* + * If the total number of perf counters is 0, stop further registration. + * This is necessary to support a driver running on a guest, which + * does not have perfmon support. + */ + if (perfcap.num_perf_counter == 0) + goto free; + + /* A counter width of 0 means it can't count */ + if (perfcap.counter_width == 0) + goto free; + + /* Overflow interrupt and counter freeze support must be available */ + if (!perfcap.overflow_interrupt || !perfcap.counter_freeze) + goto free; + + /* Number of event categories cannot be 0 */ + if (perfcap.num_event_category == 0) + goto free; + + /* + * We don't support per-counter capabilities for now. + */ + if (perfcap.cap_per_counter) + goto free; + + idxd_pmu->n_event_categories = perfcap.num_event_category; + idxd_pmu->supported_event_categories = perfcap.global_event_category; + idxd_pmu->per_counter_caps_supported = perfcap.cap_per_counter; + + /* check filter capability. If 0, then filters are not supported */ + idxd_pmu->supported_filters = perfcap.filter; + if (perfcap.filter) + idxd_pmu->n_filters = hweight8(perfcap.filter); + + /* Store the total number of counters and the counter width */ + idxd_pmu->n_counters = perfcap.num_perf_counter; + idxd_pmu->counter_width = perfcap.counter_width; + + idxd_pmu_init(idxd_pmu); + + rc = perf_pmu_register(&idxd_pmu->pmu, idxd_pmu->name, -1); + if (rc) + goto free; + + rc = cpuhp_state_add_instance(cpuhp_slot, &idxd_pmu->cpuhp_node); + if (rc) { + perf_pmu_unregister(&idxd->idxd_pmu->pmu); + goto free; + } +out: + return rc; +free: + kfree(idxd_pmu); + idxd->idxd_pmu = NULL; + + goto out; +} + +void __init perfmon_init(void) +{ + int rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "driver/dma/idxd/perf:online", + perf_event_cpu_online, + perf_event_cpu_offline); + if (WARN_ON(rc < 0)) + return; + + cpuhp_slot = rc; + cpuhp_set_up = true; +} + +void __exit perfmon_exit(void) +{ + if (cpuhp_set_up) + cpuhp_remove_multi_state(cpuhp_slot); +} diff --git a/drivers/dma/idxd/perfmon.h b/drivers/dma/idxd/perfmon.h new file mode 100644 index 000000000000..9a081a1bc605 --- /dev/null +++ b/drivers/dma/idxd/perfmon.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 Intel Corporation. All rights rsvd. 
*/ + +#ifndef _PERFMON_H_ +#define _PERFMON_H_ + +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/sbitmap.h> +#include <linux/dmaengine.h> +#include <linux/percpu-rwsem.h> +#include <linux/wait.h> +#include <linux/cdev.h> +#include <linux/uuid.h> +#include <linux/idxd.h> +#include <linux/perf_event.h> +#include "registers.h" + +static inline struct idxd_pmu *event_to_pmu(struct perf_event *event) +{ + struct idxd_pmu *idxd_pmu; + struct pmu *pmu; + + pmu = event->pmu; + idxd_pmu = container_of(pmu, struct idxd_pmu, pmu); + + return idxd_pmu; +} + +static inline struct idxd_device *event_to_idxd(struct perf_event *event) +{ + struct idxd_pmu *idxd_pmu; + struct pmu *pmu; + + pmu = event->pmu; + idxd_pmu = container_of(pmu, struct idxd_pmu, pmu); + + return idxd_pmu->idxd; +} + +static inline struct idxd_device *pmu_to_idxd(struct pmu *pmu) +{ + struct idxd_pmu *idxd_pmu; + + idxd_pmu = container_of(pmu, struct idxd_pmu, pmu); + + return idxd_pmu->idxd; +} + +enum dsa_perf_events { + DSA_PERF_EVENT_WQ = 0, + DSA_PERF_EVENT_ENGINE, + DSA_PERF_EVENT_ADDR_TRANS, + DSA_PERF_EVENT_OP, + DSA_PERF_EVENT_COMPL, + DSA_PERF_EVENT_MAX, +}; + +enum filter_enc { + FLT_WQ = 0, + FLT_TC, + FLT_PG_SZ, + FLT_XFER_SZ, + FLT_ENG, + FLT_MAX, +}; + +#define CONFIG_RESET 0x0000000000000001 +#define CNTR_RESET 0x0000000000000002 +#define CNTR_ENABLE 0x0000000000000001 +#define INTR_OVFL 0x0000000000000002 + +#define COUNTER_FREEZE 0x00000000FFFFFFFF +#define COUNTER_UNFREEZE 0x0000000000000000 +#define OVERFLOW_SIZE 32 + +#define CNTRCFG_ENABLE BIT(0) +#define CNTRCFG_IRQ_OVERFLOW BIT(1) +#define CNTRCFG_CATEGORY_SHIFT 8 +#define CNTRCFG_EVENT_SHIFT 32 + +#define PERFMON_TABLE_OFFSET(_idxd) \ +({ \ + typeof(_idxd) __idxd = (_idxd); \ + ((__idxd)->reg_base + (__idxd)->perfmon_offset); \ +}) +#define PERFMON_REG_OFFSET(idxd, offset) \ + (PERFMON_TABLE_OFFSET(idxd) + (offset)) + +#define PERFCAP_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFCAP_OFFSET)) +#define PERFRST_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFRST_OFFSET)) +#define OVFSTATUS_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_OVFSTATUS_OFFSET)) +#define PERFFRZ_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFFRZ_OFFSET)) + +#define FLTCFG_REG(idxd, cntr, flt) \ + (PERFMON_REG_OFFSET(idxd, IDXD_FLTCFG_OFFSET) + ((cntr) * 32) + ((flt) * 4)) + +#define CNTRCFG_REG(idxd, cntr) \ + (PERFMON_REG_OFFSET(idxd, IDXD_CNTRCFG_OFFSET) + ((cntr) * 8)) +#define CNTRDATA_REG(idxd, cntr) \ + (PERFMON_REG_OFFSET(idxd, IDXD_CNTRDATA_OFFSET) + ((cntr) * 8)) +#define CNTRCAP_REG(idxd, cntr) \ + (PERFMON_REG_OFFSET(idxd, IDXD_CNTRCAP_OFFSET) + ((cntr) * 8)) + +#define EVNTCAP_REG(idxd, category) \ + (PERFMON_REG_OFFSET(idxd, IDXD_EVNTCAP_OFFSET) + ((category) * 8)) + +#define DEFINE_PERFMON_FORMAT_ATTR(_name, _format) \ +static ssize_t __perfmon_idxd_##_name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ +static struct kobj_attribute format_attr_idxd_##_name = \ + __ATTR(_name, 0444, __perfmon_idxd_##_name##_show, NULL) + +#endif diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index 751ecb4f9f81..c970c3f025f0 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -24,8 +24,8 @@ union gen_cap_reg { u64 overlap_copy:1; u64 cache_control_mem:1; u64 cache_control_cache:1; + u64 cmd_cap:1; u64 rsvd:3; - u64 int_handle_req:1; u64 dest_readback:1; u64 drain_readback:1; u64 rsvd2:6; @@ -120,7 
+120,8 @@ union gencfg_reg { union genctrl_reg { struct { u32 softerr_int_en:1; - u32 rsvd:31; + u32 halt_int_en:1; + u32 rsvd:30; }; u32 bits; } __packed; @@ -180,8 +181,11 @@ enum idxd_cmd { IDXD_CMD_DRAIN_PASID, IDXD_CMD_ABORT_PASID, IDXD_CMD_REQUEST_INT_HANDLE, + IDXD_CMD_RELEASE_INT_HANDLE, }; +#define CMD_INT_HANDLE_IMS 0x10000 + #define IDXD_CMDSTS_OFFSET 0xa8 union cmdsts_reg { struct { @@ -193,6 +197,8 @@ union cmdsts_reg { u32 bits; } __packed; #define IDXD_CMDSTS_ACTIVE 0x80000000 +#define IDXD_CMDSTS_ERR_MASK 0xff +#define IDXD_CMDSTS_RES_SHIFT 8 enum idxd_cmdsts_err { IDXD_CMDSTS_SUCCESS = 0, @@ -228,6 +234,8 @@ enum idxd_cmdsts_err { IDXD_CMDSTS_ERR_NO_HANDLE, }; +#define IDXD_CMDCAP_OFFSET 0xb0 + #define IDXD_SWERR_OFFSET 0xc0 #define IDXD_SWERR_VALID 0x00000001 #define IDXD_SWERR_OVERFLOW 0x00000002 @@ -378,4 +386,112 @@ union wqcfg { #define GRPENGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 32) #define GRPFLGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 40) +/* The performance monitor registers follow */ +#define IDXD_PERFCAP_OFFSET 0x0 +union idxd_perfcap { + struct { + u64 num_perf_counter:6; + u64 rsvd1:2; + u64 counter_width:8; + u64 num_event_category:4; + u64 global_event_category:16; + u64 filter:8; + u64 rsvd2:8; + u64 cap_per_counter:1; + u64 writeable_counter:1; + u64 counter_freeze:1; + u64 overflow_interrupt:1; + u64 rsvd3:8; + }; + u64 bits; +} __packed; + +#define IDXD_EVNTCAP_OFFSET 0x80 +union idxd_evntcap { + struct { + u64 events:28; + u64 rsvd:36; + }; + u64 bits; +} __packed; + +struct idxd_event { + union { + struct { + u32 event_category:4; + u32 events:28; + }; + u32 val; + }; +} __packed; + +#define IDXD_CNTRCAP_OFFSET 0x800 +struct idxd_cntrcap { + union { + struct { + u32 counter_width:8; + u32 rsvd:20; + u32 num_events:4; + }; + u32 val; + }; + struct idxd_event events[]; +} __packed; + +#define IDXD_PERFRST_OFFSET 0x10 +union idxd_perfrst { + struct { + u32 perfrst_config:1; + u32 perfrst_counter:1; + u32 rsvd:30; + }; + u32 val; +} __packed; + +#define IDXD_OVFSTATUS_OFFSET 0x30 +#define IDXD_PERFFRZ_OFFSET 0x20 +#define IDXD_CNTRCFG_OFFSET 0x100 +union idxd_cntrcfg { + struct { + u64 enable:1; + u64 interrupt_ovf:1; + u64 global_freeze_ovf:1; + u64 rsvd1:5; + u64 event_category:4; + u64 rsvd2:20; + u64 events:28; + u64 rsvd3:4; + }; + u64 val; +} __packed; + +#define IDXD_FLTCFG_OFFSET 0x300 + +#define IDXD_CNTRDATA_OFFSET 0x200 +union idxd_cntrdata { + struct { + u64 event_count_value; + }; + u64 val; +} __packed; + +union event_cfg { + struct { + u64 event_cat:4; + u64 event_enc:28; + }; + u64 val; +} __packed; + +union filter_cfg { + struct { + u64 wq:32; + u64 tc:8; + u64 pg_sz:4; + u64 xfer_sz:8; + u64 eng:8; + }; + u64 val; +} __packed; + #endif diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c index a7a61bcc17d5..19afb62abaff 100644 --- a/drivers/dma/idxd/submit.c +++ b/drivers/dma/idxd/submit.c @@ -15,18 +15,30 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu) desc = wq->descs[idx]; memset(desc->hw, 0, sizeof(struct dsa_hw_desc)); - memset(desc->completion, 0, idxd->compl_size); + memset(desc->completion, 0, idxd->data->compl_size); desc->cpu = cpu; if (device_pasid_enabled(idxd)) desc->hw->pasid = idxd->pasid; /* - * Descriptor completion vectors are 1-8 for MSIX. We will round - * robin through the 8 vectors. + * Descriptor completion vectors are 1...N for MSIX. We will round + * robin through the N vectors. 
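+ * The increment below maps vec_ptr onto 1..num_wq_irqs, skipping
+ * vector 0, which is reserved for the misc interrupt.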
*/ wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1; - desc->hw->int_handle = wq->vec_ptr; + if (!idxd->int_handles) { + desc->hw->int_handle = wq->vec_ptr; + } else { + desc->vector = wq->vec_ptr; + /* + * int_handles are only for descriptor completion. However for device + * MSIX enumeration, vec 0 is used for misc interrupts. Therefore even + * though we are rotating through 1...N for descriptor interrupts, we + * need to acquire the int_handles from 0..N-1. + */ + desc->hw->int_handle = idxd->int_handles[desc->vector - 1]; + } + return desc; } @@ -79,13 +91,15 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc) int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) { struct idxd_device *idxd = wq->idxd; - int vec = desc->hw->int_handle; void __iomem *portal; int rc; if (idxd->state != IDXD_DEV_ENABLED) return -EIO; + if (!percpu_ref_tryget_live(&wq->wq_active)) + return -ENXIO; + portal = wq->portal; /* @@ -108,13 +122,25 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) return rc; } + percpu_ref_put(&wq->wq_active); + /* * Pending the descriptor to the lockless list for the irq_entry * that we designated the descriptor to. */ - if (desc->hw->flags & IDXD_OP_FLAG_RCI) - llist_add(&desc->llnode, - &idxd->irq_entries[vec].pending_llist); + if (desc->hw->flags & IDXD_OP_FLAG_RCI) { + int vec; + + /* + * On the host kernel, vec is the value assigned to the interrupt + * handle, which is the index of the MSIX vector. On a guest, the + * int_handle can't be used since it is the index into IMS for the + * entire device; the guest device-local index is used instead. + */ + vec = !idxd->int_handles ? desc->hw->int_handle : desc->vector; + llist_add(&desc->llnode, &idxd->irq_entries[vec].pending_llist); + } return 0; } diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 18bf4d148989..0460d58e3941 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -16,69 +16,6 @@ static char *idxd_wq_type_names[] = { [IDXD_WQT_USER] = "user", }; -static void idxd_conf_device_release(struct device *dev) -{ - dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev)); -} - -static struct device_type idxd_group_device_type = { - .name = "group", - .release = idxd_conf_device_release, -}; - -static struct device_type idxd_wq_device_type = { - .name = "wq", - .release = idxd_conf_device_release, -}; - -static struct device_type idxd_engine_device_type = { - .name = "engine", - .release = idxd_conf_device_release, -}; - -static struct device_type dsa_device_type = { - .name = "dsa", - .release = idxd_conf_device_release, -}; - -static struct device_type iax_device_type = { - .name = "iax", - .release = idxd_conf_device_release, -}; - -static inline bool is_dsa_dev(struct device *dev) -{ - return dev ? dev->type == &dsa_device_type : false; -} - -static inline bool is_iax_dev(struct device *dev) -{ - return dev ? dev->type == &iax_device_type : false; -} - -static inline bool is_idxd_dev(struct device *dev) -{ - return is_dsa_dev(dev) || is_iax_dev(dev); -} - -static inline bool is_idxd_wq_dev(struct device *dev) -{ - return dev ? 
dev->type == &idxd_wq_device_type : false; -} - -static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq) -{ - if (wq->type == IDXD_WQT_KERNEL && - strcmp(wq->name, "dmaengine") == 0) - return true; - return false; -} - -static inline bool is_idxd_wq_cdev(struct idxd_wq *wq) -{ - return wq->type == IDXD_WQT_USER; -} - static int idxd_config_bus_match(struct device *dev, struct device_driver *drv) { @@ -110,9 +47,131 @@ static int idxd_config_bus_match(struct device *dev, return matched; } -static int idxd_config_bus_probe(struct device *dev) +static int enable_wq(struct idxd_wq *wq) { + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + unsigned long flags; int rc; + + mutex_lock(&wq->wq_lock); + + if (idxd->state != IDXD_DEV_ENABLED) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "Enabling while device not enabled.\n"); + return -EPERM; + } + + if (wq->state != IDXD_WQ_DISABLED) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ %d already enabled.\n", wq->id); + return -EBUSY; + } + + if (!wq->group) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ not attached to group.\n"); + return -EINVAL; + } + + if (strlen(wq->name) == 0) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ name not set.\n"); + return -EINVAL; + } + + /* Shared WQ checks */ + if (wq_shared(wq)) { + if (!device_swq_supported(idxd)) { + dev_warn(dev, "PASID not enabled and shared WQ.\n"); + mutex_unlock(&wq->wq_lock); + return -ENXIO; + } + /* + * Shared wq with the threshold set to 0 means the user + * did not set the threshold or transitioned from a + * dedicated wq but did not set threshold. A value + * of 0 would effectively disable the shared wq. The + * driver does not allow a value of 0 to be set for + * threshold via sysfs. + */ + if (wq->threshold == 0) { + dev_warn(dev, "Shared WQ and threshold 0.\n"); + mutex_unlock(&wq->wq_lock); + return -EINVAL; + } + } + + rc = idxd_wq_alloc_resources(wq); + if (rc < 0) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ resource alloc failed\n"); + return rc; + } + + spin_lock_irqsave(&idxd->dev_lock, flags); + if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + rc = idxd_device_config(idxd); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + if (rc < 0) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "Writing WQ %d config failed: %d\n", wq->id, rc); + return rc; + } + + rc = idxd_wq_enable(wq); + if (rc < 0) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ %d enabling failed: %d\n", wq->id, rc); + return rc; + } + + rc = idxd_wq_map_portal(wq); + if (rc < 0) { + dev_warn(dev, "wq portal mapping failed: %d\n", rc); + rc = idxd_wq_disable(wq); + if (rc < 0) + dev_warn(dev, "IDXD wq disable failed\n"); + mutex_unlock(&wq->wq_lock); + return rc; + } + + wq->client_count = 0; + + if (wq->type == IDXD_WQT_KERNEL) { + rc = idxd_wq_init_percpu_ref(wq); + if (rc < 0) { + dev_dbg(dev, "percpu_ref setup failed\n"); + mutex_unlock(&wq->wq_lock); + return rc; + } + } + + if (is_idxd_wq_dmaengine(wq)) { + rc = idxd_register_dma_channel(wq); + if (rc < 0) { + dev_dbg(dev, "DMA channel register failed\n"); + mutex_unlock(&wq->wq_lock); + return rc; + } + } else if (is_idxd_wq_cdev(wq)) { + rc = idxd_wq_add_cdev(wq); + if (rc < 0) { + dev_dbg(dev, "Cdev creation failed\n"); + mutex_unlock(&wq->wq_lock); + return rc; + } + } + + mutex_unlock(&wq->wq_lock); + dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev)); + + return 0; +} + +static int idxd_config_bus_probe(struct device *dev) +{ + int rc = 0; unsigned long flags; dev_dbg(dev, "%s 
called\n", __func__); @@ -130,7 +189,8 @@ static int idxd_config_bus_probe(struct device *dev) /* Perform IDXD configuration and enabling */ spin_lock_irqsave(&idxd->dev_lock, flags); - rc = idxd_device_config(idxd); + if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + rc = idxd_device_config(idxd); spin_unlock_irqrestore(&idxd->dev_lock, flags); if (rc < 0) { module_put(THIS_MODULE); @@ -157,115 +217,8 @@ static int idxd_config_bus_probe(struct device *dev) return 0; } else if (is_idxd_wq_dev(dev)) { struct idxd_wq *wq = confdev_to_wq(dev); - struct idxd_device *idxd = wq->idxd; - - mutex_lock(&wq->wq_lock); - - if (idxd->state != IDXD_DEV_ENABLED) { - mutex_unlock(&wq->wq_lock); - dev_warn(dev, "Enabling while device not enabled.\n"); - return -EPERM; - } - - if (wq->state != IDXD_WQ_DISABLED) { - mutex_unlock(&wq->wq_lock); - dev_warn(dev, "WQ %d already enabled.\n", wq->id); - return -EBUSY; - } - - if (!wq->group) { - mutex_unlock(&wq->wq_lock); - dev_warn(dev, "WQ not attached to group.\n"); - return -EINVAL; - } - - if (strlen(wq->name) == 0) { - mutex_unlock(&wq->wq_lock); - dev_warn(dev, "WQ name not set.\n"); - return -EINVAL; - } - - /* Shared WQ checks */ - if (wq_shared(wq)) { - if (!device_swq_supported(idxd)) { - dev_warn(dev, - "PASID not enabled and shared WQ.\n"); - mutex_unlock(&wq->wq_lock); - return -ENXIO; - } - /* - * Shared wq with the threshold set to 0 means the user - * did not set the threshold or transitioned from a - * dedicated wq but did not set threshold. A value - * of 0 would effectively disable the shared wq. The - * driver does not allow a value of 0 to be set for - * threshold via sysfs. - */ - if (wq->threshold == 0) { - dev_warn(dev, - "Shared WQ and threshold 0.\n"); - mutex_unlock(&wq->wq_lock); - return -EINVAL; - } - } - - rc = idxd_wq_alloc_resources(wq); - if (rc < 0) { - mutex_unlock(&wq->wq_lock); - dev_warn(dev, "WQ resource alloc failed\n"); - return rc; - } - - spin_lock_irqsave(&idxd->dev_lock, flags); - rc = idxd_device_config(idxd); - spin_unlock_irqrestore(&idxd->dev_lock, flags); - if (rc < 0) { - mutex_unlock(&wq->wq_lock); - dev_warn(dev, "Writing WQ %d config failed: %d\n", - wq->id, rc); - return rc; - } - - rc = idxd_wq_enable(wq); - if (rc < 0) { - mutex_unlock(&wq->wq_lock); - dev_warn(dev, "WQ %d enabling failed: %d\n", - wq->id, rc); - return rc; - } - - rc = idxd_wq_map_portal(wq); - if (rc < 0) { - dev_warn(dev, "wq portal mapping failed: %d\n", rc); - rc = idxd_wq_disable(wq); - if (rc < 0) - dev_warn(dev, "IDXD wq disable failed\n"); - mutex_unlock(&wq->wq_lock); - return rc; - } - - wq->client_count = 0; - - dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev)); - if (is_idxd_wq_dmaengine(wq)) { - rc = idxd_register_dma_channel(wq); - if (rc < 0) { - dev_dbg(dev, "DMA channel register failed\n"); - mutex_unlock(&wq->wq_lock); - return rc; - } - } else if (is_idxd_wq_cdev(wq)) { - rc = idxd_wq_add_cdev(wq); - if (rc < 0) { - dev_dbg(dev, "Cdev creation failed\n"); - mutex_unlock(&wq->wq_lock); - return rc; - } - } - - mutex_unlock(&wq->wq_lock); - return 0; + return enable_wq(wq); } return -ENODEV; @@ -283,6 +236,9 @@ static void disable_wq(struct idxd_wq *wq) return; } + if (wq->type == IDXD_WQT_KERNEL) + idxd_wq_quiesce(wq); + if (is_idxd_wq_dmaengine(wq)) idxd_unregister_dma_channel(wq); else if (is_idxd_wq_cdev(wq)) @@ -322,7 +278,7 @@ static int idxd_config_bus_remove(struct device *dev) dev_dbg(dev, "%s removing dev %s\n", __func__, dev_name(&idxd->conf_dev)); for (i = 0; i < idxd->max_wqs; i++) { - struct 
idxd_wq *wq = &idxd->wqs[i]; + struct idxd_wq *wq = idxd->wqs[i]; if (wq->state == IDXD_WQ_DISABLED) continue; @@ -333,12 +289,14 @@ static int idxd_config_bus_remove(struct device *dev) idxd_unregister_dma_device(idxd); rc = idxd_device_disable(idxd); - for (i = 0; i < idxd->max_wqs; i++) { - struct idxd_wq *wq = &idxd->wqs[i]; + if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = idxd->wqs[i]; - mutex_lock(&wq->wq_lock); - idxd_wq_disable_cleanup(wq); - mutex_unlock(&wq->wq_lock); + mutex_lock(&wq->wq_lock); + idxd_wq_disable_cleanup(wq); + mutex_unlock(&wq->wq_lock); + } } module_put(THIS_MODULE); if (rc < 0) @@ -364,19 +322,6 @@ struct bus_type dsa_bus_type = { .shutdown = idxd_config_bus_shutdown, }; -struct bus_type iax_bus_type = { - .name = "iax", - .match = idxd_config_bus_match, - .probe = idxd_config_bus_probe, - .remove = idxd_config_bus_remove, - .shutdown = idxd_config_bus_shutdown, -}; - -static struct bus_type *idxd_bus_types[] = { - &dsa_bus_type, - &iax_bus_type -}; - static struct idxd_device_driver dsa_drv = { .drv = { .name = "dsa", @@ -386,60 +331,15 @@ static struct idxd_device_driver dsa_drv = { }, }; -static struct idxd_device_driver iax_drv = { - .drv = { - .name = "iax", - .bus = &iax_bus_type, - .owner = THIS_MODULE, - .mod_name = KBUILD_MODNAME, - }, -}; - -static struct idxd_device_driver *idxd_drvs[] = { - &dsa_drv, - &iax_drv -}; - -struct bus_type *idxd_get_bus_type(struct idxd_device *idxd) -{ - return idxd_bus_types[idxd->type]; -} - -static struct device_type *idxd_get_device_type(struct idxd_device *idxd) -{ - if (idxd->type == IDXD_TYPE_DSA) - return &dsa_device_type; - else if (idxd->type == IDXD_TYPE_IAX) - return &iax_device_type; - else - return NULL; -} - /* IDXD generic driver setup */ int idxd_register_driver(void) { - int i, rc; - - for (i = 0; i < IDXD_TYPE_MAX; i++) { - rc = driver_register(&idxd_drvs[i]->drv); - if (rc < 0) - goto drv_fail; - } - - return 0; - -drv_fail: - while (--i >= 0) - driver_unregister(&idxd_drvs[i]->drv); - return rc; + return driver_register(&dsa_drv.drv); } void idxd_unregister_driver(void) { - int i; - - for (i = 0; i < IDXD_TYPE_MAX; i++) - driver_unregister(&idxd_drvs[i]->drv); + driver_unregister(&dsa_drv.drv); } /* IDXD engine attributes */ @@ -450,9 +350,9 @@ static ssize_t engine_group_id_show(struct device *dev, container_of(dev, struct idxd_engine, conf_dev); if (engine->group) - return sprintf(buf, "%d\n", engine->group->id); + return sysfs_emit(buf, "%d\n", engine->group->id); else - return sprintf(buf, "%d\n", -1); + return sysfs_emit(buf, "%d\n", -1); } static ssize_t engine_group_id_store(struct device *dev, @@ -488,7 +388,7 @@ static ssize_t engine_group_id_store(struct device *dev, if (prevg) prevg->num_engines--; - engine->group = &idxd->groups[id]; + engine->group = idxd->groups[id]; engine->group->num_engines++; return count; @@ -512,6 +412,19 @@ static const struct attribute_group *idxd_engine_attribute_groups[] = { NULL, }; +static void idxd_conf_engine_release(struct device *dev) +{ + struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev); + + kfree(engine); +} + +struct device_type idxd_engine_device_type = { + .name = "engine", + .release = idxd_conf_engine_release, + .groups = idxd_engine_attribute_groups, +}; + /* Group attributes */ static void idxd_set_free_tokens(struct idxd_device *idxd) @@ -519,7 +432,7 @@ static void idxd_set_free_tokens(struct idxd_device *idxd) int i, tokens; for (i = 0, tokens = 
0; i < idxd->max_groups; i++) { - struct idxd_group *g = &idxd->groups[i]; + struct idxd_group *g = idxd->groups[i]; tokens += g->tokens_reserved; } @@ -534,7 +447,7 @@ static ssize_t group_tokens_reserved_show(struct device *dev, struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev); - return sprintf(buf, "%u\n", group->tokens_reserved); + return sysfs_emit(buf, "%u\n", group->tokens_reserved); } static ssize_t group_tokens_reserved_store(struct device *dev, @@ -551,7 +464,7 @@ static ssize_t group_tokens_reserved_store(struct device *dev, if (rc < 0) return -EINVAL; - if (idxd->type == IDXD_TYPE_IAX) + if (idxd->data->type == IDXD_TYPE_IAX) return -EOPNOTSUPP; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) @@ -582,7 +495,7 @@ static ssize_t group_tokens_allowed_show(struct device *dev, struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev); - return sprintf(buf, "%u\n", group->tokens_allowed); + return sysfs_emit(buf, "%u\n", group->tokens_allowed); } static ssize_t group_tokens_allowed_store(struct device *dev, @@ -599,7 +512,7 @@ static ssize_t group_tokens_allowed_store(struct device *dev, if (rc < 0) return -EINVAL; - if (idxd->type == IDXD_TYPE_IAX) + if (idxd->data->type == IDXD_TYPE_IAX) return -EOPNOTSUPP; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) @@ -627,7 +540,7 @@ static ssize_t group_use_token_limit_show(struct device *dev, struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev); - return sprintf(buf, "%u\n", group->use_token_limit); + return sysfs_emit(buf, "%u\n", group->use_token_limit); } static ssize_t group_use_token_limit_store(struct device *dev, @@ -644,7 +557,7 @@ static ssize_t group_use_token_limit_store(struct device *dev, if (rc < 0) return -EINVAL; - if (idxd->type == IDXD_TYPE_IAX) + if (idxd->data->type == IDXD_TYPE_IAX) return -EOPNOTSUPP; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) @@ -670,22 +583,22 @@ static ssize_t group_engines_show(struct device *dev, struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev); int i, rc = 0; - char *tmp = buf; struct idxd_device *idxd = group->idxd; for (i = 0; i < idxd->max_engines; i++) { - struct idxd_engine *engine = &idxd->engines[i]; + struct idxd_engine *engine = idxd->engines[i]; if (!engine->group) continue; if (engine->group->id == group->id) - rc += sprintf(tmp + rc, "engine%d.%d ", - idxd->id, engine->id); + rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id); } + if (!rc) + return 0; rc--; - rc += sprintf(tmp + rc, "\n"); + rc += sysfs_emit_at(buf, rc, "\n"); return rc; } @@ -699,22 +612,22 @@ static ssize_t group_work_queues_show(struct device *dev, struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev); int i, rc = 0; - char *tmp = buf; struct idxd_device *idxd = group->idxd; for (i = 0; i < idxd->max_wqs; i++) { - struct idxd_wq *wq = &idxd->wqs[i]; + struct idxd_wq *wq = idxd->wqs[i]; if (!wq->group) continue; if (wq->group->id == group->id) - rc += sprintf(tmp + rc, "wq%d.%d ", - idxd->id, wq->id); + rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id); } + if (!rc) + return 0; rc--; - rc += sprintf(tmp + rc, "\n"); + rc += sysfs_emit_at(buf, rc, "\n"); return rc; } @@ -729,7 +642,7 @@ static ssize_t group_traffic_class_a_show(struct device *dev, struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev); - return sprintf(buf, "%d\n", group->tc_a); + return sysfs_emit(buf, "%d\n", group->tc_a); } static ssize_t group_traffic_class_a_store(struct 
device *dev, @@ -770,7 +683,7 @@ static ssize_t group_traffic_class_b_show(struct device *dev, struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev); - return sprintf(buf, "%d\n", group->tc_b); + return sysfs_emit(buf, "%d\n", group->tc_b); } static ssize_t group_traffic_class_b_store(struct device *dev, @@ -824,13 +737,26 @@ static const struct attribute_group *idxd_group_attribute_groups[] = { NULL, }; +static void idxd_conf_group_release(struct device *dev) +{ + struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev); + + kfree(group); +} + +struct device_type idxd_group_device_type = { + .name = "group", + .release = idxd_conf_group_release, + .groups = idxd_group_attribute_groups, +}; + /* IDXD work queue attribs */ static ssize_t wq_clients_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); - return sprintf(buf, "%d\n", wq->client_count); + return sysfs_emit(buf, "%d\n", wq->client_count); } static struct device_attribute dev_attr_wq_clients = @@ -843,12 +769,12 @@ static ssize_t wq_state_show(struct device *dev, switch (wq->state) { case IDXD_WQ_DISABLED: - return sprintf(buf, "disabled\n"); + return sysfs_emit(buf, "disabled\n"); case IDXD_WQ_ENABLED: - return sprintf(buf, "enabled\n"); + return sysfs_emit(buf, "enabled\n"); } - return sprintf(buf, "unknown\n"); + return sysfs_emit(buf, "unknown\n"); } static struct device_attribute dev_attr_wq_state = @@ -860,9 +786,9 @@ static ssize_t wq_group_id_show(struct device *dev, struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); if (wq->group) - return sprintf(buf, "%u\n", wq->group->id); + return sysfs_emit(buf, "%u\n", wq->group->id); else - return sprintf(buf, "-1\n"); + return sysfs_emit(buf, "-1\n"); } static ssize_t wq_group_id_store(struct device *dev, @@ -896,7 +822,7 @@ static ssize_t wq_group_id_store(struct device *dev, return count; } - group = &idxd->groups[id]; + group = idxd->groups[id]; prevg = wq->group; if (prevg) @@ -914,8 +840,7 @@ static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr, { struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); - return sprintf(buf, "%s\n", - wq_dedicated(wq) ? "dedicated" : "shared"); + return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? 
"dedicated" : "shared"); } static ssize_t wq_mode_store(struct device *dev, @@ -951,7 +876,7 @@ static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr, { struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); - return sprintf(buf, "%u\n", wq->size); + return sysfs_emit(buf, "%u\n", wq->size); } static int total_claimed_wq_size(struct idxd_device *idxd) @@ -960,7 +885,7 @@ static int total_claimed_wq_size(struct idxd_device *idxd) int wq_size = 0; for (i = 0; i < idxd->max_wqs; i++) { - struct idxd_wq *wq = &idxd->wqs[i]; + struct idxd_wq *wq = idxd->wqs[i]; wq_size += wq->size; } @@ -1002,7 +927,7 @@ static ssize_t wq_priority_show(struct device *dev, { struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); - return sprintf(buf, "%u\n", wq->priority); + return sysfs_emit(buf, "%u\n", wq->priority); } static ssize_t wq_priority_store(struct device *dev, @@ -1039,8 +964,7 @@ static ssize_t wq_block_on_fault_show(struct device *dev, { struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); - return sprintf(buf, "%u\n", - test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags)); + return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags)); } static ssize_t wq_block_on_fault_store(struct device *dev, @@ -1079,7 +1003,7 @@ static ssize_t wq_threshold_show(struct device *dev, { struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); - return sprintf(buf, "%u\n", wq->threshold); + return sysfs_emit(buf, "%u\n", wq->threshold); } static ssize_t wq_threshold_store(struct device *dev, @@ -1122,15 +1046,12 @@ static ssize_t wq_type_show(struct device *dev, switch (wq->type) { case IDXD_WQT_KERNEL: - return sprintf(buf, "%s\n", - idxd_wq_type_names[IDXD_WQT_KERNEL]); + return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]); case IDXD_WQT_USER: - return sprintf(buf, "%s\n", - idxd_wq_type_names[IDXD_WQT_USER]); + return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]); case IDXD_WQT_NONE: default: - return sprintf(buf, "%s\n", - idxd_wq_type_names[IDXD_WQT_NONE]); + return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]); } return -EINVAL; @@ -1171,7 +1092,7 @@ static ssize_t wq_name_show(struct device *dev, { struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); - return sprintf(buf, "%s\n", wq->name); + return sysfs_emit(buf, "%s\n", wq->name); } static ssize_t wq_name_store(struct device *dev, @@ -1206,8 +1127,16 @@ static ssize_t wq_cdev_minor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + int minor = -1; - return sprintf(buf, "%d\n", wq->idxd_cdev.minor); + mutex_lock(&wq->wq_lock); + if (wq->idxd_cdev) + minor = wq->idxd_cdev->minor; + mutex_unlock(&wq->wq_lock); + + if (minor == -1) + return -ENXIO; + return sysfs_emit(buf, "%d\n", minor); } static struct device_attribute dev_attr_wq_cdev_minor = @@ -1233,7 +1162,7 @@ static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attri { struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); - return sprintf(buf, "%llu\n", wq->max_xfer_bytes); + return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes); } static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr, @@ -1267,7 +1196,7 @@ static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribut { struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); - return sprintf(buf, "%u\n", wq->max_batch_size); + return 
sysfs_emit(buf, "%u\n", wq->max_batch_size); } static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr, @@ -1300,7 +1229,7 @@ static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute * { struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); - return sprintf(buf, "%u\n", wq->ats_dis); + return sysfs_emit(buf, "%u\n", wq->ats_dis); } static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr, @@ -1356,6 +1285,20 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = { NULL, }; +static void idxd_conf_wq_release(struct device *dev) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + kfree(wq->wqcfg); + kfree(wq); +} + +struct device_type idxd_wq_device_type = { + .name = "wq", + .release = idxd_conf_wq_release, + .groups = idxd_wq_attribute_groups, +}; + /* IDXD device attribs */ static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1363,7 +1306,7 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr, struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); - return sprintf(buf, "%#x\n", idxd->hw.version); + return sysfs_emit(buf, "%#x\n", idxd->hw.version); } static DEVICE_ATTR_RO(version); @@ -1374,7 +1317,7 @@ static ssize_t max_work_queues_size_show(struct device *dev, struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); - return sprintf(buf, "%u\n", idxd->max_wq_size); + return sysfs_emit(buf, "%u\n", idxd->max_wq_size); } static DEVICE_ATTR_RO(max_work_queues_size); @@ -1384,7 +1327,7 @@ static ssize_t max_groups_show(struct device *dev, struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); - return sprintf(buf, "%u\n", idxd->max_groups); + return sysfs_emit(buf, "%u\n", idxd->max_groups); } static DEVICE_ATTR_RO(max_groups); @@ -1394,7 +1337,7 @@ static ssize_t max_work_queues_show(struct device *dev, struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); - return sprintf(buf, "%u\n", idxd->max_wqs); + return sysfs_emit(buf, "%u\n", idxd->max_wqs); } static DEVICE_ATTR_RO(max_work_queues); @@ -1404,7 +1347,7 @@ static ssize_t max_engines_show(struct device *dev, struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); - return sprintf(buf, "%u\n", idxd->max_engines); + return sysfs_emit(buf, "%u\n", idxd->max_engines); } static DEVICE_ATTR_RO(max_engines); @@ -1414,7 +1357,7 @@ static ssize_t numa_node_show(struct device *dev, struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); - return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); + return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); } static DEVICE_ATTR_RO(numa_node); @@ -1424,7 +1367,7 @@ static ssize_t max_batch_size_show(struct device *dev, struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); - return sprintf(buf, "%u\n", idxd->max_batch_size); + return sysfs_emit(buf, "%u\n", idxd->max_batch_size); } static DEVICE_ATTR_RO(max_batch_size); @@ -1435,7 +1378,7 @@ static ssize_t max_transfer_size_show(struct device *dev, struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); - return sprintf(buf, "%llu\n", idxd->max_xfer_bytes); + return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes); } static DEVICE_ATTR_RO(max_transfer_size); @@ -1461,7 +1404,7 @@ static ssize_t gen_cap_show(struct device *dev, struct idxd_device *idxd = container_of(dev, struct 
idxd_device, conf_dev); - return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits); + return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits); } static DEVICE_ATTR_RO(gen_cap); @@ -1471,8 +1414,7 @@ static ssize_t configurable_show(struct device *dev, struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); - return sprintf(buf, "%u\n", - test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); + return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); } static DEVICE_ATTR_RO(configurable); @@ -1486,13 +1428,13 @@ static ssize_t clients_show(struct device *dev, spin_lock_irqsave(&idxd->dev_lock, flags); for (i = 0; i < idxd->max_wqs; i++) { - struct idxd_wq *wq = &idxd->wqs[i]; + struct idxd_wq *wq = idxd->wqs[i]; count += wq->client_count; } spin_unlock_irqrestore(&idxd->dev_lock, flags); - return sprintf(buf, "%d\n", count); + return sysfs_emit(buf, "%d\n", count); } static DEVICE_ATTR_RO(clients); @@ -1502,7 +1444,7 @@ static ssize_t pasid_enabled_show(struct device *dev, struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); - return sprintf(buf, "%u\n", device_pasid_enabled(idxd)); + return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd)); } static DEVICE_ATTR_RO(pasid_enabled); @@ -1515,14 +1457,14 @@ static ssize_t state_show(struct device *dev, switch (idxd->state) { case IDXD_DEV_DISABLED: case IDXD_DEV_CONF_READY: - return sprintf(buf, "disabled\n"); + return sysfs_emit(buf, "disabled\n"); case IDXD_DEV_ENABLED: - return sprintf(buf, "enabled\n"); + return sysfs_emit(buf, "enabled\n"); case IDXD_DEV_HALTED: - return sprintf(buf, "halted\n"); + return sysfs_emit(buf, "halted\n"); } - return sprintf(buf, "unknown\n"); + return sysfs_emit(buf, "unknown\n"); } static DEVICE_ATTR_RO(state); @@ -1536,10 +1478,10 @@ static ssize_t errors_show(struct device *dev, spin_lock_irqsave(&idxd->dev_lock, flags); for (i = 0; i < 4; i++) - out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]); + out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]); spin_unlock_irqrestore(&idxd->dev_lock, flags); out--; - out += sprintf(buf + out, "\n"); + out += sysfs_emit_at(buf, out, "\n"); return out; } static DEVICE_ATTR_RO(errors); @@ -1550,7 +1492,7 @@ static ssize_t max_tokens_show(struct device *dev, struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); - return sprintf(buf, "%u\n", idxd->max_tokens); + return sysfs_emit(buf, "%u\n", idxd->max_tokens); } static DEVICE_ATTR_RO(max_tokens); @@ -1560,7 +1502,7 @@ static ssize_t token_limit_show(struct device *dev, struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); - return sprintf(buf, "%u\n", idxd->token_limit); + return sysfs_emit(buf, "%u\n", idxd->token_limit); } static ssize_t token_limit_store(struct device *dev, @@ -1599,7 +1541,7 @@ static ssize_t cdev_major_show(struct device *dev, struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); - return sprintf(buf, "%u\n", idxd->major); + return sysfs_emit(buf, "%u\n", idxd->major); } static DEVICE_ATTR_RO(cdev_major); @@ -1608,7 +1550,7 @@ static ssize_t cmd_status_show(struct device *dev, { struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); - return sprintf(buf, "%#x\n", idxd->cmd_status); + return sysfs_emit(buf, "%#x\n", idxd->cmd_status); } static DEVICE_ATTR_RO(cmd_status); @@ -1644,183 +1586,161 @@ static const struct attribute_group *idxd_attribute_groups[] = { NULL, }; -static int idxd_setup_engine_sysfs(struct idxd_device *idxd) 
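/*
 * Illustrative sketch (not part of the patch; all names below are made up):
 * the 'struct device' lifetime pattern this hunk switches to. Objects are
 * allocated and device_initialize()d at probe time, so registration only
 * needs device_add(), and the final put_device() frees the container via the
 * device_type ->release() callback instead of explicit kfree() calls on
 * every unwind path.
 */
#include <linux/device.h>
#include <linux/slab.h>

struct example_obj {
	struct device conf_dev;
};

static void example_release(struct device *dev)
{
	struct example_obj *obj = container_of(dev, struct example_obj, conf_dev);

	kfree(obj);	/* last reference dropped, container may be freed */
}

static const struct device_type example_type = {
	.name = "example",
	.release = example_release,
};

/*
 * Usage sketch:
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	device_initialize(&obj->conf_dev);
 *	obj->conf_dev.type = &example_type;
 *	rc = device_add(&obj->conf_dev);
 *	if (rc)
 *		put_device(&obj->conf_dev);	// ends in example_release()
 */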
+static void idxd_conf_device_release(struct device *dev) { - struct device *dev = &idxd->pdev->dev; - int i, rc; + struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev); + + kfree(idxd->groups); + kfree(idxd->wqs); + kfree(idxd->engines); + kfree(idxd->irq_entries); + kfree(idxd->int_handles); + ida_free(&idxd_ida, idxd->id); + kfree(idxd); +} + +struct device_type dsa_device_type = { + .name = "dsa", + .release = idxd_conf_device_release, + .groups = idxd_attribute_groups, +}; + +struct device_type iax_device_type = { + .name = "iax", + .release = idxd_conf_device_release, + .groups = idxd_attribute_groups, +}; + +static int idxd_register_engine_devices(struct idxd_device *idxd) +{ + int i, j, rc; for (i = 0; i < idxd->max_engines; i++) { - struct idxd_engine *engine = &idxd->engines[i]; - - engine->conf_dev.parent = &idxd->conf_dev; - dev_set_name(&engine->conf_dev, "engine%d.%d", - idxd->id, engine->id); - engine->conf_dev.bus = idxd_get_bus_type(idxd); - engine->conf_dev.groups = idxd_engine_attribute_groups; - engine->conf_dev.type = &idxd_engine_device_type; - dev_dbg(dev, "Engine device register: %s\n", - dev_name(&engine->conf_dev)); - rc = device_register(&engine->conf_dev); - if (rc < 0) { - put_device(&engine->conf_dev); + struct idxd_engine *engine = idxd->engines[i]; + + rc = device_add(&engine->conf_dev); + if (rc < 0) goto cleanup; - } } return 0; cleanup: - while (i--) { - struct idxd_engine *engine = &idxd->engines[i]; + j = i - 1; + for (; i < idxd->max_engines; i++) + put_device(&idxd->engines[i]->conf_dev); - device_unregister(&engine->conf_dev); - } + while (j--) + device_unregister(&idxd->engines[j]->conf_dev); return rc; } -static int idxd_setup_group_sysfs(struct idxd_device *idxd) +static int idxd_register_group_devices(struct idxd_device *idxd) { - struct device *dev = &idxd->pdev->dev; - int i, rc; + int i, j, rc; for (i = 0; i < idxd->max_groups; i++) { - struct idxd_group *group = &idxd->groups[i]; - - group->conf_dev.parent = &idxd->conf_dev; - dev_set_name(&group->conf_dev, "group%d.%d", - idxd->id, group->id); - group->conf_dev.bus = idxd_get_bus_type(idxd); - group->conf_dev.groups = idxd_group_attribute_groups; - group->conf_dev.type = &idxd_group_device_type; - dev_dbg(dev, "Group device register: %s\n", - dev_name(&group->conf_dev)); - rc = device_register(&group->conf_dev); - if (rc < 0) { - put_device(&group->conf_dev); + struct idxd_group *group = idxd->groups[i]; + + rc = device_add(&group->conf_dev); + if (rc < 0) goto cleanup; - } } return 0; cleanup: - while (i--) { - struct idxd_group *group = &idxd->groups[i]; + j = i - 1; + for (; i < idxd->max_groups; i++) + put_device(&idxd->groups[i]->conf_dev); - device_unregister(&group->conf_dev); - } + while (j--) + device_unregister(&idxd->groups[j]->conf_dev); return rc; } -static int idxd_setup_wq_sysfs(struct idxd_device *idxd) +static int idxd_register_wq_devices(struct idxd_device *idxd) { - struct device *dev = &idxd->pdev->dev; - int i, rc; + int i, rc, j; for (i = 0; i < idxd->max_wqs; i++) { - struct idxd_wq *wq = &idxd->wqs[i]; - - wq->conf_dev.parent = &idxd->conf_dev; - dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id); - wq->conf_dev.bus = idxd_get_bus_type(idxd); - wq->conf_dev.groups = idxd_wq_attribute_groups; - wq->conf_dev.type = &idxd_wq_device_type; - dev_dbg(dev, "WQ device register: %s\n", - dev_name(&wq->conf_dev)); - rc = device_register(&wq->conf_dev); - if (rc < 0) { - put_device(&wq->conf_dev); + struct idxd_wq *wq = idxd->wqs[i]; + + rc = 
device_add(&wq->conf_dev); + if (rc < 0) goto cleanup; - } } return 0; cleanup: - while (i--) { - struct idxd_wq *wq = &idxd->wqs[i]; + j = i - 1; + for (; i < idxd->max_wqs; i++) + put_device(&idxd->wqs[i]->conf_dev); - device_unregister(&wq->conf_dev); - } + while (j--) + device_unregister(&idxd->wqs[j]->conf_dev); return rc; } -static int idxd_setup_device_sysfs(struct idxd_device *idxd) +int idxd_register_devices(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; - int rc; - char devname[IDXD_NAME_SIZE]; + int rc, i; - sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id); - idxd->conf_dev.parent = dev; - dev_set_name(&idxd->conf_dev, "%s", devname); - idxd->conf_dev.bus = idxd_get_bus_type(idxd); - idxd->conf_dev.groups = idxd_attribute_groups; - idxd->conf_dev.type = idxd_get_device_type(idxd); - - dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev)); - rc = device_register(&idxd->conf_dev); - if (rc < 0) { - put_device(&idxd->conf_dev); - return rc; - } - - return 0; -} - -int idxd_setup_sysfs(struct idxd_device *idxd) -{ - struct device *dev = &idxd->pdev->dev; - int rc; - - rc = idxd_setup_device_sysfs(idxd); - if (rc < 0) { - dev_dbg(dev, "Device sysfs registering failed: %d\n", rc); + rc = device_add(&idxd->conf_dev); + if (rc < 0) return rc; - } - rc = idxd_setup_wq_sysfs(idxd); + rc = idxd_register_wq_devices(idxd); if (rc < 0) { - /* unregister conf dev */ - dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc); - return rc; + dev_dbg(dev, "WQ devices registering failed: %d\n", rc); + goto err_wq; } - rc = idxd_setup_group_sysfs(idxd); + rc = idxd_register_engine_devices(idxd); if (rc < 0) { - /* unregister conf dev */ - dev_dbg(dev, "Group sysfs registering failed: %d\n", rc); - return rc; + dev_dbg(dev, "Engine devices registering failed: %d\n", rc); + goto err_engine; } - rc = idxd_setup_engine_sysfs(idxd); + rc = idxd_register_group_devices(idxd); if (rc < 0) { - /* unregister conf dev */ - dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc); - return rc; + dev_dbg(dev, "Group device registering failed: %d\n", rc); + goto err_group; } return 0; + + err_group: + for (i = 0; i < idxd->max_engines; i++) + device_unregister(&idxd->engines[i]->conf_dev); + err_engine: + for (i = 0; i < idxd->max_wqs; i++) + device_unregister(&idxd->wqs[i]->conf_dev); + err_wq: + device_del(&idxd->conf_dev); + return rc; } -void idxd_cleanup_sysfs(struct idxd_device *idxd) +void idxd_unregister_devices(struct idxd_device *idxd) { int i; for (i = 0; i < idxd->max_wqs; i++) { - struct idxd_wq *wq = &idxd->wqs[i]; + struct idxd_wq *wq = idxd->wqs[i]; device_unregister(&wq->conf_dev); } for (i = 0; i < idxd->max_engines; i++) { - struct idxd_engine *engine = &idxd->engines[i]; + struct idxd_engine *engine = idxd->engines[i]; device_unregister(&engine->conf_dev); } for (i = 0; i < idxd->max_groups; i++) { - struct idxd_group *group = &idxd->groups[i]; + struct idxd_group *group = idxd->groups[i]; device_unregister(&group->conf_dev); } @@ -1830,26 +1750,10 @@ void idxd_cleanup_sysfs(struct idxd_device *idxd) int idxd_register_bus_type(void) { - int i, rc; - - for (i = 0; i < IDXD_TYPE_MAX; i++) { - rc = bus_register(idxd_bus_types[i]); - if (rc < 0) - goto bus_err; - } - - return 0; - -bus_err: - while (--i >= 0) - bus_unregister(idxd_bus_types[i]); - return rc; + return bus_register(&dsa_bus_type); } void idxd_unregister_bus_type(void) { - int i; - - for (i = 0; i < IDXD_TYPE_MAX; i++) - bus_unregister(idxd_bus_types[i]); + 
bus_unregister(&dsa_bus_type); } diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index d0b2e601e3e5..ecdaada95120 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013 - 2015 Linaro Ltd. - * Copyright (c) 2013 Hisilicon Limited. + * Copyright (c) 2013 HiSilicon Limited. */ #include <linux/sched.h> #include <linux/device.h> @@ -1039,6 +1039,6 @@ static struct platform_driver k3_pdma_driver = { module_platform_driver(k3_pdma_driver); -MODULE_DESCRIPTION("Hisilicon k3 DMA Driver"); +MODULE_DESCRIPTION("HiSilicon k3 DMA Driver"); MODULE_ALIAS("platform:k3dma"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c index 57f5ee4235c7..43ac3ab23d4c 100644 --- a/drivers/dma/qcom/gpi.c +++ b/drivers/dma/qcom/gpi.c @@ -2281,6 +2281,7 @@ static int gpi_probe(struct platform_device *pdev) static const struct of_device_id gpi_of_match[] = { { .compatible = "qcom,sdm845-gpi-dma" }, + { .compatible = "qcom,sm8150-gpi-dma" }, { }, }; MODULE_DEVICE_TABLE(of, gpi_of_match); diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 6c0f9eb8ecc6..23d64489d25f 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -90,12 +90,6 @@ static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach) return container_of(dmach, struct hidma_chan, chan); } -static inline -struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t) -{ - return container_of(t, struct hidma_desc, desc); -} - static void hidma_free(struct hidma_dev *dmadev) { INIT_LIST_HEAD(&dmadev->ddev.channels); diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 3aded7861fef..75c0b8e904e5 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -2453,6 +2453,13 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan) return 0; } +static void xilinx_dma_synchronize(struct dma_chan *dchan) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + + tasklet_kill(&chan->tasklet); +} + /** * xilinx_dma_channel_set_config - Configure VDMA channel * Run-time configuration for Axi VDMA, supports: @@ -3074,6 +3081,7 @@ static int xilinx_dma_probe(struct platform_device *pdev) xdev->common.device_free_chan_resources = xilinx_dma_free_chan_resources; xdev->common.device_terminate_all = xilinx_dma_terminate_all; + xdev->common.device_synchronize = xilinx_dma_synchronize; xdev->common.device_tx_status = xilinx_dma_tx_status; xdev->common.device_issue_pending = xilinx_dma_issue_pending; if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index d3b3de514f6e..1dd0ec6727fd 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -321,9 +321,8 @@ config GPIO_HLWD config GPIO_ICH tristate "Intel ICH GPIO" - depends on PCI && X86 - select MFD_CORE - select LPC_ICH + depends on X86 + depends on LPC_ICH help Say yes here to support the GPIO functionality of a number of Intel ICH-based chipsets. Currently supported devices: ICH6, ICH7, ICH8 @@ -502,6 +501,19 @@ config GPIO_RDA help Say Y here to support RDA Micro GPIO controller. +config GPIO_REALTEK_OTTO + tristate "Realtek Otto GPIO support" + depends on MACH_REALTEK_RTL + default MACH_REALTEK_RTL + select GPIO_GENERIC + select GPIOLIB_IRQCHIP + help + The GPIO controller on the Otto MIPS platform supports up to two + banks of 32 GPIOs, with edge triggered interrupts. The 32 GPIOs + are grouped in four 8-bit wide ports. 
+ + When built as a module, the module will be called realtek_otto_gpio. + config GPIO_REG bool help @@ -847,9 +859,9 @@ config GPIO_IT87 config GPIO_SCH tristate "Intel SCH/TunnelCreek/Centerton/Quark X1000 GPIO" - depends on (X86 || COMPILE_TEST) && PCI - select MFD_CORE - select LPC_SCH + depends on (X86 || COMPILE_TEST) && ACPI + depends on LPC_SCH + select GPIOLIB_IRQCHIP help Say yes here to support GPIO interface on Intel Poulsbo SCH, Intel Tunnel Creek processor, Intel Centerton processor or diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 4c12f31db31f..d7c81e1611a4 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -125,6 +125,7 @@ obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o obj-$(CONFIG_GPIO_RDA) += gpio-rda.o obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o +obj-$(CONFIG_GPIO_REALTEK_OTTO) += gpio-realtek-otto.o obj-$(CONFIG_GPIO_REG) += gpio-reg.o obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c index 7a9021c4fa48..71c0bea34d7b 100644 --- a/drivers/gpio/gpio-104-dio-48e.c +++ b/drivers/gpio/gpio-104-dio-48e.c @@ -49,15 +49,15 @@ struct dio48e_gpio { unsigned char out_state[6]; unsigned char control[2]; raw_spinlock_t lock; - unsigned base; + unsigned int base; unsigned char irq_mask; }; -static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned offset) +static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); - const unsigned port = offset / 8; - const unsigned mask = BIT(offset % 8); + const unsigned int port = offset / 8; + const unsigned int mask = BIT(offset % 8); if (dio48egpio->io_state[port] & mask) return GPIO_LINE_DIRECTION_IN; @@ -65,14 +65,14 @@ static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned offset) return GPIO_LINE_DIRECTION_OUT; } -static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset) +static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) { struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); - const unsigned io_port = offset / 8; + const unsigned int io_port = offset / 8; const unsigned int control_port = io_port / 3; - const unsigned control_addr = dio48egpio->base + 3 + control_port*4; + const unsigned int control_addr = dio48egpio->base + 3 + control_port * 4; unsigned long flags; - unsigned control; + unsigned int control; raw_spin_lock_irqsave(&dio48egpio->lock, flags); @@ -104,17 +104,17 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset) return 0; } -static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset, - int value) +static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, + int value) { struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); - const unsigned io_port = offset / 8; + const unsigned int io_port = offset / 8; const unsigned int control_port = io_port / 3; - const unsigned mask = BIT(offset % 8); - const unsigned control_addr = dio48egpio->base + 3 + control_port*4; - const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port; + const unsigned int mask = BIT(offset % 8); + const unsigned int control_addr = dio48egpio->base + 3 + control_port * 4; + const unsigned int out_port = (io_port > 2) ? 
io_port + 1 : io_port; unsigned long flags; - unsigned control; + unsigned int control; raw_spin_lock_irqsave(&dio48egpio->lock, flags); @@ -154,14 +154,14 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset, return 0; } -static int dio48e_gpio_get(struct gpio_chip *chip, unsigned offset) +static int dio48e_gpio_get(struct gpio_chip *chip, unsigned int offset) { struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); - const unsigned port = offset / 8; - const unsigned mask = BIT(offset % 8); - const unsigned in_port = (port > 2) ? port + 1 : port; + const unsigned int port = offset / 8; + const unsigned int mask = BIT(offset % 8); + const unsigned int in_port = (port > 2) ? port + 1 : port; unsigned long flags; - unsigned port_state; + unsigned int port_state; raw_spin_lock_irqsave(&dio48egpio->lock, flags); @@ -202,12 +202,12 @@ static int dio48e_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask, return 0; } -static void dio48e_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +static void dio48e_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); - const unsigned port = offset / 8; - const unsigned mask = BIT(offset % 8); - const unsigned out_port = (port > 2) ? port + 1 : port; + const unsigned int port = offset / 8; + const unsigned int mask = BIT(offset % 8); + const unsigned int out_port = (port > 2) ? port + 1 : port; unsigned long flags; raw_spin_lock_irqsave(&dio48egpio->lock, flags); @@ -306,7 +306,7 @@ static void dio48e_irq_unmask(struct irq_data *data) raw_spin_unlock_irqrestore(&dio48egpio->lock, flags); } -static int dio48e_irq_set_type(struct irq_data *data, unsigned flow_type) +static int dio48e_irq_set_type(struct irq_data *data, unsigned int flow_type) { const unsigned long offset = irqd_to_hwirq(data); diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c index 08171431bb8f..34e35b64dcdc 100644 --- a/drivers/gpio/gpio-aggregator.c +++ b/drivers/gpio/gpio-aggregator.c @@ -37,31 +37,6 @@ struct gpio_aggregator { static DEFINE_MUTEX(gpio_aggregator_lock); /* protects idr */ static DEFINE_IDR(gpio_aggregator_idr); -static char *get_arg(char **args) -{ - char *start, *end; - - start = skip_spaces(*args); - if (!*start) - return NULL; - - if (*start == '"') { - /* Quoted arg */ - end = strchr(++start, '"'); - if (!end) - return ERR_PTR(-EINVAL); - } else { - /* Unquoted arg */ - for (end = start; *end && !isspace(*end); end++) ; - } - - if (*end) - *end++ = '\0'; - - *args = end; - return start; -} - static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key, int hwnum, unsigned int *n) { @@ -83,8 +58,8 @@ static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key, static int aggr_parse(struct gpio_aggregator *aggr) { + char *args = skip_spaces(aggr->args); char *name, *offsets, *p; - char *args = aggr->args; unsigned long *bitmap; unsigned int i, n = 0; int error = 0; @@ -93,13 +68,9 @@ static int aggr_parse(struct gpio_aggregator *aggr) if (!bitmap) return -ENOMEM; - for (name = get_arg(&args), offsets = get_arg(&args); name; - offsets = get_arg(&args)) { - if (IS_ERR(name)) { - pr_err("Cannot get GPIO specifier: %pe\n", name); - error = PTR_ERR(name); - goto free_bitmap; - } + args = next_arg(args, &name, &p); + while (*args) { + args = next_arg(args, &offsets, &p); p = get_options(offsets, 0, &error); if (error == 0 || *p) { @@ -125,7 +96,7 @@ static int aggr_parse(struct 
gpio_aggregator *aggr) goto free_bitmap; } - name = get_arg(&args); + args = next_arg(args, &name, &p); } if (!n) { diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c index de56c013a658..3b31f5e9bf40 100644 --- a/drivers/gpio/gpio-ich.c +++ b/drivers/gpio/gpio-ich.c @@ -5,13 +5,11 @@ * Copyright (C) 2010 Extreme Engineering Solutions. */ - #include <linux/bitops.h> #include <linux/gpio/driver.h> #include <linux/ioport.h> #include <linux/mfd/lpc_ich.h> #include <linux/module.h> -#include <linux/pci.h> #include <linux/platform_device.h> #define DRV_NAME "gpio_ich" diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c index 8f1be34953ce..f332341fd4c8 100644 --- a/drivers/gpio/gpio-it87.c +++ b/drivers/gpio/gpio-it87.c @@ -125,14 +125,6 @@ static inline int superio_inw(int reg) return val; } -static inline void superio_outw(int val, int reg) -{ - outb(reg++, REG); - outb(val >> 8, VAL); - outb(reg, REG); - outb(val, VAL); -} - static inline void superio_set_mask(int mask, int reg) { u8 curr_val = superio_inb(reg); diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 28b757d34046..d7e73876a3b9 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -479,15 +479,10 @@ static struct platform_device *gpio_mockup_pdevs[GPIO_MOCKUP_MAX_GC]; static void gpio_mockup_unregister_pdevs(void) { - struct platform_device *pdev; int i; - for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++) { - pdev = gpio_mockup_pdevs[i]; - - if (pdev) - platform_device_unregister(pdev); - } + for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++) + platform_device_unregister(gpio_mockup_pdevs[i]); } static __init char **gpio_mockup_make_line_names(const char *label, diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index 6dfca83bcd90..4b9157a69fca 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -9,6 +9,7 @@ * kind, whether express or implied. 
*/ +#include <linux/acpi.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> @@ -18,6 +19,8 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/property.h> +#include <linux/mod_devicetable.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/gpio/driver.h> @@ -303,8 +306,8 @@ static int mpc8xxx_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct mpc8xxx_gpio_chip *mpc8xxx_gc; struct gpio_chip *gc; - const struct mpc8xxx_gpio_devtype *devtype = - of_device_get_match_data(&pdev->dev); + const struct mpc8xxx_gpio_devtype *devtype = NULL; + struct fwnode_handle *fwnode; int ret; mpc8xxx_gc = devm_kzalloc(&pdev->dev, sizeof(*mpc8xxx_gc), GFP_KERNEL); @@ -315,14 +318,14 @@ static int mpc8xxx_probe(struct platform_device *pdev) raw_spin_lock_init(&mpc8xxx_gc->lock); - mpc8xxx_gc->regs = of_iomap(np, 0); - if (!mpc8xxx_gc->regs) - return -ENOMEM; + mpc8xxx_gc->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mpc8xxx_gc->regs)) + return PTR_ERR(mpc8xxx_gc->regs); gc = &mpc8xxx_gc->gc; gc->parent = &pdev->dev; - if (of_property_read_bool(np, "little-endian")) { + if (device_property_read_bool(&pdev->dev, "little-endian")) { ret = bgpio_init(gc, &pdev->dev, 4, mpc8xxx_gc->regs + GPIO_DAT, NULL, NULL, @@ -345,6 +348,7 @@ static int mpc8xxx_probe(struct platform_device *pdev) mpc8xxx_gc->direction_output = gc->direction_output; + devtype = device_get_match_data(&pdev->dev); if (!devtype) devtype = &mpc8xxx_gpio_devtype_default; @@ -369,24 +373,29 @@ static int mpc8xxx_probe(struct platform_device *pdev) * associated input enable must be set (GPIOxGPIE[IEn]=1) to propagate * the port value to the GPIO Data Register. */ + fwnode = dev_fwnode(&pdev->dev); if (of_device_is_compatible(np, "fsl,qoriq-gpio") || of_device_is_compatible(np, "fsl,ls1028a-gpio") || - of_device_is_compatible(np, "fsl,ls1088a-gpio")) + of_device_is_compatible(np, "fsl,ls1088a-gpio") || + is_acpi_node(fwnode)) gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff); ret = gpiochip_add_data(gc, mpc8xxx_gc); if (ret) { - pr_err("%pOF: GPIO chip registration failed with status %d\n", - np, ret); + dev_err(&pdev->dev, + "GPIO chip registration failed with status %d\n", ret); goto err; } - mpc8xxx_gc->irqn = irq_of_parse_and_map(np, 0); + mpc8xxx_gc->irqn = platform_get_irq(pdev, 0); if (!mpc8xxx_gc->irqn) return 0; - mpc8xxx_gc->irq = irq_domain_add_linear(np, MPC8XXX_GPIO_PINS, - &mpc8xxx_gpio_irq_ops, mpc8xxx_gc); + mpc8xxx_gc->irq = irq_domain_create_linear(fwnode, + MPC8XXX_GPIO_PINS, + &mpc8xxx_gpio_irq_ops, + mpc8xxx_gc); + if (!mpc8xxx_gc->irq) return 0; @@ -399,8 +408,9 @@ static int mpc8xxx_probe(struct platform_device *pdev) IRQF_SHARED, "gpio-cascade", mpc8xxx_gc); if (ret) { - dev_err(&pdev->dev, "%s: failed to devm_request_irq(%d), ret = %d\n", - np->full_name, mpc8xxx_gc->irqn, ret); + dev_err(&pdev->dev, + "failed to devm_request_irq(%d), ret = %d\n", + mpc8xxx_gc->irqn, ret); goto err; } @@ -425,12 +435,21 @@ static int mpc8xxx_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_ACPI +static const struct acpi_device_id gpio_acpi_ids[] = { + {"NXP0031",}, + { } +}; +MODULE_DEVICE_TABLE(acpi, gpio_acpi_ids); +#endif + static struct platform_driver mpc8xxx_plat_driver = { .probe = mpc8xxx_probe, .remove = mpc8xxx_remove, .driver = { .name = "gpio-mpc8xxx", .of_match_table = mpc8xxx_gpio_ids, + .acpi_match_table = ACPI_PTR(gpio_acpi_ids), }, }; diff --git 
a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index dfc0c1eb1b33..524b668eb1ac 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -60,11 +60,6 @@ static inline int is_imx23_gpio(struct mxs_gpio_port *port)
 	return port->devid == IMX23_GPIO;
 }

-static inline int is_imx28_gpio(struct mxs_gpio_port *port)
-{
-	return port->devid == IMX28_GPIO;
-}
-
 /* Note: This driver assumes 32 GPIOs are handled in one register */

 static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 56152263ab38..ca23f72165ca 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1373,15 +1373,14 @@ static int omap_gpio_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *node = dev->of_node;
-	const struct of_device_id *match;
 	const struct omap_gpio_platform_data *pdata;
 	struct gpio_bank *bank;
 	struct irq_chip *irqc;
 	int ret;

-	match = of_match_device(of_match_ptr(omap_gpio_match), dev);
+	pdata = device_get_match_data(dev);

-	pdata = match ? match->data : dev_get_platdata(dev);
+	pdata = pdata ?: dev_get_platdata(dev);
 	if (!pdata)
 		return -EINVAL;

diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c
new file mode 100644
index 000000000000..cb64fb5a51aa
--- /dev/null
+++ b/drivers/gpio/gpio-realtek-otto.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+/*
+ * Total register block size is 0x1C for one bank of four ports (A, B, C, D).
+ * An optional second bank, with ports E, F, G, and H, may be present, starting
+ * at register offset 0x1C.
+ */
+
+/*
+ * Pin select: (0) "normal", (1) "dedicate peripheral"
+ * Not used on RTL8380/RTL8390, peripheral selection is managed by control bits
+ * in the peripheral registers.
+ */
+#define REALTEK_GPIO_REG_CNR		0x00
+/* Clear bit (0) for input, set bit (1) for output */
+#define REALTEK_GPIO_REG_DIR		0x08
+#define REALTEK_GPIO_REG_DATA		0x0C
+/* Read bit for IRQ status, write 1 to clear IRQ */
+#define REALTEK_GPIO_REG_ISR		0x10
+/* Two bits per GPIO in IMR registers */
+#define REALTEK_GPIO_REG_IMR		0x14
+#define REALTEK_GPIO_REG_IMR_AB		0x14
+#define REALTEK_GPIO_REG_IMR_CD		0x18
+#define REALTEK_GPIO_IMR_LINE_MASK	GENMASK(1, 0)
+#define REALTEK_GPIO_IRQ_EDGE_FALLING	1
+#define REALTEK_GPIO_IRQ_EDGE_RISING	2
+#define REALTEK_GPIO_IRQ_EDGE_BOTH	3
+
+#define REALTEK_GPIO_MAX		32
+#define REALTEK_GPIO_PORTS_PER_BANK	4
+
+/**
+ * realtek_gpio_ctrl - Realtek Otto GPIO driver data
+ *
+ * @gc: Associated gpio_chip instance
+ * @base: Base address of the register block for a GPIO bank
+ * @lock: Lock for accessing the IRQ registers and values
+ * @intr_mask: Mask for interrupt lines
+ * @intr_type: Interrupt type selection
+ *
+ * Because the interrupt mask register (IMR) combines the function of IRQ type
+ * selection and masking, two extra values are stored. @intr_mask is used to
+ * mask/unmask the interrupts for a GPIO port, and @intr_type is used to store
+ * the selected interrupt types. The logical AND of these values is written to
+ * IMR on changes. 
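+ *
+ * Illustrative example (values chosen for illustration, not from a
+ * datasheet): if a pin's two @intr_type bits hold
+ * REALTEK_GPIO_IRQ_EDGE_BOTH (0x3) while its @intr_mask bits are cleared,
+ * the AND written to IMR is 0 and the line stays masked; unmasking later
+ * restores the stored trigger type without reprogramming it.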
+ */ +struct realtek_gpio_ctrl { + struct gpio_chip gc; + void __iomem *base; + raw_spinlock_t lock; + u16 intr_mask[REALTEK_GPIO_PORTS_PER_BANK]; + u16 intr_type[REALTEK_GPIO_PORTS_PER_BANK]; +}; + +/* Expand with more flags as devices with other quirks are added */ +enum realtek_gpio_flags { + /* + * Allow disabling interrupts, for cases where the port order is + * unknown. This may result in a port mismatch between ISR and IMR. + * An interrupt would appear to come from a different line than the + * line the IRQ handler was assigned to, causing uncaught interrupts. + */ + GPIO_INTERRUPTS_DISABLED = BIT(0), +}; + +static struct realtek_gpio_ctrl *irq_data_to_ctrl(struct irq_data *data) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + + return container_of(gc, struct realtek_gpio_ctrl, gc); +} + +/* + * Normal port order register access + * + * Port information is stored with the first port at offset 0, followed by the + * second, etc. Most registers store one bit per GPIO and use a u8 value per + * port. The two interrupt mask registers store two bits per GPIO, so use u16 + * values. + */ +static void realtek_gpio_write_imr(struct realtek_gpio_ctrl *ctrl, + unsigned int port, u16 irq_type, u16 irq_mask) +{ + iowrite16(irq_type & irq_mask, ctrl->base + REALTEK_GPIO_REG_IMR + 2 * port); +} + +static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl, + unsigned int port, u8 mask) +{ + iowrite8(mask, ctrl->base + REALTEK_GPIO_REG_ISR + port); +} + +static u8 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl, unsigned int port) +{ + return ioread8(ctrl->base + REALTEK_GPIO_REG_ISR + port); +} + +/* Set the rising and falling edge mask bits for a GPIO port pin */ +static u16 realtek_gpio_imr_bits(unsigned int pin, u16 value) +{ + return (value & REALTEK_GPIO_IMR_LINE_MASK) << 2 * pin; +} + +static void realtek_gpio_irq_ack(struct irq_data *data) +{ + struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data); + irq_hw_number_t line = irqd_to_hwirq(data); + unsigned int port = line / 8; + unsigned int port_pin = line % 8; + + realtek_gpio_clear_isr(ctrl, port, BIT(port_pin)); +} + +static void realtek_gpio_irq_unmask(struct irq_data *data) +{ + struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data); + unsigned int line = irqd_to_hwirq(data); + unsigned int port = line / 8; + unsigned int port_pin = line % 8; + unsigned long flags; + u16 m; + + raw_spin_lock_irqsave(&ctrl->lock, flags); + m = ctrl->intr_mask[port]; + m |= realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK); + ctrl->intr_mask[port] = m; + realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m); + raw_spin_unlock_irqrestore(&ctrl->lock, flags); +} + +static void realtek_gpio_irq_mask(struct irq_data *data) +{ + struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data); + unsigned int line = irqd_to_hwirq(data); + unsigned int port = line / 8; + unsigned int port_pin = line % 8; + unsigned long flags; + u16 m; + + raw_spin_lock_irqsave(&ctrl->lock, flags); + m = ctrl->intr_mask[port]; + m &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK); + ctrl->intr_mask[port] = m; + realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m); + raw_spin_unlock_irqrestore(&ctrl->lock, flags); +} + +static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) +{ + struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data); + unsigned int line = irqd_to_hwirq(data); + unsigned int port = line / 8; + unsigned int port_pin = line % 8; + unsigned long flags; + u16 type, t; + + 
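/*
+	 * Illustrative walk-through (not part of the patch): for hwirq
+	 * line 10, port = 10 / 8 = 1 and port_pin = 10 % 8 = 2, so the
+	 * two-bit trigger value chosen below is shifted to bits 5:4 of
+	 * port 1's IMR value by realtek_gpio_imr_bits().
+	 */
+	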
switch (flow_type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_EDGE_FALLING: + type = REALTEK_GPIO_IRQ_EDGE_FALLING; + break; + case IRQ_TYPE_EDGE_RISING: + type = REALTEK_GPIO_IRQ_EDGE_RISING; + break; + case IRQ_TYPE_EDGE_BOTH: + type = REALTEK_GPIO_IRQ_EDGE_BOTH; + break; + default: + return -EINVAL; + } + + irq_set_handler_locked(data, handle_edge_irq); + + raw_spin_lock_irqsave(&ctrl->lock, flags); + t = ctrl->intr_type[port]; + t &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK); + t |= realtek_gpio_imr_bits(port_pin, type); + ctrl->intr_type[port] = t; + realtek_gpio_write_imr(ctrl, port, t, ctrl->intr_mask[port]); + raw_spin_unlock_irqrestore(&ctrl->lock, flags); + + return 0; +} + +static void realtek_gpio_irq_handler(struct irq_desc *desc) +{ + struct gpio_chip *gc = irq_desc_get_handler_data(desc); + struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc); + struct irq_chip *irq_chip = irq_desc_get_chip(desc); + unsigned int lines_done; + unsigned int port_pin_count; + unsigned int irq; + unsigned long status; + int offset; + + chained_irq_enter(irq_chip, desc); + + for (lines_done = 0; lines_done < gc->ngpio; lines_done += 8) { + status = realtek_gpio_read_isr(ctrl, lines_done / 8); + port_pin_count = min(gc->ngpio - lines_done, 8U); + for_each_set_bit(offset, &status, port_pin_count) { + irq = irq_find_mapping(gc->irq.domain, offset); + generic_handle_irq(irq); + } + } + + chained_irq_exit(irq_chip, desc); +} + +static int realtek_gpio_irq_init(struct gpio_chip *gc) +{ + struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc); + unsigned int port; + + for (port = 0; (port * 8) < gc->ngpio; port++) { + realtek_gpio_write_imr(ctrl, port, 0, 0); + realtek_gpio_clear_isr(ctrl, port, GENMASK(7, 0)); + } + + return 0; +} + +static struct irq_chip realtek_gpio_irq_chip = { + .name = "realtek-otto-gpio", + .irq_ack = realtek_gpio_irq_ack, + .irq_mask = realtek_gpio_irq_mask, + .irq_unmask = realtek_gpio_irq_unmask, + .irq_set_type = realtek_gpio_irq_set_type, +}; + +static const struct of_device_id realtek_gpio_of_match[] = { + { + .compatible = "realtek,otto-gpio", + .data = (void *)GPIO_INTERRUPTS_DISABLED, + }, + { + .compatible = "realtek,rtl8380-gpio", + }, + { + .compatible = "realtek,rtl8390-gpio", + }, + {} +}; +MODULE_DEVICE_TABLE(of, realtek_gpio_of_match); + +static int realtek_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + unsigned int dev_flags; + struct gpio_irq_chip *girq; + struct realtek_gpio_ctrl *ctrl; + u32 ngpios; + int err, irq; + + ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + dev_flags = (unsigned int) device_get_match_data(dev); + + ngpios = REALTEK_GPIO_MAX; + device_property_read_u32(dev, "ngpios", &ngpios); + + if (ngpios > REALTEK_GPIO_MAX) { + dev_err(&pdev->dev, "invalid ngpios (max. 
%d)\n", + REALTEK_GPIO_MAX); + return -EINVAL; + } + + ctrl->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ctrl->base)) + return PTR_ERR(ctrl->base); + + raw_spin_lock_init(&ctrl->lock); + + err = bgpio_init(&ctrl->gc, dev, 4, + ctrl->base + REALTEK_GPIO_REG_DATA, NULL, NULL, + ctrl->base + REALTEK_GPIO_REG_DIR, NULL, + BGPIOF_BIG_ENDIAN_BYTE_ORDER); + if (err) { + dev_err(dev, "unable to init generic GPIO"); + return err; + } + + ctrl->gc.ngpio = ngpios; + ctrl->gc.owner = THIS_MODULE; + + irq = platform_get_irq_optional(pdev, 0); + if (!(dev_flags & GPIO_INTERRUPTS_DISABLED) && irq > 0) { + girq = &ctrl->gc.irq; + girq->chip = &realtek_gpio_irq_chip; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_bad_irq; + girq->parent_handler = realtek_gpio_irq_handler; + girq->num_parents = 1; + girq->parents = devm_kcalloc(dev, girq->num_parents, + sizeof(*girq->parents), GFP_KERNEL); + if (!girq->parents) + return -ENOMEM; + girq->parents[0] = irq; + girq->init_hw = realtek_gpio_irq_init; + } + + return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl); +} + +static struct platform_driver realtek_gpio_driver = { + .driver = { + .name = "realtek-otto-gpio", + .of_match_table = realtek_gpio_of_match, + }, + .probe = realtek_gpio_probe, +}; +module_platform_driver(realtek_gpio_driver); + +MODULE_DESCRIPTION("Realtek Otto GPIO support"); +MODULE_AUTHOR("Sander Vanheule <[email protected]>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c index 3a1b1adb08c6..a6f0421d6e50 100644 --- a/drivers/gpio/gpio-sch.c +++ b/drivers/gpio/gpio-sch.c @@ -7,33 +7,55 @@ */ #include <linux/acpi.h> +#include <linux/bitops.h> #include <linux/errno.h> #include <linux/gpio/driver.h> #include <linux/io.h> +#include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci_ids.h> #include <linux/platform_device.h> +#include <linux/types.h> #define GEN 0x00 #define GIO 0x04 #define GLV 0x08 +#define GTPE 0x0c +#define GTNE 0x10 +#define GGPE 0x14 +#define GSMI 0x18 +#define GTS 0x1c + +#define CORE_BANK_OFFSET 0x00 +#define RESUME_BANK_OFFSET 0x20 + +/* + * iLB datasheet describes GPE0BLK registers, in particular GPE0E.GPIO bit. 
+ * Document Number: 328195-001 + */ +#define GPE0E_GPIO 14 struct sch_gpio { struct gpio_chip chip; + struct irq_chip irqchip; spinlock_t lock; unsigned short iobase; unsigned short resume_base; + + /* GPE handling */ + u32 gpe; + acpi_gpe_handler gpe_handler; }; static unsigned int sch_gpio_offset(struct sch_gpio *sch, unsigned int gpio, unsigned int reg) { - unsigned int base = 0; + unsigned int base = CORE_BANK_OFFSET; if (gpio >= sch->resume_base) { gpio -= sch->resume_base; - base += 0x20; + base = RESUME_BANK_OFFSET; } return base + reg + gpio / 8; @@ -79,10 +101,11 @@ static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned int gpio, unsigned i static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned int gpio_num) { struct sch_gpio *sch = gpiochip_get_data(gc); + unsigned long flags; - spin_lock(&sch->lock); + spin_lock_irqsave(&sch->lock, flags); sch_gpio_reg_set(sch, gpio_num, GIO, 1); - spin_unlock(&sch->lock); + spin_unlock_irqrestore(&sch->lock, flags); return 0; } @@ -96,20 +119,22 @@ static int sch_gpio_get(struct gpio_chip *gc, unsigned int gpio_num) static void sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val) { struct sch_gpio *sch = gpiochip_get_data(gc); + unsigned long flags; - spin_lock(&sch->lock); + spin_lock_irqsave(&sch->lock, flags); sch_gpio_reg_set(sch, gpio_num, GLV, val); - spin_unlock(&sch->lock); + spin_unlock_irqrestore(&sch->lock, flags); } static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned int gpio_num, int val) { struct sch_gpio *sch = gpiochip_get_data(gc); + unsigned long flags; - spin_lock(&sch->lock); + spin_lock_irqsave(&sch->lock, flags); sch_gpio_reg_set(sch, gpio_num, GIO, 0); - spin_unlock(&sch->lock); + spin_unlock_irqrestore(&sch->lock, flags); /* * according to the datasheet, writing to the level register has no @@ -144,10 +169,145 @@ static const struct gpio_chip sch_gpio_chip = { .get_direction = sch_gpio_get_direction, }; +static int sch_irq_type(struct irq_data *d, unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct sch_gpio *sch = gpiochip_get_data(gc); + irq_hw_number_t gpio_num = irqd_to_hwirq(d); + unsigned long flags; + int rising, falling; + + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_EDGE_RISING: + rising = 1; + falling = 0; + break; + case IRQ_TYPE_EDGE_FALLING: + rising = 0; + falling = 1; + break; + case IRQ_TYPE_EDGE_BOTH: + rising = 1; + falling = 1; + break; + default: + return -EINVAL; + } + + spin_lock_irqsave(&sch->lock, flags); + + sch_gpio_reg_set(sch, gpio_num, GTPE, rising); + sch_gpio_reg_set(sch, gpio_num, GTNE, falling); + + irq_set_handler_locked(d, handle_edge_irq); + + spin_unlock_irqrestore(&sch->lock, flags); + + return 0; +} + +static void sch_irq_ack(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct sch_gpio *sch = gpiochip_get_data(gc); + irq_hw_number_t gpio_num = irqd_to_hwirq(d); + unsigned long flags; + + spin_lock_irqsave(&sch->lock, flags); + sch_gpio_reg_set(sch, gpio_num, GTS, 1); + spin_unlock_irqrestore(&sch->lock, flags); +} + +static void sch_irq_mask_unmask(struct irq_data *d, int val) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct sch_gpio *sch = gpiochip_get_data(gc); + irq_hw_number_t gpio_num = irqd_to_hwirq(d); + unsigned long flags; + + spin_lock_irqsave(&sch->lock, flags); + sch_gpio_reg_set(sch, gpio_num, GGPE, val); + spin_unlock_irqrestore(&sch->lock, flags); +} + +static void sch_irq_mask(struct irq_data *d) +{ + sch_irq_mask_unmask(d, 0); 
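The conversions from spin_lock() to spin_lock_irqsave() throughout this file are not cosmetic: once the same registers are also written from irq_chip callbacks, which run with interrupts disabled, a plain spin_lock() taken in task context can deadlock against an interrupt arriving on the same CPU. A minimal sketch of the pattern; example_hw_write() is a placeholder for the real register access:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    static void example_hw_write(unsigned int reg, unsigned int val)
    {
            /* stand-in for the port I/O register update */
    }

    static void example_reg_set(unsigned int reg, unsigned int val)
    {
            unsigned long flags;

            /* Disable local interrupts while holding the lock, so an IRQ
             * handler taking the same lock cannot deadlock this CPU. */
            spin_lock_irqsave(&example_lock, flags);
            example_hw_write(reg, val);
            spin_unlock_irqrestore(&example_lock, flags);
    }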
+} + +static void sch_irq_unmask(struct irq_data *d) +{ + sch_irq_mask_unmask(d, 1); +} + +static u32 sch_gpio_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context) +{ + struct sch_gpio *sch = context; + struct gpio_chip *gc = &sch->chip; + unsigned long core_status, resume_status; + unsigned long pending; + unsigned long flags; + int offset; + u32 ret; + + spin_lock_irqsave(&sch->lock, flags); + + core_status = inl(sch->iobase + CORE_BANK_OFFSET + GTS); + resume_status = inl(sch->iobase + RESUME_BANK_OFFSET + GTS); + + spin_unlock_irqrestore(&sch->lock, flags); + + pending = (resume_status << sch->resume_base) | core_status; + for_each_set_bit(offset, &pending, sch->chip.ngpio) + generic_handle_irq(irq_find_mapping(gc->irq.domain, offset)); + + /* Set returning value depending on whether we handled an interrupt */ + ret = pending ? ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED; + + /* Acknowledge GPE to ACPICA */ + ret |= ACPI_REENABLE_GPE; + + return ret; +} + +static void sch_gpio_remove_gpe_handler(void *data) +{ + struct sch_gpio *sch = data; + + acpi_disable_gpe(NULL, sch->gpe); + acpi_remove_gpe_handler(NULL, sch->gpe, sch->gpe_handler); +} + +static int sch_gpio_install_gpe_handler(struct sch_gpio *sch) +{ + struct device *dev = sch->chip.parent; + acpi_status status; + + status = acpi_install_gpe_handler(NULL, sch->gpe, ACPI_GPE_LEVEL_TRIGGERED, + sch->gpe_handler, sch); + if (ACPI_FAILURE(status)) { + dev_err(dev, "Failed to install GPE handler for %u: %s\n", + sch->gpe, acpi_format_exception(status)); + return -ENODEV; + } + + status = acpi_enable_gpe(NULL, sch->gpe); + if (ACPI_FAILURE(status)) { + dev_err(dev, "Failed to enable GPE handler for %u: %s\n", + sch->gpe, acpi_format_exception(status)); + acpi_remove_gpe_handler(NULL, sch->gpe, sch->gpe_handler); + return -ENODEV; + } + + return devm_add_action_or_reset(dev, sch_gpio_remove_gpe_handler, sch); +} + static int sch_gpio_probe(struct platform_device *pdev) { + struct gpio_irq_chip *girq; struct sch_gpio *sch; struct resource *res; + int ret; sch = devm_kzalloc(&pdev->dev, sizeof(*sch), GFP_KERNEL); if (!sch) @@ -207,6 +367,28 @@ static int sch_gpio_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sch); + sch->irqchip.name = "sch_gpio"; + sch->irqchip.irq_ack = sch_irq_ack; + sch->irqchip.irq_mask = sch_irq_mask; + sch->irqchip.irq_unmask = sch_irq_unmask; + sch->irqchip.irq_set_type = sch_irq_type; + + girq = &sch->chip.irq; + girq->chip = &sch->irqchip; + girq->num_parents = 0; + girq->parents = NULL; + girq->parent_handler = NULL; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_bad_irq; + + /* GPE setup is optional */ + sch->gpe = GPE0E_GPIO; + sch->gpe_handler = sch_gpio_gpe_handler; + + ret = sch_gpio_install_gpe_handler(sch); + if (ret) + dev_warn(&pdev->dev, "Can't setup GPE, no IRQ support\n"); + return devm_gpiochip_add_data(&pdev->dev, &sch->chip, sch); } diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 1aacd2a5a1fd..3ef22a3c104d 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -1291,6 +1291,13 @@ void acpi_gpiochip_remove(struct gpio_chip *chip) kfree(acpi_gpio); } +void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev) +{ + /* Set default fwnode to parent's one if present */ + if (gc->parent) + ACPI_COMPANION_SET(&gdev->dev, ACPI_COMPANION(gc->parent)); +} + static int acpi_gpio_package_count(const union acpi_object *obj) { const union acpi_object *element = obj->package.elements; @@ -1440,6 
+1447,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = { }, { /* + * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an + * external embedded-controller connected via I2C + an ACPI GPIO + * event handler on INT33FFC:02 pin 12, causing spurious wakeups. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "INT33FC:02@12", + }, + }, + { + /* * HP X2 10 models with Cherry Trail SoC + TI PMIC use an * external embedded-controller connected via I2C + an ACPI GPIO * event handler on INT33FF:01 pin 0, causing spurious wakeups. diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h index e2edb632b2cc..e476558d9471 100644 --- a/drivers/gpio/gpiolib-acpi.h +++ b/drivers/gpio/gpiolib-acpi.h @@ -36,6 +36,8 @@ struct acpi_gpio_info { void acpi_gpiochip_add(struct gpio_chip *chip); void acpi_gpiochip_remove(struct gpio_chip *chip); +void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev); + void acpi_gpiochip_request_interrupts(struct gpio_chip *chip); void acpi_gpiochip_free_interrupts(struct gpio_chip *chip); @@ -58,6 +60,8 @@ int acpi_gpio_count(struct device *dev, const char *con_id); static inline void acpi_gpiochip_add(struct gpio_chip *chip) { } static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { } +static inline void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev) { } + static inline void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index baf0153b7bca..bbcc7c073f63 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -1042,11 +1042,13 @@ void of_gpiochip_remove(struct gpio_chip *chip) void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev) { + /* Set default OF node to parent's one if present */ + if (gc->parent) + gdev->dev.of_node = gc->parent->of_node; + /* If the gpiochip has an assigned OF node this takes precedence */ if (gc->of_node) gdev->dev.of_node = gc->of_node; else gc->of_node = gdev->dev.of_node; - if (gdev->dev.of_node) - gdev->dev.fwnode = of_fwnode_handle(gdev->dev.of_node); } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 6367646dce83..1427c1be749b 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -586,14 +586,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, if (!gdev) return -ENOMEM; gdev->dev.bus = &gpio_bus_type; + gdev->dev.parent = gc->parent; gdev->chip = gc; gc->gpiodev = gdev; - if (gc->parent) { - gdev->dev.parent = gc->parent; - gdev->dev.of_node = gc->parent->of_node; - } of_gpio_dev_init(gc, gdev); + acpi_gpio_dev_init(gc, gdev); /* * Assign fwnode depending on the result of the previous calls, @@ -1465,9 +1463,8 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc, struct lock_class_key *lock_key, struct lock_class_key *request_key) { + struct fwnode_handle *fwnode = dev_fwnode(&gc->gpiodev->dev); struct irq_chip *irqchip = gc->irq.chip; - const struct irq_domain_ops *ops = NULL; - struct device_node *np; unsigned int type; unsigned int i; @@ -1479,7 +1476,6 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc, return -EINVAL; } - np = gc->gpiodev->dev.of_node; type = gc->irq.default_type; /* @@ -1487,15 +1483,9 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc, * used to configure the interrupts, as you may end up with * conflicting 
triggers. Tell the user, and reset to NONE. */ - if (WARN(np && type != IRQ_TYPE_NONE, - "%s: Ignoring %u default trigger\n", np->full_name, type)) - type = IRQ_TYPE_NONE; - - if (has_acpi_companion(gc->parent) && type != IRQ_TYPE_NONE) { - acpi_handle_warn(ACPI_HANDLE(gc->parent), - "Ignoring %u default trigger\n", type); + if (WARN(fwnode && type != IRQ_TYPE_NONE, + "%pfw: Ignoring %u default trigger\n", fwnode, type)) type = IRQ_TYPE_NONE; - } if (gc->to_irq) chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__); @@ -1512,15 +1502,11 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc, return ret; } else { /* Some drivers provide custom irqdomain ops */ - if (gc->irq.domain_ops) - ops = gc->irq.domain_ops; - - if (!ops) - ops = &gpiochip_domain_ops; - gc->irq.domain = irq_domain_add_simple(np, + gc->irq.domain = irq_domain_create_simple(fwnode, gc->ngpio, gc->irq.first, - ops, gc); + gc->irq.domain_ops ?: &gpiochip_domain_ops, + gc); if (!gc->irq.domain) return -EINVAL; } @@ -3684,11 +3670,12 @@ EXPORT_SYMBOL_GPL(fwnode_gpiod_get_index); */ int gpiod_count(struct device *dev, const char *con_id) { + const struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL; int count = -ENOENT; - if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) + if (is_of_node(fwnode)) count = of_gpio_get_count(dev, con_id); - else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev)) + else if (is_acpi_node(fwnode)) count = acpi_gpio_count(dev, con_id); if (count < 0) @@ -3826,18 +3813,17 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, int ret; /* Maybe we have a device name, maybe not */ const char *devname = dev ? dev_name(dev) : "?"; + const struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL; dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id); - if (dev) { - /* Using device tree? */ - if (IS_ENABLED(CONFIG_OF) && dev->of_node) { - dev_dbg(dev, "using device tree for GPIO lookup\n"); - desc = of_find_gpio(dev, con_id, idx, &lookupflags); - } else if (ACPI_COMPANION(dev)) { - dev_dbg(dev, "using ACPI for GPIO lookup\n"); - desc = acpi_find_gpio(dev, con_id, idx, &flags, &lookupflags); - } + /* Using device tree? */ + if (is_of_node(fwnode)) { + dev_dbg(dev, "using device tree for GPIO lookup\n"); + desc = of_find_gpio(dev, con_id, idx, &lookupflags); + } else if (is_acpi_node(fwnode)) { + dev_dbg(dev, "using ACPI for GPIO lookup\n"); + desc = acpi_find_gpio(dev, con_id, idx, &flags, &lookupflags); } /* @@ -3921,9 +3907,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, struct gpio_desc *desc = ERR_PTR(-ENODEV); int ret; - if (!fwnode) - return ERR_PTR(-EINVAL); - if (is_of_node(fwnode)) { desc = gpiod_get_from_of_node(to_of_node(fwnode), propname, index, @@ -3939,7 +3922,8 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, acpi_gpio_update_gpiod_flags(&dflags, &info); acpi_gpio_update_gpiod_lookup_flags(&lflags, &info); - } + } else + return ERR_PTR(-EINVAL); /* Currently only ACPI takes this path */ ret = gpiod_request(desc, label); @@ -4220,11 +4204,13 @@ EXPORT_SYMBOL_GPL(gpiod_put_array); static int gpio_bus_match(struct device *dev, struct device_driver *drv) { + struct fwnode_handle *fwnode = dev_fwnode(dev); + /* * Only match if the fwnode doesn't already have a proper struct device * created for it. 
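The gpiolib.c hunks above replace separate dev->of_node and ACPI_COMPANION() checks with a single dev_fwnode() lookup and then dispatch on the fwnode type. A condensed sketch of that dispatch, with example_* helpers standing in for the OF and ACPI lookup paths:

    #include <linux/acpi.h>
    #include <linux/errno.h>
    #include <linux/of.h>
    #include <linux/property.h>

    static int example_of_count(struct device *dev, const char *con_id)
    {
            return 0;       /* stand-in for the DT lookup */
    }

    static int example_acpi_count(struct device *dev, const char *con_id)
    {
            return 0;       /* stand-in for the ACPI lookup */
    }

    static int example_count(struct device *dev, const char *con_id)
    {
            const struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;

            /* is_of_node()/is_acpi_node() are NULL-safe, so one guard suffices. */
            if (is_of_node(fwnode))
                    return example_of_count(dev, con_id);
            if (is_acpi_node(fwnode))
                    return example_acpi_count(dev, con_id);

            return -ENOENT;
    }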
*/ - if (dev->fwnode && dev->fwnode->dev != dev) + if (fwnode && fwnode->dev != dev) return 0; return 1; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index 21cc40897ca8..ce6b664b10aa 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -42,7 +42,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) max_order = MAX_ORDER; #ifdef CONFIG_SWIOTLB - if (swiotlb_nr_tbl()) { + if (is_swiotlb_active()) { unsigned int max_segment; max_segment = swiotlb_max_segment(); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index b81ae90b8449..e8b506a6685b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -321,7 +321,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) } #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86) - need_swiotlb = !!swiotlb_nr_tbl(); + need_swiotlb = is_swiotlb_active(); #endif ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev, diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig index 32cd26352f38..53e13476e831 100644 --- a/drivers/hwspinlock/Kconfig +++ b/drivers/hwspinlock/Kconfig @@ -28,17 +28,6 @@ config HWSPINLOCK_QCOM If unsure, say N. -config HWSPINLOCK_SIRF - tristate "SIRF Hardware Spinlock device" - depends on ARCH_SIRF || COMPILE_TEST - help - Say y here to support the SIRF Hardware Spinlock device, which - provides a synchronisation mechanism for the various processors - on the SoC. - - It's safe to say n here if you're not interested in SIRF hardware - spinlock or just want a bare minimum kernel. - config HWSPINLOCK_SPRD tristate "SPRD Hardware Spinlock device" depends on ARCH_SPRD || COMPILE_TEST diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile index ed053e3f02be..1f8dd6f5814f 100644 --- a/drivers/hwspinlock/Makefile +++ b/drivers/hwspinlock/Makefile @@ -6,7 +6,6 @@ obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o obj-$(CONFIG_HWSPINLOCK_QCOM) += qcom_hwspinlock.o -obj-$(CONFIG_HWSPINLOCK_SIRF) += sirf_hwspinlock.o obj-$(CONFIG_HWSPINLOCK_SPRD) += sprd_hwspinlock.o obj-$(CONFIG_HWSPINLOCK_STM32) += stm32_hwspinlock.o obj-$(CONFIG_HSEM_U8500) += u8500_hsem.o diff --git a/drivers/hwspinlock/sirf_hwspinlock.c b/drivers/hwspinlock/sirf_hwspinlock.c deleted file mode 100644 index a3f77120bad7..000000000000 --- a/drivers/hwspinlock/sirf_hwspinlock.c +++ /dev/null @@ -1,105 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * SIRF hardware spinlock driver - * - * Copyright (c) 2015 Cambridge Silicon Radio Limited, a CSR plc group company. 
- */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/device.h> -#include <linux/io.h> -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/hwspinlock.h> -#include <linux/platform_device.h> -#include <linux/of.h> -#include <linux/of_address.h> - -#include "hwspinlock_internal.h" - -struct sirf_hwspinlock { - void __iomem *io_base; - struct hwspinlock_device bank; -}; - -/* Number of Hardware Spinlocks*/ -#define HW_SPINLOCK_NUMBER 30 - -/* Hardware spinlock register offsets */ -#define HW_SPINLOCK_BASE 0x404 -#define HW_SPINLOCK_OFFSET(x) (HW_SPINLOCK_BASE + 0x4 * (x)) - -static int sirf_hwspinlock_trylock(struct hwspinlock *lock) -{ - void __iomem *lock_addr = lock->priv; - - /* attempt to acquire the lock by reading value == 1 from it */ - return !!readl(lock_addr); -} - -static void sirf_hwspinlock_unlock(struct hwspinlock *lock) -{ - void __iomem *lock_addr = lock->priv; - - /* release the lock by writing 0 to it */ - writel(0, lock_addr); -} - -static const struct hwspinlock_ops sirf_hwspinlock_ops = { - .trylock = sirf_hwspinlock_trylock, - .unlock = sirf_hwspinlock_unlock, -}; - -static int sirf_hwspinlock_probe(struct platform_device *pdev) -{ - struct sirf_hwspinlock *hwspin; - struct hwspinlock *hwlock; - int idx; - - if (!pdev->dev.of_node) - return -ENODEV; - - hwspin = devm_kzalloc(&pdev->dev, - struct_size(hwspin, bank.lock, - HW_SPINLOCK_NUMBER), - GFP_KERNEL); - if (!hwspin) - return -ENOMEM; - - /* retrieve io base */ - hwspin->io_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(hwspin->io_base)) - return PTR_ERR(hwspin->io_base); - - for (idx = 0; idx < HW_SPINLOCK_NUMBER; idx++) { - hwlock = &hwspin->bank.lock[idx]; - hwlock->priv = hwspin->io_base + HW_SPINLOCK_OFFSET(idx); - } - - platform_set_drvdata(pdev, hwspin); - - return devm_hwspin_lock_register(&pdev->dev, &hwspin->bank, - &sirf_hwspinlock_ops, 0, - HW_SPINLOCK_NUMBER); -} - -static const struct of_device_id sirf_hwpinlock_ids[] = { - { .compatible = "sirf,hwspinlock", }, - {}, -}; -MODULE_DEVICE_TABLE(of, sirf_hwpinlock_ids); - -static struct platform_driver sirf_hwspinlock_driver = { - .probe = sirf_hwspinlock_probe, - .driver = { - .name = "atlas7_hwspinlock", - .of_match_table = sirf_hwpinlock_ids, - }, -}; - -module_platform_driver(sirf_hwspinlock_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("SIRF Hardware spinlock driver"); -MODULE_AUTHOR("Wei Chen <[email protected]>"); diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index efaf5eab40a1..7bcdd1205535 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -496,8 +496,6 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr, unsigned long attrs) { struct iommu_domain *domain = iommu_get_dma_domain(dev); - struct iommu_dma_cookie *cookie = domain->iova_cookie; - struct iova_domain *iovad = &cookie->iovad; phys_addr_t phys; phys = iommu_iova_to_phys(domain, dma_addr); @@ -507,8 +505,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr, __iommu_dma_unmap(dev, dma_addr, size); if (unlikely(is_swiotlb_buffer(phys))) - swiotlb_tbl_unmap_single(dev, phys, size, - iova_align(iovad, size), dir, attrs); + swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); } static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, @@ -578,10 +575,8 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys, } iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask); - if ((iova 
== DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys)) - swiotlb_tbl_unmap_single(dev, phys, org_size, - aligned_size, dir, attrs); - + if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys)) + swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs); return iova; } @@ -647,23 +642,12 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev, return pages; } -/** - * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space - * @dev: Device to allocate memory for. Must be a real device - * attached to an iommu_dma_domain - * @size: Size of buffer in bytes - * @dma_handle: Out argument for allocated DMA handle - * @gfp: Allocation flags - * @prot: pgprot_t to use for the remapped mapping - * @attrs: DMA attributes for this allocation - * - * If @size is less than PAGE_SIZE, then a full CPU page will be allocated, +/* + * If size is less than PAGE_SIZE, then a full CPU page will be allocated, * but an IOMMU which supports smaller pages might not map the whole thing. - * - * Return: Mapped virtual address, or NULL on failure. */ -static void *iommu_dma_alloc_remap(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot, +static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev, + size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot, unsigned long attrs) { struct iommu_domain *domain = iommu_get_dma_domain(dev); @@ -673,11 +657,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; struct page **pages; - struct sg_table sgt; dma_addr_t iova; - void *vaddr; - - *dma_handle = DMA_MAPPING_ERROR; if (static_branch_unlikely(&iommu_deferred_attach_enabled) && iommu_deferred_attach(dev, domain)) @@ -704,41 +684,91 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, if (!iova) goto out_free_pages; - if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL)) + if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL)) goto out_free_iova; if (!(ioprot & IOMMU_CACHE)) { struct scatterlist *sg; int i; - for_each_sg(sgt.sgl, sg, sgt.orig_nents, i) + for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) arch_dma_prep_coherent(sg_page(sg), sg->length); } - if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot) + if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot) < size) goto out_free_sg; + sgt->sgl->dma_address = iova; + sgt->sgl->dma_length = size; + return pages; + +out_free_sg: + sg_free_table(sgt); +out_free_iova: + iommu_dma_free_iova(cookie, iova, size, NULL); +out_free_pages: + __iommu_dma_free_pages(pages, count); + return NULL; +} + +static void *iommu_dma_alloc_remap(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot, + unsigned long attrs) +{ + struct page **pages; + struct sg_table sgt; + void *vaddr; + + pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot, + attrs); + if (!pages) + return NULL; + *dma_handle = sgt.sgl->dma_address; + sg_free_table(&sgt); vaddr = dma_common_pages_remap(pages, size, prot, __builtin_return_address(0)); if (!vaddr) goto out_unmap; - - *dma_handle = iova; - sg_free_table(&sgt); return vaddr; out_unmap: - __iommu_dma_unmap(dev, iova, size); -out_free_sg: - sg_free_table(&sgt); -out_free_iova: - iommu_dma_free_iova(cookie, iova, size, NULL); -out_free_pages: - __iommu_dma_free_pages(pages, count); + __iommu_dma_unmap(dev, 
*dma_handle, size); + __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT); return NULL; } +#ifdef CONFIG_DMA_REMAP +static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, + size_t size, enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs) +{ + struct dma_sgt_handle *sh; + + sh = kmalloc(sizeof(*sh), gfp); + if (!sh) + return NULL; + + sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp, + PAGE_KERNEL, attrs); + if (!sh->pages) { + kfree(sh); + return NULL; + } + return &sh->sgt; +} + +static void iommu_dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir) +{ + struct dma_sgt_handle *sh = sgt_handle(sgt); + + __iommu_dma_unmap(dev, sgt->sgl->dma_address, size); + __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT); + sg_free_table(&sh->sgt); +} +#endif /* CONFIG_DMA_REMAP */ + static void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) { @@ -752,7 +782,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev, arch_sync_dma_for_cpu(phys, size, dir); if (is_swiotlb_buffer(phys)) - swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU); + swiotlb_sync_single_for_cpu(dev, phys, size, dir); } static void iommu_dma_sync_single_for_device(struct device *dev, @@ -765,7 +795,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev, phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); if (is_swiotlb_buffer(phys)) - swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE); + swiotlb_sync_single_for_device(dev, phys, size, dir); if (!dev_is_dma_coherent(dev)) arch_sync_dma_for_device(phys, size, dir); @@ -786,8 +816,8 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev, arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); if (is_swiotlb_buffer(sg_phys(sg))) - swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length, - dir, SYNC_FOR_CPU); + swiotlb_sync_single_for_cpu(dev, sg_phys(sg), + sg->length, dir); } } @@ -803,8 +833,8 @@ static void iommu_dma_sync_sg_for_device(struct device *dev, for_each_sg(sgl, sg, nelems, i) { if (is_swiotlb_buffer(sg_phys(sg))) - swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length, - dir, SYNC_FOR_DEVICE); + swiotlb_sync_single_for_device(dev, sg_phys(sg), + sg->length, dir); if (!dev_is_dma_coherent(dev)) arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); @@ -1255,6 +1285,10 @@ static const struct dma_map_ops iommu_dma_ops = { .free = iommu_dma_free, .alloc_pages = dma_common_alloc_pages, .free_pages = dma_common_free_pages, +#ifdef CONFIG_DMA_REMAP + .alloc_noncontiguous = iommu_dma_alloc_noncontiguous, + .free_noncontiguous = iommu_dma_free_noncontiguous, +#endif .mmap = iommu_dma_mmap, .get_sgtable = iommu_dma_get_sgtable, .map_page = iommu_dma_map_page, diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index 5c16ebe037a1..f912fe45bea2 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -1280,7 +1280,8 @@ static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data, break; case X86_IRQ_ALLOC_TYPE_PCI_MSI: case X86_IRQ_ALLOC_TYPE_PCI_MSIX: - set_msi_sid(irte, msi_desc_to_pci_dev(info->desc)); + set_msi_sid(irte, + pci_real_dma_dev(msi_desc_to_pci_dev(info->desc))); break; default: BUG_ON(1); diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index f2f565281e63..a777b389a66e 100644 --- 
a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -6,11 +6,14 @@ * Laurent Pinchart ([email protected]) */ +#include <linux/dma-mapping.h> +#include <linux/highmem.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> +#include <linux/usb/hcd.h> #include <linux/videodev2.h> #include <linux/vmalloc.h> #include <linux/wait.h> @@ -1096,6 +1099,29 @@ static int uvc_video_decode_start(struct uvc_streaming *stream, return data[0]; } +static inline enum dma_data_direction uvc_stream_dir( + struct uvc_streaming *stream) +{ + if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + return DMA_FROM_DEVICE; + else + return DMA_TO_DEVICE; +} + +static inline struct device *uvc_stream_to_dmadev(struct uvc_streaming *stream) +{ + return bus_to_hcd(stream->dev->udev->bus)->self.sysdev; +} + +static int uvc_submit_urb(struct uvc_urb *uvc_urb, gfp_t mem_flags) +{ + /* Sync DMA. */ + dma_sync_sgtable_for_device(uvc_stream_to_dmadev(uvc_urb->stream), + uvc_urb->sgt, + uvc_stream_dir(uvc_urb->stream)); + return usb_submit_urb(uvc_urb->urb, mem_flags); +} + /* * uvc_video_decode_data_work: Asynchronous memcpy processing * @@ -1117,7 +1143,7 @@ static void uvc_video_copy_data_work(struct work_struct *work) uvc_queue_buffer_release(op->buf); } - ret = usb_submit_urb(uvc_urb->urb, GFP_KERNEL); + ret = uvc_submit_urb(uvc_urb, GFP_KERNEL); if (ret < 0) dev_err(&uvc_urb->stream->intf->dev, "Failed to resubmit video URB (%d).\n", ret); @@ -1537,6 +1563,12 @@ static void uvc_video_complete(struct urb *urb) /* Re-initialise the URB async work. */ uvc_urb->async_operations = 0; + /* Sync DMA and invalidate vmap range. */ + dma_sync_sgtable_for_cpu(uvc_stream_to_dmadev(uvc_urb->stream), + uvc_urb->sgt, uvc_stream_dir(stream)); + invalidate_kernel_vmap_range(uvc_urb->buffer, + uvc_urb->stream->urb_size); + /* * Process the URB headers, and optionally queue expensive memcpy tasks * to be deferred to a work queue. @@ -1545,7 +1577,7 @@ static void uvc_video_complete(struct urb *urb) /* If no async work is needed, resubmit the URB immediately. 
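uvc_submit_urb() and the completion path pair dma_sync_sgtable_for_device() with dma_sync_sgtable_for_cpu() because the URB buffers become streaming, possibly non-coherent, mappings: the device owns the buffer between the two calls and the CPU must not touch it in that window. A minimal sketch of the ownership hand-off, assuming a capture-direction sg_table and a vmap'ed kernel address:

    #include <linux/dma-mapping.h>
    #include <linux/highmem.h>
    #include <linux/usb.h>

    static int example_submit(struct device *dma_dev, struct sg_table *sgt,
                              struct urb *urb, gfp_t gfp)
    {
            /* Hand the buffer to the device before queueing the URB. */
            dma_sync_sgtable_for_device(dma_dev, sgt, DMA_FROM_DEVICE);
            return usb_submit_urb(urb, gfp);
    }

    static void example_complete(struct device *dma_dev, struct sg_table *sgt,
                                 void *vaddr, unsigned int size)
    {
            /* Take the buffer back before the CPU parses the payload. */
            dma_sync_sgtable_for_cpu(dma_dev, sgt, DMA_FROM_DEVICE);
            invalidate_kernel_vmap_range(vaddr, size);
    }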
*/ if (!uvc_urb->async_operations) { - ret = usb_submit_urb(uvc_urb->urb, GFP_ATOMIC); + ret = uvc_submit_urb(uvc_urb, GFP_ATOMIC); if (ret < 0) dev_err(&stream->intf->dev, "Failed to resubmit video URB (%d).\n", ret); @@ -1560,24 +1592,49 @@ static void uvc_video_complete(struct urb *urb) */ static void uvc_free_urb_buffers(struct uvc_streaming *stream) { + struct device *dma_dev = uvc_stream_to_dmadev(stream); struct uvc_urb *uvc_urb; for_each_uvc_urb(uvc_urb, stream) { if (!uvc_urb->buffer) continue; -#ifndef CONFIG_DMA_NONCOHERENT - usb_free_coherent(stream->dev->udev, stream->urb_size, - uvc_urb->buffer, uvc_urb->dma); -#else - kfree(uvc_urb->buffer); -#endif + dma_vunmap_noncontiguous(dma_dev, uvc_urb->buffer); + dma_free_noncontiguous(dma_dev, stream->urb_size, uvc_urb->sgt, + uvc_stream_dir(stream)); + uvc_urb->buffer = NULL; + uvc_urb->sgt = NULL; } stream->urb_size = 0; } +static bool uvc_alloc_urb_buffer(struct uvc_streaming *stream, + struct uvc_urb *uvc_urb, gfp_t gfp_flags) +{ + struct device *dma_dev = uvc_stream_to_dmadev(stream); + + uvc_urb->sgt = dma_alloc_noncontiguous(dma_dev, stream->urb_size, + uvc_stream_dir(stream), + gfp_flags, 0); + if (!uvc_urb->sgt) + return false; + uvc_urb->dma = uvc_urb->sgt->sgl->dma_address; + + uvc_urb->buffer = dma_vmap_noncontiguous(dma_dev, stream->urb_size, + uvc_urb->sgt); + if (!uvc_urb->buffer) { + dma_free_noncontiguous(dma_dev, stream->urb_size, + uvc_urb->sgt, + uvc_stream_dir(stream)); + uvc_urb->sgt = NULL; + return false; + } + + return true; +} + /* * Allocate transfer buffers. This function can be called with buffers * already allocated when resuming from suspend, in which case it will @@ -1608,19 +1665,12 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream, /* Retry allocations until one succeed. */ for (; npackets > 1; npackets /= 2) { + stream->urb_size = psize * npackets; + for (i = 0; i < UVC_URBS; ++i) { struct uvc_urb *uvc_urb = &stream->uvc_urb[i]; - stream->urb_size = psize * npackets; -#ifndef CONFIG_DMA_NONCOHERENT - uvc_urb->buffer = usb_alloc_coherent( - stream->dev->udev, stream->urb_size, - gfp_flags | __GFP_NOWARN, &uvc_urb->dma); -#else - uvc_urb->buffer = - kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN); -#endif - if (!uvc_urb->buffer) { + if (!uvc_alloc_urb_buffer(stream, uvc_urb, gfp_flags)) { uvc_free_urb_buffers(stream); break; } @@ -1730,12 +1780,8 @@ static int uvc_init_video_isoc(struct uvc_streaming *stream, urb->context = uvc_urb; urb->pipe = usb_rcvisocpipe(stream->dev->udev, ep->desc.bEndpointAddress); -#ifndef CONFIG_DMA_NONCOHERENT urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; urb->transfer_dma = uvc_urb->dma; -#else - urb->transfer_flags = URB_ISO_ASAP; -#endif urb->interval = ep->desc.bInterval; urb->transfer_buffer = uvc_urb->buffer; urb->complete = uvc_video_complete; @@ -1795,10 +1841,8 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream, usb_fill_bulk_urb(urb, stream->dev->udev, pipe, uvc_urb->buffer, size, uvc_video_complete, uvc_urb); -#ifndef CONFIG_DMA_NONCOHERENT urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; urb->transfer_dma = uvc_urb->dma; -#endif uvc_urb->urb = urb; } @@ -1895,7 +1939,7 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream, /* Submit the URBs. 
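The allocation rework below rests on the dma_alloc_noncontiguous() family that this series wires up in dma-iommu: allocate an sg_table for the device, vmap it for CPU access, and tear both down in reverse order. A condensed sketch of that lifecycle; dev, size and the capture direction are stand-ins for the stream's real parameters:

    #include <linux/dma-mapping.h>

    static void *example_alloc(struct device *dev, size_t size,
                               struct sg_table **out_sgt)
    {
            struct sg_table *sgt;
            void *vaddr;

            sgt = dma_alloc_noncontiguous(dev, size, DMA_FROM_DEVICE,
                                          GFP_KERNEL, 0);
            if (!sgt)
                    return NULL;

            /* Give the CPU a contiguous view of the scattered pages. */
            vaddr = dma_vmap_noncontiguous(dev, size, sgt);
            if (!vaddr) {
                    dma_free_noncontiguous(dev, size, sgt, DMA_FROM_DEVICE);
                    return NULL;
            }

            *out_sgt = sgt;
            return vaddr;
    }

    static void example_free(struct device *dev, size_t size,
                             struct sg_table *sgt, void *vaddr)
    {
            dma_vunmap_noncontiguous(dev, vaddr);
            dma_free_noncontiguous(dev, size, sgt, DMA_FROM_DEVICE);
    }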
*/ for_each_uvc_urb(uvc_urb, stream) { - ret = usb_submit_urb(uvc_urb->urb, gfp_flags); + ret = uvc_submit_urb(uvc_urb, gfp_flags); if (ret < 0) { dev_err(&stream->intf->dev, "Failed to submit URB %u (%d).\n", diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index 97df5ecd66c9..cce5e38133cd 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -219,6 +219,7 @@ */ struct gpio_desc; +struct sg_table; struct uvc_device; /* TODO: Put the most frequently accessed fields at the beginning of @@ -545,7 +546,8 @@ struct uvc_copy_op { * @urb: the URB described by this context structure * @stream: UVC streaming context * @buffer: memory storage for the URB - * @dma: DMA coherent addressing for the urb_buffer + * @dma: Allocated DMA handle + * @sgt: sgt_table with the urb locations in memory * @async_operations: counter to indicate the number of copy operations * @copy_operations: work descriptors for asynchronous copy operations * @work: work queue entry for asynchronous decode @@ -556,6 +558,7 @@ struct uvc_urb { char *buffer; dma_addr_t dma; + struct sg_table *sgt; unsigned int async_operations; struct uvc_copy_op copy_operations[UVC_MAX_PACKETS]; diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index f399edc82191..a7e3eb9befb6 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -1350,6 +1350,7 @@ static int bytes_str_to_int(const char *str) fallthrough; case 'K': result *= 1024; + break; case '\0': break; default: diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index c2da77163f94..7c083ad58274 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -388,8 +388,6 @@ struct ubi_volume_desc { int mode; }; -struct ubi_wl_entry; - /** * struct ubi_debug_info - debugging information for an UBI device. 
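The one-line ubi fix above deserves spelling out: in bytes_str_to_int(), the 'K' case multiplied and then fell through into the '\0' case, which only worked because that case did nothing; the added break makes the intent explicit and keeps a future case insertion from corrupting the result. A sketch of the same switch shape using the kernel's fallthrough macro, assuming the usual G/M/K suffix chain:

    /* Multiply a parsed number by its size suffix; -EINVAL on junk. */
    static long long example_suffix_to_bytes(long long result, char suffix)
    {
            switch (suffix) {
            case 'G':
                    result *= 1024;
                    fallthrough;    /* G is 1024 M */
            case 'M':
                    result *= 1024;
                    fallthrough;    /* M is 1024 K */
            case 'K':
                    result *= 1024;
                    break;          /* the fix: do not fall into '\0' */
            case '\0':
                    break;
            default:
                    return -EINVAL;
            }

            return result;
    }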
* diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 3e8a179f39db..c0986096c701 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8057,7 +8057,7 @@ bnx2_read_vpd_fw_ver(struct bnx2 *bp) data[i + 3] = data[i + BNX2_VPD_LEN]; } - i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA); + i = pci_vpd_find_tag(data, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA); if (i < 0) goto vpd_done; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 56801387591d..281b1c2e04a7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12206,8 +12206,7 @@ static void bnx2x_read_fwinfo(struct bnx2x *bp) /* VPD RO tag should be first tag after identifier string, hence * we should be able to find it in first BNX2X_VPD_LEN chars */ - i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN, - PCI_VPD_LRDT_RO_DATA); + i = pci_vpd_find_tag(vpd_start, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA); if (i < 0) goto out_not_found; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e5d52288d0a4..2985844634c8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -12794,7 +12794,7 @@ static void bnxt_vpd_read_info(struct bnxt *bp) goto exit; } - i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); + i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); if (i < 0) { netdev_err(bp->dev, "VPD READ-Only not found\n"); goto exit; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index d2381929931b..b0e49643f483 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -13016,7 +13016,7 @@ static int tg3_test_nvram(struct tg3 *tp) if (!buf) return -ENOMEM; - i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA); + i = pci_vpd_find_tag((u8 *)buf, len, PCI_VPD_LRDT_RO_DATA); if (i > 0) { j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); if (j < 0) @@ -15629,7 +15629,7 @@ static void tg3_read_vpd(struct tg3 *tp) if (!vpd_data) goto out_no_vpd; - i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA); + i = pci_vpd_find_tag(vpd_data, vpdlen, PCI_VPD_LRDT_RO_DATA); if (i < 0) goto out_not_found; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 80882cfc370f..9428ef1f04a8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2775,7 +2775,7 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p) if (id_len > ID_LEN) id_len = ID_LEN; - i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA); + i = pci_vpd_find_tag(vpd, VPD_LEN, PCI_VPD_LRDT_RO_DATA); if (i < 0) { dev_err(adapter->pdev_dev, "missing VPD-R section\n"); ret = -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 37fb2e1fb278..dfea14399607 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -132,7 +132,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal, /* Allow mlxsw thermal zone binding to an external cooling device */ for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) { if (strnstr(cdev->type, mlxsw_thermal_external_allowed_cdev[i], - 
sizeof(cdev->type))) + strlen(cdev->type))) return 0; } diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 3e86fbe21431..2c89cde7da1e 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4398,20 +4398,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) if (net_ratelimit()) netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n", pci_cmd, pci_status_errs); - /* - * The recovery sequence below admits a very elaborated explanation: - * - it seems to work; - * - I did not see what else could be done; - * - it makes iop3xx happy. - * - * Feel free to adjust to your needs. - */ - if (pdev->broken_parity_status) - pci_cmd &= ~PCI_COMMAND_PARITY; - else - pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY; - - pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); } diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 36c8625a6fd7..c746ca7235f1 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -920,7 +920,7 @@ static void efx_probe_vpd_strings(struct efx_nic *efx) } /* Get the Read only section */ - ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); + ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); if (ro_start < 0) { netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); return; diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index f8979991970e..5e7a57b680ca 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2800,7 +2800,7 @@ static void ef4_probe_vpd_strings(struct ef4_nic *efx) } /* Get the Read only section */ - ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); + ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); if (ro_start < 0) { netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); return; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7fda2ae4c40f..9b6a4a875c55 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2870,9 +2870,13 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) { int i; - vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); - if (!vi->ctrl) - goto err_ctrl; + if (vi->has_cvq) { + vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); + if (!vi->ctrl) + goto err_ctrl; + } else { + vi->ctrl = NULL; + } vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); if (!vi->sq) goto err_sq; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 2a7339b12b13..398390c59344 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -146,8 +146,8 @@ void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) if (mvm->tz_device.tzone) { struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device; - thermal_notify_framework(tz_dev->tzone, - tz_dev->fw_trips_index[ths_crossed]); + thermal_zone_device_update(tz_dev->tzone, + THERMAL_TRIP_VIOLATED); } #endif /* CONFIG_THERMAL */ } diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 180c6fb3ef36..d80160cf34bb 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -1024,7 +1024,6 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, struct device_node *overlay_root = NULL; *ovcs_id = 0; - ret = 
0; if (overlay_fdt_size < sizeof(struct fdt_header) || fdt_check_header(overlay_fdt)) { @@ -1195,8 +1194,6 @@ int of_overlay_remove(int *ovcs_id) struct overlay_changeset *ovcs; int ret, ret_apply, ret_tmp; - ret = 0; - if (devicetree_corrupt()) { pr_err("suspect devicetree state, refuse to remove overlay\n"); ret = -EBUSY; diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index 0d3719407b8b..6d7d64939f82 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -480,7 +480,7 @@ EXPORT_SYMBOL_GPL(pci_pasid_features); #define PASID_NUMBER_SHIFT 8 #define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT) /** - * pci_max_pasid - Get maximum number of PASIDs supported by device + * pci_max_pasids - Get maximum number of PASIDs supported by device * @pdev: PCI device structure * * Returns negative value when PASID capability is not present. diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 5aa8977d7b0f..2f2c8a1729f9 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -41,7 +41,6 @@ config PCI_TEGRA bool "NVIDIA Tegra PCIe controller" depends on ARCH_TEGRA || COMPILE_TEST depends on PCI_MSI_IRQ_DOMAIN - select PCI_MSI_ARCH_FALLBACKS help Say Y here if you want support for the PCIe host controller found on NVIDIA Tegra SoCs. @@ -59,7 +58,6 @@ config PCIE_RCAR_HOST bool "Renesas R-Car PCIe host controller" depends on ARCH_RENESAS || COMPILE_TEST depends on PCI_MSI_IRQ_DOMAIN - select PCI_MSI_ARCH_FALLBACKS help Say Y here if you want PCIe controller support on R-Car SoCs in host mode. @@ -88,7 +86,7 @@ config PCI_HOST_GENERIC config PCIE_XILINX bool "Xilinx AXI PCIe host bridge support" depends on OF || COMPILE_TEST - select PCI_MSI_ARCH_FALLBACKS + depends on PCI_MSI_IRQ_DOMAIN help Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe Host Bridge driver. @@ -233,6 +231,19 @@ config PCIE_MEDIATEK Say Y here if you want to enable PCIe controller support on MediaTek SoCs. +config PCIE_MEDIATEK_GEN3 + tristate "MediaTek Gen3 PCIe controller" + depends on ARCH_MEDIATEK || COMPILE_TEST + depends on PCI_MSI_IRQ_DOMAIN + help + Adds support for the PCIe Gen3 MAC controller on MediaTek SoCs. + This PCIe controller is compatible with Gen3, Gen2 and Gen1 speeds, + and supports up to 256 MSI interrupt numbers for + multi-function devices. + + Say Y here if you want to enable Gen3 PCIe controller support on + MediaTek SoCs. 
+ config VMD depends on PCI_MSI && X86_64 && SRCU tristate "Intel Volume Management Device Driver" diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile index e4559f2182f2..63e3880a3e87 100644 --- a/drivers/pci/controller/Makefile +++ b/drivers/pci/controller/Makefile @@ -11,10 +11,13 @@ obj-$(CONFIG_PCIE_RCAR_HOST) += pcie-rcar.o pcie-rcar-host.o obj-$(CONFIG_PCIE_RCAR_EP) += pcie-rcar.o pcie-rcar-ep.o obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o +obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o +obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o obj-$(CONFIG_PCIE_XILINX_CPM) += pcie-xilinx-cpm.o obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o +obj-$(CONFIG_PCI_XGENE) += pci-xgene.o obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o @@ -27,6 +30,7 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o +obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie-mediatek-gen3.o obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o obj-$(CONFIG_VMD) += vmd.o obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o @@ -47,8 +51,10 @@ obj-y += mobiveil/ # ARM64 and use internal ifdefs to only build the pieces we need # depending on whether ACPI, the DT driver, or both are enabled. -ifdef CONFIG_PCI +ifdef CONFIG_ACPI +ifdef CONFIG_PCI_QUIRKS obj-$(CONFIG_ARM64) += pci-thunder-ecam.o obj-$(CONFIG_ARM64) += pci-thunder-pem.o obj-$(CONFIG_ARM64) += pci-xgene.o endif +endif diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index 849f1e416ea5..35e61048e133 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -1,11 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * pci-j721e - PCIe controller driver for TI's J721E SoCs * * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com * Author: Kishon Vijay Abraham I <[email protected]> */ +#include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/io.h> @@ -50,6 +51,7 @@ enum link_status { struct j721e_pcie { struct device *dev; + struct clk *refclk; u32 mode; u32 num_lanes; struct cdns_pcie *cdns_pcie; @@ -312,6 +314,7 @@ static int j721e_pcie_probe(struct platform_device *pdev) struct cdns_pcie_ep *ep; struct gpio_desc *gpiod; void __iomem *base; + struct clk *clk; u32 num_lanes; u32 mode; int ret; @@ -411,6 +414,20 @@ static int j721e_pcie_probe(struct platform_device *pdev) goto err_get_sync; } + clk = devm_clk_get_optional(dev, "pcie_refclk"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_err(dev, "failed to get pcie_refclk\n"); + goto err_pcie_setup; + } + + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(dev, "failed to enable pcie_refclk\n"); + goto err_get_sync; + } + pcie->refclk = clk; + /* * "Power Sequencing and Reset Signal Timings" table in * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 
3.0 @@ -425,8 +442,10 @@ static int j721e_pcie_probe(struct platform_device *pdev) } ret = cdns_pcie_host_setup(rc); - if (ret < 0) + if (ret < 0) { + clk_disable_unprepare(pcie->refclk); goto err_pcie_setup; + } break; case PCI_MODE_EP: @@ -479,6 +498,7 @@ static int j721e_pcie_remove(struct platform_device *pdev) struct cdns_pcie *cdns_pcie = pcie->cdns_pcie; struct device *dev = &pdev->dev; + clk_disable_unprepare(pcie->refclk); cdns_pcie_disable_phy(cdns_pcie); pm_runtime_put(dev); pm_runtime_disable(dev); diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 22c5529e9a65..423d35872ce4 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -280,7 +280,7 @@ config PCIE_TEGRA194_EP select PCIE_TEGRA194 help Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to - work in host mode. There are two instances of PCIe controllers in + work in endpoint mode. There are two instances of PCIe controllers in Tegra194. This controller can work either as EP or RC. In order to enable host-specific features PCIE_TEGRA194_HOST must be selected and in order to enable device-specific features PCIE_TEGRA194_EP must be @@ -311,6 +311,7 @@ config PCIE_AL depends on OF && (ARM64 || COMPILE_TEST) depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW_HOST + select PCI_ECAM help Say Y here to enable support of the Amazon's Annapurna Labs PCIe controller IP on Amazon SoCs. The PCIe controller uses the DesignWare @@ -318,4 +319,13 @@ config PCIE_AL required only for DT-based platforms. ACPI platforms with the Annapurna Labs PCIe controller don't need to enable this. +config PCIE_FU740 + bool "SiFive FU740 PCIe host controller" + depends on PCI_MSI_IRQ_DOMAIN + depends on SOC_SIFIVE || COMPILE_TEST + select PCIE_DW_HOST + help + Say Y here if you want PCIe controller support for the SiFive + FU740. + endmenu diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index a751553fa0db..eca805c1a023 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o +obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o obj-$(CONFIG_PCI_IMX6) += pci-imx6.o obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o @@ -17,7 +18,6 @@ obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o obj-$(CONFIG_PCI_MESON) += pci-meson.o -obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o @@ -31,7 +31,13 @@ obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o # ARM64 and use internal ifdefs to only build the pieces we need # depending on whether ACPI, the DT driver, or both are enabled. 
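The pci-j721e changes above use devm_clk_get_optional() deliberately: boards without a separate "pcie_refclk" get NULL back instead of an error, and clk_prepare_enable(NULL) is a harmless no-op, so one code path serves both designs. A minimal sketch of the pattern; example_enable_refclk() is hypothetical, and the driver itself logs with dev_err() rather than dev_err_probe():

    #include <linux/clk.h>
    #include <linux/device.h>

    static int example_enable_refclk(struct device *dev, struct clk **out)
    {
            struct clk *clk;
            int ret;

            /* NULL (no such clock) counts as success for the _optional variant. */
            clk = devm_clk_get_optional(dev, "pcie_refclk");
            if (IS_ERR(clk))
                    return dev_err_probe(dev, PTR_ERR(clk),
                                         "failed to get pcie_refclk\n");

            ret = clk_prepare_enable(clk);  /* no-op when clk is NULL */
            if (ret)
                    return ret;

            *out = clk;
            return 0;
    }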
-ifdef CONFIG_PCI +obj-$(CONFIG_PCIE_AL) += pcie-al.o +obj-$(CONFIG_PCI_HISI) += pcie-hisi.o + +ifdef CONFIG_ACPI +ifdef CONFIG_PCI_QUIRKS obj-$(CONFIG_ARM64) += pcie-al.o obj-$(CONFIG_ARM64) += pcie-hisi.o +obj-$(CONFIG_ARM64) += pcie-tegra194.o +endif endif diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 53aa35cb3a49..bde3b2824e89 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -346,8 +346,9 @@ static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = { }; /** - * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask - * registers + * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers + * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone + * PCIe host controller driver information. * * Since modification of dbi_cs2 involves different clock domain, read the * status back to ensure the transition is complete. @@ -367,6 +368,8 @@ static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie) /** * ks_pcie_clear_dbi_mode() - Disable DBI mode + * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone + * PCIe host controller driver information. * * Since modification of dbi_cs2 involves different clock domain, read the * status back to ensure the transition is complete. @@ -449,6 +452,7 @@ static struct pci_ops ks_child_pcie_ops = { /** * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization + * @bus: A pointer to the PCI bus structure. * * This sets BAR0 to enable inbound access for MSI_IRQ register */ @@ -488,6 +492,8 @@ static struct pci_ops ks_pcie_ops = { /** * ks_pcie_link_up() - Check if link up + * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host + * controller driver information. */ static int ks_pcie_link_up(struct dw_pcie *pci) { @@ -605,7 +611,6 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc) /** * ks_pcie_legacy_irq_handler() - Handle legacy interrupt - * @irq: IRQ line for legacy interrupts * @desc: Pointer to irq descriptor * * Traverse through pending legacy interrupts and invoke handler for each. 
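The pci-keystone hunks in this region exist purely to satisfy kernel-doc: a comment opened with /** must carry one "@name: description" line per parameter, or scripts/kernel-doc (and a W=1 build) warns. The expected shape, shown on a hypothetical function:

    struct example_dev {
            int mode;
    };

    /**
     * example_set_mode() - Set the device operating mode
     * @edev: device state allocated by the probe routine
     * @mode: new mode to apply
     *
     * Return: 0 on success or a negative errno.
     */
    static int example_set_mode(struct example_dev *edev, int mode)
    {
            edev->mode = mode;
            return 0;
    }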
Also @@ -798,7 +803,8 @@ static int __init ks_pcie_host_init(struct pcie_port *pp) int ret; pp->bridge->ops = &ks_pcie_ops; - pp->bridge->child_ops = &ks_child_pcie_ops; + if (!ks_pcie->is_am6) + pp->bridge->child_ops = &ks_child_pcie_ops; ret = ks_pcie_config_legacy_irq(ks_pcie); if (ret) diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c index 39fe2ed5a6a2..39f4664bd84c 100644 --- a/drivers/pci/controller/dwc/pci-layerscape-ep.c +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c @@ -154,7 +154,7 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = pcie->drvdata->dw_pcie_ops; - ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4), + ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4); pcie->pci = pci; pcie->ls_epc = ls_epc; diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 1c25d8337151..8d028a88b375 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -705,6 +705,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) } } + dw_pcie_iatu_detect(pci); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); if (!res) return -EINVAL; diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 7e55b2b66182..a608ae1fad57 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -398,9 +398,9 @@ int dw_pcie_host_init(struct pcie_port *pp) if (ret) goto err_free_msi; } + dw_pcie_iatu_detect(pci); dw_pcie_setup_rc(pp); - dw_pcie_msi_init(pp); if (!dw_pcie_link_up(pci) && pci->ops && pci->ops->start_link) { ret = pci->ops->start_link(pci); @@ -551,6 +551,8 @@ void dw_pcie_setup_rc(struct pcie_port *pp) } } + dw_pcie_msi_init(pp); + /* Setup RC BARs */ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004); dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 004cb860e266..a945f0c0e73d 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -660,11 +660,9 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci) pci->num_ob_windows = ob; } -void dw_pcie_setup(struct dw_pcie *pci) +void dw_pcie_iatu_detect(struct dw_pcie *pci) { - u32 val; struct device *dev = pci->dev; - struct device_node *np = dev->of_node; struct platform_device *pdev = to_platform_device(dev); if (pci->version >= 0x480A || (!pci->version && @@ -693,6 +691,13 @@ void dw_pcie_setup(struct dw_pcie *pci) dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound", pci->num_ob_windows, pci->num_ib_windows); +} + +void dw_pcie_setup(struct dw_pcie *pci) +{ + u32 val; + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; if (pci->link_gen > 0) dw_pcie_link_set_max_speed(pci, pci->link_gen); diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 7247c8b01f04..7d6e9b7576be 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -306,6 +306,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, void dw_pcie_disable_atu(struct dw_pcie *pci, int index, enum dw_pcie_region_type type); void dw_pcie_setup(struct dw_pcie *pci); +void 
dw_pcie_iatu_detect(struct dw_pcie *pci); static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) { diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c new file mode 100644 index 000000000000..00cde9a248b5 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-fu740.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * FU740 DesignWare PCIe Controller integration + * Copyright (C) 2019-2021 SiFive, Inc. + * Paul Walmsley + * Greentime Hu + * + * Based in part on the i.MX6 PCIe host controller shim which is: + * + * Copyright (C) 2013 Kosagi + * https://www.kosagi.com + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/gpio/consumer.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/resource.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/reset.h> + +#include "pcie-designware.h" + +#define to_fu740_pcie(x) dev_get_drvdata((x)->dev) + +struct fu740_pcie { + struct dw_pcie pci; + void __iomem *mgmt_base; + struct gpio_desc *reset; + struct gpio_desc *pwren; + struct clk *pcie_aux; + struct reset_control *rst; +}; + +#define SIFIVE_DEVICESRESETREG 0x28 + +#define PCIEX8MGMT_PERST_N 0x0 +#define PCIEX8MGMT_APP_LTSSM_ENABLE 0x10 +#define PCIEX8MGMT_APP_HOLD_PHY_RST 0x18 +#define PCIEX8MGMT_DEVICE_TYPE 0x708 +#define PCIEX8MGMT_PHY0_CR_PARA_ADDR 0x860 +#define PCIEX8MGMT_PHY0_CR_PARA_RD_EN 0x870 +#define PCIEX8MGMT_PHY0_CR_PARA_RD_DATA 0x878 +#define PCIEX8MGMT_PHY0_CR_PARA_SEL 0x880 +#define PCIEX8MGMT_PHY0_CR_PARA_WR_DATA 0x888 +#define PCIEX8MGMT_PHY0_CR_PARA_WR_EN 0x890 +#define PCIEX8MGMT_PHY0_CR_PARA_ACK 0x898 +#define PCIEX8MGMT_PHY1_CR_PARA_ADDR 0x8a0 +#define PCIEX8MGMT_PHY1_CR_PARA_RD_EN 0x8b0 +#define PCIEX8MGMT_PHY1_CR_PARA_RD_DATA 0x8b8 +#define PCIEX8MGMT_PHY1_CR_PARA_SEL 0x8c0 +#define PCIEX8MGMT_PHY1_CR_PARA_WR_DATA 0x8c8 +#define PCIEX8MGMT_PHY1_CR_PARA_WR_EN 0x8d0 +#define PCIEX8MGMT_PHY1_CR_PARA_ACK 0x8d8 + +#define PCIEX8MGMT_PHY_CDR_TRACK_EN BIT(0) +#define PCIEX8MGMT_PHY_LOS_THRSHLD BIT(5) +#define PCIEX8MGMT_PHY_TERM_EN BIT(9) +#define PCIEX8MGMT_PHY_TERM_ACDC BIT(10) +#define PCIEX8MGMT_PHY_EN BIT(11) +#define PCIEX8MGMT_PHY_INIT_VAL (PCIEX8MGMT_PHY_CDR_TRACK_EN|\ + PCIEX8MGMT_PHY_LOS_THRSHLD|\ + PCIEX8MGMT_PHY_TERM_EN|\ + PCIEX8MGMT_PHY_TERM_ACDC|\ + PCIEX8MGMT_PHY_EN) + +#define PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 0x1008 +#define PCIEX8MGMT_PHY_LANE_OFF 0x100 +#define PCIEX8MGMT_PHY_LANE0_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 0) +#define PCIEX8MGMT_PHY_LANE1_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 1) +#define PCIEX8MGMT_PHY_LANE2_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 2) +#define PCIEX8MGMT_PHY_LANE3_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 3) + +static void fu740_pcie_assert_reset(struct fu740_pcie *afp) +{ + /* Assert PERST_N GPIO */ + gpiod_set_value_cansleep(afp->reset, 0); + /* Assert controller PERST_N */ + writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_PERST_N); +} + +static void fu740_pcie_deassert_reset(struct fu740_pcie *afp) +{ + /* Deassert controller PERST_N */ + writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PERST_N); + /* Deassert PERST_N GPIO */ + gpiod_set_value_cansleep(afp->reset, 1); +} + +static void fu740_pcie_power_on(struct fu740_pcie *afp) 
+{ + gpiod_set_value_cansleep(afp->pwren, 1); + /* + * Ensure that PERST has been asserted for at least 100 ms. + * Section 2.2 of PCI Express Card Electromechanical Specification + * Revision 3.0 + */ + msleep(100); +} + +static void fu740_pcie_drive_reset(struct fu740_pcie *afp) +{ + fu740_pcie_assert_reset(afp); + fu740_pcie_power_on(afp); + fu740_pcie_deassert_reset(afp); +} + +static void fu740_phyregwrite(const uint8_t phy, const uint16_t addr, + const uint16_t wrdata, struct fu740_pcie *afp) +{ + struct device *dev = afp->pci.dev; + void __iomem *phy_cr_para_addr; + void __iomem *phy_cr_para_wr_data; + void __iomem *phy_cr_para_wr_en; + void __iomem *phy_cr_para_ack; + int ret, val; + + /* Setup */ + if (phy) { + phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ADDR; + phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_DATA; + phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_EN; + phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ACK; + } else { + phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ADDR; + phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_DATA; + phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_EN; + phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ACK; + } + + writel_relaxed(addr, phy_cr_para_addr); + writel_relaxed(wrdata, phy_cr_para_wr_data); + writel_relaxed(1, phy_cr_para_wr_en); + + /* Wait for wait_idle */ + ret = readl_poll_timeout(phy_cr_para_ack, val, val, 10, 5000); + if (ret) + dev_warn(dev, "Wait for wait_idle state failed!\n"); + + /* Clear */ + writel_relaxed(0, phy_cr_para_wr_en); + + /* Wait for ~wait_idle */ + ret = readl_poll_timeout(phy_cr_para_ack, val, !val, 10, 5000); + if (ret) + dev_warn(dev, "Wait for !wait_idle state failed!\n"); +} + +static void fu740_pcie_init_phy(struct fu740_pcie *afp) +{ + /* Enable phy cr_para_sel interfaces */ + writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_SEL); + writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_SEL); + + /* + * Wait 10 cr_para cycles to guarantee that the registers are ready + * to be edited. 
+ */ + ndelay(10); + + /* Set PHY AC termination mode */ + fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); + fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); + fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); + fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); + fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); + fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); + fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); + fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp); +} + +static int fu740_pcie_start_link(struct dw_pcie *pci) +{ + struct device *dev = pci->dev; + struct fu740_pcie *afp = dev_get_drvdata(dev); + + /* Enable LTSSM */ + writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_LTSSM_ENABLE); + return 0; +} + +static int fu740_pcie_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct fu740_pcie *afp = to_fu740_pcie(pci); + struct device *dev = pci->dev; + int ret; + + /* Power on reset */ + fu740_pcie_drive_reset(afp); + + /* Enable pcieauxclk */ + ret = clk_prepare_enable(afp->pcie_aux); + if (ret) { + dev_err(dev, "unable to enable pcie_aux clock\n"); + return ret; + } + + /* + * Assert hold_phy_rst (hold the controller LTSSM in reset after + * power_up_rst_n for register programming with cr_para) + */ + writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST); + + /* Deassert power_up_rst_n */ + ret = reset_control_deassert(afp->rst); + if (ret) { + dev_err(dev, "unable to deassert pcie_power_up_rst_n\n"); + return ret; + } + + fu740_pcie_init_phy(afp); + + /* Disable pcieauxclk */ + clk_disable_unprepare(afp->pcie_aux); + /* Clear hold_phy_rst */ + writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST); + /* Enable pcieauxclk */ + ret = clk_prepare_enable(afp->pcie_aux); + /* Set RC mode */ + writel_relaxed(0x4, afp->mgmt_base + PCIEX8MGMT_DEVICE_TYPE); + + return 0; +} + +static const struct dw_pcie_host_ops fu740_pcie_host_ops = { + .host_init = fu740_pcie_host_init, +}; + +static const struct dw_pcie_ops dw_pcie_ops = { + .start_link = fu740_pcie_start_link, +}; + +static int fu740_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct fu740_pcie *afp; + + afp = devm_kzalloc(dev, sizeof(*afp), GFP_KERNEL); + if (!afp) + return -ENOMEM; + pci = &afp->pci; + pci->dev = dev; + pci->ops = &dw_pcie_ops; + pci->pp.ops = &fu740_pcie_host_ops; + + /* SiFive specific region: mgmt */ + afp->mgmt_base = devm_platform_ioremap_resource_byname(pdev, "mgmt"); + if (IS_ERR(afp->mgmt_base)) + return PTR_ERR(afp->mgmt_base); + + /* Fetch GPIOs */ + afp->reset = devm_gpiod_get_optional(dev, "reset-gpios", GPIOD_OUT_LOW); + if (IS_ERR(afp->reset)) + return dev_err_probe(dev, PTR_ERR(afp->reset), "unable to get reset-gpios\n"); + + afp->pwren = devm_gpiod_get_optional(dev, "pwren-gpios", GPIOD_OUT_LOW); + if (IS_ERR(afp->pwren)) + return dev_err_probe(dev, PTR_ERR(afp->pwren), "unable to get pwren-gpios\n"); + + /* Fetch clocks */ + afp->pcie_aux = devm_clk_get(dev, "pcie_aux"); + if (IS_ERR(afp->pcie_aux)) + return dev_err_probe(dev, PTR_ERR(afp->pcie_aux), + "pcie_aux clock source missing or invalid\n"); + + /* Fetch reset */ + afp->rst = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(afp->rst)) + return dev_err_probe(dev, PTR_ERR(afp->rst), 
"unable to get reset\n"); + + platform_set_drvdata(pdev, afp); + + return dw_pcie_host_init(&pci->pp); +} + +static void fu740_pcie_shutdown(struct platform_device *pdev) +{ + struct fu740_pcie *afp = platform_get_drvdata(pdev); + + /* Bring down link, so bootloader gets clean state in case of reboot */ + fu740_pcie_assert_reset(afp); +} + +static const struct of_device_id fu740_pcie_of_match[] = { + { .compatible = "sifive,fu740-pcie", }, + {}, +}; + +static struct platform_driver fu740_pcie_driver = { + .driver = { + .name = "fu740-pcie", + .of_match_table = fu740_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = fu740_pcie_probe, + .shutdown = fu740_pcie_shutdown, +}; + +builtin_platform_driver(fu740_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index 0cedd1f95f37..f89a7d24ba28 100644 --- a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -81,11 +81,6 @@ static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val) writel(val, base + ofs); } -static inline u32 pcie_app_rd(struct intel_pcie_port *lpp, u32 ofs) -{ - return readl(lpp->app_base + ofs); -} - static inline void pcie_app_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val) { writel(val, lpp->app_base + ofs); diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 6fa216e52d14..bafd2c6ab3c2 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -22,6 +22,8 @@ #include <linux/of_irq.h> #include <linux/of_pci.h> #include <linux/pci.h> +#include <linux/pci-acpi.h> +#include <linux/pci-ecam.h> #include <linux/phy/phy.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> @@ -311,6 +313,104 @@ struct tegra_pcie_dw_of_data { enum dw_pcie_device_mode mode; }; +#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) +struct tegra194_pcie_ecam { + void __iomem *config_base; + void __iomem *iatu_base; + void __iomem *dbi_base; +}; + +static int tegra194_acpi_init(struct pci_config_window *cfg) +{ + struct device *dev = cfg->parent; + struct tegra194_pcie_ecam *pcie_ecam; + + pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL); + if (!pcie_ecam) + return -ENOMEM; + + pcie_ecam->config_base = cfg->win; + pcie_ecam->iatu_base = cfg->win + SZ_256K; + pcie_ecam->dbi_base = cfg->win + SZ_512K; + cfg->priv = pcie_ecam; + + return 0; +} + +static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index, + u32 val, u32 reg) +{ + u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); + + writel(val, pcie_ecam->iatu_base + offset + reg); +} + +static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam, + int index, int type, u64 cpu_addr, + u64 pci_addr, u64 size) +{ + atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr), + PCIE_ATU_LOWER_BASE); + atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr), + PCIE_ATU_UPPER_BASE); + atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr), + PCIE_ATU_LOWER_TARGET); + atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1), + PCIE_ATU_LIMIT); + atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr), + PCIE_ATU_UPPER_TARGET); + atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1); + atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2); +} + +static void __iomem *tegra194_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct pci_config_window *cfg = bus->sysdata; + struct 
tegra194_pcie_ecam *pcie_ecam = cfg->priv; + u32 busdev; + int type; + + if (bus->number < cfg->busr.start || bus->number > cfg->busr.end) + return NULL; + + if (bus->number == cfg->busr.start) { + if (PCI_SLOT(devfn) == 0) + return pcie_ecam->dbi_base + where; + else + return NULL; + } + + busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | + PCIE_ATU_FUNC(PCI_FUNC(devfn)); + + if (bus->parent->number == cfg->busr.start) { + if (PCI_SLOT(devfn) == 0) + type = PCIE_ATU_TYPE_CFG0; + else + return NULL; + } else { + type = PCIE_ATU_TYPE_CFG1; + } + + program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev, + SZ_256K); + + return pcie_ecam->config_base + where; +} + +const struct pci_ecam_ops tegra194_pcie_ops = { + .init = tegra194_acpi_init, + .pci_ops = { + .map_bus = tegra194_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, + } +}; +#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */ + +#ifdef CONFIG_PCIE_TEGRA194 + static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci) { return container_of(pci, struct tegra_pcie_dw, pci); @@ -1019,7 +1119,7 @@ static const struct dw_pcie_ops tegra_dw_pcie_ops = { .stop_link = tegra_pcie_dw_stop_link, }; -static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = { +static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = { .host_init = tegra_pcie_dw_host_init, }; @@ -1645,7 +1745,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie) if (pcie->ep_state == EP_STATE_ENABLED) return; - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) { dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", ret); @@ -1881,7 +1981,7 @@ tegra_pcie_ep_get_features(struct dw_pcie_ep *ep) return &tegra_pcie_epc_features; } -static struct dw_pcie_ep_ops pcie_ep_ops = { +static const struct dw_pcie_ep_ops pcie_ep_ops = { .raise_irq = tegra_pcie_ep_raise_irq, .get_features = tegra_pcie_ep_get_features, }; @@ -2311,3 +2411,5 @@ MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match); MODULE_AUTHOR("Vidya Sagar <[email protected]>"); MODULE_DESCRIPTION("NVIDIA PCIe host controller driver"); MODULE_LICENSE("GPL v2"); + +#endif /* CONFIG_PCIE_TEGRA194 */ diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig index a62d247018cf..e4643fb94e78 100644 --- a/drivers/pci/controller/mobiveil/Kconfig +++ b/drivers/pci/controller/mobiveil/Kconfig @@ -24,8 +24,7 @@ config PCIE_MOBIVEIL_PLAT config PCIE_LAYERSCAPE_GEN4 bool "Freescale Layerscape PCIe Gen4 controller" - depends on PCI - depends on OF && (ARM64 || ARCH_LAYERSCAPE) + depends on ARCH_LAYERSCAPE || COMPILE_TEST depends on PCI_MSI_IRQ_DOMAIN select PCIE_MOBIVEIL_HOST help diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index 6ab694f8d283..d3924a44db02 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -79,6 +79,7 @@ int pci_host_common_probe(struct platform_device *pdev) bridge->sysdata = cfg; bridge->ops = (struct pci_ops *)&ops->pci_ops; + bridge->msi_domain = true; return pci_host_probe(bridge); } diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 1ff4ce24f4b3..6511648271b2 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -473,7 +473,6 @@ struct hv_pcibus_device { struct list_head dr_list; struct msi_domain_info msi_info; - struct msi_controller msi_chip; struct 
irq_domain *irq_domain; spinlock_t retarget_msi_interrupt_lock; @@ -1866,9 +1865,6 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) if (!hbus->pci_bus) return -ENODEV; - hbus->pci_bus->msi = &hbus->msi_chip; - hbus->pci_bus->msi->dev = &hbus->hdev->device; - pci_lock_rescan_remove(); pci_scan_child_bus(hbus->pci_bus); hv_pci_assign_numa_node(hbus); diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 8fcabed7c6a6..8069bd9232d4 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -21,6 +21,7 @@ #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/init.h> @@ -78,23 +79,8 @@ #define AFI_MSI_FPCI_BAR_ST 0x64 #define AFI_MSI_AXI_BAR_ST 0x68 -#define AFI_MSI_VEC0 0x6c -#define AFI_MSI_VEC1 0x70 -#define AFI_MSI_VEC2 0x74 -#define AFI_MSI_VEC3 0x78 -#define AFI_MSI_VEC4 0x7c -#define AFI_MSI_VEC5 0x80 -#define AFI_MSI_VEC6 0x84 -#define AFI_MSI_VEC7 0x88 - -#define AFI_MSI_EN_VEC0 0x8c -#define AFI_MSI_EN_VEC1 0x90 -#define AFI_MSI_EN_VEC2 0x94 -#define AFI_MSI_EN_VEC3 0x98 -#define AFI_MSI_EN_VEC4 0x9c -#define AFI_MSI_EN_VEC5 0xa0 -#define AFI_MSI_EN_VEC6 0xa4 -#define AFI_MSI_EN_VEC7 0xa8 +#define AFI_MSI_VEC(x) (0x6c + ((x) * 4)) +#define AFI_MSI_EN_VEC(x) (0x8c + ((x) * 4)) #define AFI_CONFIGURATION 0xac #define AFI_CONFIGURATION_EN_FPCI (1 << 0) @@ -280,10 +266,10 @@ #define LINK_RETRAIN_TIMEOUT 100000 /* in usec */ struct tegra_msi { - struct msi_controller chip; DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; - struct mutex lock; + struct mutex map_lock; + spinlock_t mask_lock; void *virt; dma_addr_t phys; int irq; @@ -333,11 +319,6 @@ struct tegra_pcie_soc { } ectl; }; -static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip) -{ - return container_of(chip, struct tegra_msi, chip); -} - struct tegra_pcie { struct device *dev; @@ -372,6 +353,11 @@ struct tegra_pcie { struct dentry *debugfs; }; +static inline struct tegra_pcie *msi_to_pcie(struct tegra_msi *msi) +{ + return container_of(msi, struct tegra_pcie, msi); +} + struct tegra_pcie_port { struct tegra_pcie *pcie; struct device_node *np; @@ -1432,7 +1418,6 @@ static void tegra_pcie_phys_put(struct tegra_pcie *pcie) } } - static int tegra_pcie_get_resources(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; @@ -1509,6 +1494,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie) phys_put: if (soc->program_uphy) tegra_pcie_phys_put(pcie); + return err; } @@ -1551,161 +1537,227 @@ static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port) afi_writel(pcie, val, AFI_PCIE_PME); } -static int tegra_msi_alloc(struct tegra_msi *chip) -{ - int msi; - - mutex_lock(&chip->lock); - - msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR); - if (msi < INT_PCI_MSI_NR) - set_bit(msi, chip->used); - else - msi = -ENOSPC; - - mutex_unlock(&chip->lock); - - return msi; -} - -static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq) +static void tegra_pcie_msi_irq(struct irq_desc *desc) { - struct device *dev = chip->chip.dev; - - mutex_lock(&chip->lock); - - if (!test_bit(irq, chip->used)) - dev_err(dev, "trying to free unused MSI#%lu\n", irq); - else - clear_bit(irq, chip->used); - - mutex_unlock(&chip->lock); -} - -static irqreturn_t tegra_pcie_msi_irq(int irq, void *data) -{ - struct tegra_pcie *pcie = data; - struct device *dev = pcie->dev; + struct 
tegra_pcie *pcie = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); struct tegra_msi *msi = &pcie->msi; - unsigned int i, processed = 0; + struct device *dev = pcie->dev; + unsigned int i; + + chained_irq_enter(chip, desc); for (i = 0; i < 8; i++) { - unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4); + unsigned long reg = afi_readl(pcie, AFI_MSI_VEC(i)); while (reg) { unsigned int offset = find_first_bit(®, 32); unsigned int index = i * 32 + offset; unsigned int irq; - /* clear the interrupt */ - afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4); - - irq = irq_find_mapping(msi->domain, index); + irq = irq_find_mapping(msi->domain->parent, index); if (irq) { - if (test_bit(index, msi->used)) - generic_handle_irq(irq); - else - dev_info(dev, "unhandled MSI\n"); + generic_handle_irq(irq); } else { /* * that's weird who triggered this? * just clear it */ dev_info(dev, "unexpected MSI\n"); + afi_writel(pcie, BIT(index % 32), AFI_MSI_VEC(index)); } /* see if there's any more pending in this vector */ - reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4); - - processed++; + reg = afi_readl(pcie, AFI_MSI_VEC(i)); } } - return processed > 0 ? IRQ_HANDLED : IRQ_NONE; + chained_irq_exit(chip, desc); } -static int tegra_msi_setup_irq(struct msi_controller *chip, - struct pci_dev *pdev, struct msi_desc *desc) +static void tegra_msi_top_irq_ack(struct irq_data *d) { - struct tegra_msi *msi = to_tegra_msi(chip); - struct msi_msg msg; - unsigned int irq; - int hwirq; + irq_chip_ack_parent(d); +} - hwirq = tegra_msi_alloc(msi); - if (hwirq < 0) - return hwirq; +static void tegra_msi_top_irq_mask(struct irq_data *d) +{ + pci_msi_mask_irq(d); + irq_chip_mask_parent(d); +} - irq = irq_create_mapping(msi->domain, hwirq); - if (!irq) { - tegra_msi_free(msi, hwirq); - return -EINVAL; - } +static void tegra_msi_top_irq_unmask(struct irq_data *d) +{ + pci_msi_unmask_irq(d); + irq_chip_unmask_parent(d); +} + +static struct irq_chip tegra_msi_top_chip = { + .name = "Tegra PCIe MSI", + .irq_ack = tegra_msi_top_irq_ack, + .irq_mask = tegra_msi_top_irq_mask, + .irq_unmask = tegra_msi_top_irq_unmask, +}; - irq_set_msi_desc(irq, desc); +static void tegra_msi_irq_ack(struct irq_data *d) +{ + struct tegra_msi *msi = irq_data_get_irq_chip_data(d); + struct tegra_pcie *pcie = msi_to_pcie(msi); + unsigned int index = d->hwirq / 32; - msg.address_lo = lower_32_bits(msi->phys); - msg.address_hi = upper_32_bits(msi->phys); - msg.data = hwirq; + /* clear the interrupt */ + afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index)); +} - pci_write_msi_msg(irq, &msg); +static void tegra_msi_irq_mask(struct irq_data *d) +{ + struct tegra_msi *msi = irq_data_get_irq_chip_data(d); + struct tegra_pcie *pcie = msi_to_pcie(msi); + unsigned int index = d->hwirq / 32; + unsigned long flags; + u32 value; - return 0; + spin_lock_irqsave(&msi->mask_lock, flags); + value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); + value &= ~BIT(d->hwirq % 32); + afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); + spin_unlock_irqrestore(&msi->mask_lock, flags); } -static void tegra_msi_teardown_irq(struct msi_controller *chip, - unsigned int irq) +static void tegra_msi_irq_unmask(struct irq_data *d) { - struct tegra_msi *msi = to_tegra_msi(chip); - struct irq_data *d = irq_get_irq_data(irq); - irq_hw_number_t hwirq = irqd_to_hwirq(d); + struct tegra_msi *msi = irq_data_get_irq_chip_data(d); + struct tegra_pcie *pcie = msi_to_pcie(msi); + unsigned int index = d->hwirq / 32; + unsigned long flags; + u32 value; - 
irq_dispose_mapping(irq); - tegra_msi_free(msi, hwirq); + spin_lock_irqsave(&msi->mask_lock, flags); + value = afi_readl(pcie, AFI_MSI_EN_VEC(index)); + value |= BIT(d->hwirq % 32); + afi_writel(pcie, value, AFI_MSI_EN_VEC(index)); + spin_unlock_irqrestore(&msi->mask_lock, flags); } -static struct irq_chip tegra_msi_irq_chip = { - .name = "Tegra PCIe MSI", - .irq_enable = pci_msi_unmask_irq, - .irq_disable = pci_msi_mask_irq, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, +static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) +{ + return -EINVAL; +} + +static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct tegra_msi *msi = irq_data_get_irq_chip_data(data); + + msg->address_lo = lower_32_bits(msi->phys); + msg->address_hi = upper_32_bits(msi->phys); + msg->data = data->hwirq; +} + +static struct irq_chip tegra_msi_bottom_chip = { + .name = "Tegra MSI", + .irq_ack = tegra_msi_irq_ack, + .irq_mask = tegra_msi_irq_mask, + .irq_unmask = tegra_msi_irq_unmask, + .irq_set_affinity = tegra_msi_set_affinity, + .irq_compose_msi_msg = tegra_compose_msi_msg, }; -static int tegra_msi_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) +static int tegra_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) { - irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); + struct tegra_msi *msi = domain->host_data; + unsigned int i; + int hwirq; + + mutex_lock(&msi->map_lock); + + hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs)); + + mutex_unlock(&msi->map_lock); + + if (hwirq < 0) + return -ENOSPC; + + for (i = 0; i < nr_irqs; i++) + irq_domain_set_info(domain, virq + i, hwirq + i, + &tegra_msi_bottom_chip, domain->host_data, + handle_edge_irq, NULL, NULL); tegra_cpuidle_pcie_irqs_in_use(); return 0; } -static const struct irq_domain_ops msi_domain_ops = { - .map = tegra_msi_map, +static void tegra_msi_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct tegra_msi *msi = domain->host_data; + + mutex_lock(&msi->map_lock); + + bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs)); + + mutex_unlock(&msi->map_lock); +} + +static const struct irq_domain_ops tegra_msi_domain_ops = { + .alloc = tegra_msi_domain_alloc, + .free = tegra_msi_domain_free, +}; + +static struct msi_domain_info tegra_msi_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX), + .chip = &tegra_msi_top_chip, }; +static int tegra_allocate_domains(struct tegra_msi *msi) +{ + struct tegra_pcie *pcie = msi_to_pcie(msi); + struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); + struct irq_domain *parent; + + parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR, + &tegra_msi_domain_ops, msi); + if (!parent) { + dev_err(pcie->dev, "failed to create IRQ domain\n"); + return -ENOMEM; + } + irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); + + msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent); + if (!msi->domain) { + dev_err(pcie->dev, "failed to create MSI domain\n"); + irq_domain_remove(parent); + return -ENOMEM; + } + + return 0; +} + +static void tegra_free_domains(struct tegra_msi *msi) +{ + struct irq_domain *parent = msi->domain->parent; + + irq_domain_remove(msi->domain); + irq_domain_remove(parent); +} + 
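+/* + * Resulting IRQ domain hierarchy (summary of the conversion above): + * pci_msi_create_irq_domain() stacks the MSI (top) domain, whose + * tegra_msi_top_chip keeps the PCI MSI mask and the AFI enable bits in + * sync, on the linear parent (nexus) domain, whose tegra_msi_bottom_chip + * acks by clearing AFI_MSI_VEC(x) and masks through AFI_MSI_EN_VEC(x); + * MSIs then arrive via the chained handler tegra_pcie_msi_irq() rather + * than a struct msi_controller callback. + */ +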
static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) { - struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); struct platform_device *pdev = to_platform_device(pcie->dev); struct tegra_msi *msi = &pcie->msi; struct device *dev = pcie->dev; int err; - mutex_init(&msi->lock); - - msi->chip.dev = dev; - msi->chip.setup_irq = tegra_msi_setup_irq; - msi->chip.teardown_irq = tegra_msi_teardown_irq; + mutex_init(&msi->map_lock); + spin_lock_init(&msi->mask_lock); - msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR, - &msi_domain_ops, &msi->chip); - if (!msi->domain) { - dev_err(dev, "failed to create IRQ domain\n"); - return -ENOMEM; + if (IS_ENABLED(CONFIG_PCI_MSI)) { + err = tegra_allocate_domains(msi); + if (err) + return err; } err = platform_get_irq_byname(pdev, "msi"); @@ -1714,12 +1766,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) msi->irq = err; - err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD, - tegra_msi_irq_chip.name, pcie); - if (err < 0) { - dev_err(dev, "failed to request IRQ: %d\n", err); - goto free_irq_domain; - } + irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie); /* Though the PCIe controller can address >32-bit address space, to * facilitate endpoints that support only 32-bit MSI target address, @@ -1740,14 +1787,14 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) goto free_irq; } - host->msi = &msi->chip; - return 0; free_irq: - free_irq(msi->irq, pcie); + irq_set_chained_handler_and_data(msi->irq, NULL, NULL); free_irq_domain: - irq_domain_remove(msi->domain); + if (IS_ENABLED(CONFIG_PCI_MSI)) + tegra_free_domains(msi); + return err; } @@ -1755,22 +1802,18 @@ static void tegra_pcie_enable_msi(struct tegra_pcie *pcie) { const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_msi *msi = &pcie->msi; - u32 reg; + u32 reg, msi_state[INT_PCI_MSI_NR / 32]; + int i; afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); /* this register is in 4K increments */ afi_writel(pcie, 1, AFI_MSI_BAR_SZ); - /* enable all MSI vectors */ - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0); - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1); - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2); - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3); - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4); - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5); - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6); - afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7); + /* Restore the MSI allocation state */ + bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR); + for (i = 0; i < ARRAY_SIZE(msi_state); i++) + afi_writel(pcie, msi_state[i], AFI_MSI_EN_VEC(i)); /* and unmask the MSI interrupt */ reg = afi_readl(pcie, AFI_INTR_MASK); @@ -1786,16 +1829,16 @@ static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie) dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys, DMA_ATTR_NO_KERNEL_MAPPING); - if (msi->irq > 0) - free_irq(msi->irq, pcie); - for (i = 0; i < INT_PCI_MSI_NR; i++) { irq = irq_find_mapping(msi->domain, i); if (irq > 0) - irq_dispose_mapping(irq); + irq_domain_free_irqs(irq, 1); } - irq_domain_remove(msi->domain); + irq_set_chained_handler_and_data(msi->irq, NULL, NULL); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + tegra_free_domains(msi); } static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) @@ -1807,16 +1850,6 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) value &= ~AFI_INTR_MASK_MSI_MASK; afi_writel(pcie, value, AFI_INTR_MASK); - /* disable 
all MSI vectors */ - afi_writel(pcie, 0, AFI_MSI_EN_VEC0); - afi_writel(pcie, 0, AFI_MSI_EN_VEC1); - afi_writel(pcie, 0, AFI_MSI_EN_VEC2); - afi_writel(pcie, 0, AFI_MSI_EN_VEC3); - afi_writel(pcie, 0, AFI_MSI_EN_VEC4); - afi_writel(pcie, 0, AFI_MSI_EN_VEC5); - afi_writel(pcie, 0, AFI_MSI_EN_VEC6); - afi_writel(pcie, 0, AFI_MSI_EN_VEC7); - return 0; } diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c index f964fd26f7e0..ffd84656544f 100644 --- a/drivers/pci/controller/pci-thunder-ecam.c +++ b/drivers/pci/controller/pci-thunder-ecam.c @@ -116,7 +116,7 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn, * the config space access window. Since we are working with * the high-order 32 bits, shift everything down by 32 bits. */ - node_bits = (cfg->res.start >> 32) & (1 << 12); + node_bits = upper_32_bits(cfg->res.start) & (1 << 12); v |= node_bits; set_val(v, where, size, val); diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c index 1a3f70ac61fc..0660b9da204f 100644 --- a/drivers/pci/controller/pci-thunder-pem.c +++ b/drivers/pci/controller/pci-thunder-pem.c @@ -12,6 +12,7 @@ #include <linux/pci-acpi.h> #include <linux/pci-ecam.h> #include <linux/platform_device.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include "../pci.h" #if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) @@ -324,9 +325,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg, * structure here for the BAR. */ bar4_start = res_pem->start + 0xf00000; - pem_pci->ea_entry[0] = (u32)bar4_start | 2; - pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u; - pem_pci->ea_entry[2] = (u32)(bar4_start >> 32); + pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2; + pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u; + pem_pci->ea_entry[2] = upper_32_bits(bar4_start); cfg->priv = pem_pci; return 0; @@ -334,9 +335,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg, #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) -#define PEM_RES_BASE 0x87e0c0000000UL -#define PEM_NODE_MASK GENMASK(45, 44) -#define PEM_INDX_MASK GENMASK(26, 24) +#define PEM_RES_BASE 0x87e0c0000000ULL +#define PEM_NODE_MASK GENMASK_ULL(45, 44) +#define PEM_INDX_MASK GENMASK_ULL(26, 24) #define PEM_MIN_DOM_IN_NODE 4 #define PEM_MAX_DOM_IN_NODE 10 diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index 2afdc865253e..7f503dd4ff81 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -354,7 +354,8 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port, if (IS_ERR(port->csr_base)) return PTR_ERR(port->csr_base); - port->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg"); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); + port->cfg_base = devm_ioremap_resource(dev, res); if (IS_ERR(port->cfg_base)) return PTR_ERR(port->cfg_base); port->cfg_addr = res->start; diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c index 42691dd8ebef..98aa1dccc6e6 100644 --- a/drivers/pci/controller/pcie-altera-msi.c +++ b/drivers/pci/controller/pcie-altera-msi.c @@ -236,10 +236,8 @@ static int altera_msi_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vector_slave"); msi->vector_base = devm_ioremap_resource(&pdev->dev, res); - if 
(IS_ERR(msi->vector_base)) { - dev_err(&pdev->dev, "failed to map vector_slave memory\n"); + if (IS_ERR(msi->vector_base)) return PTR_ERR(msi->vector_base); - } msi->vector_phy = res->start; diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index e330e6811f0b..08bc788d9422 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -1148,6 +1148,7 @@ static int brcm_pcie_suspend(struct device *dev) brcm_pcie_turn_off(pcie); ret = brcm_phy_stop(pcie); + reset_control_rearm(pcie->rescal); clk_disable_unprepare(pcie->clk); return ret; @@ -1163,9 +1164,13 @@ static int brcm_pcie_resume(struct device *dev) base = pcie->base; clk_prepare_enable(pcie->clk); + ret = reset_control_reset(pcie->rescal); + if (ret) + goto err_disable_clk; + ret = brcm_phy_start(pcie); if (ret) - goto err; + goto err_reset; /* Take bridge out of reset so we can access the SERDES reg */ pcie->bridge_sw_init_set(pcie, 0); @@ -1180,14 +1185,16 @@ static int brcm_pcie_resume(struct device *dev) ret = brcm_pcie_setup(pcie); if (ret) - goto err; + goto err_reset; if (pcie->msi) brcm_msi_set_regs(pcie->msi); return 0; -err: +err_reset: + reset_control_rearm(pcie->rescal); +err_disable_clk: clk_disable_unprepare(pcie->clk); return ret; } @@ -1197,7 +1204,7 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie) brcm_msi_remove(pcie); brcm_pcie_turn_off(pcie); brcm_phy_stop(pcie); - reset_control_assert(pcie->rescal); + reset_control_rearm(pcie->rescal); clk_disable_unprepare(pcie->clk); } @@ -1278,13 +1285,13 @@ static int brcm_pcie_probe(struct platform_device *pdev) return PTR_ERR(pcie->perst_reset); } - ret = reset_control_deassert(pcie->rescal); + ret = reset_control_reset(pcie->rescal); if (ret) dev_err(&pdev->dev, "failed to deassert 'rescal'\n"); ret = brcm_phy_start(pcie); if (ret) { - reset_control_assert(pcie->rescal); + reset_control_rearm(pcie->rescal); clk_disable_unprepare(pcie->clk); return ret; } @@ -1296,6 +1303,7 @@ static int brcm_pcie_probe(struct platform_device *pdev) pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION); if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) { dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n"); + ret = -ENODEV; goto fail; } diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c index 908475d27e0e..eede4e8f3f75 100644 --- a/drivers/pci/controller/pcie-iproc-msi.c +++ b/drivers/pci/controller/pcie-iproc-msi.c @@ -271,7 +271,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain, NULL, NULL); } - return hwirq; + return 0; } static void iproc_msi_irq_domain_free(struct irq_domain *domain, diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c new file mode 100644 index 000000000000..3c5b97716d40 --- /dev/null +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -0,0 +1,1027 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek PCIe host controller driver. + * + * Copyright (c) 2020 MediaTek Inc. 
+ * Author: Jianjun Wang <[email protected]> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/iopoll.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> + +#include "../pci.h" + +#define PCIE_SETTING_REG 0x80 +#define PCIE_PCI_IDS_1 0x9c +#define PCI_CLASS(class) (class << 8) +#define PCIE_RC_MODE BIT(0) + +#define PCIE_CFGNUM_REG 0x140 +#define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0)) +#define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8)) +#define PCIE_CFG_BYTE_EN(bytes) (((bytes) << 16) & GENMASK(19, 16)) +#define PCIE_CFG_FORCE_BYTE_EN BIT(20) +#define PCIE_CFG_OFFSET_ADDR 0x1000 +#define PCIE_CFG_HEADER(bus, devfn) \ + (PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn)) + +#define PCIE_RST_CTRL_REG 0x148 +#define PCIE_MAC_RSTB BIT(0) +#define PCIE_PHY_RSTB BIT(1) +#define PCIE_BRG_RSTB BIT(2) +#define PCIE_PE_RSTB BIT(3) + +#define PCIE_LTSSM_STATUS_REG 0x150 +#define PCIE_LTSSM_STATE_MASK GENMASK(28, 24) +#define PCIE_LTSSM_STATE(val) ((val & PCIE_LTSSM_STATE_MASK) >> 24) +#define PCIE_LTSSM_STATE_L2_IDLE 0x14 + +#define PCIE_LINK_STATUS_REG 0x154 +#define PCIE_PORT_LINKUP BIT(8) + +#define PCIE_MSI_SET_NUM 8 +#define PCIE_MSI_IRQS_PER_SET 32 +#define PCIE_MSI_IRQS_NUM \ + (PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM) + +#define PCIE_INT_ENABLE_REG 0x180 +#define PCIE_MSI_ENABLE GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8) +#define PCIE_MSI_SHIFT 8 +#define PCIE_INTX_SHIFT 24 +#define PCIE_INTX_ENABLE \ + GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT) + +#define PCIE_INT_STATUS_REG 0x184 +#define PCIE_MSI_SET_ENABLE_REG 0x190 +#define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0) + +#define PCIE_MSI_SET_BASE_REG 0xc00 +#define PCIE_MSI_SET_OFFSET 0x10 +#define PCIE_MSI_SET_STATUS_OFFSET 0x04 +#define PCIE_MSI_SET_ENABLE_OFFSET 0x08 + +#define PCIE_MSI_SET_ADDR_HI_BASE 0xc80 +#define PCIE_MSI_SET_ADDR_HI_OFFSET 0x04 + +#define PCIE_ICMD_PM_REG 0x198 +#define PCIE_TURN_OFF_LINK BIT(4) + +#define PCIE_TRANS_TABLE_BASE_REG 0x800 +#define PCIE_ATR_SRC_ADDR_MSB_OFFSET 0x4 +#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET 0x8 +#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET 0xc +#define PCIE_ATR_TRSL_PARAM_OFFSET 0x10 +#define PCIE_ATR_TLB_SET_OFFSET 0x20 + +#define PCIE_MAX_TRANS_TABLES 8 +#define PCIE_ATR_EN BIT(0) +#define PCIE_ATR_SIZE(size) \ + (((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN) +#define PCIE_ATR_ID(id) ((id) & GENMASK(3, 0)) +#define PCIE_ATR_TYPE_MEM PCIE_ATR_ID(0) +#define PCIE_ATR_TYPE_IO PCIE_ATR_ID(1) +#define PCIE_ATR_TLP_TYPE(type) (((type) << 16) & GENMASK(18, 16)) +#define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0) +#define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2) + +/** + * struct mtk_msi_set - MSI information for each set + * @base: IO mapped register base + * @msg_addr: MSI message address + * @saved_irq_state: IRQ enable state saved at suspend time + */ +struct mtk_msi_set { + void __iomem *base; + phys_addr_t msg_addr; + u32 saved_irq_state; +}; + +/** + * struct mtk_pcie_port - PCIe port information + * @dev: pointer to PCIe device + * @base: IO mapped register base + * @reg_base: physical register base + * @mac_reset: MAC reset control + * @phy_reset: PHY reset control + * @phy: PHY controller block + * @clks: PCIe clocks + * @num_clks: PCIe 
clocks count for this port + * @irq: PCIe controller interrupt number + * @saved_irq_state: IRQ enable state saved at suspend time + * @irq_lock: lock protecting IRQ register access + * @intx_domain: legacy INTx IRQ domain + * @msi_domain: MSI IRQ domain + * @msi_bottom_domain: MSI IRQ bottom domain + * @msi_sets: MSI sets information + * @lock: lock protecting IRQ bit map + * @msi_irq_in_use: bit map for assigned MSI IRQ + */ +struct mtk_pcie_port { + struct device *dev; + void __iomem *base; + phys_addr_t reg_base; + struct reset_control *mac_reset; + struct reset_control *phy_reset; + struct phy *phy; + struct clk_bulk_data *clks; + int num_clks; + + int irq; + u32 saved_irq_state; + raw_spinlock_t irq_lock; + struct irq_domain *intx_domain; + struct irq_domain *msi_domain; + struct irq_domain *msi_bottom_domain; + struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM]; + struct mutex lock; + DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM); +}; + +/** + * mtk_pcie_config_tlp_header() - Configure a configuration TLP header + * @bus: PCI bus to query + * @devfn: device/function number + * @where: offset in config space + * @size: data size in TLP header + * + * Set byte enable field and device information in configuration TLP header. + */ +static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn, + int where, int size) +{ + struct mtk_pcie_port *port = bus->sysdata; + int bytes; + u32 val; + + bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3); + + val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) | + PCIE_CFG_HEADER(bus->number, devfn); + + writel_relaxed(val, port->base + PCIE_CFGNUM_REG); +} + +static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) +{ + struct mtk_pcie_port *port = bus->sysdata; + + return port->base + PCIE_CFG_OFFSET_ADDR + where; +} + +static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + mtk_pcie_config_tlp_header(bus, devfn, where, size); + + return pci_generic_config_read32(bus, devfn, where, size, val); +} + +static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + mtk_pcie_config_tlp_header(bus, devfn, where, size); + + if (size <= 2) + val <<= (where & 0x3) * 8; + + return pci_generic_config_write32(bus, devfn, where, 4, val); +} + +static struct pci_ops mtk_pcie_ops = { + .map_bus = mtk_pcie_map_bus, + .read = mtk_pcie_config_read, + .write = mtk_pcie_config_write, +}; + +static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port, + resource_size_t cpu_addr, + resource_size_t pci_addr, + resource_size_t size, + unsigned long type, int num) +{ + void __iomem *table; + u32 val; + + if (num >= PCIE_MAX_TRANS_TABLES) { + dev_err(port->dev, "not enough translate table for addr: %#llx, limited to [%d]\n", + (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES); + return -ENODEV; + } + + table = port->base + PCIE_TRANS_TABLE_BASE_REG + + num * PCIE_ATR_TLB_SET_OFFSET; + + writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1), + table); + writel_relaxed(upper_32_bits(cpu_addr), + table + PCIE_ATR_SRC_ADDR_MSB_OFFSET); + writel_relaxed(lower_32_bits(pci_addr), + table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET); + writel_relaxed(upper_32_bits(pci_addr), + table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET); + + if (type == IORESOURCE_IO) + val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO; + else + val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM; + + writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET); + + 
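+ /* + * The source-address word written first carries PCIE_ATR_EN together + * with the encoded window size: PCIE_ATR_SIZE(fls(size) - 1) stores + * log2(size) - 1 in bits [6:1], so the mapping assumes a power-of-two + * window with cpu_addr aligned to it; the final write selects the TLP + * type (MEM or IO) used for the translated requests. + */ +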
return 0; +} + +static void mtk_pcie_enable_msi(struct mtk_pcie_port *port) +{ + int i; + u32 val; + + for (i = 0; i < PCIE_MSI_SET_NUM; i++) { + struct mtk_msi_set *msi_set = &port->msi_sets[i]; + + msi_set->base = port->base + PCIE_MSI_SET_BASE_REG + + i * PCIE_MSI_SET_OFFSET; + msi_set->msg_addr = port->reg_base + PCIE_MSI_SET_BASE_REG + + i * PCIE_MSI_SET_OFFSET; + + /* Configure the MSI capture address */ + writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base); + writel_relaxed(upper_32_bits(msi_set->msg_addr), + port->base + PCIE_MSI_SET_ADDR_HI_BASE + + i * PCIE_MSI_SET_ADDR_HI_OFFSET); + } + + val = readl_relaxed(port->base + PCIE_MSI_SET_ENABLE_REG); + val |= PCIE_MSI_SET_ENABLE; + writel_relaxed(val, port->base + PCIE_MSI_SET_ENABLE_REG); + + val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG); + val |= PCIE_MSI_ENABLE; + writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG); +} + +static int mtk_pcie_startup_port(struct mtk_pcie_port *port) +{ + struct resource_entry *entry; + struct pci_host_bridge *host = pci_host_bridge_from_priv(port); + unsigned int table_index = 0; + int err; + u32 val; + + /* Set as RC mode */ + val = readl_relaxed(port->base + PCIE_SETTING_REG); + val |= PCIE_RC_MODE; + writel_relaxed(val, port->base + PCIE_SETTING_REG); + + /* Set class code */ + val = readl_relaxed(port->base + PCIE_PCI_IDS_1); + val &= ~GENMASK(31, 8); + val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8); + writel_relaxed(val, port->base + PCIE_PCI_IDS_1); + + /* Mask all INTx interrupts */ + val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG); + val &= ~PCIE_INTX_ENABLE; + writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG); + + /* Assert all reset signals */ + val = readl_relaxed(port->base + PCIE_RST_CTRL_REG); + val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB; + writel_relaxed(val, port->base + PCIE_RST_CTRL_REG); + + /* + * Described in PCIe CEM specification sections 2.2 (PERST# Signal) + * and 2.2.1 (Initial Power-Up (G3 to S0)). + * The deassertion of PERST# should be delayed 100ms (TPVPERL) + * for the power and clock to become stable.
+ */ + msleep(100); + + /* De-assert reset signals */ + val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB); + writel_relaxed(val, port->base + PCIE_RST_CTRL_REG); + + /* Check if the link is up or not */ + err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_REG, val, + !!(val & PCIE_PORT_LINKUP), 20, + PCI_PM_D3COLD_WAIT * USEC_PER_MSEC); + if (err) { + val = readl_relaxed(port->base + PCIE_LTSSM_STATUS_REG); + dev_err(port->dev, "PCIe link down, ltssm reg val: %#x\n", val); + return err; + } + + mtk_pcie_enable_msi(port); + + /* Set PCIe translation windows */ + resource_list_for_each_entry(entry, &host->windows) { + struct resource *res = entry->res; + unsigned long type = resource_type(res); + resource_size_t cpu_addr; + resource_size_t pci_addr; + resource_size_t size; + const char *range_type; + + if (type == IORESOURCE_IO) { + cpu_addr = pci_pio_to_address(res->start); + range_type = "IO"; + } else if (type == IORESOURCE_MEM) { + cpu_addr = res->start; + range_type = "MEM"; + } else { + continue; + } + + pci_addr = res->start - entry->offset; + size = resource_size(res); + err = mtk_pcie_set_trans_table(port, cpu_addr, pci_addr, size, + type, table_index); + if (err) + return err; + + dev_dbg(port->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n", + range_type, table_index, (unsigned long long)cpu_addr, + (unsigned long long)pci_addr, (unsigned long long)size); + + table_index++; + } + + return 0; +} + +static int mtk_pcie_set_affinity(struct irq_data *data, + const struct cpumask *mask, bool force) +{ + return -EINVAL; +} + +static void mtk_pcie_msi_irq_mask(struct irq_data *data) +{ + pci_msi_mask_irq(data); + irq_chip_mask_parent(data); +} + +static void mtk_pcie_msi_irq_unmask(struct irq_data *data) +{ + pci_msi_unmask_irq(data); + irq_chip_unmask_parent(data); +} + +static struct irq_chip mtk_msi_irq_chip = { + .irq_ack = irq_chip_ack_parent, + .irq_mask = mtk_pcie_msi_irq_mask, + .irq_unmask = mtk_pcie_msi_irq_unmask, + .name = "MSI", +}; + +static struct msi_domain_info mtk_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), + .chip = &mtk_msi_irq_chip, +}; + +static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data); + struct mtk_pcie_port *port = data->domain->host_data; + unsigned long hwirq; + + hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; + + msg->address_hi = upper_32_bits(msi_set->msg_addr); + msg->address_lo = lower_32_bits(msi_set->msg_addr); + msg->data = hwirq; + dev_dbg(port->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n", + hwirq, msg->address_hi, msg->address_lo, msg->data); +} + +static void mtk_msi_bottom_irq_ack(struct irq_data *data) +{ + struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data); + unsigned long hwirq; + + hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; + + writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET); +} + +static void mtk_msi_bottom_irq_mask(struct irq_data *data) +{ + struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data); + struct mtk_pcie_port *port = data->domain->host_data; + unsigned long hwirq, flags; + u32 val; + + hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; + + raw_spin_lock_irqsave(&port->irq_lock, flags); + val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); + val &= ~BIT(hwirq); + writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); + 
raw_spin_unlock_irqrestore(&port->irq_lock, flags); +} + +static void mtk_msi_bottom_irq_unmask(struct irq_data *data) +{ + struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data); + struct mtk_pcie_port *port = data->domain->host_data; + unsigned long hwirq, flags; + u32 val; + + hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; + + raw_spin_lock_irqsave(&port->irq_lock, flags); + val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); + val |= BIT(hwirq); + writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); + raw_spin_unlock_irqrestore(&port->irq_lock, flags); +} + +static struct irq_chip mtk_msi_bottom_irq_chip = { + .irq_ack = mtk_msi_bottom_irq_ack, + .irq_mask = mtk_msi_bottom_irq_mask, + .irq_unmask = mtk_msi_bottom_irq_unmask, + .irq_compose_msi_msg = mtk_compose_msi_msg, + .irq_set_affinity = mtk_pcie_set_affinity, + .name = "MSI", +}; + +static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *arg) +{ + struct mtk_pcie_port *port = domain->host_data; + struct mtk_msi_set *msi_set; + int i, hwirq, set_idx; + + mutex_lock(&port->lock); + + hwirq = bitmap_find_free_region(port->msi_irq_in_use, PCIE_MSI_IRQS_NUM, + order_base_2(nr_irqs)); + + mutex_unlock(&port->lock); + + if (hwirq < 0) + return -ENOSPC; + + set_idx = hwirq / PCIE_MSI_IRQS_PER_SET; + msi_set = &port->msi_sets[set_idx]; + + for (i = 0; i < nr_irqs; i++) + irq_domain_set_info(domain, virq + i, hwirq + i, + &mtk_msi_bottom_irq_chip, msi_set, + handle_edge_irq, NULL, NULL); + + return 0; +} + +static void mtk_msi_bottom_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct mtk_pcie_port *port = domain->host_data; + struct irq_data *data = irq_domain_get_irq_data(domain, virq); + + mutex_lock(&port->lock); + + bitmap_release_region(port->msi_irq_in_use, data->hwirq, + order_base_2(nr_irqs)); + + mutex_unlock(&port->lock); + + irq_domain_free_irqs_common(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops mtk_msi_bottom_domain_ops = { + .alloc = mtk_msi_bottom_domain_alloc, + .free = mtk_msi_bottom_domain_free, +}; + +static void mtk_intx_mask(struct irq_data *data) +{ + struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&port->irq_lock, flags); + val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG); + val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT); + writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG); + raw_spin_unlock_irqrestore(&port->irq_lock, flags); +} + +static void mtk_intx_unmask(struct irq_data *data) +{ + struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&port->irq_lock, flags); + val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG); + val |= BIT(data->hwirq + PCIE_INTX_SHIFT); + writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG); + raw_spin_unlock_irqrestore(&port->irq_lock, flags); +} + +/** + * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt + * @data: pointer to chip specific data + * + * As an emulated level IRQ, its interrupt status will remain + * until the corresponding de-assert message is received; hence the + * status can only be cleared when the interrupt has been serviced.
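+ * This is also why mtk_pcie_intx_map() installs handle_fasteoi_irq: the + * status register write below acts as the EOI and is issued only after + * the handler for the emulated level IRQ has run.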
+ */ +static void mtk_intx_eoi(struct irq_data *data) +{ + struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); + unsigned long hwirq; + + hwirq = data->hwirq + PCIE_INTX_SHIFT; + writel_relaxed(BIT(hwirq), port->base + PCIE_INT_STATUS_REG); +} + +static struct irq_chip mtk_intx_irq_chip = { + .irq_mask = mtk_intx_mask, + .irq_unmask = mtk_intx_unmask, + .irq_eoi = mtk_intx_eoi, + .irq_set_affinity = mtk_pcie_set_affinity, + .name = "INTx", +}; + +static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_data(irq, domain->host_data); + irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip, + handle_fasteoi_irq, "INTx"); + return 0; +} + +static const struct irq_domain_ops intx_domain_ops = { + .map = mtk_pcie_intx_map, +}; + +static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port) +{ + struct device *dev = port->dev; + struct device_node *intc_node, *node = dev->of_node; + int ret; + + raw_spin_lock_init(&port->irq_lock); + + /* Setup INTx */ + intc_node = of_get_child_by_name(node, "interrupt-controller"); + if (!intc_node) { + dev_err(dev, "missing interrupt-controller node\n"); + return -ENODEV; + } + + port->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX, + &intx_domain_ops, port); + if (!port->intx_domain) { + dev_err(dev, "failed to create INTx IRQ domain\n"); + return -ENODEV; + } + + /* Setup MSI */ + mutex_init(&port->lock); + + port->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM, + &mtk_msi_bottom_domain_ops, port); + if (!port->msi_bottom_domain) { + dev_err(dev, "failed to create MSI bottom domain\n"); + ret = -ENODEV; + goto err_msi_bottom_domain; + } + + port->msi_domain = pci_msi_create_irq_domain(dev->fwnode, + &mtk_msi_domain_info, + port->msi_bottom_domain); + if (!port->msi_domain) { + dev_err(dev, "failed to create MSI domain\n"); + ret = -ENODEV; + goto err_msi_domain; + } + + return 0; + +err_msi_domain: + irq_domain_remove(port->msi_bottom_domain); +err_msi_bottom_domain: + irq_domain_remove(port->intx_domain); + + return ret; +} + +static void mtk_pcie_irq_teardown(struct mtk_pcie_port *port) +{ + irq_set_chained_handler_and_data(port->irq, NULL, NULL); + + if (port->intx_domain) + irq_domain_remove(port->intx_domain); + + if (port->msi_domain) + irq_domain_remove(port->msi_domain); + + if (port->msi_bottom_domain) + irq_domain_remove(port->msi_bottom_domain); + + irq_dispose_mapping(port->irq); +} + +static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx) +{ + struct mtk_msi_set *msi_set = &port->msi_sets[set_idx]; + unsigned long msi_enable, msi_status; + unsigned int virq; + irq_hw_number_t bit, hwirq; + + msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); + + do { + msi_status = readl_relaxed(msi_set->base + + PCIE_MSI_SET_STATUS_OFFSET); + msi_status &= msi_enable; + if (!msi_status) + break; + + for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) { + hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET; + virq = irq_find_mapping(port->msi_bottom_domain, hwirq); + generic_handle_irq(virq); + } + } while (true); +} + +static void mtk_pcie_irq_handler(struct irq_desc *desc) +{ + struct mtk_pcie_port *port = irq_desc_get_handler_data(desc); + struct irq_chip *irqchip = irq_desc_get_chip(desc); + unsigned long status; + unsigned int virq; + irq_hw_number_t irq_bit = PCIE_INTX_SHIFT; + + chained_irq_enter(irqchip, desc); + + status = readl_relaxed(port->base + PCIE_INT_STATUS_REG); + for_each_set_bit_from(irq_bit, 
&status, PCI_NUM_INTX + + PCIE_INTX_SHIFT) { + virq = irq_find_mapping(port->intx_domain, + irq_bit - PCIE_INTX_SHIFT); + generic_handle_irq(virq); + } + + irq_bit = PCIE_MSI_SHIFT; + for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM + + PCIE_MSI_SHIFT) { + mtk_pcie_msi_handler(port, irq_bit - PCIE_MSI_SHIFT); + + writel_relaxed(BIT(irq_bit), port->base + PCIE_INT_STATUS_REG); + } + + chained_irq_exit(irqchip, desc); +} + +static int mtk_pcie_setup_irq(struct mtk_pcie_port *port) +{ + struct device *dev = port->dev; + struct platform_device *pdev = to_platform_device(dev); + int err; + + err = mtk_pcie_init_irq_domains(port); + if (err) + return err; + + port->irq = platform_get_irq(pdev, 0); + if (port->irq < 0) + return port->irq; + + irq_set_chained_handler_and_data(port->irq, mtk_pcie_irq_handler, port); + + return 0; +} + +static int mtk_pcie_parse_port(struct mtk_pcie_port *port) +{ + struct device *dev = port->dev; + struct platform_device *pdev = to_platform_device(dev); + struct resource *regs; + int ret; + + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac"); + if (!regs) + return -EINVAL; + port->base = devm_ioremap_resource(dev, regs); + if (IS_ERR(port->base)) { + dev_err(dev, "failed to map register base\n"); + return PTR_ERR(port->base); + } + + port->reg_base = regs->start; + + port->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy"); + if (IS_ERR(port->phy_reset)) { + ret = PTR_ERR(port->phy_reset); + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to get PHY reset\n"); + + return ret; + } + + port->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac"); + if (IS_ERR(port->mac_reset)) { + ret = PTR_ERR(port->mac_reset); + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to get MAC reset\n"); + + return ret; + } + + port->phy = devm_phy_optional_get(dev, "pcie-phy"); + if (IS_ERR(port->phy)) { + ret = PTR_ERR(port->phy); + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to get PHY\n"); + + return ret; + } + + port->num_clks = devm_clk_bulk_get_all(dev, &port->clks); + if (port->num_clks < 0) { + dev_err(dev, "failed to get clocks\n"); + return port->num_clks; + } + + return 0; +} + +static int mtk_pcie_power_up(struct mtk_pcie_port *port) +{ + struct device *dev = port->dev; + int err; + + /* PHY power on and enable pipe clock */ + reset_control_deassert(port->phy_reset); + + err = phy_init(port->phy); + if (err) { + dev_err(dev, "failed to initialize PHY\n"); + goto err_phy_init; + } + + err = phy_power_on(port->phy); + if (err) { + dev_err(dev, "failed to power on PHY\n"); + goto err_phy_on; + } + + /* MAC power on and enable transaction layer clocks */ + reset_control_deassert(port->mac_reset); + + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + + err = clk_bulk_prepare_enable(port->num_clks, port->clks); + if (err) { + dev_err(dev, "failed to enable clocks\n"); + goto err_clk_init; + } + + return 0; + +err_clk_init: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + reset_control_assert(port->mac_reset); + phy_power_off(port->phy); +err_phy_on: + phy_exit(port->phy); +err_phy_init: + reset_control_assert(port->phy_reset); + + return err; +} + +static void mtk_pcie_power_down(struct mtk_pcie_port *port) +{ + clk_bulk_disable_unprepare(port->num_clks, port->clks); + + pm_runtime_put_sync(port->dev); + pm_runtime_disable(port->dev); + reset_control_assert(port->mac_reset); + + phy_power_off(port->phy); + phy_exit(port->phy); + reset_control_assert(port->phy_reset); +} + +static int mtk_pcie_setup(struct 
mtk_pcie_port *port) +{ + int err; + + err = mtk_pcie_parse_port(port); + if (err) + return err; + + /* Don't touch the hardware registers before power up */ + err = mtk_pcie_power_up(port); + if (err) + return err; + + /* Try link up */ + err = mtk_pcie_startup_port(port); + if (err) + goto err_setup; + + err = mtk_pcie_setup_irq(port); + if (err) + goto err_setup; + + return 0; + +err_setup: + mtk_pcie_power_down(port); + + return err; +} + +static int mtk_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_pcie_port *port; + struct pci_host_bridge *host; + int err; + + host = devm_pci_alloc_host_bridge(dev, sizeof(*port)); + if (!host) + return -ENOMEM; + + port = pci_host_bridge_priv(host); + + port->dev = dev; + platform_set_drvdata(pdev, port); + + err = mtk_pcie_setup(port); + if (err) + return err; + + host->ops = &mtk_pcie_ops; + host->sysdata = port; + + err = pci_host_probe(host); + if (err) { + mtk_pcie_irq_teardown(port); + mtk_pcie_power_down(port); + return err; + } + + return 0; +} + +static int mtk_pcie_remove(struct platform_device *pdev) +{ + struct mtk_pcie_port *port = platform_get_drvdata(pdev); + struct pci_host_bridge *host = pci_host_bridge_from_priv(port); + + pci_lock_rescan_remove(); + pci_stop_root_bus(host->bus); + pci_remove_root_bus(host->bus); + pci_unlock_rescan_remove(); + + mtk_pcie_irq_teardown(port); + mtk_pcie_power_down(port); + + return 0; +} + +static void __maybe_unused mtk_pcie_irq_save(struct mtk_pcie_port *port) +{ + int i; + + raw_spin_lock(&port->irq_lock); + + port->saved_irq_state = readl_relaxed(port->base + PCIE_INT_ENABLE_REG); + + for (i = 0; i < PCIE_MSI_SET_NUM; i++) { + struct mtk_msi_set *msi_set = &port->msi_sets[i]; + + msi_set->saved_irq_state = readl_relaxed(msi_set->base + + PCIE_MSI_SET_ENABLE_OFFSET); + } + + raw_spin_unlock(&port->irq_lock); +} + +static void __maybe_unused mtk_pcie_irq_restore(struct mtk_pcie_port *port) +{ + int i; + + raw_spin_lock(&port->irq_lock); + + writel_relaxed(port->saved_irq_state, port->base + PCIE_INT_ENABLE_REG); + + for (i = 0; i < PCIE_MSI_SET_NUM; i++) { + struct mtk_msi_set *msi_set = &port->msi_sets[i]; + + writel_relaxed(msi_set->saved_irq_state, + msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); + } + + raw_spin_unlock(&port->irq_lock); +} + +static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port) +{ + u32 val; + + val = readl_relaxed(port->base + PCIE_ICMD_PM_REG); + val |= PCIE_TURN_OFF_LINK; + writel_relaxed(val, port->base + PCIE_ICMD_PM_REG); + + /* Check that the link is in L2 */ + return readl_poll_timeout(port->base + PCIE_LTSSM_STATUS_REG, val, + (PCIE_LTSSM_STATE(val) == + PCIE_LTSSM_STATE_L2_IDLE), 20, + 50 * USEC_PER_MSEC); +} + +static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev) +{ + struct mtk_pcie_port *port = dev_get_drvdata(dev); + int err; + u32 val; + + /* Trigger link to L2 state */ + err = mtk_pcie_turn_off_link(port); + if (err) { + dev_err(port->dev, "cannot enter L2 state\n"); + return err; + } + + /* Pull down the PERST# pin */ + val = readl_relaxed(port->base + PCIE_RST_CTRL_REG); + val |= PCIE_PE_RSTB; + writel_relaxed(val, port->base + PCIE_RST_CTRL_REG); + + dev_dbg(port->dev, "entered L2 state successfully"); + + mtk_pcie_irq_save(port); + mtk_pcie_power_down(port); + + return 0; +} + +static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev) +{ + struct mtk_pcie_port *port = dev_get_drvdata(dev); + int err; + + err =
mtk_pcie_startup_port(port); + if (err) { + mtk_pcie_power_down(port); + return err; + } + + mtk_pcie_irq_restore(port); + + return 0; +} + +static const struct dev_pm_ops mtk_pcie_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq, + mtk_pcie_resume_noirq) +}; + +static const struct of_device_id mtk_pcie_of_match[] = { + { .compatible = "mediatek,mt8192-pcie" }, + {}, +}; + +static struct platform_driver mtk_pcie_driver = { + .probe = mtk_pcie_probe, + .remove = mtk_pcie_remove, + .driver = { + .name = "mtk-pcie", + .of_match_table = mtk_pcie_of_match, + .pm = &mtk_pcie_pm_ops, + }, +}; + +module_platform_driver(mtk_pcie_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 23548b517e4b..62a042e75d9a 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -143,6 +143,7 @@ struct mtk_pcie_port; * struct mtk_pcie_soc - differentiate between host generations * @need_fix_class_id: whether this host's class ID needed to be fixed or not * @need_fix_device_id: whether this host's device ID needed to be fixed or not + * @no_msi: Bridge has no MSI support, and relies on an external block * @device_id: device ID which this host need to be fixed * @ops: pointer to configuration access functions * @startup: pointer to controller setting functions @@ -151,6 +152,7 @@ struct mtk_pcie_port; struct mtk_pcie_soc { bool need_fix_class_id; bool need_fix_device_id; + bool no_msi; unsigned int device_id; struct pci_ops *ops; int (*startup)(struct mtk_pcie_port *port); @@ -760,7 +762,7 @@ static struct pci_ops mtk_pcie_ops = { static int mtk_pcie_startup_port(struct mtk_pcie_port *port) { struct mtk_pcie *pcie = port->pcie; - u32 func = PCI_FUNC(port->slot << 3); + u32 func = PCI_FUNC(port->slot); u32 slot = PCI_SLOT(port->slot << 3); u32 val; int err; @@ -1087,6 +1089,7 @@ static int mtk_pcie_probe(struct platform_device *pdev) host->ops = pcie->soc->ops; host->sysdata = pcie; + host->msi_domain = pcie->soc->no_msi; err = pci_host_probe(host); if (err) @@ -1176,6 +1179,7 @@ static const struct dev_pm_ops mtk_pcie_pm_ops = { }; static const struct mtk_pcie_soc mtk_pcie_soc_v1 = { + .no_msi = true, .ops = &mtk_pcie_ops, .startup = mtk_pcie_startup_port, }; @@ -1210,6 +1214,7 @@ static const struct of_device_id mtk_pcie_ids[] = { { .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 }, {}, }; +MODULE_DEVICE_TABLE(of, mtk_pcie_ids); static struct platform_driver mtk_pcie_driver = { .probe = mtk_pcie_probe, diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c index 04c19ff81aff..89c68c56d93b 100644 --- a/drivers/pci/controller/pcie-microchip-host.c +++ b/drivers/pci/controller/pcie-microchip-host.c @@ -301,27 +301,27 @@ static const struct cause event_cause[NUM_EVENTS] = { LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"), }; -struct event_map pcie_event_to_event[] = { +static struct event_map pcie_event_to_event[] = { PCIE_EVENT_TO_EVENT_MAP(L2_EXIT), PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT), PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT), }; -struct event_map sec_error_to_event[] = { +static struct event_map sec_error_to_event[] = { SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR), SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR), SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR), SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR), }; -struct event_map ded_error_to_event[] = { +static struct event_map ded_error_to_event[] = { 
DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR), DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR), DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR), DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR), }; -struct event_map local_status_to_event[] = { +static struct event_map local_status_to_event[] = { LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0), LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1), LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0), @@ -1023,10 +1023,8 @@ static int mc_platform_init(struct pci_config_window *cfg) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "unable to request IRQ%d\n", irq); + if (irq < 0) return -ENODEV; - } for (i = 0; i < NUM_EVENTS; i++) { event_irq = irq_create_mapping(port->event_domain, i); diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index a728e8f9ad3c..765cf2b45e24 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -35,18 +35,12 @@ struct rcar_msi { DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; - struct msi_controller chip; - unsigned long pages; - struct mutex lock; + struct mutex map_lock; + spinlock_t mask_lock; int irq1; int irq2; }; -static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip) -{ - return container_of(chip, struct rcar_msi, chip); -} - /* Structure representing the PCIe interface */ struct rcar_pcie_host { struct rcar_pcie pcie; @@ -56,6 +50,11 @@ struct rcar_pcie_host { int (*phy_init_fn)(struct rcar_pcie_host *host); }; +static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi) +{ + return container_of(msi, struct rcar_pcie_host, msi); +} + static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) { unsigned int shift = BITS_PER_BYTE * (where & 3); @@ -292,8 +291,6 @@ static int rcar_pcie_enable(struct rcar_pcie_host *host) bridge->sysdata = host; bridge->ops = &rcar_pcie_ops; - if (IS_ENABLED(CONFIG_PCI_MSI)) - bridge->msi = &host->msi.chip; return pci_host_probe(bridge); } @@ -473,42 +470,6 @@ static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host) return err; } -static int rcar_msi_alloc(struct rcar_msi *chip) -{ - int msi; - - mutex_lock(&chip->lock); - - msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR); - if (msi < INT_PCI_MSI_NR) - set_bit(msi, chip->used); - else - msi = -ENOSPC; - - mutex_unlock(&chip->lock); - - return msi; -} - -static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs) -{ - int msi; - - mutex_lock(&chip->lock); - msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR, - order_base_2(no_irqs)); - mutex_unlock(&chip->lock); - - return msi; -} - -static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq) -{ - mutex_lock(&chip->lock); - clear_bit(irq, chip->used); - mutex_unlock(&chip->lock); -} - static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) { struct rcar_pcie_host *host = data; @@ -527,18 +488,13 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) unsigned int index = find_first_bit(®, 32); unsigned int msi_irq; - /* clear the interrupt */ - rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR); - - msi_irq = irq_find_mapping(msi->domain, index); + msi_irq = irq_find_mapping(msi->domain->parent, index); if (msi_irq) { - if (test_bit(index, msi->used)) - generic_handle_irq(msi_irq); - else - dev_info(dev, "unhandled MSI\n"); + generic_handle_irq(msi_irq); } else { /* Unknown MSI, just clear it */ dev_dbg(dev, "unexpected MSI\n"); + rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR); } /* see if there's any more pending in this vector */ 
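The R-Car hunks that follow replace the driver's private msi_controller plumbing with a hierarchical MSI IRQ domain, the same scheme the Xilinx driver adopts later in this series: a linear parent domain owns the hardware vectors, and a generic PCI/MSI domain is stacked on top of it. A condensed sketch of that pattern follows; the foo_* names and FOO_NUM_MSI are placeholders for the per-driver details, and this is an illustration of the idiom, not code from the patch:

#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/property.h>

#define FOO_NUM_MSI	32			/* placeholder vector count */

struct foo_pcie {
	struct device *dev;
	struct irq_domain *msi_domain;
};

/* .alloc/.free would wrap bitmap_find_free_region()/bitmap_release_region() */
static const struct irq_domain_ops foo_msi_domain_ops;

static struct irq_chip foo_msi_top_chip = {
	.name = "foo PCIe MSI",
};

static struct msi_domain_info foo_msi_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_MULTI_PCI_MSI),
	.chip  = &foo_msi_top_chip,
};

static int foo_allocate_domains(struct foo_pcie *pcie)
{
	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
	struct irq_domain *parent;

	/* the parent domain owns the raw hardware vectors */
	parent = irq_domain_create_linear(fwnode, FOO_NUM_MSI,
					  &foo_msi_domain_ops, pcie);
	if (!parent)
		return -ENOMEM;
	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);

	/* the generic PCI/MSI domain is stacked on top of the parent */
	pcie->msi_domain = pci_msi_create_irq_domain(fwnode, &foo_msi_info,
						     parent);
	if (!pcie->msi_domain) {
		irq_domain_remove(parent);
		return -ENOMEM;
	}

	return 0;
}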
@@ -548,149 +504,169 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) return IRQ_HANDLED; } -static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, - struct msi_desc *desc) +static void rcar_msi_top_irq_ack(struct irq_data *d) { - struct rcar_msi *msi = to_rcar_msi(chip); - struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host, - msi.chip); - struct rcar_pcie *pcie = &host->pcie; - struct msi_msg msg; - unsigned int irq; - int hwirq; + irq_chip_ack_parent(d); +} - hwirq = rcar_msi_alloc(msi); - if (hwirq < 0) - return hwirq; +static void rcar_msi_top_irq_mask(struct irq_data *d) +{ + pci_msi_mask_irq(d); + irq_chip_mask_parent(d); +} - irq = irq_find_mapping(msi->domain, hwirq); - if (!irq) { - rcar_msi_free(msi, hwirq); - return -EINVAL; - } +static void rcar_msi_top_irq_unmask(struct irq_data *d) +{ + pci_msi_unmask_irq(d); + irq_chip_unmask_parent(d); +} - irq_set_msi_desc(irq, desc); +static struct irq_chip rcar_msi_top_chip = { + .name = "PCIe MSI", + .irq_ack = rcar_msi_top_irq_ack, + .irq_mask = rcar_msi_top_irq_mask, + .irq_unmask = rcar_msi_top_irq_unmask, +}; - msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; - msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); - msg.data = hwirq; +static void rcar_msi_irq_ack(struct irq_data *d) +{ + struct rcar_msi *msi = irq_data_get_irq_chip_data(d); + struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; - pci_write_msi_msg(irq, &msg); + /* clear the interrupt */ + rcar_pci_write_reg(pcie, BIT(d->hwirq), PCIEMSIFR); +} - return 0; +static void rcar_msi_irq_mask(struct irq_data *d) +{ + struct rcar_msi *msi = irq_data_get_irq_chip_data(d); + struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; + unsigned long flags; + u32 value; + + spin_lock_irqsave(&msi->mask_lock, flags); + value = rcar_pci_read_reg(pcie, PCIEMSIIER); + value &= ~BIT(d->hwirq); + rcar_pci_write_reg(pcie, value, PCIEMSIIER); + spin_unlock_irqrestore(&msi->mask_lock, flags); } -static int rcar_msi_setup_irqs(struct msi_controller *chip, - struct pci_dev *pdev, int nvec, int type) +static void rcar_msi_irq_unmask(struct irq_data *d) { - struct rcar_msi *msi = to_rcar_msi(chip); - struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host, - msi.chip); - struct rcar_pcie *pcie = &host->pcie; - struct msi_desc *desc; - struct msi_msg msg; - unsigned int irq; - int hwirq; - int i; + struct rcar_msi *msi = irq_data_get_irq_chip_data(d); + struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; + unsigned long flags; + u32 value; + + spin_lock_irqsave(&msi->mask_lock, flags); + value = rcar_pci_read_reg(pcie, PCIEMSIIER); + value |= BIT(d->hwirq); + rcar_pci_write_reg(pcie, value, PCIEMSIIER); + spin_unlock_irqrestore(&msi->mask_lock, flags); +} - /* MSI-X interrupts are not supported */ - if (type == PCI_CAP_ID_MSIX) - return -EINVAL; +static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) +{ + return -EINVAL; +} - WARN_ON(!list_is_singular(&pdev->dev.msi_list)); - desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list); +static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct rcar_msi *msi = irq_data_get_irq_chip_data(data); + struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; - hwirq = rcar_msi_alloc_region(msi, nvec); - if (hwirq < 0) - return -ENOSPC; + msg->address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; + msg->address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); + msg->data = data->hwirq; +} - irq = 
irq_find_mapping(msi->domain, hwirq); - if (!irq) - return -ENOSPC; +static struct irq_chip rcar_msi_bottom_chip = { + .name = "Rcar MSI", + .irq_ack = rcar_msi_irq_ack, + .irq_mask = rcar_msi_irq_mask, + .irq_unmask = rcar_msi_irq_unmask, + .irq_set_affinity = rcar_msi_set_affinity, + .irq_compose_msi_msg = rcar_compose_msi_msg, +}; - for (i = 0; i < nvec; i++) { - /* - * irq_create_mapping() called from rcar_pcie_probe() pre- - * allocates descs, so there is no need to allocate descs here. - * We can therefore assume that if irq_find_mapping() above - * returns non-zero, then the descs are also successfully - * allocated. - */ - if (irq_set_msi_desc_off(irq, i, desc)) { - /* TODO: clear */ - return -EINVAL; - } - } +static int rcar_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct rcar_msi *msi = domain->host_data; + unsigned int i; + int hwirq; - desc->nvec_used = nvec; - desc->msi_attrib.multiple = order_base_2(nvec); + mutex_lock(&msi->map_lock); - msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; - msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); - msg.data = hwirq; + hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs)); - pci_write_msi_msg(irq, &msg); + mutex_unlock(&msi->map_lock); + + if (hwirq < 0) + return -ENOSPC; + + for (i = 0; i < nr_irqs; i++) + irq_domain_set_info(domain, virq + i, hwirq + i, + &rcar_msi_bottom_chip, domain->host_data, + handle_edge_irq, NULL, NULL); return 0; } -static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) +static void rcar_msi_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) { - struct rcar_msi *msi = to_rcar_msi(chip); - struct irq_data *d = irq_get_irq_data(irq); - - rcar_msi_free(msi, d->hwirq); -} + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct rcar_msi *msi = domain->host_data; -static struct irq_chip rcar_msi_irq_chip = { - .name = "R-Car PCIe MSI", - .irq_enable = pci_msi_unmask_irq, - .irq_disable = pci_msi_mask_irq, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; + mutex_lock(&msi->map_lock); -static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); + bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs)); - return 0; + mutex_unlock(&msi->map_lock); } -static const struct irq_domain_ops msi_domain_ops = { - .map = rcar_msi_map, +static const struct irq_domain_ops rcar_msi_domain_ops = { + .alloc = rcar_msi_domain_alloc, + .free = rcar_msi_domain_free, +}; + +static struct msi_domain_info rcar_msi_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI), + .chip = &rcar_msi_top_chip, }; -static void rcar_pcie_unmap_msi(struct rcar_pcie_host *host) +static int rcar_allocate_domains(struct rcar_msi *msi) { - struct rcar_msi *msi = &host->msi; - int i, irq; + struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; + struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); + struct irq_domain *parent; + + parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR, + &rcar_msi_domain_ops, msi); + if (!parent) { + dev_err(pcie->dev, "failed to create IRQ domain\n"); + return -ENOMEM; + } + irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); - for (i = 0; i < INT_PCI_MSI_NR; i++) { - irq = irq_find_mapping(msi->domain, i); - if (irq 
> 0) - irq_dispose_mapping(irq); + msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent); + if (!msi->domain) { + dev_err(pcie->dev, "failed to create MSI domain\n"); + irq_domain_remove(parent); + return -ENOMEM; } - irq_domain_remove(msi->domain); + return 0; } -static void rcar_pcie_hw_enable_msi(struct rcar_pcie_host *host) +static void rcar_free_domains(struct rcar_msi *msi) { - struct rcar_pcie *pcie = &host->pcie; - struct rcar_msi *msi = &host->msi; - unsigned long base; - - /* setup MSI data target */ - base = virt_to_phys((void *)msi->pages); + struct irq_domain *parent = msi->domain->parent; - rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR); - rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR); - - /* enable all MSI interrupts */ - rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); + irq_domain_remove(msi->domain); + irq_domain_remove(parent); } static int rcar_pcie_enable_msi(struct rcar_pcie_host *host) @@ -698,29 +674,24 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host) struct rcar_pcie *pcie = &host->pcie; struct device *dev = pcie->dev; struct rcar_msi *msi = &host->msi; - int err, i; - - mutex_init(&msi->lock); + struct resource res; + int err; - msi->chip.dev = dev; - msi->chip.setup_irq = rcar_msi_setup_irq; - msi->chip.setup_irqs = rcar_msi_setup_irqs; - msi->chip.teardown_irq = rcar_msi_teardown_irq; + mutex_init(&msi->map_lock); + spin_lock_init(&msi->mask_lock); - msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR, - &msi_domain_ops, &msi->chip); - if (!msi->domain) { - dev_err(dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } + err = of_address_to_resource(dev->of_node, 0, &res); + if (err) + return err; - for (i = 0; i < INT_PCI_MSI_NR; i++) - irq_create_mapping(msi->domain, i); + err = rcar_allocate_domains(msi); + if (err) + return err; /* Two irqs are for MSI, but they are also used for non-MSI irqs */ err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq, IRQF_SHARED | IRQF_NO_THREAD, - rcar_msi_irq_chip.name, host); + rcar_msi_bottom_chip.name, host); if (err < 0) { dev_err(dev, "failed to request IRQ: %d\n", err); goto err; @@ -728,27 +699,32 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host) err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq, IRQF_SHARED | IRQF_NO_THREAD, - rcar_msi_irq_chip.name, host); + rcar_msi_bottom_chip.name, host); if (err < 0) { dev_err(dev, "failed to request IRQ: %d\n", err); goto err; } - /* setup MSI data target */ - msi->pages = __get_free_pages(GFP_KERNEL | GFP_DMA32, 0); - rcar_pcie_hw_enable_msi(host); + /* disable all MSIs */ + rcar_pci_write_reg(pcie, 0, PCIEMSIIER); + + /* + * Setup MSI data target using the RC base address, which + * is guaranteed to be in the low 32bit range on any RCar HW. 
+ */ + rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR); + rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR); return 0; err: - rcar_pcie_unmap_msi(host); + rcar_free_domains(msi); return err; } static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host) { struct rcar_pcie *pcie = &host->pcie; - struct rcar_msi *msi = &host->msi; /* Disable all MSI interrupts */ rcar_pci_write_reg(pcie, 0, PCIEMSIIER); @@ -756,9 +732,7 @@ static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host) /* Disable address decoding of the MSI interrupt, MSIFE */ rcar_pci_write_reg(pcie, 0, PCIEMSIALR); - free_pages(msi->pages, 0); - - rcar_pcie_unmap_msi(host); + rcar_free_domains(&host->msi); } static int rcar_pcie_get_resources(struct rcar_pcie_host *host) @@ -1011,8 +985,17 @@ static int __maybe_unused rcar_pcie_resume(struct device *dev) dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); /* Enable MSI */ - if (IS_ENABLED(CONFIG_PCI_MSI)) - rcar_pcie_hw_enable_msi(host); + if (IS_ENABLED(CONFIG_PCI_MSI)) { + struct resource res; + u32 val; + + of_address_to_resource(dev->of_node, 0, &res); + rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR); + rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR); + + bitmap_to_arr32(&val, host->msi.used, INT_PCI_MSI_NR); + rcar_pci_write_reg(pcie, val, PCIEMSIIER); + } rcar_pcie_hw_enable(host); diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 07e36661bbc2..8689311c5ef6 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -26,6 +26,7 @@ /* Bridge core config registers */ #define BRCFG_PCIE_RX0 0x00000000 +#define BRCFG_PCIE_RX1 0x00000004 #define BRCFG_INTERRUPT 0x00000010 #define BRCFG_PCIE_RX_MSG_FILTER 0x00000020 @@ -128,6 +129,7 @@ #define NWL_ECAM_VALUE_DEFAULT 12 #define CFG_DMA_REG_BAR GENMASK(2, 0) +#define CFG_PCIE_CACHE GENMASK(7, 0) #define INT_PCI_MSI_NR (2 * 32) @@ -675,6 +677,11 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK, BRCFG_PCIE_RX_MSG_FILTER); + /* This routes the PCIe DMA traffic to go through CCI path */ + if (of_dma_is_coherent(dev->of_node)) + nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX1) | + CFG_PCIE_CACHE, BRCFG_PCIE_RX1); + err = nwl_wait_for_link(pcie); if (err) return err; diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index fa5baeb82653..14001febf59a 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -93,25 +93,23 @@ /** * struct xilinx_pcie_port - PCIe port information * @reg_base: IO Mapped Register Base - * @irq: Interrupt number - * @msi_pages: MSI pages * @dev: Device pointer + * @msi_map: Bitmap of allocated MSIs + * @map_lock: Mutex protecting the MSI allocation * @msi_domain: MSI IRQ domain pointer * @leg_domain: Legacy IRQ domain pointer * @resources: Bus Resources */ struct xilinx_pcie_port { void __iomem *reg_base; - u32 irq; - unsigned long msi_pages; struct device *dev; + unsigned long msi_map[BITS_TO_LONGS(XILINX_NUM_MSI_IRQS)]; + struct mutex map_lock; struct irq_domain *msi_domain; struct irq_domain *leg_domain; struct list_head resources; }; -static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS); - static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg) { return readl(port->reg_base + reg); @@ -196,151 +194,118 @@ static struct pci_ops xilinx_pcie_ops = { /* MSI 
functions */ -/** - * xilinx_pcie_destroy_msi - Free MSI number - * @irq: IRQ to be freed - */ -static void xilinx_pcie_destroy_msi(unsigned int irq) +static void xilinx_msi_top_irq_ack(struct irq_data *d) { - struct msi_desc *msi; - struct xilinx_pcie_port *port; - struct irq_data *d = irq_get_irq_data(irq); - irq_hw_number_t hwirq = irqd_to_hwirq(d); - - if (!test_bit(hwirq, msi_irq_in_use)) { - msi = irq_get_msi_desc(irq); - port = msi_desc_to_pci_sysdata(msi); - dev_err(port->dev, "Trying to free unused MSI#%d\n", irq); - } else { - clear_bit(hwirq, msi_irq_in_use); - } + /* + * xilinx_pcie_intr_handler() will have performed the Ack. + * Eventually, this should be fixed and the Ack be moved in + * the respective callbacks for INTx and MSI. + */ } -/** - * xilinx_pcie_assign_msi - Allocate MSI number - * - * Return: A valid IRQ on success and error value on failure. - */ -static int xilinx_pcie_assign_msi(void) -{ - int pos; - - pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS); - if (pos < XILINX_NUM_MSI_IRQS) - set_bit(pos, msi_irq_in_use); - else - return -ENOSPC; +static struct irq_chip xilinx_msi_top_chip = { + .name = "PCIe MSI", + .irq_ack = xilinx_msi_top_irq_ack, +}; - return pos; +static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) +{ + return -EINVAL; } -/** - * xilinx_msi_teardown_irq - Destroy the MSI - * @chip: MSI Chip descriptor - * @irq: MSI IRQ to destroy - */ -static void xilinx_msi_teardown_irq(struct msi_controller *chip, - unsigned int irq) +static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { - xilinx_pcie_destroy_msi(irq); - irq_dispose_mapping(irq); + struct xilinx_pcie_port *pcie = irq_data_get_irq_chip_data(data); + phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pcie), SZ_4K); + + msg->address_lo = lower_32_bits(pa); + msg->address_hi = upper_32_bits(pa); + msg->data = data->hwirq; } -/** - * xilinx_pcie_msi_setup_irq - Setup MSI request - * @chip: MSI chip pointer - * @pdev: PCIe device pointer - * @desc: MSI descriptor pointer - * - * Return: '0' on success and error value on failure - */ -static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip, - struct pci_dev *pdev, - struct msi_desc *desc) -{ - struct xilinx_pcie_port *port = pdev->bus->sysdata; - unsigned int irq; - int hwirq; - struct msi_msg msg; - phys_addr_t msg_addr; +static struct irq_chip xilinx_msi_bottom_chip = { + .name = "Xilinx MSI", + .irq_set_affinity = xilinx_msi_set_affinity, + .irq_compose_msi_msg = xilinx_compose_msi_msg, +}; - hwirq = xilinx_pcie_assign_msi(); - if (hwirq < 0) - return hwirq; +static int xilinx_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct xilinx_pcie_port *port = domain->host_data; + int hwirq, i; - irq = irq_create_mapping(port->msi_domain, hwirq); - if (!irq) - return -EINVAL; + mutex_lock(&port->map_lock); - irq_set_msi_desc(irq, desc); + hwirq = bitmap_find_free_region(port->msi_map, XILINX_NUM_MSI_IRQS, order_base_2(nr_irqs)); - msg_addr = virt_to_phys((void *)port->msi_pages); + mutex_unlock(&port->map_lock); - msg.address_hi = 0; - msg.address_lo = msg_addr; - msg.data = irq; + if (hwirq < 0) + return -ENOSPC; - pci_write_msi_msg(irq, &msg); + for (i = 0; i < nr_irqs; i++) + irq_domain_set_info(domain, virq + i, hwirq + i, + &xilinx_msi_bottom_chip, domain->host_data, + handle_edge_irq, NULL, NULL); return 0; } -/* MSI Chip Descriptor */ -static struct msi_controller xilinx_pcie_msi_chip = { - .setup_irq = 
xilinx_pcie_msi_setup_irq, - .teardown_irq = xilinx_msi_teardown_irq, -}; +static void xilinx_msi_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct xilinx_pcie_port *port = domain->host_data; -/* HW Interrupt Chip Descriptor */ -static struct irq_chip xilinx_msi_irq_chip = { - .name = "Xilinx PCIe MSI", - .irq_enable = pci_msi_unmask_irq, - .irq_disable = pci_msi_mask_irq, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; + mutex_lock(&port->map_lock); -/** - * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid - * @domain: IRQ domain - * @irq: Virtual IRQ number - * @hwirq: HW interrupt number - * - * Return: Always returns 0. - */ -static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); + bitmap_release_region(port->msi_map, d->hwirq, order_base_2(nr_irqs)); - return 0; + mutex_unlock(&port->map_lock); } -/* IRQ Domain operations */ -static const struct irq_domain_ops msi_domain_ops = { - .map = xilinx_pcie_msi_map, +static const struct irq_domain_ops xilinx_msi_domain_ops = { + .alloc = xilinx_msi_domain_alloc, + .free = xilinx_msi_domain_free, }; -/** - * xilinx_pcie_enable_msi - Enable MSI support - * @port: PCIe port information - */ -static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) +static struct msi_domain_info xilinx_msi_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), + .chip = &xilinx_msi_top_chip, +}; + +static int xilinx_allocate_msi_domains(struct xilinx_pcie_port *pcie) { - phys_addr_t msg_addr; + struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); + struct irq_domain *parent; - port->msi_pages = __get_free_pages(GFP_KERNEL, 0); - if (!port->msi_pages) + parent = irq_domain_create_linear(fwnode, XILINX_NUM_MSI_IRQS, + &xilinx_msi_domain_ops, pcie); + if (!parent) { + dev_err(pcie->dev, "failed to create IRQ domain\n"); return -ENOMEM; + } + irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); - msg_addr = virt_to_phys((void *)port->msi_pages); - pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1); - pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); + pcie->msi_domain = pci_msi_create_irq_domain(fwnode, &xilinx_msi_info, parent); + if (!pcie->msi_domain) { + dev_err(pcie->dev, "failed to create MSI domain\n"); + irq_domain_remove(parent); + return -ENOMEM; + } return 0; } +static void xilinx_free_msi_domains(struct xilinx_pcie_port *pcie) +{ + struct irq_domain *parent = pcie->msi_domain->parent; + + irq_domain_remove(pcie->msi_domain); + irq_domain_remove(parent); +} + /* INTx Functions */ /** @@ -420,6 +385,8 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) } if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) { + unsigned int irq; + val = pcie_read(port, XILINX_PCIE_REG_RPIFR1); /* Check whether interrupt valid */ @@ -432,20 +399,19 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) if (val & XILINX_PCIE_RPIFR1_MSI_INTR) { val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) & XILINX_PCIE_RPIFR2_MSG_DATA; + irq = irq_find_mapping(port->msi_domain->parent, val); } else { val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >> XILINX_PCIE_RPIFR1_INTR_SHIFT; - val = irq_find_mapping(port->leg_domain, val); + irq = irq_find_mapping(port->leg_domain, val); } /* Clear interrupt FIFO register 1 
*/ pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK, XILINX_PCIE_REG_RPIFR1); - /* Handle the interrupt */ - if (IS_ENABLED(CONFIG_PCI_MSI) || - !(val & XILINX_PCIE_RPIFR1_MSI_INTR)) - generic_handle_irq(val); + if (irq) + generic_handle_irq(irq); } if (status & XILINX_PCIE_INTR_SLV_UNSUPP) @@ -491,12 +457,11 @@ error: static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) { struct device *dev = port->dev; - struct device_node *node = dev->of_node; struct device_node *pcie_intc_node; int ret; /* Setup INTx */ - pcie_intc_node = of_get_next_child(node, NULL); + pcie_intc_node = of_get_next_child(dev->of_node, NULL); if (!pcie_intc_node) { dev_err(dev, "No PCIe Intc node found\n"); return -ENODEV; @@ -513,18 +478,14 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) /* Setup MSI */ if (IS_ENABLED(CONFIG_PCI_MSI)) { - port->msi_domain = irq_domain_add_linear(node, - XILINX_NUM_MSI_IRQS, - &msi_domain_ops, - &xilinx_pcie_msi_chip); - if (!port->msi_domain) { - dev_err(dev, "Failed to get a MSI IRQ domain\n"); - return -ENODEV; - } + phys_addr_t pa = ALIGN_DOWN(virt_to_phys(port), SZ_4K); - ret = xilinx_pcie_enable_msi(port); + ret = xilinx_allocate_msi_domains(port); if (ret) return ret; + + pcie_write(port, upper_32_bits(pa), XILINX_PCIE_REG_MSIBASE1); + pcie_write(port, lower_32_bits(pa), XILINX_PCIE_REG_MSIBASE2); } return 0; @@ -572,6 +533,7 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port) struct device *dev = port->dev; struct device_node *node = dev->of_node; struct resource regs; + unsigned int irq; int err; err = of_address_to_resource(node, 0, ®s); @@ -584,12 +546,12 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port) if (IS_ERR(port->reg_base)) return PTR_ERR(port->reg_base); - port->irq = irq_of_parse_and_map(node, 0); - err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler, + irq = irq_of_parse_and_map(node, 0); + err = devm_request_irq(dev, irq, xilinx_pcie_intr_handler, IRQF_SHARED | IRQF_NO_THREAD, "xilinx-pcie", port); if (err) { - dev_err(dev, "unable to request irq %d\n", port->irq); + dev_err(dev, "unable to request irq %d\n", irq); return err; } @@ -617,7 +579,7 @@ static int xilinx_pcie_probe(struct platform_device *pdev) return -ENODEV; port = pci_host_bridge_priv(bridge); - + mutex_init(&port->map_lock); port->dev = dev; err = xilinx_pcie_parse_dt(port); @@ -637,11 +599,11 @@ static int xilinx_pcie_probe(struct platform_device *pdev) bridge->sysdata = port; bridge->ops = &xilinx_pcie_ops; -#ifdef CONFIG_PCI_MSI - xilinx_pcie_msi_chip.dev = dev; - bridge->msi = &xilinx_pcie_msi_chip; -#endif - return pci_host_probe(bridge); + err = pci_host_probe(bridge); + if (err) + xilinx_free_msi_domains(port); + + return err; } static const struct of_device_id xilinx_pcie_of_match[] = { diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 5e80f28f0119..e3fcdfec58b3 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -28,6 +28,7 @@ #define BUS_RESTRICT_CAP(vmcap) (vmcap & 0x1) #define PCI_REG_VMCONFIG 0x44 #define BUS_RESTRICT_CFG(vmcfg) ((vmcfg >> 8) & 0x3) +#define VMCONFIG_MSI_REMAP 0x2 #define PCI_REG_VMLOCK 0x70 #define MB2_SHADOW_EN(vmlock) (vmlock & 0x2) @@ -59,6 +60,13 @@ enum vmd_features { * be used for MSI remapping */ VMD_FEAT_OFFSET_FIRST_VECTOR = (1 << 3), + + /* + * Device can bypass remapping MSI-X transactions into its MSI-X table, + * avoiding the requirement of a VMD MSI domain for child device + * interrupt handling. 
+ */ + VMD_FEAT_CAN_BYPASS_MSI_REMAP = (1 << 4), }; /* @@ -306,6 +314,16 @@ static struct msi_domain_info vmd_msi_domain_info = { .chip = &vmd_msi_controller, }; +static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable) +{ + u16 reg; + + pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, ®); + reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) : + (reg | VMCONFIG_MSI_REMAP); + pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg); +} + static int vmd_create_irq_domain(struct vmd_dev *vmd) { struct fwnode_handle *fn; @@ -325,6 +343,13 @@ static int vmd_create_irq_domain(struct vmd_dev *vmd) static void vmd_remove_irq_domain(struct vmd_dev *vmd) { + /* + * Some production BIOS won't enable remapping between soft reboots. + * Ensure remapping is restored before unloading the driver. + */ + if (!vmd->msix_count) + vmd_set_msi_remapping(vmd, true); + if (vmd->irq_domain) { struct fwnode_handle *fn = vmd->irq_domain->fwnode; @@ -679,15 +704,32 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) sd->node = pcibus_to_node(vmd->dev->bus); - ret = vmd_create_irq_domain(vmd); - if (ret) - return ret; - /* - * Override the irq domain bus token so the domain can be distinguished - * from a regular PCI/MSI domain. + * Currently MSI remapping must be enabled in guest passthrough mode + * due to some missing interrupt remapping plumbing. This is probably + * acceptable because the guest is usually CPU-limited and MSI + * remapping doesn't become a performance bottleneck. */ - irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI); + if (!(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) || + offset[0] || offset[1]) { + ret = vmd_alloc_irqs(vmd); + if (ret) + return ret; + + vmd_set_msi_remapping(vmd, true); + + ret = vmd_create_irq_domain(vmd); + if (ret) + return ret; + + /* + * Override the IRQ domain bus token so the domain can be + * distinguished from a regular PCI/MSI domain. 
+ */ + irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI); + } else { + vmd_set_msi_remapping(vmd, false); + } pci_add_resource(&resources, &vmd->resources[0]); pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); @@ -753,10 +795,6 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) if (features & VMD_FEAT_OFFSET_FIRST_VECTOR) vmd->first_vec = 1; - err = vmd_alloc_irqs(vmd); - if (err) - return err; - spin_lock_init(&vmd->cfg_lock); pci_set_drvdata(dev, vmd); err = vmd_enable_domain(vmd, features); @@ -825,7 +863,8 @@ static const struct pci_device_id vmd_ids[] = { .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0), .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW | - VMD_FEAT_HAS_BUS_RESTRICTIONS,}, + VMD_FEAT_HAS_BUS_RESTRICTIONS | + VMD_FEAT_CAN_BYPASS_MSI_REMAP,}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f), .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | VMD_FEAT_HAS_BUS_RESTRICTIONS | diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c index 338148cf56f5..bce274d02dcf 100644 --- a/drivers/pci/endpoint/functions/pci-epf-ntb.c +++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * Endpoint Function Driver to implement Non-Transparent Bridge functionality * * Copyright (C) 2020 Texas Instruments @@ -696,7 +696,8 @@ reset_handler: /** * epf_ntb_peer_spad_bar_clear() - Clear Peer Scratchpad BAR - * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @ntb_epc: EPC associated with one of the HOST which holds peer's outbound + * address. * *+-----------------+------->+------------------+ +-----------------+ *| BAR0 | | CONFIG REGION | | BAR0 | @@ -740,6 +741,7 @@ static void epf_ntb_peer_spad_bar_clear(struct epf_ntb_epc *ntb_epc) /** * epf_ntb_peer_spad_bar_set() - Set peer scratchpad BAR * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @type: PRIMARY interface or SECONDARY interface * *+-----------------+------->+------------------+ +-----------------+ *| BAR0 | | CONFIG REGION | | BAR0 | @@ -808,7 +810,8 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb, /** * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR - * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @ntb_epc: EPC associated with one of the HOST which holds peer's outbound + * address. * * +-----------------+------->+------------------+ +-----------------+ * | BAR0 | | CONFIG REGION | | BAR0 | @@ -851,7 +854,8 @@ static void epf_ntb_config_sspad_bar_clear(struct epf_ntb_epc *ntb_epc) /** * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR - * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @ntb_epc: EPC associated with one of the HOST which holds peer's outbound + * address. * * +-----------------+------->+------------------+ +-----------------+ * | BAR0 | | CONFIG REGION | | BAR0 | @@ -1312,6 +1316,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb, /** * epf_ntb_alloc_peer_mem() - Allocate memory in peer's outbound address space + * @dev: The PCI device. 
* @ntb_epc: EPC associated with one of the HOST whose BAR holds peer's outbound * address * @bar: BAR of @ntb_epc in for which memory has to be allocated (could be @@ -1660,7 +1665,6 @@ static int epf_ntb_init_epc_bar_interface(struct epf_ntb *ntb, * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB * constructs (scratchpad region, doorbell, memorywindow) * @ntb: NTB device that facilitates communication between HOST1 and HOST2 - * @type: PRIMARY interface or SECONDARY interface * * Wrapper to epf_ntb_init_epc_bar_interface() to identify the free BARs * to be used for each of BAR_CONFIG, BAR_PEER_SPAD, BAR_DB_MW1, BAR_MW2, @@ -2037,6 +2041,8 @@ static const struct config_item_type ntb_group_type = { /** * epf_ntb_add_cfs() - Add configfs directory specific to NTB * @epf: NTB endpoint function device + * @group: A pointer to the config_group structure referencing a group of + * config_items of a specific type that belong to a specific sub-system. * * Add configfs directory specific to NTB. This directory will hold * NTB specific properties like db_count, spad_count, num_mws etc., diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index c0ac4e9cbe72..d2708ca4bece 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * Test driver to test endpoint functionality * * Copyright (C) 2017 Texas Instruments @@ -833,15 +833,18 @@ static int pci_epf_test_bind(struct pci_epf *epf) return -EINVAL; epc_features = pci_epc_get_features(epc, epf->func_no); - if (epc_features) { - linkup_notifier = epc_features->linkup_notifier; - core_init_notifier = epc_features->core_init_notifier; - test_reg_bar = pci_epc_get_first_free_bar(epc_features); - if (test_reg_bar < 0) - return -EINVAL; - pci_epf_configure_bar(epf, epc_features); + if (!epc_features) { + dev_err(&epf->dev, "epc_features not implemented\n"); + return -EOPNOTSUPP; } + linkup_notifier = epc_features->linkup_notifier; + core_init_notifier = epc_features->core_init_notifier; + test_reg_bar = pci_epc_get_first_free_bar(epc_features); + if (test_reg_bar < 0) + return -EINVAL; + pci_epf_configure_bar(epf, epc_features); + epf_test->test_reg_bar = test_reg_bar; epf_test->epc_features = epc_features; @@ -922,6 +925,7 @@ static int __init pci_epf_test_init(void) ret = pci_epf_register_driver(&test_driver); if (ret) { + destroy_workqueue(kpcitest_workqueue); pr_err("Failed to register pci epf test driver --> %d\n", ret); return ret; } @@ -932,6 +936,8 @@ module_init(pci_epf_test_init); static void __exit pci_epf_test_exit(void) { + if (kpcitest_workqueue) + destroy_workqueue(kpcitest_workqueue); pci_epf_unregister_driver(&test_driver); } module_exit(pci_epf_test_exit); diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index cc8f9eb2b177..adec9bee72cf 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -594,6 +594,8 @@ EXPORT_SYMBOL_GPL(pci_epc_add_epf); * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller * @epc: the EPC device from which the endpoint function should be removed * @epf: the endpoint function to be removed + * @type: identifies if the EPC is connected to the primary or secondary + * interface of EPF * * Invoke to remove PCI endpoint function from the endpoint controller. 
*/ diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 7646c8660d42..e9289d10f822 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -113,7 +113,7 @@ EXPORT_SYMBOL_GPL(pci_epf_bind); void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar, enum pci_epc_interface_type type) { - struct device *dev = epf->epc->dev.parent; + struct device *dev; struct pci_epf_bar *epf_bar; struct pci_epc *epc; diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 2750a64cecd3..4fedebf2f8c1 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c @@ -157,7 +157,7 @@ static int pcihp_is_ejectable(acpi_handle handle) } /** - * acpi_pcihp_check_ejectable - check if handle is ejectable ACPI PCI slot + * acpi_pci_check_ejectable - check if handle is ejectable ACPI PCI slot * @pbus: the PCI bus of the PCI slot corresponding to 'handle' * @handle: ACPI handle to check * diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index a74b274a8c45..1f8ab4377ad8 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h @@ -148,8 +148,7 @@ static inline struct acpiphp_root_context *to_acpiphp_root_context(struct acpi_h * ACPI has no generic method of setting/getting attention status * this allows for device specific driver registration */ -struct acpiphp_attention_info -{ +struct acpiphp_attention_info { int (*set_attn)(struct hotplug_slot *slot, u8 status); int (*get_attn)(struct hotplug_slot *slot, u8 *status); struct module *owner; diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 3365c93abf0e..f031302ad401 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -533,6 +533,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) slot->flags &= ~SLOT_ENABLED; continue; } + pci_dev_put(dev); } } diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c index 00cd2b43364f..7a65d427ac11 100644 --- a/drivers/pci/hotplug/cpqphp_nvram.c +++ b/drivers/pci/hotplug/cpqphp_nvram.c @@ -80,7 +80,7 @@ static u8 evbuffer[1024]; static void __iomem *compaq_int15_entry_point; /* lock for ordering int15_bios_call() */ -static spinlock_t int15_lock; +static DEFINE_SPINLOCK(int15_lock); /* This is a series of function that deals with @@ -415,9 +415,6 @@ void compaq_nvram_init(void __iomem *rom_start) compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR); dbg("int15 entry = %p\n", compaq_int15_entry_point); - - /* initialize our int15 lock */ - spin_lock_init(&int15_lock); } diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index db047284c291..9e3b27744305 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c @@ -174,11 +174,6 @@ static inline u8 shpc_readb(struct controller *ctrl, int reg) return readb(ctrl->creg + reg); } -static inline void shpc_writeb(struct controller *ctrl, int reg, u8 val) -{ - writeb(val, ctrl->creg + reg); -} - static inline u16 shpc_readw(struct controller *ctrl, int reg) { return readw(ctrl->creg + reg); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 3162f88fe940..217dc9f0231f 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -64,39 +64,18 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev) /* Arch hooks */ int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) { - 
struct msi_controller *chip = dev->bus->msi; - int err; - - if (!chip || !chip->setup_irq) - return -EINVAL; - - err = chip->setup_irq(chip, dev, desc); - if (err < 0) - return err; - - irq_set_chip_data(desc->irq, chip); - - return 0; + return -EINVAL; } void __weak arch_teardown_msi_irq(unsigned int irq) { - struct msi_controller *chip = irq_get_chip_data(irq); - - if (!chip || !chip->teardown_irq) - return; - - chip->teardown_irq(chip, irq); } int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { - struct msi_controller *chip = dev->bus->msi; struct msi_desc *entry; int ret; - if (chip && chip->setup_irqs) - return chip->setup_irqs(chip, dev, nvec, type); /* * If an architecture wants to support multiple MSI, it needs to * override arch_setup_msi_irqs() @@ -115,11 +94,7 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) return 0; } -/* - * We have a default implementation available as a separate non-weak - * function, as it is used by the Xen x86 PCI code - */ -void default_teardown_msi_irqs(struct pci_dev *dev) +void __weak arch_teardown_msi_irqs(struct pci_dev *dev) { int i; struct msi_desc *entry; @@ -129,11 +104,6 @@ void default_teardown_msi_irqs(struct pci_dev *dev) for (i = 0; i < entry->nvec_used; i++) arch_teardown_msi_irq(entry->irq + i); } - -void __weak arch_teardown_msi_irqs(struct pci_dev *dev) -{ - return default_teardown_msi_irqs(dev); -} #endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */ static void default_restore_msi_irq(struct pci_dev *dev, int irq) @@ -901,8 +871,15 @@ static int pci_msi_supported(struct pci_dev *dev, int nvec) * Any bridge which does NOT route MSI transactions from its * secondary bus to its primary bus must set NO_MSI flag on * the secondary pci_bus. - * We expect only arch-specific PCI host bus controller driver - * or quirks for specific PCI bridges to be setting NO_MSI. + * + * The NO_MSI flag can either be set directly by: + * - arch-specific PCI host bus controller drivers (deprecated) + * - quirks for specific PCI bridges + * + * or indirectly by platform-specific PCI host bridge drivers by + * advertising the 'msi_domain' property, which results in + * the NO_MSI flag when no MSI domain is found for this bridge + * at probe time. */ for (bus = dev->bus; bus; bus = bus->parent) if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 5ea472ae22ac..da5b414d585a 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -190,10 +190,18 @@ int of_pci_parse_bus_range(struct device_node *node, struct resource *res) EXPORT_SYMBOL_GPL(of_pci_parse_bus_range); /** - * This function will try to obtain the host bridge domain number by - * finding a property called "linux,pci-domain" of the given device node. + * of_get_pci_domain_nr - Find the host bridge domain number + * of the given device node. + * @node: Device tree node with the domain information. * - * @node: device tree node with the domain information + * This function will try to obtain the host bridge domain number by finding + * a property called "linux,pci-domain" of the given device node. + * + * Return: + * * > 0 - On success, an associated domain number. + * * -EINVAL - The property "linux,pci-domain" does not exist. + * * -ENODATA - The "linux,pci-domain" property does not have a value. + * * -EOVERFLOW - Invalid "linux,pci-domain" property value. * * Returns the associated domain number from DT in the range [0-0xffff], or * a negative value if the required property is not found. 
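The kernel-doc just added enumerates the error codes of of_get_pci_domain_nr(). A minimal caller sketch, mirroring those documented return values; the foo_pcie structure and its domain_nr field are illustrative and not part of this patch:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_pci.h>

/* illustrative driver state, not from the patch */
struct foo_pcie {
	struct device *dev;
	int domain_nr;
};

static int foo_pcie_read_domain(struct foo_pcie *pcie)
{
	int domain = of_get_pci_domain_nr(pcie->dev->of_node);

	/* propagate -EINVAL, -ENODATA or -EOVERFLOW as documented above */
	if (domain < 0)
		return domain;

	pcie->domain_nr = domain;	/* 0-0xffff on success */
	return 0;
}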
@@ -585,10 +593,16 @@ int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge) #endif /* CONFIG_PCI */ /** + * of_pci_get_max_link_speed - Find the maximum link speed of the given device node. + * @node: Device tree node with the maximum link speed information. + * * This function will try to find the limitation of link speed by finding * a property called "max-link-speed" of the given device node. * - * @node: device tree node with the max link speed information + * Return: + * * > 0 - On success, a maximum link speed. + * * -EINVAL - Invalid "max-link-speed" property value, or failure to access + * the property of the device tree node. * * Returns the associated max link speed from DT, or a negative value if the * required property is not found or is invalid. diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 53502a751914..36bc23e21759 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -1021,7 +1021,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) if (!error) pci_dbg(dev, "power state changed by ACPI to %s\n", - acpi_power_state_string(state_conv[state])); + acpi_power_state_string(adev->power.state)); return error; } diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index 781e45cf60d1..c32f3b7540e8 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c @@ -33,6 +33,21 @@ #include <linux/pci-acpi.h> #include "pci.h" +static bool device_has_acpi_name(struct device *dev) +{ +#ifdef CONFIG_ACPI + acpi_handle handle = ACPI_HANDLE(dev); + + if (!handle) + return false; + + return acpi_check_dsm(handle, &pci_acpi_dsm_guid, 0x2, + 1 << DSM_PCI_DEVICE_NAME); +#else + return false; +#endif +} + #ifdef CONFIG_DMI enum smbios_attr_enum { SMBIOS_ATTR_NONE = 0, @@ -45,13 +60,9 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf, { const struct dmi_device *dmi; struct dmi_dev_onboard *donboard; - int domain_nr; - int bus; - int devfn; - - domain_nr = pci_domain_nr(pdev->bus); - bus = pdev->bus->number; - devfn = pdev->devfn; + int domain_nr = pci_domain_nr(pdev->bus); + int bus = pdev->bus->number; + int devfn = pdev->devfn; dmi = NULL; while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, @@ -62,13 +73,11 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf, donboard->devfn == devfn) { if (buf) { if (attribute == SMBIOS_ATTR_INSTANCE_SHOW) - return scnprintf(buf, PAGE_SIZE, - "%d\n", - donboard->instance); + return sysfs_emit(buf, "%d\n", + donboard->instance); else if (attribute == SMBIOS_ATTR_LABEL_SHOW) - return scnprintf(buf, PAGE_SIZE, - "%s\n", - dmi->name); + return sysfs_emit(buf, "%s\n", + dmi->name); } return strlen(dmi->name); } @@ -76,78 +85,52 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf, return 0; } -static umode_t smbios_instance_string_exist(struct kobject *kobj, - struct attribute *attr, int n) +static ssize_t smbios_label_show(struct device *dev, + struct device_attribute *attr, char *buf) { - struct device *dev; - struct pci_dev *pdev; - - dev = kobj_to_dev(kobj); - pdev = to_pci_dev(dev); - - return find_smbios_instance_string(pdev, NULL, SMBIOS_ATTR_NONE) ? 
- S_IRUGO : 0; -} - -static ssize_t smbioslabel_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct pci_dev *pdev; - pdev = to_pci_dev(dev); + struct pci_dev *pdev = to_pci_dev(dev); return find_smbios_instance_string(pdev, buf, SMBIOS_ATTR_LABEL_SHOW); } +static struct device_attribute dev_attr_smbios_label = __ATTR(label, 0444, + smbios_label_show, NULL); -static ssize_t smbiosinstance_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t index_show(struct device *dev, struct device_attribute *attr, + char *buf) { - struct pci_dev *pdev; - pdev = to_pci_dev(dev); + struct pci_dev *pdev = to_pci_dev(dev); return find_smbios_instance_string(pdev, buf, SMBIOS_ATTR_INSTANCE_SHOW); } +static DEVICE_ATTR_RO(index); -static struct device_attribute smbios_attr_label = { - .attr = {.name = "label", .mode = 0444}, - .show = smbioslabel_show, -}; - -static struct device_attribute smbios_attr_instance = { - .attr = {.name = "index", .mode = 0444}, - .show = smbiosinstance_show, -}; - -static struct attribute *smbios_attributes[] = { - &smbios_attr_label.attr, - &smbios_attr_instance.attr, +static struct attribute *smbios_attrs[] = { + &dev_attr_smbios_label.attr, + &dev_attr_index.attr, NULL, }; -static const struct attribute_group smbios_attr_group = { - .attrs = smbios_attributes, - .is_visible = smbios_instance_string_exist, -}; - -static int pci_create_smbiosname_file(struct pci_dev *pdev) +static umode_t smbios_attr_is_visible(struct kobject *kobj, struct attribute *a, + int n) { - return sysfs_create_group(&pdev->dev.kobj, &smbios_attr_group); -} + struct device *dev = kobj_to_dev(kobj); + struct pci_dev *pdev = to_pci_dev(dev); -static void pci_remove_smbiosname_file(struct pci_dev *pdev) -{ - sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group); -} -#else -static inline int pci_create_smbiosname_file(struct pci_dev *pdev) -{ - return -1; -} + if (device_has_acpi_name(dev)) + return 0; -static inline void pci_remove_smbiosname_file(struct pci_dev *pdev) -{ + if (!find_smbios_instance_string(pdev, NULL, SMBIOS_ATTR_NONE)) + return 0; + + return a->mode; } + +const struct attribute_group pci_dev_smbios_attr_group = { + .attrs = smbios_attrs, + .is_visible = smbios_attr_is_visible, +}; #endif #ifdef CONFIG_ACPI @@ -169,11 +152,10 @@ static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf) static int dsm_get_label(struct device *dev, char *buf, enum acpi_attr_enum attr) { - acpi_handle handle; + acpi_handle handle = ACPI_HANDLE(dev); union acpi_object *obj, *tmp; int len = -1; - handle = ACPI_HANDLE(dev); if (!handle) return -1; @@ -209,103 +191,39 @@ static int dsm_get_label(struct device *dev, char *buf, return len; } -static bool device_has_dsm(struct device *dev) -{ - acpi_handle handle; - - handle = ACPI_HANDLE(dev); - if (!handle) - return false; - - return !!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 0x2, - 1 << DSM_PCI_DEVICE_NAME); -} - -static umode_t acpi_index_string_exist(struct kobject *kobj, - struct attribute *attr, int n) -{ - struct device *dev; - - dev = kobj_to_dev(kobj); - - if (device_has_dsm(dev)) - return S_IRUGO; - - return 0; -} - -static ssize_t acpilabel_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t label_show(struct device *dev, struct device_attribute *attr, + char *buf) { return dsm_get_label(dev, buf, ACPI_ATTR_LABEL_SHOW); } +static DEVICE_ATTR_RO(label); -static ssize_t acpiindex_show(struct device *dev, +static ssize_t acpi_index_show(struct 
device *dev, struct device_attribute *attr, char *buf) { return dsm_get_label(dev, buf, ACPI_ATTR_INDEX_SHOW); } +static DEVICE_ATTR_RO(acpi_index); -static struct device_attribute acpi_attr_label = { - .attr = {.name = "label", .mode = 0444}, - .show = acpilabel_show, -}; - -static struct device_attribute acpi_attr_index = { - .attr = {.name = "acpi_index", .mode = 0444}, - .show = acpiindex_show, -}; - -static struct attribute *acpi_attributes[] = { - &acpi_attr_label.attr, - &acpi_attr_index.attr, +static struct attribute *acpi_attrs[] = { + &dev_attr_label.attr, + &dev_attr_acpi_index.attr, NULL, }; -static const struct attribute_group acpi_attr_group = { - .attrs = acpi_attributes, - .is_visible = acpi_index_string_exist, -}; - -static int pci_create_acpi_index_label_files(struct pci_dev *pdev) +static umode_t acpi_attr_is_visible(struct kobject *kobj, struct attribute *a, + int n) { - return sysfs_create_group(&pdev->dev.kobj, &acpi_attr_group); -} + struct device *dev = kobj_to_dev(kobj); -static int pci_remove_acpi_index_label_files(struct pci_dev *pdev) -{ - sysfs_remove_group(&pdev->dev.kobj, &acpi_attr_group); - return 0; -} -#else -static inline int pci_create_acpi_index_label_files(struct pci_dev *pdev) -{ - return -1; -} + if (!device_has_acpi_name(dev)) + return 0; -static inline int pci_remove_acpi_index_label_files(struct pci_dev *pdev) -{ - return -1; + return a->mode; } -static inline bool device_has_dsm(struct device *dev) -{ - return false; -} +const struct attribute_group pci_dev_acpi_attr_group = { + .attrs = acpi_attrs, + .is_visible = acpi_attr_is_visible, +}; #endif - -void pci_create_firmware_label_files(struct pci_dev *pdev) -{ - if (device_has_dsm(&pdev->dev)) - pci_create_acpi_index_label_files(pdev); - else - pci_create_smbiosname_file(pdev); -} - -void pci_remove_firmware_label_files(struct pci_dev *pdev) -{ - if (device_has_dsm(&pdev->dev)) - pci_remove_acpi_index_label_files(pdev); - else - pci_remove_smbiosname_file(pdev); -} diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index a6b8fbbba6d2..beb8d1f4fafe 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -39,7 +39,7 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \ struct pci_dev *pdev; \ \ pdev = to_pci_dev(dev); \ - return sprintf(buf, format_string, pdev->field); \ + return sysfs_emit(buf, format_string, pdev->field); \ } \ static DEVICE_ATTR_RO(field) @@ -56,7 +56,7 @@ static ssize_t broken_parity_status_show(struct device *dev, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); - return sprintf(buf, "%u\n", pdev->broken_parity_status); + return sysfs_emit(buf, "%u\n", pdev->broken_parity_status); } static ssize_t broken_parity_status_store(struct device *dev, @@ -129,7 +129,7 @@ static ssize_t power_state_show(struct device *dev, { struct pci_dev *pdev = to_pci_dev(dev); - return sprintf(buf, "%s\n", pci_power_name(pdev->current_state)); + return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state)); } static DEVICE_ATTR_RO(power_state); @@ -138,10 +138,10 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pci_dev = to_pci_dev(dev); - char *str = buf; int i; int max; resource_size_t start, end; + size_t len = 0; if (pci_dev->subordinate) max = DEVICE_COUNT_RESOURCE; @@ -151,12 +151,12 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr, for (i = 0; i < max; i++) { struct resource *res = &pci_dev->resource[i]; pci_resource_to_user(pci_dev, 
i, res, &start, &end); - str += sprintf(str, "0x%016llx 0x%016llx 0x%016llx\n", - (unsigned long long)start, - (unsigned long long)end, - (unsigned long long)res->flags); + len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n", + (unsigned long long)start, + (unsigned long long)end, + (unsigned long long)res->flags); } - return (str - buf); + return len; } static DEVICE_ATTR_RO(resource); @@ -165,8 +165,8 @@ static ssize_t max_link_speed_show(struct device *dev, { struct pci_dev *pdev = to_pci_dev(dev); - return sprintf(buf, "%s\n", - pci_speed_string(pcie_get_speed_cap(pdev))); + return sysfs_emit(buf, "%s\n", + pci_speed_string(pcie_get_speed_cap(pdev))); } static DEVICE_ATTR_RO(max_link_speed); @@ -175,7 +175,7 @@ static ssize_t max_link_width_show(struct device *dev, { struct pci_dev *pdev = to_pci_dev(dev); - return sprintf(buf, "%u\n", pcie_get_width_cap(pdev)); + return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev)); } static DEVICE_ATTR_RO(max_link_width); @@ -193,7 +193,7 @@ static ssize_t current_link_speed_show(struct device *dev, speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS]; - return sprintf(buf, "%s\n", pci_speed_string(speed)); + return sysfs_emit(buf, "%s\n", pci_speed_string(speed)); } static DEVICE_ATTR_RO(current_link_speed); @@ -208,7 +208,7 @@ static ssize_t current_link_width_show(struct device *dev, if (err) return -EINVAL; - return sprintf(buf, "%u\n", + return sysfs_emit(buf, "%u\n", (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT); } static DEVICE_ATTR_RO(current_link_width); @@ -225,7 +225,7 @@ static ssize_t secondary_bus_number_show(struct device *dev, if (err) return -EINVAL; - return sprintf(buf, "%u\n", sec_bus); + return sysfs_emit(buf, "%u\n", sec_bus); } static DEVICE_ATTR_RO(secondary_bus_number); @@ -241,7 +241,7 @@ static ssize_t subordinate_bus_number_show(struct device *dev, if (err) return -EINVAL; - return sprintf(buf, "%u\n", sub_bus); + return sysfs_emit(buf, "%u\n", sub_bus); } static DEVICE_ATTR_RO(subordinate_bus_number); @@ -251,7 +251,7 @@ static ssize_t ari_enabled_show(struct device *dev, { struct pci_dev *pci_dev = to_pci_dev(dev); - return sprintf(buf, "%u\n", pci_ari_enabled(pci_dev->bus)); + return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus)); } static DEVICE_ATTR_RO(ari_enabled); @@ -260,11 +260,11 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, { struct pci_dev *pci_dev = to_pci_dev(dev); - return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n", - pci_dev->vendor, pci_dev->device, - pci_dev->subsystem_vendor, pci_dev->subsystem_device, - (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8), - (u8)(pci_dev->class)); + return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n", + pci_dev->vendor, pci_dev->device, + pci_dev->subsystem_vendor, pci_dev->subsystem_device, + (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8), + (u8)(pci_dev->class)); } static DEVICE_ATTR_RO(modalias); @@ -302,7 +302,7 @@ static ssize_t enable_show(struct device *dev, struct device_attribute *attr, struct pci_dev *pdev; pdev = to_pci_dev(dev); - return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt)); + return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt)); } static DEVICE_ATTR_RW(enable); @@ -338,7 +338,7 @@ static ssize_t numa_node_store(struct device *dev, static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", dev->numa_node); + return sysfs_emit(buf, "%d\n", 
dev->numa_node); } static DEVICE_ATTR_RW(numa_node); #endif @@ -348,7 +348,7 @@ static ssize_t dma_mask_bits_show(struct device *dev, { struct pci_dev *pdev = to_pci_dev(dev); - return sprintf(buf, "%d\n", fls64(pdev->dma_mask)); + return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask)); } static DEVICE_ATTR_RO(dma_mask_bits); @@ -356,7 +356,7 @@ static ssize_t consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", fls64(dev->coherent_dma_mask)); + return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask)); } static DEVICE_ATTR_RO(consistent_dma_mask_bits); @@ -366,9 +366,9 @@ static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr, struct pci_dev *pdev = to_pci_dev(dev); struct pci_bus *subordinate = pdev->subordinate; - return sprintf(buf, "%u\n", subordinate ? - !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI) - : !pdev->no_msi); + return sysfs_emit(buf, "%u\n", subordinate ? + !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI) + : !pdev->no_msi); } static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr, @@ -523,7 +523,7 @@ static ssize_t d3cold_allowed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); - return sprintf(buf, "%u\n", pdev->d3cold_allowed); + return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed); } static DEVICE_ATTR_RW(d3cold_allowed); #endif @@ -537,7 +537,7 @@ static ssize_t devspec_show(struct device *dev, if (np == NULL) return 0; - return sprintf(buf, "%pOF", np); + return sysfs_emit(buf, "%pOF", np); } static DEVICE_ATTR_RO(devspec); #endif @@ -583,7 +583,7 @@ static ssize_t driver_override_show(struct device *dev, ssize_t len; device_lock(dev); - len = scnprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); + len = sysfs_emit(buf, "%s\n", pdev->driver_override); device_unlock(dev); return len; } @@ -658,11 +658,11 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr, struct pci_dev *vga_dev = vga_default_device(); if (vga_dev) - return sprintf(buf, "%u\n", (pdev == vga_dev)); + return sysfs_emit(buf, "%u\n", (pdev == vga_dev)); - return sprintf(buf, "%u\n", - !!(pdev->resource[PCI_ROM_RESOURCE].flags & - IORESOURCE_ROM_SHADOW)); + return sysfs_emit(buf, "%u\n", + !!(pdev->resource[PCI_ROM_RESOURCE].flags & + IORESOURCE_ROM_SHADOW)); } static DEVICE_ATTR_RO(boot_vga); @@ -808,6 +808,29 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj, return count; } +static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0); + +static struct bin_attribute *pci_dev_config_attrs[] = { + &bin_attr_config, + NULL, +}; + +static umode_t pci_dev_config_attr_is_visible(struct kobject *kobj, + struct bin_attribute *a, int n) +{ + struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); + + a->size = PCI_CFG_SPACE_SIZE; + if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) + a->size = PCI_CFG_SPACE_EXP_SIZE; + + return a->attr.mode; +} + +static const struct attribute_group pci_dev_config_attr_group = { + .bin_attrs = pci_dev_config_attrs, + .is_bin_visible = pci_dev_config_attr_is_visible, +}; #ifdef HAVE_PCI_LEGACY /** @@ -1283,25 +1306,32 @@ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj, return count; } +static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0); -static const struct bin_attribute pci_config_attr = { - .attr = { - .name = "config", - .mode = 0644, - }, - .size = PCI_CFG_SPACE_SIZE, - .read = pci_read_config, - .write = 
pci_write_config, +static struct bin_attribute *pci_dev_rom_attrs[] = { + &bin_attr_rom, + NULL, }; -static const struct bin_attribute pcie_config_attr = { - .attr = { - .name = "config", - .mode = 0644, - }, - .size = PCI_CFG_SPACE_EXP_SIZE, - .read = pci_read_config, - .write = pci_write_config, +static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj, + struct bin_attribute *a, int n) +{ + struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); + size_t rom_size; + + /* If the device has a ROM, try to expose it in sysfs. */ + rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE); + if (!rom_size) + return 0; + + a->size = rom_size; + + return a->attr.mode; +} + +static const struct attribute_group pci_dev_rom_attr_group = { + .bin_attrs = pci_dev_rom_attrs, + .is_bin_visible = pci_dev_rom_attr_is_visible, }; static ssize_t reset_store(struct device *dev, struct device_attribute *attr, @@ -1325,102 +1355,35 @@ static ssize_t reset_store(struct device *dev, struct device_attribute *attr, return count; } +static DEVICE_ATTR_WO(reset); -static DEVICE_ATTR(reset, 0200, NULL, reset_store); +static struct attribute *pci_dev_reset_attrs[] = { + &dev_attr_reset.attr, + NULL, +}; -static int pci_create_capabilities_sysfs(struct pci_dev *dev) +static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj, + struct attribute *a, int n) { - int retval; - - pcie_vpd_create_sysfs_dev_files(dev); + struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); - if (dev->reset_fn) { - retval = device_create_file(&dev->dev, &dev_attr_reset); - if (retval) - goto error; - } - return 0; + if (!pdev->reset_fn) + return 0; -error: - pcie_vpd_remove_sysfs_dev_files(dev); - return retval; + return a->mode; } +static const struct attribute_group pci_dev_reset_attr_group = { + .attrs = pci_dev_reset_attrs, + .is_visible = pci_dev_reset_attr_is_visible, +}; + int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev) { - int retval; - int rom_size; - struct bin_attribute *attr; - if (!sysfs_initialized) return -EACCES; - if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) - retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr); - else - retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr); - if (retval) - goto err; - - retval = pci_create_resource_files(pdev); - if (retval) - goto err_config_file; - - /* If the device has a ROM, try to expose it in sysfs. 
*/ - rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE); - if (rom_size) { - attr = kzalloc(sizeof(*attr), GFP_ATOMIC); - if (!attr) { - retval = -ENOMEM; - goto err_resource_files; - } - sysfs_bin_attr_init(attr); - attr->size = rom_size; - attr->attr.name = "rom"; - attr->attr.mode = 0600; - attr->read = pci_read_rom; - attr->write = pci_write_rom; - retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); - if (retval) { - kfree(attr); - goto err_resource_files; - } - pdev->rom_attr = attr; - } - - /* add sysfs entries for various capabilities */ - retval = pci_create_capabilities_sysfs(pdev); - if (retval) - goto err_rom_file; - - pci_create_firmware_label_files(pdev); - - return 0; - -err_rom_file: - if (pdev->rom_attr) { - sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); - kfree(pdev->rom_attr); - pdev->rom_attr = NULL; - } -err_resource_files: - pci_remove_resource_files(pdev); -err_config_file: - if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) - sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); - else - sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); -err: - return retval; -} - -static void pci_remove_capabilities_sysfs(struct pci_dev *dev) -{ - pcie_vpd_remove_sysfs_dev_files(dev); - if (dev->reset_fn) { - device_remove_file(&dev->dev, &dev_attr_reset); - dev->reset_fn = 0; - } + return pci_create_resource_files(pdev); } /** @@ -1434,22 +1397,7 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev) if (!sysfs_initialized) return; - pci_remove_capabilities_sysfs(pdev); - - if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) - sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); - else - sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); - pci_remove_resource_files(pdev); - - if (pdev->rom_attr) { - sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); - kfree(pdev->rom_attr); - pdev->rom_attr = NULL; - } - - pci_remove_firmware_label_files(pdev); } static int __init pci_sysfs_init(void) @@ -1540,6 +1488,16 @@ static const struct attribute_group pci_dev_group = { const struct attribute_group *pci_dev_groups[] = { &pci_dev_group, + &pci_dev_config_attr_group, + &pci_dev_rom_attr_group, + &pci_dev_reset_attr_group, + &pci_dev_vpd_attr_group, +#ifdef CONFIG_DMI + &pci_dev_smbios_attr_group, +#endif +#ifdef CONFIG_ACPI + &pci_dev_acpi_attr_group, +#endif NULL, }; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index f4c26e6118ea..b717680377a9 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -693,6 +693,36 @@ u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap) EXPORT_SYMBOL_GPL(pci_find_ht_capability); /** + * pci_find_vsec_capability - Find a vendor-specific extended capability + * @dev: PCI device to query + * @vendor: Vendor ID for which capability is defined + * @cap: Vendor-specific capability ID + * + * If @dev has Vendor ID @vendor, search for a VSEC capability with + * VSEC ID @cap. If found, return the capability offset in + * config space; otherwise return 0. 
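+ *
+ * A minimal usage sketch (illustrative only; the VSEC ID 0x2 is a
+ * made-up placeholder, not something this patch defines):
+ *
+ *	u32 hdr;
+ *	u16 off = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_INTEL, 0x2);
+ *
+ *	if (off)
+ *		pci_read_config_dword(pdev, off + PCI_VNDR_HEADER, &hdr);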
+ */ +u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap) +{ + u16 vsec = 0; + u32 header; + + if (vendor != dev->vendor) + return 0; + + while ((vsec = pci_find_next_ext_capability(dev, vsec, + PCI_EXT_CAP_ID_VNDR))) { + if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, + &header) == PCIBIOS_SUCCESSFUL && + PCI_VNDR_HEADER_ID(header) == cap) + return vsec; + } + + return 0; +} +EXPORT_SYMBOL_GPL(pci_find_vsec_capability); + +/** * pci_find_parent_resource - return resource region of parent bus of given * region * @dev: PCI device structure contains resources to be searched @@ -4042,6 +4072,7 @@ phys_addr_t pci_pio_to_address(unsigned long pio) return address; } +EXPORT_SYMBOL_GPL(pci_pio_to_address); unsigned long __weak pci_address_to_pio(phys_addr_t address) { @@ -4444,6 +4475,23 @@ void pci_clear_mwi(struct pci_dev *dev) EXPORT_SYMBOL(pci_clear_mwi); /** + * pci_disable_parity - disable parity checking for device + * @dev: the PCI device to operate on + * + * Disable parity checking for device @dev + */ +void pci_disable_parity(struct pci_dev *dev) +{ + u16 cmd; + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + if (cmd & PCI_COMMAND_PARITY) { + cmd &= ~PCI_COMMAND_PARITY; + pci_write_config_word(dev, PCI_COMMAND, cmd); + } +} + +/** * pci_intx - enables/disables PCI INTx for device dev * @pdev: the PCI device to operate on * @enable: boolean: whether to enable or disable PCI INTx diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index afb87b917f07..37c913bbc6e1 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -21,16 +21,10 @@ bool pcie_cap_has_rtctl(const struct pci_dev *dev); int pci_create_sysfs_dev_files(struct pci_dev *pdev); void pci_remove_sysfs_dev_files(struct pci_dev *pdev); -#if !defined(CONFIG_DMI) && !defined(CONFIG_ACPI) -static inline void pci_create_firmware_label_files(struct pci_dev *pdev) -{ return; } -static inline void pci_remove_firmware_label_files(struct pci_dev *pdev) -{ return; } -#else -void pci_create_firmware_label_files(struct pci_dev *pdev); -void pci_remove_firmware_label_files(struct pci_dev *pdev); -#endif void pci_cleanup_rom(struct pci_dev *dev); +#ifdef CONFIG_DMI +extern const struct attribute_group pci_dev_smbios_attr_group; +#endif enum pci_mmap_api { PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */ @@ -141,10 +135,9 @@ static inline bool pcie_downstream_port(const struct pci_dev *dev) type == PCI_EXP_TYPE_PCIE_BRIDGE; } -int pci_vpd_init(struct pci_dev *dev); +void pci_vpd_init(struct pci_dev *dev); void pci_vpd_release(struct pci_dev *dev); -void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev); -void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev); +extern const struct attribute_group pci_dev_vpd_attr_group; /* PCI Virtual Channel */ int pci_save_vc_state(struct pci_dev *dev); @@ -625,6 +618,12 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe) #if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64) int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment, struct resource *res); +#else +static inline int acpi_get_rc_resources(struct device *dev, const char *hid, + u16 segment, struct resource *res) +{ + return -ENODEV; +} #endif int pci_rebar_get_current_size(struct pci_dev *pdev, int bar); @@ -697,6 +696,7 @@ static inline int pci_aer_raw_clear_status(struct pci_dev *dev) { return -EINVAL #ifdef CONFIG_ACPI int pci_acpi_program_hp_params(struct pci_dev *dev); +extern const struct attribute_group pci_dev_acpi_attr_group; #else static 
inline int pci_acpi_program_hp_params(struct pci_dev *dev) { diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index ba22388342d1..ec943cee5ecc 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -129,7 +129,7 @@ static const char * const ecrc_policy_str[] = { }; /** - * enable_ercr_checking - enable PCIe ECRC checking for a device + * enable_ecrc_checking - enable PCIe ECRC checking for a device * @dev: the PCI device * * Returns 0 on success, or negative on failure. @@ -153,7 +153,7 @@ static int enable_ecrc_checking(struct pci_dev *dev) } /** - * disable_ercr_checking - disables PCIe ECRC checking for a device + * disable_ecrc_checking - disables PCIe ECRC checking for a device * @dev: the PCI device * * Returns 0 on success, or negative on failure. @@ -1442,7 +1442,7 @@ static struct pcie_port_service_driver aerdriver = { }; /** - * aer_service_init - register AER root service driver + * pcie_aer_init - register AER root service driver * * Invoked when AER root service driver is loaded. */ diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 3fc08488d65f..1d0dd77fed3a 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -463,7 +463,7 @@ static struct pcie_port_service_driver pcie_pme_driver = { }; /** - * pcie_pme_service_init - Register the PCIe PME service driver. + * pcie_pme_init - Register the PCIe PME service driver. */ int __init pcie_pme_init(void) { diff --git a/drivers/pci/pcie/rcec.c b/drivers/pci/pcie/rcec.c index 2c5c552994e4..d0bcd141ac9c 100644 --- a/drivers/pci/pcie/rcec.c +++ b/drivers/pci/pcie/rcec.c @@ -32,7 +32,7 @@ static bool rcec_assoc_rciep(struct pci_dev *rcec, struct pci_dev *rciep) /* Same bus, so check bitmap */ for_each_set_bit(devn, &bitmap, 32) - if (devn == rciep->devfn) + if (devn == PCI_SLOT(rciep->devfn)) return true; return false; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 953f15abc850..3a62d09b8869 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -895,7 +895,6 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) /* Temporarily move resources off the list */ list_splice_init(&bridge->windows, &resources); bus->sysdata = bridge->sysdata; - bus->msi = bridge->msi; bus->ops = bridge->ops; bus->number = bus->busn_res.start = bridge->busnr; #ifdef CONFIG_PCI_DOMAINS_GENERIC @@ -926,6 +925,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) device_enable_async_suspend(bus->bridge); pci_set_bus_of_node(bus); pci_set_bus_msi_domain(bus); + if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev)) + bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI; if (!parent) set_dev_node(bus->bridge, pcibus_to_node(bus)); @@ -1053,7 +1054,6 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, return NULL; child->parent = parent; - child->msi = parent->msi; child->sysdata = parent->sysdata; child->bus_flags = parent->bus_flags; @@ -2353,6 +2353,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) pci_set_of_node(dev); if (pci_setup_device(dev)) { + pci_release_of_node(dev); pci_bus_put(dev->bus); kfree(dev); return NULL; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 653660e3ba9e..dcb229de1acb 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -206,16 +206,11 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on); /* - * The Mellanox Tavor device gives false positive parity errors. 
Mark this - * device with a broken_parity_status to allow PCI scanning code to "skip" - * this now blacklisted device. + * The Mellanox Tavor device gives false positive parity errors. Disable + * parity error reporting. */ -static void quirk_mellanox_tavor(struct pci_dev *dev) -{ - dev->broken_parity_status = 1; /* This device gives false positives */ -} -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, pci_disable_parity); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, pci_disable_parity); /* * Deal with broken BIOSes that neglect to enable passive release, @@ -2585,10 +2580,8 @@ static int msi_ht_cap_enabled(struct pci_dev *dev) /* Check the HyperTransport MSI mapping to know whether MSI is enabled or not */ static void quirk_msi_ht_cap(struct pci_dev *dev) { - if (dev->subordinate && !msi_ht_cap_enabled(dev)) { - pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n"); - dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; - } + if (!msi_ht_cap_enabled(dev)) + quirk_disable_msi(dev); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE, quirk_msi_ht_cap); @@ -2601,9 +2594,6 @@ static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev) { struct pci_dev *pdev; - if (!dev->subordinate) - return; - /* * Check HT MSI cap on this chipset and the root one. A single one * having MSI is enough to be sure that MSI is supported. @@ -2611,10 +2601,8 @@ static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev) pdev = pci_get_slot(dev->bus, 0); if (!pdev) return; - if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) { - pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n"); - dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; - } + if (!msi_ht_cap_enabled(pdev)) + quirk_msi_ht_cap(dev); pci_dev_put(pdev); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, @@ -3922,6 +3910,7 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = { reset_ivb_igd }, { PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr }, { PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr }, + { PCI_VENDOR_ID_INTEL, 0x0a54, delay_250ms_after_flr }, { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, reset_chelsio_generic_dev }, { 0 } diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 95dec03d9f2a..dd12c2fcc7dc 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -19,6 +19,8 @@ static void pci_stop_dev(struct pci_dev *dev) pci_pme_active(dev, false); if (pci_dev_is_added(dev)) { + dev->reset_fn = 0; + device_release_driver(&dev->dev); pci_proc_detach_device(dev); pci_remove_sysfs_dev_files(dev); diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c index 7915d10f9aa1..26bf7c877de5 100644 --- a/drivers/pci/vpd.c +++ b/drivers/pci/vpd.c @@ -16,12 +16,10 @@ struct pci_vpd_ops { ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf); ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); - int (*set_size)(struct pci_dev *dev, size_t len); }; struct pci_vpd { const struct pci_vpd_ops *ops; - struct bin_attribute *attr; /* Descriptor for sysfs VPD entry */ struct mutex lock; unsigned int len; u16 flag; @@ -30,6 +28,11 @@ struct pci_vpd { unsigned int valid:1; }; +static struct pci_dev 
*pci_get_func0_dev(struct pci_dev *dev) +{ + return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); +} + /** * pci_read_vpd - Read one entry from Vital Product Data * @dev: pci device struct @@ -60,19 +63,6 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void } EXPORT_SYMBOL(pci_write_vpd); -/** - * pci_set_vpd_size - Set size of Vital Product Data space - * @dev: pci device struct - * @len: size of vpd space - */ -int pci_set_vpd_size(struct pci_dev *dev, size_t len) -{ - if (!dev->vpd || !dev->vpd->ops) - return -ENODEV; - return dev->vpd->ops->set_size(dev, len); -} -EXPORT_SYMBOL(pci_set_vpd_size); - #define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1) /** @@ -85,10 +75,14 @@ static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size) size_t off = 0; unsigned char header[1+2]; /* 1 byte tag, 2 bytes length */ - while (off < old_size && - pci_read_vpd(dev, off, 1, header) == 1) { + while (off < old_size && pci_read_vpd(dev, off, 1, header) == 1) { unsigned char tag; + if (!header[0] && !off) { + pci_info(dev, "Invalid VPD tag 00, assume missing optional VPD EPROM\n"); + return 0; + } + if (header[0] & PCI_VPD_LRDT) { /* Large Resource Data Type Tag */ tag = pci_vpd_lrdt_tag(header); @@ -297,30 +291,15 @@ out: return ret ? ret : count; } -static int pci_vpd_set_size(struct pci_dev *dev, size_t len) -{ - struct pci_vpd *vpd = dev->vpd; - - if (len == 0 || len > PCI_VPD_MAX_SIZE) - return -EIO; - - vpd->valid = 1; - vpd->len = len; - - return 0; -} - static const struct pci_vpd_ops pci_vpd_ops = { .read = pci_vpd_read, .write = pci_vpd_write, - .set_size = pci_vpd_set_size, }; static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, void *arg) { - struct pci_dev *tdev = pci_get_slot(dev->bus, - PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); + struct pci_dev *tdev = pci_get_func0_dev(dev); ssize_t ret; if (!tdev) @@ -334,8 +313,7 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count, const void *arg) { - struct pci_dev *tdev = pci_get_slot(dev->bus, - PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); + struct pci_dev *tdev = pci_get_func0_dev(dev); ssize_t ret; if (!tdev) @@ -346,38 +324,23 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count, return ret; } -static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len) -{ - struct pci_dev *tdev = pci_get_slot(dev->bus, - PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); - int ret; - - if (!tdev) - return -ENODEV; - - ret = pci_set_vpd_size(tdev, len); - pci_dev_put(tdev); - return ret; -} - static const struct pci_vpd_ops pci_vpd_f0_ops = { .read = pci_vpd_f0_read, .write = pci_vpd_f0_write, - .set_size = pci_vpd_f0_set_size, }; -int pci_vpd_init(struct pci_dev *dev) +void pci_vpd_init(struct pci_dev *dev) { struct pci_vpd *vpd; u8 cap; cap = pci_find_capability(dev, PCI_CAP_ID_VPD); if (!cap) - return -ENODEV; + return; vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC); if (!vpd) - return -ENOMEM; + return; vpd->len = PCI_VPD_MAX_SIZE; if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) @@ -389,7 +352,6 @@ int pci_vpd_init(struct pci_dev *dev) vpd->busy = 0; vpd->valid = 0; dev->vpd = vpd; - return 0; } void pci_vpd_release(struct pci_dev *dev) @@ -397,102 +359,56 @@ void pci_vpd_release(struct pci_dev *dev) kfree(dev->vpd); } -static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, - loff_t off, size_t count) +static ssize_t 
vpd_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, loff_t off, + size_t count) { struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj)); - if (bin_attr->size > 0) { - if (off > bin_attr->size) - count = 0; - else if (count > bin_attr->size - off) - count = bin_attr->size - off; - } - return pci_read_vpd(dev, off, count, buf); } -static ssize_t write_vpd_attr(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, - loff_t off, size_t count) +static ssize_t vpd_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, loff_t off, + size_t count) { struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj)); - if (bin_attr->size > 0) { - if (off > bin_attr->size) - count = 0; - else if (count > bin_attr->size - off) - count = bin_attr->size - off; - } - return pci_write_vpd(dev, off, count, buf); } +static BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0); -void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev) -{ - int retval; - struct bin_attribute *attr; - - if (!dev->vpd) - return; +static struct bin_attribute *vpd_attrs[] = { + &bin_attr_vpd, + NULL, +}; - attr = kzalloc(sizeof(*attr), GFP_ATOMIC); - if (!attr) - return; +static umode_t vpd_attr_is_visible(struct kobject *kobj, + struct bin_attribute *a, int n) +{ + struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); - sysfs_bin_attr_init(attr); - attr->size = 0; - attr->attr.name = "vpd"; - attr->attr.mode = S_IRUSR | S_IWUSR; - attr->read = read_vpd_attr; - attr->write = write_vpd_attr; - retval = sysfs_create_bin_file(&dev->dev.kobj, attr); - if (retval) { - kfree(attr); - return; - } + if (!pdev->vpd) + return 0; - dev->vpd->attr = attr; + return a->attr.mode; } -void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev) -{ - if (dev->vpd && dev->vpd->attr) { - sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr); - kfree(dev->vpd->attr); - } -} +const struct attribute_group pci_dev_vpd_attr_group = { + .bin_attrs = vpd_attrs, + .is_bin_visible = vpd_attr_is_visible, +}; -int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt) +int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt) { - int i; + int i = 0; - for (i = off; i < len; ) { - u8 val = buf[i]; - - if (val & PCI_VPD_LRDT) { - /* Don't return success of the tag isn't complete */ - if (i + PCI_VPD_LRDT_TAG_SIZE > len) - break; - - if (val == rdt) - return i; - - i += PCI_VPD_LRDT_TAG_SIZE + - pci_vpd_lrdt_size(&buf[i]); - } else { - u8 tag = val & ~PCI_VPD_SRDT_LEN_MASK; - - if (tag == rdt) - return i; - - if (tag == PCI_VPD_SRDT_END) - break; + /* look for LRDT tags only, end tag is the only SRDT tag */ + while (i + PCI_VPD_LRDT_TAG_SIZE <= len && buf[i] & PCI_VPD_LRDT) { + if (buf[i] == rdt) + return i; - i += PCI_VPD_SRDT_TAG_SIZE + - pci_vpd_srdt_size(&buf[i]); - } + i += PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(buf + i); } return -ENOENT; @@ -530,7 +446,7 @@ static void quirk_f0_vpd_link(struct pci_dev *dev) if (!PCI_FUNC(dev->devfn)) return; - f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); + f0 = pci_get_func0_dev(dev); if (!f0) return; @@ -570,7 +486,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, quirk_blacklist_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd); /* * The Amazon Annapurna Labs 0x0031 device id is reused for 
other non Root Port * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class. @@ -578,51 +493,16 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd); -/* - * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the - * VPD end tag will hang the device. This problem was initially - * observed when a vpd entry was created in sysfs - * ('/sys/bus/pci/devices/<id>/vpd'). A read to this sysfs entry - * will dump 32k of data. Reading a full 32k will cause an access - * beyond the VPD end tag causing the device to hang. Once the device - * is hung, the bnx2 driver will not be able to reset the device. - * We believe that it is legal to read beyond the end tag and - * therefore the solution is to limit the read/write length. - */ -static void quirk_brcm_570x_limit_vpd(struct pci_dev *dev) +static void pci_vpd_set_size(struct pci_dev *dev, size_t len) { - /* - * Only disable the VPD capability for 5706, 5706S, 5708, - * 5708S and 5709 rev. A - */ - if ((dev->device == PCI_DEVICE_ID_NX2_5706) || - (dev->device == PCI_DEVICE_ID_NX2_5706S) || - (dev->device == PCI_DEVICE_ID_NX2_5708) || - (dev->device == PCI_DEVICE_ID_NX2_5708S) || - ((dev->device == PCI_DEVICE_ID_NX2_5709) && - (dev->revision & 0xf0) == 0x0)) { - if (dev->vpd) - dev->vpd->len = 0x80; - } + struct pci_vpd *vpd = dev->vpd; + + if (!vpd || len == 0 || len > PCI_VPD_MAX_SIZE) + return; + + vpd->valid = 1; + vpd->len = len; } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, - PCI_DEVICE_ID_NX2_5706, - quirk_brcm_570x_limit_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, - PCI_DEVICE_ID_NX2_5706S, - quirk_brcm_570x_limit_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, - PCI_DEVICE_ID_NX2_5708, - quirk_brcm_570x_limit_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, - PCI_DEVICE_ID_NX2_5708S, - quirk_brcm_570x_limit_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, - PCI_DEVICE_ID_NX2_5709, - quirk_brcm_570x_limit_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, - PCI_DEVICE_ID_NX2_5709S, - quirk_brcm_570x_limit_vpd); static void quirk_chelsio_extend_vpd(struct pci_dev *dev) { @@ -642,9 +522,9 @@ static void quirk_chelsio_extend_vpd(struct pci_dev *dev) * limits. */ if (chip == 0x0 && prod >= 0x20) - pci_set_vpd_size(dev, 8192); + pci_vpd_set_size(dev, 8192); else if (chip >= 0x4 && func < 0x8) - pci_set_vpd_size(dev, 2048); + pci_vpd_set_size(dev, 2048); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index 2d7502648219..b7a8f3a1921f 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -693,7 +693,7 @@ static int pcifront_connect_and_init_dma(struct pcifront_device *pdev) spin_unlock(&pcifront_dev_lock); - if (!err && !swiotlb_nr_tbl()) { + if (!err && !is_swiotlb_active()) { err = pci_xen_swiotlb_init_late(); if (err) dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n"); diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c index e6939103991b..948b763dc451 100644 --- a/drivers/pcmcia/cistpl.c +++ b/drivers/pcmcia/cistpl.c @@ -75,7 +75,7 @@ void release_cis_mem(struct pcmcia_socket *s) mutex_unlock(&s->ops_mutex); } -/** +/* * set_cis_map() - map the card memory at "card_offset" into virtual space. 
* * If flags & MAP_ATTRIB, map the attribute space, otherwise @@ -126,7 +126,7 @@ static void __iomem *set_cis_map(struct pcmcia_socket *s, #define IS_ATTR 1 #define IS_INDIRECT 8 -/** +/* * pcmcia_read_cis_mem() - low-level function to read CIS memory * * must be called with ops_mutex held @@ -206,7 +206,7 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, } -/** +/* * pcmcia_write_cis_mem() - low-level function to write CIS memory * * Probably only useful for writing one-byte registers. Must be called @@ -277,7 +277,7 @@ int pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, } -/** +/* * read_cis_cache() - read CIS memory or its associated cache * * This is a wrapper around read_cis_mem, with the same interface, @@ -365,7 +365,7 @@ void destroy_cis_cache(struct pcmcia_socket *s) } } -/** +/* * verify_cis_cache() - does the CIS match what is in the CIS cache? */ int verify_cis_cache(struct pcmcia_socket *s) @@ -401,7 +401,7 @@ int verify_cis_cache(struct pcmcia_socket *s) return 0; } -/** +/* * pcmcia_replace_cis() - use a replacement CIS instead of the card's CIS * * For really bad cards, we provide a facility for uploading a diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 72114907c0e4..bd81aa64d011 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c @@ -83,7 +83,7 @@ struct pcmcia_dynid { }; /** - * pcmcia_store_new_id - add a new PCMCIA device ID to this driver and re-probe devices + * new_id_store() - add a new PCMCIA device ID to this driver and re-probe devices * @driver: target device driver * @buf: buffer for scanning device ID data * @count: input size @@ -371,9 +371,6 @@ static int pcmcia_device_remove(struct device *dev) pcmcia_card_remove(p_dev->socket, p_dev); /* detach the "instance" */ - if (!p_drv) - return 0; - if (p_drv->remove) p_drv->remove(p_dev); @@ -389,7 +386,7 @@ static int pcmcia_device_remove(struct device *dev) "pcmcia: driver %s did not release window properly\n", p_drv->name); - /* references from pcmcia_probe_device */ + /* references from pcmcia_device_probe */ pcmcia_put_dev(p_dev); module_put(p_drv->owner); diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c index e4c4daf92038..d2d0ed4b27c8 100644 --- a/drivers/pcmcia/pcmcia_cis.c +++ b/drivers/pcmcia/pcmcia_cis.c @@ -122,7 +122,7 @@ next_entry: } -/** +/* * pcmcia_io_cfg_data_width() - convert cfgtable to data path width parameter */ static int pcmcia_io_cfg_data_width(unsigned int flags) @@ -143,7 +143,7 @@ struct pcmcia_cfg_mem { cistpl_cftable_entry_t dflt; }; -/** +/* * pcmcia_do_loop_config() - internal helper for pcmcia_loop_config() * * pcmcia_do_loop_config() is the internal callback for the call from @@ -289,7 +289,7 @@ struct pcmcia_loop_mem { void *priv_data); }; -/** +/* * pcmcia_do_loop_tuple() - internal helper for pcmcia_loop_config() * * pcmcia_do_loop_tuple() is the internal callback for the call from @@ -337,7 +337,7 @@ struct pcmcia_loop_get { cisdata_t **buf; }; -/** +/* * pcmcia_do_get_tuple() - internal helper for pcmcia_get_tuple() * * pcmcia_do_get_tuple() is the internal callback for the call from @@ -386,7 +386,7 @@ size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code, EXPORT_SYMBOL(pcmcia_get_tuple); -/** +/* * pcmcia_do_get_mac() - internal helper for pcmcia_get_mac_from_cis() * * pcmcia_do_get_mac() is the internal callback for the call from diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index e3a6b6c8a5b0..c1c197292111 100644 --- 
a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c @@ -144,7 +144,7 @@ static int alloc_io_space(struct pcmcia_socket *s, struct resource *res, } -/** +/* * pcmcia_access_config() - read or write card configuration registers * * pcmcia_access_config() reads and writes configuration registers in @@ -184,7 +184,7 @@ static int pcmcia_access_config(struct pcmcia_device *p_dev, } -/** +/* * pcmcia_read_config_byte() - read a byte from a card configuration register * * pcmcia_read_config_byte() reads a byte from a configuration register in @@ -197,7 +197,7 @@ int pcmcia_read_config_byte(struct pcmcia_device *p_dev, off_t where, u8 *val) EXPORT_SYMBOL(pcmcia_read_config_byte); -/** +/* * pcmcia_write_config_byte() - write a byte to a card configuration register * * pcmcia_write_config_byte() writes a byte to a configuration register in @@ -720,7 +720,8 @@ static irqreturn_t test_action(int cpl, void *dev_id) /** * pcmcia_setup_isa_irq() - determine whether an ISA IRQ can be used - * @p_dev - the associated PCMCIA device + * @p_dev: the associated PCMCIA device + * @type: IRQ type (flags) * * locking note: must be called with ops_mutex locked. */ @@ -785,7 +786,7 @@ void pcmcia_cleanup_irq(struct pcmcia_socket *s) /** * pcmcia_setup_irq() - determine IRQ to be used for device - * @p_dev - the associated PCMCIA device + * @p_dev: the associated PCMCIA device * * locking note: must be called with ops_mutex locked. */ diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 3b05760e69d6..bb15a8bdbaab 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -257,7 +257,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base, /*======================================================================*/ -/** +/* * readable() - iomem validation function for cards with a valid CIS */ static int readable(struct pcmcia_socket *s, struct resource *res, @@ -288,7 +288,7 @@ static int readable(struct pcmcia_socket *s, struct resource *res, return 0; } -/** +/* * checksum() - iomem validation function for simple memory cards */ static int checksum(struct pcmcia_socket *s, struct resource *res, @@ -343,9 +343,9 @@ static int checksum(struct pcmcia_socket *s, struct resource *res, */ static int do_validate_mem(struct pcmcia_socket *s, unsigned long base, unsigned long size, - int validate (struct pcmcia_socket *s, - struct resource *res, - unsigned int *value)) + int (*validate)(struct pcmcia_socket *s, + struct resource *res, + unsigned int *value)) { struct socket_data *s_data = s->resource_data; struct resource *res1, *res2; @@ -398,12 +398,12 @@ static int do_validate_mem(struct pcmcia_socket *s, * function returns the size of the usable memory area. 
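 *
 * "validate" and "fallback" follow the same shape as readable() and
 * checksum() above; for illustration (not part of this change), the
 * expected callback type is:
 *
 *	int (*validate)(struct pcmcia_socket *s, struct resource *res,
 *			unsigned int *value);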
*/ static int do_mem_probe(struct pcmcia_socket *s, u_long base, u_long num, - int validate (struct pcmcia_socket *s, - struct resource *res, - unsigned int *value), - int fallback (struct pcmcia_socket *s, - struct resource *res, - unsigned int *value)) + int (*validate)(struct pcmcia_socket *s, + struct resource *res, + unsigned int *value), + int (*fallback)(struct pcmcia_socket *s, + struct resource *res, + unsigned int *value)) { struct socket_data *s_data = s->resource_data; u_long i, j, bad, fail, step; diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index d3371ac7b871..c76adedd58c9 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -618,6 +618,15 @@ config PWM_TWL_LED To compile this driver as a module, choose M here: the module will be called pwm-twl-led. +config PWM_VISCONTI + tristate "Toshiba Visconti PWM support" + depends on ARCH_VISCONTI || COMPILE_TEST + help + PWM Subsystem driver support for Toshiba Visconti SoCs. + + To compile this driver as a module, choose M here: the module + will be called pwm-visconti. + config PWM_VT8500 tristate "vt8500 PWM support" depends on ARCH_VT8500 || COMPILE_TEST diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index d3879619bd76..708840b7fba8 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -58,4 +58,5 @@ obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o obj-$(CONFIG_PWM_TWL) += pwm-twl.o obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o +obj-$(CONFIG_PWM_VISCONTI) += pwm-visconti.o obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index a8eff4b3ee36..c4d5c0667137 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -37,23 +37,13 @@ static struct pwm_device *pwm_to_device(unsigned int pwm) return radix_tree_lookup(&pwm_tree, pwm); } -static int alloc_pwms(int pwm, unsigned int count) +static int alloc_pwms(unsigned int count) { - unsigned int from = 0; unsigned int start; - if (pwm >= MAX_PWMS) - return -EINVAL; - - if (pwm >= 0) - from = pwm; - - start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, from, + start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, 0, count, 0); - if (pwm >= 0 && start != pwm) - return -EEXIST; - if (start + count > MAX_PWMS) return -ENOSPC; @@ -260,18 +250,14 @@ static bool pwm_ops_check(const struct pwm_chip *chip) } /** - * pwmchip_add_with_polarity() - register a new PWM chip + * pwmchip_add() - register a new PWM chip * @chip: the PWM chip to add - * @polarity: initial polarity of PWM channels * - * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base - * will be used. The initial polarity for all channels is specified by the - * @polarity parameter. + * Register a new PWM chip. * * Returns: 0 on success or a negative error code on failure. 
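 *
 * A minimal registration sketch under the new interface (illustrative;
 * the "foo" names are placeholders, not part of this patch):
 *
 *	foo->chip.dev = &pdev->dev;
 *	foo->chip.ops = &foo_pwm_ops;
 *	foo->chip.npwm = 1;
 *	ret = pwmchip_add(&foo->chip); /* chip->base is assigned dynamically */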
*/ -int pwmchip_add_with_polarity(struct pwm_chip *chip, - enum pwm_polarity polarity) +int pwmchip_add(struct pwm_chip *chip) { struct pwm_device *pwm; unsigned int i; @@ -285,25 +271,24 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip, mutex_lock(&pwm_lock); - ret = alloc_pwms(chip->base, chip->npwm); + ret = alloc_pwms(chip->npwm); if (ret < 0) goto out; + chip->base = ret; + chip->pwms = kcalloc(chip->npwm, sizeof(*pwm), GFP_KERNEL); if (!chip->pwms) { ret = -ENOMEM; goto out; } - chip->base = ret; - for (i = 0; i < chip->npwm; i++) { pwm = &chip->pwms[i]; pwm->chip = chip; pwm->pwm = chip->base + i; pwm->hwpwm = i; - pwm->state.polarity = polarity; radix_tree_insert(&pwm_tree, pwm->pwm, pwm); } @@ -326,21 +311,6 @@ out: return ret; } -EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity); - -/** - * pwmchip_add() - register a new PWM chip - * @chip: the PWM chip to add - * - * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base - * will be used. The initial polarity for all channels is normal. - * - * Returns: 0 on success or a negative error code on failure. - */ -int pwmchip_add(struct pwm_chip *chip) -{ - return pwmchip_add_with_polarity(chip, PWM_POLARITY_NORMAL); -} EXPORT_SYMBOL_GPL(pwmchip_add); /** @@ -607,7 +577,7 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state) */ if (state->polarity != pwm->state.polarity) { if (!chip->ops->set_polarity) - return -ENOTSUPP; + return -EINVAL; /* * Changing the polarity of a running PWM is diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c index 58c6c0f5b0ec..e2a26d9da25b 100644 --- a/drivers/pwm/pwm-ab8500.c +++ b/drivers/pwm/pwm-ab8500.c @@ -24,23 +24,37 @@ struct ab8500_pwm_chip { struct pwm_chip chip; }; -static int ab8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) +static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) { - int ret = 0; - unsigned int higher_val, lower_val; + int ret; u8 reg; + unsigned int higher_val, lower_val; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (!state->enabled) { + ret = abx500_mask_and_set_register_interruptible(chip->dev, + AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, + 1 << (chip->base - 1), 0); + + if (ret < 0) + dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n", + pwm->label, ret); + return ret; + } /* * get the first 8 bits that are be written to * AB8500_PWM_OUT_CTRL1_REG[0:7] */ - lower_val = duty_ns & 0x00FF; + lower_val = state->duty_cycle & 0x00FF; /* * get bits [9:10] that are to be written to * AB8500_PWM_OUT_CTRL2_REG[0:1] */ - higher_val = ((duty_ns & 0x0300) >> 8); + higher_val = ((state->duty_cycle & 0x0300) >> 8); reg = AB8500_PWM_OUT_CTRL1_REG + ((chip->base - 1) * 2); @@ -48,15 +62,11 @@ static int ab8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, reg, (u8)lower_val); if (ret < 0) return ret; + ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC, (reg + 1), (u8)higher_val); - - return ret; -} - -static int ab8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) -{ - int ret; + if (ret < 0) + return ret; ret = abx500_mask_and_set_register_interruptible(chip->dev, AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, @@ -64,25 +74,12 @@ static int ab8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) if (ret < 0) dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n", pwm->label, ret); - return ret; -} -static void ab8500_pwm_disable(struct pwm_chip *chip, struct 
pwm_device *pwm) -{ - int ret; - - ret = abx500_mask_and_set_register_interruptible(chip->dev, - AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, - 1 << (chip->base - 1), 0); - if (ret < 0) - dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n", - pwm->label, ret); + return ret; } static const struct pwm_ops ab8500_pwm_ops = { - .config = ab8500_pwm_config, - .enable = ab8500_pwm_enable, - .disable = ab8500_pwm_disable, + .apply = ab8500_pwm_apply, .owner = THIS_MODULE, }; @@ -101,7 +98,6 @@ static int ab8500_pwm_probe(struct platform_device *pdev) ab8500->chip.dev = &pdev->dev; ab8500->chip.ops = &ab8500_pwm_ops; - ab8500->chip.base = -1; ab8500->chip.npwm = 1; err = pwmchip_add(&ab8500->chip); diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c index dcbc0489dfd4..6ab597e54005 100644 --- a/drivers/pwm/pwm-atmel-hlcdc.c +++ b/drivers/pwm/pwm-atmel-hlcdc.c @@ -265,12 +265,11 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev) chip->hlcdc = hlcdc; chip->chip.ops = &atmel_hlcdc_pwm_ops; chip->chip.dev = dev; - chip->chip.base = -1; chip->chip.npwm = 1; chip->chip.of_xlate = of_pwm_xlate_with_flags; chip->chip.of_pwm_n_cells = 3; - ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED); + ret = pwmchip_add(&chip->chip); if (ret) { clk_disable_unprepare(hlcdc->periph_clk); return ret; diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c index 5ccc3e7420e9..8451d3e846be 100644 --- a/drivers/pwm/pwm-atmel-tcb.c +++ b/drivers/pwm/pwm-atmel-tcb.c @@ -362,20 +362,37 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, tcbpwm->div = i; tcbpwm->duty = duty; - /* If the PWM is enabled, call enable to apply the new conf */ - if (pwm_is_enabled(pwm)) - atmel_tcb_pwm_enable(chip, pwm); - return 0; } +static int atmel_tcb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int duty_cycle, period; + int ret; + + /* This function only sets a flag in driver data */ + atmel_tcb_pwm_set_polarity(chip, pwm, state->polarity); + + if (!state->enabled) { + atmel_tcb_pwm_disable(chip, pwm); + return 0; + } + + period = state->period < INT_MAX ? state->period : INT_MAX; + duty_cycle = state->duty_cycle < INT_MAX ? 
state->duty_cycle : INT_MAX; + + ret = atmel_tcb_pwm_config(chip, pwm, duty_cycle, period); + if (ret) + return ret; + + return atmel_tcb_pwm_enable(chip, pwm); +} + static const struct pwm_ops atmel_tcb_pwm_ops = { .request = atmel_tcb_pwm_request, .free = atmel_tcb_pwm_free, - .config = atmel_tcb_pwm_config, - .set_polarity = atmel_tcb_pwm_set_polarity, - .enable = atmel_tcb_pwm_enable, - .disable = atmel_tcb_pwm_disable, + .apply = atmel_tcb_pwm_apply, .owner = THIS_MODULE, }; @@ -454,7 +471,6 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev) tcbpwm->chip.ops = &atmel_tcb_pwm_ops; tcbpwm->chip.of_xlate = of_pwm_xlate_with_flags; tcbpwm->chip.of_pwm_n_cells = 3; - tcbpwm->chip.base = -1; tcbpwm->chip.npwm = NPWM; tcbpwm->channel = channel; tcbpwm->regmap = regmap; @@ -491,14 +507,14 @@ static int atmel_tcb_pwm_remove(struct platform_device *pdev) struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev); int err; - clk_disable_unprepare(tcbpwm->slow_clk); - clk_put(tcbpwm->slow_clk); - clk_put(tcbpwm->clk); - err = pwmchip_remove(&tcbpwm->chip); if (err < 0) return err; + clk_disable_unprepare(tcbpwm->slow_clk); + clk_put(tcbpwm->slow_clk); + clk_put(tcbpwm->clk); + return 0; } diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c index 5813339b597b..29b5ad03f715 100644 --- a/drivers/pwm/pwm-atmel.c +++ b/drivers/pwm/pwm-atmel.c @@ -124,6 +124,7 @@ static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip, } static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip, + unsigned long clkrate, const struct pwm_state *state, unsigned long *cprd, u32 *pres) { @@ -132,7 +133,7 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip, int shift; /* Calculate the period cycles and prescale value */ - cycles *= clk_get_rate(atmel_pwm->clk); + cycles *= clkrate; do_div(cycles, NSEC_PER_SEC); /* @@ -158,12 +159,14 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip, } static void atmel_pwm_calculate_cdty(const struct pwm_state *state, - unsigned long cprd, unsigned long *cdty) + unsigned long clkrate, unsigned long cprd, + u32 pres, unsigned long *cdty) { unsigned long long cycles = state->duty_cycle; - cycles *= cprd; - do_div(cycles, state->period); + cycles *= clkrate; + do_div(cycles, NSEC_PER_SEC); + cycles >>= pres; *cdty = cprd - cycles; } @@ -244,17 +247,23 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, pwm_get_state(pwm, &cstate); if (state->enabled) { + unsigned long clkrate = clk_get_rate(atmel_pwm->clk); + if (cstate.enabled && cstate.polarity == state->polarity && cstate.period == state->period) { + u32 cmr = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); + cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, atmel_pwm->data->regs.period); - atmel_pwm_calculate_cdty(state, cprd, &cdty); + pres = cmr & PWM_CMR_CPRE_MSK; + + atmel_pwm_calculate_cdty(state, clkrate, cprd, pres, &cdty); atmel_pwm_update_cdty(chip, pwm, cdty); return 0; } - ret = atmel_pwm_calculate_cprd_and_pres(chip, state, &cprd, + ret = atmel_pwm_calculate_cprd_and_pres(chip, clkrate, state, &cprd, &pres); if (ret) { dev_err(chip->dev, @@ -262,7 +271,7 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return ret; } - atmel_pwm_calculate_cdty(state, cprd, &cdty); + atmel_pwm_calculate_cdty(state, clkrate, cprd, pres, &cdty); if (cstate.enabled) { atmel_pwm_disable(chip, pwm, false); @@ -319,7 +328,7 @@ static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, cdty = 
atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, atmel_pwm->data->regs.duty); - tmp = (u64)cdty * NSEC_PER_SEC; + tmp = (u64)(cprd - cdty) * NSEC_PER_SEC; tmp <<= pres; state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate); @@ -429,7 +438,6 @@ static int atmel_pwm_probe(struct platform_device *pdev) atmel_pwm->chip.ops = &atmel_pwm_ops; atmel_pwm->chip.of_xlate = of_pwm_xlate_with_flags; atmel_pwm->chip.of_pwm_n_cells = 3; - atmel_pwm->chip.base = -1; atmel_pwm->chip.npwm = 4; ret = pwmchip_add(&atmel_pwm->chip); @@ -451,10 +459,12 @@ static int atmel_pwm_remove(struct platform_device *pdev) { struct atmel_pwm_chip *atmel_pwm = platform_get_drvdata(pdev); + pwmchip_remove(&atmel_pwm->chip); + clk_unprepare(atmel_pwm->clk); mutex_destroy(&atmel_pwm->isr_lock); - return pwmchip_remove(&atmel_pwm->chip); + return 0; } static struct platform_driver atmel_pwm_driver = { diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c index f4853c4a2d75..edd2ce1760ab 100644 --- a/drivers/pwm/pwm-bcm-iproc.c +++ b/drivers/pwm/pwm-bcm-iproc.c @@ -209,7 +209,6 @@ static int iproc_pwmc_probe(struct platform_device *pdev) ip->chip.dev = &pdev->dev; ip->chip.ops = &iproc_pwm_ops; - ip->chip.base = -1; ip->chip.npwm = 4; ip->chip.of_xlate = of_pwm_xlate_with_flags; ip->chip.of_pwm_n_cells = 3; @@ -254,9 +253,11 @@ static int iproc_pwmc_remove(struct platform_device *pdev) { struct iproc_pwmc *ip = platform_get_drvdata(pdev); + pwmchip_remove(&ip->chip); + clk_disable_unprepare(ip->clk); - return pwmchip_remove(&ip->chip); + return 0; } static const struct of_device_id bcm_iproc_pwmc_dt[] = { diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c index 578b3621c97e..800b9edf2e71 100644 --- a/drivers/pwm/pwm-bcm-kona.c +++ b/drivers/pwm/pwm-bcm-kona.c @@ -271,7 +271,6 @@ static int kona_pwmc_probe(struct platform_device *pdev) kp->chip.dev = &pdev->dev; kp->chip.ops = &kona_pwm_ops; - kp->chip.base = -1; kp->chip.npwm = 6; kp->chip.of_xlate = of_pwm_xlate_with_flags; kp->chip.of_pwm_n_cells = 3; @@ -301,7 +300,7 @@ static int kona_pwmc_probe(struct platform_device *pdev) clk_disable_unprepare(kp->clk); - ret = pwmchip_add_with_polarity(&kp->chip, PWM_POLARITY_INVERSED); + ret = pwmchip_add(&kp->chip); if (ret < 0) dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret); @@ -311,11 +310,6 @@ static int kona_pwmc_probe(struct platform_device *pdev) static int kona_pwmc_remove(struct platform_device *pdev) { struct kona_pwmc *kp = platform_get_drvdata(pdev); - unsigned int chan; - - for (chan = 0; chan < kp->chip.npwm; chan++) - if (pwm_is_enabled(&kp->chip.pwms[chan])) - clk_disable_unprepare(kp->clk); return pwmchip_remove(&kp->chip); } diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c index 6ff5f04b3e07..fc240d5b8121 100644 --- a/drivers/pwm/pwm-bcm2835.c +++ b/drivers/pwm/pwm-bcm2835.c @@ -64,8 +64,9 @@ static int bcm2835_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, struct bcm2835_pwm *pc = to_bcm2835_pwm(chip); unsigned long rate = clk_get_rate(pc->clk); - unsigned long long period; - unsigned long scaler; + unsigned long long period_cycles; + u64 max_period; + u32 val; if (!rate) { @@ -73,18 +74,36 @@ static int bcm2835_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return -EINVAL; } - scaler = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate); + /* + * period_cycles must be a 32 bit value, so period * rate / NSEC_PER_SEC + * must be <= U32_MAX. As U32_MAX * NSEC_PER_SEC < U64_MAX the + * multiplication period * rate doesn't overflow. 
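+ * (For instance, at an assumed clock rate of 100 MHz this bounds the
+ * period at roughly U32_MAX / 100000000, i.e. about 42.9 seconds.)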
+ * To calculate the maximal possible period that guarantees the + * above inequality: + * + * round(period * rate / NSEC_PER_SEC) <= U32_MAX + * <=> period * rate / NSEC_PER_SEC < U32_MAX + 0.5 + * <=> period * rate < (U32_MAX + 0.5) * NSEC_PER_SEC + * <=> period < ((U32_MAX + 0.5) * NSEC_PER_SEC) / rate + * <=> period < (U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC/2) / rate + * <=> period <= ceil((U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC/2) / rate) - 1 + */ + max_period = DIV_ROUND_UP_ULL((u64)U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC / 2, rate) - 1; + + if (state->period > max_period) + return -EINVAL; + /* set period */ - period = DIV_ROUND_CLOSEST_ULL(state->period, scaler); + period_cycles = DIV_ROUND_CLOSEST_ULL(state->period * rate, NSEC_PER_SEC); - /* dont accept a period that is too small or has been truncated */ - if ((period < PERIOD_MIN) || (period > U32_MAX)) + /* don't accept a period that is too small */ + if (period_cycles < PERIOD_MIN) return -EINVAL; - writel(period, pc->base + PERIOD(pwm->hwpwm)); + writel(period_cycles, pc->base + PERIOD(pwm->hwpwm)); /* set duty cycle */ - val = DIV_ROUND_CLOSEST_ULL(state->duty_cycle, scaler); + val = DIV_ROUND_CLOSEST_ULL(state->duty_cycle * rate, NSEC_PER_SEC); writel(val, pc->base + DUTY(pwm->hwpwm)); /* set polarity */ @@ -139,7 +158,6 @@ static int bcm2835_pwm_probe(struct platform_device *pdev) pc->chip.dev = &pdev->dev; pc->chip.ops = &bcm2835_pwm_ops; - pc->chip.base = -1; pc->chip.npwm = 2; pc->chip.of_xlate = of_pwm_xlate_with_flags; pc->chip.of_pwm_n_cells = 3; @@ -161,9 +179,11 @@ static int bcm2835_pwm_remove(struct platform_device *pdev) { struct bcm2835_pwm *pc = platform_get_drvdata(pdev); + pwmchip_remove(&pc->chip); + clk_disable_unprepare(pc->clk); - return pwmchip_remove(&pc->chip); + return 0; } static const struct of_device_id bcm2835_pwm_of_match[] = { diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c index fe405289e582..acb6fbc3cc32 100644 --- a/drivers/pwm/pwm-berlin.c +++ b/drivers/pwm/pwm-berlin.c @@ -206,7 +206,6 @@ static int berlin_pwm_probe(struct platform_device *pdev) pwm->chip.dev = &pdev->dev; pwm->chip.ops = &berlin_pwm_ops; - pwm->chip.base = -1; pwm->chip.npwm = 4; pwm->chip.of_xlate = of_pwm_xlate_with_flags; pwm->chip.of_pwm_n_cells = 3; diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c index 8b66f9d2f589..8b1d1e7aa856 100644 --- a/drivers/pwm/pwm-brcmstb.c +++ b/drivers/pwm/pwm-brcmstb.c @@ -258,7 +258,6 @@ static int brcmstb_pwm_probe(struct platform_device *pdev) p->chip.dev = &pdev->dev; p->chip.ops = &brcmstb_pwm_ops; - p->chip.base = -1; p->chip.npwm = 2; p->base = devm_platform_ioremap_resource(pdev, 0); diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c index cb1af86873ee..f3d17a590305 100644 --- a/drivers/pwm/pwm-clps711x.c +++ b/drivers/pwm/pwm-clps711x.c @@ -128,7 +128,6 @@ static int clps711x_pwm_probe(struct platform_device *pdev) priv->chip.ops = &clps711x_pwm_ops; priv->chip.dev = &pdev->dev; - priv->chip.base = -1; priv->chip.npwm = 2; priv->chip.of_xlate = clps711x_pwm_xlate; priv->chip.of_pwm_n_cells = 1; diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c index 1e2276808b7a..02522a9a3073 100644 --- a/drivers/pwm/pwm-crc.c +++ b/drivers/pwm/pwm-crc.c @@ -168,7 +168,6 @@ static int crystalcove_pwm_probe(struct platform_device *pdev) pwm->chip.dev = &pdev->dev; pwm->chip.ops = &crc_pwm_ops; - pwm->chip.base = -1; pwm->chip.npwm = 1; /* get the PMIC regmap */ diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c index 
c1c337969e4e..9fffb566af5f 100644 --- a/drivers/pwm/pwm-cros-ec.c +++ b/drivers/pwm/pwm-cros-ec.c @@ -124,6 +124,9 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, if (state->period != EC_PWM_MAX_DUTY) return -EINVAL; + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + /* * EC doesn't separate the concept of duty cycle and enabled, but * kernel does. Translate. @@ -253,7 +256,6 @@ static int cros_ec_pwm_probe(struct platform_device *pdev) chip->ops = &cros_ec_pwm_ops; chip->of_xlate = cros_ec_pwm_xlate; chip->of_pwm_n_cells = 1; - chip->base = -1; ret = cros_ec_num_pwms(ec); if (ret < 0) { dev_err(dev, "Couldn't find PWMs: %d\n", ret); diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c index f6c98e0d57c2..7568300bb11e 100644 --- a/drivers/pwm/pwm-dwc.c +++ b/drivers/pwm/pwm-dwc.c @@ -233,7 +233,6 @@ static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id) dwc->chip.dev = dev; dwc->chip.ops = &dwc_pwm_ops; dwc->chip.npwm = DWC_TIMERS_TOTAL; - dwc->chip.base = -1; ret = pwmchip_add(&dwc->chip); if (ret) diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c index c9fc6f223640..4ca70794ad96 100644 --- a/drivers/pwm/pwm-ep93xx.c +++ b/drivers/pwm/pwm-ep93xx.c @@ -185,7 +185,6 @@ static int ep93xx_pwm_probe(struct platform_device *pdev) ep93xx_pwm->chip.dev = &pdev->dev; ep93xx_pwm->chip.ops = &ep93xx_pwm_ops; - ep93xx_pwm->chip.base = -1; ep93xx_pwm->chip.npwm = 1; ret = pwmchip_add(&ep93xx_pwm->chip); diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c index 2a6801226aba..0e1ae9469eda 100644 --- a/drivers/pwm/pwm-fsl-ftm.c +++ b/drivers/pwm/pwm-fsl-ftm.c @@ -453,7 +453,6 @@ static int fsl_pwm_probe(struct platform_device *pdev) fpc->chip.ops = &fsl_pwm_ops; fpc->chip.of_xlate = of_pwm_xlate_with_flags; fpc->chip.of_pwm_n_cells = 3; - fpc->chip.base = -1; fpc->chip.npwm = 8; ret = pwmchip_add(&fpc->chip); diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c index a1900d0a872e..82d17fc75c21 100644 --- a/drivers/pwm/pwm-hibvt.c +++ b/drivers/pwm/pwm-hibvt.c @@ -205,7 +205,6 @@ static int hibvt_pwm_probe(struct platform_device *pdev) pwm_chip->chip.ops = &hibvt_pwm_ops; pwm_chip->chip.dev = &pdev->dev; - pwm_chip->chip.base = -1; pwm_chip->chip.npwm = soc->num_pwms; pwm_chip->chip.of_xlate = of_pwm_xlate_with_flags; pwm_chip->chip.of_pwm_n_cells = 3; diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c index 6faf5b5a5584..cc37054589cc 100644 --- a/drivers/pwm/pwm-img.c +++ b/drivers/pwm/pwm-img.c @@ -304,7 +304,6 @@ static int img_pwm_probe(struct platform_device *pdev) pwm->chip.dev = &pdev->dev; pwm->chip.ops = &img_pwm_ops; - pwm->chip.base = -1; pwm->chip.npwm = IMG_PWM_NPWM; ret = pwmchip_add(&pwm->chip); diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c index aaf629bd8c35..97c9133b6876 100644 --- a/drivers/pwm/pwm-imx-tpm.c +++ b/drivers/pwm/pwm-imx-tpm.c @@ -363,7 +363,6 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev) tpm->chip.dev = &pdev->dev; tpm->chip.ops = &imx_tpm_pwm_ops; - tpm->chip.base = -1; tpm->chip.of_xlate = of_pwm_xlate_with_flags; tpm->chip.of_pwm_n_cells = 3; @@ -411,9 +410,7 @@ static int __maybe_unused pwm_imx_tpm_resume(struct device *dev) ret = clk_prepare_enable(tpm->clk); if (ret) - dev_err(dev, - "failed to prepare or enable clock: %d\n", - ret); + dev_err(dev, "failed to prepare or enable clock: %d\n", ret); return ret; } diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c index 727e0d3e249e..c957b365448e 100644 
--- a/drivers/pwm/pwm-imx1.c +++ b/drivers/pwm/pwm-imx1.c @@ -155,7 +155,6 @@ static int pwm_imx1_probe(struct platform_device *pdev) imx->chip.ops = &pwm_imx1_ops; imx->chip.dev = &pdev->dev; - imx->chip.base = -1; imx->chip.npwm = 1; imx->mmio_base = devm_platform_ioremap_resource(pdev, 0); diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c index 18055326a2f3..ba695115c160 100644 --- a/drivers/pwm/pwm-imx27.c +++ b/drivers/pwm/pwm-imx27.c @@ -327,7 +327,6 @@ static int pwm_imx27_probe(struct platform_device *pdev) imx->chip.ops = &pwm_imx27_ops; imx->chip.dev = &pdev->dev; - imx->chip.base = -1; imx->chip.npwm = 1; imx->chip.of_xlate = of_pwm_xlate_with_flags; diff --git a/drivers/pwm/pwm-intel-lgm.c b/drivers/pwm/pwm-intel-lgm.c index e9e54dda07aa..015f5eba09a1 100644 --- a/drivers/pwm/pwm-intel-lgm.c +++ b/drivers/pwm/pwm-intel-lgm.c @@ -207,7 +207,6 @@ static int lgm_pwm_probe(struct platform_device *pdev) pc->chip.dev = dev; pc->chip.ops = &lgm_pwm_ops; pc->chip.npwm = 1; - pc->chip.base = -1; lgm_pwm_init(pc); diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c index 957b972c458b..6c6e26d18329 100644 --- a/drivers/pwm/pwm-iqs620a.c +++ b/drivers/pwm/pwm-iqs620a.c @@ -206,7 +206,6 @@ static int iqs620_pwm_probe(struct platform_device *pdev) iqs620_pwm->chip.dev = &pdev->dev; iqs620_pwm->chip.ops = &iqs620_pwm_ops; - iqs620_pwm->chip.base = -1; iqs620_pwm->chip.npwm = 1; mutex_init(&iqs620_pwm->lock); diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c index 00c642fa2eed..5b6bdcdcecf5 100644 --- a/drivers/pwm/pwm-jz4740.c +++ b/drivers/pwm/pwm-jz4740.c @@ -244,7 +244,6 @@ static int jz4740_pwm_probe(struct platform_device *pdev) jz4740->chip.dev = dev; jz4740->chip.ops = &jz4740_pwm_ops; jz4740->chip.npwm = info->num_pwms; - jz4740->chip.base = -1; jz4740->chip.of_xlate = of_pwm_xlate_with_flags; jz4740->chip.of_pwm_n_cells = 3; diff --git a/drivers/pwm/pwm-keembay.c b/drivers/pwm/pwm-keembay.c index cdfdef66ff8e..521a825c8ba0 100644 --- a/drivers/pwm/pwm-keembay.c +++ b/drivers/pwm/pwm-keembay.c @@ -203,7 +203,6 @@ static int keembay_pwm_probe(struct platform_device *pdev) if (ret) return ret; - priv->chip.base = -1; priv->chip.dev = dev; priv->chip.ops = &keembay_pwm_ops; priv->chip.npwm = KMB_TOTAL_PWM_CHANNELS; diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c index bf3f14fb5f24..7551253ada32 100644 --- a/drivers/pwm/pwm-lp3943.c +++ b/drivers/pwm/pwm-lp3943.c @@ -275,7 +275,6 @@ static int lp3943_pwm_probe(struct platform_device *pdev) lp3943_pwm->chip.dev = &pdev->dev; lp3943_pwm->chip.ops = &lp3943_pwm_ops; lp3943_pwm->chip.npwm = LP3943_NUM_PWMS; - lp3943_pwm->chip.base = -1; platform_set_drvdata(pdev, lp3943_pwm); diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c index 7ef40243eb6c..b643ac61a2e7 100644 --- a/drivers/pwm/pwm-lpc18xx-sct.c +++ b/drivers/pwm/pwm-lpc18xx-sct.c @@ -370,7 +370,6 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev) lpc18xx_pwm->chip.dev = &pdev->dev; lpc18xx_pwm->chip.ops = &lpc18xx_pwm_ops; - lpc18xx_pwm->chip.base = -1; lpc18xx_pwm->chip.npwm = 16; lpc18xx_pwm->chip.of_xlate = of_pwm_xlate_with_flags; lpc18xx_pwm->chip.of_pwm_n_cells = 3; @@ -442,13 +441,15 @@ static int lpc18xx_pwm_remove(struct platform_device *pdev) struct lpc18xx_pwm_chip *lpc18xx_pwm = platform_get_drvdata(pdev); u32 val; + pwmchip_remove(&lpc18xx_pwm->chip); + val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL); lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val | 
LPC18XX_PWM_CTRL_HALT); clk_disable_unprepare(lpc18xx_pwm->pwm_clk); - return pwmchip_remove(&lpc18xx_pwm->chip); + return 0; } static struct platform_driver lpc18xx_pwm_driver = { diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c index 6b4090436c06..2834a0f001d3 100644 --- a/drivers/pwm/pwm-lpc32xx.c +++ b/drivers/pwm/pwm-lpc32xx.c @@ -116,7 +116,6 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev) lpc32xx->chip.dev = &pdev->dev; lpc32xx->chip.ops = &lpc32xx_pwm_ops; lpc32xx->chip.npwm = 1; - lpc32xx->chip.base = -1; ret = pwmchip_add(&lpc32xx->chip); if (ret < 0) { @@ -137,10 +136,6 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev) static int lpc32xx_pwm_remove(struct platform_device *pdev) { struct lpc32xx_pwm_chip *lpc32xx = platform_get_drvdata(pdev); - unsigned int i; - - for (i = 0; i < lpc32xx->chip.npwm; i++) - pwm_disable(&lpc32xx->chip.pwms[i]); return pwmchip_remove(&lpc32xx->chip); } diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c index 939de93c157b..58b4031524af 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -234,7 +234,6 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, lpwm->chip.dev = dev; lpwm->chip.ops = &pwm_lpss_ops; - lpwm->chip.base = -1; lpwm->chip.npwm = info->npwm; ret = pwmchip_add(&lpwm->chip); @@ -255,12 +254,6 @@ EXPORT_SYMBOL_GPL(pwm_lpss_probe); int pwm_lpss_remove(struct pwm_lpss_chip *lpwm) { - int i; - - for (i = 0; i < lpwm->info->npwm; i++) { - if (pwm_is_enabled(&lpwm->chip.pwms[i])) - pm_runtime_put(lpwm->chip.dev); - } return pwmchip_remove(&lpwm->chip); } EXPORT_SYMBOL_GPL(pwm_lpss_remove); diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index fcfc3b147e5f..b4a31060bcd7 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -107,12 +107,6 @@ static void pwm_mediatek_clk_disable(struct pwm_chip *chip, clk_disable_unprepare(pc->clk_top); } -static inline u32 pwm_mediatek_readl(struct pwm_mediatek_chip *chip, - unsigned int num, unsigned int offset) -{ - return readl(chip->regs + pwm_mediatek_reg_offset[num] + offset); -} - static inline void pwm_mediatek_writel(struct pwm_mediatek_chip *chip, unsigned int num, unsigned int offset, u32 value) @@ -263,7 +257,6 @@ static int pwm_mediatek_probe(struct platform_device *pdev) pc->chip.dev = &pdev->dev; pc->chip.ops = &pwm_mediatek_ops; - pc->chip.base = -1; pc->chip.npwm = pc->soc->num_pwms; ret = pwmchip_add(&pc->chip); diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c index a3ce9789412a..9eb060613cb4 100644 --- a/drivers/pwm/pwm-meson.c +++ b/drivers/pwm/pwm-meson.c @@ -550,7 +550,6 @@ static int meson_pwm_probe(struct platform_device *pdev) spin_lock_init(&meson->lock); meson->chip.dev = &pdev->dev; meson->chip.ops = &meson_pwm_ops; - meson->chip.base = -1; meson->chip.npwm = MESON_NUM_PWMS; meson->chip.of_xlate = of_pwm_xlate_with_flags; meson->chip.of_pwm_n_cells = 3; diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c index 87c6b4bc5d43..9b3ba401a3db 100644 --- a/drivers/pwm/pwm-mtk-disp.c +++ b/drivers/pwm/pwm-mtk-disp.c @@ -202,7 +202,6 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev) mdp->chip.dev = &pdev->dev; mdp->chip.ops = &mtk_disp_pwm_ops; - mdp->chip.base = -1; mdp->chip.npwm = 1; ret = pwmchip_add(&mdp->chip); diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c index 7ce616923c52..0266e84e982c 100644 --- a/drivers/pwm/pwm-mxs.c +++ b/drivers/pwm/pwm-mxs.c @@ -140,7 +140,6 @@ static int 
mxs_pwm_probe(struct platform_device *pdev) mxs->chip.ops = &mxs_pwm_ops; mxs->chip.of_xlate = of_pwm_xlate_with_flags; mxs->chip.of_pwm_n_cells = 3; - mxs->chip.base = -1; ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm); if (ret < 0) { diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c index 358db4ff9d4f..612b3c859295 100644 --- a/drivers/pwm/pwm-omap-dmtimer.c +++ b/drivers/pwm/pwm-omap-dmtimer.c @@ -403,7 +403,6 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev) omap->chip.dev = &pdev->dev; omap->chip.ops = &pwm_omap_dmtimer_ops; - omap->chip.base = -1; omap->chip.npwm = 1; omap->chip.of_xlate = of_pwm_xlate_with_flags; omap->chip.of_pwm_n_cells = 3; diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c index 4a55dc18656c..7c9f174de64e 100644 --- a/drivers/pwm/pwm-pca9685.c +++ b/drivers/pwm/pwm-pca9685.c @@ -51,7 +51,6 @@ #define PCA9685_PRESCALE_MAX 0xFF /* => min. frequency of 24 Hz */ #define PCA9685_COUNTER_RANGE 4096 -#define PCA9685_DEFAULT_PERIOD 5000000 /* Default period_ns = 1/200 Hz */ #define PCA9685_OSC_CLOCK_MHZ 25 /* Internal oscillator with 25 MHz */ #define PCA9685_NUMREGS 0xFF @@ -71,10 +70,14 @@ #define LED_N_OFF_H(N) (PCA9685_LEDX_OFF_H + (4 * (N))) #define LED_N_OFF_L(N) (PCA9685_LEDX_OFF_L + (4 * (N))) +#define REG_ON_H(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_ON_H : LED_N_ON_H((C))) +#define REG_ON_L(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_ON_L : LED_N_ON_L((C))) +#define REG_OFF_H(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_OFF_H : LED_N_OFF_H((C))) +#define REG_OFF_L(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_OFF_L : LED_N_OFF_L((C))) + struct pca9685 { struct pwm_chip chip; struct regmap *regmap; - int period_ns; #if IS_ENABLED(CONFIG_GPIOLIB) struct mutex lock; struct gpio_chip gpio; @@ -87,6 +90,53 @@ static inline struct pca9685 *to_pca(struct pwm_chip *chip) return container_of(chip, struct pca9685, chip); } +/* Helper function to set the duty cycle ratio to duty/4096 (e.g. 
duty=2048 -> 50%) */ +static void pca9685_pwm_set_duty(struct pca9685 *pca, int channel, unsigned int duty) +{ + if (duty == 0) { + /* Set the full OFF bit, which has the highest precedence */ + regmap_write(pca->regmap, REG_OFF_H(channel), LED_FULL); + } else if (duty >= PCA9685_COUNTER_RANGE) { + /* Set the full ON bit and clear the full OFF bit */ + regmap_write(pca->regmap, REG_ON_H(channel), LED_FULL); + regmap_write(pca->regmap, REG_OFF_H(channel), 0); + } else { + /* Set OFF time (clears the full OFF bit) */ + regmap_write(pca->regmap, REG_OFF_L(channel), duty & 0xff); + regmap_write(pca->regmap, REG_OFF_H(channel), (duty >> 8) & 0xf); + /* Clear the full ON bit */ + regmap_write(pca->regmap, REG_ON_H(channel), 0); + } +} + +static unsigned int pca9685_pwm_get_duty(struct pca9685 *pca, int channel) +{ + unsigned int off_h = 0, val = 0; + + if (WARN_ON(channel >= PCA9685_MAXCHAN)) { + /* HW does not support reading state of "all LEDs" channel */ + return 0; + } + + regmap_read(pca->regmap, LED_N_OFF_H(channel), &off_h); + if (off_h & LED_FULL) { + /* Full OFF bit is set */ + return 0; + } + + regmap_read(pca->regmap, LED_N_ON_H(channel), &val); + if (val & LED_FULL) { + /* Full ON bit is set */ + return PCA9685_COUNTER_RANGE; + } + + if (regmap_read(pca->regmap, LED_N_OFF_L(channel), &val)) { + /* Reset val to 0 in case reading LED_N_OFF_L failed */ + val = 0; + } + return ((off_h & 0xf) << 8) | (val & 0xff); +} + #if IS_ENABLED(CONFIG_GPIOLIB) static bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, int pwm_idx) { @@ -138,34 +188,23 @@ static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset) static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset) { struct pca9685 *pca = gpiochip_get_data(gpio); - struct pwm_device *pwm = &pca->chip.pwms[offset]; - unsigned int value; - - regmap_read(pca->regmap, LED_N_ON_H(pwm->hwpwm), &value); - return value & LED_FULL; + return pca9685_pwm_get_duty(pca, offset) != 0; } static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset, int value) { struct pca9685 *pca = gpiochip_get_data(gpio); - struct pwm_device *pwm = &pca->chip.pwms[offset]; - unsigned int on = value ? LED_FULL : 0; - /* Clear both OFF registers */ - regmap_write(pca->regmap, LED_N_OFF_L(pwm->hwpwm), 0); - regmap_write(pca->regmap, LED_N_OFF_H(pwm->hwpwm), 0); - - /* Set the full ON bit */ - regmap_write(pca->regmap, LED_N_ON_H(pwm->hwpwm), on); + pca9685_pwm_set_duty(pca, offset, value ? 
PCA9685_COUNTER_RANGE : 0); } static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset) { struct pca9685 *pca = gpiochip_get_data(gpio); - pca9685_pwm_gpio_set(gpio, offset, 0); + pca9685_pwm_set_duty(pca, offset, 0); pm_runtime_put(pca->chip.dev); pca9685_pwm_clear_inuse(pca, offset); } @@ -246,165 +285,85 @@ static void pca9685_set_sleep_mode(struct pca9685 *pca, bool enable) } } -static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) +static int pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) { struct pca9685 *pca = to_pca(chip); - unsigned long long duty; - unsigned int reg; - int prescale; - - if (period_ns != pca->period_ns) { - prescale = DIV_ROUND_CLOSEST(PCA9685_OSC_CLOCK_MHZ * period_ns, - PCA9685_COUNTER_RANGE * 1000) - 1; - - if (prescale >= PCA9685_PRESCALE_MIN && - prescale <= PCA9685_PRESCALE_MAX) { - /* - * Putting the chip briefly into SLEEP mode - * at this point won't interfere with the - * pm_runtime framework, because the pm_runtime - * state is guaranteed active here. - */ - /* Put chip into sleep mode */ - pca9685_set_sleep_mode(pca, true); - - /* Change the chip-wide output frequency */ - regmap_write(pca->regmap, PCA9685_PRESCALE, prescale); - - /* Wake the chip up */ - pca9685_set_sleep_mode(pca, false); - - pca->period_ns = period_ns; - } else { - dev_err(chip->dev, - "prescaler not set: period out of bounds!\n"); - return -EINVAL; - } - } + unsigned long long duty, prescale; + unsigned int val = 0; - if (duty_ns < 1) { - if (pwm->hwpwm >= PCA9685_MAXCHAN) - reg = PCA9685_ALL_LED_OFF_H; - else - reg = LED_N_OFF_H(pwm->hwpwm); + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; - regmap_write(pca->regmap, reg, LED_FULL); - - return 0; + prescale = DIV_ROUND_CLOSEST_ULL(PCA9685_OSC_CLOCK_MHZ * state->period, + PCA9685_COUNTER_RANGE * 1000) - 1; + if (prescale < PCA9685_PRESCALE_MIN || prescale > PCA9685_PRESCALE_MAX) { + dev_err(chip->dev, "pwm not changed: period out of bounds!\n"); + return -EINVAL; } - if (duty_ns == period_ns) { - /* Clear both OFF registers */ - if (pwm->hwpwm >= PCA9685_MAXCHAN) - reg = PCA9685_ALL_LED_OFF_L; - else - reg = LED_N_OFF_L(pwm->hwpwm); - - regmap_write(pca->regmap, reg, 0x0); - - if (pwm->hwpwm >= PCA9685_MAXCHAN) - reg = PCA9685_ALL_LED_OFF_H; - else - reg = LED_N_OFF_H(pwm->hwpwm); - - regmap_write(pca->regmap, reg, 0x0); - - /* Set the full ON bit */ - if (pwm->hwpwm >= PCA9685_MAXCHAN) - reg = PCA9685_ALL_LED_ON_H; - else - reg = LED_N_ON_H(pwm->hwpwm); - - regmap_write(pca->regmap, reg, LED_FULL); - + if (!state->enabled) { + pca9685_pwm_set_duty(pca, pwm->hwpwm, 0); return 0; } - duty = PCA9685_COUNTER_RANGE * (unsigned long long)duty_ns; - duty = DIV_ROUND_UP_ULL(duty, period_ns); - - if (pwm->hwpwm >= PCA9685_MAXCHAN) - reg = PCA9685_ALL_LED_OFF_L; - else - reg = LED_N_OFF_L(pwm->hwpwm); - - regmap_write(pca->regmap, reg, (int)duty & 0xff); - - if (pwm->hwpwm >= PCA9685_MAXCHAN) - reg = PCA9685_ALL_LED_OFF_H; - else - reg = LED_N_OFF_H(pwm->hwpwm); - - regmap_write(pca->regmap, reg, ((int)duty >> 8) & 0xf); + regmap_read(pca->regmap, PCA9685_PRESCALE, &val); + if (prescale != val) { + /* + * Putting the chip briefly into SLEEP mode + * at this point won't interfere with the + * pm_runtime framework, because the pm_runtime + * state is guaranteed active here. 
+ */ + /* Put chip into sleep mode */ + pca9685_set_sleep_mode(pca, true); - /* Clear the full ON bit, otherwise the set OFF time has no effect */ - if (pwm->hwpwm >= PCA9685_MAXCHAN) - reg = PCA9685_ALL_LED_ON_H; - else - reg = LED_N_ON_H(pwm->hwpwm); + /* Change the chip-wide output frequency */ + regmap_write(pca->regmap, PCA9685_PRESCALE, prescale); - regmap_write(pca->regmap, reg, 0); + /* Wake the chip up */ + pca9685_set_sleep_mode(pca, false); + } + duty = PCA9685_COUNTER_RANGE * state->duty_cycle; + duty = DIV_ROUND_UP_ULL(duty, state->period); + pca9685_pwm_set_duty(pca, pwm->hwpwm, duty); return 0; } -static int pca9685_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +static void pca9685_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) { struct pca9685 *pca = to_pca(chip); - unsigned int reg; - - /* - * The PWM subsystem does not support a pre-delay. - * So, set the ON-timeout to 0 - */ - if (pwm->hwpwm >= PCA9685_MAXCHAN) - reg = PCA9685_ALL_LED_ON_L; - else - reg = LED_N_ON_L(pwm->hwpwm); - - regmap_write(pca->regmap, reg, 0); - - if (pwm->hwpwm >= PCA9685_MAXCHAN) - reg = PCA9685_ALL_LED_ON_H; - else - reg = LED_N_ON_H(pwm->hwpwm); - - regmap_write(pca->regmap, reg, 0); + unsigned long long duty; + unsigned int val = 0; + /* Calculate (chip-wide) period from prescale value */ + regmap_read(pca->regmap, PCA9685_PRESCALE, &val); /* - * Clear the full-off bit. - * It has precedence over the others and must be off. + * PCA9685_OSC_CLOCK_MHZ is 25, i.e. an integer divider of 1000. + * The following calculation is therefore only a multiplication + * and we are not losing precision. */ - if (pwm->hwpwm >= PCA9685_MAXCHAN) - reg = PCA9685_ALL_LED_OFF_H; - else - reg = LED_N_OFF_H(pwm->hwpwm); - - regmap_update_bits(pca->regmap, reg, LED_FULL, 0x0); + state->period = (PCA9685_COUNTER_RANGE * 1000 / PCA9685_OSC_CLOCK_MHZ) * + (val + 1); - return 0; -} + /* The (per-channel) polarity is fixed */ + state->polarity = PWM_POLARITY_NORMAL; -static void pca9685_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) -{ - struct pca9685 *pca = to_pca(chip); - unsigned int reg; - - if (pwm->hwpwm >= PCA9685_MAXCHAN) - reg = PCA9685_ALL_LED_OFF_H; - else - reg = LED_N_OFF_H(pwm->hwpwm); - - regmap_write(pca->regmap, reg, LED_FULL); - - /* Clear the LED_OFF counter. 
*/ - if (pwm->hwpwm >= PCA9685_MAXCHAN) - reg = PCA9685_ALL_LED_OFF_L; - else - reg = LED_N_OFF_L(pwm->hwpwm); + if (pwm->hwpwm >= PCA9685_MAXCHAN) { + /* + * The "all LEDs" channel does not support HW readout + * Return 0 and disabled for backwards compatibility + */ + state->duty_cycle = 0; + state->enabled = false; + return; + } - regmap_write(pca->regmap, reg, 0x0); + state->enabled = true; + duty = pca9685_pwm_get_duty(pca, pwm->hwpwm); + state->duty_cycle = DIV_ROUND_DOWN_ULL(duty * state->period, PCA9685_COUNTER_RANGE); } static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) @@ -422,15 +381,14 @@ static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { struct pca9685 *pca = to_pca(chip); - pca9685_pwm_disable(chip, pwm); + pca9685_pwm_set_duty(pca, pwm->hwpwm, 0); pm_runtime_put(chip->dev); pca9685_pwm_clear_inuse(pca, pwm->hwpwm); } static const struct pwm_ops pca9685_pwm_ops = { - .enable = pca9685_pwm_enable, - .disable = pca9685_pwm_disable, - .config = pca9685_pwm_config, + .apply = pca9685_pwm_apply, + .get_state = pca9685_pwm_get_state, .request = pca9685_pwm_request, .free = pca9685_pwm_free, .owner = THIS_MODULE, @@ -461,7 +419,6 @@ static int pca9685_pwm_probe(struct i2c_client *client, ret); return ret; } - pca->period_ns = PCA9685_DEFAULT_PERIOD; i2c_set_clientdata(client, pca); @@ -484,16 +441,15 @@ static int pca9685_pwm_probe(struct i2c_client *client, reg &= ~(MODE1_ALLCALL | MODE1_SUB1 | MODE1_SUB2 | MODE1_SUB3); regmap_write(pca->regmap, PCA9685_MODE1, reg); - /* Clear all "full off" bits */ - regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_L, 0); - regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_H, 0); + /* Reset OFF registers to POR default */ + regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_L, LED_FULL); + regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_H, LED_FULL); pca->chip.ops = &pca9685_pwm_ops; /* Add an extra channel for ALL_LED */ pca->chip.npwm = PCA9685_MAXCHAN + 1; pca->chip.dev = &client->dev; - pca->chip.base = -1; ret = pwmchip_add(&pca->chip); if (ret < 0) @@ -505,14 +461,20 @@ static int pca9685_pwm_probe(struct i2c_client *client, return ret; } - /* The chip comes out of power-up in the active state */ - pm_runtime_set_active(&client->dev); - /* - * Enable will put the chip into suspend, which is what we - * want as all outputs are disabled at this point - */ pm_runtime_enable(&client->dev); + if (pm_runtime_enabled(&client->dev)) { + /* + * Although the chip comes out of power-up in the sleep state, + * we force it to sleep in case it was woken up before + */ + pca9685_set_sleep_mode(pca, true); + pm_runtime_set_suspended(&client->dev); + } else { + /* Wake the chip up if runtime PM is disabled */ + pca9685_set_sleep_mode(pca, false); + } + return 0; } @@ -524,7 +486,14 @@ static int pca9685_pwm_remove(struct i2c_client *client) ret = pwmchip_remove(&pca->chip); if (ret) return ret; + + if (!pm_runtime_enabled(&client->dev)) { + /* Put chip in sleep state if runtime PM is disabled */ + pca9685_set_sleep_mode(pca, true); + } + pm_runtime_disable(&client->dev); + return 0; } diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c index d06cf60e6575..cfb683827d32 100644 --- a/drivers/pwm/pwm-pxa.c +++ b/drivers/pwm/pwm-pxa.c @@ -184,7 +184,6 @@ static int pwm_probe(struct platform_device *pdev) pwm->chip.dev = &pdev->dev; pwm->chip.ops = &pxa_pwm_ops; - pwm->chip.base = -1; pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 
2 : 1; if (IS_ENABLED(CONFIG_OF)) { diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c index 002ab79a7ec2..9daca0c772c7 100644 --- a/drivers/pwm/pwm-rcar.c +++ b/drivers/pwm/pwm-rcar.c @@ -224,7 +224,6 @@ static int rcar_pwm_probe(struct platform_device *pdev) rcar_pwm->chip.dev = &pdev->dev; rcar_pwm->chip.ops = &rcar_pwm_ops; - rcar_pwm->chip.base = -1; rcar_pwm->chip.npwm = 1; pm_runtime_enable(&pdev->dev); diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c index d02b24b77cdf..e2959fae0969 100644 --- a/drivers/pwm/pwm-renesas-tpu.c +++ b/drivers/pwm/pwm-renesas-tpu.c @@ -410,7 +410,6 @@ static int tpu_probe(struct platform_device *pdev) tpu->chip.ops = &tpu_pwm_ops; tpu->chip.of_xlate = of_pwm_xlate_with_flags; tpu->chip.of_pwm_n_cells = 3; - tpu->chip.base = -1; tpu->chip.npwm = TPU_CHANNEL_MAX; pm_runtime_enable(&pdev->dev); diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c index 6ad7d0a50aed..301785fa293e 100644 --- a/drivers/pwm/pwm-rockchip.c +++ b/drivers/pwm/pwm-rockchip.c @@ -352,7 +352,6 @@ static int rockchip_pwm_probe(struct platform_device *pdev) pc->data = id->data; pc->chip.dev = &pdev->dev; pc->chip.ops = &rockchip_pwm_ops; - pc->chip.base = -1; pc->chip.npwm = 1; if (pc->data->supports_polarity) { diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c index 645d0066ff0a..515489fa4f6d 100644 --- a/drivers/pwm/pwm-samsung.c +++ b/drivers/pwm/pwm-samsung.c @@ -519,7 +519,6 @@ static int pwm_samsung_probe(struct platform_device *pdev) chip->chip.dev = &pdev->dev; chip->chip.ops = &pwm_samsung_ops; - chip->chip.base = -1; chip->chip.npwm = SAMSUNG_PWM_NUM; chip->inverter_mask = BIT(SAMSUNG_PWM_NUM) - 1; diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c index 2a7cd2deaeea..688737f091ac 100644 --- a/drivers/pwm/pwm-sifive.c +++ b/drivers/pwm/pwm-sifive.c @@ -244,7 +244,6 @@ static int pwm_sifive_probe(struct platform_device *pdev) chip->ops = &pwm_sifive_ops; chip->of_xlate = of_pwm_xlate_with_flags; chip->of_pwm_n_cells = 3; - chip->base = -1; chip->npwm = 4; ddata->regs = devm_platform_ioremap_resource(pdev, 0); diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c index 0b01ec25e2f0..7a69c1a0c060 100644 --- a/drivers/pwm/pwm-sl28cpld.c +++ b/drivers/pwm/pwm-sl28cpld.c @@ -229,7 +229,6 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev) chip = &priv->pwm_chip; chip->dev = &pdev->dev; chip->ops = &sl28cpld_pwm_ops; - chip->base = -1; chip->npwm = 1; platform_set_drvdata(pdev, priv); diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c index f63b54aae1b4..1a1cedfd11ce 100644 --- a/drivers/pwm/pwm-spear.c +++ b/drivers/pwm/pwm-spear.c @@ -193,7 +193,6 @@ static int spear_pwm_probe(struct platform_device *pdev) pc->chip.dev = &pdev->dev; pc->chip.ops = &spear_pwm_ops; - pc->chip.base = -1; pc->chip.npwm = NUM_PWM; ret = clk_prepare(pc->clk); diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c index 5123d948efd6..98c479dfae31 100644 --- a/drivers/pwm/pwm-sprd.c +++ b/drivers/pwm/pwm-sprd.c @@ -164,6 +164,9 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *cstate = &pwm->state; int ret; + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + if (state->enabled) { if (!cstate->enabled) { /* @@ -268,7 +271,6 @@ static int sprd_pwm_probe(struct platform_device *pdev) spc->chip.dev = &pdev->dev; spc->chip.ops = &sprd_pwm_ops; - spc->chip.base = -1; spc->chip.npwm = spc->num_pwms; ret = pwmchip_add(&spc->chip); 
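Taken together, the PWM hunks above repeat three conversions across nearly every driver: the manual chip.base = -1 assignment is dropped (the PWM core now allocates a dynamic base by default), .apply callbacks for hardware without polarity support start rejecting PWM_POLARITY_INVERSED requests (cros-ec, sprd), and .remove paths call pwmchip_remove() before shutting down clocks, so no framework callback can reach dead hardware, then return 0 unconditionally (atmel, bcm-iproc, bcm2835, lpc18xx-sct, sti). Below is a minimal sketch that pulls the pattern into one place; the foo_* driver, its register-free apply body and its single clock are hypothetical and not part of this series.

// SPDX-License-Identifier: GPL-2.0
/*
 * Illustrative sketch only, not a driver from this merge. It restates the
 * conventions the patches above converge on: no chip.base assignment, an
 * .apply callback that refuses unsupported polarity, and a remove path
 * that unregisters the chip before tearing down resources.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

struct foo_pwm_chip {
	struct pwm_chip chip;
	struct clk *clk;
};

static inline struct foo_pwm_chip *to_foo(struct pwm_chip *chip)
{
	return container_of(chip, struct foo_pwm_chip, chip);
}

static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			 const struct pwm_state *state)
{
	/* Hardware without polarity support rejects inverted requests. */
	if (state->polarity != PWM_POLARITY_NORMAL)
		return -EINVAL;

	/* Period/duty register programming would go here. */
	return 0;
}

static const struct pwm_ops foo_pwm_ops = {
	.apply = foo_pwm_apply,
	.owner = THIS_MODULE,
};

static int foo_pwm_probe(struct platform_device *pdev)
{
	struct foo_pwm_chip *foo;
	int ret;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(foo->clk))
		return PTR_ERR(foo->clk);

	ret = clk_prepare_enable(foo->clk);
	if (ret)
		return ret;

	foo->chip.dev = &pdev->dev;
	foo->chip.ops = &foo_pwm_ops;
	foo->chip.npwm = 1;
	/* Note: no foo->chip.base = -1; dynamic allocation is the default. */

	platform_set_drvdata(pdev, foo);

	ret = pwmchip_add(&foo->chip);
	if (ret)
		clk_disable_unprepare(foo->clk);

	return ret;
}

static int foo_pwm_remove(struct platform_device *pdev)
{
	struct foo_pwm_chip *foo = platform_get_drvdata(pdev);

	/* Unregister first so no callback can run against dead hardware... */
	pwmchip_remove(&foo->chip);

	/* ...then tear down resources and return 0 unconditionally. */
	clk_disable_unprepare(foo->clk);

	return 0;
}

static struct platform_driver foo_pwm_driver = {
	.driver = {
		.name = "pwm-foo",
	},
	.probe = foo_pwm_probe,
	.remove = foo_pwm_remove,
};
module_platform_driver(foo_pwm_driver);

MODULE_LICENSE("GPL v2");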
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c index 99c70e07858d..f491d56254d7 100644 --- a/drivers/pwm/pwm-sti.c +++ b/drivers/pwm/pwm-sti.c @@ -619,7 +619,6 @@ static int sti_pwm_probe(struct platform_device *pdev) pc->chip.dev = dev; pc->chip.ops = &sti_pwm_ops; - pc->chip.base = -1; pc->chip.npwm = pc->cdata->pwm_num_devs; ret = pwmchip_add(&pc->chip); @@ -650,15 +649,13 @@ static int sti_pwm_probe(struct platform_device *pdev) static int sti_pwm_remove(struct platform_device *pdev) { struct sti_pwm_chip *pc = platform_get_drvdata(pdev); - unsigned int i; - for (i = 0; i < pc->cdata->pwm_num_devs; i++) - pwm_disable(&pc->chip.pwms[i]); + pwmchip_remove(&pc->chip); clk_unprepare(pc->pwm_clk); clk_unprepare(pc->cpt_clk); - return pwmchip_remove(&pc->chip); + return 0; } static const struct of_device_id sti_pwm_of_match[] = { diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c index 134c14621ee0..af08f564ef1d 100644 --- a/drivers/pwm/pwm-stm32-lp.c +++ b/drivers/pwm/pwm-stm32-lp.c @@ -205,7 +205,6 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev) priv->regmap = ddata->regmap; priv->clk = ddata->clk; - priv->chip.base = -1; priv->chip.dev = &pdev->dev; priv->chip.ops = &stm32_pwm_lp_ops; priv->chip.npwm = 1; diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c index d3be944f2ae9..c46fb90036ab 100644 --- a/drivers/pwm/pwm-stm32.c +++ b/drivers/pwm/pwm-stm32.c @@ -633,7 +633,6 @@ static int stm32_pwm_probe(struct platform_device *pdev) stm32_pwm_detect_complementary(priv); - priv->chip.base = -1; priv->chip.dev = dev; priv->chip.ops = &stm32pwm_ops; priv->chip.npwm = stm32_pwm_detect_channels(priv); diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c index be5f6d7359d4..9dc983a3cbf1 100644 --- a/drivers/pwm/pwm-stmpe.c +++ b/drivers/pwm/pwm-stmpe.c @@ -278,7 +278,6 @@ static int __init stmpe_pwm_probe(struct platform_device *pdev) pwm->stmpe = stmpe; pwm->chip.dev = &pdev->dev; - pwm->chip.base = -1; if (stmpe->partnum == STMPE2401 || stmpe->partnum == STMPE2403) { pwm->chip.ops = &stmpe_24xx_pwm_ops; diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c index ce5c4fc8da6f..e01becd102c0 100644 --- a/drivers/pwm/pwm-sun4i.c +++ b/drivers/pwm/pwm-sun4i.c @@ -459,7 +459,6 @@ static int sun4i_pwm_probe(struct platform_device *pdev) pwm->chip.dev = &pdev->dev; pwm->chip.ops = &sun4i_pwm_ops; - pwm->chip.base = -1; pwm->chip.npwm = pwm->data->npwm; pwm->chip.of_xlate = of_pwm_xlate_with_flags; pwm->chip.of_pwm_n_cells = 3; diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index 55bc63d5a0ae..c529a170bcdd 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -285,7 +285,6 @@ static int tegra_pwm_probe(struct platform_device *pdev) pwm->chip.dev = &pdev->dev; pwm->chip.ops = &tegra_pwm_ops; - pwm->chip.base = -1; pwm->chip.npwm = pwm->soc->num_channels; ret = pwmchip_add(&pwm->chip); diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c index 2a8949014bb1..b9a17ab0c202 100644 --- a/drivers/pwm/pwm-tiecap.c +++ b/drivers/pwm/pwm-tiecap.c @@ -226,7 +226,6 @@ static int ecap_pwm_probe(struct platform_device *pdev) pc->chip.ops = &ecap_pwm_ops; pc->chip.of_xlate = of_pwm_xlate_with_flags; pc->chip.of_pwm_n_cells = 3; - pc->chip.base = -1; pc->chip.npwm = 1; pc->mmio_base = devm_platform_ioremap_resource(pdev, 0); diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c index a7fb224d6535..90095a19bf2d 100644 --- a/drivers/pwm/pwm-tiehrpwm.c +++ b/drivers/pwm/pwm-tiehrpwm.c @@ 
-449,7 +449,6 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev) pc->chip.ops = &ehrpwm_pwm_ops; pc->chip.of_xlate = of_pwm_xlate_with_flags; pc->chip.of_pwm_n_cells = 3; - pc->chip.base = -1; pc->chip.npwm = NUM_PWM_CHANNEL; pc->mmio_base = devm_platform_ioremap_resource(pdev, 0); diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c index 630b9a578820..6c8df5f4e87d 100644 --- a/drivers/pwm/pwm-twl-led.c +++ b/drivers/pwm/pwm-twl-led.c @@ -291,7 +291,6 @@ static int twl_pwmled_probe(struct platform_device *pdev) } twl->chip.dev = &pdev->dev; - twl->chip.base = -1; mutex_init(&twl->mutex); diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c index aee67974f353..e83a826bf621 100644 --- a/drivers/pwm/pwm-twl.c +++ b/drivers/pwm/pwm-twl.c @@ -310,7 +310,6 @@ static int twl_pwm_probe(struct platform_device *pdev) twl->chip.ops = &twl6030_pwm_ops; twl->chip.dev = &pdev->dev; - twl->chip.base = -1; twl->chip.npwm = 2; mutex_init(&twl->mutex); diff --git a/drivers/pwm/pwm-visconti.c b/drivers/pwm/pwm-visconti.c new file mode 100644 index 000000000000..46d903786366 --- /dev/null +++ b/drivers/pwm/pwm-visconti.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Toshiba Visconti pulse-width-modulation controller driver + * + * Copyright (c) 2020 - 2021 TOSHIBA CORPORATION + * Copyright (c) 2020 - 2021 Toshiba Electronic Devices & Storage Corporation + * + * Authors: Nobuhiro Iwamatsu <[email protected]> + * + * Limitations: + * - The fixed input clock is running at 1 MHz and is divided by either 1, + * 2, 4 or 8. + * - When the settings of the PWM are modified, the new values are shadowed + * in hardware until the PIPGM_PCSR register is written and the currently + * running period is completed. This way the hardware switches atomically + * from the old setting to the new. + * - Disabling the hardware completes the currently running period and keeps + * the output at low level at all times. + */ + +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pwm.h> + +#define PIPGM_PCSR(ch) (0x400 + 4 * (ch)) +#define PIPGM_PDUT(ch) (0x420 + 4 * (ch)) +#define PIPGM_PWMC(ch) (0x440 + 4 * (ch)) + +#define PIPGM_PWMC_PWMACT BIT(5) +#define PIPGM_PWMC_CLK_MASK GENMASK(1, 0) +#define PIPGM_PWMC_POLARITY_MASK GENMASK(5, 5) + +struct visconti_pwm_chip { + struct pwm_chip chip; + void __iomem *base; +}; + +static inline struct visconti_pwm_chip *visconti_pwm_from_chip(struct pwm_chip *chip) +{ + return container_of(chip, struct visconti_pwm_chip, chip); +} + +static int visconti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct visconti_pwm_chip *priv = visconti_pwm_from_chip(chip); + u32 period, duty_cycle, pwmc0; + + if (!state->enabled) { + writel(0, priv->base + PIPGM_PCSR(pwm->hwpwm)); + return 0; + } + + /* + * The biggest period the hardware can provide is + * (0xffff << 3) * 1000 ns + * This value fits easily in an u32, so simplify the maths by + * capping the values to 32 bit integers. + */ + if (state->period > (0xffff << 3) * 1000) + period = (0xffff << 3) * 1000; + else + period = state->period; + + if (state->duty_cycle > period) + duty_cycle = period; + else + duty_cycle = state->duty_cycle; + + /* + * The input clock runs fixed at 1 MHz, so we have only + * microsecond resolution and so can divide by + * NSEC_PER_SEC / CLKFREQ = 1000 without losing precision. 
+ */ + period /= 1000; + duty_cycle /= 1000; + + if (!period) + return -ERANGE; + + /* + * PWMC controls a divider that divides the input clk by a + * power of two between 1 and 8. As a smaller divider yields + * higher precision, pick the smallest possible one. + */ + if (period > 0xffff) { + pwmc0 = ilog2(period >> 16); + if (WARN_ON(pwmc0 > 3)) + return -EINVAL; + } else { + pwmc0 = 0; + } + + period >>= pwmc0; + duty_cycle >>= pwmc0; + + if (state->polarity == PWM_POLARITY_INVERSED) + pwmc0 |= PIPGM_PWMC_PWMACT; + writel(pwmc0, priv->base + PIPGM_PWMC(pwm->hwpwm)); + writel(duty_cycle, priv->base + PIPGM_PDUT(pwm->hwpwm)); + writel(period, priv->base + PIPGM_PCSR(pwm->hwpwm)); + + return 0; +} + +static void visconti_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct visconti_pwm_chip *priv = visconti_pwm_from_chip(chip); + u32 period, duty, pwmc0, pwmc0_clk; + + period = readl(priv->base + PIPGM_PCSR(pwm->hwpwm)); + duty = readl(priv->base + PIPGM_PDUT(pwm->hwpwm)); + pwmc0 = readl(priv->base + PIPGM_PWMC(pwm->hwpwm)); + pwmc0_clk = pwmc0 & PIPGM_PWMC_CLK_MASK; + + state->period = (period << pwmc0_clk) * NSEC_PER_USEC; + state->duty_cycle = (duty << pwmc0_clk) * NSEC_PER_USEC; + if (pwmc0 & PIPGM_PWMC_POLARITY_MASK) + state->polarity = PWM_POLARITY_INVERSED; + else + state->polarity = PWM_POLARITY_NORMAL; + + state->enabled = true; +} + +static const struct pwm_ops visconti_pwm_ops = { + .apply = visconti_pwm_apply, + .get_state = visconti_pwm_get_state, + .owner = THIS_MODULE, +}; + +static int visconti_pwm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct visconti_pwm_chip *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + platform_set_drvdata(pdev, priv); + + priv->chip.dev = dev; + priv->chip.ops = &visconti_pwm_ops; + priv->chip.npwm = 4; + + ret = pwmchip_add(&priv->chip); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "Cannot register visconti PWM\n"); + + return 0; +} + +static int visconti_pwm_remove(struct platform_device *pdev) +{ + struct visconti_pwm_chip *priv = platform_get_drvdata(pdev); + + pwmchip_remove(&priv->chip); + + return 0; +} + +static const struct of_device_id visconti_pwm_of_match[] = { + { .compatible = "toshiba,visconti-pwm", }, + { } +}; +MODULE_DEVICE_TABLE(of, visconti_pwm_of_match); + +static struct platform_driver visconti_pwm_driver = { + .driver = { + .name = "pwm-visconti", + .of_match_table = visconti_pwm_of_match, + }, + .probe = visconti_pwm_probe, + .remove = visconti_pwm_remove, +}; +module_platform_driver(visconti_pwm_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Nobuhiro Iwamatsu <[email protected]>"); +MODULE_ALIAS("platform:pwm-visconti"); diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c index 6e36851a22bb..52fe5d19473a 100644 --- a/drivers/pwm/pwm-vt8500.c +++ b/drivers/pwm/pwm-vt8500.c @@ -209,7 +209,6 @@ static int vt8500_pwm_probe(struct platform_device *pdev) chip->chip.ops = &vt8500_pwm_ops; chip->chip.of_xlate = of_pwm_xlate_with_flags; chip->chip.of_pwm_n_cells = 3; - chip->chip.base = -1; chip->chip.npwm = VT8500_NR_PWMS; chip->clk = devm_clk_get(&pdev->dev, NULL); diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 15d1574d129b..e68fcedc999c 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -24,11 
+24,12 @@ config REMOTEPROC_CDEV It's safe to say N if you don't want to use this interface. config IMX_REMOTEPROC - tristate "IMX6/7 remoteproc support" + tristate "i.MX remoteproc support" depends on ARCH_MXC + select MAILBOX help - Say y here to support iMX's remote processors (Cortex M4 - on iMX7D) via the remote processor framework. + Say y here to support iMX's remote processors via the remote + processor framework. It's safe to say N here. diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 8957ed271d20..d6338872c6db 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -7,13 +7,18 @@ #include <linux/err.h> #include <linux/interrupt.h> #include <linux/kernel.h> +#include <linux/mailbox_client.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of_address.h> +#include <linux/of_reserved_mem.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/remoteproc.h> +#include <linux/workqueue.h> + +#include "remoteproc_internal.h" #define IMX7D_SRC_SCR 0x0C #define IMX7D_ENABLE_M4 BIT(3) @@ -43,7 +48,7 @@ | IMX6SX_SW_M4C_NON_SCLR_RST \ | IMX6SX_SW_M4C_RST) -#define IMX7D_RPROC_MEM_MAX 8 +#define IMX_RPROC_MEM_MAX 32 /** * struct imx_rproc_mem - slim internal memory structure @@ -83,8 +88,42 @@ struct imx_rproc { struct regmap *regmap; struct rproc *rproc; const struct imx_rproc_dcfg *dcfg; - struct imx_rproc_mem mem[IMX7D_RPROC_MEM_MAX]; + struct imx_rproc_mem mem[IMX_RPROC_MEM_MAX]; struct clk *clk; + struct mbox_client cl; + struct mbox_chan *tx_ch; + struct mbox_chan *rx_ch; + struct work_struct rproc_work; + struct workqueue_struct *workqueue; + void __iomem *rsc_table; +}; + +static const struct imx_rproc_att imx_rproc_att_imx8mq[] = { + /* dev addr , sys addr , size , flags */ + /* TCML - alias */ + { 0x00000000, 0x007e0000, 0x00020000, 0 }, + /* OCRAM_S */ + { 0x00180000, 0x00180000, 0x00008000, 0 }, + /* OCRAM */ + { 0x00900000, 0x00900000, 0x00020000, 0 }, + /* OCRAM */ + { 0x00920000, 0x00920000, 0x00020000, 0 }, + /* QSPI Code - alias */ + { 0x08000000, 0x08000000, 0x08000000, 0 }, + /* DDR (Code) - alias */ + { 0x10000000, 0x80000000, 0x0FFE0000, 0 }, + /* TCML */ + { 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN }, + /* TCMU */ + { 0x20000000, 0x00800000, 0x00020000, ATT_OWN }, + /* OCRAM_S */ + { 0x20180000, 0x00180000, 0x00008000, ATT_OWN }, + /* OCRAM */ + { 0x20200000, 0x00900000, 0x00020000, ATT_OWN }, + /* OCRAM */ + { 0x20220000, 0x00920000, 0x00020000, ATT_OWN }, + /* DDR (Data) */ + { 0x40000000, 0x40000000, 0x80000000, 0 }, }; static const struct imx_rproc_att imx_rproc_att_imx7d[] = { @@ -137,6 +176,15 @@ static const struct imx_rproc_att imx_rproc_att_imx6sx[] = { { 0x80000000, 0x80000000, 0x60000000, 0 }, }; +static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = { + .src_reg = IMX7D_SRC_SCR, + .src_mask = IMX7D_M4_RST_MASK, + .src_start = IMX7D_M4_START, + .src_stop = IMX7D_M4_STOP, + .att = imx_rproc_att_imx8mq, + .att_size = ARRAY_SIZE(imx_rproc_att_imx8mq), +}; + static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = { .src_reg = IMX7D_SRC_SCR, .src_mask = IMX7D_M4_RST_MASK, @@ -208,7 +256,7 @@ static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da, return -ENOENT; } -static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct imx_rproc *priv = rproc->priv; void *va = NULL; @@ -225,7 +273,7 @@ static void 
*imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) if (imx_rproc_da_to_sys(priv, da, len, &sys)) return NULL; - for (i = 0; i < IMX7D_RPROC_MEM_MAX; i++) { + for (i = 0; i < IMX_RPROC_MEM_MAX; i++) { if (sys >= priv->mem[i].sys_addr && sys + len < priv->mem[i].sys_addr + priv->mem[i].size) { unsigned int offset = sys - priv->mem[i].sys_addr; @@ -241,10 +289,143 @@ static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) return va; } +static int imx_rproc_mem_alloc(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + struct device *dev = rproc->dev.parent; + void *va; + + dev_dbg(dev, "map memory: %p+%zx\n", &mem->dma, mem->len); + va = ioremap_wc(mem->dma, mem->len); + if (IS_ERR_OR_NULL(va)) { + dev_err(dev, "Unable to map memory region: %p+%zx\n", + &mem->dma, mem->len); + return -ENOMEM; + } + + /* Update memory entry va */ + mem->va = va; + + return 0; +} + +static int imx_rproc_mem_release(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma); + iounmap(mem->va); + + return 0; +} + +static int imx_rproc_prepare(struct rproc *rproc) +{ + struct imx_rproc *priv = rproc->priv; + struct device_node *np = priv->dev->of_node; + struct of_phandle_iterator it; + struct rproc_mem_entry *mem; + struct reserved_mem *rmem; + u32 da; + + /* Register associated reserved memory regions */ + of_phandle_iterator_init(&it, np, "memory-region", NULL, 0); + while (of_phandle_iterator_next(&it) == 0) { + /* + * Ignore the first memory region which will be used vdev buffer. + * No need to do extra handlings, rproc_add_virtio_dev will handle it. + */ + if (!strcmp(it.node->name, "vdev0buffer")) + continue; + + rmem = of_reserved_mem_lookup(it.node); + if (!rmem) { + dev_err(priv->dev, "unable to acquire memory-region\n"); + return -EINVAL; + } + + /* No need to translate pa to da, i.MX use same map */ + da = rmem->base; + + /* Register memory region */ + mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)rmem->base, rmem->size, da, + imx_rproc_mem_alloc, imx_rproc_mem_release, + it.node->name); + + if (mem) + rproc_coredump_add_segment(rproc, da, rmem->size); + else + return -ENOMEM; + + rproc_add_carveout(rproc, mem); + } + + return 0; +} + +static int imx_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw) +{ + int ret; + + ret = rproc_elf_load_rsc_table(rproc, fw); + if (ret) + dev_info(&rproc->dev, "No resource table in elf\n"); + + return 0; +} + +static void imx_rproc_kick(struct rproc *rproc, int vqid) +{ + struct imx_rproc *priv = rproc->priv; + int err; + __u32 mmsg; + + if (!priv->tx_ch) { + dev_err(priv->dev, "No initialized mbox tx channel\n"); + return; + } + + /* + * Send the index of the triggered virtqueue as the mu payload. + * Let remote processor know which virtqueue is used. 
+ */ + mmsg = vqid << 16; + + err = mbox_send_message(priv->tx_ch, (void *)&mmsg); + if (err < 0) + dev_err(priv->dev, "%s: failed (%d, err:%d)\n", + __func__, vqid, err); +} + +static int imx_rproc_attach(struct rproc *rproc) +{ + return 0; +} + +static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz) +{ + struct imx_rproc *priv = rproc->priv; + + /* The resource table has already been mapped in imx_rproc_addr_init */ + if (!priv->rsc_table) + return NULL; + + *table_sz = SZ_1K; + return (struct resource_table *)priv->rsc_table; +} + static const struct rproc_ops imx_rproc_ops = { + .prepare = imx_rproc_prepare, + .attach = imx_rproc_attach, .start = imx_rproc_start, .stop = imx_rproc_stop, + .kick = imx_rproc_kick, .da_to_va = imx_rproc_da_to_va, + .load = rproc_elf_load_segments, + .parse_fw = imx_rproc_parse_fw, + .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table, + .get_loaded_rsc_table = imx_rproc_get_loaded_rsc_table, + .sanity_check = rproc_elf_sanity_check, + .get_boot_addr = rproc_elf_get_boot_addr, }; static int imx_rproc_addr_init(struct imx_rproc *priv, @@ -262,13 +443,13 @@ static int imx_rproc_addr_init(struct imx_rproc *priv, if (!(att->flags & ATT_OWN)) continue; - if (b >= IMX7D_RPROC_MEM_MAX) + if (b >= IMX_RPROC_MEM_MAX) break; priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, att->sa, att->size); if (!priv->mem[b].cpu_addr) { - dev_err(dev, "devm_ioremap_resource failed\n"); + dev_err(dev, "failed to remap %#x bytes from %#x\n", att->size, att->sa); return -ENOMEM; } priv->mem[b].sys_addr = att->sa; @@ -287,29 +468,115 @@ static int imx_rproc_addr_init(struct imx_rproc *priv, struct resource res; node = of_parse_phandle(np, "memory-region", a); + /* Not map vdev region */ + if (!strcmp(node->name, "vdev")) + continue; err = of_address_to_resource(node, 0, &res); if (err) { dev_err(dev, "unable to resolve memory region\n"); return err; } - if (b >= IMX7D_RPROC_MEM_MAX) + of_node_put(node); + + if (b >= IMX_RPROC_MEM_MAX) break; - priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res); - if (IS_ERR(priv->mem[b].cpu_addr)) { - dev_err(dev, "devm_ioremap_resource failed\n"); - err = PTR_ERR(priv->mem[b].cpu_addr); - return err; + /* Not use resource version, because we might share region */ + priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, res.start, resource_size(&res)); + if (!priv->mem[b].cpu_addr) { + dev_err(dev, "failed to remap %pr\n", &res); + return -ENOMEM; } priv->mem[b].sys_addr = res.start; priv->mem[b].size = resource_size(&res); + if (!strcmp(node->name, "rsc_table")) + priv->rsc_table = priv->mem[b].cpu_addr; b++; } return 0; } +static void imx_rproc_vq_work(struct work_struct *work) +{ + struct imx_rproc *priv = container_of(work, struct imx_rproc, + rproc_work); + + rproc_vq_interrupt(priv->rproc, 0); + rproc_vq_interrupt(priv->rproc, 1); +} + +static void imx_rproc_rx_callback(struct mbox_client *cl, void *msg) +{ + struct rproc *rproc = dev_get_drvdata(cl->dev); + struct imx_rproc *priv = rproc->priv; + + queue_work(priv->workqueue, &priv->rproc_work); +} + +static int imx_rproc_xtr_mbox_init(struct rproc *rproc) +{ + struct imx_rproc *priv = rproc->priv; + struct device *dev = priv->dev; + struct mbox_client *cl; + int ret; + + if (!of_get_property(dev->of_node, "mbox-names", NULL)) + return 0; + + cl = &priv->cl; + cl->dev = dev; + cl->tx_block = true; + cl->tx_tout = 100; + cl->knows_txdone = false; + cl->rx_callback = imx_rproc_rx_callback; + + priv->tx_ch = mbox_request_channel_byname(cl, 
"tx"); + if (IS_ERR(priv->tx_ch)) { + ret = PTR_ERR(priv->tx_ch); + return dev_err_probe(cl->dev, ret, + "failed to request tx mailbox channel: %d\n", ret); + } + + priv->rx_ch = mbox_request_channel_byname(cl, "rx"); + if (IS_ERR(priv->rx_ch)) { + mbox_free_channel(priv->tx_ch); + ret = PTR_ERR(priv->rx_ch); + return dev_err_probe(cl->dev, ret, + "failed to request rx mailbox channel: %d\n", ret); + } + + return 0; +} + +static void imx_rproc_free_mbox(struct rproc *rproc) +{ + struct imx_rproc *priv = rproc->priv; + + mbox_free_channel(priv->tx_ch); + mbox_free_channel(priv->rx_ch); +} + +static int imx_rproc_detect_mode(struct imx_rproc *priv) +{ + const struct imx_rproc_dcfg *dcfg = priv->dcfg; + struct device *dev = priv->dev; + int ret; + u32 val; + + ret = regmap_read(priv->regmap, dcfg->src_reg, &val); + if (ret) { + dev_err(dev, "Failed to read src\n"); + return ret; + } + + if (!(val & dcfg->src_stop)) + priv->rproc->state = RPROC_DETACHED; + + return 0; +} + static int imx_rproc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -347,18 +614,32 @@ static int imx_rproc_probe(struct platform_device *pdev) priv->dev = dev; dev_set_drvdata(dev, rproc); + priv->workqueue = create_workqueue(dev_name(dev)); + if (!priv->workqueue) { + dev_err(dev, "cannot create workqueue\n"); + ret = -ENOMEM; + goto err_put_rproc; + } + + ret = imx_rproc_xtr_mbox_init(rproc); + if (ret) + goto err_put_wkq; ret = imx_rproc_addr_init(priv, pdev); if (ret) { dev_err(dev, "failed on imx_rproc_addr_init\n"); - goto err_put_rproc; + goto err_put_mbox; } + ret = imx_rproc_detect_mode(priv); + if (ret) + goto err_put_mbox; + priv->clk = devm_clk_get(dev, NULL); if (IS_ERR(priv->clk)) { dev_err(dev, "Failed to get clock\n"); ret = PTR_ERR(priv->clk); - goto err_put_rproc; + goto err_put_mbox; } /* @@ -368,9 +649,11 @@ static int imx_rproc_probe(struct platform_device *pdev) ret = clk_prepare_enable(priv->clk); if (ret) { dev_err(&rproc->dev, "Failed to enable clock\n"); - goto err_put_rproc; + goto err_put_mbox; } + INIT_WORK(&priv->rproc_work, imx_rproc_vq_work); + ret = rproc_add(rproc); if (ret) { dev_err(dev, "rproc_add failed\n"); @@ -381,6 +664,10 @@ static int imx_rproc_probe(struct platform_device *pdev) err_put_clk: clk_disable_unprepare(priv->clk); +err_put_mbox: + imx_rproc_free_mbox(rproc); +err_put_wkq: + destroy_workqueue(priv->workqueue); err_put_rproc: rproc_free(rproc); @@ -394,6 +681,7 @@ static int imx_rproc_remove(struct platform_device *pdev) clk_disable_unprepare(priv->clk); rproc_del(rproc); + imx_rproc_free_mbox(rproc); rproc_free(rproc); return 0; @@ -402,6 +690,8 @@ static int imx_rproc_remove(struct platform_device *pdev) static const struct of_device_id imx_rproc_of_match[] = { { .compatible = "fsl,imx7d-cm4", .data = &imx_rproc_cfg_imx7d }, { .compatible = "fsl,imx6sx-cm4", .data = &imx_rproc_cfg_imx6sx }, + { .compatible = "fsl,imx8mq-cm4", .data = &imx_rproc_cfg_imx8mq }, + { .compatible = "fsl,imx8mm-cm4", .data = &imx_rproc_cfg_imx8mq }, {}, }; MODULE_DEVICE_TABLE(of, imx_rproc_of_match); @@ -418,5 +708,5 @@ static struct platform_driver imx_rproc_driver = { module_platform_driver(imx_rproc_driver); MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("IMX6SX/7D remote processor control driver"); +MODULE_DESCRIPTION("i.MX remote processor control driver"); MODULE_AUTHOR("Oleksij Rempel <[email protected]>"); diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c index e2618c36eaab..a356738160a4 100644 --- 
a/drivers/remoteproc/ingenic_rproc.c +++ b/drivers/remoteproc/ingenic_rproc.c @@ -121,7 +121,7 @@ static void ingenic_rproc_kick(struct rproc *rproc, int vqid) writel(vqid, vpu->aux_base + REG_CORE_MSG); } -static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct vpu *vpu = rproc->priv; void __iomem *va = NULL; diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c index cd266163a65f..54781f553f4e 100644 --- a/drivers/remoteproc/keystone_remoteproc.c +++ b/drivers/remoteproc/keystone_remoteproc.c @@ -246,7 +246,7 @@ static void keystone_rproc_kick(struct rproc *rproc, int vqid) * can be used either by the remoteproc core for loading (when using kernel * remoteproc loader), or by any rpmsg bus drivers. */ -static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct keystone_rproc *ksproc = rproc->priv; void __iomem *va = NULL; diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c index ce727598c41c..9679cc26895e 100644 --- a/drivers/remoteproc/mtk_scp.c +++ b/drivers/remoteproc/mtk_scp.c @@ -272,7 +272,7 @@ static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw) } /* grab the kernel address for this device address */ - ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz); + ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL); if (!ptr) { dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz); ret = -EINVAL; @@ -509,7 +509,7 @@ static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len) return NULL; } -static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct mtk_scp *scp = (struct mtk_scp *)rproc->priv; @@ -627,7 +627,7 @@ void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr) { void *ptr; - ptr = scp_da_to_va(scp->rproc, mem_addr, 0); + ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL); if (!ptr) return ERR_PTR(-EINVAL); diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c index d94b7391bf9d..43531caa1959 100644 --- a/drivers/remoteproc/omap_remoteproc.c +++ b/drivers/remoteproc/omap_remoteproc.c @@ -728,7 +728,7 @@ out: * Return: translated virtual address in kernel memory space on success, * or NULL on failure. 
*/ -static void *omap_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *omap_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct omap_rproc *oproc = rproc->priv; int i; diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c index dcb380e868df..e5778e476245 100644 --- a/drivers/remoteproc/pru_rproc.c +++ b/drivers/remoteproc/pru_rproc.c @@ -244,8 +244,8 @@ static int pru_rproc_debug_ss_get(void *data, u64 *val) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(pru_rproc_debug_ss_fops, pru_rproc_debug_ss_get, - pru_rproc_debug_ss_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(pru_rproc_debug_ss_fops, pru_rproc_debug_ss_get, + pru_rproc_debug_ss_set, "%llu\n"); /* * Create PRU-specific debugfs entries @@ -266,12 +266,17 @@ static void pru_rproc_create_debug_entries(struct rproc *rproc) static void pru_dispose_irq_mapping(struct pru_rproc *pru) { - while (pru->evt_count--) { + if (!pru->mapped_irq) + return; + + while (pru->evt_count) { + pru->evt_count--; if (pru->mapped_irq[pru->evt_count] > 0) irq_dispose_mapping(pru->mapped_irq[pru->evt_count]); } kfree(pru->mapped_irq); + pru->mapped_irq = NULL; } /* @@ -284,7 +289,7 @@ static int pru_handle_intrmap(struct rproc *rproc) struct pru_rproc *pru = rproc->priv; struct pru_irq_rsc *rsc = pru->pru_interrupt_map; struct irq_fwspec fwspec; - struct device_node *irq_parent; + struct device_node *parent, *irq_parent; int i, ret = 0; /* not having pru_interrupt_map is not an error */ @@ -307,16 +312,31 @@ static int pru_handle_intrmap(struct rproc *rproc) pru->evt_count = rsc->num_evts; pru->mapped_irq = kcalloc(pru->evt_count, sizeof(unsigned int), GFP_KERNEL); - if (!pru->mapped_irq) + if (!pru->mapped_irq) { + pru->evt_count = 0; return -ENOMEM; + } /* * parse and fill in system event to interrupt channel and - * channel-to-host mapping + * channel-to-host mapping. The interrupt controller to be used + * for these mappings for a given PRU remoteproc is always its + * corresponding sibling PRUSS INTC node. */ - irq_parent = of_irq_find_parent(pru->dev->of_node); + parent = of_get_parent(dev_of_node(pru->dev)); + if (!parent) { + kfree(pru->mapped_irq); + pru->mapped_irq = NULL; + pru->evt_count = 0; + return -ENODEV; + } + + irq_parent = of_get_child_by_name(parent, "interrupt-controller"); + of_node_put(parent); if (!irq_parent) { kfree(pru->mapped_irq); + pru->mapped_irq = NULL; + pru->evt_count = 0; return -ENODEV; } @@ -332,16 +352,20 @@ static int pru_handle_intrmap(struct rproc *rproc) pru->mapped_irq[i] = irq_create_fwspec_mapping(&fwspec); if (!pru->mapped_irq[i]) { - dev_err(dev, "failed to get virq\n"); - ret = pru->mapped_irq[i]; + dev_err(dev, "failed to get virq for fw mapping %d: event %d chnl %d host %d\n", + i, fwspec.param[0], fwspec.param[1], + fwspec.param[2]); + ret = -EINVAL; goto map_fail; } } + of_node_put(irq_parent); return ret; map_fail: pru_dispose_irq_mapping(pru); + of_node_put(irq_parent); return ret; } @@ -387,8 +411,7 @@ static int pru_rproc_stop(struct rproc *rproc) pru_control_write_reg(pru, PRU_CTRL_CTRL, val); /* dispose irq mapping - new firmware can provide new mapping */ - if (pru->mapped_irq) - pru_dispose_irq_mapping(pru); + pru_dispose_irq_mapping(pru); return 0; } @@ -483,7 +506,7 @@ static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len) * core for any PRU client drivers. The PRU Instruction RAM access is restricted * only to the PRU loader code. 
*/ -static void *pru_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *pru_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct pru_rproc *pru = rproc->priv; diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c index e02450225e4a..8b0d8bbacd2e 100644 --- a/drivers/remoteproc/qcom_q6v5_adsp.c +++ b/drivers/remoteproc/qcom_q6v5_adsp.c @@ -281,7 +281,7 @@ static int adsp_stop(struct rproc *rproc) return ret; } -static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; int offset; diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 66106ba25ba3..423b31dfa574 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -1210,6 +1210,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc) goto release_firmware; } + if (phdr->p_filesz > phdr->p_memsz) { + dev_err(qproc->dev, + "refusing to load segment %d with p_filesz > p_memsz\n", + i); + ret = -EINVAL; + goto release_firmware; + } + ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC); if (!ptr) { dev_err(qproc->dev, @@ -1241,6 +1249,16 @@ static int q6v5_mpss_load(struct q6v5 *qproc) goto release_firmware; } + if (seg_fw->size != phdr->p_filesz) { + dev_err(qproc->dev, + "failed to load segment %d from truncated file %s\n", + i, fw_name); + ret = -EINVAL; + release_firmware(seg_fw); + memunmap(ptr); + goto release_firmware; + } + release_firmware(seg_fw); } @@ -1661,8 +1679,10 @@ static int q6v5_probe(struct platform_device *pdev) mba_image = desc->hexagon_mba_image; ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 0, &mba_image); - if (ret < 0 && ret != -EINVAL) + if (ret < 0 && ret != -EINVAL) { + dev_err(&pdev->dev, "unable to read mba firmware-name\n"); return ret; + } rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, mba_image, sizeof(*qproc)); @@ -1680,8 +1700,10 @@ static int q6v5_probe(struct platform_device *pdev) qproc->hexagon_mdt_image = "modem.mdt"; ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1, &qproc->hexagon_mdt_image); - if (ret < 0 && ret != -EINVAL) + if (ret < 0 && ret != -EINVAL) { + dev_err(&pdev->dev, "unable to read mpss firmware-name\n"); goto free_rproc; + } platform_set_drvdata(pdev, qproc); diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index e635454d6170..b921fc26cd04 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -242,7 +242,7 @@ static int adsp_stop(struct rproc *rproc) return ret; } -static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; int offset; @@ -785,6 +785,22 @@ static const struct adsp_data wcss_resource_init = { .ssctl_id = 0x12, }; +static const struct adsp_data sdx55_mpss_resource = { + .crash_reason_smem = 421, + .firmware_name = "modem.mdt", + .pas_id = 4, + .has_aggre2_clk = false, + .auto_boot = true, + .proxy_pd_names = (char*[]){ + "cx", + "mss", + NULL + }, + .ssr_name = "mpss", + .sysmon_name = "modem", + .ssctl_id = 0x22, +}; + static const struct of_device_id adsp_of_match[] = { { .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init}, { .compatible = 
"qcom,msm8996-adsp-pil", .data = &adsp_resource_init}, @@ -797,6 +813,7 @@ static const struct of_device_id adsp_of_match[] = { { .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init}, { .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init}, { .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init}, + { .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource}, { .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource}, { .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource}, { .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init}, diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c index 78ebe1168b33..20d50ec7eff1 100644 --- a/drivers/remoteproc/qcom_q6v5_wcss.c +++ b/drivers/remoteproc/qcom_q6v5_wcss.c @@ -4,13 +4,18 @@ * Copyright (C) 2014 Sony Mobile Communications AB * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> +#include <linux/of_address.h> #include <linux/of_reserved_mem.h> #include <linux/platform_device.h> #include <linux/regmap.h> +#include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/soc/qcom/mdt_loader.h> #include "qcom_common.h" @@ -24,6 +29,9 @@ #define Q6SS_GFMUX_CTL_REG 0x020 #define Q6SS_PWR_CTL_REG 0x030 #define Q6SS_MEM_PWR_CTL 0x0B0 +#define Q6SS_STRAP_ACC 0x110 +#define Q6SS_CGC_OVERRIDE 0x034 +#define Q6SS_BCR_REG 0x6000 /* AXI Halt Register Offsets */ #define AXI_HALTREQ_REG 0x0 @@ -37,14 +45,19 @@ #define Q6SS_CORE_ARES BIT(1) #define Q6SS_BUS_ARES_ENABLE BIT(2) +/* Q6SS_BRC_RESET */ +#define Q6SS_BRC_BLK_ARES BIT(0) + /* Q6SS_GFMUX_CTL */ #define Q6SS_CLK_ENABLE BIT(1) +#define Q6SS_SWITCH_CLK_SRC BIT(8) /* Q6SS_PWR_CTL */ #define Q6SS_L2DATA_STBY_N BIT(18) #define Q6SS_SLP_RET_N BIT(19) #define Q6SS_CLAMP_IO BIT(20) #define QDSS_BHS_ON BIT(21) +#define QDSS_Q6_MEMORIES GENMASK(15, 0) /* Q6SS parameters */ #define Q6SS_LDO_BYP BIT(25) @@ -53,6 +66,7 @@ #define Q6SS_CLAMP_QMC_MEM BIT(22) #define HALT_CHECK_MAX_LOOPS 200 #define Q6SS_XO_CBCR GENMASK(5, 3) +#define Q6SS_SLEEP_CBCR GENMASK(5, 2) /* Q6SS config/status registers */ #define TCSR_GLOBAL_CFG0 0x0 @@ -71,6 +85,25 @@ #define TCSR_WCSS_CLK_MASK 0x1F #define TCSR_WCSS_CLK_ENABLE 0x14 +#define MAX_HALT_REG 3 +enum { + WCSS_IPQ8074, + WCSS_QCS404, +}; + +struct wcss_data { + const char *firmware_name; + unsigned int crash_reason_smem; + u32 version; + bool aon_reset_required; + bool wcss_q6_reset_required; + const char *ssr_name; + const char *sysmon_name; + int ssctl_id; + const struct rproc_ops *ops; + bool requires_force_stop; +}; + struct q6v5_wcss { struct device *dev; @@ -82,9 +115,26 @@ struct q6v5_wcss { u32 halt_wcss; u32 halt_nc; + struct clk *xo; + struct clk *ahbfabric_cbcr_clk; + struct clk *gcc_abhs_cbcr; + struct clk *gcc_axim_cbcr; + struct clk *lcc_csr_cbcr; + struct clk *ahbs_cbcr; + struct clk *tcm_slave_cbcr; + struct clk *qdsp6ss_abhm_cbcr; + struct clk *qdsp6ss_sleep_cbcr; + struct clk *qdsp6ss_axim_cbcr; + struct clk *qdsp6ss_xo_cbcr; + struct clk *qdsp6ss_core_gfmux; + struct clk *lcc_bcr_sleep; + struct regulator *cx_supply; + struct qcom_sysmon *sysmon; + struct reset_control *wcss_aon_reset; struct reset_control *wcss_reset; struct reset_control *wcss_q6_reset; + struct reset_control *wcss_q6_bcr_reset; struct qcom_q6v5 q6v5; @@ -93,6 
+143,10 @@ struct q6v5_wcss { void *mem_region; size_t mem_size; + unsigned int crash_reason_smem; + u32 version; + bool requires_force_stop; + struct qcom_rproc_glink glink_subdev; struct qcom_rproc_ssr ssr_subdev; }; @@ -237,6 +291,207 @@ wcss_reset: return ret; } +static int q6v5_wcss_qcs404_power_on(struct q6v5_wcss *wcss) +{ + unsigned long val; + int ret, idx; + + /* Toggle the restart */ + reset_control_assert(wcss->wcss_reset); + usleep_range(200, 300); + reset_control_deassert(wcss->wcss_reset); + usleep_range(200, 300); + + /* Enable GCC_WDSP_Q6SS_AHBS_CBCR clock */ + ret = clk_prepare_enable(wcss->gcc_abhs_cbcr); + if (ret) + return ret; + + /* Remove reset to the WCNSS QDSP6SS */ + reset_control_deassert(wcss->wcss_q6_bcr_reset); + + /* Enable Q6SSTOP_AHBFABRIC_CBCR clock */ + ret = clk_prepare_enable(wcss->ahbfabric_cbcr_clk); + if (ret) + goto disable_gcc_abhs_cbcr_clk; + + /* Enable the LCCCSR CBC clock, Q6SSTOP_Q6SSTOP_LCC_CSR_CBCR clock */ + ret = clk_prepare_enable(wcss->lcc_csr_cbcr); + if (ret) + goto disable_ahbfabric_cbcr_clk; + + /* Enable the Q6AHBS CBC, Q6SSTOP_Q6SS_AHBS_CBCR clock */ + ret = clk_prepare_enable(wcss->ahbs_cbcr); + if (ret) + goto disable_csr_cbcr_clk; + + /* Enable the TCM slave CBC, Q6SSTOP_Q6SS_TCM_SLAVE_CBCR clock */ + ret = clk_prepare_enable(wcss->tcm_slave_cbcr); + if (ret) + goto disable_ahbs_cbcr_clk; + + /* Enable the Q6SS AHB master CBC, Q6SSTOP_Q6SS_AHBM_CBCR clock */ + ret = clk_prepare_enable(wcss->qdsp6ss_abhm_cbcr); + if (ret) + goto disable_tcm_slave_cbcr_clk; + + /* Enable the Q6SS AXI master CBC, Q6SSTOP_Q6SS_AXIM_CBCR clock */ + ret = clk_prepare_enable(wcss->qdsp6ss_axim_cbcr); + if (ret) + goto disable_abhm_cbcr_clk; + + /* Enable the Q6SS XO CBC */ + val = readl(wcss->reg_base + Q6SS_XO_CBCR); + val |= BIT(0); + writel(val, wcss->reg_base + Q6SS_XO_CBCR); + /* Read CLKOFF bit to go low indicating CLK is enabled */ + ret = readl_poll_timeout(wcss->reg_base + Q6SS_XO_CBCR, + val, !(val & BIT(31)), 1, + HALT_CHECK_MAX_LOOPS); + if (ret) { + dev_err(wcss->dev, + "xo cbcr enabling timed out (rc:%d)\n", ret); + return ret; + } + + writel(0, wcss->reg_base + Q6SS_CGC_OVERRIDE); + + /* Enable the QDSP6 sleep clock */ + val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR); + val |= BIT(0); + writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR); + + /* Enable the Q6 AXI clock, GCC_WDSP_Q6SS_AXIM_CBCR */ + ret = clk_prepare_enable(wcss->gcc_axim_cbcr); + if (ret) + goto disable_sleep_cbcr_clk; + + /* Assert resets, stop core */ + val = readl(wcss->reg_base + Q6SS_RESET_REG); + val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; + writel(val, wcss->reg_base + Q6SS_RESET_REG); + + /* Program the QDSP6SS PWR_CTL register */ + writel(0x01700000, wcss->reg_base + Q6SS_PWR_CTL_REG); + + writel(0x03700000, wcss->reg_base + Q6SS_PWR_CTL_REG); + + writel(0x03300000, wcss->reg_base + Q6SS_PWR_CTL_REG); + + writel(0x033C0000, wcss->reg_base + Q6SS_PWR_CTL_REG); + + /* + * Enable memories by turning on the QDSP6 memory foot/head switch, one + * bank at a time to avoid in-rush current + */ + for (idx = 28; idx >= 0; idx--) { + writel((readl(wcss->reg_base + Q6SS_MEM_PWR_CTL) | + (1 << idx)), wcss->reg_base + Q6SS_MEM_PWR_CTL); + } + + writel(0x031C0000, wcss->reg_base + Q6SS_PWR_CTL_REG); + writel(0x030C0000, wcss->reg_base + Q6SS_PWR_CTL_REG); + + val = readl(wcss->reg_base + Q6SS_RESET_REG); + val &= ~Q6SS_CORE_ARES; + writel(val, wcss->reg_base + Q6SS_RESET_REG); + + /* Enable the Q6 core clock at the GFM, Q6SSTOP_QDSP6SS_GFMUX_CTL */ +
val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG); + val |= Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC; + writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG); + + /* Enable sleep clock branch needed for BCR circuit */ + ret = clk_prepare_enable(wcss->lcc_bcr_sleep); + if (ret) + goto disable_core_gfmux_clk; + + return 0; + +disable_core_gfmux_clk: + val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG); + val &= ~(Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC); + writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG); + clk_disable_unprepare(wcss->gcc_axim_cbcr); +disable_sleep_cbcr_clk: + val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR); + val &= ~Q6SS_CLK_ENABLE; + writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR); + val = readl(wcss->reg_base + Q6SS_XO_CBCR); + val &= ~Q6SS_CLK_ENABLE; + writel(val, wcss->reg_base + Q6SS_XO_CBCR); + clk_disable_unprepare(wcss->qdsp6ss_axim_cbcr); +disable_abhm_cbcr_clk: + clk_disable_unprepare(wcss->qdsp6ss_abhm_cbcr); +disable_tcm_slave_cbcr_clk: + clk_disable_unprepare(wcss->tcm_slave_cbcr); +disable_ahbs_cbcr_clk: + clk_disable_unprepare(wcss->ahbs_cbcr); +disable_csr_cbcr_clk: + clk_disable_unprepare(wcss->lcc_csr_cbcr); +disable_ahbfabric_cbcr_clk: + clk_disable_unprepare(wcss->ahbfabric_cbcr_clk); +disable_gcc_abhs_cbcr_clk: + clk_disable_unprepare(wcss->gcc_abhs_cbcr); + + return ret; +} + +static inline int q6v5_wcss_qcs404_reset(struct q6v5_wcss *wcss) +{ + unsigned long val; + + writel(0x80800000, wcss->reg_base + Q6SS_STRAP_ACC); + + /* Start core execution */ + val = readl(wcss->reg_base + Q6SS_RESET_REG); + val &= ~Q6SS_STOP_CORE; + writel(val, wcss->reg_base + Q6SS_RESET_REG); + + return 0; +} + +static int q6v5_qcs404_wcss_start(struct rproc *rproc) +{ + struct q6v5_wcss *wcss = rproc->priv; + int ret; + + ret = clk_prepare_enable(wcss->xo); + if (ret) + return ret; + + ret = regulator_enable(wcss->cx_supply); + if (ret) + goto disable_xo_clk; + + qcom_q6v5_prepare(&wcss->q6v5); + + ret = q6v5_wcss_qcs404_power_on(wcss); + if (ret) { + dev_err(wcss->dev, "wcss clk_enable failed\n"); + goto disable_cx_supply; + } + + writel(rproc->bootaddr >> 4, wcss->reg_base + Q6SS_RST_EVB); + + q6v5_wcss_qcs404_reset(wcss); + + ret = qcom_q6v5_wait_for_start(&wcss->q6v5, 5 * HZ); + if (ret == -ETIMEDOUT) { + dev_err(wcss->dev, "start timed out\n"); + goto disable_cx_supply; + } + + return 0; + +disable_cx_supply: + regulator_disable(wcss->cx_supply); +disable_xo_clk: + clk_disable_unprepare(wcss->xo); + + return ret; +} + static void q6v5_wcss_halt_axi_port(struct q6v5_wcss *wcss, struct regmap *halt_map, u32 offset) @@ -271,6 +526,70 @@ static void q6v5_wcss_halt_axi_port(struct q6v5_wcss *wcss, regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); } +static int q6v5_qcs404_wcss_shutdown(struct q6v5_wcss *wcss) +{ + unsigned long val; + int ret; + + q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_wcss); + + /* assert clamps to avoid MX current inrush */ + val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG); + val |= (Q6SS_CLAMP_IO | Q6SS_CLAMP_WL | Q6SS_CLAMP_QMC_MEM); + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + + /* Disable memories by turning off memory foot/headswitch */ + writel((readl(wcss->reg_base + Q6SS_MEM_PWR_CTL) & + ~QDSS_Q6_MEMORIES), + wcss->reg_base + Q6SS_MEM_PWR_CTL); + + /* Clear the BHS_ON bit */ + val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG); + val &= ~Q6SS_BHS_ON; + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + + clk_disable_unprepare(wcss->ahbfabric_cbcr_clk); + clk_disable_unprepare(wcss->lcc_csr_cbcr); + 
clk_disable_unprepare(wcss->tcm_slave_cbcr); + clk_disable_unprepare(wcss->qdsp6ss_abhm_cbcr); + clk_disable_unprepare(wcss->qdsp6ss_axim_cbcr); + + val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR); + val &= ~BIT(0); + writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR); + + val = readl(wcss->reg_base + Q6SS_XO_CBCR); + val &= ~BIT(0); + writel(val, wcss->reg_base + Q6SS_XO_CBCR); + + clk_disable_unprepare(wcss->ahbs_cbcr); + clk_disable_unprepare(wcss->lcc_bcr_sleep); + + val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG); + val &= ~(Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC); + writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG); + + clk_disable_unprepare(wcss->gcc_abhs_cbcr); + + ret = reset_control_assert(wcss->wcss_reset); + if (ret) { + dev_err(wcss->dev, "wcss_reset failed\n"); + return ret; + } + usleep_range(200, 300); + + ret = reset_control_deassert(wcss->wcss_reset); + if (ret) { + dev_err(wcss->dev, "wcss_reset failed\n"); + return ret; + } + usleep_range(200, 300); + + clk_disable_unprepare(wcss->gcc_axim_cbcr); + + return 0; +} + static int q6v5_wcss_powerdown(struct q6v5_wcss *wcss) { int ret; @@ -390,27 +709,35 @@ static int q6v5_wcss_stop(struct rproc *rproc) int ret; /* WCSS powerdown */ - ret = qcom_q6v5_request_stop(&wcss->q6v5, NULL); - if (ret == -ETIMEDOUT) { - dev_err(wcss->dev, "timed out on wait\n"); - return ret; + if (wcss->requires_force_stop) { + ret = qcom_q6v5_request_stop(&wcss->q6v5, NULL); + if (ret == -ETIMEDOUT) { + dev_err(wcss->dev, "timed out on wait\n"); + return ret; + } } - ret = q6v5_wcss_powerdown(wcss); - if (ret) - return ret; - - /* Q6 Power down */ - ret = q6v5_q6_powerdown(wcss); - if (ret) - return ret; + if (wcss->version == WCSS_QCS404) { + ret = q6v5_qcs404_wcss_shutdown(wcss); + if (ret) + return ret; + } else { + ret = q6v5_wcss_powerdown(wcss); + if (ret) + return ret; + + /* Q6 Power down */ + ret = q6v5_q6_powerdown(wcss); + if (ret) + return ret; + } qcom_q6v5_unprepare(&wcss->q6v5); return 0; } -static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct q6v5_wcss *wcss = rproc->priv; int offset; @@ -438,7 +765,7 @@ static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw) return ret; } -static const struct rproc_ops q6v5_wcss_ops = { +static const struct rproc_ops q6v5_wcss_ipq8074_ops = { .start = q6v5_wcss_start, .stop = q6v5_wcss_stop, .da_to_va = q6v5_wcss_da_to_va, @@ -446,26 +773,46 @@ static const struct rproc_ops q6v5_wcss_ops = { .get_boot_addr = rproc_elf_get_boot_addr, }; -static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss) +static const struct rproc_ops q6v5_wcss_qcs404_ops = { + .start = q6v5_qcs404_wcss_start, + .stop = q6v5_wcss_stop, + .da_to_va = q6v5_wcss_da_to_va, + .load = q6v5_wcss_load, + .get_boot_addr = rproc_elf_get_boot_addr, + .parse_fw = qcom_register_dump_segments, +}; + +static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss, + const struct wcss_data *desc) { struct device *dev = wcss->dev; - wcss->wcss_aon_reset = devm_reset_control_get(dev, "wcss_aon_reset"); - if (IS_ERR(wcss->wcss_aon_reset)) { - dev_err(wcss->dev, "unable to acquire wcss_aon_reset\n"); - return PTR_ERR(wcss->wcss_aon_reset); + if (desc->aon_reset_required) { + wcss->wcss_aon_reset = devm_reset_control_get_exclusive(dev, "wcss_aon_reset"); + if (IS_ERR(wcss->wcss_aon_reset)) { + dev_err(wcss->dev, "fail to acquire wcss_aon_reset\n"); + return PTR_ERR(wcss->wcss_aon_reset); + } } - wcss->wcss_reset = 
devm_reset_control_get(dev, "wcss_reset"); + wcss->wcss_reset = devm_reset_control_get_exclusive(dev, "wcss_reset"); if (IS_ERR(wcss->wcss_reset)) { dev_err(wcss->dev, "unable to acquire wcss_reset\n"); return PTR_ERR(wcss->wcss_reset); } - wcss->wcss_q6_reset = devm_reset_control_get(dev, "wcss_q6_reset"); - if (IS_ERR(wcss->wcss_q6_reset)) { - dev_err(wcss->dev, "unable to acquire wcss_q6_reset\n"); - return PTR_ERR(wcss->wcss_q6_reset); + if (desc->wcss_q6_reset_required) { + wcss->wcss_q6_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_reset"); + if (IS_ERR(wcss->wcss_q6_reset)) { + dev_err(wcss->dev, "unable to acquire wcss_q6_reset\n"); + return PTR_ERR(wcss->wcss_q6_reset); + } + } + + wcss->wcss_q6_bcr_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_bcr_reset"); + if (IS_ERR(wcss->wcss_q6_bcr_reset)) { + dev_err(wcss->dev, "unable to acquire wcss_q6_bcr_reset\n"); + return PTR_ERR(wcss->wcss_q6_bcr_reset); } return 0; @@ -474,35 +821,48 @@ static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss) static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss, struct platform_device *pdev) { - struct of_phandle_args args; + unsigned int halt_reg[MAX_HALT_REG] = {0}; + struct device_node *syscon; struct resource *res; int ret; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6"); - wcss->reg_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(wcss->reg_base)) - return PTR_ERR(wcss->reg_base); + wcss->reg_base = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + if (!wcss->reg_base) + return -ENOMEM; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb"); - wcss->rmb_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(wcss->rmb_base)) - return PTR_ERR(wcss->rmb_base); + if (wcss->version == WCSS_IPQ8074) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb"); + wcss->rmb_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(wcss->rmb_base)) + return PTR_ERR(wcss->rmb_base); + } - ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, - "qcom,halt-regs", 3, 0, &args); - if (ret < 0) { + syscon = of_parse_phandle(pdev->dev.of_node, + "qcom,halt-regs", 0); + if (!syscon) { dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n"); return -EINVAL; } - wcss->halt_map = syscon_node_to_regmap(args.np); - of_node_put(args.np); + wcss->halt_map = syscon_node_to_regmap(syscon); + of_node_put(syscon); if (IS_ERR(wcss->halt_map)) return PTR_ERR(wcss->halt_map); - wcss->halt_q6 = args.args[0]; - wcss->halt_wcss = args.args[1]; - wcss->halt_nc = args.args[2]; + ret = of_property_read_variable_u32_array(pdev->dev.of_node, + "qcom,halt-regs", + halt_reg, 0, + MAX_HALT_REG); + if (ret < 0) { + dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n"); + return -EINVAL; + } + + wcss->halt_q6 = halt_reg[0]; + wcss->halt_wcss = halt_reg[1]; + wcss->halt_nc = halt_reg[2]; return 0; } @@ -536,14 +896,120 @@ static int q6v5_alloc_memory_region(struct q6v5_wcss *wcss) return 0; } +static int q6v5_wcss_init_clock(struct q6v5_wcss *wcss) +{ + int ret; + + wcss->xo = devm_clk_get(wcss->dev, "xo"); + if (IS_ERR(wcss->xo)) { + ret = PTR_ERR(wcss->xo); + if (ret != -EPROBE_DEFER) + dev_err(wcss->dev, "failed to get xo clock\n"); + return ret; + } + + wcss->gcc_abhs_cbcr = devm_clk_get(wcss->dev, "gcc_abhs_cbcr"); + if (IS_ERR(wcss->gcc_abhs_cbcr)) { + ret = PTR_ERR(wcss->gcc_abhs_cbcr); + if (ret != -EPROBE_DEFER) + dev_err(wcss->dev, "failed to get gcc abhs clock\n"); + return ret; + } + + wcss->gcc_axim_cbcr = devm_clk_get(wcss->dev, 
"gcc_axim_cbcr"); + if (IS_ERR(wcss->gcc_axim_cbcr)) { + ret = PTR_ERR(wcss->gcc_axim_cbcr); + if (ret != -EPROBE_DEFER) + dev_err(wcss->dev, "failed to get gcc axim clock\n"); + return ret; + } + + wcss->ahbfabric_cbcr_clk = devm_clk_get(wcss->dev, + "lcc_ahbfabric_cbc"); + if (IS_ERR(wcss->ahbfabric_cbcr_clk)) { + ret = PTR_ERR(wcss->ahbfabric_cbcr_clk); + if (ret != -EPROBE_DEFER) + dev_err(wcss->dev, "failed to get ahbfabric clock\n"); + return ret; + } + + wcss->lcc_csr_cbcr = devm_clk_get(wcss->dev, "tcsr_lcc_cbc"); + if (IS_ERR(wcss->lcc_csr_cbcr)) { + ret = PTR_ERR(wcss->lcc_csr_cbcr); + if (ret != -EPROBE_DEFER) + dev_err(wcss->dev, "failed to get csr cbcr clk\n"); + return ret; + } + + wcss->ahbs_cbcr = devm_clk_get(wcss->dev, + "lcc_abhs_cbc"); + if (IS_ERR(wcss->ahbs_cbcr)) { + ret = PTR_ERR(wcss->ahbs_cbcr); + if (ret != -EPROBE_DEFER) + dev_err(wcss->dev, "failed to get ahbs_cbcr clk\n"); + return ret; + } + + wcss->tcm_slave_cbcr = devm_clk_get(wcss->dev, + "lcc_tcm_slave_cbc"); + if (IS_ERR(wcss->tcm_slave_cbcr)) { + ret = PTR_ERR(wcss->tcm_slave_cbcr); + if (ret != -EPROBE_DEFER) + dev_err(wcss->dev, "failed to get tcm cbcr clk\n"); + return ret; + } + + wcss->qdsp6ss_abhm_cbcr = devm_clk_get(wcss->dev, "lcc_abhm_cbc"); + if (IS_ERR(wcss->qdsp6ss_abhm_cbcr)) { + ret = PTR_ERR(wcss->qdsp6ss_abhm_cbcr); + if (ret != -EPROBE_DEFER) + dev_err(wcss->dev, "failed to get abhm cbcr clk\n"); + return ret; + } + + wcss->qdsp6ss_axim_cbcr = devm_clk_get(wcss->dev, "lcc_axim_cbc"); + if (IS_ERR(wcss->qdsp6ss_axim_cbcr)) { + ret = PTR_ERR(wcss->qdsp6ss_axim_cbcr); + if (ret != -EPROBE_DEFER) + dev_err(wcss->dev, "failed to get axim cbcr clk\n"); + return ret; + } + + wcss->lcc_bcr_sleep = devm_clk_get(wcss->dev, "lcc_bcr_sleep"); + if (IS_ERR(wcss->lcc_bcr_sleep)) { + ret = PTR_ERR(wcss->lcc_bcr_sleep); + if (ret != -EPROBE_DEFER) + dev_err(wcss->dev, "failed to get bcr cbcr clk\n"); + return ret; + } + + return 0; +} + +static int q6v5_wcss_init_regulator(struct q6v5_wcss *wcss) +{ + wcss->cx_supply = devm_regulator_get(wcss->dev, "cx"); + if (IS_ERR(wcss->cx_supply)) + return PTR_ERR(wcss->cx_supply); + + regulator_set_load(wcss->cx_supply, 100000); + + return 0; +} + static int q6v5_wcss_probe(struct platform_device *pdev) { + const struct wcss_data *desc; struct q6v5_wcss *wcss; struct rproc *rproc; int ret; - rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_wcss_ops, - "IPQ8074/q6_fw.mdt", sizeof(*wcss)); + desc = device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + rproc = rproc_alloc(&pdev->dev, pdev->name, desc->ops, + desc->firmware_name, sizeof(*wcss)); + if (!rproc) { dev_err(&pdev->dev, "failed to allocate rproc\n"); return -ENOMEM; @@ -551,6 +1017,9 @@ static int q6v5_wcss_probe(struct platform_device *pdev) wcss = rproc->priv; wcss->dev = &pdev->dev; + wcss->version = desc->version; + + wcss->requires_force_stop = desc->requires_force_stop; ret = q6v5_wcss_init_mmio(wcss, pdev); if (ret) @@ -560,17 +1030,33 @@ static int q6v5_wcss_probe(struct platform_device *pdev) if (ret) goto free_rproc; - ret = q6v5_wcss_init_reset(wcss); + if (wcss->version == WCSS_QCS404) { + ret = q6v5_wcss_init_clock(wcss); + if (ret) + goto free_rproc; + + ret = q6v5_wcss_init_regulator(wcss); + if (ret) + goto free_rproc; + } + + ret = q6v5_wcss_init_reset(wcss, desc); if (ret) goto free_rproc; - ret = qcom_q6v5_init(&wcss->q6v5, pdev, rproc, WCSS_CRASH_REASON, NULL); + ret = qcom_q6v5_init(&wcss->q6v5, pdev, rproc, desc->crash_reason_smem, + 
NULL); if (ret) goto free_rproc; qcom_add_glink_subdev(rproc, &wcss->glink_subdev, "q6wcss"); qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss"); + if (desc->ssctl_id) + wcss->sysmon = qcom_add_sysmon_subdev(rproc, + desc->sysmon_name, + desc->ssctl_id); + ret = rproc_add(rproc); if (ret) goto free_rproc; @@ -595,8 +1081,31 @@ static int q6v5_wcss_remove(struct platform_device *pdev) return 0; } +static const struct wcss_data wcss_ipq8074_res_init = { + .firmware_name = "IPQ8074/q6_fw.mdt", + .crash_reason_smem = WCSS_CRASH_REASON, + .aon_reset_required = true, + .wcss_q6_reset_required = true, + .ops = &q6v5_wcss_ipq8074_ops, + .requires_force_stop = true, +}; + +static const struct wcss_data wcss_qcs404_res_init = { + .crash_reason_smem = WCSS_CRASH_REASON, + .firmware_name = "wcnss.mdt", + .version = WCSS_QCS404, + .aon_reset_required = false, + .wcss_q6_reset_required = false, + .ssr_name = "mpss", + .sysmon_name = "wcnss", + .ssctl_id = 0x12, + .ops = &q6v5_wcss_qcs404_ops, + .requires_force_stop = false, +}; + static const struct of_device_id q6v5_wcss_of_match[] = { - { .compatible = "qcom,ipq8074-wcss-pil" }, + { .compatible = "qcom,ipq8074-wcss-pil", .data = &wcss_ipq8074_res_init }, + { .compatible = "qcom,qcs404-wcss-pil", .data = &wcss_qcs404_res_init }, { }, }; MODULE_DEVICE_TABLE(of, q6v5_wcss_of_match); diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index 2a6a23cb14ca..5f3455aa7e0e 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -320,7 +320,7 @@ static int wcnss_stop(struct rproc *rproc) return ret; } -static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv; int offset; @@ -530,6 +530,7 @@ static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss) static int wcnss_probe(struct platform_device *pdev) { + const char *fw_name = WCNSS_FIRMWARE_NAME; const struct wcnss_data *data; struct qcom_wcnss *wcnss; struct resource *res; @@ -547,8 +548,13 @@ static int wcnss_probe(struct platform_device *pdev) return -ENXIO; } + ret = of_property_read_string(pdev->dev.of_node, "firmware-name", + &fw_name); + if (ret < 0 && ret != -EINVAL) + return ret; + rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops, - WCNSS_FIRMWARE_NAME, sizeof(*wcnss)); + fw_name, sizeof(*wcnss)); if (!rproc) { dev_err(&pdev->dev, "unable to allocate remoteproc\n"); return -ENOMEM; diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c index b19ea3057bde..0b8a84c04f76 100644 --- a/drivers/remoteproc/remoteproc_cdev.c +++ b/drivers/remoteproc/remoteproc_cdev.c @@ -32,15 +32,22 @@ static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_ return -EFAULT; if (!strncmp(cmd, "start", len)) { - if (rproc->state == RPROC_RUNNING) + if (rproc->state == RPROC_RUNNING || + rproc->state == RPROC_ATTACHED) return -EBUSY; ret = rproc_boot(rproc); } else if (!strncmp(cmd, "stop", len)) { - if (rproc->state != RPROC_RUNNING) + if (rproc->state != RPROC_RUNNING && + rproc->state != RPROC_ATTACHED) return -EINVAL; rproc_shutdown(rproc); + } else if (!strncmp(cmd, "detach", len)) { + if (rproc->state != RPROC_ATTACHED) + return -EINVAL; + + ret = rproc_detach(rproc); } else { dev_err(&rproc->dev, "Unrecognized option\n"); ret = -EINVAL; @@ -79,11 +86,17 @@ static long rproc_device_ioctl(struct file *filp, unsigned int ioctl, unsigned l 
static int rproc_cdev_release(struct inode *inode, struct file *filp) { struct rproc *rproc = container_of(inode->i_cdev, struct rproc, cdev); + int ret = 0; - if (rproc->cdev_put_on_release && rproc->state == RPROC_RUNNING) + if (!rproc->cdev_put_on_release) + return 0; + + if (rproc->state == RPROC_RUNNING) rproc_shutdown(rproc); + else if (rproc->state == RPROC_ATTACHED) + ret = rproc_detach(rproc); - return 0; + return ret; } static const struct file_operations rproc_fops = { diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index ab150765d124..626a6b90fba2 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -189,13 +189,13 @@ EXPORT_SYMBOL(rproc_va_to_pa); * here the output of the DMA API for the carveouts, which should be more * correct. */ -void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct rproc_mem_entry *carveout; void *ptr = NULL; if (rproc->ops->da_to_va) { - ptr = rproc->ops->da_to_va(rproc, da, len); + ptr = rproc->ops->da_to_va(rproc, da, len, is_iomem); if (ptr) goto out; } @@ -217,6 +217,9 @@ void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) ptr = carveout->va + offset; + if (is_iomem) + *is_iomem = carveout->is_iomem; + break; } @@ -482,7 +485,7 @@ static int copy_dma_range_map(struct device *to, struct device *from) /** * rproc_handle_vdev() - handle a vdev fw resource * @rproc: the remote processor - * @rsc: the vring resource descriptor + * @ptr: the vring resource descriptor * @offset: offset of the resource entry * @avail: size of available data (for sanity checking the image) * @@ -507,9 +510,10 @@ static int copy_dma_range_map(struct device *to, struct device *from) * * Returns 0 on success, or an appropriate error code otherwise */ -static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, +static int rproc_handle_vdev(struct rproc *rproc, void *ptr, int offset, int avail) { + struct fw_rsc_vdev *rsc = ptr; struct device *dev = &rproc->dev; struct rproc_vdev *rvdev; int i, ret; @@ -627,7 +631,7 @@ void rproc_vdev_release(struct kref *ref) /** * rproc_handle_trace() - handle a shared trace buffer resource * @rproc: the remote processor - * @rsc: the trace resource descriptor + * @ptr: the trace resource descriptor * @offset: offset of the resource entry * @avail: size of available data (for sanity checking the image) * @@ -641,9 +645,10 @@ void rproc_vdev_release(struct kref *ref) * * Returns 0 on success, or an appropriate error code otherwise */ -static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, +static int rproc_handle_trace(struct rproc *rproc, void *ptr, int offset, int avail) { + struct fw_rsc_trace *rsc = ptr; struct rproc_debug_trace *trace; struct device *dev = &rproc->dev; char name[15]; @@ -693,7 +698,7 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, /** * rproc_handle_devmem() - handle devmem resource entry * @rproc: remote processor handle - * @rsc: the devmem resource entry + * @ptr: the devmem resource entry * @offset: offset of the resource entry * @avail: size of available data (for sanity checking the image) * @@ -716,9 +721,10 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, * and not allow firmwares to request access to physical addresses that * are outside those ranges. 
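The rproc_handle_*() signature change running through these hunks exists so that the handler table no longer casts function pointers: calling a function through a pointer of a mismatched type is undefined behavior in C, so each handler now takes void * and converts the data pointer internally. A minimal sketch of the shape, with a hypothetical handler that is not part of the series:

#include <linux/remoteproc.h>

static int demo_handle_rsc(struct rproc *rproc, void *ptr, int offset,
                           int avail)
{
        struct fw_rsc_trace *rsc = ptr; /* cast the data pointer, not the function */

        if (sizeof(*rsc) > avail)       /* resource must fit in the firmware image */
                return -EINVAL;

        return 0;
}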
*/ -static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc, +static int rproc_handle_devmem(struct rproc *rproc, void *ptr, int offset, int avail) { + struct fw_rsc_devmem *rsc = ptr; struct rproc_mem_entry *mapping; struct device *dev = &rproc->dev; int ret; @@ -896,7 +902,7 @@ static int rproc_release_carveout(struct rproc *rproc, /** * rproc_handle_carveout() - handle phys contig memory allocation requests * @rproc: rproc handle - * @rsc: the resource entry + * @ptr: the resource entry * @offset: offset of the resource entry * @avail: size of available data (for image validation) * @@ -913,9 +919,9 @@ static int rproc_release_carveout(struct rproc *rproc, * pressure is important; it may have a substantial impact on performance. */ static int rproc_handle_carveout(struct rproc *rproc, - struct fw_rsc_carveout *rsc, - int offset, int avail) + void *ptr, int offset, int avail) { + struct fw_rsc_carveout *rsc = ptr; struct rproc_mem_entry *carveout; struct device *dev = &rproc->dev; @@ -1097,10 +1103,10 @@ EXPORT_SYMBOL(rproc_of_parse_firmware); * enum fw_resource_type. */ static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = { - [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout, - [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem, - [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace, - [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev, + [RSC_CARVEOUT] = rproc_handle_carveout, + [RSC_DEVMEM] = rproc_handle_devmem, + [RSC_TRACE] = rproc_handle_trace, + [RSC_VDEV] = rproc_handle_vdev, }; /* handle firmware resource entries before booting the remote processor */ @@ -1416,7 +1422,7 @@ reset_table_ptr: return ret; } -static int rproc_attach(struct rproc *rproc) +static int __rproc_attach(struct rproc *rproc) { struct device *dev = &rproc->dev; int ret; @@ -1444,7 +1450,7 @@ static int rproc_attach(struct rproc *rproc) goto stop_rproc; } - rproc->state = RPROC_RUNNING; + rproc->state = RPROC_ATTACHED; dev_info(dev, "remote processor %s is now attached\n", rproc->name); @@ -1537,11 +1543,149 @@ disable_iommu: return ret; } +static int rproc_set_rsc_table(struct rproc *rproc) +{ + struct resource_table *table_ptr; + struct device *dev = &rproc->dev; + size_t table_sz; + int ret; + + table_ptr = rproc_get_loaded_rsc_table(rproc, &table_sz); + if (!table_ptr) { + /* Not having a resource table is acceptable */ + return 0; + } + + if (IS_ERR(table_ptr)) { + ret = PTR_ERR(table_ptr); + dev_err(dev, "can't load resource table: %d\n", ret); + return ret; + } + + /* + * If it is possible to detach the remote processor, keep an untouched + * copy of the resource table. That way we can start fresh again when + * the remote processor is re-attached, that is: + * + * DETACHED -> ATTACHED -> DETACHED -> ATTACHED + * + * Free'd in rproc_reset_rsc_table_on_detach() and + * rproc_reset_rsc_table_on_stop(). + */ + if (rproc->ops->detach) { + rproc->clean_table = kmemdup(table_ptr, table_sz, GFP_KERNEL); + if (!rproc->clean_table) + return -ENOMEM; + } else { + rproc->clean_table = NULL; + } + + rproc->cached_table = NULL; + rproc->table_ptr = table_ptr; + rproc->table_sz = table_sz; + + return 0; +} + +static int rproc_reset_rsc_table_on_detach(struct rproc *rproc) +{ + struct resource_table *table_ptr; + + /* A resource table was never retrieved, nothing to do here */ + if (!rproc->table_ptr) + return 0; + + /* + * If we made it to this point a clean_table _must_ have been + * allocated in rproc_set_rsc_table(). 
If one isn't present + * something went really wrong and we must complain. + */ + if (WARN_ON(!rproc->clean_table)) + return -EINVAL; + + /* Remember where the external entity installed the resource table */ + table_ptr = rproc->table_ptr; + + /* + * If we made it here the remote processor was started by another + * entity and a cache table doesn't exist. As such make a copy of + * the resource table currently used by the remote processor and + * use that for the rest of the shutdown process. The memory + * allocated here is free'd in rproc_detach(). + */ + rproc->cached_table = kmemdup(rproc->table_ptr, + rproc->table_sz, GFP_KERNEL); + if (!rproc->cached_table) + return -ENOMEM; + + /* + * Use a copy of the resource table for the remainder of the + * shutdown process. + */ + rproc->table_ptr = rproc->cached_table; + + /* + * Reset the memory area where the firmware loaded the resource table + * to its original value. That way when we re-attach the remote + * processor the resource table is clean and ready to be used again. + */ + memcpy(table_ptr, rproc->clean_table, rproc->table_sz); + + /* + * The clean resource table is no longer needed. Allocated in + * rproc_set_rsc_table(). + */ + kfree(rproc->clean_table); + + return 0; +} + +static int rproc_reset_rsc_table_on_stop(struct rproc *rproc) +{ + /* A resource table was never retrieved, nothing to do here */ + if (!rproc->table_ptr) + return 0; + + /* + * If a cache table exists the remote processor was started by + * the remoteproc core. That cache table should be used for + * the rest of the shutdown process. + */ + if (rproc->cached_table) + goto out; + + /* + * If we made it here the remote processor was started by another + * entity and a cache table doesn't exist. As such make a copy of + * the resource table currently used by the remote processor and + * use that for the rest of the shutdown process. The memory + * allocated here is free'd in rproc_shutdown(). + */ + rproc->cached_table = kmemdup(rproc->table_ptr, + rproc->table_sz, GFP_KERNEL); + if (!rproc->cached_table) + return -ENOMEM; + + /* + * Since the remote processor is being switched off the clean table + * won't be needed. Allocated in rproc_set_rsc_table(). + */ + kfree(rproc->clean_table); + +out: + /* + * Use a copy of the resource table for the remainder of the + * shutdown process. + */ + rproc->table_ptr = rproc->cached_table; + return 0; +} + /* * Attach to remote processor - similar to rproc_fw_boot() but without * the steps that deal with the firmware image. 
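The clean_table logic above boils down to a snapshot/restore pair around the table that lives in device memory: take a pristine copy when first attaching, and write it back when detaching so a re-attach starts from known contents. Stripped to its essentials (illustrative only, helper names hypothetical):

#include <linux/slab.h>
#include <linux/string.h>

/* at attach: remember the boot-time content of the live table */
static void *demo_snapshot(void *live, size_t sz)
{
        return kmemdup(live, sz, GFP_KERNEL);
}

/* at detach: give the device memory its boot-time content back */
static void demo_restore(void *live, void *clean, size_t sz)
{
        memcpy(live, clean, sz);
        kfree(clean);
}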
*/ -static int rproc_actuate(struct rproc *rproc) +static int rproc_attach(struct rproc *rproc) { struct device *dev = &rproc->dev; int ret; @@ -1556,6 +1700,19 @@ static int rproc_actuate(struct rproc *rproc) return ret; } + /* Do anything that is needed to boot the remote processor */ + ret = rproc_prepare_device(rproc); + if (ret) { + dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret); + goto disable_iommu; + } + + ret = rproc_set_rsc_table(rproc); + if (ret) { + dev_err(dev, "can't load resource table: %d\n", ret); + goto unprepare_device; + } + /* reset max_notifyid */ rproc->max_notifyid = -1; @@ -1570,7 +1727,7 @@ static int rproc_actuate(struct rproc *rproc) ret = rproc_handle_resources(rproc, rproc_loading_handlers); if (ret) { dev_err(dev, "Failed to process resources: %d\n", ret); - goto disable_iommu; + goto unprepare_device; } /* Allocate carveout resources associated to rproc */ @@ -1581,7 +1738,7 @@ static int rproc_actuate(struct rproc *rproc) goto clean_up_resources; } - ret = rproc_attach(rproc); + ret = __rproc_attach(rproc); if (ret) goto clean_up_resources; @@ -1589,6 +1746,9 @@ static int rproc_actuate(struct rproc *rproc) clean_up_resources: rproc_resource_cleanup(rproc); +unprepare_device: + /* release HW resources if needed */ + rproc_unprepare_device(rproc); disable_iommu: rproc_disable_iommu(rproc); return ret; @@ -1642,11 +1802,20 @@ static int rproc_stop(struct rproc *rproc, bool crashed) struct device *dev = &rproc->dev; int ret; + /* No need to continue if a stop() operation has not been provided */ + if (!rproc->ops->stop) + return -EINVAL; + /* Stop any subdevices for the remote processor */ rproc_stop_subdevices(rproc, crashed); /* the installed resource table is no longer accessible */ - rproc->table_ptr = rproc->cached_table; + ret = rproc_reset_rsc_table_on_stop(rproc); + if (ret) { + dev_err(dev, "can't reset resource table: %d\n", ret); + return ret; + } + /* power off the remote processor */ ret = rproc->ops->stop(rproc); @@ -1659,19 +1828,48 @@ static int rproc_stop(struct rproc *rproc, bool crashed) rproc->state = RPROC_OFFLINE; - /* - * The remote processor has been stopped and is now offline, which means - * that the next time it is brought back online the remoteproc core will - * be responsible to load its firmware. As such it is no longer - * autonomous. 
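With the new RPROC_ATTACHED state assigned in __rproc_attach() above, code that used to test only RPROC_RUNNING now has to accept both states, as the cdev, sysfs and panic-handler hunks in this series do. A helper capturing the predicate might look like this (hypothetical; the series open-codes the check):

static bool demo_rproc_is_operational(const struct rproc *rproc)
{
        /* self-booted or attached-to: the firmware is executing either way */
        return rproc->state == RPROC_RUNNING ||
               rproc->state == RPROC_ATTACHED;
}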
- */ - rproc->autonomous = false; - dev_info(dev, "stopped remote processor %s\n", rproc->name); return 0; } +/* + * __rproc_detach(): Does the opposite of __rproc_attach() + */ +static int __rproc_detach(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + int ret; + + /* No need to continue if a detach() operation has not been provided */ + if (!rproc->ops->detach) + return -EINVAL; + + /* Stop any subdevices for the remote processor */ + rproc_stop_subdevices(rproc, false); + + /* the installed resource table is no longer accessible */ + ret = rproc_reset_rsc_table_on_detach(rproc); + if (ret) { + dev_err(dev, "can't reset resource table: %d\n", ret); + return ret; + } + + /* Tell the remote processor the core isn't available anymore */ + ret = rproc->ops->detach(rproc); + if (ret) { + dev_err(dev, "can't detach from rproc: %d\n", ret); + return ret; + } + + rproc_unprepare_subdevices(rproc); + + rproc->state = RPROC_DETACHED; + + dev_info(dev, "detached remote processor %s\n", rproc->name); + + return 0; +} /** * rproc_trigger_recovery() - recover a remoteproc @@ -1802,7 +2000,7 @@ int rproc_boot(struct rproc *rproc) if (rproc->state == RPROC_DETACHED) { dev_info(dev, "attaching to %s\n", rproc->name); - ret = rproc_actuate(rproc); + ret = rproc_attach(rproc); } else { dev_info(dev, "powering up %s\n", rproc->name); @@ -1885,6 +2083,65 @@ out: EXPORT_SYMBOL(rproc_shutdown); /** + * rproc_detach() - Detach the remote processor from the + * remoteproc core + * + * @rproc: the remote processor + * + * Detach a remote processor (previously attached to with rproc_attach()). + * + * In case @rproc is still being used by an additional user(s), then + * this function will just decrement the power refcount and exit, + * without disconnecting the device. + * + * Function rproc_detach() calls __rproc_detach() in order to let a remote + * processor know that services provided by the application processor are + * no longer available. From there it should be possible to remove the + * platform driver and even power cycle the application processor (if the HW + * supports it) without needing to switch off the remote processor. + */ +int rproc_detach(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + int ret; + + ret = mutex_lock_interruptible(&rproc->lock); + if (ret) { + dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret); + return ret; + } + + /* if the remote proc is still needed, bail out */ + if (!atomic_dec_and_test(&rproc->power)) { + ret = 0; + goto out; + } + + ret = __rproc_detach(rproc); + if (ret) { + atomic_inc(&rproc->power); + goto out; + } + + /* clean up all acquired resources */ + rproc_resource_cleanup(rproc); + + /* release HW resources if needed */ + rproc_unprepare_device(rproc); + + rproc_disable_iommu(rproc); + + /* Free the copy of the resource table */ + kfree(rproc->cached_table); + rproc->cached_table = NULL; + rproc->table_ptr = NULL; +out: + mutex_unlock(&rproc->lock); + return ret; +} +EXPORT_SYMBOL(rproc_detach); + +/** * rproc_get_by_phandle() - find a remote processor by phandle * @phandle: phandle to the rproc * @@ -2077,16 +2334,6 @@ int rproc_add(struct rproc *rproc) if (ret < 0) return ret; - /* - * Remind ourselves the remote processor has been attached to rather - * than booted by the remoteproc core. This is important because the - * RPROC_DETACHED state will be lost as soon as the remote processor - * has been attached to. Used in firmware_show() and reset in - * rproc_stop(). 
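rproc_detach() above reuses the power refcount so that only the last user actually detaches, and it re-increments on failure so the count stays truthful. The pattern in isolation (sketch; __demo_detach() stands in for the real teardown):

#include <linux/atomic.h>

static int __demo_detach(struct rproc *rproc);      /* hypothetical teardown */

static int demo_put(struct rproc *rproc)
{
        int ret;

        if (!atomic_dec_and_test(&rproc->power))
                return 0;                       /* other users still hold it */

        ret = __demo_detach(rproc);
        if (ret)
                atomic_inc(&rproc->power);      /* undo the drop on failure */

        return ret;
}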
- */ - if (rproc->state == RPROC_DETACHED) - rproc->autonomous = true; - /* if rproc is marked always-on, request it to boot */ if (rproc->auto_boot) { ret = rproc_trigger_auto_boot(rproc); @@ -2347,10 +2594,8 @@ int rproc_del(struct rproc *rproc) if (!rproc) return -EINVAL; - /* if rproc is marked always-on, rproc_add() booted it */ /* TODO: make sure this works with rproc->power > 1 */ - if (rproc->auto_boot) - rproc_shutdown(rproc); + rproc_shutdown(rproc); mutex_lock(&rproc->lock); rproc->state = RPROC_DELETED; @@ -2492,7 +2737,11 @@ static int rproc_panic_handler(struct notifier_block *nb, unsigned long event, rcu_read_lock(); list_for_each_entry_rcu(rproc, &rproc_list, node) { - if (!rproc->ops->panic || rproc->state != RPROC_RUNNING) + if (!rproc->ops->panic) + continue; + + if (rproc->state != RPROC_RUNNING && + rproc->state != RPROC_ATTACHED) continue; d = rproc->ops->panic(rproc); diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c index 81ec154a6a5e..aee657cc08c6 100644 --- a/drivers/remoteproc/remoteproc_coredump.c +++ b/drivers/remoteproc/remoteproc_coredump.c @@ -153,18 +153,22 @@ static void rproc_copy_segment(struct rproc *rproc, void *dest, size_t offset, size_t size) { void *ptr; + bool is_iomem; if (segment->dump) { segment->dump(rproc, segment, dest, offset, size); } else { - ptr = rproc_da_to_va(rproc, segment->da + offset, size); + ptr = rproc_da_to_va(rproc, segment->da + offset, size, &is_iomem); if (!ptr) { dev_err(&rproc->dev, "invalid copy request for segment %pad with offset %zu and size %zu)\n", &segment->da, offset, size); memset(dest, 0xff, size); } else { - memcpy(dest, ptr, size); + if (is_iomem) + memcpy_fromio(dest, ptr, size); + else + memcpy(dest, ptr, size); } } } diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index 7e5845376e9f..b5a1e3b697d9 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c @@ -132,7 +132,7 @@ static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf, char buf[100]; int len; - va = rproc_da_to_va(data->rproc, trace->da, trace->len); + va = rproc_da_to_va(data->rproc, trace->da, trace->len, NULL); if (!va) { len = scnprintf(buf, sizeof(buf), "Trace %s not available\n", diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c index df68d87752e4..11423588965a 100644 --- a/drivers/remoteproc/remoteproc_elf_loader.c +++ b/drivers/remoteproc/remoteproc_elf_loader.c @@ -175,6 +175,7 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw) u64 offset = elf_phdr_get_p_offset(class, phdr); u32 type = elf_phdr_get_p_type(class, phdr); void *ptr; + bool is_iomem; if (type != PT_LOAD) continue; @@ -204,7 +205,7 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw) } /* grab the kernel address for this device address */ - ptr = rproc_da_to_va(rproc, da, memsz); + ptr = rproc_da_to_va(rproc, da, memsz, &is_iomem); if (!ptr) { dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da, memsz); @@ -213,8 +214,12 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw) } /* put the segment where the remote processor expects it */ - if (filesz) - memcpy(ptr, elf_data + offset, filesz); + if (filesz) { + if (is_iomem) + memcpy_fromio(ptr, (void __iomem *)(elf_data + offset), filesz); + else + memcpy(ptr, elf_data + offset, filesz); + } /* * Zero out remaining memory for this segment. 
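The is_iomem plumbing pays off here: __iomem mappings may not be dereferenced like ordinary memory on every architecture, so the loader and the coredump path switch to the io accessor family. As a general illustration (not the loader itself), filling a segment that lives in device memory looks like:

#include <linux/io.h>

static void demo_fill_iomem_segment(void __iomem *dst, const void *src,
                                    size_t filesz, size_t memsz)
{
        memcpy_toio(dst, src, filesz);  /* plain memcpy() is not safe here */
        if (memsz > filesz)
                memset_io(dst + filesz, 0, memsz - filesz);
}

memcpy_toio() writes toward the device and memcpy_fromio() reads from it, as the coredump path above does when capturing a segment.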
@@ -223,8 +228,12 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw) * did this for us. albeit harmless, we may consider removing * this. */ - if (memsz > filesz) - memset(ptr + filesz, 0, memsz - filesz); + if (memsz > filesz) { + if (is_iomem) + memset_io((void __iomem *)(ptr + filesz), 0, memsz - filesz); + else + memset(ptr + filesz, 0, memsz - filesz); + } } return ret; @@ -377,6 +386,6 @@ struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc, return NULL; } - return rproc_da_to_va(rproc, sh_addr, sh_size); + return rproc_da_to_va(rproc, sh_addr, sh_size, NULL); } EXPORT_SYMBOL(rproc_elf_find_loaded_rsc_table); diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index c34002888d2c..a328e634b1de 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -84,7 +84,7 @@ static inline void rproc_char_device_remove(struct rproc *rproc) void rproc_free_vring(struct rproc_vring *rvring); int rproc_alloc_vring(struct rproc_vdev *rvdev, int i); -void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len); +void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem); phys_addr_t rproc_va_to_pa(void *cpu_addr); int rproc_trigger_recovery(struct rproc *rproc); @@ -178,6 +178,16 @@ struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc, } static inline +struct resource_table *rproc_get_loaded_rsc_table(struct rproc *rproc, + size_t *size) +{ + if (rproc->ops->get_loaded_rsc_table) + return rproc->ops->get_loaded_rsc_table(rproc, size); + + return NULL; +} + +static inline bool rproc_u64_fit_in_size_t(u64 val) { if (sizeof(size_t) == sizeof(u64)) diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c index 1dbef895e65e..ea8b89f97d7b 100644 --- a/drivers/remoteproc/remoteproc_sysfs.c +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -15,7 +15,7 @@ static ssize_t recovery_show(struct device *dev, { struct rproc *rproc = to_rproc(dev); - return sprintf(buf, "%s", rproc->recovery_disabled ? "disabled\n" : "enabled\n"); + return sysfs_emit(buf, "%s", rproc->recovery_disabled ? "disabled\n" : "enabled\n"); } /* @@ -82,7 +82,7 @@ static ssize_t coredump_show(struct device *dev, { struct rproc *rproc = to_rproc(dev); - return sprintf(buf, "%s\n", rproc_coredump_str[rproc->dump_conf]); + return sysfs_emit(buf, "%s\n", rproc_coredump_str[rproc->dump_conf]); } /* @@ -138,11 +138,8 @@ static ssize_t firmware_show(struct device *dev, struct device_attribute *attr, * If the remote processor has been started by an external * entity we have no idea of what image it is running. As such * simply display a generic string rather than rproc->firmware. - * - * Here we rely on the autonomous flag because a remote processor - * may have been attached to and currently in a running state.
*/ - if (rproc->autonomous) + if (rproc->state == RPROC_ATTACHED) firmware = "unknown"; return sprintf(buf, "%s\n", firmware); @@ -172,6 +169,7 @@ static const char * const rproc_state_string[] = { [RPROC_RUNNING] = "running", [RPROC_CRASHED] = "crashed", [RPROC_DELETED] = "deleted", + [RPROC_ATTACHED] = "attached", [RPROC_DETACHED] = "detached", [RPROC_LAST] = "invalid", }; @@ -196,17 +194,24 @@ static ssize_t state_store(struct device *dev, int ret = 0; if (sysfs_streq(buf, "start")) { - if (rproc->state == RPROC_RUNNING) + if (rproc->state == RPROC_RUNNING || + rproc->state == RPROC_ATTACHED) return -EBUSY; ret = rproc_boot(rproc); if (ret) dev_err(&rproc->dev, "Boot failed: %d\n", ret); } else if (sysfs_streq(buf, "stop")) { - if (rproc->state != RPROC_RUNNING) + if (rproc->state != RPROC_RUNNING && + rproc->state != RPROC_ATTACHED) return -EINVAL; rproc_shutdown(rproc); + } else if (sysfs_streq(buf, "detach")) { + if (rproc->state != RPROC_ATTACHED) + return -EINVAL; + + ret = rproc_detach(rproc); } else { dev_err(&rproc->dev, "Unrecognised option: %s\n", buf); ret = -EINVAL; diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c index 09bcb4d8b9e0..22096adc1ad3 100644 --- a/drivers/remoteproc/st_slim_rproc.c +++ b/drivers/remoteproc/st_slim_rproc.c @@ -174,7 +174,7 @@ static int slim_rproc_stop(struct rproc *rproc) return 0; } -static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct st_slim_rproc *slim_rproc = rproc->priv; void *va = NULL; diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c index ccb3c14a0023..7353f9e7e7af 100644 --- a/drivers/remoteproc/stm32_rproc.c +++ b/drivers/remoteproc/stm32_rproc.c @@ -28,7 +28,7 @@ #define RELEASE_BOOT 1 #define MBOX_NB_VQ 2 -#define MBOX_NB_MBX 3 +#define MBOX_NB_MBX 4 #define STM32_SMC_RCC 0x82001000 #define STM32_SMC_REG_WRITE 0x1 @@ -38,6 +38,7 @@ #define STM32_MBX_VQ1 "vq1" #define STM32_MBX_VQ1_ID 1 #define STM32_MBX_SHUTDOWN "shutdown" +#define STM32_MBX_DETACH "detach" #define RSC_TBL_SIZE 1024 @@ -207,16 +208,7 @@ static int stm32_rproc_mbox_idx(struct rproc *rproc, const unsigned char *name) return -EINVAL; } -static int stm32_rproc_elf_load_rsc_table(struct rproc *rproc, - const struct firmware *fw) -{ - if (rproc_elf_load_rsc_table(rproc, fw)) - dev_warn(&rproc->dev, "no resource table found for this firmware\n"); - - return 0; -} - -static int stm32_rproc_parse_memory_regions(struct rproc *rproc) +static int stm32_rproc_prepare(struct rproc *rproc) { struct device *dev = rproc->dev.parent; struct device_node *np = dev->of_node; @@ -274,12 +266,10 @@ static int stm32_rproc_parse_memory_regions(struct rproc *rproc) static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw) { - int ret = stm32_rproc_parse_memory_regions(rproc); - - if (ret) - return ret; + if (rproc_elf_load_rsc_table(rproc, fw)) + dev_warn(&rproc->dev, "no resource table found for this firmware\n"); - return stm32_rproc_elf_load_rsc_table(rproc, fw); + return 0; } static irqreturn_t stm32_rproc_wdg(int irq, void *data) @@ -347,6 +337,15 @@ static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = { .tx_done = NULL, .tx_tout = 500, /* 500 ms time out */ }, + }, + { + .name = STM32_MBX_DETACH, + .vq_id = -1, + .client = { + .tx_block = true, + .tx_done = NULL, + .tx_tout = 200, /* 200 ms time out to detach should be fair enough */ + }, } }; @@ -472,6 +471,25 @@ 
static int stm32_rproc_attach(struct rproc *rproc) return stm32_rproc_set_hold_boot(rproc, true); } +static int stm32_rproc_detach(struct rproc *rproc) +{ + struct stm32_rproc *ddata = rproc->priv; + int err, dummy_data, idx; + + /* Inform the remote processor of the detach */ + idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_DETACH); + if (idx >= 0 && ddata->mb[idx].chan) { + /* Dummy data is sent to allow blocking on transmit */ + err = mbox_send_message(ddata->mb[idx].chan, + &dummy_data); + if (err < 0) + dev_warn(&rproc->dev, "warning: remote FW detach without ack\n"); + } + + /* Allow remote processor to auto-reboot */ + return stm32_rproc_set_hold_boot(rproc, false); +} + static int stm32_rproc_stop(struct rproc *rproc) { struct stm32_rproc *ddata = rproc->priv; @@ -546,14 +564,89 @@ static void stm32_rproc_kick(struct rproc *rproc, int vqid) } } +static int stm32_rproc_da_to_pa(struct rproc *rproc, + u64 da, phys_addr_t *pa) +{ + struct stm32_rproc *ddata = rproc->priv; + struct device *dev = rproc->dev.parent; + struct stm32_rproc_mem *p_mem; + unsigned int i; + + for (i = 0; i < ddata->nb_rmems; i++) { + p_mem = &ddata->rmems[i]; + + if (da < p_mem->dev_addr || + da >= p_mem->dev_addr + p_mem->size) + continue; + + *pa = da - p_mem->dev_addr + p_mem->bus_addr; + dev_dbg(dev, "da %llx to pa %#x\n", da, *pa); + + return 0; + } + + dev_err(dev, "can't translate da %llx\n", da); + + return -EINVAL; +} + +static struct resource_table * +stm32_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz) +{ + struct stm32_rproc *ddata = rproc->priv; + struct device *dev = rproc->dev.parent; + phys_addr_t rsc_pa; + u32 rsc_da; + int err; + + /* The resource table has already been mapped, nothing to do */ + if (ddata->rsc_va) + goto done; + + err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da); + if (err) { + dev_err(dev, "failed to read rsc tbl addr\n"); + return ERR_PTR(-EINVAL); + } + + if (!rsc_da) + /* no rsc table */ + return ERR_PTR(-ENOENT); + + err = stm32_rproc_da_to_pa(rproc, rsc_da, &rsc_pa); + if (err) + return ERR_PTR(err); + + ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE); + if (IS_ERR_OR_NULL(ddata->rsc_va)) { + dev_err(dev, "Unable to map memory region: %pa+%zx\n", + &rsc_pa, RSC_TBL_SIZE); + ddata->rsc_va = NULL; + return ERR_PTR(-ENOMEM); + } + +done: + /* + * Assuming the resource table fits in 1kB is fair. + * Note that, to support detach, this 1 kB memory area has to be reserved in the + * coprocessor firmware for the resource table. On detach, the remoteproc core + * re-initializes this entire area by overwriting it with the initial values stored + * in rproc->clean_table. 
+ */ + *table_sz = RSC_TBL_SIZE; + return (struct resource_table *)ddata->rsc_va; +} + static const struct rproc_ops st_rproc_ops = { + .prepare = stm32_rproc_prepare, .start = stm32_rproc_start, .stop = stm32_rproc_stop, .attach = stm32_rproc_attach, + .detach = stm32_rproc_detach, .kick = stm32_rproc_kick, .load = rproc_elf_load_segments, .parse_fw = stm32_rproc_parse_fw, .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table, + .get_loaded_rsc_table = stm32_rproc_get_loaded_rsc_table, .sanity_check = rproc_elf_sanity_check, .get_boot_addr = rproc_elf_get_boot_addr, }; @@ -695,75 +788,6 @@ static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata, return regmap_read(ddata->m4_state.map, ddata->m4_state.reg, state); } -static int stm32_rproc_da_to_pa(struct platform_device *pdev, - struct stm32_rproc *ddata, - u64 da, phys_addr_t *pa) -{ - struct device *dev = &pdev->dev; - struct stm32_rproc_mem *p_mem; - unsigned int i; - - for (i = 0; i < ddata->nb_rmems; i++) { - p_mem = &ddata->rmems[i]; - - if (da < p_mem->dev_addr || - da >= p_mem->dev_addr + p_mem->size) - continue; - - *pa = da - p_mem->dev_addr + p_mem->bus_addr; - dev_dbg(dev, "da %llx to pa %#x\n", da, *pa); - - return 0; - } - - dev_err(dev, "can't translate da %llx\n", da); - - return -EINVAL; -} - -static int stm32_rproc_get_loaded_rsc_table(struct platform_device *pdev, - struct rproc *rproc, - struct stm32_rproc *ddata) -{ - struct device *dev = &pdev->dev; - phys_addr_t rsc_pa; - u32 rsc_da; - int err; - - err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da); - if (err) { - dev_err(dev, "failed to read rsc tbl addr\n"); - return err; - } - - if (!rsc_da) - /* no rsc table */ - return 0; - - err = stm32_rproc_da_to_pa(pdev, ddata, rsc_da, &rsc_pa); - if (err) - return err; - - ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE); - if (IS_ERR_OR_NULL(ddata->rsc_va)) { - dev_err(dev, "Unable to map memory region: %pa+%zx\n", - &rsc_pa, RSC_TBL_SIZE); - ddata->rsc_va = NULL; - return -ENOMEM; - } - - /* - * The resource table is already loaded in device memory, no need - * to work with a cached table. - */ - rproc->cached_table = NULL; - /* Assuming the resource table fits in 1kB is fair */ - rproc->table_sz = RSC_TBL_SIZE; - rproc->table_ptr = (struct resource_table *)ddata->rsc_va; - - return 0; -} - static int stm32_rproc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -797,18 +821,9 @@ static int stm32_rproc_probe(struct platform_device *pdev) if (ret) goto free_rproc; - if (state == M4_STATE_CRUN) { + if (state == M4_STATE_CRUN) rproc->state = RPROC_DETACHED; - ret = stm32_rproc_parse_memory_regions(rproc); - if (ret) - goto free_resources; - - ret = stm32_rproc_get_loaded_rsc_table(pdev, rproc, ddata); - if (ret) - goto free_resources; - } - rproc->has_iommu = false; ddata->workqueue = create_workqueue(dev_name(dev)); if (!ddata->workqueue) { diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c index 863c0214e0a8..fd4eb67a6681 100644 --- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c +++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c @@ -354,7 +354,7 @@ static int k3_dsp_rproc_stop(struct rproc *rproc) * can be used either by the remoteproc core for loading (when using kernel * remoteproc loader), or by any rpmsg bus drivers. 
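Both the stm32 translation above and the TI K3 one below are instances of the same window arithmetic: find the memory region whose device-address range contains da, then rebase into the bus/physical space. Reduced to a standalone sketch (types and names hypothetical):

#include <linux/types.h>

struct demo_mem_window {
        u64 dev_addr;           /* address as the coprocessor sees it */
        phys_addr_t bus_addr;   /* the same memory from the host side */
        size_t size;
};

static int demo_da_to_pa(const struct demo_mem_window *w, unsigned int n,
                         u64 da, phys_addr_t *pa)
{
        unsigned int i;

        for (i = 0; i < n; i++) {
                if (da < w[i].dev_addr || da >= w[i].dev_addr + w[i].size)
                        continue;
                *pa = da - w[i].dev_addr + w[i].bus_addr;
                return 0;
        }

        return -EINVAL;
}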
*/ -static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct k3_dsp_rproc *kproc = rproc->priv; void __iomem *va = NULL; diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c index 62b5a4c29456..5cf8d030a1f0 100644 --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c @@ -590,7 +590,7 @@ out: * present in a DSP or IPU device). The translated addresses can be used * either by the remoteproc core for loading, or by any rpmsg bus drivers. */ -static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct k3_r5_rproc *kproc = rproc->priv; struct k3_r5_core *core = kproc->core; diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c index 92d387dfc03b..484f7605823e 100644 --- a/drivers/remoteproc/wkup_m3_rproc.c +++ b/drivers/remoteproc/wkup_m3_rproc.c @@ -89,7 +89,7 @@ static int wkup_m3_rproc_stop(struct rproc *rproc) return error; } -static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct wkup_m3_rproc *wkupm3 = rproc->priv; void *va = NULL; diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 7043c7f6dcf0..3e7f55e44d84 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -197,6 +197,7 @@ config RESET_SIMPLE - RCC reset controller in STM32 MCUs - Allwinner SoCs - ZTE's zx2967 family + - SiFive FU740 SoCs config RESET_STM32MP157 bool "STM32MP157 Reset Driver" if COMPILE_TEST diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 27a05167c18c..05533c71b10e 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -857,6 +857,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail) dev_err(glink->dev, "no intent found for channel %s intent %d", channel->name, liid); + ret = -ENOENT; goto advance_rx; } } @@ -1332,6 +1333,20 @@ static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len) return __qcom_glink_send(channel, data, len, false); } +static int qcom_glink_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst) +{ + struct glink_channel *channel = to_glink_channel(ept); + + return __qcom_glink_send(channel, data, len, true); +} + +static int qcom_glink_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst) +{ + struct glink_channel *channel = to_glink_channel(ept); + + return __qcom_glink_send(channel, data, len, false); +} + /* * Finds the device_node for the glink child interested in this channel. 
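 *
 * Example (illustrative, not part of this patch; assumes an endpoint ept
 * obtained from rpmsg_create_ept()): the .sendto/.trysendto ops added above
 * back the generic rpmsg API, although the GLINK implementations currently
 * ignore the dst argument:
 *
 *	// may sleep until a TX buffer is available
 *	ret = rpmsg_sendto(ept, buf, len, dst);
 *
 *	// fails fast, e.g. with -ENOMEM, when no buffer is free
 *	ret = rpmsg_trysendto(ept, buf, len, dst);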
*/ @@ -1364,7 +1379,9 @@ static const struct rpmsg_device_ops glink_device_ops = { static const struct rpmsg_endpoint_ops glink_endpoint_ops = { .destroy_ept = qcom_glink_destroy_ept, .send = qcom_glink_send, + .sendto = qcom_glink_sendto, .trysend = qcom_glink_trysend, + .trysendto = qcom_glink_trysendto, }; static void qcom_glink_rpdev_release(struct device *dev) diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index 19903de6268d..8da1b5cb31b3 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -974,6 +974,20 @@ static int qcom_smd_trysend(struct rpmsg_endpoint *ept, void *data, int len) return __qcom_smd_send(qsept->qsch, data, len, false); } +static int qcom_smd_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst) +{ + struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept); + + return __qcom_smd_send(qsept->qsch, data, len, true); +} + +static int qcom_smd_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst) +{ + struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept); + + return __qcom_smd_send(qsept->qsch, data, len, false); +} + static __poll_t qcom_smd_poll(struct rpmsg_endpoint *ept, struct file *filp, poll_table *wait) { @@ -1038,7 +1052,9 @@ static const struct rpmsg_device_ops qcom_smd_device_ops = { static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops = { .destroy_ept = qcom_smd_destroy_ept, .send = qcom_smd_send, + .sendto = qcom_smd_sendto, .trysend = qcom_smd_trysend, + .trysendto = qcom_smd_trysendto, .poll = qcom_smd_poll, }; diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c index 4bbbacdbf3bb..2bebc9b2d163 100644 --- a/drivers/rpmsg/rpmsg_char.c +++ b/drivers/rpmsg/rpmsg_char.c @@ -127,6 +127,9 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp) struct rpmsg_device *rpdev = eptdev->rpdev; struct device *dev = &eptdev->dev; + if (eptdev->ept) + return -EBUSY; + get_device(dev); ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo); @@ -239,9 +242,9 @@ static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb, } if (filp->f_flags & O_NONBLOCK) - ret = rpmsg_trysend(eptdev->ept, kbuf, len); + ret = rpmsg_trysendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst); else - ret = rpmsg_send(eptdev->ept, kbuf, len); + ret = rpmsg_sendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst); unlock_eptdev: mutex_unlock(&eptdev->ept_lock); @@ -543,7 +546,7 @@ static struct rpmsg_driver rpmsg_chrdev_driver = { }, }; -static int rpmsg_char_init(void) +static int rpmsg_chrdev_init(void) { int ret; @@ -569,7 +572,7 @@ static int rpmsg_char_init(void) return ret; } -postcore_initcall(rpmsg_char_init); +postcore_initcall(rpmsg_chrdev_init); static void rpmsg_chrdev_exit(void) { diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index e87d4cf926eb..8e49a3bacfc7 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -813,14 +813,57 @@ static void rpmsg_xmit_done(struct virtqueue *svq) wake_up_interruptible(&vrp->sendq); } +/* + * Called to expose to userspace a /dev/rpmsg_ctrlX interface allowing the + * creation of endpoint-to-endpoint communication without an associated RPMsg channel. + * The endpoints are attached to the ctrldev RPMsg device.
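+ *
+ * Example (illustrative, not part of this patch; the endpoint name and
+ * addresses are made up): userspace creates such an endpoint through the
+ * control device with RPMSG_CREATE_EPT_IOCTL from <linux/rpmsg.h>:
+ *
+ *	struct rpmsg_endpoint_info info = { "my-ept", 0x400, 0x401 };
+ *	int fd = open("/dev/rpmsg_ctrl0", O_RDWR);
+ *
+ *	if (ioctl(fd, RPMSG_CREATE_EPT_IOCTL, &info) == 0)
+ *		puts("endpoint created: a /dev/rpmsgN node appears");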
+ */ +static struct rpmsg_device *rpmsg_virtio_add_ctrl_dev(struct virtio_device *vdev) +{ + struct virtproc_info *vrp = vdev->priv; + struct virtio_rpmsg_channel *vch; + struct rpmsg_device *rpdev_ctrl; + int err = 0; + + vch = kzalloc(sizeof(*vch), GFP_KERNEL); + if (!vch) + return ERR_PTR(-ENOMEM); + + /* Link the channel to the vrp */ + vch->vrp = vrp; + + /* Assign public information to the rpmsg_device */ + rpdev_ctrl = &vch->rpdev; + rpdev_ctrl->ops = &virtio_rpmsg_ops; + + rpdev_ctrl->dev.parent = &vrp->vdev->dev; + rpdev_ctrl->dev.release = virtio_rpmsg_release_device; + rpdev_ctrl->little_endian = virtio_is_little_endian(vrp->vdev); + + err = rpmsg_chrdev_register_device(rpdev_ctrl); + if (err) { + kfree(vch); + return ERR_PTR(err); + } + + return rpdev_ctrl; +} + +static void rpmsg_virtio_del_ctrl_dev(struct rpmsg_device *rpdev_ctrl) +{ + if (!rpdev_ctrl) + return; + kfree(to_virtio_rpmsg_channel(rpdev_ctrl)); +} + static int rpmsg_probe(struct virtio_device *vdev) { vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done }; static const char * const names[] = { "input", "output" }; struct virtqueue *vqs[2]; struct virtproc_info *vrp; - struct virtio_rpmsg_channel *vch; - struct rpmsg_device *rpdev_ns; + struct virtio_rpmsg_channel *vch = NULL; + struct rpmsg_device *rpdev_ns, *rpdev_ctrl; void *bufs_va; int err = 0, i; size_t total_buf_space; @@ -894,12 +937,18 @@ static int rpmsg_probe(struct virtio_device *vdev) vdev->priv = vrp; + rpdev_ctrl = rpmsg_virtio_add_ctrl_dev(vdev); + if (IS_ERR(rpdev_ctrl)) { + err = PTR_ERR(rpdev_ctrl); + goto free_coherent; + } + /* if supported by the remote processor, enable the name service */ if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) { vch = kzalloc(sizeof(*vch), GFP_KERNEL); if (!vch) { err = -ENOMEM; - goto free_coherent; + goto free_ctrldev; } /* Link the channel to our vrp */ @@ -915,7 +964,7 @@ static int rpmsg_probe(struct virtio_device *vdev) err = rpmsg_ns_register_device(rpdev_ns); if (err) - goto free_coherent; + goto free_vch; } /* @@ -939,8 +988,11 @@ static int rpmsg_probe(struct virtio_device *vdev) return 0; -free_coherent: +free_vch: kfree(vch); +free_ctrldev: + rpmsg_virtio_del_ctrl_dev(rpdev_ctrl); +free_coherent: dma_free_coherent(vdev->dev.parent, total_buf_space, bufs_va, vrp->bufs_dma); vqs_del: diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index dc36531d589e..222593bc2afe 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -1649,8 +1649,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) } /* Get the read only section offset */ - ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, - PCI_VPD_LRDT_RO_DATA); + ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); if (unlikely(ro_start < 0)) { dev_err(dev, "%s: VPD Read-only data not found\n", __func__); rc = -ENODEV; diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c index dffe3ba8c7c4..e61b91d14ad1 100644 --- a/drivers/thermal/amlogic_thermal.c +++ b/drivers/thermal/amlogic_thermal.c @@ -254,10 +254,8 @@ static int amlogic_thermal_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pdata); base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(base)) { - dev_err(dev, "failed to get io address\n"); + if (IS_ERR(base)) return PTR_ERR(base); - } pdata->regmap = devm_regmap_init_mmio(dev, base, pdata->data->regmap_config); diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index 
3199977f1e73..c8e4344d5a3d 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c @@ -184,7 +184,6 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) data->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(data->regs)) { err = PTR_ERR(data->regs); - dev_err(&pdev->dev, "Could not get registers: %d\n", err); return err; } diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c index 10af3341e5ea..eeb4e4b76c0b 100644 --- a/drivers/thermal/cpufreq_cooling.c +++ b/drivers/thermal/cpufreq_cooling.c @@ -13,10 +13,10 @@ #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/cpu_cooling.h> +#include <linux/device.h> #include <linux/energy_model.h> #include <linux/err.h> #include <linux/export.h> -#include <linux/idr.h> #include <linux/pm_opp.h> #include <linux/pm_qos.h> #include <linux/slab.h> @@ -50,8 +50,6 @@ struct time_in_idle { /** * struct cpufreq_cooling_device - data for cooling device with cpufreq - * @id: unique integer value corresponding to each cpufreq_cooling_device - * registered. * @last_load: load measured by the latest call to cpufreq_get_requested_power() * @cpufreq_state: integer value representing the current state of cpufreq * cooling devices. @@ -61,7 +59,6 @@ struct time_in_idle { * @cdev: thermal_cooling_device pointer to keep track of the * registered cooling device. * @policy: cpufreq policy. - * @node: list_head to link all cpufreq_cooling_device together. * @idle_time: idle time stats * @qos_req: PM QoS constraint to apply * @@ -69,23 +66,17 @@ struct time_in_idle { * cpufreq_cooling_device. */ struct cpufreq_cooling_device { - int id; u32 last_load; unsigned int cpufreq_state; unsigned int max_level; struct em_perf_domain *em; struct cpufreq_policy *policy; - struct list_head node; #ifndef CONFIG_SMP struct time_in_idle *idle_time; #endif struct freq_qos_request qos_req; }; -static DEFINE_IDA(cpufreq_ida); -static DEFINE_MUTEX(cooling_list_lock); -static LIST_HEAD(cpufreq_cdev_list); - #ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR /** * get_level: Find the level for a particular frequency @@ -125,7 +116,7 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev, { int i; - for (i = cpufreq_cdev->max_level; i >= 0; i--) { + for (i = cpufreq_cdev->max_level; i > 0; i--) { if (power >= cpufreq_cdev->em->table[i].power) break; } @@ -528,11 +519,11 @@ __cpufreq_cooling_register(struct device_node *np, { struct thermal_cooling_device *cdev; struct cpufreq_cooling_device *cpufreq_cdev; - char dev_name[THERMAL_NAME_LENGTH]; unsigned int i; struct device *dev; int ret; struct thermal_cooling_device_ops *cooling_ops; + char *name; dev = get_cpu_device(policy->cpu); if (unlikely(!dev)) { @@ -567,16 +558,6 @@ __cpufreq_cooling_register(struct device_node *np, /* max_level is an index, not a counter */ cpufreq_cdev->max_level = i - 1; - ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL); - if (ret < 0) { - cdev = ERR_PTR(ret); - goto free_idle_time; - } - cpufreq_cdev->id = ret; - - snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", - cpufreq_cdev->id); - cooling_ops = &cpufreq_cooling_ops; #ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR @@ -591,7 +572,7 @@ __cpufreq_cooling_register(struct device_node *np, pr_err("%s: unsorted frequency tables are not supported\n", __func__); cdev = ERR_PTR(-EINVAL); - goto remove_ida; + goto free_idle_time; } ret = freq_qos_add_request(&policy->constraints, @@ -601,24 +582,25 @@ __cpufreq_cooling_register(struct
device_node *np, pr_err("%s: Failed to add freq constraint (%d)\n", __func__, ret); cdev = ERR_PTR(ret); - goto remove_ida; + goto free_idle_time; } - cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev, + cdev = ERR_PTR(-ENOMEM); + name = kasprintf(GFP_KERNEL, "cpufreq-%s", dev_name(dev)); + if (!name) + goto remove_qos_req; + + cdev = thermal_of_cooling_device_register(np, name, cpufreq_cdev, cooling_ops); + kfree(name); + if (IS_ERR(cdev)) goto remove_qos_req; - mutex_lock(&cooling_list_lock); - list_add(&cpufreq_cdev->node, &cpufreq_cdev_list); - mutex_unlock(&cooling_list_lock); - return cdev; remove_qos_req: freq_qos_remove_request(&cpufreq_cdev->qos_req); -remove_ida: - ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id); free_idle_time: free_idle_time(cpufreq_cdev); free_cdev: @@ -706,13 +688,8 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) cpufreq_cdev = cdev->devdata; - mutex_lock(&cooling_list_lock); - list_del(&cpufreq_cdev->node); - mutex_unlock(&cooling_list_lock); - thermal_cooling_device_unregister(cdev); freq_qos_remove_request(&cpufreq_cdev->qos_req); - ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id); free_idle_time(cpufreq_cdev); kfree(cpufreq_cdev); } diff --git a/drivers/thermal/cpuidle_cooling.c b/drivers/thermal/cpuidle_cooling.c index 7ecab4b16b29..4f41102e8b16 100644 --- a/drivers/thermal/cpuidle_cooling.c +++ b/drivers/thermal/cpuidle_cooling.c @@ -9,9 +9,9 @@ #include <linux/cpu_cooling.h> #include <linux/cpuidle.h> +#include <linux/device.h> #include <linux/err.h> #include <linux/idle_inject.h> -#include <linux/idr.h> #include <linux/of_device.h> #include <linux/slab.h> #include <linux/thermal.h> @@ -26,8 +26,6 @@ struct cpuidle_cooling_device { unsigned long state; }; -static DEFINE_IDA(cpuidle_ida); - /** * cpuidle_cooling_runtime - Running time computation * @idle_duration_us: CPU idle time to inject in microseconds @@ -174,10 +172,11 @@ static int __cpuidle_cooling_register(struct device_node *np, struct idle_inject_device *ii_dev; struct cpuidle_cooling_device *idle_cdev; struct thermal_cooling_device *cdev; + struct device *dev; unsigned int idle_duration_us = TICK_USEC; unsigned int latency_us = UINT_MAX; - char dev_name[THERMAL_NAME_LENGTH]; - int id, ret; + char *name; + int ret; idle_cdev = kzalloc(sizeof(*idle_cdev), GFP_KERNEL); if (!idle_cdev) { @@ -185,16 +184,10 @@ static int __cpuidle_cooling_register(struct device_node *np, goto out; } - id = ida_simple_get(&cpuidle_ida, 0, 0, GFP_KERNEL); - if (id < 0) { - ret = id; - goto out_kfree; - } - ii_dev = idle_inject_register(drv->cpumask); if (!ii_dev) { ret = -EINVAL; - goto out_id; + goto out_kfree; } of_property_read_u32(np, "duration-us", &idle_duration_us); @@ -205,24 +198,32 @@ static int __cpuidle_cooling_register(struct device_node *np, idle_cdev->ii_dev = ii_dev; - snprintf(dev_name, sizeof(dev_name), "thermal-idle-%d", id); + dev = get_cpu_device(cpumask_first(drv->cpumask)); - cdev = thermal_of_cooling_device_register(np, dev_name, idle_cdev, + name = kasprintf(GFP_KERNEL, "idle-%s", dev_name(dev)); + if (!name) { + ret = -ENOMEM; + goto out_unregister; + } + + cdev = thermal_of_cooling_device_register(np, name, idle_cdev, &cpuidle_cooling_ops); if (IS_ERR(cdev)) { ret = PTR_ERR(cdev); - goto out_unregister; + goto out_kfree_name; } pr_debug("%s: Idle injection set with idle duration=%u, latency=%u\n", - dev_name, idle_duration_us, latency_us); + name, idle_duration_us, latency_us); + + kfree(name); return 0; +out_kfree_name: + kfree(name); 
out_unregister: idle_inject_unregister(ii_dev); -out_id: - ida_simple_remove(&cpuidle_ida, id); out_kfree: kfree(idle_cdev); out: diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c index fed3121ff2a1..3a788ac4f525 100644 --- a/drivers/thermal/devfreq_cooling.c +++ b/drivers/thermal/devfreq_cooling.c @@ -14,7 +14,6 @@ #include <linux/devfreq_cooling.h> #include <linux/energy_model.h> #include <linux/export.h> -#include <linux/idr.h> #include <linux/slab.h> #include <linux/pm_opp.h> #include <linux/pm_qos.h> @@ -25,11 +24,8 @@ #define HZ_PER_KHZ 1000 #define SCALE_ERROR_MITIGATION 100 -static DEFINE_IDA(devfreq_ida); - /** * struct devfreq_cooling_device - Devfreq cooling device - * @id: unique integer value corresponding to each * devfreq_cooling_device registered. * @cdev: Pointer to associated thermal cooling device. * @devfreq: Pointer to associated devfreq device. @@ -51,7 +47,6 @@ static DEFINE_IDA(devfreq_ida); * @em_pd: Energy Model for the associated Devfreq device */ struct devfreq_cooling_device { - int id; struct thermal_cooling_device *cdev; struct devfreq *devfreq; unsigned long cooling_state; @@ -363,7 +358,7 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, struct thermal_cooling_device *cdev; struct device *dev = df->dev.parent; struct devfreq_cooling_device *dfc; - char dev_name[THERMAL_NAME_LENGTH]; + char *name; int err, num_opps; dfc = kzalloc(sizeof(*dfc), GFP_KERNEL); @@ -407,30 +402,27 @@ of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, if (err < 0) goto free_table; - err = ida_simple_get(&devfreq_ida, 0, 0, GFP_KERNEL); - if (err < 0) + err = -ENOMEM; + name = kasprintf(GFP_KERNEL, "devfreq-%s", dev_name(dev)); + if (!name) goto remove_qos_req; - dfc->id = err; - - snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d", dfc->id); - - cdev = thermal_of_cooling_device_register(np, dev_name, dfc, + cdev = thermal_of_cooling_device_register(np, name, dfc, &devfreq_cooling_ops); + kfree(name); + if (IS_ERR(cdev)) { err = PTR_ERR(cdev); dev_err(dev, "Failed to register devfreq cooling device (%d)\n", err); - goto release_ida; + goto remove_qos_req; } dfc->cdev = cdev; return cdev; -release_ida: - ida_simple_remove(&devfreq_ida, dfc->id); remove_qos_req: dev_pm_qos_remove_request(&dfc->req_max_freq); free_table: @@ -527,7 +519,6 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *cdev) dev = dfc->devfreq->dev.parent; thermal_cooling_device_unregister(dfc->cdev); - ida_simple_remove(&devfreq_ida, dfc->id); dev_pm_qos_remove_request(&dfc->req_max_freq); em_dev_unregister_perf_domain(dev); diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c index aaa07180ab48..1e5abf4822be 100644 --- a/drivers/thermal/gov_fair_share.c +++ b/drivers/thermal/gov_fair_share.c @@ -82,6 +82,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip) int total_instance = 0; int cur_trip_level = get_trip_level(tz); + mutex_lock(&tz->lock); + list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if (instance->trip != trip) continue; @@ -105,11 +107,12 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip) instance->target = get_target_state(tz, cdev, percentage, cur_trip_level); - mutex_lock(&instance->cdev->lock); - instance->cdev->updated = false; - mutex_unlock(&instance->cdev->lock); - thermal_cdev_update(cdev); + mutex_lock(&cdev->lock); + __thermal_cdev_update(cdev); + mutex_unlock(&cdev->lock); } + + 
mutex_unlock(&tz->lock); return 0; } diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c index 92acae53df49..13e375751d22 100644 --- a/drivers/thermal/gov_power_allocator.c +++ b/drivers/thermal/gov_power_allocator.c @@ -301,9 +301,8 @@ power_actor_set_power(struct thermal_cooling_device *cdev, instance->target = clamp_val(state, instance->lower, instance->upper); mutex_lock(&cdev->lock); - cdev->updated = false; + __thermal_cdev_update(cdev); mutex_unlock(&cdev->lock); - thermal_cdev_update(cdev); return 0; } @@ -374,9 +373,11 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors, */ extra_power = min(extra_power, capped_extra_power); if (capped_extra_power > 0) - for (i = 0; i < num_actors; i++) - granted_power[i] += (extra_actor_power[i] * - extra_power) / capped_extra_power; + for (i = 0; i < num_actors; i++) { + u64 extra_range = (u64)extra_actor_power[i] * extra_power; + granted_power[i] += DIV_ROUND_CLOSEST_ULL(extra_range, + capped_extra_power); + } } static int allocate_power(struct thermal_zone_device *tz, @@ -569,22 +570,33 @@ static void reset_pid_controller(struct power_allocator_params *params) params->prev_err = 0; } -static void allow_maximum_power(struct thermal_zone_device *tz) +static void allow_maximum_power(struct thermal_zone_device *tz, bool update) { struct thermal_instance *instance; struct power_allocator_params *params = tz->governor_data; + u32 req_power; mutex_lock(&tz->lock); list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + struct thermal_cooling_device *cdev = instance->cdev; + if ((instance->trip != params->trip_max_desired_temperature) || (!cdev_is_power_actor(instance->cdev))) continue; instance->target = 0; mutex_lock(&instance->cdev->lock); + /* + * Call for updating the cooling device's local stats and avoid + * periods of dozens of seconds during which they are not + * maintained. + */ + cdev->ops->get_requested_power(cdev, &req_power); + + if (update) + __thermal_cdev_update(instance->cdev); + mutex_unlock(&instance->cdev->lock); - thermal_cdev_update(instance->cdev); } mutex_unlock(&tz->lock); } @@ -698,6 +710,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) int ret; int switch_on_temp, control_temp; struct power_allocator_params *params = tz->governor_data; + bool update; /* * We get called for every trip point but we only need to do @@ -709,9 +722,10 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, &switch_on_temp); if (!ret && (tz->temperature < switch_on_temp)) { + update = (tz->last_temperature >= switch_on_temp); tz->passive = 0; reset_pid_controller(params); - allow_maximum_power(tz); + allow_maximum_power(tz, update); return 0; } diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index ee05950afd2f..9a21ac0ceb11 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -1,7 +1,7 @@ /* - * Hisilicon thermal sensor driver + * HiSilicon thermal sensor driver * - * Copyright (c) 2014-2015 Hisilicon Limited. + * Copyright (c) 2014-2015 HiSilicon Limited. * Copyright (c) 2014-2015 Linaro Limited.
* * Xinwei Kong <[email protected]> @@ -572,10 +572,8 @@ static int hisi_thermal_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); data->regs = devm_ioremap_resource(dev, res); - if (IS_ERR(data->regs)) { - dev_err(dev, "failed to get io address\n"); + if (IS_ERR(data->regs)) return PTR_ERR(data->regs); - } ret = data->ops->probe(data); if (ret) @@ -672,5 +670,5 @@ module_platform_driver(hisi_thermal_driver); MODULE_AUTHOR("Xinwei Kong <[email protected]>"); MODULE_AUTHOR("Leo Yan <[email protected]>"); -MODULE_DESCRIPTION("Hisilicon thermal driver"); +MODULE_DESCRIPTION("HiSilicon thermal driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig index ce4f59213c7a..e4299ca3423c 100644 --- a/drivers/thermal/intel/Kconfig +++ b/drivers/thermal/intel/Kconfig @@ -79,3 +79,14 @@ config INTEL_PCH_THERMAL Enable this to support thermal reporting on certain intel PCHs. Thermal reporting device will provide temperature reading, programmable trip points and other information. + +config INTEL_TCC_COOLING + tristate "Intel TCC offset cooling Driver" + depends on X86 + help + Enable this to support system cooling by adjusting the effective TCC + activation temperature via the TCC Offset register, which is widely + supported on modern Intel platforms. + Note that, on different platforms, the behavior might be different + on how fast the setting takes effect, and how much the CPU frequency + is reduced. diff --git a/drivers/thermal/intel/Makefile b/drivers/thermal/intel/Makefile index ff2ad30ef397..5ff2afa388f7 100644 --- a/drivers/thermal/intel/Makefile +++ b/drivers/thermal/intel/Makefile @@ -10,4 +10,5 @@ obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/ obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o +obj-$(CONFIG_INTEL_TCC_COOLING) += intel_tcc_cooling.o obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c new file mode 100644 index 000000000000..8ec10d55d421 --- /dev/null +++ b/drivers/thermal/intel/intel_tcc_cooling.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * cooling device driver that activates the processor throttling by + * programming the TCC Offset register. + * Copyright (c) 2021, Intel Corporation. 
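+ *
+ * Example (illustrative, not part of this patch): the TCC offset is
+ * subtracted from TjMax, so cooling state N lowers the throttling
+ * temperature by N degrees Celsius. Assuming TjMax = 100C, state 10 makes
+ * throttling start near 100C - 10C = 90C. The maximum state is
+ * TCC_MASK >> TCC_SHIFT, i.e. 0x3f = 63.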
+ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/thermal.h> +#include <asm/cpu_device_id.h> + +#define TCC_SHIFT 24 +#define TCC_MASK (0x3fULL<<24) +#define TCC_PROGRAMMABLE BIT(30) + +static struct thermal_cooling_device *tcc_cdev; + +static int tcc_get_max_state(struct thermal_cooling_device *cdev, unsigned long + *state) +{ + *state = TCC_MASK >> TCC_SHIFT; + return 0; +} + +static int tcc_offset_update(int tcc) +{ + u64 val; + int err; + + err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val); + if (err) + return err; + + val &= ~TCC_MASK; + val |= tcc << TCC_SHIFT; + + err = wrmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, val); + if (err) + return err; + + return 0; +} + +static int tcc_get_cur_state(struct thermal_cooling_device *cdev, unsigned long + *state) +{ + u64 val; + int err; + + err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val); + if (err) + return err; + + *state = (val & TCC_MASK) >> TCC_SHIFT; + return 0; +} + +static int tcc_set_cur_state(struct thermal_cooling_device *cdev, unsigned long + state) +{ + return tcc_offset_update(state); +} + +static const struct thermal_cooling_device_ops tcc_cooling_ops = { + .get_max_state = tcc_get_max_state, + .get_cur_state = tcc_get_cur_state, + .set_cur_state = tcc_set_cur_state, +}; + +static const struct x86_cpu_id tcc_ids[] __initconst = { + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL), + {} +}; + +MODULE_DEVICE_TABLE(x86cpu, tcc_ids); + +static int __init tcc_cooling_init(void) +{ + int ret; + u64 val; + const struct x86_cpu_id *id; + + int err; + + id = x86_match_cpu(tcc_ids); + if (!id) + return -ENODEV; + + err = rdmsrl_safe(MSR_PLATFORM_INFO, &val); + if (err) + return err; + + if (!(val & TCC_PROGRAMMABLE)) + return -ENODEV; + + pr_info("Programmable TCC Offset detected\n"); + + tcc_cdev = + thermal_cooling_device_register("TCC Offset", NULL, + &tcc_cooling_ops); + if (IS_ERR(tcc_cdev)) { + ret = PTR_ERR(tcc_cdev); + return ret; + } + return 0; +} + +module_init(tcc_cooling_init) + +static void __exit tcc_cooling_exit(void) +{ + thermal_cooling_device_unregister(tcc_cdev); +} + +module_exit(tcc_cooling_exit) + +MODULE_DESCRIPTION("TCC offset cooling device Driver"); +MODULE_AUTHOR("Zhang Rui <[email protected]>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c index 149c6d7fd5a0..97e8678ccf0e 100644 --- a/drivers/thermal/mtk_thermal.c +++ b/drivers/thermal/mtk_thermal.c @@ -573,12 +573,12 @@ static int raw_to_mcelsius_v1(struct mtk_thermal *mt, int sensno, s32 raw) static int raw_to_mcelsius_v2(struct mtk_thermal *mt, int sensno, s32 raw) { - s32 format_1 = 0; - s32 format_2 = 0; - s32 g_oe = 1; - s32 g_gain = 1; - s32 g_x_roomt = 0; - s32 tmp = 0; + s32 format_1; + s32 format_2; + s32 g_oe; + s32 g_gain; + s32 g_x_roomt; + s32 tmp; if (raw == 0) return 0; diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c index 6dc879fea9c8..7419e196dbb0 100644 --- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c @@ -17,6 +17,7 @@ #include 
"../thermal_core.h" +#define QPNP_TM_REG_DIG_MAJOR 0x01 #define QPNP_TM_REG_TYPE 0x04 #define QPNP_TM_REG_SUBTYPE 0x05 #define QPNP_TM_REG_STATUS 0x08 @@ -38,26 +39,30 @@ #define ALARM_CTRL_FORCE_ENABLE BIT(7) -/* - * Trip point values based on threshold control - * 0 = {105 C, 125 C, 145 C} - * 1 = {110 C, 130 C, 150 C} - * 2 = {115 C, 135 C, 155 C} - * 3 = {120 C, 140 C, 160 C} -*/ -#define TEMP_STAGE_STEP 20000 /* Stage step: 20.000 C */ -#define TEMP_STAGE_HYSTERESIS 2000 +#define THRESH_COUNT 4 +#define STAGE_COUNT 3 + +/* Over-temperature trip point values in mC */ +static const long temp_map_gen1[THRESH_COUNT][STAGE_COUNT] = { + { 105000, 125000, 145000 }, + { 110000, 130000, 150000 }, + { 115000, 135000, 155000 }, + { 120000, 140000, 160000 }, +}; + +static const long temp_map_gen2_v1[THRESH_COUNT][STAGE_COUNT] = { + { 90000, 110000, 140000 }, + { 95000, 115000, 145000 }, + { 100000, 120000, 150000 }, + { 105000, 125000, 155000 }, +}; -#define TEMP_THRESH_MIN 105000 /* Threshold Min: 105 C */ -#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */ +#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */ #define THRESH_MIN 0 #define THRESH_MAX 3 -/* Stage 2 Threshold Min: 125 C */ -#define STAGE2_THRESHOLD_MIN 125000 -/* Stage 2 Threshold Max: 140 C */ -#define STAGE2_THRESHOLD_MAX 140000 +#define TEMP_STAGE_HYSTERESIS 2000 /* Temperature in Milli Celsius reported during stage 0 if no ADC is present */ #define DEFAULT_TEMP 37000 @@ -77,6 +82,7 @@ struct qpnp_tm_chip { bool initialized; struct iio_channel *adc; + const long (*temp_map)[THRESH_COUNT][STAGE_COUNT]; }; /* This array maps from GEN2 alarm state to GEN1 alarm stage */ @@ -101,6 +107,23 @@ static int qpnp_tm_write(struct qpnp_tm_chip *chip, u16 addr, u8 data) } /** + * qpnp_tm_decode_temp() - return temperature in mC corresponding to the + * specified over-temperature stage + * @chip: Pointer to the qpnp_tm chip + * @stage: Over-temperature stage + * + * Return: temperature in mC + */ +static long qpnp_tm_decode_temp(struct qpnp_tm_chip *chip, unsigned int stage) +{ + if (!chip->temp_map || chip->thresh >= THRESH_COUNT || stage == 0 || + stage > STAGE_COUNT) + return 0; + + return (*chip->temp_map)[chip->thresh][stage - 1]; +} + +/** * qpnp_tm_get_temp_stage() - return over-temperature stage * @chip: Pointer to the qpnp_tm chip * @@ -149,14 +172,12 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip) if (stage_new > stage_old) { /* increasing stage, use lower bound */ - chip->temp = (stage_new - 1) * TEMP_STAGE_STEP + - chip->thresh * TEMP_THRESH_STEP + - TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN; + chip->temp = qpnp_tm_decode_temp(chip, stage_new) + + TEMP_STAGE_HYSTERESIS; } else if (stage_new < stage_old) { /* decreasing stage, use upper bound */ - chip->temp = stage_new * TEMP_STAGE_STEP + - chip->thresh * TEMP_THRESH_STEP - - TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN; + chip->temp = qpnp_tm_decode_temp(chip, stage_new + 1) + - TEMP_STAGE_HYSTERESIS; } chip->stage = stage; @@ -199,26 +220,28 @@ static int qpnp_tm_get_temp(void *data, int *temp) static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip, int temp) { - u8 reg; + long stage2_threshold_min = (*chip->temp_map)[THRESH_MIN][1]; + long stage2_threshold_max = (*chip->temp_map)[THRESH_MAX][1]; bool disable_s2_shutdown = false; + u8 reg; WARN_ON(!mutex_is_locked(&chip->lock)); /* * Default: S2 and S3 shutdown enabled, thresholds at - * 105C/125C/145C, monitoring at 25Hz + * lowest threshold set, monitoring at 25Hz */ reg = 
SHUTDOWN_CTRL1_RATE_25HZ; if (temp == THERMAL_TEMP_INVALID || - temp < STAGE2_THRESHOLD_MIN) { + temp < stage2_threshold_min) { chip->thresh = THRESH_MIN; goto skip; } - if (temp <= STAGE2_THRESHOLD_MAX) { + if (temp <= stage2_threshold_max) { chip->thresh = THRESH_MAX - - ((STAGE2_THRESHOLD_MAX - temp) / + ((stage2_threshold_max - temp) / TEMP_THRESH_STEP); disable_s2_shutdown = true; } else { @@ -326,9 +349,7 @@ static int qpnp_tm_init(struct qpnp_tm_chip *chip) ? chip->stage : alarm_state_map[chip->stage]; if (stage) - chip->temp = chip->thresh * TEMP_THRESH_STEP + - (stage - 1) * TEMP_STAGE_STEP + - TEMP_THRESH_MIN; + chip->temp = qpnp_tm_decode_temp(chip, stage); crit_temp = qpnp_tm_get_critical_trip_temp(chip); ret = qpnp_tm_update_critical_trip_temp(chip, crit_temp); @@ -350,7 +371,7 @@ static int qpnp_tm_probe(struct platform_device *pdev) { struct qpnp_tm_chip *chip; struct device_node *node; - u8 type, subtype; + u8 type, subtype, dig_major; u32 res; int ret, irq; @@ -400,6 +421,12 @@ static int qpnp_tm_probe(struct platform_device *pdev) return ret; } + ret = qpnp_tm_read(chip, QPNP_TM_REG_DIG_MAJOR, &dig_major); + if (ret < 0) { + dev_err(&pdev->dev, "could not read dig_major\n"); + return ret; + } + if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1 && subtype != QPNP_TM_SUBTYPE_GEN2)) { dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n", @@ -408,6 +435,10 @@ static int qpnp_tm_probe(struct platform_device *pdev) } chip->subtype = subtype; + if (subtype == QPNP_TM_SUBTYPE_GEN2 && dig_major >= 1) + chip->temp_map = &temp_map_gen2_v1; + else + chip->temp_map = &temp_map_gen1; /* * Register the sensor before initializing the hardware to be able to diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c index 2a28a5af209e..67c1748cdf73 100644 --- a/drivers/thermal/qcom/tsens-8960.c +++ b/drivers/thermal/qcom/tsens-8960.c @@ -10,8 +10,6 @@ #include <linux/thermal.h> #include "tsens.h" -#define CAL_MDEGC 30000 - #define CONFIG_ADDR 0x3640 #define CONFIG_ADDR_8660 0x3620 /* CONFIG_ADDR bitmasks */ @@ -21,40 +19,38 @@ #define CONFIG_SHIFT_8660 28 #define CONFIG_MASK_8660 (3 << CONFIG_SHIFT_8660) -#define STATUS_CNTL_ADDR_8064 0x3660 #define CNTL_ADDR 0x3620 /* CNTL_ADDR bitmasks */ #define EN BIT(0) #define SW_RST BIT(1) -#define SENSOR0_EN BIT(3) + +#define MEASURE_PERIOD BIT(18) #define SLP_CLK_ENA BIT(26) #define SLP_CLK_ENA_8660 BIT(24) -#define MEASURE_PERIOD 1 #define SENSOR0_SHIFT 3 -/* INT_STATUS_ADDR bitmasks */ -#define MIN_STATUS_MASK BIT(0) -#define LOWER_STATUS_CLR BIT(1) -#define UPPER_STATUS_CLR BIT(2) -#define MAX_STATUS_MASK BIT(3) - #define THRESHOLD_ADDR 0x3624 -/* THRESHOLD_ADDR bitmasks */ -#define THRESHOLD_MAX_LIMIT_SHIFT 24 -#define THRESHOLD_MIN_LIMIT_SHIFT 16 -#define THRESHOLD_UPPER_LIMIT_SHIFT 8 -#define THRESHOLD_LOWER_LIMIT_SHIFT 0 - -/* Initial temperature threshold values */ -#define LOWER_LIMIT_TH 0x50 -#define UPPER_LIMIT_TH 0xdf -#define MIN_LIMIT_TH 0x0 -#define MAX_LIMIT_TH 0xff - -#define S0_STATUS_ADDR 0x3628 + #define INT_STATUS_ADDR 0x363c -#define TRDY_MASK BIT(7) -#define TIMEOUT_US 100 + +#define S0_STATUS_OFF 0x3628 +#define S1_STATUS_OFF 0x362c +#define S2_STATUS_OFF 0x3630 +#define S3_STATUS_OFF 0x3634 +#define S4_STATUS_OFF 0x3638 +#define S5_STATUS_OFF 0x3664 /* Sensors 5-10 found on apq8064/msm8960 */ +#define S6_STATUS_OFF 0x3668 +#define S7_STATUS_OFF 0x366c +#define S8_STATUS_OFF 0x3670 +#define S9_STATUS_OFF 0x3674 +#define S10_STATUS_OFF 0x3678 + +/* Original slope - 350 to 
compensate mC to C inaccuracy */ +static u32 tsens_msm8960_slope[] = { + 826, 826, 804, 826, + 761, 782, 782, 849, + 782, 849, 782 + }; static int suspend_8960(struct tsens_priv *priv) { @@ -115,17 +111,34 @@ static int resume_8960(struct tsens_priv *priv) static int enable_8960(struct tsens_priv *priv, int id) { int ret; - u32 reg, mask; + u32 reg, mask = BIT(id); ret = regmap_read(priv->tm_map, CNTL_ADDR, ®); if (ret) return ret; - mask = BIT(id + SENSOR0_SHIFT); + /* HARDWARE BUG: + * On platforms with more than 6 sensors, all remaining sensors + * must be enabled together, otherwise undefined results are expected. + * (Sensor 6-7 disabled, Sensor 3 disabled...) In the original driver, + * all the sensors are enabled in one step hence this bug is not + * triggered. + */ + if (id > 5) + mask = GENMASK(10, 6); + + mask <<= SENSOR0_SHIFT; + + /* Sensors already enabled. Skip. */ + if ((reg & mask) == mask) + return 0; + ret = regmap_write(priv->tm_map, CNTL_ADDR, reg | SW_RST); if (ret) return ret; + reg |= MEASURE_PERIOD; + if (priv->num_sensors > 1) reg |= mask | SLP_CLK_ENA | EN; else @@ -162,63 +175,11 @@ static void disable_8960(struct tsens_priv *priv) regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl); } -static int init_8960(struct tsens_priv *priv) -{ - int ret, i; - u32 reg_cntl; - - priv->tm_map = dev_get_regmap(priv->dev, NULL); - if (!priv->tm_map) - return -ENODEV; - - /* - * The status registers for each sensor are discontiguous - * because some SoCs have 5 sensors while others have more - * but the control registers stay in the same place, i.e - * directly after the first 5 status registers. - */ - for (i = 0; i < priv->num_sensors; i++) { - if (i >= 5) - priv->sensor[i].status = S0_STATUS_ADDR + 40; - priv->sensor[i].status += i * 4; - } - - reg_cntl = SW_RST; - ret = regmap_update_bits(priv->tm_map, CNTL_ADDR, SW_RST, reg_cntl); - if (ret) - return ret; - - if (priv->num_sensors > 1) { - reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18); - reg_cntl &= ~SW_RST; - ret = regmap_update_bits(priv->tm_map, CONFIG_ADDR, - CONFIG_MASK, CONFIG); - } else { - reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16); - reg_cntl &= ~CONFIG_MASK_8660; - reg_cntl |= CONFIG_8660 << CONFIG_SHIFT_8660; - } - - reg_cntl |= GENMASK(priv->num_sensors - 1, 0) << SENSOR0_SHIFT; - ret = regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl); - if (ret) - return ret; - - reg_cntl |= EN; - ret = regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl); - if (ret) - return ret; - - return 0; -} - static int calibrate_8960(struct tsens_priv *priv) { int i; char *data; - - ssize_t num_read = priv->num_sensors; - struct tsens_sensor *s = priv->sensor; + u32 p1[11]; data = qfprom_read(priv->dev, "calib"); if (IS_ERR(data)) @@ -226,60 +187,96 @@ static int calibrate_8960(struct tsens_priv *priv) if (IS_ERR(data)) return PTR_ERR(data); - for (i = 0; i < num_read; i++, s++) - s->offset = data[i]; + for (i = 0; i < priv->num_sensors; i++) { + p1[i] = data[i]; + priv->sensor[i].slope = tsens_msm8960_slope[i]; + } + + compute_intercept_slope(priv, p1, NULL, ONE_PT_CALIB); kfree(data); return 0; } -/* Temperature on y axis and ADC-code on x-axis */ -static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s) -{ - int slope, offset; - - slope = thermal_zone_get_slope(s->tzd); - offset = CAL_MDEGC - slope * s->offset; - - return adc_code * slope + offset; -} - -static int get_temp_8960(const struct tsens_sensor *s, int *temp) -{ - int ret; - u32 code, trdy; - struct tsens_priv *priv = s->priv; - unsigned long 
timeout; - - timeout = jiffies + usecs_to_jiffies(TIMEOUT_US); - do { - ret = regmap_read(priv->tm_map, INT_STATUS_ADDR, &trdy); - if (ret) - return ret; - if (!(trdy & TRDY_MASK)) - continue; - ret = regmap_read(priv->tm_map, s->status, &code); - if (ret) - return ret; - *temp = code_to_mdegC(code, s); - return 0; - } while (time_before(jiffies, timeout)); - - return -ETIMEDOUT; -} +static const struct reg_field tsens_8960_regfields[MAX_REGFIELDS] = { + /* ----- SROT ------ */ + /* No VERSION information */ + + /* CNTL */ + [TSENS_EN] = REG_FIELD(CNTL_ADDR, 0, 0), + [TSENS_SW_RST] = REG_FIELD(CNTL_ADDR, 1, 1), + /* 8960 has 5 sensors, 8660 has 11, we only handle 5 */ + [SENSOR_EN] = REG_FIELD(CNTL_ADDR, 3, 7), + + /* ----- TM ------ */ + /* INTERRUPT ENABLE */ + /* NO INTERRUPT ENABLE */ + + /* Single UPPER/LOWER TEMPERATURE THRESHOLD for all sensors */ + [LOW_THRESH_0] = REG_FIELD(THRESHOLD_ADDR, 0, 7), + [UP_THRESH_0] = REG_FIELD(THRESHOLD_ADDR, 8, 15), + /* MIN_THRESH_0 and MAX_THRESH_0 are not present in the regfield + * Recycle CRIT_THRESH_0 and 1 to set the required regs to hardcoded temp + * MIN_THRESH_0 -> CRIT_THRESH_1 + * MAX_THRESH_0 -> CRIT_THRESH_0 + */ + [CRIT_THRESH_1] = REG_FIELD(THRESHOLD_ADDR, 16, 23), + [CRIT_THRESH_0] = REG_FIELD(THRESHOLD_ADDR, 24, 31), + + /* UPPER/LOWER INTERRUPT [CLEAR/STATUS] */ + /* 1 == clear, 0 == normal operation */ + [LOW_INT_CLEAR_0] = REG_FIELD(CNTL_ADDR, 9, 9), + [UP_INT_CLEAR_0] = REG_FIELD(CNTL_ADDR, 10, 10), + + /* NO CRITICAL INTERRUPT SUPPORT on 8960 */ + + /* Sn_STATUS */ + [LAST_TEMP_0] = REG_FIELD(S0_STATUS_OFF, 0, 7), + [LAST_TEMP_1] = REG_FIELD(S1_STATUS_OFF, 0, 7), + [LAST_TEMP_2] = REG_FIELD(S2_STATUS_OFF, 0, 7), + [LAST_TEMP_3] = REG_FIELD(S3_STATUS_OFF, 0, 7), + [LAST_TEMP_4] = REG_FIELD(S4_STATUS_OFF, 0, 7), + [LAST_TEMP_5] = REG_FIELD(S5_STATUS_OFF, 0, 7), + [LAST_TEMP_6] = REG_FIELD(S6_STATUS_OFF, 0, 7), + [LAST_TEMP_7] = REG_FIELD(S7_STATUS_OFF, 0, 7), + [LAST_TEMP_8] = REG_FIELD(S8_STATUS_OFF, 0, 7), + [LAST_TEMP_9] = REG_FIELD(S9_STATUS_OFF, 0, 7), + [LAST_TEMP_10] = REG_FIELD(S10_STATUS_OFF, 0, 7), + + /* No VALID field on 8960 */ + /* TSENS_INT_STATUS bits: 1 == threshold violated */ + [MIN_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 0, 0), + [LOWER_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 1, 1), + [UPPER_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 2, 2), + /* No CRITICAL field on 8960 */ + [MAX_STATUS_0] = REG_FIELD(INT_STATUS_ADDR, 3, 3), + + /* TRDY: 1=ready, 0=in progress */ + [TRDY] = REG_FIELD(INT_STATUS_ADDR, 7, 7), +}; static const struct tsens_ops ops_8960 = { - .init = init_8960, + .init = init_common, .calibrate = calibrate_8960, - .get_temp = get_temp_8960, + .get_temp = get_temp_common, .enable = enable_8960, .disable = disable_8960, .suspend = suspend_8960, .resume = resume_8960, }; +static struct tsens_features tsens_8960_feat = { + .ver_major = VER_0, + .crit_int = 0, + .adc = 1, + .srot_split = 0, + .max_sensors = 11, +}; + struct tsens_plat_data data_8960 = { .num_sensors = 11, .ops = &ops_8960, + .feat = &tsens_8960_feat, + .fields = tsens_8960_regfields, }; diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c index 4ffa2e2c0145..f136cb350238 100644 --- a/drivers/thermal/qcom/tsens-v0_1.c +++ b/drivers/thermal/qcom/tsens-v0_1.c @@ -190,6 +190,39 @@ #define BIT_APPEND 0x3 +/* eeprom layout data for mdm9607 */ +#define MDM9607_BASE0_MASK 0x000000ff +#define MDM9607_BASE1_MASK 0x000ff000 +#define MDM9607_BASE0_SHIFT 0 +#define MDM9607_BASE1_SHIFT 12 + +#define MDM9607_S0_P1_MASK 
0x00003f00 +#define MDM9607_S1_P1_MASK 0x03f00000 +#define MDM9607_S2_P1_MASK 0x0000003f +#define MDM9607_S3_P1_MASK 0x0003f000 +#define MDM9607_S4_P1_MASK 0x0000003f + +#define MDM9607_S0_P2_MASK 0x000fc000 +#define MDM9607_S1_P2_MASK 0xfc000000 +#define MDM9607_S2_P2_MASK 0x00000fc0 +#define MDM9607_S3_P2_MASK 0x00fc0000 +#define MDM9607_S4_P2_MASK 0x00000fc0 + +#define MDM9607_S0_P1_SHIFT 8 +#define MDM9607_S1_P1_SHIFT 20 +#define MDM9607_S2_P1_SHIFT 0 +#define MDM9607_S3_P1_SHIFT 12 +#define MDM9607_S4_P1_SHIFT 0 + +#define MDM9607_S0_P2_SHIFT 14 +#define MDM9607_S1_P2_SHIFT 26 +#define MDM9607_S2_P2_SHIFT 6 +#define MDM9607_S3_P2_SHIFT 18 +#define MDM9607_S4_P2_SHIFT 6 + +#define MDM9607_CAL_SEL_MASK 0x00700000 +#define MDM9607_CAL_SEL_SHIFT 20 + static int calibrate_8916(struct tsens_priv *priv) { int base0 = 0, base1 = 0, i; @@ -452,7 +485,56 @@ static int calibrate_8974(struct tsens_priv *priv) return 0; } -/* v0.1: 8916, 8939, 8974 */ +static int calibrate_9607(struct tsens_priv *priv) +{ + int base, i; + u32 p1[5], p2[5]; + int mode = 0; + u32 *qfprom_cdata; + + qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib"); + if (IS_ERR(qfprom_cdata)) + return PTR_ERR(qfprom_cdata); + + mode = (qfprom_cdata[2] & MDM9607_CAL_SEL_MASK) >> MDM9607_CAL_SEL_SHIFT; + dev_dbg(priv->dev, "calibration mode is %d\n", mode); + + switch (mode) { + case TWO_PT_CALIB: + base = (qfprom_cdata[2] & MDM9607_BASE1_MASK) >> MDM9607_BASE1_SHIFT; + p2[0] = (qfprom_cdata[0] & MDM9607_S0_P2_MASK) >> MDM9607_S0_P2_SHIFT; + p2[1] = (qfprom_cdata[0] & MDM9607_S1_P2_MASK) >> MDM9607_S1_P2_SHIFT; + p2[2] = (qfprom_cdata[1] & MDM9607_S2_P2_MASK) >> MDM9607_S2_P2_SHIFT; + p2[3] = (qfprom_cdata[1] & MDM9607_S3_P2_MASK) >> MDM9607_S3_P2_SHIFT; + p2[4] = (qfprom_cdata[2] & MDM9607_S4_P2_MASK) >> MDM9607_S4_P2_SHIFT; + for (i = 0; i < priv->num_sensors; i++) + p2[i] = ((base + p2[i]) << 2); + fallthrough; + case ONE_PT_CALIB2: + base = (qfprom_cdata[0] & MDM9607_BASE0_MASK); + p1[0] = (qfprom_cdata[0] & MDM9607_S0_P1_MASK) >> MDM9607_S0_P1_SHIFT; + p1[1] = (qfprom_cdata[0] & MDM9607_S1_P1_MASK) >> MDM9607_S1_P1_SHIFT; + p1[2] = (qfprom_cdata[1] & MDM9607_S2_P1_MASK) >> MDM9607_S2_P1_SHIFT; + p1[3] = (qfprom_cdata[1] & MDM9607_S3_P1_MASK) >> MDM9607_S3_P1_SHIFT; + p1[4] = (qfprom_cdata[2] & MDM9607_S4_P1_MASK) >> MDM9607_S4_P1_SHIFT; + for (i = 0; i < priv->num_sensors; i++) + p1[i] = ((base + p1[i]) << 2); + break; + default: + for (i = 0; i < priv->num_sensors; i++) { + p1[i] = 500; + p2[i] = 780; + } + break; + } + + compute_intercept_slope(priv, p1, p2, mode); + kfree(qfprom_cdata); + + return 0; +} + +/* v0.1: 8916, 8939, 8974, 9607 */ static struct tsens_features tsens_v0_1_feat = { .ver_major = VER_0_1, @@ -540,3 +622,17 @@ struct tsens_plat_data data_8974 = { .feat = &tsens_v0_1_feat, .fields = tsens_v0_1_regfields, }; + +static const struct tsens_ops ops_9607 = { + .init = init_common, + .calibrate = calibrate_9607, + .get_temp = get_temp_common, +}; + +struct tsens_plat_data data_9607 = { + .num_sensors = 5, + .ops = &ops_9607, + .hw_ids = (unsigned int []){ 0, 1, 2, 3, 4 }, + .feat = &tsens_v0_1_feat, + .fields = tsens_v0_1_regfields, +}; diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c index 3c19a3800c6d..573e261ccca7 100644 --- a/drivers/thermal/qcom/tsens-v1.c +++ b/drivers/thermal/qcom/tsens-v1.c @@ -380,11 +380,11 @@ static const struct tsens_ops ops_8976 = { .get_temp = get_temp_tsens_valid, }; -/* Valid for both MSM8956 and MSM8976. Sensor ID 3 is unused. 
*/ +/* Valid for both MSM8956 and MSM8976. */ struct tsens_plat_data data_8976 = { .num_sensors = 11, .ops = &ops_8976, - .hw_ids = (unsigned int[]){0, 1, 2, 4, 5, 6, 7, 8, 9, 10}, + .hw_ids = (unsigned int[]){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, .feat = &tsens_v1_feat, .fields = tsens_v1_regfields, }; diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index d8ce3a687b80..4c7ebd1d3f9c 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -12,6 +12,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_platform.h> +#include <linux/mfd/syscon.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/regmap.h> @@ -85,7 +86,8 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1, "%s: sensor%d - data_point1:%#x data_point2:%#x\n", __func__, i, p1[i], p2[i]); - priv->sensor[i].slope = SLOPE_DEFAULT; + if (!priv->sensor[i].slope) + priv->sensor[i].slope = SLOPE_DEFAULT; if (mode == TWO_PT_CALIB) { /* * slope (m) = adc_code2 - adc_code1 (y2 - y1)/ @@ -515,6 +517,15 @@ static irqreturn_t tsens_irq_thread(int irq, void *data) dev_dbg(priv->dev, "[%u] %s: no violation: %d\n", hw_id, __func__, temp); } + + if (tsens_version(priv) < VER_0_1) { + /* Constraint: There is only 1 interrupt control register for all + * 11 temperature sensor. So monitoring more than 1 sensor based + * on interrupts will yield inconsistent result. To overcome this + * issue we will monitor only sensor 0 which is the master sensor. + */ + break; + } } return IRQ_HANDLED; @@ -530,6 +541,13 @@ static int tsens_set_trips(void *_sensor, int low, int high) int high_val, low_val, cl_high, cl_low; u32 hw_id = s->hw_id; + if (tsens_version(priv) < VER_0_1) { + /* Pre v0.1 IP had a single register for each type of interrupt + * and thresholds + */ + hw_id = 0; + } + dev_dbg(dev, "[%u] %s: proposed thresholds: (%d:%d)\n", hw_id, __func__, low, high); @@ -584,18 +602,21 @@ int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp) u32 valid; int ret; - ret = regmap_field_read(priv->rf[valid_idx], &valid); - if (ret) - return ret; - while (!valid) { - /* Valid bit is 0 for 6 AHB clock cycles. - * At 19.2MHz, 1 AHB clock is ~60ns. - * We should enter this loop very, very rarely. - */ - ndelay(400); + /* VER_0 doesn't have VALID bit */ + if (tsens_version(priv) >= VER_0_1) { ret = regmap_field_read(priv->rf[valid_idx], &valid); if (ret) return ret; + while (!valid) { + /* Valid bit is 0 for 6 AHB clock cycles. + * At 19.2MHz, 1 AHB clock is ~60ns. + * We should enter this loop very, very rarely. 
+ */ + ndelay(400); + ret = regmap_field_read(priv->rf[valid_idx], &valid); + if (ret) + return ret; + } } /* Valid bit is set, OK to read the temperature */ @@ -608,15 +629,29 @@ int get_temp_common(const struct tsens_sensor *s, int *temp) { struct tsens_priv *priv = s->priv; int hw_id = s->hw_id; - int last_temp = 0, ret; + int last_temp = 0, ret, trdy; + unsigned long timeout; - ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], &last_temp); - if (ret) - return ret; + timeout = jiffies + usecs_to_jiffies(TIMEOUT_US); + do { + if (tsens_version(priv) == VER_0) { + ret = regmap_field_read(priv->rf[TRDY], &trdy); + if (ret) + return ret; + if (!trdy) + continue; + } - *temp = code_to_degc(last_temp, s) * 1000; + ret = regmap_field_read(priv->rf[LAST_TEMP_0 + hw_id], &last_temp); + if (ret) + return ret; - return 0; + *temp = code_to_degc(last_temp, s) * 1000; + + return 0; + } while (time_before(jiffies, timeout)); + + return -ETIMEDOUT; } #ifdef CONFIG_DEBUG_FS @@ -738,25 +773,42 @@ int __init init_common(struct tsens_priv *priv) priv->tm_offset = 0x1000; } - res = platform_get_resource(op, IORESOURCE_MEM, 0); - tm_base = devm_ioremap_resource(dev, res); - if (IS_ERR(tm_base)) { - ret = PTR_ERR(tm_base); - goto err_put_device; + if (tsens_version(priv) >= VER_0_1) { + res = platform_get_resource(op, IORESOURCE_MEM, 0); + tm_base = devm_ioremap_resource(dev, res); + if (IS_ERR(tm_base)) { + ret = PTR_ERR(tm_base); + goto err_put_device; + } + + priv->tm_map = devm_regmap_init_mmio(dev, tm_base, &tsens_config); + } else { /* VER_0 shares the same gcc regs using a syscon */ + struct device *parent = priv->dev->parent; + + if (parent) + priv->tm_map = syscon_node_to_regmap(parent->of_node); } - priv->tm_map = devm_regmap_init_mmio(dev, tm_base, &tsens_config); - if (IS_ERR(priv->tm_map)) { - ret = PTR_ERR(priv->tm_map); + if (IS_ERR_OR_NULL(priv->tm_map)) { + if (!priv->tm_map) + ret = -ENODEV; + else + ret = PTR_ERR(priv->tm_map); goto err_put_device; } + /* VER_0 has only tm_map */ + if (!priv->srot_map) + priv->srot_map = priv->tm_map; + if (tsens_version(priv) > VER_0_1) { for (i = VER_MAJOR; i <= VER_STEP; i++) { priv->rf[i] = devm_regmap_field_alloc(dev, priv->srot_map, priv->fields[i]); - if (IS_ERR(priv->rf[i])) - return PTR_ERR(priv->rf[i]); + if (IS_ERR(priv->rf[i])) { + ret = PTR_ERR(priv->rf[i]); + goto err_put_device; + } } ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor); if (ret) @@ -769,6 +821,10 @@ int __init init_common(struct tsens_priv *priv) ret = PTR_ERR(priv->rf[TSENS_EN]); goto err_put_device; } + /* in VER_0 TSENS needs to be explicitly enabled */ + if (tsens_version(priv) == VER_0) + regmap_field_write(priv->rf[TSENS_EN], 1); + ret = regmap_field_read(priv->rf[TSENS_EN], &enabled); if (ret) goto err_put_device; @@ -791,6 +847,19 @@ int __init init_common(struct tsens_priv *priv) goto err_put_device; } + priv->rf[TSENS_SW_RST] = + devm_regmap_field_alloc(dev, priv->srot_map, priv->fields[TSENS_SW_RST]); + if (IS_ERR(priv->rf[TSENS_SW_RST])) { + ret = PTR_ERR(priv->rf[TSENS_SW_RST]); + goto err_put_device; + } + + priv->rf[TRDY] = devm_regmap_field_alloc(dev, priv->tm_map, priv->fields[TRDY]); + if (IS_ERR(priv->rf[TRDY])) { + ret = PTR_ERR(priv->rf[TRDY]); + goto err_put_device; + } + /* This loop might need changes if enum regfield_ids is reordered */ for (j = LAST_TEMP_0; j <= UP_THRESH_15; j += 16) { for (i = 0; i < priv->feat->max_sensors; i++) { @@ -806,7 +875,7 @@ int __init init_common(struct tsens_priv *priv) } } - if (priv->feat->crit_int) { + if
(priv->feat->crit_int || tsens_version(priv) < VER_0_1) { /* Loop might need changes if enum regfield_ids is reordered */ for (j = CRITICAL_STATUS_0; j <= CRIT_THRESH_15; j += 16) { for (i = 0; i < priv->feat->max_sensors; i++) { @@ -844,7 +913,11 @@ } spin_lock_init(&priv->ul_lock); - tsens_enable_irq(priv); + + /* VER_0 interrupt doesn't need to be enabled */ + if (tsens_version(priv) >= VER_0_1) + tsens_enable_irq(priv); + tsens_debug_init(op); err_put_device: @@ -895,6 +968,12 @@ static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume); static const struct of_device_id tsens_table[] = { { + .compatible = "qcom,ipq8064-tsens", + .data = &data_8960, + }, { + .compatible = "qcom,mdm9607-tsens", + .data = &data_9607, + }, { .compatible = "qcom,msm8916-tsens", .data = &data_8916, }, { @@ -943,10 +1022,19 @@ static int tsens_register_irq(struct tsens_priv *priv, char *irqname, if (irq == -ENXIO) ret = 0; } else { - ret = devm_request_threaded_irq(&pdev->dev, irq, - NULL, thread_fn, - IRQF_ONESHOT, - dev_name(&pdev->dev), priv); + /* VER_0 interrupt is TRIGGER_RISING, VER_0_1 and up is ONESHOT */ + if (tsens_version(priv) == VER_0) + ret = devm_request_threaded_irq(&pdev->dev, irq, + thread_fn, NULL, + IRQF_TRIGGER_RISING, + dev_name(&pdev->dev), + priv); + else + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, + thread_fn, IRQF_ONESHOT, + dev_name(&pdev->dev), + priv); + if (ret) dev_err(&pdev->dev, "%s: failed to get irq\n", __func__); @@ -975,6 +1063,19 @@ static int tsens_register(struct tsens_priv *priv) priv->ops->enable(priv, i); } + /* VER_0 requires MIN and MAX THRESH to be set. + * These two regs are set using: + * - CRIT_THRESH_0 for MAX THRESH, hardcoded to 120°C + * - CRIT_THRESH_1 for MIN THRESH, hardcoded to 0°C + */ + if (tsens_version(priv) < VER_0_1) { + regmap_field_write(priv->rf[CRIT_THRESH_0], + tsens_mC_to_hw(priv->sensor, 120000)); + + regmap_field_write(priv->rf[CRIT_THRESH_1], + tsens_mC_to_hw(priv->sensor, 0)); + } + ret = tsens_register_irq(priv, "uplow", tsens_irq_thread); if (ret < 0) return ret; diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h index f40b625f897e..1471a2c00f15 100644 --- a/drivers/thermal/qcom/tsens.h +++ b/drivers/thermal/qcom/tsens.h @@ -13,6 +13,7 @@ #define CAL_DEGC_PT2 120 #define SLOPE_FACTOR 1000 #define SLOPE_DEFAULT 3200 +#define TIMEOUT_US 100 #define THRESHOLD_MAX_ADC_CODE 0x3ff #define THRESHOLD_MIN_ADC_CODE 0x0 @@ -25,7 +26,8 @@ struct tsens_priv; /* IP version numbers in ascending order */ enum tsens_ver { - VER_0_1 = 0, + VER_0 = 0, + VER_0_1, VER_1_X, VER_2_X, }; @@ -585,7 +587,7 @@ int get_temp_common(const struct tsens_sensor *s, int *temp); extern struct tsens_plat_data data_8960; /* TSENS v0.1 targets */ -extern struct tsens_plat_data data_8916, data_8939, data_8974; +extern struct tsens_plat_data data_8916, data_8939, data_8974, data_9607; /* TSENS v1 targets */ extern struct tsens_plat_data data_tsens_v1, data_8976; diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 75c69fe6e955..e1e412348076 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -60,7 +60,7 @@ #define MCELSIUS(temp) ((temp) * 1000) #define GEN3_FUSE_MASK 0xFFF -#define TSC_MAX_NUM 4 +#define TSC_MAX_NUM 5 /* default THCODE values if FUSEs are missing */ static const int thcodes[TSC_MAX_NUM][3] = { @@ -68,6 +68,7 @@ static const int thcodes[TSC_MAX_NUM][3] = { { 3393, 2795, 2216 }, { 3389, 2805,
2237 }, { 3415, 2694, 2195 }, + { 3356, 2724, 2244 }, }; /* Structure for thermal temperature calculation */ diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c index 8c80bd06dd9f..d9cd23cbb671 100644 --- a/drivers/thermal/sun8i_thermal.c +++ b/drivers/thermal/sun8i_thermal.c @@ -300,7 +300,7 @@ static int sun8i_ths_calibrate(struct ths_device *tmdev) * or 0x8xx, so they won't be away from the default value * for a lot. * - * So here we do not return error if the calibartion data is + * So here we do not return error if the calibration data is * not available, except the probe needs deferring. */ goto out; @@ -418,7 +418,7 @@ static int sun8i_h3_thermal_init(struct ths_device *tmdev) } /* - * Without this undocummented value, the returned temperatures would + * Without this undocumented value, the returned temperatures would * be higher than real ones by about 20C. */ #define SUN50I_H6_CTRL0_UNK 0x0000002f diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c index 66e0639da4bf..8e303e9d1dc0 100644 --- a/drivers/thermal/tegra/soctherm.c +++ b/drivers/thermal/tegra/soctherm.c @@ -2118,7 +2118,6 @@ static int tegra_soctherm_probe(struct platform_device *pdev) struct tegra_soctherm *tegra; struct thermal_zone_device *z; struct tsensor_shared_calib shared_calib; - struct resource *res; struct tegra_soctherm_soc *soc; unsigned int i; int err; @@ -2140,26 +2139,20 @@ static int tegra_soctherm_probe(struct platform_device *pdev) tegra->soc = soc; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "soctherm-reg"); - tegra->regs = devm_ioremap_resource(&pdev->dev, res); + tegra->regs = devm_platform_ioremap_resource_byname(pdev, "soctherm-reg"); if (IS_ERR(tegra->regs)) { dev_err(&pdev->dev, "can't get soctherm registers"); return PTR_ERR(tegra->regs); } if (!tegra->soc->use_ccroc) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "car-reg"); - tegra->clk_regs = devm_ioremap_resource(&pdev->dev, res); + tegra->clk_regs = devm_platform_ioremap_resource_byname(pdev, "car-reg"); if (IS_ERR(tegra->clk_regs)) { dev_err(&pdev->dev, "can't get car clk registers"); return PTR_ERR(tegra->clk_regs); } } else { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "ccroc-reg"); - tegra->ccroc_regs = devm_ioremap_resource(&pdev->dev, res); + tegra->ccroc_regs = devm_platform_ioremap_resource_byname(pdev, "ccroc-reg"); if (IS_ERR(tegra->ccroc_regs)) { dev_err(&pdev->dev, "can't get ccroc registers"); return PTR_ERR(tegra->ccroc_regs); @@ -2195,7 +2188,7 @@ static int tegra_soctherm_probe(struct platform_device *pdev) if (err) return err; - /* calculate tsensor calibaration data */ + /* calculate tsensor calibration data */ for (i = 0; i < soc->num_tsensors; ++i) { err = tegra_calc_tsensor_calib(&soc->tsensors[i], &shared_calib, diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 996c038f83a4..d20b25f40d19 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -561,24 +561,6 @@ void thermal_zone_device_update(struct thermal_zone_device *tz, } EXPORT_SYMBOL_GPL(thermal_zone_device_update); -/** - * thermal_notify_framework - Sensor drivers use this API to notify framework - * @tz: thermal zone device - * @trip: indicates which trip point has been crossed - * - * This function handles the trip events from sensor drivers. It starts - * throttling the cooling devices according to the policy configured. 
- * For CRITICAL and HOT trip points, this notifies the respective drivers, - * and does actual throttling for other trip points i.e ACTIVE and PASSIVE. - * The throttling policy is based on the configured platform data; if no - * platform data is provided, this uses the step_wise throttling policy. - */ -void thermal_notify_framework(struct thermal_zone_device *tz, int trip) -{ - handle_thermal_trip(tz, trip); -} -EXPORT_SYMBOL_GPL(thermal_notify_framework); - static void thermal_zone_device_check(struct work_struct *work) { struct thermal_zone_device *tz = container_of(work, struct @@ -960,10 +942,7 @@ __thermal_cooling_device_register(struct device_node *np, { struct thermal_cooling_device *cdev; struct thermal_zone_device *pos = NULL; - int result; - - if (type && strlen(type) >= THERMAL_NAME_LENGTH) - return ERR_PTR(-EINVAL); + int ret; if (!ops || !ops->get_max_state || !ops->get_cur_state || !ops->set_cur_state) @@ -973,14 +952,17 @@ __thermal_cooling_device_register(struct device_node *np, if (!cdev) return ERR_PTR(-ENOMEM); - result = ida_simple_get(&thermal_cdev_ida, 0, 0, GFP_KERNEL); - if (result < 0) { - kfree(cdev); - return ERR_PTR(result); + ret = ida_simple_get(&thermal_cdev_ida, 0, 0, GFP_KERNEL); + if (ret < 0) + goto out_kfree_cdev; + cdev->id = ret; + + cdev->type = kstrdup(type ? type : "", GFP_KERNEL); + if (!cdev->type) { + ret = -ENOMEM; + goto out_ida_remove; } - cdev->id = result; - strlcpy(cdev->type, type ? : "", sizeof(cdev->type)); mutex_init(&cdev->lock); INIT_LIST_HEAD(&cdev->thermal_instances); cdev->np = np; @@ -990,12 +972,9 @@ __thermal_cooling_device_register(struct device_node *np, cdev->devdata = devdata; thermal_cooling_device_setup_sysfs(cdev); dev_set_name(&cdev->device, "cooling_device%d", cdev->id); - result = device_register(&cdev->device); - if (result) { - ida_simple_remove(&thermal_cdev_ida, cdev->id); - put_device(&cdev->device); - return ERR_PTR(result); - } + ret = device_register(&cdev->device); + if (ret) + goto out_kfree_type; /* Add 'this' new cdev to the global cdev list */ mutex_lock(&thermal_list_lock); @@ -1013,6 +992,15 @@ __thermal_cooling_device_register(struct device_node *np, mutex_unlock(&thermal_list_lock); return cdev; + +out_kfree_type: + kfree(cdev->type); + put_device(&cdev->device); +out_ida_remove: + ida_simple_remove(&thermal_cdev_ida, cdev->id); +out_kfree_cdev: + kfree(cdev); + return ERR_PTR(ret); } /** @@ -1171,6 +1159,7 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) ida_simple_remove(&thermal_cdev_ida, cdev->id); device_del(&cdev->device); thermal_cooling_device_destroy_sysfs(cdev); + kfree(cdev->type); put_device(&cdev->device); } EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister); diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 86b8cef7310e..726e327b4205 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -66,6 +66,7 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) } void thermal_cdev_update(struct thermal_cooling_device *); +void __thermal_cdev_update(struct thermal_cooling_device *cdev); /** * struct thermal_trip - representation of a point in temperature domain diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c index 7f50f412e02a..3edd047e144f 100644 --- a/drivers/thermal/thermal_helpers.c +++ b/drivers/thermal/thermal_helpers.c @@ -192,18 +192,11 @@ static void thermal_cdev_set_cur_state(struct thermal_cooling_device *cdev, 
thermal_cooling_device_stats_update(cdev, target); } -void thermal_cdev_update(struct thermal_cooling_device *cdev) +void __thermal_cdev_update(struct thermal_cooling_device *cdev) { struct thermal_instance *instance; unsigned long target = 0; - mutex_lock(&cdev->lock); - /* cooling device is updated*/ - if (cdev->updated) { - mutex_unlock(&cdev->lock); - return; - } - /* Make sure cdev enters the deepest cooling state */ list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) { dev_dbg(&cdev->device, "zone%d->target=%lu\n", @@ -216,11 +209,25 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev) thermal_cdev_set_cur_state(cdev, target); - cdev->updated = true; - mutex_unlock(&cdev->lock); trace_cdev_update(cdev, target); dev_dbg(&cdev->device, "set to state %lu\n", target); } + +/** + * thermal_cdev_update - update cooling device state if needed + * @cdev: pointer to struct thermal_cooling_device + * + * Update the cooling device state if there is a need. + */ +void thermal_cdev_update(struct thermal_cooling_device *cdev) +{ + mutex_lock(&cdev->lock); + if (!cdev->updated) { + __thermal_cdev_update(cdev); + cdev->updated = true; + } + mutex_unlock(&cdev->lock); +} EXPORT_SYMBOL(thermal_cdev_update); /** diff --git a/drivers/thermal/thermal_mmio.c b/drivers/thermal/thermal_mmio.c index d0bdf1ea3331..ded1dd0d4ef7 100644 --- a/drivers/thermal/thermal_mmio.c +++ b/drivers/thermal/thermal_mmio.c @@ -54,11 +54,8 @@ static int thermal_mmio_probe(struct platform_device *pdev) resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); sensor->mmio_base = devm_ioremap_resource(&pdev->dev, resource); - if (IS_ERR(sensor->mmio_base)) { - dev_err(&pdev->dev, "failed to ioremap memory (%ld)\n", - PTR_ERR(sensor->mmio_base)); + if (IS_ERR(sensor->mmio_base)) return PTR_ERR(sensor->mmio_base); - } sensor_init_func = device_get_match_data(&pdev->dev); if (sensor_init_func) { diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index 69ef12f852b7..5b76f9a1280d 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -704,14 +704,17 @@ static int thermal_of_populate_bind_params(struct device_node *np, count = of_count_phandle_with_args(np, "cooling-device", "#cooling-cells"); - if (!count) { + if (count <= 0) { pr_err("Add a cooling_device property with at least one device\n"); + ret = -ENOENT; goto end; } __tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL); - if (!__tcbp) + if (!__tcbp) { + ret = -ENOMEM; goto end; + } for (i = 0; i < count; i++) { ret = of_parse_phandle_with_args(np, "cooling-device", diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index 8a3646e26ddd..ebe7cb70bfb6 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c @@ -9,30 +9,29 @@ * Eduardo Valentin <[email protected]> */ -#include <linux/module.h> +#include <linux/clk.h> +#include <linux/cpu_pm.h> +#include <linux/device.h> +#include <linux/err.h> #include <linux/export.h> +#include <linux/gpio/consumer.h> #include <linux/init.h> -#include <linux/kernel.h> #include <linux/interrupt.h> -#include <linux/clk.h> -#include <linux/gpio/consumer.h> -#include <linux/platform_device.h> -#include <linux/err.h> -#include <linux/types.h> -#include <linux/spinlock.h> -#include <linux/sys_soc.h> -#include <linux/reboot.h> -#include <linux/of_device.h> -#include <linux/of_platform.h> -#include <linux/of_irq.h> #include <linux/io.h> #include <linux/iopoll.h> -#include 
<linux/cpu_pm.h> -#include <linux/device.h> -#include <linux/pm_runtime.h> -#include <linux/pm.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/reboot.h> +#include <linux/spinlock.h> +#include <linux/sys_soc.h> +#include <linux/types.h> #include "ti-bandgap.h" @@ -1143,14 +1142,10 @@ static int ti_bandgap_restore_ctxt(struct ti_bandgap *bgp) for (i = 0; i < bgp->conf->sensor_count; i++) { struct temp_sensor_registers *tsr; struct temp_sensor_regval *rval; - u32 val = 0; rval = &bgp->regval[i]; tsr = bgp->conf->sensors[i].registers; - if (TI_BANDGAP_HAS(bgp, COUNTER)) - val = ti_bandgap_readl(bgp, tsr->bgap_counter); - if (TI_BANDGAP_HAS(bgp, TSHUT_CONFIG)) ti_bandgap_writel(bgp, rval->tshut_threshold, tsr->tshut_threshold); diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig index ffd1e098bfd2..a503c1b2bfd9 100644 --- a/drivers/vdpa/Kconfig +++ b/drivers/vdpa/Kconfig @@ -14,6 +14,7 @@ config VDPA_SIM depends on RUNTIME_TESTING_MENU && HAS_DMA select DMA_OPS select VHOST_RING + select IOMMU_IOVA help Enable this module to support vDPA device simulators. These devices are used for testing, prototyping and development of vDPA. @@ -25,6 +26,13 @@ config VDPA_SIM_NET help vDPA networking device simulator which loops TX traffic back to RX. +config VDPA_SIM_BLOCK + tristate "vDPA simulator for block device" + depends on VDPA_SIM + help + vDPA block device simulator which terminates IO requests in a + memory buffer. + config IFCVF tristate "Intel IFC VF vDPA driver" depends on PCI_MSI @@ -52,4 +60,11 @@ config MLX5_VDPA_NET be executed by the hardware. It also supports a variety of stateless offloads depending on the actual device used and firmware version. +config VP_VDPA + tristate "Virtio PCI bridge vDPA driver" + select VIRTIO_PCI_LIB + depends on PCI_MSI + help + This kernel module bridges a virtio PCI device to the vDPA bus.
+ endif # VDPA diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile index d160e9b63a66..67fe7f3d6943 100644 --- a/drivers/vdpa/Makefile +++ b/drivers/vdpa/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_VDPA) += vdpa.o obj-$(CONFIG_VDPA_SIM) += vdpa_sim/ obj-$(CONFIG_IFCVF) += ifcvf/ obj-$(CONFIG_MLX5_VDPA) += mlx5/ +obj-$(CONFIG_VP_VDPA) += virtio_pci/ diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c index f2a128e56de5..1a661ab45af5 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.c +++ b/drivers/vdpa/ifcvf/ifcvf_base.c @@ -202,10 +202,11 @@ static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status) ifcvf_get_status(hw); } -u64 ifcvf_get_features(struct ifcvf_hw *hw) +u64 ifcvf_get_hw_features(struct ifcvf_hw *hw) { struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg; u32 features_lo, features_hi; + u64 features; ifc_iowrite32(0, &cfg->device_feature_select); features_lo = ifc_ioread32(&cfg->device_feature); @@ -213,7 +214,26 @@ u64 ifcvf_get_features(struct ifcvf_hw *hw) ifc_iowrite32(1, &cfg->device_feature_select); features_hi = ifc_ioread32(&cfg->device_feature); - return ((u64)features_hi << 32) | features_lo; + features = ((u64)features_hi << 32) | features_lo; + + return features; +} + +u64 ifcvf_get_features(struct ifcvf_hw *hw) +{ + return hw->hw_features; +} + +int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features) +{ + struct ifcvf_adapter *ifcvf = vf_to_adapter(hw); + + if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) { + IFCVF_ERR(ifcvf->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n"); + return -EINVAL; + } + + return 0; } void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset, diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h index 64696d63fe07..0111bfdeb342 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.h +++ b/drivers/vdpa/ifcvf/ifcvf_base.h @@ -15,15 +15,26 @@ #include <linux/pci_regs.h> #include <linux/vdpa.h> #include <uapi/linux/virtio_net.h> +#include <uapi/linux/virtio_blk.h> #include <uapi/linux/virtio_config.h> #include <uapi/linux/virtio_pci.h> -#define IFCVF_VENDOR_ID 0x1AF4 -#define IFCVF_DEVICE_ID 0x1041 -#define IFCVF_SUBSYS_VENDOR_ID 0x8086 -#define IFCVF_SUBSYS_DEVICE_ID 0x001A +#define N3000_VENDOR_ID 0x1AF4 +#define N3000_DEVICE_ID 0x1041 +#define N3000_SUBSYS_VENDOR_ID 0x8086 +#define N3000_SUBSYS_DEVICE_ID 0x001A -#define IFCVF_SUPPORTED_FEATURES \ +#define C5000X_PL_VENDOR_ID 0x1AF4 +#define C5000X_PL_DEVICE_ID 0x1000 +#define C5000X_PL_SUBSYS_VENDOR_ID 0x8086 +#define C5000X_PL_SUBSYS_DEVICE_ID 0x0001 + +#define C5000X_PL_BLK_VENDOR_ID 0x1AF4 +#define C5000X_PL_BLK_DEVICE_ID 0x1001 +#define C5000X_PL_BLK_SUBSYS_VENDOR_ID 0x8086 +#define C5000X_PL_BLK_SUBSYS_DEVICE_ID 0x0002 + +#define IFCVF_NET_SUPPORTED_FEATURES \ ((1ULL << VIRTIO_NET_F_MAC) | \ (1ULL << VIRTIO_F_ANY_LAYOUT) | \ (1ULL << VIRTIO_F_VERSION_1) | \ @@ -78,6 +89,8 @@ struct ifcvf_hw { void __iomem *notify_base; u32 notify_off_multiplier; u64 req_features; + u64 hw_features; + u32 dev_type; struct virtio_pci_common_cfg __iomem *common_cfg; void __iomem *net_cfg; struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2]; @@ -116,7 +129,10 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status); void io_write64_twopart(u64 val, u32 *lo, u32 *hi); void ifcvf_reset(struct ifcvf_hw *hw); u64 ifcvf_get_features(struct ifcvf_hw *hw); +u64 ifcvf_get_hw_features(struct ifcvf_hw *hw); +int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features); u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid); int 
ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num); struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw); +int ifcvf_probed_virtio_net(struct ifcvf_hw *hw); #endif /* _IFCVF_H_ */ diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index d555a6a5d1ba..ab0ab5cf0f6e 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -14,7 +14,6 @@ #include <linux/sysfs.h> #include "ifcvf_base.h" -#define VERSION_STRING "0.1" #define DRIVER_AUTHOR "Intel Corporation" #define IFCVF_DRIVER_NAME "ifcvf" @@ -169,10 +168,23 @@ static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev) static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev) { + struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev); struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + struct pci_dev *pdev = adapter->pdev; + u64 features; - features = ifcvf_get_features(vf) & IFCVF_SUPPORTED_FEATURES; + switch (vf->dev_type) { + case VIRTIO_ID_NET: + features = ifcvf_get_features(vf) & IFCVF_NET_SUPPORTED_FEATURES; + break; + case VIRTIO_ID_BLOCK: + features = ifcvf_get_features(vf); + break; + default: + features = 0; + IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type); + } return features; } @@ -180,6 +192,11 @@ static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev) static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features) { struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + int ret; + + ret = ifcvf_verify_min_features(vf, features); + if (ret) + return ret; vf->req_features = features; @@ -319,12 +336,17 @@ static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev) static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev) { - return VIRTIO_ID_NET; + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + return vf->dev_type; } static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev) { - return IFCVF_SUBSYS_VENDOR_ID; + struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev); + struct pci_dev *pdev = adapter->pdev; + + return pdev->subsystem_vendor; } static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev) @@ -332,6 +354,28 @@ static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev) return IFCVF_QUEUE_ALIGNMENT; } +static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev) +{ + struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev); + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + struct pci_dev *pdev = adapter->pdev; + size_t size; + + switch (vf->dev_type) { + case VIRTIO_ID_NET: + size = sizeof(struct virtio_net_config); + break; + case VIRTIO_ID_BLOCK: + size = sizeof(struct virtio_blk_config); + break; + default: + size = 0; + IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type); + } + + return size; +} + static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev, unsigned int offset, void *buf, unsigned int len) @@ -392,6 +436,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = { .get_device_id = ifcvf_vdpa_get_device_id, .get_vendor_id = ifcvf_vdpa_get_vendor_id, .get_vq_align = ifcvf_vdpa_get_vq_align, + .get_config_size = ifcvf_vdpa_get_config_size, .get_config = ifcvf_vdpa_get_config, .set_config = ifcvf_vdpa_set_config, .set_config_cb = ifcvf_vdpa_set_config_cb, @@ -441,6 +486,19 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, adapter); vf = &adapter->vf; + + /* This driver drives both modern virtio devices and transitional + * devices in modern mode.
+ * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM, + * so legacy devices and transitional devices in legacy + * mode will not work for vDPA; this driver will not + * drive devices with a legacy interface. + */ + if (pdev->device < 0x1040) + vf->dev_type = pdev->subsystem_device; + else + vf->dev_type = pdev->device - 0x1040; + vf->base = pcim_iomap_table(pdev); adapter->pdev = pdev; @@ -455,6 +513,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) vf->vring[i].irq = -EINVAL; + vf->hw_features = ifcvf_get_hw_features(vf); + ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2); if (ret) { IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus"); @@ -476,10 +536,19 @@ static void ifcvf_remove(struct pci_dev *pdev) } static struct pci_device_id ifcvf_pci_ids[] = { - { PCI_DEVICE_SUB(IFCVF_VENDOR_ID, - IFCVF_DEVICE_ID, - IFCVF_SUBSYS_VENDOR_ID, - IFCVF_SUBSYS_DEVICE_ID) }, + { PCI_DEVICE_SUB(N3000_VENDOR_ID, + N3000_DEVICE_ID, + N3000_SUBSYS_VENDOR_ID, + N3000_SUBSYS_DEVICE_ID) }, + { PCI_DEVICE_SUB(C5000X_PL_VENDOR_ID, + C5000X_PL_DEVICE_ID, + C5000X_PL_SUBSYS_VENDOR_ID, + C5000X_PL_SUBSYS_DEVICE_ID) }, + { PCI_DEVICE_SUB(C5000X_PL_BLK_VENDOR_ID, + C5000X_PL_BLK_DEVICE_ID, + C5000X_PL_BLK_SUBSYS_VENDOR_ID, + C5000X_PL_BLK_SUBSYS_DEVICE_ID) }, + { 0 }, }; MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids); @@ -494,4 +563,3 @@ static struct pci_driver ifcvf_driver = { module_pci_driver(ifcvf_driver); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(VERSION_STRING); diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 4d2809c7d4e3..189e4385df40 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1809,6 +1809,11 @@ err_setup: ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; } +static size_t mlx5_vdpa_get_config_size(struct vdpa_device *vdev) +{ + return sizeof(struct virtio_net_config); +} + static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, void *buf, unsigned int len) { @@ -1895,6 +1900,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = { .get_vendor_id = mlx5_vdpa_get_vendor_id, .get_status = mlx5_vdpa_get_status, .set_status = mlx5_vdpa_set_status, + .get_config_size = mlx5_vdpa_get_config_size, .get_config = mlx5_vdpa_get_config, .set_config = mlx5_vdpa_set_config, .get_generation = mlx5_vdpa_get_generation, @@ -1974,23 +1980,32 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev) } } -static int mlx5v_probe(struct auxiliary_device *adev, - const struct auxiliary_device_id *id) +struct mlx5_vdpa_mgmtdev { + struct vdpa_mgmt_dev mgtdev; + struct mlx5_adev *madev; + struct mlx5_vdpa_net *ndev; +}; + +static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name) { - struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev); - struct mlx5_core_dev *mdev = madev->mdev; + struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev); struct virtio_net_config *config; struct mlx5_vdpa_dev *mvdev; struct mlx5_vdpa_net *ndev; + struct mlx5_core_dev *mdev; u32 max_vqs; int err; + if (mgtdev->ndev) + return -ENOSPC; + + mdev = mgtdev->madev->mdev; /* we save one virtqueue for control virtqueue should we require it */ max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues); max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS); ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, - NULL); + name); if (IS_ERR(ndev)) return
PTR_ERR(ndev); @@ -2017,11 +2032,12 @@ static int mlx5v_probe(struct auxiliary_device *adev, if (err) goto err_res; - err = vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs)); + mvdev->vdev.mdev = &mgtdev->mgtdev; + err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs)); if (err) goto err_reg; - dev_set_drvdata(&adev->dev, ndev); + mgtdev->ndev = ndev; return 0; err_reg: @@ -2034,11 +2050,62 @@ err_mtu: return err; } +static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev) +{ + struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev); + + _vdpa_unregister_device(dev); + mgtdev->ndev = NULL; +} + +static const struct vdpa_mgmtdev_ops mdev_ops = { + .dev_add = mlx5_vdpa_dev_add, + .dev_del = mlx5_vdpa_dev_del, +}; + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static int mlx5v_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) + +{ + struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = madev->mdev; + struct mlx5_vdpa_mgmtdev *mgtdev; + int err; + + mgtdev = kzalloc(sizeof(*mgtdev), GFP_KERNEL); + if (!mgtdev) + return -ENOMEM; + + mgtdev->mgtdev.ops = &mdev_ops; + mgtdev->mgtdev.device = mdev->device; + mgtdev->mgtdev.id_table = id_table; + mgtdev->madev = madev; + + err = vdpa_mgmtdev_register(&mgtdev->mgtdev); + if (err) + goto reg_err; + + dev_set_drvdata(&adev->dev, mgtdev); + + return 0; + +reg_err: + kfree(mgtdev); + return err; +} + static void mlx5v_remove(struct auxiliary_device *adev) { - struct mlx5_vdpa_dev *mvdev = dev_get_drvdata(&adev->dev); + struct mlx5_vdpa_mgmtdev *mgtdev; - vdpa_unregister_device(&mvdev->vdev); + mgtdev = dev_get_drvdata(&adev->dev); + vdpa_mgmtdev_unregister(&mgtdev->mgtdev); + kfree(mgtdev); } static const struct auxiliary_device_id mlx5v_id_table[] = { diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index 5cffce67cab0..bb3f1d1f0422 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -75,8 +75,8 @@ static void vdpa_release_dev(struct device *d) * Driver should use vdpa_alloc_device() wrapper macro instead of * using this directly. * - * Returns an error when parent/config/dma_dev is not set or fail to get - * ida. + * Return: Returns an error when parent/config/dma_dev is not set or the ida + * allocation fails.
*/ struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, @@ -157,7 +157,7 @@ static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs) * @vdev: the vdpa device to be registered to vDPA bus * @nvqs: number of virtqueues supported by this device * - * Returns an error when fail to add device to vDPA bus + * Return: Returns an error when it fails to add the device to the vDPA bus */ int _vdpa_register_device(struct vdpa_device *vdev, int nvqs) { @@ -174,7 +174,7 @@ EXPORT_SYMBOL_GPL(_vdpa_register_device); * @vdev: the vdpa device to be registered to vDPA bus * @nvqs: number of virtqueues supported by this device * - * Returns an error when fail to add to vDPA bus + * Return: Returns an error when it fails to add to the vDPA bus */ int vdpa_register_device(struct vdpa_device *vdev, int nvqs) { @@ -218,7 +218,7 @@ EXPORT_SYMBOL_GPL(vdpa_unregister_device); * @drv: the vdpa device driver to be registered * @owner: module owner of the driver * - * Returns an err when fail to do the registration + * Return: Returns an error when the registration fails */ int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner) { @@ -245,6 +245,8 @@ EXPORT_SYMBOL_GPL(vdpa_unregister_driver); * @mdev: Pointer to vdpa management device * vdpa_mgmtdev_register() register a vdpa management device which supports * vdpa device management. + * Return: Returns 0 on success, or an error when the required callback ops + * are not initialized. */ int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev) { diff --git a/drivers/vdpa/vdpa_sim/Makefile b/drivers/vdpa/vdpa_sim/Makefile index 79d4536d347e..d458103302f2 100644 --- a/drivers/vdpa/vdpa_sim/Makefile +++ b/drivers/vdpa/vdpa_sim/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_VDPA_SIM) += vdpa_sim.o obj-$(CONFIG_VDPA_SIM_NET) += vdpa_sim_net.o +obj-$(CONFIG_VDPA_SIM_BLOCK) += vdpa_sim_blk.o diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 5b6b2f87d40c..98f793bc9376 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -17,6 +17,7 @@ #include <linux/vringh.h> #include <linux/vdpa.h> #include <linux/vhost_iotlb.h> +#include <linux/iova.h> #include "vdpa_sim.h" @@ -128,30 +129,57 @@ static int dir_to_perm(enum dma_data_direction dir) return perm; } +static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr, + size_t size, unsigned int perm) +{ + struct iova *iova; + dma_addr_t dma_addr; + int ret; + + /* We set the limit_pfn to the maximum (ULONG_MAX - 1) */ + iova = alloc_iova(&vdpasim->iova, size, ULONG_MAX - 1, true); + if (!iova) + return DMA_MAPPING_ERROR; + + dma_addr = iova_dma_addr(&vdpasim->iova, iova); + + spin_lock(&vdpasim->iommu_lock); + ret = vhost_iotlb_add_range(vdpasim->iommu, (u64)dma_addr, + (u64)dma_addr + size - 1, (u64)paddr, perm); + spin_unlock(&vdpasim->iommu_lock); + + if (ret) { + __free_iova(&vdpasim->iova, iova); + return DMA_MAPPING_ERROR; + } + + return dma_addr; +} + +static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr, + size_t size) +{ + spin_lock(&vdpasim->iommu_lock); + vhost_iotlb_del_range(vdpasim->iommu, (u64)dma_addr, + (u64)dma_addr + size - 1); + spin_unlock(&vdpasim->iommu_lock); + + free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr)); +} + static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) { struct vdpasim *vdpasim =
dev_to_sim(dev); - struct vhost_iotlb *iommu = vdpasim->iommu; - u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset; - int ret, perm = dir_to_perm(dir); + phys_addr_t paddr = page_to_phys(page) + offset; + int perm = dir_to_perm(dir); if (perm < 0) return DMA_MAPPING_ERROR; - /* For simplicity, use identical mapping to avoid e.g iova - * allocator. - */ - spin_lock(&vdpasim->iommu_lock); - ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1, - pa, dir_to_perm(dir)); - spin_unlock(&vdpasim->iommu_lock); - if (ret) - return DMA_MAPPING_ERROR; - - return (dma_addr_t)(pa); + return vdpasim_map_range(vdpasim, paddr, size, perm); } static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr, @@ -159,12 +187,8 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr, unsigned long attrs) { struct vdpasim *vdpasim = dev_to_sim(dev); - struct vhost_iotlb *iommu = vdpasim->iommu; - spin_lock(&vdpasim->iommu_lock); - vhost_iotlb_del_range(iommu, (u64)dma_addr, - (u64)dma_addr + size - 1); - spin_unlock(&vdpasim->iommu_lock); + vdpasim_unmap_range(vdpasim, dma_addr, size); } static void *vdpasim_alloc_coherent(struct device *dev, size_t size, @@ -172,27 +196,22 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size, unsigned long attrs) { struct vdpasim *vdpasim = dev_to_sim(dev); - struct vhost_iotlb *iommu = vdpasim->iommu; - void *addr = kmalloc(size, flag); - int ret; + phys_addr_t paddr; + void *addr; - spin_lock(&vdpasim->iommu_lock); + addr = kmalloc(size, flag); if (!addr) { *dma_addr = DMA_MAPPING_ERROR; - } else { - u64 pa = virt_to_phys(addr); - - ret = vhost_iotlb_add_range(iommu, (u64)pa, - (u64)pa + size - 1, - pa, VHOST_MAP_RW); - if (ret) { - *dma_addr = DMA_MAPPING_ERROR; - kfree(addr); - addr = NULL; - } else - *dma_addr = (dma_addr_t)pa; + return NULL; + } + + paddr = virt_to_phys(addr); + + *dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW); + if (*dma_addr == DMA_MAPPING_ERROR) { + kfree(addr); + return NULL; } - spin_unlock(&vdpasim->iommu_lock); return addr; } @@ -202,14 +221,10 @@ static void vdpasim_free_coherent(struct device *dev, size_t size, unsigned long attrs) { struct vdpasim *vdpasim = dev_to_sim(dev); - struct vhost_iotlb *iommu = vdpasim->iommu; - spin_lock(&vdpasim->iommu_lock); - vhost_iotlb_del_range(iommu, (u64)dma_addr, - (u64)dma_addr + size - 1); - spin_unlock(&vdpasim->iommu_lock); + vdpasim_unmap_range(vdpasim, dma_addr, size); - kfree(phys_to_virt((uintptr_t)dma_addr)); + kfree(vaddr); } static const struct dma_map_ops vdpasim_dma_ops = { @@ -269,7 +284,15 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr) goto err_iommu; for (i = 0; i < dev_attr->nvqs; i++) - vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu); + vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu, + &vdpasim->iommu_lock); + + ret = iova_cache_get(); + if (ret) + goto err_iommu; + + /* For simplicity we use an IOVA allocator with byte granularity */ + init_iova_domain(&vdpasim->iova, 1, 0); vdpasim->vdpa.dma_dev = dev; @@ -439,6 +462,13 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status) spin_unlock(&vdpasim->lock); } +static size_t vdpasim_get_config_size(struct vdpa_device *vdpa) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + + return vdpasim->dev_attr.config_size; +} + static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset, void *buf, unsigned int len) { @@ -539,8 +569,17 @@ static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size) static 
void vdpasim_free(struct vdpa_device *vdpa) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + int i; cancel_work_sync(&vdpasim->work); + + for (i = 0; i < vdpasim->dev_attr.nvqs; i++) { + vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov); + vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov); + } + + put_iova_domain(&vdpasim->iova); + iova_cache_put(); kvfree(vdpasim->buffer); if (vdpasim->iommu) vhost_iotlb_free(vdpasim->iommu); @@ -566,6 +605,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = { .get_vendor_id = vdpasim_get_vendor_id, .get_status = vdpasim_get_status, .set_status = vdpasim_set_status, + .get_config_size = vdpasim_get_config_size, .get_config = vdpasim_get_config, .set_config = vdpasim_set_config, .get_generation = vdpasim_get_generation, @@ -593,6 +633,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = { .get_vendor_id = vdpasim_get_vendor_id, .get_status = vdpasim_get_status, .set_status = vdpasim_set_status, + .get_config_size = vdpasim_get_config_size, .get_config = vdpasim_get_config, .set_config = vdpasim_set_config, .get_generation = vdpasim_get_generation, diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h index 6d75444f9948..cd58e888bcf3 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.h +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h @@ -6,6 +6,7 @@ #ifndef _VDPA_SIM_H #define _VDPA_SIM_H +#include <linux/iova.h> #include <linux/vringh.h> #include <linux/vdpa.h> #include <linux/virtio_byteorder.h> @@ -57,6 +58,7 @@ struct vdpasim { /* virtio config according to device type */ void *config; struct vhost_iotlb *iommu; + struct iova_domain iova; void *buffer; u32 status; u32 generation; diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c new file mode 100644 index 000000000000..5bfe1c281645 --- /dev/null +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VDPA simulator for block device. + * + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021, Red Hat Inc. All rights reserved. 
+ * + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/blkdev.h> +#include <linux/vringh.h> +#include <linux/vdpa.h> +#include <linux/blkdev.h> +#include <uapi/linux/virtio_blk.h> + +#include "vdpa_sim.h" + +#define DRV_VERSION "0.1" +#define DRV_AUTHOR "Max Gurtovoy <[email protected]>" +#define DRV_DESC "vDPA Device Simulator for block device" +#define DRV_LICENSE "GPL v2" + +#define VDPASIM_BLK_FEATURES (VDPASIM_FEATURES | \ + (1ULL << VIRTIO_BLK_F_SIZE_MAX) | \ + (1ULL << VIRTIO_BLK_F_SEG_MAX) | \ + (1ULL << VIRTIO_BLK_F_BLK_SIZE) | \ + (1ULL << VIRTIO_BLK_F_TOPOLOGY) | \ + (1ULL << VIRTIO_BLK_F_MQ)) + +#define VDPASIM_BLK_CAPACITY 0x40000 +#define VDPASIM_BLK_SIZE_MAX 0x1000 +#define VDPASIM_BLK_SEG_MAX 32 +#define VDPASIM_BLK_VQ_NUM 1 + +static char vdpasim_blk_id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim"; + +static bool vdpasim_blk_check_range(u64 start_sector, size_t range_size) +{ + u64 range_sectors = range_size >> SECTOR_SHIFT; + + if (range_size > VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX) + return false; + + if (start_sector > VDPASIM_BLK_CAPACITY) + return false; + + if (range_sectors > VDPASIM_BLK_CAPACITY - start_sector) + return false; + + return true; +} + +/* Returns 'true' if the request is handled (with or without an I/O error) + * and the status is correctly written in the last byte of the 'in iov', + * 'false' otherwise. + */ +static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim, + struct vdpasim_virtqueue *vq) +{ + size_t pushed = 0, to_pull, to_push; + struct virtio_blk_outhdr hdr; + ssize_t bytes; + loff_t offset; + u64 sector; + u8 status; + u32 type; + int ret; + + ret = vringh_getdesc_iotlb(&vq->vring, &vq->out_iov, &vq->in_iov, + &vq->head, GFP_ATOMIC); + if (ret != 1) + return false; + + if (vq->out_iov.used < 1 || vq->in_iov.used < 1) { + dev_err(&vdpasim->vdpa.dev, "missing headers - out_iov: %u in_iov %u\n", + vq->out_iov.used, vq->in_iov.used); + return false; + } + + if (vq->in_iov.iov[vq->in_iov.used - 1].iov_len < 1) { + dev_err(&vdpasim->vdpa.dev, "request in header too short\n"); + return false; + } + + /* The last byte is the status and we checked if the last iov has + * enough room for it. 
+ */ + to_push = vringh_kiov_length(&vq->in_iov) - 1; + + to_pull = vringh_kiov_length(&vq->out_iov); + + bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &hdr, + sizeof(hdr)); + if (bytes != sizeof(hdr)) { + dev_err(&vdpasim->vdpa.dev, "request out header too short\n"); + return false; + } + + to_pull -= bytes; + + type = vdpasim32_to_cpu(vdpasim, hdr.type); + sector = vdpasim64_to_cpu(vdpasim, hdr.sector); + offset = sector << SECTOR_SHIFT; + status = VIRTIO_BLK_S_OK; + + switch (type) { + case VIRTIO_BLK_T_IN: + if (!vdpasim_blk_check_range(sector, to_push)) { + dev_err(&vdpasim->vdpa.dev, + "reading over the capacity - offset: 0x%llx len: 0x%zx\n", + offset, to_push); + status = VIRTIO_BLK_S_IOERR; + break; + } + + bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, + vdpasim->buffer + offset, + to_push); + if (bytes < 0) { + dev_err(&vdpasim->vdpa.dev, + "vringh_iov_push_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n", + bytes, offset, to_push); + status = VIRTIO_BLK_S_IOERR; + break; + } + + pushed += bytes; + break; + + case VIRTIO_BLK_T_OUT: + if (!vdpasim_blk_check_range(sector, to_pull)) { + dev_err(&vdpasim->vdpa.dev, + "writing over the capacity - offset: 0x%llx len: 0x%zx\n", + offset, to_pull); + status = VIRTIO_BLK_S_IOERR; + break; + } + + bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, + vdpasim->buffer + offset, + to_pull); + if (bytes < 0) { + dev_err(&vdpasim->vdpa.dev, + "vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n", + bytes, offset, to_pull); + status = VIRTIO_BLK_S_IOERR; + break; + } + break; + + case VIRTIO_BLK_T_GET_ID: + bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, + vdpasim_blk_id, + VIRTIO_BLK_ID_BYTES); + if (bytes < 0) { + dev_err(&vdpasim->vdpa.dev, + "vringh_iov_push_iotlb() error: %zd\n", bytes); + status = VIRTIO_BLK_S_IOERR; + break; + } + + pushed += bytes; + break; + + default: + dev_warn(&vdpasim->vdpa.dev, + "Unsupported request type %d\n", type); + status = VIRTIO_BLK_S_IOERR; + break; + } + + /* If some operations fail, we need to skip the remaining bytes + * to put the status in the last byte + */ + if (to_push - pushed > 0) + vringh_kiov_advance(&vq->in_iov, to_push - pushed); + + /* Last byte is the status */ + bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, &status, 1); + if (bytes != 1) + return false; + + pushed += bytes; + + /* Make sure data is written before advancing index */ + smp_wmb(); + + vringh_complete_iotlb(&vq->vring, vq->head, pushed); + + return true; +} + +static void vdpasim_blk_work(struct work_struct *work) +{ + struct vdpasim *vdpasim = container_of(work, struct vdpasim, work); + int i; + + spin_lock(&vdpasim->lock); + + if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) + goto out; + + for (i = 0; i < VDPASIM_BLK_VQ_NUM; i++) { + struct vdpasim_virtqueue *vq = &vdpasim->vqs[i]; + + if (!vq->ready) + continue; + + while (vdpasim_blk_handle_req(vdpasim, vq)) { + /* Make sure used is visible before raising the interrupt.
*/ + smp_wmb(); + + local_bh_disable(); + if (vringh_need_notify_iotlb(&vq->vring) > 0) + vringh_notify(&vq->vring); + local_bh_enable(); + } + } +out: + spin_unlock(&vdpasim->lock); +} + +static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config) +{ + struct virtio_blk_config *blk_config = config; + + memset(config, 0, sizeof(struct virtio_blk_config)); + + blk_config->capacity = cpu_to_vdpasim64(vdpasim, VDPASIM_BLK_CAPACITY); + blk_config->size_max = cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_SIZE_MAX); + blk_config->seg_max = cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_SEG_MAX); + blk_config->num_queues = cpu_to_vdpasim16(vdpasim, VDPASIM_BLK_VQ_NUM); + blk_config->min_io_size = cpu_to_vdpasim16(vdpasim, 1); + blk_config->opt_io_size = cpu_to_vdpasim32(vdpasim, 1); + blk_config->blk_size = cpu_to_vdpasim32(vdpasim, SECTOR_SIZE); +} + +static void vdpasim_blk_mgmtdev_release(struct device *dev) +{ +} + +static struct device vdpasim_blk_mgmtdev = { + .init_name = "vdpasim_blk", + .release = vdpasim_blk_mgmtdev_release, +}; + +static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name) +{ + struct vdpasim_dev_attr dev_attr = {}; + struct vdpasim *simdev; + int ret; + + dev_attr.mgmt_dev = mdev; + dev_attr.name = name; + dev_attr.id = VIRTIO_ID_BLOCK; + dev_attr.supported_features = VDPASIM_BLK_FEATURES; + dev_attr.nvqs = VDPASIM_BLK_VQ_NUM; + dev_attr.config_size = sizeof(struct virtio_blk_config); + dev_attr.get_config = vdpasim_blk_get_config; + dev_attr.work_fn = vdpasim_blk_work; + dev_attr.buffer_size = VDPASIM_BLK_CAPACITY << SECTOR_SHIFT; + + simdev = vdpasim_create(&dev_attr); + if (IS_ERR(simdev)) + return PTR_ERR(simdev); + + ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_BLK_VQ_NUM); + if (ret) + goto put_dev; + + return 0; + +put_dev: + put_device(&simdev->vdpa.dev); + return ret; +} + +static void vdpasim_blk_dev_del(struct vdpa_mgmt_dev *mdev, + struct vdpa_device *dev) +{ + struct vdpasim *simdev = container_of(dev, struct vdpasim, vdpa); + + _vdpa_unregister_device(&simdev->vdpa); +} + +static const struct vdpa_mgmtdev_ops vdpasim_blk_mgmtdev_ops = { + .dev_add = vdpasim_blk_dev_add, + .dev_del = vdpasim_blk_dev_del +}; + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static struct vdpa_mgmt_dev mgmt_dev = { + .device = &vdpasim_blk_mgmtdev, + .id_table = id_table, + .ops = &vdpasim_blk_mgmtdev_ops, +}; + +static int __init vdpasim_blk_init(void) +{ + int ret; + + ret = device_register(&vdpasim_blk_mgmtdev); + if (ret) + return ret; + + ret = vdpa_mgmtdev_register(&mgmt_dev); + if (ret) + goto parent_err; + + return 0; + +parent_err: + device_unregister(&vdpasim_blk_mgmtdev); + return ret; +} + +static void __exit vdpasim_blk_exit(void) +{ + vdpa_mgmtdev_unregister(&mgmt_dev); + device_unregister(&vdpasim_blk_mgmtdev); +} + +module_init(vdpasim_blk_init) +module_exit(vdpasim_blk_exit) + +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE(DRV_LICENSE); +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION(DRV_DESC); diff --git a/drivers/vdpa/virtio_pci/Makefile b/drivers/vdpa/virtio_pci/Makefile new file mode 100644 index 000000000000..231088d3af7d --- /dev/null +++ b/drivers/vdpa/virtio_pci/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_VP_VDPA) += vp_vdpa.o diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c new file mode 100644 index 000000000000..c76ebb531212 --- /dev/null +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c @@ -0,0 +1,484 @@ +// 
SPDX-License-Identifier: GPL-2.0-only +/* + * vDPA bridge driver for modern virtio-pci device + * + * Copyright (c) 2020, Red Hat Inc. All rights reserved. + * Author: Jason Wang <[email protected]> + * + * Based on virtio_pci_modern.c. + */ + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/vdpa.h> +#include <linux/virtio.h> +#include <linux/virtio_config.h> +#include <linux/virtio_ring.h> +#include <linux/virtio_pci.h> +#include <linux/virtio_pci_modern.h> + +#define VP_VDPA_QUEUE_MAX 256 +#define VP_VDPA_DRIVER_NAME "vp_vdpa" +#define VP_VDPA_NAME_SIZE 256 + +struct vp_vring { + void __iomem *notify; + char msix_name[VP_VDPA_NAME_SIZE]; + struct vdpa_callback cb; + resource_size_t notify_pa; + int irq; +}; + +struct vp_vdpa { + struct vdpa_device vdpa; + struct virtio_pci_modern_device mdev; + struct vp_vring *vring; + struct vdpa_callback config_cb; + char msix_name[VP_VDPA_NAME_SIZE]; + int config_irq; + int queues; + int vectors; +}; + +static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa) +{ + return container_of(vdpa, struct vp_vdpa, vdpa); +} + +static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + + return &vp_vdpa->mdev; +} + +static u64 vp_vdpa_get_features(struct vdpa_device *vdpa) +{ + struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); + + return vp_modern_get_features(mdev); +} + +static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features) +{ + struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); + + vp_modern_set_features(mdev, features); + + return 0; +} + +static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) +{ + struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); + + return vp_modern_get_status(mdev); +} + +static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) +{ + struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev; + struct pci_dev *pdev = mdev->pci_dev; + int i; + + for (i = 0; i < vp_vdpa->queues; i++) { + if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) { + vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR); + devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq, + &vp_vdpa->vring[i]); + vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR; + } + } + + if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) { + vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR); + devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa); + vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR; + } + + if (vp_vdpa->vectors) { + pci_free_irq_vectors(pdev); + vp_vdpa->vectors = 0; + } +} + +static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg) +{ + struct vp_vring *vring = arg; + + if (vring->cb.callback) + return vring->cb.callback(vring->cb.private); + + return IRQ_HANDLED; +} + +static irqreturn_t vp_vdpa_config_handler(int irq, void *arg) +{ + struct vp_vdpa *vp_vdpa = arg; + + if (vp_vdpa->config_cb.callback) + return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private); + + return IRQ_HANDLED; +} + +static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa) +{ + struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev; + struct pci_dev *pdev = mdev->pci_dev; + int i, ret, irq; + int queues = vp_vdpa->queues; + int vectors = queues + 1; + + ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX); + if (ret != vectors) { + dev_err(&pdev->dev, + "vp_vdpa: fail to allocate irq vectors want %d but %d\n", + vectors, ret); + return ret; + } + + vp_vdpa->vectors = vectors; + + for (i = 0; i < queues; i++) { + 
snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE, + "vp-vdpa[%s]-%d\n", pci_name(pdev), i); + irq = pci_irq_vector(pdev, i); + ret = devm_request_irq(&pdev->dev, irq, + vp_vdpa_vq_handler, + 0, vp_vdpa->vring[i].msix_name, + &vp_vdpa->vring[i]); + if (ret) { + dev_err(&pdev->dev, + "vp_vdpa: fail to request irq for vq %d\n", i); + goto err; + } + vp_modern_queue_vector(mdev, i, i); + vp_vdpa->vring[i].irq = irq; + } + + snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config\n", + pci_name(pdev)); + irq = pci_irq_vector(pdev, queues); + ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0, + vp_vdpa->msix_name, vp_vdpa); + if (ret) { + dev_err(&pdev->dev, + "vp_vdpa: fail to request irq for vq %d\n", i); + goto err; + } + vp_modern_config_vector(mdev, queues); + vp_vdpa->config_irq = irq; + + return 0; +err: + vp_vdpa_free_irq(vp_vdpa); + return ret; +} + +static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev; + u8 s = vp_vdpa_get_status(vdpa); + + if (status & VIRTIO_CONFIG_S_DRIVER_OK && + !(s & VIRTIO_CONFIG_S_DRIVER_OK)) { + vp_vdpa_request_irq(vp_vdpa); + } + + vp_modern_set_status(mdev, status); + + if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) && + (s & VIRTIO_CONFIG_S_DRIVER_OK)) + vp_vdpa_free_irq(vp_vdpa); +} + +static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa) +{ + return VP_VDPA_QUEUE_MAX; +} + +static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid, + struct vdpa_vq_state *state) +{ + /* Note that this is not supported by virtio specification, so + * we return -EOPNOTSUPP here. This means we can't support live + * migration, vhost device start/stop. + */ + return -EOPNOTSUPP; +} + +static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid, + const struct vdpa_vq_state *state) +{ + /* Note that this is not supported by virtio specification, so + * we return -EOPNOTSUPP here. This means we can't support live + * migration, vhost device start/stop.
+ */ + return -EOPNOTSUPP; +} + +static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid, + struct vdpa_callback *cb) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + + vp_vdpa->vring[qid].cb = *cb; +} + +static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa, + u16 qid, bool ready) +{ + struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); + + vp_modern_set_queue_enable(mdev, qid, ready); +} + +static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid) +{ + struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); + + return vp_modern_get_queue_enable(mdev, qid); +} + +static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid, + u32 num) +{ + struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); + + vp_modern_set_queue_size(mdev, qid, num); +} + +static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid, + u64 desc_area, u64 driver_area, + u64 device_area) +{ + struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); + + vp_modern_queue_address(mdev, qid, desc_area, + driver_area, device_area); + + return 0; +} + +static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + + vp_iowrite16(qid, vp_vdpa->vring[qid].notify); +} + +static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa) +{ + struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); + + return vp_modern_generation(mdev); +} + +static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa) +{ + struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); + + return mdev->id.device; +} + +static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa) +{ + struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); + + return mdev->id.vendor; +} + +static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa) +{ + return PAGE_SIZE; +} + +static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa) +{ + struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); + + return mdev->device_len; +} + +static void vp_vdpa_get_config(struct vdpa_device *vdpa, + unsigned int offset, + void *buf, unsigned int len) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev; + u8 old, new; + u8 *p; + int i; + + do { + old = vp_ioread8(&mdev->common->config_generation); + p = buf; + for (i = 0; i < len; i++) + *p++ = vp_ioread8(mdev->device + offset + i); + + new = vp_ioread8(&mdev->common->config_generation); + } while (old != new); +} + +static void vp_vdpa_set_config(struct vdpa_device *vdpa, + unsigned int offset, const void *buf, + unsigned int len) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev; + const u8 *p = buf; + int i; + + for (i = 0; i < len; i++) + vp_iowrite8(*p++, mdev->device + offset + i); +} + +static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa, + struct vdpa_callback *cb) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + + vp_vdpa->config_cb = *cb; +} + +static struct vdpa_notification_area +vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid) +{ + struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); + struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev; + struct vdpa_notification_area notify; + + notify.addr = vp_vdpa->vring[qid].notify_pa; + notify.size = mdev->notify_offset_multiplier; + + return notify; +} + +static const struct vdpa_config_ops vp_vdpa_ops = { + .get_features = vp_vdpa_get_features, + .set_features = vp_vdpa_set_features, + .get_status = vp_vdpa_get_status, + .set_status = 
vp_vdpa_set_status, + .get_vq_num_max = vp_vdpa_get_vq_num_max, + .get_vq_state = vp_vdpa_get_vq_state, + .get_vq_notification = vp_vdpa_get_vq_notification, + .set_vq_state = vp_vdpa_set_vq_state, + .set_vq_cb = vp_vdpa_set_vq_cb, + .set_vq_ready = vp_vdpa_set_vq_ready, + .get_vq_ready = vp_vdpa_get_vq_ready, + .set_vq_num = vp_vdpa_set_vq_num, + .set_vq_address = vp_vdpa_set_vq_address, + .kick_vq = vp_vdpa_kick_vq, + .get_generation = vp_vdpa_get_generation, + .get_device_id = vp_vdpa_get_device_id, + .get_vendor_id = vp_vdpa_get_vendor_id, + .get_vq_align = vp_vdpa_get_vq_align, + .get_config_size = vp_vdpa_get_config_size, + .get_config = vp_vdpa_get_config, + .set_config = vp_vdpa_set_config, + .set_config_cb = vp_vdpa_set_config_cb, +}; + +static void vp_vdpa_free_irq_vectors(void *data) +{ + pci_free_irq_vectors(data); +} + +static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct virtio_pci_modern_device *mdev; + struct device *dev = &pdev->dev; + struct vp_vdpa *vp_vdpa; + int ret, i; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa, + dev, &vp_vdpa_ops, NULL); + if (vp_vdpa == NULL) { + dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n"); + return -ENOMEM; + } + + mdev = &vp_vdpa->mdev; + mdev->pci_dev = pdev; + + ret = vp_modern_probe(mdev); + if (ret) { + dev_err(&pdev->dev, "Failed to probe modern PCI device\n"); + goto err; + } + + pci_set_master(pdev); + pci_set_drvdata(pdev, vp_vdpa); + + vp_vdpa->vdpa.dma_dev = &pdev->dev; + vp_vdpa->queues = vp_modern_get_num_queues(mdev); + + ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev); + if (ret) { + dev_err(&pdev->dev, + "Failed for adding devres for freeing irq vectors\n"); + goto err; + } + + vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues, + sizeof(*vp_vdpa->vring), + GFP_KERNEL); + if (!vp_vdpa->vring) { + ret = -ENOMEM; + dev_err(&pdev->dev, "Fail to allocate virtqueues\n"); + goto err; + } + + for (i = 0; i < vp_vdpa->queues; i++) { + vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR; + vp_vdpa->vring[i].notify = + vp_modern_map_vq_notify(mdev, i, + &vp_vdpa->vring[i].notify_pa); + if (!vp_vdpa->vring[i].notify) { + dev_warn(&pdev->dev, "Fail to map vq notify %d\n", i); + goto err; + } + } + vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR; + + ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues); + if (ret) { + dev_err(&pdev->dev, "Failed to register to vdpa bus\n"); + goto err; + } + + return 0; + +err: + put_device(&vp_vdpa->vdpa.dev); + return ret; +} + +static void vp_vdpa_remove(struct pci_dev *pdev) +{ + struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev); + + vdpa_unregister_device(&vp_vdpa->vdpa); + vp_modern_remove(&vp_vdpa->mdev); +} + +static struct pci_driver vp_vdpa_driver = { + .name = "vp-vdpa", + .id_table = NULL, /* only dynamic ids */ + .probe = vp_vdpa_probe, + .remove = vp_vdpa_remove, +}; + +module_pci_driver(vp_vdpa_driver); + +MODULE_AUTHOR("Jason Wang <[email protected]>"); +MODULE_DESCRIPTION("vp-vdpa"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1"); diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index f5ebe008a28b..fb41db3da611 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -16,12 +16,12 @@ #include <linux/cdev.h> #include <linux/device.h> #include <linux/mm.h> +#include <linux/slab.h> #include <linux/iommu.h> #include <linux/uuid.h> #include <linux/vdpa.h> #include <linux/nospec.h> #include <linux/vhost.h> -#include <linux/virtio_net.h> 
#include "vhost.h" @@ -188,13 +188,8 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp) static int vhost_vdpa_config_validate(struct vhost_vdpa *v, struct vhost_vdpa_config *c) { - long size = 0; - - switch (v->virtio_id) { - case VIRTIO_ID_NET: - size = sizeof(struct virtio_net_config); - break; - } + struct vdpa_device *vdpa = v->vdpa; + long size = vdpa->config->get_config_size(vdpa); if (c->len == 0) return -EINVAL; @@ -989,6 +984,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma) if (vma->vm_end - vma->vm_start != notify.size) return -ENOTSUPP; + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_ops = &vhost_vdpa_vm_ops; return 0; } @@ -1023,10 +1019,6 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa) int minor; int r; - /* Currently, we only accept the network devices. */ - if (ops->get_device_id(vdpa) != VIRTIO_ID_NET) - return -ENOTSUPP; - v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!v) return -ENOMEM; diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 85d85faba058..4af8fa259d65 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -75,6 +75,34 @@ static inline int __vringh_get_head(const struct vringh *vrh, return head; } +/** + * vringh_kiov_advance - skip bytes from vring_kiov + * @iov: an iov passed to vringh_getdesc_*() (updated as we consume) + * @len: the maximum length to advance + */ +void vringh_kiov_advance(struct vringh_kiov *iov, size_t len) +{ + while (len && iov->i < iov->used) { + size_t partlen = min(iov->iov[iov->i].iov_len, len); + + iov->consumed += partlen; + iov->iov[iov->i].iov_len -= partlen; + iov->iov[iov->i].iov_base += partlen; + + if (!iov->iov[iov->i].iov_len) { + /* Fix up old iov element then increment. */ + iov->iov[iov->i].iov_len = iov->consumed; + iov->iov[iov->i].iov_base -= iov->consumed; + + iov->consumed = 0; + iov->i++; + } + + len -= partlen; + } +} +EXPORT_SYMBOL(vringh_kiov_advance); + /* Copy some bytes to/from the iovec. Returns num copied. */ static inline ssize_t vringh_iov_xfer(struct vringh *vrh, struct vringh_kiov *iov, @@ -95,19 +123,8 @@ static inline ssize_t vringh_iov_xfer(struct vringh *vrh, done += partlen; len -= partlen; ptr += partlen; - iov->consumed += partlen; - iov->iov[iov->i].iov_len -= partlen; - iov->iov[iov->i].iov_base += partlen; - if (!iov->iov[iov->i].iov_len) { - /* Fix up old iov element then increment. */ - iov->iov[iov->i].iov_len = iov->consumed; - iov->iov[iov->i].iov_base -= iov->consumed; - - - iov->consumed = 0; - iov->i++; - } + vringh_kiov_advance(iov, partlen); } return done; } @@ -290,9 +307,9 @@ __vringh_iov(struct vringh *vrh, u16 i, return -EINVAL; if (riov) - riov->i = riov->used = 0; + riov->i = riov->used = riov->consumed = 0; if (wiov) - wiov->i = wiov->used = 0; + wiov->i = wiov->used = wiov->consumed = 0; for (;;) { void *addr; @@ -662,7 +679,10 @@ EXPORT_SYMBOL(vringh_init_user); * *head will be vrh->vring.num. You may be able to ignore an invalid * descriptor, but there's not much you can do with an invalid ring. * - * Note that you may need to clean up riov and wiov, even on error! + * Note that you can reuse riov and wiov with subsequent calls. Content is + * overwritten and memory reallocated if more space is needed. + * When you don't need riov and wiov anymore, you should clean them up by + * calling vringh_iov_cleanup() to release the memory, even on error!
 */
 int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
@@ -932,7 +952,10 @@ EXPORT_SYMBOL(vringh_init_kern);
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
- * Note that you may need to clean up riov and wiov, even on error!
+ * Note that you can reuse riov and wiov with subsequent calls. Content is
+ * overwritten and memory reallocated if more space is needed.
+ * When you no longer need riov and wiov, clean them up by calling
+ * vringh_kiov_cleanup() to release the memory, even on error!
 */
 int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
@@ -1074,6 +1097,8 @@ static int iotlb_translate(const struct vringh *vrh,
 	int ret = 0;
 	u64 s = 0;

+	spin_lock(vrh->iotlb_lock);
+
 	while (len > s) {
 		u64 size, pa, pfn;

@@ -1103,6 +1128,8 @@ static int iotlb_translate(const struct vringh *vrh,
 		++ret;
 	}

+	spin_unlock(vrh->iotlb_lock);
+
 	return ret;
 }

@@ -1262,10 +1289,13 @@ EXPORT_SYMBOL(vringh_init_iotlb);
 * vringh_set_iotlb - initialize a vringh for a ring with IOTLB.
 * @vrh: the vring
 * @iotlb: iotlb associated with this vring
+ * @iotlb_lock: spinlock to synchronize the iotlb accesses
 */
-void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
+void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
+		      spinlock_t *iotlb_lock)
 {
 	vrh->iotlb = iotlb;
+	vrh->iotlb_lock = iotlb_lock;
 }
 EXPORT_SYMBOL(vringh_set_iotlb);

@@ -1285,7 +1315,10 @@ EXPORT_SYMBOL(vringh_set_iotlb);
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
- * Note that you may need to clean up riov and wiov, even on error!
+ * Note that you can reuse riov and wiov with subsequent calls. Content is
+ * overwritten and memory reallocated if more space is needed.
+ * When you no longer need riov and wiov, clean them up by calling
+ * vringh_kiov_cleanup() to release the memory, even on error!
 */
 int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
index f1964ea4b826..e21e1e86ad15 100644
--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
@@ -1524,7 +1524,8 @@ static const struct file_operations ne_enclave_fops = {
 *			  enclave file descriptor to be further used for enclave
 *			  resources handling e.g. memory regions and CPUs.
 * @ne_pci_dev :	Private data associated with the PCI device.
- * @slot_uid:		Generated unique slot id associated with an enclave.
+ * @slot_uid:		User pointer where the generated unique slot id
+ *			associated with an enclave is stored.
 *
 * Context: Process context. This function is called with the ne_pci_dev enclave
 *	    mutex held.
@@ -1532,7 +1533,7 @@ static const struct file_operations ne_enclave_fops = {
 * *  Enclave fd on success.
 * *  Negative return value on failure.
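For illustration, a minimal consumer-side sketch of the vringh reuse semantics described in the doc comments above. The device loop and the my_req_hdr header are hypothetical; only the vringh_* calls come from the API:

    struct vringh_kiov riov;
    u16 head;
    int ret;

    vringh_kiov_init(&riov, NULL, 0);

    for (;;) {
            /* riov is reused: content is overwritten, and its backing
             * array is only reallocated when more space is needed */
            ret = vringh_getdesc_kern(vrh, &riov, NULL, &head, GFP_KERNEL);
            if (ret <= 0)   /* 0 == ring empty, < 0 == error */
                    break;

            /* skip the (hypothetical) fixed-size request header */
            vringh_kiov_advance(&riov, sizeof(struct my_req_hdr));

            /* ...pull the payload with vringh_iov_pull_kern(&riov, ...)... */

            vringh_complete_kern(vrh, head, 0);
    }

    /* release the memory backing riov, even after an error */
    vringh_kiov_cleanup(&riov);

Likewise, with iotlb_translate() now taking the lock itself, a user of the IOTLB variant passes the spinlock that also guards its vhost_iotlb updates when wiring up the vringh. A sketch assuming a vdpa_sim-like driver (variable names are illustrative):

    struct vhost_iotlb *iotlb;
    spinlock_t iommu_lock;      /* also held wherever the iotlb is updated */
    struct vringh vrh;

    spin_lock_init(&iommu_lock);
    iotlb = vhost_iotlb_alloc(max_iotlb_entries, 0);

    vringh_init_iotlb(&vrh, features, num, false, NULL, NULL, NULL);
    vringh_set_iotlb(&vrh, iotlb, &iommu_lock);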
*/ -static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid) +static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 __user *slot_uid) { struct ne_pci_dev_cmd_reply cmd_reply = {}; int enclave_fd = -1; @@ -1634,7 +1635,18 @@ static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid) list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list); - *slot_uid = ne_enclave->slot_uid; + if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) { + /* + * As we're holding the only reference to 'enclave_file', fput() + * will call ne_enclave_release() which will do a proper cleanup + * of all so far allocated resources, leaving only the unused fd + * for us to free. + */ + fput(enclave_file); + put_unused_fd(enclave_fd); + + return -EFAULT; + } fd_install(enclave_fd, enclave_file); @@ -1671,34 +1683,13 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg) switch (cmd) { case NE_CREATE_VM: { int enclave_fd = -1; - struct file *enclave_file = NULL; struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev; - int rc = -EINVAL; - u64 slot_uid = 0; + u64 __user *slot_uid = (void __user *)arg; mutex_lock(&ne_pci_dev->enclaves_list_mutex); - - enclave_fd = ne_create_vm_ioctl(ne_pci_dev, &slot_uid); - if (enclave_fd < 0) { - rc = enclave_fd; - - mutex_unlock(&ne_pci_dev->enclaves_list_mutex); - - return rc; - } - + enclave_fd = ne_create_vm_ioctl(ne_pci_dev, slot_uid); mutex_unlock(&ne_pci_dev->enclaves_list_mutex); - if (copy_to_user((void __user *)arg, &slot_uid, sizeof(slot_uid))) { - enclave_file = fget(enclave_fd); - /* Decrement file refs to have release() called. */ - fput(enclave_file); - fput(enclave_file); - put_unused_fd(enclave_fd); - - return -EFAULT; - } - return enclave_fd; } diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 8985fc2cea86..510e9318854d 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -734,7 +734,7 @@ static void report_free_page_func(struct work_struct *work) #ifdef CONFIG_BALLOON_COMPACTION /* * virtballoon_migratepage - perform the balloon page migration on behalf of - * a compation thread. (called under page lock) + * a compaction thread. (called under page lock) * @vb_dev_info: the balloon device * @newpage: page that will replace the isolated page after migration finishes. * @page : the isolated (old) page that is about to be migrated to newpage. 
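The ne_misc_dev.c rework above follows the standard anon-fd publication order: every step that can fail, including the copy_to_user() of the slot id, happens before fd_install(), because an installed fd is immediately reachable by userspace and cannot be revoked. A generic sketch of that pattern (the fops, priv and uarg names are illustrative, not the nitro code):

    int fd = get_unused_fd_flags(O_CLOEXEC);
    struct file *file;

    if (fd < 0)
            return fd;

    file = anon_inode_getfile("my-dev", &my_fops, priv, O_RDWR);
    if (IS_ERR(file)) {
            put_unused_fd(fd);
            return PTR_ERR(file);
    }

    /* every step that can fail comes first... */
    if (copy_to_user(uarg, &result, sizeof(result))) {
            fput(file);             /* drops the sole reference, so
                                     * ->release() cleans up priv */
            put_unused_fd(fd);
            return -EFAULT;
    }

    /* ...and publishing the fd comes last */
    fd_install(fd, file);
    return fd;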
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index fbd4ebc00eb6..30654d3a0b41 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -192,7 +192,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, struct virtio_pci_modern_device *mdev = &vp_dev->mdev; struct virtqueue *vq; - u16 num, off; + u16 num; int err; if (index >= vp_modern_get_num_queues(mdev)) @@ -208,9 +208,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, return ERR_PTR(-EINVAL); } - /* get offset of notification word for this vq */ - off = vp_modern_get_queue_notify_off(mdev, index); - info->msix_vector = msix_vec; /* create the vring */ @@ -227,27 +224,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, virtqueue_get_avail_addr(vq), virtqueue_get_used_addr(vq)); - if (mdev->notify_base) { - /* offset should not wrap */ - if ((u64)off * mdev->notify_offset_multiplier + 2 - > mdev->notify_len) { - dev_warn(&mdev->pci_dev->dev, - "bad notification offset %u (x %u) " - "for queue %u > %zd", - off, mdev->notify_offset_multiplier, - index, mdev->notify_len); - err = -EINVAL; - goto err_map_notify; - } - vq->priv = (void __force *)mdev->notify_base + - off * mdev->notify_offset_multiplier; - } else { - vq->priv = (void __force *)vp_modern_map_capability(mdev, - mdev->notify_map_cap, 2, 2, - off * mdev->notify_offset_multiplier, 2, - NULL); - } - + vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL); if (!vq->priv) { err = -ENOMEM; goto err_map_notify; diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c index cbd667496bb1..54f297028586 100644 --- a/drivers/virtio/virtio_pci_modern_dev.c +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -13,14 +13,14 @@ * @start: start from the capability * @size: map size * @len: the length that is actually mapped + * @pa: physical address of the capability * * Returns the io address of for the part of the capability */ -void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, - size_t minlen, - u32 align, - u32 start, u32 size, - size_t *len) +static void __iomem * +vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, + size_t minlen, u32 align, u32 start, u32 size, + size_t *len, resource_size_t *pa) { struct pci_dev *dev = mdev->pci_dev; u8 bar; @@ -88,9 +88,11 @@ void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, in dev_err(&dev->dev, "virtio_pci: unable to map virtio %u@%u on bar %i\n", length, offset, bar); + else if (pa) + *pa = pci_resource_start(dev, bar) + offset; + return p; } -EXPORT_SYMBOL_GPL(vp_modern_map_capability); /** * virtio_pci_find_capability - walk capabilities to find device info. 
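Both setup_vq() above and the vp_vdpa probe loop earlier now funnel through the new vp_modern_map_vq_notify() helper, so the wrap check and the notify_base/notify_map_cap split live in one place. A condensed sketch of the calling convention, assuming an already-probed mdev (error handling trimmed, vq index 0 arbitrary):

    void __iomem *db;
    resource_size_t db_pa;

    /* map the doorbell of vq 0 and learn its bus address, e.g. so a
     * vDPA parent can hand the page to userspace via mmap() */
    db = vp_modern_map_vq_notify(mdev, 0, &db_pa);
    if (!db)
            return -EINVAL;

    /* kicking the queue is then a single 16-bit write of the index */
    vp_iowrite16(0, db);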
@@ -275,12 +277,12 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
 	mdev->common = vp_modern_map_capability(mdev, common,
			      sizeof(struct virtio_pci_common_cfg), 4,
			      0, sizeof(struct virtio_pci_common_cfg),
-			      NULL);
+			      NULL, NULL);
 	if (!mdev->common)
 		goto err_map_common;
 	mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
-					     0, 1, NULL);
+					     0, 1, NULL, NULL);
 	if (!mdev->isr)
 		goto err_map_isr;

@@ -308,7 +310,8 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
 		mdev->notify_base = vp_modern_map_capability(mdev, notify,
						       2, 2,
						       0, notify_length,
-						       &mdev->notify_len);
+						       &mdev->notify_len,
+						       &mdev->notify_pa);
 		if (!mdev->notify_base)
 			goto err_map_notify;
 	} else {
@@ -321,7 +324,8 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
 	if (device) {
 		mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
							0, PAGE_SIZE,
-							&mdev->device_len);
+							&mdev->device_len,
+							NULL);
 		if (!mdev->device)
 			goto err_map_device;
 	}
@@ -584,14 +588,51 @@ EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);
 *
 * Returns the notification offset for a virtqueue
 */
-u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
-				   u16 index)
+static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
+					  u16 index)
 {
 	vp_iowrite16(index, &mdev->common->queue_select);
 	return vp_ioread16(&mdev->common->queue_notify_off);
 }
-EXPORT_SYMBOL_GPL(vp_modern_get_queue_notify_off);
+
+/*
+ * vp_modern_map_vq_notify - map notification area for a
+ * specific virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ * @pa: the pointer to the physical address of the notify area
+ *
+ * Returns the address of the notification area
+ */
+void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
+				      u16 index, resource_size_t *pa)
+{
+	u16 off = vp_modern_get_queue_notify_off(mdev, index);
+
+	if (mdev->notify_base) {
+		/* offset should not wrap */
+		if ((u64)off * mdev->notify_offset_multiplier + 2
+		    > mdev->notify_len) {
+			dev_warn(&mdev->pci_dev->dev,
+				 "bad notification offset %u (x %u) "
+				 "for queue %u > %zd",
+				 off, mdev->notify_offset_multiplier,
+				 index, mdev->notify_len);
+			return NULL;
+		}
+		if (pa)
+			*pa = mdev->notify_pa +
+			      off * mdev->notify_offset_multiplier;
+		return mdev->notify_base + off * mdev->notify_offset_multiplier;
+	} else {
+		return vp_modern_map_capability(mdev,
+				       mdev->notify_map_cap, 2, 2,
+				       off * mdev->notify_offset_multiplier, 2,
+				       NULL, pa);
+	}
+}
+EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify);

 MODULE_VERSION("0.1");
 MODULE_DESCRIPTION("Modern Virtio PCI Device");
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 2b385c1b4a99..4c89afc0df62 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -40,14 +40,7 @@
 #include <trace/events/swiotlb.h>
 #define MAX_DMA_BITS 32
-/*
- * Used to do a quick range check in swiotlb_tbl_unmap_single and
- * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
- * API.
- */
-static char *xen_io_tlb_start, *xen_io_tlb_end;
-static unsigned long xen_io_tlb_nslabs;

 /*
  * Quick lookup value of the bus address of the IOTLB.
*/ @@ -82,11 +75,6 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev, return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr)); } -static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address) -{ - return xen_phys_to_dma(dev, virt_to_phys(address)); -} - static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) { unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p); @@ -111,15 +99,12 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr) * have the same virtual address as another address * in our domain. Therefore _only_ check address within our domain. */ - if (pfn_valid(PFN_DOWN(paddr))) { - return paddr >= virt_to_phys(xen_io_tlb_start) && - paddr < virt_to_phys(xen_io_tlb_end); - } + if (pfn_valid(PFN_DOWN(paddr))) + return is_swiotlb_buffer(paddr); return 0; } -static int -xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs) +static int xen_swiotlb_fixup(void *buf, unsigned long nslabs) { int i, rc; int dma_bits; @@ -145,16 +130,6 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs) } while (i < nslabs); return 0; } -static unsigned long xen_set_nslabs(unsigned long nr_tbl) -{ - if (!nr_tbl) { - xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT); - xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE); - } else - xen_io_tlb_nslabs = nr_tbl; - - return xen_io_tlb_nslabs << IO_TLB_SHIFT; -} enum xen_swiotlb_err { XEN_SWIOTLB_UNKNOWN = 0, @@ -177,102 +152,109 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err) } return ""; } -int __ref xen_swiotlb_init(int verbose, bool early) + +#define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE) + +int __ref xen_swiotlb_init(void) { - unsigned long bytes, order; - int rc = -ENOMEM; enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN; - unsigned int repeat = 3; + unsigned long bytes = swiotlb_size_or_default(); + unsigned long nslabs = bytes >> IO_TLB_SHIFT; + unsigned int order, repeat = 3; + int rc = -ENOMEM; + char *start; - xen_io_tlb_nslabs = swiotlb_nr_tbl(); retry: - bytes = xen_set_nslabs(xen_io_tlb_nslabs); - order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT); - - /* - * IO TLB memory already allocated. Just use it. - */ - if (io_tlb_start != 0) { - xen_io_tlb_start = phys_to_virt(io_tlb_start); - goto end; - } + m_ret = XEN_SWIOTLB_ENOMEM; + order = get_order(bytes); /* * Get IO TLB memory from any location. 
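For concreteness, the default sizing above works out as follows, assuming IO_TLB_SHIFT is 11 (2 KiB slabs), IO_TLB_SEGSIZE is 128 and no swiotlb= override on the command line:

    /*
     * bytes  = swiotlb_size_or_default()  = 64 MiB
     * nslabs = bytes >> IO_TLB_SHIFT      = 67108864 >> 11 = 32768
     * DEFAULT_NSLABS = ALIGN(32768, 128)  = 32768 (already aligned)
     * get_order(bytes), 4 KiB pages       = order 14 (16384 pages)
     */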
*/ - if (early) { - xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes), - PAGE_SIZE); - if (!xen_io_tlb_start) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, PAGE_ALIGN(bytes), PAGE_SIZE); - } else { #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) - while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { - xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order); - if (xen_io_tlb_start) - break; - order--; - } - if (order != get_order(bytes)) { - pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n", - (PAGE_SIZE << order) >> 20); - xen_io_tlb_nslabs = SLABS_PER_PAGE << order; - bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT; - } + while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { + start = (void *)xen_get_swiotlb_free_pages(order); + if (start) + break; + order--; } - if (!xen_io_tlb_start) { - m_ret = XEN_SWIOTLB_ENOMEM; + if (!start) goto error; + if (order != get_order(bytes)) { + pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n", + (PAGE_SIZE << order) >> 20); + nslabs = SLABS_PER_PAGE << order; + bytes = nslabs << IO_TLB_SHIFT; } + /* * And replace that memory with pages under 4GB. */ - rc = xen_swiotlb_fixup(xen_io_tlb_start, - bytes, - xen_io_tlb_nslabs); + rc = xen_swiotlb_fixup(start, nslabs); if (rc) { - if (early) - memblock_free(__pa(xen_io_tlb_start), - PAGE_ALIGN(bytes)); - else { - free_pages((unsigned long)xen_io_tlb_start, order); - xen_io_tlb_start = NULL; - } + free_pages((unsigned long)start, order); m_ret = XEN_SWIOTLB_EFIXUP; goto error; } - if (early) { - if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, - verbose)) - panic("Cannot allocate SWIOTLB buffer"); - rc = 0; - } else - rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs); - -end: - xen_io_tlb_end = xen_io_tlb_start + bytes; - if (!rc) - swiotlb_set_max_segment(PAGE_SIZE); - - return rc; + rc = swiotlb_late_init_with_tbl(start, nslabs); + if (rc) + return rc; + swiotlb_set_max_segment(PAGE_SIZE); + return 0; error: if (repeat--) { - xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */ - (xen_io_tlb_nslabs >> 1)); + /* Min is 2MB */ + nslabs = max(1024UL, (nslabs >> 1)); pr_info("Lowering to %luMB\n", - (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20); + (nslabs << IO_TLB_SHIFT) >> 20); goto retry; } pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc); - if (early) - panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc); - else - free_pages((unsigned long)xen_io_tlb_start, order); + free_pages((unsigned long)start, order); return rc; } +#ifdef CONFIG_X86 +void __init xen_swiotlb_init_early(void) +{ + unsigned long bytes = swiotlb_size_or_default(); + unsigned long nslabs = bytes >> IO_TLB_SHIFT; + unsigned int repeat = 3; + char *start; + int rc; + +retry: + /* + * Get IO TLB memory from any location. + */ + start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE); + if (!start) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_ALIGN(bytes), PAGE_SIZE); + + /* + * And replace that memory with pages under 4GB. 
+ */ + rc = xen_swiotlb_fixup(start, nslabs); + if (rc) { + memblock_free(__pa(start), PAGE_ALIGN(bytes)); + if (repeat--) { + /* Min is 2MB */ + nslabs = max(1024UL, (nslabs >> 1)); + bytes = nslabs << IO_TLB_SHIFT; + pr_info("Lowering to %luMB\n", bytes >> 20); + goto retry; + } + panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc); + } + + if (swiotlb_init_with_tbl(start, nslabs, false)) + panic("Cannot allocate SWIOTLB buffer"); + swiotlb_set_max_segment(PAGE_SIZE); +} +#endif /* CONFIG_X86 */ + static void * xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags, @@ -406,7 +388,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, * Ensure that the address returned is DMA'ble */ if (unlikely(!dma_capable(dev, dev_addr, size, true))) { - swiotlb_tbl_unmap_single(dev, map, size, size, dir, + swiotlb_tbl_unmap_single(dev, map, size, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); return DMA_MAPPING_ERROR; } @@ -445,7 +427,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, /* NOTE: We use dev_addr here, not paddr! */ if (is_xen_swiotlb_buffer(hwdev, dev_addr)) - swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs); + swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs); } static void @@ -462,7 +444,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, } if (is_xen_swiotlb_buffer(dev, dma_addr)) - swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU); + swiotlb_sync_single_for_cpu(dev, paddr, size, dir); } static void @@ -472,7 +454,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr); if (is_xen_swiotlb_buffer(dev, dma_addr)) - swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE); + swiotlb_sync_single_for_device(dev, paddr, size, dir); if (!dev_is_dma_coherent(dev)) { if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr)))) @@ -560,7 +542,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, static int xen_swiotlb_dma_supported(struct device *hwdev, u64 mask) { - return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask; + return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask; } const struct dma_map_ops xen_swiotlb_dma_ops = { diff --git a/fs/Kconfig b/fs/Kconfig index 97e7b77c9309..89a750d292ba 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -223,10 +223,13 @@ config TMPFS_INODE64 If unsure, say N. +config ARCH_SUPPORTS_HUGETLBFS + def_bool n + config HUGETLBFS bool "HugeTLB file system support" depends on X86 || IA64 || SPARC64 || (S390 && 64BIT) || \ - SYS_SUPPORTS_HUGETLBFS || BROKEN + ARCH_SUPPORTS_HUGETLBFS || BROKEN help hugetlbfs is a filesystem backing for HugeTLB pages, based on ramfs. 
For architectures that support it, say Y here and read diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt index c6f1c8c1934e..06fb7a93a1bd 100644 --- a/fs/Kconfig.binfmt +++ b/fs/Kconfig.binfmt @@ -112,6 +112,9 @@ config BINFMT_FLAT_ARGVP_ENVP_ON_STACK config BINFMT_FLAT_OLD_ALWAYS_RAM bool +config BINFMT_FLAT_NO_DATA_START_OFFSET + bool + config BINFMT_FLAT_OLD bool "Enable support for very old legacy flat binaries" depends on BINFMT_FLAT diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index b9c658e0548e..a1072c6a2341 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -74,6 +74,12 @@ #define MAX_SHARED_LIBS (1) #endif +#ifdef CONFIG_BINFMT_FLAT_NO_DATA_START_OFFSET +#define DATA_START_OFFSET_WORDS (0) +#else +#define DATA_START_OFFSET_WORDS (MAX_SHARED_LIBS) +#endif + struct lib_info { struct { unsigned long start_code; /* Start of text segment */ @@ -576,7 +582,8 @@ static int load_flat_file(struct linux_binprm *bprm, goto err; } - len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); + len = data_len + extra + + DATA_START_OFFSET_WORDS * sizeof(unsigned long); len = PAGE_ALIGN(len); realdatastart = vm_mmap(NULL, 0, len, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0); @@ -591,7 +598,7 @@ static int load_flat_file(struct linux_binprm *bprm, goto err; } datapos = ALIGN(realdatastart + - MAX_SHARED_LIBS * sizeof(unsigned long), + DATA_START_OFFSET_WORDS * sizeof(unsigned long), FLAT_DATA_ALIGN); pr_debug("Allocated data+bss+stack (%u bytes): %lx\n", @@ -622,7 +629,8 @@ static int load_flat_file(struct linux_binprm *bprm, memp_size = len; } else { - len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(u32); + len = text_len + data_len + extra + + DATA_START_OFFSET_WORDS * sizeof(u32); len = PAGE_ALIGN(len); textpos = vm_mmap(NULL, 0, len, PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0); @@ -638,7 +646,7 @@ static int load_flat_file(struct linux_binprm *bprm, realdatastart = textpos + ntohl(hdr->data_start); datapos = ALIGN(realdatastart + - MAX_SHARED_LIBS * sizeof(u32), + DATA_START_OFFSET_WORDS * sizeof(u32), FLAT_DATA_ALIGN); reloc = (__be32 __user *) @@ -714,7 +722,7 @@ static int load_flat_file(struct linux_binprm *bprm, ret = result; pr_err("Unable to read code+data+bss, errno %d\n", ret); vm_munmap(textpos, text_len + data_len + extra + - MAX_SHARED_LIBS * sizeof(u32)); + DATA_START_OFFSET_WORDS * sizeof(u32)); goto err; } } diff --git a/fs/block_dev.c b/fs/block_dev.c index a5244e08b6c8..9114e0a0e7b4 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -79,7 +79,7 @@ static void kill_bdev(struct block_device *bdev) { struct address_space *mapping = bdev->bd_inode->i_mapping; - if (mapping->nrpages == 0 && mapping->nrexceptional == 0) + if (mapping_empty(mapping)) return; invalidate_bh_lrus(); diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 17f93fd28f7e..2bea01d23a5b 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -591,16 +591,13 @@ static noinline int add_ra_bio_pages(struct inode *inode, free_extent_map(em); if (page->index == end_index) { - char *userpage; size_t zero_offset = offset_in_page(isize); if (zero_offset) { int zeros; zeros = PAGE_SIZE - zero_offset; - userpage = kmap_atomic(page); - memset(userpage + zero_offset, 0, zeros); + memzero_page(page, zero_offset, zeros); flush_dcache_page(page); - kunmap_atomic(userpage); } } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f2d1bb234377..074a78a202b8 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3421,15 +3421,12 @@ int 
btrfs_do_readpage(struct page *page, struct extent_map **em_cached, } if (page->index == last_byte >> PAGE_SHIFT) { - char *userpage; size_t zero_offset = offset_in_page(last_byte); if (zero_offset) { iosize = PAGE_SIZE - zero_offset; - userpage = kmap_atomic(page); - memset(userpage + zero_offset, 0, iosize); + memzero_page(page, zero_offset, iosize); flush_dcache_page(page); - kunmap_atomic(userpage); } } begin_page_read(fs_info, page); @@ -3438,14 +3435,11 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, u64 disk_bytenr; if (cur >= last_byte) { - char *userpage; struct extent_state *cached = NULL; iosize = PAGE_SIZE - pg_offset; - userpage = kmap_atomic(page); - memset(userpage + pg_offset, 0, iosize); + memzero_page(page, pg_offset, iosize); flush_dcache_page(page); - kunmap_atomic(userpage); set_extent_uptodate(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); unlock_extent_cached(tree, cur, @@ -3528,13 +3522,10 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, /* we've found a hole, just zero and go on */ if (block_start == EXTENT_MAP_HOLE) { - char *userpage; struct extent_state *cached = NULL; - userpage = kmap_atomic(page); - memset(userpage + pg_offset, 0, iosize); + memzero_page(page, pg_offset, iosize); flush_dcache_page(page); - kunmap_atomic(userpage); set_extent_uptodate(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); @@ -3845,12 +3836,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, } if (page->index == end_index) { - char *userpage; - - userpage = kmap_atomic(page); - memset(userpage + pg_offset, 0, - PAGE_SIZE - pg_offset); - kunmap_atomic(userpage); + memzero_page(page, pg_offset, PAGE_SIZE - pg_offset); flush_dcache_page(page); } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b21d491b3adc..4af336008b12 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -646,17 +646,12 @@ again: if (!ret) { unsigned long offset = offset_in_page(total_compressed); struct page *page = pages[nr_pages - 1]; - char *kaddr; /* zero the tail end of the last page, we might be * sending it down to disk */ - if (offset) { - kaddr = kmap_atomic(page); - memset(kaddr + offset, 0, - PAGE_SIZE - offset); - kunmap_atomic(kaddr); - } + if (offset) + memzero_page(page, offset, PAGE_SIZE - offset); will_compress = 1; } } @@ -4833,7 +4828,6 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, struct btrfs_ordered_extent *ordered; struct extent_state *cached_state = NULL; struct extent_changeset *data_reserved = NULL; - char *kaddr; bool only_release_metadata = false; u32 blocksize = fs_info->sectorsize; pgoff_t index = from >> PAGE_SHIFT; @@ -4925,15 +4919,13 @@ again: if (offset != blocksize) { if (!len) len = blocksize - offset; - kaddr = kmap(page); if (front) - memset(kaddr + (block_start - page_offset(page)), - 0, offset); + memzero_page(page, (block_start - page_offset(page)), + offset); else - memset(kaddr + (block_start - page_offset(page)) + offset, - 0, len); + memzero_page(page, (block_start - page_offset(page)) + offset, + len); flush_dcache_page(page); - kunmap(page); } ClearPageChecked(page); set_page_dirty(page); @@ -6832,11 +6824,9 @@ static noinline int uncompress_inline(struct btrfs_path *path, * cover that region here. 
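All of these btrfs conversions lean on the memzero_page() helper introduced on the mm side of this merge; modulo the exact kmap flavour, it is a sketch-level equivalent of the open-coded pattern being deleted:

    static inline void memzero_page(struct page *page, size_t offset,
                                    size_t len)
    {
            char *addr = kmap_atomic(page);

            memset(addr + offset, 0, len);
            kunmap_atomic(addr);
    }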
 */
-	if (max_size + pg_offset < PAGE_SIZE) {
-		char *map = kmap(page);
-		memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
-		kunmap(page);
-	}
+	if (max_size + pg_offset < PAGE_SIZE)
+		memzero_page(page, pg_offset + max_size,
+			     PAGE_SIZE - max_size - pg_offset);
 	kfree(tmp);
 	return ret;
 }
@@ -8506,7 +8496,6 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
 	struct extent_changeset *data_reserved = NULL;
-	char *kaddr;
 	unsigned long zero_start;
 	loff_t size;
 	vm_fault_t ret;
@@ -8620,10 +8609,8 @@ again:
 		zero_start = PAGE_SIZE;

 	if (zero_start != PAGE_SIZE) {
-		kaddr = kmap(page);
-		memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
+		memzero_page(page, zero_start, PAGE_SIZE - zero_start);
 		flush_dcache_page(page);
-		kunmap(page);
 	}
 	ClearPageChecked(page);
 	set_page_dirty(page);
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index f4ec06b53aa0..3928ecc40d7b 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -129,12 +129,8 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
 	 * So what's in the range [500, 4095] corresponds to zeroes.
 	 */
 	if (datal < block_size) {
-		char *map;
-
-		map = kmap(page);
-		memset(map + datal, 0, block_size - datal);
+		memzero_page(page, datal, block_size - datal);
 		flush_dcache_page(page);
-		kunmap(page);
 	}

 	SetPageUptodate(page);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index d524acf7b3e5..c3fa7d3fa770 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -375,7 +375,6 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 	unsigned long bytes_left;
 	unsigned long total_out = 0;
 	unsigned long pg_offset = 0;
-	char *kaddr;

 	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
 	bytes_left = destlen;
@@ -455,9 +454,7 @@ next:
 	 * end of the inline extent (destlen) to the end of the page
 	 */
 	if (pg_offset < destlen) {
-		kaddr = kmap_atomic(dest_page);
-		memset(kaddr + pg_offset, 0, destlen - pg_offset);
-		kunmap_atomic(kaddr);
+		memzero_page(dest_page, pg_offset, destlen - pg_offset);
 	}
 	return ret;
 }
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 8e9626d63976..3e26b466476a 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -631,7 +631,6 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
 	size_t ret2;
 	unsigned long total_out = 0;
 	unsigned long pg_offset = 0;
-	char *kaddr;

 	stream = ZSTD_initDStream(
 			ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
@@ -696,9 +695,7 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
 	ret = 0;
finish:
 	if (pg_offset < destlen) {
-		kaddr = kmap_atomic(dest_page);
-		memset(kaddr + pg_offset, 0, destlen - pg_offset);
-		kunmap_atomic(kaddr);
+		memzero_page(dest_page, pg_offset, destlen - pg_offset);
 	}
 	return ret;
 }
diff --git a/fs/buffer.c b/fs/buffer.c
index c2e052c0fc5d..ea48c01fb76b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1260,6 +1260,15 @@ static void bh_lru_install(struct buffer_head *bh)
 	int i;

 	check_irqs_on();
+	/*
+	 * The refcount of a buffer_head in bh_lru prevents dropping the
+	 * attached page (i.e., by try_to_free_buffers), which can make
+	 * page migration fail.
+	 * Skip putting upcoming bh into bh_lru until migration is done.
+ */ + if (lru_cache_disabled()) + return; + bh_lru_lock(); b = this_cpu_ptr(&bh_lrus); @@ -1400,6 +1409,15 @@ __bread_gfp(struct block_device *bdev, sector_t block, } EXPORT_SYMBOL(__bread_gfp); +static void __invalidate_bh_lrus(struct bh_lru *b) +{ + int i; + + for (i = 0; i < BH_LRU_SIZE; i++) { + brelse(b->bhs[i]); + b->bhs[i] = NULL; + } +} /* * invalidate_bh_lrus() is called rarely - but not only at unmount. * This doesn't race because it runs in each cpu either in irq @@ -1408,16 +1426,12 @@ EXPORT_SYMBOL(__bread_gfp); static void invalidate_bh_lru(void *arg) { struct bh_lru *b = &get_cpu_var(bh_lrus); - int i; - for (i = 0; i < BH_LRU_SIZE; i++) { - brelse(b->bhs[i]); - b->bhs[i] = NULL; - } + __invalidate_bh_lrus(b); put_cpu_var(bh_lrus); } -static bool has_bh_in_lru(int cpu, void *dummy) +bool has_bh_in_lru(int cpu, void *dummy) { struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu); int i; @@ -1436,6 +1450,16 @@ void invalidate_bh_lrus(void) } EXPORT_SYMBOL_GPL(invalidate_bh_lrus); +void invalidate_bh_lrus_cpu(int cpu) +{ + struct bh_lru *b; + + bh_lru_lock(); + b = per_cpu_ptr(&bh_lrus, cpu); + __invalidate_bh_lrus(b); + bh_lru_unlock(); +} + void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset) { diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig index 471e40156065..94df854147d3 100644 --- a/fs/ceph/Kconfig +++ b/fs/ceph/Kconfig @@ -6,6 +6,7 @@ config CEPH_FS select LIBCRC32C select CRYPTO_AES select CRYPTO + select NETFS_SUPPORT default n help Choose Y or M here to include support for mounting the diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 26e66436f005..c1570fada3d8 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -12,6 +12,7 @@ #include <linux/signal.h> #include <linux/iversion.h> #include <linux/ktime.h> +#include <linux/netfs.h> #include "super.h" #include "mds_client.h" @@ -61,6 +62,9 @@ (CONGESTION_ON_THRESH(congestion_kb) - \ (CONGESTION_ON_THRESH(congestion_kb) >> 2)) +static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len, + struct page *page, void **_fsdata); + static inline struct ceph_snap_context *page_snap_context(struct page *page) { if (PagePrivate(page)) @@ -124,8 +128,7 @@ static int ceph_set_page_dirty(struct page *page) * PagePrivate so that we get invalidatepage callback. 
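invalidate_bh_lrus_cpu() gives the migration/CMA side a way to drain a CPU's bh LRU from that CPU's own drain work item. The mm/swap.c half of this series pairs it with the pagevec drain roughly like so (a sketch; lru_pvecs and its local_lock are mm/swap.c internals):

    /* runs in per-cpu work on the CPU that owns the LRUs */
    static void lru_add_and_bh_lrus_drain(void)
    {
            local_lock(&lru_pvecs.lock);
            lru_add_drain_cpu(smp_processor_id());
            local_unlock(&lru_pvecs.lock);
            invalidate_bh_lrus_cpu(smp_processor_id());
    }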
*/ BUG_ON(PagePrivate(page)); - page->private = (unsigned long)snapc; - SetPagePrivate(page); + attach_page_private(page, snapc); ret = __set_page_dirty_nobuffers(page); WARN_ON(!PageLocked(page)); @@ -144,19 +147,19 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset, { struct inode *inode; struct ceph_inode_info *ci; - struct ceph_snap_context *snapc = page_snap_context(page); + struct ceph_snap_context *snapc; + + wait_on_page_fscache(page); inode = page->mapping->host; ci = ceph_inode(inode); - if (offset != 0 || length != PAGE_SIZE) { + if (offset != 0 || length != thp_size(page)) { dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n", inode, page, page->index, offset, length); return; } - ceph_invalidate_fscache_page(inode, page); - WARN_ON(!PageLocked(page)); if (!PagePrivate(page)) return; @@ -164,333 +167,222 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset, dout("%p invalidatepage %p idx %lu full dirty page\n", inode, page, page->index); + snapc = detach_page_private(page); ceph_put_wrbuffer_cap_refs(ci, 1, snapc); ceph_put_snap_context(snapc); - page->private = 0; - ClearPagePrivate(page); } -static int ceph_releasepage(struct page *page, gfp_t g) +static int ceph_releasepage(struct page *page, gfp_t gfp) { dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host, page, page->index, PageDirty(page) ? "" : "not "); - /* Can we release the page from the cache? */ - if (!ceph_release_fscache_page(page, g)) - return 0; - + if (PageFsCache(page)) { + if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) + return 0; + wait_on_page_fscache(page); + } return !PagePrivate(page); } -/* read a single page, without unlocking it. */ -static int ceph_do_readpage(struct file *filp, struct page *page) +static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq) { - struct inode *inode = file_inode(filp); + struct inode *inode = rreq->mapping->host; struct ceph_inode_info *ci = ceph_inode(inode); - struct ceph_fs_client *fsc = ceph_inode_to_client(inode); - struct ceph_osd_client *osdc = &fsc->client->osdc; - struct ceph_osd_request *req; - struct ceph_vino vino = ceph_vino(inode); - int err = 0; - u64 off = page_offset(page); - u64 len = PAGE_SIZE; - - if (off >= i_size_read(inode)) { - zero_user_segment(page, 0, PAGE_SIZE); - SetPageUptodate(page); - return 0; - } - - if (ci->i_inline_version != CEPH_INLINE_NONE) { - /* - * Uptodate inline data should have been added - * into page cache while getting Fcr caps. 
- */ - if (off == 0) - return -EINVAL; - zero_user_segment(page, 0, PAGE_SIZE); - SetPageUptodate(page); - return 0; - } - - err = ceph_readpage_from_fscache(inode, page); - if (err == 0) - return -EINPROGRESS; - - dout("readpage ino %llx.%llx file %p off %llu len %llu page %p index %lu\n", - vino.ino, vino.snap, filp, off, len, page, page->index); - req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len, 0, 1, - CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, NULL, - ci->i_truncate_seq, ci->i_truncate_size, - false); - if (IS_ERR(req)) - return PTR_ERR(req); + struct ceph_file_layout *lo = &ci->i_layout; + u32 blockoff; + u64 blockno; - osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false); + /* Expand the start downward */ + blockno = div_u64_rem(rreq->start, lo->stripe_unit, &blockoff); + rreq->start = blockno * lo->stripe_unit; + rreq->len += blockoff; - err = ceph_osdc_start_request(osdc, req, false); - if (!err) - err = ceph_osdc_wait_request(osdc, req); - - ceph_update_read_latency(&fsc->mdsc->metric, req->r_start_latency, - req->r_end_latency, err); - - ceph_osdc_put_request(req); - dout("readpage result %d\n", err); - - if (err == -ENOENT) - err = 0; - if (err < 0) { - ceph_fscache_readpage_cancel(inode, page); - if (err == -EBLOCKLISTED) - fsc->blocklisted = true; - goto out; - } - if (err < PAGE_SIZE) - /* zero fill remainder of page */ - zero_user_segment(page, err, PAGE_SIZE); - else - flush_dcache_page(page); - - SetPageUptodate(page); - ceph_readpage_to_fscache(inode, page); - -out: - return err < 0 ? err : 0; + /* Now, round up the length to the next block */ + rreq->len = roundup(rreq->len, lo->stripe_unit); } -static int ceph_readpage(struct file *filp, struct page *page) +static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq) { - int r = ceph_do_readpage(filp, page); - if (r != -EINPROGRESS) - unlock_page(page); - else - r = 0; - return r; + struct inode *inode = subreq->rreq->mapping->host; + struct ceph_fs_client *fsc = ceph_inode_to_client(inode); + struct ceph_inode_info *ci = ceph_inode(inode); + u64 objno, objoff; + u32 xlen; + + /* Truncate the extent at the end of the current block */ + ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len, + &objno, &objoff, &xlen); + subreq->len = min(xlen, fsc->mount_options->rsize); + return true; } -/* - * Finish an async read(ahead) op. - */ -static void finish_read(struct ceph_osd_request *req) +static void finish_netfs_read(struct ceph_osd_request *req) { - struct inode *inode = req->r_inode; - struct ceph_fs_client *fsc = ceph_inode_to_client(inode); - struct ceph_osd_data *osd_data; - int rc = req->r_result <= 0 ? req->r_result : 0; - int bytes = req->r_result >= 0 ? 
req->r_result : 0; + struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode); + struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); + struct netfs_read_subrequest *subreq = req->r_priv; int num_pages; - int i; + int err = req->r_result; - dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); - if (rc == -EBLOCKLISTED) - ceph_inode_to_client(inode)->blocklisted = true; + ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency, + req->r_end_latency, err); - /* unlock all pages, zeroing any data we didn't read */ - osd_data = osd_req_op_extent_osd_data(req, 0); - BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); - num_pages = calc_pages_for((u64)osd_data->alignment, - (u64)osd_data->length); - for (i = 0; i < num_pages; i++) { - struct page *page = osd_data->pages[i]; - - if (rc < 0 && rc != -ENOENT) { - ceph_fscache_readpage_cancel(inode, page); - goto unlock; - } - if (bytes < (int)PAGE_SIZE) { - /* zero (remainder of) page */ - int s = bytes < 0 ? 0 : bytes; - zero_user_segment(page, s, PAGE_SIZE); - } - dout("finish_read %p uptodate %p idx %lu\n", inode, page, - page->index); - flush_dcache_page(page); - SetPageUptodate(page); - ceph_readpage_to_fscache(inode, page); -unlock: - unlock_page(page); - put_page(page); - bytes -= PAGE_SIZE; - } + dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result, + subreq->len, i_size_read(req->r_inode)); - ceph_update_read_latency(&fsc->mdsc->metric, req->r_start_latency, - req->r_end_latency, rc); + /* no object means success but no data */ + if (err == -ENOENT) + err = 0; + else if (err == -EBLOCKLISTED) + fsc->blocklisted = true; + + if (err >= 0 && err < subreq->len) + __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); + + netfs_subreq_terminated(subreq, err, true); - kfree(osd_data->pages); + num_pages = calc_pages_for(osd_data->alignment, osd_data->length); + ceph_put_page_vector(osd_data->pages, num_pages, false); + iput(req->r_inode); } -/* - * start an async read(ahead) operation. return nr_pages we submitted - * a read for on success, or negative error code. 
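A worked example for ceph_netfs_expand_readahead() above: with a 4 MiB stripe_unit and an incoming request for [5 MiB, 6 MiB), the request is widened to the enclosing stripe block:

    /*
     * blockno = div_u64_rem(5M, 4M, &blockoff)  -> blockno 1, blockoff 1M
     * start   = blockno * stripe_unit           -> 4M
     * len     = 1M + blockoff                   -> 2M
     * roundup(len, stripe_unit)                 -> 4M
     * i.e. the OSD read covers the whole [4M, 8M) stripe block.
     */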
- */
-static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
-		      struct list_head *page_list, int max)
+static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq)
 {
-	struct ceph_osd_client *osdc =
-		&ceph_inode_to_client(inode)->client->osdc;
+	struct netfs_read_request *rreq = subreq->rreq;
+	struct inode *inode = rreq->mapping->host;
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	struct page *page = lru_to_page(page_list);
-	struct ceph_vino vino;
+	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 	struct ceph_osd_request *req;
-	u64 off;
-	u64 len;
-	int i;
+	struct ceph_vino vino = ceph_vino(inode);
+	struct iov_iter iter;
 	struct page **pages;
-	pgoff_t next_index;
-	int nr_pages = 0;
-	int got = 0;
-	int ret = 0;
-
-	if (!rw_ctx) {
-		/* caller of readpages does not hold buffer and read caps
-		 * (fadvise, madvise and readahead cases) */
-		int want = CEPH_CAP_FILE_CACHE;
-		ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want,
-					true, &got);
-		if (ret < 0) {
-			dout("start_read %p, error getting cap\n", inode);
-		} else if (!(got & want)) {
-			dout("start_read %p, no cache cap\n", inode);
-			ret = 0;
-		}
-		if (ret <= 0) {
-			if (got)
-				ceph_put_cap_refs(ci, got);
-			while (!list_empty(page_list)) {
-				page = lru_to_page(page_list);
-				list_del(&page->lru);
-				put_page(page);
-			}
-			return ret;
-		}
-	}
-
-	off = (u64) page_offset(page);
+	size_t page_off;
+	int err = 0;
+	u64 len = subreq->len;

-	/* count pages */
-	next_index = page->index;
-	list_for_each_entry_reverse(page, page_list, lru) {
-		if (page->index != next_index)
-			break;
-		nr_pages++;
-		next_index++;
-		if (max && nr_pages == max)
-			break;
-	}
-	len = nr_pages << PAGE_SHIFT;
-	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
-	     off, len);
-	vino = ceph_vino(inode);
-	req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
-				    0, 1, CEPH_OSD_OP_READ,
-				    CEPH_OSD_FLAG_READ, NULL,
-				    ci->i_truncate_seq, ci->i_truncate_size,
-				    false);
+	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, subreq->start, &len,
+			0, 1, CEPH_OSD_OP_READ,
+			CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica,
+			NULL, ci->i_truncate_seq, ci->i_truncate_size, false);
 	if (IS_ERR(req)) {
-		ret = PTR_ERR(req);
+		err = PTR_ERR(req);
+		req = NULL;
 		goto out;
 	}

-	/* build page vector */
-	nr_pages = calc_pages_for(0, len);
-	pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
-	if (!pages) {
-		ret = -ENOMEM;
-		goto out_put;
-	}
-	for (i = 0; i < nr_pages; ++i) {
-		page = list_entry(page_list->prev, struct page, lru);
-		BUG_ON(PageLocked(page));
-		list_del(&page->lru);
-
-		dout("start_read %p adding %p idx %lu\n", inode, page,
-		     page->index);
-		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
-					  GFP_KERNEL)) {
-			ceph_fscache_uncache_page(inode, page);
-			put_page(page);
-			dout("start_read %p add_to_page_cache failed %p\n",
-			     inode, page);
-			nr_pages = i;
-			if (nr_pages > 0) {
-				len = nr_pages << PAGE_SHIFT;
-				osd_req_op_extent_update(req, 0, len);
-				break;
-			}
-			goto out_pages;
-		}
-		pages[i] = page;
+	dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
+	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len);
+	err = iov_iter_get_pages_alloc(&iter, &pages, len, &page_off);
+	if (err < 0) {
+		dout("%s: iov_iter_get_pages_alloc returned %d\n", __func__, err);
+		goto out;
 	}
+
+	/* should always give us a page-aligned read */
+	WARN_ON_ONCE(page_off);
+	len = err;
+
osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false); - req->r_callback = finish_read; + req->r_callback = finish_netfs_read; + req->r_priv = subreq; req->r_inode = inode; + ihold(inode); - dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len); - ret = ceph_osdc_start_request(osdc, req, false); - if (ret < 0) - goto out_pages; + err = ceph_osdc_start_request(req->r_osdc, req, false); + if (err) + iput(inode); +out: ceph_osdc_put_request(req); + if (err) + netfs_subreq_terminated(subreq, err, false); + dout("%s: result %d\n", __func__, err); +} - /* After adding locked pages to page cache, the inode holds cache cap. - * So we can drop our cap refs. */ - if (got) - ceph_put_cap_refs(ci, got); +static void ceph_init_rreq(struct netfs_read_request *rreq, struct file *file) +{ +} - return nr_pages; +static void ceph_readahead_cleanup(struct address_space *mapping, void *priv) +{ + struct inode *inode = mapping->host; + struct ceph_inode_info *ci = ceph_inode(inode); + int got = (uintptr_t)priv; -out_pages: - for (i = 0; i < nr_pages; ++i) { - ceph_fscache_readpage_cancel(inode, pages[i]); - unlock_page(pages[i]); - } - ceph_put_page_vector(pages, nr_pages, false); -out_put: - ceph_osdc_put_request(req); -out: if (got) ceph_put_cap_refs(ci, got); - return ret; } +const struct netfs_read_request_ops ceph_netfs_read_ops = { + .init_rreq = ceph_init_rreq, + .is_cache_enabled = ceph_is_cache_enabled, + .begin_cache_operation = ceph_begin_cache_operation, + .issue_op = ceph_netfs_issue_op, + .expand_readahead = ceph_netfs_expand_readahead, + .clamp_length = ceph_netfs_clamp_length, + .check_write_begin = ceph_netfs_check_write_begin, + .cleanup = ceph_readahead_cleanup, +}; -/* - * Read multiple pages. Leave pages we don't read + unlock in page_list; - * the caller (VM) cleans them up. - */ -static int ceph_readpages(struct file *file, struct address_space *mapping, - struct list_head *page_list, unsigned nr_pages) +/* read a single page, without unlocking it. */ +static int ceph_readpage(struct file *file, struct page *page) { struct inode *inode = file_inode(file); - struct ceph_fs_client *fsc = ceph_inode_to_client(inode); - struct ceph_file_info *fi = file->private_data; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_vino vino = ceph_vino(inode); + u64 off = page_offset(page); + u64 len = thp_size(page); + + if (ci->i_inline_version != CEPH_INLINE_NONE) { + /* + * Uptodate inline data should have been added + * into page cache while getting Fcr caps. + */ + if (off == 0) { + unlock_page(page); + return -EINVAL; + } + zero_user_segment(page, 0, thp_size(page)); + SetPageUptodate(page); + unlock_page(page); + return 0; + } + + dout("readpage ino %llx.%llx file %p off %llu len %llu page %p index %lu\n", + vino.ino, vino.snap, file, off, len, page, page->index); + + return netfs_readpage(file, page, &ceph_netfs_read_ops, NULL); +} + +static void ceph_readahead(struct readahead_control *ractl) +{ + struct inode *inode = file_inode(ractl->file); + struct ceph_file_info *fi = ractl->file->private_data; struct ceph_rw_context *rw_ctx; - int rc = 0; - int max = 0; + int got = 0; + int ret = 0; if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE) - return -EINVAL; + return; - rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list, - &nr_pages); + rw_ctx = ceph_find_rw_context(fi); + if (!rw_ctx) { + /* + * readahead callers do not necessarily hold Fcb caps + * (e.g. fadvise, madvise). 
+ */ + int want = CEPH_CAP_FILE_CACHE; - if (rc == 0) - goto out; + ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got); + if (ret < 0) + dout("start_read %p, error getting cap\n", inode); + else if (!(got & want)) + dout("start_read %p, no cache cap\n", inode); - rw_ctx = ceph_find_rw_context(fi); - max = fsc->mount_options->rsize >> PAGE_SHIFT; - dout("readpages %p file %p ctx %p nr_pages %d max %d\n", - inode, file, rw_ctx, nr_pages, max); - while (!list_empty(page_list)) { - rc = start_read(inode, rw_ctx, page_list, max); - if (rc < 0) - goto out; + if (ret <= 0) + return; } -out: - ceph_fscache_readpages_cancel(inode, page_list); - - dout("readpages %p file %p ret %d\n", inode, file, rc); - return rc; + netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got); } struct ceph_writeback_ctl @@ -585,8 +477,8 @@ static u64 get_writepages_data_length(struct inode *inode, spin_unlock(&ci->i_ceph_lock); WARN_ON(!found); } - if (end > page_offset(page) + PAGE_SIZE) - end = page_offset(page) + PAGE_SIZE; + if (end > page_offset(page) + thp_size(page)) + end = page_offset(page) + thp_size(page); return end > start ? end - start : 0; } @@ -604,7 +496,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) struct ceph_snap_context *snapc, *oldest; loff_t page_off = page_offset(page); int err; - loff_t len = PAGE_SIZE; + loff_t len = thp_size(page); struct ceph_writeback_ctl ceph_wbc; struct ceph_osd_client *osdc = &fsc->client->osdc; struct ceph_osd_request *req; @@ -632,7 +524,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) /* is this a partial page at end of file? */ if (page_off >= ceph_wbc.i_size) { dout("%p page eof %llu\n", page, ceph_wbc.i_size); - page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); + page->mapping->a_ops->invalidatepage(page, 0, thp_size(page)); return 0; } @@ -658,7 +550,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) } /* it may be a short write due to an object boundary */ - WARN_ON_ONCE(len > PAGE_SIZE); + WARN_ON_ONCE(len > thp_size(page)); osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false); dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len); @@ -667,7 +559,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) if (!err) err = ceph_osdc_wait_request(osdc, req); - ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency, + ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency, req->r_end_latency, err); ceph_osdc_put_request(req); @@ -695,8 +587,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) dout("writepage cleaned page %p\n", page); err = 0; /* vfs expects us to return 0 */ } - page->private = 0; - ClearPagePrivate(page); + oldest = detach_page_private(page); + WARN_ON_ONCE(oldest != snapc); end_page_writeback(page); ceph_put_wrbuffer_cap_refs(ci, 1, snapc); ceph_put_snap_context(snapc); /* page's reference */ @@ -755,7 +647,7 @@ static void writepages_finish(struct ceph_osd_request *req) ceph_clear_error_write(ci); } - ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency, + ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency, req->r_end_latency, rc); /* @@ -788,11 +680,9 @@ static void writepages_finish(struct ceph_osd_request *req) clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC); - ceph_put_snap_context(page_snap_context(page)); - page->private = 0; - ClearPagePrivate(page); - 
dout("unlocking %p\n", page); + ceph_put_snap_context(detach_page_private(page)); end_page_writeback(page); + dout("unlocking %p\n", page); if (remove_page) generic_error_remove_page(inode->i_mapping, @@ -949,7 +839,7 @@ get_more_pages: page_offset(page) >= i_size_read(inode)) && clear_page_dirty_for_io(page)) mapping->a_ops->invalidatepage(page, - 0, PAGE_SIZE); + 0, thp_size(page)); unlock_page(page); continue; } @@ -1038,7 +928,7 @@ get_more_pages: pages[locked_pages++] = page; pvec.pages[i] = NULL; - len += PAGE_SIZE; + len += thp_size(page); } /* did we get anything? */ @@ -1087,7 +977,7 @@ new_request: BUG_ON(IS_ERR(req)); } BUG_ON(len < page_offset(pages[locked_pages - 1]) + - PAGE_SIZE - offset); + thp_size(page) - offset); req->r_callback = writepages_finish; req->r_inode = inode; @@ -1117,7 +1007,7 @@ new_request: } set_page_writeback(pages[i]); - len += PAGE_SIZE; + len += thp_size(page); } if (ceph_wbc.size_stable) { @@ -1126,7 +1016,7 @@ new_request: /* writepages_finish() clears writeback pages * according to the data length, so make sure * data length covers all locked pages */ - u64 min_len = len + 1 - PAGE_SIZE; + u64 min_len = len + 1 - thp_size(page); len = get_writepages_data_length(inode, pages[i - 1], offset); len = max(len, min_len); @@ -1302,6 +1192,31 @@ ceph_find_incompatible(struct page *page) return NULL; } +static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len, + struct page *page, void **_fsdata) +{ + struct inode *inode = file_inode(file); + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_snap_context *snapc; + + snapc = ceph_find_incompatible(page); + if (snapc) { + int r; + + unlock_page(page); + put_page(page); + if (IS_ERR(snapc)) + return PTR_ERR(snapc); + + ceph_queue_writeback(inode); + r = wait_event_killable(ci->i_cap_wq, + context_is_writeable_or_written(inode, snapc)); + ceph_put_snap_context(snapc); + return r == 0 ? -EAGAIN : r; + } + return 0; +} + /* * We are only allowed to write into/dirty the page if the page is * clean, or already dirty within the same snap context. @@ -1312,75 +1227,47 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, { struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); - struct ceph_snap_context *snapc; struct page *page = NULL; pgoff_t index = pos >> PAGE_SHIFT; - int pos_in_page = pos & ~PAGE_MASK; - int r = 0; + int r; - dout("write_begin file %p inode %p page %p %d~%d\n", file, inode, page, (int)pos, (int)len); - - for (;;) { + /* + * Uninlining should have already been done and everything updated, EXCEPT + * for inline_version sent to the MDS. + */ + if (ci->i_inline_version != CEPH_INLINE_NONE) { page = grab_cache_page_write_begin(mapping, index, flags); - if (!page) { - r = -ENOMEM; - break; - } - - snapc = ceph_find_incompatible(page); - if (snapc) { - if (IS_ERR(snapc)) { - r = PTR_ERR(snapc); - break; - } - unlock_page(page); - put_page(page); - page = NULL; - ceph_queue_writeback(inode); - r = wait_event_killable(ci->i_cap_wq, - context_is_writeable_or_written(inode, snapc)); - ceph_put_snap_context(snapc); - if (r != 0) - break; - continue; - } - - if (PageUptodate(page)) { - dout(" page %p already uptodate\n", page); - break; - } + if (!page) + return -ENOMEM; /* - * In some cases we don't need to read at all: - * - full page write - * - write that lies completely beyond EOF - * - write that covers the the page from start to EOF or beyond it + * The inline_version on a new inode is set to 1. 
If that's the + * case, then the page is brand new and isn't yet Uptodate. */ - if ((pos_in_page == 0 && len == PAGE_SIZE) || - (pos >= i_size_read(inode)) || - (pos_in_page == 0 && (pos + len) >= i_size_read(inode))) { - zero_user_segments(page, 0, pos_in_page, - pos_in_page + len, PAGE_SIZE); - break; + r = 0; + if (index == 0 && ci->i_inline_version != 1) { + if (!PageUptodate(page)) { + WARN_ONCE(1, "ceph: write_begin called on still-inlined inode (inline_version %llu)!\n", + ci->i_inline_version); + r = -EINVAL; + } + goto out; } - - /* - * We need to read it. If we get back -EINPROGRESS, then the page was - * handed off to fscache and it will be unlocked when the read completes. - * Refind the page in that case so we can reacquire the page lock. Otherwise - * we got a hard error or the read was completed synchronously. - */ - r = ceph_do_readpage(file, page); - if (r != -EINPROGRESS) - break; + zero_user_segment(page, 0, thp_size(page)); + SetPageUptodate(page); + goto out; } + r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &page, NULL, + &ceph_netfs_read_ops, NULL); +out: + if (r == 0) + wait_on_page_fscache(page); if (r < 0) { - if (page) { - unlock_page(page); + if (page) put_page(page); - } } else { + WARN_ON_ONCE(!PageLocked(page)); *pagep = page; } return r; @@ -1438,7 +1325,7 @@ static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter) const struct address_space_operations ceph_aops = { .readpage = ceph_readpage, - .readpages = ceph_readpages, + .readahead = ceph_readahead, .writepage = ceph_writepage, .writepages = ceph_writepages_start, .write_begin = ceph_write_begin, @@ -1470,7 +1357,6 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf) struct inode *inode = file_inode(vma->vm_file); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_file_info *fi = vma->vm_file->private_data; - struct page *pinned_page = NULL; loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT; int want, got, err; sigset_t oldset; @@ -1478,21 +1364,20 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf) ceph_block_sigs(&oldset); - dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n", - inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE); + dout("filemap_fault %p %llx.%llx %llu trying to get caps\n", + inode, ceph_vinop(inode), off); if (fi->fmode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; else want = CEPH_CAP_FILE_CACHE; got = 0; - err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, - &got, &pinned_page); + err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got); if (err < 0) goto out_restore; - dout("filemap_fault %p %llu~%zd got cap refs on %s\n", - inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got)); + dout("filemap_fault %p %llu got cap refs on %s\n", + inode, off, ceph_cap_string(got)); if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) || ci->i_inline_version == CEPH_INLINE_NONE) { @@ -1500,14 +1385,11 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf) ceph_add_rw_context(fi, &rw_ctx); ret = filemap_fault(vmf); ceph_del_rw_context(fi, &rw_ctx); - dout("filemap_fault %p %llu~%zd drop cap refs %s ret %x\n", - inode, off, (size_t)PAGE_SIZE, - ceph_cap_string(got), ret); + dout("filemap_fault %p %llu drop cap refs %s ret %x\n", + inode, off, ceph_cap_string(got), ret); } else err = -EAGAIN; - if (pinned_page) - put_page(pinned_page); ceph_put_cap_refs(ci, got); if (err != -EAGAIN) @@ -1542,8 +1424,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf) vmf->page = 
page; ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED; out_inline: - dout("filemap_fault %p %llu~%zd read inline data ret %x\n", - inode, off, (size_t)PAGE_SIZE, ret); + dout("filemap_fault %p %llu read inline data ret %x\n", + inode, off, ret); } out_restore: ceph_restore_sigs(&oldset); @@ -1553,9 +1435,6 @@ out_restore: return ret; } -/* - * Reuse write_begin here for simplicity. - */ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; @@ -1591,10 +1470,10 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf) goto out_free; } - if (off + PAGE_SIZE <= size) - len = PAGE_SIZE; + if (off + thp_size(page) <= size) + len = thp_size(page); else - len = size & ~PAGE_MASK; + len = offset_in_thp(page, size); dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n", inode, ceph_vinop(inode), off, len, size); @@ -1604,8 +1483,7 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf) want = CEPH_CAP_FILE_BUFFER; got = 0; - err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, - &got, NULL); + err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got); if (err < 0) goto out_free; @@ -1832,7 +1710,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page) if (!err) err = ceph_osdc_wait_request(&fsc->client->osdc, req); - ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency, + ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency, req->r_end_latency, err); out_put: @@ -2057,6 +1935,10 @@ int ceph_pool_perm_check(struct inode *inode, int need) s64 pool; int ret, flags; + /* Only need to do this for regular files */ + if (!S_ISREG(inode->i_mode)) + return 0; + if (ci->i_vino.snap != CEPH_NOSNAP) { /* * Pool permission check needs to write to the first object. diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c index 2f5cb6bc78e1..9cfadbb86568 100644 --- a/fs/ceph/cache.c +++ b/fs/ceph/cache.c @@ -173,7 +173,6 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) ci->fscache = NULL; - fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode); fscache_relinquish_cookie(cookie, &ci->i_vino, false); } @@ -194,7 +193,6 @@ void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp) dout("fscache_file_set_cookie %p %p disabling cache\n", inode, filp); fscache_disable_cookie(ci->fscache, &ci->i_vino, false); - fscache_uncache_all_inode_pages(ci->fscache, inode); } else { fscache_enable_cookie(ci->fscache, &ci->i_vino, i_size_read(inode), ceph_fscache_can_enable, inode); @@ -205,108 +203,6 @@ void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp) } } -static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error) -{ - if (!error) - SetPageUptodate(page); - - unlock_page(page); -} - -static inline bool cache_valid(struct ceph_inode_info *ci) -{ - return ci->i_fscache_gen == ci->i_rdcache_gen; -} - - -/* Atempt to read from the fscache, - * - * This function is called from the readpage_nounlock context. DO NOT attempt to - * unlock the page here (or in the callback). 
- */ -int ceph_readpage_from_fscache(struct inode *inode, struct page *page) -{ - struct ceph_inode_info *ci = ceph_inode(inode); - int ret; - - if (!cache_valid(ci)) - return -ENOBUFS; - - ret = fscache_read_or_alloc_page(ci->fscache, page, - ceph_readpage_from_fscache_complete, NULL, - GFP_KERNEL); - - switch (ret) { - case 0: /* Page found */ - dout("page read submitted\n"); - return 0; - case -ENOBUFS: /* Pages were not found, and can't be */ - case -ENODATA: /* Pages were not found */ - dout("page/inode not in cache\n"); - return ret; - default: - dout("%s: unknown error ret = %i\n", __func__, ret); - return ret; - } -} - -int ceph_readpages_from_fscache(struct inode *inode, - struct address_space *mapping, - struct list_head *pages, - unsigned *nr_pages) -{ - struct ceph_inode_info *ci = ceph_inode(inode); - int ret; - - if (!cache_valid(ci)) - return -ENOBUFS; - - ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages, - ceph_readpage_from_fscache_complete, - NULL, mapping_gfp_mask(mapping)); - - switch (ret) { - case 0: /* All pages found */ - dout("all-page read submitted\n"); - return 0; - case -ENOBUFS: /* Some pages were not found, and can't be */ - case -ENODATA: /* some pages were not found */ - dout("page/inode not in cache\n"); - return ret; - default: - dout("%s: unknown error ret = %i\n", __func__, ret); - return ret; - } -} - -void ceph_readpage_to_fscache(struct inode *inode, struct page *page) -{ - struct ceph_inode_info *ci = ceph_inode(inode); - int ret; - - if (!PageFsCache(page)) - return; - - if (!cache_valid(ci)) - return; - - ret = fscache_write_page(ci->fscache, page, i_size_read(inode), - GFP_KERNEL); - if (ret) - fscache_uncache_page(ci->fscache, page); -} - -void ceph_invalidate_fscache_page(struct inode* inode, struct page *page) -{ - struct ceph_inode_info *ci = ceph_inode(inode); - - if (!PageFsCache(page)) - return; - - fscache_wait_on_page_write(ci->fscache, page); - fscache_uncache_page(ci->fscache, page); -} - void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc) { if (fscache_cookie_valid(fsc->fscache)) { @@ -329,24 +225,3 @@ void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc) } fsc->fscache = NULL; } - -/* - * caller should hold CEPH_CAP_FILE_{RD,CACHE} - */ -void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci) -{ - if (cache_valid(ci)) - return; - - /* resue i_truncate_mutex. 
There should be no pending - * truncate while the caller holds CEPH_CAP_FILE_RD */ - mutex_lock(&ci->i_truncate_mutex); - if (!cache_valid(ci)) { - if (fscache_check_consistency(ci->fscache, &ci->i_vino)) - fscache_invalidate(ci->fscache); - spin_lock(&ci->i_ceph_lock); - ci->i_fscache_gen = ci->i_rdcache_gen; - spin_unlock(&ci->i_ceph_lock); - } - mutex_unlock(&ci->i_truncate_mutex); -} diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h index 89dbdd1eb14a..1409d6149281 100644 --- a/fs/ceph/cache.h +++ b/fs/ceph/cache.h @@ -9,6 +9,8 @@ #ifndef _CEPH_CACHE_H #define _CEPH_CACHE_H +#include <linux/netfs.h> + #ifdef CONFIG_CEPH_FSCACHE extern struct fscache_netfs ceph_cache_netfs; @@ -29,54 +31,37 @@ int ceph_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages); -void ceph_readpage_to_fscache(struct inode *inode, struct page *page); -void ceph_invalidate_fscache_page(struct inode* inode, struct page *page); static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci) { ci->fscache = NULL; - ci->i_fscache_gen = 0; } -static inline void ceph_fscache_invalidate(struct inode *inode) +static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci) { - fscache_invalidate(ceph_inode(inode)->fscache); + return ci->fscache; } -static inline void ceph_fscache_uncache_page(struct inode *inode, - struct page *page) +static inline void ceph_fscache_invalidate(struct inode *inode) { - struct ceph_inode_info *ci = ceph_inode(inode); - return fscache_uncache_page(ci->fscache, page); + fscache_invalidate(ceph_inode(inode)->fscache); } -static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp) +static inline bool ceph_is_cache_enabled(struct inode *inode) { - struct inode* inode = page->mapping->host; - struct ceph_inode_info *ci = ceph_inode(inode); - return fscache_maybe_release_page(ci->fscache, page, gfp); -} + struct fscache_cookie *cookie = ceph_fscache_cookie(ceph_inode(inode)); -static inline void ceph_fscache_readpage_cancel(struct inode *inode, - struct page *page) -{ - struct ceph_inode_info *ci = ceph_inode(inode); - if (fscache_cookie_valid(ci->fscache) && PageFsCache(page)) - __fscache_uncache_page(ci->fscache, page); + if (!cookie) + return false; + return fscache_cookie_enabled(cookie); } -static inline void ceph_fscache_readpages_cancel(struct inode *inode, - struct list_head *pages) +static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq) { - struct ceph_inode_info *ci = ceph_inode(inode); - return fscache_readpages_cancel(ci->fscache, pages); -} + struct fscache_cookie *cookie = ceph_fscache_cookie(ceph_inode(rreq->inode)); -static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci) -{ - ci->i_fscache_gen = ci->i_rdcache_gen - 1; + return fscache_begin_read_operation(rreq, cookie); } - #else static inline int ceph_fscache_register(void) @@ -102,6 +87,11 @@ static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci) { } +static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci) +{ + return NULL; +} + static inline void ceph_fscache_register_inode_cookie(struct inode *inode) { } @@ -115,62 +105,19 @@ static inline void ceph_fscache_file_set_cookie(struct inode *inode, { } -static inline void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci) -{ -} - -static inline void ceph_fscache_uncache_page(struct inode *inode, - struct page *pages) -{ -} - -static inline int ceph_readpage_from_fscache(struct 
inode* inode, - struct page *page) -{ - return -ENOBUFS; -} - -static inline int ceph_readpages_from_fscache(struct inode *inode, - struct address_space *mapping, - struct list_head *pages, - unsigned *nr_pages) -{ - return -ENOBUFS; -} - -static inline void ceph_readpage_to_fscache(struct inode *inode, - struct page *page) -{ -} - static inline void ceph_fscache_invalidate(struct inode *inode) { } -static inline void ceph_invalidate_fscache_page(struct inode *inode, - struct page *page) +static inline bool ceph_is_cache_enabled(struct inode *inode) { + return false; } -static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp) -{ - return 1; -} - -static inline void ceph_fscache_readpage_cancel(struct inode *inode, - struct page *page) -{ -} - -static inline void ceph_fscache_readpages_cancel(struct inode *inode, - struct list_head *pages) -{ -} - -static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci) +static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq) { + return -ENOBUFS; } - #endif -#endif +#endif /* _CEPH_CACHE_H */ diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 3c03fa37cac4..a5e93b185515 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1390,7 +1390,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap, arg->flush_tid = flush_tid; arg->oldest_flush_tid = oldest_flush_tid; - arg->size = inode->i_size; + arg->size = i_size_read(inode); ci->i_reported_size = arg->size; arg->max_size = ci->i_wanted_max_size; if (cap == ci->i_auth_cap) { @@ -1867,6 +1867,7 @@ static int try_nonblocking_invalidate(struct inode *inode) u32 invalidating_gen = ci->i_rdcache_gen; spin_unlock(&ci->i_ceph_lock); + ceph_fscache_invalidate(inode); invalidate_mapping_pages(&inode->i_data, 0, -1); spin_lock(&ci->i_ceph_lock); @@ -1884,7 +1885,7 @@ static int try_nonblocking_invalidate(struct inode *inode) bool __ceph_should_report_size(struct ceph_inode_info *ci) { - loff_t size = ci->vfs_inode.i_size; + loff_t size = i_size_read(&ci->vfs_inode); /* mds will adjust max size according to the reported size */ if (ci->i_flushing_caps & CEPH_CAP_FILE_WR) return false; @@ -2730,10 +2731,6 @@ again: *got = need | want; else *got = need; - if (S_ISREG(inode->i_mode) && - (need & CEPH_CAP_FILE_RD) && - !(*got & CEPH_CAP_FILE_CACHE)) - ceph_disable_fscache_readpage(ci); ceph_take_cap_refs(ci, *got, true); ret = 1; } @@ -2858,8 +2855,7 @@ int ceph_try_get_caps(struct inode *inode, int need, int want, * due to a small max_size, make sure we check_max_size (and possibly * ask the mds) so we don't get hung up indefinitely. 
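 *
 * The pinned_page out-parameter is dropped by this patch; a typical
 * caller of the new five-argument form (illustrative sketch, mirroring
 * the ceph_read_iter() hunk in fs/ceph/file.c below) becomes:
 *
 *	int got = 0;
 *
 *	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
 *	if (ret < 0)
 *		return ret;
 *	... perform the buffered read ...
 *	ceph_put_cap_refs(ci, got);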
*/ -int ceph_get_caps(struct file *filp, int need, int want, - loff_t endoff, int *got, struct page **pinned_page) +int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got) { struct ceph_file_info *fi = filp->private_data; struct inode *inode = file_inode(filp); @@ -2957,11 +2953,11 @@ int ceph_get_caps(struct file *filp, int need, int want, struct page *page = find_get_page(inode->i_mapping, 0); if (page) { - if (PageUptodate(page)) { - *pinned_page = page; - break; - } + bool uptodate = PageUptodate(page); + put_page(page); + if (uptodate) + break; } /* * drop cap refs first because getattr while @@ -2983,11 +2979,6 @@ int ceph_get_caps(struct file *filp, int need, int want, } break; } - - if (S_ISREG(ci->vfs_inode.i_mode) && - (_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE)) - ceph_fscache_revalidate_cookie(ci); - *got = _got; return 0; } @@ -3308,7 +3299,7 @@ static void handle_cap_grant(struct inode *inode, dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n", inode, cap, session->s_mds, seq, ceph_cap_string(newcaps)); dout(" size %llu max_size %llu, i_size %llu\n", size, max_size, - inode->i_size); + i_size_read(inode)); /* diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 66989c880adb..425f3356332a 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -162,34 +162,34 @@ static int metric_show(struct seq_file *s, void *p) seq_printf(s, "item total avg_lat(us) min_lat(us) max_lat(us) stdev(us)\n"); seq_printf(s, "-----------------------------------------------------------------------------------\n"); - spin_lock(&m->read_latency_lock); + spin_lock(&m->read_metric_lock); total = m->total_reads; sum = m->read_latency_sum; avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0; min = m->read_latency_min; max = m->read_latency_max; sq = m->read_latency_sq_sum; - spin_unlock(&m->read_latency_lock); + spin_unlock(&m->read_metric_lock); CEPH_METRIC_SHOW("read", total, avg, min, max, sq); - spin_lock(&m->write_latency_lock); + spin_lock(&m->write_metric_lock); total = m->total_writes; sum = m->write_latency_sum; avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0; min = m->write_latency_min; max = m->write_latency_max; sq = m->write_latency_sq_sum; - spin_unlock(&m->write_latency_lock); + spin_unlock(&m->write_metric_lock); CEPH_METRIC_SHOW("write", total, avg, min, max, sq); - spin_lock(&m->metadata_latency_lock); + spin_lock(&m->metadata_metric_lock); total = m->total_metadatas; sum = m->metadata_latency_sum; avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0; min = m->metadata_latency_min; max = m->metadata_latency_max; sq = m->metadata_latency_sq_sum; - spin_unlock(&m->metadata_latency_lock); + spin_unlock(&m->metadata_metric_lock); CEPH_METRIC_SHOW("metadata", total, avg, min, max, sq); seq_printf(s, "\n"); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index f7a790ed62c4..5624fae7a603 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -631,10 +631,12 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence) switch (whence) { case SEEK_CUR: offset += file->f_pos; + break; case SEEK_SET: break; case SEEK_END: retval = -EOPNOTSUPP; + goto out; default: goto out; } @@ -665,8 +667,8 @@ out: /* * Handle lookups for the hidden .snap directory. 
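 *
 * The return type below changes from int to struct dentry *, so callers
 * splice the result instead of checking an errno. A minimal sketch of the
 * new convention (mirroring the ceph_lookup() hunk that follows):
 *
 *	res = ceph_handle_snapdir(req, dentry, err);
 *	if (IS_ERR(res)) {
 *		err = PTR_ERR(res);
 *	} else {
 *		dentry = res;
 *		err = 0;
 *	}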
*/ -int ceph_handle_snapdir(struct ceph_mds_request *req, - struct dentry *dentry, int err) +struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req, + struct dentry *dentry, int err) { struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb); struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */ @@ -674,18 +676,17 @@ int ceph_handle_snapdir(struct ceph_mds_request *req, /* .snap dir? */ if (err == -ENOENT && ceph_snap(parent) == CEPH_NOSNAP && - strcmp(dentry->d_name.name, - fsc->mount_options->snapdir_name) == 0) { + strcmp(dentry->d_name.name, fsc->mount_options->snapdir_name) == 0) { + struct dentry *res; struct inode *inode = ceph_get_snapdir(parent); - if (IS_ERR(inode)) - return PTR_ERR(inode); - dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n", - dentry, dentry, inode); - BUG_ON(!d_unhashed(dentry)); - d_add(dentry, inode); - err = 0; + + res = d_splice_alias(inode, dentry); + dout("ENOENT on snapdir %p '%pd', linking to snapdir %p. Spliced dentry %p\n", + dentry, dentry, inode, res); + if (res) + dentry = res; } - return err; + return dentry; } /* @@ -741,6 +742,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb); struct ceph_mds_request *req; + struct dentry *res; int op; int mask; int err; @@ -791,7 +793,13 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, req->r_parent = dir; set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); err = ceph_mdsc_do_request(mdsc, NULL, req); - err = ceph_handle_snapdir(req, dentry, err); + res = ceph_handle_snapdir(req, dentry, err); + if (IS_ERR(res)) { + err = PTR_ERR(res); + } else { + dentry = res; + err = 0; + } dentry = ceph_finish_lookup(req, dentry, err); ceph_mdsc_put_request(req); /* will dput(dentry) */ dout("lookup result=%p\n", dentry); diff --git a/fs/ceph/export.c b/fs/ceph/export.c index f22156ee7306..65540a4429b2 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -129,6 +129,10 @@ static struct inode *__lookup_inode(struct super_block *sb, u64 ino) vino.ino = ino; vino.snap = CEPH_NOSNAP; + + if (ceph_vino_is_reserved(vino)) + return ERR_PTR(-ESTALE); + inode = ceph_find_inode(sb, vino); if (!inode) { struct ceph_mds_request *req; @@ -178,8 +182,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino) return ERR_CAST(inode); /* We need LINK caps to reliably check i_nlink */ err = ceph_do_getattr(inode, CEPH_CAP_LINK_SHARED, false); - if (err) + if (err) { + iput(inode); return ERR_PTR(err); + } /* -ESTALE if inode as been unlinked and no file is open */ if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) { iput(inode); @@ -212,6 +218,10 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb, vino.ino = sfh->ino; vino.snap = sfh->snapid; } + + if (ceph_vino_is_reserved(vino)) + return ERR_PTR(-ESTALE); + inode = ceph_find_inode(sb, vino); if (inode) return d_obtain_alias(inode); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 209535d5b8d3..77fc037d5beb 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -739,9 +739,12 @@ retry: err = ceph_mdsc_do_request(mdsc, (flags & (O_CREAT|O_TRUNC)) ? 
dir : NULL, req); - err = ceph_handle_snapdir(req, dentry, err); - if (err) + dentry = ceph_handle_snapdir(req, dentry, err); + if (IS_ERR(dentry)) { + err = PTR_ERR(dentry); goto out_req; + } + err = 0; if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry) err = ceph_handle_notrace_create(dir, dentry); @@ -892,7 +895,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to, if (!ret) ret = ceph_osdc_wait_request(osdc, req); - ceph_update_read_latency(&fsc->mdsc->metric, + ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency, req->r_end_latency, ret); @@ -1034,16 +1037,6 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req) dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, osd_data->bvec_pos.iter.bi_size); - /* r_start_latency == 0 means the request was not submitted */ - if (req->r_start_latency) { - if (aio_req->write) - ceph_update_write_latency(metric, req->r_start_latency, - req->r_end_latency, rc); - else - ceph_update_read_latency(metric, req->r_start_latency, - req->r_end_latency, rc); - } - if (rc == -EOLDSNAPC) { struct ceph_aio_work *aio_work; BUG_ON(!aio_req->write); @@ -1086,6 +1079,16 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req) } } + /* r_start_latency == 0 means the request was not submitted */ + if (req->r_start_latency) { + if (aio_req->write) + ceph_update_write_metrics(metric, req->r_start_latency, + req->r_end_latency, rc); + else + ceph_update_read_metrics(metric, req->r_start_latency, + req->r_end_latency, rc); + } + put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs, aio_req->should_dirty); ceph_osdc_put_request(req); @@ -1290,10 +1293,10 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, ret = ceph_osdc_wait_request(&fsc->client->osdc, req); if (write) - ceph_update_write_latency(metric, req->r_start_latency, + ceph_update_write_metrics(metric, req->r_start_latency, req->r_end_latency, ret); else - ceph_update_read_latency(metric, req->r_start_latency, + ceph_update_read_metrics(metric, req->r_start_latency, req->r_end_latency, ret); size = i_size_read(inode); @@ -1467,7 +1470,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, if (!ret) ret = ceph_osdc_wait_request(&fsc->client->osdc, req); - ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency, + ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency, req->r_end_latency, ret); out: ceph_osdc_put_request(req); @@ -1510,7 +1513,6 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) size_t len = iov_iter_count(to); struct inode *inode = file_inode(filp); struct ceph_inode_info *ci = ceph_inode(inode); - struct page *pinned_page = NULL; bool direct_lock = iocb->ki_flags & IOCB_DIRECT; ssize_t ret; int want, got = 0; @@ -1529,8 +1531,7 @@ again: want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; else want = CEPH_CAP_FILE_CACHE; - ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, - &got, &pinned_page); + ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got); if (ret < 0) { if (iocb->ki_flags & IOCB_DIRECT) ceph_end_io_direct(inode); @@ -1571,10 +1572,6 @@ again: dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); - if (pinned_page) { - put_page(pinned_page); - pinned_page = NULL; - } ceph_put_cap_refs(ci, got); if (direct_lock) @@ -1753,8 +1750,7 @@ retry_snap: else want = CEPH_CAP_FILE_BUFFER; got = 0; - err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, 
- &got, NULL); + err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got); if (err < 0) goto out; @@ -2083,7 +2079,7 @@ static long ceph_fallocate(struct file *file, int mode, else want = CEPH_CAP_FILE_BUFFER; - ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got, NULL); + ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got); if (ret < 0) goto unlock; @@ -2121,7 +2117,7 @@ static int get_rd_wr_caps(struct file *src_filp, int *src_got, retry_caps: ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER, - dst_endoff, dst_got, NULL); + dst_endoff, dst_got); if (ret < 0) return ret; @@ -2143,7 +2139,7 @@ retry_caps: return ret; } ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD, - CEPH_CAP_FILE_SHARED, -1, src_got, NULL); + CEPH_CAP_FILE_SHARED, -1, src_got); if (ret < 0) return ret; /*... drop src_ci caps too, and retry */ diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 689e3ffd29d7..e1c63adb196d 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -56,6 +56,9 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino) { struct inode *inode; + if (ceph_vino_is_reserved(vino)) + return ERR_PTR(-EREMOTEIO); + inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare, ceph_set_ino_cb, &vino); if (!inode) @@ -99,14 +102,15 @@ struct inode *ceph_get_snapdir(struct inode *parent) inode->i_mtime = parent->i_mtime; inode->i_ctime = parent->i_ctime; inode->i_atime = parent->i_atime; - inode->i_op = &ceph_snapdir_iops; - inode->i_fop = &ceph_snapdir_fops; - ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ ci->i_rbytes = 0; ci->i_btime = ceph_inode(parent)->i_btime; - if (inode->i_state & I_NEW) + if (inode->i_state & I_NEW) { + inode->i_op = &ceph_snapdir_iops; + inode->i_fop = &ceph_snapdir_fops; + ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ unlock_new_inode(inode); + } return inode; } @@ -628,10 +632,11 @@ int ceph_fill_file_size(struct inode *inode, int issued, { struct ceph_inode_info *ci = ceph_inode(inode); int queue_trunc = 0; + loff_t isize = i_size_read(inode); if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 || - (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) { - dout("size %lld -> %llu\n", inode->i_size, size); + (truncate_seq == ci->i_truncate_seq && size > isize)) { + dout("size %lld -> %llu\n", isize, size); if (size > 0 && S_ISDIR(inode->i_mode)) { pr_err("fill_file_size non-zero size for directory\n"); size = 0; @@ -925,6 +930,7 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page, ci->i_rfiles = le64_to_cpu(info->rfiles); ci->i_rsubdirs = le64_to_cpu(info->rsubdirs); ci->i_dir_pin = iinfo->dir_pin; + ci->i_rsnaps = iinfo->rsnaps; ceph_decode_timespec64(&ci->i_rctime, &info->rctime); } } @@ -1818,7 +1824,7 @@ bool ceph_inode_set_size(struct inode *inode, loff_t size) bool ret; spin_lock(&ci->i_ceph_lock); - dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size); + dout("set_size %p %llu -> %llu\n", inode, i_size_read(inode), size); i_size_write(inode, size); inode->i_blocks = calc_inode_blocks(size); @@ -1894,6 +1900,7 @@ static void ceph_do_invalidate_pages(struct inode *inode) orig_gen = ci->i_rdcache_gen; spin_unlock(&ci->i_ceph_lock); + ceph_fscache_invalidate(inode); if (invalidate_inode_pages2(inode->i_mapping) < 0) { pr_err("invalidate_pages %p fails\n", inode); } @@ -2124,20 +2131,19 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr) } } if (ia_valid & ATTR_SIZE) { - dout("setattr %p size %lld -> %lld\n", inode, - inode->i_size, 
attr->ia_size); - if ((issued & CEPH_CAP_FILE_EXCL) && - attr->ia_size > inode->i_size) { + loff_t isize = i_size_read(inode); + + dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size); + if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size > isize) { i_size_write(inode, attr->ia_size); inode->i_blocks = calc_inode_blocks(attr->ia_size); ci->i_reported_size = attr->ia_size; dirtied |= CEPH_CAP_FILE_EXCL; ia_valid |= ATTR_MTIME; } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || - attr->ia_size != inode->i_size) { + attr->ia_size != isize) { req->r_args.setattr.size = cpu_to_le64(attr->ia_size); - req->r_args.setattr.old_size = - cpu_to_le64(inode->i_size); + req->r_args.setattr.old_size = cpu_to_le64(isize); mask |= CEPH_SETATTR_SIZE; release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL | CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR; @@ -2247,7 +2253,7 @@ int ceph_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, return err; if ((attr->ia_valid & ATTR_SIZE) && - attr->ia_size > max(inode->i_size, fsc->max_file_size)) + attr->ia_size > max(i_size_read(inode), fsc->max_file_size)) return -EFBIG; if ((attr->ia_valid & ATTR_SIZE) && diff --git a/fs/ceph/io.c b/fs/ceph/io.c index 97602ea92ff4..c456509b31c3 100644 --- a/fs/ceph/io.c +++ b/fs/ceph/io.c @@ -118,7 +118,7 @@ static void ceph_block_buffered(struct ceph_inode_info *ci, struct inode *inode) } /** - * ceph_end_io_direct - declare the file is being used for direct i/o + * ceph_start_io_direct - declare the file is being used for direct i/o * @inode: file inode * * Declare that a direct I/O operation is about to start, and ensure diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index d87bd852ed96..e5af591d3bd4 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -176,6 +176,13 @@ static int parse_reply_info_in(void **p, void *end, memset(&info->snap_btime, 0, sizeof(info->snap_btime)); } + /* snapshot count, remains zero for v<=3 */ + if (struct_v >= 4) { + ceph_decode_64_safe(p, end, info->rsnaps, bad); + } else { + info->rsnaps = 0; + } + *p = end; } else { if (features & CEPH_FEATURE_MDS_INLINE_DATA) { @@ -214,7 +221,7 @@ static int parse_reply_info_in(void **p, void *end, } info->dir_pin = -ENODATA; - /* info->snap_btime remains zero */ + /* info->snap_btime and info->rsnaps remain zero */ } return 0; bad: @@ -433,6 +440,13 @@ static int ceph_parse_deleg_inos(void **p, void *end, ceph_decode_64_safe(p, end, start, bad); ceph_decode_64_safe(p, end, len, bad); + + /* Don't accept a delegation of system inodes */ + if (start < CEPH_INO_SYSTEM_BASE) { + pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n", + start, len); + continue; + } while (len--) { int err = xa_insert(&s->s_delegated_inos, ino = start++, DELEGATED_INO_AVAILABLE, @@ -3306,7 +3320,7 @@ out_err: /* kick calling process */ complete_request(mdsc, req); - ceph_update_metadata_latency(&mdsc->metric, req->r_start_latency, + ceph_update_metadata_metrics(&mdsc->metric, req->r_start_latency, req->r_end_latency, err); out: ceph_mdsc_put_request(req); @@ -3780,7 +3794,7 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap, rec.v1.cap_id = cpu_to_le64(cap->cap_id); rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); rec.v1.issued = cpu_to_le32(cap->issued); - rec.v1.size = cpu_to_le64(inode->i_size); + rec.v1.size = cpu_to_le64(i_size_read(inode)); ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime); ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime); rec.v1.snaprealm = 
cpu_to_le64(ci->i_snap_realm->ino); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index eaa7c5422116..15c11a0f2caf 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -88,6 +88,7 @@ struct ceph_mds_reply_info_in { s32 dir_pin; struct ceph_timespec btime; struct ceph_timespec snap_btime; + u64 rsnaps; u64 change_attr; }; diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c index 5ec94bd4c1de..28b6b42ad677 100644 --- a/fs/ceph/metric.c +++ b/fs/ceph/metric.c @@ -17,6 +17,9 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc, struct ceph_metric_write_latency *write; struct ceph_metric_metadata_latency *meta; struct ceph_metric_dlease *dlease; + struct ceph_opened_files *files; + struct ceph_pinned_icaps *icaps; + struct ceph_opened_inodes *inodes; struct ceph_client_metric *m = &mdsc->metric; u64 nr_caps = atomic64_read(&m->total_caps); struct ceph_msg *msg; @@ -26,7 +29,8 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc, s32 len; len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write) - + sizeof(*meta) + sizeof(*dlease); + + sizeof(*meta) + sizeof(*dlease) + sizeof(*files) + + sizeof(*icaps) + sizeof(*inodes); msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true); if (!msg) { @@ -95,6 +99,38 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc, dlease->total = cpu_to_le64(atomic64_read(&m->total_dentries)); items++; + sum = percpu_counter_sum(&m->total_inodes); + + /* encode the opened files metric */ + files = (struct ceph_opened_files *)(dlease + 1); + files->type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_FILES); + files->ver = 1; + files->compat = 1; + files->data_len = cpu_to_le32(sizeof(*files) - 10); + files->opened_files = cpu_to_le64(atomic64_read(&m->opened_files)); + files->total = cpu_to_le64(sum); + items++; + + /* encode the pinned icaps metric */ + icaps = (struct ceph_pinned_icaps *)(files + 1); + icaps->type = cpu_to_le32(CLIENT_METRIC_TYPE_PINNED_ICAPS); + icaps->ver = 1; + icaps->compat = 1; + icaps->data_len = cpu_to_le32(sizeof(*icaps) - 10); + icaps->pinned_icaps = cpu_to_le64(nr_caps); + icaps->total = cpu_to_le64(sum); + items++; + + /* encode the opened inodes metric */ + inodes = (struct ceph_opened_inodes *)(icaps + 1); + inodes->type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_INODES); + inodes->ver = 1; + inodes->compat = 1; + inodes->data_len = cpu_to_le32(sizeof(*inodes) - 10); + inodes->opened_inodes = cpu_to_le64(percpu_counter_sum(&m->opened_inodes)); + inodes->total = cpu_to_le64(sum); + items++; + put_unaligned_le32(items, &head->num); msg->front.iov_len = len; msg->hdr.version = cpu_to_le16(1); @@ -183,21 +219,21 @@ int ceph_metric_init(struct ceph_client_metric *m) if (ret) goto err_i_caps_mis; - spin_lock_init(&m->read_latency_lock); + spin_lock_init(&m->read_metric_lock); m->read_latency_sq_sum = 0; m->read_latency_min = KTIME_MAX; m->read_latency_max = 0; m->total_reads = 0; m->read_latency_sum = 0; - spin_lock_init(&m->write_latency_lock); + spin_lock_init(&m->write_metric_lock); m->write_latency_sq_sum = 0; m->write_latency_min = KTIME_MAX; m->write_latency_max = 0; m->total_writes = 0; m->write_latency_sum = 0; - spin_lock_init(&m->metadata_latency_lock); + spin_lock_init(&m->metadata_metric_lock); m->metadata_latency_sq_sum = 0; m->metadata_latency_min = KTIME_MAX; m->metadata_latency_max = 0; @@ -274,7 +310,7 @@ static inline void __update_latency(ktime_t *totalp, ktime_t *lsump, *sq_sump += sq; } -void ceph_update_read_latency(struct ceph_client_metric *m, +void 
ceph_update_read_metrics(struct ceph_client_metric *m, ktime_t r_start, ktime_t r_end, int rc) { @@ -283,14 +319,14 @@ void ceph_update_read_latency(struct ceph_client_metric *m, if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT)) return; - spin_lock(&m->read_latency_lock); + spin_lock(&m->read_metric_lock); __update_latency(&m->total_reads, &m->read_latency_sum, &m->read_latency_min, &m->read_latency_max, &m->read_latency_sq_sum, lat); - spin_unlock(&m->read_latency_lock); + spin_unlock(&m->read_metric_lock); } -void ceph_update_write_latency(struct ceph_client_metric *m, +void ceph_update_write_metrics(struct ceph_client_metric *m, ktime_t r_start, ktime_t r_end, int rc) { @@ -299,14 +335,14 @@ void ceph_update_write_latency(struct ceph_client_metric *m, if (unlikely(rc && rc != -ETIMEDOUT)) return; - spin_lock(&m->write_latency_lock); + spin_lock(&m->write_metric_lock); __update_latency(&m->total_writes, &m->write_latency_sum, &m->write_latency_min, &m->write_latency_max, &m->write_latency_sq_sum, lat); - spin_unlock(&m->write_latency_lock); + spin_unlock(&m->write_metric_lock); } -void ceph_update_metadata_latency(struct ceph_client_metric *m, +void ceph_update_metadata_metrics(struct ceph_client_metric *m, ktime_t r_start, ktime_t r_end, int rc) { @@ -315,9 +351,9 @@ void ceph_update_metadata_latency(struct ceph_client_metric *m, if (unlikely(rc && rc != -ENOENT)) return; - spin_lock(&m->metadata_latency_lock); + spin_lock(&m->metadata_metric_lock); __update_latency(&m->total_metadatas, &m->metadata_latency_sum, &m->metadata_latency_min, &m->metadata_latency_max, &m->metadata_latency_sq_sum, lat); - spin_unlock(&m->metadata_latency_lock); + spin_unlock(&m->metadata_metric_lock); } diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h index af6038ff39d4..e984eb2bb14b 100644 --- a/fs/ceph/metric.h +++ b/fs/ceph/metric.h @@ -14,8 +14,11 @@ enum ceph_metric_type { CLIENT_METRIC_TYPE_WRITE_LATENCY, CLIENT_METRIC_TYPE_METADATA_LATENCY, CLIENT_METRIC_TYPE_DENTRY_LEASE, + CLIENT_METRIC_TYPE_OPENED_FILES, + CLIENT_METRIC_TYPE_PINNED_ICAPS, + CLIENT_METRIC_TYPE_OPENED_INODES, - CLIENT_METRIC_TYPE_MAX = CLIENT_METRIC_TYPE_DENTRY_LEASE, + CLIENT_METRIC_TYPE_MAX = CLIENT_METRIC_TYPE_OPENED_INODES, }; /* @@ -28,6 +31,9 @@ enum ceph_metric_type { CLIENT_METRIC_TYPE_WRITE_LATENCY, \ CLIENT_METRIC_TYPE_METADATA_LATENCY, \ CLIENT_METRIC_TYPE_DENTRY_LEASE, \ + CLIENT_METRIC_TYPE_OPENED_FILES, \ + CLIENT_METRIC_TYPE_PINNED_ICAPS, \ + CLIENT_METRIC_TYPE_OPENED_INODES, \ \ CLIENT_METRIC_TYPE_MAX, \ } @@ -94,6 +100,42 @@ struct ceph_metric_dlease { __le64 total; } __packed; +/* metric opened files header */ +struct ceph_opened_files { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(opened_files + total) */ + __le64 opened_files; + __le64 total; +} __packed; + +/* metric pinned i_caps header */ +struct ceph_pinned_icaps { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(pinned_icaps + total) */ + __le64 pinned_icaps; + __le64 total; +} __packed; + +/* metric opened inodes header */ +struct ceph_opened_inodes { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(opened_inodes + total) */ + __le64 opened_inodes; + __le64 total; +} __packed; + struct ceph_metric_head { __le32 num; /* the number of metrics that will be sent */ } __packed; @@ -108,21 +150,21 @@ struct ceph_client_metric { struct percpu_counter i_caps_hit; struct 
percpu_counter i_caps_mis; - spinlock_t read_latency_lock; + spinlock_t read_metric_lock; u64 total_reads; ktime_t read_latency_sum; ktime_t read_latency_sq_sum; ktime_t read_latency_min; ktime_t read_latency_max; - spinlock_t write_latency_lock; + spinlock_t write_metric_lock; u64 total_writes; ktime_t write_latency_sum; ktime_t write_latency_sq_sum; ktime_t write_latency_min; ktime_t write_latency_max; - spinlock_t metadata_latency_lock; + spinlock_t metadata_metric_lock; u64 total_metadatas; ktime_t metadata_latency_sum; ktime_t metadata_latency_sq_sum; @@ -162,13 +204,13 @@ static inline void ceph_update_cap_mis(struct ceph_client_metric *m) percpu_counter_inc(&m->i_caps_mis); } -extern void ceph_update_read_latency(struct ceph_client_metric *m, +extern void ceph_update_read_metrics(struct ceph_client_metric *m, ktime_t r_start, ktime_t r_end, int rc); -extern void ceph_update_write_latency(struct ceph_client_metric *m, +extern void ceph_update_write_metrics(struct ceph_client_metric *m, ktime_t r_start, ktime_t r_end, int rc); -extern void ceph_update_metadata_latency(struct ceph_client_metric *m, +extern void ceph_update_metadata_metrics(struct ceph_client_metric *m, ktime_t r_start, ktime_t r_end, int rc); #endif /* _FS_CEPH_MDS_METRIC_H */ diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 0728b01d4d43..4ce18055d931 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -605,7 +605,7 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci, struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); BUG_ON(capsnap->writing); - capsnap->size = inode->i_size; + capsnap->size = i_size_read(inode); capsnap->mtime = inode->i_mtime; capsnap->atime = inode->i_atime; capsnap->ctime = inode->i_ctime; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index c48bb30c8d70..db80d89556b1 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -21,6 +21,7 @@ #include <linux/ceph/libceph.h> #ifdef CONFIG_CEPH_FSCACHE +#define FSCACHE_USE_NEW_IO_API #include <linux/fscache.h> #endif @@ -333,7 +334,7 @@ struct ceph_inode_info { /* for dirs */ struct timespec64 i_rctime; - u64 i_rbytes, i_rfiles, i_rsubdirs; + u64 i_rbytes, i_rfiles, i_rsubdirs, i_rsnaps; u64 i_files, i_subdirs; /* quotas */ @@ -427,7 +428,6 @@ struct ceph_inode_info { #ifdef CONFIG_CEPH_FSCACHE struct fscache_cookie *fscache; - u32 i_fscache_gen; #endif errseq_t i_meta_err; @@ -529,10 +529,34 @@ static inline int ceph_ino_compare(struct inode *inode, void *data) ci->i_vino.snap == pvino->snap; } +/* + * The MDS reserves a set of inodes for its own usage. These should never + * be accessible by clients, and so the MDS has no reason to ever hand these + * out. The range is CEPH_MDS_INO_MDSDIR_OFFSET..CEPH_INO_SYSTEM_BASE. + * + * These come from src/mds/mdstypes.h in the ceph sources. 
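+ *
+ * Working the arithmetic with the constants below, the reserved range is
+ * [CEPH_MDS_INO_MDSDIR_OFFSET, CEPH_INO_SYSTEM_BASE) =
+ * [1 * 0x100, (6 * 0x100) + (0x100 * 10)) = [0x100, 0x1000).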
+ */ +#define CEPH_MAX_MDS 0x100 +#define CEPH_NUM_STRAY 10 +#define CEPH_MDS_INO_MDSDIR_OFFSET (1 * CEPH_MAX_MDS) +#define CEPH_INO_SYSTEM_BASE ((6*CEPH_MAX_MDS) + (CEPH_MAX_MDS * CEPH_NUM_STRAY)) + +static inline bool ceph_vino_is_reserved(const struct ceph_vino vino) +{ + if (vino.ino < CEPH_INO_SYSTEM_BASE && + vino.ino >= CEPH_MDS_INO_MDSDIR_OFFSET) { + WARN_RATELIMIT(1, "Attempt to access reserved inode number 0x%llx", vino.ino); + return true; + } + return false; +} static inline struct inode *ceph_find_inode(struct super_block *sb, struct ceph_vino vino) { + if (ceph_vino_is_reserved(vino)) + return NULL; + /* * NB: The hashval will be run through the fs/inode.c hash function * anyway, so there is no need to squash the inode number down to @@ -1156,7 +1180,7 @@ extern int ceph_encode_dentry_release(void **p, struct dentry *dn, int mds, int drop, int unless); extern int ceph_get_caps(struct file *filp, int need, int want, - loff_t endoff, int *got, struct page **pinned_page); + loff_t endoff, int *got); extern int ceph_try_get_caps(struct inode *inode, int need, int want, bool nonblock, int *got); @@ -1193,7 +1217,7 @@ extern const struct dentry_operations ceph_dentry_ops; extern loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order); extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry); -extern int ceph_handle_snapdir(struct ceph_mds_request *req, +extern struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req, struct dentry *dentry, int err); extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, struct dentry *dentry, int err); diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 02f59bcb4f27..1242db8d3444 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -233,6 +233,12 @@ static ssize_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val, return ceph_fmt_xattr(val, size, "%lld", ci->i_rsubdirs); } +static ssize_t ceph_vxattrcb_dir_rsnaps(struct ceph_inode_info *ci, char *val, + size_t size) +{ + return ceph_fmt_xattr(val, size, "%lld", ci->i_rsnaps); +} + static ssize_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val, size_t size) { @@ -384,6 +390,7 @@ static struct ceph_vxattr ceph_dir_vxattrs[] = { XATTR_RSTAT_FIELD(dir, rentries), XATTR_RSTAT_FIELD(dir, rfiles), XATTR_RSTAT_FIELD(dir, rsubdirs), + XATTR_RSTAT_FIELD(dir, rsnaps), XATTR_RSTAT_FIELD(dir, rbytes), XATTR_RSTAT_FIELD(dir, rctime), { diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h index 2a5325a7ae49..9c45b3a82ad9 100644 --- a/fs/cifs/cifs_fs_sb.h +++ b/fs/cifs/cifs_fs_sb.h @@ -55,6 +55,7 @@ #define CIFS_MOUNT_MODE_FROM_SID 0x10000000 /* retrieve mode from special ACE */ #define CIFS_MOUNT_RO_CACHE 0x20000000 /* assumes share will not change */ #define CIFS_MOUNT_RW_CACHE 0x40000000 /* assumes only client accessing */ +#define CIFS_MOUNT_SHUTDOWN 0x80000000 struct cifs_sb_info { struct rb_root tlink_tree; diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h index 153d5c842a9b..4a97fe12006b 100644 --- a/fs/cifs/cifs_ioctl.h +++ b/fs/cifs/cifs_ioctl.h @@ -57,6 +57,12 @@ struct smb_query_info { /* char buffer[]; */ } __packed; +/* + * Dumping the commonly used 16 byte (e.g. 
CCM and GCM128) keys is still supported + * for backlevel compatibility, but is not sufficient for dumping the less + * frequently used GCM256 (32 byte) keys (see the newer "CIFS_DUMP_FULL_KEY" + * ioctl for dumping decryption info for GCM256 mounts) + */ struct smb3_key_debug_info { __u64 Suid; __u16 cipher_type; @@ -65,6 +71,18 @@ struct smb3_key_debug_info { __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE]; } __packed; +/* + * Dump full key (32 byte encrypt/decrypt keys instead of 16 bytes) + * is needed if GCM256 (stronger encryption) is negotiated + */ +struct smb3_full_key_debug_info { + __u64 Suid; + __u16 cipher_type; + __u8 auth_key[16]; /* SMB2_NTLMV2_SESSKEY_SIZE */ + __u8 smb3encryptionkey[32]; /* SMB3_ENC_DEC_KEY_SIZE */ + __u8 smb3decryptionkey[32]; /* SMB3_ENC_DEC_KEY_SIZE */ +} __packed; + struct smb3_notify { __u32 completion_filter; bool watch_tree; @@ -78,3 +96,20 @@ struct smb3_notify { #define CIFS_QUERY_INFO _IOWR(CIFS_IOCTL_MAGIC, 7, struct smb_query_info) #define CIFS_DUMP_KEY _IOWR(CIFS_IOCTL_MAGIC, 8, struct smb3_key_debug_info) #define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify) +#define CIFS_DUMP_FULL_KEY _IOWR(CIFS_IOCTL_MAGIC, 10, struct smb3_full_key_debug_info) +#define CIFS_IOC_SHUTDOWN _IOR ('X', 125, __u32) + +/* + * Flags for going down operation + */ +#define CIFS_GOING_FLAGS_DEFAULT 0x0 /* going down */ +#define CIFS_GOING_FLAGS_LOGFLUSH 0x1 /* flush log but not data */ +#define CIFS_GOING_FLAGS_NOLOGFLUSH 0x2 /* don't flush log nor data */ + +static inline bool cifs_forced_shutdown(struct cifs_sb_info *sbi) +{ + if (CIFS_MOUNT_SHUTDOWN & sbi->mnt_cifs_flags) + return true; + else + return false; +} diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 5f2c139143a7..d7ea9c5fe0f8 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -75,7 +75,7 @@ bool enable_oplocks = true; bool linuxExtEnabled = true; bool lookupCacheEnabled = true; bool disable_legacy_dialects; /* false by default */ -bool enable_gcm_256; /* false by default, change when more servers support it */ +bool enable_gcm_256 = true; bool require_gcm_256; /* false by default */ unsigned int global_secflags = CIFSSEC_DEF; /* unsigned int ntlmv2_support = 0; */ @@ -133,6 +133,7 @@ struct workqueue_struct *cifsiod_wq; struct workqueue_struct *decrypt_wq; struct workqueue_struct *fileinfo_put_wq; struct workqueue_struct *cifsoplockd_wq; +struct workqueue_struct *deferredclose_wq; __u32 cifs_lock_secret; /* @@ -390,6 +391,8 @@ cifs_alloc_inode(struct super_block *sb) /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */ INIT_LIST_HEAD(&cifs_inode->openFileList); INIT_LIST_HEAD(&cifs_inode->llist); + INIT_LIST_HEAD(&cifs_inode->deferred_closes); + spin_lock_init(&cifs_inode->deferred_lock); return &cifs_inode->vfs_inode; } @@ -860,13 +863,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type, goto out; } - /* cifs_setup_volume_info->smb3_parse_devname() redups UNC & prepath */ - kfree(cifs_sb->ctx->UNC); - cifs_sb->ctx->UNC = NULL; - kfree(cifs_sb->ctx->prepath); - cifs_sb->ctx->prepath = NULL; - - rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, old_ctx->UNC); + rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL); if (rc) { root = ERR_PTR(rc); goto out; @@ -1637,9 +1634,16 @@ init_cifs(void) goto out_destroy_fileinfo_put_wq; } + deferredclose_wq = alloc_workqueue("deferredclose", + WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); + if (!deferredclose_wq) { + rc = -ENOMEM; + goto out_destroy_cifsoplockd_wq; + } + rc = cifs_fscache_register(); if (rc) - goto out_destroy_cifsoplockd_wq; + goto
out_destroy_deferredclose_wq; rc = cifs_init_inodecache(); if (rc) @@ -1707,6 +1711,8 @@ out_destroy_inodecache: cifs_destroy_inodecache(); out_unreg_fscache: cifs_fscache_unregister(); +out_destroy_deferredclose_wq: + destroy_workqueue(deferredclose_wq); out_destroy_cifsoplockd_wq: destroy_workqueue(cifsoplockd_wq); out_destroy_fileinfo_put_wq: @@ -1741,6 +1747,7 @@ exit_cifs(void) cifs_destroy_mids(); cifs_destroy_inodecache(); cifs_fscache_unregister(); + destroy_workqueue(deferredclose_wq); destroy_workqueue(cifsoplockd_wq); destroy_workqueue(decrypt_wq); destroy_workqueue(fileinfo_put_wq); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index b23a0ee8c6f8..d88b4b523dcc 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -1154,6 +1154,14 @@ struct cifs_pending_open { __u32 oplock; }; +struct cifs_deferred_close { + struct list_head dlist; + struct tcon_link *tlink; + __u16 netfid; + __u64 persistent_fid; + __u64 volatile_fid; +}; + /* * This info hangs off the cifsFileInfo structure, pointed to by llist. * This is used to track byte stream locks on the file @@ -1248,6 +1256,9 @@ struct cifsFileInfo { struct cifs_search_info srch_inf; struct work_struct oplock_break; /* work for oplock breaks */ struct work_struct put; /* work for the final part of _put */ + struct delayed_work deferred; + bool oplock_break_received; /* Flag to indicate oplock break */ + bool deferred_scheduled; }; struct cifs_io_parms { @@ -1392,6 +1403,7 @@ struct cifsInodeInfo { #define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */ #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */ #define CIFS_INO_LOCK (5) /* lock bit for synchronization */ +#define CIFS_INO_MODIFIED_ATTR (6) /* Indicate change in mtime/ctime */ unsigned long flags; spinlock_t writers_lock; unsigned int writers; /* Number of writers on this inode */ @@ -1404,6 +1416,8 @@ struct cifsInodeInfo { struct fscache_cookie *fscache; #endif struct inode vfs_inode; + struct list_head deferred_closes; /* list of deferred closes */ + spinlock_t deferred_lock; /* protection on deferred list */ }; static inline struct cifsInodeInfo * @@ -1871,11 +1885,14 @@ extern bool disable_legacy_dialects; /* forbid vers=1.0 and vers=2.0 mounts */ void cifs_oplock_break(struct work_struct *work); void cifs_queue_oplock_break(struct cifsFileInfo *cfile); +void smb2_deferred_work_close(struct work_struct *work); +extern const struct slow_work_ops cifs_oplock_break_ops; extern struct workqueue_struct *cifsiod_wq; extern struct workqueue_struct *decrypt_wq; extern struct workqueue_struct *fileinfo_put_wq; extern struct workqueue_struct *cifsoplockd_wq; +extern struct workqueue_struct *deferredclose_wq; extern __u32 cifs_lock_secret; extern mempool_t *cifs_mid_poolp; diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index a79d50001fbf..d30cba44ba29 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -267,6 +267,19 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink, struct cifs_pending_open *open); extern void cifs_del_pending_open(struct cifs_pending_open *open); + +extern bool cifs_is_deferred_close(struct cifsFileInfo *cfile, + struct cifs_deferred_close **dclose); + +extern void cifs_add_deferred_close(struct cifsFileInfo *cfile, + struct cifs_deferred_close *dclose); + +extern void cifs_del_deferred_close(struct cifsFileInfo *cfile); + +extern void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode); + +extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon); + 
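/*
 * Illustrative sketch (not part of the patch itself) of how the
 * deferred-close helpers declared above cooperate with the cifs_close()
 * and oplock-break hunks in fs/cifs/file.c further down:
 *
 *	dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
 *	if (cinode->oplock == CIFS_CACHE_RHW_FLG && dclose) {
 *		spin_lock(&cinode->deferred_lock);
 *		cifs_add_deferred_close(cfile, dclose);
 *		queue_delayed_work(deferredclose_wq, &cfile->deferred,
 *				   cifs_sb->ctx->acregmax);
 *		cfile->deferred_scheduled = true;
 *		spin_unlock(&cinode->deferred_lock);
 *	} else {
 *		_cifsFileInfo_put(cfile, true, false);
 *		kfree(dclose);
 *	}
 *
 * A re-open within the acregmax window reuses the cached handle and calls
 * cifs_del_deferred_close(); an oplock break flushes the close early via
 * mod_delayed_work(deferredclose_wq, &cfile->deferred, 0), and unlink or
 * rename forces it with cifs_close_all_deferred_files().
 */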
extern struct TCP_Server_Info *cifs_get_tcp_session(struct smb3_fs_context *ctx); extern void cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 121d8b4535b0..495c395f9def 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -392,16 +392,6 @@ cifs_echo_request(struct work_struct *work) int rc; struct TCP_Server_Info *server = container_of(work, struct TCP_Server_Info, echo.work); - unsigned long echo_interval; - - /* - * If we need to renegotiate, set echo interval to zero to - * immediately call echo service where we can renegotiate. - */ - if (server->tcpStatus == CifsNeedNegotiate) - echo_interval = 0; - else - echo_interval = server->echo_interval; /* * We cannot send an echo if it is disabled. @@ -412,7 +402,7 @@ cifs_echo_request(struct work_struct *work) server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew || (server->ops->can_echo && !server->ops->can_echo(server)) || - time_before(jiffies, server->lstrp + echo_interval - HZ)) + time_before(jiffies, server->lstrp + server->echo_interval - HZ)) goto requeue_echo; rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS; @@ -476,6 +466,7 @@ server_unresponsive(struct TCP_Server_Info *server) */ if ((server->tcpStatus == CifsGood || server->tcpStatus == CifsNeedNegotiate) && + (!server->ops->can_echo || server->ops->can_echo(server)) && time_after(jiffies, server->lstrp + 3 * server->echo_interval)) { cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n", (3 * server->echo_interval) / HZ); @@ -3158,17 +3149,29 @@ out: int cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname) { - int rc = 0; + int rc; - smb3_parse_devname(devname, ctx); + if (devname) { + cifs_dbg(FYI, "%s: devname=%s\n", __func__, devname); + rc = smb3_parse_devname(devname, ctx); + if (rc) { + cifs_dbg(VFS, "%s: failed to parse %s: %d\n", __func__, devname, rc); + return rc; + } + } if (mntopts) { char *ip; - cifs_dbg(FYI, "%s: mntopts=%s\n", __func__, mntopts); rc = smb3_parse_opt(mntopts, "ip", &ip); - if (!rc && !cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip, - strlen(ip))) { + if (rc) { + cifs_dbg(VFS, "%s: failed to parse ip options: %d\n", __func__, rc); + return rc; + } + + rc = cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip, strlen(ip)); + kfree(ip); + if (!rc) { cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__); return -EINVAL; } @@ -3188,7 +3191,7 @@ cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const c return -EINVAL; } - return rc; + return 0; } static int diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index c85aff838305..6bcd3e8f7cda 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -34,6 +34,7 @@ #include "cifs_fs_sb.h" #include "cifs_unicode.h" #include "fs_context.h" +#include "cifs_ioctl.h" static void renew_parental_timestamps(struct dentry *direntry) @@ -430,6 +431,9 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, __u32 oplock; struct cifsFileInfo *file_info; + if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) + return -EIO; + /* * Posix open is only called (at lookup time) for file create now. 
For * opens (rather than creates), because we do not know if it is a file @@ -546,6 +550,9 @@ int cifs_create(struct user_namespace *mnt_userns, struct inode *inode, cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %pd and dentry = 0x%p\n", inode, direntry, direntry); + if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) + return -EIO; + tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb)); rc = PTR_ERR(tlink); if (IS_ERR(tlink)) @@ -583,6 +590,9 @@ int cifs_mknod(struct user_namespace *mnt_userns, struct inode *inode, return -EINVAL; cifs_sb = CIFS_SB(inode->i_sb); + if (unlikely(cifs_forced_shutdown(cifs_sb))) + return -EIO; + tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 639c59596d4f..6caad100c3f3 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -45,6 +45,7 @@ #include "fscache.h" #include "smbdirect.h" #include "fs_context.h" +#include "cifs_ioctl.h" static inline int cifs_convert_flags(unsigned int flags) { @@ -322,9 +323,12 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, cfile->dentry = dget(dentry); cfile->f_flags = file->f_flags; cfile->invalidHandle = false; + cfile->oplock_break_received = false; + cfile->deferred_scheduled = false; cfile->tlink = cifs_get_tlink(tlink); INIT_WORK(&cfile->oplock_break, cifs_oplock_break); INIT_WORK(&cfile->put, cifsFileInfo_put_work); + INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close); mutex_init(&cfile->fh_mutex); spin_lock_init(&cfile->file_info_lock); @@ -539,6 +543,11 @@ int cifs_open(struct inode *inode, struct file *file) xid = get_xid(); cifs_sb = CIFS_SB(inode->i_sb); + if (unlikely(cifs_forced_shutdown(cifs_sb))) { + free_xid(xid); + return -EIO; + } + tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { free_xid(xid); @@ -565,6 +574,23 @@ int cifs_open(struct inode *inode, struct file *file) file->f_op = &cifs_file_direct_ops; } + spin_lock(&CIFS_I(inode)->deferred_lock); + /* Get the cached handle as SMB2 close is deferred */ + rc = cifs_get_readable_path(tcon, full_path, &cfile); + if (rc == 0) { + if (file->f_flags == cfile->f_flags) { + file->private_data = cfile; + cifs_del_deferred_close(cfile); + spin_unlock(&CIFS_I(inode)->deferred_lock); + goto out; + } else { + spin_unlock(&CIFS_I(inode)->deferred_lock); + _cifsFileInfo_put(cfile, true, false); + } + } else { + spin_unlock(&CIFS_I(inode)->deferred_lock); + } + if (server->oplocks) oplock = REQ_OPLOCK; else @@ -846,11 +872,56 @@ reopen_error_exit: return rc; } +void smb2_deferred_work_close(struct work_struct *work) +{ + struct cifsFileInfo *cfile = container_of(work, + struct cifsFileInfo, deferred.work); + + spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); + if (!cfile->deferred_scheduled) { + spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); + return; + } + cifs_del_deferred_close(cfile); + cfile->deferred_scheduled = false; + spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); + _cifsFileInfo_put(cfile, true, false); +} + int cifs_close(struct inode *inode, struct file *file) { + struct cifsFileInfo *cfile; + struct cifsInodeInfo *cinode = CIFS_I(inode); + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); + struct cifs_deferred_close *dclose; + if (file->private_data != NULL) { - _cifsFileInfo_put(file->private_data, true, false); + cfile = file->private_data; file->private_data = NULL; + dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL); + if ((cinode->oplock == CIFS_CACHE_RHW_FLG) && + dclose) { + if 
(test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) + inode->i_ctime = inode->i_mtime = current_time(inode); + spin_lock(&cinode->deferred_lock); + cifs_add_deferred_close(cfile, dclose); + if (cfile->deferred_scheduled) { + mod_delayed_work(deferredclose_wq, + &cfile->deferred, cifs_sb->ctx->acregmax); + } else { + /* Deferred close for files */ + queue_delayed_work(deferredclose_wq, + &cfile->deferred, cifs_sb->ctx->acregmax); + cfile->deferred_scheduled = true; + spin_unlock(&cinode->deferred_lock); + return 0; + } + spin_unlock(&cinode->deferred_lock); + _cifsFileInfo_put(cfile, true, false); + } else { + _cifsFileInfo_put(cfile, true, false); + kfree(dclose); + } } /* return code from the ->release op is always ignored */ @@ -1920,8 +1991,10 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data, if (total_written > 0) { spin_lock(&d_inode(dentry)->i_lock); - if (*offset > d_inode(dentry)->i_size) + if (*offset > d_inode(dentry)->i_size) { i_size_write(d_inode(dentry), *offset); + d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9; + } spin_unlock(&d_inode(dentry)->i_lock); } mark_inode_dirty_sync(d_inode(dentry)); @@ -1947,7 +2020,8 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) continue; if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) { - if (!open_file->invalidHandle) { + if ((!open_file->invalidHandle) && + (!open_file->oplock_break_received)) { /* found a good file */ /* lock it so it will not be closed on us */ cifsFileInfo_get(open_file); @@ -2476,6 +2550,8 @@ retry: if (cfile) cifsFileInfo_put(cfile); free_xid(xid); + /* Indication to update ctime and mtime as close is deferred */ + set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags); return rc; } @@ -2577,13 +2653,17 @@ static int cifs_write_end(struct file *file, struct address_space *mapping, if (rc > 0) { spin_lock(&inode->i_lock); - if (pos > inode->i_size) + if (pos > inode->i_size) { i_size_write(inode, pos); + inode->i_blocks = (512 - 1 + pos) >> 9; + } spin_unlock(&inode->i_lock); } unlock_page(page); put_page(page); + /* Indication to update ctime and mtime as close is deferred */ + set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags); return rc; } @@ -4744,6 +4824,8 @@ void cifs_oplock_break(struct work_struct *work) struct TCP_Server_Info *server = tcon->ses->server; int rc = 0; bool purge_cache = false; + bool is_deferred = false; + struct cifs_deferred_close *dclose; wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, TASK_UNINTERRUPTIBLE); @@ -4790,6 +4872,18 @@ oplock_break_ack: cinode); cifs_dbg(FYI, "Oplock release rc = %d\n", rc); } + /* + * When an oplock break is received and the only remaining file + * handle is a cached (deferred-close) one, set the oplock_break_received + * flag so that a new open will not reuse the cached handle.
+ */ + spin_lock(&CIFS_I(inode)->deferred_lock); + is_deferred = cifs_is_deferred_close(cfile, &dclose); + if (is_deferred && cfile->deferred_scheduled) { + cfile->oplock_break_received = true; + mod_delayed_work(deferredclose_wq, &cfile->deferred, 0); + } + spin_unlock(&CIFS_I(inode)->deferred_lock); _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false); cifs_done_oplock_break(cinode); } diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c index 3e0d016849e3..3bcf881c3ae9 100644 --- a/fs/cifs/fs_context.c +++ b/fs/cifs/fs_context.c @@ -476,6 +476,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx) /* move "pos" up to delimiter or NULL */ pos += len; + kfree(ctx->UNC); ctx->UNC = kstrndup(devname, pos - devname, GFP_KERNEL); if (!ctx->UNC) return -ENOMEM; @@ -486,6 +487,9 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx) if (*pos == '/' || *pos == '\\') pos++; + kfree(ctx->prepath); + ctx->prepath = NULL; + /* If pos is NULL then no prepath */ if (!*pos) return 0; @@ -1642,6 +1646,7 @@ void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb) cifs_dbg(VFS, "mount options mfsymlinks and sfu both enabled\n"); } } + cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SHUTDOWN; return; } diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 002d864b8f7b..1dfa57982522 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -26,7 +26,6 @@ #include <linux/sched/signal.h> #include <linux/wait_bit.h> #include <linux/fiemap.h> - #include <asm/div64.h> #include "cifsfs.h" #include "cifspdu.h" @@ -38,7 +37,7 @@ #include "cifs_unicode.h" #include "fscache.h" #include "fs_context.h" - +#include "cifs_ioctl.h" static void cifs_set_ops(struct inode *inode) { @@ -1610,6 +1609,9 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) cifs_dbg(FYI, "cifs_unlink, dir=0x%p, dentry=0x%p\n", dir, dentry); + if (unlikely(cifs_forced_shutdown(cifs_sb))) + return -EIO; + tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); @@ -1632,6 +1634,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) goto unlink_out; } + cifs_close_all_deferred_files(tcon); if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { rc = CIFSPOSIXDelFile(xid, tcon, full_path, @@ -1872,6 +1875,8 @@ int cifs_mkdir(struct user_namespace *mnt_userns, struct inode *inode, mode, inode); cifs_sb = CIFS_SB(inode->i_sb); + if (unlikely(cifs_forced_shutdown(cifs_sb))) + return -EIO; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); @@ -1954,6 +1959,11 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) } cifs_sb = CIFS_SB(inode->i_sb); + if (unlikely(cifs_forced_shutdown(cifs_sb))) { + rc = -EIO; + goto rmdir_exit; + } + tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { rc = PTR_ERR(tlink); @@ -2088,6 +2098,9 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir, return -EINVAL; cifs_sb = CIFS_SB(source_dir->i_sb); + if (unlikely(cifs_forced_shutdown(cifs_sb))) + return -EIO; + tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); @@ -2109,6 +2122,7 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir, goto cifs_rename_exit; } + cifs_close_all_deferred_files(tcon); rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, to_name); @@ -2404,6 +2418,9 @@ int cifs_getattr(struct user_namespace *mnt_userns, const struct path *path, struct inode *inode = d_inode(dentry); int rc; + if 
(unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) + return -EIO; + /* * We need to be sure that all dirty pages are written and the server * has actual ctime, mtime and file length. @@ -2476,6 +2493,9 @@ int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start, struct cifsFileInfo *cfile; int rc; + if (unlikely(cifs_forced_shutdown(cifs_sb))) + return -EIO; + /* * We need to be sure that all dirty pages are written as they * might fill holes on the server. @@ -2962,6 +2982,9 @@ cifs_setattr(struct user_namespace *mnt_userns, struct dentry *direntry, struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb); int rc, retries = 0; + if (unlikely(cifs_forced_shutdown(cifs_sb))) + return -EIO; + do { if (pTcon->unix_ext) rc = cifs_setattr_unix(direntry, attrs); diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index 08d99fec593e..28ec8d7c521a 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c @@ -164,6 +164,100 @@ static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon, return rc; } +static int cifs_shutdown(struct super_block *sb, unsigned long arg) +{ + struct cifs_sb_info *sbi = CIFS_SB(sb); + __u32 flags; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (get_user(flags, (__u32 __user *)arg)) + return -EFAULT; + + if (flags > CIFS_GOING_FLAGS_NOLOGFLUSH) + return -EINVAL; + + if (cifs_forced_shutdown(sbi)) + return 0; + + cifs_dbg(VFS, "shut down requested (%d)", flags); +/* trace_cifs_shutdown(sb, flags);*/ + + /* + * see: + * https://man7.org/linux/man-pages/man2/ioctl_xfs_goingdown.2.html + * for more information and description of original intent of the flags + */ + switch (flags) { + /* + * We could add support later for default flag which requires: + * "Flush all dirty data and metadata to disk" + * would need to call syncfs or equivalent to flush page cache for + * the mount and then issue fsync to server (if nostrictsync not set) + */ + case CIFS_GOING_FLAGS_DEFAULT: + cifs_dbg(FYI, "shutdown with default flag not supported\n"); + return -EINVAL; + /* + * FLAGS_LOGFLUSH is easy since it asks to write out metadata (not + * data) but metadata writes are not cached on the client, so can treat + * it similarly to NOLOGFLUSH + */ + case CIFS_GOING_FLAGS_LOGFLUSH: + case CIFS_GOING_FLAGS_NOLOGFLUSH: + sbi->mnt_cifs_flags |= CIFS_MOUNT_SHUTDOWN; + return 0; + default: + return -EINVAL; + } + return 0; +} + +static int cifs_dump_full_key(struct cifs_tcon *tcon, unsigned long arg) +{ + struct smb3_full_key_debug_info pfull_key_inf; + __u64 suid; + struct list_head *tmp; + struct cifs_ses *ses; + bool found = false; + + if (!smb3_encryption_required(tcon)) + return -EOPNOTSUPP; + + ses = tcon->ses; /* default to user id for current user */ + if (get_user(suid, (__u64 __user *)arg)) + suid = 0; + if (suid) { + /* search to see if there is a session with a matching SMB UID */ + spin_lock(&cifs_tcp_ses_lock); + list_for_each(tmp, &tcon->ses->server->smb_ses_list) { + ses = list_entry(tmp, struct cifs_ses, smb_ses_list); + if (ses->Suid == suid) { + found = true; + break; + } + } + spin_unlock(&cifs_tcp_ses_lock); + if (found == false) + return -EINVAL; + } /* else uses default user's SMB UID (ie current user) */ + + pfull_key_inf.cipher_type = le16_to_cpu(ses->server->cipher_type); + pfull_key_inf.Suid = ses->Suid; + memcpy(pfull_key_inf.auth_key, ses->auth_key.response, + 16 /* SMB2_NTLMV2_SESSKEY_SIZE */); + memcpy(pfull_key_inf.smb3decryptionkey, ses->smb3decryptionkey, + 32 /* SMB3_ENC_DEC_KEY_SIZE */); + memcpy(pfull_key_inf.smb3encryptionkey, + 
ses->smb3encryptionkey, 32 /* SMB3_ENC_DEC_KEY_SIZE */); + if (copy_to_user((void __user *)arg, &pfull_key_inf, + sizeof(struct smb3_full_key_debug_info))) + return -EFAULT; + + return 0; +} + long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) { struct inode *inode = file_inode(filep); @@ -304,6 +398,21 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) else rc = 0; break; + /* + * Dump full key (32 bytes instead of 16 bytes) is + * needed if GCM256 (stronger encryption) negotiated + */ + case CIFS_DUMP_FULL_KEY: + if (pSMBFile == NULL) + break; + if (!capable(CAP_SYS_ADMIN)) { + rc = -EACCES; + break; + } + tcon = tlink_tcon(pSMBFile->tlink); + rc = cifs_dump_full_key(tcon, arg); + + break; case CIFS_IOC_NOTIFY: if (!S_ISDIR(inode->i_mode)) { /* Notify can only be done on directories */ @@ -325,6 +434,9 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) rc = -EOPNOTSUPP; cifs_put_tlink(tlink); break; + case CIFS_IOC_SHUTDOWN: + rc = cifs_shutdown(inode->i_sb, arg); + break; default: cifs_dbg(FYI, "unsupported ioctl\n"); break; diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 616e1bc0cc0a..970fcf2adb08 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -30,6 +30,7 @@ #include "cifs_fs_sb.h" #include "cifs_unicode.h" #include "smb2proto.h" +#include "cifs_ioctl.h" /* * M-F Symlink Functions - Begin @@ -518,6 +519,9 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode, struct TCP_Server_Info *server; struct cifsInodeInfo *cifsInode; + if (unlikely(cifs_forced_shutdown(cifs_sb))) + return -EIO; + tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); @@ -679,9 +683,16 @@ cifs_symlink(struct user_namespace *mnt_userns, struct inode *inode, struct tcon_link *tlink; struct cifs_tcon *pTcon; const char *full_path; - void *page = alloc_dentry_path(); + void *page; struct inode *newinode = NULL; + if (unlikely(cifs_forced_shutdown(cifs_sb))) + return -EIO; + + page = alloc_dentry_path(); + if (!page) + return -ENOMEM; + xid = get_xid(); tlink = cifs_sb_tlink(cifs_sb); diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index c15a90e422be..524dbdfb7184 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -672,6 +672,85 @@ cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink, spin_unlock(&tlink_tcon(open->tlink)->open_file_lock); } +bool +cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose) +{ + struct cifs_deferred_close *dclose; + + list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) { + if ((dclose->netfid == cfile->fid.netfid) && + (dclose->persistent_fid == cfile->fid.persistent_fid) && + (dclose->volatile_fid == cfile->fid.volatile_fid)) { + *pdclose = dclose; + return true; + } + } + return false; +} + +void +cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose) +{ + bool is_deferred = false; + struct cifs_deferred_close *pdclose; + + is_deferred = cifs_is_deferred_close(cfile, &pdclose); + if (is_deferred) { + kfree(dclose); + return; + } + + dclose->tlink = cfile->tlink; + dclose->netfid = cfile->fid.netfid; + dclose->persistent_fid = cfile->fid.persistent_fid; + dclose->volatile_fid = cfile->fid.volatile_fid; + list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes); +} + +void +cifs_del_deferred_close(struct cifsFileInfo *cfile) +{ + bool is_deferred = false; + struct cifs_deferred_close *dclose; + + is_deferred = cifs_is_deferred_close(cfile, 
&dclose); + if (!is_deferred) + return; + list_del(&dclose->dlist); + kfree(dclose); +} + +void +cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode) +{ + struct cifsFileInfo *cfile = NULL; + struct cifs_deferred_close *dclose; + + list_for_each_entry(cfile, &cifs_inode->openFileList, flist) { + spin_lock(&cifs_inode->deferred_lock); + if (cifs_is_deferred_close(cfile, &dclose)) + mod_delayed_work(deferredclose_wq, &cfile->deferred, 0); + spin_unlock(&cifs_inode->deferred_lock); + } +} + +void +cifs_close_all_deferred_files(struct cifs_tcon *tcon) +{ + struct cifsFileInfo *cfile; + struct cifsInodeInfo *cinode; + struct list_head *tmp; + + spin_lock(&tcon->open_file_lock); + list_for_each(tmp, &tcon->openFileList) { + cfile = list_entry(tmp, struct cifsFileInfo, tlist); + cinode = CIFS_I(d_inode(cfile->dentry)); + if (delayed_work_pending(&cfile->deferred)) + mod_delayed_work(deferredclose_wq, &cfile->deferred, 0); + } + spin_unlock(&tcon->open_file_lock); +} + /* parses DFS refferal V3 structure * caller is responsible for freeing target_nodes * returns: diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index e351b945135b..aa3e8ca0457c 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -30,6 +30,7 @@ #include "cifs_debug.h" #include "cifs_fs_sb.h" #include "cifs_unicode.h" +#include "cifs_ioctl.h" #define MAX_EA_VALUE_SIZE CIFSMaxBufSize #define CIFS_XATTR_CIFS_ACL "system.cifs_acl" /* DACL only */ @@ -421,6 +422,9 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size) const char *full_path; void *page; + if (unlikely(cifs_forced_shutdown(cifs_sb))) + return -EIO; + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) return -EOPNOTSUPP; @@ -525,7 +525,7 @@ retry: dax_disassociate_entry(entry, mapping, false); xas_store(xas, NULL); /* undo the PMD join */ dax_wake_entry(xas, entry, true); - mapping->nrexceptional--; + mapping->nrpages -= PG_PMD_NR; entry = NULL; xas_set(xas, index); } @@ -541,7 +541,7 @@ retry: dax_lock_entry(xas, entry); if (xas_error(xas)) goto out_unlock; - mapping->nrexceptional++; + mapping->nrpages += 1UL << order; } out_unlock: @@ -661,7 +661,7 @@ static int __dax_invalidate_entry(struct address_space *mapping, goto out; dax_disassociate_entry(entry, mapping, trunc); xas_store(&xas, NULL); - mapping->nrexceptional--; + mapping->nrpages -= 1UL << dax_entry_order(entry); ret = 1; out: put_unlocked_entry(&xas, entry); @@ -965,7 +965,7 @@ int dax_writeback_mapping_range(struct address_space *mapping, if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT)) return -EIO; - if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL) + if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL) return 0; trace_dax_writeback_range(inode, xas.xa_index, end_index); diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 943e523f4c9d..345f8061e3b4 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 1997-2004 Erez Zadok @@ -350,7 +350,7 @@ out: return rc; } -/** +/* * lower_offset_for_page * * Convert an eCryptfs page index into a lower byte offset @@ -535,7 +535,7 @@ int ecryptfs_decrypt_page(struct page *page) rc = crypt_extent(crypt_stat, page, page, extent_offset, DECRYPT); if (rc) { - printk(KERN_ERR "%s: Error encrypting extent; " + printk(KERN_ERR "%s: Error decrypting extent; " "rc = [%d]\n", __func__, rc); goto out; } @@ -627,9 +627,8 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat 
*crypt_stat) } } -/** +/* * ecryptfs_compute_root_iv - * @crypt_stats * * On error, sets the root IV to all 0's. */ @@ -1370,7 +1369,7 @@ int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry, return rc; } -/** +/* * ecryptfs_read_metadata * * Common entry point for reading file metadata. From here, we could @@ -1448,7 +1447,7 @@ out: return rc; } -/** +/* * ecryptfs_encrypt_filename - encrypt filename * * CBC-encrypts the filename. We do not want to encrypt the same @@ -1590,11 +1589,10 @@ out: struct kmem_cache *ecryptfs_key_tfm_cache; static struct list_head key_tfm_list; -struct mutex key_tfm_list_mutex; +DEFINE_MUTEX(key_tfm_list_mutex); int __init ecryptfs_init_crypto(void) { - mutex_init(&key_tfm_list_mutex); INIT_LIST_HEAD(&key_tfm_list); return 0; } @@ -1877,10 +1875,11 @@ out: /** * ecryptfs_encrypt_and_encode_filename - converts a plaintext file name to cipher text - * @crypt_stat: The crypt_stat struct associated with the file anem to encode + * @encoded_name: The encrypted name + * @encoded_name_size: Length of the encrypted name + * @mount_crypt_stat: The crypt_stat struct associated with the file name to encode * @name: The plaintext name - * @length: The length of the plaintext - * @encoded_name: The encypted name + * @name_size: The length of the plaintext name * * Encrypts and encodes a filename into something that constitutes a * valid filename for a filesystem, with printable characters. @@ -1992,7 +1991,7 @@ static bool is_dot_dotdot(const char *name, size_t name_size) * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext * @plaintext_name: The plaintext name * @plaintext_name_size: The plaintext name size - * @ecryptfs_dir_dentry: eCryptfs directory dentry + * @sb: Ecryptfs's super_block * @name: The filename in cipher text * @name_size: The cipher text name size * diff --git a/fs/ecryptfs/debug.c b/fs/ecryptfs/debug.c index 1f65e99f9a41..cf6d0e8e25a1 100644 --- a/fs/ecryptfs/debug.c +++ b/fs/ecryptfs/debug.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * eCryptfs: Linux filesystem encryption layer * Functions only useful for debugging. * @@ -9,7 +9,7 @@ #include "ecryptfs_kernel.h" -/** +/* * ecryptfs_dump_auth_tok - debug function to print auth toks * * This function will print the contents of an ecryptfs authentication diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c index 44606f079efb..acaa0825e9bb 100644 --- a/fs/ecryptfs/dentry.c +++ b/fs/ecryptfs/dentry.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 1997-2003 Erez Zadok diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 495fb4514d09..5f2b49e13731 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -513,7 +513,7 @@ ecryptfs_dentry_to_lower_path(struct dentry *dentry) } #define ecryptfs_printk(type, fmt, arg...) 
\ - __ecryptfs_printk(type "%s: " fmt, __func__, ## arg); + __ecryptfs_printk(type "%s: " fmt, __func__, ## arg) __printf(1, 2) void __ecryptfs_printk(const char *fmt, ...); diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 5fb45d865ce5..18d5b91cb573 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 1997-2004 Erez Zadok @@ -19,7 +19,7 @@ #include <linux/fs_stack.h> #include "ecryptfs_kernel.h" -/** +/* * ecryptfs_read_update_atime * * generic_file_read updates the atime of upper layer inode. But, it diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 0a1ab1db1450..16d50dface59 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 1997-2004 Erez Zadok @@ -199,7 +199,7 @@ out_lock: return inode; } -/** +/* * ecryptfs_initialize_file * * Cause the file to be changed from a basic empty file to an ecryptfs @@ -242,10 +242,8 @@ out: return rc; } -/** +/* * ecryptfs_create - * @dir: The inode of the directory in which to create the file. - * @dentry: The eCryptfs dentry * @mode: The mode of the new file. * * Creates a new file. @@ -313,7 +311,7 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode) return 0; } -/** +/* * ecryptfs_lookup_interpose - Dentry interposition for a lookup */ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry, @@ -873,6 +871,7 @@ ecryptfs_permission(struct user_namespace *mnt_userns, struct inode *inode, /** * ecryptfs_setattr + * @mnt_userns: user namespace of the target mount * @dentry: dentry handle to the inode to modify * @ia: Structure with flags of what to change and values * diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index f6a17d259db7..3fe41964c0d8 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * eCryptfs: Linux filesystem encryption layer * In-kernel key management code. Includes functions to parse and * write authentication token-related packets with the underlying @@ -21,7 +21,7 @@ #include <linux/slab.h> #include "ecryptfs_kernel.h" -/** +/* * request_key returned an error instead of a valid key address; * determine the type of error, make appropriate log entries, and * return an error code. @@ -536,8 +536,9 @@ out: /** * ecryptfs_find_auth_tok_for_sig + * @auth_tok_key: key containing the authentication token * @auth_tok: Set to the matching auth_tok; NULL if not found - * @crypt_stat: inode crypt_stat crypto context + * @mount_crypt_stat: inode crypt_stat crypto context * @sig: Sig of auth_tok to find * * For now, this function simply looks at the registered auth_tok's @@ -576,7 +577,7 @@ ecryptfs_find_auth_tok_for_sig( return rc; } -/** +/* * write_tag_70_packet can gobble a lot of stack space. We stuff most * of the function's parameters in a kmalloc'd struct to help reduce * eCryptfs' overall stack usage. 
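The write_tag_70_packet comment above describes a pattern worth noting: kernel stacks are small, so eCryptfs boxes what would otherwise be large locals into a single kmalloc'd struct (the "silly stack" struct in the next hunk). A minimal userspace sketch of the same pattern, with hypothetical names and malloc standing in for kmalloc:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for a "silly stack" struct: all of the large
 * buffers a function needs, boxed into one heap allocation instead of
 * being declared as locals (which would burst a small kernel stack). */
struct tag70_scratch {
    char block_aligned_filename[1024];
    char iv[16];
    char padded_name[512];
};

static int write_packet(const char *filename)
{
    struct tag70_scratch *s;

    s = calloc(1, sizeof(*s));  /* kmalloc(..., GFP_KERNEL) in the kernel */
    if (!s)
        return -1;

    /* Use the scratch space as if it were local variables. */
    snprintf(s->padded_name, sizeof(s->padded_name), "%s", filename);
    memset(s->iv, 0, sizeof(s->iv));
    printf("used %zu bytes of heap scratch for \"%s\"\n",
           sizeof(*s), s->padded_name);

    free(s);  /* kfree() in the kernel */
    return 0;
}

int main(void)
{
    return write_packet("example.txt") ? 1 : 0;
}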
@@ -604,7 +605,7 @@ struct ecryptfs_write_tag_70_packet_silly_stack { struct shash_desc *hash_desc; }; -/** +/* * write_tag_70_packet - Write encrypted filename (EFN) packet against FNEK * @filename: NULL-terminated filename string * @@ -873,7 +874,7 @@ struct ecryptfs_parse_tag_70_packet_silly_stack { }; /** - * parse_tag_70_packet - Parse and process FNEK-encrypted passphrase packet + * ecryptfs_parse_tag_70_packet - Parse and process FNEK-encrypted passphrase packet * @filename: This function kmalloc's the memory for the filename * @filename_size: This function sets this to the amount of memory * kmalloc'd for the filename @@ -1172,7 +1173,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok, rc = ecryptfs_cipher_code_to_string(crypt_stat->cipher, cipher_code); if (rc) { ecryptfs_printk(KERN_ERR, "Cipher code [%d] is invalid\n", - cipher_code) + cipher_code); goto out; } crypt_stat->flags |= ECRYPTFS_KEY_VALID; diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c index a7c903cb01a0..ae4cb4e2e134 100644 --- a/fs/ecryptfs/kthread.c +++ b/fs/ecryptfs/kthread.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 2008 International Business Machines Corp. @@ -108,6 +108,7 @@ void ecryptfs_destroy_kthread(void) * @lower_file: Result of dentry_open by root on lower dentry * @lower_dentry: Lower dentry for file to open * @lower_mnt: Lower vfsmount for file to open + * @cred: credential to use for this call * * This function gets a r/w file opened against the lower dentry. * diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index cdf40a54a35d..d66bbd2df191 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 1997-2003 Erez Zadok @@ -24,7 +24,7 @@ #include <linux/magic.h> #include "ecryptfs_kernel.h" -/** +/* * Module parameter that defines the ecryptfs_verbosity level. */ int ecryptfs_verbosity = 0; @@ -34,7 +34,7 @@ MODULE_PARM_DESC(ecryptfs_verbosity, "Initial verbosity level (0 or 1; defaults to " "0, which is Quiet)"); -/** +/* * Module parameter that defines the number of message buffer elements */ unsigned int ecryptfs_message_buf_len = ECRYPTFS_DEFAULT_MSG_CTX_ELEMS; @@ -43,7 +43,7 @@ module_param(ecryptfs_message_buf_len, uint, 0); MODULE_PARM_DESC(ecryptfs_message_buf_len, "Number of message buffer elements"); -/** +/* * Module parameter that defines the maximum guaranteed amount of time to wait * for a response from ecryptfsd. The actual sleep time will be, more than * likely, a small amount greater than this specified value, but only less if @@ -57,7 +57,7 @@ MODULE_PARM_DESC(ecryptfs_message_wait_timeout, "sleep while waiting for a message response from " "userspace"); -/** +/* * Module parameter that is an estimate of the maximum number of users * that will be concurrently using eCryptfs. Set this to the right * value to balance performance and memory use. @@ -80,7 +80,7 @@ void __ecryptfs_printk(const char *fmt, ...) 
va_end(args); } -/** +/* * ecryptfs_init_lower_file * @ecryptfs_dentry: Fully initialized eCryptfs dentry object, with * the lower dentry and the lower mount set @@ -221,7 +221,7 @@ static void ecryptfs_init_mount_crypt_stat( /** * ecryptfs_parse_options - * @sb: The ecryptfs super block + * @sbi: The ecryptfs super block * @options: The options passed to the kernel * @check_ruid: set to 1 if device uid should be checked against the ruid * @@ -466,10 +466,10 @@ out: struct kmem_cache *ecryptfs_sb_info_cache; static struct file_system_type ecryptfs_fs_type; -/** - * ecryptfs_get_sb - * @fs_type - * @flags +/* + * ecryptfs_mount + * @fs_type: The filesystem type that the superblock should belong to + * @flags: The flags associated with the mount * @dev_name: The path to mount over * @raw_data: The options passed into the kernel */ @@ -492,6 +492,12 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags goto out; } + if (!dev_name) { + rc = -EINVAL; + err = "Device name cannot be null"; + goto out; + } + rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid); if (rc) { err = "Error parsing options"; @@ -635,7 +641,7 @@ static struct file_system_type ecryptfs_fs_type = { }; MODULE_ALIAS_FS("ecryptfs"); -/** +/* * inode_info_init_once * * Initializes the ecryptfs_inode_info_cache when it is created diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c index c0dfd9647627..6318f3500e5c 100644 --- a/fs/ecryptfs/messaging.c +++ b/fs/ecryptfs/messaging.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 2004-2008 International Business Machines Corp. @@ -14,10 +14,10 @@ static LIST_HEAD(ecryptfs_msg_ctx_free_list); static LIST_HEAD(ecryptfs_msg_ctx_alloc_list); -static struct mutex ecryptfs_msg_ctx_lists_mux; +static DEFINE_MUTEX(ecryptfs_msg_ctx_lists_mux); static struct hlist_head *ecryptfs_daemon_hash; -struct mutex ecryptfs_daemon_hash_mux; +DEFINE_MUTEX(ecryptfs_daemon_hash_mux); static int ecryptfs_hash_bits; #define ecryptfs_current_euid_hash(uid) \ hash_long((unsigned long)from_kuid(&init_user_ns, current_euid()), ecryptfs_hash_bits) @@ -147,7 +147,7 @@ out: return rc; } -/** +/* * ecryptfs_exorcise_daemon - Destroy the daemon struct * * Must be called ceremoniously while in possession of @@ -181,7 +181,8 @@ out: } /** - * ecryptfs_process_reponse + * ecryptfs_process_response + * @daemon: eCryptfs daemon object * @msg: The ecryptfs message received; the caller should sanity check * msg->data_len and free the memory * @seq: The sequence number of the message; must match the sequence @@ -250,6 +251,7 @@ out: * ecryptfs_send_message_locked * @data: The data to send * @data_len: The length of data + * @msg_type: Type of message * @msg_ctx: The message context allocated for the send * * Must be called with ecryptfs_daemon_hash_mux held. 
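The DEFINE_MUTEX conversions above, together with the mutex_init calls dropped in the next hunks, replace runtime initialization with static initialization, so the locks are valid from the moment the code is loaded and there is no window in which a lock exists but has not yet been set up. A rough userspace analogy using POSIX threads (PTHREAD_MUTEX_INITIALIZER playing the role of DEFINE_MUTEX), offered only as an illustration:

/* build: cc demo.c -pthread */
#include <pthread.h>
#include <stdio.h>

/* Statically initialized: usable before any setup code runs, just as
 * DEFINE_MUTEX() yields a mutex that needs no init call. */
static pthread_mutex_t list_mux = PTHREAD_MUTEX_INITIALIZER;

static int counter;

static void *worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&list_mux);
    counter++;
    pthread_mutex_unlock(&list_mux);
    return NULL;
}

int main(void)
{
    pthread_t t;

    /* No pthread_mutex_init() needed, and no ordering hazard between
     * creating users of the lock and initializing it. */
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);
    printf("counter = %d\n", counter);
    return 0;
}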
@@ -359,7 +361,6 @@ int __init ecryptfs_init_messaging(void) "too large, defaulting to [%d] users\n", __func__, ecryptfs_number_of_users); } - mutex_init(&ecryptfs_daemon_hash_mux); mutex_lock(&ecryptfs_daemon_hash_mux); ecryptfs_hash_bits = 1; while (ecryptfs_number_of_users >> ecryptfs_hash_bits) @@ -383,7 +384,6 @@ int __init ecryptfs_init_messaging(void) rc = -ENOMEM; goto out; } - mutex_init(&ecryptfs_msg_ctx_lists_mux); mutex_lock(&ecryptfs_msg_ctx_lists_mux); ecryptfs_msg_counter = 0; for (i = 0; i < ecryptfs_message_buf_len; i++) { diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c index 742ece22c1d4..4e62c3cef70f 100644 --- a/fs/ecryptfs/miscdev.c +++ b/fs/ecryptfs/miscdev.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 2008 International Business Machines Corp. @@ -312,6 +312,7 @@ out_unlock_daemon: /** * ecryptfs_miscdev_response - miscdevess response to message previously sent to daemon + * @daemon: eCryptfs daemon object * @data: Bytes comprising struct ecryptfs_message * @data_size: sizeof(struct ecryptfs_message) + data len * @seq: Sequence number for miscdev response packet diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 2f333a40ff4d..392e721b50a3 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * eCryptfs: Linux filesystem encryption layer * This is where eCryptfs coordinates the symmetric encryption and * decryption of the file data as it passes between the lower @@ -22,7 +22,7 @@ #include <asm/unaligned.h> #include "ecryptfs_kernel.h" -/** +/* * ecryptfs_get_locked_page * * Get one page from cache or lower f/s, return error otherwise. @@ -41,6 +41,7 @@ struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index) /** * ecryptfs_writepage * @page: Page that is locked before this call is made + * @wbc: Write-back control structure * * Returns zero on success; non-zero otherwise * @@ -78,7 +79,7 @@ static void strip_xattr_flag(char *page_virt, } } -/** +/* * Header Extent: * Octets 0-7: Unencrypted file size (big-endian) * Octets 8-15: eCryptfs special marker @@ -229,7 +230,7 @@ out: return rc; } -/** +/* * Called with lower inode mutex held. */ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to) @@ -368,7 +369,7 @@ out: return rc; } -/** +/* * ecryptfs_write_inode_size_to_header * * Writes the lower file size to the first 8 bytes of the header. diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c index 0438997ac9d8..60bdcaddcbe5 100644 --- a/fs/ecryptfs/read_write.c +++ b/fs/ecryptfs/read_write.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 2007 International Business Machines Corp. 
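The mmap.c comment above spells out the on-disk header extent layout: octets 0-7 hold the unencrypted file size in big-endian order, followed by the eCryptfs marker in octets 8-15. Decoding the size field is plain byte arithmetic; a self-contained sketch (the sample bytes are fabricated):

#include <stdint.h>
#include <stdio.h>

/* Assemble the unencrypted file size from octets 0-7 of an eCryptfs
 * header extent. The field is big-endian, so the first octet is the
 * most significant byte. */
static uint64_t header_file_size(const unsigned char *hdr)
{
    uint64_t size = 0;
    int i;

    for (i = 0; i < 8; i++)
        size = (size << 8) | hdr[i];
    return size;
}

int main(void)
{
    /* Fabricated header prefix encoding a size of 0x1234 (4660) bytes. */
    unsigned char hdr[8] = { 0, 0, 0, 0, 0, 0, 0x12, 0x34 };

    printf("unencrypted size = %llu\n",
           (unsigned long long)header_file_size(hdr));
    return 0;
}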
@@ -230,6 +230,8 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size, * ecryptfs_read_lower_page_segment * @page_for_ecryptfs: The page into which data for eCryptfs will be * written + * @page_index: Page index in @page_for_ecryptfs from which to start + * writing * @offset_in_page: Offset in @page_for_ecryptfs from which to start * writing * @size: The number of bytes to write into @page_for_ecryptfs diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c index 6b1853f1c06a..39116af0390f 100644 --- a/fs/ecryptfs/super.c +++ b/fs/ecryptfs/super.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 1997-2003 Erez Zadok @@ -81,7 +81,7 @@ static void ecryptfs_destroy_inode(struct inode *inode) /** * ecryptfs_statfs - * @sb: The ecryptfs super block + * @dentry: The ecryptfs dentry * @buf: The struct kstatfs to fill in with stats * * Get the filesystem statistics. Currently, we let this pass right through @@ -108,7 +108,7 @@ static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf) /** * ecryptfs_evict_inode - * @inode - The ecryptfs inode + * @inode: The ecryptfs inode * * Called by iput() when the inode reference count reached zero * and the inode is not hashed anywhere. Used to clear anything @@ -123,7 +123,7 @@ static void ecryptfs_evict_inode(struct inode *inode) iput(ecryptfs_inode_to_lower(inode)); } -/** +/* * ecryptfs_show_options * * Prints the mount options for a given superblock. diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig index 62e638a49bbf..7669de7b49ce 100644 --- a/fs/f2fs/Kconfig +++ b/fs/f2fs/Kconfig @@ -7,6 +7,13 @@ config F2FS_FS select CRYPTO_CRC32 select F2FS_FS_XATTR if FS_ENCRYPTION select FS_ENCRYPTION_ALGS if FS_ENCRYPTION + select LZ4_COMPRESS if F2FS_FS_LZ4 + select LZ4_DECOMPRESS if F2FS_FS_LZ4 + select LZ4HC_COMPRESS if F2FS_FS_LZ4HC + select LZO_COMPRESS if F2FS_FS_LZO + select LZO_DECOMPRESS if F2FS_FS_LZO + select ZSTD_COMPRESS if F2FS_FS_ZSTD + select ZSTD_DECOMPRESS if F2FS_FS_ZSTD help F2FS is based on Log-structured File System (LFS), which supports versatile "flash-friendly" features. The design has been focused on @@ -94,8 +101,6 @@ config F2FS_FS_COMPRESSION config F2FS_FS_LZO bool "LZO compression support" depends on F2FS_FS_COMPRESSION - select LZO_COMPRESS - select LZO_DECOMPRESS default y help Support LZO compress algorithm, if unsure, say Y. @@ -103,8 +108,6 @@ config F2FS_FS_LZO config F2FS_FS_LZ4 bool "LZ4 compression support" depends on F2FS_FS_COMPRESSION - select LZ4_COMPRESS - select LZ4_DECOMPRESS default y help Support LZ4 compress algorithm, if unsure, say Y. @@ -113,7 +116,6 @@ config F2FS_FS_LZ4HC bool "LZ4HC compression support" depends on F2FS_FS_COMPRESSION depends on F2FS_FS_LZ4 - select LZ4HC_COMPRESS default y help Support LZ4HC compress algorithm, LZ4HC has compatible on-disk @@ -122,8 +124,6 @@ config F2FS_FS_LZ4HC config F2FS_FS_ZSTD bool "ZSTD compression support" depends on F2FS_FS_COMPRESSION - select ZSTD_COMPRESS - select ZSTD_DECOMPRESS default y help Support ZSTD compress algorithm, if unsure, say Y. @@ -132,8 +132,6 @@ config F2FS_FS_LZORLE bool "LZO-RLE compression support" depends on F2FS_FS_COMPRESSION depends on F2FS_FS_LZO - select LZO_COMPRESS - select LZO_DECOMPRESS default y help Support LZO-RLE compress algorithm, if unsure, say Y. 
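The read_write.c hunk above fills in the @page_index doc for ecryptfs_read_lower_page_segment. The helper's body is not shown in the hunk, but its parameters imply the usual page-cache arithmetic: the lower-file byte offset is the page index scaled by the page size, plus the offset within the page. A small sketch of that mapping, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12  /* assumed: 4096-byte pages */

/* Byte offset in the lower file for a (page_index, offset_in_page)
 * pair, as the ecryptfs_read_lower_page_segment() parameters imply. */
static int64_t lower_offset(uint64_t page_index, unsigned int offset_in_page)
{
    return (int64_t)(page_index << PAGE_SHIFT) + offset_in_page;
}

int main(void)
{
    /* Page 3, 100 bytes in: 3 * 4096 + 100 = 12388. */
    printf("offset = %lld\n", (long long)lower_offset(3, 100));
    return 0;
}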
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index 965037a9c205..239ad9453b99 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -29,6 +29,7 @@ static inline size_t f2fs_acl_size(int count) static inline int f2fs_acl_count(size_t size) { ssize_t s; + size -= sizeof(struct f2fs_acl_header); s = size - 4 * sizeof(struct f2fs_acl_entry_short); if (s < 0) { diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index be5415a0dbbc..f795049e63d5 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -719,6 +719,7 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi) orphan_blk = (struct f2fs_orphan_block *)page_address(page); for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) { nid_t ino = le32_to_cpu(orphan_blk->ino[j]); + err = recover_orphan_inode(sbi, ino); if (err) { f2fs_put_page(page, 1); @@ -1456,7 +1457,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) orphan_blocks); if (__remain_node_summaries(cpc->reason)) - ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS+ + ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS + cp_payload_blks + data_sum_blocks + orphan_blocks + NR_CURSEG_NODE_TYPE); else @@ -1818,7 +1819,11 @@ int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi) llist_add(&req.llnode, &cprc->issue_list); atomic_inc(&cprc->queued_ckpt); - /* update issue_list before we wake up issue_checkpoint thread */ + /* + * update issue_list before we wake up issue_checkpoint thread, + * this smp_mb() pairs with another barrier in ___wait_event(), + * see more details in comments of waitqueue_active(). + */ smp_mb(); if (waitqueue_active(&cprc->ckpt_wait_queue)) diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 77fa342de38f..53b13787eb2c 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -76,12 +76,6 @@ bool f2fs_is_compressed_page(struct page *page) return false; if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page)) return false; - /* - * page->private may be set with pid. - * pid_max is enough to check if it is traced. 
- */ - if (IS_IO_TRACED_PAGE(page)) - return false; f2fs_bug_on(F2FS_M_SB(page->mapping), *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC); @@ -896,7 +890,6 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index) static bool __cluster_may_compress(struct compress_ctx *cc) { - struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode); loff_t i_size = i_size_read(cc->inode); unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE); int i; @@ -904,12 +897,7 @@ static bool __cluster_may_compress(struct compress_ctx *cc) for (i = 0; i < cc->cluster_size; i++) { struct page *page = cc->rpages[i]; - f2fs_bug_on(sbi, !page); - - if (unlikely(f2fs_cp_error(sbi))) - return false; - if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) - return false; + f2fs_bug_on(F2FS_I_SB(cc->inode), !page); /* beyond EOF */ if (page->index >= nr_pages) @@ -1353,6 +1341,7 @@ unlock_continue: if (fio.compr_blocks) f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false); f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true); + add_compr_block_stat(inode, cc->nr_cpages); set_inode_flag(cc->inode, FI_APPEND_WRITE); if (cc->cluster_idx == 0) diff --git a/fs/f2fs/compress.h b/fs/f2fs/compress.h deleted file mode 100644 index e69de29bb2d1..000000000000 --- a/fs/f2fs/compress.h +++ /dev/null diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 4e5257c763d0..96f1a354f89f 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -1086,6 +1086,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count) for (; count > 0; dn->ofs_in_node++) { block_t blkaddr = f2fs_data_blkaddr(dn); + if (blkaddr == NULL_ADDR) { dn->data_blkaddr = NEW_ADDR; __set_data_blkaddr(dn); @@ -1722,7 +1723,7 @@ static int get_data_block_dio_write(struct inode *inode, sector_t iblock, return __get_data_block(inode, iblock, bh_result, create, F2FS_GET_BLOCK_DIO, NULL, f2fs_rw_hint_to_seg_type(inode->i_write_hint), - IS_SWAPFILE(inode) ? 
false : true); + true); } static int get_data_block_dio(struct inode *inode, sector_t iblock, @@ -1837,6 +1838,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, int ret = 0; bool compr_cluster = false; unsigned int cluster_size = F2FS_I(inode)->i_cluster_size; + loff_t maxbytes; if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { ret = f2fs_precache_extents(inode); @@ -1850,6 +1852,15 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, inode_lock(inode); + maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS; + if (start > maxbytes) { + ret = -EFBIG; + goto out; + } + + if (len > maxbytes || (maxbytes - len) < start) + len = maxbytes - start; + if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { ret = f2fs_xattr_fiemap(inode, fieinfo); goto out; @@ -3755,6 +3766,7 @@ int f2fs_migrate_page(struct address_space *mapping, if (atomic_written) { struct inmem_pages *cur; + list_for_each_entry(cur, &fi->inmem_pages, list) if (cur->page == page) { cur->page = newpage; @@ -3780,11 +3792,64 @@ int f2fs_migrate_page(struct address_space *mapping, #endif #ifdef CONFIG_SWAP +static int f2fs_is_file_aligned(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + block_t main_blkaddr = SM_I(sbi)->main_blkaddr; + block_t cur_lblock; + block_t last_lblock; + block_t pblock; + unsigned long nr_pblocks; + unsigned int blocks_per_sec = BLKS_PER_SEC(sbi); + int ret = 0; + + cur_lblock = 0; + last_lblock = bytes_to_blks(inode, i_size_read(inode)); + + while (cur_lblock < last_lblock) { + struct f2fs_map_blocks map; + + memset(&map, 0, sizeof(map)); + map.m_lblk = cur_lblock; + map.m_len = last_lblock - cur_lblock; + map.m_next_pgofs = NULL; + map.m_next_extent = NULL; + map.m_seg_type = NO_CHECK_TYPE; + map.m_may_create = false; + + ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP); + if (ret) + goto out; + + /* hole */ + if (!(map.m_flags & F2FS_MAP_FLAGS)) { + f2fs_err(sbi, "Swapfile has holes\n"); + ret = -ENOENT; + goto out; + } + + pblock = map.m_pblk; + nr_pblocks = map.m_len; + + if ((pblock - main_blkaddr) & (blocks_per_sec - 1) || + nr_pblocks & (blocks_per_sec - 1)) { + f2fs_err(sbi, "Swapfile does not align to section"); + ret = -EINVAL; + goto out; + } + + cur_lblock += nr_pblocks; + } +out: + return ret; +} + static int check_swap_activate_fast(struct swap_info_struct *sis, struct file *swap_file, sector_t *span) { struct address_space *mapping = swap_file->f_mapping; struct inode *inode = mapping->host; + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); sector_t cur_lblock; sector_t last_lblock; sector_t pblock; @@ -3792,8 +3857,8 @@ static int check_swap_activate_fast(struct swap_info_struct *sis, sector_t highest_pblock = 0; int nr_extents = 0; unsigned long nr_pblocks; - u64 len; - int ret; + unsigned int blocks_per_sec = BLKS_PER_SEC(sbi); + int ret = 0; /* * Map all the blocks into the extent list. 
This code doesn't try @@ -3801,31 +3866,41 @@ static int check_swap_activate_fast(struct swap_info_struct *sis, */ cur_lblock = 0; last_lblock = bytes_to_blks(inode, i_size_read(inode)); - len = i_size_read(inode); - while (cur_lblock <= last_lblock && cur_lblock < sis->max) { + while (cur_lblock < last_lblock && cur_lblock < sis->max) { struct f2fs_map_blocks map; - pgoff_t next_pgofs; cond_resched(); memset(&map, 0, sizeof(map)); map.m_lblk = cur_lblock; - map.m_len = bytes_to_blks(inode, len) - cur_lblock; - map.m_next_pgofs = &next_pgofs; + map.m_len = last_lblock - cur_lblock; + map.m_next_pgofs = NULL; + map.m_next_extent = NULL; map.m_seg_type = NO_CHECK_TYPE; + map.m_may_create = false; ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP); if (ret) - goto err_out; + goto out; /* hole */ - if (!(map.m_flags & F2FS_MAP_FLAGS)) - goto err_out; + if (!(map.m_flags & F2FS_MAP_FLAGS)) { + f2fs_err(sbi, "Swapfile has holes\n"); + ret = -ENOENT; + goto out; + } pblock = map.m_pblk; nr_pblocks = map.m_len; + if ((pblock - SM_I(sbi)->main_blkaddr) & (blocks_per_sec - 1) || + nr_pblocks & (blocks_per_sec - 1)) { + f2fs_err(sbi, "Swapfile does not align to section"); + ret = -EINVAL; + goto out; + } + if (cur_lblock + nr_pblocks >= sis->max) nr_pblocks = sis->max - cur_lblock; @@ -3854,9 +3929,6 @@ static int check_swap_activate_fast(struct swap_info_struct *sis, sis->highest_bit = cur_lblock - 1; out: return ret; -err_out: - pr_err("swapon: swapfile has holes\n"); - return -EINVAL; } /* Copied from generic_swapfile_activate() to check any holes */ @@ -3865,6 +3937,7 @@ static int check_swap_activate(struct swap_info_struct *sis, { struct address_space *mapping = swap_file->f_mapping; struct inode *inode = mapping->host; + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); unsigned blocks_per_page; unsigned long page_no; sector_t probe_block; @@ -3872,11 +3945,15 @@ static int check_swap_activate(struct swap_info_struct *sis, sector_t lowest_block = -1; sector_t highest_block = 0; int nr_extents = 0; - int ret; + int ret = 0; if (PAGE_SIZE == F2FS_BLKSIZE) return check_swap_activate_fast(sis, swap_file, span); + ret = f2fs_is_file_aligned(inode); + if (ret) + goto out; + blocks_per_page = bytes_to_blks(inode, PAGE_SIZE); /* @@ -3891,13 +3968,14 @@ static int check_swap_activate(struct swap_info_struct *sis, unsigned block_in_page; sector_t first_block; sector_t block = 0; - int err = 0; cond_resched(); block = probe_block; - err = bmap(inode, &block); - if (err || !block) + ret = bmap(inode, &block); + if (ret) + goto out; + if (!block) goto bad_bmap; first_block = block; @@ -3913,9 +3991,10 @@ static int check_swap_activate(struct swap_info_struct *sis, block_in_page++) { block = probe_block + block_in_page; - err = bmap(inode, &block); - - if (err || !block) + ret = bmap(inode, &block); + if (ret) + goto out; + if (!block) goto bad_bmap; if (block != first_block + block_in_page) { @@ -3955,8 +4034,8 @@ reprobe: out: return ret; bad_bmap: - pr_err("swapon: swapfile has holes\n"); - return -EINVAL; + f2fs_err(sbi, "Swapfile has holes\n"); + return -ENOENT; } static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file, diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 91855d5721cd..c03949a7ccff 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -173,6 +173,7 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->util_invalid = 50 - si->util_free - si->util_valid; for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) { struct curseg_info *curseg = CURSEG_I(sbi, 
i); + si->curseg[i] = curseg->segno; si->cursec[i] = GET_SEC_FROM_SEG(sbi, curseg->segno); si->curzone[i] = GET_ZONE_FROM_SEC(sbi, si->cursec[i]); @@ -300,10 +301,12 @@ get_cache: si->page_mem = 0; if (sbi->node_inode) { unsigned npages = NODE_MAPPING(sbi)->nrpages; + si->page_mem += (unsigned long long)npages << PAGE_SHIFT; } if (sbi->meta_inode) { unsigned npages = META_MAPPING(sbi)->nrpages; + si->page_mem += (unsigned long long)npages << PAGE_SHIFT; } } diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index e211a1b6b013..dc7ce79672b8 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -471,6 +471,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, struct page *page, struct inode *inode) { enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA; + lock_page(page); f2fs_wait_on_page_writeback(page, type, true, true); de->ino = cpu_to_le32(inode->i_ino); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 11a20dc505aa..044878866ca3 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -97,6 +97,7 @@ extern const char *f2fs_fault_name[FAULT_MAX]; #define F2FS_MOUNT_NORECOVERY 0x04000000 #define F2FS_MOUNT_ATGC 0x08000000 #define F2FS_MOUNT_MERGE_CHECKPOINT 0x10000000 +#define F2FS_MOUNT_GC_MERGE 0x20000000 #define F2FS_OPTION(sbi) ((sbi)->mount_opt) #define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option) @@ -637,21 +638,26 @@ enum { #define FADVISE_MODIFIABLE_BITS (FADVISE_COLD_BIT | FADVISE_HOT_BIT) #define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT) -#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT) #define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT) -#define file_lost_pino(inode) set_file(inode, FADVISE_LOST_PINO_BIT) #define file_clear_cold(inode) clear_file(inode, FADVISE_COLD_BIT) + +#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT) +#define file_lost_pino(inode) set_file(inode, FADVISE_LOST_PINO_BIT) #define file_got_pino(inode) clear_file(inode, FADVISE_LOST_PINO_BIT) + #define file_is_encrypt(inode) is_file(inode, FADVISE_ENCRYPT_BIT) #define file_set_encrypt(inode) set_file(inode, FADVISE_ENCRYPT_BIT) -#define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT) + #define file_enc_name(inode) is_file(inode, FADVISE_ENC_NAME_BIT) #define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT) + #define file_keep_isize(inode) is_file(inode, FADVISE_KEEP_SIZE_BIT) #define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT) + #define file_is_hot(inode) is_file(inode, FADVISE_HOT_BIT) #define file_set_hot(inode) set_file(inode, FADVISE_HOT_BIT) #define file_clear_hot(inode) clear_file(inode, FADVISE_HOT_BIT) + #define file_is_verity(inode) is_file(inode, FADVISE_VERITY_BIT) #define file_set_verity(inode) set_file(inode, FADVISE_VERITY_BIT) @@ -860,7 +866,7 @@ struct f2fs_nm_info { /* NAT cache management */ struct radix_tree_root nat_root;/* root of the nat entry cache */ struct radix_tree_root nat_set_root;/* root of the nat set cache */ - struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */ + struct rw_semaphore nat_tree_lock; /* protect nat entry tree */ struct list_head nat_entries; /* cached nat entry list (clean) */ spinlock_t nat_list_lock; /* protect clean nat entry list */ unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */ @@ -1297,14 +1303,6 @@ enum { #define IS_DUMMY_WRITTEN_PAGE(page) \ (page_private(page) == DUMMY_WRITTEN_PAGE) -#ifdef CONFIG_F2FS_IO_TRACE -#define IS_IO_TRACED_PAGE(page) \ - (page_private(page) > 0 
&& \ - page_private(page) < (unsigned long)PID_MAX_LIMIT) -#else -#define IS_IO_TRACED_PAGE(page) (0) -#endif - /* For compression */ enum compress_algorithm_type { COMPRESS_LZO, @@ -1623,6 +1621,11 @@ struct f2fs_sb_info { #ifdef CONFIG_F2FS_FS_COMPRESSION struct kmem_cache *page_array_slab; /* page array entry */ unsigned int page_array_slab_size; /* default page array slab size */ + + /* For runtime compression statistics */ + u64 compr_written_block; + u64 compr_saved_block; + u32 compr_new_inode; #endif }; @@ -2215,6 +2218,7 @@ static inline block_t __cp_payload(struct f2fs_sb_info *sbi) static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + void *tmp_ptr = &ckpt->sit_nat_version_bitmap; int offset; if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { @@ -2224,7 +2228,7 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) * if large_nat_bitmap feature is enabled, leave checksum * protection for all nat/sit bitmaps. */ - return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32); + return tmp_ptr + offset + sizeof(__le32); } if (__cp_payload(sbi) > 0) { @@ -2235,7 +2239,7 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) } else { offset = (flag == NAT_BITMAP) ? le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; - return &ckpt->sit_nat_version_bitmap + offset; + return tmp_ptr + offset; } } @@ -3302,7 +3306,6 @@ void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname); /* * node.c */ -struct dnode_of_data; struct node_info; int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid); @@ -3379,6 +3382,7 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi); int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); +bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno); void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi); void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi); void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi); @@ -3386,7 +3390,7 @@ void f2fs_get_new_segment(struct f2fs_sb_info *sbi, unsigned int *newseg, bool new_sec, int dir); void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, unsigned int start, unsigned int end); -void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type); +void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force); void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, @@ -3550,7 +3554,7 @@ void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi); int f2fs_start_gc_thread(struct f2fs_sb_info *sbi); void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi); block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode); -int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, +int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force, unsigned int segno); void f2fs_build_gc_manager(struct f2fs_sb_info *sbi); int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count); @@ -3958,6 +3962,18 @@ int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi); void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi); int __init f2fs_init_compress_cache(void); void f2fs_destroy_compress_cache(void); +#define inc_compr_inode_stat(inode) \ 
+ do { \ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \ + sbi->compr_new_inode++; \ + } while (0) +#define add_compr_block_stat(inode, blocks) \ + do { \ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \ + int diff = F2FS_I(inode)->i_cluster_size - blocks; \ + sbi->compr_written_block += blocks; \ + sbi->compr_saved_block += diff; \ + } while (0) #else static inline bool f2fs_is_compressed_page(struct page *page) { return false; } static inline bool f2fs_is_compress_backend_ready(struct inode *inode) @@ -3986,6 +4002,7 @@ static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { } static inline int __init f2fs_init_compress_cache(void) { return 0; } static inline void f2fs_destroy_compress_cache(void) { } +#define inc_compr_inode_stat(inode) do { } while (0) #endif static inline void set_compress_context(struct inode *inode) @@ -4009,6 +4026,7 @@ static inline void set_compress_context(struct inode *inode) F2FS_I(inode)->i_flags |= F2FS_COMPR_FL; set_inode_flag(inode, FI_COMPRESSED_FILE); stat_inc_compr_inode(inode); + inc_compr_inode_stat(inode); f2fs_mark_inode_dirty_sync(inode, true); } @@ -4179,8 +4197,7 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, if (F2FS_IO_ALIGNED(sbi)) return true; } - if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) && - !IS_SWAPFILE(inode)) + if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED)) return true; return false; diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 8a56acbcee4c..44a4650aea7b 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -1622,9 +1622,10 @@ static int expand_inode_data(struct inode *inode, loff_t offset, struct f2fs_map_blocks map = { .m_next_pgofs = NULL, .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE, .m_may_create = true }; - pgoff_t pg_end; + pgoff_t pg_start, pg_end; loff_t new_size = i_size_read(inode); loff_t off_end; + block_t expanded = 0; int err; err = inode_newsize_ok(inode, (len + offset)); @@ -1637,11 +1638,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset, f2fs_balance_fs(sbi, true); + pg_start = ((unsigned long long)offset) >> PAGE_SHIFT; pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT; off_end = (offset + len) & (PAGE_SIZE - 1); - map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT; - map.m_len = pg_end - map.m_lblk; + map.m_lblk = pg_start; + map.m_len = pg_end - pg_start; if (off_end) map.m_len++; @@ -1649,19 +1651,15 @@ static int expand_inode_data(struct inode *inode, loff_t offset, return 0; if (f2fs_is_pinned_file(inode)) { - block_t len = (map.m_len >> sbi->log_blocks_per_seg) << - sbi->log_blocks_per_seg; - block_t done = 0; + block_t sec_blks = BLKS_PER_SEC(sbi); + block_t sec_len = roundup(map.m_len, sec_blks); - if (map.m_len % sbi->blocks_per_seg) - len += sbi->blocks_per_seg; - - map.m_len = sbi->blocks_per_seg; + map.m_len = sec_blks; next_alloc: if (has_not_enough_free_secs(sbi, 0, GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) { down_write(&sbi->gc_lock); - err = f2fs_gc(sbi, true, false, NULL_SEGNO); + err = f2fs_gc(sbi, true, false, false, NULL_SEGNO); if (err && err != -ENODATA && err != -EAGAIN) goto out_err; } @@ -1669,7 +1667,7 @@ next_alloc: down_write(&sbi->pin_sem); f2fs_lock_op(sbi); - f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA_PINNED); + f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false); f2fs_unlock_op(sbi); map.m_seg_type = CURSEG_COLD_DATA_PINNED; @@ -1677,24 +1675,25 @@ next_alloc: 
up_write(&sbi->pin_sem); - done += map.m_len; - len -= map.m_len; + expanded += map.m_len; + sec_len -= map.m_len; map.m_lblk += map.m_len; - if (!err && len) + if (!err && sec_len) goto next_alloc; - map.m_len = done; + map.m_len = expanded; } else { err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO); + expanded = map.m_len; } out_err: if (err) { pgoff_t last_off; - if (!map.m_len) + if (!expanded) return err; - last_off = map.m_lblk + map.m_len - 1; + last_off = pg_start + expanded - 1; /* update new size to the failed position */ new_size = (last_off == pg_end) ? offset + len : @@ -2434,7 +2433,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg) down_write(&sbi->gc_lock); } - ret = f2fs_gc(sbi, sync, true, NULL_SEGNO); + ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO); out: mnt_drop_write_file(filp); return ret; @@ -2470,7 +2469,8 @@ do_more: down_write(&sbi->gc_lock); } - ret = f2fs_gc(sbi, range->sync, true, GET_SEGNO(sbi, range->start)); + ret = f2fs_gc(sbi, range->sync, true, false, + GET_SEGNO(sbi, range->start)); if (ret) { if (ret == -EBUSY) ret = -EAGAIN; @@ -2527,7 +2527,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, { struct inode *inode = file_inode(filp); struct f2fs_map_blocks map = { .m_next_extent = NULL, - .m_seg_type = NO_CHECK_TYPE , + .m_seg_type = NO_CHECK_TYPE, .m_may_create = false }; struct extent_info ei = {0, 0, 0}; pgoff_t pg_start, pg_end, next_pgofs; @@ -2923,7 +2923,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg) sm->last_victim[GC_CB] = end_segno + 1; sm->last_victim[GC_GREEDY] = end_segno + 1; sm->last_victim[ALLOC_NEXT] = end_segno + 1; - ret = f2fs_gc(sbi, true, true, start_segno); + ret = f2fs_gc(sbi, true, true, true, start_segno); if (ret == -EAGAIN) ret = 0; else if (ret < 0) @@ -4311,8 +4311,13 @@ write: clear_inode_flag(inode, FI_NO_PREALLOC); /* if we couldn't write data, we should deallocate blocks. 
*/ - if (preallocated && i_size_read(inode) < target_size) + if (preallocated && i_size_read(inode) < target_size) { + down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + down_write(&F2FS_I(inode)->i_mmap_sem); f2fs_truncate(inode); + up_write(&F2FS_I(inode)->i_mmap_sem); + up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + } if (ret > 0) f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret); diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 39330ad3c44e..8d1f17ab94d8 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -31,19 +31,24 @@ static int gc_thread_func(void *data) struct f2fs_sb_info *sbi = data; struct f2fs_gc_kthread *gc_th = sbi->gc_thread; wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; + wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq; unsigned int wait_ms; wait_ms = gc_th->min_sleep_time; set_freezable(); do { - bool sync_mode; + bool sync_mode, foreground = false; wait_event_interruptible_timeout(*wq, kthread_should_stop() || freezing(current) || + waitqueue_active(fggc_wq) || gc_th->gc_wake, msecs_to_jiffies(wait_ms)); + if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq)) + foreground = true; + /* give it a try one time */ if (gc_th->gc_wake) gc_th->gc_wake = 0; @@ -90,7 +95,10 @@ static int gc_thread_func(void *data) goto do_gc; } - if (!down_write_trylock(&sbi->gc_lock)) { + if (foreground) { + down_write(&sbi->gc_lock); + goto do_gc; + } else if (!down_write_trylock(&sbi->gc_lock)) { stat_other_skip_bggc_count(sbi); goto next; } @@ -107,14 +115,22 @@ static int gc_thread_func(void *data) else increase_sleep_time(gc_th, &wait_ms); do_gc: - stat_inc_bggc_count(sbi->stat_info); + if (!foreground) + stat_inc_bggc_count(sbi->stat_info); sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC; + /* foreground GC was been triggered via f2fs_balance_fs() */ + if (foreground) + sync_mode = false; + /* if return value is not zero, no victim was selected */ - if (f2fs_gc(sbi, sync_mode, true, NULL_SEGNO)) + if (f2fs_gc(sbi, sync_mode, !foreground, false, NULL_SEGNO)) wait_ms = gc_th->no_gc_sleep_time; + if (foreground) + wake_up_all(&gc_th->fggc_wq); + trace_f2fs_background_gc(sbi->sb, wait_ms, prefree_segments(sbi), free_segments(sbi)); @@ -144,10 +160,11 @@ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi) gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME; gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME; - gc_th->gc_wake= 0; + gc_th->gc_wake = 0; sbi->gc_thread = gc_th; init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); + init_waitqueue_head(&sbi->gc_thread->fggc_wq); sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev)); if (IS_ERR(gc_th->f2fs_gc_task)) { @@ -162,9 +179,11 @@ out: void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi) { struct f2fs_gc_kthread *gc_th = sbi->gc_thread; + if (!gc_th) return; kthread_stop(gc_th->f2fs_gc_task); + wake_up_all(&gc_th->fggc_wq); kfree(gc_th); sbi->gc_thread = NULL; } @@ -392,10 +411,6 @@ static void add_victim_entry(struct f2fs_sb_info *sbi, if (p->gc_mode == GC_AT && get_valid_blocks(sbi, segno, true) == 0) return; - - if (p->alloc_mode == AT_SSR && - get_seg_entry(sbi, segno)->ckpt_valid_blocks == 0) - return; } for (i = 0; i < sbi->segs_per_sec; i++) @@ -728,11 +743,27 @@ retry: if (sec_usage_check(sbi, secno)) goto next; + /* Don't touch checkpointed data */ - if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) && - get_ckpt_valid_blocks(sbi, segno) && - p.alloc_mode == LFS)) - goto next; + if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { + if 
(p.alloc_mode == LFS) { + /* + * LFS is set to find source section during GC. + * The victim should have no checkpointed data. + */ + if (get_ckpt_valid_blocks(sbi, segno, true)) + goto next; + } else { + /* + * SSR | AT_SSR are set to find target segment + * for writes which can be full by checkpointed + * and newly written blocks. + */ + if (!f2fs_segment_has_free_slot(sbi, segno)) + goto next; + } + } + if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap)) goto next; @@ -828,6 +859,7 @@ static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode) static void put_gc_inode(struct gc_inode_list *gc_list) { struct inode_entry *ie, *next_ie; + list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) { radix_tree_delete(&gc_list->iroot, ie->inode->i_ino); iput(ie->inode); @@ -952,9 +984,11 @@ block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode) bidx = node_ofs - 1; } else if (node_ofs <= indirect_blks) { int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1); + bidx = node_ofs - 2 - dec; } else { int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1); + bidx = node_ofs - 5 - dec; } return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode); @@ -1120,7 +1154,8 @@ static int move_data_block(struct inode *inode, block_t bidx, block_t newaddr; int err = 0; bool lfs_mode = f2fs_lfs_mode(fio.sbi); - int type = fio.sbi->am.atgc_enabled ? + int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) && + (fio.sbi->gc_mode != GC_URGENT_HIGH) ? CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA; /* do not read out */ @@ -1354,7 +1389,8 @@ out: * the victim data block is ignored. */ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, - struct gc_inode_list *gc_list, unsigned int segno, int gc_type) + struct gc_inode_list *gc_list, unsigned int segno, int gc_type, + bool force_migrate) { struct super_block *sb = sbi->sb; struct f2fs_summary *entry; @@ -1383,8 +1419,8 @@ next_step: * race condition along with SSR block allocation. */ if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) || - get_valid_blocks(sbi, segno, true) == - BLKS_PER_SEC(sbi)) + (!force_migrate && get_valid_blocks(sbi, segno, true) == + BLKS_PER_SEC(sbi))) return submitted; if (check_valid_map(sbi, segno, off) == 0) @@ -1519,7 +1555,8 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int start_segno, - struct gc_inode_list *gc_list, int gc_type) + struct gc_inode_list *gc_list, int gc_type, + bool force_migrate) { struct page *sum_page; struct f2fs_summary_block *sum; @@ -1606,7 +1643,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, gc_type); else submitted += gc_data_segment(sbi, sum->entries, gc_list, - segno, gc_type); + segno, gc_type, + force_migrate); stat_inc_seg_count(sbi, type, gc_type); migrated++; @@ -1634,7 +1672,7 @@ skip: } int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, - bool background, unsigned int segno) + bool background, bool force, unsigned int segno) { int gc_type = sync ? 
FG_GC : BG_GC; int sec_freed = 0, seg_freed = 0, total_freed = 0; @@ -1696,7 +1734,7 @@ gc_more: if (ret) goto stop; - seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type); + seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force); if (gc_type == FG_GC && seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) sec_freed++; @@ -1835,7 +1873,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi, .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS), }; - do_garbage_collect(sbi, segno, &gc_list, FG_GC); + do_garbage_collect(sbi, segno, &gc_list, FG_GC, true); put_gc_inode(&gc_list); if (!gc_only && get_valid_blocks(sbi, segno, true)) { @@ -1974,7 +2012,20 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) /* stop CP to protect MAIN_SEC in free_segment_range */ f2fs_lock_op(sbi); + + spin_lock(&sbi->stat_lock); + if (shrunk_blocks + valid_user_blocks(sbi) + + sbi->current_reserved_blocks + sbi->unusable_block_count + + F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count) + err = -ENOSPC; + spin_unlock(&sbi->stat_lock); + + if (err) + goto out_unlock; + err = free_segment_range(sbi, secs, true); + +out_unlock: f2fs_unlock_op(sbi); up_write(&sbi->gc_lock); if (err) diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h index 0c8dae12dc51..3fe145e8e594 100644 --- a/fs/f2fs/gc.h +++ b/fs/f2fs/gc.h @@ -42,6 +42,12 @@ struct f2fs_gc_kthread { /* for changing gc mode */ unsigned int gc_wake; + + /* for GC_MERGE mount option */ + wait_queue_head_t fggc_wq; /* + * caller of f2fs_balance_fs() + * will wait on this wait queue. + */ }; struct gc_inode_list { diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 993caefcd2bb..92652ca7a7c8 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -219,7 +219,8 @@ out: f2fs_put_page(page, 1); - f2fs_balance_fs(sbi, dn.node_changed); + if (!err) + f2fs_balance_fs(sbi, dn.node_changed); return err; } diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 349d9cb933ee..b401f08569f7 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -666,6 +666,7 @@ retry: node_page = f2fs_get_node_page(sbi, inode->i_ino); if (IS_ERR(node_page)) { int err = PTR_ERR(node_page); + if (err == -ENOMEM) { cond_resched(); goto retry; @@ -698,7 +699,7 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) /* * We need to balance fs here to prevent from producing dirty node pages - * during the urgent cleaning time when runing out of free sections. + * during the urgent cleaning time when running out of free sections. 
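The new guard in f2fs_resize_fs() boils down to a single overcommit comparison taken under stat_lock before any segment is freed. Restated as standalone C with simplified field names (all quantities assumed to be block counts):

#include <stdbool.h>

struct space {
    unsigned long long user_blocks;      /* total usable blocks */
    unsigned long long valid_blocks;     /* currently allocated */
    unsigned long long reserved_blocks;  /* runtime reservation */
    unsigned long long unusable_blocks;  /* checkpoint-disabled blocks */
    unsigned long long root_reserved;    /* mount-time reserve */
};

static bool shrink_would_overcommit(const struct space *s,
                                    unsigned long long shrunk_blocks)
{
    return shrunk_blocks + s->valid_blocks + s->reserved_blocks +
           s->unusable_blocks + s->root_reserved > s->user_blocks;
}

int main(void)
{
    struct space s = {
        .user_blocks = 1000, .valid_blocks = 700,
        .reserved_blocks = 50, .unusable_blocks = 20,
        .root_reserved = 30,
    };

    /* shrinking by 250 blocks would overcommit: 250+700+50+20+30 > 1000 */
    return shrink_would_overcommit(&s, 250) ? 0 : 1;
}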
*/ f2fs_update_inode_page(inode); if (wbc && wbc->nr_to_write) diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 377c6b161b23..a9cd9cf97229 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -418,6 +418,7 @@ struct dentry *f2fs_get_parent(struct dentry *child) { struct page *page; unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot_name, &page); + if (!ino) { if (IS_ERR(page)) return ERR_CAST(page); @@ -627,6 +628,7 @@ static const char *f2fs_get_link(struct dentry *dentry, struct delayed_call *done) { const char *link = page_get_link(dentry, inode, done); + if (!IS_ERR(link) && !*link) { /* this is broken symlink case */ do_delayed_call(done); @@ -765,6 +767,7 @@ out_fail: static int f2fs_rmdir(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); + if (f2fs_empty_dir(inode)) return f2fs_unlink(dir, dentry); return -ENOTEMPTY; diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 4b0e2e3c2c88..e67ce5f13b98 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -43,11 +43,15 @@ int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid) bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type) { struct f2fs_nm_info *nm_i = NM_I(sbi); + struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct sysinfo val; unsigned long avail_ram; unsigned long mem_size = 0; bool res = false; + if (!nm_i) + return true; + si_meminfo(&val); /* only uses low memory */ @@ -89,6 +93,10 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type) /* it allows 20% / total_ram for inmemory pages */ mem_size = get_pages(sbi, F2FS_INMEM_PAGES); res = mem_size < (val.totalram / 5); + } else if (type == DISCARD_CACHE) { + mem_size = (atomic_read(&dcc->discard_cmd_cnt) * + sizeof(struct discard_cmd)) >> PAGE_SHIFT; + res = mem_size < (avail_ram * nm_i->ram_thresh / 100); } else { if (!sbi->sb->s_bdi->wb.dirty_exceeded) return true; @@ -462,6 +470,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, /* increment version no as node is removed */ if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) { unsigned char version = nat_get_version(e); + nat_set_version(e, inc_node_version(version)); } @@ -1383,7 +1392,7 @@ repeat: goto out_err; } page_hit: - if(unlikely(nid != nid_of_node(page))) { + if (unlikely(nid != nid_of_node(page))) { f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]", nid, nid_of_node(page), ino_of_node(page), ofs_of_node(page), cpver_of_node(page), @@ -1775,7 +1784,7 @@ continue_unlock: out: if (nwritten) f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE); - return ret ? -EIO: 0; + return ret ? 
-EIO : 0; } static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data) @@ -2117,8 +2126,8 @@ static int __insert_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i) { struct f2fs_nm_info *nm_i = NM_I(sbi); - int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i); + if (err) return err; @@ -2785,6 +2794,9 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi) struct f2fs_nat_entry raw_ne; nid_t nid = le32_to_cpu(nid_in_journal(journal, i)); + if (f2fs_check_nid_range(sbi, nid)) + continue; + raw_ne = nat_in_journal(journal, i); ne = __lookup_nat_cache(nm_i, nid); @@ -2980,6 +2992,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) while ((found = __gang_lookup_nat_set(nm_i, set_idx, SETVEC_SIZE, setvec))) { unsigned idx; + set_idx = setvec[found - 1]->set + 1; for (idx = 0; idx < found; idx++) __adjust_nat_entry_set(setvec[idx], &sets, diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index f84541b57acb..7a45c0f10629 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -147,6 +147,7 @@ enum mem_type { INO_ENTRIES, /* indicates inode entries */ EXTENT_CACHE, /* indicates extent cache */ INMEM_PAGES, /* indicates inmemory pages */ + DISCARD_CACHE, /* indicates memory of cached discard cmds */ BASE_CHECK, /* check kernel status */ }; diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index da75d5d52f0a..422146c6d866 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -458,6 +458,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, /* Get the previous summary */ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { struct curseg_info *curseg = CURSEG_I(sbi, i); + if (curseg->segno == segno) { sum = curseg->sum_blk->entries[blkoff]; goto got_it; @@ -875,5 +876,5 @@ out: #endif sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ - return ret ? ret: err; + return ret ? 
ret : err; } diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index c2866561263e..c605415840b5 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -186,7 +186,10 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page) { struct inmem_pages *new; - f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE); + if (PagePrivate(page)) + set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE); + else + f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE); new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS); @@ -324,23 +327,27 @@ void f2fs_drop_inmem_pages(struct inode *inode) struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); - while (!list_empty(&fi->inmem_pages)) { + do { mutex_lock(&fi->inmem_lock); + if (list_empty(&fi->inmem_pages)) { + fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0; + + spin_lock(&sbi->inode_lock[ATOMIC_FILE]); + if (!list_empty(&fi->inmem_ilist)) + list_del_init(&fi->inmem_ilist); + if (f2fs_is_atomic_file(inode)) { + clear_inode_flag(inode, FI_ATOMIC_FILE); + sbi->atomic_files--; + } + spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); + + mutex_unlock(&fi->inmem_lock); + break; + } __revoke_inmem_pages(inode, &fi->inmem_pages, true, false, true); mutex_unlock(&fi->inmem_lock); - } - - fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0; - - spin_lock(&sbi->inode_lock[ATOMIC_FILE]); - if (!list_empty(&fi->inmem_ilist)) - list_del_init(&fi->inmem_ilist); - if (f2fs_is_atomic_file(inode)) { - clear_inode_flag(inode, FI_ATOMIC_FILE); - sbi->atomic_files--; - } - spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); + } while (1); } void f2fs_drop_inmem_page(struct inode *inode, struct page *page) @@ -503,8 +510,19 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) * dir/node pages without enough free segments. */ if (has_not_enough_free_secs(sbi, 0, 0)) { - down_write(&sbi->gc_lock); - f2fs_gc(sbi, false, false, NULL_SEGNO); + if (test_opt(sbi, GC_MERGE) && sbi->gc_thread && + sbi->gc_thread->f2fs_gc_task) { + DEFINE_WAIT(wait); + + prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait, + TASK_UNINTERRUPTIBLE); + wake_up(&sbi->gc_thread->gc_wait_queue_head); + io_schedule(); + finish_wait(&sbi->gc_thread->fggc_wq, &wait); + } else { + down_write(&sbi->gc_lock); + f2fs_gc(sbi, false, false, false, NULL_SEGNO); + } } } @@ -653,7 +671,11 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino) llist_add(&cmd.llnode, &fcc->issue_list); - /* update issue_list before we wake up issue_flush thread */ + /* + * update issue_list before we wake up issue_flush thread, this + * smp_mb() pairs with another barrier in ___wait_event(), see + * more details in comments of waitqueue_active(). 
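Under gc_merge, f2fs_balance_fs() no longer performs foreground GC in the caller's context: it parks on fggc_wq, kicks the GC thread through gc_wait_queue_head, and sleeps until the thread finishes a pass and calls wake_up_all(). A pthread analogue of that handoff (a userspace model only, not the kernel wait-queue API):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t gc_wq = PTHREAD_COND_INITIALIZER;   /* gc_wait_queue_head */
static pthread_cond_t fggc_wq = PTHREAD_COND_INITIALIZER; /* fggc_wq */
static bool fg_pending, stop;

static void *gc_thread_func(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!stop) {
        while (!fg_pending && !stop)
            pthread_cond_wait(&gc_wq, &lock);
        if (stop)
            break;
        /* ... one GC pass on behalf of the waiter ... */
        fg_pending = false;
        pthread_cond_broadcast(&fggc_wq);  /* wake_up_all() */
    }
    pthread_mutex_unlock(&lock);
    return NULL;
}

static void balance_fs(void)
{
    pthread_mutex_lock(&lock);
    fg_pending = true;
    pthread_cond_signal(&gc_wq);   /* kick the GC thread */
    while (fg_pending)             /* park, like io_schedule() */
        pthread_cond_wait(&fggc_wq, &lock);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, gc_thread_func, NULL);
    balance_fs();
    pthread_mutex_lock(&lock);
    stop = true;
    pthread_cond_signal(&gc_wq);
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);
    return 0;
}

The broadcast mirrors wake_up_all(&gc_th->fggc_wq): every balancer parked on the queue is released by one completed pass.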
+ */ smp_mb(); if (waitqueue_active(&fcc->flush_wait_queue)) @@ -861,7 +883,7 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) mutex_lock(&dirty_i->seglist_lock); valid_blocks = get_valid_blocks(sbi, segno, false); - ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno); + ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false); if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) || ckpt_valid_blocks == usable_blocks)) { @@ -946,7 +968,7 @@ static unsigned int get_free_segment(struct f2fs_sb_info *sbi) for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) { if (get_valid_blocks(sbi, segno, false)) continue; - if (get_ckpt_valid_blocks(sbi, segno)) + if (get_ckpt_valid_blocks(sbi, segno, false)) continue; mutex_unlock(&dirty_i->seglist_lock); return segno; @@ -1095,6 +1117,8 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi, struct discard_policy *dpolicy, int discard_type, unsigned int granularity) { + struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + /* common policy */ dpolicy->type = discard_type; dpolicy->sync = true; @@ -1114,7 +1138,9 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi, dpolicy->ordered = true; if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) { dpolicy->granularity = 1; - dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME; + if (atomic_read(&dcc->discard_cmd_cnt)) + dpolicy->max_interval = + DEF_MIN_DISCARD_ISSUE_TIME; } } else if (discard_type == DPOLICY_FORCE) { dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME; @@ -1730,8 +1756,15 @@ static int issue_discard_thread(void *data) set_freezable(); do { - __init_discard_policy(sbi, &dpolicy, DPOLICY_BG, - dcc->discard_granularity); + if (sbi->gc_mode == GC_URGENT_HIGH || + !f2fs_available_free_memory(sbi, DISCARD_CACHE)) + __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1); + else + __init_discard_policy(sbi, &dpolicy, DPOLICY_BG, + dcc->discard_granularity); + + if (!atomic_read(&dcc->discard_cmd_cnt)) + wait_ms = dpolicy.max_interval; wait_event_interruptible_timeout(*q, kthread_should_stop() || freezing(current) || @@ -1755,9 +1788,8 @@ static int issue_discard_thread(void *data) wait_ms = dpolicy.max_interval; continue; } - - if (sbi->gc_mode == GC_URGENT_HIGH) - __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1); + if (!atomic_read(&dcc->discard_cmd_cnt)) + continue; sb_start_intwrite(sbi->sb); @@ -1765,7 +1797,7 @@ static int issue_discard_thread(void *data) if (issued > 0) { __wait_all_discard_cmd(sbi, &dpolicy); wait_ms = dpolicy.min_interval; - } else if (issued == -1){ + } else if (issued == -1) { wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME); if (!wait_ms) wait_ms = dpolicy.mid_interval; @@ -2142,6 +2174,7 @@ static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, unsigned int segno, int modified) { struct seg_entry *se = get_seg_entry(sbi, segno); + se->type = type; if (modified) __mark_sit_entry_dirty(sbi, segno); @@ -2333,6 +2366,7 @@ static void __add_sum_entry(struct f2fs_sb_info *sbi, int type, { struct curseg_info *curseg = CURSEG_I(sbi, type); void *addr = curseg->sum_blk; + addr += curseg->next_blkoff * sizeof(struct f2fs_summary); memcpy(addr, sum, sizeof(struct f2fs_summary)); } @@ -2604,22 +2638,20 @@ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) curseg->alloc_type = LFS; } -static void __next_free_blkoff(struct f2fs_sb_info *sbi, - struct curseg_info *seg, block_t start) +static int __next_free_blkoff(struct f2fs_sb_info *sbi, + int segno, block_t 
start) { - struct seg_entry *se = get_seg_entry(sbi, seg->segno); + struct seg_entry *se = get_seg_entry(sbi, segno); int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); unsigned long *target_map = SIT_I(sbi)->tmp_map; unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; unsigned long *cur_map = (unsigned long *)se->cur_valid_map; - int i, pos; + int i; for (i = 0; i < entries; i++) target_map[i] = ckpt_map[i] | cur_map[i]; - pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start); - - seg->next_blkoff = pos; + return __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start); } /* @@ -2631,11 +2663,18 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi, struct curseg_info *seg) { if (seg->alloc_type == SSR) - __next_free_blkoff(sbi, seg, seg->next_blkoff + 1); + seg->next_blkoff = + __next_free_blkoff(sbi, seg->segno, + seg->next_blkoff + 1); else seg->next_blkoff++; } +bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno) +{ + return __next_free_blkoff(sbi, segno, 0) < sbi->blocks_per_seg; +} + /* * This function always allocates a used segment(from dirty seglist) by SSR * manner, so it should recover the existing segment information of valid blocks @@ -2661,7 +2700,7 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush) reset_curseg(sbi, type, 1); curseg->alloc_type = SSR; - __next_free_blkoff(sbi, curseg, 0); + curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0); sum_page = f2fs_get_sum_page(sbi, new_segno); if (IS_ERR(sum_page)) { @@ -2893,7 +2932,8 @@ unlock: up_read(&SM_I(sbi)->curseg_lock); } -static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type) +static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type, + bool new_sec, bool force) { struct curseg_info *curseg = CURSEG_I(sbi, type); unsigned int old_segno; @@ -2901,32 +2941,43 @@ static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type) if (!curseg->inited) goto alloc; - if (!curseg->next_blkoff && - !get_valid_blocks(sbi, curseg->segno, false) && - !get_ckpt_valid_blocks(sbi, curseg->segno)) - return; + if (force || curseg->next_blkoff || + get_valid_blocks(sbi, curseg->segno, new_sec)) + goto alloc; + if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec)) + return; alloc: old_segno = curseg->segno; SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true); locate_dirty_segment(sbi, old_segno); } -void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type) +static void __allocate_new_section(struct f2fs_sb_info *sbi, + int type, bool force) +{ + __allocate_new_segment(sbi, type, true, force); +} + +void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force) { + down_read(&SM_I(sbi)->curseg_lock); down_write(&SIT_I(sbi)->sentry_lock); - __allocate_new_segment(sbi, type); + __allocate_new_section(sbi, type, force); up_write(&SIT_I(sbi)->sentry_lock); + up_read(&SM_I(sbi)->curseg_lock); } void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) { int i; + down_read(&SM_I(sbi)->curseg_lock); down_write(&SIT_I(sbi)->sentry_lock); for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) - __allocate_new_segment(sbi, i); + __allocate_new_segment(sbi, i, false, false); up_write(&SIT_I(sbi)->sentry_lock); + up_read(&SM_I(sbi)->curseg_lock); } static const struct segment_allocation default_salloc_ops = { @@ -3239,7 +3290,9 @@ static int __get_segment_type_6(struct f2fs_io_info *fio) struct inode *inode = fio->page->mapping->host; if (is_cold_data(fio->page)) { - if 
(fio->sbi->am.atgc_enabled) + if (fio->sbi->am.atgc_enabled && + (fio->io_type == FS_DATA_IO) && + (fio->sbi->gc_mode != GC_URGENT_HIGH)) return CURSEG_ALL_DATA_ATGC; else return CURSEG_COLD_DATA; @@ -3365,12 +3418,12 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, f2fs_inode_chksum_set(sbi, page); } - if (F2FS_IO_ALIGNED(sbi)) - fio->retry = false; - if (fio) { struct f2fs_bio_info *io; + if (F2FS_IO_ALIGNED(sbi)) + fio->retry = false; + INIT_LIST_HEAD(&fio->list); fio->in_list = true; io = sbi->write_io[fio->type] + fio->temp; @@ -3499,7 +3552,13 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio) set_sbi_flag(sbi, SBI_NEED_FSCK); f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.", __func__, segno); - return -EFSCORRUPTED; + err = -EFSCORRUPTED; + goto drop_bio; + } + + if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) || f2fs_cp_error(sbi)) { + err = -EIO; + goto drop_bio; } stat_inc_inplace_blocks(fio->sbi); @@ -3514,6 +3573,15 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio) } return err; +drop_bio: + if (fio->bio) { + struct bio *bio = *(fio->bio); + + bio->bi_status = BLK_STS_IOERR; + bio_endio(bio); + fio->bio = NULL; + } + return err; } static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi, @@ -3539,6 +3607,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, struct seg_entry *se; int type; unsigned short old_blkoff; + unsigned char old_alloc_type; segno = GET_SEGNO(sbi, new_blkaddr); se = get_seg_entry(sbi, segno); @@ -3572,6 +3641,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, old_cursegno = curseg->segno; old_blkoff = curseg->next_blkoff; + old_alloc_type = curseg->alloc_type; /* change the current segment */ if (segno != curseg->segno) { @@ -3606,6 +3676,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, change_curseg(sbi, type, true); } curseg->next_blkoff = old_blkoff; + curseg->alloc_type = old_alloc_type; } up_write(&sit_i->sentry_lock); @@ -3717,6 +3788,7 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi) for (j = 0; j < blk_off; j++) { struct f2fs_summary *s; + s = (struct f2fs_summary *)(kaddr + offset); seg_i->sum_blk->entries[j] = *s; offset += SUMMARY_SIZE; @@ -3779,6 +3851,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) if (__exist_node_summaries(sbi)) { struct f2fs_summary *ns = &sum->entries[0]; int i; + for (i = 0; i < sbi->blocks_per_seg; i++, ns++) { ns->version = 0; ns->ofs_in_node = 0; @@ -3880,6 +3953,7 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) /* Step 3: write summary entries */ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { unsigned short blkoff; + seg_i = CURSEG_I(sbi, i); if (sbi->ckpt->alloc_type[i] == SSR) blkoff = sbi->blocks_per_seg; @@ -3916,6 +3990,7 @@ static void write_normal_summaries(struct f2fs_sb_info *sbi, block_t blkaddr, int type) { int i, end; + if (IS_DATASEG(type)) end = type + NR_CURSEG_DATA_TYPE; else @@ -4499,6 +4574,7 @@ static void init_free_segmap(struct f2fs_sb_info *sbi) /* set use the current segments */ for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) { struct curseg_info *curseg_t = CURSEG_I(sbi, type); + __set_test_and_inuse(sbi, curseg_t->segno); } } @@ -4731,7 +4807,8 @@ static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi, } static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx, - void *data) { + void *data) +{ memcpy(data, 
zone, sizeof(struct blk_zone)); return 0; } @@ -4783,7 +4860,8 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type) f2fs_notice(sbi, "Assign new section to curseg[%d]: " "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff); - allocate_segment_by_default(sbi, type, true); + + f2fs_allocate_new_section(sbi, type, true); /* check consistency of the zone curseg pointed to */ if (check_zone_write_pointer(sbi, zbd, &zone)) @@ -4847,8 +4925,10 @@ struct check_zone_write_pointer_args { }; static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx, - void *data) { + void *data) +{ struct check_zone_write_pointer_args *args; + args = (struct check_zone_write_pointer_args *)data; return check_zone_write_pointer(args->sbi, args->fdev, zone); @@ -5127,6 +5207,7 @@ static void discard_dirty_segmap(struct f2fs_sb_info *sbi, static void destroy_victim_secmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + kvfree(dirty_i->victim_secmap); } @@ -5171,6 +5252,7 @@ static void destroy_curseg(struct f2fs_sb_info *sbi) static void destroy_free_segmap(struct f2fs_sb_info *sbi) { struct free_segmap_info *free_i = SM_I(sbi)->free_info; + if (!free_i) return; SM_I(sbi)->free_info = NULL; diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index e9a7a637d688..050230c70a53 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -172,12 +172,10 @@ enum { /* * BG_GC means the background cleaning job. * FG_GC means the on-demand cleaning job. - * FORCE_FG_GC means on-demand cleaning job in background. */ enum { BG_GC = 0, FG_GC, - FORCE_FG_GC, }; /* for a function parameter to select a victim segment */ @@ -361,8 +359,20 @@ static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi, } static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi, - unsigned int segno) + unsigned int segno, bool use_section) { + if (use_section && __is_large_section(sbi)) { + unsigned int start_segno = START_SEGNO(segno); + unsigned int blocks = 0; + int i; + + for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) { + struct seg_entry *se = get_seg_entry(sbi, start_segno); + + blocks += se->ckpt_valid_blocks; + } + return blocks; + } return get_seg_entry(sbi, segno)->ckpt_valid_blocks; } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 82592b19b4e0..7d325bfaf65a 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -151,6 +151,8 @@ enum { Opt_compress_chksum, Opt_compress_mode, Opt_atgc, + Opt_gc_merge, + Opt_nogc_merge, Opt_err, }; @@ -223,6 +225,8 @@ static match_table_t f2fs_tokens = { {Opt_compress_chksum, "compress_chksum"}, {Opt_compress_mode, "compress_mode=%s"}, {Opt_atgc, "atgc"}, + {Opt_gc_merge, "gc_merge"}, + {Opt_nogc_merge, "nogc_merge"}, {Opt_err, NULL}, }; @@ -555,6 +559,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) while ((p = strsep(&options, ",")) != NULL) { int token; + if (!*p) continue; /* @@ -1073,6 +1078,12 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) case Opt_atgc: set_opt(sbi, ATGC); break; + case Opt_gc_merge: + set_opt(sbi, GC_MERGE); + break; + case Opt_nogc_merge: + clear_opt(sbi, GC_MERGE); + break; default: f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value", p); @@ -1616,6 +1627,7 @@ static inline void f2fs_show_quota_options(struct seq_file *seq, #endif } +#ifdef CONFIG_F2FS_FS_COMPRESSION static inline void f2fs_show_compress_options(struct seq_file *seq, struct super_block *sb) { @@ -1661,6 +1673,7 @@ static 
inline void f2fs_show_compress_options(struct seq_file *seq, else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER) seq_printf(seq, ",compress_mode=%s", "user"); } +#endif static int f2fs_show_options(struct seq_file *seq, struct dentry *root) { @@ -1673,6 +1686,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF) seq_printf(seq, ",background_gc=%s", "off"); + if (test_opt(sbi, GC_MERGE)) + seq_puts(seq, ",gc_merge"); + if (test_opt(sbi, DISABLE_ROLL_FORWARD)) seq_puts(seq, ",disable_roll_forward"); if (test_opt(sbi, NORECOVERY)) @@ -1824,6 +1840,7 @@ static void default_options(struct f2fs_sb_info *sbi) set_opt(sbi, EXTENT_CACHE); set_opt(sbi, NOHEAP); clear_opt(sbi, DISABLE_CHECKPOINT); + set_opt(sbi, MERGE_CHECKPOINT); F2FS_OPTION(sbi).unusable_cap = 0; sbi->sb->s_flags |= SB_LAZYTIME; set_opt(sbi, FLUSH_MERGE); @@ -1865,7 +1882,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) while (!f2fs_time_over(sbi, DISABLE_TIME)) { down_write(&sbi->gc_lock); - err = f2fs_gc(sbi, true, false, NULL_SEGNO); + err = f2fs_gc(sbi, true, false, false, NULL_SEGNO); if (err == -ENODATA) { err = 0; break; @@ -1876,7 +1893,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) ret = sync_filesystem(sbi->sb); if (ret || err) { - err = ret ? ret: err; + err = ret ? ret : err; goto restore_flag; } @@ -1925,8 +1942,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) struct f2fs_mount_info org_mount_opt; unsigned long old_sb_flags; int err; - bool need_restart_gc = false; - bool need_stop_gc = false; + bool need_restart_gc = false, need_stop_gc = false; + bool need_restart_ckpt = false, need_stop_ckpt = false; + bool need_restart_flush = false, need_stop_flush = false; bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE); bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT); bool no_io_align = !F2FS_IO_ALIGNED(sbi); @@ -2035,7 +2053,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) * option. Also sync the filesystem. 
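The gc_merge/nogc_merge pair added above follows the usual f2fs pattern: strsep() splits the option string and each recognized token flips one flag, with the last occurrence winning. A self-contained model of that loop (struct and function names are illustrative; strsep() is a BSD/glibc extension):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct f2fs_opts { bool gc_merge; };

static int parse_options(char *options, struct f2fs_opts *opts)
{
    char *p;

    while ((p = strsep(&options, ",")) != NULL) {
        if (!*p)
            continue;
        if (!strcmp(p, "gc_merge"))
            opts->gc_merge = true;
        else if (!strcmp(p, "nogc_merge"))
            opts->gc_merge = false;
        else
            return -1;  /* unrecognized mount option */
    }
    return 0;
}

int main(void)
{
    char opts[] = "gc_merge,nogc_merge,gc_merge";
    struct f2fs_opts o = { false };

    if (parse_options(opts, &o))
        return 1;
    printf("gc_merge=%d\n", o.gc_merge);  /* last one wins: 1 */
    return 0;
}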
*/ if ((*flags & SB_RDONLY) || - F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF) { + (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF && + !test_opt(sbi, GC_MERGE))) { if (sbi->gc_thread) { f2fs_stop_gc_thread(sbi); need_restart_gc = true; @@ -2057,18 +2076,11 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) clear_sbi_flag(sbi, SBI_IS_CLOSE); } - if (checkpoint_changed) { - if (test_opt(sbi, DISABLE_CHECKPOINT)) { - err = f2fs_disable_checkpoint(sbi); - if (err) - goto restore_gc; - } else { - f2fs_enable_checkpoint(sbi); - } - } - - if (!test_opt(sbi, DISABLE_CHECKPOINT) && - test_opt(sbi, MERGE_CHECKPOINT)) { + if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) || + !test_opt(sbi, MERGE_CHECKPOINT)) { + f2fs_stop_ckpt_thread(sbi); + need_restart_ckpt = true; + } else { err = f2fs_start_ckpt_thread(sbi); if (err) { f2fs_err(sbi, @@ -2076,8 +2088,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) err); goto restore_gc; } - } else { - f2fs_stop_ckpt_thread(sbi); + need_stop_ckpt = true; } /* @@ -2087,11 +2098,24 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) { clear_opt(sbi, FLUSH_MERGE); f2fs_destroy_flush_cmd_control(sbi, false); + need_restart_flush = true; } else { err = f2fs_create_flush_cmd_control(sbi); if (err) - goto restore_gc; + goto restore_ckpt; + need_stop_flush = true; } + + if (checkpoint_changed) { + if (test_opt(sbi, DISABLE_CHECKPOINT)) { + err = f2fs_disable_checkpoint(sbi); + if (err) + goto restore_flush; + } else { + f2fs_enable_checkpoint(sbi); + } + } + skip: #ifdef CONFIG_QUOTA /* Release old quota file names */ @@ -2106,6 +2130,21 @@ skip: adjust_unusable_cap_perc(sbi); *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); return 0; +restore_flush: + if (need_restart_flush) { + if (f2fs_create_flush_cmd_control(sbi)) + f2fs_warn(sbi, "background flush thread has stopped"); + } else if (need_stop_flush) { + clear_opt(sbi, FLUSH_MERGE); + f2fs_destroy_flush_cmd_control(sbi, false); + } +restore_ckpt: + if (need_restart_ckpt) { + if (f2fs_start_ckpt_thread(sbi)) + f2fs_warn(sbi, "background ckpt thread has stopped"); + } else if (need_stop_ckpt) { + f2fs_stop_ckpt_thread(sbi); + } restore_gc: if (need_restart_gc) { if (f2fs_start_gc_thread(sbi)) @@ -3719,7 +3758,7 @@ try_onemore: sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS; for (i = 0; i < NR_PAGE_TYPE; i++) { - int n = (i == META) ? 1: NR_TEMP_TYPE; + int n = (i == META) ? 1 : NR_TEMP_TYPE; int j; sbi->write_io[i] = @@ -3833,7 +3872,7 @@ try_onemore: /* setup checkpoint request control and start checkpoint issue thread */ f2fs_init_ckpt_req_control(sbi); - if (!test_opt(sbi, DISABLE_CHECKPOINT) && + if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, MERGE_CHECKPOINT)) { err = f2fs_start_ckpt_thread(sbi); if (err) { @@ -3929,10 +3968,18 @@ try_onemore: * previous checkpoint was not done by clean system shutdown. 
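The remount rework above replaces a single restore_gc target with a chain of labels, each guarded by a need_restart_*/need_stop_* pair recording which direction the unwind must go. The skeleton of that pattern, reduced to two steps with stubbed start/stop helpers:

#include <stdbool.h>

static int start_ckpt_thread(void) { return 0; }
static void stop_ckpt_thread(void) { }
static int create_flush_control(void) { return -1; }  /* simulate failure */

static int remount(bool want_ckpt, bool want_flush)
{
    bool need_restart_ckpt = false, need_stop_ckpt = false;
    int err;

    if (!want_ckpt) {
        stop_ckpt_thread();
        need_restart_ckpt = true;   /* undo = restart it */
    } else {
        err = start_ckpt_thread();
        if (err)
            return err;
        need_stop_ckpt = true;      /* undo = stop it */
    }

    if (want_flush) {
        err = create_flush_control();
        if (err)
            goto restore_ckpt;      /* unwind the ckpt step */
    }
    return 0;

restore_ckpt:
    if (need_restart_ckpt)
        start_ckpt_thread();        /* best effort, as in the patch */
    else if (need_stop_ckpt)
        stop_ckpt_thread();
    return err;
}

int main(void)
{
    /* flush creation fails, so the ckpt thread started here is stopped */
    return remount(true, true) ? 1 : 0;
}

Recording both flags matters because the undo action depends on what the forward path did: a thread that was stopped must be restarted, and one that was started must be stopped.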
*/ if (f2fs_hw_is_readonly(sbi)) { - if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) - f2fs_err(sbi, "Need to recover fsync data, but write access unavailable"); - else - f2fs_info(sbi, "write access unavailable, skipping recovery"); + if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { + err = f2fs_recover_fsync_data(sbi, true); + if (err > 0) { + err = -EROFS; + f2fs_err(sbi, "Need to recover fsync data, but " + "write access unavailable, please try " + "mount w/ disable_roll_forward or norecovery"); + } + if (err < 0) + goto free_meta; + } + f2fs_info(sbi, "write access unavailable, skipping recovery"); goto reset_checkpoint; } @@ -3989,7 +4036,8 @@ reset_checkpoint: * If filesystem is not mounted as read-only then * do start the gc_thread. */ - if (F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF && !f2fs_readonly(sb)) { + if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF || + test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) { /* After POR, we can run background GC thread.*/ err = f2fs_start_gc_thread(sbi); if (err) diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index e38a7f6921dd..39b522ec73e7 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -12,6 +12,7 @@ #include <linux/seq_file.h> #include <linux/unicode.h> #include <linux/ioprio.h> +#include <linux/sysfs.h> #include "f2fs.h" #include "segment.h" @@ -91,6 +92,13 @@ static ssize_t free_segments_show(struct f2fs_attr *a, (unsigned long long)(free_segments(sbi))); } +static ssize_t ovp_segments_show(struct f2fs_attr *a, + struct f2fs_sb_info *sbi, char *buf) +{ + return sprintf(buf, "%llu\n", + (unsigned long long)(overprovision_segments(sbi))); +} + static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a, struct f2fs_sb_info *sbi, char *buf) { @@ -282,6 +290,17 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a, return len; } +#ifdef CONFIG_F2FS_FS_COMPRESSION + if (!strcmp(a->attr.name, "compr_written_block")) + return sysfs_emit(buf, "%llu\n", sbi->compr_written_block); + + if (!strcmp(a->attr.name, "compr_saved_block")) + return sysfs_emit(buf, "%llu\n", sbi->compr_saved_block); + + if (!strcmp(a->attr.name, "compr_new_inode")) + return sysfs_emit(buf, "%u\n", sbi->compr_new_inode); +#endif + ui = (unsigned int *)(ptr + a->offset); return sprintf(buf, "%u\n", *ui); @@ -458,6 +477,24 @@ out: return count; } +#ifdef CONFIG_F2FS_FS_COMPRESSION + if (!strcmp(a->attr.name, "compr_written_block") || + !strcmp(a->attr.name, "compr_saved_block")) { + if (t != 0) + return -EINVAL; + sbi->compr_written_block = 0; + sbi->compr_saved_block = 0; + return count; + } + + if (!strcmp(a->attr.name, "compr_new_inode")) { + if (t != 0) + return -EINVAL; + sbi->compr_new_inode = 0; + return count; + } +#endif + *ui = (unsigned int)t; return count; @@ -629,6 +666,7 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, node_io_flag, node_io_flag); F2FS_RW_ATTR(CPRC_INFO, ckpt_req_control, ckpt_thread_ioprio, ckpt_thread_ioprio); F2FS_GENERAL_RO_ATTR(dirty_segments); F2FS_GENERAL_RO_ATTR(free_segments); +F2FS_GENERAL_RO_ATTR(ovp_segments); F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes); F2FS_GENERAL_RO_ATTR(features); F2FS_GENERAL_RO_ATTR(current_reserved_blocks); @@ -668,6 +706,9 @@ F2FS_FEATURE_RO_ATTR(sb_checksum, FEAT_SB_CHECKSUM); F2FS_FEATURE_RO_ATTR(casefold, FEAT_CASEFOLD); #ifdef CONFIG_F2FS_FS_COMPRESSION F2FS_FEATURE_RO_ATTR(compression, FEAT_COMPRESSION); +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_written_block, compr_written_block); +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_saved_block, compr_saved_block); +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, 
compr_new_inode, compr_new_inode); #endif #define ATTR_LIST(name) (&f2fs_attr_##name.attr) @@ -715,6 +756,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(ckpt_thread_ioprio), ATTR_LIST(dirty_segments), ATTR_LIST(free_segments), + ATTR_LIST(ovp_segments), ATTR_LIST(unusable), ATTR_LIST(lifetime_write_kbytes), ATTR_LIST(features), @@ -731,6 +773,11 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(moved_blocks_background), ATTR_LIST(avg_vblocks), #endif +#ifdef CONFIG_F2FS_FS_COMPRESSION + ATTR_LIST(compr_written_block), + ATTR_LIST(compr_saved_block), + ATTR_LIST(compr_new_inode), +#endif NULL, }; ATTRIBUTE_GROUPS(f2fs); diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c index a7beff28a3c5..03549b5ba204 100644 --- a/fs/f2fs/verity.c +++ b/fs/f2fs/verity.c @@ -152,40 +152,73 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc, size_t desc_size, u64 merkle_tree_size) { struct inode *inode = file_inode(filp); + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); u64 desc_pos = f2fs_verity_metadata_pos(inode) + merkle_tree_size; struct fsverity_descriptor_location dloc = { .version = cpu_to_le32(F2FS_VERIFY_VER), .size = cpu_to_le32(desc_size), .pos = cpu_to_le64(desc_pos), }; - int err = 0; + int err = 0, err2 = 0; - if (desc != NULL) { - /* Succeeded; write the verity descriptor. */ - err = pagecache_write(inode, desc, desc_size, desc_pos); + /* + * If an error already occurred (which fs/verity/ signals by passing + * desc == NULL), then only clean-up is needed. + */ + if (desc == NULL) + goto cleanup; - /* Write all pages before clearing FI_VERITY_IN_PROGRESS. */ - if (!err) - err = filemap_write_and_wait(inode->i_mapping); - } + /* Append the verity descriptor. */ + err = pagecache_write(inode, desc, desc_size, desc_pos); + if (err) + goto cleanup; + + /* + * Write all pages (both data and verity metadata). Note that this must + * happen before clearing FI_VERITY_IN_PROGRESS; otherwise pages beyond + * i_size won't be written properly. For crash consistency, this also + * must happen before the verity inode flag gets persisted. + */ + err = filemap_write_and_wait(inode->i_mapping); + if (err) + goto cleanup; + + /* Set the verity xattr. */ + err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY, + F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc), + NULL, XATTR_CREATE); + if (err) + goto cleanup; - /* If we failed, truncate anything we wrote past i_size. */ - if (desc == NULL || err) - f2fs_truncate(inode); + /* Finally, set the verity inode flag. */ + file_set_verity(inode); + f2fs_set_inode_flags(inode); + f2fs_mark_inode_dirty_sync(inode, true); clear_inode_flag(inode, FI_VERITY_IN_PROGRESS); + return 0; - if (desc != NULL && !err) { - err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY, - F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc), - NULL, XATTR_CREATE); - if (!err) { - file_set_verity(inode); - f2fs_set_inode_flags(inode); - f2fs_mark_inode_dirty_sync(inode, true); - } +cleanup: + /* + * Verity failed to be enabled, so clean up by truncating any verity + * metadata that was written beyond i_size (both from cache and from + * disk) and clearing FI_VERITY_IN_PROGRESS. + * + * Taking i_gc_rwsem[WRITE] is needed to stop f2fs garbage collection + * from re-instantiating cached pages we are truncating (since unlike + * normal file accesses, garbage collection isn't limited by i_size). 
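The rewritten f2fs_end_enable_verity() linearizes enabling into descriptor write, page writeback, xattr, then the inode flag, and funnels every failure into one cleanup path that truncates anything written past i_size. Its control-flow skeleton as a runnable userspace sketch (the step functions are stubs, and set_xattr() is wired to fail so the cleanup path runs; the nonzero exit status reflects that simulated failure):

#include <stdio.h>

static int write_descriptor(void)  { return 0; }
static int flush_pages(void)       { return 0; }
static int set_xattr(void)         { return -1; }  /* simulated failure */
static void truncate_metadata(void) { puts("cleanup: truncate metadata"); }

static int enable_verity(void)
{
    int err;

    err = write_descriptor();
    if (err)
        goto cleanup;
    err = flush_pages();   /* must complete before the flag is set */
    if (err)
        goto cleanup;
    err = set_xattr();
    if (err)
        goto cleanup;
    puts("verity enabled");
    return 0;
cleanup:
    truncate_metadata();   /* drop anything written past i_size */
    return err;
}

int main(void) { return enable_verity() ? 1 : 0; }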
+ */ + down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + truncate_inode_pages(inode->i_mapping, inode->i_size); + err2 = f2fs_truncate(inode); + if (err2) { + f2fs_err(sbi, "Truncating verity metadata failed (errno=%d)", + err2); + set_sbi_flag(sbi, SBI_NEED_FSCK); } - return err; + up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + clear_inode_flag(inode, FI_VERITY_IN_PROGRESS); + return err ?: err2; } static int f2fs_get_verity_descriptor(struct inode *inode, void *buf, diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index 490f843ec3bf..c8f34decbf8e 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -488,6 +488,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize, f2fs_wait_on_page_writeback(xpage, NODE, true, true); } else { struct dnode_of_data dn; + set_new_dnode(&dn, inode, NULL, NULL, new_nid); xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET); if (IS_ERR(xpage)) { diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 84c38103aa06..ea7fc5c641c7 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -273,8 +273,7 @@ static void __gfs2_glock_put(struct gfs2_glock *gl) if (mapping) { truncate_inode_pages_final(mapping); if (!gfs2_withdrawn(sdp)) - GLOCK_BUG_ON(gl, mapping->nrpages || - mapping->nrexceptional); + GLOCK_BUG_ON(gl, !mapping_empty(mapping)); } trace_gfs2_glock_put(gl); sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 701c82c36138..a2a42335e8fd 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -463,14 +463,11 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, struct address_space *mapping = &inode->i_data; const pgoff_t start = lstart >> huge_page_shift(h); const pgoff_t end = lend >> huge_page_shift(h); - struct vm_area_struct pseudo_vma; struct pagevec pvec; pgoff_t next, index; int i, freed = 0; bool truncate_op = (lend == LLONG_MAX); - vma_init(&pseudo_vma, current->mm); - pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED); pagevec_init(&pvec); next = start; while (next < end) { @@ -482,10 +479,9 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, for (i = 0; i < pagevec_count(&pvec); ++i) { struct page *page = pvec.pages[i]; - u32 hash; + u32 hash = 0; index = page->index; - hash = hugetlb_fault_mutex_hash(mapping, index); if (!truncate_op) { /* * Only need to hold the fault mutex in the @@ -493,6 +489,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, * page faults. Races are not possible in the * case of truncation. */ + hash = hugetlb_fault_mutex_hash(mapping, index); mutex_lock(&hugetlb_fault_mutex_table[hash]); } @@ -1435,7 +1432,7 @@ static int get_hstate_idx(int page_size_log) if (!h) return -1; - return h - hstates; + return hstate_index(h); } /* diff --git a/fs/inode.c b/fs/inode.c index 9e192bea0630..c93500d84264 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -529,7 +529,14 @@ void clear_inode(struct inode *inode) */ xa_lock_irq(&inode->i_data.i_pages); BUG_ON(inode->i_data.nrpages); - BUG_ON(inode->i_data.nrexceptional); + /* + * Almost always, mapping_empty(&inode->i_data) here; but there are + * two known and long-standing ways in which nodes may get left behind + * (when deep radix-tree node allocation failed partway; or when THP + * collapse_file() failed). Until those two known cases are cleaned up, + * or a cleanup function is called here, do not BUG_ON(!mapping_empty), + * nor even WARN_ON(!mapping_empty). 
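The hugetlbfs hunk moves the fault-mutex hash inside the !truncate_op branch, since only hole punching can race with page faults; truncation never needs the lock. A pthread model of taking a hashed mutex conditionally (table size and hash function are placeholders):

#include <pthread.h>
#include <stdbool.h>

#define TABLE_SIZE 8
static pthread_mutex_t fault_mutex_table[TABLE_SIZE];

static unsigned int fault_mutex_hash(unsigned long index)
{
    return index % TABLE_SIZE;  /* stand-in for the real hash */
}

static void remove_page(unsigned long index, bool truncate_op)
{
    unsigned int hash = 0;

    if (!truncate_op) {
        /* hole punch can race with faults: serialize on the mutex */
        hash = fault_mutex_hash(index);
        pthread_mutex_lock(&fault_mutex_table[hash]);
    }
    /* ... remove the page from the mapping ... */
    if (!truncate_op)
        pthread_mutex_unlock(&fault_mutex_table[hash]);
}

int main(void)
{
    for (int i = 0; i < TABLE_SIZE; i++)
        pthread_mutex_init(&fault_mutex_table[i], NULL);
    remove_page(3, false);  /* hole punch: takes the mutex */
    remove_page(5, true);   /* truncate: no lock needed */
    return 0;
}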
+ */ xa_unlock_irq(&inode->i_data.i_pages); BUG_ON(!list_empty(&inode->i_data.private_list)); BUG_ON(!(inode->i_state & I_FREEING)); diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index f8fb89b10227..4fc8cd698d1a 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -57,6 +57,7 @@ const struct file_operations jffs2_file_operations = .mmap = generic_file_readonly_mmap, .fsync = jffs2_fsync, .splice_read = generic_file_splice_read, + .splice_write = iter_file_splice_write, }; /* jffs2_file_inode_operations */ diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index db72a9d2d0af..b676056826be 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c @@ -1079,7 +1079,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo memcpy(&fd->name, rd->name, checkedlen); fd->name[checkedlen] = 0; - crc = crc32(0, fd->name, rd->nsize); + crc = crc32(0, fd->name, checkedlen); if (crc != je32_to_cpu(rd->name_crc)) { pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", __func__, ofs, je32_to_cpu(rd->name_crc), crc); diff --git a/fs/jffs2/summary.h b/fs/jffs2/summary.h index e4131cb1f1d4..36d9a1280770 100644 --- a/fs/jffs2/summary.h +++ b/fs/jffs2/summary.h @@ -194,18 +194,18 @@ int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb #define jffs2_sum_active() (0) #define jffs2_sum_init(a) (0) -#define jffs2_sum_exit(a) +#define jffs2_sum_exit(a) do { } while (0) #define jffs2_sum_disable_collecting(a) #define jffs2_sum_is_disabled(a) (0) -#define jffs2_sum_reset_collected(a) +#define jffs2_sum_reset_collected(a) do { } while (0) #define jffs2_sum_add_kvec(a,b,c,d) (0) -#define jffs2_sum_move_collected(a,b) +#define jffs2_sum_move_collected(a,b) do { } while (0) #define jffs2_sum_write_sumnode(a) (0) -#define jffs2_sum_add_padding_mem(a,b) -#define jffs2_sum_add_inode_mem(a,b,c) -#define jffs2_sum_add_dirent_mem(a,b,c) -#define jffs2_sum_add_xattr_mem(a,b,c) -#define jffs2_sum_add_xref_mem(a,b,c) +#define jffs2_sum_add_padding_mem(a,b) do { } while (0) +#define jffs2_sum_add_inode_mem(a,b,c) do { } while (0) +#define jffs2_sum_add_dirent_mem(a,b,c) do { } while (0) +#define jffs2_sum_add_xattr_mem(a,b,c) do { } while (0) +#define jffs2_sum_add_xref_mem(a,b,c) do { } while (0) #define jffs2_sum_scan_sumnode(a,b,c,d,e) (0) #endif /* CONFIG_JFFS2_SUMMARY */ diff --git a/fs/locks.c b/fs/locks.c index 5c42363aa811..74b2a1dfe8d8 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1808,6 +1808,9 @@ check_conflicting_open(struct file *filp, const long arg, int flags) if (flags & FL_LAYOUT) return 0; + if (flags & FL_DELEG) + /* We leave these checks to the caller */ + return 0; if (arg == F_RDLCK) return inode_is_open_for_write(inode) ? -EAGAIN : 0; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 7698172ac0c7..b517a8794400 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -354,6 +354,124 @@ static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = { .release = nfsd4_cb_notify_lock_release, }; +/* + * We store the NONE, READ, WRITE, and BOTH bits separately in the + * st_{access,deny}_bmap field of the stateid, in order to track not + * only what share bits are currently in force, but also what + * combinations of share bits previous opens have used. This allows us + * to enforce the recommendation of rfc 3530 14.2.19 that the server + * return an error if the client attempt to downgrade to a combination + * of share bits not explicable by closing some of its previous opens. 
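The jffs2 change limits the name CRC to checkedlen, the number of bytes actually copied and NUL-terminated, rather than the on-media nsize, which may exceed what was validated. A compilable illustration, substituting zlib's crc32() for the kernel's (build with -lz):

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
    /* raw dirent name as read from flash: not NUL-terminated */
    const unsigned char raw[8] = { 'd', 'i', 'r', 'e', 'n', 't', 'x', 'x' };
    unsigned int nsize = 8;        /* length claimed on media */
    unsigned int checkedlen = 6;   /* length actually validated/copied */
    char name[16];

    memcpy(name, raw, checkedlen);
    name[checkedlen] = 0;

    /* fixed code: CRC over the bytes we copied; the old code hashed
     * nsize bytes and could read past the copied, terminated name */
    unsigned long crc = crc32(0L, (const unsigned char *)name, checkedlen);
    printf("name=%s nsize=%u crc=%#lx\n", name, nsize, crc);
    return 0;
}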
+ * + * XXX: This enforcement is actually incomplete, since we don't keep + * track of access/deny bit combinations; so, e.g., we allow: + * + * OPEN allow read, deny write + * OPEN allow both, deny none + * DOWNGRADE allow read, deny none + * + * which we should reject. + */ +static unsigned int +bmap_to_share_mode(unsigned long bmap) +{ + int i; + unsigned int access = 0; + + for (i = 1; i < 4; i++) { + if (test_bit(i, &bmap)) + access |= i; + } + return access; +} + +/* set share access for a given stateid */ +static inline void +set_access(u32 access, struct nfs4_ol_stateid *stp) +{ + unsigned char mask = 1 << access; + + WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); + stp->st_access_bmap |= mask; +} + +/* clear share access for a given stateid */ +static inline void +clear_access(u32 access, struct nfs4_ol_stateid *stp) +{ + unsigned char mask = 1 << access; + + WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); + stp->st_access_bmap &= ~mask; +} + +/* test whether a given stateid has access */ +static inline bool +test_access(u32 access, struct nfs4_ol_stateid *stp) +{ + unsigned char mask = 1 << access; + + return (bool)(stp->st_access_bmap & mask); +} + +/* set share deny for a given stateid */ +static inline void +set_deny(u32 deny, struct nfs4_ol_stateid *stp) +{ + unsigned char mask = 1 << deny; + + WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); + stp->st_deny_bmap |= mask; +} + +/* clear share deny for a given stateid */ +static inline void +clear_deny(u32 deny, struct nfs4_ol_stateid *stp) +{ + unsigned char mask = 1 << deny; + + WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); + stp->st_deny_bmap &= ~mask; +} + +/* test whether a given stateid is denying specific access */ +static inline bool +test_deny(u32 deny, struct nfs4_ol_stateid *stp) +{ + unsigned char mask = 1 << deny; + + return (bool)(stp->st_deny_bmap & mask); +} + +static int nfs4_access_to_omode(u32 access) +{ + switch (access & NFS4_SHARE_ACCESS_BOTH) { + case NFS4_SHARE_ACCESS_READ: + return O_RDONLY; + case NFS4_SHARE_ACCESS_WRITE: + return O_WRONLY; + case NFS4_SHARE_ACCESS_BOTH: + return O_RDWR; + } + WARN_ON_ONCE(1); + return O_RDONLY; +} + +static inline int +access_permit_read(struct nfs4_ol_stateid *stp) +{ + return test_access(NFS4_SHARE_ACCESS_READ, stp) || + test_access(NFS4_SHARE_ACCESS_BOTH, stp) || + test_access(NFS4_SHARE_ACCESS_WRITE, stp); +} + +static inline int +access_permit_write(struct nfs4_ol_stateid *stp) +{ + return test_access(NFS4_SHARE_ACCESS_WRITE, stp) || + test_access(NFS4_SHARE_ACCESS_BOTH, stp); +} + static inline struct nfs4_stateowner * nfs4_get_stateowner(struct nfs4_stateowner *sop) { @@ -543,14 +661,12 @@ static unsigned int ownerstr_hashval(struct xdr_netobj *ownername) #define FILE_HASH_BITS 8 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS) -static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh) +static unsigned int file_hashval(struct svc_fh *fh) { - return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0); -} + struct inode *inode = d_inode(fh->fh_dentry); -static unsigned int file_hashval(struct knfsd_fh *fh) -{ - return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1); + /* XXX: why not (here & in file cache) use inode? 
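The relocated helpers keep one bit per share mode ever used on a stateid and fold the bitmap back into the current access mask on demand. The same logic, extracted into a standalone program (constants mirror NFS4_SHARE_ACCESS_*):

#include <assert.h>

#define SHARE_ACCESS_READ  1
#define SHARE_ACCESS_WRITE 2
#define SHARE_ACCESS_BOTH  3

static void set_access(unsigned int access, unsigned long *bmap)
{
    *bmap |= 1UL << access;   /* bit index == share mode value */
}

static unsigned int bmap_to_share_mode(unsigned long bmap)
{
    unsigned int i, access = 0;

    for (i = 1; i < 4; i++)
        if (bmap & (1UL << i))
            access |= i;
    return access;
}

int main(void)
{
    unsigned long bmap = 0;

    set_access(SHARE_ACCESS_READ, &bmap);
    set_access(SHARE_ACCESS_WRITE, &bmap);
    /* READ|WRITE folds to BOTH (== 3) */
    assert(bmap_to_share_mode(bmap) == SHARE_ACCESS_BOTH);
    return 0;
}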
*/ + return (unsigned int)hash_long(inode->i_ino, FILE_HASH_BITS); } static struct hlist_head file_hashtbl[FILE_HASH_SIZE]; @@ -1153,108 +1269,6 @@ static unsigned int clientstr_hashval(struct xdr_netobj name) } /* - * We store the NONE, READ, WRITE, and BOTH bits separately in the - * st_{access,deny}_bmap field of the stateid, in order to track not - * only what share bits are currently in force, but also what - * combinations of share bits previous opens have used. This allows us - * to enforce the recommendation of rfc 3530 14.2.19 that the server - * return an error if the client attempt to downgrade to a combination - * of share bits not explicable by closing some of its previous opens. - * - * XXX: This enforcement is actually incomplete, since we don't keep - * track of access/deny bit combinations; so, e.g., we allow: - * - * OPEN allow read, deny write - * OPEN allow both, deny none - * DOWNGRADE allow read, deny none - * - * which we should reject. - */ -static unsigned int -bmap_to_share_mode(unsigned long bmap) { - int i; - unsigned int access = 0; - - for (i = 1; i < 4; i++) { - if (test_bit(i, &bmap)) - access |= i; - } - return access; -} - -/* set share access for a given stateid */ -static inline void -set_access(u32 access, struct nfs4_ol_stateid *stp) -{ - unsigned char mask = 1 << access; - - WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); - stp->st_access_bmap |= mask; -} - -/* clear share access for a given stateid */ -static inline void -clear_access(u32 access, struct nfs4_ol_stateid *stp) -{ - unsigned char mask = 1 << access; - - WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); - stp->st_access_bmap &= ~mask; -} - -/* test whether a given stateid has access */ -static inline bool -test_access(u32 access, struct nfs4_ol_stateid *stp) -{ - unsigned char mask = 1 << access; - - return (bool)(stp->st_access_bmap & mask); -} - -/* set share deny for a given stateid */ -static inline void -set_deny(u32 deny, struct nfs4_ol_stateid *stp) -{ - unsigned char mask = 1 << deny; - - WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); - stp->st_deny_bmap |= mask; -} - -/* clear share deny for a given stateid */ -static inline void -clear_deny(u32 deny, struct nfs4_ol_stateid *stp) -{ - unsigned char mask = 1 << deny; - - WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); - stp->st_deny_bmap &= ~mask; -} - -/* test whether a given stateid is denying specific access */ -static inline bool -test_deny(u32 deny, struct nfs4_ol_stateid *stp) -{ - unsigned char mask = 1 << deny; - - return (bool)(stp->st_deny_bmap & mask); -} - -static int nfs4_access_to_omode(u32 access) -{ - switch (access & NFS4_SHARE_ACCESS_BOTH) { - case NFS4_SHARE_ACCESS_READ: - return O_RDONLY; - case NFS4_SHARE_ACCESS_WRITE: - return O_WRONLY; - case NFS4_SHARE_ACCESS_BOTH: - return O_RDWR; - } - WARN_ON_ONCE(1); - return O_RDONLY; -} - -/* * A stateid that had a deny mode associated with it is being released * or downgraded. Recalculate the deny mode on the file. 
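file_hashval() now buckets nfs4_files by inode number via hash_long() instead of jhashing the raw filehandle, which is what makes the fi_aliased bookkeeping below possible: two filehandles for the same inode land on the same hash chain, so insert_file() can spot the alias during one walk. A standalone restatement, where the multiplicative hash is a simplified stand-in for the kernel's hash_long() and a 64-bit unsigned long is assumed:

#include <stdio.h>

#define FILE_HASH_BITS 8
#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)

/* multiplicative hash by the 64-bit golden ratio, keeping the top bits */
static unsigned int hash_long(unsigned long val, unsigned int bits)
{
    return (unsigned int)((val * 0x61C8864680B583EBUL) >> (64 - bits));
}

static unsigned int file_hashval(unsigned long ino)
{
    return hash_long(ino, FILE_HASH_BITS);  /* already < FILE_HASH_SIZE */
}

int main(void)
{
    /* any two filehandles for inode 4242 map to the same bucket */
    printf("bucket(ino=4242) = %u of %d\n", file_hashval(4242),
           FILE_HASH_SIZE);
    return 0;
}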
*/ @@ -3125,6 +3139,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, goto out_nolock; } new->cl_mach_cred = true; + break; case SP4_NONE: break; default: /* checked by xdr code */ @@ -4072,7 +4087,7 @@ static struct nfs4_file *nfsd4_alloc_file(void) } /* OPEN Share state helper functions */ -static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval, +static void nfsd4_init_file(struct svc_fh *fh, unsigned int hashval, struct nfs4_file *fp) { lockdep_assert_held(&state_lock); @@ -4082,12 +4097,14 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval, INIT_LIST_HEAD(&fp->fi_stateids); INIT_LIST_HEAD(&fp->fi_delegations); INIT_LIST_HEAD(&fp->fi_clnt_odstate); - fh_copy_shallow(&fp->fi_fhandle, fh); + fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle); fp->fi_deleg_file = NULL; fp->fi_had_conflict = false; fp->fi_share_deny = 0; memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); memset(fp->fi_access, 0, sizeof(fp->fi_access)); + fp->fi_aliased = false; + fp->fi_inode = d_inode(fh->fh_dentry); #ifdef CONFIG_NFSD_PNFS INIT_LIST_HEAD(&fp->fi_lo_states); atomic_set(&fp->fi_lo_recalls, 0); @@ -4426,13 +4443,13 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net) /* search file_hashtbl[] for file */ static struct nfs4_file * -find_file_locked(struct knfsd_fh *fh, unsigned int hashval) +find_file_locked(struct svc_fh *fh, unsigned int hashval) { struct nfs4_file *fp; hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash, lockdep_is_held(&state_lock)) { - if (fh_match(&fp->fi_fhandle, fh)) { + if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) { if (refcount_inc_not_zero(&fp->fi_ref)) return fp; } @@ -4440,8 +4457,32 @@ find_file_locked(struct knfsd_fh *fh, unsigned int hashval) return NULL; } -struct nfs4_file * -find_file(struct knfsd_fh *fh) +static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh, + unsigned int hashval) +{ + struct nfs4_file *fp; + struct nfs4_file *ret = NULL; + bool alias_found = false; + + spin_lock(&state_lock); + hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash, + lockdep_is_held(&state_lock)) { + if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) { + if (refcount_inc_not_zero(&fp->fi_ref)) + ret = fp; + } else if (d_inode(fh->fh_dentry) == fp->fi_inode) + fp->fi_aliased = alias_found = true; + } + if (likely(ret == NULL)) { + nfsd4_init_file(fh, hashval, new); + new->fi_aliased = alias_found; + ret = new; + } + spin_unlock(&state_lock); + return ret; +} + +static struct nfs4_file * find_file(struct svc_fh *fh) { struct nfs4_file *fp; unsigned int hashval = file_hashval(fh); @@ -4453,7 +4494,7 @@ find_file(struct knfsd_fh *fh) } static struct nfs4_file * -find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh) +find_or_add_file(struct nfs4_file *new, struct svc_fh *fh) { struct nfs4_file *fp; unsigned int hashval = file_hashval(fh); @@ -4464,15 +4505,7 @@ find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh) if (fp) return fp; - spin_lock(&state_lock); - fp = find_file_locked(fh, hashval); - if (likely(fp == NULL)) { - nfsd4_init_file(fh, hashval, new); - fp = new; - } - spin_unlock(&state_lock); - - return fp; + return insert_file(new, fh, hashval); } /* @@ -4485,7 +4518,7 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) struct nfs4_file *fp; __be32 ret = nfs_ok; - fp = find_file(¤t_fh->fh_handle); + fp = find_file(current_fh); if (!fp) return ret; /* Check for conflicting share reservations */ @@ -4880,6 +4913,11 @@ static __be32 
nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, if (nf) nfsd_file_put(nf); + status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode, + access)); + if (status) + goto out_put_access; + status = nfsd4_truncate(rqstp, cur_fh, open); if (status) goto out_put_access; @@ -4951,6 +4989,65 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, return fl; } +static int nfsd4_check_conflicting_opens(struct nfs4_client *clp, + struct nfs4_file *fp) +{ + struct nfs4_ol_stateid *st; + struct file *f = fp->fi_deleg_file->nf_file; + struct inode *ino = locks_inode(f); + int writes; + + writes = atomic_read(&ino->i_writecount); + if (!writes) + return 0; + /* + * There could be multiple filehandles (hence multiple + * nfs4_files) referencing this file, but that's not too + * common; let's just give up in that case rather than + * trying to go look up all the clients using that other + * nfs4_file as well: + */ + if (fp->fi_aliased) + return -EAGAIN; + /* + * If there's a close in progress, make sure that we see it + * clear any fi_fds[] entries before we see it decrement + * i_writecount: + */ + smp_mb__after_atomic(); + + if (fp->fi_fds[O_WRONLY]) + writes--; + if (fp->fi_fds[O_RDWR]) + writes--; + if (writes > 0) + return -EAGAIN; /* There may be non-NFSv4 writers */ + /* + * It's possible there are non-NFSv4 write opens in progress, + * but if they haven't incremented i_writecount yet then they + * also haven't called break lease yet; so, they'll break this + * lease soon enough. So, all that's left to check for is NFSv4 + * opens: + */ + spin_lock(&fp->fi_lock); + list_for_each_entry(st, &fp->fi_stateids, st_perfile) { + if (st->st_openstp == NULL /* it's an open */ && + access_permit_write(st) && + st->st_stid.sc_client != clp) { + spin_unlock(&fp->fi_lock); + return -EAGAIN; + } + } + spin_unlock(&fp->fi_lock); + /* + * There's a small chance that we could be racing with another + * NFSv4 open. However, any open that hasn't added itself to + * the fi_stateids list also hasn't called break_lease yet; so, + * they'll break this lease soon enough. + */ + return 0; +} + static struct nfs4_delegation * nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate) @@ -4970,9 +5067,12 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, nf = find_readable_file(fp); if (!nf) { - /* We should always have a readable file here */ - WARN_ON_ONCE(1); - return ERR_PTR(-EBADF); + /* + * We probably could attempt another open and get a read + * delegation, but for now, don't bother until the + * client actually sends us one. 
+ */ + return ERR_PTR(-EAGAIN); } spin_lock(&state_lock); spin_lock(&fp->fi_lock); @@ -5007,6 +5107,9 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, locks_free_lock(fl); if (status) goto out_clnt_odstate; + status = nfsd4_check_conflicting_opens(clp, fp); + if (status) + goto out_unlock; spin_lock(&state_lock); spin_lock(&fp->fi_lock); @@ -5088,17 +5191,6 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, goto out_no_deleg; if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) goto out_no_deleg; - /* - * Also, if the file was opened for write or - * create, there's a good chance the client's - * about to write to it, resulting in an - * immediate recall (since we don't support - * write delegations): - */ - if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) - goto out_no_deleg; - if (open->op_create == NFS4_OPEN_CREATE) - goto out_no_deleg; break; default: goto out_no_deleg; @@ -5161,7 +5253,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf * and check for delegations in the process of being recalled. * If not found, create the nfs4_file struct */ - fp = find_or_add_file(open->op_file, ¤t_fh->fh_handle); + fp = find_or_add_file(open->op_file, current_fh); if (fp != open->op_file) { status = nfs4_check_deleg(cl, open, &dp); if (status) @@ -5502,21 +5594,6 @@ static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp) return nfs_ok; } -static inline int -access_permit_read(struct nfs4_ol_stateid *stp) -{ - return test_access(NFS4_SHARE_ACCESS_READ, stp) || - test_access(NFS4_SHARE_ACCESS_BOTH, stp) || - test_access(NFS4_SHARE_ACCESS_WRITE, stp); -} - -static inline int -access_permit_write(struct nfs4_ol_stateid *stp) -{ - return test_access(NFS4_SHARE_ACCESS_WRITE, stp) || - test_access(NFS4_SHARE_ACCESS_BOTH, stp); -} - static __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags) { @@ -6288,15 +6365,6 @@ out: return status; } -static inline u64 -end_offset(u64 start, u64 len) -{ - u64 end; - - end = start + len; - return end >= start ? 
end: NFS4_MAX_UINT64; -} - /* last octet in a range */ static inline u64 last_byte_offset(u64 start, u64 len) @@ -6865,11 +6933,20 @@ out: static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) { struct nfsd_file *nf; - __be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf); - if (!err) { - err = nfserrno(vfs_test_lock(nf->nf_file, lock)); - nfsd_file_put(nf); - } + __be32 err; + + err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf); + if (err) + return err; + fh_lock(fhp); /* to block new leases till after test_lock: */ + err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode, + NFSD_MAY_READ)); + if (err) + goto out; + err = nfserrno(vfs_test_lock(nf->nf_file, lock)); +out: + fh_unlock(fhp); + nfsd_file_put(nf); return err; } diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 853bf50a2a9b..c2c3d9077dc5 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1166,6 +1166,7 @@ static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode) inode->i_fop = &simple_dir_operations; inode->i_op = &simple_dir_inode_operations; inc_nlink(inode); + break; default: break; } diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 82ba034fa579..dd5d69921676 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -308,7 +308,7 @@ static int nfsd_init_socks(struct net *net, const struct cred *cred) static int nfsd_users = 0; -static int nfsd_startup_generic(int nrservs) +static int nfsd_startup_generic(void) { int ret; @@ -374,7 +374,7 @@ void nfsd_reset_boot_verifier(struct nfsd_net *nn) write_sequnlock(&nn->boot_lock); } -static int nfsd_startup_net(int nrservs, struct net *net, const struct cred *cred) +static int nfsd_startup_net(struct net *net, const struct cred *cred) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); int ret; @@ -382,7 +382,7 @@ static int nfsd_startup_net(int nrservs, struct net *net, const struct cred *cre if (nn->nfsd_net_up) return 0; - ret = nfsd_startup_generic(nrservs); + ret = nfsd_startup_generic(); if (ret) return ret; ret = nfsd_init_socks(net, cred); @@ -790,7 +790,7 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred) nfsd_up_before = nn->nfsd_net_up; - error = nfsd_startup_net(nrservs, net, cred); + error = nfsd_startup_net(net, cred); if (error) goto out_destroy; error = nn->nfsd_serv->sv_ops->svo_setup(nn->nfsd_serv, diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 54cab651ac1d..e73bdbb1634a 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -516,6 +516,8 @@ struct nfs4_clnt_odstate { */ struct nfs4_file { refcount_t fi_ref; + struct inode * fi_inode; + bool fi_aliased; spinlock_t fi_lock; struct hlist_node fi_hash; /* hash on fi_fhandle */ struct list_head fi_stateids; @@ -669,7 +671,6 @@ extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(struct xdr_netobj name struct xdr_netobj princhash, struct nfsd_net *nn); extern bool nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn); -struct nfs4_file *find_file(struct knfsd_fh *fh); void put_nfs4_file(struct nfs4_file *fi); extern void nfs4_put_copy(struct nfsd4_copy *copy); extern struct nfsd4_copy * diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index e862cab69583..fc9784544b24 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -661,6 +661,9 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) [ilog2(VM_PKEY_BIT4)] = "", #endif #endif /* CONFIG_ARCH_HAS_PKEYS */ +#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR + [ilog2(VM_UFFD_MINOR)] = "ui", +#endif /* 
CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ }; size_t i; diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index 4d17e5382b74..382a54c82930 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -223,7 +223,8 @@ static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino) */ list_for_each_entry_reverse(r, &c->replay_list, list) { ubifs_assert(c, r->sqnum >= rino->sqnum); - if (key_inum(c, &r->key) == key_inum(c, &rino->key)) + if (key_inum(c, &r->key) == key_inum(c, &rino->key) && + key_type(c, &r->key) == UBIFS_INO_KEY) return r->deletion == 0; } diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index c160f718c288..e7693b94e5b5 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c @@ -53,6 +53,9 @@ static int get_default_compressor(struct ubifs_info *c) { + if (ubifs_compr_present(c, UBIFS_COMPR_ZSTD)) + return UBIFS_COMPR_ZSTD; + if (ubifs_compr_present(c, UBIFS_COMPR_LZO)) return UBIFS_COMPR_LZO; diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index ddb2ca636c93..7b572e1414ba 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1552,8 +1552,8 @@ static int mount_ubifs(struct ubifs_info *c) ubifs_msg(c, "LEB size: %d bytes (%d KiB), min./max. I/O unit sizes: %d bytes/%d bytes", c->leb_size, c->leb_size >> 10, c->min_io_size, c->max_write_size); - ubifs_msg(c, "FS size: %lld bytes (%lld MiB, %d LEBs), journal size %lld bytes (%lld MiB, %d LEBs)", - x, x >> 20, c->main_lebs, + ubifs_msg(c, "FS size: %lld bytes (%lld MiB, %d LEBs), max %d LEBs, journal size %lld bytes (%lld MiB, %d LEBs)", + x, x >> 20, c->main_lebs, c->max_leb_cnt, y, y >> 20, c->log_lebs + c->max_bud_cnt); ubifs_msg(c, "reserved for root: %llu bytes (%llu KiB)", c->report_rp_size, c->report_rp_size >> 10); @@ -2232,6 +2232,8 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) goto out_umount; } + import_uuid(&sb->s_uuid, c->uuid); + mutex_unlock(&c->umount_mutex); return 0; diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 0be8cdd4425a..14f92285d04f 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -15,6 +15,7 @@ #include <linux/sched/signal.h> #include <linux/sched/mm.h> #include <linux/mm.h> +#include <linux/mmu_notifier.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/seq_file.h> @@ -196,24 +197,21 @@ static inline struct uffd_msg userfault_msg(unsigned long address, msg_init(&msg); msg.event = UFFD_EVENT_PAGEFAULT; msg.arg.pagefault.address = address; + /* + * These flags indicate why the userfault occurred: + * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault. + * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault. + * - Neither of these flags being set indicates a MISSING fault. + * + * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write + * fault. Otherwise, it was a read fault. + */ if (flags & FAULT_FLAG_WRITE) - /* - * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the - * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE - * was not set in a UFFD_EVENT_PAGEFAULT, it means it - * was a read fault, otherwise if set it means it's - * a write fault. - */ msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE; if (reason & VM_UFFD_WP) - /* - * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the - * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was - * not set in a UFFD_EVENT_PAGEFAULT, it means it was - * a missing fault, otherwise if set it means it's a - * write protect fault. 
- */ msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP; + if (reason & VM_UFFD_MINOR) + msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR; if (features & UFFD_FEATURE_THREAD_ID) msg.arg.pagefault.feat.ptid = task_pid_vnr(current); return msg; @@ -400,8 +398,10 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) BUG_ON(ctx->mm != mm); - VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP)); - VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP)); + /* Any unrecognized flag is a bug. */ + VM_BUG_ON(reason & ~__VM_UFFD_FLAGS); + /* 0 or > 1 flags set is a bug; we expect exactly 1. */ + VM_BUG_ON(!reason || (reason & (reason - 1))); if (ctx->features & UFFD_FEATURE_SIGBUS) goto out; @@ -611,7 +611,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, for (vma = mm->mmap; vma; vma = vma->vm_next) if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; - vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); + vma->vm_flags &= ~__VM_UFFD_FLAGS; } mmap_write_unlock(mm); @@ -643,7 +643,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) octx = vma->vm_userfaultfd_ctx.ctx; if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) { vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; - vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); + vma->vm_flags &= ~__VM_UFFD_FLAGS; return 0; } @@ -725,7 +725,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma, } else { /* Drop uffd context if remap feature not enabled */ vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; - vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); + vma->vm_flags &= ~__VM_UFFD_FLAGS; } } @@ -866,12 +866,12 @@ static int userfaultfd_release(struct inode *inode, struct file *file) for (vma = mm->mmap; vma; vma = vma->vm_next) { cond_resched(); BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^ - !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); + !!(vma->vm_flags & __VM_UFFD_FLAGS)); if (vma->vm_userfaultfd_ctx.ctx != ctx) { prev = vma; continue; } - new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); + new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, new_flags, vma->anon_vma, vma->vm_file, vma->vm_pgoff, @@ -1261,9 +1261,19 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma, unsigned long vm_flags) { /* FIXME: add WP support to hugetlbfs and shmem */ - return vma_is_anonymous(vma) || - ((is_vm_hugetlb_page(vma) || vma_is_shmem(vma)) && - !(vm_flags & VM_UFFD_WP)); + if (vm_flags & VM_UFFD_WP) { + if (is_vm_hugetlb_page(vma) || vma_is_shmem(vma)) + return false; + } + + if (vm_flags & VM_UFFD_MINOR) { + /* FIXME: Add minor fault interception for shmem. 
*/ + if (!is_vm_hugetlb_page(vma)) + return false; + } + + return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || + vma_is_shmem(vma); } static int userfaultfd_register(struct userfaultfd_ctx *ctx, @@ -1289,14 +1299,19 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, ret = -EINVAL; if (!uffdio_register.mode) goto out; - if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING| - UFFDIO_REGISTER_MODE_WP)) + if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES) goto out; vm_flags = 0; if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING) vm_flags |= VM_UFFD_MISSING; if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) vm_flags |= VM_UFFD_WP; + if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) { +#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR + goto out; +#endif + vm_flags |= VM_UFFD_MINOR; + } ret = validate_range(mm, &uffdio_register.range.start, uffdio_register.range.len); @@ -1340,7 +1355,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, cond_resched(); BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ - !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); + !!(cur->vm_flags & __VM_UFFD_FLAGS)); /* check not compatible vmas */ ret = -EINVAL; @@ -1420,8 +1435,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, start = vma->vm_start; vma_end = min(end, vma->vm_end); - new_flags = (vma->vm_flags & - ~(VM_UFFD_MISSING|VM_UFFD_WP)) | vm_flags; + new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags; prev = vma_merge(mm, prev, start, vma_end, new_flags, vma->anon_vma, vma->vm_file, vma->vm_pgoff, vma_policy(vma), @@ -1449,6 +1463,9 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, vma->vm_flags = new_flags; vma->vm_userfaultfd_ctx.ctx = ctx; + if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma)) + hugetlb_unshare_all_pmds(vma); + skip: prev = vma; start = vma->vm_end; @@ -1470,6 +1487,10 @@ out_unlock: if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP)) ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT); + /* CONTINUE ioctl is only supported for MINOR ranges. 
*/ + if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR)) + ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE); + /* * Now that we scanned all vmas we can already tell * userland which ioctls methods are guaranteed to @@ -1540,7 +1561,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, cond_resched(); BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ - !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); + !!(cur->vm_flags & __VM_UFFD_FLAGS)); /* * Check not compatible vmas, not strictly required @@ -1591,7 +1612,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range); } - new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); + new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; prev = vma_merge(mm, prev, start, vma_end, new_flags, vma->anon_vma, vma->vm_file, vma->vm_pgoff, vma_policy(vma), @@ -1823,6 +1844,66 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx, return ret; } +static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg) +{ + __s64 ret; + struct uffdio_continue uffdio_continue; + struct uffdio_continue __user *user_uffdio_continue; + struct userfaultfd_wake_range range; + + user_uffdio_continue = (struct uffdio_continue __user *)arg; + + ret = -EAGAIN; + if (READ_ONCE(ctx->mmap_changing)) + goto out; + + ret = -EFAULT; + if (copy_from_user(&uffdio_continue, user_uffdio_continue, + /* don't copy the output fields */ + sizeof(uffdio_continue) - (sizeof(__s64)))) + goto out; + + ret = validate_range(ctx->mm, &uffdio_continue.range.start, + uffdio_continue.range.len); + if (ret) + goto out; + + ret = -EINVAL; + /* double check for wraparound just in case. */ + if (uffdio_continue.range.start + uffdio_continue.range.len <= + uffdio_continue.range.start) { + goto out; + } + if (uffdio_continue.mode & ~UFFDIO_CONTINUE_MODE_DONTWAKE) + goto out; + + if (mmget_not_zero(ctx->mm)) { + ret = mcopy_continue(ctx->mm, uffdio_continue.range.start, + uffdio_continue.range.len, + &ctx->mmap_changing); + mmput(ctx->mm); + } else { + return -ESRCH; + } + + if (unlikely(put_user(ret, &user_uffdio_continue->mapped))) + return -EFAULT; + if (ret < 0) + goto out; + + /* len == 0 would wake all */ + BUG_ON(!ret); + range.len = ret; + if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) { + range.start = uffdio_continue.range.start; + wake_userfault(ctx, &range); + } + ret = range.len == uffdio_continue.range.len ? 
0 : -EAGAIN; + +out: + return ret; +} + static inline unsigned int uffd_ctx_features(__u64 user_features) { /* @@ -1859,6 +1940,9 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx, goto err_out; /* report all available features and ioctls to userland */ uffdio_api.features = UFFD_API_FEATURES; +#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR + uffdio_api.features &= ~UFFD_FEATURE_MINOR_HUGETLBFS; +#endif uffdio_api.ioctls = UFFD_API_IOCTLS; ret = -EFAULT; if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) @@ -1907,6 +1991,9 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd, case UFFDIO_WRITEPROTECT: ret = userfaultfd_writeprotect(ctx, arg); break; + case UFFDIO_CONTINUE: + ret = userfaultfd_continue(ctx, arg); + break; } return ret; } diff --git a/include/dt-bindings/clock/sifive-fu740-prci.h b/include/dt-bindings/clock/sifive-fu740-prci.h index cd7706ea5677..7899b7fee7db 100644 --- a/include/dt-bindings/clock/sifive-fu740-prci.h +++ b/include/dt-bindings/clock/sifive-fu740-prci.h @@ -19,5 +19,6 @@ #define PRCI_CLK_CLTXPLL 5 #define PRCI_CLK_TLCLK 6 #define PRCI_CLK_PCLK 7 +#define PRCI_CLK_PCIE_AUX 8 #endif /* __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 70a932470b2d..73d039476fa4 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -4,10 +4,12 @@ #ifndef __ASSEMBLY__ -#include <linux/types.h> #include <linux/bitops.h> -#include <linux/string.h> #include <linux/kernel.h> +#include <linux/string.h> +#include <linux/types.h> + +struct device; /* * bitmaps provide bit arrays that consume one or more unsigned @@ -118,54 +120,59 @@ * Allocation and deallocation of bitmap. * Provided in lib/bitmap.c to avoid circular dependency. */ -extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); -extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); -extern void bitmap_free(const unsigned long *bitmap); +unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); +unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); +void bitmap_free(const unsigned long *bitmap); + +/* Managed variants of the above. 
*/ +unsigned long *devm_bitmap_alloc(struct device *dev, + unsigned int nbits, gfp_t flags); +unsigned long *devm_bitmap_zalloc(struct device *dev, + unsigned int nbits, gfp_t flags); /* * lib/bitmap.c provides these functions: */ -extern int __bitmap_equal(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern bool __pure __bitmap_or_equal(const unsigned long *src1, - const unsigned long *src2, - const unsigned long *src3, - unsigned int nbits); -extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, - unsigned int nbits); -extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, - unsigned int shift, unsigned int nbits); -extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, - unsigned int shift, unsigned int nbits); -extern void bitmap_cut(unsigned long *dst, const unsigned long *src, - unsigned int first, unsigned int cut, - unsigned int nbits); -extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, +int __bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +bool __pure __bitmap_or_equal(const unsigned long *src1, + const unsigned long *src2, + const unsigned long *src3, + unsigned int nbits); +void __bitmap_complement(unsigned long *dst, const unsigned long *src, + unsigned int nbits); +void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +void bitmap_cut(unsigned long *dst, const unsigned long *src, + unsigned int first, unsigned int cut, unsigned int nbits); +int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +void __bitmap_replace(unsigned long *dst, + const unsigned long *old, const unsigned long *new, + const unsigned long *mask, unsigned int nbits); +int __bitmap_intersects(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); -extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern void __bitmap_replace(unsigned long *dst, - const unsigned long *old, const unsigned long *new, - const unsigned long *mask, unsigned int nbits); -extern int __bitmap_intersects(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern int __bitmap_subset(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); -extern void __bitmap_set(unsigned long *map, unsigned int start, int len); -extern void __bitmap_clear(unsigned long *map, unsigned int start, int len); - -extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, - 
unsigned long size, - unsigned long start, - unsigned int nr, - unsigned long align_mask, - unsigned long align_offset); +int __bitmap_subset(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); +void __bitmap_set(unsigned long *map, unsigned int start, int len); +void __bitmap_clear(unsigned long *map, unsigned int start, int len); + +unsigned long bitmap_find_next_zero_area_off(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask, + unsigned long align_offset); /** * bitmap_find_next_zero_area - find a contiguous aligned zero area @@ -190,33 +197,33 @@ bitmap_find_next_zero_area(unsigned long *map, align_mask, 0); } -extern int bitmap_parse(const char *buf, unsigned int buflen, +int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *dst, int nbits); -extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, +int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); -extern int bitmap_parselist(const char *buf, unsigned long *maskp, +int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits); -extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, +int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); -extern void bitmap_remap(unsigned long *dst, const unsigned long *src, +void bitmap_remap(unsigned long *dst, const unsigned long *src, const unsigned long *old, const unsigned long *new, unsigned int nbits); -extern int bitmap_bitremap(int oldbit, +int bitmap_bitremap(int oldbit, const unsigned long *old, const unsigned long *new, int bits); -extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, +void bitmap_onto(unsigned long *dst, const unsigned long *orig, const unsigned long *relmap, unsigned int bits); -extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, +void bitmap_fold(unsigned long *dst, const unsigned long *orig, unsigned int sz, unsigned int nbits); -extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); -extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); -extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); +int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); +void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); +int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); #ifdef __BIG_ENDIAN -extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); +void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); #else #define bitmap_copy_le bitmap_copy #endif -extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits); -extern int bitmap_print_to_pagebuf(bool list, char *buf, +unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits); +int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits); #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) @@ -265,9 +272,9 @@ static inline void bitmap_copy_clear_tail(unsigned long *dst, * therefore conversion is not needed when copying data from/to arrays of u32. 
*/ #if BITS_PER_LONG == 64 -extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits); -extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, +void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits); #else #define bitmap_from_arr32(bitmap, buf, nbits) \ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 6b47f94378c5..e7e99da31349 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -194,6 +194,8 @@ void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size, struct buffer_head *__bread_gfp(struct block_device *, sector_t block, unsigned size, gfp_t gfp); void invalidate_bh_lrus(void); +void invalidate_bh_lrus_cpu(int cpu); +bool has_bh_in_lru(int cpu, void *dummy); struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); void free_buffer_head(struct buffer_head * bh); void unlock_buffer(struct buffer_head *bh); @@ -406,6 +408,8 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; } static inline void invalidate_inode_buffers(struct inode *inode) {} static inline int remove_inode_buffers(struct inode *inode) { return 1; } static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } +static inline void invalidate_bh_lrus_cpu(int cpu) {} +static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; } #define buffer_heads_over_limit 0 #endif /* CONFIG_BLOCK */ diff --git a/include/linux/cma.h b/include/linux/cma.h index 217999c8a762..53fd8c3cdbd0 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -44,9 +44,9 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, const char *name, struct cma **res_cma); -extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, +extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align, bool no_warn); -extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); +extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count); extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data); #endif diff --git a/include/linux/compaction.h b/include/linux/compaction.h index ed4070ed41ef..4221888bdcd6 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -81,7 +81,6 @@ static inline unsigned long compact_gap(unsigned int order) } #ifdef CONFIG_COMPACTION -extern int sysctl_compact_memory; extern unsigned int sysctl_compaction_proactiveness; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index a370af3c08f0..4a62b3980642 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -169,6 +169,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_RAPL_ONLINE, CPUHP_AP_PERF_X86_CQM_ONLINE, CPUHP_AP_PERF_X86_CSTATE_ONLINE, + CPUHP_AP_PERF_X86_IDXD_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, CPUHP_AP_PERF_S390_CFD_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 51872e736e7b..0d53a96a3d64 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -22,6 +22,11 @@ struct dma_map_ops { gfp_t gfp); void (*free_pages)(struct device *dev, size_t size, struct page *vaddr, dma_addr_t dma_handle, enum dma_data_direction 
dir); + struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs); + void (*free_noncontiguous)(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, unsigned long attrs); @@ -198,6 +203,20 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, } #endif /* CONFIG_DMA_DECLARE_COHERENT */ +/* + * This is the actual return value from the ->alloc_noncontiguous method. + * The users of the DMA API should only care about the sg_table, but to make + * the DMA-API internal vmapping and freeing easier we stash away the page + * array as well (except for the fallback case). This can go away any time, + * e.g. when a vmap-variant that takes a scatterlist comes along. + */ +struct dma_sgt_handle { + struct sg_table sgt; + struct page **pages; +}; +#define sgt_handle(sgt) \ + container_of((sgt), struct dma_sgt_handle, sgt) + int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 2a984cb4d1e0..183e7103a66d 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -95,7 +95,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { debug_dma_mapping_error(dev, dma_addr); - if (dma_addr == DMA_MAPPING_ERROR) + if (unlikely(dma_addr == DMA_MAPPING_ERROR)) return -ENOMEM; return 0; } @@ -144,6 +144,15 @@ u64 dma_get_required_mask(struct device *dev); size_t dma_max_mapping_size(struct device *dev); bool dma_need_sync(struct device *dev, dma_addr_t dma_addr); unsigned long dma_get_merge_boundary(struct device *dev); +struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp, unsigned long attrs); +void dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir); +void *dma_vmap_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt); +void dma_vunmap_noncontiguous(struct device *dev, void *vaddr); +int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, + size_t size, struct sg_table *sgt); #else /* CONFIG_HAS_DMA */ static inline dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, size_t offset, size_t size, @@ -257,12 +266,37 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev) { return 0; } +static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev, + size_t size, enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs) +{ + return NULL; +} +static inline void dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir) +{ +} +static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt) +{ + return NULL; +} +static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr) +{ +} +static inline int dma_mmap_noncontiguous(struct device *dev, + struct vm_area_struct *vma, size_t size, struct sg_table *sgt) +{ + return -EINVAL; +} #endif /* CONFIG_HAS_DMA */ struct page *dma_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp); void dma_free_pages(struct device *dev, size_t size, struct page *page, dma_addr_t dma_handle, enum dma_data_direction
dir); +int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, + size_t size, struct page *page); static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) @@ -401,7 +435,6 @@ static inline void dma_sync_sgtable_for_device(struct device *dev, static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { - return dma_alloc_attrs(dev, size, dma_handle, gfp, (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0); } diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index c6cc0a566ef5..5487a80617a3 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -168,7 +168,7 @@ struct f2fs_checkpoint { unsigned char alloc_type[MAX_ACTIVE_LOGS]; /* SIT and NAT version bitmap */ - unsigned char sit_nat_version_bitmap[1]; + unsigned char sit_nat_version_bitmap[]; } __packed; #define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 12766edee81f..acef282b97c6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -442,7 +442,6 @@ int pagecache_write_end(struct file *, struct address_space *mapping, * @i_mmap: Tree of private and shared mappings. * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. * @nrpages: Number of page entries, protected by the i_pages lock. - * @nrexceptional: Shadow or DAX entries, protected by the i_pages lock. * @writeback_index: Writeback starts here. * @a_ops: Methods. * @flags: Error bits and flags (AS_*). @@ -463,7 +462,6 @@ struct address_space { struct rb_root_cached i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; - unsigned long nrexceptional; pgoff_t writeback_index; const struct address_space_operations *a_ops; unsigned long flags; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 26f4d907254a..8a5f6c3d7dba 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -657,7 +657,7 @@ extern int alloc_contig_range(unsigned long start, unsigned long end, extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, int nid, nodemask_t *nodemask); #endif -void free_contig_range(unsigned long pfn, unsigned int nr_pages); +void free_contig_range(unsigned long pfn, unsigned long nr_pages); #ifdef CONFIG_CMA /* CMA stuff */ diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index ecf0032a0995..3a268781fcec 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -227,7 +227,7 @@ struct gpio_irq_chip { /** * @valid_mask: * - * If not %NULL holds bitmask of GPIOs which are valid to be included + * If not %NULL, holds bitmask of GPIOs which are valid to be included * in IRQ domain of the chip. */ unsigned long *valid_mask; @@ -346,7 +346,7 @@ struct gpio_irq_chip { * output. * * A gpio_chip can help platforms abstract various sources of GPIOs so - * they can all be accessed through a common programing interface. + * they can all be accessed through a common programming interface. * Example sources would be SOC controllers, FPGAs, multifunction * chips, dedicated GPIO expanders, and so on. * @@ -435,15 +435,15 @@ struct gpio_chip { /** * @valid_mask: * - * If not %NULL holds bitmask of GPIOs which are valid to be used + * If not %NULL, holds bitmask of GPIOs which are valid to be used * from the chip. 
*/ unsigned long *valid_mask; #if defined(CONFIG_OF_GPIO) /* - * If CONFIG_OF is enabled, then all GPIO controllers described in the - * device tree automatically may have an OF translation + * If CONFIG_OF_GPIO is enabled, then all GPIO controllers described in + * the device tree automatically may have an OF translation */ /** @@ -508,7 +508,7 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, * for GPIOs will fail rudely. * * gpiochip_add_data() must only be called after gpiolib initialization, - * ie after core_initcall(). + * i.e. after core_initcall(). * * If gc->base is negative, this requests dynamic assignment of * a range of valid GPIOs. diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 44170f312ae7..832b49b50c7b 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -332,4 +332,11 @@ static inline void memcpy_to_page(struct page *page, size_t offset, kunmap_local(to); } +static inline void memzero_page(struct page *page, size_t offset, size_t len) +{ + char *addr = kmap_atomic(page); + memset(addr + offset, 0, len); + kunmap_atomic(addr); +} + #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index ba973efcd369..9626fda5efce 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -87,9 +87,6 @@ enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, -#ifdef CONFIG_DEBUG_VM - TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG, -#endif }; struct kobject; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index cccd1aab69dd..b92f25ccef58 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -11,6 +11,7 @@ #include <linux/kref.h> #include <linux/pgtable.h> #include <linux/gfp.h> +#include <linux/userfaultfd_k.h> struct ctl_table; struct user_struct; @@ -134,11 +135,14 @@ void hugetlb_show_meminfo(void); unsigned long hugetlb_total_pages(void); vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags); +#ifdef CONFIG_USERFAULTFD int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, + enum mcopy_atomic_mode mode, struct page **pagep); +#endif /* CONFIG_USERFAULTFD */ bool hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags); @@ -152,7 +156,8 @@ void hugetlb_fix_reserve_counts(struct inode *inode); extern struct mutex *hugetlb_fault_mutex_table; u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx); -pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); +pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, pud_t *pud); struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage); @@ -161,7 +166,7 @@ extern struct list_head huge_boot_pages; /* arch callbacks */ -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz); @@ -187,6 +192,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot); bool is_hugetlb_entry_migration(pte_t pte); +void hugetlb_unshare_all_pmds(struct vm_area_struct *vma); #else 
/* !CONFIG_HUGETLB_PAGE */ @@ -308,16 +314,19 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, BUG(); } +#ifdef CONFIG_USERFAULTFD static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, + enum mcopy_atomic_mode mode, struct page **pagep) { BUG(); return 0; } +#endif /* CONFIG_USERFAULTFD */ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) @@ -368,6 +377,8 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm, return 0; } +static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { } + #endif /* !CONFIG_HUGETLB_PAGE */ /* * hugepages at page global directory. If arch support @@ -555,6 +566,7 @@ HPAGEFLAG(Freed, freed) #define HSTATE_NAME_LEN 32 /* Defines one hugetlb page size */ struct hstate { + struct mutex resize_lock; int next_nid_to_alloc; int next_nid_to_free; unsigned int order; @@ -583,6 +595,7 @@ struct huge_bootmem_page { struct hstate *hstate; }; +int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, @@ -865,6 +878,12 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; +static inline int isolate_or_dissolve_huge_page(struct page *page, + struct list_head *list) +{ + return -ENOMEM; +} + static inline struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) @@ -1039,4 +1058,14 @@ static inline __init void hugetlb_cma_check(void) } #endif +bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr); + +#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE +/* + * ARCHes with special requirements for evicting HUGETLB backing TLB entries can + * implement this. + */ +#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#endif + #endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 7a1dd7b969b6..62a8e3d23829 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -256,11 +256,11 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, irq_hw_number_t hwirq_max, int direct_max, const struct irq_domain_ops *ops, void *host_data); -struct irq_domain *irq_domain_add_simple(struct device_node *of_node, - unsigned int size, - unsigned int first_irq, - const struct irq_domain_ops *ops, - void *host_data); +struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, + unsigned int size, + unsigned int first_irq, + const struct irq_domain_ops *ops, + void *host_data); struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, unsigned int size, unsigned int first_irq, @@ -325,6 +325,15 @@ static inline struct irq_domain *irq_find_host(struct device_node *node) return d; } +static inline struct irq_domain *irq_domain_add_simple(struct device_node *of_node, + unsigned int size, + unsigned int first_irq, + const struct irq_domain_ops *ops, + void *host_data) +{ + return irq_domain_create_simple(of_node_to_fwnode(of_node), size, first_irq, ops, host_data); +} + /** * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. * @of_node: pointer to interrupt controller's device tree node. 
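To illustrate the interface change in the irqdomain hunk above: irq_domain_add_simple() is now a static inline wrapper that converts its device_node to a fwnode_handle and forwards to the new irq_domain_create_simple(). A minimal, hypothetical driver sketch follows; the demo_* names are invented for illustration and are not part of this patch, and only standard irqdomain helpers are assumed.

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Trivial .map callback: wire every hwirq to the dummy chip. */
static int demo_irq_map(struct irq_domain *d, unsigned int virq,
			irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
	return 0;
}

static const struct irq_domain_ops demo_domain_ops = {
	.map	= demo_irq_map,
	.xlate	= irq_domain_xlate_onecell,
};

static int demo_create_domain(struct fwnode_handle *fwnode)
{
	/*
	 * 8 lines; first_irq == 0 requests dynamic virq allocation.
	 * DT-only drivers can keep calling irq_domain_add_simple(),
	 * which now forwards here via of_node_to_fwnode().
	 */
	struct irq_domain *d;

	d = irq_domain_create_simple(fwnode, 8, 0, &demo_domain_ops, NULL);
	return d ? 0 : -ENOMEM;
}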
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 5904716f29ba..c193be760709 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -114,12 +114,13 @@ struct batched_lruvec_stat { }; /* - * Bitmap of shrinker::id corresponding to memcg-aware shrinkers, - * which have elements charged to this memcg. + * Bitmap and deferred work of shrinker::id corresponding to memcg-aware + * shrinkers, which have elements charged to this memcg. */ -struct memcg_shrinker_map { +struct shrinker_info { struct rcu_head rcu; - unsigned long map[]; + atomic_long_t *nr_deferred; + unsigned long *map; }; /* @@ -145,7 +146,7 @@ struct mem_cgroup_per_node { struct mem_cgroup_reclaim_iter iter; - struct memcg_shrinker_map __rcu *shrinker_map; + struct shrinker_info __rcu *shrinker_info; struct rb_node tree_node; /* RB tree node */ unsigned long usage_in_excess;/* Set to the value by which */ @@ -1610,10 +1611,10 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) return false; } -extern int memcg_expand_shrinker_maps(int new_id); - -extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg, - int nid, int shrinker_id); +int alloc_shrinker_info(struct mem_cgroup *memcg); +void free_shrinker_info(struct mem_cgroup *memcg); +void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id); +void reparent_shrinker_deferred(struct mem_cgroup *memcg); #else #define mem_cgroup_sockets_enabled 0 static inline void mem_cgroup_sk_alloc(struct sock *sk) { }; @@ -1623,8 +1624,8 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) return false; } -static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, - int nid, int shrinker_id) +static inline void set_shrinker_bit(struct mem_cgroup *memcg, + int nid, int shrinker_id) { } #endif diff --git a/include/linux/memory.h b/include/linux/memory.h index 4da95e684e20..97e92e8b556a 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -29,6 +29,11 @@ struct memory_block { int online_type; /* for passing data to online routine */ int nid; /* NID for this memory block */ struct device dev; + /* + * Number of vmemmap pages. These pages + * lie at the beginning of the memory block. + */ + unsigned long nr_vmemmap_pages; }; int arch_get_memory_phys_device(unsigned long start_pfn); @@ -80,7 +85,8 @@ static inline int memory_notify(unsigned long val, void *v) #else extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); -int create_memory_block_devices(unsigned long start, unsigned long size); +int create_memory_block_devices(unsigned long start, unsigned long size, + unsigned long vmemmap_pages); void remove_memory_block_devices(unsigned long start, unsigned long size); extern void memory_dev_init(void); extern int memory_notify(unsigned long val, void *v); diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 7288aa5ef73b..28f32fd00fe9 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -56,6 +56,14 @@ typedef int __bitwise mhp_t; #define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0)) /* + * We want memmap (struct page array) to be self-contained. + * To do so, we will use the beginning of the hot-added range to build + * the page tables for the memmap array that describes the entire range. + * Only selected architectures support it with SPARSE_VMEMMAP.
+ */ +#define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1)) + +/* * Extended parameters for memory hotplug: * altmap: alternative allocator for memmap array (optional) * pgprot: page protection flags to apply to newly created page tables @@ -99,9 +107,13 @@ static inline void zone_seqlock_init(struct zone *zone) extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages); extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); +extern void adjust_present_page_count(struct zone *zone, long nr_pages); /* VM interface that may be used by firmware interface */ +extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, + struct zone *zone); +extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages); extern int online_pages(unsigned long pfn, unsigned long nr_pages, - int online_type, int nid); + struct zone *zone); extern struct zone *test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn); extern void __offline_isolated_pages(unsigned long start_pfn, @@ -359,6 +371,7 @@ extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_ extern int arch_create_linear_mapping(int nid, u64 start, u64 size, struct mhp_params *params); void arch_remove_linear_mapping(u64 start, u64 size); +extern bool mhp_supports_memmap_on_memory(unsigned long size); #endif /* CONFIG_MEMORY_HOTPLUG */ #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/memremap.h b/include/linux/memremap.h index f5b464daeeca..45a79da89c5f 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -17,7 +17,7 @@ struct device; * @alloc: track pages consumed, private to vmemmap_populate() */ struct vmem_altmap { - const unsigned long base_pfn; + unsigned long base_pfn; const unsigned long end_pfn; const unsigned long reserve; unsigned long free; diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 3a389633b68f..4bb4e519e3f5 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -27,6 +27,7 @@ enum migrate_reason { MR_MEMPOLICY_MBIND, MR_NUMA_MISPLACED, MR_CONTIG_RANGE, + MR_LONGTERM_PIN, MR_TYPES }; @@ -43,10 +44,7 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason); extern struct page *alloc_migration_target(struct page *page, unsigned long private); extern int isolate_movable_page(struct page *page, isolate_mode_t mode); -extern void putback_movable_page(struct page *page); -extern void migrate_prep(void); -extern void migrate_prep_local(void); extern void migrate_page_states(struct page *newpage, struct page *page); extern void migrate_page_copy(struct page *newpage, struct page *page); extern int migrate_huge_page_move_mapping(struct address_space *mapping, @@ -66,9 +64,6 @@ static inline struct page *alloc_migration_target(struct page *page, static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) { return -EBUSY; } -static inline int migrate_prep(void) { return -ENOSYS; } -static inline int migrate_prep_local(void) { return -ENOSYS; } - static inline void migrate_page_states(struct page *newpage, struct page *page) { } diff --git a/include/linux/mm.h b/include/linux/mm.h index 011f43605807..76e27ebb28a3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -372,6 +372,13 @@ extern unsigned int kobjsize(const void *objp); # define VM_GROWSUP VM_NONE #endif +#ifdef 
CONFIG_HAVE_ARCH_USERFAULTFD_MINOR +# define VM_UFFD_MINOR_BIT 37 +# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */ +#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ +# define VM_UFFD_MINOR VM_NONE +#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ + /* Bits set in the VMA until the stack is in its final location */ #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ) @@ -1134,6 +1141,11 @@ static inline bool is_zone_device_page(const struct page *page) } #endif +static inline bool is_zone_movable_page(const struct page *page) +{ + return page_zonenum(page) == ZONE_MOVABLE; +} + #ifdef CONFIG_DEV_PAGEMAP_OPS void free_devmap_managed_page(struct page *page); DECLARE_STATIC_KEY_FALSE(devmap_managed_key); @@ -1543,6 +1555,20 @@ static inline unsigned long page_to_section(const struct page *page) } #endif +/* MIGRATE_CMA and ZONE_MOVABLE do not allow pinning pages */ +#ifdef CONFIG_MIGRATION +static inline bool is_pinnable_page(struct page *page) +{ + return !(is_zone_movable_page(page) || is_migrate_cma_page(page)) || + is_zero_pfn(page_to_pfn(page)); +} +#else +static inline bool is_pinnable_page(struct page *page) +{ + return true; +} +#endif + static inline void set_page_zone(struct page *page, enum zone_type zone) { page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 3b2205741048..917bd6c604d5 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -407,8 +407,13 @@ enum zone_type { * to increase the number of THP/huge pages. Notable special cases are: * * 1. Pinned pages: (long-term) pinning of movable pages might - * essentially turn such pages unmovable. Memory offlining might - * retry a long time. + * essentially turn such pages unmovable. Therefore, we do not allow + * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and + * faulted, they come from the right zone right away. However, it is + * still possible that the address space already has pages in + * ZONE_MOVABLE at the time when pages are pinned (i.e. user has + * touched that memory before pinning). In such a case we migrate them + * to a different zone. When migration fails, pinning fails. * 2. memblock allocations: kernelcore/movablecore setups might create * situations where ZONE_MOVABLE contains unmovable allocations * after boot. Memory offlining and allocations fail early. @@ -427,6 +432,15 @@ enum zone_type { * techniques might use alloc_contig_range() to hide previously * exposed pages from the buddy again (e.g., to implement some sort * of memory unplug in virtio-mem). + * 6. ZERO_PAGE(0), kernelcore/movablecore setups might create + * situations where ZERO_PAGE(0), which is allocated differently + * on different platforms, may end up in a movable zone. ZERO_PAGE(0) + * cannot be migrated. + * 7. Memory-hotplug: when using memmap_on_memory and onlining the + * memory to the MOVABLE zone, the vmemmap pages are also placed in + * such zone. Such pages cannot be really moved around as they are + * self-stored in the range, but they are treated as movable when + * the range they describe is about to be offlined. + * * In general, no unmovable allocations that degrade memory offlining * should end up in ZONE_MOVABLE.
Allocators (like alloc_contig_range()) @@ -1383,10 +1397,8 @@ static inline int online_section_nr(unsigned long nr) #ifdef CONFIG_MEMORY_HOTPLUG void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn); -#ifdef CONFIG_MEMORY_HOTREMOVE void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn); #endif -#endif static inline struct mem_section *__pfn_to_section(unsigned long pfn) { diff --git a/include/linux/msi.h b/include/linux/msi.h index aef35fd1cf11..6aff469e511d 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -240,8 +240,7 @@ void pci_msi_unmask_irq(struct irq_data *data); /* * The arch hooks to set up msi irqs. Default functions are implemented * as weak symbols so that they /can/ be overridden by architecture-specific - * code if needed. These hooks must be enabled by the architecture or by - * drivers which depend on them via msi_controller based MSI handling. + * code if needed. These hooks can only be enabled by the architecture. * * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by * stubs with warnings. */ @@ -251,7 +250,6 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); void arch_teardown_msi_irq(unsigned int irq); int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); void arch_teardown_msi_irqs(struct pci_dev *dev); -void default_teardown_msi_irqs(struct pci_dev *dev); #else static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { @@ -272,19 +270,6 @@ static inline void arch_teardown_msi_irqs(struct pci_dev *dev) void arch_restore_msi_irqs(struct pci_dev *dev); void default_restore_msi_irqs(struct pci_dev *dev); -struct msi_controller { - struct module *owner; - struct device *dev; - struct device_node *of_node; - struct list_head list; - - int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, - struct msi_desc *desc); - int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev, - int nvec, int type); - void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); -}; - #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN #include <linux/irqhandler.h> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 469fa7ffcf96..a4bd41128bf3 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -18,6 +18,11 @@ struct pagevec; +static inline bool mapping_empty(struct address_space *mapping) +{ + return xa_empty(&mapping->i_pages); +} + /* * Bits in mapping->flags. */ diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 65d3d83015c3..fbdadd4d8377 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -85,6 +85,7 @@ extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ +extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ #endif #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) diff --git a/include/linux/pci.h b/include/linux/pci.h index 0fa104ee13d9..c20211e59a57 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -458,7 +458,6 @@ struct pci_dev { u32 saved_config_space[16]; /* Config space saved at suspend time */ struct hlist_head saved_cap_space; - struct bin_attribute *rom_attr; /* Attribute descriptor for sysfs ROM entry */ int rom_attr_enabled; /* Display of ROM attribute enabled?
*/ struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ @@ -540,7 +539,6 @@ struct pci_host_bridge { int (*map_irq)(const struct pci_dev *, u8, u8); void (*release_fn)(struct pci_host_bridge *); void *release_data; - struct msi_controller *msi; unsigned int ignore_reset_delay:1; /* For entire hierarchy */ unsigned int no_ext_tags:1; /* No Extended Tags */ unsigned int native_aer:1; /* OS may use PCIe AER */ @@ -551,6 +549,7 @@ struct pci_host_bridge { unsigned int native_dpc:1; /* OS may use PCIe DPC */ unsigned int preserve_config:1; /* Preserve FW resource setup */ unsigned int size_windows:1; /* Enable root bus sizing */ + unsigned int msi_domain:1; /* Bridge wants MSI domain */ /* Resource alignment requirements */ resource_size_t (*align_resource)(struct pci_dev *dev, @@ -621,7 +620,6 @@ struct pci_bus { struct resource busn_res; /* Bus numbers routed to this bus */ struct pci_ops *ops; /* Configuration access functions */ - struct msi_controller *msi; /* MSI controller */ void *sysdata; /* Hook for sys-specific extension */ struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */ @@ -1085,6 +1083,7 @@ u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap); u16 pci_find_ext_capability(struct pci_dev *dev, int cap); u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap); struct pci_bus *pci_find_next_bus(const struct pci_bus *from); +u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap); u64 pci_get_dsn(struct pci_dev *dev); @@ -1209,6 +1208,7 @@ int __must_check pci_set_mwi(struct pci_dev *dev); int __must_check pcim_set_mwi(struct pci_dev *dev); int pci_try_set_mwi(struct pci_dev *dev); void pci_clear_mwi(struct pci_dev *dev); +void pci_disable_parity(struct pci_dev *dev); void pci_intx(struct pci_dev *dev, int enable); bool pci_check_and_mask_intx(struct pci_dev *dev); bool pci_check_and_unmask_intx(struct pci_dev *dev); @@ -1310,7 +1310,6 @@ void pci_unlock_rescan_remove(void); /* Vital Product Data routines */ ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); -int pci_set_vpd_size(struct pci_dev *dev, size_t len); /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); @@ -2319,14 +2318,13 @@ static inline u8 pci_vpd_info_field_size(const u8 *info_field) /** * pci_vpd_find_tag - Locates the Resource Data Type tag provided * @buf: Pointer to buffered vpd data - * @off: The offset into the buffer at which to begin the search * @len: The length of the vpd buffer * @rdt: The Resource Data Type to search for * * Returns the index where the Resource Data Type was found or * -ENOENT otherwise. 
*/ -int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt); +int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt); /** * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 5e772392a379..2194a9cd885c 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1111,6 +1111,7 @@ extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, extern void untrack_pfn_moved(struct vm_area_struct *vma); #endif +#ifdef CONFIG_MMU #ifdef __HAVE_COLOR_ZERO_PAGE static inline int is_zero_pfn(unsigned long pfn) { @@ -1134,6 +1135,17 @@ static inline unsigned long my_zero_pfn(unsigned long addr) return zero_pfn; } #endif +#else +static inline int is_zero_pfn(unsigned long pfn) +{ + return 0; +} + +static inline unsigned long my_zero_pfn(unsigned long addr) +{ + return 0; +} +#endif /* CONFIG_MMU */ #ifdef CONFIG_MMU diff --git a/include/linux/pwm.h b/include/linux/pwm.h index e4d84d4db293..5bb90af4997e 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -91,6 +91,11 @@ struct pwm_device { * pwm_get_state() - retrieve the current PWM state * @pwm: PWM device * @state: state to fill with the current PWM state + * + * The returned PWM state represents the state that was applied by a previous call to + * pwm_apply_state(). Drivers may have to slightly tweak that state before programming it to + * hardware. If pwm_apply_state() was never called, this returns either the current hardware + * state (if supported) or the default settings. */ static inline void pwm_get_state(const struct pwm_device *pwm, struct pwm_state *state) @@ -392,8 +397,6 @@ int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result, int pwm_set_chip_data(struct pwm_device *pwm, void *data); void *pwm_get_chip_data(struct pwm_device *pwm); -int pwmchip_add_with_polarity(struct pwm_chip *chip, - enum pwm_polarity polarity); int pwmchip_add(struct pwm_chip *chip); int pwmchip_remove(struct pwm_chip *chip); struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index f28ee75d1005..8b795b544f75 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -315,6 +315,7 @@ struct rproc; /** * struct rproc_mem_entry - memory entry descriptor * @va: virtual address + * @is_iomem: io memory * @dma: dma address * @len: length, in bytes * @da: device address @@ -329,6 +330,7 @@ struct rproc; */ struct rproc_mem_entry { void *va; + bool is_iomem; dma_addr_t dma; size_t len; u32 da; @@ -361,6 +363,7 @@ enum rsc_handling_status { * @start: power on the device and boot it * @stop: power off the device * @attach: attach to a device that is already powered up + * @detach: detach from a device, leaving it powered up * @kick: kick a virtqueue (virtqueue id given as a parameter) * @da_to_va: optional platform hook to perform address translations * @parse_fw: parse firmware to extract information (e.g.
resource table) @@ -368,7 +371,9 @@ enum rsc_handling_status { * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled and a * negative value on error * @load_rsc_table: load resource table from firmware image - * @find_loaded_rsc_table: find the loaded resouce table + * @find_loaded_rsc_table: find the loaded resource table from firmware image + * @get_loaded_rsc_table: get resource table installed in memory + * by external entity * @load: load firmware to memory, where the remote processor * expects to find it * @sanity_check: sanity check the fw image @@ -383,13 +388,16 @@ struct rproc_ops { int (*start)(struct rproc *rproc); int (*stop)(struct rproc *rproc); int (*attach)(struct rproc *rproc); + int (*detach)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); - void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len); + void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len, bool *is_iomem); int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc, int offset, int avail); struct resource_table *(*find_loaded_rsc_table)( struct rproc *rproc, const struct firmware *fw); + struct resource_table *(*get_loaded_rsc_table)( + struct rproc *rproc, size_t *size); int (*load)(struct rproc *rproc, const struct firmware *fw); int (*sanity_check)(struct rproc *rproc, const struct firmware *fw); u64 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); @@ -405,6 +413,8 @@ struct rproc_ops { * @RPROC_RUNNING: device is up and running * @RPROC_CRASHED: device has crashed; need to start recovery * @RPROC_DELETED: device is deleted + * @RPROC_ATTACHED: device has been booted by another entity and the core + * has attached to it * @RPROC_DETACHED: device has been booted by another entity and waiting * for the core to attach to it * @RPROC_LAST: just keep this one at the end @@ -421,8 +431,9 @@ enum rproc_state { RPROC_RUNNING = 2, RPROC_CRASHED = 3, RPROC_DELETED = 4, - RPROC_DETACHED = 5, - RPROC_LAST = 6, + RPROC_ATTACHED = 5, + RPROC_DETACHED = 6, + RPROC_LAST = 7, }; /** @@ -505,11 +516,12 @@ struct rproc_dump_segment { * @recovery_disabled: flag that state if recovery was disabled * @max_notifyid: largest allocated notify id. * @table_ptr: pointer to the resource table in effect + * @clean_table: copy of the resource table without modifications. 
Used + * when a remote processor is attached or detached from the core * @cached_table: copy of the resource table * @table_sz: size of @cached_table * @has_iommu: flag to indicate if remote processor is behind an MMU * @auto_boot: flag to indicate if remote processor should be auto-started - * @autonomous: true if an external entity has booted the remote processor * @dump_segments: list of segments in the firmware * @nb_vdev: number of vdev currently handled by rproc * @char_dev: character device of the rproc @@ -542,11 +554,11 @@ struct rproc { bool recovery_disabled; int max_notifyid; struct resource_table *table_ptr; + struct resource_table *clean_table; struct resource_table *cached_table; size_t table_sz; bool has_iommu; bool auto_boot; - bool autonomous; struct list_head dump_segments; int nb_vdev; u8 elf_class; @@ -655,6 +667,7 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, int rproc_boot(struct rproc *rproc); void rproc_shutdown(struct rproc *rproc); +int rproc_detach(struct rproc *rproc); int rproc_set_firmware(struct rproc *rproc, const char *fw_name); void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type); void rproc_coredump_using_sections(struct rproc *rproc); diff --git a/include/linux/reset.h b/include/linux/reset.h index 46e6372cb431..db0e6115a2f6 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -76,6 +76,11 @@ static inline int reset_control_reset(struct reset_control *rstc) return 0; } +static inline int reset_control_rearm(struct reset_control *rstc) +{ + return 0; +} + static inline int reset_control_assert(struct reset_control *rstc) { return 0; diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index a5db828b2420..d97dcd049f18 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -18,8 +18,7 @@ #include <linux/mutex.h> #include <linux/poll.h> #include <linux/rpmsg/byteorder.h> - -#define RPMSG_ADDR_ANY 0xFFFFFFFF +#include <uapi/linux/rpmsg.h> struct rpmsg_device; struct rpmsg_endpoint; diff --git a/include/linux/sched.h b/include/linux/sched.h index 9c25c8e67030..d2c881384517 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1583,7 +1583,7 @@ extern struct pid *cad_pid; #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ -#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */ +#define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 90b2a0bce11c..e24b1fe348e3 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -151,12 +151,13 @@ static inline bool in_vfork(struct task_struct *tsk) * Applies per-task gfp context to the given allocation flags. 
* PF_MEMALLOC_NOIO implies GFP_NOIO * PF_MEMALLOC_NOFS implies GFP_NOFS + * PF_MEMALLOC_PIN implies !GFP_MOVABLE */ static inline gfp_t current_gfp_context(gfp_t flags) { unsigned int pflags = READ_ONCE(current->flags); - if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) { + if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) { /* * NOIO implies both NOIO and NOFS and it is a weaker context * so always make sure it takes precedence @@ -165,6 +166,9 @@ static inline gfp_t current_gfp_context(gfp_t flags) flags &= ~(__GFP_IO | __GFP_FS); else if (pflags & PF_MEMALLOC_NOFS) flags &= ~__GFP_FS; + + if (pflags & PF_MEMALLOC_PIN) + flags &= ~__GFP_MOVABLE; } return flags; } @@ -271,29 +275,18 @@ static inline void memalloc_noreclaim_restore(unsigned int flags) current->flags = (current->flags & ~PF_MEMALLOC) | flags; } -#ifdef CONFIG_CMA -static inline unsigned int memalloc_nocma_save(void) +static inline unsigned int memalloc_pin_save(void) { - unsigned int flags = current->flags & PF_MEMALLOC_NOCMA; + unsigned int flags = current->flags & PF_MEMALLOC_PIN; - current->flags |= PF_MEMALLOC_NOCMA; + current->flags |= PF_MEMALLOC_PIN; return flags; } -static inline void memalloc_nocma_restore(unsigned int flags) -{ - current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags; -} -#else -static inline unsigned int memalloc_nocma_save(void) -{ - return 0; -} - -static inline void memalloc_nocma_restore(unsigned int flags) +static inline void memalloc_pin_restore(unsigned int flags) { + current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags; } -#endif #ifdef CONFIG_MEMCG DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg); diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 0f80123650e2..1eac79ce57d4 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -79,13 +79,14 @@ struct shrinker { #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ /* Flags */ -#define SHRINKER_NUMA_AWARE (1 << 0) -#define SHRINKER_MEMCG_AWARE (1 << 1) +#define SHRINKER_REGISTERED (1 << 0) +#define SHRINKER_NUMA_AWARE (1 << 1) +#define SHRINKER_MEMCG_AWARE (1 << 2) /* * It just makes sense when the shrinker is also MEMCG_AWARE for now, * non-MEMCG_AWARE shrinker should not have this flag set.
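memalloc_nocma_save()/restore() are generalized above into memalloc_pin_save()/memalloc_pin_restore(), and current_gfp_context() now clears __GFP_MOVABLE inside such a scope. A minimal sketch of the scoped-use pattern; the function is illustrative and assumes the allocation path applies current_gfp_context(), which is what the flag exists for::

	static struct page *alloc_for_longterm_pin(gfp_t gfp)
	{
		unsigned int flags = memalloc_pin_save();
		struct page *page;

		/* Allocations in this scope implicitly lose __GFP_MOVABLE. */
		page = alloc_page(gfp);

		memalloc_pin_restore(flags);
		return page;
	}
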
*/ -#define SHRINKER_NONSLAB (1 << 2) +#define SHRINKER_NONSLAB (1 << 3) extern int prealloc_shrinker(struct shrinker *shrinker); extern void register_shrinker_prepared(struct shrinker *shrinker); diff --git a/include/linux/swap.h b/include/linux/swap.h index 4cc6ec3bf0ab..f69e0f67651d 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -12,6 +12,7 @@ #include <linux/fs.h> #include <linux/atomic.h> #include <linux/page-flags.h> +#include <uapi/linux/mempolicy.h> #include <asm/page.h> struct notifier_block; @@ -339,6 +340,20 @@ extern void lru_note_cost(struct lruvec *lruvec, bool file, extern void lru_note_cost_page(struct page *); extern void lru_cache_add(struct page *); extern void mark_page_accessed(struct page *); + +extern atomic_t lru_disable_count; + +static inline bool lru_cache_disabled(void) +{ + return atomic_read(&lru_disable_count); +} + +static inline void lru_cache_enable(void) +{ + atomic_dec(&lru_disable_count); +} + +extern void lru_cache_disable(void); extern void lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); extern void lru_add_drain_cpu_zone(struct zone *zone); @@ -378,6 +393,12 @@ extern int sysctl_min_slab_ratio; #define node_reclaim_mode 0 #endif +static inline bool node_reclaim_enabled(void) +{ + /* Is any node_reclaim_mode bit set? */ + return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP); +} + extern void check_move_unevictable_pages(struct pagevec *pvec); extern int kswapd_run(int nid); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 5857a937c637..216854a5e513 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -6,6 +6,7 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/limits.h> +#include <linux/spinlock.h> struct device; struct page; @@ -36,20 +37,11 @@ enum swiotlb_force { extern void swiotlb_init(int verbose); int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); -extern unsigned long swiotlb_nr_tbl(void); unsigned long swiotlb_size_or_default(void); extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); extern int swiotlb_late_init_with_default_size(size_t default_size); extern void __init swiotlb_update_mem_attributes(void); -/* - * Enumeration for sync targets - */ -enum dma_sync_target { - SYNC_FOR_CPU = 0, - SYNC_FOR_DEVICE = 1, -}; - phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, size_t mapping_size, size_t alloc_size, enum dma_data_direction dir, unsigned long attrs); @@ -57,32 +49,70 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, extern void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, size_t mapping_size, - size_t alloc_size, enum dma_data_direction dir, unsigned long attrs); -extern void swiotlb_tbl_sync_single(struct device *hwdev, - phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir, - enum dma_sync_target target); - +void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir); +void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir); dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs); #ifdef CONFIG_SWIOTLB extern enum swiotlb_force swiotlb_force; -extern phys_addr_t io_tlb_start, io_tlb_end; + +/** + * struct io_tlb_mem - IO TLB Memory Pool Descriptor + * + * @start: The start address of the swiotlb memory pool. 
Used to do a quick + * range check to see if the memory was in fact allocated by this + * API. + * @end: The end address of the swiotlb memory pool. Used to do a quick + * range check to see if the memory was in fact allocated by this + * API. + * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and + * @end. This is command line adjustable via setup_io_tlb_npages. + * @used: The number of used IO TLB blocks. + * @list: The free list describing the number of free entries available + * from each index. + * @index: The index to start searching in the next round. + * @orig_addr: The original address corresponding to a mapped entry. + * @alloc_size: Size of the allocated buffer. + * @lock: The lock to protect the above data structures in the map and + * unmap calls. + * @debugfs: The dentry to debugfs. + * @late_alloc: %true if allocated using the page allocator + */ +struct io_tlb_mem { + phys_addr_t start; + phys_addr_t end; + unsigned long nslabs; + unsigned long used; + unsigned int index; + spinlock_t lock; + struct dentry *debugfs; + bool late_alloc; + struct io_tlb_slot { + phys_addr_t orig_addr; + size_t alloc_size; + unsigned int list; + } slots[]; +}; +extern struct io_tlb_mem *io_tlb_default_mem; static inline bool is_swiotlb_buffer(phys_addr_t paddr) { - return paddr >= io_tlb_start && paddr < io_tlb_end; + struct io_tlb_mem *mem = io_tlb_default_mem; + + return mem && paddr >= mem->start && paddr < mem->end; } void __init swiotlb_exit(void); unsigned int swiotlb_max_segment(void); size_t swiotlb_max_mapping_size(struct device *dev); bool is_swiotlb_active(void); -void __init swiotlb_adjust_size(unsigned long new_size); +void __init swiotlb_adjust_size(unsigned long size); #else #define swiotlb_force SWIOTLB_NO_FORCE static inline bool is_swiotlb_buffer(phys_addr_t paddr) @@ -106,7 +136,7 @@ static inline bool is_swiotlb_active(void) return false; } -static inline void swiotlb_adjust_size(unsigned long new_size) +static inline void swiotlb_adjust_size(unsigned long size) { } #endif /* CONFIG_SWIOTLB */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 6ac7bb1d2b1f..d296f3b88fb9 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -91,7 +91,7 @@ struct thermal_cooling_device_ops { struct thermal_cooling_device { int id; - char type[THERMAL_NAME_LENGTH]; + char *type; struct device device; struct device_node *np; void *devdata; @@ -390,7 +390,6 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); int thermal_zone_get_slope(struct thermal_zone_device *tz); int thermal_zone_get_offset(struct thermal_zone_device *tz); -void thermal_notify_framework(struct thermal_zone_device *, int); int thermal_zone_device_enable(struct thermal_zone_device *tz); int thermal_zone_device_disable(struct thermal_zone_device *tz); void thermal_zone_device_critical(struct thermal_zone_device *tz); @@ -436,10 +435,6 @@ static inline int thermal_zone_get_offset( struct thermal_zone_device *tz) { return -ENODEV; } -static inline void thermal_notify_framework(struct thermal_zone_device *tz, - int trip) -{ } - static inline int thermal_zone_device_enable(struct thermal_zone_device *tz) { return -ENODEV; } diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index a8e5f3ea9bb2..794d1538b8ba 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -17,6 +17,9 @@ #include <linux/mm.h> #include <asm-generic/pgtable_uffd.h> +/* The set of all possible UFFD-related VM flags.
*/ +#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR) + /* * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining * new flags, since they might collide with O_* ones. We want @@ -34,6 +37,22 @@ extern int sysctl_unprivileged_userfaultfd; extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason); +/* + * The mode of operation for __mcopy_atomic and its helpers. + * + * This is almost an implementation detail (mcopy_atomic below doesn't take this + * as a parameter), but it's exposed here because memory-kind-specific + * implementations (e.g. hugetlbfs) need to know the mode of operation. + */ +enum mcopy_atomic_mode { + /* A normal copy_from_user into the destination range. */ + MCOPY_ATOMIC_NORMAL, + /* Don't copy; map the destination range to the zero page. */ + MCOPY_ATOMIC_ZEROPAGE, + /* Just install pte(s) with the existing page(s) in the page cache. */ + MCOPY_ATOMIC_CONTINUE, +}; + extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, bool *mmap_changing, __u64 mode); @@ -41,6 +60,8 @@ extern ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long len, bool *mmap_changing); +extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start, + unsigned long len, bool *mmap_changing); extern int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, unsigned long len, bool enable_wp, bool *mmap_changing); @@ -52,6 +73,22 @@ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx; } +/* + * Never enable huge pmd sharing on some uffd registered vmas: + * + * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry. + * + * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for + * VMAs which share huge pmds. (If you have two mappings to the same + * underlying pages, and fault in the non-UFFD-registered one with a write, + * with huge pmd sharing this would *also* setup the second UFFD-registered + * mapping, and we'd not get minor faults.) 
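enum mcopy_atomic_mode above replaces a pair of booleans with a single dispatch point for the memory-type-specific fill helpers. A hedged sketch of how a backend might switch on it; all three helper names are hypothetical, loosely modeled on the hugetlbfs path::

	static int mfill_one_page(struct mm_struct *dst_mm, unsigned long dst_addr,
				  unsigned long src_addr, enum mcopy_atomic_mode mode)
	{
		switch (mode) {
		case MCOPY_ATOMIC_NORMAL:	/* copy user data into a fresh page */
			return copy_and_map_page(dst_mm, dst_addr, src_addr);
		case MCOPY_ATOMIC_ZEROPAGE:	/* back the address with the zero page */
			return map_zero_page(dst_mm, dst_addr);
		case MCOPY_ATOMIC_CONTINUE:	/* install ptes for the existing cache page */
			return map_existing_page(dst_mm, dst_addr);
		}
		return -EINVAL;
	}
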
+ */ +static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma) +{ + return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); +} + static inline bool userfaultfd_missing(struct vm_area_struct *vma) { return vma->vm_flags & VM_UFFD_MISSING; @@ -62,6 +99,11 @@ static inline bool userfaultfd_wp(struct vm_area_struct *vma) return vma->vm_flags & VM_UFFD_WP; } +static inline bool userfaultfd_minor(struct vm_area_struct *vma) +{ + return vma->vm_flags & VM_UFFD_MINOR; +} + static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma, pte_t pte) { @@ -76,7 +118,7 @@ static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma, static inline bool userfaultfd_armed(struct vm_area_struct *vma) { - return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP); + return vma->vm_flags & __VM_UFFD_FLAGS; } extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *); @@ -123,6 +165,11 @@ static inline bool userfaultfd_wp(struct vm_area_struct *vma) return false; } +static inline bool userfaultfd_minor(struct vm_area_struct *vma) +{ + return false; +} + static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma, pte_t pte) { diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 15fa085fab05..f311d227aa1b 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -8,7 +8,7 @@ #include <linux/vhost_iotlb.h> /** - * vDPA callback definition. + * struct vdpa_callback - vDPA callback definition. * @callback: interrupt callback function * @private: the data passed to the callback function */ @@ -18,7 +18,7 @@ struct vdpa_callback { }; /** - * vDPA notification area + * struct vdpa_notification_area - vDPA notification area * @addr: base address of the notification area * @size: size of the notification area */ @@ -28,7 +28,7 @@ struct vdpa_notification_area { }; /** - * vDPA vq_state definition + * struct vdpa_vq_state - vDPA vq_state definition * @avail_index: available index */ struct vdpa_vq_state { @@ -38,7 +38,7 @@ struct vdpa_vq_state { struct vdpa_mgmt_dev; /** - * vDPA device - representation of a vDPA device + * struct vdpa_device - representation of a vDPA device * @dev: underlying device * @dma_dev: the actual device that is performing DMA * @config: the configuration ops for this device. @@ -59,7 +59,7 @@ struct vdpa_device { }; /** - * vDPA IOVA range - the IOVA range support by the device + * struct vdpa_iova_range - the IOVA range supported by the device * @first: start of the IOVA range * @last: end of the IOVA range */ @@ -69,7 +69,7 @@ struct vdpa_iova_range { }; /** - * vDPA_config_ops - operations for configuring a vDPA device. + * struct vdpa_config_ops - operations for configuring a vDPA device. * Note: vDPA device drivers are required to implement all of the * operations unless it is mentioned to be optional in the following * list.
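uffd_disable_huge_pmd_share() above condenses the comment's two cases into one flag test. A hedged sketch of the kind of call site it serves, loosely modeled on the hugetlb pmd-sharing decision; vma_shareable() is assumed to be the existing shareability predicate in mm/hugetlb.c, and the exact wiring lives there, not in this header::

	bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
	{
		/* WP- and minor-registered ranges must keep private page tables. */
		if (uffd_disable_huge_pmd_share(vma))
			return false;
		return vma_shareable(vma, addr);
	}
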
@@ -150,6 +150,9 @@ struct vdpa_iova_range { * @set_status: Set the device status * @vdev: vdpa device * @status: virtio device status + * @get_config_size: Get the size of the configuration space + * @vdev: vdpa device + * Returns size_t: configuration size * @get_config: Read from device specific configuration space * @vdev: vdpa device * @offset: offset from the beginning of @@ -231,6 +234,7 @@ struct vdpa_config_ops { u32 (*get_vendor_id)(struct vdpa_device *vdev); u8 (*get_status)(struct vdpa_device *vdev); void (*set_status)(struct vdpa_device *vdev, u8 status); + size_t (*get_config_size)(struct vdpa_device *vdev); void (*get_config)(struct vdpa_device *vdev, unsigned int offset, void *buf, unsigned int len); void (*set_config)(struct vdpa_device *vdev, unsigned int offset, @@ -267,7 +271,7 @@ int _vdpa_register_device(struct vdpa_device *vdev, int nvqs); void _vdpa_unregister_device(struct vdpa_device *vdev); /** - * vdpa_driver - operations for a vDPA driver + * struct vdpa_driver - operations for a vDPA driver * @driver: underlying device driver * @probe: the function to call when a device is found. Returns 0 or -errno. * @remove: the function to call when a device is removed. @@ -344,18 +348,18 @@ static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset, } /** - * vdpa_mgmtdev_ops - vdpa device ops - * @dev_add: Add a vdpa device using alloc and register - * @mdev: parent device to use for device addition - * @name: name of the new vdpa device - * Driver need to add a new device using _vdpa_register_device() - * after fully initializing the vdpa device. Driver must return 0 - * on success or appropriate error code. - * @dev_del: Remove a vdpa device using unregister - * @mdev: parent device to use for device removal - * @dev: vdpa device to remove - * Driver need to remove the specified device by calling - * _vdpa_unregister_device(). + * struct vdpa_mgmtdev_ops - vdpa device ops + * @dev_add: Add a vdpa device using alloc and register + * @mdev: parent device to use for device addition + * @name: name of the new vdpa device + * Driver needs to add a new device using _vdpa_register_device() + * after fully initializing the vdpa device. Driver must return 0 + * on success or appropriate error code. + * @dev_del: Remove a vdpa device using unregister + * @mdev: parent device to use for device removal + * @dev: vdpa device to remove + * Driver needs to remove the specified device by calling + * _vdpa_unregister_device(). */ struct vdpa_mgmtdev_ops { int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name); diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index f26acbeec965..6a95b58fd0f4 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -13,6 +13,8 @@ struct virtio_pci_modern_device { void __iomem *device; /* Base of vq notifications (non-legacy mode).
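The new get_config_size op (not marked optional above, so effectively mandatory per the Note) gives the vDPA core an authoritative config-space size to validate get_config/set_config accesses against. A minimal sketch of a driver-side implementation; the device and its virtio-net config layout are stand-ins::

	static size_t my_vdpa_get_config_size(struct vdpa_device *vdev)
	{
		return sizeof(struct virtio_net_config);
	}

	static const struct vdpa_config_ops my_vdpa_ops = {
		/* ... the remaining mandatory ops ... */
		.get_config_size = my_vdpa_get_config_size,
	};
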
*/ void __iomem *notify_base; + /* Physical base of vq notifications */ + resource_size_t notify_pa; /* Where to read and clear interrupt */ u8 __iomem *isr; @@ -99,13 +101,8 @@ void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev, u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, u16 idx); u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev); -u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, - u16 idx); -void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, - size_t minlen, - u32 align, - u32 start, u32 size, - size_t *len); +void __iomem * vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, + u16 index, resource_size_t *pa); int vp_modern_probe(struct virtio_pci_modern_device *mdev); void vp_modern_remove(struct virtio_pci_modern_device *mdev); #endif diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 18e75974d4e3..ae0dd1948c2b 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -71,6 +71,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_HUGETLB_PAGE HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, #endif +#ifdef CONFIG_CMA + CMA_ALLOC_SUCCESS, + CMA_ALLOC_FAIL, +#endif UNEVICTABLE_PGCULLED, /* culled to noreclaim list */ UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */ UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */ @@ -121,6 +125,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, SWAP_RA, SWAP_RA_HIT, #endif +#ifdef CONFIG_X86 + DIRECT_MAP_LEVEL2_SPLIT, + DIRECT_MAP_LEVEL3_SPLIT, +#endif NR_VM_EVENT_ITEMS }; diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 59bd50f99291..84db7b8f912f 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -46,6 +46,9 @@ struct vringh { /* IOTLB for this vring */ struct vhost_iotlb *iotlb; + /* spinlock to synchronize IOTLB accesses */ + spinlock_t *iotlb_lock; + /* The function to call to notify the guest about added buffers */ void (*notify)(struct vringh *); }; @@ -196,6 +199,19 @@ static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov) kiov->iov = NULL; } +static inline size_t vringh_kiov_length(struct vringh_kiov *kiov) +{ + size_t len = 0; + int i; + + for (i = kiov->i; i < kiov->used; i++) + len += kiov->iov[i].iov_len; + + return len; +} + +void vringh_kiov_advance(struct vringh_kiov *kiov, size_t len); + int vringh_getdesc_kern(struct vringh *vrh, struct vringh_kiov *riov, struct vringh_kiov *wiov, @@ -258,7 +274,8 @@ static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val) #if IS_REACHABLE(CONFIG_VHOST_IOTLB) -void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb); +void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb, + spinlock_t *iotlb_lock); int vringh_init_iotlb(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h index 5017a8829270..c3d354702cb0 100644 --- a/include/trace/events/cma.h +++ b/include/trace/events/cma.h @@ -8,28 +8,31 @@ #include <linux/types.h> #include <linux/tracepoint.h> -TRACE_EVENT(cma_alloc, +DECLARE_EVENT_CLASS(cma_alloc_class, - TP_PROTO(unsigned long pfn, const struct page *page, - unsigned int count, unsigned int align), + TP_PROTO(const char *name, unsigned long pfn, const struct page *page, + unsigned long count, unsigned int align), - TP_ARGS(pfn, page, count, align), + TP_ARGS(name, pfn, page, count, align), 
TP_STRUCT__entry( + __string(name, name) __field(unsigned long, pfn) __field(const struct page *, page) - __field(unsigned int, count) + __field(unsigned long, count) __field(unsigned int, align) ), TP_fast_assign( + __assign_str(name, name); __entry->pfn = pfn; __entry->page = page; __entry->count = count; __entry->align = align; ), - TP_printk("pfn=%lx page=%p count=%u align=%u", + TP_printk("name=%s pfn=%lx page=%p count=%lu align=%u", + __get_str(name), __entry->pfn, __entry->page, __entry->count, @@ -38,29 +41,72 @@ TRACE_EVENT(cma_alloc, TRACE_EVENT(cma_release, - TP_PROTO(unsigned long pfn, const struct page *page, - unsigned int count), + TP_PROTO(const char *name, unsigned long pfn, const struct page *page, + unsigned long count), - TP_ARGS(pfn, page, count), + TP_ARGS(name, pfn, page, count), TP_STRUCT__entry( + __string(name, name) __field(unsigned long, pfn) __field(const struct page *, page) - __field(unsigned int, count) + __field(unsigned long, count) ), TP_fast_assign( + __assign_str(name, name); __entry->pfn = pfn; __entry->page = page; __entry->count = count; ), - TP_printk("pfn=%lx page=%p count=%u", + TP_printk("name=%s pfn=%lx page=%p count=%lu", + __get_str(name), __entry->pfn, __entry->page, __entry->count) ); +TRACE_EVENT(cma_alloc_start, + + TP_PROTO(const char *name, unsigned long count, unsigned int align), + + TP_ARGS(name, count, align), + + TP_STRUCT__entry( + __string(name, name) + __field(unsigned long, count) + __field(unsigned int, align) + ), + + TP_fast_assign( + __assign_str(name, name); + __entry->count = count; + __entry->align = align; + ), + + TP_printk("name=%s count=%lu align=%u", + __get_str(name), + __entry->count, + __entry->align) +); + +DEFINE_EVENT(cma_alloc_class, cma_alloc_finish, + + TP_PROTO(const char *name, unsigned long pfn, const struct page *page, + unsigned long count, unsigned int align), + + TP_ARGS(name, pfn, page, count, align) +); + +DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry, + + TP_PROTO(const char *name, unsigned long pfn, const struct page *page, + unsigned long count, unsigned int align), + + TP_ARGS(name, pfn, page, count, align) +); + #endif /* _TRACE_CMA_H */ /* This part must be outside protection */ diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h index 4d434398d64d..9fb2a3bbcdfb 100644 --- a/include/trace/events/migrate.h +++ b/include/trace/events/migrate.h @@ -20,7 +20,8 @@ EM( MR_SYSCALL, "syscall_or_cpuset") \ EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \ EM( MR_NUMA_MISPLACED, "numa_misplaced") \ - EMe(MR_CONTIG_RANGE, "contig_range") + EM( MR_CONTIG_RANGE, "contig_range") \ + EMe(MR_LONGTERM_PIN, "longterm_pin") /* * First define the enums in the above macros to be exported to userspace @@ -81,6 +82,28 @@ TRACE_EVENT(mm_migrate_pages, __print_symbolic(__entry->mode, MIGRATE_MODE), __print_symbolic(__entry->reason, MIGRATE_REASON)) ); + +TRACE_EVENT(mm_migrate_pages_start, + + TP_PROTO(enum migrate_mode mode, int reason), + + TP_ARGS(mode, reason), + + TP_STRUCT__entry( + __field(enum migrate_mode, mode) + __field(int, reason) + ), + + TP_fast_assign( + __entry->mode = mode; + __entry->reason = reason; + ), + + TP_printk("mode=%s reason=%s", + __print_symbolic(__entry->mode, MIGRATE_MODE), + __print_symbolic(__entry->reason, MIGRATE_REASON)) +); + #endif /* _TRACE_MIGRATE_H */ /* This part must be outside protection */ diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 67018d367b9f..629c7a0eaff2 100644 --- a/include/trace/events/mmflags.h +++ 
b/include/trace/events/mmflags.h @@ -137,6 +137,12 @@ IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) #define IF_HAVE_VM_SOFTDIRTY(flag,name) #endif +#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR +# define IF_HAVE_UFFD_MINOR(flag, name) {flag, name}, +#else +# define IF_HAVE_UFFD_MINOR(flag, name) +#endif + #define __def_vmaflag_names \ {VM_READ, "read" }, \ {VM_WRITE, "write" }, \ @@ -148,6 +154,7 @@ IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) {VM_MAYSHARE, "mayshare" }, \ {VM_GROWSDOWN, "growsdown" }, \ {VM_UFFD_MISSING, "uffd_missing" }, \ +IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \ {VM_PFNMAP, "pfnmap" }, \ {VM_DENYWRITE, "denywrite" }, \ {VM_UFFD_WP, "uffd_wp" }, \ diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 05669c87a0af..778dc191c265 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -42,6 +42,7 @@ #define KEXEC_ARCH_MIPS_LE (10 << 16) #define KEXEC_ARCH_MIPS ( 8 << 16) #define KEXEC_ARCH_AARCH64 (183 << 16) +#define KEXEC_ARCH_RISCV (243 << 16) /* The artificial cap on the number of segments passed to kexec_load. */ #define KEXEC_SEGMENT_MAX 16 diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h index 8948467b3992..4832fd0b5642 100644 --- a/include/uapi/linux/mempolicy.h +++ b/include/uapi/linux/mempolicy.h @@ -64,5 +64,12 @@ enum { #define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */ #define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */ +/* + * These bit locations are exposed in the vm.zone_reclaim_mode sysctl + * ABI. New bits are OK, but existing bits can never change. + */ +#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ +#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ +#define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */ #endif /* _UAPI_LINUX_MEMPOLICY_H */ diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h index e14c6dab4223..f5ca8740f3fb 100644 --- a/include/uapi/linux/rpmsg.h +++ b/include/uapi/linux/rpmsg.h @@ -9,11 +9,13 @@ #include <linux/ioctl.h> #include <linux/types.h> +#define RPMSG_ADDR_ANY 0xFFFFFFFF + /** * struct rpmsg_endpoint_info - endpoint info representation * @name: name of service - * @src: local address - * @dst: destination address + * @src: local address. Set to RPMSG_ADDR_ANY if not used. + * @dst: destination address. Set to RPMSG_ADDR_ANY if not used. */ struct rpmsg_endpoint_info { char name[32]; @@ -21,7 +23,14 @@ struct rpmsg_endpoint_info { __u32 dst; }; +/** + * Instantiate a new rpmsg char device endpoint. + */ #define RPMSG_CREATE_EPT_IOCTL _IOW(0xb5, 0x1, struct rpmsg_endpoint_info) + +/** + * Destroy an rpmsg char device endpoint created by the RPMSG_CREATE_EPT_IOCTL.
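With RPMSG_ADDR_ANY now in the uapi header, userspace can fill struct rpmsg_endpoint_info without copying the constant from kernel sources. A hedged userspace sketch of endpoint creation through the rpmsg char device; the control-device path and service name are illustrative, and the <err.h> helper is used for brevity::

	struct rpmsg_endpoint_info info = {
		.name = "rpmsg-test",
		.src = RPMSG_ADDR_ANY,	/* let the core assign a local address */
		.dst = RPMSG_ADDR_ANY,	/* destination not used by this service */
	};
	int fd = open("/dev/rpmsg_ctrl0", O_RDWR);

	if (fd < 0 || ioctl(fd, RPMSG_CREATE_EPT_IOCTL, &info) < 0)
		err(1, "cannot create rpmsg endpoint");
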
+ */ #define RPMSG_DESTROY_EPT_IOCTL _IO(0xb5, 0x2) #endif diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h index c105054cbb57..9aa2fedfa309 100644 --- a/include/uapi/linux/thermal.h +++ b/include/uapi/linux/thermal.h @@ -60,7 +60,7 @@ enum thermal_genl_event { THERMAL_GENL_EVENT_UNSPEC, THERMAL_GENL_EVENT_TZ_CREATE, /* Thermal zone creation */ THERMAL_GENL_EVENT_TZ_DELETE, /* Thermal zone deletion */ - THERMAL_GENL_EVENT_TZ_DISABLE, /* Thermal zone disabed */ + THERMAL_GENL_EVENT_TZ_DISABLE, /* Thermal zone disabled */ THERMAL_GENL_EVENT_TZ_ENABLE, /* Thermal zone enabled */ THERMAL_GENL_EVENT_TZ_TRIP_UP, /* Trip point crossed the way up */ THERMAL_GENL_EVENT_TZ_TRIP_DOWN, /* Trip point crossed the way down */ diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 5f2d88212f7c..bafbeb1a2624 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -19,15 +19,19 @@ * means the userland is reading). */ #define UFFD_API ((__u64)0xAA) +#define UFFD_API_REGISTER_MODES (UFFDIO_REGISTER_MODE_MISSING | \ + UFFDIO_REGISTER_MODE_WP | \ + UFFDIO_REGISTER_MODE_MINOR) #define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \ UFFD_FEATURE_EVENT_FORK | \ UFFD_FEATURE_EVENT_REMAP | \ - UFFD_FEATURE_EVENT_REMOVE | \ + UFFD_FEATURE_EVENT_REMOVE | \ UFFD_FEATURE_EVENT_UNMAP | \ UFFD_FEATURE_MISSING_HUGETLBFS | \ UFFD_FEATURE_MISSING_SHMEM | \ UFFD_FEATURE_SIGBUS | \ - UFFD_FEATURE_THREAD_ID) + UFFD_FEATURE_THREAD_ID | \ + UFFD_FEATURE_MINOR_HUGETLBFS) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -36,10 +40,12 @@ ((__u64)1 << _UFFDIO_WAKE | \ (__u64)1 << _UFFDIO_COPY | \ (__u64)1 << _UFFDIO_ZEROPAGE | \ - (__u64)1 << _UFFDIO_WRITEPROTECT) + (__u64)1 << _UFFDIO_WRITEPROTECT | \ + (__u64)1 << _UFFDIO_CONTINUE) #define UFFD_API_RANGE_IOCTLS_BASIC \ ((__u64)1 << _UFFDIO_WAKE | \ - (__u64)1 << _UFFDIO_COPY) + (__u64)1 << _UFFDIO_COPY | \ + (__u64)1 << _UFFDIO_CONTINUE) /* * Valid ioctl command number range with this API is from 0x00 to @@ -55,6 +61,7 @@ #define _UFFDIO_COPY (0x03) #define _UFFDIO_ZEROPAGE (0x04) #define _UFFDIO_WRITEPROTECT (0x06) +#define _UFFDIO_CONTINUE (0x07) #define _UFFDIO_API (0x3F) /* userfaultfd ioctl ids */ @@ -73,6 +80,8 @@ struct uffdio_zeropage) #define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \ struct uffdio_writeprotect) +#define UFFDIO_CONTINUE _IOR(UFFDIO, _UFFDIO_CONTINUE, \ + struct uffdio_continue) /* read() structure */ struct uffd_msg { @@ -127,6 +136,7 @@ struct uffd_msg { /* flags for UFFD_EVENT_PAGEFAULT */ #define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */ #define UFFD_PAGEFAULT_FLAG_WP (1<<1) /* If reason is VM_UFFD_WP */ +#define UFFD_PAGEFAULT_FLAG_MINOR (1<<2) /* If reason is VM_UFFD_MINOR */ struct uffdio_api { /* userland asks for an API number and the features to enable */ @@ -171,6 +181,10 @@ struct uffdio_api { * * UFFD_FEATURE_THREAD_ID pid of the page faulted task_struct will * be returned, if feature is not requested 0 will be returned. + * + * UFFD_FEATURE_MINOR_HUGETLBFS indicates that minor faults + * can be intercepted (via REGISTER_MODE_MINOR) for + * hugetlbfs-backed pages. 
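Read together, the additions above define the minor-fault contract: register memory with UFFDIO_REGISTER_MODE_MINOR, receive page-fault events flagged UFFD_PAGEFAULT_FLAG_MINOR, and resolve each one with UFFDIO_CONTINUE once the page-cache contents are known to be correct. A hedged userspace sketch of the resolve step; uffd, fault_addr and hpage_size are assumed to come from the surrounding fault-handling loop::

	struct uffdio_continue cont = {
		.range = {
			.start = fault_addr & ~(hpage_size - 1),
			.len = hpage_size,
		},
		.mode = 0,	/* no DONTWAKE: wake the faulting thread */
	};

	/* Install ptes for the pages already present in the page cache. */
	if (ioctl(uffd, UFFDIO_CONTINUE, &cont) < 0)
		err(1, "UFFDIO_CONTINUE");
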
*/ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -181,6 +195,7 @@ struct uffdio_api { #define UFFD_FEATURE_EVENT_UNMAP (1<<6) #define UFFD_FEATURE_SIGBUS (1<<7) #define UFFD_FEATURE_THREAD_ID (1<<8) +#define UFFD_FEATURE_MINOR_HUGETLBFS (1<<9) __u64 features; __u64 ioctls; @@ -195,6 +210,7 @@ struct uffdio_register { struct uffdio_range range; #define UFFDIO_REGISTER_MODE_MISSING ((__u64)1<<0) #define UFFDIO_REGISTER_MODE_WP ((__u64)1<<1) +#define UFFDIO_REGISTER_MODE_MINOR ((__u64)1<<2) __u64 mode; /* @@ -257,6 +273,18 @@ struct uffdio_writeprotect { __u64 mode; }; +struct uffdio_continue { + struct uffdio_range range; +#define UFFDIO_CONTINUE_MODE_DONTWAKE ((__u64)1<<0) + __u64 mode; + + /* + * Fields below here are written by the ioctl and must be at the end: + * the copy_from_user will not read past here. + */ + __s64 mapped; +}; + /* * Flags for the userfaultfd(2) system call itself. */ diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h index dbc4a4b785f6..b3e647f86e3e 100644 --- a/include/xen/swiotlb-xen.h +++ b/include/xen/swiotlb-xen.h @@ -10,7 +10,8 @@ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle, void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir); -extern int xen_swiotlb_init(int verbose, bool early); +int xen_swiotlb_init(void); +void __init xen_swiotlb_init_early(void); extern const struct dma_map_ops xen_swiotlb_dma_ops; #endif /* __LINUX_SWIOTLB_XEN_H */ diff --git a/init/Kconfig b/init/Kconfig index b71bf0cf5688..b1da957a9549 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1644,6 +1644,11 @@ config HAVE_ARCH_USERFAULTFD_WP help Arch has userfaultfd write protection support +config HAVE_ARCH_USERFAULTFD_MINOR + bool + help + Arch has userfaultfd minor fault support + config MEMBARRIER bool "Enable membarrier() system call" if EXPERT default y diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 002268262c9a..f737e3347059 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -344,8 +344,8 @@ void dma_direct_sync_sg_for_device(struct device *dev, phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg)); if (unlikely(is_swiotlb_buffer(paddr))) - swiotlb_tbl_sync_single(dev, paddr, sg->length, - dir, SYNC_FOR_DEVICE); + swiotlb_sync_single_for_device(dev, paddr, sg->length, + dir); if (!dev_is_dma_coherent(dev)) arch_sync_dma_for_device(paddr, sg->length, @@ -370,8 +370,8 @@ void dma_direct_sync_sg_for_cpu(struct device *dev, arch_sync_dma_for_cpu(paddr, sg->length, dir); if (unlikely(is_swiotlb_buffer(paddr))) - swiotlb_tbl_sync_single(dev, paddr, sg->length, dir, - SYNC_FOR_CPU); + swiotlb_sync_single_for_cpu(dev, paddr, sg->length, + dir); if (dir == DMA_FROM_DEVICE) arch_dma_mark_clean(paddr, sg->length); diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h index b98615578737..50afc05b6f1d 100644 --- a/kernel/dma/direct.h +++ b/kernel/dma/direct.h @@ -57,7 +57,7 @@ static inline void dma_direct_sync_single_for_device(struct device *dev, phys_addr_t paddr = dma_to_phys(dev, addr); if (unlikely(is_swiotlb_buffer(paddr))) - swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE); + swiotlb_sync_single_for_device(dev, paddr, size, dir); if (!dev_is_dma_coherent(dev)) arch_sync_dma_for_device(paddr, size, dir); @@ -74,7 +74,7 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev, } if (unlikely(is_swiotlb_buffer(paddr))) - swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU); + 
swiotlb_sync_single_for_cpu(dev, paddr, size, dir); if (dir == DMA_FROM_DEVICE) arch_dma_mark_clean(paddr, size); @@ -114,6 +114,6 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, dma_direct_sync_single_for_cpu(dev, addr, size, dir); if (unlikely(is_swiotlb_buffer(phys))) - swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs); + swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); } #endif /* _KERNEL_DMA_DIRECT_H */ diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c index e0e64f8b0739..9b9af1bd6be3 100644 --- a/kernel/dma/map_benchmark.c +++ b/kernel/dma/map_benchmark.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2020 Hisilicon Limited. + * Copyright (C) 2020 HiSilicon Limited. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -38,7 +38,8 @@ struct map_benchmark { __u32 dma_bits; /* DMA addressing capability */ __u32 dma_dir; /* DMA data direction */ __u32 dma_trans_ns; /* time for DMA transmission in ns */ - __u8 expansion[80]; /* For future use */ + __u32 granule; /* how many PAGE_SIZE-sized chunks to map/unmap at a time */ + __u8 expansion[76]; /* For future use */ }; struct map_benchmark_data { @@ -58,9 +59,11 @@ static int map_benchmark_thread(void *data) void *buf; dma_addr_t dma_addr; struct map_benchmark_data *map = data; + int npages = map->bparam.granule; + u64 size = npages * PAGE_SIZE; int ret = 0; - buf = (void *)__get_free_page(GFP_KERNEL); + buf = alloc_pages_exact(size, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -76,10 +79,10 @@ static int map_benchmark_thread(void *data) * 66 means everything goes well! 66 is lucky. */ if (map->dir != DMA_FROM_DEVICE) - memset(buf, 0x66, PAGE_SIZE); + memset(buf, 0x66, size); map_stime = ktime_get(); - dma_addr = dma_map_single(map->dev, buf, PAGE_SIZE, map->dir); + dma_addr = dma_map_single(map->dev, buf, size, map->dir); if (unlikely(dma_mapping_error(map->dev, dma_addr))) { pr_err("dma_map_single failed on %s\n", dev_name(map->dev)); @@ -93,7 +96,7 @@ static int map_benchmark_thread(void *data) ndelay(map->bparam.dma_trans_ns); unmap_stime = ktime_get(); - dma_unmap_single(map->dev, dma_addr, PAGE_SIZE, map->dir); + dma_unmap_single(map->dev, dma_addr, size, map->dir); unmap_etime = ktime_get(); unmap_delta = ktime_sub(unmap_etime, unmap_stime); @@ -112,7 +115,7 @@ static int map_benchmark_thread(void *data) } out: - free_page((unsigned long)buf); + free_pages_exact(buf, size); return ret; } @@ -203,7 +206,6 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd, struct map_benchmark_data *map = file->private_data; void __user *argp = (void __user *)arg; u64 old_dma_mask; - int ret; if (copy_from_user(&map->bparam, argp, sizeof(map->bparam))) @@ -234,6 +236,11 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd, return -EINVAL; } + if (map->bparam.granule < 1 || map->bparam.granule > 1024) { + pr_err("invalid granule size\n"); + return -EINVAL; + } + switch (map->bparam.dma_dir) { case DMA_MAP_BIDIRECTIONAL: map->dir = DMA_BIDIRECTIONAL; diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index b6a633679933..2b06a809d0b9 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -477,11 +477,10 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, } EXPORT_SYMBOL(dma_free_attrs); -struct page *dma_alloc_pages(struct device *dev, size_t size, +static struct page *__dma_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) { const struct
dma_map_ops *ops = get_dma_ops(dev); - struct page *page; if (WARN_ON_ONCE(!dev->coherent_dma_mask)) return NULL; @@ -490,33 +489,162 @@ struct page *dma_alloc_pages(struct device *dev, size_t size, size = PAGE_ALIGN(size); if (dma_alloc_direct(dev, ops)) - page = dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp); - else if (ops->alloc_pages) - page = ops->alloc_pages(dev, size, dma_handle, dir, gfp); - else + return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp); + if (!ops->alloc_pages) return NULL; + return ops->alloc_pages(dev, size, dma_handle, dir, gfp); +} - debug_dma_map_page(dev, page, 0, size, dir, *dma_handle); +struct page *dma_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) +{ + struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp); + if (page) + debug_dma_map_page(dev, page, 0, size, dir, *dma_handle); return page; } EXPORT_SYMBOL_GPL(dma_alloc_pages); -void dma_free_pages(struct device *dev, size_t size, struct page *page, +static void __dma_free_pages(struct device *dev, size_t size, struct page *page, dma_addr_t dma_handle, enum dma_data_direction dir) { const struct dma_map_ops *ops = get_dma_ops(dev); size = PAGE_ALIGN(size); - debug_dma_unmap_page(dev, dma_handle, size, dir); - if (dma_alloc_direct(dev, ops)) dma_direct_free_pages(dev, size, page, dma_handle, dir); else if (ops->free_pages) ops->free_pages(dev, size, page, dma_handle, dir); } + +void dma_free_pages(struct device *dev, size_t size, struct page *page, + dma_addr_t dma_handle, enum dma_data_direction dir) +{ + debug_dma_unmap_page(dev, dma_handle, size, dir); + __dma_free_pages(dev, size, page, dma_handle, dir); +} EXPORT_SYMBOL_GPL(dma_free_pages); +int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, + size_t size, struct page *page) +{ + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; + + if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff) + return -ENXIO; + return remap_pfn_range(vma, vma->vm_start, + page_to_pfn(page) + vma->vm_pgoff, + vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot); +} +EXPORT_SYMBOL_GPL(dma_mmap_pages); + +static struct sg_table *alloc_single_sgt(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp) +{ + struct sg_table *sgt; + struct page *page; + + sgt = kmalloc(sizeof(*sgt), gfp); + if (!sgt) + return NULL; + if (sg_alloc_table(sgt, 1, gfp)) + goto out_free_sgt; + page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp); + if (!page) + goto out_free_table; + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); + sg_dma_len(sgt->sgl) = sgt->sgl->length; + return sgt; +out_free_table: + sg_free_table(sgt); +out_free_sgt: + kfree(sgt); + return NULL; +} + +struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp, unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + struct sg_table *sgt; + + if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES)) + return NULL; + + if (ops && ops->alloc_noncontiguous) + sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs); + else + sgt = alloc_single_sgt(dev, size, dir, gfp); + + if (sgt) { + sgt->nents = 1; + debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir); + } + return sgt; +} +EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous); + +static void free_single_sgt(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir) +{ + __dma_free_pages(dev, size, sg_page(sgt->sgl), 
sgt->sgl->dma_address, + dir); + sg_free_table(sgt); + kfree(sgt); +} + +void dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); + if (ops && ops->free_noncontiguous) + ops->free_noncontiguous(dev, size, sgt, dir); + else + free_single_sgt(dev, size, sgt, dir); +} +EXPORT_SYMBOL_GPL(dma_free_noncontiguous); + +void *dma_vmap_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; + + if (ops && ops->alloc_noncontiguous) + return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL); + return page_address(sg_page(sgt->sgl)); +} +EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous); + +void dma_vunmap_noncontiguous(struct device *dev, void *vaddr) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (ops && ops->alloc_noncontiguous) + vunmap(vaddr); +} +EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous); + +int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, + size_t size, struct sg_table *sgt) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (ops && ops->alloc_noncontiguous) { + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; + + if (vma->vm_pgoff >= count || + vma_pages(vma) > count - vma->vm_pgoff) + return -ENXIO; + return vm_map_pages(vma, sgt_handle(sgt)->pages, count); + } + return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl)); +} +EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous); + int dma_supported(struct device *dev, u64 mask) { const struct dma_map_ops *ops = get_dma_ops(dev); diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index c10e855a03bc..8ca7d505d61c 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -59,32 +59,11 @@ */ #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) -enum swiotlb_force swiotlb_force; - -/* - * Used to do a quick range check in swiotlb_tbl_unmap_single and - * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this - * API. - */ -phys_addr_t io_tlb_start, io_tlb_end; - -/* - * The number of IO TLB blocks (in groups of 64) between io_tlb_start and - * io_tlb_end. This is command line adjustable via setup_io_tlb_npages. - */ -static unsigned long io_tlb_nslabs; +#define INVALID_PHYS_ADDR (~(phys_addr_t)0) -/* - * The number of used IO TLB block - */ -static unsigned long io_tlb_used; +enum swiotlb_force swiotlb_force; -/* - * This is a free list describing the number of free entries available from - * each index - */ -static unsigned int *io_tlb_list; -static unsigned int io_tlb_index; +struct io_tlb_mem *io_tlb_default_mem; /* * Max segment that we can provide which (if pages are contiguous) will @@ -92,57 +71,30 @@ static unsigned int io_tlb_index; */ static unsigned int max_segment; -/* - * We need to save away the original address corresponding to a mapped entry - * for the sync operations. - */ -#define INVALID_PHYS_ADDR (~(phys_addr_t)0) -static phys_addr_t *io_tlb_orig_addr; - -/* - * The mapped buffer's size should be validated during a sync operation.
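The dma_*_noncontiguous() family added to mapping.c above implies a simple driver-side flow: allocate, optionally vmap for CPU access, use the scatterlist for device I/O, then tear down in reverse order. A hedged sketch using only the signatures introduced here (dev and size belong to the caller; error handling shortened)::

	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return -ENOMEM;

	vaddr = dma_vmap_noncontiguous(dev, size, sgt);	/* optional CPU view */
	if (vaddr) {
		/* ... fill or parse the buffer, then drop the mapping ... */
		dma_vunmap_noncontiguous(dev, vaddr);
	}

	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
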
- */ -static size_t *io_tlb_orig_size; - -/* - * Protect the above data structures in the map and unmap calls - */ -static DEFINE_SPINLOCK(io_tlb_lock); - -static int late_alloc; +static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT; static int __init setup_io_tlb_npages(char *str) { if (isdigit(*str)) { - io_tlb_nslabs = simple_strtoul(str, &str, 0); /* avoid tail segment of size < IO_TLB_SEGSIZE */ - io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + default_nslabs = + ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE); } if (*str == ',') ++str; - if (!strcmp(str, "force")) { + if (!strcmp(str, "force")) swiotlb_force = SWIOTLB_FORCE; - } else if (!strcmp(str, "noforce")) { + else if (!strcmp(str, "noforce")) swiotlb_force = SWIOTLB_NO_FORCE; - io_tlb_nslabs = 1; - } return 0; } early_param("swiotlb", setup_io_tlb_npages); -static bool no_iotlb_memory; - -unsigned long swiotlb_nr_tbl(void) -{ - return unlikely(no_iotlb_memory) ? 0 : io_tlb_nslabs; -} -EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); - unsigned int swiotlb_max_segment(void) { - return unlikely(no_iotlb_memory) ? 0 : max_segment; + return io_tlb_default_mem ? max_segment : 0; } EXPORT_SYMBOL_GPL(swiotlb_max_segment); @@ -156,42 +108,34 @@ void swiotlb_set_max_segment(unsigned int val) unsigned long swiotlb_size_or_default(void) { - unsigned long size; - - size = io_tlb_nslabs << IO_TLB_SHIFT; - - return size ? size : (IO_TLB_DEFAULT_SIZE); + return default_nslabs << IO_TLB_SHIFT; } -void __init swiotlb_adjust_size(unsigned long new_size) +void __init swiotlb_adjust_size(unsigned long size) { - unsigned long size; - /* * If swiotlb parameter has not been specified, give a chance to * architectures such as those supporting memory encryption to * adjust/expand SWIOTLB size for their use. 
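The rewritten swiotlb_adjust_size() now takes effect only while default_nslabs still holds its build-time default, i.e. when neither the swiotlb= parameter nor an earlier caller has already sized the pool. A hedged sketch of the intended early-boot use, loosely modeled on memory-encryption guests; the 6% heuristic, the clamp bounds and total_mem are illustrative, not taken from this diff::

	unsigned long size = total_mem * 6 / 100;

	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
	swiotlb_adjust_size(size);
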
*/ - if (!io_tlb_nslabs) { - size = ALIGN(new_size, IO_TLB_SIZE); - io_tlb_nslabs = size >> IO_TLB_SHIFT; - io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); - - pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20); - } + if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT) + return; + size = ALIGN(size, IO_TLB_SIZE); + default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); + pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20); } void swiotlb_print_info(void) { - unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT; + struct io_tlb_mem *mem = io_tlb_default_mem; - if (no_iotlb_memory) { + if (!mem) { pr_warn("No low mem\n"); return; } - pr_info("mapped [mem %pa-%pa] (%luMB)\n", &io_tlb_start, &io_tlb_end, - bytes >> 20); + pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end, + (mem->nslabs << IO_TLB_SHIFT) >> 20); } static inline unsigned long io_tlb_offset(unsigned long val) @@ -212,64 +156,51 @@ static inline unsigned long nr_slots(u64 val) */ void __init swiotlb_update_mem_attributes(void) { + struct io_tlb_mem *mem = io_tlb_default_mem; void *vaddr; unsigned long bytes; - if (no_iotlb_memory || late_alloc) + if (!mem || mem->late_alloc) return; - - vaddr = phys_to_virt(io_tlb_start); - bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT); + vaddr = phys_to_virt(mem->start); + bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT); set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); memset(vaddr, 0, bytes); } int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) { - unsigned long i, bytes; + unsigned long bytes = nslabs << IO_TLB_SHIFT, i; + struct io_tlb_mem *mem; size_t alloc_size; - bytes = nslabs << IO_TLB_SHIFT; - - io_tlb_nslabs = nslabs; - io_tlb_start = __pa(tlb); - io_tlb_end = io_tlb_start + bytes; - - /* - * Allocate and initialize the free list array. This array is used - * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE - * between io_tlb_start and io_tlb_end. 
- */ - alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(int)); - io_tlb_list = memblock_alloc(alloc_size, PAGE_SIZE); - if (!io_tlb_list) - panic("%s: Failed to allocate %zu bytes align=0x%lx\n", - __func__, alloc_size, PAGE_SIZE); + if (swiotlb_force == SWIOTLB_NO_FORCE) + return 0; - alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)); - io_tlb_orig_addr = memblock_alloc(alloc_size, PAGE_SIZE); - if (!io_tlb_orig_addr) - panic("%s: Failed to allocate %zu bytes align=0x%lx\n", - __func__, alloc_size, PAGE_SIZE); + /* protect against double initialization */ + if (WARN_ON_ONCE(io_tlb_default_mem)) + return -ENOMEM; - alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(size_t)); - io_tlb_orig_size = memblock_alloc(alloc_size, PAGE_SIZE); - if (!io_tlb_orig_size) + alloc_size = PAGE_ALIGN(struct_size(mem, slots, nslabs)); + mem = memblock_alloc(alloc_size, PAGE_SIZE); + if (!mem) panic("%s: Failed to allocate %zu bytes align=0x%lx\n", __func__, alloc_size, PAGE_SIZE); - - for (i = 0; i < io_tlb_nslabs; i++) { - io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i); - io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; - io_tlb_orig_size[i] = 0; + mem->nslabs = nslabs; + mem->start = __pa(tlb); + mem->end = mem->start + bytes; + mem->index = 0; + spin_lock_init(&mem->lock); + for (i = 0; i < mem->nslabs; i++) { + mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); + mem->slots[i].orig_addr = INVALID_PHYS_ADDR; + mem->slots[i].alloc_size = 0; } - io_tlb_index = 0; - no_iotlb_memory = false; + io_tlb_default_mem = mem; if (verbose) swiotlb_print_info(); - - swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT); + swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT); return 0; } @@ -280,29 +211,24 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) void __init swiotlb_init(int verbose) { - size_t default_size = IO_TLB_DEFAULT_SIZE; - unsigned char *vstart; - unsigned long bytes; - - if (!io_tlb_nslabs) { - io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); - io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); - } - - bytes = io_tlb_nslabs << IO_TLB_SHIFT; + size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT); + void *tlb; - /* Get IO TLB memory from the low pages */ - vstart = memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE); - if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) + if (swiotlb_force == SWIOTLB_NO_FORCE) return; - if (io_tlb_start) { - memblock_free_early(io_tlb_start, - PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); - io_tlb_start = 0; - } + /* Get IO TLB memory from the low pages */ + tlb = memblock_alloc_low(bytes, PAGE_SIZE); + if (!tlb) + goto fail; + if (swiotlb_init_with_tbl(tlb, default_nslabs, verbose)) + goto fail_free_mem; + return; + +fail_free_mem: + memblock_free_early(__pa(tlb), bytes); +fail: pr_warn("Cannot allocate buffer"); - no_iotlb_memory = true; } /* @@ -313,22 +239,22 @@ swiotlb_init(int verbose) int swiotlb_late_init_with_default_size(size_t default_size) { - unsigned long bytes, req_nslabs = io_tlb_nslabs; + unsigned long nslabs = + ALIGN(default_size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); + unsigned long bytes; unsigned char *vstart = NULL; unsigned int order; int rc = 0; - if (!io_tlb_nslabs) { - io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); - io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); - } + if (swiotlb_force == SWIOTLB_NO_FORCE) + return 0; /* * Get IO TLB memory from the low pages */ - order = get_order(io_tlb_nslabs << IO_TLB_SHIFT); - io_tlb_nslabs = SLABS_PER_PAGE << order; - bytes = io_tlb_nslabs 
<< IO_TLB_SHIFT; + order = get_order(nslabs << IO_TLB_SHIFT); + nslabs = SLABS_PER_PAGE << order; + bytes = nslabs << IO_TLB_SHIFT; while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, @@ -338,134 +264,99 @@ swiotlb_late_init_with_default_size(size_t default_size) order--; } - if (!vstart) { - io_tlb_nslabs = req_nslabs; + if (!vstart) return -ENOMEM; - } + if (order != get_order(bytes)) { pr_warn("only able to allocate %ld MB\n", (PAGE_SIZE << order) >> 20); - io_tlb_nslabs = SLABS_PER_PAGE << order; + nslabs = SLABS_PER_PAGE << order; } - rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs); + rc = swiotlb_late_init_with_tbl(vstart, nslabs); if (rc) free_pages((unsigned long)vstart, order); return rc; } -static void swiotlb_cleanup(void) -{ - io_tlb_end = 0; - io_tlb_start = 0; - io_tlb_nslabs = 0; - max_segment = 0; -} - int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) { - unsigned long i, bytes; + unsigned long bytes = nslabs << IO_TLB_SHIFT, i; + struct io_tlb_mem *mem; - bytes = nslabs << IO_TLB_SHIFT; + if (swiotlb_force == SWIOTLB_NO_FORCE) + return 0; - io_tlb_nslabs = nslabs; - io_tlb_start = virt_to_phys(tlb); - io_tlb_end = io_tlb_start + bytes; + /* protect against double initialization */ + if (WARN_ON_ONCE(io_tlb_default_mem)) + return -ENOMEM; - set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); - memset(tlb, 0, bytes); + mem = (void *)__get_free_pages(GFP_KERNEL, + get_order(struct_size(mem, slots, nslabs))); + if (!mem) + return -ENOMEM; - /* - * Allocate and initialize the free list array. This array is used - * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE - * between io_tlb_start and io_tlb_end. - */ - io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL, - get_order(io_tlb_nslabs * sizeof(int))); - if (!io_tlb_list) - goto cleanup3; - - io_tlb_orig_addr = (phys_addr_t *) - __get_free_pages(GFP_KERNEL, - get_order(io_tlb_nslabs * - sizeof(phys_addr_t))); - if (!io_tlb_orig_addr) - goto cleanup4; - - io_tlb_orig_size = (size_t *) - __get_free_pages(GFP_KERNEL, - get_order(io_tlb_nslabs * - sizeof(size_t))); - if (!io_tlb_orig_size) - goto cleanup5; - - - for (i = 0; i < io_tlb_nslabs; i++) { - io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i); - io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; - io_tlb_orig_size[i] = 0; + mem->nslabs = nslabs; + mem->start = virt_to_phys(tlb); + mem->end = mem->start + bytes; + mem->index = 0; + mem->late_alloc = 1; + spin_lock_init(&mem->lock); + for (i = 0; i < mem->nslabs; i++) { + mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); + mem->slots[i].orig_addr = INVALID_PHYS_ADDR; + mem->slots[i].alloc_size = 0; } - io_tlb_index = 0; - no_iotlb_memory = false; - - swiotlb_print_info(); - late_alloc = 1; - - swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT); + set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); + memset(tlb, 0, bytes); + io_tlb_default_mem = mem; + swiotlb_print_info(); + swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT); return 0; - -cleanup5: - free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * - sizeof(phys_addr_t))); - -cleanup4: - free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * - sizeof(int))); - io_tlb_list = NULL; -cleanup3: - swiotlb_cleanup(); - return -ENOMEM; } void __init swiotlb_exit(void) { - if (!io_tlb_orig_addr) + struct io_tlb_mem *mem = io_tlb_default_mem; + size_t size; + + if (!mem) return; - if (late_alloc) { - free_pages((unsigned 
long)io_tlb_orig_size, - get_order(io_tlb_nslabs * sizeof(size_t))); - free_pages((unsigned long)io_tlb_orig_addr, - get_order(io_tlb_nslabs * sizeof(phys_addr_t))); - free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * - sizeof(int))); - free_pages((unsigned long)phys_to_virt(io_tlb_start), - get_order(io_tlb_nslabs << IO_TLB_SHIFT)); - } else { - memblock_free_late(__pa(io_tlb_orig_addr), - PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); - memblock_free_late(__pa(io_tlb_orig_size), - PAGE_ALIGN(io_tlb_nslabs * sizeof(size_t))); - memblock_free_late(__pa(io_tlb_list), - PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); - memblock_free_late(io_tlb_start, - PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); - } - swiotlb_cleanup(); + size = struct_size(mem, slots, mem->nslabs); + if (mem->late_alloc) + free_pages((unsigned long)mem, get_order(size)); + else + memblock_free_late(__pa(mem), PAGE_ALIGN(size)); + io_tlb_default_mem = NULL; } /* * Bounce: copy the swiotlb buffer from or back to the original dma location */ -static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir) +static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size, + enum dma_data_direction dir) { + struct io_tlb_mem *mem = io_tlb_default_mem; + int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT; + phys_addr_t orig_addr = mem->slots[index].orig_addr; + size_t alloc_size = mem->slots[index].alloc_size; unsigned long pfn = PFN_DOWN(orig_addr); unsigned char *vaddr = phys_to_virt(tlb_addr); + if (orig_addr == INVALID_PHYS_ADDR) + return; + + if (size > alloc_size) { + dev_WARN_ONCE(dev, 1, + "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n", + alloc_size, size); + size = alloc_size; + } + if (PageHighMem(pfn_to_page(pfn))) { /* The buffer does not have a mapping. 
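The page lives in highmem and has no permanent kernel virtual address.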
Map it in and copy */ unsigned int offset = orig_addr & ~PAGE_MASK; @@ -517,9 +408,9 @@ static inline unsigned long get_max_slots(unsigned long boundary_mask) return nr_slots(boundary_mask + 1); } -static unsigned int wrap_index(unsigned int index) +static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index) { - if (index >= io_tlb_nslabs) + if (index >= mem->nslabs) return 0; return index; } @@ -531,9 +422,10 @@ static unsigned int wrap_index(unsigned int index) static int find_slots(struct device *dev, phys_addr_t orig_addr, size_t alloc_size) { + struct io_tlb_mem *mem = io_tlb_default_mem; unsigned long boundary_mask = dma_get_seg_boundary(dev); dma_addr_t tbl_dma_addr = - phys_to_dma_unencrypted(dev, io_tlb_start) & boundary_mask; + phys_to_dma_unencrypted(dev, mem->start) & boundary_mask; unsigned long max_slots = get_max_slots(boundary_mask); unsigned int iotlb_align_mask = dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1); @@ -552,15 +444,15 @@ static int find_slots(struct device *dev, phys_addr_t orig_addr, if (alloc_size >= PAGE_SIZE) stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT)); - spin_lock_irqsave(&io_tlb_lock, flags); - if (unlikely(nslots > io_tlb_nslabs - io_tlb_used)) + spin_lock_irqsave(&mem->lock, flags); + if (unlikely(nslots > mem->nslabs - mem->used)) goto not_found; - index = wrap = wrap_index(ALIGN(io_tlb_index, stride)); + index = wrap = wrap_index(mem, ALIGN(mem->index, stride)); do { if ((slot_addr(tbl_dma_addr, index) & iotlb_align_mask) != (orig_addr & iotlb_align_mask)) { - index = wrap_index(index + 1); + index = wrap_index(mem, index + 1); continue; } @@ -572,34 +464,34 @@ static int find_slots(struct device *dev, phys_addr_t orig_addr, if (!iommu_is_span_boundary(index, nslots, nr_slots(tbl_dma_addr), max_slots)) { - if (io_tlb_list[index] >= nslots) + if (mem->slots[index].list >= nslots) goto found; } - index = wrap_index(index + stride); + index = wrap_index(mem, index + stride); } while (index != wrap); not_found: - spin_unlock_irqrestore(&io_tlb_lock, flags); + spin_unlock_irqrestore(&mem->lock, flags); return -1; found: for (i = index; i < index + nslots; i++) - io_tlb_list[i] = 0; + mem->slots[i].list = 0; for (i = index - 1; io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && - io_tlb_list[i]; i--) - io_tlb_list[i] = ++count; + mem->slots[i].list; i--) + mem->slots[i].list = ++count; /* * Update the indices to avoid searching in the next round. 
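* The next search starts just past this allocation and wraps back to slot 0 at the end of the pool.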
*/ - if (index + nslots < io_tlb_nslabs) - io_tlb_index = index + nslots; + if (index + nslots < mem->nslabs) + mem->index = index + nslots; else - io_tlb_index = 0; - io_tlb_used += nslots; + mem->index = 0; + mem->used += nslots; - spin_unlock_irqrestore(&io_tlb_lock, flags); + spin_unlock_irqrestore(&mem->lock, flags); return index; } @@ -607,11 +499,13 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, size_t mapping_size, size_t alloc_size, enum dma_data_direction dir, unsigned long attrs) { + struct io_tlb_mem *mem = io_tlb_default_mem; unsigned int offset = swiotlb_align_offset(dev, orig_addr); - unsigned int index, i; + unsigned int i; + int index; phys_addr_t tlb_addr; - if (no_iotlb_memory) + if (!mem) panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); if (mem_encrypt_active()) @@ -628,7 +522,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, if (!(attrs & DMA_ATTR_NO_WARN)) dev_warn_ratelimited(dev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n", - alloc_size, io_tlb_nslabs, io_tlb_used); + alloc_size, mem->nslabs, mem->used); return (phys_addr_t)DMA_MAPPING_ERROR; } @@ -638,49 +532,37 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, * needed. */ for (i = 0; i < nr_slots(alloc_size + offset); i++) { - io_tlb_orig_addr[index + i] = slot_addr(orig_addr, i); - io_tlb_orig_size[index+i] = alloc_size - (i << IO_TLB_SHIFT); + mem->slots[index + i].orig_addr = slot_addr(orig_addr, i); + mem->slots[index + i].alloc_size = + alloc_size - (i << IO_TLB_SHIFT); } - tlb_addr = slot_addr(io_tlb_start, index) + offset; + tlb_addr = slot_addr(mem->start, index) + offset; if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) - swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE); + swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE); return tlb_addr; } -static void validate_sync_size_and_truncate(struct device *hwdev, size_t orig_size, size_t *size) -{ - if (*size > orig_size) { - /* Warn and truncate mapping_size */ - dev_WARN_ONCE(hwdev, 1, - "Attempt for buffer overflow. Original size: %zu. Mapping size: %zu.\n", - orig_size, *size); - *size = orig_size; - } -} - /* * tlb_addr is the physical address of the bounce buffer to unmap. 
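* The slot index, and with it the recorded allocation size, is recovered from tlb_addr itself, which is why the alloc_size argument could be dropped from the signature below.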
*/ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, - size_t mapping_size, size_t alloc_size, - enum dma_data_direction dir, unsigned long attrs) + size_t mapping_size, enum dma_data_direction dir, + unsigned long attrs) { + struct io_tlb_mem *mem = io_tlb_default_mem; unsigned long flags; unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr); - int i, count, nslots = nr_slots(alloc_size + offset); - int index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT; - phys_addr_t orig_addr = io_tlb_orig_addr[index]; - - validate_sync_size_and_truncate(hwdev, io_tlb_orig_size[index], &mapping_size); + int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; + int nslots = nr_slots(mem->slots[index].alloc_size + offset); + int count, i; /* * First, sync the memory before unmapping the entry */ - if (orig_addr != INVALID_PHYS_ADDR && - !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) - swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) + swiotlb_bounce(hwdev, tlb_addr, mapping_size, DMA_FROM_DEVICE); /* * Return the buffer to the free list by setting the corresponding @@ -688,9 +570,9 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, * While returning the entries to the free list, we merge the entries * with slots below and above the pool being returned. */ - spin_lock_irqsave(&io_tlb_lock, flags); + spin_lock_irqsave(&mem->lock, flags); if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE)) - count = io_tlb_list[index + nslots]; + count = mem->slots[index + nslots].list; else count = 0; @@ -699,9 +581,9 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, * superceeding slots */ for (i = index + nslots - 1; i >= index; i--) { - io_tlb_list[i] = ++count; - io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; - io_tlb_orig_size[i] = 0; + mem->slots[i].list = ++count; + mem->slots[i].orig_addr = INVALID_PHYS_ADDR; + mem->slots[i].alloc_size = 0; } /* @@ -709,44 +591,29 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, * available (non zero) */ for (i = index - 1; - io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && io_tlb_list[i]; + io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; i--) - io_tlb_list[i] = ++count; - io_tlb_used -= nslots; - spin_unlock_irqrestore(&io_tlb_lock, flags); + mem->slots[i].list = ++count; + mem->used -= nslots; + spin_unlock_irqrestore(&mem->lock, flags); } -void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir, - enum dma_sync_target target) +void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir) { - int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT; - size_t orig_size = io_tlb_orig_size[index]; - phys_addr_t orig_addr = io_tlb_orig_addr[index]; - - if (orig_addr == INVALID_PHYS_ADDR) - return; + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) + swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE); + else + BUG_ON(dir != DMA_FROM_DEVICE); +} - validate_sync_size_and_truncate(hwdev, orig_size, &size); - - switch (target) { - case SYNC_FOR_CPU: - if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) - swiotlb_bounce(orig_addr, tlb_addr, - size, DMA_FROM_DEVICE); - else - BUG_ON(dir != DMA_TO_DEVICE); - break; - case SYNC_FOR_DEVICE: - if (likely(dir == 
DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) - swiotlb_bounce(orig_addr, tlb_addr, - size, DMA_TO_DEVICE); - else - BUG_ON(dir != DMA_FROM_DEVICE); - break; - default: - BUG(); - } +void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir) +{ + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) + swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE); + else + BUG_ON(dir != DMA_TO_DEVICE); } /* @@ -770,7 +637,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size, /* Ensure that the address returned is DMA'ble */ dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr); if (unlikely(!dma_capable(dev, dma_addr, size, true))) { - swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir, + swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); dev_WARN_ONCE(dev, 1, "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n", @@ -790,22 +657,21 @@ size_t swiotlb_max_mapping_size(struct device *dev) bool is_swiotlb_active(void) { - /* - * When SWIOTLB is initialized, even if io_tlb_start points to physical - * address zero, io_tlb_end surely doesn't. - */ - return io_tlb_end != 0; + return io_tlb_default_mem != NULL; } +EXPORT_SYMBOL_GPL(is_swiotlb_active); #ifdef CONFIG_DEBUG_FS static int __init swiotlb_create_debugfs(void) { - struct dentry *root; + struct io_tlb_mem *mem = io_tlb_default_mem; - root = debugfs_create_dir("swiotlb", NULL); - debugfs_create_ulong("io_tlb_nslabs", 0400, root, &io_tlb_nslabs); - debugfs_create_ulong("io_tlb_used", 0400, root, &io_tlb_used); + if (!mem) + return 0; + mem->debugfs = debugfs_create_dir("swiotlb", NULL); + debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); + debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used); return 0; } diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index f42ef868efd3..6284443b87ec 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -295,8 +295,8 @@ void irq_domain_update_bus_token(struct irq_domain *domain, EXPORT_SYMBOL_GPL(irq_domain_update_bus_token); /** - * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs - * @of_node: pointer to interrupt controller's device tree node. + * irq_domain_create_simple() - Register an irq_domain and optionally map a range of irqs + * @fwnode: firmware node for the interrupt controller * @size: total number of irqs in mapping * @first_irq: first number of irq block assigned to the domain, * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then @@ -312,15 +312,15 @@ EXPORT_SYMBOL_GPL(irq_domain_update_bus_token); * irqs get mapped dynamically on the fly. However, if the controller requires * static virq assignments (non-DT boot) then it will set that up correctly. 
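* Taking a struct fwnode_handle rather than a device_node lets non-OF firmware (ACPI tables or software nodes, for example) create simple domains through the same helper.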
*/ -struct irq_domain *irq_domain_add_simple(struct device_node *of_node, - unsigned int size, - unsigned int first_irq, - const struct irq_domain_ops *ops, - void *host_data) +struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, + unsigned int size, + unsigned int first_irq, + const struct irq_domain_ops *ops, + void *host_data) { struct irq_domain *domain; - domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); + domain = __irq_domain_add(fwnode, size, size, 0, ops, host_data); if (!domain) return NULL; @@ -328,7 +328,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node, if (IS_ENABLED(CONFIG_SPARSE_IRQ)) { /* attempt to allocated irq_descs */ int rc = irq_alloc_descs(first_irq, first_irq, size, - of_node_to_nid(of_node)); + of_node_to_nid(to_of_node(fwnode))); if (rc < 0) pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", first_irq); @@ -338,7 +338,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node, return domain; } -EXPORT_SYMBOL_GPL(irq_domain_add_simple); +EXPORT_SYMBOL_GPL(irq_domain_create_simple); /** * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain. diff --git a/kernel/sysctl.c b/kernel/sysctl.c index f91d327273c1..14edf84cc571 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2830,7 +2830,7 @@ static struct ctl_table vm_table[] = { #ifdef CONFIG_COMPACTION { .procname = "compact_memory", - .data = &sysctl_compact_memory, + .data = NULL, .maxlen = sizeof(int), .mode = 0200, .proc_handler = sysctl_compaction_handler, diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 792b5589ba64..2e8a3fde7104 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -5624,7 +5624,10 @@ int ftrace_regex_release(struct inode *inode, struct file *file) parser = &iter->parser; if (trace_parser_loaded(parser)) { - ftrace_match_records(iter->hash, parser->buffer, parser->idx); + int enable = !(iter->flags & FTRACE_ITER_NOTRACE); + + ftrace_process_regex(iter, parser->buffer, + parser->idx, enable); } trace_parser_put(parser); diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence index 78f50ccb3b45..e641add33947 100644 --- a/lib/Kconfig.kfence +++ b/lib/Kconfig.kfence @@ -7,6 +7,7 @@ menuconfig KFENCE bool "KFENCE: low-overhead sampling-based memory safety error detector" depends on HAVE_ARCH_KFENCE && (SLAB || SLUB) select STACKTRACE + select IRQ_WORK help KFENCE is a low-overhead sampling-based detector of heap out-of-bounds access, use-after-free, and invalid-free errors. KFENCE is designed diff --git a/lib/bitmap.c b/lib/bitmap.c index 9f4626a4c95f..74ceb02f45e3 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -3,17 +3,19 @@ * lib/bitmap.c * Helper functions for bitmap.h. 
*/ -#include <linux/export.h> -#include <linux/thread_info.h> -#include <linux/ctype.h> -#include <linux/errno.h> + #include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/bug.h> +#include <linux/ctype.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/export.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/thread_info.h> #include <linux/uaccess.h> #include <asm/page.h> @@ -1271,6 +1273,38 @@ void bitmap_free(const unsigned long *bitmap) } EXPORT_SYMBOL(bitmap_free); +static void devm_bitmap_free(void *data) +{ + unsigned long *bitmap = data; + + bitmap_free(bitmap); +} + +unsigned long *devm_bitmap_alloc(struct device *dev, + unsigned int nbits, gfp_t flags) +{ + unsigned long *bitmap; + int ret; + + bitmap = bitmap_alloc(nbits, flags); + if (!bitmap) + return NULL; + + ret = devm_add_action_or_reset(dev, devm_bitmap_free, bitmap); + if (ret) + return NULL; + + return bitmap; +} +EXPORT_SYMBOL_GPL(devm_bitmap_alloc); + +unsigned long *devm_bitmap_zalloc(struct device *dev, + unsigned int nbits, gfp_t flags) +{ + return devm_bitmap_alloc(dev, nbits, flags | __GFP_ZERO); +} +EXPORT_SYMBOL_GPL(devm_bitmap_zalloc); + #if BITS_PER_LONG == 64 /** * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap diff --git a/lib/cmdline.c b/lib/cmdline.c index 5d474c626e24..5546bf588780 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c @@ -272,3 +272,4 @@ char *next_arg(char *args, char **param, char **val) /* Chew up trailing spaces. */ return skip_spaces(args); } +EXPORT_SYMBOL(next_arg); diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index c70d6347afa2..921d0a654243 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -396,7 +396,7 @@ static int ddebug_parse_query(char *words[], int nwords, /* tail :$info is function or line-range */ fline = strchr(query->filename, ':'); if (!fline) - break; + continue; *fline++ = '\0'; if (isalpha(*fline) || *fline == '*' || *fline == '?') { /* take as function name */ diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 61228a6c69f8..c701b7a187f2 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -5,6 +5,7 @@ #include <linux/fault-inject-usercopy.h> #include <linux/uio.h> #include <linux/pagemap.h> +#include <linux/highmem.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/splice.h> @@ -507,13 +508,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, } EXPORT_SYMBOL(iov_iter_init); -static void memzero_page(struct page *page, size_t offset, size_t len) -{ - char *addr = kmap_atomic(page); - memset(addr + offset, 0, len); - kunmap_atomic(addr); -} - static inline bool allocated(struct pipe_buffer *buf) { return buf->ops == &default_pipe_buf_ops; diff --git a/mm/Kconfig b/mm/Kconfig index 3636da27c385..02d44e3420f5 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -148,6 +148,9 @@ config MEMORY_ISOLATION config HAVE_BOOTMEM_INFO_NODE def_bool n +config ARCH_ENABLE_MEMORY_HOTPLUG + bool + # eventually, we can have this option just 'select SPARSEMEM' config MEMORY_HOTPLUG bool "Allow for memory hot-add" @@ -176,12 +179,20 @@ config MEMORY_HOTPLUG_DEFAULT_ONLINE Say N here if you want the default policy to keep all hot-plugged memory blocks in 'offline' state. 
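The devm_bitmap_alloc()/devm_bitmap_zalloc() helpers added to lib/bitmap.c above are device-managed: the bitmap is freed automatically when the owning device is unbound, so drivers can drop explicit bitmap_free() calls from their error and remove paths. A minimal probe-time sketch; the driver name, private struct and line count are hypothetical:

#include <linux/bitmap.h>
#include <linux/platform_device.h>

#define FOO_NR_LINES	64	/* hypothetical number of lines */

struct foo_priv {
	unsigned long *line_mask;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Zeroed on allocation; freed automatically on driver unbind. */
	priv->line_mask = devm_bitmap_zalloc(&pdev->dev, FOO_NR_LINES,
					     GFP_KERNEL);
	if (!priv->line_mask)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}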
+config ARCH_ENABLE_MEMORY_HOTREMOVE + bool + config MEMORY_HOTREMOVE bool "Allow for memory hot remove" select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64) depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE depends on MIGRATION +config MHP_MEMMAP_ON_MEMORY + def_bool y + depends on MEMORY_HOTPLUG && SPARSEMEM_VMEMMAP + depends on ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE + # Heavily threaded applications may benefit from splitting the mm-wide # page_table_lock, so that faults on different parts of the user address # space can be handled with less contention: split it at this NR_CPUS. @@ -273,6 +284,13 @@ config ARCH_ENABLE_HUGEPAGE_MIGRATION config ARCH_ENABLE_THP_MIGRATION bool +config HUGETLB_PAGE_SIZE_VARIABLE + def_bool n + help + Allows the pageblock_order value to be dynamic instead of just standard + HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available + on a platform. + config CONTIG_ALLOC def_bool (MEMORY_ISOLATION && COMPACTION) || CMA @@ -511,6 +529,13 @@ config CMA_DEBUGFS help Turns on the DebugFS interface for CMA. +config CMA_SYSFS + bool "CMA information through sysfs interface" + depends on CMA && SYSFS + help + This option exposes some sysfs attributes to get information + from CMA. + config CMA_AREAS int "Maximum count of the CMA areas" depends on CMA @@ -758,6 +783,9 @@ config IDLE_PAGE_TRACKING See Documentation/admin-guide/mm/idle_page_tracking.rst for more details. +config ARCH_HAS_CACHE_LINE_SIZE + bool + config ARCH_HAS_PTE_DEVMAP bool diff --git a/mm/Makefile b/mm/Makefile index c0135e385984..bf71e295e9f6 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -58,9 +58,13 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \ page-alloc-y := page_alloc.o page-alloc-$(CONFIG_SHUFFLE_PAGE_ALLOCATOR) += shuffle.o +# Give 'memory_hotplug' its own module-parameter namespace +memory-hotplug-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o + obj-y += page-alloc.o obj-y += init-mm.o obj-y += memblock.o +obj-y += $(memory-hotplug-y) ifdef CONFIG_MMU obj-$(CONFIG_ADVISE_SYSCALLS) += madvise.o @@ -83,7 +87,6 @@ obj-$(CONFIG_SLUB) += slub.o obj-$(CONFIG_KASAN) += kasan/ obj-$(CONFIG_KFENCE) += kfence/ obj-$(CONFIG_FAILSLAB) += failslab.o -obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o obj-$(CONFIG_MEMTEST) += memtest.o obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o @@ -109,6 +112,7 @@ obj-$(CONFIG_CMA) += cma.o obj-$(CONFIG_MEMORY_BALLOON) += balloon_compaction.o obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o +obj-$(CONFIG_CMA_SYSFS) += cma_sysfs.o obj-$(CONFIG_USERFAULTFD) += userfaultfd.o obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o @@ -24,7 +24,6 @@ #include <linux/memblock.h> #include <linux/err.h> #include <linux/mm.h> -#include <linux/mutex.h> #include <linux/sizes.h> #include <linux/slab.h> #include <linux/log2.h> @@ -80,16 +79,17 @@ static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma, } static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, - unsigned int count) + unsigned long count) { unsigned long bitmap_no, bitmap_count; + unsigned long flags; bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit; bitmap_count = cma_bitmap_pages_to_bits(cma, count); - mutex_lock(&cma->lock); + spin_lock_irqsave(&cma->lock, flags); bitmap_clear(cma->bitmap, bitmap_no, bitmap_count); - mutex_unlock(&cma->lock); + spin_unlock_irqrestore(&cma->lock, flags); } static void __init cma_activate_area(struct cma 
*cma) @@ -118,7 +118,7 @@ static void __init cma_activate_area(struct cma *cma) pfn += pageblock_nr_pages) init_cma_reserved_pageblock(pfn_to_page(pfn)); - mutex_init(&cma->lock); + spin_lock_init(&cma->lock); #ifdef CONFIG_CMA_DEBUGFS INIT_HLIST_HEAD(&cma->mem_head); @@ -392,7 +392,7 @@ static void cma_debug_show_areas(struct cma *cma) unsigned long nr_part, nr_total = 0; unsigned long nbits = cma_bitmap_maxno(cma); - mutex_lock(&cma->lock); + spin_lock_irq(&cma->lock); pr_info("number of available pages: "); for (;;) { next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start); @@ -407,7 +407,7 @@ static void cma_debug_show_areas(struct cma *cma) start = next_zero_bit + nr_zero; } pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count); - mutex_unlock(&cma->lock); + spin_unlock_irq(&cma->lock); } #else static inline void cma_debug_show_areas(struct cma *cma) { } @@ -423,25 +423,27 @@ static inline void cma_debug_show_areas(struct cma *cma) { } * This function allocates part of contiguous memory on specific * contiguous memory area. */ -struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, - bool no_warn) +struct page *cma_alloc(struct cma *cma, unsigned long count, + unsigned int align, bool no_warn) { unsigned long mask, offset; unsigned long pfn = -1; unsigned long start = 0; unsigned long bitmap_maxno, bitmap_no, bitmap_count; - size_t i; + unsigned long i; struct page *page = NULL; int ret = -ENOMEM; if (!cma || !cma->count || !cma->bitmap) - return NULL; + goto out; - pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma, + pr_debug("%s(cma %p, count %lu, align %d)\n", __func__, (void *)cma, count, align); if (!count) - return NULL; + goto out; + + trace_cma_alloc_start(cma->name, count, align); mask = cma_bitmap_aligned_mask(cma, align); offset = cma_bitmap_aligned_offset(cma, align); @@ -449,15 +451,15 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, bitmap_count = cma_bitmap_pages_to_bits(cma, count); if (bitmap_count > bitmap_maxno) - return NULL; + goto out; for (;;) { - mutex_lock(&cma->lock); + spin_lock_irq(&cma->lock); bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap, bitmap_maxno, start, bitmap_count, mask, offset); if (bitmap_no >= bitmap_maxno) { - mutex_unlock(&cma->lock); + spin_unlock_irq(&cma->lock); break; } bitmap_set(cma->bitmap, bitmap_no, bitmap_count); @@ -466,7 +468,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, * our exclusive use. If the migration fails we will take the * lock again and unmark it. 
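* Keeping the bit set while the lock is dropped means concurrent allocators skip this range instead of racing for it.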
*/ - mutex_unlock(&cma->lock); + spin_unlock_irq(&cma->lock); pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, @@ -483,11 +485,14 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, pr_debug("%s(): memory range at %p is busy, retrying\n", __func__, pfn_to_page(pfn)); + + trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn), + count, align); /* try again with a bit different memory target */ start = bitmap_no + mask + 1; } - trace_cma_alloc(pfn, page, count, align); + trace_cma_alloc_finish(cma->name, pfn, page, count, align); /* * CMA can allocate multiple page blocks, which results in different @@ -500,12 +505,22 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, } if (ret && !no_warn) { - pr_err("%s: %s: alloc failed, req-size: %zu pages, ret: %d\n", - __func__, cma->name, count, ret); + pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n", + __func__, cma->name, count, ret); cma_debug_show_areas(cma); } pr_debug("%s(): returned %p\n", __func__, page); +out: + if (page) { + count_vm_event(CMA_ALLOC_SUCCESS); + cma_sysfs_account_success_pages(cma, count); + } else { + count_vm_event(CMA_ALLOC_FAIL); + if (cma) + cma_sysfs_account_fail_pages(cma, count); + } + return page; } @@ -519,14 +534,15 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, * It returns false when provided pages do not belong to contiguous area and * true otherwise. */ -bool cma_release(struct cma *cma, const struct page *pages, unsigned int count) +bool cma_release(struct cma *cma, const struct page *pages, + unsigned long count) { unsigned long pfn; if (!cma || !pages) return false; - pr_debug("%s(page %p, count %u)\n", __func__, (void *)pages, count); + pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count); pfn = page_to_pfn(pages); @@ -537,7 +553,7 @@ bool cma_release(struct cma *cma, const struct page *pages, unsigned int count) free_contig_range(pfn, count); cma_clear_bitmap(cma, pfn, count); - trace_cma_release(pfn, pages, count); + trace_cma_release(cma->name, pfn, pages, count); return true; } @@ -3,19 +3,33 @@ #define __MM_CMA_H__ #include <linux/debugfs.h> +#include <linux/kobject.h> + +struct cma_kobject { + struct kobject kobj; + struct cma *cma; +}; struct cma { unsigned long base_pfn; unsigned long count; unsigned long *bitmap; unsigned int order_per_bit; /* Order of pages represented by one bit */ - struct mutex lock; + spinlock_t lock; #ifdef CONFIG_CMA_DEBUGFS struct hlist_head mem_head; spinlock_t mem_head_lock; struct debugfs_u32_array dfs_bitmap; #endif char name[CMA_MAX_NAME]; +#ifdef CONFIG_CMA_SYSFS + /* the number of CMA page successful allocations */ + atomic64_t nr_pages_succeeded; + /* the number of CMA page allocation failures */ + atomic64_t nr_pages_failed; + /* kobject requires dynamic object */ + struct cma_kobject *cma_kobj; +#endif }; extern struct cma cma_areas[MAX_CMA_AREAS]; @@ -26,4 +40,13 @@ static inline unsigned long cma_bitmap_maxno(struct cma *cma) return cma->count >> cma->order_per_bit; } +#ifdef CONFIG_CMA_SYSFS +void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages); +void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages); +#else +static inline void cma_sysfs_account_success_pages(struct cma *cma, + unsigned long nr_pages) {}; +static inline void cma_sysfs_account_fail_pages(struct cma *cma, + unsigned long nr_pages) {}; +#endif #endif diff --git 
a/mm/cma_debug.c b/mm/cma_debug.c index d5bf8aa34fdc..2e7704955f4f 100644 --- a/mm/cma_debug.c +++ b/mm/cma_debug.c @@ -36,10 +36,10 @@ static int cma_used_get(void *data, u64 *val) struct cma *cma = data; unsigned long used; - mutex_lock(&cma->lock); + spin_lock_irq(&cma->lock); /* pages counter is smaller than sizeof(int) */ used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma)); - mutex_unlock(&cma->lock); + spin_unlock_irq(&cma->lock); *val = (u64)used << cma->order_per_bit; return 0; @@ -53,7 +53,7 @@ static int cma_maxchunk_get(void *data, u64 *val) unsigned long start, end = 0; unsigned long bitmap_maxno = cma_bitmap_maxno(cma); - mutex_lock(&cma->lock); + spin_lock_irq(&cma->lock); for (;;) { start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end); if (start >= bitmap_maxno) @@ -61,7 +61,7 @@ static int cma_maxchunk_get(void *data, u64 *val) end = find_next_bit(cma->bitmap, bitmap_maxno, start); maxchunk = max(end - start, maxchunk); } - mutex_unlock(&cma->lock); + spin_unlock_irq(&cma->lock); *val = (u64)maxchunk << cma->order_per_bit; return 0; diff --git a/mm/cma_sysfs.c b/mm/cma_sysfs.c new file mode 100644 index 000000000000..eb2f39caff59 --- /dev/null +++ b/mm/cma_sysfs.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CMA SysFS Interface + * + * Copyright (c) 2021 Minchan Kim <[email protected]> + */ + +#include <linux/cma.h> +#include <linux/kernel.h> +#include <linux/slab.h> + +#include "cma.h" + +#define CMA_ATTR_RO(_name) \ + static struct kobj_attribute _name##_attr = __ATTR_RO(_name) + +void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages) +{ + atomic64_add(nr_pages, &cma->nr_pages_succeeded); +} + +void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages) +{ + atomic64_add(nr_pages, &cma->nr_pages_failed); +} + +static inline struct cma *cma_from_kobj(struct kobject *kobj) +{ + return container_of(kobj, struct cma_kobject, kobj)->cma; +} + +static ssize_t alloc_pages_success_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct cma *cma = cma_from_kobj(kobj); + + return sysfs_emit(buf, "%llu\n", + atomic64_read(&cma->nr_pages_succeeded)); +} +CMA_ATTR_RO(alloc_pages_success); + +static ssize_t alloc_pages_fail_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct cma *cma = cma_from_kobj(kobj); + + return sysfs_emit(buf, "%llu\n", atomic64_read(&cma->nr_pages_failed)); +} +CMA_ATTR_RO(alloc_pages_fail); + +static void cma_kobj_release(struct kobject *kobj) +{ + struct cma *cma = cma_from_kobj(kobj); + struct cma_kobject *cma_kobj = cma->cma_kobj; + + kfree(cma_kobj); + cma->cma_kobj = NULL; +} + +static struct attribute *cma_attrs[] = { + &alloc_pages_success_attr.attr, + &alloc_pages_fail_attr.attr, + NULL, +}; +ATTRIBUTE_GROUPS(cma); + +static struct kobj_type cma_ktype = { + .release = cma_kobj_release, + .sysfs_ops = &kobj_sysfs_ops, + .default_groups = cma_groups, +}; + +static int __init cma_sysfs_init(void) +{ + struct kobject *cma_kobj_root; + struct cma_kobject *cma_kobj; + struct cma *cma; + int i, err; + + cma_kobj_root = kobject_create_and_add("cma", mm_kobj); + if (!cma_kobj_root) + return -ENOMEM; + + for (i = 0; i < cma_area_count; i++) { + cma_kobj = kzalloc(sizeof(*cma_kobj), GFP_KERNEL); + if (!cma_kobj) { + err = -ENOMEM; + goto out; + } + + cma = &cma_areas[i]; + cma->cma_kobj = cma_kobj; + cma_kobj->cma = cma; + err = kobject_init_and_add(&cma_kobj->kobj, &cma_ktype, + cma_kobj_root, "%s", cma->name); + if (err) { + 
kobject_put(&cma_kobj->kobj); + goto out; + } + } + + return 0; +out: + while (--i >= 0) { + cma = &cma_areas[i]; + kobject_put(&cma->cma_kobj->kobj); + } + kobject_put(cma_kobj_root); + + return err; +} +subsys_initcall(cma_sysfs_init); diff --git a/mm/compaction.c b/mm/compaction.c index e04f4476e68e..3a6c6b821f80 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -787,15 +787,14 @@ static bool too_many_isolated(pg_data_t *pgdat) * * Isolate all pages that can be migrated from the range specified by * [low_pfn, end_pfn). The range is expected to be within same pageblock. - * Returns zero if there is a fatal signal pending, otherwise PFN of the - * first page that was not scanned (which may be both less, equal to or more - * than end_pfn). + * Returns errno, like -EAGAIN or -EINTR in case of e.g. a pending signal or congestion, + * -ENOMEM in case we could not allocate a page, or 0. + * cc->migrate_pfn will contain the next pfn to scan. * * The pages are isolated on cc->migratepages list (not required to be empty), - * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field - * is neither read nor updated. + * and cc->nr_migratepages is updated accordingly. */ -static unsigned long +static int isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, unsigned long end_pfn, isolate_mode_t isolate_mode) { @@ -809,6 +808,9 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, bool skip_on_failure = false; unsigned long next_skip_pfn = 0; bool skip_updated = false; + int ret = 0; + + cc->migrate_pfn = low_pfn; /* * Ensure that there are not too many pages isolated from the LRU @@ -818,16 +820,16 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, while (unlikely(too_many_isolated(pgdat))) { /* stop isolation if there are still pages not migrated */ if (cc->nr_migratepages) - return 0; + return -EAGAIN; /* async migration should just abort */ if (cc->mode == MIGRATE_ASYNC) - return 0; + return -EAGAIN; congestion_wait(BLK_RW_ASYNC, HZ/10); if (fatal_signal_pending(current)) - return 0; + return -EINTR; } cond_resched(); @@ -875,8 +877,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, if (fatal_signal_pending(current)) { cc->contended = true; + ret = -EINTR; - low_pfn = 0; goto fatal_pending; } @@ -904,6 +906,38 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, valid_page = page; } + if (PageHuge(page) && cc->alloc_contig) { + ret = isolate_or_dissolve_huge_page(page, &cc->migratepages); + + /* + * Fail isolation in case isolate_or_dissolve_huge_page() + * reports an error. In case of -ENOMEM, abort right away. + */ + if (ret < 0) { + /* Do not report -EBUSY down the chain */ + if (ret == -EBUSY) + ret = 0; + low_pfn += (1UL << compound_order(page)) - 1; + goto isolate_fail; + } + + if (PageHuge(page)) { + /* + * Hugepage was successfully isolated and placed + * on the cc->migratepages list. + */ + low_pfn += compound_nr(page) - 1; + goto isolate_success_no_list; + } + + /* + * Ok, the hugepage was dissolved. Now these pages are + * Buddy and cannot be re-allocated because they are + * isolated. Fall-through as the check below handles + * Buddy pages. + */ + } + /* * Skip if free.
We read page order here without zone lock * which is generally unsafe, but the race window is small and @@ -1037,6 +1071,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, isolate_success: list_add(&page->lru, &cc->migratepages); +isolate_success_no_list: cc->nr_migratepages += compound_nr(page); nr_isolated += compound_nr(page); @@ -1063,7 +1098,7 @@ isolate_fail_put: put_page(page); isolate_fail: - if (!skip_on_failure) + if (!skip_on_failure && ret != -ENOMEM) continue; /* @@ -1089,6 +1124,9 @@ isolate_fail: */ next_skip_pfn += 1UL << cc->order; } + + if (ret == -ENOMEM) + break; } /* @@ -1130,7 +1168,9 @@ fatal_pending: if (nr_isolated) count_compact_events(COMPACTISOLATED, nr_isolated); - return low_pfn; + cc->migrate_pfn = low_pfn; + + return ret; } /** @@ -1139,15 +1179,15 @@ fatal_pending: * @start_pfn: The first PFN to start isolating. * @end_pfn: The one-past-last PFN. * - * Returns zero if isolation fails fatally due to e.g. pending signal. - * Otherwise, function returns one-past-the-last PFN of isolated page - * (which may be greater than end_pfn if end fell in a middle of a THP page). + * Returns -EAGAIN when contended, -EINTR in case of a pending signal, -ENOMEM + * in case we could not allocate a page, or 0. */ -unsigned long +int isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn, block_start_pfn, block_end_pfn; + int ret = 0; /* Scan block by block. First and last block may be incomplete */ pfn = start_pfn; @@ -1166,17 +1206,17 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, block_end_pfn, cc->zone)) continue; - pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, - ISOLATE_UNEVICTABLE); + ret = isolate_migratepages_block(cc, pfn, block_end_pfn, + ISOLATE_UNEVICTABLE); - if (!pfn) + if (ret) break; if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX) break; } - return pfn; + return ret; } #endif /* CONFIG_COMPACTION || CONFIG_CMA */ @@ -1847,7 +1887,7 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc) */ for (; block_end_pfn <= cc->free_pfn; fast_find_block = false, - low_pfn = block_end_pfn, + cc->migrate_pfn = low_pfn = block_end_pfn, block_start_pfn = block_end_pfn, block_end_pfn += pageblock_nr_pages) { @@ -1889,10 +1929,8 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc) } /* Perform the isolation */ - low_pfn = isolate_migratepages_block(cc, low_pfn, - block_end_pfn, isolate_mode); - - if (!low_pfn) + if (isolate_migratepages_block(cc, low_pfn, block_end_pfn, + isolate_mode)) return ISOLATE_ABORT; /* @@ -1903,9 +1941,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc) break; } - /* Record where migration scanner will be restarted. */ - cc->migrate_pfn = low_pfn; - return cc->nr_migratepages ?
ISOLATE_SUCCESS : ISOLATE_NONE; } @@ -2319,7 +2354,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn, sync); - migrate_prep_local(); + /* lru_add_drain_all() could be expensive as it involves other CPUs */ + lru_add_drain(); while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) { int err; @@ -2494,6 +2530,14 @@ static enum compact_result compact_zone_order(struct zone *zone, int order, */ WRITE_ONCE(current->capture_control, NULL); *capture = READ_ONCE(capc.page); + /* + * Technically, it is also possible that compaction is skipped but + * the page is still captured out of luck (an IRQ came in and freed the page). + * Returning COMPACT_SUCCESS in such cases helps in properly accounting + * the COMPACT[STALL|FAIL] events when compaction is skipped. + */ + if (*capture) + ret = COMPACT_SUCCESS; return ret; } @@ -2657,9 +2701,6 @@ static void compact_nodes(void) compact_node(nid); } -/* The written value is actually unused, all memory is compacted */ -int sysctl_compact_memory; - /* * Tunable for proactive compaction. It determines how * aggressively the kernel should compact memory in the @@ -2844,7 +2885,7 @@ void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) */ static int kcompactd(void *p) { - pg_data_t *pgdat = (pg_data_t*)p; + pg_data_t *pgdat = (pg_data_t *)p; struct task_struct *tsk = current; unsigned int proactive_defer = 0; diff --git a/mm/filemap.c b/mm/filemap.c index 5be57ba01d33..7fadf211643c 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -142,17 +142,6 @@ static void page_cache_delete(struct address_space *mapping, page->mapping = NULL; /* Leave page->index set: truncation lookup relies upon it */ - - if (shadow) { - mapping->nrexceptional += nr; - /* - * Make sure the nrexceptional update is committed before - * the nrpages update so that final truncate racing - * with reclaim does not see both counters 0 at the - * same time and miss a shadow entry. - */ - smp_wmb(); - } mapping->nrpages -= nr; } @@ -629,9 +618,6 @@ EXPORT_SYMBOL(filemap_fdatawait_keep_errors); /* Returns true if writeback might be needed or already in progress.
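* With DAX exceptional entries no longer tracked in the mapping, checking nrpages alone is sufficient.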
*/ static bool mapping_needs_writeback(struct address_space *mapping) { - if (dax_mapping(mapping)) - return mapping->nrexceptional; - return mapping->nrpages; } @@ -925,8 +911,6 @@ noinline int __add_to_page_cache_locked(struct page *page, if (xas_error(&xas)) goto unlock; - if (old) - mapping->nrexceptional--; mapping->nrpages++; /* hugetlb pages do not participate in page cache accounting */ @@ -3283,7 +3267,7 @@ const struct vm_operations_struct generic_file_vm_ops = { /* This is used for a general mmap of a disk file */ -int generic_file_mmap(struct file * file, struct vm_area_struct * vma) +int generic_file_mmap(struct file *file, struct vm_area_struct *vma) { struct address_space *mapping = file->f_mapping; @@ -3308,11 +3292,11 @@ vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } -int generic_file_mmap(struct file * file, struct vm_area_struct * vma) +int generic_file_mmap(struct file *file, struct vm_area_struct *vma) { return -ENOSYS; } -int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) +int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) { return -ENOSYS; } @@ -3740,7 +3724,7 @@ EXPORT_SYMBOL(generic_perform_write); ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; - struct address_space * mapping = file->f_mapping; + struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; ssize_t written = 0; ssize_t err; diff --git a/mm/frontswap.c b/mm/frontswap.c index 2183a56c7874..130e301c5ac0 100644 --- a/mm/frontswap.c +++ b/mm/frontswap.c @@ -60,16 +60,20 @@ static u64 frontswap_succ_stores; static u64 frontswap_failed_stores; static u64 frontswap_invalidates; -static inline void inc_frontswap_loads(void) { +static inline void inc_frontswap_loads(void) +{ data_race(frontswap_loads++); } -static inline void inc_frontswap_succ_stores(void) { +static inline void inc_frontswap_succ_stores(void) +{ data_race(frontswap_succ_stores++); } -static inline void inc_frontswap_failed_stores(void) { +static inline void inc_frontswap_failed_stores(void) +{ data_race(frontswap_failed_stores++); } -static inline void inc_frontswap_invalidates(void) { +static inline void inc_frontswap_invalidates(void) +{ data_race(frontswap_invalidates++); } #else @@ -87,11 +87,12 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page, int orig_refs = refs; /* - * Can't do FOLL_LONGTERM + FOLL_PIN with CMA in the gup fast - * path, so fail and let the caller fall back to the slow path. + * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a + * right zone, so fail and let the caller fall back to the slow + * path. */ - if (unlikely(flags & FOLL_LONGTERM) && - is_migrate_cma_page(page)) + if (unlikely((flags & FOLL_LONGTERM) && + !is_pinnable_page(page))) return NULL; /* @@ -1527,7 +1528,7 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, { struct vm_area_struct *vma; unsigned long vm_flags; - int i; + long i; /* calculate required read or write permissions. * If FOLL_FORCE is set, we only require the "MAY" flags. 
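The mm/gup.c hunk below generalizes the old CMA-only logic to any non-pinnable page: __gup_longterm_locked() now loops, migrating pages out of CMA and ZONE_MOVABLE, until every page returned by a FOLL_LONGTERM pin is in a pinnable zone. A caller-side sketch of the guarantee this enforces; the wrapper and its error policy are made up for illustration:

/*
 * Pin a user buffer for long-lived DMA. On success, none of the pages
 * sit in CMA or ZONE_MOVABLE, so the pin cannot block contiguous
 * allocations or memory hot-unplug.
 */
static int foo_pin_user_buffer(unsigned long uaddr, int nr_pages,
			       struct page **pages)
{
	int pinned;

	pinned = pin_user_pages_fast(uaddr, nr_pages,
				     FOLL_WRITE | FOLL_LONGTERM, pages);
	if (pinned < 0)
		return pinned;		/* e.g. -ENOMEM if migration failed */
	if (pinned != nr_pages) {	/* partial pin: release and bail out */
		unpin_user_pages(pages, pinned);
		return -EFAULT;
	}
	return 0;
}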
@@ -1600,112 +1601,92 @@ struct page *get_dump_page(unsigned long addr) } #endif /* CONFIG_ELF_CORE */ -#ifdef CONFIG_CMA -static long check_and_migrate_cma_pages(struct mm_struct *mm, - unsigned long start, - unsigned long nr_pages, - struct page **pages, - struct vm_area_struct **vmas, - unsigned int gup_flags) +#ifdef CONFIG_MIGRATION +/* + * Check whether all pages are pinnable; if so, return the number of pages. If some + * pages are not pinnable, migrate them, and unpin all pages. Return zero if + * pages were migrated, or if some pages were not successfully isolated. + * Return negative error if migration fails. + */ +static long check_and_migrate_movable_pages(unsigned long nr_pages, + struct page **pages, + unsigned int gup_flags) { unsigned long i; - unsigned long step; + unsigned long isolation_error_count = 0; bool drain_allow = true; - bool migrate_allow = true; - LIST_HEAD(cma_page_list); - long ret = nr_pages; + LIST_HEAD(movable_page_list); + long ret = 0; + struct page *prev_head = NULL; + struct page *head; struct migration_target_control mtc = { .nid = NUMA_NO_NODE, - .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN, + .gfp_mask = GFP_USER | __GFP_NOWARN, }; -check_again: - for (i = 0; i < nr_pages;) { - - struct page *head = compound_head(pages[i]); - - /* - * gup may start from a tail page. Advance step by the left - * part. - */ - step = compound_nr(head) - (pages[i] - head); + for (i = 0; i < nr_pages; i++) { + head = compound_head(pages[i]); + if (head == prev_head) + continue; + prev_head = head; /* - * If we get a page from the CMA zone, since we are going to - * be pinning these entries, we might as well move them out - * of the CMA zone if possible. + * If we get a movable page, since we are going to be pinning + * these entries, try to move them out if possible. */ - if (is_migrate_cma_page(head)) { - if (PageHuge(head)) - isolate_huge_page(head, &cma_page_list); - else { + if (!is_pinnable_page(head)) { + if (PageHuge(head)) { + if (!isolate_huge_page(head, &movable_page_list)) + isolation_error_count++; + } else { if (!PageLRU(head) && drain_allow) { lru_add_drain_all(); drain_allow = false; } - if (!isolate_lru_page(head)) { - list_add_tail(&head->lru, &cma_page_list); - mod_node_page_state(page_pgdat(head), - NR_ISOLATED_ANON + - page_is_file_lru(head), - thp_nr_pages(head)); + if (isolate_lru_page(head)) { + isolation_error_count++; + continue; } + list_add_tail(&head->lru, &movable_page_list); + mod_node_page_state(page_pgdat(head), + NR_ISOLATED_ANON + + page_is_file_lru(head), + thp_nr_pages(head)); } } - - i += step; } - if (!list_empty(&cma_page_list)) { - /* - * drop the above get_user_pages reference. - */ - if (gup_flags & FOLL_PIN) - unpin_user_pages(pages, nr_pages); - else - for (i = 0; i < nr_pages; i++) - put_page(pages[i]); - - if (migrate_pages(&cma_page_list, alloc_migration_target, NULL, - (unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) { - /* - * some of the pages failed migration. Do get_user_pages - * without migration. - */ - migrate_allow = false; + /* + * If the list is empty and there were no isolation errors, all pages + * are in the correct zone. + */ + if (list_empty(&movable_page_list) && !isolation_error_count) + return nr_pages; - if (!list_empty(&cma_page_list)) - putback_movable_pages(&cma_page_list); - } - /* - * We did migrate all the pages, Try to get the page references - * again migrating any new CMA pages which we failed to isolate - * earlier.
- */ - ret = __get_user_pages_locked(mm, start, nr_pages, - pages, vmas, NULL, - gup_flags); - - if ((ret > 0) && migrate_allow) { - nr_pages = ret; - drain_allow = true; - goto check_again; - } + if (gup_flags & FOLL_PIN) { + unpin_user_pages(pages, nr_pages); + } else { + for (i = 0; i < nr_pages; i++) + put_page(pages[i]); + } + if (!list_empty(&movable_page_list)) { + ret = migrate_pages(&movable_page_list, alloc_migration_target, + NULL, (unsigned long)&mtc, MIGRATE_SYNC, + MR_LONGTERM_PIN); + if (ret && !list_empty(&movable_page_list)) + putback_movable_pages(&movable_page_list); } - return ret; + return ret > 0 ? -ENOMEM : ret; } #else -static long check_and_migrate_cma_pages(struct mm_struct *mm, - unsigned long start, - unsigned long nr_pages, - struct page **pages, - struct vm_area_struct **vmas, - unsigned int gup_flags) +static long check_and_migrate_movable_pages(unsigned long nr_pages, + struct page **pages, + unsigned int gup_flags) { return nr_pages; } -#endif /* CONFIG_CMA */ +#endif /* CONFIG_MIGRATION */ /* * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which @@ -1718,21 +1699,22 @@ static long __gup_longterm_locked(struct mm_struct *mm, struct vm_area_struct **vmas, unsigned int gup_flags) { - unsigned long flags = 0; + unsigned int flags; long rc; - if (gup_flags & FOLL_LONGTERM) - flags = memalloc_nocma_save(); - - rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL, - gup_flags); + if (!(gup_flags & FOLL_LONGTERM)) + return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, + NULL, gup_flags); + flags = memalloc_pin_save(); + do { + rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, + NULL, gup_flags); + if (rc <= 0) + break; + rc = check_and_migrate_movable_pages(rc, pages, gup_flags); + } while (!rc); + memalloc_pin_restore(flags); - if (gup_flags & FOLL_LONGTERM) { - if (rc > 0) - rc = check_and_migrate_cma_pages(mm, start, rc, pages, - vmas, gup_flags); - memalloc_nocma_restore(flags); - } return rc; } diff --git a/mm/gup_test.c b/mm/gup_test.c index e3cf78e5873e..d974dec19e1c 100644 --- a/mm/gup_test.c +++ b/mm/gup_test.c @@ -52,6 +52,12 @@ static void verify_dma_pinned(unsigned int cmd, struct page **pages, dump_page(page, "gup_test failure"); break; + } else if (cmd == PIN_LONGTERM_BENCHMARK && + WARN(!is_pinnable_page(page), + "pages[%lu] is NOT pinnable but pinned\n", + i)) { + dump_page(page, "gup_test failure"); + break; } } break; @@ -94,7 +100,7 @@ static int __gup_test_ioctl(unsigned int cmd, { ktime_t start_time, end_time; unsigned long i, nr_pages, addr, next; - int nr; + long nr; struct page **pages; int ret = 0; bool needs_mmap_lock = @@ -126,37 +132,34 @@ static int __gup_test_ioctl(unsigned int cmd, nr = (next - addr) / PAGE_SIZE; } - /* Filter out most gup flags: only allow a tiny subset here: */ - gup->flags &= FOLL_WRITE; - switch (cmd) { case GUP_FAST_BENCHMARK: - nr = get_user_pages_fast(addr, nr, gup->flags, + nr = get_user_pages_fast(addr, nr, gup->gup_flags, pages + i); break; case GUP_BASIC_TEST: - nr = get_user_pages(addr, nr, gup->flags, pages + i, + nr = get_user_pages(addr, nr, gup->gup_flags, pages + i, NULL); break; case PIN_FAST_BENCHMARK: - nr = pin_user_pages_fast(addr, nr, gup->flags, + nr = pin_user_pages_fast(addr, nr, gup->gup_flags, pages + i); break; case PIN_BASIC_TEST: - nr = pin_user_pages(addr, nr, gup->flags, pages + i, + nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i, NULL); break; case PIN_LONGTERM_BENCHMARK: nr = pin_user_pages(addr, nr, - gup->flags | 
FOLL_LONGTERM, + gup->gup_flags | FOLL_LONGTERM, pages + i, NULL); break; case DUMP_USER_PAGES_TEST: - if (gup->flags & GUP_TEST_FLAG_DUMP_PAGES_USE_PIN) - nr = pin_user_pages(addr, nr, gup->flags, + if (gup->test_flags & GUP_TEST_FLAG_DUMP_PAGES_USE_PIN) + nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i, NULL); else - nr = get_user_pages(addr, nr, gup->flags, + nr = get_user_pages(addr, nr, gup->gup_flags, pages + i, NULL); break; default: @@ -187,7 +190,7 @@ static int __gup_test_ioctl(unsigned int cmd, start_time = ktime_get(); - put_back_pages(cmd, pages, nr_pages, gup->flags); + put_back_pages(cmd, pages, nr_pages, gup->test_flags); end_time = ktime_get(); gup->put_delta_usec = ktime_us_delta(end_time, start_time); diff --git a/mm/gup_test.h b/mm/gup_test.h index 90a6713d50eb..887ac1d5f5bc 100644 --- a/mm/gup_test.h +++ b/mm/gup_test.h @@ -21,7 +21,8 @@ struct gup_test { __u64 addr; __u64 size; __u32 nr_pages_per_call; - __u32 flags; + __u32 gup_flags; + __u32 test_flags; /* * Each non-zero entry is the number of the page (1-based: first page is * page 1, so that zero entries mean "do nothing") from the .addr base. diff --git a/mm/highmem.c b/mm/highmem.c index 6ef8f5e05e7e..e389337e00b4 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -104,7 +104,7 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) atomic_long_t _totalhigh_pages __read_mostly; EXPORT_SYMBOL(_totalhigh_pages); -unsigned int __nr_free_highpages (void) +unsigned int __nr_free_highpages(void) { struct zone *zone; unsigned int pages = 0; @@ -120,7 +120,7 @@ unsigned int __nr_free_highpages (void) static int pkmap_count[LAST_PKMAP]; static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock); -pte_t * pkmap_page_table; +pte_t *pkmap_page_table; /* * Most architectures have no use for kmap_high_get(), so let's abstract @@ -147,6 +147,7 @@ struct page *__kmap_to_page(void *vaddr) if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) { int i = PKMAP_NR(addr); + return pte_page(pkmap_page_table[i]); } @@ -278,9 +279,8 @@ void *kmap_high(struct page *page) pkmap_count[PKMAP_NR(vaddr)]++; BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2); unlock_kmap(); - return (void*) vaddr; + return (void *) vaddr; } - EXPORT_SYMBOL(kmap_high); #ifdef ARCH_NEEDS_KMAP_HIGH_GET @@ -305,7 +305,7 @@ void *kmap_high_get(struct page *page) pkmap_count[PKMAP_NR(vaddr)]++; } unlock_kmap_any(flags); - return (void*) vaddr; + return (void *) vaddr; } #endif @@ -737,7 +737,6 @@ done: spin_unlock_irqrestore(&pas->lock, flags); return ret; } - EXPORT_SYMBOL(page_address); /** diff --git a/mm/huge_memory.c b/mm/huge_memory.c index ae907a9c2050..98456017744d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -7,6 +7,7 @@ #include <linux/mm.h> #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/sched/coredump.h> #include <linux/sched/numa_balancing.h> #include <linux/highmem.h> @@ -77,18 +78,18 @@ bool transparent_hugepage_enabled(struct vm_area_struct *vma) return false; } -static struct page *get_huge_zero_page(void) +static bool get_huge_zero_page(void) { struct page *zero_page; retry: if (likely(atomic_inc_not_zero(&huge_zero_refcount))) - return READ_ONCE(huge_zero_page); + return true; zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE, HPAGE_PMD_ORDER); if (!zero_page) { count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED); - return NULL; + return false; } count_vm_event(THP_ZERO_PAGE_ALLOC); preempt_disable(); @@ -101,7 +102,7 @@ retry: /* We take additional reference here. 
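The count starts at 2: one reference for the caller, one to keep the zero page cached.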
It will be put back by shrinker */ atomic_set(&huge_zero_refcount, 2); preempt_enable(); - return READ_ONCE(huge_zero_page); + return true; } static void put_huge_zero_page(void) @@ -624,14 +625,12 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, /* Deliver the page fault to userland */ if (userfaultfd_missing(vma)) { - vm_fault_t ret2; - spin_unlock(vmf->ptl); put_page(page); pte_free(vma->vm_mm, pgtable); - ret2 = handle_userfault(vmf, VM_UFFD_MISSING); - VM_BUG_ON(ret2 & VM_FAULT_FALLBACK); - return ret2; + ret = handle_userfault(vmf, VM_UFFD_MISSING); + VM_BUG_ON(ret & VM_FAULT_FALLBACK); + return ret; } entry = mk_huge_pmd(page, vma->vm_page_prot); @@ -1293,7 +1292,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) } page = pmd_page(orig_pmd); - VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); + VM_BUG_ON_PAGE(!PageHead(page), page); /* Lock page for reuse_swap_page() */ if (!trylock_page(page)) { @@ -1464,12 +1463,6 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) */ page_locked = trylock_page(page); target_nid = mpol_misplaced(page, vma, haddr); - if (target_nid == NUMA_NO_NODE) { - /* If the page was locked, there are no parallel migrations */ - if (page_locked) - goto clear_pmdnuma; - } - /* Migration could have started since the pmd_trans_migrating check */ if (!page_locked) { page_nid = NUMA_NO_NODE; @@ -1478,6 +1471,11 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) spin_unlock(vmf->ptl); put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE); goto out; + } else if (target_nid == NUMA_NO_NODE) { + /* There are no parallel migrations and page is in the right + * node. Clear the numa hinting info in this pmd. + */ + goto clear_pmdnuma; } /* @@ -1696,7 +1694,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); entry = pmd_to_swp_entry(orig_pmd); - page = pfn_to_page(swp_offset(entry)); + page = migration_entry_to_page(entry); flush_needed = 0; } else WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); @@ -2104,7 +2102,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, swp_entry_t entry; entry = pmd_to_swp_entry(old_pmd); - page = pfn_to_page(swp_offset(entry)); + page = migration_entry_to_page(entry); write = is_write_migration_entry(entry); young = false; soft_dirty = pmd_swp_soft_dirty(old_pmd); @@ -2303,44 +2301,38 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, __split_huge_pmd(vma, pmd, address, freeze, page); } +static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) +{ + /* + * If the new address isn't hpage aligned and it could previously + * contain an hugepage: check if we need to split an huge pmd. + */ + if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) && + range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE), + ALIGN(address, HPAGE_PMD_SIZE))) + split_huge_pmd_address(vma, address, false, NULL); +} + void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next) { - /* - * If the new start address isn't hpage aligned and it could - * previously contain an hugepage: check if we need to split - * an huge pmd. - */ - if (start & ~HPAGE_PMD_MASK && - (start & HPAGE_PMD_MASK) >= vma->vm_start && - (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) - split_huge_pmd_address(vma, start, false, NULL); + /* Check if we need to split start first. 
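The split_huge_pmd_if_needed() helper introduced above only acts when the boundary is not huge-page aligned yet the surrounding huge-page-sized window still lies inside the VMA. The alignment arithmetic is easy to check in isolation; a small standalone program, assuming the usual power-of-two macros and a 2 MiB HPAGE_PMD_SIZE:

#include <stdint.h>
#include <stdio.h>

#define HPAGE_PMD_SIZE		(2UL << 20)	/* assume 2 MiB PMD-mapped THP */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long addr = 0x7f0000201000UL;	/* not 2 MiB aligned */

	/* The candidate huge page spans [ALIGN_DOWN(addr), ALIGN(addr)). */
	printf("aligned=%d window=[%#lx, %#lx)\n",
	       (int)IS_ALIGNED(addr, HPAGE_PMD_SIZE),
	       ALIGN_DOWN(addr, HPAGE_PMD_SIZE),
	       ALIGN(addr, HPAGE_PMD_SIZE));
	return 0;
}

Note that ALIGN() of an already aligned address is the address itself, which is why a fully aligned boundary never triggers a split.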
*/ + split_huge_pmd_if_needed(vma, start); - /* - * If the new end address isn't hpage aligned and it could - * previously contain an hugepage: check if we need to split - * an huge pmd. - */ - if (end & ~HPAGE_PMD_MASK && - (end & HPAGE_PMD_MASK) >= vma->vm_start && - (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) - split_huge_pmd_address(vma, end, false, NULL); + /* Check if we need to split end next. */ + split_huge_pmd_if_needed(vma, end); /* - * If we're also updating the vma->vm_next->vm_start, if the new - * vm_next->vm_start isn't hpage aligned and it could previously - * contain an hugepage: check if we need to split an huge pmd. + * If we're also updating the vma->vm_next->vm_start, + * check if we need to split it. */ if (adjust_next > 0) { struct vm_area_struct *next = vma->vm_next; unsigned long nstart = next->vm_start; nstart += adjust_next; - if (nstart & ~HPAGE_PMD_MASK && - (nstart & HPAGE_PMD_MASK) >= next->vm_start && - (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) - split_huge_pmd_address(next, nstart, false, NULL); + split_huge_pmd_if_needed(next, nstart); } } @@ -2838,8 +2830,8 @@ void deferred_split_huge_page(struct page *page) ds_queue->split_queue_len++; #ifdef CONFIG_MEMCG if (memcg) - memcg_set_shrinker_bit(memcg, page_to_nid(page), - deferred_split_shrinker.id); + set_shrinker_bit(memcg, page_to_nid(page), + deferred_split_shrinker.id); #endif } spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); @@ -2924,16 +2916,14 @@ static struct shrinker deferred_split_shrinker = { }; #ifdef CONFIG_DEBUG_FS -static int split_huge_pages_set(void *data, u64 val) +static void split_huge_pages_all(void) { struct zone *zone; struct page *page; unsigned long pfn, max_zone_pfn; unsigned long total = 0, split = 0; - if (val != 1) - return -EINVAL; - + pr_debug("Split all THPs\n"); for_each_populated_zone(zone) { max_zone_pfn = zone_end_pfn(zone); for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { @@ -2957,15 +2947,243 @@ static int split_huge_pages_set(void *data, u64 val) unlock_page(page); next: put_page(page); + cond_resched(); } } - pr_info("%lu of %lu THP split\n", split, total); + pr_debug("%lu of %lu THP split\n", split, total); +} - return 0; +static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) +{ + return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) || + is_vm_hugetlb_page(vma); +} + +static int split_huge_pages_pid(int pid, unsigned long vaddr_start, + unsigned long vaddr_end) +{ + int ret = 0; + struct task_struct *task; + struct mm_struct *mm; + unsigned long total = 0, split = 0; + unsigned long addr; + + vaddr_start &= PAGE_MASK; + vaddr_end &= PAGE_MASK; + + /* Find the task_struct from pid */ + rcu_read_lock(); + task = find_task_by_vpid(pid); + if (!task) { + rcu_read_unlock(); + ret = -ESRCH; + goto out; + } + get_task_struct(task); + rcu_read_unlock(); + + /* Find the mm_struct */ + mm = get_task_mm(task); + put_task_struct(task); + + if (!mm) { + ret = -EINVAL; + goto out; + } + + pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n", + pid, vaddr_start, vaddr_end); + + mmap_read_lock(mm); + /* + * always increase addr by PAGE_SIZE, since we could have a PTE page + * table filled with PTE-mapped THPs, each of which is distinct. 
+ */ + for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) { + struct vm_area_struct *vma = find_vma(mm, addr); + unsigned int follflags; + struct page *page; + + if (!vma || addr < vma->vm_start) + break; + + /* skip special VMA and hugetlb VMA */ + if (vma_not_suitable_for_thp_split(vma)) { + addr = vma->vm_end; + continue; + } + + /* FOLL_DUMP to ignore special (like zero) pages */ + follflags = FOLL_GET | FOLL_DUMP; + page = follow_page(vma, addr, follflags); + + if (IS_ERR(page)) + continue; + if (!page) + continue; + + if (!is_transparent_hugepage(page)) + goto next; + + total++; + if (!can_split_huge_page(compound_head(page), NULL)) + goto next; + + if (!trylock_page(page)) + goto next; + + if (!split_huge_page(page)) + split++; + + unlock_page(page); +next: + put_page(page); + cond_resched(); + } + mmap_read_unlock(mm); + mmput(mm); + + pr_debug("%lu of %lu THP split\n", split, total); + +out: + return ret; +} + +static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, + pgoff_t off_end) +{ + struct filename *file; + struct file *candidate; + struct address_space *mapping; + int ret = -EINVAL; + pgoff_t index; + int nr_pages = 1; + unsigned long total = 0, split = 0; + + file = getname_kernel(file_path); + if (IS_ERR(file)) + return ret; + + candidate = file_open_name(file, O_RDONLY, 0); + if (IS_ERR(candidate)) + goto out; + + pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n", + file_path, off_start, off_end); + + mapping = candidate->f_mapping; + + for (index = off_start; index < off_end; index += nr_pages) { + struct page *fpage = pagecache_get_page(mapping, index, + FGP_ENTRY | FGP_HEAD, 0); + + nr_pages = 1; + if (xa_is_value(fpage) || !fpage) + continue; + + if (!is_transparent_hugepage(fpage)) + goto next; + + total++; + nr_pages = thp_nr_pages(fpage); + + if (!trylock_page(fpage)) + goto next; + + if (!split_huge_page(fpage)) + split++; + + unlock_page(fpage); +next: + put_page(fpage); + cond_resched(); + } + + filp_close(candidate, NULL); + ret = 0; + + pr_debug("%lu of %lu file-backed THP split\n", split, total); +out: + putname(file); + return ret; } -DEFINE_DEBUGFS_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set, - "%llu\n"); + +#define MAX_INPUT_BUF_SZ 255 + +static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppops) +{ + static DEFINE_MUTEX(split_debug_mutex); + ssize_t ret; + /* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */ + char input_buf[MAX_INPUT_BUF_SZ]; + int pid; + unsigned long vaddr_start, vaddr_end; + + ret = mutex_lock_interruptible(&split_debug_mutex); + if (ret) + return ret; + + ret = -EFAULT; + + memset(input_buf, 0, MAX_INPUT_BUF_SZ); + if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ))) + goto out; + + input_buf[MAX_INPUT_BUF_SZ - 1] = '\0'; + + if (input_buf[0] == '/') { + char *tok; + char *buf = input_buf; + char file_path[MAX_INPUT_BUF_SZ]; + pgoff_t off_start = 0, off_end = 0; + size_t input_len = strlen(input_buf); + + tok = strsep(&buf, ","); + if (tok) { + strncpy(file_path, tok, MAX_INPUT_BUF_SZ); + } else { + ret = -EINVAL; + goto out; + } + + ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end); + if (ret != 2) { + ret = -EINVAL; + goto out; + } + ret = split_huge_pages_in_file(file_path, off_start, off_end); + if (!ret) + ret = input_len; + + goto out; + } + + ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end); + if (ret == 1 && pid == 1) { + 
split_huge_pages_all(); + ret = strlen(input_buf); + goto out; + } else if (ret != 3) { + ret = -EINVAL; + goto out; + } + + ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end); + if (!ret) + ret = strlen(input_buf); +out: + mutex_unlock(&split_debug_mutex); + return ret; + +} + +static const struct file_operations split_huge_pages_fops = { + .owner = THIS_MODULE, + .write = split_huge_pages_write, + .llseek = no_llseek, +}; static int __init split_huge_pages_debugfs(void) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 6c72433bec1e..629aa4c2259c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -39,7 +39,6 @@ #include <linux/hugetlb.h> #include <linux/hugetlb_cgroup.h> #include <linux/node.h> -#include <linux/userfaultfd_k.h> #include <linux/page_owner.h> #include "internal.h" @@ -94,9 +93,10 @@ static inline bool subpool_is_free(struct hugepage_subpool *spool) return true; } -static inline void unlock_or_release_subpool(struct hugepage_subpool *spool) +static inline void unlock_or_release_subpool(struct hugepage_subpool *spool, + unsigned long irq_flags) { - spin_unlock(&spool->lock); + spin_unlock_irqrestore(&spool->lock, irq_flags); /* If no pages are used, and no other handles to the subpool * remain, give up any reservations based on minimum size and @@ -135,10 +135,12 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, void hugepage_put_subpool(struct hugepage_subpool *spool) { - spin_lock(&spool->lock); + unsigned long flags; + + spin_lock_irqsave(&spool->lock, flags); BUG_ON(!spool->count); spool->count--; - unlock_or_release_subpool(spool); + unlock_or_release_subpool(spool, flags); } /* @@ -157,7 +159,7 @@ static long hugepage_subpool_get_pages(struct hugepage_subpool *spool, if (!spool) return ret; - spin_lock(&spool->lock); + spin_lock_irq(&spool->lock); if (spool->max_hpages != -1) { /* maximum size accounting */ if ((spool->used_hpages + delta) <= spool->max_hpages) @@ -184,7 +186,7 @@ static long hugepage_subpool_get_pages(struct hugepage_subpool *spool, } unlock_ret: - spin_unlock(&spool->lock); + spin_unlock_irq(&spool->lock); return ret; } @@ -198,11 +200,12 @@ static long hugepage_subpool_put_pages(struct hugepage_subpool *spool, long delta) { long ret = delta; + unsigned long flags; if (!spool) return delta; - spin_lock(&spool->lock); + spin_lock_irqsave(&spool->lock, flags); if (spool->max_hpages != -1) /* maximum size accounting */ spool->used_hpages -= delta; @@ -223,7 +226,7 @@ static long hugepage_subpool_put_pages(struct hugepage_subpool *spool, * If hugetlbfs_put_super couldn't free spool due to an outstanding * quota reference, free it now. 
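One detail worth calling out in the subpool conversion above: because the lock is now taken with spin_lock_irqsave(), the saved flags must travel into unlock_or_release_subpool(), which both drops the lock and may free the subpool itself. A minimal sketch of that shape, condensed from the hugepage_put_subpool() hunk:

static void put_subpool_sketch(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	/* Drops spool->lock using 'flags' and may kfree(spool). */
	unlock_or_release_subpool(spool, flags);
}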
*/ - unlock_or_release_subpool(spool); + unlock_or_release_subpool(spool, flags); return ret; } @@ -553,7 +556,6 @@ retry: resv->adds_in_progress -= in_regions_needed; spin_unlock(&resv->lock); - VM_BUG_ON(add < 0); return add; } @@ -743,13 +745,20 @@ void hugetlb_fix_reserve_counts(struct inode *inode) { struct hugepage_subpool *spool = subpool_inode(inode); long rsv_adjust; + bool reserved = false; rsv_adjust = hugepage_subpool_get_pages(spool, 1); - if (rsv_adjust) { + if (rsv_adjust > 0) { struct hstate *h = hstate_inode(inode); - hugetlb_acct_memory(h, 1); + if (!hugetlb_acct_memory(h, 1)) + reserved = true; + } else if (!rsv_adjust) { + reserved = true; } + + if (!reserved) + pr_warn("hugetlb: Huge Page Reserved count may go negative.\n"); } /* @@ -1059,6 +1068,8 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg) static void enqueue_huge_page(struct hstate *h, struct page *page) { int nid = page_to_nid(page); + + lockdep_assert_held(&hugetlb_lock); list_move(&page->lru, &h->hugepage_freelists[nid]); h->free_huge_pages++; h->free_huge_pages_node[nid]++; @@ -1068,10 +1079,11 @@ static void enqueue_huge_page(struct hstate *h, struct page *page) static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) { struct page *page; - bool nocma = !!(current->flags & PF_MEMALLOC_NOCMA); + bool pin = !!(current->flags & PF_MEMALLOC_PIN); + lockdep_assert_held(&hugetlb_lock); list_for_each_entry(page, &h->hugepage_freelists[nid], lru) { - if (nocma && is_migrate_cma_page(page)) + if (pin && !is_pinnable_page(page)) continue; if (PageHWPoison(page)) @@ -1205,7 +1217,7 @@ static int hstate_next_node_to_alloc(struct hstate *h, } /* - * helper for free_pool_huge_page() - return the previously saved + * helper for remove_pool_huge_page() - return the previously saved * node ["this node"] from which to free a huge page. Advance the * next node id whether or not we find a free huge page to free so * that the next attempt to free addresses the next node. @@ -1273,7 +1285,7 @@ static void free_gigantic_page(struct page *page, unsigned int order) static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nodemask) { - unsigned long nr_pages = 1UL << huge_page_order(h); + unsigned long nr_pages = pages_per_huge_page(h); if (nid == NUMA_NO_NODE) nid = numa_mem_id(); @@ -1327,6 +1339,42 @@ static inline void destroy_compound_gigantic_page(struct page *page, unsigned int order) { } #endif +/* + * Remove hugetlb page from lists, and update dtor so that page appears + * as just a compound page. A reference is held on the page. + * + * Must be called with hugetlb lock held. 
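Note how the series turns "must hold hugetlb_lock" comments into runtime checks. lockdep_assert_held() compiles away without CONFIG_LOCKDEP and otherwise splats when the invariant is violated; a minimal sketch with a hypothetical helper:

/* Hypothetical accounting helper; not a kernel symbol. */
static void adjust_free_count_sketch(struct hstate *h, int nid, long delta)
{
	/* A no-op unless lockdep is enabled; then it verifies the holder. */
	lockdep_assert_held(&hugetlb_lock);

	h->free_huge_pages += delta;
	h->free_huge_pages_node[nid] += delta;
}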
+ */ +static void remove_hugetlb_page(struct hstate *h, struct page *page, + bool adjust_surplus) +{ + int nid = page_to_nid(page); + + VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page); + VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page); + + lockdep_assert_held(&hugetlb_lock); + if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) + return; + + list_del(&page->lru); + + if (HPageFreed(page)) { + h->free_huge_pages--; + h->free_huge_pages_node[nid]--; + } + if (adjust_surplus) { + h->surplus_huge_pages--; + h->surplus_huge_pages_node[nid]--; + } + + set_page_refcounted(page); + set_compound_page_dtor(page, NULL_COMPOUND_DTOR); + + h->nr_huge_pages--; + h->nr_huge_pages_node[nid]--; +} + static void update_and_free_page(struct hstate *h, struct page *page) { int i; @@ -1335,8 +1383,6 @@ static void update_and_free_page(struct hstate *h, struct page *page) if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) return; - h->nr_huge_pages--; - h->nr_huge_pages_node[page_to_nid(page)]--; for (i = 0; i < pages_per_huge_page(h); i++, subpage = mem_map_next(subpage, page, i)) { subpage->flags &= ~(1 << PG_locked | 1 << PG_error | @@ -1344,24 +1390,24 @@ static void update_and_free_page(struct hstate *h, struct page *page) 1 << PG_active | 1 << PG_private | 1 << PG_writeback); } - VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page); - VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page); - set_compound_page_dtor(page, NULL_COMPOUND_DTOR); - set_page_refcounted(page); if (hstate_is_gigantic(h)) { - /* - * Temporarily drop the hugetlb_lock, because - * we might block in free_gigantic_page(). - */ - spin_unlock(&hugetlb_lock); destroy_compound_gigantic_page(page, huge_page_order(h)); free_gigantic_page(page, huge_page_order(h)); - spin_lock(&hugetlb_lock); } else { __free_pages(page, huge_page_order(h)); } } +static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list) +{ + struct page *page, *t_page; + + list_for_each_entry_safe(page, t_page, list, lru) { + update_and_free_page(h, page); + cond_resched(); + } +} + struct hstate *size_to_hstate(unsigned long size) { struct hstate *h; @@ -1373,7 +1419,7 @@ struct hstate *size_to_hstate(unsigned long size) return NULL; } -static void __free_huge_page(struct page *page) +void free_huge_page(struct page *page) { /* * Can't pass hstate in here because it is called from the @@ -1383,6 +1429,7 @@ static void __free_huge_page(struct page *page) int nid = page_to_nid(page); struct hugepage_subpool *spool = hugetlb_page_subpool(page); bool restore_reserve; + unsigned long flags; VM_BUG_ON_PAGE(page_count(page), page); VM_BUG_ON_PAGE(page_mapcount(page), page); @@ -1411,7 +1458,7 @@ static void __free_huge_page(struct page *page) restore_reserve = true; } - spin_lock(&hugetlb_lock); + spin_lock_irqsave(&hugetlb_lock, flags); ClearHPageMigratable(page); hugetlb_cgroup_uncharge_page(hstate_index(h), pages_per_huge_page(h), page); @@ -1421,82 +1468,46 @@ static void __free_huge_page(struct page *page) h->resv_huge_pages++; if (HPageTemporary(page)) { - list_del(&page->lru); - ClearHPageTemporary(page); + remove_hugetlb_page(h, page, false); + spin_unlock_irqrestore(&hugetlb_lock, flags); update_and_free_page(h, page); } else if (h->surplus_huge_pages_node[nid]) { /* remove the page from active list */ - list_del(&page->lru); + remove_hugetlb_page(h, page, true); + spin_unlock_irqrestore(&hugetlb_lock, flags); update_and_free_page(h, page); - h->surplus_huge_pages--; - h->surplus_huge_pages_node[nid]--; } 
else { arch_clear_hugepage_flags(page); enqueue_huge_page(h, page); + spin_unlock_irqrestore(&hugetlb_lock, flags); } - spin_unlock(&hugetlb_lock); } /* - * As free_huge_page() can be called from a non-task context, we have - * to defer the actual freeing in a workqueue to prevent potential - * hugetlb_lock deadlock. - * - * free_hpage_workfn() locklessly retrieves the linked list of pages to - * be freed and frees them one-by-one. As the page->mapping pointer is - * going to be cleared in __free_huge_page() anyway, it is reused as the - * llist_node structure of a lockless linked list of huge pages to be freed. + * Must be called with the hugetlb lock held */ -static LLIST_HEAD(hpage_freelist); - -static void free_hpage_workfn(struct work_struct *work) +static void __prep_account_new_huge_page(struct hstate *h, int nid) { - struct llist_node *node; - struct page *page; - - node = llist_del_all(&hpage_freelist); - - while (node) { - page = container_of((struct address_space **)node, - struct page, mapping); - node = node->next; - __free_huge_page(page); - } -} -static DECLARE_WORK(free_hpage_work, free_hpage_workfn); - -void free_huge_page(struct page *page) -{ - /* - * Defer freeing if in non-task context to avoid hugetlb_lock deadlock. - */ - if (!in_task()) { - /* - * Only call schedule_work() if hpage_freelist is previously - * empty. Otherwise, schedule_work() had been called but the - * workfn hasn't retrieved the list yet. - */ - if (llist_add((struct llist_node *)&page->mapping, - &hpage_freelist)) - schedule_work(&free_hpage_work); - return; - } - - __free_huge_page(page); + lockdep_assert_held(&hugetlb_lock); + h->nr_huge_pages++; + h->nr_huge_pages_node[nid]++; } -static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) +static void __prep_new_huge_page(struct page *page) { INIT_LIST_HEAD(&page->lru); set_compound_page_dtor(page, HUGETLB_PAGE_DTOR); hugetlb_set_page_subpool(page, NULL); set_hugetlb_cgroup(page, NULL); set_hugetlb_cgroup_rsvd(page, NULL); - spin_lock(&hugetlb_lock); - h->nr_huge_pages++; - h->nr_huge_pages_node[nid]++; - ClearHPageFreed(page); - spin_unlock(&hugetlb_lock); +} + +static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) +{ + __prep_new_huge_page(page); + spin_lock_irq(&hugetlb_lock); + __prep_account_new_huge_page(h, nid); + spin_unlock_irq(&hugetlb_lock); } static void prep_compound_gigantic_page(struct page *page, unsigned int order) @@ -1693,17 +1704,20 @@ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, } /* - * Free huge page from pool from next node to free. - * Attempt to keep persistent huge pages more or less - * balanced over allowed nodes. + * Remove huge page from pool from next node to free. Attempt to keep + * persistent huge pages more or less balanced over allowed nodes. + * This routine only 'removes' the hugetlb page. The caller must make + * an additional call to free the page to low level allocators. * Called with hugetlb_lock locked. 
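The split between remove_pool_huge_page() and the actual freeing enables a pattern this series uses repeatedly: unlink and account under the spinlock, batch the victims on a local list, and only free them (which may sleep for gigantic pages) after the lock is dropped. A condensed sketch of the shape (shrink_pool_sketch is a made-up name):

static void shrink_pool_sketch(struct hstate *h, unsigned long nr,
			       nodemask_t *nodes_allowed)
{
	LIST_HEAD(page_list);
	struct page *page;

	spin_lock_irq(&hugetlb_lock);
	while (nr--) {
		page = remove_pool_huge_page(h, nodes_allowed, false);
		if (!page)
			break;
		list_add(&page->lru, &page_list);
	}
	spin_unlock_irq(&hugetlb_lock);

	/* May sleep; every page is already off the freelists. */
	update_and_free_pages_bulk(h, &page_list);
}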
*/ -static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, - bool acct_surplus) +static struct page *remove_pool_huge_page(struct hstate *h, + nodemask_t *nodes_allowed, + bool acct_surplus) { int nr_nodes, node; - int ret = 0; + struct page *page = NULL; + lockdep_assert_held(&hugetlb_lock); for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { /* * If we're returning unused surplus pages, only examine @@ -1711,23 +1725,14 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, */ if ((!acct_surplus || h->surplus_huge_pages_node[node]) && !list_empty(&h->hugepage_freelists[node])) { - struct page *page = - list_entry(h->hugepage_freelists[node].next, + page = list_entry(h->hugepage_freelists[node].next, struct page, lru); - list_del(&page->lru); - h->free_huge_pages--; - h->free_huge_pages_node[node]--; - if (acct_surplus) { - h->surplus_huge_pages--; - h->surplus_huge_pages_node[node]--; - } - update_and_free_page(h, page); - ret = 1; + remove_hugetlb_page(h, page, acct_surplus); break; } } - return ret; + return page; } /* @@ -1749,7 +1754,7 @@ retry: if (!PageHuge(page)) return 0; - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); if (!PageHuge(page)) { rc = 0; goto out; @@ -1758,7 +1763,6 @@ retry: if (!page_count(page)) { struct page *head = compound_head(page); struct hstate *h = page_hstate(head); - int nid = page_to_nid(head); if (h->free_huge_pages - h->resv_huge_pages == 0) goto out; @@ -1767,7 +1771,7 @@ retry: * when it is dissolved. */ if (unlikely(!HPageFreed(head))) { - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); cond_resched(); /* @@ -1789,15 +1793,14 @@ retry: SetPageHWPoison(page); ClearPageHWPoison(head); } - list_del(&head->lru); - h->free_huge_pages--; - h->free_huge_pages_node[nid]--; + remove_hugetlb_page(h, page, false); h->max_huge_pages--; + spin_unlock_irq(&hugetlb_lock); update_and_free_page(h, head); - rc = 0; + return 0; } out: - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); return rc; } @@ -1839,16 +1842,16 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, if (hstate_is_gigantic(h)) return NULL; - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) goto out_unlock; - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); if (!page) return NULL; - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); /* * We could have raced with the pool size change. 
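The surplus path above is a classic optimistic-allocation shape: check the limit, drop the lock for the sleeping allocation, retake it, and re-check before committing, backing out via HPageTemporary if the race was lost. A condensed sketch:

static struct page *surplus_alloc_sketch(struct hstate *h, gfp_t gfp_mask)
{
	struct page *page;

	spin_lock_irq(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock_irq(&hugetlb_lock);
		return NULL;
	}
	spin_unlock_irq(&hugetlb_lock);

	page = alloc_fresh_huge_page(h, gfp_mask, NUMA_NO_NODE, NULL, NULL);
	if (!page)
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		/* Lost the race: mark temporary so free_huge_page() drops it. */
		SetHPageTemporary(page);
		spin_unlock_irq(&hugetlb_lock);
		put_page(page);
		return NULL;
	}
	h->surplus_huge_pages++;
	h->surplus_huge_pages_node[page_to_nid(page)]++;
	spin_unlock_irq(&hugetlb_lock);
	return page;
}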
* Double check that and simply deallocate the new page @@ -1858,7 +1861,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, */ if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { SetHPageTemporary(page); - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); put_page(page); return NULL; } else { @@ -1867,7 +1870,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, } out_unlock: - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); return page; } @@ -1917,17 +1920,17 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask) { - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); if (h->free_huge_pages - h->resv_huge_pages > 0) { struct page *page; page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask); if (page) { - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); return page; } } - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask); } @@ -1964,6 +1967,7 @@ static int gather_surplus_pages(struct hstate *h, long delta) long needed, allocated; bool alloc_ok = true; + lockdep_assert_held(&hugetlb_lock); needed = (h->resv_huge_pages + delta) - h->free_huge_pages; if (needed <= 0) { h->resv_huge_pages += delta; @@ -1975,7 +1979,7 @@ static int gather_surplus_pages(struct hstate *h, long delta) ret = -ENOMEM; retry: - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); for (i = 0; i < needed; i++) { page = alloc_surplus_huge_page(h, htlb_alloc_mask(h), NUMA_NO_NODE, NULL); @@ -1992,7 +1996,7 @@ retry: * After retaking hugetlb_lock, we need to recalculate 'needed' * because either resv_huge_pages or free_huge_pages may have changed. */ - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); needed = (h->resv_huge_pages + delta) - (h->free_huge_pages + allocated); if (needed > 0) { @@ -2032,12 +2036,12 @@ retry: enqueue_huge_page(h, page); } free: - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); /* Free unnecessary surplus pages to the buddy allocator */ list_for_each_entry_safe(page, tmp, &surplus_list, lru) put_page(page); - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); return ret; } @@ -2049,17 +2053,17 @@ free: * to the associated reservation map. * 2) Free any unused surplus pages that may have been allocated to satisfy * the reservation. As many as unused_resv_pages may be freed. - * - * Called with hugetlb_lock held. However, the lock could be dropped (and - * reacquired) during calls to cond_resched_lock. Whenever dropping the lock, - * we must make sure nobody else can claim pages we are in the process of - * freeing. Do this by ensuring resv_huge_page always is greater than the - * number of huge pages we plan to free when dropping the lock. */ static void return_unused_surplus_pages(struct hstate *h, unsigned long unused_resv_pages) { unsigned long nr_pages; + struct page *page; + LIST_HEAD(page_list); + + lockdep_assert_held(&hugetlb_lock); + /* Uncommit the reservation */ + h->resv_huge_pages -= unused_resv_pages; /* Cannot return gigantic pages currently */ if (hstate_is_gigantic(h)) @@ -2076,24 +2080,21 @@ static void return_unused_surplus_pages(struct hstate *h, * evenly across all nodes with memory. Iterate across these nodes * until we can no longer free unreserved surplus pages. 
This occurs * when the nodes with surplus pages have no free pages. - * free_pool_huge_page() will balance the freed pages across the + * remove_pool_huge_page() will balance the freed pages across the * on-line nodes with memory and will handle the hstate accounting. - * - * Note that we decrement resv_huge_pages as we free the pages. If - * we drop the lock, resv_huge_pages will still be sufficiently large - * to cover subsequent pages we may free. */ while (nr_pages--) { - h->resv_huge_pages--; - unused_resv_pages--; - if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) + page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1); + if (!page) goto out; - cond_resched_lock(&hugetlb_lock); + + list_add(&page->lru, &page_list); } out: - /* Fully uncommit the reservation */ - h->resv_huge_pages -= unused_resv_pages; + spin_unlock_irq(&hugetlb_lock); + update_and_free_pages_bulk(h, &page_list); + spin_lock_irq(&hugetlb_lock); } @@ -2175,27 +2176,26 @@ static long __vma_reservation_common(struct hstate *h, if (vma->vm_flags & VM_MAYSHARE) return ret; - else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) { - /* - * In most cases, reserves always exist for private mappings. - * However, a file associated with mapping could have been - * hole punched or truncated after reserves were consumed. - * As subsequent fault on such a range will not use reserves. - * Subtle - The reserve map for private mappings has the - * opposite meaning than that of shared mappings. If NO - * entry is in the reserve map, it means a reservation exists. - * If an entry exists in the reserve map, it means the - * reservation has already been consumed. As a result, the - * return value of this routine is the opposite of the - * value returned from reserve map manipulation routines above. - */ - if (ret) - return 0; - else - return 1; - } - else - return ret < 0 ? ret : 0; + /* + * We know private mapping must have HPAGE_RESV_OWNER set. + * + * In most cases, reserves always exist for private mappings. + * However, a file associated with mapping could have been + * hole punched or truncated after reserves were consumed. + * As subsequent fault on such a range will not use reserves. + * Subtle - The reserve map for private mappings has the + * opposite meaning than that of shared mappings. If NO + * entry is in the reserve map, it means a reservation exists. + * If an entry exists in the reserve map, it means the + * reservation has already been consumed. As a result, the + * return value of this routine is the opposite of the + * value returned from reserve map manipulation routines above. + */ + if (ret > 0) + return 0; + if (ret == 0) + return 1; + return ret; } static long vma_needs_reservation(struct hstate *h, @@ -2266,6 +2266,134 @@ static void restore_reserve_on_error(struct hstate *h, } } +/* + * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one + * @h: struct hstate old page belongs to + * @old_page: Old page to dissolve + * @list: List to isolate the page in case we need to + * Returns 0 on success, otherwise negated error. + */ +static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page, + struct list_head *list) +{ + gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; + int nid = page_to_nid(old_page); + struct page *new_page; + int ret = 0; + + /* + * Before dissolving the page, we need to allocate a new one for the + * pool to remain stable. 
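alloc_and_dissolve_huge_page() keeps the pool size stable by allocating the replacement page first and then handling, under one lock acquisition, the three states the old page can be in. A condensed control-flow sketch of the body that follows:

static int dissolve_sketch(struct hstate *h, struct page *old_page,
			   struct page *new_page, struct list_head *list)
{
retry:
	spin_lock_irq(&hugetlb_lock);
	if (!PageHuge(old_page)) {
		/* Freed from under us; nothing to do. */
		spin_unlock_irq(&hugetlb_lock);
		return 0;
	}
	if (page_count(old_page)) {
		/* In use: fall back to isolating it. */
		spin_unlock_irq(&hugetlb_lock);
		return isolate_huge_page(old_page, list) ? 0 : -EBUSY;
	}
	if (!HPageFreed(old_page)) {
		/* Refcount 0 but not enqueued yet: tiny window, retry. */
		spin_unlock_irq(&hugetlb_lock);
		cond_resched();
		goto retry;
	}
	/* Genuinely free: swap old for new without the counters moving. */
	remove_hugetlb_page(h, old_page, false);
	__prep_new_huge_page(new_page);
	__prep_account_new_huge_page(h, page_to_nid(old_page));
	page_ref_dec(new_page);		/* pool pages have refcount 0 */
	enqueue_huge_page(h, new_page);
	spin_unlock_irq(&hugetlb_lock);
	update_and_free_page(h, old_page);
	return 0;
}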
Using alloc_buddy_huge_page() allows us to + avoid having to deal with prep_new_huge_page() or touching any + counters. This simplifies things and lets us do the whole thing under the + lock. + */ + new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL); + if (!new_page) + return -ENOMEM; + +retry: + spin_lock_irq(&hugetlb_lock); + if (!PageHuge(old_page)) { + /* + * Freed from under us. Drop new_page too. + */ + goto free_new; + } else if (page_count(old_page)) { + /* + * Someone has grabbed the page, try to isolate it here. + * Fail with -EBUSY if not possible. + */ + spin_unlock_irq(&hugetlb_lock); + if (!isolate_huge_page(old_page, list)) + ret = -EBUSY; + spin_lock_irq(&hugetlb_lock); + goto free_new; + } else if (!HPageFreed(old_page)) { + /* + * Page's refcount is 0 but it has not been enqueued in the + * freelist yet. Race window is small, so we can succeed here if + * we retry. + */ + spin_unlock_irq(&hugetlb_lock); + cond_resched(); + goto retry; + } else { + /* + * Ok, old_page is still a genuine free hugepage. Remove it from + * the freelist and decrease the counters. These will be + * incremented again when calling __prep_account_new_huge_page() + * and enqueue_huge_page() for new_page. The counters will remain + * stable since this happens under the lock. + */ + remove_hugetlb_page(h, old_page, false); + + /* + * new_page needs to be initialized with the standard hugetlb + * state. This is normally done by prep_new_huge_page() but + * that takes hugetlb_lock which is already held so we need to + * open code it here. + * The reference count trick is needed because the allocator + * gives us a referenced page but the pool requires pages with + * a zero refcount. + */ + __prep_new_huge_page(new_page); + __prep_account_new_huge_page(h, nid); + page_ref_dec(new_page); + enqueue_huge_page(h, new_page); + + /* + * Pages have been replaced; we can safely free the old one. + */ + spin_unlock_irq(&hugetlb_lock); + update_and_free_page(h, old_page); + } + + return ret; + +free_new: + spin_unlock_irq(&hugetlb_lock); + __free_pages(new_page, huge_page_order(h)); + + return ret; +} + +int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) +{ + struct hstate *h; + struct page *head; + int ret = -EBUSY; + + /* + * The page might have been dissolved from under our feet, so make sure + * to carefully check the state under the lock. + * Return success when racing as if we dissolved the page ourselves. + */ + spin_lock_irq(&hugetlb_lock); + if (PageHuge(page)) { + head = compound_head(page); + h = page_hstate(head); + } else { + spin_unlock_irq(&hugetlb_lock); + return 0; + } + spin_unlock_irq(&hugetlb_lock); + + /* + * Fence off gigantic pages as there is a cyclic dependency between + * alloc_contig_range and them. Return -ENOMEM as this has the effect + * of bailing out right away without further retrying. + */ + if (hstate_is_gigantic(h)) + return -ENOMEM; + + if (page_count(head) && isolate_huge_page(head, list)) + ret = 0; + else if (!page_count(head)) + ret = alloc_and_dissolve_huge_page(h, head, list); + + return ret; +} + struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) { @@ -2316,7 +2444,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, /* If this allocation is not consuming a reservation, charge it now. 
*/ - deferred_reserve = map_chg || avoid_reserve || !vma_resv_map(vma); + deferred_reserve = map_chg || avoid_reserve; if (deferred_reserve) { ret = hugetlb_cgroup_charge_cgroup_rsvd( idx, pages_per_huge_page(h), &h_cg); @@ -2328,7 +2456,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, if (ret) goto out_uncharge_cgroup_reservation; - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); /* * glb_chg is passed to indicate whether or not a page must be taken * from the global free pool (global change). gbl_chg == 0 indicates @@ -2336,7 +2464,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, */ page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); if (!page) { - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); page = alloc_buddy_huge_page_with_mpol(h, vma, addr); if (!page) goto out_uncharge_cgroup; @@ -2344,7 +2472,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, SetHPageRestoreReserve(page); h->resv_huge_pages--; } - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); list_add(&page->lru, &h->hugepage_activelist); /* Fall through */ } @@ -2357,7 +2485,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, h_cg, page); } - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); hugetlb_set_page_subpool(page, spool); @@ -2547,24 +2675,32 @@ static void try_to_free_low(struct hstate *h, unsigned long count, nodemask_t *nodes_allowed) { int i; + LIST_HEAD(page_list); + lockdep_assert_held(&hugetlb_lock); if (hstate_is_gigantic(h)) return; + /* + * Collect pages to be freed on a list, and free after dropping lock + */ for_each_node_mask(i, *nodes_allowed) { struct page *page, *next; struct list_head *freel = &h->hugepage_freelists[i]; list_for_each_entry_safe(page, next, freel, lru) { if (count >= h->nr_huge_pages) - return; + goto out; if (PageHighMem(page)) continue; - list_del(&page->lru); - update_and_free_page(h, page); - h->free_huge_pages--; - h->free_huge_pages_node[page_to_nid(page)]--; + remove_hugetlb_page(h, page, false); + list_add(&page->lru, &page_list); } } + +out: + spin_unlock_irq(&hugetlb_lock); + update_and_free_pages_bulk(h, &page_list); + spin_lock_irq(&hugetlb_lock); } #else static inline void try_to_free_low(struct hstate *h, unsigned long count, @@ -2583,6 +2719,7 @@ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, { int nr_nodes, node; + lockdep_assert_held(&hugetlb_lock); VM_BUG_ON(delta != -1 && delta != 1); if (delta < 0) { @@ -2610,6 +2747,8 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, nodemask_t *nodes_allowed) { unsigned long min_count, ret; + struct page *page; + LIST_HEAD(page_list); NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); /* @@ -2622,7 +2761,12 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, else return -ENOMEM; - spin_lock(&hugetlb_lock); + /* + * resize_lock mutex prevents concurrent adjustments to number of + * pages in hstate via the proc/sysfs interfaces. + */ + mutex_lock(&h->resize_lock); + spin_lock_irq(&hugetlb_lock); /* * Check for a node specific request. 
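set_max_huge_pages() now layers two locks: the new per-hstate resize_lock mutex serializes whole resize operations (concurrent proc/sysfs writers), while the irq-safe hugetlb_lock keeps protecting the counters and is dropped and retaken around each sleeping allocation. A heavily condensed sketch of the grow direction only, under those assumptions:

static int grow_pool_sketch(struct hstate *h, unsigned long count)
{
	mutex_lock(&h->resize_lock);		/* one resizer at a time */
	spin_lock_irq(&hugetlb_lock);
	while (persistent_huge_pages(h) < count) {
		spin_unlock_irq(&hugetlb_lock);	/* allocation may sleep */
		cond_resched();
		if (!alloc_pool_huge_page(h, &node_states[N_MEMORY], NULL))
			goto out;
		spin_lock_irq(&hugetlb_lock);
	}
	spin_unlock_irq(&hugetlb_lock);
out:
	mutex_unlock(&h->resize_lock);
	return 0;
}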
@@ -2653,7 +2797,8 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, */ if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { if (count > persistent_huge_pages(h)) { - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); + mutex_unlock(&h->resize_lock); NODEMASK_FREE(node_alloc_noretry); return -EINVAL; } @@ -2682,14 +2827,14 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, * page, free_huge_page will handle it by freeing the page * and reducing the surplus. */ - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); /* yield cpu to avoid soft lockup */ cond_resched(); ret = alloc_pool_huge_page(h, nodes_allowed, node_alloc_noretry); - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); if (!ret) goto out; @@ -2716,18 +2861,30 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; min_count = max(count, min_count); try_to_free_low(h, min_count, nodes_allowed); + + /* + * Collect pages to be removed on list without dropping lock + */ while (min_count < persistent_huge_pages(h)) { - if (!free_pool_huge_page(h, nodes_allowed, 0)) + page = remove_pool_huge_page(h, nodes_allowed, 0); + if (!page) break; - cond_resched_lock(&hugetlb_lock); + + list_add(&page->lru, &page_list); } + /* free the pages after dropping lock */ + spin_unlock_irq(&hugetlb_lock); + update_and_free_pages_bulk(h, &page_list); + spin_lock_irq(&hugetlb_lock); + while (count < persistent_huge_pages(h)) { if (!adjust_pool_surplus(h, nodes_allowed, 1)) break; } out: h->max_huge_pages = persistent_huge_pages(h); - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); + mutex_unlock(&h->resize_lock); NODEMASK_FREE(node_alloc_noretry); @@ -2882,9 +3039,9 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, if (err) return err; - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); h->nr_overcommit_huge_pages = input; - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); return count; } @@ -3215,6 +3372,7 @@ void __init hugetlb_add_hstate(unsigned int order) BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); BUG_ON(order == 0); h = &hstates[hugetlb_max_hstate++]; + mutex_init(&h->resize_lock); h->order = order; h->mask = ~(huge_page_size(h) - 1); for (i = 0; i < MAX_NUMNODES; ++i) @@ -3267,10 +3425,10 @@ static int __init hugepages_setup(char *s) /* * Global state is always initialized later in hugetlb_init. - * But we need to allocate >= MAX_ORDER hstates here early to still + * But we need to allocate gigantic hstates here early to still * use the bootmem allocator. */ - if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER) + if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate)) hugetlb_hstate_alloc_pages(parsed_hstate); last_mhp = mhp; @@ -3470,9 +3628,9 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, goto out; if (write) { - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); h->nr_overcommit_huge_pages = tmp; - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); } out: return ret; @@ -3568,7 +3726,7 @@ static int hugetlb_acct_memory(struct hstate *h, long delta) if (!delta) return 0; - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); /* * When cpuset is configured, it breaks the strict hugetlb page * reservation as the accounting is done on a global variable. 
Such @@ -3607,7 +3765,7 @@ static int hugetlb_acct_memory(struct hstate *h, long delta) return_unused_surplus_pages(h, (unsigned long) -delta); out: - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); return ret; } @@ -3795,7 +3953,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, src_pte = huge_pte_offset(src, addr, sz); if (!src_pte) continue; - dst_pte = huge_pte_alloc(dst, addr, sz); + dst_pte = huge_pte_alloc(dst, vma, addr, sz); if (!dst_pte) { ret = -ENOMEM; break; @@ -4310,6 +4468,44 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping, return 0; } +static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, + struct address_space *mapping, + pgoff_t idx, + unsigned int flags, + unsigned long haddr, + unsigned long reason) +{ + vm_fault_t ret; + u32 hash; + struct vm_fault vmf = { + .vma = vma, + .address = haddr, + .flags = flags, + + /* + * Hard to debug if it ends up being + * used by a callee that assumes + * something about the other + * uninitialized fields... same as in + * memory.c + */ + }; + + /* + * hugetlb_fault_mutex and i_mmap_rwsem must be + * dropped before handling userfault. Reacquire + * after handling fault to make calling code simpler. + */ + hash = hugetlb_fault_mutex_hash(mapping, idx); + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + i_mmap_unlock_read(mapping); + ret = handle_userfault(&vmf, reason); + i_mmap_lock_read(mapping); + mutex_lock(&hugetlb_fault_mutex_table[hash]); + + return ret; +} + static vm_fault_t hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, @@ -4348,35 +4544,11 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, retry: page = find_lock_page(mapping, idx); if (!page) { - /* - * Check for page in userfault range - */ + /* Check for page in userfault range */ if (userfaultfd_missing(vma)) { - u32 hash; - struct vm_fault vmf = { - .vma = vma, - .address = haddr, - .flags = flags, - /* - * Hard to debug if it ends up being - * used by a callee that assumes - * something about the other - * uninitialized fields... same as in - * memory.c - */ - }; - - /* - * hugetlb_fault_mutex and i_mmap_rwsem must be - * dropped before handling userfault. Reacquire - * after handling fault to make calling code simpler. - */ - hash = hugetlb_fault_mutex_hash(mapping, idx); - mutex_unlock(&hugetlb_fault_mutex_table[hash]); - i_mmap_unlock_read(mapping); - ret = handle_userfault(&vmf, VM_UFFD_MISSING); - i_mmap_lock_read(mapping); - mutex_lock(&hugetlb_fault_mutex_table[hash]); + ret = hugetlb_handle_userfault(vma, mapping, idx, + flags, haddr, + VM_UFFD_MISSING); goto out; } @@ -4395,13 +4567,10 @@ retry: * sure there really is no pte entry. */ ptl = huge_pte_lock(h, mm, ptep); - if (!huge_pte_none(huge_ptep_get(ptep))) { - ret = 0; - spin_unlock(ptl); - goto out; - } + ret = 0; + if (huge_pte_none(huge_ptep_get(ptep))) + ret = vmf_error(PTR_ERR(page)); spin_unlock(ptl); - ret = vmf_error(PTR_ERR(page)); goto out; } clear_huge_page(page, address, pages_per_huge_page(h)); @@ -4435,6 +4604,16 @@ retry: VM_FAULT_SET_HINDEX(hstate_index(h)); goto backout_unlocked; } + + /* Check for page in userfault range. 
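The factored-out hugetlb_handle_userfault() exists because handle_userfault() parks the faulting task until userspace responds, so the fault path cannot sit on i_mmap_rwsem or the per-index fault mutex across that wait. Condensed shape of the helper:

static vm_fault_t userfault_sketch(struct vm_area_struct *vma,
				   struct address_space *mapping,
				   pgoff_t idx, struct vm_fault *vmf,
				   unsigned long reason)
{
	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
	vm_fault_t ret;

	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	i_mmap_unlock_read(mapping);

	ret = handle_userfault(vmf, reason);	/* may block on userspace */

	/* Reacquire so callers see the same locking state as before. */
	i_mmap_lock_read(mapping);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	return ret;
}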
*/ + if (userfaultfd_minor(vma)) { + unlock_page(page); + put_page(page); + ret = hugetlb_handle_userfault(vma, mapping, idx, + flags, haddr, + VM_UFFD_MINOR); + goto out; + } } /* @@ -4563,7 +4742,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, */ mapping = vma->vm_file->f_mapping; i_mmap_lock_read(mapping); - ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); + ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h)); if (!ptep) { i_mmap_unlock_read(mapping); return VM_FAULT_OOM; @@ -4675,6 +4854,7 @@ out_mutex: return ret; } +#ifdef CONFIG_USERFAULTFD /* * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with * modifications for huge pages. @@ -4684,8 +4864,10 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, + enum mcopy_atomic_mode mode, struct page **pagep) { + bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE); struct address_space *mapping; pgoff_t idx; unsigned long size; @@ -4695,8 +4877,17 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, spinlock_t *ptl; int ret; struct page *page; + int writable; + + mapping = dst_vma->vm_file->f_mapping; + idx = vma_hugecache_offset(h, dst_vma, dst_addr); - if (!*pagep) { + if (is_continue) { + ret = -EFAULT; + page = find_lock_page(mapping, idx); + if (!page) + goto out; + } else if (!*pagep) { ret = -ENOMEM; page = alloc_huge_page(dst_vma, dst_addr, 0); if (IS_ERR(page)) @@ -4725,13 +4916,8 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, */ __SetPageUptodate(page); - mapping = dst_vma->vm_file->f_mapping; - idx = vma_hugecache_offset(h, dst_vma, dst_addr); - - /* - * If shared, add to page cache - */ - if (vm_shared) { + /* Add shared, newly allocated pages to the page cache. */ + if (vm_shared && !is_continue) { size = i_size_read(mapping->host) >> huge_page_shift(h); ret = -EFAULT; if (idx >= size) @@ -4776,8 +4962,14 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, hugepage_add_new_anon_rmap(page, dst_vma, dst_addr); } - _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE); - if (dst_vma->vm_flags & VM_WRITE) + /* For CONTINUE on a non-shared VMA, don't set VM_WRITE for CoW. */ + if (is_continue && !vm_shared) + writable = 0; + else + writable = dst_vma->vm_flags & VM_WRITE; + + _dst_pte = make_huge_pte(dst_vma, page, writable); + if (writable) _dst_pte = huge_pte_mkdirty(_dst_pte); _dst_pte = pte_mkyoung(_dst_pte); @@ -4791,20 +4983,22 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, update_mmu_cache(dst_vma, dst_addr, dst_pte); spin_unlock(ptl); - SetHPageMigratable(page); - if (vm_shared) + if (!is_continue) + SetHPageMigratable(page); + if (vm_shared || is_continue) unlock_page(page); ret = 0; out: return ret; out_release_unlock: spin_unlock(ptl); - if (vm_shared) + if (vm_shared || is_continue) unlock_page(page); out_release_nounlock: put_page(page); goto out; } +#endif /* CONFIG_USERFAULTFD */ static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma, int refs, struct page **pages, @@ -4996,14 +5190,6 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, return i ? i : err; } -#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE -/* - * ARCHes with special requirements for evicting HUGETLB backing TLB entries can - * implement this. 
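For the new MCOPY_ATOMIC_CONTINUE mode (UFFDIO_CONTINUE, servicing minor faults) the page already lives in the page cache, so the allocate-and-copy branch is skipped and only a PTE is installed. The subtle part is the writability choice; a condensed sketch of the decision made above:

static pte_t continue_pte_sketch(struct vm_area_struct *dst_vma,
				 struct page *page, bool is_continue)
{
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	int writable;
	pte_t pte;

	/* CONTINUE on a private VMA stays read-only so CoW still works. */
	if (is_continue && !vm_shared)
		writable = 0;
	else
		writable = dst_vma->vm_flags & VM_WRITE;

	pte = make_huge_pte(dst_vma, page, writable);
	if (writable)
		pte = huge_pte_mkdirty(pte);
	return pte_mkyoung(pte);
}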
- */ -#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) -#endif - unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot) { @@ -5280,6 +5466,9 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end, /* * If the subpool has a minimum size, the number of global * reservations to be released may be adjusted. + * + * Note that !resv_map implies freed == 0. So (chg - freed) + * won't go negative. */ gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); hugetlb_acct_memory(h, -gbl_reserve); @@ -5326,6 +5515,15 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) return false; } +bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) +{ +#ifdef CONFIG_USERFAULTFD + if (uffd_disable_huge_pmd_share(vma)) + return false; +#endif + return vma_shareable(vma, addr); +} + /* * Determine if start,end range within vma could be mapped by shared pmd. * If yes, adjust start and end to cover range associated with possible @@ -5370,9 +5568,9 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, * if !vma_shareable check at the beginning of the routine. i_mmap_rwsem is * only required for subsequent processing. */ -pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) +pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, pud_t *pud) { - struct vm_area_struct *vma = find_vma(mm, addr); struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; @@ -5382,9 +5580,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) pte_t *pte; spinlock_t *ptl; - if (!vma_shareable(vma, addr)) - return (pte_t *)pmd_alloc(mm, pud, addr); - i_mmap_assert_locked(mapping); vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { if (svma == vma) @@ -5448,9 +5643,10 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE; return 1; } -#define want_pmd_share() (1) + #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ -pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) +pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, pud_t *pud) { return NULL; } @@ -5465,11 +5661,15 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) { } -#define want_pmd_share() (0) + +bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) +{ + return false; +} #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; @@ -5487,8 +5687,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, pte = (pte_t *)pud; } else { BUG_ON(sz != PMD_SIZE); - if (want_pmd_share() && pud_none(*pud)) - pte = huge_pmd_share(mm, addr, pud); + if (want_pmd_share(vma, addr) && pud_none(*pud)) + pte = huge_pmd_share(mm, vma, addr, pud); else pte = (pte_t *)pmd_alloc(mm, pud, addr); } @@ -5632,7 +5832,7 @@ bool isolate_huge_page(struct page *page, struct list_head *list) { bool ret = true; - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); if (!PageHeadHuge(page) || !HPageMigratable(page) || !get_page_unless_zero(page)) { @@ -5642,16 
+5842,16 @@ bool isolate_huge_page(struct page *page, struct list_head *list) ClearHPageMigratable(page); list_move_tail(&page->lru, list); unlock: - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); return ret; } void putback_active_hugepage(struct page *page) { - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); SetHPageMigratable(page); list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); put_page(page); } @@ -5679,13 +5879,70 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason) SetHPageTemporary(oldpage); ClearHPageTemporary(newpage); - spin_lock(&hugetlb_lock); + /* + * There is no need to transfer the per-node surplus state + * when we do not cross the node. + */ + if (new_nid == old_nid) + return; + spin_lock_irq(&hugetlb_lock); if (h->surplus_huge_pages_node[old_nid]) { h->surplus_huge_pages_node[old_nid]--; h->surplus_huge_pages_node[new_nid]++; } - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); + } +} + +/* + * This function will unconditionally remove all the shared pmd pgtable entries + * within the specific vma for a hugetlbfs memory range. + */ +void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) +{ + struct hstate *h = hstate_vma(vma); + unsigned long sz = huge_page_size(h); + struct mm_struct *mm = vma->vm_mm; + struct mmu_notifier_range range; + unsigned long address, start, end; + spinlock_t *ptl; + pte_t *ptep; + + if (!(vma->vm_flags & VM_MAYSHARE)) + return; + + start = ALIGN(vma->vm_start, PUD_SIZE); + end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); + + if (start >= end) + return; + + /* + * No need to call adjust_range_if_pmd_sharing_possible(), because + * we have already done the PUD_SIZE alignment. + */ + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, + start, end); + mmu_notifier_invalidate_range_start(&range); + i_mmap_lock_write(vma->vm_file->f_mapping); + for (address = start; address < end; address += PUD_SIZE) { + unsigned long tmp = address; + + ptep = huge_pte_offset(mm, address, sz); + if (!ptep) + continue; + ptl = huge_pte_lock(h, mm, ptep); + /* We don't want 'address' to be changed */ + huge_pmd_unshare(mm, vma, &tmp, ptep); + spin_unlock(ptl); } + flush_hugetlb_tlb_range(vma, start, end); + i_mmap_unlock_write(vma->vm_file->f_mapping); + /* + * No need to call mmu_notifier_invalidate_range(), see + * Documentation/vm/mmu_notifier.rst. 
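hugetlb_unshare_all_pmds() can restrict itself to PUD_SIZE granularity because a shared PMD page always backs one full PUD entry; a partially covered entry can never be shared. The range clamping reduces to two alignment operations (sketch; on x86-64, PUD_SIZE is 1 GiB):

static bool clamp_to_pud_range_sketch(unsigned long vm_start,
				      unsigned long vm_end,
				      unsigned long *start,
				      unsigned long *end)
{
	*start = ALIGN(vm_start, PUD_SIZE);	/* round start up */
	*end = ALIGN_DOWN(vm_end, PUD_SIZE);	/* round end down */

	/* Nothing to unshare if no fully covered PUD entry remains. */
	return *start < *end;
}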
+ */ + mmu_notifier_invalidate_range_end(&range); } #ifdef CONFIG_CMA diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index 603a131e262d..5383023d0cca 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -204,11 +204,11 @@ static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css) do { idx = 0; for_each_hstate(h) { - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); list_for_each_entry(page, &h->hugepage_activelist, lru) hugetlb_cgroup_move_parent(idx, h_cg, page); - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); idx++; } cond_resched(); @@ -784,8 +784,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage) if (hugetlb_cgroup_disabled()) return; - VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage); - spin_lock(&hugetlb_lock); + spin_lock_irq(&hugetlb_lock); h_cg = hugetlb_cgroup_from_page(oldhpage); h_cg_rsvd = hugetlb_cgroup_from_page_rsvd(oldhpage); set_hugetlb_cgroup(oldhpage, NULL); @@ -795,7 +794,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage) set_hugetlb_cgroup(newhpage, h_cg); set_hugetlb_cgroup_rsvd(newhpage, h_cg_rsvd); list_move(&newhpage->lru, &h->hugepage_activelist); - spin_unlock(&hugetlb_lock); + spin_unlock_irq(&hugetlb_lock); return; } diff --git a/mm/internal.h b/mm/internal.h index ef5f336f59bd..feeaaf06705d 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -244,7 +244,13 @@ struct compact_control { unsigned int nr_freepages; /* Number of isolated free pages */ unsigned int nr_migratepages; /* Number of pages to migrate */ unsigned long free_pfn; /* isolate_freepages search base */ - unsigned long migrate_pfn; /* isolate_migratepages search base */ + /* + * Acts as an in/out parameter to page isolation for migration. + * isolate_migratepages uses it as a search base. + * isolate_migratepages_block will update the value to the next pfn + * after the last isolated one. + */ + unsigned long migrate_pfn; unsigned long fast_start_pfn; /* a pfn to start linear scan from */ struct zone *zone; unsigned long total_migrate_scanned; @@ -280,7 +286,7 @@ struct capture_control { unsigned long isolate_freepages_range(struct compact_control *cc, unsigned long start_pfn, unsigned long end_pfn); -unsigned long +int isolate_migratepages_range(struct compact_control *cc, unsigned long low_pfn, unsigned long end_pfn); int find_suitable_fallback(struct free_area *area, unsigned int order, diff --git a/mm/kfence/core.c b/mm/kfence/core.c index d53c91f881a4..e18fbbd5d9b4 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -10,6 +10,7 @@ #include <linux/atomic.h> #include <linux/bug.h> #include <linux/debugfs.h> +#include <linux/irq_work.h> #include <linux/kcsan-checks.h> #include <linux/kfence.h> #include <linux/kmemleak.h> @@ -19,6 +20,7 @@ #include <linux/moduleparam.h> #include <linux/random.h> #include <linux/rcupdate.h> +#include <linux/sched/sysctl.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/spinlock.h> @@ -372,6 +374,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z /* Restore page protection if there was an OOB access. 
*/ if (meta->unprotected_page) { + memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE); kfence_protect(meta->unprotected_page); meta->unprotected_page = 0; } @@ -586,6 +589,17 @@ late_initcall(kfence_debugfs_init); /* === Allocation Gate Timer ================================================ */ +#ifdef CONFIG_KFENCE_STATIC_KEYS +/* Wait queue to wake up allocation-gate timer task. */ +static DECLARE_WAIT_QUEUE_HEAD(allocation_wait); + +static void wake_up_kfence_timer(struct irq_work *work) +{ + wake_up(&allocation_wait); +} +static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer); +#endif + /* * Set up delayed work, which will enable and disable the static key. We need to * use a work queue (rather than a simple timer), since enabling and disabling a @@ -603,29 +617,27 @@ static void toggle_allocation_gate(struct work_struct *work) if (!READ_ONCE(kfence_enabled)) return; - /* Enable static key, and await allocation to happen. */ atomic_set(&kfence_allocation_gate, 0); #ifdef CONFIG_KFENCE_STATIC_KEYS + /* Enable static key, and await allocation to happen. */ static_branch_enable(&kfence_allocation_key); - /* - * Await an allocation. Timeout after 1 second, in case the kernel stops - * doing allocations, to avoid stalling this worker task for too long. - */ - { - unsigned long end_wait = jiffies + HZ; - - do { - set_current_state(TASK_UNINTERRUPTIBLE); - if (atomic_read(&kfence_allocation_gate) != 0) - break; - schedule_timeout(1); - } while (time_before(jiffies, end_wait)); - __set_current_state(TASK_RUNNING); + + if (sysctl_hung_task_timeout_secs) { + /* + * During low activity with no allocations we might wait a + * while; let's avoid the hung task warning. + */ + wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate), + sysctl_hung_task_timeout_secs * HZ / 2); + } else { + wait_event(allocation_wait, atomic_read(&kfence_allocation_gate)); } + /* Disable static key and reset timer. */ static_branch_disable(&kfence_allocation_key); #endif - schedule_delayed_work(&kfence_timer, msecs_to_jiffies(kfence_sample_interval)); + queue_delayed_work(system_power_efficient_wq, &kfence_timer, + msecs_to_jiffies(kfence_sample_interval)); } static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate); @@ -654,7 +666,7 @@ void __init kfence_init(void) } WRITE_ONCE(kfence_enabled, true); - schedule_delayed_work(&kfence_timer, 0); + queue_delayed_work(system_power_efficient_wq, &kfence_timer, 0); pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool, (void *)(__kfence_pool + KFENCE_POOL_SIZE)); @@ -728,6 +740,19 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) */ if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1) return NULL; +#ifdef CONFIG_KFENCE_STATIC_KEYS + /* + * waitqueue_active() is fully ordered after the update of + * kfence_allocation_gate per atomic_inc_return(). + */ + if (waitqueue_active(&allocation_wait)) { + /* + * Calling wake_up() here may deadlock when allocations happen + * from within timer code. Use an irq_work to defer it. 
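
The KFENCE changes in this hunk replace the old schedule_timeout() polling loop with a wait queue, and the wake-up on the allocation side is deferred through irq_work because __kfence_alloc() can run in sections where a direct wake_up() would take scheduler or timer locks that are already held. The skeleton of that pattern, reduced from the diff (names shortened, not the exact KFENCE code)::

    #include <linux/atomic.h>
    #include <linux/irq_work.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(gate_wait);
    static atomic_t gate;

    static void gate_wake(struct irq_work *work)
    {
        wake_up(&gate_wait);
    }
    static DEFINE_IRQ_WORK(gate_wake_work, gate_wake);

    /* timer worker: sleep until one allocation has passed the gate */
    static void await_allocation(void)
    {
        wait_event(gate_wait, atomic_read(&gate));
    }

    /* allocation path: may hold timer/rq locks, so defer the wake-up
     * to hard-IRQ context instead of calling wake_up() directly */
    static void pass_gate(void)
    {
        if (atomic_inc_return(&gate) == 1 && waitqueue_active(&gate_wait))
            irq_work_queue(&gate_wake_work);
    }
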
+ */ + irq_work_queue(&wake_up_kfence_timer_work); + } +#endif if (!READ_ONCE(kfence_enabled)) return NULL; diff --git a/mm/khugepaged.c b/mm/khugepaged.c index a7d6cb912b05..ea74da3232ab 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -481,7 +481,7 @@ int __khugepaged_enter(struct mm_struct *mm) return -ENOMEM; /* __khugepaged_exit() must not run from under us */ - VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm); + VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { free_mm_slot(mm_slot); return 0; @@ -716,17 +716,17 @@ next: if (pte_write(pteval)) writable = true; } - if (likely(writable)) { - if (likely(referenced)) { - result = SCAN_SUCCEED; - trace_mm_collapse_huge_page_isolate(page, none_or_zero, - referenced, writable, result); - return 1; - } - } else { + + if (unlikely(!writable)) { result = SCAN_PAGE_RO; + } else if (unlikely(!referenced)) { + result = SCAN_LACK_REFERENCED_PAGE; + } else { + result = SCAN_SUCCEED; + trace_mm_collapse_huge_page_isolate(page, none_or_zero, + referenced, writable, result); + return 1; } - out: release_pte_pages(pte, _pte, compound_pagelist); trace_mm_collapse_huge_page_isolate(page, none_or_zero, @@ -809,7 +809,7 @@ static bool khugepaged_scan_abort(int nid) * If node_reclaim_mode is disabled, then no extra effort is made to * allocate memory locally. */ - if (!node_reclaim_mode) + if (!node_reclaim_enabled()) return false; /* If there is a count for this node already, it must be acceptable */ @@ -1128,10 +1128,10 @@ static void collapse_huge_page(struct mm_struct *mm, mmap_write_lock(mm); result = hugepage_vma_revalidate(mm, address, &vma); if (result) - goto out; + goto out_up_write; /* check if the pmd is still valid */ if (mm_find_pmd(mm, address) != pmd) - goto out; + goto out_up_write; anon_vma_lock_write(vma->anon_vma); @@ -1171,7 +1171,7 @@ static void collapse_huge_page(struct mm_struct *mm, spin_unlock(pmd_ptl); anon_vma_unlock_write(vma->anon_vma); result = SCAN_FAIL; - goto out; + goto out_up_write; } /* @@ -1183,19 +1183,18 @@ static void collapse_huge_page(struct mm_struct *mm, __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl, &compound_pagelist); pte_unmap(pte); + /* + * spin_lock() below is not the equivalent of smp_wmb(), but + * the smp_wmb() inside __SetPageUptodate() can be reused to + * avoid the copy_huge_page writes to become visible after + * the set_pmd_at() write. + */ __SetPageUptodate(new_page); pgtable = pmd_pgtable(_pmd); _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); - /* - * spin_lock() below is not the equivalent of smp_wmb(), so - * this is needed to avoid the copy_huge_page writes to become - * visible after the set_pmd_at() write. 
- */ - smp_wmb(); - spin_lock(pmd_ptl); BUG_ON(!pmd_none(*pmd)); page_add_new_anon_rmap(new_page, vma, address, true); @@ -1216,8 +1215,6 @@ out_nolock: mem_cgroup_uncharge(*hpage); trace_mm_collapse_huge_page(mm, isolated, result); return; -out: - goto out_up_write; } static int khugepaged_scan_pmd(struct mm_struct *mm, @@ -1274,10 +1271,6 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, goto out_unmap; } } - if (!pte_present(pteval)) { - result = SCAN_PTE_NON_PRESENT; - goto out_unmap; - } if (pte_uffd_wp(pteval)) { /* * Don't collapse the page if any of the small @@ -1447,7 +1440,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) int i; if (!vma || !vma->vm_file || - vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE) + !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE)) return; /* @@ -1533,16 +1526,16 @@ abort: goto drop_hpage; } -static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot) +static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot) { struct mm_struct *mm = mm_slot->mm; int i; if (likely(mm_slot->nr_pte_mapped_thp == 0)) - return 0; + return; if (!mmap_write_trylock(mm)) - return -EBUSY; + return; if (unlikely(khugepaged_test_exit(mm))) goto out; @@ -1553,7 +1546,6 @@ static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot) out: mm_slot->nr_pte_mapped_thp = 0; mmap_write_unlock(mm); - return 0; } static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) @@ -2057,9 +2049,8 @@ static void khugepaged_scan_file(struct mm_struct *mm, BUILD_BUG(); } -static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot) +static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot) { - return 0; } #endif @@ -2205,11 +2196,9 @@ static void khugepaged_do_scan(void) { struct page *hpage = NULL; unsigned int progress = 0, pass_through_head = 0; - unsigned int pages = khugepaged_pages_to_scan; + unsigned int pages = READ_ONCE(khugepaged_pages_to_scan); bool wait = true; - barrier(); /* write khugepaged_pages_to_scan to local stack */ - lru_add_drain_all(); while (progress < pages) { @@ -215,8 +215,6 @@ struct rmap_item { #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ #define STABLE_FLAG 0x200 /* is listed from the stable tree */ -#define KSM_FLAG_MASK (SEQNR_MASK|UNSTABLE_FLAG|STABLE_FLAG) - /* to mask all the flags */ /* The stable and unstable tree heads */ static struct rb_root one_stable_tree[1] = { RB_ROOT }; @@ -778,12 +776,11 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) struct page *page; stable_node = rmap_item->head; - page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); + page = get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK); if (!page) goto out; hlist_del(&rmap_item->hlist); - unlock_page(page); put_page(page); if (!hlist_empty(&stable_node->hlist)) @@ -794,6 +791,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) stable_node->rmap_hlist_len--; put_anon_vma(rmap_item->anon_vma); + rmap_item->head = NULL; rmap_item->address &= PAGE_MASK; } else if (rmap_item->address & UNSTABLE_FLAG) { @@ -817,8 +815,7 @@ out: cond_resched(); /* we're called from many long loops */ } -static void remove_trailing_rmap_items(struct mm_slot *mm_slot, - struct rmap_item **rmap_list) +static void remove_trailing_rmap_items(struct rmap_item **rmap_list) { while (*rmap_list) { struct rmap_item *rmap_item = *rmap_list; @@ -989,7 +986,7 @@ static int 
unmerge_and_remove_all_rmap_items(void) goto error; } - remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list); + remove_trailing_rmap_items(&mm_slot->rmap_list); mmap_read_unlock(mm); spin_lock(&ksm_mmlist_lock); @@ -1771,7 +1768,6 @@ chain_append: * stable_node_dup is the dup to replace. */ if (stable_node_dup == stable_node) { - VM_BUG_ON(is_stable_node_chain(stable_node_dup)); VM_BUG_ON(is_stable_node_dup(stable_node_dup)); /* chain is missing so create it */ stable_node = alloc_stable_node_chain(stable_node_dup, @@ -1785,7 +1781,6 @@ chain_append: * of the current nid for this page * content. */ - VM_BUG_ON(!is_stable_node_chain(stable_node)); VM_BUG_ON(!is_stable_node_dup(stable_node_dup)); VM_BUG_ON(page_node->head != &migrate_nodes); list_del(&page_node->list); @@ -2337,7 +2332,7 @@ next_mm: * Nuke all the rmap_items that are above this current rmap: * because there were no VM_MERGEABLE vmas with such addresses. */ - remove_trailing_rmap_items(slot, ksm_scan.rmap_list); + remove_trailing_rmap_items(ksm_scan.rmap_list); spin_lock(&ksm_mmlist_lock); ksm_scan.mm_slot = list_entry(slot->mm_list.next, @@ -2634,7 +2629,7 @@ again: vma = vmac->vma; /* Ignore the stable/unstable/sqnr flags */ - addr = rmap_item->address & ~KSM_FLAG_MASK; + addr = rmap_item->address & PAGE_MASK; if (addr < vma->vm_start || addr >= vma->vm_end) continue; diff --git a/mm/list_lru.c b/mm/list_lru.c index 6f067b6b935f..cd58790d0fb3 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -125,8 +125,8 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item) list_add_tail(item, &l->list); /* Set shrinker bit if the first element was added */ if (!l->nr_items++) - memcg_set_shrinker_bit(memcg, nid, - lru_shrinker_id(lru)); + set_shrinker_bit(memcg, nid, + lru_shrinker_id(lru)); nlru->nr_items++; spin_unlock(&nlru->lock); return true; @@ -540,7 +540,7 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid, if (src->nr_items) { dst->nr_items += src->nr_items; - memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru)); + set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru)); src->nr_items = 0; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c100265dc393..3004afb6d090 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -400,130 +400,6 @@ DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); EXPORT_SYMBOL(memcg_kmem_enabled_key); #endif -static int memcg_shrinker_map_size; -static DEFINE_MUTEX(memcg_shrinker_map_mutex); - -static void memcg_free_shrinker_map_rcu(struct rcu_head *head) -{ - kvfree(container_of(head, struct memcg_shrinker_map, rcu)); -} - -static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg, - int size, int old_size) -{ - struct memcg_shrinker_map *new, *old; - struct mem_cgroup_per_node *pn; - int nid; - - lockdep_assert_held(&memcg_shrinker_map_mutex); - - for_each_node(nid) { - pn = memcg->nodeinfo[nid]; - old = rcu_dereference_protected(pn->shrinker_map, true); - /* Not yet online memcg */ - if (!old) - return 0; - - new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid); - if (!new) - return -ENOMEM; - - /* Set all old bits, clear all new bits */ - memset(new->map, (int)0xff, old_size); - memset((void *)new->map + old_size, 0, size - old_size); - - rcu_assign_pointer(pn->shrinker_map, new); - call_rcu(&old->rcu, memcg_free_shrinker_map_rcu); - } - - return 0; -} - -static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) -{ - struct mem_cgroup_per_node *pn; - struct memcg_shrinker_map *map; - int nid; - - if (mem_cgroup_is_root(memcg)) - return; 
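
The memcg_expand_one_shrinker_map() being deleted here (its logic reappears, extended with a nr_deferred array, in the mm/vmscan.c hunk near the end of this diff) grows each per-node bitmap by setting every pre-existing bit and clearing only the freshly added ones: old shrinker ids stay conservatively marked as "may have objects" while new ids start clean. That memset pair, modelled standalone::

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* mirror of the two memset() calls in the deleted code */
    static unsigned char *grow_map(unsigned char *old, size_t old_size,
                                   size_t new_size)
    {
        unsigned char *new = malloc(new_size);

        if (!new)
            return old;
        memset(new, 0xff, old_size);                    /* set all old bits */
        memset(new + old_size, 0, new_size - old_size); /* clear new bits */
        free(old);
        return new;
    }

    int main(void)
    {
        unsigned char *map = grow_map(NULL, 0, 4);

        map = grow_map(map, 4, 8);
        for (int i = 0; i < 8; i++)
            printf("byte %d: %#x\n", i, map[i]);
        free(map);
        return 0;
    }
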
- - for_each_node(nid) { - pn = memcg->nodeinfo[nid]; - map = rcu_dereference_protected(pn->shrinker_map, true); - kvfree(map); - rcu_assign_pointer(pn->shrinker_map, NULL); - } -} - -static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) -{ - struct memcg_shrinker_map *map; - int nid, size, ret = 0; - - if (mem_cgroup_is_root(memcg)) - return 0; - - mutex_lock(&memcg_shrinker_map_mutex); - size = memcg_shrinker_map_size; - for_each_node(nid) { - map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid); - if (!map) { - memcg_free_shrinker_maps(memcg); - ret = -ENOMEM; - break; - } - rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map); - } - mutex_unlock(&memcg_shrinker_map_mutex); - - return ret; -} - -int memcg_expand_shrinker_maps(int new_id) -{ - int size, old_size, ret = 0; - struct mem_cgroup *memcg; - - size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long); - old_size = memcg_shrinker_map_size; - if (size <= old_size) - return 0; - - mutex_lock(&memcg_shrinker_map_mutex); - if (!root_mem_cgroup) - goto unlock; - - for_each_mem_cgroup(memcg) { - if (mem_cgroup_is_root(memcg)) - continue; - ret = memcg_expand_one_shrinker_map(memcg, size, old_size); - if (ret) { - mem_cgroup_iter_break(NULL, memcg); - goto unlock; - } - } -unlock: - if (!ret) - memcg_shrinker_map_size = size; - mutex_unlock(&memcg_shrinker_map_mutex); - return ret; -} - -void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) -{ - if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { - struct memcg_shrinker_map *map; - - rcu_read_lock(); - map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map); - /* Pairs with smp mb in shrink_slab() */ - smp_mb__before_atomic(); - set_bit(shrinker_id, map->map); - rcu_read_unlock(); - } -} - /** * mem_cgroup_css_from_page - css of the memcg associated with a page * @page: page of interest @@ -5242,11 +5118,11 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css) struct mem_cgroup *memcg = mem_cgroup_from_css(css); /* - * A memcg must be visible for memcg_expand_shrinker_maps() + * A memcg must be visible for expand_shrinker_info() * by the time the maps are allocated. So, we allocate maps * here, when for_each_mem_cgroup() can't skip it. */ - if (memcg_alloc_shrinker_maps(memcg)) { + if (alloc_shrinker_info(memcg)) { mem_cgroup_id_remove(memcg); return -ENOMEM; } @@ -5278,6 +5154,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) page_counter_set_low(&memcg->memory, 0); memcg_offline_kmem(memcg); + reparent_shrinker_deferred(memcg); wb_memcg_offline(memcg); drain_all_stock(memcg); @@ -5310,7 +5187,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css) vmpressure_cleanup(&memcg->vmpressure); cancel_work_sync(&memcg->high_work); mem_cgroup_remove_from_trees(memcg); - memcg_free_shrinker_maps(memcg); + free_shrinker_info(memcg); memcg_free_kmem(memcg); mem_cgroup_free(memcg); } diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 0cdbbfbc5757..70620d0dd923 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -42,6 +42,16 @@ #include "internal.h" #include "shuffle.h" + +/* + * memory_hotplug.memmap_on_memory parameter + */ +static bool memmap_on_memory __ro_after_init; +#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY +module_param(memmap_on_memory, bool, 0444); +MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug"); +#endif + /* * online_page_callback contains pointer to current page onlining function. 
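
The memmap_on_memory parameter introduced above is declared with mode 0444, so it cannot be toggled through sysfs once the system is up; for the built-in memory_hotplug code it is selected on the kernel command line::

    memory_hotplug.memmap_on_memory=1

With the flag set, an eligible hot-added memory block carves its own struct pages out of the added range (the eligibility test is mhp_supports_memmap_on_memory() further down) instead of consuming already-online memory.
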
* Initially it is generic_online_page(). If it is required it could be @@ -648,9 +658,16 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages) * decide to not expose all pages to the buddy (e.g., expose them * later). We account all pages as being online and belonging to this * zone ("present"). + * When using memmap_on_memory, the range might not be aligned to + * MAX_ORDER_NR_PAGES - 1, but pageblock aligned. __ffs() will detect + * this and the first chunk to online will be pageblock_nr_pages. */ - for (pfn = start_pfn; pfn < end_pfn; pfn += MAX_ORDER_NR_PAGES) - (*online_page_callback)(pfn_to_page(pfn), MAX_ORDER - 1); + for (pfn = start_pfn; pfn < end_pfn;) { + int order = min(MAX_ORDER - 1UL, __ffs(pfn)); + + (*online_page_callback)(pfn_to_page(pfn), order); + pfn += (1UL << order); + } /* mark all involved sections as online */ online_mem_sections(start_pfn, end_pfn); @@ -817,7 +834,7 @@ static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn return movable_node_enabled ? movable_zone : kernel_zone; } -struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, +struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, unsigned long nr_pages) { if (online_type == MMOP_ONLINE_KERNEL) @@ -829,24 +846,86 @@ struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, return default_zone_for_pfn(nid, start_pfn, nr_pages); } -int __ref online_pages(unsigned long pfn, unsigned long nr_pages, - int online_type, int nid) +/* + * This function should only be called by memory_block_{online,offline}, + * and {online,offline}_pages. + */ +void adjust_present_page_count(struct zone *zone, long nr_pages) +{ + unsigned long flags; + + zone->present_pages += nr_pages; + pgdat_resize_lock(zone->zone_pgdat, &flags); + zone->zone_pgdat->node_present_pages += nr_pages; + pgdat_resize_unlock(zone->zone_pgdat, &flags); +} + +int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, + struct zone *zone) +{ + unsigned long end_pfn = pfn + nr_pages; + int ret; + + ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages)); + if (ret) + return ret; + + move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE); + + /* + * It might be that the vmemmap_pages fully span sections. If that is + * the case, mark those sections online here as otherwise they will be + * left offline. + */ + if (nr_pages >= PAGES_PER_SECTION) + online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION)); + + return ret; +} + +void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages) +{ + unsigned long end_pfn = pfn + nr_pages; + + /* + * It might be that the vmemmap_pages fully span sections. If that is + * the case, mark those sections offline here as otherwise they will be + * left online. + */ + if (nr_pages >= PAGES_PER_SECTION) + offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION)); + + /* + * The pages associated with this vmemmap have been offlined, so + * we can reset its state here. 
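
The rewritten onlining loop above no longer assumes MAX_ORDER alignment: each step frees the largest power-of-two chunk allowed by both the current pfn's alignment (__ffs(pfn)) and MAX_ORDER - 1, which is what makes a pageblock-aligned start, as left behind by a self-hosted vmemmap, workable. A userspace rendering, with __builtin_ctzl standing in for __ffs and x86-64 constants assumed::

    #include <stdio.h>

    #define MAX_ORDER 11    /* assumed default */

    int main(void)
    {
        /* pageblock-aligned (order-9) but not MAX_ORDER-aligned start */
        unsigned long pfn = 0x8200, end_pfn = 0x9000;

        while (pfn < end_pfn) {
            unsigned long order = __builtin_ctzl(pfn);

            if (order > MAX_ORDER - 1)
                order = MAX_ORDER - 1;
            printf("online order-%lu chunk (%4lu pages) at pfn %#lx\n",
                   order, 1UL << order, pfn);
            pfn += 1UL << order;
        }
        return 0;
    }
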
+	 */
+	remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
+	kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
+}
+
+int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *zone)
 {
 	unsigned long flags;
-	struct zone *zone;
 	int need_zonelists_rebuild = 0;
+	const int nid = zone_to_nid(zone);
 	int ret;
 	struct memory_notify arg;
 
-	/* We can only online full sections (e.g., SECTION_IS_ONLINE) */
+	/*
+	 * {on,off}lining is constrained to full memory sections (or more
+	 * precisely to memory blocks from the user space POV).
+	 * memmap_on_memory is an exception because it reserves initial part
+	 * of the physical memory space for vmemmaps. That space is pageblock
+	 * aligned.
+	 */
 	if (WARN_ON_ONCE(!nr_pages ||
-			 !IS_ALIGNED(pfn | nr_pages, PAGES_PER_SECTION)))
+			 !IS_ALIGNED(pfn, pageblock_nr_pages) ||
+			 !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
 		return -EINVAL;
 
 	mem_hotplug_begin();
 
 	/* associate pfn range with the zone */
-	zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
 	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
 
 	arg.start_pfn = pfn;
@@ -877,11 +956,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
 	}
 
 	online_pages_range(pfn, nr_pages);
-	zone->present_pages += nr_pages;
-
-	pgdat_resize_lock(zone->zone_pgdat, &flags);
-	zone->zone_pgdat->node_present_pages += nr_pages;
-	pgdat_resize_unlock(zone->zone_pgdat, &flags);
+	adjust_present_page_count(zone, nr_pages);
 
 	node_states_set_node(nid, &arg);
 	if (need_zonelists_rebuild)
@@ -1064,6 +1139,45 @@ static int online_memory_block(struct memory_block *mem, void *arg)
 	return device_online(&mem->dev);
 }
 
+bool mhp_supports_memmap_on_memory(unsigned long size)
+{
+	unsigned long nr_vmemmap_pages = size / PAGE_SIZE;
+	unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
+	unsigned long remaining_size = size - vmemmap_size;
+
+	/*
+	 * Besides having arch support and the feature enabled at runtime, we
+	 * need a few more assumptions to hold true:
+	 *
+	 * a) We span a single memory block: memory onlining/offlining happens
+	 *    in memory block granularity. We don't want the vmemmap of online
+	 *    memory blocks to reside on offline memory blocks. In the future,
+	 *    we might want to support variable-sized memory blocks to make the
+	 *    feature more versatile.
+	 *
+	 * b) The vmemmap pages span complete PMDs: We don't want vmemmap code
+	 *    to populate memory from the altmap for unrelated parts (i.e.,
+	 *    other memory blocks)
+	 *
+	 * c) The vmemmap pages (and thereby the pages that will be exposed to
+	 *    the buddy) have to cover full pageblocks: memory onlining/offlining
+	 *    code requires applicable ranges to be page-aligned, for example, to
+	 *    set the migratetypes properly.
+	 *
+	 * TODO: Although we have a check here to make sure that vmemmap pages
+	 *       fully populate a PMD, it is not the right place to check for
+	 *       this. A much better solution involves improving vmemmap code
+	 *       to fallback to base pages when trying to populate vmemmap using
+	 *       altmap as an alternative source of memory, and we do not exactly
+	 *       populate a single PMD.
+	 */
+	return memmap_on_memory &&
+	       IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) &&
+	       size == memory_block_size_bytes() &&
+	       IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
+	       IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT));
+}
+
 /*
  * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
  * and online/offline operations (triggered e.g. by sysfs).
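
Plugging typical x86-64 numbers into mhp_supports_memmap_on_memory() shows why the common configuration passes: a 128 MiB memory block of 4 KiB pages needs 32768 struct pages of 64 bytes each, i.e. exactly one 2 MiB PMD worth of vmemmap, and the 126 MiB remainder is a whole number of pageblocks. Every constant below is an assumption for the sketch, not taken from the diff::

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define STRUCT_PAGE   64UL          /* assumed sizeof(struct page) */
    #define PMD_SIZE      (2UL << 20)
    #define PAGEBLOCK     (2UL << 20)   /* 512 pages of 4 KiB */
    #define MEMORY_BLOCK  (128UL << 20)

    static bool supports_memmap_on_memory(unsigned long size)
    {
        unsigned long vmemmap_size = (size / PAGE_SIZE) * STRUCT_PAGE;
        unsigned long remaining = size - vmemmap_size;

        return size == MEMORY_BLOCK &&
               vmemmap_size % PMD_SIZE == 0 &&
               remaining % PAGEBLOCK == 0;
    }

    int main(void)
    {
        unsigned long vmemmap = (MEMORY_BLOCK / PAGE_SIZE) * STRUCT_PAGE;

        printf("vmemmap uses %lu MiB of the %lu MiB block -> %ssupported\n",
               vmemmap >> 20, MEMORY_BLOCK >> 20,
               supports_memmap_on_memory(MEMORY_BLOCK) ? "" : "not ");
        return 0;
    }
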
@@ -1073,6 +1187,7 @@ static int online_memory_block(struct memory_block *mem, void *arg)
 int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
 {
 	struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
+	struct vmem_altmap mhp_altmap = {};
 	u64 start, size;
 	bool new_node = false;
 	int ret;
@@ -1099,13 +1214,26 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
 		goto error;
 	new_node = ret;
 
+	/*
+	 * Self hosted memmap array
+	 */
+	if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
+		if (!mhp_supports_memmap_on_memory(size)) {
+			ret = -EINVAL;
+			goto error;
+		}
+		mhp_altmap.free = PHYS_PFN(size);
+		mhp_altmap.base_pfn = PHYS_PFN(start);
+		params.altmap = &mhp_altmap;
+	}
+
 	/* call arch's memory hotadd */
 	ret = arch_add_memory(nid, start, size, &params);
 	if (ret < 0)
 		goto error;
 
 	/* create memory block devices after memory was added */
-	ret = create_memory_block_devices(start, size);
+	ret = create_memory_block_devices(start, size, mhp_altmap.alloc);
 	if (ret) {
 		arch_remove_memory(nid, start, size, NULL);
 		goto error;
@@ -1573,9 +1701,16 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 	int ret, node;
 	char *reason;
 
-	/* We can only offline full sections (e.g., SECTION_IS_ONLINE) */
+	/*
+	 * {on,off}lining is constrained to full memory sections (or more
+	 * precisely to memory blocks from the user space POV).
+	 * memmap_on_memory is an exception because it reserves initial part
+	 * of the physical memory space for vmemmaps. That space is pageblock
+	 * aligned.
+	 */
 	if (WARN_ON_ONCE(!nr_pages ||
-			 !IS_ALIGNED(start_pfn | nr_pages, PAGES_PER_SECTION)))
+			 !IS_ALIGNED(start_pfn, pageblock_nr_pages) ||
+			 !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
 		return -EINVAL;
 
 	mem_hotplug_begin();
@@ -1611,6 +1746,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 	 * in a way that pages from isolated pageblock are left on pcplists.
 	 */
 	zone_pcp_disable(zone);
+	lru_cache_disable();
 
 	/* set above range as isolated */
 	ret = start_isolate_page_range(start_pfn, end_pfn,
@@ -1642,7 +1778,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 		}
 
 		cond_resched();
-		lru_add_drain_all();
 
 		ret = scan_movable_pages(pfn, end_pfn, &pfn);
 		if (!ret) {
@@ -1687,15 +1822,12 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 	zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages;
 	spin_unlock_irqrestore(&zone->lock, flags);
 
+	lru_cache_enable();
 	zone_pcp_enable(zone);
 
 	/* removal success */
 	adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
-	zone->present_pages -= nr_pages;
-
-	pgdat_resize_lock(zone->zone_pgdat, &flags);
-	zone->zone_pgdat->node_present_pages -= nr_pages;
-	pgdat_resize_unlock(zone->zone_pgdat, &flags);
+	adjust_present_page_count(zone, -nr_pages);
 
 	init_per_zone_wmark_min();
 
@@ -1750,6 +1882,14 @@ static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
 	return 0;
 }
 
+static int get_nr_vmemmap_pages_cb(struct memory_block *mem, void *arg)
+{
+	/*
+	 * If not set, continue with the next block.
+ */ + return mem->nr_vmemmap_pages; +} + static int check_cpu_on_node(pg_data_t *pgdat) { int cpu; @@ -1824,6 +1964,9 @@ EXPORT_SYMBOL(try_offline_node); static int __ref try_remove_memory(int nid, u64 start, u64 size) { int rc = 0; + struct vmem_altmap mhp_altmap = {}; + struct vmem_altmap *altmap = NULL; + unsigned long nr_vmemmap_pages; BUG_ON(check_hotplug_memory_range(start, size)); @@ -1836,6 +1979,31 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size) if (rc) return rc; + /* + * We only support removing memory added with MHP_MEMMAP_ON_MEMORY in + * the same granularity it was added - a single memory block. + */ + if (memmap_on_memory) { + nr_vmemmap_pages = walk_memory_blocks(start, size, NULL, + get_nr_vmemmap_pages_cb); + if (nr_vmemmap_pages) { + if (size != memory_block_size_bytes()) { + pr_warn("Refuse to remove %#llx - %#llx," + "wrong granularity\n", + start, start + size); + return -EINVAL; + } + + /* + * Let remove_pmd_table->free_hugepage_table do the + * right thing if we used vmem_altmap when hot-adding + * the range. + */ + mhp_altmap.alloc = nr_vmemmap_pages; + altmap = &mhp_altmap; + } + } + /* remove memmap entry */ firmware_map_remove(start, start + size, "System RAM"); @@ -1847,7 +2015,7 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size) mem_hotplug_begin(); - arch_remove_memory(nid, start, size, NULL); + arch_remove_memory(nid, start, size, altmap); if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) { memblock_free(start, size); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index cd0295567a04..3ebe2cfc64af 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -330,7 +330,7 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) else if (pol->flags & MPOL_F_RELATIVE_NODES) mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); else { - nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed, + nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed, *nodes); pol->w.cpuset_mems_allowed = *nodes; } @@ -1124,7 +1124,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, int err = 0; nodemask_t tmp; - migrate_prep(); + lru_cache_disable(); mmap_read_lock(mm); @@ -1161,7 +1161,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, tmp = *from; while (!nodes_empty(tmp)) { - int s,d; + int s, d; int source = NUMA_NO_NODE; int dest = 0; @@ -1208,6 +1208,8 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, break; } mmap_read_unlock(mm); + + lru_cache_enable(); if (err < 0) return err; return busy; @@ -1323,7 +1325,7 @@ static long do_mbind(unsigned long start, unsigned long len, if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { - migrate_prep(); + lru_cache_disable(); } { NODEMASK_SCRATCH(scratch); @@ -1371,6 +1373,8 @@ up_out: mmap_write_unlock(mm); mpol_out: mpol_put(new); + if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) + lru_cache_enable(); return err; } diff --git a/mm/mempool.c b/mm/mempool.c index fe19d290a301..a258cf4de575 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -251,7 +251,7 @@ EXPORT_SYMBOL(mempool_init); mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data) { - return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data, + return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data, GFP_KERNEL, NUMA_NO_NODE); } EXPORT_SYMBOL(mempool_create); diff --git a/mm/migrate.c b/mm/migrate.c index 47df0df8f21a..6b37d00890ca 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -57,28 +57,6 @@ #include 
"internal.h" -/* - * migrate_prep() needs to be called before we start compiling a list of pages - * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is - * undesirable, use migrate_prep_local() - */ -void migrate_prep(void) -{ - /* - * Clear the LRU lists so pages can be isolated. - * Note that pages may be moved off the LRU after we have - * drained them. Those pages will fail to migrate like other - * pages that may be busy. - */ - lru_add_drain_all(); -} - -/* Do the necessary work of migrate_prep but not if it involves other CPUs */ -void migrate_prep_local(void) -{ - lru_add_drain(); -} - int isolate_movable_page(struct page *page, isolate_mode_t mode) { struct address_space *mapping; @@ -140,15 +118,10 @@ out: return -EBUSY; } -/* It should be called on page which is PG_movable */ -void putback_movable_page(struct page *page) +static void putback_movable_page(struct page *page) { struct address_space *mapping; - VM_BUG_ON_PAGE(!PageLocked(page), page); - VM_BUG_ON_PAGE(!PageMovable(page), page); - VM_BUG_ON_PAGE(!PageIsolated(page), page); - mapping = page_mapping(page); mapping->a_ops->putback_page(page); __ClearPageIsolated(page); @@ -1375,7 +1348,7 @@ out_unlock: out: if (rc == MIGRATEPAGE_SUCCESS) putback_active_hugepage(hpage); - else if (rc != -EAGAIN && rc != MIGRATEPAGE_SUCCESS) + else if (rc != -EAGAIN) list_move_tail(&hpage->lru, ret); /* @@ -1445,6 +1418,8 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page, int rc, nr_subpages; LIST_HEAD(ret_pages); + trace_mm_migrate_pages_start(mode, reason); + if (!swapwrite) current->flags |= PF_SWAPWRITE; @@ -1769,7 +1744,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, int start, i; int err = 0, err1; - migrate_prep(); + lru_cache_disable(); for (i = start = 0; i < nr_pages; i++) { const void __user *p; @@ -1838,6 +1813,7 @@ out_flush: if (err >= 0) err = err1; out: + lru_cache_enable(); return err; } @@ -2110,17 +2086,6 @@ bool pmd_trans_migrating(pmd_t pmd) return PageLocked(page); } -static inline bool is_shared_exec_page(struct vm_area_struct *vma, - struct page *page) -{ - if (page_mapcount(page) != 1 && - (page_is_file_lru(page) || vma_is_shmem(vma)) && - (vma->vm_flags & VM_EXEC)) - return true; - - return false; -} - /* * Attempt to migrate a misplaced page to the specified destination * node. Caller is expected to have an elevated reference count on @@ -2138,7 +2103,8 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, * Don't migrate file pages that are mapped in multiple processes * with execute permissions as they are probably shared libraries. 
*/ - if (is_shared_exec_page(vma, page)) + if (page_mapcount(page) != 1 && page_is_file_lru(page) && + (vma->vm_flags & VM_EXEC)) goto out; /* @@ -2193,9 +2159,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, int page_lru = page_is_file_lru(page); unsigned long start = address & HPAGE_PMD_MASK; - if (is_shared_exec_page(vma, page)) - goto out; - new_page = alloc_pages_node(node, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE), HPAGE_PMD_ORDER); @@ -2307,7 +2270,6 @@ out_fail: out_unlock: unlock_page(page); -out: put_page(page); return 0; } @@ -2316,44 +2278,38 @@ out: #endif /* CONFIG_NUMA */ #ifdef CONFIG_DEVICE_PRIVATE -static int migrate_vma_collect_hole(unsigned long start, +static int migrate_vma_collect_skip(unsigned long start, unsigned long end, - __always_unused int depth, struct mm_walk *walk) { struct migrate_vma *migrate = walk->private; unsigned long addr; - /* Only allow populating anonymous memory. */ - if (!vma_is_anonymous(walk->vma)) { - for (addr = start; addr < end; addr += PAGE_SIZE) { - migrate->src[migrate->npages] = 0; - migrate->dst[migrate->npages] = 0; - migrate->npages++; - } - return 0; - } - for (addr = start; addr < end; addr += PAGE_SIZE) { - migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; migrate->dst[migrate->npages] = 0; - migrate->npages++; - migrate->cpages++; + migrate->src[migrate->npages++] = 0; } return 0; } -static int migrate_vma_collect_skip(unsigned long start, +static int migrate_vma_collect_hole(unsigned long start, unsigned long end, + __always_unused int depth, struct mm_walk *walk) { struct migrate_vma *migrate = walk->private; unsigned long addr; + /* Only allow populating anonymous memory. */ + if (!vma_is_anonymous(walk->vma)) + return migrate_vma_collect_skip(start, end, walk); + for (addr = start; addr < end; addr += PAGE_SIZE) { + migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; migrate->dst[migrate->npages] = 0; - migrate->src[migrate->npages++] = 0; + migrate->npages++; + migrate->cpages++; } return 0; @@ -2973,6 +2929,13 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE); entry = swp_entry_to_pte(swp_entry); + } else { + /* + * For now we only support migrating to un-addressable + * device memory. 
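
After the reordering in this hunk, migrate_vma_collect_hole() delegates non-anonymous ranges to migrate_vma_collect_skip() instead of open-coding the same loop, and the distinction between the two becomes explicit: a skipped slot stays empty, while a hole in anonymous memory is still migratable because a fresh destination page can be allocated for it. A standalone model of the two fills (array sizes and the flag value are placeholders)::

    #include <stdbool.h>
    #include <stdio.h>

    #define MIGRATE_PFN_MIGRATE 1UL     /* placeholder flag bit */

    struct collect {
        unsigned long src[8], dst[8];
        unsigned long npages, cpages;
    };

    /* skip: keep src/dst in step with the range, collect nothing */
    static void collect_skip(struct collect *c, int n)
    {
        for (int i = 0; i < n; i++) {
            c->dst[c->npages] = 0;
            c->src[c->npages++] = 0;
        }
    }

    /* hole: empty anonymous ptes count as collected and migratable */
    static void collect_hole(struct collect *c, int n, bool anonymous)
    {
        if (!anonymous) {
            collect_skip(c, n);
            return;
        }
        for (int i = 0; i < n; i++) {
            c->src[c->npages] = MIGRATE_PFN_MIGRATE;
            c->dst[c->npages] = 0;
            c->npages++;
            c->cpages++;
        }
    }

    int main(void)
    {
        struct collect c = { .npages = 0 };

        collect_hole(&c, 4, true);
        collect_skip(&c, 2);
        printf("%lu slots filled, %lu migratable\n", c.npages, c.cpages);
        return 0;
    }
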
+ */ + pr_warn_once("Unsupported ZONE_DEVICE page type.\n"); + goto abort; } } else { entry = mk_pte(page, vma->vm_page_prot); diff --git a/mm/mlock.c b/mm/mlock.c index f8f8cc32d03d..df590fda5688 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -559,7 +559,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len, vm_flags_t flags) { unsigned long nstart, end, tmp; - struct vm_area_struct * vma, * prev; + struct vm_area_struct *vma, *prev; int error; VM_BUG_ON(offset_in_page(start)); @@ -737,7 +737,7 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) */ static int apply_mlockall_flags(int flags) { - struct vm_area_struct * vma, * prev = NULL; + struct vm_area_struct *vma, *prev = NULL; vm_flags_t to_add = 0; current->mm->def_flags &= VM_LOCKED_CLEAR_MASK; diff --git a/mm/mmap.c b/mm/mmap.c index 347ef9b83bb5..c1b848fa7da6 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3029,25 +3029,9 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, flags &= MAP_NONBLOCK; flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; - if (vma->vm_flags & VM_LOCKED) { - struct vm_area_struct *tmp; + if (vma->vm_flags & VM_LOCKED) flags |= MAP_LOCKED; - /* drop PG_Mlocked flag for over-mapped range */ - for (tmp = vma; tmp->vm_start >= start + size; - tmp = tmp->vm_next) { - /* - * Split pmd and munlock page on the border - * of the range. - */ - vma_adjust_trans_huge(tmp, start, start + size, 0); - - munlock_vma_pages_range(tmp, - max(tmp->vm_start, start), - min(tmp->vm_end, start + size)); - } - } - file = get_file(vma->vm_file); ret = do_mmap(vma->vm_file, start, size, prot, flags, pgoff, &populate, NULL); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index fa1cf18bac97..3df2ac6b8686 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -993,7 +993,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message) if (oom_group) { mem_cgroup_print_oom_group(oom_group); mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member, - (void*)message); + (void *)message); mem_cgroup_put(oom_group); } } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6b208b1843bf..bcdc0c6f21f1 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3859,16 +3859,13 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) return alloc_flags; } -static inline unsigned int current_alloc_flags(gfp_t gfp_mask, - unsigned int alloc_flags) +/* Must be called after current_gfp_context() which can change gfp_mask */ +static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, + unsigned int alloc_flags) { #ifdef CONFIG_CMA - unsigned int pflags = current->flags; - - if (!(pflags & PF_MEMALLOC_NOCMA) && - gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) + if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) alloc_flags |= ALLOC_CMA; - #endif return alloc_flags; } @@ -3968,7 +3965,7 @@ retry: if (alloc_flags & ALLOC_NO_WATERMARKS) goto try_this_zone; - if (node_reclaim_mode == 0 || + if (!node_reclaim_enabled() || !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) continue; @@ -4204,6 +4201,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, memalloc_noreclaim_restore(noreclaim_flag); psi_memstall_leave(&pflags); + if (*compact_result == COMPACT_SKIPPED) + return NULL; /* * At least in one zone compaction wasn't deferred or skipped, so let's * count a compaction stall @@ -4524,7 +4523,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask) } else if (unlikely(rt_task(current)) && !in_interrupt()) alloc_flags |= ALLOC_HARDER; - alloc_flags = current_alloc_flags(gfp_mask, alloc_flags); + 
alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); return alloc_flags; } @@ -4826,7 +4825,7 @@ retry: reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); if (reserve_flags) - alloc_flags = current_alloc_flags(gfp_mask, reserve_flags); + alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags); /* * Reset the nodemask and zonelist iterators if memory policies can be @@ -4995,7 +4994,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, if (should_fail_alloc_page(gfp_mask, order)) return false; - *alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags); + *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); /* Dirty zone balancing only done in the fast path */ ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); @@ -5178,6 +5177,14 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, } gfp &= gfp_allowed_mask; + /* + * Apply scoped allocation constraints. This is mainly about GFP_NOFS + * resp. GFP_NOIO which has to be inherited for all allocation requests + * from a particular context which has been marked by + * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures + * movable zones are not used during allocation. + */ + gfp = current_gfp_context(gfp); alloc_gfp = gfp; if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) @@ -5194,13 +5201,7 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, if (likely(page)) goto out; - /* - * Apply scoped allocation constraints. This is mainly about GFP_NOFS - * resp. GFP_NOIO which has to be inherited for all allocation requests - * from a particular context which has been marked by - * memalloc_no{fs,io}_{save,restore}. - */ - alloc_gfp = current_gfp_context(gfp); + alloc_gfp = gfp; ac.spread_dirty_pages = false; /* @@ -8679,7 +8680,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, }; - migrate_prep(); + lru_cache_disable(); while (pfn < end || !list_empty(&cc->migratepages)) { if (fatal_signal_pending(current)) { @@ -8689,14 +8690,13 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, if (list_empty(&cc->migratepages)) { cc->nr_migratepages = 0; - pfn = isolate_migratepages_range(cc, pfn, end); - if (!pfn) { - ret = -EINTR; + ret = isolate_migratepages_range(cc, pfn, end); + if (ret && ret != -EAGAIN) break; - } + pfn = cc->migrate_pfn; tries = 0; } else if (++tries == 5) { - ret = ret < 0 ? ret : -EBUSY; + ret = -EBUSY; break; } @@ -8706,7 +8706,16 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, ret = migrate_pages(&cc->migratepages, alloc_migration_target, NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE); + + /* + * On -ENOMEM, migrate_pages() bails out right away. It is pointless + * to retry again over this error, so do the same here. 
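
With this hunk, isolate_migratepages_range() reports 0 or a negative errno and leaves its progress in cc->migrate_pfn (per the new field comment in the mm/internal.h hunk earlier in this diff), instead of overloading a single returned pfn as both status and position. The caller's loop contract, modelled standalone::

    #include <errno.h>
    #include <stdio.h>

    struct cc { unsigned long migrate_pfn; };

    /* returns 0 or -errno; the scan cursor lives in cc->migrate_pfn */
    static int isolate_step(struct cc *cc, unsigned long end)
    {
        if (cc->migrate_pfn >= end)
            return -EINTR;      /* model: treat exhaustion as a stop */
        cc->migrate_pfn += 512; /* one pageblock scanned */
        return 0;
    }

    int main(void)
    {
        struct cc cc = { .migrate_pfn = 0 };
        int ret;

        for (;;) {
            ret = isolate_step(&cc, 2048);
            if (ret && ret != -EAGAIN)  /* -EAGAIN would mean retry */
                break;
            printf("cursor advanced to pfn %#lx\n", cc.migrate_pfn);
        }
        printf("stopped: %d\n", ret);
        return 0;
    }
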
+ */ + if (ret == -ENOMEM) + break; } + + lru_cache_enable(); if (ret < 0) { alloc_contig_dump_pages(&cc->migratepages); putback_movable_pages(&cc->migratepages); @@ -8799,7 +8808,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, ret = __alloc_contig_migrate_range(&cc, start, end); if (ret && ret != -EBUSY) goto done; - ret =0; + ret = 0; /* * Pages from [start, end) are within a MAX_ORDER_NR_PAGES @@ -8892,12 +8901,6 @@ static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, if (PageReserved(page)) return false; - - if (page_count(page) > 0) - return false; - - if (PageHuge(page)) - return false; } return true; } @@ -8969,9 +8972,9 @@ struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, } #endif /* CONFIG_CONTIG_ALLOC */ -void free_contig_range(unsigned long pfn, unsigned int nr_pages) +void free_contig_range(unsigned long pfn, unsigned long nr_pages) { - unsigned int count = 0; + unsigned long count = 0; for (; nr_pages--; pfn++) { struct page *page = pfn_to_page(pfn); @@ -8979,7 +8982,7 @@ void free_contig_range(unsigned long pfn, unsigned int nr_pages) count += page_count(page) != 1; __free_page(page); } - WARN(count != 0, "%d pages are still in use!\n", count); + WARN(count != 0, "%lu pages are still in use!\n", count); } EXPORT_SYMBOL(free_contig_range); @@ -9017,12 +9020,9 @@ void zone_pcp_enable(struct zone *zone) void zone_pcp_reset(struct zone *zone) { - unsigned long flags; int cpu; struct per_cpu_pageset *pset; - /* avoid races with drain_pages() */ - local_irq_save(flags); if (zone->pageset != &boot_pageset) { for_each_online_cpu(cpu) { pset = per_cpu_ptr(zone->pageset, cpu); @@ -9031,7 +9031,6 @@ void zone_pcp_reset(struct zone *zone) free_percpu(zone->pageset); zone->pageset = &boot_pageset; } - local_irq_restore(flags); } #ifdef CONFIG_MEMORY_HOTREMOVE diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index f5fee9cf90f8..4bcc11958089 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c @@ -9,7 +9,6 @@ #include <linux/mm.h> #include <linux/uio.h> #include <linux/sched.h> -#include <linux/compat.h> #include <linux/sched/mm.h> #include <linux/highmem.h> #include <linux/ptrace.h> diff --git a/mm/shmem.c b/mm/shmem.c index 162d8f8993bb..a08cedefbfaa 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3508,7 +3508,7 @@ static int shmem_parse_options(struct fs_context *fc, void *data) } } if (*this_char) { - char *value = strchr(this_char,'='); + char *value = strchr(this_char, '='); size_t len = 0; int err; diff --git a/mm/sparse.c b/mm/sparse.c index 33406ea2ecc4..b2ada9dc00cb 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -257,7 +257,7 @@ static void __init memory_present(int nid, unsigned long start, unsigned long en if (unlikely(!mem_section)) { unsigned long size, align; - size = sizeof(struct mem_section*) * NR_SECTION_ROOTS; + size = sizeof(struct mem_section *) * NR_SECTION_ROOTS; align = 1 << (INTERNODE_CACHE_SHIFT); mem_section = memblock_alloc(size, align); if (!mem_section) @@ -624,7 +624,6 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn) } } -#ifdef CONFIG_MEMORY_HOTREMOVE /* Mark all memory sections within the pfn range as offline */ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) { @@ -645,7 +644,6 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) ms->section_mem_map &= ~SECTION_IS_ONLINE; } } -#endif #ifdef CONFIG_SPARSEMEM_VMEMMAP static struct page * __meminit populate_section_memmap(unsigned long pfn, diff --git 
a/mm/swap.c b/mm/swap.c index 31b844d4ed94..a75a8265302b 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -36,6 +36,7 @@ #include <linux/hugetlb.h> #include <linux/page_idle.h> #include <linux/local_lock.h> +#include <linux/buffer_head.h> #include "internal.h" @@ -235,6 +236,18 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec) } } +/* return true if pagevec needs to drain */ +static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page) +{ + bool ret = false; + + if (!pagevec_add(pvec, page) || PageCompound(page) || + lru_cache_disabled()) + ret = true; + + return ret; +} + /* * Writeback is about to end against a page which has been marked for immediate * reclaim. If it still appears to be reclaimable, move it to the tail of the @@ -252,7 +265,7 @@ void rotate_reclaimable_page(struct page *page) get_page(page); local_lock_irqsave(&lru_rotate.lock, flags); pvec = this_cpu_ptr(&lru_rotate.pvec); - if (!pagevec_add(pvec, page) || PageCompound(page)) + if (pagevec_add_and_need_flush(pvec, page)) pagevec_lru_move_fn(pvec, pagevec_move_tail_fn); local_unlock_irqrestore(&lru_rotate.lock, flags); } @@ -343,7 +356,7 @@ static void activate_page(struct page *page) local_lock(&lru_pvecs.lock); pvec = this_cpu_ptr(&lru_pvecs.activate_page); get_page(page); - if (!pagevec_add(pvec, page) || PageCompound(page)) + if (pagevec_add_and_need_flush(pvec, page)) pagevec_lru_move_fn(pvec, __activate_page); local_unlock(&lru_pvecs.lock); } @@ -458,7 +471,7 @@ void lru_cache_add(struct page *page) get_page(page); local_lock(&lru_pvecs.lock); pvec = this_cpu_ptr(&lru_pvecs.lru_add); - if (!pagevec_add(pvec, page) || PageCompound(page)) + if (pagevec_add_and_need_flush(pvec, page)) __pagevec_lru_add(pvec); local_unlock(&lru_pvecs.lock); } @@ -629,6 +642,7 @@ void lru_add_drain_cpu(int cpu) pagevec_lru_move_fn(pvec, lru_lazyfree_fn); activate_page_drain(cpu); + invalidate_bh_lrus_cpu(cpu); } /** @@ -654,7 +668,7 @@ void deactivate_file_page(struct page *page) local_lock(&lru_pvecs.lock); pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file); - if (!pagevec_add(pvec, page) || PageCompound(page)) + if (pagevec_add_and_need_flush(pvec, page)) pagevec_lru_move_fn(pvec, lru_deactivate_file_fn); local_unlock(&lru_pvecs.lock); } @@ -676,7 +690,7 @@ void deactivate_page(struct page *page) local_lock(&lru_pvecs.lock); pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate); get_page(page); - if (!pagevec_add(pvec, page) || PageCompound(page)) + if (pagevec_add_and_need_flush(pvec, page)) pagevec_lru_move_fn(pvec, lru_deactivate_fn); local_unlock(&lru_pvecs.lock); } @@ -698,7 +712,7 @@ void mark_page_lazyfree(struct page *page) local_lock(&lru_pvecs.lock); pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree); get_page(page); - if (!pagevec_add(pvec, page) || PageCompound(page)) + if (pagevec_add_and_need_flush(pvec, page)) pagevec_lru_move_fn(pvec, lru_lazyfree_fn); local_unlock(&lru_pvecs.lock); } @@ -735,7 +749,7 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy) * Calling this function with cpu hotplug locks held can actually lead * to obscure indirect dependencies via WQ context. */ -void lru_add_drain_all(void) +inline void __lru_add_drain_all(bool force_all_cpus) { /* * lru_drain_gen - Global pages generation number @@ -780,7 +794,7 @@ void lru_add_drain_all(void) * (C) Exit the draining operation if a newer generation, from another * lru_add_drain_all(), was already scheduled for draining. Check (A). 
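
pagevec_add_and_need_flush(), introduced at the top of this mm/swap.c hunk, is what ties the per-CPU caches to the new lru_cache_disable() machinery: a pagevec must be drained not only when it fills up or holds a compound page, as before, but immediately whenever LRU caching is globally disabled, so no page can hide from isolation in a per-CPU batch. The full-or-disabled test in a standalone model (PAGEVEC_SIZE and the page type are stand-ins)::

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGEVEC_SIZE 15

    static int lru_disable_count;   /* nonzero while migration isolates */

    struct pagevec {
        int nr;
        long pages[PAGEVEC_SIZE];
    };

    /* true when the caller must drain the batch to the LRU right away */
    static bool add_and_need_flush(struct pagevec *pvec, long page)
    {
        pvec->pages[pvec->nr++] = page;
        return pvec->nr == PAGEVEC_SIZE || lru_disable_count != 0;
    }

    int main(void)
    {
        struct pagevec pvec = { .nr = 0 };

        lru_disable_count = 1;  /* as after lru_cache_disable() */
        if (add_and_need_flush(&pvec, 42)) {
            printf("draining %d page(s) immediately\n", pvec.nr);
            pvec.nr = 0;
        }
        return 0;
    }
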
 	 */
-	if (unlikely(this_gen != lru_drain_gen))
+	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
 		goto done;
 
 	/*
@@ -810,12 +824,14 @@ void lru_add_drain_all(void)
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
-		if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
+		if (force_all_cpus ||
+		    pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
 		    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
-		    need_activate_page_drain(cpu)) {
+		    need_activate_page_drain(cpu) ||
+		    has_bh_in_lru(cpu, NULL)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
 			queue_work_on(cpu, mm_percpu_wq, work);
 			__cpumask_set_cpu(cpu, &has_work);
@@ -828,6 +844,11 @@ void lru_add_drain_all(void)
 done:
 	mutex_unlock(&lock);
 }
+
+void lru_add_drain_all(void)
+{
+	__lru_add_drain_all(false);
+}
 #else
 void lru_add_drain_all(void)
 {
@@ -835,6 +856,34 @@ void lru_add_drain_all(void)
 }
 #endif /* CONFIG_SMP */
 
+atomic_t lru_disable_count = ATOMIC_INIT(0);
+
+/*
+ * lru_cache_disable() needs to be called before we start compiling
+ * a list of pages to be migrated using isolate_lru_page().
+ * It drains pages on the LRU cache and then disables it on all cpus
+ * until lru_cache_enable is called.
+ *
+ * Must be paired with a call to lru_cache_enable().
+ */
+void lru_cache_disable(void)
+{
+	atomic_inc(&lru_disable_count);
+#ifdef CONFIG_SMP
+	/*
+	 * lru_add_drain_all in the force mode will schedule draining on
+	 * all online CPUs so any calls of lru_cache_disabled wrapped by
+	 * local_lock or preemption disabled would be ordered by that.
+	 * The atomic operation doesn't need to have stronger ordering
+	 * requirements because that is enforced by the scheduling
+	 * guarantees.
+ */ + __lru_add_drain_all(true); +#else + lru_add_drain(); +#endif +} + /** * release_pages - batched put_page() * @pages: array of pages to release diff --git a/mm/swap_state.c b/mm/swap_state.c index fb7efa08fe57..3a1259c13f3b 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -132,7 +132,6 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, xas_store(&xas, page); xas_next(&xas); } - address_space->nrexceptional -= nr_shadows; address_space->nrpages += nr; __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr); __mod_lruvec_page_state(page, NR_SWAPCACHE, nr); @@ -172,8 +171,6 @@ void __delete_from_swap_cache(struct page *page, xas_next(&xas); } ClearPageSwapCache(page); - if (shadow) - address_space->nrexceptional += nr; address_space->nrpages -= nr; __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); __mod_lruvec_page_state(page, NR_SWAPCACHE, -nr); @@ -275,7 +272,6 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin, xas_store(&xas, NULL); nr_shadows++; } - address_space->nrexceptional -= nr_shadows; xa_unlock_irq(&address_space->i_pages); /* search the next swapcache until we meet end */ diff --git a/mm/swapfile.c b/mm/swapfile.c index 084a5b9a18e5..149e77454e3c 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2780,7 +2780,7 @@ static int swap_show(struct seq_file *swap, void *v) unsigned int bytes, inuse; if (si == SEQ_START_TOKEN) { - seq_puts(swap,"Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n"); + seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n"); return 0; } @@ -3284,7 +3284,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) sizeof(long), GFP_KERNEL); - if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) { + if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) { /* * When discard is enabled for swap with no particular * policy flagged, we set all swap discard flags here in diff --git a/mm/truncate.c b/mm/truncate.c index 455944264663..95af244b112a 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -40,7 +40,6 @@ static inline void __clear_shadow_entry(struct address_space *mapping, if (xas_load(&xas) != entry) return; xas_store(&xas, NULL); - mapping->nrexceptional--; } static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, @@ -295,7 +294,7 @@ void truncate_inode_pages_range(struct address_space *mapping, pgoff_t index; int i; - if (mapping->nrpages == 0 && mapping->nrexceptional == 0) + if (mapping_empty(mapping)) goto out; /* Offsets within partial pages */ @@ -440,9 +439,6 @@ EXPORT_SYMBOL(truncate_inode_pages); */ void truncate_inode_pages_final(struct address_space *mapping) { - unsigned long nrexceptional; - unsigned long nrpages; - /* * Page reclaim can not participate in regular inode lifetime * management (can't call iput()) and thus can race with the @@ -452,16 +448,7 @@ void truncate_inode_pages_final(struct address_space *mapping) */ mapping_set_exiting(mapping); - /* - * When reclaim installs eviction entries, it increases - * nrexceptional first, then decreases nrpages. Make sure we see - * this in the right order or we might miss an entry. 
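
The truncate.c hunks here, like the swap_state.c ones just above, can drop every nrexceptional update because emptiness checks move to a helper that asks the page cache xarray directly; presumably it is implemented along these lines (a sketch of what mapping_empty() in include/linux/pagemap.h would look like, not quoted from this diff)::

    #include <linux/fs.h>
    #include <linux/xarray.h>

    /* one lockless query replaces reading nrpages and nrexceptional
     * with an smp_rmb() between the two loads */
    static inline bool mapping_empty_sketch(struct address_space *mapping)
    {
        return xa_empty(&mapping->i_pages);
    }
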
- */ - nrpages = mapping->nrpages; - smp_rmb(); - nrexceptional = mapping->nrexceptional; - - if (nrpages || nrexceptional) { + if (!mapping_empty(mapping)) { /* * As truncation uses a lockless tree lookup, cycle * the tree lock to make sure any ongoing tree @@ -633,7 +620,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping, int ret2 = 0; int did_range_unmap = 0; - if (mapping->nrpages == 0 && mapping->nrexceptional == 0) + if (mapping_empty(mapping)) goto out; pagevec_init(&pvec); diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 9a3d451402d7..e14b3820c6a8 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -207,7 +207,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, - bool zeropage) + enum mcopy_atomic_mode mode) { int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED; int vm_shared = dst_vma->vm_flags & VM_SHARED; @@ -227,7 +227,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm, * by THP. Since we can not reliably insert a zero page, this * feature is not supported. */ - if (zeropage) { + if (mode == MCOPY_ATOMIC_ZEROPAGE) { mmap_read_unlock(dst_mm); return -EINVAL; } @@ -273,8 +273,6 @@ retry: } while (src_addr < src_start + len) { - pte_t dst_pteval; - BUG_ON(dst_addr >= dst_start + len); /* @@ -290,23 +288,23 @@ retry: mutex_lock(&hugetlb_fault_mutex_table[hash]); err = -ENOMEM; - dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize); + dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize); if (!dst_pte) { mutex_unlock(&hugetlb_fault_mutex_table[hash]); i_mmap_unlock_read(mapping); goto out_unlock; } - err = -EEXIST; - dst_pteval = huge_ptep_get(dst_pte); - if (!huge_pte_none(dst_pteval)) { + if (mode != MCOPY_ATOMIC_CONTINUE && + !huge_pte_none(huge_ptep_get(dst_pte))) { + err = -EEXIST; mutex_unlock(&hugetlb_fault_mutex_table[hash]); i_mmap_unlock_read(mapping); goto out_unlock; } err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, - dst_addr, src_addr, &page); + dst_addr, src_addr, mode, &page); mutex_unlock(&hugetlb_fault_mutex_table[hash]); i_mmap_unlock_read(mapping); @@ -408,7 +406,7 @@ extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, - bool zeropage); + enum mcopy_atomic_mode mode); #endif /* CONFIG_HUGETLB_PAGE */ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm, @@ -458,7 +456,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, - bool zeropage, + enum mcopy_atomic_mode mcopy_mode, bool *mmap_changing, __u64 mode) { @@ -469,6 +467,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm, long copied; struct page *page; bool wp_copy; + bool zeropage = (mcopy_mode == MCOPY_ATOMIC_ZEROPAGE); /* * Sanitize the command parameters: @@ -527,10 +526,12 @@ retry: */ if (is_vm_hugetlb_page(dst_vma)) return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start, - src_start, len, zeropage); + src_start, len, mcopy_mode); if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma)) goto out_unlock; + if (mcopy_mode == MCOPY_ATOMIC_CONTINUE) + goto out_unlock; /* * Ensure the dst_vma has a anon_vma or this page @@ -626,14 +627,22 @@ ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, bool *mmap_changing, __u64 mode) { - return 
__mcopy_atomic(dst_mm, dst_start, src_start, len, false, - mmap_changing, mode); + return __mcopy_atomic(dst_mm, dst_start, src_start, len, + MCOPY_ATOMIC_NORMAL, mmap_changing, mode); } ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start, unsigned long len, bool *mmap_changing) { - return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0); + return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE, + mmap_changing, 0); +} + +ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start, + unsigned long len, bool *mmap_changing) +{ + return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE, + mmap_changing, 0); } int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, diff --git a/mm/util.c b/mm/util.c index 083c5c417cfc..a8bf17f18a81 100644 --- a/mm/util.c +++ b/mm/util.c @@ -765,7 +765,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer, * The deviation of sync_overcommit_as could be big with loose policy * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply - * with the strict "NEVER", and to avoid possible race condtion (even + * with the strict "NEVER", and to avoid possible race condition (even * though user usually won't too frequently do the switching to policy * OVERCOMMIT_NEVER), the switch is done in the following order: * 1. changing the batch @@ -987,22 +987,26 @@ int __weak memcmp_pages(struct page *page1, struct page *page2) */ void mem_dump_obj(void *object) { + const char *type; + if (kmem_valid_obj(object)) { kmem_dump_obj(object); return; } + if (vmalloc_dump_obj(object)) return; - if (!virt_addr_valid(object)) { - if (object == NULL) - pr_cont(" NULL pointer.\n"); - else if (object == ZERO_SIZE_PTR) - pr_cont(" zero-size pointer.\n"); - else - pr_cont(" non-paged memory.\n"); - return; - } - pr_cont(" non-slab/vmalloc memory.\n"); + + if (virt_addr_valid(object)) + type = "non-slab/vmalloc memory"; + else if (object == NULL) + type = "NULL pointer"; + else if (object == ZERO_SIZE_PTR) + type = "zero-size pointer"; + else + type = "non-paged memory"; + + pr_cont(" %s\n", type); } EXPORT_SYMBOL_GPL(mem_dump_obj); #endif diff --git a/mm/vmalloc.c b/mm/vmalloc.c index d33894d7b27a..9c539f0730a5 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3083,7 +3083,7 @@ EXPORT_SYMBOL(vzalloc_node); * 64b systems should always have either DMA or DMA32 zones. For others * GFP_DMA32 should do the right thing and use the normal zone. */ -#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL +#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) #endif /** diff --git a/mm/vmscan.c b/mm/vmscan.c index 562e87cbd7a1..5199b9696bab 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -185,39 +185,181 @@ static LIST_HEAD(shrinker_list); static DECLARE_RWSEM(shrinker_rwsem); #ifdef CONFIG_MEMCG -/* - * We allow subsystems to populate their shrinker-related - * LRU lists before register_shrinker_prepared() is called - * for the shrinker, since we don't want to impose - * restrictions on their internal registration order. - * In this case shrink_slab_memcg() may find corresponding - * bit is set in the shrinkers map. - * - * This value is used by the function to detect registering - * shrinkers and to skip do_shrink_slab() calls for them. 
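The machinery below folds the old per-memcg shrinker bitmap and the new per-shrinker nr_deferred counters into a single allocation per (memcg, node) pair. A worked sizing example for the two helpers that follow, assuming 64-bit longs:

    /*
     * shrinker_nr_max = 65, BITS_PER_LONG = 64:
     *   shrinker_map_size(65)   = DIV_ROUND_UP(65, 64) * sizeof(long)      =   16 bytes
     *   shrinker_defer_size(65) = round_up(65, 64) * sizeof(atomic_long_t) = 1024 bytes
     */
    info = kvmalloc_node(sizeof(*info) + map_size + defer_size, GFP_KERNEL, nid);
    info->nr_deferred = (atomic_long_t *)(info + 1);    /* counters first  */
    info->map = (void *)info->nr_deferred + defer_size; /* bitmap after it */
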
- */ -#define SHRINKER_REGISTERING ((struct shrinker *)~0UL) +static int shrinker_nr_max; + +/* The shrinker_info is expanded in a batch of BITS_PER_LONG */ +static inline int shrinker_map_size(int nr_items) +{ + return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long)); +} + +static inline int shrinker_defer_size(int nr_items) +{ + return (round_up(nr_items, BITS_PER_LONG) * sizeof(atomic_long_t)); +} + +static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg, + int nid) +{ + return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info, + lockdep_is_held(&shrinker_rwsem)); +} + +static int expand_one_shrinker_info(struct mem_cgroup *memcg, + int map_size, int defer_size, + int old_map_size, int old_defer_size) +{ + struct shrinker_info *new, *old; + struct mem_cgroup_per_node *pn; + int nid; + int size = map_size + defer_size; + + for_each_node(nid) { + pn = memcg->nodeinfo[nid]; + old = shrinker_info_protected(memcg, nid); + /* Not yet online memcg */ + if (!old) + return 0; + + new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid); + if (!new) + return -ENOMEM; + + new->nr_deferred = (atomic_long_t *)(new + 1); + new->map = (void *)new->nr_deferred + defer_size; + + /* map: set all old bits, clear all new bits */ + memset(new->map, (int)0xff, old_map_size); + memset((void *)new->map + old_map_size, 0, map_size - old_map_size); + /* nr_deferred: copy old values, clear all new values */ + memcpy(new->nr_deferred, old->nr_deferred, old_defer_size); + memset((void *)new->nr_deferred + old_defer_size, 0, + defer_size - old_defer_size); + + rcu_assign_pointer(pn->shrinker_info, new); + kvfree_rcu(old, rcu); + } + + return 0; +} + +void free_shrinker_info(struct mem_cgroup *memcg) +{ + struct mem_cgroup_per_node *pn; + struct shrinker_info *info; + int nid; + + for_each_node(nid) { + pn = memcg->nodeinfo[nid]; + info = rcu_dereference_protected(pn->shrinker_info, true); + kvfree(info); + rcu_assign_pointer(pn->shrinker_info, NULL); + } +} + +int alloc_shrinker_info(struct mem_cgroup *memcg) +{ + struct shrinker_info *info; + int nid, size, ret = 0; + int map_size, defer_size = 0; + + down_write(&shrinker_rwsem); + map_size = shrinker_map_size(shrinker_nr_max); + defer_size = shrinker_defer_size(shrinker_nr_max); + size = map_size + defer_size; + for_each_node(nid) { + info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid); + if (!info) { + free_shrinker_info(memcg); + ret = -ENOMEM; + break; + } + info->nr_deferred = (atomic_long_t *)(info + 1); + info->map = (void *)info->nr_deferred + defer_size; + rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info); + } + up_write(&shrinker_rwsem); + + return ret; +} + +static inline bool need_expand(int nr_max) +{ + return round_up(nr_max, BITS_PER_LONG) > + round_up(shrinker_nr_max, BITS_PER_LONG); +} + +static int expand_shrinker_info(int new_id) +{ + int ret = 0; + int new_nr_max = new_id + 1; + int map_size, defer_size = 0; + int old_map_size, old_defer_size = 0; + struct mem_cgroup *memcg; + + if (!need_expand(new_nr_max)) + goto out; + + if (!root_mem_cgroup) + goto out; + + lockdep_assert_held(&shrinker_rwsem); + + map_size = shrinker_map_size(new_nr_max); + defer_size = shrinker_defer_size(new_nr_max); + old_map_size = shrinker_map_size(shrinker_nr_max); + old_defer_size = shrinker_defer_size(shrinker_nr_max); + + memcg = mem_cgroup_iter(NULL, NULL, NULL); + do { + ret = expand_one_shrinker_info(memcg, map_size, defer_size, + old_map_size, old_defer_size); + if (ret) { + 
mem_cgroup_iter_break(NULL, memcg); + goto out; + } + } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); +out: + if (!ret) + shrinker_nr_max = new_nr_max; + + return ret; +} + +void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) +{ + if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { + struct shrinker_info *info; + + rcu_read_lock(); + info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info); + /* Pairs with smp mb in shrink_slab() */ + smp_mb__before_atomic(); + set_bit(shrinker_id, info->map); + rcu_read_unlock(); + } +} static DEFINE_IDR(shrinker_idr); -static int shrinker_nr_max; static int prealloc_memcg_shrinker(struct shrinker *shrinker) { int id, ret = -ENOMEM; + if (mem_cgroup_disabled()) + return -ENOSYS; + down_write(&shrinker_rwsem); /* This may call shrinker, so it must use down_read_trylock() */ - id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL); + id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL); if (id < 0) goto unlock; if (id >= shrinker_nr_max) { - if (memcg_expand_shrinker_maps(id)) { + if (expand_shrinker_info(id)) { idr_remove(&shrinker_idr, id); goto unlock; } - - shrinker_nr_max = id + 1; } shrinker->id = id; ret = 0; @@ -232,9 +374,51 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker) BUG_ON(id < 0); - down_write(&shrinker_rwsem); + lockdep_assert_held(&shrinker_rwsem); + idr_remove(&shrinker_idr, id); - up_write(&shrinker_rwsem); +} + +static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker, + struct mem_cgroup *memcg) +{ + struct shrinker_info *info; + + info = shrinker_info_protected(memcg, nid); + return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0); +} + +static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker, + struct mem_cgroup *memcg) +{ + struct shrinker_info *info; + + info = shrinker_info_protected(memcg, nid); + return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]); +} + +void reparent_shrinker_deferred(struct mem_cgroup *memcg) +{ + int i, nid; + long nr; + struct mem_cgroup *parent; + struct shrinker_info *child_info, *parent_info; + + parent = parent_mem_cgroup(memcg); + if (!parent) + parent = root_mem_cgroup; + + /* Prevent from concurrent shrinker_info expand */ + down_read(&shrinker_rwsem); + for_each_node(nid) { + child_info = shrinker_info_protected(memcg, nid); + parent_info = shrinker_info_protected(parent, nid); + for (i = 0; i < shrinker_nr_max; i++) { + nr = atomic_long_read(&child_info->nr_deferred[i]); + atomic_long_add(nr, &parent_info->nr_deferred[i]); + } + } + up_read(&shrinker_rwsem); } static bool cgroup_reclaim(struct scan_control *sc) @@ -268,13 +452,25 @@ static bool writeback_throttling_sane(struct scan_control *sc) #else static int prealloc_memcg_shrinker(struct shrinker *shrinker) { - return 0; + return -ENOSYS; } static void unregister_memcg_shrinker(struct shrinker *shrinker) { } +static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker, + struct mem_cgroup *memcg) +{ + return 0; +} + +static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker, + struct mem_cgroup *memcg) +{ + return 0; +} + static bool cgroup_reclaim(struct scan_control *sc) { return false; @@ -286,6 +482,39 @@ static bool writeback_throttling_sane(struct scan_control *sc) } #endif +static long xchg_nr_deferred(struct shrinker *shrinker, + struct shrink_control *sc) +{ + int nid = sc->nid; + + if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) + nid = 0; + + if (sc->memcg && + 
(shrinker->flags & SHRINKER_MEMCG_AWARE)) + return xchg_nr_deferred_memcg(nid, shrinker, + sc->memcg); + + return atomic_long_xchg(&shrinker->nr_deferred[nid], 0); +} + + +static long add_nr_deferred(long nr, struct shrinker *shrinker, + struct shrink_control *sc) +{ + int nid = sc->nid; + + if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) + nid = 0; + + if (sc->memcg && + (shrinker->flags & SHRINKER_MEMCG_AWARE)) + return add_nr_deferred_memcg(nr, nid, shrinker, + sc->memcg); + + return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]); +} + /* * This misses isolated pages which are not accounted for to save counters. * As the data only determines if reclaim or compaction continues, it is @@ -335,8 +564,18 @@ static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, */ int prealloc_shrinker(struct shrinker *shrinker) { - unsigned int size = sizeof(*shrinker->nr_deferred); + unsigned int size; + int err; + + if (shrinker->flags & SHRINKER_MEMCG_AWARE) { + err = prealloc_memcg_shrinker(shrinker); + if (err != -ENOSYS) + return err; + + shrinker->flags &= ~SHRINKER_MEMCG_AWARE; + } + size = sizeof(*shrinker->nr_deferred); if (shrinker->flags & SHRINKER_NUMA_AWARE) size *= nr_node_ids; @@ -344,26 +583,17 @@ int prealloc_shrinker(struct shrinker *shrinker) if (!shrinker->nr_deferred) return -ENOMEM; - if (shrinker->flags & SHRINKER_MEMCG_AWARE) { - if (prealloc_memcg_shrinker(shrinker)) - goto free_deferred; - } - return 0; - -free_deferred: - kfree(shrinker->nr_deferred); - shrinker->nr_deferred = NULL; - return -ENOMEM; } void free_prealloced_shrinker(struct shrinker *shrinker) { - if (!shrinker->nr_deferred) - return; - - if (shrinker->flags & SHRINKER_MEMCG_AWARE) + if (shrinker->flags & SHRINKER_MEMCG_AWARE) { + down_write(&shrinker_rwsem); unregister_memcg_shrinker(shrinker); + up_write(&shrinker_rwsem); + return; + } kfree(shrinker->nr_deferred); shrinker->nr_deferred = NULL; @@ -373,10 +603,7 @@ void register_shrinker_prepared(struct shrinker *shrinker) { down_write(&shrinker_rwsem); list_add_tail(&shrinker->list, &shrinker_list); -#ifdef CONFIG_MEMCG - if (shrinker->flags & SHRINKER_MEMCG_AWARE) - idr_replace(&shrinker_idr, shrinker, shrinker->id); -#endif + shrinker->flags |= SHRINKER_REGISTERED; up_write(&shrinker_rwsem); } @@ -396,13 +623,16 @@ EXPORT_SYMBOL(register_shrinker); */ void unregister_shrinker(struct shrinker *shrinker) { - if (!shrinker->nr_deferred) + if (!(shrinker->flags & SHRINKER_REGISTERED)) return; - if (shrinker->flags & SHRINKER_MEMCG_AWARE) - unregister_memcg_shrinker(shrinker); + down_write(&shrinker_rwsem); list_del(&shrinker->list); + shrinker->flags &= ~SHRINKER_REGISTERED; + if (shrinker->flags & SHRINKER_MEMCG_AWARE) + unregister_memcg_shrinker(shrinker); up_write(&shrinker_rwsem); + kfree(shrinker->nr_deferred); shrinker->nr_deferred = NULL; } @@ -419,14 +649,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, long freeable; long nr; long new_nr; - int nid = shrinkctl->nid; long batch_size = shrinker->batch ? shrinker->batch : SHRINK_BATCH; long scanned = 0, next_deferred; - if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) - nid = 0; - freeable = shrinker->count_objects(shrinker, shrinkctl); if (freeable == 0 || freeable == SHRINK_EMPTY) return freeable; @@ -436,9 +662,8 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, * and zero it so that other concurrent shrinker invocations * don't also do this scanning work. 
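The helper pair above gives do_shrink_slab() a single call site no matter where the deferred count lives. Condensed, with info standing for shrinker_info_protected(sc->memcg, nid), the shared routing rule is:

    int nid = (shrinker->flags & SHRINKER_NUMA_AWARE) ? sc->nid : 0;

    if (sc->memcg && (shrinker->flags & SHRINKER_MEMCG_AWARE))
            nr = atomic_long_xchg(&info->nr_deferred[shrinker->id], 0); /* per-memcg */
    else
            nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);      /* global */
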
*/ - nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0); + nr = xchg_nr_deferred(shrinker, shrinkctl); - total_scan = nr; if (shrinker->seeks) { delta = freeable >> priority; delta *= 4; @@ -452,37 +677,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, delta = freeable / 2; } + total_scan = nr >> priority; total_scan += delta; - if (total_scan < 0) { - pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n", - shrinker->scan_objects, total_scan); - total_scan = freeable; - next_deferred = nr; - } else - next_deferred = total_scan; - - /* - * We need to avoid excessive windup on filesystem shrinkers - * due to large numbers of GFP_NOFS allocations causing the - * shrinkers to return -1 all the time. This results in a large - * nr being built up so when a shrink that can do some work - * comes along it empties the entire cache due to nr >>> - * freeable. This is bad for sustaining a working set in - * memory. - * - * Hence only allow the shrinker to scan the entire cache when - * a large delta change is calculated directly. - */ - if (delta < freeable / 4) - total_scan = min(total_scan, freeable / 2); - - /* - * Avoid risking looping forever due to too large nr value: - * never try to free more than twice the estimate number of - * freeable entries. - */ - if (total_scan > freeable * 2) - total_scan = freeable * 2; + total_scan = min(total_scan, (2 * freeable)); trace_mm_shrink_slab_start(shrinker, shrinkctl, nr, freeable, delta, total_scan, priority); @@ -521,22 +718,22 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, cond_resched(); } - if (next_deferred >= scanned) - next_deferred -= scanned; - else - next_deferred = 0; + /* + * The deferred work is increased by any new work (delta) that wasn't + * done, decreased by old deferred work that was done now. + * + * And it is capped to two times of the freeable items. + */ + next_deferred = max_t(long, (nr + delta - scanned), 0); + next_deferred = min(next_deferred, (2 * freeable)); + /* * move the unused scan count back into the shrinker in a - * manner that handles concurrent updates. If we exhausted the - * scan, there is no need to do an update. + * manner that handles concurrent updates. 
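Plugging numbers into the rewritten arithmetic above shows why the deleted windup heuristics are no longer needed; deferred work is now trickled out by priority rather than replayed wholesale. A worked pass, assuming DEF_PRIORITY (12) and DEFAULT_SEEKS (2):

    /*
     *   freeable = 8192, deferred nr = 4096, scanned = 1024
     *   delta         = (8192 >> 12) * 4 / 2    = 4
     *   total_scan    = (4096 >> 12) + 4        = 5
     *   next_deferred = max(4096 + 4 - 1024, 0) = 3076, capped at 2 * 8192
     *
     * The old code would have started from nr + delta = 4100, clamped it
     * to freeable / 2 = 4096, and scanned nearly everything in one pass.
     */
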
*/ - if (next_deferred > 0) - new_nr = atomic_long_add_return(next_deferred, - &shrinker->nr_deferred[nid]); - else - new_nr = atomic_long_read(&shrinker->nr_deferred[nid]); + new_nr = add_nr_deferred(next_deferred, shrinker, shrinkctl); - trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan); + trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan); return freed; } @@ -544,7 +741,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, int priority) { - struct memcg_shrinker_map *map; + struct shrinker_info *info; unsigned long ret, freed = 0; int i; @@ -554,12 +751,11 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, if (!down_read_trylock(&shrinker_rwsem)) return 0; - map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map, - true); - if (unlikely(!map)) + info = shrinker_info_protected(memcg, nid); + if (unlikely(!info)) goto unlock; - for_each_set_bit(i, map->map, shrinker_nr_max) { + for_each_set_bit(i, info->map, shrinker_nr_max) { struct shrink_control sc = { .gfp_mask = gfp_mask, .nid = nid, @@ -568,9 +764,9 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, struct shrinker *shrinker; shrinker = idr_find(&shrinker_idr, i); - if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) { + if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED))) { if (!shrinker) - clear_bit(i, map->map); + clear_bit(i, info->map); continue; } @@ -581,7 +777,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, ret = do_shrink_slab(&sc, shrinker, priority); if (ret == SHRINK_EMPTY) { - clear_bit(i, map->map); + clear_bit(i, info->map); /* * After the shrinker reported that it had no objects to * free, but before we cleared the corresponding bit in @@ -590,7 +786,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, * case, we invoke the shrinker one more time and reset * the bit if it reports that it is not empty anymore. * The memory barrier here pairs with the barrier in - * memcg_set_shrinker_bit(): + * set_shrinker_bit(): * * list_lru_add() shrink_slab_memcg() * list_add_tail() clear_bit() @@ -602,7 +798,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, if (ret == SHRINK_EMPTY) ret = 0; else - memcg_set_shrinker_bit(memcg, nid, i); + set_shrinker_bit(memcg, nid, i); } freed += ret; @@ -1507,8 +1703,9 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone, LIST_HEAD(clean_pages); list_for_each_entry_safe(page, next, page_list, lru) { - if (page_is_file_lru(page) && !PageDirty(page) && - !__PageMovable(page) && !PageUnevictable(page)) { + if (!PageHuge(page) && page_is_file_lru(page) && + !PageDirty(page) && !__PageMovable(page) && + !PageUnevictable(page)) { ClearPageActive(page); list_move(&page->lru, &clean_pages); } @@ -3862,7 +4059,7 @@ static int kswapd(void *p) { unsigned int alloc_order, reclaim_order; unsigned int highest_zoneidx = MAX_NR_ZONES - 1; - pg_data_t *pgdat = (pg_data_t*)p; + pg_data_t *pgdat = (pg_data_t *)p; struct task_struct *tsk = current; const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); @@ -4086,14 +4283,6 @@ module_init(kswapd_init) int node_reclaim_mode __read_mostly; /* - * These bit locations are exposed in the vm.zone_reclaim_mode sysctl - * ABI. New bits are OK, but existing bits can never change. 
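The RECLAIM_* bits deleted just below are sysctl ABI, not dead code; they move to a header shared with the sysctl definition elsewhere in this series (not shown in this hunk). For reference, the vm.zone_reclaim_mode encoding they spell out:

    /*
     *   RECLAIM_ZONE  (1 << 0)  run shrink_inactive_list on the node
     *   RECLAIM_WRITE (1 << 1)  write out dirty pages during reclaim
     *   RECLAIM_UNMAP (1 << 2)  unmap pages during reclaim
     *
     * e.g. vm.zone_reclaim_mode = 3 selects reclaim plus writeback.
     */
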
- */ -#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ -#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ -#define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */ - -/* * Priority for NODE_RECLAIM. This determines the fraction of pages * of a node considered for each zone_reclaim. 4 scans 1/16th of * a zone. diff --git a/mm/vmstat.c b/mm/vmstat.c index 74b2c374b86c..5ba118521ded 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1313,6 +1313,10 @@ const char * const vmstat_text[] = { "htlb_buddy_alloc_success", "htlb_buddy_alloc_fail", #endif +#ifdef CONFIG_CMA + "cma_alloc_success", + "cma_alloc_fail", +#endif "unevictable_pgs_culled", "unevictable_pgs_scanned", "unevictable_pgs_rescued", @@ -1365,6 +1369,10 @@ const char * const vmstat_text[] = { "swap_ra", "swap_ra_hit", #endif +#ifdef CONFIG_X86 + "direct_map_level2_splits", + "direct_map_level3_splits", +#endif #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */ }; #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */ @@ -1854,25 +1862,34 @@ int vmstat_refresh(struct ctl_table *table, int write, if (err) return err; for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) { + /* + * Skip checking stats known to go negative occasionally. + */ + switch (i) { + case NR_ZONE_WRITE_PENDING: + case NR_FREE_CMA_PAGES: + continue; + } val = atomic_long_read(&vm_zone_stat[i]); if (val < 0) { pr_warn("%s: %s %ld\n", __func__, zone_stat_name(i), val); - err = -EINVAL; } } -#ifdef CONFIG_NUMA - for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) { - val = atomic_long_read(&vm_numa_stat[i]); + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { + /* + * Skip checking stats known to go negative occasionally. + */ + switch (i) { + case NR_WRITEBACK: + continue; + } + val = atomic_long_read(&vm_node_stat[i]); if (val < 0) { pr_warn("%s: %s %ld\n", - __func__, numa_stat_name(i), val); - err = -EINVAL; + __func__, node_stat_name(i), val); } } -#endif - if (err) - return err; if (write) *ppos += *lenp; else diff --git a/mm/workingset.c b/mm/workingset.c index cd39902c1062..b7cdeca5a76d 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -554,7 +554,6 @@ static enum lru_status shadow_lru_isolate(struct list_head *item, goto out_invalid; if (WARN_ON_ONCE(node->count != node->nr_values)) goto out_invalid; - mapping->nrexceptional -= node->nr_values; xa_delete_node(node, workingset_update_node); __inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM); diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 30c358b72025..58697f7a43f8 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1987,8 +1987,7 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage, head = obj_to_head(page, addr); if (head & OBJ_ALLOCATED_TAG) { handle = head & ~OBJ_ALLOCATED_TAG; - if (!testpin_tag(handle)) - BUG(); + BUG_ON(!testpin_tag(handle)); old_obj = handle_to_obj(handle); obj_to_location(old_obj, &dummy, &obj_idx); @@ -2035,8 +2034,7 @@ unpin_objects: head = obj_to_head(page, addr); if (head & OBJ_ALLOCATED_TAG) { handle = head & ~OBJ_ALLOCATED_TAG; - if (!testpin_tag(handle)) - BUG(); + BUG_ON(!testpin_tag(handle)); unpin_tag(handle); } } diff --git a/mm/zswap.c b/mm/zswap.c index 578d9f256920..20763267a219 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -614,7 +614,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor) } pr_debug("using %s zpool\n", zpool_get_type(pool->zpool)); - strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name)); + strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name)); 
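The strlcpy() to strscpy() swap above is the standard hardening conversion: strscpy() always NUL-terminates and reports truncation, where strlcpy() returned the would-be source length and truncated silently. The difference the caller can now act on, sketched with a made-up compressor name:

    char name[8];

    if (strscpy(name, "a-long-compressor-name", sizeof(name)) < 0)
            pr_warn("compressor name truncated\n");  /* strscpy returns -E2BIG */
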
pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx); if (!pool->acomp_ctx) { diff --git a/net/ceph/auth.c b/net/ceph/auth.c index eb261aa5fe18..de407e8feb97 100644 --- a/net/ceph/auth.c +++ b/net/ceph/auth.c @@ -36,6 +36,20 @@ static int init_protocol(struct ceph_auth_client *ac, int proto) } } +static void set_global_id(struct ceph_auth_client *ac, u64 global_id) +{ + dout("%s global_id %llu\n", __func__, global_id); + + if (!global_id) + pr_err("got zero global_id\n"); + + if (ac->global_id && global_id != ac->global_id) + pr_err("global_id changed from %llu to %llu\n", ac->global_id, + global_id); + + ac->global_id = global_id; +} + /* * setup, teardown. */ @@ -222,11 +236,6 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac, payload_end = payload + payload_len; - if (global_id && ac->global_id != global_id) { - dout(" set global_id %lld -> %lld\n", ac->global_id, global_id); - ac->global_id = global_id; - } - if (ac->negotiating) { /* server does not support our protocols? */ if (!protocol && result < 0) { @@ -253,11 +262,16 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac, ret = ac->ops->handle_reply(ac, result, payload, payload_end, NULL, NULL, NULL, NULL); - if (ret == -EAGAIN) + if (ret == -EAGAIN) { ret = build_request(ac, true, reply_buf, reply_len); - else if (ret) + goto out; + } else if (ret) { pr_err("auth protocol '%s' mauth authentication failed: %d\n", ceph_auth_proto_name(ac->protocol), result); + goto out; + } + + set_global_id(ac, global_id); out: mutex_unlock(&ac->mutex); @@ -484,15 +498,11 @@ int ceph_auth_handle_reply_done(struct ceph_auth_client *ac, int ret; mutex_lock(&ac->mutex); - if (global_id && ac->global_id != global_id) { - dout("%s global_id %llu -> %llu\n", __func__, ac->global_id, - global_id); - ac->global_id = global_id; - } - ret = ac->ops->handle_reply(ac, 0, reply, reply + reply_len, session_key, session_key_len, con_secret, con_secret_len); + if (!ret) + set_global_id(ac, global_id); mutex_unlock(&ac->mutex); return ret; } diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index ca44c327bace..79641c4afee9 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c @@ -526,7 +526,7 @@ static int ceph_x_build_request(struct ceph_auth_client *ac, if (ret < 0) return ret; - auth->struct_v = 2; /* nautilus+ */ + auth->struct_v = 3; /* nautilus+ */ auth->key = 0; for (u = (u64 *)enc_buf; u + 1 <= (u64 *)(enc_buf + ret); u++) auth->key ^= *(__le64 *)u; diff --git a/net/ceph/decode.c b/net/ceph/decode.c index b44f7651be04..bc109a1a4616 100644 --- a/net/ceph/decode.c +++ b/net/ceph/decode.c @@ -4,6 +4,7 @@ #include <linux/inet.h> #include <linux/ceph/decode.h> +#include <linux/ceph/messenger.h> /* for ceph_pr_addr() */ static int ceph_decode_entity_addr_versioned(void **p, void *end, @@ -110,6 +111,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2, } ceph_decode_32_safe(p, end, addr_cnt, e_inval); + dout("%s addr_cnt %d\n", __func__, addr_cnt); found = false; for (i = 0; i < addr_cnt; i++) { @@ -117,6 +119,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2, if (ret) return ret; + dout("%s i %d addr %s\n", __func__, i, ceph_pr_addr(&tmp_addr)); if (tmp_addr.type == my_type) { if (found) { pr_err("another match of type %d in addrvec\n", @@ -128,13 +131,18 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2, found = true; } } - if (!found && addr_cnt != 0) { - pr_err("no match of type %d in addrvec\n", - le32_to_cpu(my_type)); - return -ENOENT; - } - return 0; + if (found) + return 0; + + if (!addr_cnt) 
+ return 0; /* normal -- e.g. unused OSD id/slot */ + + if (addr_cnt == 1 && !memchr_inv(&tmp_addr, 0, sizeof(tmp_addr))) + return 0; /* weird but effectively the same as !addr_cnt */ + + pr_err("no match of type %d in addrvec\n", le32_to_cpu(my_type)); + return -ENOENT; e_inval: return -EINVAL; diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index d76dc9d95d16..0de918cb3d90 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -846,7 +846,8 @@ void svc_rqst_free(struct svc_rqst *rqstp) { svc_release_buffer(rqstp); - put_page(rqstp->rq_scratch_page); + if (rqstp->rq_scratch_page) + put_page(rqstp->rq_scratch_page); kfree(rqstp->rq_resp); kfree(rqstp->rq_argp); kfree(rqstp->rq_auth_data); diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 9eb5b6b89077..478f857cdaed 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1174,7 +1174,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp) tcp_sock_set_cork(svsk->sk_sk, true); err = svc_tcp_sendmsg(svsk->sk_sock, xdr, marker, &sent); xdr_free_bvec(xdr); - trace_svcsock_tcp_send(xprt, err < 0 ? err : sent); + trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent); if (err < 0 || sent != (xdr->len + sizeof(marker))) goto out_close; if (atomic_dec_and_test(&svsk->sk_sendqlen)) diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 056452cabc98..d6bbafb773e1 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -921,42 +921,48 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt; __be32 *rdma_argp = rctxt->rc_recv_buf; struct svc_rdma_send_ctxt *sctxt; + unsigned int rc_size; __be32 *p; int ret; ret = -ENOTCONN; if (svc_xprt_is_dead(xprt)) - goto err0; + goto drop_connection; ret = -ENOMEM; sctxt = svc_rdma_send_ctxt_get(rdma); if (!sctxt) - goto err0; + goto drop_connection; + ret = -EMSGSIZE; p = xdr_reserve_space(&sctxt->sc_stream, rpcrdma_fixed_maxsz * sizeof(*p)); if (!p) - goto err0; + goto put_ctxt; ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res); if (ret < 0) - goto err2; + goto reply_chunk; + rc_size = ret; *p++ = *rdma_argp; *p++ = *(rdma_argp + 1); *p++ = rdma->sc_fc_credits; *p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg; - if (svc_rdma_encode_read_list(sctxt) < 0) - goto err0; - if (svc_rdma_encode_write_list(rctxt, sctxt) < 0) - goto err0; - if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0) - goto err0; + ret = svc_rdma_encode_read_list(sctxt); + if (ret < 0) + goto put_ctxt; + ret = svc_rdma_encode_write_list(rctxt, sctxt); + if (ret < 0) + goto put_ctxt; + ret = svc_rdma_encode_reply_chunk(rctxt, sctxt, rc_size); + if (ret < 0) + goto put_ctxt; ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp); if (ret < 0) - goto err1; + goto put_ctxt; /* Prevent svc_xprt_release() from releasing the page backing * rq_res.head[0].iov_base. 
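The relabelled exits in this hunk turn the old err0/err1/err2 ladder into self-describing unwind points. The shape, reduced to a generic sketch (helper names here are placeholders, not the svc_rdma API):

    ret = -ENOMEM;
    ctxt = acquire_ctxt();
    if (!ctxt)
            goto drop_connection;   /* nothing allocated yet */
    ret = encode_reply(ctxt);
    if (ret < 0)
            goto put_ctxt;          /* exactly one thing to undo */
    return 0;

    put_ctxt:
            release_ctxt(ctxt);
    drop_connection:
            close_transport();
            return ret;
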
It's no longer being accessed by @@ -964,16 +970,16 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) rqstp->rq_respages++; return 0; - err2: +reply_chunk: if (ret != -E2BIG && ret != -EINVAL) - goto err1; + goto put_ctxt; svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret); return 0; - err1: +put_ctxt: svc_rdma_send_ctxt_put(rdma, sctxt); - err0: +drop_connection: trace_svcrdma_send_err(rqstp, ret); svc_xprt_deferred_close(&rdma->sc_xprt); return -ENOTCONN; diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c index 331dcf151532..c495664c0a9b 100644 --- a/samples/kprobes/kprobe_example.c +++ b/samples/kprobes/kprobe_example.c @@ -47,6 +47,10 @@ static int __kprobes handler_pre(struct kprobe *p, struct pt_regs *regs) pr_info("<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx, cpsr = 0x%lx\n", p->symbol_name, p->addr, (long)regs->ARM_pc, (long)regs->ARM_cpsr); #endif +#ifdef CONFIG_RISCV + pr_info("<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx, status = 0x%lx\n", + p->symbol_name, p->addr, regs->epc, regs->status); +#endif #ifdef CONFIG_S390 pr_info("<%s> pre_handler: p->addr, 0x%p, ip = 0x%lx, flags = 0x%lx\n", p->symbol_name, p->addr, regs->psw.addr, regs->flags); @@ -80,6 +84,10 @@ static void __kprobes handler_post(struct kprobe *p, struct pt_regs *regs, pr_info("<%s> post_handler: p->addr = 0x%p, cpsr = 0x%lx\n", p->symbol_name, p->addr, (long)regs->ARM_cpsr); #endif +#ifdef CONFIG_RISCV + pr_info("<%s> post_handler: p->addr = 0x%p, status = 0x%lx\n", + p->symbol_name, p->addr, regs->status); +#endif #ifdef CONFIG_S390 pr_info("<%s> pre_handler: p->addr, 0x%p, flags = 0x%lx\n", p->symbol_name, p->addr, regs->flags); diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 2a85d34fdcd0..4840e748fca8 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -1777,6 +1777,7 @@ sub dump_function($$) { $prototype =~ s/^noinline +//; $prototype =~ s/__init +//; $prototype =~ s/__init_or_module +//; + $prototype =~ s/__deprecated +//; $prototype =~ s/__flatten +//; $prototype =~ s/__meminit +//; $prototype =~ s/__must_check +//; diff --git a/scripts/package/buildtar b/scripts/package/buildtar index 936198a90477..221aa7df008d 100755 --- a/scripts/package/buildtar +++ b/scripts/package/buildtar @@ -125,6 +125,14 @@ case "${ARCH}" in fi done ;; + riscv) + for i in Image.bz2 Image.gz Image; do + if [ -f "${objtree}/arch/riscv/boot/${i}" ] ; then + cp -v -- "${objtree}/arch/riscv/boot/${i}" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}" + break + fi + done + ;; *) [ -f "${KBUILD_IMAGE}" ] && cp -v -- "${KBUILD_IMAGE}" "${tmpdir}/boot/vmlinux-kbuild-${KERNELRELEASE}" echo "" >&2 diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index a5429b3dac24..5053b78da19e 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -392,7 +392,7 @@ if ($arch eq "x86_64") { $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$"; } elsif ($arch eq "riscv") { $function_regex = "^([0-9a-fA-F]+)\\s+<([^.0-9][0-9a-zA-Z_\\.]+)>:"; - $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$"; + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL(_PLT)?\\s_?mcount\$"; $type = ".quad"; $alignment = 2; } elsif ($arch eq "nds32") { diff --git a/scripts/ver_linux b/scripts/ver_linux index a92acc703f9b..1a8ee4ff0e32 100755 --- a/scripts/ver_linux +++ b/scripts/ver_linux @@ -47,7 +47,6 @@ BEGIN { printversion("Net-tools", version("ifconfig --version")) printversion("Kbd", version("loadkeys -V")) printversion("Console-tools", version("loadkeys -V")) - printversion("Oprofile", 
version("oprofiled --version")) printversion("Sh-utils", version("expr --v")) printversion("Udev", version("udevadm --version")) printversion("Wireless-tools", version("iwconfig --version")) diff --git a/security/safesetid/lsm.c b/security/safesetid/lsm.c index 8a176b6adbe5..1079c6d54784 100644 --- a/security/safesetid/lsm.c +++ b/security/safesetid/lsm.c @@ -125,7 +125,6 @@ static int safesetid_security_capable(const struct cred *cred, pr_warn("Operation requires CAP_SETUID, which is not available to UID %u for operations besides approved set*uid transitions\n", __kuid_val(cred->uid)); return -EPERM; - break; case CAP_SETGID: /* * If no policy applies to this task, allow the use of CAP_SETGID for @@ -140,11 +139,9 @@ static int safesetid_security_capable(const struct cred *cred, pr_warn("Operation requires CAP_SETGID, which is not available to GID %u for operations besides approved set*gid transitions\n", __kuid_val(cred->uid)); return -EPERM; - break; default: /* Error, the only capabilities were checking for is CAP_SETUID/GID */ return 0; - break; } return 0; } diff --git a/tools/gpio/gpio-utils.c b/tools/gpio/gpio-utils.c index 1639b4d832cd..4096bcd511d1 100644 --- a/tools/gpio/gpio-utils.c +++ b/tools/gpio/gpio-utils.c @@ -20,7 +20,7 @@ #define CONSUMER "gpio-utils" /** - * doc: Operation of gpio + * DOC: Operation of gpio * * Provide the api of gpiochip for chardev interface. There are two * types of api. The first one provide as same function as each @@ -100,7 +100,7 @@ exit_free_name: } /** - * gpiotools_set_values(): Set the value of gpio(s) + * gpiotools_set_values() - Set the value of gpio(s) * @fd: The fd returned by * gpiotools_request_line(). * @values: The array of values want to set. @@ -124,7 +124,7 @@ int gpiotools_set_values(const int fd, struct gpio_v2_line_values *values) } /** - * gpiotools_get_values(): Get the value of gpio(s) + * gpiotools_get_values() - Get the value of gpio(s) * @fd: The fd returned by * gpiotools_request_line(). * @values: The array of values get from hardware. @@ -148,7 +148,7 @@ int gpiotools_get_values(const int fd, struct gpio_v2_line_values *values) } /** - * gpiotools_release_line(): Release the line(s) of gpiochip + * gpiotools_release_line() - Release the line(s) of gpiochip * @fd: The fd returned by * gpiotools_request_line(). * @@ -169,7 +169,7 @@ int gpiotools_release_line(const int fd) } /** - * gpiotools_get(): Get value from specific line + * gpiotools_get() - Get value from specific line * @device_name: The name of gpiochip without prefix "/dev/", * such as "gpiochip0" * @line: number of line, such as 2. @@ -191,7 +191,7 @@ int gpiotools_get(const char *device_name, unsigned int line) /** - * gpiotools_gets(): Get values from specific lines. + * gpiotools_gets() - Get values from specific lines. * @device_name: The name of gpiochip without prefix "/dev/", * such as "gpiochip0". * @lines: An array desired lines, specified by offset @@ -230,7 +230,7 @@ int gpiotools_gets(const char *device_name, unsigned int *lines, } /** - * gpiotools_set(): Set value to specific line + * gpiotools_set() - Set value to specific line * @device_name: The name of gpiochip without prefix "/dev/", * such as "gpiochip0" * @line: number of line, such as 2. @@ -248,13 +248,13 @@ int gpiotools_set(const char *device_name, unsigned int line, } /** - * gpiotools_sets(): Set values to specific lines. + * gpiotools_sets() - Set values to specific lines. * @device_name: The name of gpiochip without prefix "/dev/", * such as "gpiochip0". 
* @lines: An array desired lines, specified by offset * index for the associated GPIO device. * @num_lines: The number of lines to request. - * @value: The array of values set to gpiochip, must be + * @values: The array of values set to gpiochip, must be * 0(low) or 1(high). * * Return: On success return 0; diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index f6b7e85b121c..9b17097bc3d7 100644 --- a/tools/power/x86/turbostat/turbostat.8 +++ b/tools/power/x86/turbostat/turbostat.8 @@ -54,12 +54,14 @@ name as necessary to disambiguate it from others is necessary. Note that option .PP \fB--cpu cpu-set\fP limit output to system summary plus the specified cpu-set. If cpu-set is the string "core", then the system summary plus the first CPU in each core are printed -- eg. subsequent HT siblings are not printed. Or if cpu-set is the string "package", then the system summary plus the first CPU in each package is printed. Otherwise, the system summary plus the specified set of CPUs are printed. The cpu-set is ordered from low to high, comma delimited with ".." and "-" permitted to denote a range. eg. 1,2,8,14..17,21-44 .PP -\fB--hide column\fP do not show the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--hide sysfs" to hide the sysfs statistics columns as a group. +\fB--hide column\fP do not show the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. .PP \fB--enable column\fP show the specified built-in columns, which are otherwise disabled, by default. Currently the only built-in counters disabled by default are "usec", "Time_Of_Day_Seconds", "APIC" and "X2APIC". The column name "all" can be used to enable all disabled-by-default built-in counters. .PP -\fB--show column\fP show only the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--show sysfs" to show the sysfs statistics columns as a group. +\fB--show column\fP show only the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. +.PP +\fB--show CATEGORY --hide CATEGORY\fP Show and hide also accept a single CATEGORY of columns: "all", "topology", "idle", "frequency", "power", "sysfs", "other". .PP \fB--Dump\fP displays the raw counter values. .PP diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 5939615265f1..47d3ba895d6d 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -3,7 +3,7 @@ * turbostat -- show CPU frequency and C-state residency * on modern Intel and AMD processors. * - * Copyright (c) 2013 Intel Corporation. + * Copyright (c) 2021 Intel Corporation. 
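 *
 * (Aside: with the CATEGORY forms documented in the turbostat.8 hunk
 * above, column selection becomes terse; group membership is defined by
 * the BIC_* masks added further below. Example invocations:
 *   turbostat --show frequency   - only the frequency-related columns
 *   turbostat --hide idle        - drop all C-state residency columns
 *   turbostat --show sysfs       - the pre-existing group, now one of seven)
 *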
* Len Brown <[email protected]> */ @@ -33,12 +33,20 @@ #include <sys/capability.h> #include <errno.h> #include <math.h> +#include <linux/perf_event.h> +#include <asm/unistd.h> +#include <stdbool.h> char *proc_stat = "/proc/stat"; FILE *outf; int *fd_percpu; -struct timeval interval_tv = {5, 0}; -struct timespec interval_ts = {5, 0}; +int *fd_instr_count_percpu; +struct timeval interval_tv = { 5, 0 }; +struct timespec interval_ts = { 5, 0 }; + +/* Save original CPU model */ +unsigned int model_orig; + unsigned int num_iterations; unsigned int debug; unsigned int quiet; @@ -75,30 +83,33 @@ char *output_buffer, *outp; unsigned int do_rapl; unsigned int do_dts; unsigned int do_ptm; -unsigned long long gfx_cur_rc6_ms; +unsigned int do_ipc; +unsigned long long gfx_cur_rc6_ms; unsigned long long cpuidle_cur_cpu_lpi_us; unsigned long long cpuidle_cur_sys_lpi_us; unsigned int gfx_cur_mhz; unsigned int gfx_act_mhz; -unsigned int tcc_activation_temp; -unsigned int tcc_activation_temp_override; +unsigned int tj_max; +unsigned int tj_max_override; +int tcc_offset_bits; double rapl_power_units, rapl_time_units; double rapl_dram_energy_units, rapl_energy_units; double rapl_joule_counter_range; unsigned int do_core_perf_limit_reasons; unsigned int has_automatic_cstate_conversion; +unsigned int dis_cstate_prewake; unsigned int do_gfx_perf_limit_reasons; unsigned int do_ring_perf_limit_reasons; unsigned int crystal_hz; unsigned long long tsc_hz; int base_cpu; double discover_bclk(unsigned int family, unsigned int model); -unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */ +unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */ /* IA32_HWP_REQUEST, IA32_HWP_STATUS */ -unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */ +unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */ unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */ -unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */ -unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ +unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */ +unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ unsigned int has_misc_feature_control; unsigned int first_counter_read = 1; int ignore_stdin; @@ -173,12 +184,14 @@ struct thread_data { unsigned long long aperf; unsigned long long mperf; unsigned long long c1; - unsigned long long irq_count; + unsigned long long instr_count; + unsigned long long irq_count; unsigned int smi_count; unsigned int cpu_id; unsigned int apic_id; unsigned int x2apic_id; unsigned int flags; + bool is_atom; #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 unsigned long long counter[MAX_ADDED_THREAD_COUNTERS]; @@ -240,12 +253,11 @@ struct pkg_data { ((node_no) * topo.cores_per_node) + \ (core_no)) - #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no) -enum counter_scope {SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE}; -enum counter_type {COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC}; -enum counter_format {FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT}; +enum counter_scope { SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE }; +enum counter_type { COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC }; +enum counter_format { FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT }; struct msr_counter { unsigned int msr_num; @@ -281,9 +293,9 @@ int get_msr_sum(int cpu, off_t offset, unsigned long long *msr); struct msr_sum_array { /* get_msr_sum() = sum + (get_msr() - last) */ struct { - /*The accumulated MSR value is updated by the timer*/ + /*The accumulated MSR value is 
updated by the timer */ unsigned long long sum; - /*The MSR footprint recorded in last timer*/ + /*The MSR footprint recorded in last timer */ unsigned long long last; } entries[IDX_COUNT]; }; @@ -291,13 +303,16 @@ struct msr_sum_array { /* The percpu MSR sum array.*/ struct msr_sum_array *per_cpu_msr_sum; -int idx_to_offset(int idx) +off_t idx_to_offset(int idx) { - int offset; + off_t offset; switch (idx) { case IDX_PKG_ENERGY: - offset = MSR_PKG_ENERGY_STATUS; + if (do_rapl & RAPL_AMD_F17H) + offset = MSR_PKG_ENERGY_STAT; + else + offset = MSR_PKG_ENERGY_STATUS; break; case IDX_DRAM_ENERGY: offset = MSR_DRAM_ENERGY_STATUS; @@ -320,12 +335,13 @@ int idx_to_offset(int idx) return offset; } -int offset_to_idx(int offset) +int offset_to_idx(off_t offset) { int idx; switch (offset) { case MSR_PKG_ENERGY_STATUS: + case MSR_PKG_ENERGY_STAT: idx = IDX_PKG_ENERGY; break; case MSR_DRAM_ENERGY_STATUS: @@ -353,7 +369,7 @@ int idx_valid(int idx) { switch (idx) { case IDX_PKG_ENERGY: - return do_rapl & RAPL_PKG; + return do_rapl & (RAPL_PKG | RAPL_AMD_F17H); case IDX_DRAM_ENERGY: return do_rapl & RAPL_DRAM; case IDX_PP0_ENERGY: @@ -368,6 +384,7 @@ int idx_valid(int idx) return 0; } } + struct sys_counters { unsigned int added_thread_counters; unsigned int added_core_counters; @@ -391,7 +408,7 @@ struct cpu_topology { int logical_node_id; /* 0-based count within the package */ int physical_core_id; int thread_id; - cpu_set_t *put_ids; /* Processing Unit/Thread IDs */ + cpu_set_t *put_ids; /* Processing Unit/Thread IDs */ } *cpus; struct topo_params { @@ -408,7 +425,7 @@ struct topo_params { struct timeval tv_even, tv_odd, tv_delta; -int *irq_column_2_cpu; /* /proc/interrupts column numbers */ +int *irq_column_2_cpu; /* /proc/interrupts column numbers */ int *irqs_per_cpu; /* indexed by cpu_num */ void setup_all_buffers(void); @@ -421,34 +438,31 @@ int cpu_is_not_present(int cpu) { return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set); } + /* * run func(thread, core, package) in topology order * skip non-present cpus */ -int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *), - struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base) +int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pkg_data *), + struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base) { int retval, pkg_no, core_no, thread_no, node_no; for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { for (node_no = 0; node_no < topo.nodes_per_pkg; node_no++) { for (core_no = 0; core_no < topo.cores_per_node; ++core_no) { - for (thread_no = 0; thread_no < - topo.threads_per_core; ++thread_no) { + for (thread_no = 0; thread_no < topo.threads_per_core; ++thread_no) { struct thread_data *t; struct core_data *c; struct pkg_data *p; - t = GET_THREAD(thread_base, thread_no, - core_no, node_no, - pkg_no); + t = GET_THREAD(thread_base, thread_no, core_no, node_no, pkg_no); if (cpu_is_not_present(t->cpu_id)) continue; - c = GET_CORE(core_base, core_no, - node_no, pkg_no); + c = GET_CORE(core_base, core_no, node_no, pkg_no); p = GET_PKG(pkg_base, pkg_no); retval = func(t, c, p); @@ -470,6 +484,7 @@ int cpu_migrate(int cpu) else return 0; } + int get_msr_fd(int cpu) { char pathname[32]; @@ -490,6 +505,39 @@ int get_msr_fd(int cpu) return fd; } +static long perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags) +{ + return syscall(__NR_perf_event_open, hw_event, pid, cpu, 
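    /*
     * (Aside on the new IPC plumbing: pid = -1 with a pinned cpu makes the
     * counter cover every task on that CPU; group_fd = -1 and flags = 0
     * mean no event group. The fd is cached in fd_instr_count_percpu[]
     * and presumably read() as a raw u64; the IPC column later divides
     * delta(instructions) by delta(aperf).)
     */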
group_fd, flags); +} + +static int perf_instr_count_open(int cpu_num) +{ + struct perf_event_attr pea; + int fd; + + memset(&pea, 0, sizeof(struct perf_event_attr)); + pea.type = PERF_TYPE_HARDWARE; + pea.size = sizeof(struct perf_event_attr); + pea.config = PERF_COUNT_HW_INSTRUCTIONS; + + /* counter for cpu_num, including user + kernel and all processes */ + fd = perf_event_open(&pea, -1, cpu_num, -1, 0); + if (fd == -1) + err(-1, "cpu%d: perf instruction counter\n", cpu_num); + + return fd; +} + +int get_instr_count_fd(int cpu) +{ + if (fd_instr_count_percpu[cpu]) + return fd_instr_count_percpu[cpu]; + + fd_instr_count_percpu[cpu] = perf_instr_count_open(cpu); + + return fd_instr_count_percpu[cpu]; +} + int get_msr(int cpu, off_t offset, unsigned long long *msr) { ssize_t retval; @@ -518,7 +566,7 @@ struct msr_counter bic[] = { { 0x0, "Bzy_MHz" }, { 0x0, "TSC_MHz" }, { 0x0, "IRQ" }, - { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL}, + { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL }, { 0x0, "sysfs" }, { 0x0, "CPU%c1" }, { 0x0, "CPU%c3" }, @@ -561,6 +609,7 @@ struct msr_counter bic[] = { { 0x0, "X2APIC" }, { 0x0, "Die" }, { 0x0, "GFXAMHz" }, + { 0x0, "IPC" }, }; #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) @@ -616,6 +665,13 @@ struct msr_counter bic[] = { #define BIC_X2APIC (1ULL << 49) #define BIC_Die (1ULL << 50) #define BIC_GFXACTMHz (1ULL << 51) +#define BIC_IPC (1ULL << 52) + +#define BIC_TOPOLOGY (BIC_Package | BIC_Node | BIC_CoreCnt | BIC_PkgCnt | BIC_Core | BIC_CPU | BIC_Die ) +#define BIC_THERMAL_PWR ( BIC_CoreTmp | BIC_PkgTmp | BIC_PkgWatt | BIC_CorWatt | BIC_GFXWatt | BIC_RAMWatt | BIC_PKG__ | BIC_RAM__) +#define BIC_FREQUENCY ( BIC_Avg_MHz | BIC_Busy | BIC_Bzy_MHz | BIC_TSC_MHz | BIC_GFXMHz | BIC_GFXACTMHz ) +#define BIC_IDLE ( BIC_sysfs | BIC_CPU_c1 | BIC_CPU_c3 | BIC_CPU_c6 | BIC_CPU_c7 | BIC_GFX_rc6 | BIC_Pkgpc2 | BIC_Pkgpc3 | BIC_Pkgpc6 | BIC_Pkgpc7 | BIC_Pkgpc8 | BIC_Pkgpc9 | BIC_Pkgpc10 | BIC_CPU_LPI | BIC_SYS_LPI | BIC_Mod_c6 | BIC_Totl_c0 | BIC_Any_c0 | BIC_GFX_c0 | BIC_CPUGFX) +#define BIC_OTHER ( BIC_IRQ | BIC_SMI | BIC_ThreadC | BIC_CoreTmp | BIC_IPC) #define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC) @@ -627,7 +683,7 @@ unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME) #define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT) #define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT) - +#define BIC_IS_ENABLED(COUNTER_BIT) (bic_enabled & COUNTER_BIT) #define MAX_DEFERRED 16 char *deferred_skip_names[MAX_DEFERRED]; @@ -642,42 +698,40 @@ enum show_hide_mode { SHOW_LIST, HIDE_LIST } global_show_hide_mode = HIDE_LIST; void help(void) { fprintf(outf, - "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n" - "\n" - "Turbostat forks the specified COMMAND and prints statistics\n" - "when COMMAND completes.\n" - "If no COMMAND is specified, turbostat wakes every 5-seconds\n" - "to print statistics, until interrupted.\n" - " -a, --add add a counter\n" - " eg. 
--add msr0x10,u64,cpu,delta,MY_TSC\n" - " -c, --cpu cpu-set limit output to summary plus cpu-set:\n" - " {core | package | j,k,l..m,n-p }\n" - " -d, --debug displays usec, Time_Of_Day_Seconds and more debugging\n" - " -D, --Dump displays the raw counter values\n" - " -e, --enable [all | column]\n" - " shows all or the specified disabled column\n" - " -H, --hide [column|column,column,...]\n" - " hide the specified column(s)\n" - " -i, --interval sec.subsec\n" - " Override default 5-second measurement interval\n" - " -J, --Joules displays energy in Joules instead of Watts\n" - " -l, --list list column headers only\n" - " -n, --num_iterations num\n" - " number of the measurement iterations\n" - " -o, --out file\n" - " create or truncate \"file\" for all output\n" - " -q, --quiet skip decoding system configuration header\n" - " -s, --show [column|column,column,...]\n" - " show only the specified column(s)\n" - " -S, --Summary\n" - " limits output to 1-line system summary per interval\n" - " -T, --TCC temperature\n" - " sets the Thermal Control Circuit temperature in\n" - " degrees Celsius\n" - " -h, --help print this help message\n" - " -v, --version print version information\n" - "\n" - "For more help, run \"man turbostat\"\n"); + "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n" + "\n" + "Turbostat forks the specified COMMAND and prints statistics\n" + "when COMMAND completes.\n" + "If no COMMAND is specified, turbostat wakes every 5-seconds\n" + "to print statistics, until interrupted.\n" + " -a, --add add a counter\n" + " eg. --add msr0x10,u64,cpu,delta,MY_TSC\n" + " -c, --cpu cpu-set limit output to summary plus cpu-set:\n" + " {core | package | j,k,l..m,n-p }\n" + " -d, --debug displays usec, Time_Of_Day_Seconds and more debugging\n" + " -D, --Dump displays the raw counter values\n" + " -e, --enable [all | column]\n" + " shows all or the specified disabled column\n" + " -H, --hide [column|column,column,...]\n" + " hide the specified column(s)\n" + " -i, --interval sec.subsec\n" + " Override default 5-second measurement interval\n" + " -J, --Joules displays energy in Joules instead of Watts\n" + " -l, --list list column headers only\n" + " -n, --num_iterations num\n" + " number of the measurement iterations\n" + " -o, --out file\n" + " create or truncate \"file\" for all output\n" + " -q, --quiet skip decoding system configuration header\n" + " -s, --show [column|column,column,...]\n" + " show only the specified column(s)\n" + " -S, --Summary\n" + " limits output to 1-line system summary per interval\n" + " -T, --TCC temperature\n" + " sets the Thermal Control Circuit temperature in\n" + " degrees Celsius\n" + " -h, --help print this help message\n" + " -v, --version print version information\n" "\n" "For more help, run \"man turbostat\"\n"); } /* @@ -700,6 +754,18 @@ unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode) if (!strcmp(name_list, "all")) return ~0; + if (!strcmp(name_list, "topology")) + return BIC_TOPOLOGY; + if (!strcmp(name_list, "power")) + return BIC_THERMAL_PWR; + if (!strcmp(name_list, "idle")) + return BIC_IDLE; + if (!strcmp(name_list, "frequency")) + return BIC_FREQUENCY; + if (!strcmp(name_list, "other")) + return BIC_OTHER; + if (!strcmp(name_list, "all")) + return 0; for (i = 0; i < MAX_BIC; ++i) { if (!strcmp(name_list, bic[i].name)) { @@ -731,7 +797,6 @@ unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode) return retval; } - void print_header(char *delim) { struct msr_counter *mp; @@ -764,6 +829,9 @@ void 
print_header(char *delim) if (DO_BIC(BIC_TSC_MHz)) outp += sprintf(outp, "%sTSC_MHz", (printed++ ? delim : "")); + if (DO_BIC(BIC_IPC)) + outp += sprintf(outp, "%sIPC", (printed++ ? delim : "")); + if (DO_BIC(BIC_IRQ)) { if (sums_need_wide_columns) outp += sprintf(outp, "%s IRQ", (printed++ ? delim : "")); @@ -910,8 +978,7 @@ void print_header(char *delim) outp += sprintf(outp, "\n"); } -int dump_counters(struct thread_data *t, struct core_data *c, - struct pkg_data *p) +int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { int i; struct msr_counter *mp; @@ -919,21 +986,22 @@ int dump_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p); if (t) { - outp += sprintf(outp, "CPU: %d flags 0x%x\n", - t->cpu_id, t->flags); + outp += sprintf(outp, "CPU: %d flags 0x%x\n", t->cpu_id, t->flags); outp += sprintf(outp, "TSC: %016llX\n", t->tsc); outp += sprintf(outp, "aperf: %016llX\n", t->aperf); outp += sprintf(outp, "mperf: %016llX\n", t->mperf); outp += sprintf(outp, "c1: %016llX\n", t->c1); + if (DO_BIC(BIC_IPC)) + outp += sprintf(outp, "IPC: %lld\n", t->instr_count); + if (DO_BIC(BIC_IRQ)) outp += sprintf(outp, "IRQ: %lld\n", t->irq_count); if (DO_BIC(BIC_SMI)) outp += sprintf(outp, "SMI: %d\n", t->smi_count); for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { - outp += sprintf(outp, "tADDED [%d] msr0x%x: %08llX\n", - i, mp->msr_num, t->counter[i]); + outp += sprintf(outp, "tADDED [%d] msr0x%x: %08llX\n", i, mp->msr_num, t->counter[i]); } } @@ -946,8 +1014,7 @@ int dump_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "Joules: %0X\n", c->core_energy); for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { - outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n", - i, mp->msr_num, c->counter[i]); + outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n", i, mp->msr_num, c->counter[i]); } outp += sprintf(outp, "mc6_us: %016llX\n", c->mc6_us); } @@ -976,15 +1043,12 @@ int dump_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "Joules COR: %0llX\n", p->energy_cores); outp += sprintf(outp, "Joules GFX: %0llX\n", p->energy_gfx); outp += sprintf(outp, "Joules RAM: %0llX\n", p->energy_dram); - outp += sprintf(outp, "Throttle PKG: %0llX\n", - p->rapl_pkg_perf_status); - outp += sprintf(outp, "Throttle RAM: %0llX\n", - p->rapl_dram_perf_status); + outp += sprintf(outp, "Throttle PKG: %0llX\n", p->rapl_pkg_perf_status); + outp += sprintf(outp, "Throttle RAM: %0llX\n", p->rapl_dram_perf_status); outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c); for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { - outp += sprintf(outp, "pADDED [%d] msr0x%x: %08llX\n", - i, mp->msr_num, p->counter[i]); + outp += sprintf(outp, "pADDED [%d] msr0x%x: %08llX\n", i, mp->msr_num, p->counter[i]); } } @@ -996,8 +1060,7 @@ int dump_counters(struct thread_data *t, struct core_data *c, /* * column formatting convention & formats */ -int format_counters(struct thread_data *t, struct core_data *c, - struct pkg_data *p) +int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { double interval_float, tsc; char *fmt8; @@ -1006,17 +1069,16 @@ int format_counters(struct thread_data *t, struct core_data *c, char *delim = "\t"; int printed = 0; - /* if showing only 1st thread in core and this isn't one, bail out */ + /* if showing only 1st thread in core and this isn't one, bail out */ if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) return 0; - /* if showing only 1st thread 
in pkg and this isn't one, bail out */ + /* if showing only 1st thread in pkg and this isn't one, bail out */ if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; /*if not summary line and --cpu is used */ - if ((t != &average.threads) && - (cpu_subset && !CPU_ISSET_S(t->cpu_id, cpu_subset_size, cpu_subset))) + if ((t != &average.threads) && (cpu_subset && !CPU_ISSET_S(t->cpu_id, cpu_subset_size, cpu_subset))) return 0; if (DO_BIC(BIC_USEC)) { @@ -1031,7 +1093,7 @@ int format_counters(struct thread_data *t, struct core_data *c, if (DO_BIC(BIC_TOD)) outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec); - interval_float = t->tv_delta.tv_sec + t->tv_delta.tv_usec/1000000.0; + interval_float = t->tv_delta.tv_sec + t->tv_delta.tv_usec / 1000000.0; tsc = t->tsc * tsc_tweak; @@ -1067,11 +1129,9 @@ int format_counters(struct thread_data *t, struct core_data *c, if (DO_BIC(BIC_Node)) { if (t) outp += sprintf(outp, "%s%d", - (printed++ ? delim : ""), - cpus[t->cpu_id].physical_node_id); + (printed++ ? delim : ""), cpus[t->cpu_id].physical_node_id); else - outp += sprintf(outp, "%s-", - (printed++ ? delim : "")); + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); } if (DO_BIC(BIC_Core)) { if (c) @@ -1088,22 +1148,25 @@ int format_counters(struct thread_data *t, struct core_data *c, } if (DO_BIC(BIC_Avg_MHz)) - outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), - 1.0 / units * t->aperf / interval_float); + outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 1.0 / units * t->aperf / interval_float); if (DO_BIC(BIC_Busy)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->mperf/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->mperf / tsc); if (DO_BIC(BIC_Bzy_MHz)) { if (has_base_hz) - outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), base_hz / units * t->aperf / t->mperf); + outp += + sprintf(outp, "%s%.0f", (printed++ ? delim : ""), base_hz / units * t->aperf / t->mperf); else outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), - tsc / units * t->aperf / t->mperf / interval_float); + tsc / units * t->aperf / t->mperf / interval_float); } if (DO_BIC(BIC_TSC_MHz)) - outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 1.0 * t->tsc/units/interval_float); + outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 1.0 * t->tsc / units / interval_float); + + if (DO_BIC(BIC_IPC)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 1.0 * t->instr_count / t->aperf); /* IRQ */ if (DO_BIC(BIC_IRQ)) { @@ -1121,7 +1184,8 @@ int format_counters(struct thread_data *t, struct core_data *c, for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 32) - outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) t->counter[i]); + outp += + sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int)t->counter[i]); else outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), t->counter[i]); } else if (mp->format == FORMAT_DELTA) { @@ -1131,27 +1195,28 @@ int format_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->counter[i]); } else if (mp->format == FORMAT_PERCENT) { if (mp->type == COUNTER_USEC) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), t->counter[i]/interval_float/10000); + outp += + sprintf(outp, "%s%.2f", (printed++ ? delim : ""), + t->counter[i] / interval_float / 10000); else - outp += sprintf(outp, "%s%.2f", (printed++ ? 
delim : ""), 100.0 * t->counter[i]/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->counter[i] / tsc); } } /* C1 */ if (DO_BIC(BIC_CPU_c1)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->c1/tsc); - + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->c1 / tsc); /* print per-core data only for 1st thread in core */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) goto done; if (DO_BIC(BIC_CPU_c3)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3 / tsc); if (DO_BIC(BIC_CPU_c6)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6 / tsc); if (DO_BIC(BIC_CPU_c7)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c7/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c7 / tsc); /* Mod%c6 */ if (DO_BIC(BIC_Mod_c6)) @@ -1163,7 +1228,8 @@ int format_counters(struct thread_data *t, struct core_data *c, for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 32) - outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) c->counter[i]); + outp += + sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int)c->counter[i]); else outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), c->counter[i]); } else if (mp->format == FORMAT_DELTA) { @@ -1172,14 +1238,15 @@ int format_counters(struct thread_data *t, struct core_data *c, else outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), c->counter[i]); } else if (mp->format == FORMAT_PERCENT) { - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->counter[i]/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->counter[i] / tsc); } } fmt8 = "%s%.2f"; if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY)) - outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float); + outp += + sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float); if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units); @@ -1197,7 +1264,7 @@ int format_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "%s**.**", (printed++ ? delim : "")); } else { outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), - p->gfx_rc6_ms / 10.0 / interval_float); + p->gfx_rc6_ms / 10.0 / interval_float); } } @@ -1211,42 +1278,49 @@ int format_counters(struct thread_data *t, struct core_data *c, /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */ if (DO_BIC(BIC_Totl_c0)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_wtd_core_c0/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_wtd_core_c0 / tsc); if (DO_BIC(BIC_Any_c0)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_core_c0/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_core_c0 / tsc); if (DO_BIC(BIC_GFX_c0)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_gfxe_c0/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_gfxe_c0 / tsc); if (DO_BIC(BIC_CPUGFX)) - outp += sprintf(outp, "%s%.2f", (printed++ ? 
delim : ""), 100.0 * p->pkg_both_core_gfxe_c0/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_both_core_gfxe_c0 / tsc); if (DO_BIC(BIC_Pkgpc2)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc2/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc2 / tsc); if (DO_BIC(BIC_Pkgpc3)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc3/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc3 / tsc); if (DO_BIC(BIC_Pkgpc6)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc6/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc6 / tsc); if (DO_BIC(BIC_Pkgpc7)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc7/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc7 / tsc); if (DO_BIC(BIC_Pkgpc8)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc8/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc8 / tsc); if (DO_BIC(BIC_Pkgpc9)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc9/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc9 / tsc); if (DO_BIC(BIC_Pkgpc10)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc10/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc10 / tsc); if (DO_BIC(BIC_CPU_LPI)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->cpu_lpi / 1000000.0 / interval_float); + outp += + sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->cpu_lpi / 1000000.0 / interval_float); if (DO_BIC(BIC_SYS_LPI)) - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float); + outp += + sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float); if (DO_BIC(BIC_PkgWatt)) - outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float); + outp += + sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float); if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY)) - outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float); + outp += + sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float); if (DO_BIC(BIC_GFXWatt)) - outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float); + outp += + sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float); if (DO_BIC(BIC_RAMWatt)) - outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float); + outp += + sprintf(outp, fmt8, (printed++ ? delim : ""), + p->energy_dram * rapl_dram_energy_units / interval_float); if (DO_BIC(BIC_Pkg_J)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units); if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY)) @@ -1256,14 +1330,19 @@ int format_counters(struct thread_data *t, struct core_data *c, if (DO_BIC(BIC_RAM_J)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units); if (DO_BIC(BIC_PKG__)) - outp += sprintf(outp, fmt8, (printed++ ? 
delim : ""), 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); + outp += + sprintf(outp, fmt8, (printed++ ? delim : ""), + 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); if (DO_BIC(BIC_RAM__)) - outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); + outp += + sprintf(outp, fmt8, (printed++ ? delim : ""), + 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 32) - outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) p->counter[i]); + outp += + sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int)p->counter[i]); else outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), p->counter[i]); } else if (mp->format == FORMAT_DELTA) { @@ -1272,7 +1351,7 @@ int format_counters(struct thread_data *t, struct core_data *c, else outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), p->counter[i]); } else if (mp->format == FORMAT_PERCENT) { - outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->counter[i]/tsc); + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->counter[i] / tsc); } } @@ -1297,12 +1376,14 @@ void flush_output_stdout(void) outp = output_buffer; } + void flush_output_stderr(void) { fputs(output_buffer, outf); fflush(outf); outp = output_buffer; } + void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { static int printed; @@ -1323,13 +1404,11 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_ #define DELTA_WRAP32(new, old) \ old = ((((unsigned long long)new << 32) - ((unsigned long long)old << 32)) >> 32); -int -delta_package(struct pkg_data *new, struct pkg_data *old) +int delta_package(struct pkg_data *new, struct pkg_data *old) { int i; struct msr_counter *mp; - if (DO_BIC(BIC_Totl_c0)) old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0; if (DO_BIC(BIC_Any_c0)) @@ -1354,7 +1433,7 @@ delta_package(struct pkg_data *new, struct pkg_data *old) old->pkg_temp_c = new->pkg_temp_c; /* flag an error when rc6 counter resets/wraps */ - if (old->gfx_rc6_ms > new->gfx_rc6_ms) + if (old->gfx_rc6_ms > new->gfx_rc6_ms) old->gfx_rc6_ms = -1; else old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms; @@ -1379,8 +1458,7 @@ delta_package(struct pkg_data *new, struct pkg_data *old) return 0; } -void -delta_core(struct core_data *new, struct core_data *old) +void delta_core(struct core_data *new, struct core_data *old) { int i; struct msr_counter *mp; @@ -1412,9 +1490,7 @@ int soft_c1_residency_display(int bic) /* * old = new - old */ -int -delta_thread(struct thread_data *new, struct thread_data *old, - struct core_data *core_delta) +int delta_thread(struct thread_data *new, struct thread_data *old, struct core_data *core_delta) { int i; struct msr_counter *mp; @@ -1445,8 +1521,7 @@ delta_thread(struct thread_data *new, struct thread_data *old, old->c1 = new->c1 - old->c1; - if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || - soft_c1_residency_display(BIC_Avg_MHz)) { + if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || soft_c1_residency_display(BIC_Avg_MHz)) { if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { old->aperf = new->aperf - old->aperf; old->mperf = new->mperf - old->mperf; @@ -1455,7 +1530,6 @@ delta_thread(struct thread_data *new, struct thread_data *old, } 
} - if (use_c1_residency_msr) { /* * Some models have a dedicated C1 residency MSR, @@ -1472,7 +1546,7 @@ delta_thread(struct thread_data *new, struct thread_data *old, else { /* normal case, derive c1 */ old->c1 = (old->tsc * tsc_tweak) - old->mperf - core_delta->c3 - - core_delta->c6 - core_delta->c7; + - core_delta->c6 - core_delta->c7; } } @@ -1482,6 +1556,9 @@ delta_thread(struct thread_data *new, struct thread_data *old, old->mperf = 1; /* divide by 0 protection */ } + if (DO_BIC(BIC_IPC)) + old->instr_count = new->instr_count - old->instr_count; + if (DO_BIC(BIC_IRQ)) old->irq_count = new->irq_count - old->irq_count; @@ -1498,8 +1575,7 @@ delta_thread(struct thread_data *new, struct thread_data *old, } int delta_cpu(struct thread_data *t, struct core_data *c, - struct pkg_data *p, struct thread_data *t2, - struct core_data *c2, struct pkg_data *p2) + struct pkg_data *p, struct thread_data *t2, struct core_data *c2, struct pkg_data *p2) { int retval = 0; @@ -1522,7 +1598,7 @@ int delta_cpu(struct thread_data *t, struct core_data *c, void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { int i; - struct msr_counter *mp; + struct msr_counter *mp; t->tv_begin.tv_sec = 0; t->tv_begin.tv_usec = 0; @@ -1536,6 +1612,8 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data t->mperf = 0; t->c1 = 0; + t->instr_count = 0; + t->irq_count = 0; t->smi_count = 0; @@ -1587,8 +1665,8 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) p->counter[i] = 0; } -int sum_counters(struct thread_data *t, struct core_data *c, - struct pkg_data *p) + +int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { int i; struct msr_counter *mp; @@ -1611,6 +1689,8 @@ int sum_counters(struct thread_data *t, struct core_data *c, average.threads.mperf += t->mperf; average.threads.c1 += t->c1; + average.threads.instr_count += t->instr_count; + average.threads.irq_count += t->irq_count; average.threads.smi_count += t->smi_count; @@ -1687,12 +1767,12 @@ int sum_counters(struct thread_data *t, struct core_data *c, } return 0; } + /* * sum the counters for all cpus in the system * compute the weighted average */ -void compute_average(struct thread_data *t, struct core_data *c, - struct pkg_data *p) +void compute_average(struct thread_data *t, struct core_data *c, struct pkg_data *p) { int i; struct msr_counter *mp; @@ -1707,6 +1787,7 @@ void compute_average(struct thread_data *t, struct core_data *c, average.threads.tsc /= topo.num_cpus; average.threads.aperf /= topo.num_cpus; average.threads.mperf /= topo.num_cpus; + average.threads.instr_count /= topo.num_cpus; average.threads.c1 /= topo.num_cpus; if (average.threads.irq_count > 9999999) @@ -1772,7 +1853,7 @@ static unsigned long long rdtsc(void) { unsigned int low, high; - asm volatile("rdtsc" : "=a" (low), "=d" (high)); + asm volatile ("rdtsc":"=a" (low), "=d"(high)); return low | ((unsigned long long)high) << 32; } @@ -1788,6 +1869,7 @@ FILE *fopen_or_die(const char *path, const char *mode) err(1, "%s: open failed", path); return filep; } + /* * snapshot_sysfs_counter() * @@ -1819,8 +1901,7 @@ int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp) char path[128 + PATH_BYTES]; if (mp->flags & SYSFS_PERCPU) { - sprintf(path, "/sys/devices/system/cpu/cpu%d/%s", - cpu, mp->path); + sprintf(path, "/sys/devices/system/cpu/cpu%d/%s", cpu, mp->path); *counterp = snapshot_sysfs_counter(path); } else 
{ @@ -1880,7 +1961,7 @@ void get_apic_id(struct thread_data *t) eax = ebx = ecx = edx = 0; __cpuid(0x80000001, eax, ebx, ecx, edx); - topology_extensions = ecx & (1 << 22); + topology_extensions = ecx & (1 << 22); if (topology_extensions == 0) return; @@ -1903,8 +1984,7 @@ void get_apic_id(struct thread_data *t) t->x2apic_id = edx; if (debug && (t->apic_id != (t->x2apic_id & 0xff))) - fprintf(outf, "cpu%d: BIOS BUG: apic 0x%x x2apic 0x%x\n", - t->cpu_id, t->apic_id, t->x2apic_id); + fprintf(outf, "cpu%d: BIOS BUG: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id); } /* @@ -1932,8 +2012,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) retry: t->tsc = rdtsc(); /* we are running on local CPU of interest */ - if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || - soft_c1_residency_display(BIC_Avg_MHz)) { + if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || soft_c1_residency_display(BIC_Avg_MHz)) { unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time; /* @@ -1980,8 +2059,7 @@ retry: if (aperf_mperf_retry_count < 5) goto retry; else - warnx("cpu%d jitter %lld %lld", - cpu, aperf_time, mperf_time); + warnx("cpu%d jitter %lld %lld", cpu, aperf_time, mperf_time); } aperf_mperf_retry_count = 0; @@ -1989,6 +2067,10 @@ retry: t->mperf = t->mperf * aperf_mperf_multiplier; } + if (DO_BIC(BIC_IPC)) + if (read(get_instr_count_fd(cpu), &t->instr_count, sizeof(long long)) != sizeof(long long)) + return -4; + if (DO_BIC(BIC_IRQ)) t->irq_count = irqs_per_cpu[cpu]; if (DO_BIC(BIC_SMI)) { @@ -2023,9 +2105,19 @@ retry: return -7; } - if (DO_BIC(BIC_CPU_c7) || soft_c1_residency_display(BIC_CPU_c7)) + if (DO_BIC(BIC_CPU_c7) || soft_c1_residency_display(BIC_CPU_c7)) { if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) return -8; + else if (t->is_atom) { + /* + * For Atom CPUs that have a core cstate deeper than c6, + * MSR_CORE_C6_RESIDENCY returns residency of cc6 and deeper. + * Subtract CC7 (and deeper cstates) residency to get + * accurate cc6 residency. + */ + c->c6 -= c->c7; + } + } if (DO_BIC(BIC_Mod_c6)) if (get_msr(cpu, MSR_MODULE_C6_RES_MS, &c->mc6_us)) @@ -2034,7 +2126,7 @@ retry: if (DO_BIC(BIC_CoreTmp)) { if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) return -9; - c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); + c->core_temp_c = tj_max - ((msr >> 16) & 0x7F); } if (do_rapl & RAPL_AMD_F17H) { @@ -2140,7 +2232,7 @@ retry: if (DO_BIC(BIC_PkgTmp)) { if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) return -17; - p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); + p->pkg_temp_c = tj_max - ((msr >> 16) & 0x7F); } if (DO_BIC(BIC_GFX_rc6)) @@ -2168,45 +2260,81 @@ done: * (>= PCL__7) and to index pkg_cstate_limit_strings[]. 
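 *
 * As an illustration (not from this patch), the per-model tables below
 * translate the low 4 bits of MSR_PKG_CST_CONFIG_CONTROL into one of
 * these PCL_* values; the probe and dump paths use them together
 * roughly like this, once pkg_cstate_limits points at the table for
 * the running model:
 *
 *	get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr);
 *	pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
 *	fprintf(outf, "pkg-cstate-limit=%s\n",
 *		pkg_cstate_limit_strings[pkg_cstate_limit]);
 *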
*/ -#define PCLUKN 0 /* Unknown */ -#define PCLRSV 1 /* Reserved */ -#define PCL__0 2 /* PC0 */ -#define PCL__1 3 /* PC1 */ -#define PCL__2 4 /* PC2 */ -#define PCL__3 5 /* PC3 */ -#define PCL__4 6 /* PC4 */ -#define PCL__6 7 /* PC6 */ -#define PCL_6N 8 /* PC6 No Retention */ -#define PCL_6R 9 /* PC6 Retention */ -#define PCL__7 10 /* PC7 */ -#define PCL_7S 11 /* PC7 Shrink */ -#define PCL__8 12 /* PC8 */ -#define PCL__9 13 /* PC9 */ -#define PCL_10 14 /* PC10 */ -#define PCLUNL 15 /* Unlimited */ +#define PCLUKN 0 /* Unknown */ +#define PCLRSV 1 /* Reserved */ +#define PCL__0 2 /* PC0 */ +#define PCL__1 3 /* PC1 */ +#define PCL__2 4 /* PC2 */ +#define PCL__3 5 /* PC3 */ +#define PCL__4 6 /* PC4 */ +#define PCL__6 7 /* PC6 */ +#define PCL_6N 8 /* PC6 No Retention */ +#define PCL_6R 9 /* PC6 Retention */ +#define PCL__7 10 /* PC7 */ +#define PCL_7S 11 /* PC7 Shrink */ +#define PCL__8 12 /* PC8 */ +#define PCL__9 13 /* PC9 */ +#define PCL_10 14 /* PC10 */ +#define PCLUNL 15 /* Unlimited */ int pkg_cstate_limit = PCLUKN; char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2", - "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "pc10", "unlimited"}; + "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "pc10", "unlimited" +}; -int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; -int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; -int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; -int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7}; -int amt_pkg_cstate_limits[16] = {PCLUNL, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; -int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; -int glm_pkg_cstate_limits[16] = {PCLUNL, PCL__1, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCL_10, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; -int skx_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; +int nhm_pkg_cstate_limits[16] = + { PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, + PCLRSV, PCLRSV +}; +int snb_pkg_cstate_limits[16] = + { PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, + PCLRSV, PCLRSV +}; -static void -calculate_tsc_tweak() +int hsw_pkg_cstate_limits[16] = + { PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, + PCLRSV, PCLRSV +}; + +int slv_pkg_cstate_limits[16] = + { PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, + PCL__6, PCL__7 +}; + +int amt_pkg_cstate_limits[16] = + { PCLUNL, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, + PCLRSV, PCLRSV +}; + +int phi_pkg_cstate_limits[16] = + { PCL__0, PCL__2, PCL_6N, 
PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, + PCLRSV, PCLRSV +}; + +int glm_pkg_cstate_limits[16] = + { PCLUNL, PCL__1, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCL_10, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, + PCLRSV, PCLRSV +}; + +int skx_pkg_cstate_limits[16] = + { PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, + PCLRSV, PCLRSV +}; + +int icx_pkg_cstate_limits[16] = + { PCL__0, PCL__2, PCL__6, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, + PCLRSV, PCLRSV +}; + +static void calculate_tsc_tweak() { tsc_tweak = base_hz / tsc_hz; } -static void -dump_nhm_platform_info(void) +void prewake_cstate_probe(unsigned int family, unsigned int model); + +static void dump_nhm_platform_info(void) { unsigned long long msr; unsigned int ratio; @@ -2216,22 +2344,23 @@ dump_nhm_platform_info(void) fprintf(outf, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); ratio = (msr >> 40) & 0xFF; - fprintf(outf, "%d * %.1f = %.1f MHz max efficiency frequency\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max efficiency frequency\n", ratio, bclk, ratio * bclk); ratio = (msr >> 8) & 0xFF; - fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n", ratio, bclk, ratio * bclk); get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr); fprintf(outf, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", base_cpu, msr, msr & 0x2 ? "EN" : "DIS"); + /* C-state Pre-wake Disable (CSTATE_PREWAKE_DISABLE) */ + if (dis_cstate_prewake) + fprintf(outf, "C-state Pre-wake: %sabled\n", msr & 0x40000000 ? "DIS" : "EN"); + return; } -static void -dump_hsw_turbo_ratio_limits(void) +static void dump_hsw_turbo_ratio_limits(void) { unsigned long long msr; unsigned int ratio; @@ -2242,18 +2371,15 @@ dump_hsw_turbo_ratio_limits(void) ratio = (msr >> 8) & 0xFF; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz max turbo 18 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 18 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 0) & 0xFF; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz max turbo 17 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 17 active cores\n", ratio, bclk, ratio * bclk); return; } -static void -dump_ivt_turbo_ratio_limits(void) +static void dump_ivt_turbo_ratio_limits(void) { unsigned long long msr; unsigned int ratio; @@ -2264,45 +2390,38 @@ dump_ivt_turbo_ratio_limits(void) ratio = (msr >> 56) & 0xFF; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz max turbo 16 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 16 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 48) & 0xFF; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz max turbo 15 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 15 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 40) & 0xFF; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz max turbo 14 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 14 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 32) & 0xFF; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz max turbo 13 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 13 active 
cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 24) & 0xFF; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz max turbo 12 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 12 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 16) & 0xFF; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz max turbo 11 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 11 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 8) & 0xFF; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz max turbo 10 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 10 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 0) & 0xFF; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz max turbo 9 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 9 active cores\n", ratio, bclk, ratio * bclk); return; } + int has_turbo_ratio_group_limits(int family, int model) { @@ -2312,6 +2431,7 @@ int has_turbo_ratio_group_limits(int family, int model) switch (model) { case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_FAM6_SKYLAKE_X: + case INTEL_FAM6_ICELAKE_X: case INTEL_FAM6_ATOM_GOLDMONT_D: case INTEL_FAM6_ATOM_TREMONT_D: return 1; @@ -2319,8 +2439,7 @@ int has_turbo_ratio_group_limits(int family, int model) return 0; } -static void -dump_turbo_ratio_limits(int family, int model) +static void dump_turbo_ratio_limits(int family, int model) { unsigned long long msr, core_counts; unsigned int ratio, group_size; @@ -2385,8 +2504,7 @@ dump_turbo_ratio_limits(int family, int model) return; } -static void -dump_atom_turbo_ratio_limits(void) +static void dump_atom_turbo_ratio_limits(void) { unsigned long long msr; unsigned int ratio; @@ -2396,45 +2514,37 @@ dump_atom_turbo_ratio_limits(void) ratio = (msr >> 0) & 0x3F; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz minimum operating frequency\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz minimum operating frequency\n", ratio, bclk, ratio * bclk); ratio = (msr >> 8) & 0x3F; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz low frequency mode (LFM)\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz low frequency mode (LFM)\n", ratio, bclk, ratio * bclk); ratio = (msr >> 16) & 0x3F; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n", ratio, bclk, ratio * bclk); get_msr(base_cpu, MSR_ATOM_CORE_TURBO_RATIOS, &msr); fprintf(outf, "cpu%d: MSR_ATOM_CORE_TURBO_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF); ratio = (msr >> 24) & 0x3F; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz max turbo 4 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 4 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 16) & 0x3F; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz max turbo 3 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 3 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 8) & 0x3F; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz max turbo 2 active cores\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 2 active cores\n", ratio, bclk, ratio * bclk); ratio = (msr >> 0) & 0x3F; if (ratio) - fprintf(outf, "%d * %.1f = %.1f MHz max turbo 1 active core\n", - ratio, bclk, ratio * bclk); + fprintf(outf, "%d * %.1f = %.1f MHz max turbo 1 
active core\n", ratio, bclk, ratio * bclk); } -static void -dump_knl_turbo_ratio_limits(void) +static void dump_knl_turbo_ratio_limits(void) { const unsigned int buckets_no = 7; @@ -2446,8 +2556,7 @@ dump_knl_turbo_ratio_limits(void) get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr); - fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", - base_cpu, msr); + fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr); /* * Turbo encoding in KNL is as follows: @@ -2492,8 +2601,7 @@ dump_knl_turbo_ratio_limits(void) ratio[i], bclk, ratio[i] * bclk, cores[i]); } -static void -dump_nhm_cst_cfg(void) +static void dump_nhm_cst_cfg(void) { unsigned long long msr; @@ -2506,14 +2614,11 @@ dump_nhm_cst_cfg(void) (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "", (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "", (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "", - (msr & (1 << 15)) ? "" : "UN", - (unsigned int)msr & 0xF, - pkg_cstate_limit_strings[pkg_cstate_limit]); + (msr & (1 << 15)) ? "" : "UN", (unsigned int)msr & 0xF, pkg_cstate_limit_strings[pkg_cstate_limit]); #define AUTOMATIC_CSTATE_CONVERSION (1UL << 16) if (has_automatic_cstate_conversion) { - fprintf(outf, ", automatic c-state conversion=%s", - (msr & AUTOMATIC_CSTATE_CONVERSION) ? "on" : "off"); + fprintf(outf, ", automatic c-state conversion=%s", (msr & AUTOMATIC_CSTATE_CONVERSION) ? "on" : "off"); } fprintf(outf, ")\n"); @@ -2521,8 +2626,7 @@ dump_nhm_cst_cfg(void) return; } -static void -dump_config_tdp(void) +static void dump_config_tdp(void) { unsigned long long msr; @@ -2564,7 +2668,7 @@ dump_config_tdp(void) fprintf(outf, ")\n"); } -unsigned int irtl_time_units[] = {1, 32, 1024, 32768, 1048576, 33554432, 0, 0 }; +unsigned int irtl_time_units[] = { 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 }; void print_irtl(void) { @@ -2604,6 +2708,7 @@ void print_irtl(void) (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); } + void free_fd_percpu(void) { int i; @@ -2660,7 +2765,6 @@ void free_all_buffers(void) free(cpus); } - /* * Parse a file containing a single int. 
* Return 0 if file can not be opened @@ -2735,8 +2839,7 @@ void set_node_data(void) * the logical_node_id */ for (cpux = cpu; cpux <= topo.max_cpu_num; cpux++) { - if ((cpus[cpux].physical_package_id == pkg) && - (cpus[cpux].physical_node_id == node)) { + if ((cpus[cpux].physical_package_id == pkg) && (cpus[cpux].physical_node_id == node)) { cpus[cpux].logical_node_id = lnode; cpu_count++; } @@ -2758,8 +2861,7 @@ int get_physical_node_id(struct cpu_topology *thiscpu) int cpu = thiscpu->logical_cpu_id; for (i = 0; i <= topo.max_cpu_num; i++) { - sprintf(path, "/sys/devices/system/cpu/cpu%d/node%i/cpulist", - cpu, i); + sprintf(path, "/sys/devices/system/cpu/cpu%d/node%i/cpulist", cpu, i); filep = fopen(path, "r"); if (!filep) continue; @@ -2789,8 +2891,7 @@ int get_thread_siblings(struct cpu_topology *thiscpu) size = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); CPU_ZERO_S(size, thiscpu->put_ids); - sprintf(path, - "/sys/devices/system/cpu/cpu%d/topology/thread_siblings", cpu); + sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings", cpu); filep = fopen(path, "r"); if (!filep) { @@ -2807,10 +2908,8 @@ int get_thread_siblings(struct cpu_topology *thiscpu) sib_core = get_core_id(so); if (sib_core == thiscpu->physical_core_id) { CPU_SET_S(so, size, thiscpu->put_ids); - if ((so != cpu) && - (cpus[so].thread_id < 0)) - cpus[so].thread_id = - thread_id++; + if ((so != cpu) && (cpus[so].thread_id < 0)) + cpus[so].thread_id = thread_id++; } } } @@ -2825,41 +2924,31 @@ int get_thread_siblings(struct cpu_topology *thiscpu) * skip non-present cpus */ -int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *, - struct pkg_data *, struct thread_data *, struct core_data *, - struct pkg_data *), struct thread_data *thread_base, - struct core_data *core_base, struct pkg_data *pkg_base, - struct thread_data *thread_base2, struct core_data *core_base2, - struct pkg_data *pkg_base2) +int for_all_cpus_2(int (func) (struct thread_data *, struct core_data *, + struct pkg_data *, struct thread_data *, struct core_data *, + struct pkg_data *), struct thread_data *thread_base, + struct core_data *core_base, struct pkg_data *pkg_base, + struct thread_data *thread_base2, struct core_data *core_base2, struct pkg_data *pkg_base2) { int retval, pkg_no, node_no, core_no, thread_no; for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { for (node_no = 0; node_no < topo.nodes_per_pkg; ++node_no) { - for (core_no = 0; core_no < topo.cores_per_node; - ++core_no) { - for (thread_no = 0; thread_no < - topo.threads_per_core; ++thread_no) { + for (core_no = 0; core_no < topo.cores_per_node; ++core_no) { + for (thread_no = 0; thread_no < topo.threads_per_core; ++thread_no) { struct thread_data *t, *t2; struct core_data *c, *c2; struct pkg_data *p, *p2; - t = GET_THREAD(thread_base, thread_no, - core_no, node_no, - pkg_no); + t = GET_THREAD(thread_base, thread_no, core_no, node_no, pkg_no); if (cpu_is_not_present(t->cpu_id)) continue; - t2 = GET_THREAD(thread_base2, thread_no, - core_no, node_no, - pkg_no); + t2 = GET_THREAD(thread_base2, thread_no, core_no, node_no, pkg_no); - c = GET_CORE(core_base, core_no, - node_no, pkg_no); - c2 = GET_CORE(core_base2, core_no, - node_no, - pkg_no); + c = GET_CORE(core_base, core_no, node_no, pkg_no); + c2 = GET_CORE(core_base2, core_no, node_no, pkg_no); p = GET_PKG(pkg_base, pkg_no); p2 = GET_PKG(pkg_base2, pkg_no); @@ -2878,7 +2967,7 @@ int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *, * run func(cpu) on every cpu in /proc/stat * return 
max_cpu number */ -int for_all_proc_cpus(int (func)(int)) +int for_all_proc_cpus(int (func) (int)) { FILE *fp; int cpu_num; @@ -2898,7 +2987,7 @@ int for_all_proc_cpus(int (func)(int)) retval = func(cpu_num); if (retval) { fclose(fp); - return(retval); + return (retval); } } fclose(fp); @@ -2922,16 +3011,14 @@ void set_max_cpu_num(void) base_cpu = sched_getcpu(); if (base_cpu < 0) err(1, "cannot find calling cpu ID"); - sprintf(pathname, - "/sys/devices/system/cpu/cpu%d/topology/thread_siblings", - base_cpu); + sprintf(pathname, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings", base_cpu); filep = fopen_or_die(pathname, "r"); topo.max_cpu_num = 0; while (fscanf(filep, "%lx,", &dummy) == 1) topo.max_cpu_num += BITMASK_SIZE; fclose(filep); - topo.max_cpu_num--; /* 0 based */ + topo.max_cpu_num--; /* 0 based */ } /* @@ -2943,6 +3030,7 @@ int count_cpus(int cpu) topo.num_cpus++; return 0; } + int mark_cpu_present(int cpu) { CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set); @@ -3012,12 +3100,12 @@ int snapshot_proc_interrupts(void) } - while (getc(fp) != '\n') - ; /* flush interrupt description */ + while (getc(fp) != '\n') ; /* flush interrupt description */ } return 0; } + /* * snapshot_gfx_rc6_ms() * @@ -3041,6 +3129,7 @@ int snapshot_gfx_rc6_ms(void) return 0; } + /* * snapshot_gfx_mhz() * @@ -3120,6 +3209,7 @@ int snapshot_cpu_lpi_us(void) return 0; } + /* * snapshot_sys_lpi() * @@ -3143,6 +3233,7 @@ int snapshot_sys_lpi_us(void) return 0; } + /* * snapshot /proc and /sys files * @@ -3174,7 +3265,7 @@ int snapshot_proc_sysfs_files(void) int exit_requested; -static void signal_handler (int signal) +static void signal_handler(int signal) { switch (signal) { case SIGINT: @@ -3272,7 +3363,7 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg for (i = IDX_PKG_ENERGY; i < IDX_COUNT; i++) { unsigned long long msr_cur, msr_last; - int offset; + off_t offset; if (!idx_valid(i)) continue; @@ -3281,7 +3372,7 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg continue; ret = get_msr(cpu, offset, &msr_cur); if (ret) { - fprintf(outf, "Can not update msr(0x%x)\n", offset); + fprintf(outf, "Can not update msr(0x%llx)\n", (unsigned long long)offset); continue; } @@ -3294,8 +3385,7 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg return 0; } -static void -msr_record_handler(union sigval v) +static void msr_record_handler(union sigval v) { for_all_cpus(update_msr_sum, EVEN_COUNTERS); } @@ -3340,12 +3430,38 @@ void msr_sum_record(void) } return; - release_timer: +release_timer: timer_delete(timerid); - release_msr: +release_msr: free(per_cpu_msr_sum); } +/* + * set_my_sched_priority(pri) + * return previous + */ +int set_my_sched_priority(int priority) +{ + int retval; + int original_priority; + + errno = 0; + original_priority = getpriority(PRIO_PROCESS, 0); + if (errno && (original_priority == -1)) + err(errno, "getpriority"); + + retval = setpriority(PRIO_PROCESS, 0, priority); + if (retval) + err(retval, "setpriority(%d)", priority); + + errno = 0; + retval = getpriority(PRIO_PROCESS, 0); + if (retval != priority) + err(-1, "getpriority(%d) != setpriority(%d)", retval, priority); + + return original_priority; +} + void turbostat_loop() { int retval; @@ -3354,6 +3470,11 @@ void turbostat_loop() setup_signal_handler(); + /* + * elevate own priority for interval mode + */ + set_my_sched_priority(-20); + restart: restarted++; @@ -3434,7 +3555,7 @@ void check_dev_msr() sprintf(pathname, 
"/dev/cpu/%d/msr", base_cpu); if (stat(pathname, &sb)) - if (system("/sbin/modprobe msr > /dev/null 2>&1")) + if (system("/sbin/modprobe msr > /dev/null 2>&1")) err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); } @@ -3456,8 +3577,7 @@ int check_for_cap_sys_rawio(void) err(-6, "cap_get\n"); if (cap_flag_value != CAP_SET) { - warnx("capget(CAP_SYS_RAWIO) failed," - " try \"# setcap cap_sys_rawio=ep %s\"", progname); + warnx("capget(CAP_SYS_RAWIO) failed," " try \"# setcap cap_sys_rawio=ep %s\"", progname); return 1; } @@ -3466,6 +3586,7 @@ int check_for_cap_sys_rawio(void) return 0; } + void check_permissions(void) { int do_exit = 0; @@ -3551,6 +3672,10 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) pkg_cstate_limits = skx_pkg_cstate_limits; has_misc_feature_control = 1; break; + case INTEL_FAM6_ICELAKE_X: /* ICX */ + pkg_cstate_limits = icx_pkg_cstate_limits; + has_misc_feature_control = 1; + break; case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ no_MSR_MISC_PWR_MGMT = 1; case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */ @@ -3567,7 +3692,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_ATOM_GOLDMONT_PLUS: case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ case INTEL_FAM6_ATOM_TREMONT: /* EHL */ - case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */ + case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */ pkg_cstate_limits = glm_pkg_cstate_limits; break; default: @@ -3583,6 +3708,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) has_base_hz = 1; return 1; } + /* * SLV client has support for unique MSRs: * @@ -3603,6 +3729,7 @@ int has_slv_msrs(unsigned int family, unsigned int model) } return 0; } + int is_dnv(unsigned int family, unsigned int model) { @@ -3615,6 +3742,7 @@ int is_dnv(unsigned int family, unsigned int model) } return 0; } + int is_bdx(unsigned int family, unsigned int model) { @@ -3627,6 +3755,7 @@ int is_bdx(unsigned int family, unsigned int model) } return 0; } + int is_skx(unsigned int family, unsigned int model) { @@ -3639,6 +3768,20 @@ int is_skx(unsigned int family, unsigned int model) } return 0; } + +int is_icx(unsigned int family, unsigned int model) +{ + + if (!genuine_intel) + return 0; + + switch (model) { + case INTEL_FAM6_ICELAKE_X: + return 1; + } + return 0; +} + int is_ehl(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -3650,6 +3793,7 @@ int is_ehl(unsigned int family, unsigned int model) } return 0; } + int is_jvl(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -3668,7 +3812,7 @@ int has_turbo_ratio_limit(unsigned int family, unsigned int model) return 0; switch (model) { - /* Nehalem compatible, but do not include turbo-ratio limit support */ + /* Nehalem compatible, but do not include turbo-ratio limit support */ case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */ case INTEL_FAM6_XEON_PHI_KNL: /* PHI - Knights Landing (different MSR definition) */ return 0; @@ -3676,6 +3820,7 @@ int has_turbo_ratio_limit(unsigned int family, unsigned int model) return 1; } } + int has_atom_turbo_ratio_limit(unsigned int family, unsigned int model) { if (has_slv_msrs(family, model)) @@ -3683,6 +3828,7 @@ int has_atom_turbo_ratio_limit(unsigned int family, unsigned int model) return 0; } + int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -3699,6 +3845,7 @@ int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model) return 0; } } + int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -3730,6 +3877,7 @@ int 
has_knl_turbo_ratio_limit(unsigned int family, unsigned int model) return 0; } } + int has_glm_turbo_ratio_limit(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -3741,11 +3889,13 @@ int has_glm_turbo_ratio_limit(unsigned int family, unsigned int model) switch (model) { case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_FAM6_SKYLAKE_X: + case INTEL_FAM6_ICELAKE_X: return 1; default: return 0; } } + int has_config_tdp(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -3766,6 +3916,7 @@ int has_config_tdp(unsigned int family, unsigned int model) case INTEL_FAM6_SKYLAKE_L: /* SKL */ case INTEL_FAM6_CANNONLAKE_L: /* CNL */ case INTEL_FAM6_SKYLAKE_X: /* SKX */ + case INTEL_FAM6_ICELAKE_X: /* ICX */ case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */ return 1; @@ -3774,8 +3925,41 @@ int has_config_tdp(unsigned int family, unsigned int model) } } -static void -remove_underbar(char *s) +/* + * tcc_offset_bits: + * 0: Tcc Offset not supported (Default) + * 6: Bit 29:24 of MSR_PLATFORM_INFO + * 4: Bit 27:24 of MSR_PLATFORM_INFO + */ +void check_tcc_offset(int model) +{ + unsigned long long msr; + + if (!genuine_intel) + return; + + switch (model) { + case INTEL_FAM6_SKYLAKE_L: + case INTEL_FAM6_SKYLAKE: + case INTEL_FAM6_KABYLAKE_L: + case INTEL_FAM6_KABYLAKE: + case INTEL_FAM6_ICELAKE_L: + case INTEL_FAM6_ICELAKE: + case INTEL_FAM6_TIGERLAKE_L: + case INTEL_FAM6_TIGERLAKE: + case INTEL_FAM6_COMETLAKE: + if (!get_msr(base_cpu, MSR_PLATFORM_INFO, &msr)) { + msr = (msr >> 30) & 1; + if (msr) + tcc_offset_bits = 6; + } + return; + default: + return; + } +} + +static void remove_underbar(char *s) { char *to = s; @@ -3788,8 +3972,7 @@ remove_underbar(char *s) *to = 0; } -static void -dump_cstate_pstate_config_info(unsigned int family, unsigned int model) +static void dump_cstate_pstate_config_info(unsigned int family, unsigned int model) { if (!do_nhm_platform_info) return; @@ -3834,8 +4017,8 @@ static void dump_sysfs_file(char *path) fprintf(outf, "%s: %s", strrchr(path, '/') + 1, cpuidle_buf); } -static void -dump_sysfs_cstate_config(void) + +static void dump_sysfs_cstate_config(void) { char path[64]; char name_buf[16]; @@ -3855,15 +4038,14 @@ dump_sysfs_cstate_config(void) for (state = 0; state < 10; ++state) { - sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", - base_cpu, state); + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", base_cpu, state); input = fopen(path, "r"); if (input == NULL) continue; if (!fgets(name_buf, sizeof(name_buf), input)) err(1, "%s: failed to read file", path); - /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ + /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ sp = strchr(name_buf, '-'); if (!sp) sp = strchrnul(name_buf, '\n'); @@ -3872,8 +4054,7 @@ dump_sysfs_cstate_config(void) remove_underbar(name_buf); - sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc", - base_cpu, state); + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc", base_cpu, state); input = fopen(path, "r"); if (input == NULL) continue; @@ -3884,8 +4065,8 @@ dump_sysfs_cstate_config(void) fclose(input); } } -static void -dump_sysfs_pstate_config(void) + +static void dump_sysfs_pstate_config(void) { char path[64]; char driver_buf[64]; @@ -3893,8 +4074,7 @@ dump_sysfs_pstate_config(void) FILE *input; int turbo; - sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_driver", - base_cpu); + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_driver", base_cpu); input = 
fopen(path, "r"); if (input == NULL) { fprintf(outf, "NSFOD %s\n", path); @@ -3904,8 +4084,7 @@ dump_sysfs_pstate_config(void) err(1, "%s: failed to read file", path); fclose(input); - sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor", - base_cpu); + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor", base_cpu); input = fopen(path, "r"); if (input == NULL) { fprintf(outf, "NSFOD %s\n", path); @@ -3937,7 +4116,6 @@ dump_sysfs_pstate_config(void) } } - /* * print_epb() * Decode the ENERGY_PERF_BIAS MSR @@ -3983,6 +4161,7 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p) return 0; } + /* * print_hwp() * Decode the MSR_HWP_CAPABILITIES @@ -4009,8 +4188,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, MSR_PM_ENABLE, &msr)) return 0; - fprintf(outf, "cpu%d: MSR_PM_ENABLE: 0x%08llx (%sHWP)\n", - cpu, msr, (msr & (1 << 0)) ? "" : "No-"); + fprintf(outf, "cpu%d: MSR_PM_ENABLE: 0x%08llx (%sHWP)\n", cpu, msr, (msr & (1 << 0)) ? "" : "No-"); /* MSR_PM_ENABLE[1] == 1 if HWP is enabled and MSRs visible */ if ((msr & (1 << 0)) == 0) @@ -4020,25 +4198,23 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) return 0; fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx " - "(high %d guar %d eff %d low %d)\n", - cpu, msr, - (unsigned int)HWP_HIGHEST_PERF(msr), - (unsigned int)HWP_GUARANTEED_PERF(msr), - (unsigned int)HWP_MOSTEFFICIENT_PERF(msr), - (unsigned int)HWP_LOWEST_PERF(msr)); + "(high %d guar %d eff %d low %d)\n", + cpu, msr, + (unsigned int)HWP_HIGHEST_PERF(msr), + (unsigned int)HWP_GUARANTEED_PERF(msr), + (unsigned int)HWP_MOSTEFFICIENT_PERF(msr), (unsigned int)HWP_LOWEST_PERF(msr)); if (get_msr(cpu, MSR_HWP_REQUEST, &msr)) return 0; fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx " - "(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n", - cpu, msr, - (unsigned int)(((msr) >> 0) & 0xff), - (unsigned int)(((msr) >> 8) & 0xff), - (unsigned int)(((msr) >> 16) & 0xff), - (unsigned int)(((msr) >> 24) & 0xff), - (unsigned int)(((msr) >> 32) & 0xff3), - (unsigned int)(((msr) >> 42) & 0x1)); + "(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n", + cpu, msr, + (unsigned int)(((msr) >> 0) & 0xff), + (unsigned int)(((msr) >> 8) & 0xff), + (unsigned int)(((msr) >> 16) & 0xff), + (unsigned int)(((msr) >> 24) & 0xff), + (unsigned int)(((msr) >> 32) & 0xff3), (unsigned int)(((msr) >> 42) & 0x1)); if (has_hwp_pkg) { if (get_msr(cpu, MSR_HWP_REQUEST_PKG, &msr)) @@ -4050,8 +4226,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) (unsigned int)(((msr) >> 0) & 0xff), (unsigned int)(((msr) >> 8) & 0xff), (unsigned int)(((msr) >> 16) & 0xff), - (unsigned int)(((msr) >> 24) & 0xff), - (unsigned int)(((msr) >> 32) & 0xff3)); + (unsigned int)(((msr) >> 24) & 0xff), (unsigned int)(((msr) >> 32) & 0xff3)); } if (has_hwp_notify) { if (get_msr(cpu, MSR_HWP_INTERRUPT, &msr)) @@ -4059,18 +4234,14 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) fprintf(outf, "cpu%d: MSR_HWP_INTERRUPT: 0x%08llx " "(%s_Guaranteed_Perf_Change, %s_Excursion_Min)\n", - cpu, msr, - ((msr) & 0x1) ? "EN" : "Dis", - ((msr) & 0x2) ? "EN" : "Dis"); + cpu, msr, ((msr) & 0x1) ? "EN" : "Dis", ((msr) & 0x2) ? "EN" : "Dis"); } if (get_msr(cpu, MSR_HWP_STATUS, &msr)) return 0; fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx " - "(%sGuaranteed_Perf_Change, %sExcursion_Min)\n", - cpu, msr, - ((msr) & 0x1) ? "" : "No-", - ((msr) & 0x2) ? 
"" : "No-"); + "(%sGuaranteed_Perf_Change, %sExcursion_Min)\n", + cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x2) ? "" : "No-"); return 0; } @@ -4110,8 +4281,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data (msr & 1 << 5) ? "Auto-HWP, " : "", (msr & 1 << 4) ? "Graphics, " : "", (msr & 1 << 2) ? "bit2, " : "", - (msr & 1 << 1) ? "ThermStatus, " : "", - (msr & 1 << 0) ? "PROCHOT, " : ""); + (msr & 1 << 1) ? "ThermStatus, " : "", (msr & 1 << 0) ? "PROCHOT, " : ""); fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n", (msr & 1 << 31) ? "bit31, " : "", (msr & 1 << 30) ? "bit30, " : "", @@ -4125,8 +4295,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data (msr & 1 << 21) ? "Auto-HWP, " : "", (msr & 1 << 20) ? "Graphics, " : "", (msr & 1 << 18) ? "bit18, " : "", - (msr & 1 << 17) ? "ThermStatus, " : "", - (msr & 1 << 16) ? "PROCHOT, " : ""); + (msr & 1 << 17) ? "ThermStatus, " : "", (msr & 1 << 16) ? "PROCHOT, " : ""); } if (do_gfx_perf_limit_reasons) { @@ -4139,8 +4308,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data (msr & 1 << 6) ? "VR-Therm, " : "", (msr & 1 << 8) ? "Amps, " : "", (msr & 1 << 9) ? "GFXPwr, " : "", - (msr & 1 << 10) ? "PkgPwrL1, " : "", - (msr & 1 << 11) ? "PkgPwrL2, " : ""); + (msr & 1 << 10) ? "PkgPwrL1, " : "", (msr & 1 << 11) ? "PkgPwrL2, " : ""); fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s)\n", (msr & 1 << 16) ? "PROCHOT, " : "", (msr & 1 << 17) ? "ThermStatus, " : "", @@ -4148,8 +4316,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data (msr & 1 << 22) ? "VR-Therm, " : "", (msr & 1 << 24) ? "Amps, " : "", (msr & 1 << 25) ? "GFXPwr, " : "", - (msr & 1 << 26) ? "PkgPwrL1, " : "", - (msr & 1 << 27) ? "PkgPwrL2, " : ""); + (msr & 1 << 26) ? "PkgPwrL1, " : "", (msr & 1 << 27) ? "PkgPwrL2, " : ""); } if (do_ring_perf_limit_reasons) { get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr); @@ -4159,21 +4326,19 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data (msr & 1 << 1) ? "ThermStatus, " : "", (msr & 1 << 6) ? "VR-Therm, " : "", (msr & 1 << 8) ? "Amps, " : "", - (msr & 1 << 10) ? "PkgPwrL1, " : "", - (msr & 1 << 11) ? "PkgPwrL2, " : ""); + (msr & 1 << 10) ? "PkgPwrL1, " : "", (msr & 1 << 11) ? "PkgPwrL2, " : ""); fprintf(outf, " (Logged: %s%s%s%s%s%s)\n", (msr & 1 << 16) ? "PROCHOT, " : "", (msr & 1 << 17) ? "ThermStatus, " : "", (msr & 1 << 22) ? "VR-Therm, " : "", (msr & 1 << 24) ? "Amps, " : "", - (msr & 1 << 26) ? "PkgPwrL1, " : "", - (msr & 1 << 27) ? "PkgPwrL2, " : ""); + (msr & 1 << 26) ? "PkgPwrL1, " : "", (msr & 1 << 27) ? "PkgPwrL2, " : ""); } return 0; } #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ -#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ +#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ double get_tdp_intel(unsigned int model) { @@ -4202,14 +4367,14 @@ double get_tdp_amd(unsigned int family) * rapl_dram_energy_units_probe() * Energy units are either hard-coded, or come from RAPL Energy Unit MSR. 
*/ -static double -rapl_dram_energy_units_probe(int model, double rapl_energy_units) +static double rapl_dram_energy_units_probe(int model, double rapl_energy_units) { /* only called for genuine_intel, family 6 */ switch (model) { case INTEL_FAM6_HASWELL_X: /* HSX */ case INTEL_FAM6_BROADWELL_X: /* BDX */ + case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ return (rapl_dram_energy_units = 15.3 / 1000000); default: @@ -4254,7 +4419,9 @@ void rapl_probe_intel(unsigned int family, unsigned int model) BIC_PRESENT(BIC_PkgWatt); break; case INTEL_FAM6_ATOM_TREMONT: /* EHL */ - do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO; + do_rapl = + RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS + | RAPL_GFX | RAPL_PKG_POWER_INFO; if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); @@ -4277,7 +4444,9 @@ void rapl_probe_intel(unsigned int family, unsigned int model) break; case INTEL_FAM6_SKYLAKE_L: /* SKL */ case INTEL_FAM6_CANNONLAKE_L: /* CNL */ - do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO; + do_rapl = + RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS + | RAPL_GFX | RAPL_PKG_POWER_INFO; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); if (rapl_joules) { @@ -4295,8 +4464,11 @@ void rapl_probe_intel(unsigned int family, unsigned int model) case INTEL_FAM6_HASWELL_X: /* HSX */ case INTEL_FAM6_BROADWELL_X: /* BDX */ case INTEL_FAM6_SKYLAKE_X: /* SKX */ + case INTEL_FAM6_ICELAKE_X: /* ICX */ case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ - do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; + do_rapl = + RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | + RAPL_PKG_POWER_INFO; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); if (rapl_joules) { @@ -4309,7 +4481,9 @@ void rapl_probe_intel(unsigned int family, unsigned int model) break; case INTEL_FAM6_SANDYBRIDGE_X: case INTEL_FAM6_IVYBRIDGE_X: - do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO; + do_rapl = + RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | + RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); if (rapl_joules) { @@ -4334,7 +4508,9 @@ void rapl_probe_intel(unsigned int family, unsigned int model) } break; case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ - do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS; + do_rapl = + RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | + RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); if (rapl_joules) { @@ -4451,10 +4627,16 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model) void automatic_cstate_conversion_probe(unsigned int family, unsigned int model) { - if (is_skx(family, model) || is_bdx(family, model)) + if (is_skx(family, model) || is_bdx(family, model) || is_icx(family, model)) has_automatic_cstate_conversion = 1; } +void prewake_cstate_probe(unsigned int family, 
unsigned int model) +{ + if (is_icx(family, model)) + dis_cstate_prewake = 1; +} + int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; @@ -4480,8 +4662,7 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p return 0; dts = (msr >> 16) & 0x7F; - fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", - cpu, msr, tcc_activation_temp - dts); + fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", cpu, msr, tj_max - dts); if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr)) return 0; @@ -4489,10 +4670,9 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p dts = (msr >> 16) & 0x7F; dts2 = (msr >> 8) & 0x7F; fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", - cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); + cpu, msr, tj_max - dts, tj_max - dts2); } - if (do_dts && debug) { unsigned int resolution; @@ -4502,7 +4682,7 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p dts = (msr >> 16) & 0x7F; resolution = (msr >> 27) & 0xF; fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", - cpu, msr, tcc_activation_temp - dts, resolution); + cpu, msr, tj_max - dts, resolution); if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr)) return 0; @@ -4510,7 +4690,7 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p dts = (msr >> 16) & 0x7F; dts2 = (msr >> 8) & 0x7F; fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", - cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); + cpu, msr, tj_max - dts, tj_max - dts2); } return 0; @@ -4522,7 +4702,7 @@ void print_power_limit_msr(int cpu, unsigned long long msr, char *label) cpu, label, ((msr >> 15) & 1) ? "EN" : "DIS", ((msr >> 0) & 0x7FFF) * rapl_power_units, - (1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units, + (1.0 + (((msr >> 22) & 0x3) / 4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units, (((msr >> 16) & 1) ? "EN" : "DIS")); return; @@ -4563,12 +4743,11 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (do_rapl & RAPL_PKG_POWER_INFO) { if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr)) - return -5; - + return -5; fprintf(outf, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n", cpu, msr, - ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, + ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); @@ -4587,17 +4766,17 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) cpu, ((msr >> 47) & 1) ? "EN" : "DIS", ((msr >> 32) & 0x7FFF) * rapl_power_units, - (1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units, + (1.0 + (((msr >> 54) & 0x3) / 4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units, ((msr >> 48) & 1) ? 
"EN" : "DIS"); } if (do_rapl & RAPL_DRAM_POWER_INFO) { if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr)) - return -6; + return -6; fprintf(outf, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n", cpu, msr, - ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, + ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); @@ -4606,7 +4785,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr)) return -9; fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n", - cpu, msr, (msr >> 31) & 1 ? "" : "UN"); + cpu, msr, (msr >> 31) & 1 ? "" : "UN"); print_power_limit_msr(cpu, msr, "DRAM Limit"); } @@ -4620,7 +4799,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) return -9; fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", - cpu, msr, (msr >> 31) & 1 ? "" : "UN"); + cpu, msr, (msr >> 31) & 1 ? "" : "UN"); print_power_limit_msr(cpu, msr, "Cores Limit"); } if (do_rapl & RAPL_GFX) { @@ -4632,7 +4811,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr)) return -9; fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n", - cpu, msr, (msr >> 31) & 1 ? "" : "UN"); + cpu, msr, (msr >> 31) & 1 ? "" : "UN"); print_power_limit_msr(cpu, msr, "GFX Limit"); } return 0; @@ -4654,23 +4833,24 @@ int has_snb_msrs(unsigned int family, unsigned int model) switch (model) { case INTEL_FAM6_SANDYBRIDGE: case INTEL_FAM6_SANDYBRIDGE_X: - case INTEL_FAM6_IVYBRIDGE: /* IVB */ - case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ - case INTEL_FAM6_HASWELL: /* HSW */ - case INTEL_FAM6_HASWELL_X: /* HSW */ - case INTEL_FAM6_HASWELL_L: /* HSW */ - case INTEL_FAM6_HASWELL_G: /* HSW */ - case INTEL_FAM6_BROADWELL: /* BDW */ - case INTEL_FAM6_BROADWELL_G: /* BDW */ - case INTEL_FAM6_BROADWELL_X: /* BDX */ - case INTEL_FAM6_SKYLAKE_L: /* SKL */ - case INTEL_FAM6_CANNONLAKE_L: /* CNL */ - case INTEL_FAM6_SKYLAKE_X: /* SKX */ - case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ + case INTEL_FAM6_IVYBRIDGE: /* IVB */ + case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ + case INTEL_FAM6_HASWELL: /* HSW */ + case INTEL_FAM6_HASWELL_X: /* HSW */ + case INTEL_FAM6_HASWELL_L: /* HSW */ + case INTEL_FAM6_HASWELL_G: /* HSW */ + case INTEL_FAM6_BROADWELL: /* BDW */ + case INTEL_FAM6_BROADWELL_G: /* BDW */ + case INTEL_FAM6_BROADWELL_X: /* BDX */ + case INTEL_FAM6_SKYLAKE_L: /* SKL */ + case INTEL_FAM6_CANNONLAKE_L: /* CNL */ + case INTEL_FAM6_SKYLAKE_X: /* SKX */ + case INTEL_FAM6_ICELAKE_X: /* ICX */ + case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ - case INTEL_FAM6_ATOM_TREMONT: /* EHL */ - case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */ + case INTEL_FAM6_ATOM_TREMONT: /* EHL */ + case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */ return 1; } return 0; @@ -4756,7 +4936,7 @@ int is_cnl(unsigned int family, unsigned int model) return 0; switch (model) { - case INTEL_FAM6_CANNONLAKE_L: /* CNL */ + case INTEL_FAM6_CANNONLAKE_L: /* CNL */ return 1; } @@ -4771,7 +4951,7 @@ unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model) } #define SLM_BCLK_FREQS 5 -double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 
80.0}; +double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0 }; double slm_bclk(void) { @@ -4805,6 +4985,28 @@ double discover_bclk(unsigned int family, unsigned int model) return 133.33; } +int get_cpu_type(struct thread_data *t, struct core_data *c, struct pkg_data *p) +{ + unsigned int eax, ebx, ecx, edx; + + if (!genuine_intel) + return 0; + + if (cpu_migrate(t->cpu_id)) { + fprintf(outf, "Could not migrate to CPU %d\n", t->cpu_id); + return -1; + } + + if (max_level < 0x1a) + return 0; + + __cpuid(0x1a, eax, ebx, ecx, edx); + eax = (eax >> 24) & 0xFF; + if (eax == 0x20) + t->is_atom = true; + return 0; +} + /* * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where * the Thermal Control Circuit (TCC) activates. @@ -4817,53 +5019,69 @@ double discover_bclk(unsigned int family, unsigned int model) * below this value, including the Digital Thermal Sensor (DTS), * Package Thermal Management Sensor (PTM), and thermal event thresholds. */ -int read_tcc_activation_temp() +int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; - unsigned int tcc, target_c, offset_c; + unsigned int tcc_default, tcc_offset; + int cpu; - /* Temperature Target MSR is Nehalem and newer only */ - if (!do_nhm_platform_info) + /* tj_max is used only for dts or ptm */ + if (!(do_dts || do_ptm)) return 0; - if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr)) + /* this is a per-package concept */ + if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; - target_c = (msr >> 16) & 0xFF; + cpu = t->cpu_id; + if (cpu_migrate(cpu)) { + fprintf(outf, "Could not migrate to CPU %d\n", cpu); + return -1; + } - offset_c = (msr >> 24) & 0xF; + if (tj_max_override != 0) { + tj_max = tj_max_override; + fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n", cpu, tj_max); + return 0; + } - tcc = target_c - offset_c; + /* Temperature Target MSR is Nehalem and newer only */ + if (!do_nhm_platform_info) + goto guess; - if (!quiet) - fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n", - base_cpu, msr, tcc, target_c, offset_c); + if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr)) + goto guess; - return tcc; -} + tcc_default = (msr >> 16) & 0xFF; -int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p) -{ - /* tcc_activation_temp is used only for dts or ptm */ - if (!(do_dts || do_ptm)) - return 0; + if (!quiet) { + switch (tcc_offset_bits) { + case 4: + tcc_offset = (msr >> 24) & 0xF; + fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n", + cpu, msr, tcc_default - tcc_offset, tcc_default, tcc_offset); + break; + case 6: + tcc_offset = (msr >> 24) & 0x3F; + fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n", + cpu, msr, tcc_default - tcc_offset, tcc_default, tcc_offset); + break; + default: + fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n", cpu, msr, tcc_default); + break; + } + } - /* this is a per-package concept */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) - return 0; + if (!tcc_default) + goto guess; - if (tcc_activation_temp_override != 0) { - tcc_activation_temp = tcc_activation_temp_override; - fprintf(outf, "Using cmdline TCC Target (%d C)\n", tcc_activation_temp); - return 0; - } + tj_max = tcc_default; - tcc_activation_temp = 
read_tcc_activation_temp(); - if (tcc_activation_temp) - return 0; + return 0; - tcc_activation_temp = TJMAX_DEFAULT; - fprintf(outf, "Guessing tjMax %d C, Please use -T to specify\n", tcc_activation_temp); +guess: + tj_max = TJMAX_DEFAULT; + fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n", cpu, tj_max); return 0; } @@ -4874,9 +5092,7 @@ void decode_feature_control_msr(void) if (!get_msr(base_cpu, MSR_IA32_FEAT_CTL, &msr)) fprintf(outf, "cpu%d: MSR_IA32_FEATURE_CONTROL: 0x%08llx (%sLocked %s)\n", - base_cpu, msr, - msr & FEAT_CTL_LOCKED ? "" : "UN-", - msr & (1 << 18) ? "SGX" : ""); + base_cpu, msr, msr & FEAT_CTL_LOCKED ? "" : "UN-", msr & (1 << 18) ? "SGX" : ""); } void decode_misc_enable_msr(void) @@ -4904,13 +5120,12 @@ void decode_misc_feature_control(void) return; if (!get_msr(base_cpu, MSR_MISC_FEATURE_CONTROL, &msr)) - fprintf(outf, "cpu%d: MSR_MISC_FEATURE_CONTROL: 0x%08llx (%sL2-Prefetch %sL2-Prefetch-pair %sL1-Prefetch %sL1-IP-Prefetch)\n", - base_cpu, msr, - msr & (0 << 0) ? "No-" : "", - msr & (1 << 0) ? "No-" : "", - msr & (2 << 0) ? "No-" : "", - msr & (3 << 0) ? "No-" : ""); + fprintf(outf, + "cpu%d: MSR_MISC_FEATURE_CONTROL: 0x%08llx (%sL2-Prefetch %sL2-Prefetch-pair %sL1-Prefetch %sL1-IP-Prefetch)\n", + base_cpu, msr, msr & (0 << 0) ? "No-" : "", msr & (1 << 0) ? "No-" : "", + msr & (2 << 0) ? "No-" : "", msr & (3 << 0) ? "No-" : ""); } + /* * Decode MSR_MISC_PWR_MGMT * @@ -4931,10 +5146,9 @@ void decode_misc_pwr_mgmt_msr(void) if (!get_msr(base_cpu, MSR_MISC_PWR_MGMT, &msr)) fprintf(outf, "cpu%d: MSR_MISC_PWR_MGMT: 0x%08llx (%sable-EIST_Coordination %sable-EPB %sable-OOB)\n", base_cpu, msr, - msr & (1 << 0) ? "DIS" : "EN", - msr & (1 << 1) ? "EN" : "DIS", - msr & (1 << 8) ? "EN" : "DIS"); + msr & (1 << 0) ? "DIS" : "EN", msr & (1 << 1) ? "EN" : "DIS", msr & (1 << 8) ? "EN" : "DIS"); } + /* * Decode MSR_CC6_DEMOTION_POLICY_CONFIG, MSR_MC6_DEMOTION_POLICY_CONFIG * @@ -4960,10 +5174,10 @@ void decode_c6_demotion_policy_msr(void) unsigned int intel_model_duplicates(unsigned int model) { - switch(model) { + switch (model) { case INTEL_FAM6_NEHALEM_EP: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */ case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */ - case 0x1F: /* Core i7 and i5 Processor - Nehalem */ + case 0x1F: /* Core i7 and i5 Processor - Nehalem */ case INTEL_FAM6_WESTMERE: /* Westmere Client - Clarkdale, Arrandale */ case INTEL_FAM6_WESTMERE_EP: /* Westmere EP - Gulftown */ return INTEL_FAM6_NEHALEM; @@ -4994,14 +5208,15 @@ unsigned int intel_model_duplicates(unsigned int model) case INTEL_FAM6_ROCKETLAKE: case INTEL_FAM6_LAKEFIELD: case INTEL_FAM6_ALDERLAKE: + case INTEL_FAM6_ALDERLAKE_L: return INTEL_FAM6_CANNONLAKE_L; case INTEL_FAM6_ATOM_TREMONT_L: return INTEL_FAM6_ATOM_TREMONT; - case INTEL_FAM6_ICELAKE_X: + case INTEL_FAM6_ICELAKE_D: case INTEL_FAM6_SAPPHIRERAPIDS_X: - return INTEL_FAM6_SKYLAKE_X; + return INTEL_FAM6_ICELAKE_X; } return model; } @@ -5025,17 +5240,36 @@ void print_dev_latency(void) close(fd); return; } - fprintf(outf, "/dev/cpu_dma_latency: %d usec (%s)\n", - value, value == 2000000000 ? "default" : "constrained"); + fprintf(outf, "/dev/cpu_dma_latency: %d usec (%s)\n", value, value == 2000000000 ? 
"default" : "constrained"); close(fd); } +/* + * Linux-perf manages the the HW instructions-retired counter + * by enabling when requested, and hiding rollover + */ +void linux_perf_init(void) +{ + if (!BIC_IS_ENABLED(BIC_IPC)) + return; + + if (access("/proc/sys/kernel/perf_event_paranoid", F_OK)) + return; + + fd_instr_count_percpu = calloc(topo.max_cpu_num + 1, sizeof(int)); + if (fd_instr_count_percpu == NULL) + err(-1, "calloc fd_instr_count_percpu"); + + BIC_PRESENT(BIC_IPC); +} + void process_cpuid() { unsigned int eax, ebx, ecx, edx; unsigned int fms, family, model, stepping, ecx_flags, edx_flags; unsigned int has_turbo; + unsigned long long ucode_patch = 0; eax = ebx = ecx = edx = 0; @@ -5049,8 +5283,8 @@ void process_cpuid() hygon_genuine = 1; if (!quiet) - fprintf(outf, "CPUID(0): %.4s%.4s%.4s ", - (char *)&ebx, (char *)&edx, (char *)&ecx); + fprintf(outf, "CPUID(0): %.4s%.4s%.4s 0x%x CPUID levels\n", + (char *)&ebx, (char *)&edx, (char *)&ecx, max_level); __cpuid(1, fms, ebx, ecx, edx); family = (fms >> 8) & 0xf; @@ -5063,6 +5297,9 @@ void process_cpuid() ecx_flags = ecx; edx_flags = edx; + if (get_msr(sched_getcpu(), MSR_IA32_UCODE_REV, &ucode_patch)) + warnx("get_msr(UCODE)\n"); + /* * check max extended function levels of CPUID. * This is needed to check for invariant TSC. @@ -5072,8 +5309,10 @@ void process_cpuid() __cpuid(0x80000000, max_extended_level, ebx, ecx, edx); if (!quiet) { - fprintf(outf, "0x%x CPUID levels; 0x%x xlevels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n", - max_level, max_extended_level, family, model, stepping, family, model, stepping); + fprintf(outf, "CPUID(1): family:model:stepping 0x%x:%x:%x (%d:%d:%d) microcode 0x%x\n", + family, model, stepping, family, model, stepping, + (unsigned int)((ucode_patch >> 32) & 0xFFFFFFFF)); + fprintf(outf, "CPUID(0x80000000): max_extended_levels: 0x%x\n", max_extended_level); fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n", ecx_flags & (1 << 0) ? "SSE3" : "-", ecx_flags & (1 << 3) ? "MONITOR" : "-", @@ -5083,11 +5322,12 @@ void process_cpuid() edx_flags & (1 << 4) ? "TSC" : "-", edx_flags & (1 << 5) ? "MSR" : "-", edx_flags & (1 << 22) ? "ACPI-TM" : "-", - edx_flags & (1 << 28) ? "HT" : "-", - edx_flags & (1 << 29) ? "TM" : "-"); + edx_flags & (1 << 28) ? "HT" : "-", edx_flags & (1 << 29) ? "TM" : "-"); } - if (genuine_intel) + if (genuine_intel) { + model_orig = model; model = intel_model_duplicates(model); + } if (!(edx_flags & (1 << 5))) errx(1, "CPUID: no MSR"); @@ -5138,14 +5378,11 @@ void process_cpuid() has_hwp ? "" : "No-", has_hwp_notify ? "" : "No-", has_hwp_activity_window ? "" : "No-", - has_hwp_epp ? "" : "No-", - has_hwp_pkg ? "" : "No-", - has_epb ? "" : "No-"); + has_hwp_epp ? "" : "No-", has_hwp_pkg ? "" : "No-", has_epb ? 
"" : "No-"); if (!quiet) decode_misc_enable_msr(); - if (max_level >= 0x7 && !quiet) { int has_sgx; @@ -5177,7 +5414,7 @@ void process_cpuid() eax_crystal, ebx_tsc, crystal_hz); if (crystal_hz == 0) - switch(model) { + switch (model) { case INTEL_FAM6_SKYLAKE_L: /* SKL */ crystal_hz = 24000000; /* 24.0 MHz */ break; @@ -5190,13 +5427,13 @@ void process_cpuid() break; default: crystal_hz = 0; - } + } if (crystal_hz) { - tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal; + tsc_hz = (unsigned long long)crystal_hz *ebx_tsc / eax_crystal; if (!quiet) fprintf(outf, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n", - tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal); + tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal); } } } @@ -5265,7 +5502,7 @@ void process_cpuid() BIC_NOT_PRESENT(BIC_Pkgpc7); use_c1_residency_msr = 1; } - if (is_skx(family, model)) { + if (is_skx(family, model) || is_icx(family, model)) { BIC_NOT_PRESENT(BIC_CPU_c3); BIC_NOT_PRESENT(BIC_Pkgpc3); BIC_NOT_PRESENT(BIC_CPU_c7); @@ -5291,10 +5528,9 @@ void process_cpuid() BIC_PRESENT(BIC_CPUGFX); } do_slm_cstates = is_slm(family, model); - do_knl_cstates = is_knl(family, model); + do_knl_cstates = is_knl(family, model); - if (do_slm_cstates || do_knl_cstates || is_cnl(family, model) || - is_ehl(family, model)) + if (do_slm_cstates || do_knl_cstates || is_cnl(family, model) || is_ehl(family, model)) BIC_NOT_PRESENT(BIC_CPU_c3); if (!quiet) @@ -5307,6 +5543,8 @@ void process_cpuid() perf_limit_reasons_probe(family, model); automatic_cstate_conversion_probe(family, model); + check_tcc_offset(model_orig); + if (!quiet) dump_cstate_pstate_config_info(family, model); @@ -5317,7 +5555,7 @@ void process_cpuid() if (!quiet) dump_sysfs_pstate_config(); - if (has_skl_msrs(family, model)) + if (has_skl_msrs(family, model) || is_ehl(family, model)) calculate_tsc_tweak(); if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK)) @@ -5386,7 +5624,7 @@ void topology_probe() if (debug > 1) fprintf(outf, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num); - cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology)); + cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology)); if (cpus == NULL) err(1, "calloc cpus"); @@ -5465,22 +5703,19 @@ void topology_probe() topo.cores_per_node = max_core_id + 1; if (debug > 1) - fprintf(outf, "max_core_id %d, sizing for %d cores per package\n", - max_core_id, topo.cores_per_node); + fprintf(outf, "max_core_id %d, sizing for %d cores per package\n", max_core_id, topo.cores_per_node); if (!summary_only && topo.cores_per_node > 1) BIC_PRESENT(BIC_Core); topo.num_die = max_die_id + 1; if (debug > 1) - fprintf(outf, "max_die_id %d, sizing for %d die\n", - max_die_id, topo.num_die); + fprintf(outf, "max_die_id %d, sizing for %d die\n", max_die_id, topo.num_die); if (!summary_only && topo.num_die > 1) BIC_PRESENT(BIC_Die); topo.num_packages = max_package_id + 1; if (debug > 1) - fprintf(outf, "max_package_id %d, sizing for %d packages\n", - max_package_id, topo.num_packages); + fprintf(outf, "max_package_id %d, sizing for %d packages\n", max_package_id, topo.num_packages); if (!summary_only && topo.num_packages > 1) BIC_PRESENT(BIC_Package); @@ -5503,21 +5738,15 @@ void topology_probe() fprintf(outf, "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n", i, cpus[i].physical_package_id, cpus[i].die_id, - cpus[i].physical_node_id, - cpus[i].logical_node_id, - cpus[i].physical_core_id, - cpus[i].thread_id); + cpus[i].physical_node_id, 
cpus[i].logical_node_id, cpus[i].physical_core_id, cpus[i].thread_id); } } -void -allocate_counters(struct thread_data **t, struct core_data **c, - struct pkg_data **p) +void allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p) { int i; - int num_cores = topo.cores_per_node * topo.nodes_per_pkg * - topo.num_packages; + int num_cores = topo.cores_per_node * topo.nodes_per_pkg * topo.num_packages; int num_threads = topo.threads_per_core * num_cores; *t = calloc(num_threads, sizeof(struct thread_data)); @@ -5545,13 +5774,13 @@ allocate_counters(struct thread_data **t, struct core_data **c, error: err(1, "calloc counters"); } + /* * init_counter() * * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE */ -void init_counter(struct thread_data *thread_base, struct core_data *core_base, - struct pkg_data *pkg_base, int cpu_id) +void init_counter(struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base, int cpu_id) { int pkg_id = cpus[cpu_id].physical_package_id; int node_id = cpus[cpu_id].logical_node_id; @@ -5561,7 +5790,6 @@ void init_counter(struct thread_data *thread_base, struct core_data *core_base, struct core_data *c; struct pkg_data *p; - /* Workaround for systems where physical_node_id==-1 * and logical_node_id==(-1 - topo.num_cpus) */ @@ -5583,7 +5811,6 @@ void init_counter(struct thread_data *thread_base, struct core_data *core_base, p->package_id = pkg_id; } - int initialize_counters(int cpu_id) { init_counter(EVEN_COUNTERS, cpu_id); @@ -5598,12 +5825,14 @@ void allocate_output_buffer() if (outp == NULL) err(-1, "calloc output buffer"); } + void allocate_fd_percpu(void) { fd_percpu = calloc(topo.max_cpu_num + 1, sizeof(int)); if (fd_percpu == NULL) err(-1, "calloc fd_percpu"); } + void allocate_irq_buffers(void) { irq_column_2_cpu = calloc(topo.num_cpus, sizeof(int)); @@ -5614,6 +5843,7 @@ void allocate_irq_buffers(void) if (irqs_per_cpu == NULL) err(-1, "calloc %d", topo.max_cpu_num + 1); } + void setup_all_buffers(void) { topology_probe(); @@ -5642,7 +5872,7 @@ void turbostat_init() check_dev_msr(); check_permissions(); process_cpuid(); - + linux_perf_init(); if (!quiet) for_all_cpus(print_hwp, ODD_COUNTERS); @@ -5658,6 +5888,9 @@ void turbostat_init() for_all_cpus(set_temperature_target, ODD_COUNTERS); + for_all_cpus(get_cpu_type, ODD_COUNTERS); + for_all_cpus(get_cpu_type, EVEN_COUNTERS); + if (!quiet) for_all_cpus(print_thermal, ODD_COUNTERS); @@ -5713,7 +5946,7 @@ int fork_it(char **argv) format_all_counters(EVEN_COUNTERS); } - fprintf(outf, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0); + fprintf(outf, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec / 1000000.0); flush_output_stderr(); @@ -5738,14 +5971,14 @@ int get_and_dump_counters(void) return status; } -void print_version() { - fprintf(outf, "turbostat version 20.09.30" - " - Len Brown <[email protected]>\n"); +void print_version() +{ + fprintf(outf, "turbostat version 21.05.04" " - Len Brown <[email protected]>\n"); } int add_counter(unsigned int msr_num, char *path, char *name, - unsigned int width, enum counter_scope scope, - enum counter_type type, enum counter_format format, int flags) + unsigned int width, enum counter_scope scope, + enum counter_type type, enum counter_format format, int flags) { struct msr_counter *msrp; @@ -5771,8 +6004,7 @@ int add_counter(unsigned int msr_num, char *path, char *name, sys.tp = msrp; sys.added_thread_counters++; if (sys.added_thread_counters > MAX_ADDED_THREAD_COUNTERS) { - fprintf(stderr, "exceeded max 
%d added thread counters\n", - MAX_ADDED_COUNTERS); + fprintf(stderr, "exceeded max %d added thread counters\n", MAX_ADDED_COUNTERS); exit(-1); } break; @@ -5782,8 +6014,7 @@ int add_counter(unsigned int msr_num, char *path, char *name, sys.cp = msrp; sys.added_core_counters++; if (sys.added_core_counters > MAX_ADDED_COUNTERS) { - fprintf(stderr, "exceeded max %d added core counters\n", - MAX_ADDED_COUNTERS); + fprintf(stderr, "exceeded max %d added core counters\n", MAX_ADDED_COUNTERS); exit(-1); } break; @@ -5793,8 +6024,7 @@ int add_counter(unsigned int msr_num, char *path, char *name, sys.pp = msrp; sys.added_package_counters++; if (sys.added_package_counters > MAX_ADDED_COUNTERS) { - fprintf(stderr, "exceeded max %d added package counters\n", - MAX_ADDED_COUNTERS); + fprintf(stderr, "exceeded max %d added package counters\n", MAX_ADDED_COUNTERS); exit(-1); } break; @@ -5931,15 +6161,14 @@ void probe_sysfs(void) for (state = 10; state >= 0; --state) { - sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", - base_cpu, state); + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", base_cpu, state); input = fopen(path, "r"); if (input == NULL) continue; if (!fgets(name_buf, sizeof(name_buf), input)) err(1, "%s: failed to read file", path); - /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ + /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ sp = strchr(name_buf, '-'); if (!sp) sp = strchrnul(name_buf, '\n'); @@ -5955,20 +6184,18 @@ void probe_sysfs(void) if (is_deferred_skip(name_buf)) continue; - add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_USEC, - FORMAT_PERCENT, SYSFS_PERCPU); + add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_USEC, FORMAT_PERCENT, SYSFS_PERCPU); } for (state = 10; state >= 0; --state) { - sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", - base_cpu, state); + sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", base_cpu, state); input = fopen(path, "r"); if (input == NULL) continue; if (!fgets(name_buf, sizeof(name_buf), input)) err(1, "%s: failed to read file", path); - /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ + /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ sp = strchr(name_buf, '-'); if (!sp) sp = strchrnul(name_buf, '\n'); @@ -5982,13 +6209,11 @@ void probe_sysfs(void) if (is_deferred_skip(name_buf)) continue; - add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS, - FORMAT_DELTA, SYSFS_PERCPU); + add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS, FORMAT_DELTA, SYSFS_PERCPU); } } - /* * parse cpuset with following syntax * 1,2,4..6,8-10 and set bits in cpu_subset @@ -6075,36 +6300,35 @@ error: exit(-1); } - void cmdline(int argc, char **argv) { int opt; int option_index = 0; static struct option long_options[] = { - {"add", required_argument, 0, 'a'}, - {"cpu", required_argument, 0, 'c'}, - {"Dump", no_argument, 0, 'D'}, - {"debug", no_argument, 0, 'd'}, /* internal, not documented */ - {"enable", required_argument, 0, 'e'}, - {"interval", required_argument, 0, 'i'}, - {"num_iterations", required_argument, 0, 'n'}, - {"help", no_argument, 0, 'h'}, - {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help - {"Joules", no_argument, 0, 'J'}, - {"list", no_argument, 0, 'l'}, - {"out", required_argument, 0, 'o'}, - {"quiet", no_argument, 0, 'q'}, - {"show", required_argument, 0, 's'}, - {"Summary", no_argument, 0, 'S'}, - {"TCC", required_argument, 0, 'T'}, - {"version", no_argument, 0, 'v' }, - {0, 0, 0, 0 } 
+ { "add", required_argument, 0, 'a' }, + { "cpu", required_argument, 0, 'c' }, + { "Dump", no_argument, 0, 'D' }, + { "debug", no_argument, 0, 'd' }, /* internal, not documented */ + { "enable", required_argument, 0, 'e' }, + { "interval", required_argument, 0, 'i' }, + { "IPC", no_argument, 0, 'I' }, + { "num_iterations", required_argument, 0, 'n' }, + { "help", no_argument, 0, 'h' }, + { "hide", required_argument, 0, 'H' }, // meh, -h taken by --help + { "Joules", no_argument, 0, 'J' }, + { "list", no_argument, 0, 'l' }, + { "out", required_argument, 0, 'o' }, + { "quiet", no_argument, 0, 'q' }, + { "show", required_argument, 0, 's' }, + { "Summary", no_argument, 0, 'S' }, + { "TCC", required_argument, 0, 'T' }, + { "version", no_argument, 0, 'v' }, + { 0, 0, 0, 0 } }; progname = argv[0]; - while ((opt = getopt_long_only(argc, argv, "+C:c:Dde:hi:Jn:o:qST:v", - long_options, &option_index)) != -1) { + while ((opt = getopt_long_only(argc, argv, "+C:c:Dde:hi:Jn:o:qST:v", long_options, &option_index)) != -1) { switch (opt) { case 'a': parse_add_command(optarg); @@ -6139,8 +6363,7 @@ void cmdline(int argc, char **argv) double interval = strtod(optarg, NULL); if (interval < 0.001) { - fprintf(outf, "interval %f seconds is too small\n", - interval); + fprintf(outf, "interval %f seconds is too small\n", interval); exit(2); } @@ -6167,8 +6390,7 @@ void cmdline(int argc, char **argv) num_iterations = strtod(optarg, NULL); if (num_iterations <= 0) { - fprintf(outf, "iterations %d should be positive number\n", - num_iterations); + fprintf(outf, "iterations %d should be positive number\n", num_iterations); exit(2); } break; @@ -6188,7 +6410,7 @@ void cmdline(int argc, char **argv) summary_only++; break; case 'T': - tcc_activation_temp_override = atoi(optarg); + tj_max_override = atoi(optarg); break; case 'v': print_version(); diff --git a/tools/testing/ktest/examples/vmware.conf b/tools/testing/ktest/examples/vmware.conf new file mode 100644 index 000000000000..61958163d242 --- /dev/null +++ b/tools/testing/ktest/examples/vmware.conf @@ -0,0 +1,137 @@ +# +# This config is an example usage of ktest.pl with a vmware guest +# +# VMware Setup: +# ------------- +# - Edit the Virtual Machine ("Edit virtual machine settings") +# - Add a Serial Port +# - You almost certainly want it set "Connect at power on" +# - Select "Use socket (named pipe)" +# - Select a name that you'll recognize, like 'ktestserialpipe' +# - From: Server +# - To: A Virtual Machine +# - Save +# - Make sure you note the name, it will be in the base directory of the +# virtual machine (where the "disks" are stored. The default +# is /var/lib/vmware/<virtual machine name>/<the name you entered above> +# +# - Make note of the path to the VM +# </End VMware setup> +# +# The guest is called 'Guest' and this would be something that +# could be run on the host to test a virtual machine target. + +MACHINE = Guest + +# Name of the serial pipe you set in the VMware settings +VMWARE_SERIAL_NAME = <the name you entered above> + +# Define a variable of the name of the VM +# Noting this needs to be the name of the kmx file, and usually, the +# name of the directory that it's in. If the directory and name +# differ change the VMWARE_VM_DIR accordingly. +# Please ommit the .kmx extension +VMWARE_VM_NAME = <virtual machine name> + +# VM dir name. This is usually the same as the virtual machine's name, +# but not always the case. 
+VMWARE_VM_DIR = ${VMWARE_VM_NAME}
+
+# Base directory that the Virtual machine is contained in
+# /var/lib/vmware is the default on Linux
+VMWARE_VM_BASE_DIR = /var/lib/vmware/${VMWARE_VM_DIR}
+
+# Use ncat to read the unix pipe. Anything that can read the Unix Pipe
+# and output its contents to stdout will work (a minimal C reader is
+# sketched near the end of this file for hosts without ncat)
+CONSOLE = /usr/bin/ncat -U ${VMWARE_VM_BASE_DIR}/${VMWARE_SERIAL_NAME}
+
+# Define what version of Workstation you are using
+# This is used by vmrun to pick the appropriate pieces to
+# test this. In all likelihood you want 'ws' or 'player'
+# Valid options:
+#	ws - Workstation (Windows or Linux host)
+#	fusion - Fusion (Mac host)
+#	player - Using VMware Player (Windows or Linux host)
+# Note: vmrun has to run directly on the host machine
+VMWARE_HOST_TYPE = ws
+
+# VMware provides `vmrun` to allow you to do certain things to the virtual machine
+# This should hard reset the VM and force a boot
+VMWARE_POWER_CYCLE = /usr/bin/vmrun -T ${VMWARE_HOST_TYPE} reset ${VMWARE_VM_BASE_DIR}/${VMWARE_VM_NAME}.vmx nogui
+
+#*************************************#
+# This part is the same as test.conf  #
+#*************************************#
+
+# The include files will set up the type of test to run. Just set TEST to
+# which test you want to run.
+#
+# TESTS = patchcheck, randconfig, boot, test, config-bisect, bisect, min-config
+#
+# See the include/*.conf files that define these tests
+#
+TEST := patchcheck
+
+# Some tests may have more than one test to run. Define MULTI := 1 to run
+# the extra tests.
+MULTI := 0
+
+# In case you want to differentiate which type of system you are testing
+BITS := 64
+
+# REBOOT = none, error, fail, empty
+# See include/defaults.conf
+REBOOT := empty
+
+
+# The defaults file will set up various settings that can be used by all
+# machine configs.
+INCLUDE include/defaults.conf
+
+
+#*************************************#
+# Now we are different from test.conf #
+#*************************************#
+
+
+# The example here assumes that Guest is running a Fedora release
+# that uses dracut for its initramfs. The POST_INSTALL will be executed
+# after the install of the kernel and modules is complete.
+#
+POST_INSTALL = ${SSH} /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
+
+# Guests sometimes get stuck on reboot. We wait 3 seconds after running
+# the reboot command and then do a full power-cycle of the guest.
+# This forces the guest to restart.
+#
+POWERCYCLE_AFTER_REBOOT = 3
+
+# We do the same after the halt command, but this time we wait 20 seconds.
+POWEROFF_AFTER_HALT = 20
+
+
+# As the defaults.conf file already has a POWER_CYCLE option defined,
+# and options can not be defined in the same section more than once
+# (all DEFAULTS sections are considered the same), we use the
+# DEFAULTS OVERRIDE section to tell ktest.pl to ignore the previously
+# defined options in favor of the options set in the OVERRIDE section.
+#
+DEFAULTS OVERRIDE
+
+# Instead of using the default POWER_CYCLE option defined in
+# defaults.conf, we use vmrun (via VMWARE_POWER_CYCLE above) to hard
+# reset the guest and force it to boot.
+# Crude, but effective.
+#
+POWER_CYCLE = ${VMWARE_POWER_CYCLE}
+
+
+DEFAULTS
+
+# The following files each handle a different test case.
+# Having them included allows you to set up more than one machine and share
+# the same tests. (The INCLUDE lines follow the console-reader sketch below.)
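This is the console-reader sketch referenced in the CONSOLE comment above. Anything that connects to the VM's serial socket and streams it to stdout will do; if ncat is unavailable, a few lines of C cover the same job. This is an illustrative sketch, not part of ktest; pass it the path that ${VMWARE_VM_BASE_DIR}/${VMWARE_SERIAL_NAME} resolves to:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(int argc, char **argv)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	char buf[4096];
	ssize_t n;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <serial-pipe-path>\n", argv[0]);
		return 1;
	}
	strncpy(addr.sun_path, argv[1], sizeof(addr.sun_path) - 1);

	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0 || connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror(argv[1]);
		return 1;
	}

	/* stream the guest console until the VM closes the socket */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		if (write(STDOUT_FILENO, buf, n) != n)
			break;
	close(fd);
	return 0;
}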
+INCLUDE include/patchcheck.conf +INCLUDE include/tests.conf +INCLUDE include/bisect.conf +INCLUDE include/min-config.conf diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 4e2450964517..09d1578f9d66 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -24,7 +24,7 @@ my %evals; #default opts my %default = ( - "MAILER" => "sendmail", # default mailer + "MAILER" => "sendmail", # default mailer "EMAIL_ON_ERROR" => 1, "EMAIL_WHEN_FINISHED" => 1, "EMAIL_WHEN_CANCELED" => 0, @@ -36,15 +36,15 @@ my %default = ( "CLOSE_CONSOLE_SIGNAL" => "INT", "TIMEOUT" => 120, "TMP_DIR" => "/tmp/ktest/\${MACHINE}", - "SLEEP_TIME" => 60, # sleep time between tests + "SLEEP_TIME" => 60, # sleep time between tests "BUILD_NOCLEAN" => 0, "REBOOT_ON_ERROR" => 0, "POWEROFF_ON_ERROR" => 0, "REBOOT_ON_SUCCESS" => 1, "POWEROFF_ON_SUCCESS" => 0, "BUILD_OPTIONS" => "", - "BISECT_SLEEP_TIME" => 60, # sleep time between bisects - "PATCHCHECK_SLEEP_TIME" => 60, # sleep time between patch checks + "BISECT_SLEEP_TIME" => 60, # sleep time between bisects + "PATCHCHECK_SLEEP_TIME" => 60, # sleep time between patch checks "CLEAR_LOG" => 0, "BISECT_MANUAL" => 0, "BISECT_SKIP" => 1, @@ -512,6 +512,69 @@ $config_help{"REBOOT_SCRIPT"} = << "EOF" EOF ; +# used with process_expression() +my $d = 0; + +# defined before get_test_name() +my $in_die = 0; + +# defined before process_warning_line() +my $check_build_re = ".*:.*(warning|error|Error):.*"; +my $utf8_quote = "\\x{e2}\\x{80}(\\x{98}|\\x{99})"; + +# defined before child_finished() +my $child_done; + +# config_ignore holds the configs that were set (or unset) for +# a good config and we will ignore these configs for the rest +# of a config bisect. These configs stay as they were. +my %config_ignore; + +# config_set holds what all configs were set as. +my %config_set; + +# config_off holds the set of configs that the bad config had disabled. +# We need to record them and set them in the .config when running +# olddefconfig, because olddefconfig keeps the defaults. 
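# ("make olddefconfig" answers every symbol missing from the .config with
# its Kconfig default, so an option the bad config had explicitly turned
# off would silently come back on unless it is written out here first.)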
+my %config_off; + +# config_off_tmp holds a set of configs to turn off for now +my @config_off_tmp; + +# config_list is the set of configs that are being tested +my %config_list; +my %null_config; + +my %dependency; + +# found above run_config_bisect() +my $pass = 1; + +# found above add_dep() + +my %depends; +my %depcount; +my $iflevel = 0; +my @ifdeps; + +# prevent recursion +my %read_kconfigs; + +# found above test_this_config() +my %min_configs; +my %keep_configs; +my %save_configs; +my %processed_configs; +my %nochange_config; + +# +# These are first defined here, main function later on +# +sub run_command; +sub start_monitor; +sub end_monitor; +sub wait_for_monitor; + sub _logit { if (defined($opt{"LOG_FILE"})) { print LOG @_; @@ -537,7 +600,7 @@ sub read_prompt { my $ans; for (;;) { - if ($cancel) { + if ($cancel) { print "$prompt [y/n/C] "; } else { print "$prompt [Y/n] "; @@ -760,7 +823,7 @@ sub process_variables { # remove the space added in the beginning $retval =~ s/ //; - return "$retval" + return "$retval"; } sub set_value { @@ -863,7 +926,6 @@ sub value_defined { defined($opt{$2}); } -my $d = 0; sub process_expression { my ($name, $val) = @_; @@ -978,7 +1040,6 @@ sub __read_config { $override = 0; if ($type eq "TEST_START") { - if ($num_tests_set) { die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n"; } @@ -1048,7 +1109,6 @@ sub __read_config { $test_num = $old_test_num; $repeat = $old_repeat; } - } elsif (/^\s*ELSE\b(.*)$/) { if (!$if) { die "$name: $.: ELSE found with out matching IF section\n$_"; @@ -1095,7 +1155,7 @@ sub __read_config { } } } - + if ( ! -r $file ) { die "$name: $.: Can't read file $file\n$_"; } @@ -1186,13 +1246,13 @@ sub __read_config { } sub get_test_case { - print "What test case would you like to run?\n"; - print " (build, install or boot)\n"; - print " Other tests are available but require editing ktest.conf\n"; - print " (see tools/testing/ktest/sample.conf)\n"; - my $ans = <STDIN>; - chomp $ans; - $default{"TEST_TYPE"} = $ans; + print "What test case would you like to run?\n"; + print " (build, install or boot)\n"; + print " Other tests are available but require editing ktest.conf\n"; + print " (see tools/testing/ktest/sample.conf)\n"; + my $ans = <STDIN>; + chomp $ans; + $default{"TEST_TYPE"} = $ans; } sub read_config { @@ -1368,11 +1428,6 @@ sub eval_option { return $option; } -sub run_command; -sub start_monitor; -sub end_monitor; -sub wait_for_monitor; - sub reboot { my ($time) = @_; my $powercycle = 0; @@ -1457,8 +1512,6 @@ sub do_not_reboot { ($test_type eq "config_bisect" && $opt{"CONFIG_BISECT_TYPE[$i]"} eq "build"); } -my $in_die = 0; - sub get_test_name() { my $name; @@ -1471,7 +1524,6 @@ sub get_test_name() { } sub dodie { - # avoid recursion return if ($in_die); $in_die = 1; @@ -1481,10 +1533,8 @@ sub dodie { doprint "CRITICAL FAILURE... 
[TEST $i] ", @_, "\n"; if ($reboot_on_error && !do_not_reboot) { - doprint "REBOOTING\n"; reboot_to_good; - } elsif ($poweroff_on_error && defined($power_off)) { doprint "POWERING OFF\n"; `$power_off`; @@ -1519,13 +1569,14 @@ sub dodie { close O; close L; } - send_email("KTEST: critical failure for test $i [$name]", - "Your test started at $script_start_time has failed with:\n@_\n", $log_file); + + send_email("KTEST: critical failure for test $i [$name]", + "Your test started at $script_start_time has failed with:\n@_\n", $log_file); } if ($monitor_cnt) { - # restore terminal settings - system("stty $stty_orig"); + # restore terminal settings + system("stty $stty_orig"); } if (defined($post_test)) { @@ -1709,81 +1760,81 @@ sub wait_for_monitor { } sub save_logs { - my ($result, $basedir) = @_; - my @t = localtime; - my $date = sprintf "%04d%02d%02d%02d%02d%02d", - 1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0]; + my ($result, $basedir) = @_; + my @t = localtime; + my $date = sprintf "%04d%02d%02d%02d%02d%02d", + 1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0]; - my $type = $build_type; - if ($type =~ /useconfig/) { - $type = "useconfig"; - } + my $type = $build_type; + if ($type =~ /useconfig/) { + $type = "useconfig"; + } - my $dir = "$machine-$test_type-$type-$result-$date"; + my $dir = "$machine-$test_type-$type-$result-$date"; - $dir = "$basedir/$dir"; + $dir = "$basedir/$dir"; - if (!-d $dir) { - mkpath($dir) or - dodie "can't create $dir"; - } + if (!-d $dir) { + mkpath($dir) or + dodie "can't create $dir"; + } - my %files = ( - "config" => $output_config, - "buildlog" => $buildlog, - "dmesg" => $dmesg, - "testlog" => $testlog, - ); + my %files = ( + "config" => $output_config, + "buildlog" => $buildlog, + "dmesg" => $dmesg, + "testlog" => $testlog, + ); - while (my ($name, $source) = each(%files)) { - if (-f "$source") { - cp "$source", "$dir/$name" or - dodie "failed to copy $source"; - } + while (my ($name, $source) = each(%files)) { + if (-f "$source") { + cp "$source", "$dir/$name" or + dodie "failed to copy $source"; } + } - doprint "*** Saved info to $dir ***\n"; + doprint "*** Saved info to $dir ***\n"; } sub fail { - if ($die_on_failure) { - dodie @_; - } + if ($die_on_failure) { + dodie @_; + } - doprint "FAILED\n"; + doprint "FAILED\n"; - my $i = $iteration; + my $i = $iteration; - # no need to reboot for just building. - if (!do_not_reboot) { - doprint "REBOOTING\n"; - reboot_to_good $sleep_time; - } + # no need to reboot for just building. 
+ if (!do_not_reboot) { + doprint "REBOOTING\n"; + reboot_to_good $sleep_time; + } - my $name = ""; + my $name = ""; - if (defined($test_name)) { - $name = " ($test_name)"; - } + if (defined($test_name)) { + $name = " ($test_name)"; + } - print_times; + print_times; - doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; - doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; - doprint "KTEST RESULT: TEST $i$name Failed: ", @_, "\n"; - doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; - doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; + doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; + doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; + doprint "KTEST RESULT: TEST $i$name Failed: ", @_, "\n"; + doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; + doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; - if (defined($store_failures)) { - save_logs "fail", $store_failures; - } + if (defined($store_failures)) { + save_logs "fail", $store_failures; + } - if (defined($post_test)) { - run_command $post_test; - } + if (defined($post_test)) { + run_command $post_test; + } - return 1; + return 1; } sub run_command { @@ -1915,8 +1966,8 @@ sub _get_grub_index { my ($command, $target, $skip) = @_; return if (defined($grub_number) && defined($last_grub_menu) && - $last_grub_menu eq $grub_menu && defined($last_machine) && - $last_machine eq $machine); + $last_grub_menu eq $grub_menu && defined($last_machine) && + $last_machine eq $machine); doprint "Find $reboot_type menu ... "; $grub_number = -1; @@ -1924,8 +1975,8 @@ sub _get_grub_index { my $ssh_grub = $ssh_exec; $ssh_grub =~ s,\$SSH_COMMAND,$command,g; - open(IN, "$ssh_grub |") - or dodie "unable to execute $command"; + open(IN, "$ssh_grub |") or + dodie "unable to execute $command"; my $found = 0; @@ -1969,9 +2020,9 @@ sub get_grub_index { $target = '^menuentry.*' . $grub_menu_qt; $skip = '^menuentry\s|^submenu\s'; } elsif ($reboot_type eq "grub2bls") { - $command = $grub_bls_get; - $target = '^title=.*' . $grub_menu_qt; - $skip = '^title='; + $command = $grub_bls_get; + $target = '^title=.*' . 
$grub_menu_qt; + $skip = '^title='; } else { return; } @@ -1979,8 +2030,7 @@ sub get_grub_index { _get_grub_index($command, $target, $skip); } -sub wait_for_input -{ +sub wait_for_input { my ($fp, $time) = @_; my $start_time; my $rin; @@ -2096,7 +2146,6 @@ sub monitor { my $version_found = 0; while (!$done) { - if ($bug && defined($stop_after_failure) && $stop_after_failure >= 0) { my $time = $stop_after_failure - (time - $failure_start); @@ -2349,9 +2398,6 @@ sub start_monitor_and_install { return monitor; } -my $check_build_re = ".*:.*(warning|error|Error):.*"; -my $utf8_quote = "\\x{e2}\\x{80}(\\x{98}|\\x{99})"; - sub process_warning_line { my ($line) = @_; @@ -2394,7 +2440,7 @@ sub check_buildlog { while (<IN>) { if (/$check_build_re/) { my $warning = process_warning_line $_; - + $warnings_list{$warning} = 1; } } @@ -2571,7 +2617,6 @@ sub build { run_command "mv $outputdir/config_temp $output_config" or dodie "moving config_temp"; } - } elsif (!$noclean) { unlink "$output_config"; run_command "$make mrproper" or @@ -2594,6 +2639,9 @@ sub build { # Run old config regardless, to enforce min configurations make_oldconfig; + if (not defined($build_options)){ + $build_options = ""; + } my $build_ret = run_command "$make $build_options", $buildlog; if (defined($post_build)) { @@ -2649,14 +2697,15 @@ sub success { print_times; - doprint "\n\n*******************************************\n"; - doprint "*******************************************\n"; - doprint "KTEST RESULT: TEST $i$name SUCCESS!!!! **\n"; - doprint "*******************************************\n"; - doprint "*******************************************\n"; + doprint "\n\n"; + doprint "*******************************************\n"; + doprint "*******************************************\n"; + doprint "KTEST RESULT: TEST $i$name SUCCESS!!!! **\n"; + doprint "*******************************************\n"; + doprint "*******************************************\n"; if (defined($store_successes)) { - save_logs "success", $store_successes; + save_logs "success", $store_successes; } if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) { @@ -2698,8 +2747,6 @@ sub child_run_test { exit $run_command_status; } -my $child_done; - sub child_finished { $child_done = 1; } @@ -3031,7 +3078,6 @@ sub bisect { } if ($do_check) { - # get current HEAD my $head = get_sha1("HEAD"); @@ -3071,13 +3117,11 @@ sub bisect { run_command "git bisect replay $replay" or dodie "failed to run replay"; } else { - run_command "git bisect good $good" or dodie "could not set bisect good to $good"; run_git_bisect "git bisect bad $bad" or dodie "could not set bisect bad to $bad"; - } if (defined($start)) { @@ -3103,35 +3147,13 @@ sub bisect { success $i; } -# config_ignore holds the configs that were set (or unset) for -# a good config and we will ignore these configs for the rest -# of a config bisect. These configs stay as they were. -my %config_ignore; - -# config_set holds what all configs were set as. -my %config_set; - -# config_off holds the set of configs that the bad config had disabled. -# We need to record them and set them in the .config when running -# olddefconfig, because olddefconfig keeps the defaults. 
-my %config_off; - -# config_off_tmp holds a set of configs to turn off for now -my @config_off_tmp; - -# config_list is the set of configs that are being tested -my %config_list; -my %null_config; - -my %dependency; - sub assign_configs { my ($hash, $config) = @_; doprint "Reading configs from $config\n"; - open (IN, $config) - or dodie "Failed to read $config"; + open (IN, $config) or + dodie "Failed to read $config"; while (<IN>) { chomp; @@ -3219,8 +3241,6 @@ sub config_bisect_end { doprint "***************************************\n\n"; } -my $pass = 1; - sub run_config_bisect { my ($good, $bad, $last_result) = @_; my $reset = ""; @@ -3243,13 +3263,13 @@ sub run_config_bisect { $ret = run_config_bisect_test $config_bisect_type; if ($ret) { - doprint "NEW GOOD CONFIG ($pass)\n"; + doprint "NEW GOOD CONFIG ($pass)\n"; system("cp $output_config $tmpdir/good_config.tmp.$pass"); $pass++; # Return 3 for good config return 3; } else { - doprint "NEW BAD CONFIG ($pass)\n"; + doprint "NEW BAD CONFIG ($pass)\n"; system("cp $output_config $tmpdir/bad_config.tmp.$pass"); $pass++; # Return 4 for bad config @@ -3284,10 +3304,11 @@ sub config_bisect { if (!defined($config_bisect_exec)) { # First check the location that ktest.pl ran - my @locations = ( "$pwd/config-bisect.pl", - "$dirname/config-bisect.pl", - "$builddir/tools/testing/ktest/config-bisect.pl", - undef ); + my @locations = ( + "$pwd/config-bisect.pl", + "$dirname/config-bisect.pl", + "$builddir/tools/testing/ktest/config-bisect.pl", + undef ); foreach my $loc (@locations) { doprint "loc = $loc\n"; $config_bisect_exec = $loc; @@ -3368,7 +3389,7 @@ sub config_bisect { } while ($ret == 3 || $ret == 4); if ($ret == 2) { - config_bisect_end "$good_config.tmp", "$bad_config.tmp"; + config_bisect_end "$good_config.tmp", "$bad_config.tmp"; } return $ret if ($ret < 0); @@ -3511,14 +3532,6 @@ sub patchcheck { return 1; } -my %depends; -my %depcount; -my $iflevel = 0; -my @ifdeps; - -# prevent recursion -my %read_kconfigs; - sub add_dep { # $config depends on $dep my ($config, $dep) = @_; @@ -3548,7 +3561,6 @@ sub read_kconfig { my $cont = 0; my $line; - if (! -f $kconfig) { doprint "file $kconfig does not exist, skipping\n"; return; @@ -3630,8 +3642,8 @@ sub read_kconfig { sub read_depends { # find out which arch this is by the kconfig file - open (IN, $output_config) - or dodie "Failed to read $output_config"; + open (IN, $output_config) or + dodie "Failed to read $output_config"; my $arch; while (<IN>) { if (m,Linux/(\S+)\s+\S+\s+Kernel Configuration,) { @@ -3657,7 +3669,7 @@ sub read_depends { if (! -f $kconfig && $arch =~ /\d$/) { my $orig = $arch; - # some subarchs have numbers, truncate them + # some subarchs have numbers, truncate them $arch =~ s/\d*$//; $kconfig = "$builddir/arch/$arch/Kconfig"; if (! -f $kconfig) { @@ -3706,7 +3718,6 @@ sub get_depends { my @configs; while ($dep =~ /[$valid]/) { - if ($dep =~ /^[^$valid]*([$valid]+)/) { my $conf = "CONFIG_" . 
$1; @@ -3721,12 +3732,6 @@ sub get_depends { return @configs; } -my %min_configs; -my %keep_configs; -my %save_configs; -my %processed_configs; -my %nochange_config; - sub test_this_config { my ($config) = @_; @@ -3852,7 +3857,7 @@ sub make_min_config { foreach my $config (@config_keys) { my $kconfig = chomp_config $config; if (!defined $depcount{$kconfig}) { - $depcount{$kconfig} = 0; + $depcount{$kconfig} = 0; } } @@ -3887,7 +3892,6 @@ sub make_min_config { my $take_two = 0; while (!$done) { - my $config; my $found; @@ -3898,7 +3902,7 @@ sub make_min_config { # Sort keys by who is most dependent on @test_configs = sort { $depcount{chomp_config($b)} <=> $depcount{chomp_config($a)} } - @test_configs ; + @test_configs ; # Put configs that did not modify the config at the end. my $reset = 1; @@ -3954,13 +3958,13 @@ sub make_min_config { my $failed = 0; build "oldconfig" or $failed = 1; if (!$failed) { - start_monitor_and_install or $failed = 1; + start_monitor_and_install or $failed = 1; - if ($type eq "test" && !$failed) { - do_run_test or $failed = 1; - } + if ($type eq "test" && !$failed) { + do_run_test or $failed = 1; + } - end_monitor; + end_monitor; } $in_bisect = 0; @@ -3974,8 +3978,8 @@ sub make_min_config { # update new ignore configs if (defined($ignore_config)) { - open (OUT, ">$temp_config") - or dodie "Can't write to $temp_config"; + open (OUT, ">$temp_config") or + dodie "Can't write to $temp_config"; foreach my $config (keys %save_configs) { print OUT "$save_configs{$config}\n"; } @@ -4002,8 +4006,8 @@ sub make_min_config { } # Save off all the current mandatory configs - open (OUT, ">$temp_config") - or dodie "Can't write to $temp_config"; + open (OUT, ">$temp_config") or + dodie "Can't write to $temp_config"; foreach my $config (keys %keep_configs) { print OUT "$keep_configs{$config}\n"; } @@ -4041,7 +4045,6 @@ sub make_warnings_file { open(IN, $buildlog) or dodie "Can't open $buildlog"; while (<IN>) { - # Some compilers use UTF-8 extended for quotes # for distcc heterogeneous systems, this causes issues s/$utf8_quote/'/g; @@ -4057,98 +4060,6 @@ sub make_warnings_file { success $i; } -$#ARGV < 1 or die "ktest.pl version: $VERSION\n usage: ktest.pl [config-file]\n"; - -if ($#ARGV == 0) { - $ktest_config = $ARGV[0]; - if (! -f $ktest_config) { - print "$ktest_config does not exist.\n"; - if (!read_yn "Create it?") { - exit 0; - } - } -} - -if (! -f $ktest_config) { - $newconfig = 1; - get_test_case; - open(OUT, ">$ktest_config") or die "Can not create $ktest_config"; - print OUT << "EOF" -# Generated by ktest.pl -# - -# PWD is a ktest.pl variable that will result in the process working -# directory that ktest.pl is executed in. - -# THIS_DIR is automatically assigned the PWD of the path that generated -# the config file. It is best to use this variable when assigning other -# directory paths within this directory. This allows you to easily -# move the test cases to other locations or to other machines. -# -THIS_DIR := $variable{"PWD"} - -# Define each test with TEST_START -# The config options below it will override the defaults -TEST_START -TEST_TYPE = $default{"TEST_TYPE"} - -DEFAULTS -EOF -; - close(OUT); -} -read_config $ktest_config; - -if (defined($opt{"LOG_FILE"})) { - $opt{"LOG_FILE"} = eval_option("LOG_FILE", $opt{"LOG_FILE"}, -1); -} - -# Append any configs entered in manually to the config file. 
-my @new_configs = keys %entered_configs; -if ($#new_configs >= 0) { - print "\nAppending entered in configs to $ktest_config\n"; - open(OUT, ">>$ktest_config") or die "Can not append to $ktest_config"; - foreach my $config (@new_configs) { - print OUT "$config = $entered_configs{$config}\n"; - $opt{$config} = process_variables($entered_configs{$config}); - } -} - -if (defined($opt{"LOG_FILE"})) { - if ($opt{"CLEAR_LOG"}) { - unlink $opt{"LOG_FILE"}; - } - open(LOG, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}"; - LOG->autoflush(1); -} - -doprint "\n\nSTARTING AUTOMATED TESTS\n\n"; - -for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) { - - if (!$i) { - doprint "DEFAULT OPTIONS:\n"; - } else { - doprint "\nTEST $i OPTIONS"; - if (defined($repeat_tests{$i})) { - $repeat = $repeat_tests{$i}; - doprint " ITERATE $repeat"; - } - doprint "\n"; - } - - foreach my $option (sort keys %opt) { - - if ($option =~ /\[(\d+)\]$/) { - next if ($i != $1); - } else { - next if ($i); - } - - doprint "$option = $opt{$option}\n"; - } -} - sub option_defined { my ($option) = @_; @@ -4261,7 +4172,6 @@ sub do_send_mail { } sub send_email { - if (defined($mailto)) { if (!defined($mailer)) { doprint "No email sent: email or mailer not specified in config.\n"; @@ -4274,12 +4184,103 @@ sub send_email { sub cancel_test { if ($email_when_canceled) { my $name = get_test_name; - send_email("KTEST: Your [$name] test was cancelled", - "Your test started at $script_start_time was cancelled: sig int"); + send_email("KTEST: Your [$name] test was cancelled", + "Your test started at $script_start_time was cancelled: sig int"); } die "\nCaught Sig Int, test interrupted: $!\n" } +$#ARGV < 1 or die "ktest.pl version: $VERSION\n usage: ktest.pl [config-file]\n"; + +if ($#ARGV == 0) { + $ktest_config = $ARGV[0]; + if (! -f $ktest_config) { + print "$ktest_config does not exist.\n"; + if (!read_yn "Create it?") { + exit 0; + } + } +} + +if (! -f $ktest_config) { + $newconfig = 1; + get_test_case; + open(OUT, ">$ktest_config") or die "Can not create $ktest_config"; + print OUT << "EOF" +# Generated by ktest.pl +# + +# PWD is a ktest.pl variable that will result in the process working +# directory that ktest.pl is executed in. + +# THIS_DIR is automatically assigned the PWD of the path that generated +# the config file. It is best to use this variable when assigning other +# directory paths within this directory. This allows you to easily +# move the test cases to other locations or to other machines. +# +THIS_DIR := $variable{"PWD"} + +# Define each test with TEST_START +# The config options below it will override the defaults +TEST_START +TEST_TYPE = $default{"TEST_TYPE"} + +DEFAULTS +EOF +; + close(OUT); +} +read_config $ktest_config; + +if (defined($opt{"LOG_FILE"})) { + $opt{"LOG_FILE"} = eval_option("LOG_FILE", $opt{"LOG_FILE"}, -1); +} + +# Append any configs entered in manually to the config file. 
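# (%entered_configs holds the values the user typed at the interactive
# prompts for mandatory options that were missing from the config file)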
+my @new_configs = keys %entered_configs; +if ($#new_configs >= 0) { + print "\nAppending entered in configs to $ktest_config\n"; + open(OUT, ">>$ktest_config") or die "Can not append to $ktest_config"; + foreach my $config (@new_configs) { + print OUT "$config = $entered_configs{$config}\n"; + $opt{$config} = process_variables($entered_configs{$config}); + } +} + +if (defined($opt{"LOG_FILE"})) { + if ($opt{"CLEAR_LOG"}) { + unlink $opt{"LOG_FILE"}; + } + open(LOG, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}"; + LOG->autoflush(1); +} + +doprint "\n\nSTARTING AUTOMATED TESTS\n\n"; + +for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) { + + if (!$i) { + doprint "DEFAULT OPTIONS:\n"; + } else { + doprint "\nTEST $i OPTIONS"; + if (defined($repeat_tests{$i})) { + $repeat = $repeat_tests{$i}; + doprint " ITERATE $repeat"; + } + doprint "\n"; + } + + foreach my $option (sort keys %opt) { + if ($option =~ /\[(\d+)\]$/) { + next if ($i != $1); + } else { + next if ($i); + } + + doprint "$option = $opt{$option}\n"; + } +} + $SIG{INT} = qw(cancel_test); # First we need to do is the builds @@ -4323,15 +4324,15 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { # The first test may override the PRE_KTEST option if ($i == 1) { - if (defined($pre_ktest)) { - doprint "\n"; - run_command $pre_ktest; - } - if ($email_when_started) { + if (defined($pre_ktest)) { + doprint "\n"; + run_command $pre_ktest; + } + if ($email_when_started) { my $name = get_test_name; - send_email("KTEST: Your [$name] test was started", - "Your test was started on $script_start_time"); - } + send_email("KTEST: Your [$name] test was started", + "Your test was started on $script_start_time"); + } } # Any test can override the POST_KTEST option @@ -4409,7 +4410,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { my $ret = run_command $pre_test; if (!$ret && defined($pre_test_die) && $pre_test_die) { - dodie "failed to pre_test\n"; + dodie "failed to pre_test\n"; } } @@ -4503,12 +4504,11 @@ if ($opt{"POWEROFF_ON_SUCCESS"}) { run_command $switch_to_good; } - doprint "\n $successes of $opt{NUM_TESTS} tests were successful\n\n"; if ($email_when_finished) { send_email("KTEST: Your test has finished!", - "$successes of $opt{NUM_TESTS} tests started at $script_start_time were successful!"); + "$successes of $opt{NUM_TESTS} tests started at $script_start_time were successful!"); } if (defined($opt{"LOG_FILE"})) { @@ -4517,3 +4517,12 @@ if (defined($opt{"LOG_FILE"})) { } exit 0; + +## +# The following are here to standardize tabs/spaces/etc across the most likely editors +### + +# Local Variables: +# mode: perl +# End: +# vim: softtabstop=4 diff --git a/tools/testing/selftests/dma/dma_map_benchmark.c b/tools/testing/selftests/dma/dma_map_benchmark.c index fb23ce9617ea..485dff51bad2 100644 --- a/tools/testing/selftests/dma/dma_map_benchmark.c +++ b/tools/testing/selftests/dma/dma_map_benchmark.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2020 Hisilicon Limited. + * Copyright (C) 2020 HiSilicon Limited. 
*/ #include <fcntl.h> @@ -40,7 +40,8 @@ struct map_benchmark { __u32 dma_bits; /* DMA addressing capability */ __u32 dma_dir; /* DMA data direction */ __u32 dma_trans_ns; /* time for DMA transmission in ns */ - __u8 expansion[80]; /* For future use */ + __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */ + __u8 expansion[76]; /* For future use */ }; int main(int argc, char **argv) @@ -51,11 +52,13 @@ int main(int argc, char **argv) int threads = 1, seconds = 20, node = -1; /* default dma mask 32bit, bidirectional DMA */ int bits = 32, xdelay = 0, dir = DMA_MAP_BIDIRECTIONAL; + /* default granule 1 PAGESIZE */ + int granule = 1; int cmd = DMA_MAP_BENCHMARK; char *p; - while ((opt = getopt(argc, argv, "t:s:n:b:d:x:")) != -1) { + while ((opt = getopt(argc, argv, "t:s:n:b:d:x:g:")) != -1) { switch (opt) { case 't': threads = atoi(optarg); @@ -75,6 +78,9 @@ int main(int argc, char **argv) case 'x': xdelay = atoi(optarg); break; + case 'g': + granule = atoi(optarg); + break; default: return -1; } @@ -110,6 +116,11 @@ int main(int argc, char **argv) exit(1); } + if (granule < 1 || granule > 1024) { + fprintf(stderr, "invalid granule size\n"); + exit(1); + } + fd = open("/sys/kernel/debug/dma_map_benchmark", O_RDWR); if (fd == -1) { perror("open"); @@ -123,14 +134,15 @@ int main(int argc, char **argv) map.dma_bits = bits; map.dma_dir = dir; map.dma_trans_ns = xdelay; + map.granule = granule; if (ioctl(fd, cmd, &map)) { perror("ioctl"); exit(1); } - printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s\n", - threads, seconds, node, dir[directions]); + printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s granule: %d\n", + threads, seconds, node, dir[directions], granule); printf("average map latency(us):%.1f standard deviation:%.1f\n", map.avg_map_100ns/10.0, map.map_stddev/10.0); printf("average unmap latency(us):%.1f standard deviation:%.1f\n", diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore index 9a35c3f6a557..1f651e85ed60 100644 --- a/tools/testing/selftests/vm/.gitignore +++ b/tools/testing/selftests/vm/.gitignore @@ -22,3 +22,4 @@ map_fixed_noreplace write_to_hugetlbfs hmm-tests local_config.* +split_huge_page_test diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 8b0cd421ebd3..73e1cc96d7c2 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -42,6 +42,7 @@ TEST_GEN_FILES += on-fault-limit TEST_GEN_FILES += thuge-gen TEST_GEN_FILES += transhuge-stress TEST_GEN_FILES += userfaultfd +TEST_GEN_FILES += split_huge_page_test ifeq ($(MACHINE),x86_64) CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_32bit_program.c -m32) diff --git a/tools/testing/selftests/vm/gup_test.c b/tools/testing/selftests/vm/gup_test.c index 6c6336dd3b7f..1e662d59c502 100644 --- a/tools/testing/selftests/vm/gup_test.c +++ b/tools/testing/selftests/vm/gup_test.c @@ -13,6 +13,7 @@ /* Just the flags we need, copied from mm.h: */ #define FOLL_WRITE 0x01 /* check pte is writable */ +#define FOLL_TOUCH 0x02 /* mark page accessed */ static char *cmd_to_str(unsigned long cmd) { @@ -37,13 +38,13 @@ int main(int argc, char **argv) { struct gup_test gup = { 0 }; unsigned long size = 128 * MB; - int i, fd, filed, opt, nr_pages = 1, thp = -1, repeats = 1, write = 0; + int i, fd, filed, opt, nr_pages = 1, thp = -1, repeats = 1, write = 1; unsigned long cmd = GUP_FAST_BENCHMARK; - int flags = MAP_PRIVATE; + int flags = MAP_PRIVATE, touch = 0; char 
diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore
index 9a35c3f6a557..1f651e85ed60 100644
--- a/tools/testing/selftests/vm/.gitignore
+++ b/tools/testing/selftests/vm/.gitignore
@@ -22,3 +22,4 @@ map_fixed_noreplace
 write_to_hugetlbfs
 hmm-tests
 local_config.*
+split_huge_page_test
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 8b0cd421ebd3..73e1cc96d7c2 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -42,6 +42,7 @@ TEST_GEN_FILES += on-fault-limit
 TEST_GEN_FILES += thuge-gen
 TEST_GEN_FILES += transhuge-stress
 TEST_GEN_FILES += userfaultfd
+TEST_GEN_FILES += split_huge_page_test
 
 ifeq ($(MACHINE),x86_64)
 CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_32bit_program.c -m32)
diff --git a/tools/testing/selftests/vm/gup_test.c b/tools/testing/selftests/vm/gup_test.c
index 6c6336dd3b7f..1e662d59c502 100644
--- a/tools/testing/selftests/vm/gup_test.c
+++ b/tools/testing/selftests/vm/gup_test.c
@@ -13,6 +13,7 @@
 
 /* Just the flags we need, copied from mm.h: */
 #define FOLL_WRITE 0x01 /* check pte is writable */
+#define FOLL_TOUCH 0x02 /* mark page accessed */
 
 static char *cmd_to_str(unsigned long cmd)
 {
@@ -37,13 +38,13 @@ int main(int argc, char **argv)
 {
     struct gup_test gup = { 0 };
     unsigned long size = 128 * MB;
-    int i, fd, filed, opt, nr_pages = 1, thp = -1, repeats = 1, write = 0;
+    int i, fd, filed, opt, nr_pages = 1, thp = -1, repeats = 1, write = 1;
     unsigned long cmd = GUP_FAST_BENCHMARK;
-    int flags = MAP_PRIVATE;
+    int flags = MAP_PRIVATE, touch = 0;
     char *file = "/dev/zero";
     char *p;
 
-    while ((opt = getopt(argc, argv, "m:r:n:F:f:abctTLUuwSH")) != -1) {
+    while ((opt = getopt(argc, argv, "m:r:n:F:f:abctTLUuwWSHpz")) != -1) {
         switch (opt) {
         case 'a':
             cmd = PIN_FAST_BENCHMARK;
@@ -65,9 +66,13 @@ int main(int argc, char **argv)
              */
             gup.which_pages[0] = 1;
             break;
+        case 'p':
+            /* works only with DUMP_USER_PAGES_TEST */
+            gup.test_flags |= GUP_TEST_FLAG_DUMP_PAGES_USE_PIN;
+            break;
         case 'F':
             /* strtol, so you can pass flags in hex form */
-            gup.flags = strtol(optarg, 0, 0);
+            gup.gup_flags = strtol(optarg, 0, 0);
             break;
         case 'm':
             size = atoi(optarg) * MB;
@@ -93,6 +98,9 @@ int main(int argc, char **argv)
         case 'w':
             write = 1;
             break;
+        case 'W':
+            write = 0;
+            break;
         case 'f':
             file = optarg;
             break;
@@ -103,6 +111,10 @@ int main(int argc, char **argv)
         case 'H':
             flags |= (MAP_HUGETLB | MAP_ANONYMOUS);
             break;
+        case 'z':
+            /* fault pages in gup, do not fault in userland */
+            touch = 1;
+            break;
         default:
             return -1;
         }
@@ -140,7 +152,7 @@ int main(int argc, char **argv)
     gup.nr_pages_per_call = nr_pages;
 
     if (write)
-        gup.flags |= FOLL_WRITE;
+        gup.gup_flags |= FOLL_WRITE;
 
     fd = open("/sys/kernel/debug/gup_test", O_RDWR);
     if (fd == -1) {
@@ -160,8 +172,18 @@ int main(int argc, char **argv)
     else if (thp == 0)
         madvise(p, size, MADV_NOHUGEPAGE);
 
-    for (; (unsigned long)p < gup.addr + size; p += PAGE_SIZE)
-        p[0] = 0;
+    /*
+     * FOLL_TOUCH, in gup_test, is used as an either/or case: either
+     * fault pages in from the kernel via FOLL_TOUCH, or fault them
+     * in here, from user space. This allows comparison of performance
+     * between those two cases.
+     */
+    if (touch) {
+        gup.gup_flags |= FOLL_TOUCH;
+    } else {
+        for (; (unsigned long)p < gup.addr + size; p += PAGE_SIZE)
+            p[0] = 0;
+    }
 
     /* Only report timing information on the *_BENCHMARK commands: */
     if ((cmd == PIN_FAST_BENCHMARK) || (cmd == GUP_FAST_BENCHMARK) ||
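Note how the new gup_test switches pair up with the changed defaults: write now defaults to 1, so FOLL_WRITE is set unless the new -W switch clears it, and -z moves the initial fault-in from the user-space touch loop into GUP itself via FOLL_TOUCH. Illustrative invocations (the option mix is assumed from the parser above):

    ./gup_test            # gup-fast benchmark, pages faulted in from user space
    ./gup_test -z         # same, but let GUP fault the pages in with FOLL_TOUCH
    ./gup_test -c -p      # dump-pages test, using pin_user_pages*() per the -p case above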
diff --git a/tools/testing/selftests/vm/split_huge_page_test.c b/tools/testing/selftests/vm/split_huge_page_test.c
new file mode 100644
index 000000000000..1af16d2c2a0a
--- /dev/null
+++ b/tools/testing/selftests/vm/split_huge_page_test.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A test of splitting PMD THPs and PTE-mapped THPs from a specified virtual
+ * address range in a process via the <debugfs>/split_huge_pages interface.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <malloc.h>
+#include <stdbool.h>
+
+uint64_t pagesize;
+unsigned int pageshift;
+uint64_t pmd_pagesize;
+
+#define PMD_SIZE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size"
+#define SPLIT_DEBUGFS "/sys/kernel/debug/split_huge_pages"
+#define SMAP_PATH "/proc/self/smaps"
+#define INPUT_MAX 80
+
+#define PID_FMT "%d,0x%lx,0x%lx"
+#define PATH_FMT "%s,0x%lx,0x%lx"
+
+#define PFN_MASK ((1UL<<55)-1)
+#define KPF_THP  (1UL<<22)
+
+int is_backed_by_thp(char *vaddr, int pagemap_file, int kpageflags_file)
+{
+    uint64_t paddr;
+    uint64_t page_flags;
+
+    if (pagemap_file) {
+        pread(pagemap_file, &paddr, sizeof(paddr),
+              ((long)vaddr >> pageshift) * sizeof(paddr));
+
+        if (kpageflags_file) {
+            pread(kpageflags_file, &page_flags, sizeof(page_flags),
+                  (paddr & PFN_MASK) * sizeof(page_flags));
+
+            return !!(page_flags & KPF_THP);
+        }
+    }
+    return 0;
+}
+
+static uint64_t read_pmd_pagesize(void)
+{
+    int fd;
+    char buf[20];
+    ssize_t num_read;
+
+    fd = open(PMD_SIZE_PATH, O_RDONLY);
+    if (fd == -1) {
+        perror("Open hpage_pmd_size failed");
+        exit(EXIT_FAILURE);
+    }
+    num_read = read(fd, buf, 19);
+    if (num_read < 1) {
+        close(fd);
+        perror("Read hpage_pmd_size failed");
+        exit(EXIT_FAILURE);
+    }
+    buf[num_read] = '\0';
+    close(fd);
+
+    return strtoul(buf, NULL, 10);
+}
+
+static int write_file(const char *path, const char *buf, size_t buflen)
+{
+    int fd;
+    ssize_t numwritten;
+
+    fd = open(path, O_WRONLY);
+    if (fd == -1)
+        return 0;
+
+    numwritten = write(fd, buf, buflen - 1);
+    close(fd);
+    if (numwritten < 1)
+        return 0;
+
+    return (unsigned int) numwritten;
+}
+
+static void write_debugfs(const char *fmt, ...)
+{
+    char input[INPUT_MAX];
+    int ret;
+    va_list argp;
+
+    va_start(argp, fmt);
+    ret = vsnprintf(input, INPUT_MAX, fmt, argp);
+    va_end(argp);
+
+    if (ret >= INPUT_MAX) {
+        printf("%s: Debugfs input is too long\n", __func__);
+        exit(EXIT_FAILURE);
+    }
+
+    if (!write_file(SPLIT_DEBUGFS, input, ret + 1)) {
+        perror(SPLIT_DEBUGFS);
+        exit(EXIT_FAILURE);
+    }
+}
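write_debugfs() above is only a formatting wrapper around a single write to <debugfs>/split_huge_pages, using the PID_FMT or PATH_FMT strings defined earlier. A minimal standalone sketch of the same idea, assuming debugfs is mounted at /sys/kernel/debug on a kernel that has this interface (split_my_range() is a hypothetical helper name):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Ask the kernel to split any THPs backing [start, end) of this process. */
    static void split_my_range(void *start, void *end)
    {
        char buf[80];
        int fd = open("/sys/kernel/debug/split_huge_pages", O_WRONLY);

        if (fd < 0) {
            perror("split_huge_pages");
            return;
        }
        /* Same "<pid>,<vaddr_start>,<vaddr_end>" form as PID_FMT above. */
        snprintf(buf, sizeof(buf), "%d,0x%lx,0x%lx", getpid(),
                 (unsigned long)start, (unsigned long)end);
        if (write(fd, buf, strlen(buf)) < 0)
            perror("write");
        close(fd);
    }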
+
+#define MAX_LINE_LENGTH 500
+
+static bool check_for_pattern(FILE *fp, const char *pattern, char *buf)
+{
+    while (fgets(buf, MAX_LINE_LENGTH, fp) != NULL) {
+        if (!strncmp(buf, pattern, strlen(pattern)))
+            return true;
+    }
+    return false;
+}
+
+static uint64_t check_huge(void *addr)
+{
+    uint64_t thp = 0;
+    int ret;
+    FILE *fp;
+    char buffer[MAX_LINE_LENGTH];
+    char addr_pattern[MAX_LINE_LENGTH];
+
+    ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-",
+                   (unsigned long) addr);
+    if (ret >= MAX_LINE_LENGTH) {
+        printf("%s: Pattern is too long\n", __func__);
+        exit(EXIT_FAILURE);
+    }
+
+    fp = fopen(SMAP_PATH, "r");
+    if (!fp) {
+        printf("%s: Failed to open file %s\n", __func__, SMAP_PATH);
+        exit(EXIT_FAILURE);
+    }
+    if (!check_for_pattern(fp, addr_pattern, buffer))
+        goto err_out;
+
+    /*
+     * Fetch the AnonHugePages: line in the same smaps block and read the
+     * number of hugepage-backed kB from it.
+     */
+    if (!check_for_pattern(fp, "AnonHugePages:", buffer))
+        goto err_out;
+
+    if (sscanf(buffer, "AnonHugePages:%10ld kB", &thp) != 1) {
+        printf("Reading smaps error\n");
+        exit(EXIT_FAILURE);
+    }
+
+err_out:
+    fclose(fp);
+    return thp;
+}
+
+void split_pmd_thp(void)
+{
+    char *one_page;
+    size_t len = 4 * pmd_pagesize;
+    uint64_t thp_size;
+    size_t i;
+
+    one_page = memalign(pmd_pagesize, len);
+    if (!one_page) {
+        printf("Failed to allocate memory\n");
+        exit(EXIT_FAILURE);
+    }
+
+    madvise(one_page, len, MADV_HUGEPAGE);
+
+    for (i = 0; i < len; i++)
+        one_page[i] = (char)i;
+
+    thp_size = check_huge(one_page);
+    if (!thp_size) {
+        printf("No THP is allocated\n");
+        exit(EXIT_FAILURE);
+    }
+
+    /* split all THPs */
+    write_debugfs(PID_FMT, getpid(), (uint64_t)one_page,
+                  (uint64_t)one_page + len);
+
+    for (i = 0; i < len; i++)
+        if (one_page[i] != (char)i) {
+            printf("Byte %ld is corrupted\n", i);
+            exit(EXIT_FAILURE);
+        }
+
+    thp_size = check_huge(one_page);
+    if (thp_size) {
+        printf("Still %ld kB of AnonHugePages not split\n", thp_size);
+        exit(EXIT_FAILURE);
+    }
+
+    printf("Split huge pages successful\n");
+    free(one_page);
+}
+
+void split_pte_mapped_thp(void)
+{
+    char *one_page, *pte_mapped, *pte_mapped2;
+    size_t len = 4 * pmd_pagesize;
+    uint64_t thp_size;
+    size_t i;
+    const char *pagemap_template = "/proc/%d/pagemap";
+    const char *kpageflags_proc = "/proc/kpageflags";
+    char pagemap_proc[255];
+    int pagemap_fd;
+    int kpageflags_fd;
+
+    if (snprintf(pagemap_proc, 255, pagemap_template, getpid()) < 0) {
+        perror("get pagemap proc error");
+        exit(EXIT_FAILURE);
+    }
+    pagemap_fd = open(pagemap_proc, O_RDONLY);
+    if (pagemap_fd == -1) {
+        perror("open pagemap");
+        exit(EXIT_FAILURE);
+    }
+
+    kpageflags_fd = open(kpageflags_proc, O_RDONLY);
+    if (kpageflags_fd == -1) {
+        perror("open kpageflags");
+        exit(EXIT_FAILURE);
+    }
+
+    one_page = mmap((void *)(1UL << 30), len, PROT_READ | PROT_WRITE,
+                    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+
+    madvise(one_page, len, MADV_HUGEPAGE);
+
+    for (i = 0; i < len; i++)
+        one_page[i] = (char)i;
+
+    thp_size = check_huge(one_page);
+    if (!thp_size) {
+        printf("No THP is allocated\n");
+        exit(EXIT_FAILURE);
+    }
+
+    /* remap the first pagesize of the first THP */
+    pte_mapped = mremap(one_page, pagesize, pagesize, MREMAP_MAYMOVE);
+
+    /* remap the Nth pagesize of the Nth THP */
+    for (i = 1; i < 4; i++) {
+        pte_mapped2 = mremap(one_page + pmd_pagesize * i + pagesize * i,
+                             pagesize, pagesize,
+                             MREMAP_MAYMOVE|MREMAP_FIXED,
+                             pte_mapped + pagesize * i);
+        if (pte_mapped2 == (char *)-1) {
+            perror("mremap failed");
+            exit(EXIT_FAILURE);
+        }
+    }
+
+    /* smaps does not show THPs after mremap, use kpageflags instead */
+    thp_size = 0;
+    for (i = 0; i < pagesize * 4; i++)
+        if (i % pagesize == 0 &&
+            is_backed_by_thp(&pte_mapped[i], pagemap_fd, kpageflags_fd))
+            thp_size++;
+
+    if (thp_size != 4) {
+        printf("Some THPs are missing during mremap\n");
+        exit(EXIT_FAILURE);
+    }
+
+    /* split all remapped THPs */
+    write_debugfs(PID_FMT, getpid(), (uint64_t)pte_mapped,
+                  (uint64_t)pte_mapped + pagesize * 4);
+
+    /* smaps does not show THPs after mremap, use kpageflags instead */
+    thp_size = 0;
+    for (i = 0; i < pagesize * 4; i++) {
+        if (pte_mapped[i] != (char)i) {
+            printf("Byte %ld is corrupted\n", i);
+            exit(EXIT_FAILURE);
+        }
+        if (i % pagesize == 0 &&
+            is_backed_by_thp(&pte_mapped[i], pagemap_fd, kpageflags_fd))
+            thp_size++;
+    }
+
+    if (thp_size) {
+        printf("Still %ld THPs not split\n", thp_size);
+        exit(EXIT_FAILURE);
+    }
+
+    printf("Split PTE-mapped huge pages successful\n");
+    munmap(one_page, len);
+    close(pagemap_fd);
+    close(kpageflags_fd);
+}
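For file-backed THPs, which the next function exercises, the same debugfs file accepts the PATH_FMT form instead: a file path plus a page-offset range rather than a PID plus virtual addresses. With the values used below (pgoff 0 to 1024), the string written ends up looking like (illustrative path):

    /tmp/thp_split_XXXXXX/thp_file,0x0,0x400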
+
+void split_file_backed_thp(void)
+{
+    int status;
+    int fd;
+    ssize_t num_written;
+    char tmpfs_template[] = "/tmp/thp_split_XXXXXX";
+    const char *tmpfs_loc = mkdtemp(tmpfs_template);
+    char testfile[INPUT_MAX];
+    uint64_t pgoff_start = 0, pgoff_end = 1024;
+
+    printf("Please enable pr_debug in split_huge_pages_in_file() if you need more info.\n");
+
+    status = mount("tmpfs", tmpfs_loc, "tmpfs", 0, "huge=always,size=4m");
+    if (status) {
+        printf("Unable to create a tmpfs for testing\n");
+        exit(EXIT_FAILURE);
+    }
+
+    status = snprintf(testfile, INPUT_MAX, "%s/thp_file", tmpfs_loc);
+    if (status >= INPUT_MAX) {
+        printf("Failed to create the file-backed THP split test file\n");
+        goto cleanup;
+    }
+
+    fd = open(testfile, O_CREAT|O_WRONLY, 0664);
+    if (fd == -1) {
+        perror("Cannot open the test file");
+        goto cleanup;
+    }
+
+    /* write something to the file, so a file-backed THP can be allocated */
+    num_written = write(fd, tmpfs_loc, strlen(tmpfs_loc) + 1);
+    close(fd);
+
+    if (num_written < 1) {
+        printf("Failed to write data to the test file\n");
+        goto cleanup;
+    }
+
+    /* split the file-backed THP */
+    write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end);
+
+    status = unlink(testfile);
+    if (status)
+        perror("Cannot remove the test file");
+
+cleanup:
+    status = umount(tmpfs_loc);
+    if (status) {
+        printf("Unable to umount %s\n", tmpfs_loc);
+        exit(EXIT_FAILURE);
+    }
+    status = rmdir(tmpfs_loc);
+    if (status) {
+        perror("cannot remove tmp dir");
+        exit(EXIT_FAILURE);
+    }
+
+    printf("file-backed THP split test done, please check dmesg for more information\n");
+}
+
+int main(int argc, char **argv)
+{
+    if (geteuid() != 0) {
+        printf("Please run the test as root\n");
+        exit(EXIT_FAILURE);
+    }
+
+    pagesize = getpagesize();
+    pageshift = ffs(pagesize) - 1;
+    pmd_pagesize = read_pmd_pagesize();
+
+    split_pmd_thp();
+    split_pte_mapped_thp();
+    split_file_backed_thp();
+
+    return 0;
+}
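Like the other vm selftests, the new test is built by the selftests Makefile (note the TEST_GEN_FILES entry added earlier) and must run as root, since it needs debugfs and a private tmpfs mount; a typical invocation would be:

    make -C tools/testing/selftests/vm
    sudo ./tools/testing/selftests/vm/split_huge_page_test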
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 92b8ec423201..f5ab5e0312e7 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -81,6 +81,8 @@ static volatile bool test_uffdio_copy_eexist = true;
 static volatile bool test_uffdio_zeropage_eexist = true;
 /* Whether to test uffd write-protection */
 static bool test_uffdio_wp = false;
+/* Whether to test uffd minor faults */
+static bool test_uffdio_minor = false;
 
 static bool map_shared;
 static int huge_fd;
@@ -96,6 +98,7 @@ struct uffd_stats {
     int cpu;
     unsigned long missing_faults;
     unsigned long wp_faults;
+    unsigned long minor_faults;
 };
 
 /* pthread_mutex_t starts at page offset 0 */
@@ -153,17 +156,19 @@ static void uffd_stats_reset(struct uffd_stats *uffd_stats,
         uffd_stats[i].cpu = i;
         uffd_stats[i].missing_faults = 0;
         uffd_stats[i].wp_faults = 0;
+        uffd_stats[i].minor_faults = 0;
     }
 }
 
 static void uffd_stats_report(struct uffd_stats *stats, int n_cpus)
 {
     int i;
-    unsigned long long miss_total = 0, wp_total = 0;
+    unsigned long long miss_total = 0, wp_total = 0, minor_total = 0;
 
     for (i = 0; i < n_cpus; i++) {
         miss_total += stats[i].missing_faults;
         wp_total += stats[i].wp_faults;
+        minor_total += stats[i].minor_faults;
     }
 
     printf("userfaults: %llu missing (", miss_total);
@@ -172,6 +177,9 @@ static void uffd_stats_report(struct uffd_stats *stats, int n_cpus)
     printf("\b), %llu wp (", wp_total);
     for (i = 0; i < n_cpus; i++)
         printf("%lu+", stats[i].wp_faults);
+    printf("\b), %llu minor (", minor_total);
+    for (i = 0; i < n_cpus; i++)
+        printf("%lu+", stats[i].minor_faults);
     printf("\b)\n");
 }
 
@@ -328,7 +336,7 @@ static struct uffd_test_ops shmem_uffd_test_ops = {
 };
 
 static struct uffd_test_ops hugetlb_uffd_test_ops = {
-    .expected_ioctls = UFFD_API_RANGE_IOCTLS_BASIC,
+    .expected_ioctls = UFFD_API_RANGE_IOCTLS_BASIC & ~(1 << _UFFDIO_CONTINUE),
     .allocate_area = hugetlb_allocate_area,
     .release_pages = hugetlb_release_pages,
     .alias_mapping = hugetlb_alias_mapping,
@@ -362,6 +370,22 @@ static void wp_range(int ufd, __u64 start, __u64 len, bool wp)
     }
 }
 
+static void continue_range(int ufd, __u64 start, __u64 len)
+{
+    struct uffdio_continue req;
+
+    req.range.start = start;
+    req.range.len = len;
+    req.mode = 0;
+
+    if (ioctl(ufd, UFFDIO_CONTINUE, &req)) {
+        fprintf(stderr,
+                "UFFDIO_CONTINUE failed for address 0x%" PRIx64 "\n",
+                (uint64_t)start);
+        exit(1);
+    }
+}
+
 static void *locking_thread(void *arg)
 {
     unsigned long cpu = (unsigned long) arg;
@@ -569,8 +593,32 @@ static void uffd_handle_page_fault(struct uffd_msg *msg,
     }
 
     if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP) {
+        /* Write protect page faults */
         wp_range(uffd, msg->arg.pagefault.address, page_size, false);
         stats->wp_faults++;
+    } else if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR) {
+        uint8_t *area;
+        int b;
+
+        /*
+         * Minor page faults
+         *
+         * To prove we can modify the original range for testing
+         * purposes, we're going to bit flip this range before
+         * continuing.
+         *
+         * Note that this requires all minor page fault tests operate on
+         * area_dst (non-UFFD-registered) and area_dst_alias
+         * (UFFD-registered).
+         */
+
+        area = (uint8_t *)(area_dst +
+                           ((char *)msg->arg.pagefault.address -
+                            area_dst_alias));
+        for (b = 0; b < page_size; ++b)
+            area[b] = ~area[b];
+        continue_range(uffd, msg->arg.pagefault.address, page_size);
+        stats->minor_faults++;
     } else {
         /* Missing page faults */
         if (bounces & BOUNCE_VERIFY &&
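The minor-fault branch above leans on the new UFFDIO_CONTINUE ioctl: the page cache is already populated when a minor fault fires, so the handler only has to ask the kernel to install the page table entries and retry the access. A condensed sketch of the register-then-continue flow, assuming a <linux/userfaultfd.h> that defines the minor-fault ABI and reusing this test's names (uffd, area_dst_alias, nr_pages, page_size, fault_addr):

    #include <linux/userfaultfd.h>
    #include <sys/ioctl.h>

    /* 1) Register only the alias mapping for minor faults. */
    struct uffdio_register reg = {
        .range = {
            .start = (unsigned long)area_dst_alias,
            .len = nr_pages * page_size,
        },
        .mode = UFFDIO_REGISTER_MODE_MINOR,
    };
    if (ioctl(uffd, UFFDIO_REGISTER, &reg))
        /* handle error */;

    /* 2) When a read through the registered mapping raises a fault with
     * UFFD_PAGEFAULT_FLAG_MINOR set, optionally edit the page through the
     * non-registered mapping, then install the PTEs and retry:
     */
    struct uffdio_continue cont = {
        .range = { .start = fault_addr, .len = page_size },
    };
    if (ioctl(uffd, UFFDIO_CONTINUE, &cont))
        /* handle error */;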
@@ -779,7 +827,7 @@ static int stress(struct uffd_stats *uffd_stats)
     return 0;
 }
 
-static int userfaultfd_open(int features)
+static int userfaultfd_open_ext(uint64_t *features)
 {
     struct uffdio_api uffdio_api;
 
@@ -792,7 +840,7 @@ static int userfaultfd_open(int features)
     uffd_flags = fcntl(uffd, F_GETFD, NULL);
 
     uffdio_api.api = UFFD_API;
-    uffdio_api.features = features;
+    uffdio_api.features = *features;
     if (ioctl(uffd, UFFDIO_API, &uffdio_api)) {
         fprintf(stderr, "UFFDIO_API failed.\nPlease make sure to "
                 "run with either root or ptrace capability.\n");
@@ -804,9 +852,15 @@ static int userfaultfd_open(int features)
         return 1;
     }
 
+    *features = uffdio_api.features;
     return 0;
 }
 
+static int userfaultfd_open(uint64_t features)
+{
+    return userfaultfd_open_ext(&features);
+}
+
 sigjmp_buf jbuf, *sigbuf;
 
 static void sighndl(int sig, siginfo_t *siginfo, void *ptr)
@@ -1112,7 +1166,7 @@ static int userfaultfd_events_test(void)
     }
 
     if (!pid)
-        return faulting_process(0);
+        exit(faulting_process(0));
 
     waitpid(pid, &err, 0);
     if (err) {
@@ -1215,6 +1269,102 @@ static int userfaultfd_sig_test(void)
     return userfaults != 0;
 }
 
+static int userfaultfd_minor_test(void)
+{
+    struct uffdio_register uffdio_register;
+    unsigned long expected_ioctls;
+    unsigned long p;
+    pthread_t uffd_mon;
+    uint8_t expected_byte;
+    void *expected_page;
+    char c;
+    struct uffd_stats stats = { 0 };
+    uint64_t features = UFFD_FEATURE_MINOR_HUGETLBFS;
+
+    if (!test_uffdio_minor)
+        return 0;
+
+    printf("testing minor faults: ");
+    fflush(stdout);
+
+    if (uffd_test_ops->release_pages(area_dst))
+        return 1;
+
+    if (userfaultfd_open_ext(&features))
+        return 1;
+    /* If the kernel reports the feature isn't supported, skip the test. */
+    if (!(features & UFFD_FEATURE_MINOR_HUGETLBFS)) {
+        printf("skipping test due to lack of feature support\n");
+        fflush(stdout);
+        return 0;
+    }
+
+    uffdio_register.range.start = (unsigned long)area_dst_alias;
+    uffdio_register.range.len = nr_pages * page_size;
+    uffdio_register.mode = UFFDIO_REGISTER_MODE_MINOR;
+    if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
+        fprintf(stderr, "register failure\n");
+        exit(1);
+    }
+
+    expected_ioctls = uffd_test_ops->expected_ioctls;
+    expected_ioctls |= 1 << _UFFDIO_CONTINUE;
+    if ((uffdio_register.ioctls & expected_ioctls) != expected_ioctls) {
+        fprintf(stderr, "unexpected missing ioctl(s)\n");
+        exit(1);
+    }
+
+    /*
+     * After registering with UFFD, populate the non-UFFD-registered side of
+     * the shared mapping. This should *not* trigger any UFFD minor faults.
+     */
+    for (p = 0; p < nr_pages; ++p) {
+        memset(area_dst + (p * page_size), p % ((uint8_t)-1),
+               page_size);
+    }
+
+    if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, &stats)) {
+        perror("uffd_poll_thread create");
+        exit(1);
+    }
+
+    /*
+     * Read each of the pages back using the UFFD-registered mapping. We
+     * expect that the first time we touch a page, it will result in a minor
+     * fault. uffd_poll_thread will resolve the fault by bit-flipping the
+     * page's contents, and then issuing a CONTINUE ioctl.
+     */
+
+    if (posix_memalign(&expected_page, page_size, page_size)) {
+        fprintf(stderr, "out of memory\n");
+        return 1;
+    }
+
+    for (p = 0; p < nr_pages; ++p) {
+        expected_byte = ~((uint8_t)(p % ((uint8_t)-1)));
+        memset(expected_page, expected_byte, page_size);
+        if (my_bcmp(expected_page, area_dst_alias + (p * page_size),
+                    page_size)) {
+            fprintf(stderr,
+                    "unexpected page contents after minor fault\n");
+            exit(1);
+        }
+    }
+
+    if (write(pipefd[1], &c, sizeof(c)) != sizeof(c)) {
+        perror("pipe write");
+        exit(1);
+    }
+    if (pthread_join(uffd_mon, NULL))
+        return 1;
+
+    close(uffd);
+
+    uffd_stats_report(&stats, 1);
+
+    return stats.missing_faults != 0 || stats.minor_faults != nr_pages;
+}
+
 static int userfaultfd_stress(void)
 {
     void *area;
@@ -1413,7 +1563,7 @@ static int userfaultfd_stress(void)
     close(uffd);
 
     return userfaultfd_zeropage_test() || userfaultfd_sig_test()
-        || userfaultfd_events_test();
+        || userfaultfd_events_test() || userfaultfd_minor_test();
 }
 
 /*
@@ -1454,6 +1604,8 @@ static void set_test_type(const char *type)
         map_shared = true;
         test_type = TEST_HUGETLB;
         uffd_test_ops = &hugetlb_uffd_test_ops;
+        /* Minor faults require shared hugetlb; only enable here. */
+        test_uffdio_minor = true;
     } else if (!strcmp(type, "shmem")) {
         map_shared = true;
         test_type = TEST_SHMEM;
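Since test_uffdio_minor is only switched on for the hugetlb-based shared test type, exercising the new path follows the test's usual <test type> <MiB> <bounces> argument convention, with a hugetlbfs-backed file for the hugetlb types; an illustrative run, assuming hugetlbfs is mounted at /mnt/huge, would be:

    sudo ./userfaultfd hugetlb_shared 128 32 /mnt/huge/uffd_test

The new minor-fault counts then appear in the "minor (...)" column added to uffd_stats_report() above.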