231 files changed, 16665 insertions, 2320 deletions
diff --git a/Documentation/ABI/stable/sysfs-fs-orangefs b/Documentation/ABI/stable/sysfs-fs-orangefs
new file mode 100644
index 000000000000..affdb114bd33
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-fs-orangefs
@@ -0,0 +1,87 @@
+What: /sys/fs/orangefs/perf_counters/*
+Date: Jun 2015
+Contact: Mike Marshall <[email protected]>
+Description:
+ Counters and settings for various caches.
+ Read only.
+
+
+What: /sys/fs/orangefs/perf_counter_reset
+Date: Jun 2015
+Contact: Mike Marshall <[email protected]>
+Description:
+ echo a 0 or a 1 into perf_counter_reset to
+ reset all the counters in
+ /sys/fs/orangefs/perf_counters
+ except ones with PINT_PERF_PRESERVE set.
+
+
+What: /sys/fs/orangefs/perf_time_interval_secs
+Date: Jun 2015
+Contact: Mike Marshall <[email protected]>
+Description:
+ Length of perf counter intervals in
+ seconds.
+
+
+What: /sys/fs/orangefs/perf_history_size
+Date: Jun 2015
+Contact: Mike Marshall <[email protected]>
+Description:
+ The perf_counters cache statistics have N, or
+ perf_history_size, samples. The default is
+ one.
+
+ Every perf_time_interval_secs the (first)
+ samples are reset.
+
+ If N is greater than one, the "current" set
+ of samples is reset, and the samples from the
+ other N-1 intervals remain available.
+
+
+What: /sys/fs/orangefs/op_timeout_secs
+Date: Jun 2015
+Contact: Mike Marshall <[email protected]>
+Description:
+ Service operation timeout in seconds.
+
+
+What: /sys/fs/orangefs/slot_timeout_secs
+Date: Jun 2015
+Contact: Mike Marshall <[email protected]>
+Description:
+ "Slot" timeout in seconds. A "slot"
+ is an indexed buffer in the shared
+ memory segment used for communication
+ between the kernel module and userspace.
+ Slots are requested and waited for;
+ the wait times out after slot_timeout_secs.
+
+
+What: /sys/fs/orangefs/acache/*
+Date: Jun 2015
+Contact: Mike Marshall <[email protected]>
+Description:
+ Attribute cache configurable settings.
+
+
+What: /sys/fs/orangefs/ncache/*
+Date: Jun 2015
+Contact: Mike Marshall <[email protected]>
+Description:
+ Name cache configurable settings.
+
+
+What: /sys/fs/orangefs/capcache/*
+Date: Jun 2015
+Contact: Mike Marshall <[email protected]>
+Description:
+ Capability cache configurable settings.
+
+
+What: /sys/fs/orangefs/ccache/*
+Date: Jun 2015
+Contact: Mike Marshall <[email protected]>
+Description:
+ Credential cache configurable settings.
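The knobs documented above are ordinary sysfs attributes, so exercising them from C is just open/write/read. A minimal sketch, where the "acache" counter name is an assumption for illustration (actual names come from the running filesystem):

/*
 * Hedged sketch: reset the OrangeFS perf counters and read one back,
 * using the sysfs paths documented above.  The counter file name
 * under perf_counters/ is illustrative, not taken from the ABI text.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd;

	/* echo 1 > perf_counter_reset: clears everything except
	 * counters with PINT_PERF_PRESERVE set */
	fd = open("/sys/fs/orangefs/perf_counter_reset", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return 1;
	}
	close(fd);

	/* read one counter back; "acache" is an illustrative name */
	fd = open("/sys/fs/orangefs/perf_counters/acache", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf);
	}
	close(fd);
	return 0;
}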
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index b683e8ee69ec..16501334b99f 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -271,3 +271,72 @@ Description: Parameters for the CPU cache attributes
 - WriteBack: data is written only to the cache line and
   the modified cache line is written to main
   memory only when it is replaced
+
+What: /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats
+ /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/turbo_stat
+ /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/sub_turbo_stat
+ /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/unthrottle
+ /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/powercap
+ /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/overtemp
+ /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/supply_fault
+ /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/overcurrent
+ /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/occ_reset
+Date: March 2016
+Contact: Linux kernel mailing list <[email protected]>
+ Linux for PowerPC mailing list <[email protected]>
+Description: POWERNV CPUFreq driver's frequency throttle stats directory and
+ attributes
+
+ The 'cpuX/cpufreq/throttle_stats' directory contains the CPU frequency
+ throttle stat attributes for the chip. The throttle stats of a cpu
+ are common across all the cpus belonging to a chip. Below are the
+ throttle attributes exported in the 'throttle_stats' directory:
+
+ - turbo_stat : This file gives the total number of times the max
+   frequency is throttled to a lower frequency in the turbo (at and
+   above nominal frequency) range of frequencies.
+
+ - sub_turbo_stat : This file gives the total number of times the
+   max frequency is throttled to a lower frequency in the sub-turbo
+   (below nominal frequency) range of frequencies.
+
+ - unthrottle : This file gives the total number of times the max
+   frequency is unthrottled after being throttled.
+
+ - powercap : This file gives the total number of times the max
+   frequency is throttled due to 'Power Capping'.
+
+ - overtemp : This file gives the total number of times the max
+   frequency is throttled due to 'CPU Over Temperature'.
+
+ - supply_fault : This file gives the total number of times the
+   max frequency is throttled due to 'Power Supply Failure'.
+
+ - overcurrent : This file gives the total number of times the
+   max frequency is throttled due to 'Overcurrent'.
+
+ - occ_reset : This file gives the total number of times the max
+   frequency is throttled due to 'OCC Reset'.
+
+ The sysfs attributes representing different throttle reasons like
+ powercap, overtemp, supply_fault, overcurrent and occ_reset map to
+ the reasons provided by OCC firmware for throttling the frequency.
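On a powernv box these counters are plain decimal sysfs files; a minimal sketch that dumps all eight for cpu0 follows (only the file names come from the ABI text above, the loop itself is illustrative). The per-policy mirror of the same directory is documented next.

/*
 * Hedged sketch: dump the per-chip throttle counters documented
 * above for cpu0.  Silently skips files that do not exist, e.g. on
 * non-powernv systems.
 */
#include <stdio.h>

int main(void)
{
	static const char *stats[] = {
		"turbo_stat", "sub_turbo_stat", "unthrottle", "powercap",
		"overtemp", "supply_fault", "overcurrent", "occ_reset",
	};
	char path[128], line[64];
	FILE *f;

	for (unsigned int i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cpufreq/throttle_stats/%s",
			 stats[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* attribute absent: not powernv */
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", stats[i], line);
		fclose(f);
	}
	return 0;
}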
+
+What: /sys/devices/system/cpu/cpufreq/policyX/throttle_stats
+ /sys/devices/system/cpu/cpufreq/policyX/throttle_stats/turbo_stat
+ /sys/devices/system/cpu/cpufreq/policyX/throttle_stats/sub_turbo_stat
+ /sys/devices/system/cpu/cpufreq/policyX/throttle_stats/unthrottle
+ /sys/devices/system/cpu/cpufreq/policyX/throttle_stats/powercap
+ /sys/devices/system/cpu/cpufreq/policyX/throttle_stats/overtemp
+ /sys/devices/system/cpu/cpufreq/policyX/throttle_stats/supply_fault
+ /sys/devices/system/cpu/cpufreq/policyX/throttle_stats/overcurrent
+ /sys/devices/system/cpu/cpufreq/policyX/throttle_stats/occ_reset
+Date: March 2016
+Contact: Linux kernel mailing list <[email protected]>
+ Linux for PowerPC mailing list <[email protected]>
+Description: POWERNV CPUFreq driver's frequency throttle stats directory and
+ attributes
+
+ The 'policyX/throttle_stats' directory and all the attributes are the
+ same as the /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats
+ directory and attributes, which give the frequency throttle
+ information of the chip.
diff --git a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
index b8627e763dba..c84fb47265eb 100644
--- a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
+++ b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
@@ -35,6 +35,8 @@ Required properties:
   - "rockchip,rk3288-io-voltage-domain" for rk3288
   - "rockchip,rk3368-io-voltage-domain" for rk3368
   - "rockchip,rk3368-pmu-io-voltage-domain" for rk3368 pmu-domains
+  - "rockchip,rk3399-io-voltage-domain" for rk3399
+  - "rockchip,rk3399-pmu-io-voltage-domain" for rk3399 pmu-domains
 - rockchip,grf: phandle to the syscon managing the "general register files"
@@ -79,6 +81,15 @@ Possible supplies for rk3368 pmu-domains:
 - pmu-supply: The supply connected to PMUIO_VDD.
 - vop-supply: The supply connected to LCDC_VDD.
+Possible supplies for rk3399:
+- bt656-supply: The supply connected to APIO2_VDD.
+- audio-supply: The supply connected to APIO5_VDD.
+- sdmmc-supply: The supply connected to SDMMC0_VDD.
+- gpio1830-supply: The supply connected to APIO4_VDD.
+
+Possible supplies for rk3399 pmu-domains:
+- pmu1830-supply: The supply connected to PMUIO2_VDD.
+
 Example:
 	io-domains {
diff --git a/Documentation/devicetree/bindings/rtc/maxim,mcp795.txt b/Documentation/devicetree/bindings/rtc/maxim,mcp795.txt
new file mode 100644
index 000000000000..a59fdd8c236d
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/maxim,mcp795.txt
@@ -0,0 +1,11 @@
+* Maxim MCP795 SPI Serial Real-Time Clock
+
+Required properties:
+- compatible: Should contain "maxim,mcp795".
+- reg: SPI address for chip
+
+Example:
+	mcp795: rtc@0 {
+		compatible = "maxim,mcp795";
+		reg = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 03c0e989e020..66f6adf8d44d 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -38,6 +38,9 @@ Optional properties:
 defined or a value in the array is "0" then it is assumed that the frequency
 is set by the parent clock or a fixed rate clock source.
+- lanes-per-direction : number of lanes available per direction - either 1 or 2.
+ Note that it is assumed that the same number of lanes is used in both
+ directions at once. If not specified, the default is 2 lanes per direction.
 Note: If above properties are not defined it can be assumed that the supply
 regulators or clocks are always on.
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
index 32ac32e773e1..ca44c5820585 100644
--- a/Documentation/dma-buf-sharing.txt
+++ b/Documentation/dma-buf-sharing.txt
@@ -352,7 +352,8 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases:
    No special interfaces, userspace simply calls mmap on the dma-buf fd, making
    sure that the cache synchronization ioctl (DMA_BUF_IOCTL_SYNC) is *always*
-   used when the access happens. This is discussed next paragraphs.
+   used when the access happens. Note that DMA_BUF_IOCTL_SYNC can fail with
+   -EAGAIN or -EINTR, in which case it must be restarted.
    Some systems might need some sort of cache coherency management e.g. when
    CPU and GPU domains are being accessed through dma-buf at the same time. To
@@ -366,10 +367,10 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases:
      want (with the new data being consumed by the GPU or say scanout device)
    - munmap once you don't need the buffer any more
-   Therefore, for correctness and optimal performance, systems with the memory
-   cache shared by the GPU and CPU i.e. the "coherent" and also the
-   "incoherent" are always required to use SYNC_START and SYNC_END before and
-   after, respectively, when accessing the mapped address.
+   For correctness and optimal performance, it is always required to use
+   SYNC_START and SYNC_END before and after, respectively, when accessing the
+   mapped address. Userspace cannot rely on coherent access, even when there
+   are systems where it just works without calling these ioctls.
 2. Supporting existing mmap interfaces in importers
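The note added above means a well-behaved client wraps DMA_BUF_IOCTL_SYNC in a restart loop. A minimal sketch, assuming a kernel that exposes the DMA_BUF_IOCTL_SYNC uapi added in this cycle and a dma-buf fd obtained from some exporter:

/*
 * Hedged sketch: bracket CPU access to an mmap'ed dma-buf with
 * SYNC_START/SYNC_END, restarting the ioctl on EAGAIN or EINTR as
 * the documentation above requires.
 */
#include <errno.h>
#include <linux/dma-buf.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static int dmabuf_sync(int fd, __u64 flags)
{
	struct dma_buf_sync sync = { .flags = flags };
	int ret;

	do {
		ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
	} while (ret == -1 && (errno == EAGAIN || errno == EINTR));

	return ret;
}

static void touch_buffer(int fd, size_t len)
{
	char *map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (map == MAP_FAILED)
		return;

	dmabuf_sync(fd, DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW);
	map[0] = 0;			/* CPU access happens here */
	dmabuf_sync(fd, DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW);

	munmap(map, len);
}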
diff --git a/Documentation/filesystems/orangefs.txt b/Documentation/filesystems/orangefs.txt
new file mode 100644
index 000000000000..e1a0056a365f
--- /dev/null
+++ b/Documentation/filesystems/orangefs.txt
@@ -0,0 +1,406 @@
+ORANGEFS
+========
+
+OrangeFS is an LGPL userspace scale-out parallel storage system. It is ideal
+for large storage problems faced by HPC, BigData, Streaming Video,
+Genomics, Bioinformatics.
+
+Orangefs, originally called PVFS, was first developed in 1993 by
+Walt Ligon and Eric Blumer as a parallel file system for Parallel
+Virtual Machine (PVM) as part of a NASA grant to study the I/O patterns
+of parallel programs.
+
+Orangefs features include:
+
+  * Distributes file data among multiple file servers
+  * Supports simultaneous access by multiple clients
+  * Stores file data and metadata on servers using local file system
+    and access methods
+  * Userspace implementation is easy to install and maintain
+  * Direct MPI support
+  * Stateless
+
+
+MAILING LIST
+============
+
+http://beowulf-underground.org/mailman/listinfo/pvfs2-users
+
+
+DOCUMENTATION
+=============
+
+http://www.orangefs.org/documentation/
+
+
+USERSPACE FILESYSTEM SOURCE
+===========================
+
+http://www.orangefs.org/download
+
+Orangefs versions prior to 2.9.3 are not compatible with the
+upstream version of the kernel client.
+
+
+BUILDING THE USERSPACE FILESYSTEM ON A SINGLE SERVER
+====================================================
+
+When Orangefs is upstream, "--with-kernel" shouldn't be needed, but
+until then the path to where the kernel with the Orangefs kernel client
+patch was built is needed to ensure that pvfs2-client-core (the bridge
+between kernel space and user space) will build properly. You can omit
+--prefix if you don't care that things are sprinkled around in
+/usr/local.
+
+./configure --prefix=/opt/ofs --with-kernel=/path/to/orangefs/kernel
+
+make
+
+make install
+
+Create an orangefs config file:
+/opt/ofs/bin/pvfs2-genconfig /etc/pvfs2.conf
+
+  for "Enter hostnames", use the hostname, don't let it default to
+  localhost.
+
+Create a pvfs2tab file in /etc:
+cat /etc/pvfs2tab
+tcp://myhostname:3334/orangefs /mymountpoint pvfs2 defaults,noauto 0 0
+
+Create the mount point you specified in the tab file if needed:
+mkdir /mymountpoint
+
+Bootstrap the server:
+/opt/ofs/sbin/pvfs2-server /etc/pvfs2.conf -f
+
+Start the server:
+/opt/ofs/sbin/pvfs2-server /etc/pvfs2.conf
+
+Now the server is running. At this point you might like to
+prove things are working with:
+
+/opt/ofs/bin/pvfs2-ls /mymountpoint
+
+You might not want to enforce SELinux; it doesn't seem to matter as
+of Linux 3.11...
+
+If stuff seems to be working, turn on the client core:
+/opt/ofs/sbin/pvfs2-client -p /opt/ofs/sbin/pvfs2-client-core
+
+Mount your filesystem:
+mount -t pvfs2 tcp://myhostname:3334/orangefs /mymountpoint
+
+
+OPTIONS
+=======
+
+The following mount options are accepted:
+
+  acl
+    Allow the use of Access Control Lists on files and directories.
+
+  intr
+    Some operations between the kernel client and the user space
+    filesystem can be interruptible, such as changes in debug levels
+    and the setting of tunable parameters.
+
+  local_lock
+    Enable posix locking from the perspective of "this" kernel. The
+    default file_operations lock action is to return ENOSYS. Posix
+    locking kicks in if the filesystem is mounted with -o local_lock.
+    Distributed locking is being worked on for the future.
+
+
+DEBUGGING
+=========
+
+If you want the debug (GOSSIP) statements from a particular
+source file (inode.c for example) to go to syslog:
+
+  echo inode > /sys/kernel/debug/orangefs/kernel-debug
+
+No debugging (the default):
+
+  echo none > /sys/kernel/debug/orangefs/kernel-debug
+
+Debugging from several source files:
+
+  echo inode,dir > /sys/kernel/debug/orangefs/kernel-debug
+
+All debugging:
+
+  echo all > /sys/kernel/debug/orangefs/kernel-debug
+
+Get a list of all debugging keywords:
+
+  cat /sys/kernel/debug/orangefs/debug-help
+
+
+PROTOCOL BETWEEN KERNEL MODULE AND USERSPACE
+============================================
+
+Orangefs is a user space filesystem and an associated kernel module.
+We'll just refer to the user space part of Orangefs as "userspace"
+from here on out. Orangefs descends from PVFS, and userspace code
+still uses PVFS for function and variable names. Userspace typedefs
+many of the important structures. Function and variable names in
+the kernel module have been transitioned to "orangefs", and the Linux
+Coding Style avoids typedefs, so kernel module structures that
+correspond to userspace structures are not typedefed.
+
+The kernel module implements a pseudo device that userspace
+can read from and write to. Userspace can also manipulate the
+kernel module through the pseudo device with ioctl.
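That ioctl path is how the client-core hands the kernel its shared buffers. A minimal sketch of the registration step, with loudly hypothetical names: the real ioctl number and the exact PVFS_dev_map_desc layout live in the userspace sources, and THE BUFMAP section below describes what the kernel does with them.

/*
 * Hedged sketch: register the mlocked IO buffer with the kernel
 * module through the pseudo device.  PVFS_DEV_MAP and the field
 * layout of PVFS_dev_map_desc are assumptions for illustration,
 * not the real UAPI.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

struct PVFS_dev_map_desc {		/* assumed layout */
	void    *ptr;			/* base of the buffer */
	int32_t  total_size;		/* 41943040 for the IO buffer */
	int32_t  size;			/* partition size: 4194304 */
	int32_t  count;			/* number of partitions: 10 */
};

#define PVFS_DEV_MAP _IOW('k', 1, struct PVFS_dev_map_desc)	/* hypothetical */

static int register_io_buffer(int devfd)
{
	struct PVFS_dev_map_desc desc;
	void *buf;

	/* page-size-aligned, mlocked: 10 x 4194304-byte partitions */
	if (posix_memalign(&buf, sysconf(_SC_PAGESIZE), 41943040))
		return -1;
	if (mlock(buf, 41943040))
		return -1;

	desc.ptr = buf;
	desc.total_size = 41943040;
	desc.size = 4194304;
	desc.count = 10;

	/* the kernel copy_from_user()s this to build struct orangefs_bufmap */
	return ioctl(devfd, PVFS_DEV_MAP, &desc);
}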
+THE BUFMAP:
+
+At startup userspace allocates two page-size-aligned (posix_memalign)
+mlocked memory buffers: one is used for IO and one is used for readdir
+operations. The IO buffer is 41943040 bytes and the readdir buffer is
+4194304 bytes. Each buffer contains logical chunks, or partitions, and
+a pointer to each buffer is added to its own PVFS_dev_map_desc structure
+which also describes its total size, as well as the size and number of
+the partitions.
+
+A pointer to the IO buffer's PVFS_dev_map_desc structure is sent to a
+mapping routine in the kernel module with an ioctl. The structure is
+copied from user space to kernel space with copy_from_user and is used
+to initialize the kernel module's "bufmap" (struct orangefs_bufmap), which
+then contains:
+
+  * refcnt - a reference counter
+  * desc_size - PVFS2_BUFMAP_DEFAULT_DESC_SIZE (4194304) - the IO buffer's
+    partition size, which represents the filesystem's block size and
+    is used for s_blocksize in super blocks.
+  * desc_count - PVFS2_BUFMAP_DEFAULT_DESC_COUNT (10) - the number of
+    partitions in the IO buffer.
+  * desc_shift - log2(desc_size), used for s_blocksize_bits in super blocks.
+  * total_size - the total size of the IO buffer.
+  * page_count - the number of 4096 byte pages in the IO buffer.
+  * page_array - a pointer to page_count * (sizeof(struct page*)) bytes
+    of kcalloced memory. This memory is used as an array of pointers
+    to each of the pages in the IO buffer through a call to get_user_pages.
+  * desc_array - a pointer to desc_count * (sizeof(struct orangefs_bufmap_desc))
+    bytes of kcalloced memory. This memory is further initialized:
+
+      user_desc is the kernel's copy of the IO buffer's ORANGEFS_dev_map_desc
+      structure. user_desc->ptr points to the IO buffer.
+
+      pages_per_desc = bufmap->desc_size / PAGE_SIZE
+      offset = 0
+
+      bufmap->desc_array[0].page_array = &bufmap->page_array[offset]
+      bufmap->desc_array[0].array_count = pages_per_desc = 1024
+      bufmap->desc_array[0].uaddr = (user_desc->ptr) + (0 * 1024 * 4096)
+      offset += 1024
+      .
+      .
+      .
+      bufmap->desc_array[9].page_array = &bufmap->page_array[offset]
+      bufmap->desc_array[9].array_count = pages_per_desc = 1024
+      bufmap->desc_array[9].uaddr = (user_desc->ptr) + (9 * 1024 * 4096)
+      offset += 1024
+
+  * buffer_index_array - a desc_count sized array of ints, used to
+    indicate which of the IO buffer's partitions are available to use.
+  * buffer_index_lock - a spinlock to protect buffer_index_array during update.
+  * readdir_index_array - a five (ORANGEFS_READDIR_DEFAULT_DESC_COUNT) element
+    int array used to indicate which of the readdir buffer's partitions are
+    available to use.
+  * readdir_index_lock - a spinlock to protect readdir_index_array during
+    update.
+
+OPERATIONS:
+
+The kernel module builds an "op" (struct orangefs_kernel_op_s) when it
+needs to communicate with userspace. Part of the op contains the "upcall"
+which expresses the request to userspace. Part of the op eventually
+contains the "downcall" which expresses the results of the request.
+
+The slab allocator is used to keep a cache of op structures handy.
+
+At init time the kernel module defines and initializes a request list
+and an in_progress hash table to keep track of all the ops that are
+in flight at any given time.
+
+Ops are stateful:
+
+  * unknown  - op was just initialized
+  * waiting  - op is on request_list (upward bound)
+  * inprogr  - op is in progress (waiting for downcall)
+  * serviced - op has matching downcall; ok
+  * purged   - op has to start a timer since client-core
+               exited uncleanly before servicing op
+  * given up - submitter has given up waiting for it
+
+When some arbitrary userspace program needs to perform a
+filesystem operation on Orangefs (readdir, I/O, create, whatever)
+an op structure is initialized and tagged with a distinguishing ID
+number. The upcall part of the op is filled out, and the op is
+passed to the "service_operation" function.
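A compressed sketch of that life cycle, with simplified stand-in names (the real structures, tags, locking and timeouts are in fs/orangefs; the next paragraphs walk through the same flow in prose):

/*
 * Hedged sketch of the op life cycle described above.  Not the
 * kernel module's actual code: names are simplified and locking is
 * omitted, but the state transitions match the list above.
 */
#include <linux/list.h>
#include <linux/types.h>
#include <linux/wait.h>

enum op_state {
	OP_UNKNOWN,	/* just initialized */
	OP_WAITING,	/* on request_list, upward bound */
	OP_INPROGR,	/* upcall consumed, waiting for downcall */
	OP_SERVICED,	/* matching downcall arrived; ok */
	OP_PURGED,	/* client-core exited uncleanly */
	OP_GIVEN_UP,	/* submitter stopped waiting */
};

struct orangefs_op {
	u64 tag;			/* distinguishing ID number */
	enum op_state state;
	struct list_head list;		/* request_list or hash chain */
	/* upcall and downcall bodies live here in the real op */
};

/* defined at module init in the real code */
extern struct list_head request_list;
extern wait_queue_head_t poll_waitq, downcall_waitq;

static int service_operation(struct orangefs_op *op)
{
	op->state = OP_WAITING;
	list_add_tail(&op->list, &request_list);	/* visible to read() */
	wake_up_interruptible(&poll_waitq);		/* wake client-core poll() */

	/* wait_for_matching_downcall() in the real module */
	wait_event(downcall_waitq, op->state == OP_SERVICED ||
				   op->state == OP_GIVEN_UP);

	return op->state == OP_SERVICED ? 0 : -ETIMEDOUT;
}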
+
+Service_operation changes the op's state to "waiting", puts
+it on the request list, and signals the Orangefs file_operations.poll
+function through a wait queue. Userspace is polling the pseudo-device
+and thus becomes aware of the upcall request that needs to be read.
+
+When the Orangefs file_operations.read function is triggered, the
+request list is searched for an op that seems ready-to-process.
+The op is removed from the request list. The tag from the op and
+the filled-out upcall struct are copy_to_user'ed back to userspace.
+
+If any of these (and some additional protocol) copy_to_users fail,
+the op's state is set to "waiting" and the op is added back to
+the request list. Otherwise, the op's state is changed to "in progress",
+and the op is hashed on its tag and put onto the end of a list in the
+in_progress hash table at the index the tag hashed to.
+
+When userspace has assembled the response to the upcall, it
+writes the response, which includes the distinguishing tag, back to
+the pseudo device in a series of io_vecs. This triggers the Orangefs
+file_operations.write_iter function to find the op with the associated
+tag and remove it from the in_progress hash table. As long as the op's
+state is not "canceled" or "given up", its state is set to "serviced".
+The file_operations.write_iter function returns to the waiting vfs,
+and back to service_operation through wait_for_matching_downcall.
+
+Service_operation returns to its caller with the op's downcall
+part (the response to the upcall) filled out.
+
+The "client-core" is the bridge between the kernel module and
+userspace. The client-core is a daemon. The client-core has an
+associated watchdog daemon. If the client-core is ever signaled
+to die, the watchdog daemon restarts the client-core. Even though
+the client-core is restarted "right away", there is a period of
+time during such an event that the client-core is dead. A dead client-core
+can't be triggered by the Orangefs file_operations.poll function.
+Ops that pass through service_operation during a "dead spell" can time out
+on the wait queue, and one attempt is made to recycle them. Obviously,
+if the client-core stays dead too long, the arbitrary userspace processes
+trying to use Orangefs will be negatively affected. Waiting ops
+that can't be serviced will be removed from the request list and
+have their states set to "given up". In-progress ops that can't
+be serviced will be removed from the in_progress hash table and
+have their states set to "given up".
+
+Readdir and I/O ops are atypical with respect to their payloads.
+
+  - readdir ops use the smaller of the two pre-allocated pre-partitioned
+    memory buffers. The readdir buffer is only available to userspace.
+    The kernel module obtains an index to a free partition before launching
+    a readdir op. Userspace deposits the results into the indexed partition
+    and then writes them back to the pvfs device.
+
+  - io (read and write) ops use the larger of the two pre-allocated
+    pre-partitioned memory buffers. The IO buffer is accessible from
+    both userspace and the kernel module. The kernel module obtains an
+    index to a free partition before launching an io op. The kernel module
+    deposits write data into the indexed partition, to be consumed
+    directly by userspace. Userspace deposits the results of read
+    requests into the indexed partition, to be consumed directly
+    by the kernel module.
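The userspace half of that exchange (read an upcall, service it, write the tagged downcall back) can be sketched as below. Structure layouts, the magic number, and the protocol version are placeholders; the real iovec layout used by PINT_dev_write_list is detailed right after this sketch.

/*
 * Hedged sketch of one client-core round trip on /dev/pvfs2-req.
 * The struct definitions and protocol constants are placeholders;
 * only the iovec ordering (proto_ver, magic, tag, downcall) follows
 * the PINT_dev_write_list description below.
 */
#include <stdint.h>
#include <sys/uio.h>
#include <unistd.h>

struct pvfs2_upcall   { uint64_t tag; /* ... request body ... */ };
struct pvfs2_downcall { int32_t type; int32_t status;
			int64_t trailer_size; char *trailer_buf; };

static int32_t proto_ver  = 6;		/* placeholder */
static int32_t pdev_magic = 0x50564653;	/* placeholder, not the real magic */

static int answer_one_upcall(int devfd)
{
	struct pvfs2_upcall up;
	struct pvfs2_downcall down = { 0 };
	struct iovec io_array[4];

	/* blocks until the kernel queues an op; real reads carry more */
	if (read(devfd, &up, sizeof(up)) < 0)
		return -1;

	/* ... perform the filesystem work, fill in 'down' ... */

	io_array[0].iov_base = &proto_ver;  io_array[0].iov_len = sizeof(int32_t);
	io_array[1].iov_base = &pdev_magic; io_array[1].iov_len = sizeof(int32_t);
	io_array[2].iov_base = &up.tag;     io_array[2].iov_len = sizeof(int64_t);
	io_array[3].iov_base = &down;       io_array[3].iov_len = sizeof(down);

	return writev(devfd, io_array, 4) < 0 ? -1 : 0;
}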
+
+Responses to kernel requests are all packaged in pvfs2_downcall_t
+structs. Besides a few other members, pvfs2_downcall_t contains a
+union of structs, each of which is associated with a particular
+response type.
+
+The several members outside of the union are:
+  - int32_t type - type of operation.
+  - int32_t status - return code for the operation.
+  - int64_t trailer_size - 0 unless readdir operation.
+  - char *trailer_buf - initialized to NULL, used during readdir operations.
+
+The appropriate member inside the union is filled out for any
+particular response.
+
+  PVFS2_VFS_OP_FILE_IO
+    fill a pvfs2_io_response_t
+
+  PVFS2_VFS_OP_LOOKUP
+    fill a PVFS_object_kref
+
+  PVFS2_VFS_OP_CREATE
+    fill a PVFS_object_kref
+
+  PVFS2_VFS_OP_SYMLINK
+    fill a PVFS_object_kref
+
+  PVFS2_VFS_OP_GETATTR
+    fill in a PVFS_sys_attr_s (tons of stuff the kernel doesn't need)
+    fill in a string with the link target when the object is a symlink.
+
+  PVFS2_VFS_OP_MKDIR
+    fill a PVFS_object_kref
+
+  PVFS2_VFS_OP_STATFS
+    fill a pvfs2_statfs_response_t with useless info <g>. It is hard for
+    us to know, in a timely fashion, these statistics about our
+    distributed network filesystem.
+
+  PVFS2_VFS_OP_FS_MOUNT
+    fill a pvfs2_fs_mount_response_t which is just like a PVFS_object_kref
+    except its members are in a different order and "__pad1" is replaced
+    with "id".
+
+  PVFS2_VFS_OP_GETXATTR
+    fill a pvfs2_getxattr_response_t
+
+  PVFS2_VFS_OP_LISTXATTR
+    fill a pvfs2_listxattr_response_t
+
+  PVFS2_VFS_OP_PARAM
+    fill a pvfs2_param_response_t
+
+  PVFS2_VFS_OP_PERF_COUNT
+    fill a pvfs2_perf_count_response_t
+
+  PVFS2_VFS_OP_FSKEY
+    fill a pvfs2_fs_key_response_t
+
+  PVFS2_VFS_OP_READDIR
+    jam everything needed to represent a pvfs2_readdir_response_t into
+    the readdir buffer descriptor specified in the upcall.
+
+Userspace uses writev() on /dev/pvfs2-req to pass responses to the requests
+made by the kernel side.
+
+A buffer_list containing:
+  - a pointer to the prepared response to the request from the
+    kernel (struct pvfs2_downcall_t).
+  - and also, in the case of a readdir request, a pointer to a
+    buffer containing descriptors for the objects in the target
+    directory.
+... is sent to the function (PINT_dev_write_list) which performs
+the writev.
+
+PINT_dev_write_list has a local iovec array: struct iovec io_array[10];
+
+The first four elements of io_array are initialized like this for all
+responses:
+
+  io_array[0].iov_base = address of local variable "proto_ver" (int32_t)
+  io_array[0].iov_len = sizeof(int32_t)
+
+  io_array[1].iov_base = address of global variable "pdev_magic" (int32_t)
+  io_array[1].iov_len = sizeof(int32_t)
+
+  io_array[2].iov_base = address of parameter "tag" (PVFS_id_gen_t)
+  io_array[2].iov_len = sizeof(int64_t)
+
+  io_array[3].iov_base = address of out_downcall member (pvfs2_downcall_t)
+                         of global variable vfs_request (vfs_request_t)
+  io_array[3].iov_len = sizeof(pvfs2_downcall_t)
+
+Readdir responses initialize the fifth element of io_array like this:
+
+  io_array[4].iov_base = contents of member trailer_buf (char *)
+                         from out_downcall member of global variable
+                         vfs_request
+  io_array[4].iov_len = contents of member trailer_size (PVFS_size)
+                        from out_downcall member of global variable
+                        vfs_request
+
diff --git a/Documentation/kasan.txt b/Documentation/kasan.txt
index aa1e0c91e368..7dd95b35cd7c 100644
--- a/Documentation/kasan.txt
+++ b/Documentation/kasan.txt
@@ -12,8 +12,7 @@ KASAN uses compile-time instrumentation for checking every memory
 access, therefore you will need a GCC version 4.9.2 or later.
GCC 5.0 or later is required for detection of out-of-bounds accesses to stack or global variables. -Currently KASAN is supported only for x86_64 architecture and requires the -kernel to be built with the SLUB allocator. +Currently KASAN is supported only for x86_64 architecture. 1. Usage ======== @@ -27,7 +26,7 @@ inline are compiler instrumentation types. The former produces smaller binary the latter is 1.1 - 2 times faster. Inline instrumentation requires a GCC version 5.0 or later. -Currently KASAN works only with the SLUB memory allocator. +KASAN works with both SLUB and SLAB memory allocators. For better bug detection and nicer reporting, enable CONFIG_STACKTRACE. To disable instrumentation for specific files or directories, add a line diff --git a/MAINTAINERS b/MAINTAINERS index cdee67fa9156..03e00c7c88eb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -228,13 +228,13 @@ F: kernel/sys_ni.c ABIT UGURU 1,2 HARDWARE MONITOR DRIVER M: Hans de Goede <[email protected]> S: Maintained F: drivers/hwmon/abituguru.c ABIT UGURU 3 HARDWARE MONITOR DRIVER M: Alistair John Strachan <[email protected]> S: Maintained F: drivers/hwmon/abituguru3.c @@ -392,14 +392,14 @@ F: Documentation/devicetree/bindings/net/ieee802154/adf7242.txt ADM1025 HARDWARE MONITOR DRIVER M: Jean Delvare <[email protected]> S: Maintained F: Documentation/hwmon/adm1025 F: drivers/hwmon/adm1025.c ADM1029 HARDWARE MONITOR DRIVER M: Corentin Labbe <[email protected]> S: Maintained F: drivers/hwmon/adm1029.c @@ -444,7 +444,7 @@ F: drivers/video/backlight/adp8860_bl.c ADS1015 HARDWARE MONITOR DRIVER M: Dirk Eibach <[email protected]> S: Maintained F: Documentation/hwmon/ads1015 F: drivers/hwmon/ads1015.c @@ -457,7 +457,7 @@ F: drivers/macintosh/therm_adt746x.c ADT7475 HARDWARE MONITOR DRIVER M: Jean Delvare <[email protected]> S: Maintained F: Documentation/hwmon/adt7475 F: drivers/hwmon/adt7475.c @@ -634,7 +634,7 @@ F: include/linux/ccp.h AMD FAM15H PROCESSOR POWER MONITORING DRIVER M: Huang Rui <[email protected]> S: Supported F: Documentation/hwmon/fam15h_power F: drivers/hwmon/fam15h_power.c @@ -806,7 +806,7 @@ F: drivers/input/mouse/bcm5974.c APPLE SMC DRIVER M: Henrik Rydberg <[email protected]> S: Odd fixes F: drivers/hwmon/applesmc.c @@ -1865,7 +1865,7 @@ F: include/media/i2c/as3645a.h ASC7621 HARDWARE MONITOR DRIVER M: George Joseph <[email protected]> S: Maintained F: Documentation/hwmon/asc7621 F: drivers/hwmon/asc7621.c @@ -1958,7 +1958,7 @@ F: drivers/net/wireless/ath/carl9170/ ATK0110 HWMON DRIVER M: Luca Tettamanti <[email protected]> S: Maintained F: drivers/hwmon/asus_atk0110.c @@ -3094,7 +3094,7 @@ F: mm/swap_cgroup.c CORETEMP HARDWARE MONITORING DRIVER M: Fenghua Yu <[email protected]> S: Maintained F: Documentation/hwmon/coretemp F: drivers/hwmon/coretemp.c @@ -3683,7 +3683,7 @@ T: git git://git.infradead.org/users/vkoul/slave-dma.git DME1737 HARDWARE MONITOR DRIVER M: Juerg Haefliger <[email protected]> S: Maintained F: Documentation/hwmon/dme1737 F: drivers/hwmon/dme1737.c @@ -4381,7 +4381,7 @@ F: include/video/exynos_mipi* F71805F HARDWARE MONITORING DRIVER M: Jean Delvare <[email protected]> S: Maintained F: Documentation/hwmon/f71805f F: drivers/hwmon/f71805f.c @@ -4460,7 +4460,7 @@ F: fs/* FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER M: Riku Voipio <[email protected]> S: Maintained F: drivers/hwmon/f75375s.c F: include/linux/f75375s.h @@ -5021,8 +5021,8 @@ F: drivers/media/usb/hackrf/ HARDWARE MONITORING M: Jean Delvare <[email protected]> M: Guenter Roeck <[email protected]> -W: 
http://www.lm-sensors.org/ +W: http://hwmon.wiki.kernel.org/ T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git S: Maintained @@ -5554,7 +5554,7 @@ F: drivers/usb/atm/ueagle-atm.c INA209 HARDWARE MONITOR DRIVER M: Guenter Roeck <[email protected]> S: Maintained F: Documentation/hwmon/ina209 F: Documentation/devicetree/bindings/i2c/ina209.txt @@ -5562,7 +5562,7 @@ F: drivers/hwmon/ina209.c INA2XX HARDWARE MONITOR DRIVER M: Guenter Roeck <[email protected]> S: Maintained F: Documentation/hwmon/ina2xx F: drivers/hwmon/ina2xx.c @@ -6067,7 +6067,7 @@ F: drivers/isdn/hardware/eicon/ IT87 HARDWARE MONITORING DRIVER M: Jean Delvare <[email protected]> S: Maintained F: Documentation/hwmon/it87 F: drivers/hwmon/it87.c @@ -6103,7 +6103,7 @@ F: drivers/media/dvb-frontends/ix2505v* JC42.4 TEMPERATURE SENSOR DRIVER M: Guenter Roeck <[email protected]> S: Maintained F: drivers/hwmon/jc42.c F: Documentation/hwmon/jc42 @@ -6153,18 +6153,32 @@ F: drivers/tty/serial/jsm/ K10TEMP HARDWARE MONITORING DRIVER M: Clemens Ladisch <[email protected]> S: Maintained F: Documentation/hwmon/k10temp F: drivers/hwmon/k10temp.c K8TEMP HARDWARE MONITORING DRIVER M: Rudolf Marek <[email protected]> S: Maintained F: Documentation/hwmon/k8temp F: drivers/hwmon/k8temp.c +KASAN +M: Andrey Ryabinin <[email protected]> +R: Alexander Potapenko <[email protected]> +R: Dmitry Vyukov <[email protected]> +S: Maintained +F: arch/*/include/asm/kasan.h +F: arch/*/mm/kasan_init* +F: Documentation/kasan.txt +F: include/linux/kasan.h +F: lib/test_kasan.c +F: mm/kasan/ +F: scripts/Makefile.kasan + KCONFIG M: "Yann E. MORIN" <[email protected]> @@ -6693,27 +6707,27 @@ F: net/llc/ LM73 HARDWARE MONITOR DRIVER M: Guillaume Ligneul <[email protected]> S: Maintained F: drivers/hwmon/lm73.c LM78 HARDWARE MONITOR DRIVER M: Jean Delvare <[email protected]> S: Maintained F: Documentation/hwmon/lm78 F: drivers/hwmon/lm78.c LM83 HARDWARE MONITOR DRIVER M: Jean Delvare <[email protected]> S: Maintained F: Documentation/hwmon/lm83 F: drivers/hwmon/lm83.c LM90 HARDWARE MONITOR DRIVER M: Jean Delvare <[email protected]> S: Maintained F: Documentation/hwmon/lm90 F: Documentation/devicetree/bindings/hwmon/lm90.txt @@ -6721,7 +6735,7 @@ F: drivers/hwmon/lm90.c LM95234 HARDWARE MONITOR DRIVER M: Guenter Roeck <[email protected]> S: Maintained F: Documentation/hwmon/lm95234 F: drivers/hwmon/lm95234.c @@ -6787,7 +6801,7 @@ F: drivers/scsi/sym53c8xx_2/ LTC4261 HARDWARE MONITOR DRIVER M: Guenter Roeck <[email protected]> S: Maintained F: Documentation/hwmon/ltc4261 F: drivers/hwmon/ltc4261.c @@ -6957,28 +6971,28 @@ F: include/uapi/linux/matroxfb.h MAX16065 HARDWARE MONITOR DRIVER M: Guenter Roeck <[email protected]> S: Maintained F: Documentation/hwmon/max16065 F: drivers/hwmon/max16065.c MAX20751 HARDWARE MONITOR DRIVER M: Guenter Roeck <[email protected]> S: Maintained F: Documentation/hwmon/max20751 F: drivers/hwmon/max20751.c MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER M: "Hans J. 
Koch" <[email protected]> S: Maintained F: Documentation/hwmon/max6650 F: drivers/hwmon/max6650.c MAX6697 HARDWARE MONITOR DRIVER M: Guenter Roeck <[email protected]> S: Maintained F: Documentation/hwmon/max6697 F: Documentation/devicetree/bindings/i2c/max6697.txt @@ -7547,7 +7561,7 @@ F: drivers/scsi/NCR_D700.* NCT6775 HARDWARE MONITOR DRIVER M: Guenter Roeck <[email protected]> S: Maintained F: Documentation/hwmon/nct6775 F: drivers/hwmon/nct6775.c @@ -8237,6 +8251,14 @@ S: Supported F: fs/overlayfs/ F: Documentation/filesystems/overlayfs.txt +ORANGEFS FILESYSTEM +M: Mike Marshall <[email protected]> +T: git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git +S: Supported +F: fs/orangefs/ +F: Documentation/filesystems/orangefs.txt + P54 WIRELESS DRIVER M: Christian Lamparter <[email protected]> @@ -8337,7 +8359,7 @@ F: drivers/video/logo/logo_parisc* PC87360 HARDWARE MONITORING DRIVER M: Jim Cromie <[email protected]> S: Maintained F: Documentation/hwmon/pc87360 F: drivers/hwmon/pc87360.c @@ -8349,7 +8371,7 @@ F: drivers/char/pc8736x_gpio.c PC87427 HARDWARE MONITORING DRIVER M: Jean Delvare <[email protected]> S: Maintained F: Documentation/hwmon/pc87427 F: drivers/hwmon/pc87427.c @@ -8729,8 +8751,8 @@ F: drivers/rtc/rtc-puv3.c PMBUS HARDWARE MONITORING DRIVERS M: Guenter Roeck <[email protected]> -W: http://www.lm-sensors.org/ +W: http://hwmon.wiki.kernel.org/ W: http://www.roeck-us.net/linux/drivers/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git S: Maintained @@ -8935,7 +8957,7 @@ F: drivers/media/usb/pwc/* PWM FAN DRIVER M: Kamil Debski <[email protected]> S: Supported F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt F: Documentation/hwmon/pwm-fan @@ -10256,28 +10278,28 @@ F: Documentation/devicetree/bindings/media/i2c/nokia,smia.txt SMM665 HARDWARE MONITOR DRIVER M: Guenter Roeck <[email protected]> S: Maintained F: Documentation/hwmon/smm665 F: drivers/hwmon/smm665.c SMSC EMC2103 HARDWARE MONITOR DRIVER M: Steve Glendinning <[email protected]> S: Maintained F: Documentation/hwmon/emc2103 F: drivers/hwmon/emc2103.c SMSC SCH5627 HARDWARE MONITOR DRIVER M: Hans de Goede <[email protected]> S: Supported F: Documentation/hwmon/sch5627 F: drivers/hwmon/sch5627.c SMSC47B397 HARDWARE MONITOR DRIVER M: Jean Delvare <[email protected]> S: Maintained F: Documentation/hwmon/smsc47b397 F: drivers/hwmon/smsc47b397.c @@ -11205,7 +11227,7 @@ F: include/linux/mmc/sh_mobile_sdhi.h TMP401 HARDWARE MONITOR DRIVER M: Guenter Roeck <[email protected]> S: Maintained F: Documentation/hwmon/tmp401 F: drivers/hwmon/tmp401.c @@ -11958,14 +11980,14 @@ F: Documentation/networking/vrf.txt VT1211 HARDWARE MONITOR DRIVER M: Juerg Haefliger <[email protected]> S: Maintained F: Documentation/hwmon/vt1211 F: drivers/hwmon/vt1211.c VT8231 HARDWARE MONITOR DRIVER M: Roger Lucas <[email protected]> S: Maintained F: drivers/hwmon/vt8231.c @@ -11984,21 +12006,21 @@ F: drivers/w1/ W83791D HARDWARE MONITORING DRIVER M: Marc Hulsman <[email protected]> S: Maintained F: Documentation/hwmon/w83791d F: drivers/hwmon/w83791d.c W83793 HARDWARE MONITORING DRIVER M: Rudolf Marek <[email protected]> S: Maintained F: Documentation/hwmon/w83793 F: drivers/hwmon/w83793.c W83795 HARDWARE MONITORING DRIVER M: Jean Delvare <[email protected]> S: Maintained F: drivers/hwmon/w83795.c @@ -1,7 +1,7 @@ VERSION = 4 -PATCHLEVEL = 5 +PATCHLEVEL = 6 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc1 NAME = Blurry Fish Butt # *DOCUMENTATION* diff --git a/arch/arc/include/asm/page.h 
b/arch/arc/include/asm/page.h index 9a7cf521b95c..36da89e2c853 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -12,9 +12,6 @@ #ifndef __ASSEMBLY__ -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h index 5abaf5bbd985..bf1991263d2d 100644 --- a/arch/arm/include/asm/exception.h +++ b/arch/arm/include/asm/exception.h @@ -7,7 +7,7 @@ #ifndef __ASM_ARM_EXCEPTION_H #define __ASM_ARM_EXCEPTION_H -#include <linux/ftrace.h> +#include <linux/interrupt.h> #define __exception __attribute__((section(".exception.text"))) #ifdef CONFIG_FUNCTION_GRAPH_TRACER diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h index d1b162a18dcb..503f488053de 100644 --- a/arch/arm/include/asm/page-nommu.h +++ b/arch/arm/include/asm/page-nommu.h @@ -17,9 +17,6 @@ #define KTHREAD_SIZE PAGE_SIZE #endif -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - #define clear_page(page) memset((page), 0, PAGE_SIZE) #define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 1fab979daeaf..e2c6da096cef 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -108,6 +108,7 @@ SECTIONS *(.exception.text) __exception_text_end = .; IRQENTRY_TEXT + SOFTIRQENTRY_TEXT TEXT_TEXT SCHED_TEXT LOCK_TEXT diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 6cb7e1a6bc02..0c2eec490abf 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -18,7 +18,7 @@ #ifndef __ASM_EXCEPTION_H #define __ASM_EXCEPTION_H -#include <linux/ftrace.h> +#include <linux/interrupt.h> #define __exception __attribute__((section(".exception.text"))) #ifdef CONFIG_FUNCTION_GRAPH_TRACER diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 37f624df68fa..5a1939a74ff3 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -103,6 +103,7 @@ SECTIONS *(.exception.text) __exception_text_end = .; IRQENTRY_TEXT + SOFTIRQENTRY_TEXT TEXT_TEXT SCHED_TEXT LOCK_TEXT diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index c9eec84aa258..d920b959ff3a 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -35,6 +35,7 @@ SECTIONS #endif LOCK_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT KPROBES_TEXT #ifdef CONFIG_ROMKERNEL __sinittext = .; diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S index 5a6e141d1641..50bc10f97bcb 100644 --- a/arch/c6x/kernel/vmlinux.lds.S +++ b/arch/c6x/kernel/vmlinux.lds.S @@ -72,6 +72,7 @@ SECTIONS SCHED_TEXT LOCK_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT KPROBES_TEXT *(.fixup) *(.gnu.warning) diff --git a/arch/frv/include/asm/page.h b/arch/frv/include/asm/page.h index 688d8076a43a..ec5eebce4fb3 100644 --- a/arch/frv/include/asm/page.h +++ b/arch/frv/include/asm/page.h @@ -8,9 +8,6 @@ #ifndef __ASSEMBLY__ -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - #define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) #define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) diff --git a/arch/ia64/include/asm/unistd.h 
b/arch/ia64/include/asm/unistd.h index 6a8685051b67..8c85209753ab 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -11,7 +11,7 @@ -#define NR_syscalls 324 /* length of syscall table */ +#define NR_syscalls 326 /* length of syscall table */ /* * The following defines stop scripts/checksyscalls.sh from complaining about diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h index 41369a103838..ea5363daa7bd 100644 --- a/arch/ia64/include/uapi/asm/unistd.h +++ b/arch/ia64/include/uapi/asm/unistd.h @@ -337,5 +337,7 @@ #define __NR_kcmp 1345 #define __NR_mlock2 1346 #define __NR_copy_file_range 1347 +#define __NR_preadv2 1348 +#define __NR_pwritev2 1349 #endif /* _UAPI_ASM_IA64_UNISTD_H */ diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 477c55e244ec..cfaa7b25084c 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1773,5 +1773,7 @@ sys_call_table: data8 sys_kcmp // 1345 data8 sys_mlock2 data8 sys_copy_file_range + data8 sys_preadv2 + data8 sys_pwritev2 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h index 5029f73e6294..e7a1946455a8 100644 --- a/arch/m68k/include/asm/page_mm.h +++ b/arch/m68k/include/asm/page_mm.h @@ -6,9 +6,6 @@ #include <linux/compiler.h> #include <asm/module.h> -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - /* * We don't need to check for alignment etc. */ diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h index ef209169579a..fa7f32d9836b 100644 --- a/arch/m68k/include/asm/page_no.h +++ b/arch/m68k/include/asm/page_no.h @@ -6,9 +6,6 @@ extern unsigned long memory_start; extern unsigned long memory_end; -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - #define clear_page(page) memset((page), 0, PAGE_SIZE) #define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) diff --git a/arch/metag/kernel/vmlinux.lds.S b/arch/metag/kernel/vmlinux.lds.S index e12055e88bfe..150ace92c7ad 100644 --- a/arch/metag/kernel/vmlinux.lds.S +++ b/arch/metag/kernel/vmlinux.lds.S @@ -24,6 +24,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.text.*) *(.gnu.warning) } diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index be9488d69734..0a47f0410554 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -36,6 +36,7 @@ SECTIONS { LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT . = ALIGN (4) ; _etext = . 
; } diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 0a93e83cd014..54d653ee17e1 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -58,6 +58,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.text.*) *(.fixup) *(.gnu.warning) diff --git a/arch/nios2/kernel/vmlinux.lds.S b/arch/nios2/kernel/vmlinux.lds.S index 326fab40a9de..e23e89539967 100644 --- a/arch/nios2/kernel/vmlinux.lds.S +++ b/arch/nios2/kernel/vmlinux.lds.S @@ -39,6 +39,7 @@ SECTIONS SCHED_TEXT LOCK_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT KPROBES_TEXT } =0 _etext = .; diff --git a/arch/openrisc/include/asm/page.h b/arch/openrisc/include/asm/page.h index 108906f991d6..e613d3673034 100644 --- a/arch/openrisc/include/asm/page.h +++ b/arch/openrisc/include/asm/page.h @@ -40,9 +40,6 @@ #ifndef __ASSEMBLY__ -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - #define clear_page(page) memset((page), 0, PAGE_SIZE) #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S index 2d69a853b742..d936de4c07ca 100644 --- a/arch/openrisc/kernel/vmlinux.lds.S +++ b/arch/openrisc/kernel/vmlinux.lds.S @@ -50,6 +50,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.fixup) *(.text.__*) _etext = .; diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 308f29081d46..f3ead0b6ce46 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -72,6 +72,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.text.do_softirq) *(.text.sys_exit) *(.text.do_sigaltstack) diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index d41fd0af8980..2dd91f79de05 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -55,6 +55,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT #ifdef CONFIG_PPC32 *(.got1) diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 445657fe658c..0f41a8286378 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -28,6 +28,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) } :text = 0x0700 diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index db88cbf9eafd..235a4101999f 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -39,6 +39,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) _etext = .; /* End of text section */ diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index f1a2f688b28a..aadd321aa05d 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -48,6 +48,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.gnu.warning) } = 0 _etext = .; diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S index 0e059a0101ea..378f5d8d1ec8 100644 --- a/arch/tile/kernel/vmlinux.lds.S +++ b/arch/tile/kernel/vmlinux.lds.S @@ -45,6 +45,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT __fix_text_end = .; /* tile-cpack won't rearrange before this */ ALIGN_FUNCTION(); *(.hottext*) diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index adaae2c781c1..616ebd22ef9a 100644 --- a/arch/x86/kernel/Makefile +++ 
b/arch/x86/kernel/Makefile @@ -19,6 +19,7 @@ endif KASAN_SANITIZE_head$(BITS).o := n KASAN_SANITIZE_dumpstack.o := n KASAN_SANITIZE_dumpstack_$(BITS).o := n +KASAN_SANITIZE_stacktrace.o := n OBJECT_FILES_NON_STANDARD_head_$(BITS).o := y OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index d239639e0c1d..4c941f88d405 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -101,6 +101,7 @@ SECTIONS KPROBES_TEXT ENTRY_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) /* End of text section */ diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index d0aad06b3872..f245bf35bedb 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -145,6 +145,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "AMD0010", APD_ADDR(cz_i2c_desc) }, { "AMDI0010", APD_ADDR(cz_i2c_desc) }, { "AMD0020", APD_ADDR(cz_uart_desc) }, + { "AMDI0020", APD_ADDR(cz_uart_desc) }, { "AMD0030", }, #endif #ifdef CONFIG_ARM64 diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 2aee41655ce9..f2fd3fee588a 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -816,6 +816,7 @@ struct fwnode_handle *acpi_get_next_subnode(struct device *dev, next = adev->node.next; if (next == head) { child = NULL; + adev = ACPI_COMPANION(dev); goto nondev; } adev = list_entry(next, struct acpi_device, node); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index d02fd53042a5..56241eb341f4 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -27,8 +27,20 @@ #ifdef CONFIG_X86 #define valid_IRQ(i) (((i) != 0) && ((i) != 2)) +static inline bool acpi_iospace_resource_valid(struct resource *res) +{ + /* On X86 IO space is limited to the [0 - 64K] IO port range */ + return res->end < 0x10003; +} #else #define valid_IRQ(i) (true) +/* + * ACPI IO descriptors on arches other than X86 contain MMIO CPU physical + * addresses mapping IO space in CPU physical address space, IO space + * resources can be placed anywhere in the 64-bit physical address space. + */ +static inline bool +acpi_iospace_resource_valid(struct resource *res) { return true; } #endif static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io) @@ -127,7 +139,7 @@ static void acpi_dev_ioresource_flags(struct resource *res, u64 len, if (!acpi_dev_resource_len_valid(res->start, res->end, len, true)) res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET; - if (res->end >= 0x10003) + if (!acpi_iospace_resource_valid(res)) res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET; if (io_decode == ACPI_DECODE_16) diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index fbfcce3b5227..2a8b59644297 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -748,6 +748,7 @@ static int acpi_hibernation_enter(void) static void acpi_hibernation_leave(void) { + pm_set_resume_via_firmware(); /* * If ACPI is not enabled by the BIOS and the boot kernel, we need to * enable it here. 
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index f12a72428aac..050673f0c0b3 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -692,7 +692,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs) mask = obj->integer.value; else if (obj->type == ACPI_TYPE_BUFFER) for (i = 0; i < obj->buffer.length && i < 8; i++) - mask |= (((u8)obj->buffer.pointer[i]) << (i * 8)); + mask |= (((u64)obj->buffer.pointer[i]) << (i * 8)); ACPI_FREE(obj); /* diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 272a52ebafc0..0e64a1b5e62a 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -137,6 +137,62 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk) return __pm_clk_add(dev, NULL, clk); } + +/** + * of_pm_clk_add_clks - Start using device clock(s) for power management. + * @dev: Device whose clock(s) is going to be used for power management. + * + * Add a series of clocks described in the 'clocks' device-tree node for + * a device to the list of clocks used for the power management of @dev. + * On success, returns the number of clocks added. Returns a negative + * error code if there are no clocks in the device node for the device + * or if adding a clock fails. + */ +int of_pm_clk_add_clks(struct device *dev) +{ + struct clk **clks; + unsigned int i, count; + int ret; + + if (!dev || !dev->of_node) + return -EINVAL; + + count = of_count_phandle_with_args(dev->of_node, "clocks", + "#clock-cells"); + if (count == 0) + return -ENODEV; + + clks = kcalloc(count, sizeof(*clks), GFP_KERNEL); + if (!clks) + return -ENOMEM; + + for (i = 0; i < count; i++) { + clks[i] = of_clk_get(dev->of_node, i); + if (IS_ERR(clks[i])) { + ret = PTR_ERR(clks[i]); + goto error; + } + + ret = pm_clk_add_clk(dev, clks[i]); + if (ret) { + clk_put(clks[i]); + goto error; + } + } + + kfree(clks); + + return i; + +error: + while (i--) + pm_clk_remove_clk(dev, clks[i]); + + kfree(clks); + + return ret; +} + /** * __pm_clk_remove - Destroy PM clock entry. * @ce: PM clock entry to destroy. @@ -198,6 +254,39 @@ void pm_clk_remove(struct device *dev, const char *con_id) } /** + * pm_clk_remove_clk - Stop using a device clock for power management. + * @dev: Device whose clock should not be used for PM any more. + * @clk: Clock pointer + * + * Remove the clock pointed to by @clk from the list of clocks used for + * the power management of @dev. + */ +void pm_clk_remove_clk(struct device *dev, struct clk *clk) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + struct pm_clock_entry *ce; + + if (!psd || !clk) + return; + + spin_lock_irq(&psd->lock); + + list_for_each_entry(ce, &psd->clock_list, node) { + if (clk == ce->clk) + goto remove; + } + + spin_unlock_irq(&psd->lock); + return; + + remove: + list_del(&ce->node); + spin_unlock_irq(&psd->lock); + + __pm_clk_remove(ce); +} + +/** * pm_clk_init - Initialize a device's list of power management clocks. * @dev: Device to initialize the list of PM clocks for. * diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4a876785b68c..9c6234428607 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1847,14 +1847,12 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, if (osd_req->r_result < 0) obj_request->result = osd_req->r_result; - rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP); - /* * We support a 64-bit length, but ultimately it has to be * passed to the block layer, which just supports a 32-bit * length field. 
*/ - obj_request->xferred = osd_req->r_reply_op_len[0]; + obj_request->xferred = osd_req->r_ops[0].outdata_len; rbd_assert(obj_request->xferred < (u64)UINT_MAX); opcode = osd_req->r_ops[0].op; @@ -5643,18 +5641,12 @@ static void rbd_sysfs_cleanup(void) static int rbd_slab_init(void) { rbd_assert(!rbd_img_request_cache); - rbd_img_request_cache = kmem_cache_create("rbd_img_request", - sizeof (struct rbd_img_request), - __alignof__(struct rbd_img_request), - 0, NULL); + rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0); if (!rbd_img_request_cache) return -ENOMEM; rbd_assert(!rbd_obj_request_cache); - rbd_obj_request_cache = kmem_cache_create("rbd_obj_request", - sizeof (struct rbd_obj_request), - __alignof__(struct rbd_obj_request), - 0, NULL); + rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0); if (!rbd_obj_request_cache) goto out_err; diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index d23368874710..f8a483c67b07 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -286,7 +286,7 @@ static int register_device(int minor, struct pp_struct *pp) struct parport *port; struct pardevice *pdev = NULL; char *name; - struct pardev_cb ppdev_cb; + int fl; name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); if (name == NULL) @@ -299,11 +299,9 @@ static int register_device(int minor, struct pp_struct *pp) return -ENXIO; } - memset(&ppdev_cb, 0, sizeof(ppdev_cb)); - ppdev_cb.irq_func = pp_irq; - ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; - ppdev_cb.private = pp; - pdev = parport_register_dev_model(port, name, &ppdev_cb, minor); + fl = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; + pdev = parport_register_device(port, name, NULL, + NULL, pp_irq, fl, pp); parport_put_port(port); if (!pdev) { @@ -801,23 +799,10 @@ static void pp_detach(struct parport *port) device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number)); } -static int pp_probe(struct pardevice *par_dev) -{ - struct device_driver *drv = par_dev->dev.driver; - int len = strlen(drv->name); - - if (strncmp(par_dev->name, drv->name, len)) - return -ENODEV; - - return 0; -} - static struct parport_driver pp_driver = { .name = CHRDEV, - .probe = pp_probe, - .match_port = pp_attach, + .attach = pp_attach, .detach = pp_detach, - .devmodel = true, }; static int __init ppdev_init(void) diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 59a7b380fbe2..fb5712141040 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -245,7 +245,7 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) } } -u32 cpu_freq_read_intel(struct acpi_pct_register *not_used) +static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used) { u32 val, dummy; @@ -253,7 +253,7 @@ u32 cpu_freq_read_intel(struct acpi_pct_register *not_used) return val; } -void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val) +static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val) { u32 lo, hi; @@ -262,7 +262,7 @@ void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val) wrmsr(MSR_IA32_PERF_CTL, lo, hi); } -u32 cpu_freq_read_amd(struct acpi_pct_register *not_used) +static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used) { u32 val, dummy; @@ -270,12 +270,12 @@ u32 cpu_freq_read_amd(struct acpi_pct_register *not_used) return val; } -void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val) +static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val) { wrmsr(MSR_AMD_PERF_CTL, val, 0); } -u32 
cpu_freq_read_io(struct acpi_pct_register *reg) +static u32 cpu_freq_read_io(struct acpi_pct_register *reg) { u32 val; @@ -283,7 +283,7 @@ u32 cpu_freq_read_io(struct acpi_pct_register *reg) return val; } -void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val) +static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val) { acpi_os_write_port(reg->address, val, reg->bit_width); } @@ -514,8 +514,10 @@ static int boost_notify(struct notifier_block *nb, unsigned long action, */ switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: + case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask); break; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 4c7825856eab..b87596b591b3 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -76,6 +76,7 @@ static inline bool has_target(void) /* internal prototypes */ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); static unsigned int __cpufreq_get(struct cpufreq_policy *policy); +static int cpufreq_start_governor(struct cpufreq_policy *policy); /** * Two notifier lists: the "policy" list is involved in the @@ -964,10 +965,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp cpumask_set_cpu(cpu, policy->cpus); if (has_target()) { - ret = cpufreq_governor(policy, CPUFREQ_GOV_START); - if (!ret) - ret = cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); - + ret = cpufreq_start_governor(policy); if (ret) pr_err("%s: Failed to start governor\n", __func__); } @@ -1308,10 +1306,7 @@ static void cpufreq_offline(unsigned int cpu) /* Start governor again for active policy */ if (!policy_is_inactive(policy)) { if (has_target()) { - ret = cpufreq_governor(policy, CPUFREQ_GOV_START); - if (!ret) - ret = cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); - + ret = cpufreq_start_governor(policy); if (ret) pr_err("%s: Failed to start governor\n", __func__); } @@ -1401,9 +1396,17 @@ unsigned int cpufreq_quick_get(unsigned int cpu) { struct cpufreq_policy *policy; unsigned int ret_freq = 0; + unsigned long flags; - if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) - return cpufreq_driver->get(cpu); + read_lock_irqsave(&cpufreq_driver_lock, flags); + + if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) { + ret_freq = cpufreq_driver->get(cpu); + read_unlock_irqrestore(&cpufreq_driver_lock, flags); + return ret_freq; + } + + read_unlock_irqrestore(&cpufreq_driver_lock, flags); policy = cpufreq_cpu_get(cpu); if (policy) { @@ -1484,6 +1487,24 @@ unsigned int cpufreq_get(unsigned int cpu) } EXPORT_SYMBOL(cpufreq_get); +static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy) +{ + unsigned int new_freq; + + new_freq = cpufreq_driver->get(policy->cpu); + if (!new_freq) + return 0; + + if (!policy->cur) { + pr_debug("cpufreq: Driver did not initialize current freq\n"); + policy->cur = new_freq; + } else if (policy->cur != new_freq && has_target()) { + cpufreq_out_of_sync(policy, new_freq); + } + + return new_freq; +} + static struct subsys_interface cpufreq_interface = { .name = "cpufreq", .subsys = &cpu_subsys, @@ -1583,9 +1604,7 @@ void cpufreq_resume(void) policy); } else { down_write(&policy->rwsem); - ret = cpufreq_governor(policy, CPUFREQ_GOV_START); - if (!ret) - cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); + ret = cpufreq_start_governor(policy); up_write(&policy->rwsem); if (ret) @@ 
-1593,17 +1612,6 @@ void cpufreq_resume(void) __func__, policy); } } - - /* - * schedule call cpufreq_update_policy() for first-online CPU, as that - * wouldn't be hotplugged-out on suspend. It will verify that the - * current freq is in sync with what we believe it to be. - */ - policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask)); - if (WARN_ON(!policy)) - return; - - schedule_work(&policy->update); } /** @@ -1927,6 +1935,17 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) return ret; } +static int cpufreq_start_governor(struct cpufreq_policy *policy) +{ + int ret; + + if (cpufreq_driver->get && !cpufreq_driver->setpolicy) + cpufreq_update_current_freq(policy); + + ret = cpufreq_governor(policy, CPUFREQ_GOV_START); + return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); +} + int cpufreq_register_governor(struct cpufreq_governor *governor) { int err; @@ -2063,8 +2082,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, return cpufreq_driver->setpolicy(new_policy); } - if (new_policy->governor == policy->governor) - goto out; + if (new_policy->governor == policy->governor) { + pr_debug("cpufreq: governor limits update\n"); + return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); + } pr_debug("governor switch\n"); @@ -2092,10 +2113,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, policy->governor = new_policy->governor; ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); if (!ret) { - ret = cpufreq_governor(policy, CPUFREQ_GOV_START); - if (!ret) - goto out; - + ret = cpufreq_start_governor(policy); + if (!ret) { + pr_debug("cpufreq: governor change\n"); + return 0; + } cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); } @@ -2106,14 +2128,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) policy->governor = NULL; else - cpufreq_governor(policy, CPUFREQ_GOV_START); + cpufreq_start_governor(policy); } return ret; - - out: - pr_debug("governor: change or update limits\n"); - return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); } /** @@ -2144,19 +2162,11 @@ int cpufreq_update_policy(unsigned int cpu) * -> ask driver for current freq and notify governors about a change */ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { - new_policy.cur = cpufreq_driver->get(cpu); + new_policy.cur = cpufreq_update_current_freq(policy); if (WARN_ON(!new_policy.cur)) { ret = -EIO; goto unlock; } - - if (!policy->cur) { - pr_debug("Driver did not initialize current freq\n"); - policy->cur = new_policy.cur; - } else { - if (policy->cur != new_policy.cur && has_target()) - cpufreq_out_of_sync(policy, new_policy.cur); - } } ret = cpufreq_set_policy(policy, &new_policy); diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 1c25ef405616..10a5cfeae8c5 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -329,7 +329,7 @@ static void dbs_irq_work(struct irq_work *irq_work) struct policy_dbs_info *policy_dbs; policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work); - schedule_work(&policy_dbs->work); + schedule_work_on(smp_processor_id(), &policy_dbs->work); } static void dbs_update_util_handler(struct update_util_data *data, u64 time, diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index cb5607495816..4b644526fd59 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -134,7 +134,7 @@ struct pstate_funcs { int 
(*get_min)(void); int (*get_turbo)(void); int (*get_scaling)(void); - void (*set)(struct cpudata*, int pstate); + u64 (*get_val)(struct cpudata*, int pstate); void (*get_vid)(struct cpudata *); int32_t (*get_target_pstate)(struct cpudata *); }; @@ -565,7 +565,7 @@ static int atom_get_turbo_pstate(void) return value & 0x7F; } -static void atom_set_pstate(struct cpudata *cpudata, int pstate) +static u64 atom_get_val(struct cpudata *cpudata, int pstate) { u64 val; int32_t vid_fp; @@ -585,9 +585,7 @@ static void atom_set_pstate(struct cpudata *cpudata, int pstate) if (pstate > cpudata->pstate.max_pstate) vid = cpudata->vid.turbo; - val |= vid; - - wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); + return val | vid; } static int silvermont_get_scaling(void) @@ -711,7 +709,7 @@ static inline int core_get_scaling(void) return 100000; } -static void core_set_pstate(struct cpudata *cpudata, int pstate) +static u64 core_get_val(struct cpudata *cpudata, int pstate) { u64 val; @@ -719,7 +717,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate) if (limits->no_turbo && !limits->turbo_disabled) val |= (u64)1 << 32; - wrmsrl(MSR_IA32_PERF_CTL, val); + return val; } static int knl_get_turbo_pstate(void) @@ -750,7 +748,7 @@ static struct cpu_defaults core_params = { .get_min = core_get_min_pstate, .get_turbo = core_get_turbo_pstate, .get_scaling = core_get_scaling, - .set = core_set_pstate, + .get_val = core_get_val, .get_target_pstate = get_target_pstate_use_performance, }, }; @@ -769,7 +767,7 @@ static struct cpu_defaults silvermont_params = { .get_max_physical = atom_get_max_pstate, .get_min = atom_get_min_pstate, .get_turbo = atom_get_turbo_pstate, - .set = atom_set_pstate, + .get_val = atom_get_val, .get_scaling = silvermont_get_scaling, .get_vid = atom_get_vid, .get_target_pstate = get_target_pstate_use_cpu_load, @@ -790,7 +788,7 @@ static struct cpu_defaults airmont_params = { .get_max_physical = atom_get_max_pstate, .get_min = atom_get_min_pstate, .get_turbo = atom_get_turbo_pstate, - .set = atom_set_pstate, + .get_val = atom_get_val, .get_scaling = airmont_get_scaling, .get_vid = atom_get_vid, .get_target_pstate = get_target_pstate_use_cpu_load, @@ -812,7 +810,7 @@ static struct cpu_defaults knl_params = { .get_min = core_get_min_pstate, .get_turbo = knl_get_turbo_pstate, .get_scaling = core_get_scaling, - .set = core_set_pstate, + .get_val = core_get_val, .get_target_pstate = get_target_pstate_use_performance, }, }; @@ -839,25 +837,24 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); } -static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force) +static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate) { - int max_perf, min_perf; - - if (force) { - update_turbo_state(); - - intel_pstate_get_min_max(cpu, &min_perf, &max_perf); - - pstate = clamp_t(int, pstate, min_perf, max_perf); - - if (pstate == cpu->pstate.current_pstate) - return; - } trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); - cpu->pstate.current_pstate = pstate; +} - pstate_funcs.set(cpu, pstate); +static void intel_pstate_set_min_pstate(struct cpudata *cpu) +{ + int pstate = cpu->pstate.min_pstate; + + intel_pstate_record_pstate(cpu, pstate); + /* + * Generally, there is no guarantee that this code will always run on + * the CPU being updated, so force the register update to run on the + * right CPU. 
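The comment that just closed is the crux of the refactoring from a set() callback to get_val(): MSR_IA32_PERF_CTL is a per-logical-CPU register, so a plain wrmsrl() would program whichever CPU the caller happens to be running on. A minimal sketch of the cross-CPU pattern that wrmsrl_on_cpu() implements internally; the helper names here are illustrative, not from the patch:

#include <linux/smp.h>
#include <asm/msr.h>

static void set_perf_ctl_local(void *info)
{
	/* Executed on the target CPU by smp_call_function_single() */
	wrmsrl(MSR_IA32_PERF_CTL, *(u64 *)info);
}

static void set_perf_ctl_on(int cpu, u64 val)
{
	/* wait=true: do not return until the remote write has happened */
	smp_call_function_single(cpu, set_perf_ctl_local, &val, true);
}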
+ */ + wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, + pstate_funcs.get_val(cpu, pstate)); } static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) @@ -870,7 +867,8 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) if (pstate_funcs.get_vid) pstate_funcs.get_vid(cpu); - intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false); + + intel_pstate_set_min_pstate(cpu); } static inline void intel_pstate_calc_busy(struct cpudata *cpu) @@ -997,6 +995,21 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy); } +static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) +{ + int max_perf, min_perf; + + update_turbo_state(); + + intel_pstate_get_min_max(cpu, &min_perf, &max_perf); + pstate = clamp_t(int, pstate, min_perf, max_perf); + if (pstate == cpu->pstate.current_pstate) + return; + + intel_pstate_record_pstate(cpu, pstate); + wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); +} + static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) { int from, target_pstate; @@ -1006,7 +1019,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) target_pstate = pstate_funcs.get_target_pstate(cpu); - intel_pstate_set_pstate(cpu, target_pstate, true); + intel_pstate_update_pstate(cpu, target_pstate); sample = &cpu->sample; trace_pstate_sample(fp_toint(sample->core_pct_busy), @@ -1180,7 +1193,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) if (hwp_active) return; - intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false); + intel_pstate_set_min_pstate(cpu); } static int intel_pstate_cpu_init(struct cpufreq_policy *policy) @@ -1255,7 +1268,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs) pstate_funcs.get_min = funcs->get_min; pstate_funcs.get_turbo = funcs->get_turbo; pstate_funcs.get_scaling = funcs->get_scaling; - pstate_funcs.set = funcs->set; + pstate_funcs.get_val = funcs->get_val; pstate_funcs.get_vid = funcs->get_vid; pstate_funcs.get_target_pstate = funcs->get_target_pstate; diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 50bf12033bbc..39ac78c94be0 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -44,7 +44,6 @@ static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; static bool rebooting, throttled, occ_reset; -static unsigned int *core_to_chip_map; static const char * const throttle_reason[] = { "No throttling", @@ -55,6 +54,16 @@ static const char * const throttle_reason[] = { "OCC Reset" }; +enum throttle_reason_type { + NO_THROTTLE = 0, + POWERCAP, + CPU_OVERTEMP, + POWER_SUPPLY_FAILURE, + OVERCURRENT, + OCC_RESET_THROTTLE, + OCC_MAX_REASON +}; + static struct chip { unsigned int id; bool throttled; @@ -62,9 +71,13 @@ static struct chip { u8 throttle_reason; cpumask_t mask; struct work_struct throttle; + int throttle_turbo; + int throttle_sub_turbo; + int reason[OCC_MAX_REASON]; } *chips; static int nr_chips; +static DEFINE_PER_CPU(struct chip *, chip_info); /* * Note: The set of pstates consists of contiguous integers, the @@ -196,6 +209,42 @@ static struct freq_attr *powernv_cpu_freq_attr[] = { NULL, }; +#define throttle_attr(name, member) \ +static ssize_t name##_show(struct cpufreq_policy *policy, char *buf) \ +{ \ + struct chip *chip = per_cpu(chip_info, policy->cpu); \ + \ + return sprintf(buf, "%u\n", chip->member); \ +} \ + \ +static struct freq_attr throttle_attr_##name = 
__ATTR_RO(name) \ + +throttle_attr(unthrottle, reason[NO_THROTTLE]); +throttle_attr(powercap, reason[POWERCAP]); +throttle_attr(overtemp, reason[CPU_OVERTEMP]); +throttle_attr(supply_fault, reason[POWER_SUPPLY_FAILURE]); +throttle_attr(overcurrent, reason[OVERCURRENT]); +throttle_attr(occ_reset, reason[OCC_RESET_THROTTLE]); +throttle_attr(turbo_stat, throttle_turbo); +throttle_attr(sub_turbo_stat, throttle_sub_turbo); + +static struct attribute *throttle_attrs[] = { + &throttle_attr_unthrottle.attr, + &throttle_attr_powercap.attr, + &throttle_attr_overtemp.attr, + &throttle_attr_supply_fault.attr, + &throttle_attr_overcurrent.attr, + &throttle_attr_occ_reset.attr, + &throttle_attr_turbo_stat.attr, + &throttle_attr_sub_turbo_stat.attr, + NULL, +}; + +static const struct attribute_group throttle_attr_grp = { + .name = "throttle_stats", + .attrs = throttle_attrs, +}; + /* Helper routines */ /* Access helpers to power mgt SPR */ @@ -324,34 +373,35 @@ static inline unsigned int get_nominal_index(void) static void powernv_cpufreq_throttle_check(void *data) { + struct chip *chip; unsigned int cpu = smp_processor_id(); - unsigned int chip_id = core_to_chip_map[cpu_core_index_of_thread(cpu)]; unsigned long pmsr; - int pmsr_pmax, i; + int pmsr_pmax; pmsr = get_pmspr(SPRN_PMSR); - - for (i = 0; i < nr_chips; i++) - if (chips[i].id == chip_id) - break; + chip = this_cpu_read(chip_info); /* Check for Pmax Capping */ pmsr_pmax = (s8)PMSR_MAX(pmsr); if (pmsr_pmax != powernv_pstate_info.max) { - if (chips[i].throttled) + if (chip->throttled) goto next; - chips[i].throttled = true; - if (pmsr_pmax < powernv_pstate_info.nominal) + chip->throttled = true; + if (pmsr_pmax < powernv_pstate_info.nominal) { pr_warn_once("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n", - cpu, chips[i].id, pmsr_pmax, + cpu, chip->id, pmsr_pmax, powernv_pstate_info.nominal); - trace_powernv_throttle(chips[i].id, - throttle_reason[chips[i].throttle_reason], + chip->throttle_sub_turbo++; + } else { + chip->throttle_turbo++; + } + trace_powernv_throttle(chip->id, + throttle_reason[chip->throttle_reason], pmsr_pmax); - } else if (chips[i].throttled) { - chips[i].throttled = false; - trace_powernv_throttle(chips[i].id, - throttle_reason[chips[i].throttle_reason], + } else if (chip->throttled) { + chip->throttled = false; + trace_powernv_throttle(chip->id, + throttle_reason[chip->throttle_reason], pmsr_pmax); } @@ -411,6 +461,21 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy) for (i = 0; i < threads_per_core; i++) cpumask_set_cpu(base + i, policy->cpus); + if (!policy->driver_data) { + int ret; + + ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp); + if (ret) { + pr_info("Failed to create throttle stats directory for cpu %d\n", + policy->cpu); + return ret; + } + /* + * policy->driver_data is used as a flag for one-time + * creation of throttle sysfs files. 
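For readers tracing the sysfs plumbing, one instantiation of the throttle_attr() macro defined above, say throttle_attr(turbo_stat, throttle_turbo), expands to roughly the following (whitespace aside):

static ssize_t turbo_stat_show(struct cpufreq_policy *policy, char *buf)
{
	/* chip_info is the new per-CPU pointer to this CPU's struct chip */
	struct chip *chip = per_cpu(chip_info, policy->cpu);

	return sprintf(buf, "%u\n", chip->throttle_turbo);
}

static struct freq_attr throttle_attr_turbo_stat = __ATTR_RO(turbo_stat);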
+ */ + policy->driver_data = policy; + } return cpufreq_table_validate_and_show(policy, powernv_freqs); } @@ -517,8 +582,10 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb, break; if (omsg.throttle_status >= 0 && - omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS) + omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS) { chips[i].throttle_reason = omsg.throttle_status; + chips[i].reason[omsg.throttle_status]++; + } if (!omsg.throttle_status) chips[i].restore = true; @@ -558,47 +625,34 @@ static int init_chip_info(void) unsigned int chip[256]; unsigned int cpu, i; unsigned int prev_chip_id = UINT_MAX; - cpumask_t cpu_mask; - int ret = -ENOMEM; - - core_to_chip_map = kcalloc(cpu_nr_cores(), sizeof(unsigned int), - GFP_KERNEL); - if (!core_to_chip_map) - goto out; - cpumask_copy(&cpu_mask, cpu_possible_mask); - for_each_cpu(cpu, &cpu_mask) { + for_each_possible_cpu(cpu) { unsigned int id = cpu_to_chip_id(cpu); if (prev_chip_id != id) { prev_chip_id = id; chip[nr_chips++] = id; } - core_to_chip_map[cpu_core_index_of_thread(cpu)] = id; - cpumask_andnot(&cpu_mask, &cpu_mask, cpu_sibling_mask(cpu)); } chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL); if (!chips) - goto free_chip_map; + return -ENOMEM; for (i = 0; i < nr_chips; i++) { chips[i].id = chip[i]; cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i])); INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn); + for_each_cpu(cpu, &chips[i].mask) + per_cpu(chip_info, cpu) = &chips[i]; } return 0; -free_chip_map: - kfree(core_to_chip_map); -out: - return ret; } static inline void clean_chip_info(void) { kfree(chips); - kfree(core_to_chip_map); } static inline void unregister_all_notifiers(void) diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 27fc733cb5b9..03d38c291de6 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -196,7 +196,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev); * of points is below a threshold. If it is... then use the * average of these 8 points as the estimated value. */ -static void get_typical_interval(struct menu_device *data) +static unsigned int get_typical_interval(struct menu_device *data) { int i, divisor; unsigned int max, thresh, avg; @@ -253,9 +253,7 @@ again: if (likely(variance <= U64_MAX/36)) { if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3)) || variance <= 400) { - if (data->next_timer_us > avg) - data->predicted_us = avg; - return; + return avg; } } @@ -269,7 +267,7 @@ again: * with sporadic activity with a bunch of short pauses. */ if ((divisor * 4) <= INTERVALS * 3) - return; + return UINT_MAX; thresh = max - 1; goto again; @@ -286,6 +284,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); int i; unsigned int interactivity_req; + unsigned int expected_interval; unsigned long nr_iowaiters, cpu_load; if (data->needs_update) { @@ -312,32 +311,43 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) data->correction_factor[data->bucket], RESOLUTION * DECAY); - get_typical_interval(data); - - /* - * Performance multiplier defines a minimum predicted idle - * duration / latency ratio. Adjust the latency limit if - * necessary. 
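The get_typical_interval() change above turns the detector into a pure function that returns its estimate instead of writing data->predicted_us directly. Its core acceptance test is easy to state in isolation: take the mean of the last 8 observed idle intervals and accept it once the standard deviation is at most a sixth of the mean (avg * avg > 36 * variance) or at most 20 us (variance <= 400). A self-contained sketch of just that test, with the kernel's outlier-discard retry loop and divisor bookkeeping omitted:

#include <stdint.h>

#define INTERVALS 8

/* Return the mean of the samples if they look periodic, else UINT32_MAX. */
static uint32_t typical_interval(const uint32_t samples[INTERVALS])
{
	uint64_t sum = 0, variance = 0;
	uint32_t avg;
	int i;

	for (i = 0; i < INTERVALS; i++)
		sum += samples[i];
	avg = sum / INTERVALS;

	for (i = 0; i < INTERVALS; i++) {
		int64_t diff = (int64_t)samples[i] - avg;
		variance += diff * diff;
	}
	variance /= INTERVALS;

	/* stddev <= avg/6 or stddev <= 20us: a usable repeating pattern */
	if ((uint64_t)avg * avg > 36 * variance || variance <= 400)
		return avg;

	return UINT32_MAX; /* no detectable pattern */
}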
- */ - interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load); - if (latency_req > interactivity_req) - latency_req = interactivity_req; + expected_interval = get_typical_interval(data); + expected_interval = min(expected_interval, data->next_timer_us); if (CPUIDLE_DRIVER_STATE_START > 0) { - data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1; + struct cpuidle_state *s = &drv->states[CPUIDLE_DRIVER_STATE_START]; + unsigned int polling_threshold; + /* * We want to default to C1 (hlt), not to busy polling - * unless the timer is happening really really soon. + * unless the timer is happening really really soon, or + * C1's exit latency exceeds the user configured limit. */ - if (interactivity_req > 20 && - !drv->states[CPUIDLE_DRIVER_STATE_START].disabled && - dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0) + polling_threshold = max_t(unsigned int, 20, s->target_residency); + if (data->next_timer_us > polling_threshold && + latency_req > s->exit_latency && !s->disabled && + !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable) data->last_state_idx = CPUIDLE_DRIVER_STATE_START; + else + data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1; } else { data->last_state_idx = CPUIDLE_DRIVER_STATE_START; } /* + * Use the lowest expected idle interval to pick the idle state. + */ + data->predicted_us = min(data->predicted_us, expected_interval); + + /* + * Use the performance multiplier and the user-configurable + * latency_req to determine the maximum exit latency. + */ + interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load); + if (latency_req > interactivity_req) + latency_req = interactivity_req; + + /* * Find the idle state with the lowest power while satisfying * our constraints. */ diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index 64281bb2f650..4de78c552251 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -61,7 +61,7 @@ config DEVFREQ_GOV_USERSPACE Sets the frequency at the user specified one. This governor returns the user configured frequency if there has been an input to /sys/devices/.../power/devfreq_set_freq. - Otherwise, the governor does not change the frequnecy + Otherwise, the governor does not change the frequency given at the initialization. comment "DEVFREQ Drivers" diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 9810d1df0691..4a2c07ee6677 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -259,6 +259,7 @@ static long dma_buf_ioctl(struct file *file, struct dma_buf *dmabuf; struct dma_buf_sync sync; enum dma_data_direction direction; + int ret; dmabuf = file->private_data; @@ -285,11 +286,11 @@ static long dma_buf_ioctl(struct file *file, } if (sync.flags & DMA_BUF_SYNC_END) - dma_buf_end_cpu_access(dmabuf, direction); + ret = dma_buf_end_cpu_access(dmabuf, direction); else - dma_buf_begin_cpu_access(dmabuf, direction); + ret = dma_buf_begin_cpu_access(dmabuf, direction); - return 0; + return ret; default: return -ENOTTY; } @@ -611,15 +612,19 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access); * @dmabuf: [in] buffer to complete cpu access for. * @direction: [in] length of range for cpu access. * - * This call must always succeed. + * Can return negative error values, returns 0 on success. 
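Since dma_buf_end_cpu_access() can now fail, the burden moves to callers, as the i915 and udl hunks below show. A minimal sketch of the intended calling pattern on the importer side, reduced to its essentials:

#include <linux/dma-buf.h>

static int cpu_read_buffer(struct dma_buf *buf)
{
	int ret;

	ret = dma_buf_begin_cpu_access(buf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... CPU reads of the buffer contents go here ... */

	/* May fail now, e.g. if the exporter cannot flush its caches */
	return dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);
}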
*/ -void dma_buf_end_cpu_access(struct dma_buf *dmabuf, - enum dma_data_direction direction) +int dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) { + int ret = 0; + WARN_ON(!dmabuf); if (dmabuf->ops->end_cpu_access) - dmabuf->ops->end_cpu_access(dmabuf, direction); + ret = dmabuf->ops->end_cpu_access(dmabuf, direction); + + return ret; } EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h index dc6874424188..6b816878e5e7 100644 --- a/drivers/dma/idma64.h +++ b/drivers/dma/idma64.h @@ -16,7 +16,7 @@ #include <linux/spinlock.h> #include <linux/types.h> -#include <asm-generic/io-64-nonatomic-lo-hi.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include "virt-dma.h" diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index 8a46077129ac..631c977b0da5 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -446,14 +446,16 @@ static void bus_reset_irq_handler(struct pcilynx *lynx) { struct client *client; - struct timeval tv; + struct timespec64 ts64; + u32 timestamp; - do_gettimeofday(&tv); + ktime_get_real_ts64(&ts64); + timestamp = ts64.tv_nsec / NSEC_PER_USEC; spin_lock(&lynx->client_list_lock); list_for_each_entry(client, &lynx->client_list, link) - packet_buffer_put(&client->buffer, &tv.tv_usec, 4); + packet_buffer_put(&client->buffer, &timestamp, 4); spin_unlock(&lynx->client_list_lock); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index d7ec9bd6755f..9f4a45cd2aab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -48,7 +48,8 @@ struct amdgpu_mn { /* protected by adev->mn_lock */ struct hlist_node node; - /* objects protected by mm->mmap_sem */ + /* objects protected by lock */ + struct mutex lock; struct rb_root objects; }; @@ -72,7 +73,7 @@ static void amdgpu_mn_destroy(struct work_struct *work) struct amdgpu_bo *bo, *next_bo; mutex_lock(&adev->mn_lock); - down_write(&rmn->mm->mmap_sem); + mutex_lock(&rmn->lock); hash_del(&rmn->node); rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects, it.rb) { @@ -82,7 +83,7 @@ static void amdgpu_mn_destroy(struct work_struct *work) } kfree(node); } - up_write(&rmn->mm->mmap_sem); + mutex_unlock(&rmn->lock); mutex_unlock(&adev->mn_lock); mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm); kfree(rmn); @@ -105,6 +106,76 @@ static void amdgpu_mn_release(struct mmu_notifier *mn, } /** + * amdgpu_mn_invalidate_node - unmap all BOs of a node + * + * @node: the node with the BOs to unmap + * + * We block for all BOs and unmap them by moving them
+ */ +static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, + unsigned long start, + unsigned long end) +{ + struct amdgpu_bo *bo; + long r; + + list_for_each_entry(bo, &node->bos, mn_list) { + + if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) + continue; + + r = amdgpu_bo_reserve(bo, true); + if (r) { + DRM_ERROR("(%ld) failed to reserve user bo\n", r); + continue; + } + + r = reservation_object_wait_timeout_rcu(bo->tbo.resv, + true, false, MAX_SCHEDULE_TIMEOUT); + if (r <= 0) + DRM_ERROR("(%ld) failed to wait for user bo\n", r); + + amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); + if (r) + DRM_ERROR("(%ld) failed to validate user bo\n", r); + + amdgpu_bo_unreserve(bo); + } +} + +/** + * amdgpu_mn_invalidate_page - callback to notify about mm change + * + * @mn: our notifier + * @mm: the mm this callback is about + * @address: address of the invalidated page + * + * Invalidation of a single page. Blocks for all BOs mapping it + * and unmaps them by moving them into the system domain again. + */ +static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address) +{ + struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn); + struct interval_tree_node *it; + + mutex_lock(&rmn->lock); + + it = interval_tree_iter_first(&rmn->objects, address, address); + if (it) { + struct amdgpu_mn_node *node; + + node = container_of(it, struct amdgpu_mn_node, it); + amdgpu_mn_invalidate_node(node, address, address); + } + + mutex_unlock(&rmn->lock); +} + +/** * amdgpu_mn_invalidate_range_start - callback to notify about mm change * * @mn: our notifier @@ -126,44 +197,24 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, /* notification is exclusive, but interval is inclusive */ end -= 1; + mutex_lock(&rmn->lock); + it = interval_tree_iter_first(&rmn->objects, start, end); while (it) { struct amdgpu_mn_node *node; - struct amdgpu_bo *bo; - long r; node = container_of(it, struct amdgpu_mn_node, it); it = interval_tree_iter_next(it, start, end); - list_for_each_entry(bo, &node->bos, mn_list) { - - if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, - end)) - continue; - - r = amdgpu_bo_reserve(bo, true); - if (r) { - DRM_ERROR("(%ld) failed to reserve user bo\n", r); - continue; - } - - r = reservation_object_wait_timeout_rcu(bo->tbo.resv, - true, false, MAX_SCHEDULE_TIMEOUT); - if (r <= 0) - DRM_ERROR("(%ld) failed to wait for user bo\n", r); - - amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); - if (r) - DRM_ERROR("(%ld) failed to validate user bo\n", r); - - amdgpu_bo_unreserve(bo); - } + amdgpu_mn_invalidate_node(node, start, end); } + + mutex_unlock(&rmn->lock); } static const struct mmu_notifier_ops amdgpu_mn_ops = { .release = amdgpu_mn_release, + .invalidate_page = amdgpu_mn_invalidate_page, .invalidate_range_start = amdgpu_mn_invalidate_range_start, }; @@ -196,6 +247,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) rmn->adev = adev; rmn->mm = mm; rmn->mn.ops = &amdgpu_mn_ops; + mutex_init(&rmn->lock); rmn->objects = RB_ROOT; r = __mmu_notifier_register(&rmn->mn, mm); @@ -242,7 +294,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) INIT_LIST_HEAD(&bos); - down_write(&rmn->mm->mmap_sem); + mutex_lock(&rmn->lock); while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) { kfree(node); @@ -256,7 +308,7 @@ int
amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) if (!node) { node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL); if (!node) { - up_write(&rmn->mm->mmap_sem); + mutex_unlock(&rmn->lock); return -ENOMEM; } } @@ -271,7 +323,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) interval_tree_insert(&node->it, &rmn->objects); - up_write(&rmn->mm->mmap_sem); + mutex_unlock(&rmn->lock); return 0; } @@ -297,7 +349,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) return; } - down_write(&rmn->mm->mmap_sem); + mutex_lock(&rmn->lock); /* save the next list entry for later */ head = bo->mn_list.next; @@ -312,6 +364,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) kfree(node); } - up_write(&rmn->mm->mmap_sem); + mutex_unlock(&rmn->lock); mutex_unlock(&adev->mn_lock); } diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile index e195bf59da86..043e6ebab575 100644 --- a/drivers/gpu/drm/amd/powerplay/Makefile +++ b/drivers/gpu/drm/amd/powerplay/Makefile @@ -1,17 +1,17 @@ subdir-ccflags-y += -Iinclude/drm \ - -Idrivers/gpu/drm/amd/powerplay/inc/ \ - -Idrivers/gpu/drm/amd/include/asic_reg \ - -Idrivers/gpu/drm/amd/include \ - -Idrivers/gpu/drm/amd/powerplay/smumgr\ - -Idrivers/gpu/drm/amd/powerplay/hwmgr \ - -Idrivers/gpu/drm/amd/powerplay/eventmgr + -I$(FULL_AMD_PATH)/powerplay/inc/ \ + -I$(FULL_AMD_PATH)/include/asic_reg \ + -I$(FULL_AMD_PATH)/include \ + -I$(FULL_AMD_PATH)/powerplay/smumgr\ + -I$(FULL_AMD_PATH)/powerplay/hwmgr \ + -I$(FULL_AMD_PATH)/powerplay/eventmgr AMD_PP_PATH = ../powerplay PP_LIBS = smumgr hwmgr eventmgr -AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix drivers/gpu/drm/amd/powerplay/,$(PP_LIBS))) +AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS))) include $(AMD_POWERPLAY) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c index 34f4bef3691f..b156481b50e8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c @@ -512,8 +512,10 @@ static int get_cac_tdp_table( hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL); - if (NULL == hwmgr->dyn_state.cac_dtp_table) + if (NULL == hwmgr->dyn_state.cac_dtp_table) { + kfree(tdp_table); return -ENOMEM; + } memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 1ffe9c329c46..d65dcaee3832 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -558,7 +558,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, if (!state->base.crtc || !fb) return 0; - crtc_state = s->state->crtc_states[drm_crtc_index(s->crtc)]; + crtc_state = drm_atomic_get_existing_crtc_state(s->state, s->crtc); mode = &crtc_state->adjusted_mode; state->src_x = s->src_x; diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index a2596eb803fc..8ee1db866e80 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -380,7 +380,6 @@ EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc); * drm_atomic_replace_property_blob - replace a blob property * @blob: a pointer to the member blob to be replaced * @new_blob: the new blob to replace with - * @expected_size: the expected size of the new blob * @replaced: whether the blob has been replaced * * RETURNS: diff 
--git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 2bb90faa0ee2..4befe25c81c7 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -67,7 +67,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state, struct drm_crtc_state *crtc_state; if (plane->state->crtc) { - crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)]; + crtc_state = drm_atomic_get_existing_crtc_state(state, + plane->state->crtc); if (WARN_ON(!crtc_state)) return; @@ -76,8 +77,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state, } if (plane_state->crtc) { - crtc_state = - state->crtc_states[drm_crtc_index(plane_state->crtc)]; + crtc_state = drm_atomic_get_existing_crtc_state(state, + plane_state->crtc); if (WARN_ON(!crtc_state)) return; @@ -374,8 +375,8 @@ mode_fixup(struct drm_atomic_state *state) if (!conn_state->crtc || !conn_state->best_encoder) continue; - crtc_state = - state->crtc_states[drm_crtc_index(conn_state->crtc)]; + crtc_state = drm_atomic_get_existing_crtc_state(state, + conn_state->crtc); /* * Each encoder has at most one connector (since we always steal @@ -679,7 +680,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) if (!old_conn_state->crtc) continue; - old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)]; + old_crtc_state = drm_atomic_get_existing_crtc_state(old_state, + old_conn_state->crtc); if (!old_crtc_state->active || !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state)) diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 1f3eef6fb345..0506016e18e0 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -228,25 +228,20 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire return ret; } -static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) +static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - bool was_interruptible; int ret; - mutex_lock(&dev->struct_mutex); - was_interruptible = dev_priv->mm.interruptible; - dev_priv->mm.interruptible = false; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; ret = i915_gem_object_set_to_gtt_domain(obj, false); - - dev_priv->mm.interruptible = was_interruptible; mutex_unlock(&dev->struct_mutex); - if (unlikely(ret)) - DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n"); + return ret; } static const struct dma_buf_ops i915_dmabuf_ops = { diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index 3cf8aab23a39..af267c35d813 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -97,11 +97,12 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer, return omap_gem_get_pages(obj, &pages, true); } -static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer, - enum dma_data_direction dir) +static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer, + enum dma_data_direction dir) { struct drm_gem_object *obj = buffer->priv; omap_gem_put_pages(obj); + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 
df7a1719c841..43cffb526b0c 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -510,6 +510,7 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder, { struct radeon_encoder_mst *mst_enc; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_connector_atom_dig *dig_connector; int bpp = 24; mst_enc = radeon_encoder->enc_priv; @@ -523,22 +524,11 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder, drm_mode_set_crtcinfo(adjusted_mode, 0); - { - struct radeon_connector_atom_dig *dig_connector; - int ret; - - dig_connector = mst_enc->connector->con_priv; - ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base, - dig_connector->dpcd, adjusted_mode->clock, - &dig_connector->dp_lane_count, - &dig_connector->dp_clock); - if (ret) { - dig_connector->dp_lane_count = 0; - dig_connector->dp_clock = 0; - } - DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector, - dig_connector->dp_lane_count, dig_connector->dp_clock); - } + dig_connector = mst_enc->connector->con_priv; + dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd); + dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd); + DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector, + dig_connector->dp_lane_count, dig_connector->dp_clock); return true; } diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index c427499133d6..33239a2b264a 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -423,8 +423,8 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb, } if (ufb->obj->base.import_attach) { - dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf, - DMA_FROM_DEVICE); + ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf, + DMA_FROM_DEVICE); } unlock: diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index cd4510a63375..ba947df5a8c7 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -65,7 +65,7 @@ #include <asm/mwait.h> #include <asm/msr.h> -#define INTEL_IDLE_VERSION "0.4" +#define INTEL_IDLE_VERSION "0.4.1" #define PREFIX "intel_idle: " static struct cpuidle_driver intel_idle_driver = { @@ -716,6 +716,26 @@ static struct cpuidle_state avn_cstates[] = { { .enter = NULL } }; +static struct cpuidle_state knl_cstates[] = { + { + .name = "C1-KNL", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 2, + .enter = &intel_idle, + .enter_freeze = intel_idle_freeze }, + { + .name = "C6-KNL", + .desc = "MWAIT 0x10", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 120, + .target_residency = 500, + .enter = &intel_idle, + .enter_freeze = intel_idle_freeze }, + { + .enter = NULL } +}; /** * intel_idle @@ -890,6 +910,10 @@ static const struct idle_cpu idle_cpu_avn = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_knl = { + .state_table = knl_cstates, +}; + #define ICPU(model, cpu) \ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } @@ -921,6 +945,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { ICPU(0x56, idle_cpu_bdw), ICPU(0x4e, idle_cpu_skl), ICPU(0x5e, idle_cpu_skl), + ICPU(0x57, idle_cpu_knl), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); @@ -994,36 +1019,92 @@ static void intel_idle_cpuidle_devices_uninit(void) } /* - * intel_idle_state_table_update() + * ivt_idle_state_table_update(void) * - * Update the default state_table for this CPU-id - * - * Currently 
used to access tuned IVT multi-socket targets + * Tune IVT multi-socket targets * Assumption: num_sockets == (max_package_num + 1) */ -void intel_idle_state_table_update(void) +static void ivt_idle_state_table_update(void) { /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */ - if (boot_cpu_data.x86_model == 0x3e) { /* IVT */ - int cpu, package_num, num_sockets = 1; - - for_each_online_cpu(cpu) { - package_num = topology_physical_package_id(cpu); - if (package_num + 1 > num_sockets) { - num_sockets = package_num + 1; - - if (num_sockets > 4) { - cpuidle_state_table = ivt_cstates_8s; - return; - } + int cpu, package_num, num_sockets = 1; + + for_each_online_cpu(cpu) { + package_num = topology_physical_package_id(cpu); + if (package_num + 1 > num_sockets) { + num_sockets = package_num + 1; + + if (num_sockets > 4) { + cpuidle_state_table = ivt_cstates_8s; + return; } } + } + + if (num_sockets > 2) + cpuidle_state_table = ivt_cstates_4s; - if (num_sockets > 2) - cpuidle_state_table = ivt_cstates_4s; - /* else, 1 and 2 socket systems use default ivt_cstates */ + /* else, 1 and 2 socket systems use default ivt_cstates */ +} +/* + * sklh_idle_state_table_update(void) + * + * On SKL-H (model 0x5e) disable C8 and C9 if: + * C10 is enabled and SGX disabled + */ +static void sklh_idle_state_table_update(void) +{ + unsigned long long msr; + unsigned int eax, ebx, ecx, edx; + + + /* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */ + if (max_cstate <= 7) + return; + + /* if PC10 not present in CPUID.MWAIT.EDX */ + if ((mwait_substates & (0xF << 28)) == 0) + return; + + rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr); + + /* PC10 is not enabled in PKG C-state limit */ + if ((msr & 0xF) != 8) + return; + + ecx = 0; + cpuid(7, &eax, &ebx, &ecx, &edx); + + /* if SGX is present */ + if (ebx & (1 << 2)) { + + rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); + + /* if SGX is enabled */ + if (msr & (1 << 18)) + return; + } + + skl_cstates[5].disabled = 1; /* C8-SKL */ + skl_cstates[6].disabled = 1; /* C9-SKL */ +} +/* + * intel_idle_state_table_update() + * + * Update the default state_table for this CPU-id + */ + +static void intel_idle_state_table_update(void) +{ + switch (boot_cpu_data.x86_model) { + + case 0x3e: /* IVT */ + ivt_idle_state_table_update(); + break; + case 0x5e: /* SKL-H */ + sklh_idle_state_table_update(); + break; } - return; } /* @@ -1063,6 +1144,14 @@ static int __init intel_idle_cpuidle_driver_init(void) if (num_substates == 0) continue; + /* if state marked as disabled, skip it */ + if (cpuidle_state_table[cstate].disabled != 0) { + pr_debug(PREFIX "state %s is disabled", + cpuidle_state_table[cstate].name); + continue; + } + + if (((mwait_cstate + 1) > 2) && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) mark_tsc_unstable("TSC halts in idle" diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c index 929508e5266c..998dc3caad4c 100644 --- a/drivers/iio/adc/max1363.c +++ b/drivers/iio/adc/max1363.c @@ -1386,7 +1386,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = { }, [max11644] = { .bits = 12, - .int_vref_mv = 2048, + .int_vref_mv = 4096, .mode_list = max11644_mode_list, .num_modes = ARRAY_SIZE(max11644_mode_list), .default_mode = s0to1, @@ -1396,7 +1396,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = { }, [max11645] = { .bits = 12, - .int_vref_mv = 4096, + .int_vref_mv = 2048, .mode_list = max11644_mode_list, .num_modes = ARRAY_SIZE(max11644_mode_list), .default_mode = s0to1, @@ -1406,7 +1406,7 @@ static const struct 
max1363_chip_info max1363_chip_info_tbl[] = { }, [max11646] = { .bits = 10, - .int_vref_mv = 2048, + .int_vref_mv = 4096, .mode_list = max11644_mode_list, .num_modes = ARRAY_SIZE(max11644_mode_list), .default_mode = s0to1, @@ -1416,7 +1416,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = { }, [max11647] = { .bits = 10, - .int_vref_mv = 4096, + .int_vref_mv = 2048, .mode_list = max11644_mode_list, .num_modes = ARRAY_SIZE(max11644_mode_list), .default_mode = s0to1, @@ -1680,6 +1680,10 @@ static const struct i2c_device_id max1363_id[] = { { "max11615", max11615 }, { "max11616", max11616 }, { "max11617", max11617 }, + { "max11644", max11644 }, + { "max11645", max11645 }, + { "max11646", max11646 }, + { "max11647", max11647 }, {} }; diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c index f6a07dc32ae4..a6af56ad10e1 100644 --- a/drivers/iio/light/apds9960.c +++ b/drivers/iio/light/apds9960.c @@ -769,7 +769,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data) mutex_lock(&data->lock); data->gesture_mode_running = 1; - while (cnt-- || (cnt = apds9660_fifo_is_empty(data) > 0)) { + while (cnt || (cnt = apds9660_fifo_is_empty(data) > 0)) { ret = regmap_bulk_read(data->regmap, APDS9960_REG_GFIFO_BASE, &data->buffer, 4); @@ -777,6 +777,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data) goto err_read; iio_push_to_buffers(data->indio_dev, data->buffer); + cnt--; } err_read: diff --git a/drivers/input/input-compat.c b/drivers/input/input-compat.c index 64ca7113ff28..d84d20b9cec0 100644 --- a/drivers/input/input-compat.c +++ b/drivers/input/input-compat.c @@ -17,7 +17,7 @@ int input_event_from_user(const char __user *buffer, struct input_event *event) { - if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) { + if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { struct input_event_compat compat_event; if (copy_from_user(&compat_event, buffer, @@ -41,7 +41,7 @@ int input_event_from_user(const char __user *buffer, int input_event_to_user(char __user *buffer, const struct input_event *event) { - if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) { + if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { struct input_event_compat compat_event; compat_event.time.tv_sec = event->time.tv_sec; @@ -65,7 +65,7 @@ int input_event_to_user(char __user *buffer, int input_ff_effect_from_user(const char __user *buffer, size_t size, struct ff_effect *effect) { - if (INPUT_COMPAT_TEST) { + if (in_compat_syscall()) { struct ff_effect_compat *compat_effect; if (size != sizeof(struct ff_effect_compat)) diff --git a/drivers/input/input-compat.h b/drivers/input/input-compat.h index 0f25878d5fa2..1563160a7af3 100644 --- a/drivers/input/input-compat.h +++ b/drivers/input/input-compat.h @@ -17,8 +17,6 @@ #ifdef CONFIG_COMPAT -#define INPUT_COMPAT_TEST in_compat_syscall() - struct input_event_compat { struct compat_timeval time; __u16 type; @@ -57,7 +55,7 @@ struct ff_effect_compat { static inline size_t input_event_size(void) { - return (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) ? + return (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) ? 
sizeof(struct input_event_compat) : sizeof(struct input_event); } diff --git a/drivers/input/input.c b/drivers/input/input.c index 880605959aa6..b87ffbd4547d 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1015,7 +1015,7 @@ static int input_bits_to_string(char *buf, int buf_size, { int len = 0; - if (INPUT_COMPAT_TEST) { + if (in_compat_syscall()) { u32 dword = bits >> 32; if (dword || !skip_empty) len += snprintf(buf, buf_size, "%x ", dword); diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c index cfd58e87da26..1c5914cae853 100644 --- a/drivers/input/misc/ati_remote2.c +++ b/drivers/input/misc/ati_remote2.c @@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d ar2->udev = udev; + /* Sanity check, first interface must have an endpoint */ + if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) { + dev_err(&interface->dev, + "%s(): interface 0 must have an endpoint\n", __func__); + r = -ENODEV; + goto fail1; + } ar2->intf[0] = interface; ar2->ep[0] = &alt->endpoint[0].desc; + /* Sanity check, the device must have two interfaces */ ar2->intf[1] = usb_ifnum_to_if(udev, 1); + if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) { + dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n", + __func__, udev->actconfig->desc.bNumInterfaces); + r = -ENODEV; + goto fail1; + } + r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2); if (r) goto fail1; + + /* Sanity check, second interface must have an endpoint */ alt = ar2->intf[1]->cur_altsetting; + if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) { + dev_err(&interface->dev, + "%s(): interface 1 must have an endpoint\n", __func__); + r = -ENODEV; + goto fail2; + } ar2->ep[1] = &alt->endpoint[0].desc; r = ati_remote2_urb_init(ar2); if (r) - goto fail2; + goto fail3; ar2->channel_mask = channel_mask; ar2->mode_mask = mode_mask; r = ati_remote2_setup(ar2, ar2->channel_mask); if (r) - goto fail2; + goto fail3; usb_make_path(udev, ar2->phys, sizeof(ar2->phys)); strlcat(ar2->phys, "/input0", sizeof(ar2->phys)); @@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group); if (r) - goto fail2; + goto fail3; r = ati_remote2_input_init(ar2); if (r) - goto fail3; + goto fail4; usb_set_intfdata(interface, ar2); @@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d return 0; - fail3: + fail4: sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group); - fail2: + fail3: ati_remote2_urb_cleanup(ar2); + fail2: usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]); fail1: kfree(ar2); diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index ac1fa5f44580..9c0ea36913b4 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -1663,6 +1663,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev, union_desc->bMasterInterface0); + if (!pcu->ctrl_intf) + return -EINVAL; alt = pcu->ctrl_intf->cur_altsetting; pcu->ep_ctrl = &alt->endpoint[0].desc; @@ -1670,6 +1672,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc pcu->data_intf = usb_ifnum_to_if(pcu->udev, union_desc->bSlaveInterface0); + if (!pcu->data_intf) + return -EINVAL; alt = pcu->data_intf->cur_altsetting; if (alt->desc.bNumEndpoints != 2) { diff --git 
a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 4eb9e4d94f46..abe1a927b332 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -664,7 +664,7 @@ struct uinput_ff_upload_compat { static int uinput_ff_upload_to_user(char __user *buffer, const struct uinput_ff_upload *ff_up) { - if (INPUT_COMPAT_TEST) { + if (in_compat_syscall()) { struct uinput_ff_upload_compat ff_up_compat; ff_up_compat.request_id = ff_up->request_id; @@ -695,7 +695,7 @@ static int uinput_ff_upload_to_user(char __user *buffer, static int uinput_ff_upload_from_user(const char __user *buffer, struct uinput_ff_upload *ff_up) { - if (INPUT_COMPAT_TEST) { + if (in_compat_syscall()) { struct uinput_ff_upload_compat ff_up_compat; if (copy_from_user(&ff_up_compat, buffer, diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c index 9425e0f6c5ce..fdc243ca93ed 100644 --- a/drivers/input/mouse/byd.c +++ b/drivers/input/mouse/byd.c @@ -12,10 +12,12 @@ #include <linux/input.h> #include <linux/libps2.h> #include <linux/serio.h> +#include <linux/slab.h> #include "psmouse.h" #include "byd.h" +/* PS2 Bits */ #define PS2_Y_OVERFLOW BIT_MASK(7) #define PS2_X_OVERFLOW BIT_MASK(6) #define PS2_Y_SIGN BIT_MASK(5) @@ -26,69 +28,249 @@ #define PS2_LEFT BIT_MASK(0) /* - * The touchpad reports gestures in the last byte of each packet. It can take - * any of the following values: + * BYD pad constants */ -/* One-finger scrolling in one of the edge scroll zones. */ -#define BYD_SCROLLUP 0xCA -#define BYD_SCROLLDOWN 0x36 -#define BYD_SCROLLLEFT 0xCB -#define BYD_SCROLLRIGHT 0x35 -/* Two-finger scrolling. */ -#define BYD_2DOWN 0x2B -#define BYD_2UP 0xD5 -#define BYD_2LEFT 0xD6 -#define BYD_2RIGHT 0x2A -/* Pinching in or out. */ -#define BYD_ZOOMOUT 0xD8 -#define BYD_ZOOMIN 0x28 -/* Three-finger swipe. */ -#define BYD_3UP 0xD3 -#define BYD_3DOWN 0x2D -#define BYD_3LEFT 0xD4 -#define BYD_3RIGHT 0x2C -/* Four-finger swipe. */ -#define BYD_4UP 0xCD -#define BYD_4DOWN 0x33 +/* + * True device resolution is unknown, however experiments show the + * resolution is about 111 units/mm. + * Absolute coordinate packets are in the range 0-255 for both X and Y + * we pick ABS_X/ABS_Y dimensions which are multiples of 256 and in + * the right ballpark given the touchpad's physical dimensions and estimate + * resolution per spec sheet, device active area dimensions are + * 101.6 x 60.1 mm. + */ +#define BYD_PAD_WIDTH 11264 +#define BYD_PAD_HEIGHT 6656 +#define BYD_PAD_RESOLUTION 111 -int byd_detect(struct psmouse *psmouse, bool set_properties) +/* + * Given the above dimensions, relative packets velocity is in multiples of + * 1 unit / 11 milliseconds. 
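A quick sanity check on the constants introduced above: BYD_PAD_WIDTH / BYD_PAD_RESOLUTION = 11264 / 111 ≈ 101.5 mm and BYD_PAD_HEIGHT / BYD_PAD_RESOLUTION = 6656 / 111 ≈ 60.0 mm, matching the quoted 101.6 x 60.1 mm active area. Each raw 0-255 absolute step thus spans 11264 / 256 = 44 units (about 0.4 mm) in X and 6656 / 256 = 26 units in Y, and a relative delta of 1, scaled by the dt of 11 defined just below, corresponds to roughly 0.1 mm of travel.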
We use this dt to estimate distance traveled + */ +#define BYD_DT 11 +/* Time in jiffies used to timeout various touch events (64 ms) */ +#define BYD_TOUCH_TIMEOUT msecs_to_jiffies(64) + +/* BYD commands reverse engineered from windows driver */ + +/* + * Swipe gesture from off-pad to on-pad + * 0 : disable + * 1 : enable + */ +#define BYD_CMD_SET_OFFSCREEN_SWIPE 0x10cc +/* + * Tap and drag delay time + * 0 : disable + * 1 - 8 : least to most delay + */ +#define BYD_CMD_SET_TAP_DRAG_DELAY_TIME 0x10cf +/* + * Physical buttons function mapping + * 0 : enable + * 4 : normal + * 5 : left button custom command + * 6 : right button custom command + * 8 : disable + */ +#define BYD_CMD_SET_PHYSICAL_BUTTONS 0x10d0 +/* + * Absolute mode (1 byte X/Y resolution) + * 0 : disable + * 2 : enable + */ +#define BYD_CMD_SET_ABSOLUTE_MODE 0x10d1 +/* + * Two finger scrolling + * 1 : vertical + * 2 : horizontal + * 3 : vertical + horizontal + * 4 : disable + */ +#define BYD_CMD_SET_TWO_FINGER_SCROLL 0x10d2 +/* + * Handedness + * 1 : right handed + * 2 : left handed + */ +#define BYD_CMD_SET_HANDEDNESS 0x10d3 +/* + * Tap to click + * 1 : enable + * 2 : disable + */ +#define BYD_CMD_SET_TAP 0x10d4 +/* + * Tap and drag + * 1 : tap and hold to drag + * 2 : tap and hold to drag + lock + * 3 : disable + */ +#define BYD_CMD_SET_TAP_DRAG 0x10d5 +/* + * Touch sensitivity + * 1 - 7 : least to most sensitive + */ +#define BYD_CMD_SET_TOUCH_SENSITIVITY 0x10d6 +/* + * One finger scrolling + * 1 : vertical + * 2 : horizontal + * 3 : vertical + horizontal + * 4 : disable + */ +#define BYD_CMD_SET_ONE_FINGER_SCROLL 0x10d7 +/* + * One finger scrolling function + * 1 : free scrolling + * 2 : edge motion + * 3 : free scrolling + edge motion + * 4 : disable + */ +#define BYD_CMD_SET_ONE_FINGER_SCROLL_FUNC 0x10d8 +/* + * Sliding speed + * 1 - 5 : slowest to fastest + */ +#define BYD_CMD_SET_SLIDING_SPEED 0x10da +/* + * Edge motion + * 1 : disable + * 2 : enable when dragging + * 3 : enable when dragging and pointing + */ +#define BYD_CMD_SET_EDGE_MOTION 0x10db +/* + * Left edge region size + * 0 - 7 : smallest to largest width + */ +#define BYD_CMD_SET_LEFT_EDGE_REGION 0x10dc +/* + * Top edge region size + * 0 - 9 : smallest to largest height + */ +#define BYD_CMD_SET_TOP_EDGE_REGION 0x10dd +/* + * Disregard palm press as clicks + * 1 - 6 : smallest to largest + */ +#define BYD_CMD_SET_PALM_CHECK 0x10de +/* + * Right edge region size + * 0 - 7 : smallest to largest width + */ +#define BYD_CMD_SET_RIGHT_EDGE_REGION 0x10df +/* + * Bottom edge region size + * 0 - 9 : smallest to largest height + */ +#define BYD_CMD_SET_BOTTOM_EDGE_REGION 0x10e1 +/* + * Multitouch gestures + * 1 : enable + * 2 : disable + */ +#define BYD_CMD_SET_MULTITOUCH 0x10e3 +/* + * Edge motion speed + * 0 : control with finger pressure + * 1 - 9 : slowest to fastest + */ +#define BYD_CMD_SET_EDGE_MOTION_SPEED 0x10e4 +/* + * Two finger scolling function + * 0 : free scrolling + * 1 : free scrolling (with momentum) + * 2 : edge motion + * 3 : free scrolling (with momentum) + edge motion + * 4 : disable + */ +#define BYD_CMD_SET_TWO_FINGER_SCROLL_FUNC 0x10e5 + +/* + * The touchpad generates a mixture of absolute and relative packets, indicated + * by the the last byte of each packet being set to one of the following: + */ +#define BYD_PACKET_ABSOLUTE 0xf8 +#define BYD_PACKET_RELATIVE 0x00 +/* Multitouch gesture packets */ +#define BYD_PACKET_PINCH_IN 0xd8 +#define BYD_PACKET_PINCH_OUT 0x28 +#define BYD_PACKET_ROTATE_CLOCKWISE 0x29 +#define 
BYD_PACKET_ROTATE_ANTICLOCKWISE 0xd7 +#define BYD_PACKET_TWO_FINGER_SCROLL_RIGHT 0x2a +#define BYD_PACKET_TWO_FINGER_SCROLL_DOWN 0x2b +#define BYD_PACKET_TWO_FINGER_SCROLL_UP 0xd5 +#define BYD_PACKET_TWO_FINGER_SCROLL_LEFT 0xd6 +#define BYD_PACKET_THREE_FINGER_SWIPE_RIGHT 0x2c +#define BYD_PACKET_THREE_FINGER_SWIPE_DOWN 0x2d +#define BYD_PACKET_THREE_FINGER_SWIPE_UP 0xd3 +#define BYD_PACKET_THREE_FINGER_SWIPE_LEFT 0xd4 +#define BYD_PACKET_FOUR_FINGER_DOWN 0x33 +#define BYD_PACKET_FOUR_FINGER_UP 0xcd +#define BYD_PACKET_REGION_SCROLL_RIGHT 0x35 +#define BYD_PACKET_REGION_SCROLL_DOWN 0x36 +#define BYD_PACKET_REGION_SCROLL_UP 0xca +#define BYD_PACKET_REGION_SCROLL_LEFT 0xcb +#define BYD_PACKET_RIGHT_CORNER_CLICK 0xd2 +#define BYD_PACKET_LEFT_CORNER_CLICK 0x2e +#define BYD_PACKET_LEFT_AND_RIGHT_CORNER_CLICK 0x2f +#define BYD_PACKET_ONTO_PAD_SWIPE_RIGHT 0x37 +#define BYD_PACKET_ONTO_PAD_SWIPE_DOWN 0x30 +#define BYD_PACKET_ONTO_PAD_SWIPE_UP 0xd0 +#define BYD_PACKET_ONTO_PAD_SWIPE_LEFT 0xc9 + +struct byd_data { + struct timer_list timer; + s32 abs_x; + s32 abs_y; + typeof(jiffies) last_touch_time; + bool btn_left; + bool btn_right; + bool touch; +}; + +static void byd_report_input(struct psmouse *psmouse) { - struct ps2dev *ps2dev = &psmouse->ps2dev; - unsigned char param[4]; + struct byd_data *priv = psmouse->private; + struct input_dev *dev = psmouse->dev; - param[0] = 0x03; - param[1] = 0x00; - param[2] = 0x00; - param[3] = 0x00; + input_report_key(dev, BTN_TOUCH, priv->touch); + input_report_key(dev, BTN_TOOL_FINGER, priv->touch); - if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) - return -1; - if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) - return -1; - if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) - return -1; - if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) - return -1; - if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) - return -1; + input_report_abs(dev, ABS_X, priv->abs_x); + input_report_abs(dev, ABS_Y, priv->abs_y); + input_report_key(dev, BTN_LEFT, priv->btn_left); + input_report_key(dev, BTN_RIGHT, priv->btn_right); - if (param[1] != 0x03 || param[2] != 0x64) - return -ENODEV; + input_sync(dev); +} - psmouse_dbg(psmouse, "BYD touchpad detected\n"); +static void byd_clear_touch(unsigned long data) +{ + struct psmouse *psmouse = (struct psmouse *)data; + struct byd_data *priv = psmouse->private; - if (set_properties) { - psmouse->vendor = "BYD"; - psmouse->name = "TouchPad"; - } + serio_pause_rx(psmouse->ps2dev.serio); + priv->touch = false; - return 0; + byd_report_input(psmouse); + + serio_continue_rx(psmouse->ps2dev.serio); + + /* + * Move cursor back to center of pad when we lose touch - this + * specifically improves user experience when moving cursor with one + * finger, and pressing a button with another. + */ + priv->abs_x = BYD_PAD_WIDTH / 2; + priv->abs_y = BYD_PAD_HEIGHT / 2; } static psmouse_ret_t byd_process_byte(struct psmouse *psmouse) { - struct input_dev *dev = psmouse->dev; + struct byd_data *priv = psmouse->private; u8 *pkt = psmouse->packet; if (psmouse->pktcnt > 0 && !(pkt[0] & PS2_ALWAYS_1)) { @@ -102,53 +284,34 @@ static psmouse_ret_t byd_process_byte(struct psmouse *psmouse) /* Otherwise, a full packet has been received */ switch (pkt[3]) { - case 0: { + case BYD_PACKET_ABSOLUTE: + /* Only use absolute packets for the start of movement. 
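To make the absolute-packet scaling used just below concrete, here is the same arithmetic as a stand-alone helper; the 44 and 26 factors are BYD_PAD_WIDTH / 256 and BYD_PAD_HEIGHT / 256, and the function name is illustrative only:

#include <stdint.h>

#define BYD_PAD_WIDTH	11264
#define BYD_PAD_HEIGHT	6656

/* Map one raw 0-255 coordinate pair to pad units; raw Y grows upward. */
static void byd_scale_abs(uint8_t raw_x, uint8_t raw_y,
			  int32_t *abs_x, int32_t *abs_y)
{
	*abs_x = raw_x * (BYD_PAD_WIDTH / 256);		/* 128 -> 5632, pad centre */
	*abs_y = (255 - raw_y) * (BYD_PAD_HEIGHT / 256);	/* flip to Y-down */
}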
*/ + if (!priv->touch) { + /* needed to detect tap */ + typeof(jiffies) tap_time = + priv->last_touch_time + BYD_TOUCH_TIMEOUT; + priv->touch = time_after(jiffies, tap_time); + + /* init abs position */ + priv->abs_x = pkt[1] * (BYD_PAD_WIDTH / 256); + priv->abs_y = (255 - pkt[2]) * (BYD_PAD_HEIGHT / 256); + } + break; + case BYD_PACKET_RELATIVE: { /* Standard packet */ /* Sign-extend if a sign bit is set. */ - unsigned int signx = pkt[0] & PS2_X_SIGN ? ~0xFF : 0; - unsigned int signy = pkt[0] & PS2_Y_SIGN ? ~0xFF : 0; - int dx = signx | (int) pkt[1]; - int dy = signy | (int) pkt[2]; + u32 signx = pkt[0] & PS2_X_SIGN ? ~0xFF : 0; + u32 signy = pkt[0] & PS2_Y_SIGN ? ~0xFF : 0; + s32 dx = signx | (int) pkt[1]; + s32 dy = signy | (int) pkt[2]; - input_report_rel(psmouse->dev, REL_X, dx); - input_report_rel(psmouse->dev, REL_Y, -dy); + /* Update position based on velocity */ + priv->abs_x += dx * BYD_DT; + priv->abs_y -= dy * BYD_DT; - input_report_key(psmouse->dev, BTN_LEFT, pkt[0] & PS2_LEFT); - input_report_key(psmouse->dev, BTN_RIGHT, pkt[0] & PS2_RIGHT); - input_report_key(psmouse->dev, BTN_MIDDLE, pkt[0] & PS2_MIDDLE); + priv->touch = true; break; } - - case BYD_SCROLLDOWN: - case BYD_2DOWN: - input_report_rel(dev, REL_WHEEL, -1); - break; - - case BYD_SCROLLUP: - case BYD_2UP: - input_report_rel(dev, REL_WHEEL, 1); - break; - - case BYD_SCROLLLEFT: - case BYD_2LEFT: - input_report_rel(dev, REL_HWHEEL, -1); - break; - - case BYD_SCROLLRIGHT: - case BYD_2RIGHT: - input_report_rel(dev, REL_HWHEEL, 1); - break; - - case BYD_ZOOMOUT: - case BYD_ZOOMIN: - case BYD_3UP: - case BYD_3DOWN: - case BYD_3LEFT: - case BYD_3RIGHT: - case BYD_4UP: - case BYD_4DOWN: - break; - default: psmouse_warn(psmouse, "Unrecognized Z: pkt = %02x %02x %02x %02x\n", @@ -157,134 +320,76 @@ static psmouse_ret_t byd_process_byte(struct psmouse *psmouse) return PSMOUSE_BAD_DATA; } - input_sync(dev); + priv->btn_left = pkt[0] & PS2_LEFT; + priv->btn_right = pkt[0] & PS2_RIGHT; - return PSMOUSE_FULL_PACKET; -} + byd_report_input(psmouse); -/* Send a sequence of bytes, where each is ACKed before the next is sent. */ -static int byd_send_sequence(struct psmouse *psmouse, const u8 *seq, size_t len) -{ - unsigned int i; - - for (i = 0; i < len; ++i) { - if (ps2_command(&psmouse->ps2dev, NULL, seq[i])) - return -1; + /* Reset time since last touch. */ + if (priv->touch) { + priv->last_touch_time = jiffies; + mod_timer(&priv->timer, jiffies + BYD_TOUCH_TIMEOUT); } - return 0; -} - -/* Keep scrolling after fingers are removed. */ -#define SCROLL_INERTIAL 0x01 -#define SCROLL_NO_INERTIAL 0x02 - -/* Clicking can be done by tapping or pressing. */ -#define CLICK_BOTH 0x01 -/* Clicking can only be done by pressing. */ -#define CLICK_PRESS_ONLY 0x02 -static int byd_enable(struct psmouse *psmouse) -{ - const u8 seq1[] = { 0xE2, 0x00, 0xE0, 0x02, 0xE0 }; - const u8 seq2[] = { - 0xD3, 0x01, - 0xD0, 0x00, - 0xD0, 0x04, - /* Whether clicking is done by tapping or pressing. */ - 0xD4, CLICK_PRESS_ONLY, - 0xD5, 0x01, - 0xD7, 0x03, - /* Vertical and horizontal one-finger scroll zone inertia. */ - 0xD8, SCROLL_INERTIAL, - 0xDA, 0x05, - 0xDB, 0x02, - 0xE4, 0x05, - 0xD6, 0x01, - 0xDE, 0x04, - 0xE3, 0x01, - 0xCF, 0x00, - 0xD2, 0x03, - /* Vertical and horizontal two-finger scrolling inertia. 
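One subtlety in the relative-packet case above: PS/2 deltas are 9-bit two's-complement values whose sign bit travels in the header byte (PS2_X_SIGN, bit 4), and the signx | pkt[1] trick performs the sign extension. A worked stand-alone example under that bit layout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t header = 0x10;	/* X sign bit set in the header byte */
	uint8_t raw_dx = 0xF0;	/* low 8 bits of the X delta */

	/* Prepend ones above bit 7 when negative, zeroes otherwise */
	uint32_t sign = (header & 0x10) ? ~0xFFu : 0;
	int32_t dx = (int32_t)(sign | raw_dx);

	printf("dx = %d\n", (int)dx);	/* prints dx = -16 */
	return 0;
}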
*/ - 0xE5, SCROLL_INERTIAL, - 0xD9, 0x02, - 0xD9, 0x07, - 0xDC, 0x03, - 0xDD, 0x03, - 0xDF, 0x03, - 0xE1, 0x03, - 0xD1, 0x00, - 0xCE, 0x00, - 0xCC, 0x00, - 0xE0, 0x00, - 0xE2, 0x01 - }; - u8 param[4]; - - if (byd_send_sequence(psmouse, seq1, ARRAY_SIZE(seq1))) - return -1; - - /* Send a 0x01 command, which should return 4 bytes. */ - if (ps2_command(&psmouse->ps2dev, param, 0x0401)) - return -1; - - if (byd_send_sequence(psmouse, seq2, ARRAY_SIZE(seq2))) - return -1; - - return 0; + return PSMOUSE_FULL_PACKET; } -/* - * Send the set of PS/2 commands required to make it identify as an - * intellimouse with 4-byte instead of 3-byte packets. - */ -static int byd_send_intellimouse_sequence(struct psmouse *psmouse) +static int byd_reset_touchpad(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; u8 param[4]; - int i; + size_t i; + const struct { u16 command; u8 arg; } seq[] = { - { PSMOUSE_CMD_RESET_BAT, 0 }, - { PSMOUSE_CMD_RESET_BAT, 0 }, - { PSMOUSE_CMD_GETID, 0 }, - { PSMOUSE_CMD_SETSCALE11, 0 }, - { PSMOUSE_CMD_SETSCALE11, 0 }, - { PSMOUSE_CMD_SETSCALE11, 0 }, - { PSMOUSE_CMD_GETINFO, 0 }, - { PSMOUSE_CMD_SETRES, 0x03 }, + /* + * Intellimouse initialization sequence, to get 4-byte instead + * of 3-byte packets. + */ { PSMOUSE_CMD_SETRATE, 0xC8 }, { PSMOUSE_CMD_SETRATE, 0x64 }, { PSMOUSE_CMD_SETRATE, 0x50 }, { PSMOUSE_CMD_GETID, 0 }, - { PSMOUSE_CMD_SETRATE, 0xC8 }, - { PSMOUSE_CMD_SETRATE, 0xC8 }, - { PSMOUSE_CMD_SETRATE, 0x50 }, - { PSMOUSE_CMD_GETID, 0 }, - { PSMOUSE_CMD_SETRATE, 0x64 }, - { PSMOUSE_CMD_SETRES, 0x03 }, - { PSMOUSE_CMD_ENABLE, 0 } + { PSMOUSE_CMD_ENABLE, 0 }, + /* + * BYD-specific initialization, which enables absolute mode and + * (if desired), the touchpad's built-in gesture detection. + */ + { 0x10E2, 0x00 }, + { 0x10E0, 0x02 }, + /* The touchpad should reply with 4 seemingly-random bytes */ + { 0x14E0, 0x01 }, + /* Pairs of parameters and values. */ + { BYD_CMD_SET_HANDEDNESS, 0x01 }, + { BYD_CMD_SET_PHYSICAL_BUTTONS, 0x04 }, + { BYD_CMD_SET_TAP, 0x02 }, + { BYD_CMD_SET_ONE_FINGER_SCROLL, 0x04 }, + { BYD_CMD_SET_ONE_FINGER_SCROLL_FUNC, 0x04 }, + { BYD_CMD_SET_EDGE_MOTION, 0x01 }, + { BYD_CMD_SET_PALM_CHECK, 0x00 }, + { BYD_CMD_SET_MULTITOUCH, 0x02 }, + { BYD_CMD_SET_TWO_FINGER_SCROLL, 0x04 }, + { BYD_CMD_SET_TWO_FINGER_SCROLL_FUNC, 0x04 }, + { BYD_CMD_SET_LEFT_EDGE_REGION, 0x00 }, + { BYD_CMD_SET_TOP_EDGE_REGION, 0x00 }, + { BYD_CMD_SET_RIGHT_EDGE_REGION, 0x00 }, + { BYD_CMD_SET_BOTTOM_EDGE_REGION, 0x00 }, + { BYD_CMD_SET_ABSOLUTE_MODE, 0x02 }, + /* Finalize initialization. 
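The 16-bit command words in the byd_reset_touchpad() table that follows use libps2's packing, where the upper nibbles tell ps2_command() how many bytes move in each direction. Assuming the layout from drivers/input/serio/libps2.c:

    /* ps2_command() command-word layout:
     *   bits 15..12  number of parameter bytes sent after the command
     *   bits 11..8   number of reply bytes read back
     *   bits  7..0   the raw PS/2 command byte
     */
    #define PS2_CMD_SEND_BYTES(cmd)  (((cmd) >> 12) & 0xf)
    #define PS2_CMD_RECV_BYTES(cmd)  (((cmd) >> 8) & 0xf)

    /* So 0x10E2 sends command 0xE2 with one argument byte, and 0x14E0
     * sends 0xE0 with one argument and reads four bytes back, matching
     * the "reply with 4 seemingly-random bytes" comment in the table. */
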
*/ + { 0x10E0, 0x00 }, + { 0x10E2, 0x01 }, }; - memset(param, 0, sizeof(param)); for (i = 0; i < ARRAY_SIZE(seq); ++i) { + memset(param, 0, sizeof(param)); param[0] = seq[i].arg; if (ps2_command(ps2dev, param, seq[i].command)) - return -1; + return -EIO; } - return 0; -} - -static int byd_reset_touchpad(struct psmouse *psmouse) -{ - if (byd_send_intellimouse_sequence(psmouse)) - return -EIO; - - if (byd_enable(psmouse)) - return -EIO; - + psmouse_set_state(psmouse, PSMOUSE_ACTIVATED); return 0; } @@ -314,9 +419,50 @@ static int byd_reconnect(struct psmouse *psmouse) return 0; } +static void byd_disconnect(struct psmouse *psmouse) +{ + struct byd_data *priv = psmouse->private; + + if (priv) { + del_timer(&priv->timer); + kfree(psmouse->private); + psmouse->private = NULL; + } +} + +int byd_detect(struct psmouse *psmouse, bool set_properties) +{ + struct ps2dev *ps2dev = &psmouse->ps2dev; + u8 param[4] = {0x03, 0x00, 0x00, 0x00}; + + if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) + return -1; + if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) + return -1; + if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) + return -1; + if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) + return -1; + if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) + return -1; + + if (param[1] != 0x03 || param[2] != 0x64) + return -ENODEV; + + psmouse_dbg(psmouse, "BYD touchpad detected\n"); + + if (set_properties) { + psmouse->vendor = "BYD"; + psmouse->name = "TouchPad"; + } + + return 0; +} + int byd_init(struct psmouse *psmouse) { struct input_dev *dev = psmouse->dev; + struct byd_data *priv; if (psmouse_reset(psmouse)) return -EIO; @@ -324,14 +470,39 @@ int byd_init(struct psmouse *psmouse) if (byd_reset_touchpad(psmouse)) return -EIO; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + memset(priv, 0, sizeof(*priv)); + setup_timer(&priv->timer, byd_clear_touch, (unsigned long) psmouse); + + psmouse->private = priv; + psmouse->disconnect = byd_disconnect; psmouse->reconnect = byd_reconnect; psmouse->protocol_handler = byd_process_byte; psmouse->pktsize = 4; psmouse->resync_time = 0; - __set_bit(BTN_MIDDLE, dev->keybit); - __set_bit(REL_WHEEL, dev->relbit); - __set_bit(REL_HWHEEL, dev->relbit); + __set_bit(INPUT_PROP_POINTER, dev->propbit); + /* Touchpad */ + __set_bit(BTN_TOUCH, dev->keybit); + __set_bit(BTN_TOOL_FINGER, dev->keybit); + /* Buttons */ + __set_bit(BTN_LEFT, dev->keybit); + __set_bit(BTN_RIGHT, dev->keybit); + __clear_bit(BTN_MIDDLE, dev->keybit); + + /* Absolute position */ + __set_bit(EV_ABS, dev->evbit); + input_set_abs_params(dev, ABS_X, 0, BYD_PAD_WIDTH, 0, 0); + input_set_abs_params(dev, ABS_Y, 0, BYD_PAD_HEIGHT, 0, 0); + input_abs_set_res(dev, ABS_X, BYD_PAD_RESOLUTION); + input_abs_set_res(dev, ABS_Y, BYD_PAD_RESOLUTION); + /* No relative support */ + __clear_bit(EV_REL, dev->evbit); + __clear_bit(REL_X, dev->relbit); + __clear_bit(REL_Y, dev->relbit); return 0; } diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 39d1becd35c9..5784e20542a4 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -846,7 +846,7 @@ static const struct psmouse_protocol psmouse_protocols[] = { #ifdef CONFIG_MOUSE_PS2_BYD { .type = PSMOUSE_BYD, - .name = "BydPS/2", + .name = "BYDPS/2", .alias = "byd", .detect = byd_detect, .init = byd_init, diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 6025eb430c0a..a41d8328c064 100644 --- a/drivers/input/mouse/synaptics.c +++ 
b/drivers/input/mouse/synaptics.c @@ -862,8 +862,9 @@ static void synaptics_report_ext_buttons(struct psmouse *psmouse, if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap)) return; - /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */ - if (SYN_ID_FULL(priv->identity) == 0x801 && + /* Bug in FW 8.1 & 8.2, buttons are reported only when ExtBit is 1 */ + if ((SYN_ID_FULL(priv->identity) == 0x801 || + SYN_ID_FULL(priv->identity) == 0x802) && !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02)) return; diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index da38f0ad80ed..faa295ec4f31 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -126,7 +126,7 @@ static void process_one_interrupt(struct rmi_driver_data *data, return; fh = to_rmi_function_handler(fn->dev.driver); - if (fn->irq_mask && fh->attention) { + if (fh->attention) { bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask, data->irq_count); if (!bitmap_empty(data->fn_irq_bits, data->irq_count)) @@ -172,8 +172,7 @@ int rmi_process_interrupt_requests(struct rmi_device *rmi_dev) * use irq_chip. */ list_for_each_entry(entry, &data->function_list, node) - if (entry->irq_mask) - process_one_interrupt(data, entry); + process_one_interrupt(data, entry); if (data->input) input_sync(data->input); diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c index 892729734c51..fb5fb9140ca9 100644 --- a/drivers/input/touchscreen/melfas_mip4.c +++ b/drivers/input/touchscreen/melfas_mip4.c @@ -1310,8 +1310,34 @@ static ssize_t mip4_sysfs_read_fw_version(struct device *dev, static DEVICE_ATTR(fw_version, S_IRUGO, mip4_sysfs_read_fw_version, NULL); +static ssize_t mip4_sysfs_read_hw_version(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct mip4_ts *ts = i2c_get_clientdata(client); + size_t count; + + /* Take lock to prevent racing with firmware update */ + mutex_lock(&ts->input->mutex); + + /* + * product_name shows the name or version of the hardware + * paired with current firmware in the chip. 
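The snprintf() in the melfas hunk below prints the product name with "%.*s" on purpose: the precision caps how many bytes are read, so a fixed-width firmware-supplied field is safe to print even if it is not NUL-terminated. A tiny standalone demonstration, with a hypothetical buffer name:

    #include <stdio.h>

    int main(void)
    {
            char product[4] = { 'M', 'I', 'P', '4' };  /* no NUL terminator */
            /* "%.*s" reads at most sizeof(product) bytes, so no overrun: */
            printf("%.*s\n", (int)sizeof(product), product);  /* prints "MIP4" */
            return 0;
    }
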
+ */ + count = snprintf(buf, PAGE_SIZE, "%.*s\n", + (int)sizeof(ts->product_name), ts->product_name); + + mutex_unlock(&ts->input->mutex); + + return count; +} + +static DEVICE_ATTR(hw_version, S_IRUGO, mip4_sysfs_read_hw_version, NULL); + static struct attribute *mip4_attrs[] = { &dev_attr_fw_version.attr, + &dev_attr_hw_version.attr, &dev_attr_update_fw.attr, NULL, }; @@ -1512,6 +1538,6 @@ static struct i2c_driver mip4_driver = { module_i2c_driver(mip4_driver); MODULE_DESCRIPTION("MELFAS MIP4 Touchscreen"); -MODULE_VERSION("2016.03.03"); +MODULE_VERSION("2016.03.12"); MODULE_AUTHOR("Sangwon Jee <[email protected]>"); MODULE_LICENSE("GPL"); diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index b6c4d03de340..880c40b23f66 100644 --- a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c @@ -197,28 +197,34 @@ static int sur40_command(struct sur40_state *dev, static int sur40_init(struct sur40_state *dev) { int result; - u8 buffer[24]; + u8 *buffer; + + buffer = kmalloc(24, GFP_KERNEL); + if (!buffer) { + result = -ENOMEM; + goto error; + } /* stupidly replay the original MS driver init sequence */ result = sur40_command(dev, SUR40_GET_VERSION, 0x00, buffer, 12); if (result < 0) - return result; + goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x01, buffer, 12); if (result < 0) - return result; + goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x02, buffer, 12); if (result < 0) - return result; + goto error; result = sur40_command(dev, SUR40_UNKNOWN2, 0x00, buffer, 24); if (result < 0) - return result; + goto error; result = sur40_command(dev, SUR40_UNKNOWN1, 0x00, buffer, 5); if (result < 0) - return result; + goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x03, buffer, 12); @@ -226,7 +232,8 @@ static int sur40_init(struct sur40_state *dev) * Discard the result buffer - no known data inside except * some version strings, maybe extract these sometime... */ - +error: + kfree(buffer); return result; } diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c index ef09ba0289d7..d5cfb503b9d6 100644 --- a/drivers/memstick/host/r592.c +++ b/drivers/memstick/host/r592.c @@ -298,8 +298,7 @@ static int r592_transfer_fifo_dma(struct r592_device *dev) sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); - if (sg_count != 1 || - (sg_dma_len(&dev->req->sg) < dev->req->sg.length)) { + if (sg_count != 1 || sg_dma_len(&dev->req->sg) < R592_LFIFO_SIZE) { message("problem in dma_map_sg"); return -EIO; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index 802d55457f19..fd90f3737963 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -7,7 +7,7 @@ * (at your option) any later version. 
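Two asides on the hunks above. The sur40 change moves the 24-byte command buffer off the stack because buffers handed to USB transfers must be DMA-able heap memory, not stack memory. The hisilicon and netronome changes stop including the internal asm-generic header directly; the <linux/io-64-nonatomic-hi-lo.h> wrapper supplies a readq()/writeq() fallback on platforms without native 64-bit MMIO, composed roughly like this:

    /* Approximate shape of the fallback in
     * include/linux/io-64-nonatomic-hi-lo.h: two 32-bit reads,
     * upper word first (hence "hi-lo"). */
    static inline u64 hi_lo_readq(const volatile void __iomem *addr)
    {
            const volatile u32 __iomem *p = addr;
            u32 low, high;

            high = readl(p + 1);
            low = readl(p);
            return low + ((u64)high << 32);
    }
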
*/ -#include <asm-generic/io-64-nonatomic-hi-lo.h> +#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/of_mdio.h> #include "hns_dsaf_main.h" #include "hns_dsaf_mac.h" diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index ab264e1bccd0..75683fb26734 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -45,7 +45,7 @@ #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/pci.h> -#include <asm-generic/io-64-nonatomic-hi-lo.h> +#include <linux/io-64-nonatomic-hi-lo.h> #include "nfp_net_ctrl.h" diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c index 588803ad6847..6ccba0d862df 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c @@ -357,20 +357,6 @@ static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) return 0; } -static int amd_ntb_peer_db_addr(struct ntb_dev *ntb, - phys_addr_t *db_addr, - resource_size_t *db_size) -{ - struct amd_ntb_dev *ndev = ntb_ndev(ntb); - - if (db_addr) - *db_addr = (phys_addr_t)(ndev->peer_mmio + AMD_DBREQ_OFFSET); - if (db_size) - *db_size = sizeof(u32); - - return 0; -} - static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); @@ -415,20 +401,6 @@ static int amd_ntb_spad_write(struct ntb_dev *ntb, return 0; } -static int amd_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx, - phys_addr_t *spad_addr) -{ - struct amd_ntb_dev *ndev = ntb_ndev(ntb); - - if (idx < 0 || idx >= ndev->spad_count) - return -EINVAL; - - if (spad_addr) - *spad_addr = (phys_addr_t)(ndev->self_mmio + AMD_SPAD_OFFSET + - ndev->peer_spad + (idx << 2)); - return 0; -} - static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int idx) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); @@ -472,12 +444,10 @@ static const struct ntb_dev_ops amd_ntb_ops = { .db_clear = amd_ntb_db_clear, .db_set_mask = amd_ntb_db_set_mask, .db_clear_mask = amd_ntb_db_clear_mask, - .peer_db_addr = amd_ntb_peer_db_addr, .peer_db_set = amd_ntb_peer_db_set, .spad_count = amd_ntb_spad_count, .spad_read = amd_ntb_spad_read, .spad_write = amd_ntb_spad_write, - .peer_spad_addr = amd_ntb_peer_spad_addr, .peer_spad_read = amd_ntb_peer_spad_read, .peer_spad_write = amd_ntb_peer_spad_write, }; diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index ec4775f0ec16..2ef9d9130864 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -124,6 +124,7 @@ struct ntb_transport_qp { bool client_ready; bool link_is_up; + bool active; u8 qp_num; /* Only 64 QP's are allowed. 
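The new 'active' flag introduced here closes a teardown race in ntb_transport: every tasklet_schedule(&qp->rxc_db_work) in the hunks below is now gated on qp->active, and queue teardown switches from tasklet_disable() to tasklet_kill(). The distinction matters because tasklet_disable() merely defers a pending tasklet, while tasklet_kill() waits for a running instance to finish and removes it for good. Condensed from the free-queue hunks further down, the shutdown ordering is:

    qp->active = false;                 /* stop new tasklet_schedule() calls  */
    ntb_db_set_mask(qp->ndev, qp_bit);  /* mask the doorbell interrupt source */
    tasklet_kill(&qp->rxc_db_work);     /* wait out any rx tasklet in flight  */
    cancel_delayed_work_sync(&qp->link_work);
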
0-63 */ u64 qp_bit; @@ -719,6 +720,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) { qp->link_is_up = false; + qp->active = false; qp->tx_index = 0; qp->rx_index = 0; @@ -827,7 +829,7 @@ static void ntb_transport_link_work(struct work_struct *work) struct pci_dev *pdev = ndev->pdev; resource_size_t size; u32 val; - int rc, i, spad; + int rc = 0, i, spad; /* send the local info, in the opposite order of the way we read it */ for (i = 0; i < nt->mw_count; i++) { @@ -897,6 +899,13 @@ static void ntb_transport_link_work(struct work_struct *work) out1: for (i = 0; i < nt->mw_count; i++) ntb_free_mw(nt, i); + + /* if there's an actual failure, we should just bail */ + if (rc < 0) { + ntb_link_disable(ndev); + return; + } + out: if (ntb_link_is_up(ndev, NULL, NULL) == 1) schedule_delayed_work(&nt->link_work, @@ -926,11 +935,13 @@ static void ntb_qp_link_work(struct work_struct *work) if (val & BIT(qp->qp_num)) { dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num); qp->link_is_up = true; + qp->active = true; if (qp->event_handler) qp->event_handler(qp->cb_data, qp->link_is_up); - tasklet_schedule(&qp->rxc_db_work); + if (qp->active) + tasklet_schedule(&qp->rxc_db_work); } else if (nt->link_is_up) schedule_delayed_work(&qp->link_work, msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); @@ -1411,7 +1422,8 @@ static void ntb_transport_rxc_db(unsigned long data) if (i == qp->rx_max_entry) { /* there is more work to do */ - tasklet_schedule(&qp->rxc_db_work); + if (qp->active) + tasklet_schedule(&qp->rxc_db_work); } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) { /* the doorbell bit is set: clear it */ ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num)); @@ -1422,7 +1434,8 @@ static void ntb_transport_rxc_db(unsigned long data) * ntb_process_rxc and clearing the doorbell bit: * there might be some more work to do. 
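Looking ahead to the ntb_perf hunks that follow: they replace u64 casts of MMIO pointers with properly __iomem-annotated pointers. __iomem is a sparse annotation marking addresses that must go through readl()/writel() style accessors; when plain arithmetic on such a pointer is genuinely needed, the cast goes through uintptr_t with __force, as in the page-offset computation:

    /* Offset of an MMIO pointer within its page. PAGE_MASK clears the
     * in-page bits, so ~PAGE_MASK keeps them; __force tells sparse the
     * address-space-stripping cast is intentional. */
    static size_t iomem_page_offset(const void __iomem *ptr)
    {
            return (uintptr_t __force)ptr & ~PAGE_MASK;
    }
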
*/ - tasklet_schedule(&qp->rxc_db_work); + if (qp->active) + tasklet_schedule(&qp->rxc_db_work); } } @@ -1760,6 +1773,8 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp) pdev = qp->ndev->pdev; + qp->active = false; + if (qp->tx_dma_chan) { struct dma_chan *chan = qp->tx_dma_chan; /* Putting the dma_chan to NULL will force any new traffic to be @@ -1793,7 +1808,7 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp) qp_bit = BIT_ULL(qp->qp_num); ntb_db_set_mask(qp->ndev, qp_bit); - tasklet_disable(&qp->rxc_db_work); + tasklet_kill(&qp->rxc_db_work); cancel_delayed_work_sync(&qp->link_work); @@ -1886,7 +1901,8 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); - tasklet_schedule(&qp->rxc_db_work); + if (qp->active) + tasklet_schedule(&qp->rxc_db_work); return 0; } @@ -2069,7 +2085,8 @@ static void ntb_transport_doorbell_callback(void *data, int vector) qp_num = __ffs(db_bits); qp = &nt->qp_vec[qp_num]; - tasklet_schedule(&qp->rxc_db_work); + if (qp->active) + tasklet_schedule(&qp->rxc_db_work); db_bits &= ~BIT_ULL(qp_num); } diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index c8a37ba4b4f9..8dfce9c9aad0 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -178,7 +178,7 @@ static void perf_copy_callback(void *data) atomic_dec(&pctx->dma_sync); } -static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst, +static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst, char *src, size_t size) { struct perf_ctx *perf = pctx->perf; @@ -189,7 +189,8 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst, dma_cookie_t cookie; size_t src_off, dst_off; struct perf_mw *mw = &perf->mw; - u64 vbase, dst_vaddr; + void __iomem *vbase; + void __iomem *dst_vaddr; dma_addr_t dst_phys; int retries = 0; @@ -204,14 +205,14 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst, } device = chan->device; - src_off = (size_t)src & ~PAGE_MASK; - dst_off = (size_t)dst & ~PAGE_MASK; + src_off = (uintptr_t)src & ~PAGE_MASK; + dst_off = (uintptr_t __force)dst & ~PAGE_MASK; if (!is_dma_copy_aligned(device, src_off, dst_off, size)) return -ENODEV; - vbase = (u64)(u64 *)mw->vbase; - dst_vaddr = (u64)(u64 *)dst; + vbase = mw->vbase; + dst_vaddr = dst; dst_phys = mw->phys_addr + (dst_vaddr - vbase); unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT); @@ -261,13 +262,13 @@ err_get_unmap: return 0; } -static int perf_move_data(struct pthr_ctx *pctx, char *dst, char *src, +static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src, u64 buf_size, u64 win_size, u64 total) { int chunks, total_chunks, i; int copied_chunks = 0; u64 copied = 0, result; - char *tmp = dst; + char __iomem *tmp = dst; u64 perf, diff_us; ktime_t kstart, kstop, kdiff; @@ -324,7 +325,7 @@ static int ntb_perf_thread(void *data) struct perf_ctx *perf = pctx->perf; struct pci_dev *pdev = perf->ntb->pdev; struct perf_mw *mw = &perf->mw; - char *dst; + char __iomem *dst; u64 win_size, buf_size, total; void *src; int rc, node, i; @@ -364,7 +365,7 @@ static int ntb_perf_thread(void *data) if (buf_size > MAX_TEST_SIZE) buf_size = MAX_TEST_SIZE; - dst = (char *)mw->vbase; + dst = (char __iomem *)mw->vbase; atomic_inc(&perf->tsync); while (atomic_read(&perf->tsync) != perf->perf_threads) @@ -424,6 +425,7 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size) { struct perf_mw *mw = &perf->mw; size_t xlat_size, buf_size; + int rc; if (!size) 
return -EINVAL; @@ -447,6 +449,13 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size) mw->buf_size = 0; } + rc = ntb_mw_set_trans(perf->ntb, 0, mw->dma_addr, mw->xlat_size); + if (rc) { + dev_err(&perf->ntb->dev, "Unable to set mw0 translation\n"); + perf_free_mw(perf); + return -EIO; + } + return 0; } @@ -541,6 +550,8 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf, return 0; buf = kmalloc(64, GFP_KERNEL); + if (!buf) + return -ENOMEM; out_offset = snprintf(buf, 64, "%d\n", perf->run); ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); kfree(buf); @@ -548,6 +559,21 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf, return ret; } +static void threads_cleanup(struct perf_ctx *perf) +{ + struct pthr_ctx *pctx; + int i; + + perf->run = false; + for (i = 0; i < MAX_THREADS; i++) { + pctx = &perf->pthr_ctx[i]; + if (pctx->thread) { + kthread_stop(pctx->thread); + pctx->thread = NULL; + } + } +} + static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *offp) { @@ -563,17 +589,9 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf, if (atomic_read(&perf->tsync) == 0) perf->run = false; - if (perf->run) { - /* lets stop the threads */ - perf->run = false; - for (i = 0; i < MAX_THREADS; i++) { - if (perf->pthr_ctx[i].thread) { - kthread_stop(perf->pthr_ctx[i].thread); - perf->pthr_ctx[i].thread = NULL; - } else - break; - } - } else { + if (perf->run) + threads_cleanup(perf); + else { perf->run = true; if (perf->perf_threads > MAX_THREADS) { @@ -604,17 +622,11 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf, kthread_create_on_node(ntb_perf_thread, (void *)pctx, node, "ntb_perf %d", i); - if (pctx->thread) + if (IS_ERR(pctx->thread)) { + pctx->thread = NULL; + goto err; + } else wake_up_process(pctx->thread); - else { - perf->run = false; - for (i = 0; i < MAX_THREADS; i++) { - if (pctx->thread) { - kthread_stop(pctx->thread); - pctx->thread = NULL; - } - } - } if (perf->run == false) return -ENXIO; @@ -623,6 +635,10 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf, } return count; + +err: + threads_cleanup(perf); + return -ENXIO; } static const struct file_operations ntb_perf_debugfs_run = { diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c index 80994566a1c8..8986382718dd 100644 --- a/drivers/power/avs/rockchip-io-domain.c +++ b/drivers/power/avs/rockchip-io-domain.c @@ -47,6 +47,10 @@ #define RK3368_SOC_CON15_FLASH0 BIT(14) #define RK3368_SOC_FLASH_SUPPLY_NUM 2 +#define RK3399_PMUGRF_CON0 0x180 +#define RK3399_PMUGRF_CON0_VSEL BIT(8) +#define RK3399_PMUGRF_VSEL_SUPPLY_NUM 9 + struct rockchip_iodomain; /** @@ -181,6 +185,25 @@ static void rk3368_iodomain_init(struct rockchip_iodomain *iod) dev_warn(iod->dev, "couldn't update flash0 ctrl\n"); } +static void rk3399_pmu_iodomain_init(struct rockchip_iodomain *iod) +{ + int ret; + u32 val; + + /* if no pmu io supply we should leave things alone */ + if (!iod->supplies[RK3399_PMUGRF_VSEL_SUPPLY_NUM].reg) + return; + + /* + * set pmu io iodomain to also use this framework + * instead of a special gpio. 
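The regmap_write() that follows leans on a Rockchip GRF convention worth spelling out: the top 16 bits of every GRF register act as a per-bit write-enable mask for the bottom 16, so individual bits can be flipped without a read-modify-write cycle. A generic sketch:

    /* Rockchip GRF registers: bits [31:16] write-enable bits [15:0].
     * Bits whose enable is clear keep their current value. */
    static u32 grf_field(u32 mask, u32 value)
    {
            return (mask << 16) | (value & mask);
    }

    /* Setting VSEL (bit 8): grf_field(BIT(8), BIT(8)) yields exactly the
     * RK3399_PMUGRF_CON0_VSEL | (RK3399_PMUGRF_CON0_VSEL << 16) value
     * written just below. */
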
+ */ + val = RK3399_PMUGRF_CON0_VSEL | (RK3399_PMUGRF_CON0_VSEL << 16); + ret = regmap_write(iod->grf, RK3399_PMUGRF_CON0, val); + if (ret < 0) + dev_warn(iod->dev, "couldn't update pmu io iodomain ctrl\n"); +} + /* * On the rk3188 the io-domains are handled by a shared register with the * lower 8 bits being still being continuing drive-strength settings. @@ -252,6 +275,33 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3368_pmu = { }, }; +static const struct rockchip_iodomain_soc_data soc_data_rk3399 = { + .grf_offset = 0xe640, + .supply_names = { + "bt656", /* APIO2_VDD */ + "audio", /* APIO5_VDD */ + "sdmmc", /* SDMMC0_VDD */ + "gpio1830", /* APIO4_VDD */ + }, +}; + +static const struct rockchip_iodomain_soc_data soc_data_rk3399_pmu = { + .grf_offset = 0x180, + .supply_names = { + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + "pmu1830", /* PMUIO2_VDD */ + }, + .init = rk3399_pmu_iodomain_init, +}; + static const struct of_device_id rockchip_iodomain_match[] = { { .compatible = "rockchip,rk3188-io-voltage-domain", @@ -269,6 +319,14 @@ static const struct of_device_id rockchip_iodomain_match[] = { .compatible = "rockchip,rk3368-pmu-io-voltage-domain", .data = (void *)&soc_data_rk3368_pmu }, + { + .compatible = "rockchip,rk3399-io-voltage-domain", + .data = (void *)&soc_data_rk3399 + }, + { + .compatible = "rockchip,rk3399-pmu-io-voltage-domain", + .data = (void *)&soc_data_rk3399_pmu + }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, rockchip_iodomain_match); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 544bd3493852..3e84315c6f12 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -589,7 +589,7 @@ config RTC_DRV_RV3029_HWMON default y help Say Y here if you want to expose temperature sensor data on - rtc-rv3029c2. + rtc-rv3029. config RTC_DRV_RV8803 tristate "Micro Crystal RV8803" diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c index d41bbcd653f6..ba0d61934d35 100644 --- a/drivers/rtc/rtc-abx80x.c +++ b/drivers/rtc/rtc-abx80x.c @@ -49,7 +49,20 @@ #define ABX8XX_REG_CD_TIMER_CTL 0x18 +#define ABX8XX_REG_OSC 0x1c +#define ABX8XX_OSC_FOS BIT(3) +#define ABX8XX_OSC_BOS BIT(4) +#define ABX8XX_OSC_ACAL_512 BIT(5) +#define ABX8XX_OSC_ACAL_1024 BIT(6) + +#define ABX8XX_OSC_OSEL BIT(7) + +#define ABX8XX_REG_OSS 0x1d +#define ABX8XX_OSS_OF BIT(1) +#define ABX8XX_OSS_OMODE BIT(4) + #define ABX8XX_REG_CFG_KEY 0x1f +#define ABX8XX_CFG_KEY_OSC 0xa1 #define ABX8XX_CFG_KEY_MISC 0x9d #define ABX8XX_REG_ID0 0x28 @@ -81,6 +94,20 @@ static struct abx80x_cap abx80x_caps[] = { [ABX80X] = {.pn = 0} }; +static int abx80x_is_rc_mode(struct i2c_client *client) +{ + int flags = 0; + + flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSS); + if (flags < 0) { + dev_err(&client->dev, + "Failed to read autocalibration attribute\n"); + return flags; + } + + return (flags & ABX8XX_OSS_OMODE) ? 
1 : 0; +} + static int abx80x_enable_trickle_charger(struct i2c_client *client, u8 trickle_cfg) { @@ -112,7 +139,23 @@ static int abx80x_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct i2c_client *client = to_i2c_client(dev); unsigned char buf[8]; - int err; + int err, flags, rc_mode = 0; + + /* Read the Oscillator Failure only in XT mode */ + rc_mode = abx80x_is_rc_mode(client); + if (rc_mode < 0) + return rc_mode; + + if (!rc_mode) { + flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSS); + if (flags < 0) + return flags; + + if (flags & ABX8XX_OSS_OF) { + dev_err(dev, "Oscillator failure, data is invalid.\n"); + return -EINVAL; + } + } err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_HTH, sizeof(buf), buf); @@ -140,7 +183,7 @@ static int abx80x_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct i2c_client *client = to_i2c_client(dev); unsigned char buf[8]; - int err; + int err, flags; if (tm->tm_year < 100) return -EINVAL; @@ -161,6 +204,18 @@ static int abx80x_rtc_set_time(struct device *dev, struct rtc_time *tm) return -EIO; } + /* Clear the OF bit of Oscillator Status Register */ + flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSS); + if (flags < 0) + return flags; + + err = i2c_smbus_write_byte_data(client, ABX8XX_REG_OSS, + flags & ~ABX8XX_OSS_OF); + if (err < 0) { + dev_err(&client->dev, "Unable to write oscillator status register\n"); + return err; + } + return 0; } @@ -248,6 +303,174 @@ static int abx80x_set_alarm(struct device *dev, struct rtc_wkalrm *t) return 0; } +static int abx80x_rtc_set_autocalibration(struct device *dev, + int autocalibration) +{ + struct i2c_client *client = to_i2c_client(dev); + int retval, flags = 0; + + if ((autocalibration != 0) && (autocalibration != 1024) && + (autocalibration != 512)) { + dev_err(dev, "autocalibration value outside permitted range\n"); + return -EINVAL; + } + + flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSC); + if (flags < 0) + return flags; + + if (autocalibration == 0) { + flags &= ~(ABX8XX_OSC_ACAL_512 | ABX8XX_OSC_ACAL_1024); + } else if (autocalibration == 1024) { + /* 1024 autocalibration is 0x10 */ + flags |= ABX8XX_OSC_ACAL_1024; + flags &= ~(ABX8XX_OSC_ACAL_512); + } else { + /* 512 autocalibration is 0x11 */ + flags |= (ABX8XX_OSC_ACAL_1024 | ABX8XX_OSC_ACAL_512); + } + + /* Unlock write access to Oscillator Control Register */ + retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY, + ABX8XX_CFG_KEY_OSC); + if (retval < 0) { + dev_err(dev, "Failed to write CONFIG_KEY register\n"); + return retval; + } + + retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_OSC, flags); + + return retval; +} + +static int abx80x_rtc_get_autocalibration(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + int flags = 0, autocalibration; + + flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSC); + if (flags < 0) + return flags; + + if (flags & ABX8XX_OSC_ACAL_512) + autocalibration = 512; + else if (flags & ABX8XX_OSC_ACAL_1024) + autocalibration = 1024; + else + autocalibration = 0; + + return autocalibration; +} + +static ssize_t autocalibration_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int retval; + unsigned long autocalibration = 0; + + retval = kstrtoul(buf, 10, &autocalibration); + if (retval < 0) { + dev_err(dev, "Failed to store RTC autocalibration attribute\n"); + return -EINVAL; + } + + retval = abx80x_rtc_set_autocalibration(dev, autocalibration); + + return retval ? 
retval : count; +} + +static ssize_t autocalibration_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int autocalibration = 0; + + autocalibration = abx80x_rtc_get_autocalibration(dev); + if (autocalibration < 0) { + dev_err(dev, "Failed to read RTC autocalibration\n"); + sprintf(buf, "0\n"); + return autocalibration; + } + + return sprintf(buf, "%d\n", autocalibration); +} + +static DEVICE_ATTR_RW(autocalibration); + +static ssize_t oscillator_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + int retval, flags, rc_mode = 0; + + if (strncmp(buf, "rc", 2) == 0) { + rc_mode = 1; + } else if (strncmp(buf, "xtal", 4) == 0) { + rc_mode = 0; + } else { + dev_err(dev, "Oscillator selection value outside permitted ones\n"); + return -EINVAL; + } + + flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSC); + if (flags < 0) + return flags; + + if (rc_mode == 0) + flags &= ~(ABX8XX_OSC_OSEL); + else + flags |= (ABX8XX_OSC_OSEL); + + /* Unlock write access on Oscillator Control register */ + retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY, + ABX8XX_CFG_KEY_OSC); + if (retval < 0) { + dev_err(dev, "Failed to write CONFIG_KEY register\n"); + return retval; + } + + retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_OSC, flags); + if (retval < 0) { + dev_err(dev, "Failed to write Oscillator Control register\n"); + return retval; + } + + return retval ? retval : count; +} + +static ssize_t oscillator_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int rc_mode = 0; + struct i2c_client *client = to_i2c_client(dev); + + rc_mode = abx80x_is_rc_mode(client); + + if (rc_mode < 0) { + dev_err(dev, "Failed to read RTC oscillator selection\n"); + sprintf(buf, "\n"); + return rc_mode; + } + + if (rc_mode) + return sprintf(buf, "rc\n"); + else + return sprintf(buf, "xtal\n"); +} + +static DEVICE_ATTR_RW(oscillator); + +static struct attribute *rtc_calib_attrs[] = { + &dev_attr_autocalibration.attr, + &dev_attr_oscillator.attr, + NULL, +}; + +static const struct attribute_group rtc_calib_attr_group = { + .attrs = rtc_calib_attrs, +}; + static int abx80x_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct i2c_client *client = to_i2c_client(dev); @@ -303,6 +526,13 @@ static int abx80x_dt_trickle_cfg(struct device_node *np) return (trickle_cfg | i); } +static void rtc_calib_remove_sysfs_group(void *_dev) +{ + struct device *dev = _dev; + + sysfs_remove_group(&dev->kobj, &rtc_calib_attr_group); +} + static int abx80x_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -405,6 +635,24 @@ static int abx80x_probe(struct i2c_client *client, } } + /* Export sysfs entries */ + err = sysfs_create_group(&(&client->dev)->kobj, &rtc_calib_attr_group); + if (err) { + dev_err(&client->dev, "Failed to create sysfs group: %d\n", + err); + return err; + } + + err = devm_add_action(&client->dev, rtc_calib_remove_sysfs_group, + &client->dev); + if (err) { + rtc_calib_remove_sysfs_group(&client->dev); + dev_err(&client->dev, + "Failed to add sysfs cleanup action: %d\n", + err); + return err; + } + return 0; } diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c index 14e08c4c1a01..355fdb97a006 100644 --- a/drivers/rtc/rtc-asm9260.c +++ b/drivers/rtc/rtc-asm9260.c @@ -255,7 +255,7 @@ static const struct rtc_class_ops asm9260_rtc_ops = { .alarm_irq_enable = asm9260_alarm_irq_enable, }; -static int __init asm9260_rtc_probe(struct 
platform_device *pdev) +static int asm9260_rtc_probe(struct platform_device *pdev) { struct asm9260_rtc_priv *priv; struct device *dev = &pdev->dev; @@ -323,7 +323,7 @@ err_return: return ret; } -static int __exit asm9260_rtc_remove(struct platform_device *pdev) +static int asm9260_rtc_remove(struct platform_device *pdev) { struct asm9260_rtc_priv *priv = platform_get_drvdata(pdev); diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index a82937e2f824..d107a8e72a7d 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -176,7 +176,13 @@ static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm) bin2bcd(tm->tm_mday) | (buf[M41T80_REG_DAY] & ~0x3f); buf[M41T80_REG_MON] = bin2bcd(tm->tm_mon + 1) | (buf[M41T80_REG_MON] & ~0x1f); + /* assume 20YY not 19YY */ + if (tm->tm_year < 100 || tm->tm_year > 199) { + dev_err(&client->dev, "Year must be between 2000 and 2099. It's %d.\n", + tm->tm_year + 1900); + return -EINVAL; + } buf[M41T80_REG_YEAR] = bin2bcd(tm->tm_year % 100); if (i2c_transfer(client->adapter, msgs, 1) != 1) { diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c index 1c91ce8a6d75..025bb33b9cd2 100644 --- a/drivers/rtc/rtc-mcp795.c +++ b/drivers/rtc/rtc-mcp795.c @@ -20,6 +20,7 @@ #include <linux/printk.h> #include <linux/spi/spi.h> #include <linux/rtc.h> +#include <linux/of.h> /* MCP795 Instructions, see datasheet table 3-1 */ #define MCP795_EEREAD 0x03 @@ -183,9 +184,18 @@ static int mcp795_probe(struct spi_device *spi) return 0; } +#ifdef CONFIG_OF +static const struct of_device_id mcp795_of_match[] = { + { .compatible = "maxim,mcp795" }, + { } +}; +MODULE_DEVICE_TABLE(of, mcp795_of_match); +#endif + static struct spi_driver mcp795_driver = { .driver = { .name = "rtc-mcp795", + .of_match_table = of_match_ptr(mcp795_of_match), }, .probe = mcp795_probe, }; diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c index 8d9f35ceb808..f623038e586e 100644 --- a/drivers/rtc/rtc-rv8803.c +++ b/drivers/rtc/rtc-rv8803.c @@ -61,11 +61,14 @@ static irqreturn_t rv8803_handle_irq(int irq, void *dev_id) struct i2c_client *client = dev_id; struct rv8803_data *rv8803 = i2c_get_clientdata(client); unsigned long events = 0; - int flags; + int flags, try = 0; mutex_lock(&rv8803->flags_lock); - flags = i2c_smbus_read_byte_data(client, RV8803_FLAG); + do { + flags = i2c_smbus_read_byte_data(client, RV8803_FLAG); + try++; + } while ((flags == -ENXIO) && (try < 3)); if (flags <= 0) { mutex_unlock(&rv8803->flags_lock); return IRQ_NONE; @@ -424,7 +427,7 @@ static int rv8803_probe(struct i2c_client *client, { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct rv8803_data *rv8803; - int err, flags; + int err, flags, try = 0; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_I2C_BLOCK)) { @@ -441,7 +444,16 @@ static int rv8803_probe(struct i2c_client *client, rv8803->client = client; i2c_set_clientdata(client, rv8803); - flags = i2c_smbus_read_byte_data(client, RV8803_FLAG); + /* + * There is a 60µs window where the RTC may not reply on the i2c bus in + * that case, the transfer is not ACKed. In that case, ensure there are + * multiple attempts. 
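The comment above explains the retry loops sprinkled through this rv8803 patch: for roughly 60µs after certain operations the chip NAKs its address, the SMBus read fails with -ENXIO, and each access is therefore attempted up to three times. Since the same loop now appears three times in the driver, it could reasonably be factored out; a hypothetical helper, not part of the patch:

    /* Hypothetical: retry a register read the RTC may NAK (-ENXIO)
     * during its ~60us busy window; at most three attempts. */
    static int rv8803_read_reg_retry(struct i2c_client *client, u8 reg)
    {
            int ret, try = 0;

            do {
                    ret = i2c_smbus_read_byte_data(client, reg);
            } while (ret == -ENXIO && ++try < 3);

            return ret;
    }
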
+ */ + do { + flags = i2c_smbus_read_byte_data(client, RV8803_FLAG); + try++; + } while ((flags == -ENXIO) && (try < 3)); + if (flags < 0) return flags; @@ -476,8 +488,12 @@ static int rv8803_probe(struct i2c_client *client, return PTR_ERR(rv8803->rtc); } - err = i2c_smbus_write_byte_data(rv8803->client, RV8803_EXT, - RV8803_EXT_WADA); + try = 0; + do { + err = i2c_smbus_write_byte_data(rv8803->client, RV8803_EXT, + RV8803_EXT_WADA); + try++; + } while ((err == -ENXIO) && (try < 3)); if (err) return err; diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index ffb860d18701..d01ad7e8078e 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -501,18 +501,27 @@ static int s3c_rtc_probe(struct platform_device *pdev) info->rtc_clk = devm_clk_get(&pdev->dev, "rtc"); if (IS_ERR(info->rtc_clk)) { - dev_err(&pdev->dev, "failed to find rtc clock\n"); - return PTR_ERR(info->rtc_clk); + ret = PTR_ERR(info->rtc_clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to find rtc clock\n"); + else + dev_dbg(&pdev->dev, "probe deferred due to missing rtc clk\n"); + return ret; } clk_prepare_enable(info->rtc_clk); if (info->data->needs_src_clk) { info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src"); if (IS_ERR(info->rtc_src_clk)) { - dev_err(&pdev->dev, - "failed to find rtc source clock\n"); + ret = PTR_ERR(info->rtc_src_clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to find rtc source clock\n"); + else + dev_dbg(&pdev->dev, + "probe deferred due to missing rtc src clk\n"); clk_disable_unprepare(info->rtc_clk); - return PTR_ERR(info->rtc_src_clk); + return ret; } clk_prepare_enable(info->rtc_src_clk); } diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 5bcdf8dd6fb0..a404a41e871c 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -332,7 +332,7 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, { int rel_port = -1, group_id; struct alua_port_group *pg, *old_pg = NULL; - bool pg_updated; + bool pg_updated = false; unsigned long flags; group_id = scsi_vpd_tpg_id(sdev, &rel_port); diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 266b909fe854..f3032ca5051b 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -958,23 +958,22 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, case FCPIO_INVALID_PARAM: /* some parameter in request invalid */ case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ default: - shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", - fnic_fcpio_status_to_str(hdr_status)); sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break; } - if (hdr_status != FCPIO_SUCCESS) { - atomic64_inc(&fnic_stats->io_stats.io_failures); - shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", - fnic_fcpio_status_to_str(hdr_status)); - } /* Break link with the SCSI command */ CMD_SP(sc) = NULL; CMD_FLAGS(sc) |= FNIC_IO_DONE; spin_unlock_irqrestore(io_lock, flags); + if (hdr_status != FCPIO_SUCCESS) { + atomic64_inc(&fnic_stats->io_stats.io_failures); + shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", + fnic_fcpio_status_to_str(hdr_status)); + } + fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index a544366a367e..f57d02c3b6cf 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ 
b/drivers/scsi/lpfc/lpfc_init.c @@ -2860,7 +2860,7 @@ lpfc_online(struct lpfc_hba *phba) } vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) + if (vports != NULL) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { struct Scsi_Host *shost; shost = lpfc_shost_from_vport(vports[i]); @@ -2877,7 +2877,8 @@ lpfc_online(struct lpfc_hba *phba) } spin_unlock_irq(shost->host_lock); } - lpfc_destroy_vport_work_array(phba, vports); + } + lpfc_destroy_vport_work_array(phba, vports); lpfc_unblock_mgmt_io(phba); return 0; diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 4484e63033a5..fce414a2cd76 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2097,7 +2097,7 @@ struct megasas_instance { u8 UnevenSpanSupport; u8 supportmax256vd; - u8 allow_fw_scan; + u8 pd_list_not_supported; u16 fw_supported_vd_count; u16 fw_supported_pd_count; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 5c08568ccfbf..e6ebc7ae2df1 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1838,7 +1838,7 @@ static int megasas_slave_configure(struct scsi_device *sdev) struct megasas_instance *instance; instance = megasas_lookup_instance(sdev->host->host_no); - if (instance->allow_fw_scan) { + if (instance->pd_list_not_supported) { if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && sdev->type == TYPE_DISK) { pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + @@ -1874,7 +1874,8 @@ static int megasas_slave_alloc(struct scsi_device *sdev) pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; - if ((instance->allow_fw_scan || instance->pd_list[pd_index].driveState == + if ((instance->pd_list_not_supported || + instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM)) { goto scan_target; } @@ -4087,7 +4088,13 @@ megasas_get_pd_list(struct megasas_instance *instance) switch (ret) { case DCMD_FAILED: - megaraid_sas_kill_hba(instance); + dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " + "failed/not supported by firmware\n"); + + if (instance->ctrl_context) + megaraid_sas_kill_hba(instance); + else + instance->pd_list_not_supported = 1; break; case DCMD_TIMEOUT: @@ -5034,7 +5041,6 @@ static int megasas_init_fw(struct megasas_instance *instance) case PCI_DEVICE_ID_DELL_PERC5: default: instance->instancet = &megasas_instance_template_xscale; - instance->allow_fw_scan = 1; break; } @@ -6650,12 +6656,13 @@ out: } for (i = 0; i < ioc->sge_count; i++) { - if (kbuff_arr[i]) + if (kbuff_arr[i]) { dma_free_coherent(&instance->pdev->dev, le32_to_cpu(kern_sge32[i].length), kbuff_arr[i], le32_to_cpu(kern_sge32[i].phys_addr)); kbuff_arr[i] = NULL; + } } megasas_return_cmd(instance, cmd); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 985231900aca..8a44d1541eb4 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1881,15 +1881,17 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, else vha->req->cnt = vha->req->length - (vha->req->ring_index - cnt); - } - if (unlikely(vha->req->cnt < (req_cnt + 2))) { - ql_dbg(ql_dbg_io, vha, 0x305a, - "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n", - vha->vp_idx, vha->req->ring_index, - vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length); - return -EAGAIN; + if 
(unlikely(vha->req->cnt < (req_cnt + 2))) { + ql_dbg(ql_dbg_io, vha, 0x305a, + "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n", + vha->vp_idx, vha->req->ring_index, + vha->req->cnt, req_cnt, cnt, cnt_in, + vha->req->length); + return -EAGAIN; + } } + vha->req->cnt -= req_cnt; return 0; diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c index c126966130ab..ce79de822e46 100644 --- a/drivers/scsi/scsi_common.c +++ b/drivers/scsi/scsi_common.c @@ -278,8 +278,16 @@ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info) ucp[3] = 0; put_unaligned_be64(info, &ucp[4]); } else if ((buf[0] & 0x7f) == 0x70) { - buf[0] |= 0x80; - put_unaligned_be64(info, &buf[3]); + /* + * Only set the 'VALID' bit if we can represent the value + * correctly; otherwise just fill out the lower bytes and + * clear the 'VALID' flag. + */ + if (info <= 0xffffffffUL) + buf[0] |= 0x80; + else + buf[0] &= 0x7f; + put_unaligned_be32((u32)info, &buf[3]); } return 0; diff --git a/drivers/scsi/scsi_sas_internal.h b/drivers/scsi/scsi_sas_internal.h index 6266a5d73d0f..e659912498bd 100644 --- a/drivers/scsi/scsi_sas_internal.h +++ b/drivers/scsi/scsi_sas_internal.h @@ -4,7 +4,7 @@ #define SAS_HOST_ATTRS 0 #define SAS_PHY_ATTRS 17 #define SAS_PORT_ATTRS 1 -#define SAS_RPORT_ATTRS 7 +#define SAS_RPORT_ATTRS 8 #define SAS_END_DEV_ATTRS 5 #define SAS_EXPANDER_ATTRS 7 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index d16441961f3a..92ffd2406f97 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1105,7 +1105,7 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj, if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80) return 0; - if (attr == &dev_attr_vpd_pg83 && sdev->vpd_pg83) + if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83) return 0; return S_IRUGO; diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 80520e2f0fa2..b6f958193dad 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1286,6 +1286,7 @@ sas_rphy_protocol_attr(identify.target_port_protocols, target_port_protocols); sas_rphy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", unsigned long long); sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); +sas_rphy_simple_attr(scsi_target_id, scsi_target_id, "%d\n", u32); /* only need 8 bytes of data plus header (4 or 8) */ #define BUF_SIZE 64 @@ -1886,6 +1887,7 @@ sas_attach_transport(struct sas_function_template *ft) SETUP_RPORT_ATTRIBUTE(rphy_device_type); SETUP_RPORT_ATTRIBUTE(rphy_sas_address); SETUP_RPORT_ATTRIBUTE(rphy_phy_identifier); + SETUP_RPORT_ATTRIBUTE(rphy_scsi_target_id); SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_enclosure_identifier, get_enclosure_identifier); SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_bay_identifier, diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 5f4530744e0a..097894a1fab5 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -37,6 +37,7 @@ config SCSI_UFSHCD depends on SCSI && SCSI_DMA select PM_DEVFREQ select DEVFREQ_GOV_SIMPLE_ONDEMAND + select NLS ---help--- This selects the support for UFS devices in Linux, say Y and make sure that you know the name of your UFS host adapter (the card diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 4f38d008bfb4..3aedf73f1131 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1,5 +1,5 @@ /* - 
* Copyright (c) 2013-2015, Linux Foundation. All rights reserved. + * Copyright (c) 2013-2016, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -16,8 +16,8 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> - #include <linux/phy/phy-qcom-ufs.h> + #include "ufshcd.h" #include "ufshcd-pltfrm.h" #include "unipro.h" @@ -58,6 +58,12 @@ static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len, len * 4, false); } +static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len, + char *prefix, void *priv) +{ + ufs_qcom_dump_regs(hba, offset, len, prefix); +} + static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) { int err = 0; @@ -106,9 +112,11 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host) if (!host->is_lane_clks_enabled) return; - clk_disable_unprepare(host->tx_l1_sync_clk); + if (host->hba->lanes_per_direction > 1) + clk_disable_unprepare(host->tx_l1_sync_clk); clk_disable_unprepare(host->tx_l0_sync_clk); - clk_disable_unprepare(host->rx_l1_sync_clk); + if (host->hba->lanes_per_direction > 1) + clk_disable_unprepare(host->rx_l1_sync_clk); clk_disable_unprepare(host->rx_l0_sync_clk); host->is_lane_clks_enabled = false; @@ -132,21 +140,24 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host) if (err) goto disable_rx_l0; - err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", - host->rx_l1_sync_clk); - if (err) - goto disable_tx_l0; + if (host->hba->lanes_per_direction > 1) { + err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", + host->rx_l1_sync_clk); + if (err) + goto disable_tx_l0; - err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", - host->tx_l1_sync_clk); - if (err) - goto disable_rx_l1; + err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", + host->tx_l1_sync_clk); + if (err) + goto disable_rx_l1; + } host->is_lane_clks_enabled = true; goto out; disable_rx_l1: - clk_disable_unprepare(host->rx_l1_sync_clk); + if (host->hba->lanes_per_direction > 1) + clk_disable_unprepare(host->rx_l1_sync_clk); disable_tx_l0: clk_disable_unprepare(host->tx_l0_sync_clk); disable_rx_l0: @@ -170,14 +181,16 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host) if (err) goto out; - err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", - &host->rx_l1_sync_clk); - if (err) - goto out; - - err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", - &host->tx_l1_sync_clk); + /* In case of single lane per direction, don't read lane1 clocks */ + if (host->hba->lanes_per_direction > 1) { + err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", + &host->rx_l1_sync_clk); + if (err) + goto out; + err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", + &host->tx_l1_sync_clk); + } out: return err; } @@ -267,9 +280,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B); if (ret) { - dev_err(hba->dev, - "%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n", - __func__, ret); + dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n", + __func__, ret); goto out; } @@ -519,6 +531,18 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150); + /* + * Some UFS devices (and may be host) have issues if LCC is + * enabled. 
So we are setting PA_Local_TX_LCC_Enable to 0 + * before link startup which will make sure that both host + * and device TX LCC are disabled once link startup is + * completed. + */ + if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41) + err = ufshcd_dme_set(hba, + UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), + 0); + break; case POST_CHANGE: ufs_qcom_link_startup_post_change(hba); @@ -962,6 +986,10 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, goto out; } + /* enable the device ref clock before changing to HS mode */ + if (!ufshcd_is_hs_mode(&hba->pwr_info) && + ufshcd_is_hs_mode(dev_req_params)) + ufs_qcom_dev_ref_clk_ctrl(host, true); break; case POST_CHANGE: if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, @@ -989,6 +1017,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, memcpy(&host->dev_req_params, dev_req_params, sizeof(*dev_req_params)); ufs_qcom_update_bus_bw_vote(host); + + /* disable the device ref clock if entered PWM mode */ + if (ufshcd_is_hs_mode(&hba->pwr_info) && + !ufshcd_is_hs_mode(dev_req_params)) + ufs_qcom_dev_ref_clk_ctrl(host, false); break; default: ret = -EINVAL; @@ -1090,6 +1123,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on) ufs_qcom_phy_disable_iface_clk(host->generic_phy); goto out; } + /* enable the device ref clock for HS mode*/ + if (ufshcd_is_hs_mode(&hba->pwr_info)) + ufs_qcom_dev_ref_clk_ctrl(host, true); vote = host->bus_vote.saved_vote; if (vote == host->bus_vote.min_bw_vote) ufs_qcom_update_bus_bw_vote(host); @@ -1367,6 +1403,74 @@ out: return err; } +static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, + void *priv, void (*print_fn)(struct ufs_hba *hba, + int offset, int num_regs, char *str, void *priv)) +{ + u32 reg; + struct ufs_qcom_host *host; + + if (unlikely(!hba)) { + pr_err("%s: hba is NULL\n", __func__); + return; + } + if (unlikely(!print_fn)) { + dev_err(hba->dev, "%s: print_fn is NULL\n", __func__); + return; + } + + host = ufshcd_get_variant(hba); + if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN)) + return; + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC); + print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv); + + reg = ufshcd_readl(hba, REG_UFS_CFG1); + reg |= UFS_BIT(17); + ufshcd_writel(hba, reg, REG_UFS_CFG1); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM); + print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM); + print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM); + print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv); + + ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM); + print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM); + print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC); + print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC); + print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC); + print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT); + print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, 
UFS_DBG_RD_REG_TMRLUT); + print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv); +} + +static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host) +{ + if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) + ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1); + else + ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1); +} + static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host) { /* provide a legal default configuration */ @@ -1475,6 +1579,7 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) ufshcd_rmwl(host->hba, mask, (u32)host->testbus.select_minor << offset, reg); + ufs_qcom_enable_test_bus(host); ufshcd_release(host->hba); pm_runtime_put_sync(host->hba->dev); @@ -1491,8 +1596,10 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16, "HCI Vendor Specific Registers "); + ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); ufs_qcom_testbus_read(hba); } + /** * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations * @@ -1537,7 +1644,7 @@ static int ufs_qcom_probe(struct platform_device *pdev) * ufs_qcom_remove - set driver_data of the device to NULL * @pdev: pointer to platform device handle * - * Always return 0 + * Always returns 0 */ static int ufs_qcom_remove(struct platform_device *pdev) { diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index 36249b35f858..a19307a57ce2 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h @@ -241,6 +241,15 @@ struct ufs_qcom_host { struct ufs_qcom_testbus testbus; }; +static inline u32 +ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg) +{ + if (host->hw_ver.major <= 0x02) + return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg); + + return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg); +}; + #define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba) #define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba) #define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba) diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 54a16cef0367..b291fa6ed2ad 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -43,6 +43,7 @@ #define GENERAL_UPIU_REQUEST_SIZE 32 #define QUERY_DESC_MAX_SIZE 255 #define QUERY_DESC_MIN_SIZE 2 +#define QUERY_DESC_HDR_SIZE 2 #define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \ (sizeof(struct utp_upiu_header))) @@ -195,6 +196,37 @@ enum unit_desc_param { UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, }; +/* Device descriptor parameters offsets in bytes*/ +enum device_desc_param { + DEVICE_DESC_PARAM_LEN = 0x0, + DEVICE_DESC_PARAM_TYPE = 0x1, + DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2, + DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3, + DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4, + DEVICE_DESC_PARAM_PRTCL = 0x5, + DEVICE_DESC_PARAM_NUM_LU = 0x6, + DEVICE_DESC_PARAM_NUM_WLU = 0x7, + DEVICE_DESC_PARAM_BOOT_ENBL = 0x8, + DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9, + DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA, + DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB, + DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC, + DEVICE_DESC_PARAM_SEC_LU = 0xD, + DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE, + DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF, + DEVICE_DESC_PARAM_SPEC_VER = 0x10, + DEVICE_DESC_PARAM_MANF_DATE = 0x12, + DEVICE_DESC_PARAM_MANF_NAME = 0x14, + DEVICE_DESC_PARAM_PRDCT_NAME = 0x15, + DEVICE_DESC_PARAM_SN = 0x16, + DEVICE_DESC_PARAM_OEM_ID = 0x17, + DEVICE_DESC_PARAM_MANF_ID = 0x18, + DEVICE_DESC_PARAM_UD_OFFSET = 0x1A, + DEVICE_DESC_PARAM_UD_LEN = 0x1B, + DEVICE_DESC_PARAM_RTT_CAP = 0x1C, + 
DEVICE_DESC_PARAM_FRQ_RTC = 0x1D, +}; + /* * Logical Unit Write Protect * 00h: LU not write protected @@ -469,6 +501,7 @@ struct ufs_vreg { struct regulator *reg; const char *name; bool enabled; + bool unused; int min_uV; int max_uV; int min_uA; diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h new file mode 100644 index 000000000000..ee4ab85e2801 --- /dev/null +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _UFS_QUIRKS_H_ +#define _UFS_QUIRKS_H_ + +/* return true if s1 is a prefix of s2 */ +#define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1)) + +#define UFS_ANY_VENDOR 0xFFFF +#define UFS_ANY_MODEL "ANY_MODEL" + +#define MAX_MODEL_LEN 16 + +#define UFS_VENDOR_TOSHIBA 0x198 +#define UFS_VENDOR_SAMSUNG 0x1CE + +/** + * ufs_device_info - ufs device details + * @wmanufacturerid: card details + * @model: card model + */ +struct ufs_device_info { + u16 wmanufacturerid; + char model[MAX_MODEL_LEN + 1]; }; + +/** + * ufs_dev_fix - ufs device quirk info + * @card: ufs card details + * @quirk: device quirk + */ +struct ufs_dev_fix { + struct ufs_device_info card; + unsigned int quirk; +}; + +#define END_FIX { { 0 }, 0 } + +/* add specific device quirk */ +#define UFS_FIX(_vendor, _model, _quirk) \ + { \ + .card.wmanufacturerid = (_vendor),\ + .card.model = (_model), \ + .quirk = (_quirk), \ + } + +/* + * If a UFS device has issues processing LCC (Line Control Command) frames + * coming from the UFS host controller, enable this quirk. + * When this quirk is enabled, the host controller driver should disable + * LCC transmission on the UFS host controller (by clearing the + * TX_LCC_ENABLE attribute of the host to 0). + */ +#define UFS_DEVICE_QUIRK_BROKEN_LCC (1 << 0) + +/* + * Some UFS devices don't need the VCCQ rail for device operations. Enabling this + * quirk for such devices will make sure that the VCCQ rail is not voted. + */ +#define UFS_DEVICE_NO_VCCQ (1 << 1) + +/* + * Some vendors' UFS devices send back-to-back NACs for the DL data frames, + * causing the host controller to raise the DFES error status. Sometimes + * such UFS devices send back-to-back NACs without waiting for a new + * retransmitted DL frame from the host, and in such cases the Host UniPro may + * go into a bad state without raising the DFES error + * interrupt. If this happens, all the pending commands would time out + * only after the respective SW command timeout (which is generally too large). + * + * We can work around such device behaviour like this: + * - As soon as SW sees the DL NAC error, it should schedule the error handler + * - The error handler would sleep for 50ms to see if there are any fatal errors + * raised by the UFS controller. + * - If there are fatal errors, SW does normal error recovery. + * - If there are no fatal errors, SW sends the NOP command to the device + * to check if the link is alive. + * - If the NOP command times out, SW does normal error recovery + * - If the NOP command succeeds, skip the error handling.
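+ * + * An illustrative sketch (not part of the quirk definition itself) of how the + * host driver consumes this quirk bit at error-handling time: + * + * if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) + * err_handling = ufshcd_quirk_dl_nac_errors(hba);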
+ * + * If the DL NAC error is seen multiple times with some vendors' UFS devices, + * enable this quirk to initiate quick error recovery and also silence related + * error logs to reduce spamming of kernel logs. + */ +#define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2) + +/* + * Some UFS devices may not work properly after resume if the link was kept + * in off state during suspend. Enabling this quirk will not allow the + * link to be kept in off state during suspend. + */ +#define UFS_DEVICE_QUIRK_NO_LINK_OFF (1 << 3) + +/* + * A few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as + * 600us, which may not be enough for a reliable hibern8 exit hardware sequence + * from the UFS device. + * To work around this issue, the host should set its PA_TACTIVATE time to 1ms even + * if the device advertises RX_MIN_ACTIVATETIME_CAPABILITY less than 1ms. + */ +#define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4) + +/* + * Some UFS memory devices may have really low read/write throughput in + * FAST AUTO mode; enable this quirk to make sure that FAST AUTO mode is + * never enabled for such devices. + */ +#define UFS_DEVICE_NO_FASTAUTO (1 << 5) + +/* + * It seems some UFS devices may keep drawing more than sleep current + * (at least for 500us) from UFS rails (especially from the VCCQ rail). + * To avoid this situation, add a 2ms delay before putting these UFS + * rails in LPM mode. + */ +#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6) + +struct ufs_hba; +void ufs_advertise_fixup_device(struct ufs_hba *hba); + +static struct ufs_dev_fix ufs_fixups[] = { + /* UFS card deviations table */ + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS), + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, + UFS_DEVICE_NO_FASTAUTO), + UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), + UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG", + UFS_DEVICE_QUIRK_PA_TACTIVATE), + UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG", + UFS_DEVICE_QUIRK_PA_TACTIVATE), + + END_FIX +}; +#endif /* _UFS_QUIRKS_H_ */ diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index d2a7b127b05c..718f12e09885 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -40,6 +40,8 @@ #include "ufshcd.h" #include "ufshcd-pltfrm.h" +#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2 + static int ufshcd_parse_clock_info(struct ufs_hba *hba) { int ret = 0; @@ -277,6 +279,21 @@ void ufshcd_pltfrm_shutdown(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown); +static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba) +{ + struct device *dev = hba->dev; + int ret; + + ret = of_property_read_u32(dev->of_node, "lanes-per-direction", + &hba->lanes_per_direction); + if (ret) { + dev_dbg(hba->dev, + "%s: failed to read lanes-per-direction, ret=%d\n", + __func__, ret); + hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION; + } +} + /** * ufshcd_pltfrm_init - probe routine of the driver * @pdev: pointer to Platform device handle * @@ -331,6 +348,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); + ufshcd_init_lanes_per_dir(hba); + err = ufshcd_init(hba, mmio_base, irq); if (err) { dev_err(dev, "Initialization failed\n"); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index
9c1b94bef8f3..f8fa72c31a9d 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -39,8 +39,10 @@ #include <linux/async.h> #include <linux/devfreq.h> - +#include <linux/nls.h> +#include <linux/of.h> #include "ufshcd.h" +#include "ufs_quirks.h" #include "unipro.h" #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ @@ -131,9 +133,11 @@ enum { /* UFSHCD UIC layer error flags */ enum { UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */ - UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */ - UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */ - UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */ + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */ + UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */ + UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */ + UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */ + UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */ }; /* Interrupt configuration options */ @@ -193,6 +197,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba); static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, bool skip_ref_clk); static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); +static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused); static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); @@ -231,6 +236,16 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba) } } +/* replace non-printable or non-ASCII characters with spaces */ +static inline void ufshcd_remove_non_printable(char *val) +{ + if (!val) + return; + + if (*val < 0x20 || *val > 0x7e) + *val = ' '; +} + /* * ufshcd_wait_for_register - wait for register value to change * @hba - per-adapter interface @@ -239,11 +254,13 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba) * @val - wait condition * @interval_us - polling interval in microsecs * @timeout_ms - timeout in millisecs + * @can_sleep - perform sleep or just spin * * Returns -ETIMEDOUT on error, zero on success */ -static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, - u32 val, unsigned long interval_us, unsigned long timeout_ms) +int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, + u32 val, unsigned long interval_us, + unsigned long timeout_ms, bool can_sleep) { int err = 0; unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); @@ -252,9 +269,10 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, val = val & mask; while ((ufshcd_readl(hba, reg) & mask) != val) { - /* wakeup within 50us of expiry */ - usleep_range(interval_us, interval_us + 50); - + if (can_sleep) + usleep_range(interval_us, interval_us + 50); + else + udelay(interval_us); if (time_after(jiffies, timeout)) { if ((ufshcd_readl(hba, reg) & mask) != val) err = -ETIMEDOUT; @@ -552,6 +570,34 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba) return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 
0 : 1; } +u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba) +{ + /* HCI versions 1.0 and 1.1 support UniPro 1.41 */ + if ((hba->ufs_version == UFSHCI_VERSION_10) || + (hba->ufs_version == UFSHCI_VERSION_11)) + return UFS_UNIPRO_VER_1_41; + else + return UFS_UNIPRO_VER_1_6; +} +EXPORT_SYMBOL(ufshcd_get_local_unipro_ver); + +static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba) +{ + /* + * If both host and device support UniPro ver 1.6 or later, PA layer + * parameter tuning happens during link startup itself. + * + * We can manually tune PA layer parameters if either the host or the device + * doesn't support UniPro ver 1.6 or later. But to keep the manual tuning + * logic simple, we will only do manual tuning if the local UniPro version + * doesn't support ver 1.6 or later. + */ + if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6) + return true; + else + return false; +} + static void ufshcd_ungate_work(struct work_struct *work) { int ret; @@ -1458,7 +1504,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag) */ err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL, - mask, ~mask, 1000, 1000); + mask, ~mask, 1000, 1000, true); return err; } @@ -1857,21 +1903,7 @@ static int ufshcd_query_attr_retry(struct ufs_hba *hba, return ret; } -/** - * ufshcd_query_descriptor - API function for sending descriptor requests - * hba: per-adapter instance - * opcode: attribute opcode - * idn: attribute idn to access - * index: index field - * selector: selector field - * desc_buf: the buffer that contains the descriptor - * buf_len: length parameter passed to the device - * - * Returns 0 for success, non-zero in case of failure. - * The buf_len parameter will contain, on return, the length parameter - * received on the response. - */ -static int ufshcd_query_descriptor(struct ufs_hba *hba, +static int __ufshcd_query_descriptor(struct ufs_hba *hba, enum query_opcode opcode, enum desc_idn idn, u8 index, u8 selector, u8 *desc_buf, int *buf_len) { @@ -1936,6 +1968,39 @@ out: } /** + * ufshcd_query_descriptor_retry - API function for sending descriptor + * requests + * hba: per-adapter instance + * opcode: attribute opcode + * idn: attribute idn to access + * index: index field + * selector: selector field + * desc_buf: the buffer that contains the descriptor + * buf_len: length parameter passed to the device + * + * Returns 0 for success, non-zero in case of failure. + * The buf_len parameter will contain, on return, the length parameter + * received on the response.
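+ * + * Illustrative call (assumes desc_buf is at least QUERY_DESC_DEVICE_MAX_SIZE + * bytes; mirrors the descriptor-read helpers below): + * + * int buf_len = QUERY_DESC_DEVICE_MAX_SIZE; + * err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, + * QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, &buf_len);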
+ */ +int ufshcd_query_descriptor_retry(struct ufs_hba *hba, + enum query_opcode opcode, enum desc_idn idn, u8 index, + u8 selector, u8 *desc_buf, int *buf_len) +{ + int err; + int retries; + + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + err = __ufshcd_query_descriptor(hba, opcode, idn, index, + selector, desc_buf, buf_len); + if (!err || err == -EINVAL) + break; + } + + return err; +} +EXPORT_SYMBOL(ufshcd_query_descriptor_retry); + +/** * ufshcd_read_desc_param - read the specified descriptor parameter * @hba: Pointer to adapter instance * @desc_id: descriptor idn value @@ -1977,9 +2042,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba, return -ENOMEM; } - ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, - desc_id, desc_index, 0, desc_buf, - &buff_len); + ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, + desc_id, desc_index, 0, desc_buf, + &buff_len); if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) || (desc_buf[QUERY_DESC_LENGTH_OFFSET] != @@ -2017,6 +2082,82 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba, return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); } +int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) +{ + return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); +} +EXPORT_SYMBOL(ufshcd_read_device_desc); + +/** + * ufshcd_read_string_desc - read string descriptor + * @hba: pointer to adapter instance + * @desc_index: descriptor index + * @buf: pointer to buffer where descriptor would be read + * @size: size of buf + * @ascii: if true convert from unicode to ascii characters + * + * Return 0 in case of success, non-zero otherwise + */ +int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf, + u32 size, bool ascii) +{ + int err = 0; + + err = ufshcd_read_desc(hba, + QUERY_DESC_IDN_STRING, desc_index, buf, size); + + if (err) { + dev_err(hba->dev, "%s: reading String Desc failed after %d retries. 
err = %d\n", + __func__, QUERY_REQ_RETRIES, err); + goto out; + } + + if (ascii) { + int desc_len; + int ascii_len; + int i; + char *buff_ascii; + + desc_len = buf[0]; + /* remove header and divide by 2 to move from UTF16 to UTF8 */ + ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; + if (size < ascii_len + QUERY_DESC_HDR_SIZE) { + dev_err(hba->dev, "%s: buffer allocated size is too small\n", + __func__); + err = -ENOMEM; + goto out; + } + + buff_ascii = kmalloc(ascii_len, GFP_KERNEL); + if (!buff_ascii) { + err = -ENOMEM; + goto out_free_buff; + } + + /* + * the descriptor contains string in UTF16 format + * we need to convert to utf-8 so it can be displayed + */ + utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE], + desc_len - QUERY_DESC_HDR_SIZE, + UTF16_BIG_ENDIAN, buff_ascii, ascii_len); + + /* replace non-printable or non-ASCII characters with spaces */ + for (i = 0; i < ascii_len; i++) + ufshcd_remove_non_printable(&buff_ascii[i]); + + memset(buf + QUERY_DESC_HDR_SIZE, 0, + size - QUERY_DESC_HDR_SIZE); + memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); + buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; +out_free_buff: + kfree(buff_ascii); + } +out: + return err; +} +EXPORT_SYMBOL(ufshcd_read_string_desc); + /** * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter * @hba: Pointer to adapter instance @@ -2814,6 +2955,23 @@ out: } /** + * ufshcd_hba_stop - Send controller to reset state + * @hba: per adapter instance + * @can_sleep: perform sleep or just spin + */ +static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep) +{ + int err; + + ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); + err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, + CONTROLLER_ENABLE, CONTROLLER_DISABLE, + 10, 1, can_sleep); + if (err) + dev_err(hba->dev, "%s: Controller disable failed\n", __func__); +} + +/** * ufshcd_hba_enable - initialize the controller * @hba: per adapter instance * @@ -2833,18 +2991,9 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) * development and testing of this driver. msleep can be changed to * mdelay and retry count can be reduced based on the controller. */ - if (!ufshcd_is_hba_active(hba)) { - + if (!ufshcd_is_hba_active(hba)) /* change controller state to "reset state" */ - ufshcd_hba_stop(hba); - - /* - * This delay is based on the testing done with UFS host - * controller FPGA. The delay can be changed based on the - * host controller used. - */ - msleep(5); - } + ufshcd_hba_stop(hba, true); /* UniPro link is disabled at this point */ ufshcd_set_link_off(hba); @@ -3365,31 +3514,18 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) } /** - * ufshcd_transfer_req_compl - handle SCSI and query command completion + * __ufshcd_transfer_req_compl - handle SCSI and query command completion * @hba: per adapter instance + * @completed_reqs: requests to complete */ -static void ufshcd_transfer_req_compl(struct ufs_hba *hba) +static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, + unsigned long completed_reqs) { struct ufshcd_lrb *lrbp; struct scsi_cmnd *cmd; - unsigned long completed_reqs; - u32 tr_doorbell; int result; int index; - /* Resetting interrupt aggregation counters first and reading the - * DOOR_BELL afterward allows us to handle all the completed requests. - * In order to prevent other interrupts starvation the DB is read once - * after reset. 
The down side of this solution is the possibility of - * false interrupt if device completes another request after resetting - * aggregation and before reading the DB. - */ - if (ufshcd_is_intr_aggr_allowed(hba)) - ufshcd_reset_intr_aggr(hba); - - tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); - completed_reqs = tr_doorbell ^ hba->outstanding_reqs; - for_each_set_bit(index, &completed_reqs, hba->nutrs) { lrbp = &hba->lrb[index]; cmd = lrbp->cmd; @@ -3419,6 +3555,31 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) } /** + * ufshcd_transfer_req_compl - handle SCSI and query command completion + * @hba: per adapter instance + */ +static void ufshcd_transfer_req_compl(struct ufs_hba *hba) +{ + unsigned long completed_reqs; + u32 tr_doorbell; + + /* Resetting interrupt aggregation counters first and reading the + * DOOR_BELL afterward allows us to handle all the completed requests. + * In order to prevent other interrupts starvation the DB is read once + * after reset. The down side of this solution is the possibility of + * false interrupt if device completes another request after resetting + * aggregation and before reading the DB. + */ + if (ufshcd_is_intr_aggr_allowed(hba)) + ufshcd_reset_intr_aggr(hba); + + tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + completed_reqs = tr_doorbell ^ hba->outstanding_reqs; + + __ufshcd_transfer_req_compl(hba, completed_reqs); +} + +/** * ufshcd_disable_ee - disable exception event * @hba: per-adapter instance * @mask: exception event to disable @@ -3630,7 +3791,7 @@ out: */ static int ufshcd_urgent_bkops(struct ufs_hba *hba) { - return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT); + return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl); } static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) @@ -3639,6 +3800,43 @@ static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) QUERY_ATTR_IDN_EE_STATUS, 0, 0, status); } +static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba) +{ + int err; + u32 curr_status = 0; + + if (hba->is_urgent_bkops_lvl_checked) + goto enable_auto_bkops; + + err = ufshcd_get_bkops_status(hba, &curr_status); + if (err) { + dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", + __func__, err); + goto out; + } + + /* + * We are seeing that some devices are raising the urgent bkops + * exception events even when BKOPS status doesn't indicate performance + * impacted or critical. Handle these devices by determining their urgent + * bkops status at runtime.
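+ * + * For example (illustrative): a device that raises the exception while still + * reporting BKOPS_STATUS_NON_CRITICAL has curr_status below + * BKOPS_STATUS_PERF_IMPACT, so that lower status is recorded as its + * urgent_bkops_lvl from then on.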
+ */ + if (curr_status < BKOPS_STATUS_PERF_IMPACT) { + dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", + __func__, curr_status); + /* update the current status as the urgent bkops level */ + hba->urgent_bkops_lvl = curr_status; + hba->is_urgent_bkops_lvl_checked = true; + } + +enable_auto_bkops: + err = ufshcd_enable_auto_bkops(hba); +out: + if (err < 0) + dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", + __func__, err); +} + /** * ufshcd_exception_event_handler - handle exceptions raised by device * @work: pointer to work data @@ -3662,17 +3860,95 @@ static void ufshcd_exception_event_handler(struct work_struct *work) } status &= hba->ee_ctrl_mask; - if (status & MASK_EE_URGENT_BKOPS) { - err = ufshcd_urgent_bkops(hba); - if (err < 0) - dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", - __func__, err); - } + + if (status & MASK_EE_URGENT_BKOPS) + ufshcd_bkops_exception_event_handler(hba); + out: pm_runtime_put_sync(hba->dev); return; } +/* Complete requests that have door-bell cleared */ +static void ufshcd_complete_requests(struct ufs_hba *hba) +{ + ufshcd_transfer_req_compl(hba); + ufshcd_tmc_handler(hba); +} + +/** + * ufshcd_quirk_dl_nac_errors - This function checks whether error handling is + * required to recover from the DL NAC errors. + * @hba: per-adapter instance + * + * Returns true if error handling is required, false otherwise + */ +static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba) +{ + unsigned long flags; + bool err_handling = true; + + spin_lock_irqsave(hba->host->host_lock, flags); + /* + * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around + * device fatal errors and/or DL NAC & REPLAY timeout errors. + */ + if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) + goto out; + + if ((hba->saved_err & DEVICE_FATAL_ERROR) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) + goto out; + + if ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { + int err; + /* + * wait for 50ms to see if we get any other errors or not. + */ + spin_unlock_irqrestore(hba->host->host_lock, flags); + msleep(50); + spin_lock_irqsave(hba->host->host_lock, flags); + + /* + * now check if we have any severe errors other than + * the DL NAC error + */ + if ((hba->saved_err & INT_FATAL_ERRORS) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) + goto out; + + /* + * As DL NAC is the only error received so far, send out a NOP + * command to confirm if the link is still active. + * - If we don't get any response then do error recovery. + * - If we get a response then clear the DL NAC error bit.
+ */ + + spin_unlock_irqrestore(hba->host->host_lock, flags); + err = ufshcd_verify_dev_init(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + + if (err) + goto out; + + /* Link seems to be alive hence ignore the DL NAC errors */ + if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) + hba->saved_err &= ~UIC_ERROR; + /* clear NAC error */ + hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; + if (!hba->saved_uic_err) { + err_handling = false; + goto out; + } + } +out: + spin_unlock_irqrestore(hba->host->host_lock, flags); + return err_handling; +} + /** * ufshcd_err_handler - handle UFS errors that require s/w attention * @work: pointer to work structure @@ -3685,6 +3961,7 @@ static void ufshcd_err_handler(struct work_struct *work) u32 err_tm = 0; int err = 0; int tag; + bool needs_reset = false; hba = container_of(work, struct ufs_hba, eh_work); @@ -3692,40 +3969,86 @@ static void ufshcd_err_handler(struct work_struct *work) ufshcd_hold(hba, false); spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->ufshcd_state == UFSHCD_STATE_RESET) { - spin_unlock_irqrestore(hba->host->host_lock, flags); + if (hba->ufshcd_state == UFSHCD_STATE_RESET) goto out; - } hba->ufshcd_state = UFSHCD_STATE_RESET; ufshcd_set_eh_in_progress(hba); /* Complete requests that have door-bell cleared by h/w */ - ufshcd_transfer_req_compl(hba); - ufshcd_tmc_handler(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_complete_requests(hba); + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { + bool ret; + spin_unlock_irqrestore(hba->host->host_lock, flags); + /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */ + ret = ufshcd_quirk_dl_nac_errors(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + if (!ret) + goto skip_err_handling; + } + if ((hba->saved_err & INT_FATAL_ERRORS) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR | + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR | + UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) + needs_reset = true; + + /* + * if host reset is required then skip clearing the pending + * transfers forcefully because they will automatically get + * cleared after link startup. 
+ */ + if (needs_reset) + goto skip_pending_xfer_clear; + + /* release lock as clear command might sleep */ + spin_unlock_irqrestore(hba->host->host_lock, flags); /* Clear pending transfer requests */ - for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) - if (ufshcd_clear_cmd(hba, tag)) - err_xfer |= 1 << tag; + for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { + if (ufshcd_clear_cmd(hba, tag)) { + err_xfer = true; + goto lock_skip_pending_xfer_clear; + } + } /* Clear pending task management requests */ - for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) - if (ufshcd_clear_tm_cmd(hba, tag)) - err_tm |= 1 << tag; + for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { + if (ufshcd_clear_tm_cmd(hba, tag)) { + err_tm = true; + goto lock_skip_pending_xfer_clear; + } + } - /* Complete the requests that are cleared by s/w */ +lock_skip_pending_xfer_clear: spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_transfer_req_compl(hba); - ufshcd_tmc_handler(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); + /* Complete the requests that are cleared by s/w */ + ufshcd_complete_requests(hba); + + if (err_xfer || err_tm) + needs_reset = true; + +skip_pending_xfer_clear: /* Fatal errors need reset */ - if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) || - ((hba->saved_err & UIC_ERROR) && - (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) { + if (needs_reset) { + unsigned long max_doorbells = (1UL << hba->nutrs) - 1; + + /* + * ufshcd_reset_and_restore() does the link reinitialization + * which will need at least one empty doorbell slot to send the + * device management commands (NOP and query commands). + * If there is no empty slot at this moment then free up the last + * slot forcefully. + */ + if (hba->outstanding_reqs == max_doorbells) + __ufshcd_transfer_req_compl(hba, + (1UL << (hba->nutrs - 1))); + + spin_unlock_irqrestore(hba->host->host_lock, flags); err = ufshcd_reset_and_restore(hba); + spin_lock_irqsave(hba->host->host_lock, flags); if (err) { dev_err(hba->dev, "%s: reset and restore failed\n", __func__); @@ -3739,9 +4062,19 @@ static void ufshcd_err_handler(struct work_struct *work) hba->saved_err = 0; hba->saved_uic_err = 0; } + +skip_err_handling: + if (!needs_reset) { + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + if (hba->saved_err || hba->saved_uic_err) + dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", + __func__, hba->saved_err, hba->saved_uic_err); + } + ufshcd_clear_eh_in_progress(hba); out: + spin_unlock_irqrestore(hba->host->host_lock, flags); scsi_unblock_requests(hba->host); ufshcd_release(hba); pm_runtime_put_sync(hba->dev); @@ -3759,6 +4092,14 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba) reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; + else if (hba->dev_quirks & + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { + if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) + hba->uic_error |= + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; + else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) + hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; + } /* UIC NL/TL/DME errors need software retry */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); @@ -3796,15 +4137,18 @@ static void ufshcd_check_errors(struct ufs_hba *hba) } if (queue_eh_work) { + /* + * Update the transfer error masks to sticky bits; let's do this + * irrespective of the current ufshcd_state.
+ */ + hba->saved_err |= hba->errors; + hba->saved_uic_err |= hba->uic_error; + /* handle fatal errors only when link is functional */ if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) { /* block commands from scsi mid-layer */ scsi_block_requests(hba->host); - /* transfer error masks to sticky bits */ - hba->saved_err |= hba->errors; - hba->saved_uic_err |= hba->uic_error; - hba->ufshcd_state = UFSHCD_STATE_ERROR; schedule_work(&hba->eh_work); } @@ -3897,7 +4241,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) /* poll for max. 1 sec to clear door bell register by h/w */ err = ufshcd_wait_for_register(hba, REG_UTP_TASK_REQ_DOOR_BELL, - mask, 0, 1000, 1000); + mask, 0, 1000, 1000, true); out: return err; } @@ -4179,7 +4523,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) /* Reset the host controller */ spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_hba_stop(hba); + ufshcd_hba_stop(hba, false); spin_unlock_irqrestore(hba->host->host_lock, flags); err = ufshcd_hba_enable(hba); @@ -4466,6 +4810,164 @@ out: return ret; } +static int ufs_get_device_info(struct ufs_hba *hba, + struct ufs_device_info *card_data) +{ + int err; + u8 model_index; + u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0}; + u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE]; + + err = ufshcd_read_device_desc(hba, desc_buf, + QUERY_DESC_DEVICE_MAX_SIZE); + if (err) { + dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", + __func__, err); + goto out; + } + + /* + * getting vendor (manufacturerID) and Bank Index in big endian + * format + */ + card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | + desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; + + model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; + + err = ufshcd_read_string_desc(hba, model_index, str_desc_buf, + QUERY_DESC_STRING_MAX_SIZE, ASCII_STD); + if (err) { + dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", + __func__, err); + goto out; + } + + str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0'; + strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE), + min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET], + MAX_MODEL_LEN)); + + /* Null terminate the model string */ + card_data->model[MAX_MODEL_LEN] = '\0'; + +out: + return err; +} + +void ufs_advertise_fixup_device(struct ufs_hba *hba) +{ + int err; + struct ufs_dev_fix *f; + struct ufs_device_info card_data; + + card_data.wmanufacturerid = 0; + + err = ufs_get_device_info(hba, &card_data); + if (err) { + dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", + __func__, err); + return; + } + + for (f = ufs_fixups; f->quirk; f++) { + if (((f->card.wmanufacturerid == card_data.wmanufacturerid) || + (f->card.wmanufacturerid == UFS_ANY_VENDOR)) && + (STR_PRFX_EQUAL(f->card.model, card_data.model) || + !strcmp(f->card.model, UFS_ANY_MODEL))) + hba->dev_quirks |= f->quirk; + } +} + +/** + * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro + * @hba: per-adapter instance + * + * The PA_TActivate parameter can be tuned manually if the UniPro version is less than + * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's + * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce + * the hibern8 exit latency. + * + * Returns zero on success, non-zero error value on failure.
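+ * + * Worked example (illustrative): a peer RX_MIN_ACTIVATETIME_CAPABILITY of 6 + * means 6 * RX_MIN_ACTIVATETIME_UNIT_US = 600us, which converts to + * 600 / PA_TACTIVATE_TIME_UNIT_US = 60 PA_TACTIVATE units.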
+ */ +static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba) +{ + int ret = 0; + u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate; + + ret = ufshcd_dme_peer_get(hba, + UIC_ARG_MIB_SEL( + RX_MIN_ACTIVATETIME_CAPABILITY, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), + &peer_rx_min_activatetime); + if (ret) + goto out; + + /* make sure proper unit conversion is applied */ + tuned_pa_tactivate = + ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) + / PA_TACTIVATE_TIME_UNIT_US); + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), + tuned_pa_tactivate); + +out: + return ret; +} + +/** + * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro + * @hba: per-adapter instance + * + * The PA_Hibern8Time parameter can be tuned manually if the UniPro version is less than + * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's + * TX_HIBERN8TIME_CAPABILITY & the peer M-PHY's RX_HIBERN8TIME_CAPABILITY. + * This optimal value can help reduce the hibern8 exit latency. + * + * Returns zero on success, non-zero error value on failure. + */ +static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) +{ + int ret = 0; + u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0; + u32 max_hibern8_time, tuned_pa_hibern8time; + + ret = ufshcd_dme_get(hba, + UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), + &local_tx_hibern8_time_cap); + if (ret) + goto out; + + ret = ufshcd_dme_peer_get(hba, + UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), + &peer_rx_hibern8_time_cap); + if (ret) + goto out; + + max_hibern8_time = max(local_tx_hibern8_time_cap, + peer_rx_hibern8_time_cap); + /* make sure proper unit conversion is applied */ + tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US) + / PA_HIBERN8_TIME_UNIT_US); + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), + tuned_pa_hibern8time); +out: + return ret; +} + +static void ufshcd_tune_unipro_params(struct ufs_hba *hba) +{ + if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { + ufshcd_tune_pa_tactivate(hba); + ufshcd_tune_pa_hibern8time(hba); + } + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) + /* set 1ms timeout for PA_TACTIVATE */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); +} + /** * ufshcd_probe_hba - probe hba to detect device and initialize * @hba: per-adapter instance * @@ -4482,6 +4984,10 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) ufshcd_init_pwr_info(hba); + /* set the default level for urgent bkops */ + hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; + hba->is_urgent_bkops_lvl_checked = false; + /* UniPro link is active now */ ufshcd_set_link_active(hba); @@ -4493,6 +4999,14 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ret) goto out; + ufs_advertise_fixup_device(hba); + ufshcd_tune_unipro_params(hba); + + ret = ufshcd_set_vccq_rail_unused(hba, + (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ?
true : false); + if (ret) + goto out; + /* UFS device is also active now */ ufshcd_set_ufs_dev_active(hba); ufshcd_force_reset_auto_bkops(hba); @@ -4567,6 +5081,41 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie) ufshcd_probe_hba(hba); } +static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd) +{ + unsigned long flags; + struct Scsi_Host *host; + struct ufs_hba *hba; + int index; + bool found = false; + + if (!scmd || !scmd->device || !scmd->device->host) + return BLK_EH_NOT_HANDLED; + + host = scmd->device->host; + hba = shost_priv(host); + if (!hba) + return BLK_EH_NOT_HANDLED; + + spin_lock_irqsave(host->host_lock, flags); + + for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) { + if (hba->lrb[index].cmd == scmd) { + found = true; + break; + } + } + + spin_unlock_irqrestore(host->host_lock, flags); + + /* + * Bypass SCSI error handling and reset the block layer timer if this + * SCSI command was not actually dispatched to UFS driver, otherwise + * let SCSI layer handle the error as usual. + */ + return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER; +} + static struct scsi_host_template ufshcd_driver_template = { .module = THIS_MODULE, .name = UFSHCD, @@ -4579,6 +5128,7 @@ static struct scsi_host_template ufshcd_driver_template = { .eh_abort_handler = ufshcd_abort, .eh_device_reset_handler = ufshcd_eh_device_reset_handler, .eh_host_reset_handler = ufshcd_eh_host_reset_handler, + .eh_timed_out = ufshcd_eh_timed_out, .this_id = -1, .sg_tablesize = SG_ALL, .cmd_per_lun = UFSHCD_CMD_PER_LUN, @@ -4607,13 +5157,24 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, struct ufs_vreg *vreg) { - return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); + if (!vreg) + return 0; + else if (vreg->unused) + return 0; + else + return ufshcd_config_vreg_load(hba->dev, vreg, + UFS_VREG_LPM_LOAD_UA); } static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, struct ufs_vreg *vreg) { - return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); + if (!vreg) + return 0; + else if (vreg->unused) + return 0; + else + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); } static int ufshcd_config_vreg(struct device *dev, @@ -4648,7 +5209,9 @@ static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg) { int ret = 0; - if (!vreg || vreg->enabled) + if (!vreg) + goto out; + else if (vreg->enabled || vreg->unused) goto out; ret = ufshcd_config_vreg(dev, vreg, true); @@ -4668,7 +5231,9 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg) { int ret = 0; - if (!vreg || !vreg->enabled) + if (!vreg) + goto out; + else if (!vreg->enabled || vreg->unused) goto out; ret = regulator_disable(vreg->reg); @@ -4774,6 +5339,36 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba) return 0; } +static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused) +{ + int ret = 0; + struct ufs_vreg_info *info = &hba->vreg_info; + + if (!info) + goto out; + else if (!info->vccq) + goto out; + + if (unused) { + /* shut off the rail here */ + ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false); + /* + * Mark this rail as no longer used, so it doesn't get enabled + * later by mistake + */ + if (!ret) + info->vccq->unused = true; + } else { + /* + * rail should have been already enabled hence just make sure + * that unused flag is cleared. 
*/ + info->vccq->unused = false; + } +out: + return ret; +} + static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, bool skip_ref_clk) { @@ -5093,10 +5688,20 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba, (!check_for_bkops || (check_for_bkops && !hba->auto_bkops_enabled))) { /* + * Let's make sure that the link is in low power mode; we currently do + * this by putting the link in Hibern8. Another way to + * put the link in low power mode is to send a DME end point reset + * to the device and then send the DME reset command to the local + * UniPro. But putting the link in Hibern8 is much faster. + */ + ret = ufshcd_uic_hibern8_enter(hba); + if (ret) + goto out; + /* * Change controller state to "reset state" which * should also put the link in off/reset state */ - ufshcd_hba_stop(hba); + ufshcd_hba_stop(hba, true); /* * TODO: Check if we need any delay to make sure that * controller is reset @@ -5111,6 +5716,16 @@ out: static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) { /* + * It seems some UFS devices may keep drawing more than sleep current + * (at least for 500us) from UFS rails (especially from the VCCQ rail). + * To avoid this situation, add a 2ms delay before putting these UFS + * rails in LPM mode. + */ + if (!ufshcd_is_link_active(hba) && + hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM) + usleep_range(2000, 2100); + + /* * If UFS device is either in UFS_Sleep turn off VCC rail to save some * power. * @@ -5572,7 +6187,7 @@ void ufshcd_remove(struct ufs_hba *hba) scsi_remove_host(hba->host); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); - ufshcd_hba_stop(hba); + ufshcd_hba_stop(hba, true); scsi_host_put(hba->host); @@ -5836,6 +6451,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) init_waitqueue_head(&hba->dev_cmd.tag_wq); ufshcd_init_clk_gating(hba); + + /* + * In order to avoid any spurious interrupt immediately after + * registering the UFS controller interrupt handler, clear any pending UFS + * interrupt status and disable all the UFS interrupts. + */ + ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), + REG_INTERRUPT_STATUS); + ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); + /* + * Make sure that UFS interrupts are disabled and any pending interrupt + * status is cleared before registering the UFS interrupt handler. + */ + mb(); + /* IRQ registration */ err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); if (err) { diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index e3931d0c94eb..4bb65669f052 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -54,6 +54,7 @@ #include <linux/clk.h> #include <linux/completion.h> #include <linux/regulator/consumer.h> +#include "unipro.h" #include <asm/irq.h> #include <asm/byteorder.h> @@ -383,6 +384,9 @@ struct ufs_init_prefetch { * @clk_list_head: UFS host controller clocks list node head * @pwr_info: holds current power mode * @max_pwr_info: keeps the device max valid pwm + * @urgent_bkops_lvl: keeps track of urgent bkops level for device + * @is_urgent_bkops_lvl_checked: keeps track of whether the urgent bkops level for + * the device is known or not. */ struct ufs_hba { void __iomem *mmio_base; @@ -470,6 +474,9 @@ struct ufs_hba { unsigned int quirks; /* Deviations from standard UFSHCI spec. */ + /* Device deviations from standard UFS device spec.
*/ + unsigned int dev_quirks; + wait_queue_head_t tm_wq; wait_queue_head_t tm_tag_wq; unsigned long tm_condition; @@ -509,6 +516,8 @@ struct ufs_hba { bool wlun_dev_clr_ua; + /* Number of lanes available (1 or 2) for Rx/Tx */ + u32 lanes_per_direction; struct ufs_pa_layer_attr pwr_info; struct ufs_pwr_mode_info max_pwr_info; @@ -533,6 +542,9 @@ struct ufs_hba { struct devfreq *devfreq; struct ufs_clk_scaling clk_scaling; bool is_sys_suspended; + + enum bkops_status urgent_bkops_lvl; + bool is_urgent_bkops_lvl_checked; }; /* Returns true if clocks can be gated. Otherwise false */ @@ -588,15 +600,9 @@ int ufshcd_alloc_host(struct device *, struct ufs_hba **); void ufshcd_dealloc_host(struct ufs_hba *); int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int); void ufshcd_remove(struct ufs_hba *); - -/** - * ufshcd_hba_stop - Send controller to reset state - * @hba: per adapter instance - */ -static inline void ufshcd_hba_stop(struct ufs_hba *hba) -{ - ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); -} +int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, + u32 val, unsigned long interval_us, + unsigned long timeout_ms, bool can_sleep); static inline void check_upiu_size(void) { @@ -682,11 +688,27 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); } +int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size); + +static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info) +{ + return (pwr_info->pwr_rx == FAST_MODE || + pwr_info->pwr_rx == FASTAUTO_MODE) && + (pwr_info->pwr_tx == FAST_MODE || + pwr_info->pwr_tx == FASTAUTO_MODE); +} + +#define ASCII_STD true + +int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf, + u32 size, bool ascii); + /* Expose Query-Request API */ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, bool *flag_res); int ufshcd_hold(struct ufs_hba *hba, bool async); void ufshcd_release(struct ufs_hba *hba); +u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba); /* Wrapper functions for safely calling variant operations */ static inline const char *ufshcd_get_var_name(struct ufs_hba *hba) diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 0ae0967aaed8..4cb1cc63f1a1 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -92,6 +92,7 @@ enum { UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */ UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */ UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */ + UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */ }; /* @@ -170,6 +171,8 @@ enum { #define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31) #define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF #define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000 +#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001 +#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002 /* UECN - Host UIC Error Code Network Layer 40h */ #define UIC_NETWORK_LAYER_ERROR UFS_BIT(31) @@ -209,6 +212,7 @@ enum { /* GenSelectorIndex calculation macros for M-PHY attributes */ #define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane) +#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane)) #define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\ ((sel) & 0xFFFF)) diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h index 816a8a46efb8..e2854e45f8d3 100644 --- a/drivers/scsi/ufs/unipro.h +++ b/drivers/scsi/ufs/unipro.h @@ -15,6 +15,7 @@ /* * M-TX Configuration Attributes */ +#define TX_HIBERN8TIME_CAPABILITY 0x000F 
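+ +/* + * Illustrative use of this capability attribute (mirroring the tuning code in + * ufshcd.c above): + * + * ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY, + * UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), &local_tx_hibern8_time_cap); + */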
#define TX_MODE 0x0021 #define TX_HSRATE_SERIES 0x0022 #define TX_HSGEAR 0x0023 @@ -48,8 +49,12 @@ #define RX_ENTER_HIBERN8 0x00A7 #define RX_BYPASS_8B10B_ENABLE 0x00A8 #define RX_TERMINATION_FORCE_ENABLE 0x0089 +#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F +#define RX_HIBERN8TIME_CAPABILITY 0x0092 #define is_mphy_tx_attr(attr) (attr < RX_MODE) +#define RX_MIN_ACTIVATETIME_UNIT_US 100 +#define HIBERN8TIME_UNIT_US 100 /* * PHY Adapter attributes */ @@ -70,6 +75,7 @@ #define PA_MAXRXSPEEDFAST 0x1541 #define PA_MAXRXSPEEDSLOW 0x1542 #define PA_TXLINKSTARTUPHS 0x1544 +#define PA_LOCAL_TX_LCC_ENABLE 0x155E #define PA_TXSPEEDFAST 0x1565 #define PA_TXSPEEDSLOW 0x1566 #define PA_REMOTEVERINFO 0x15A0 @@ -110,6 +116,12 @@ #define PA_STALLNOCONFIGTIME 0x15A3 #define PA_SAVECONFIGTIME 0x15A4 +#define PA_TACTIVATE_TIME_UNIT_US 10 +#define PA_HIBERN8_TIME_UNIT_US 100 + +/* PHY Adapter Protocol Constants */ +#define PA_MAXDATALANES 4 + /* PA power modes */ enum { FAST_MODE = 1, @@ -143,6 +155,16 @@ enum ufs_hs_gear_tag { UFS_HS_G3, /* HS Gear 3 */ }; +enum ufs_unipro_ver { + UFS_UNIPRO_VER_RESERVED = 0, + UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */ + UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */ + UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */ + UFS_UNIPRO_VER_MAX = 4, /* UniPro unsupported version */ + /* UniPro version field mask in PA_LOCALVERINFO */ + UFS_UNIPRO_VER_MASK = 0xF, +}; + /* * Data Link Layer Attributes */ diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index bceb81309787..85365672c931 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -1141,14 +1141,16 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, return PTR_ERR_OR_ZERO(vaddr); } -static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, - enum dma_data_direction direction) +static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) { struct ion_buffer *buffer = dmabuf->priv; mutex_lock(&buffer->lock); ion_buffer_kmap_put(buffer); mutex_unlock(&buffer->lock); + + return 0; } static struct dma_buf_ops dma_buf_ops = { diff --git a/fs/Kconfig b/fs/Kconfig index 9d757673bf40..6725f59c18e6 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -209,6 +209,7 @@ menuconfig MISC_FILESYSTEMS if MISC_FILESYSTEMS +source "fs/orangefs/Kconfig" source "fs/adfs/Kconfig" source "fs/affs/Kconfig" source "fs/ecryptfs/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index 252c96898a43..85b6e13b62d3 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -106,6 +106,7 @@ obj-$(CONFIG_AUTOFS4_FS) += autofs4/ obj-$(CONFIG_ADFS_FS) += adfs/ obj-$(CONFIG_FUSE_FS) += fuse/ obj-$(CONFIG_OVERLAY_FS) += overlayfs/ +obj-$(CONFIG_ORANGEFS_FS) += orangefs/ obj-$(CONFIG_UDF_FS) += udf/ obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/ obj-$(CONFIG_OMFS_FS) += omfs/ diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 19adeb0ef82a..fc5cae2a0db2 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -175,8 +175,8 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset, static int ceph_releasepage(struct page *page, gfp_t g) { - struct inode *inode = page->mapping ? page->mapping->host : NULL; - dout("%p releasepage %p idx %lu\n", inode, page, page->index); + dout("%p releasepage %p idx %lu\n", page->mapping->host, + page, page->index); WARN_ON(PageDirty(page)); /* Can we release the page from the cache?
*/ @@ -276,7 +276,7 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) for (i = 0; i < num_pages; i++) { struct page *page = osd_data->pages[i]; - if (rc < 0 && rc != ENOENT) + if (rc < 0 && rc != -ENOENT) goto unlock; if (bytes < (int)PAGE_CACHE_SIZE) { /* zero (remainder of) page */ @@ -606,71 +606,71 @@ static void writepages_finish(struct ceph_osd_request *req, struct inode *inode = req->r_inode; struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_osd_data *osd_data; - unsigned wrote; struct page *page; - int num_pages; - int i; + int num_pages, total_pages = 0; + int i, j; + int rc = req->r_result; struct ceph_snap_context *snapc = req->r_snapc; struct address_space *mapping = inode->i_mapping; - int rc = req->r_result; - u64 bytes = req->r_ops[0].extent.length; struct ceph_fs_client *fsc = ceph_inode_to_client(inode); - long writeback_stat; - unsigned issued = ceph_caps_issued(ci); + bool remove_page; - osd_data = osd_req_op_extent_osd_data(req, 0); - BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); - num_pages = calc_pages_for((u64)osd_data->alignment, - (u64)osd_data->length); - if (rc >= 0) { - /* - * Assume we wrote the pages we originally sent. The - * osd might reply with fewer pages if our writeback - * raced with a truncation and was adjusted at the osd, - * so don't believe the reply. - */ - wrote = num_pages; - } else { - wrote = 0; + + dout("writepages_finish %p rc %d\n", inode, rc); + if (rc < 0) mapping_set_error(mapping, rc); - } - dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n", - inode, rc, bytes, wrote); - /* clean all pages */ - for (i = 0; i < num_pages; i++) { - page = osd_data->pages[i]; - BUG_ON(!page); - WARN_ON(!PageUptodate(page)); + /* + * We lost the cache cap, need to truncate the page before + * it is unlocked, otherwise we'd truncate it later in the + * page truncation thread, possibly losing some data that + * raced its way in + */ + remove_page = !(ceph_caps_issued(ci) & + (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)); - writeback_stat = - atomic_long_dec_return(&fsc->writeback_count); - if (writeback_stat < - CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb)) - clear_bdi_congested(&fsc->backing_dev_info, - BLK_RW_ASYNC); + /* clean all pages */ + for (i = 0; i < req->r_num_ops; i++) { + if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) + break; - ceph_put_snap_context(page_snap_context(page)); - page->private = 0; - ClearPagePrivate(page); - dout("unlocking %d %p\n", i, page); - end_page_writeback(page); + osd_data = osd_req_op_extent_osd_data(req, i); + BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); + num_pages = calc_pages_for((u64)osd_data->alignment, + (u64)osd_data->length); + total_pages += num_pages; + for (j = 0; j < num_pages; j++) { + page = osd_data->pages[j]; + BUG_ON(!page); + WARN_ON(!PageUptodate(page)); + + if (atomic_long_dec_return(&fsc->writeback_count) < + CONGESTION_OFF_THRESH( + fsc->mount_options->congestion_kb)) + clear_bdi_congested(&fsc->backing_dev_info, + BLK_RW_ASYNC); + + ceph_put_snap_context(page_snap_context(page)); + page->private = 0; + ClearPagePrivate(page); + dout("unlocking %p\n", page); + end_page_writeback(page); + + if (remove_page) + generic_error_remove_page(inode->i_mapping, + page); - /* - * We lost the cache cap, need to truncate the page before - * it is unlocked, otherwise we'd truncate it later in the - * page truncation thread, possibly losing some data that - * raced its way in - */ - if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) 
== 0) - generic_error_remove_page(inode->i_mapping, page); + unlock_page(page); + } + dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n", + inode, osd_data->length, rc >= 0 ? num_pages : 0); - unlock_page(page); + ceph_release_pages(osd_data->pages, num_pages); } - dout("%p wrote+cleaned %d pages\n", inode, wrote); - ceph_put_wrbuffer_cap_refs(ci, num_pages, snapc); - ceph_release_pages(osd_data->pages, num_pages); + ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc); + + osd_data = osd_req_op_extent_osd_data(req, 0); if (osd_data->pages_from_pool) mempool_free(osd_data->pages, ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); @@ -778,17 +778,15 @@ retry: while (!done && index <= end) { unsigned i; int first; - pgoff_t next; - int pvec_pages, locked_pages; - struct page **pages = NULL; + pgoff_t strip_unit_end = 0; + int num_ops = 0, op_idx; + int pvec_pages, locked_pages = 0; + struct page **pages = NULL, **data_pages; mempool_t *pool = NULL; /* Becomes non-null if mempool used */ struct page *page; int want; - u64 offset, len; - long writeback_stat; + u64 offset = 0, len = 0; - next = 0; - locked_pages = 0; max_pages = max_pages_ever; get_more_pages: @@ -824,8 +822,8 @@ get_more_pages: unlock_page(page); break; } - if (next && (page->index != next)) { - dout("not consecutive %p\n", page); + if (strip_unit_end && (page->index > strip_unit_end)) { + dout("end of strip unit %p\n", page); unlock_page(page); break; } @@ -867,36 +865,31 @@ get_more_pages: /* * We have something to write. If this is * the first locked page this time through, - * allocate an osd request and a page array - * that it will use. + * calculate the max possible write size and + * allocate a page array */ if (locked_pages == 0) { + u64 objnum; + u64 objoff; + /* prepare async write request */ offset = (u64)page_offset(page); len = wsize; - req = ceph_osdc_new_request(&fsc->client->osdc, - &ci->i_layout, vino, - offset, &len, 0, - do_sync ? 2 : 1, - CEPH_OSD_OP_WRITE, - CEPH_OSD_FLAG_WRITE | - CEPH_OSD_FLAG_ONDISK, - snapc, truncate_seq, - truncate_size, true); - if (IS_ERR(req)) { - rc = PTR_ERR(req); + + rc = ceph_calc_file_object_mapping(&ci->i_layout, + offset, len, + &objnum, &objoff, + &len); + if (rc < 0) { unlock_page(page); break; } - if (do_sync) - osd_req_op_init(req, 1, - CEPH_OSD_OP_STARTSYNC, 0); - - req->r_callback = writepages_finish; - req->r_inode = inode; + num_ops = 1 + do_sync; + strip_unit_end = page->index + + ((len - 1) >> PAGE_CACHE_SHIFT); + BUG_ON(pages); max_pages = calc_pages_for(0, (u64)len); pages = kmalloc(max_pages * sizeof (*pages), GFP_NOFS); @@ -905,6 +898,20 @@ get_more_pages: pages = mempool_alloc(pool, GFP_NOFS); BUG_ON(!pages); } + + len = 0; + } else if (page->index != + (offset + len) >> PAGE_CACHE_SHIFT) { + if (num_ops >= (pool ?
CEPH_OSD_SLAB_OPS : + CEPH_OSD_MAX_OPS)) { + redirty_page_for_writepage(wbc, page); + unlock_page(page); + break; + } + + num_ops++; + offset = (u64)page_offset(page); + len = 0; } /* note position of first page in pvec */ @@ -913,18 +920,16 @@ get_more_pages: dout("%p will write page %p idx %lu\n", inode, page, page->index); - writeback_stat = - atomic_long_inc_return(&fsc->writeback_count); - if (writeback_stat > CONGESTION_ON_THRESH( + if (atomic_long_inc_return(&fsc->writeback_count) > + CONGESTION_ON_THRESH( fsc->mount_options->congestion_kb)) { set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); } - set_page_writeback(page); pages[locked_pages] = page; locked_pages++; - next = page->index + 1; + len += PAGE_CACHE_SIZE; } /* did we get anything? */ @@ -944,38 +949,119 @@ get_more_pages: /* shift unused pages over in the pvec... we * will need to release them below. */ for (j = i; j < pvec_pages; j++) { - dout(" pvec leftover page %p\n", - pvec.pages[j]); + dout(" pvec leftover page %p\n", pvec.pages[j]); pvec.pages[j-i+first] = pvec.pages[j]; } pvec.nr -= i-first; } - /* Format the osd request message and submit the write */ +new_request: offset = page_offset(pages[0]); - len = (u64)locked_pages << PAGE_CACHE_SHIFT; - if (snap_size == -1) { - len = min(len, (u64)i_size_read(inode) - offset); - /* writepages_finish() clears writeback pages - * according to the data length, so make sure - * data length covers all locked pages */ - len = max(len, 1 + - ((u64)(locked_pages - 1) << PAGE_CACHE_SHIFT)); - } else { - len = min(len, snap_size - offset); + len = wsize; + + req = ceph_osdc_new_request(&fsc->client->osdc, + &ci->i_layout, vino, + offset, &len, 0, num_ops, + CEPH_OSD_OP_WRITE, + CEPH_OSD_FLAG_WRITE | + CEPH_OSD_FLAG_ONDISK, + snapc, truncate_seq, + truncate_size, false); + if (IS_ERR(req)) { + req = ceph_osdc_new_request(&fsc->client->osdc, + &ci->i_layout, vino, + offset, &len, 0, + min(num_ops, + CEPH_OSD_SLAB_OPS), + CEPH_OSD_OP_WRITE, + CEPH_OSD_FLAG_WRITE | + CEPH_OSD_FLAG_ONDISK, + snapc, truncate_seq, + truncate_size, true); + BUG_ON(IS_ERR(req)); } - dout("writepages got %d pages at %llu~%llu\n", - locked_pages, offset, len); + BUG_ON(len < page_offset(pages[locked_pages - 1]) + + PAGE_CACHE_SIZE - offset); + + req->r_callback = writepages_finish; + req->r_inode = inode; - osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, + /* Format the osd request message and submit the write */ + len = 0; + data_pages = pages; + op_idx = 0; + for (i = 0; i < locked_pages; i++) { + u64 cur_offset = page_offset(pages[i]); + if (offset + len != cur_offset) { + if (op_idx + do_sync + 1 == req->r_num_ops) + break; + osd_req_op_extent_dup_last(req, op_idx, + cur_offset - offset); + dout("writepages got pages at %llu~%llu\n", + offset, len); + osd_req_op_extent_osd_data_pages(req, op_idx, + data_pages, len, 0, !!pool, false); + osd_req_op_extent_update(req, op_idx, len); - pages = NULL; /* request message now owns the pages array */ - pool = NULL; + len = 0; + offset = cur_offset; + data_pages = pages + i; + op_idx++; + } - /* Update the write op length in case we changed it */ + set_page_writeback(pages[i]); + len += PAGE_CACHE_SIZE; + } + + if (snap_size != -1) { + len = min(len, snap_size - offset); + } else if (i == locked_pages) { + /* writepages_finish() clears writeback pages + * according to the data length, so make sure + * data length covers all locked pages */ + u64 min_len = len + 1 - PAGE_CACHE_SIZE; + len = min(len, (u64)i_size_read(inode) - offset); + len = max(len, 
min_len); + } + dout("writepages got pages at %llu~%llu\n", offset, len); + + osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len, + 0, !!pool, false); + osd_req_op_extent_update(req, op_idx, len); - osd_req_op_extent_update(req, 0, len); + if (do_sync) { + op_idx++; + osd_req_op_init(req, op_idx, CEPH_OSD_OP_STARTSYNC, 0); + } + BUG_ON(op_idx + 1 != req->r_num_ops); + + pool = NULL; + if (i < locked_pages) { + BUG_ON(num_ops <= req->r_num_ops); + num_ops -= req->r_num_ops; + num_ops += do_sync; + locked_pages -= i; + + /* allocate new pages array for next request */ + data_pages = pages; + pages = kmalloc(locked_pages * sizeof (*pages), + GFP_NOFS); + if (!pages) { + pool = fsc->wb_pagevec_pool; + pages = mempool_alloc(pool, GFP_NOFS); + BUG_ON(!pages); + } + memcpy(pages, data_pages + i, + locked_pages * sizeof(*pages)); + memset(data_pages + i, 0, + locked_pages * sizeof(*pages)); + } else { + BUG_ON(num_ops != req->r_num_ops); + index = pages[i - 1]->index + 1; + /* request message now owns the pages array */ + pages = NULL; + } vino = ceph_vino(inode); ceph_osdc_build_request(req, offset, snapc, vino.snap, @@ -985,9 +1071,10 @@ get_more_pages: BUG_ON(rc); req = NULL; - /* continue? */ - index = next; - wbc->nr_to_write -= locked_pages; + wbc->nr_to_write -= i; + if (pages) + goto new_request; + if (wbc->nr_to_write <= 0) done = 1; @@ -1522,7 +1609,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page) ceph_vino(inode), 0, &len, 0, 1, CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, - ceph_empty_snapc, 0, 0, false); + NULL, 0, 0, false); if (IS_ERR(req)) { err = PTR_ERR(req); goto out; @@ -1540,9 +1627,8 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page) ceph_vino(inode), 0, &len, 1, 3, CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, - ceph_empty_snapc, - ci->i_truncate_seq, ci->i_truncate_size, - false); + NULL, ci->i_truncate_seq, + ci->i_truncate_size, false); if (IS_ERR(req)) { err = PTR_ERR(req); goto out; @@ -1663,8 +1749,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool) goto out; } - rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, - ceph_empty_snapc, + rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 1, false, GFP_NOFS); if (!rd_req) { err = -ENOMEM; @@ -1678,8 +1763,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool) "%llx.00000000", ci->i_vino.ino); rd_req->r_base_oid.name_len = strlen(rd_req->r_base_oid.name); - wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, - ceph_empty_snapc, + wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 1, false, GFP_NOFS); if (!wr_req) { err = -ENOMEM; diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 6fe0ad26a7df..de17bb232ff8 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -991,7 +991,7 @@ static int send_cap_msg(struct ceph_mds_session *session, u32 seq, u64 flush_tid, u64 oldest_flush_tid, u32 issue_seq, u32 mseq, u64 size, u64 max_size, struct timespec *mtime, struct timespec *atime, - u64 time_warp_seq, + struct timespec *ctime, u64 time_warp_seq, kuid_t uid, kgid_t gid, umode_t mode, u64 xattr_version, struct ceph_buffer *xattrs_buf, @@ -1042,6 +1042,8 @@ static int send_cap_msg(struct ceph_mds_session *session, ceph_encode_timespec(&fc->mtime, mtime); if (atime) ceph_encode_timespec(&fc->atime, atime); + if (ctime) + ceph_encode_timespec(&fc->ctime, ctime); fc->time_warp_seq = cpu_to_le32(time_warp_seq); fc->uid = cpu_to_le32(from_kuid(&init_user_ns, uid)); @@ -1116,7 +1118,7 @@ 
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, int held, revoking, dropping, keep; u64 seq, issue_seq, mseq, time_warp_seq, follows; u64 size, max_size; - struct timespec mtime, atime; + struct timespec mtime, atime, ctime; int wake = 0; umode_t mode; kuid_t uid; @@ -1180,6 +1182,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, ci->i_requested_max_size = max_size; mtime = inode->i_mtime; atime = inode->i_atime; + ctime = inode->i_ctime; time_warp_seq = ci->i_time_warp_seq; uid = inode->i_uid; gid = inode->i_gid; @@ -1198,7 +1201,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id, op, keep, want, flushing, seq, flush_tid, oldest_flush_tid, issue_seq, mseq, - size, max_size, &mtime, &atime, time_warp_seq, + size, max_size, &mtime, &atime, &ctime, time_warp_seq, uid, gid, mode, xattr_version, xattr_blob, follows, inline_data); if (ret < 0) { @@ -1320,7 +1323,7 @@ retry: capsnap->dirty, 0, capsnap->flush_tid, 0, 0, mseq, capsnap->size, 0, &capsnap->mtime, &capsnap->atime, - capsnap->time_warp_seq, + &capsnap->ctime, capsnap->time_warp_seq, capsnap->uid, capsnap->gid, capsnap->mode, capsnap->xattr_version, capsnap->xattr_blob, capsnap->follows, capsnap->inline_data); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index fd11fb231a2e..fadc243dfb28 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -38,7 +38,7 @@ int ceph_init_dentry(struct dentry *dentry) if (dentry->d_fsdata) return 0; - di = kmem_cache_alloc(ceph_dentry_cachep, GFP_KERNEL | __GFP_ZERO); + di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL); if (!di) return -ENOMEM; /* oh well */ @@ -68,23 +68,6 @@ out_unlock: return 0; } -struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry) -{ - struct inode *inode = NULL; - - if (!dentry) - return NULL; - - spin_lock(&dentry->d_lock); - if (!IS_ROOT(dentry)) { - inode = d_inode(dentry->d_parent); - ihold(inode); - } - spin_unlock(&dentry->d_lock); - return inode; -} - - /* * for readdir, we encode the directory frag and offset within that * frag into f_pos. 
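/*
 * Aside (not part of the patch): the writepages rework earlier in this
 * diff stops batching dirty pages merely by consecutive index and
 * instead batches up to the end of the current strip unit, which
 * ceph_calc_file_object_mapping() yields by clamping the requested
 * length to the object boundary. A compilable simplification of that
 * boundary arithmetic -- single-object layout assumed; struct
 * toy_layout, toy_strip_unit_end and TOY_PAGE_SHIFT are hypothetical
 * stand-ins for struct ceph_file_layout and PAGE_CACHE_SHIFT:
 */
#include <stdint.h>

struct toy_layout {
	uint32_t stripe_unit;	/* bytes per strip unit */
};

#define TOY_PAGE_SHIFT 12	/* 4K pages */

/* Return the index of the last page one OSD request may cover for a
 * write of 'wanted' bytes (must be > 0, as wsize is in the kernel)
 * starting at 'offset'; pages past this index trigger the
 * "end of strip unit" break in the loop above. */
static uint64_t toy_strip_unit_end(const struct toy_layout *l,
				   uint64_t offset, uint64_t wanted)
{
	uint64_t objoff = offset % l->stripe_unit; /* offset inside the unit */
	uint64_t len = wanted;

	if (len > l->stripe_unit - objoff)
		len = l->stripe_unit - objoff;	/* clamp to the unit end */

	return (offset >> TOY_PAGE_SHIFT) + ((len - 1) >> TOY_PAGE_SHIFT);
}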
@@ -624,6 +607,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, struct ceph_mds_client *mdsc = fsc->mdsc; struct ceph_mds_request *req; int op; + int mask; int err; dout("lookup %p dentry %p '%pd'\n", @@ -666,8 +650,12 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, return ERR_CAST(req); req->r_dentry = dget(dentry); req->r_num_caps = 2; - /* we only need inode linkage */ - req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE); + + mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; + if (ceph_security_xattr_wanted(dir)) + mask |= CEPH_CAP_XATTR_SHARED; + req->r_args.getattr.mask = cpu_to_le32(mask); + req->r_locked_dir = dir; err = ceph_mdsc_do_request(mdsc, NULL, req); err = ceph_handle_snapdir(req, dentry, err); @@ -1095,6 +1083,7 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry) static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) { int valid = 0; + struct dentry *parent; struct inode *dir; if (flags & LOOKUP_RCU) @@ -1103,7 +1092,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry, dentry, d_inode(dentry), ceph_dentry(dentry)->offset); - dir = ceph_get_dentry_parent_inode(dentry); + parent = dget_parent(dentry); + dir = d_inode(parent); /* always trust cached snapped dentries, snapdir dentry */ if (ceph_snap(dir) != CEPH_NOSNAP) { @@ -1121,13 +1111,48 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) valid = 1; } + if (!valid) { + struct ceph_mds_client *mdsc = + ceph_sb_to_client(dir->i_sb)->mdsc; + struct ceph_mds_request *req; + int op, mask, err; + + op = ceph_snap(dir) == CEPH_SNAPDIR ? + CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP; + req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS); + if (!IS_ERR(req)) { + req->r_dentry = dget(dentry); + req->r_num_caps = 2; + + mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; + if (ceph_security_xattr_wanted(dir)) + mask |= CEPH_CAP_XATTR_SHARED; + req->r_args.getattr.mask = mask; + + req->r_locked_dir = dir; + err = ceph_mdsc_do_request(mdsc, NULL, req); + if (err == 0 || err == -ENOENT) { + if (dentry == req->r_dentry) { + valid = !d_unhashed(dentry); + } else { + d_invalidate(req->r_dentry); + err = -EAGAIN; + } + } + ceph_mdsc_put_request(req); + dout("d_revalidate %p lookup result=%d\n", + dentry, err); + } + } + dout("d_revalidate %p %s\n", dentry, valid ? 
"valid" : "invalid"); if (valid) { ceph_dentry_lru_touch(dentry); } else { ceph_dir_clear_complete(dir); } - iput(dir); + + dput(parent); return valid; } diff --git a/fs/ceph/export.c b/fs/ceph/export.c index 3b3172357326..6e72c98162d5 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -71,12 +71,18 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino) inode = ceph_find_inode(sb, vino); if (!inode) { struct ceph_mds_request *req; + int mask; req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO, USE_ANY_MDS); if (IS_ERR(req)) return ERR_CAST(req); + mask = CEPH_STAT_CAP_INODE; + if (ceph_security_xattr_wanted(d_inode(sb->s_root))) + mask |= CEPH_CAP_XATTR_SHARED; + req->r_args.getattr.mask = cpu_to_le32(mask); + req->r_ino1 = vino; req->r_num_caps = 1; err = ceph_mdsc_do_request(mdsc, NULL, req); @@ -128,6 +134,7 @@ static struct dentry *__get_parent(struct super_block *sb, struct ceph_mds_request *req; struct inode *inode; struct dentry *dentry; + int mask; int err; req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPPARENT, @@ -144,6 +151,12 @@ static struct dentry *__get_parent(struct super_block *sb, .snap = CEPH_NOSNAP, }; } + + mask = CEPH_STAT_CAP_INODE; + if (ceph_security_xattr_wanted(d_inode(sb->s_root))) + mask |= CEPH_CAP_XATTR_SHARED; + req->r_args.getattr.mask = cpu_to_le32(mask); + req->r_num_caps = 1; err = ceph_mdsc_do_request(mdsc, NULL, req); inode = req->r_target_inode; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index eb9028e8cfc5..ef38f01c1795 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -157,7 +157,7 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) case S_IFDIR: dout("init_file %p %p 0%o (regular)\n", inode, file, inode->i_mode); - cf = kmem_cache_alloc(ceph_file_cachep, GFP_KERNEL | __GFP_ZERO); + cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL); if (cf == NULL) { ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ return -ENOMEM; @@ -300,6 +300,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, struct ceph_mds_request *req; struct dentry *dn; struct ceph_acls_info acls = {}; + int mask; int err; dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n", @@ -335,6 +336,12 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, acls.pagelist = NULL; } } + + mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; + if (ceph_security_xattr_wanted(dir)) + mask |= CEPH_CAP_XATTR_SHARED; + req->r_args.open.mask = cpu_to_le32(mask); + req->r_locked_dir = dir; /* caller holds dir->i_mutex */ err = ceph_mdsc_do_request(mdsc, (flags & (O_CREAT|O_TRUNC)) ? 
dir : NULL, @@ -725,7 +732,6 @@ static void ceph_aio_retry_work(struct work_struct *work) ret = ceph_osdc_start_request(req->r_osdc, req, false); out: if (ret < 0) { - BUG_ON(ret == -EOLDSNAPC); req->r_result = ret; ceph_aio_complete_req(req, NULL); } @@ -783,7 +789,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, int num_pages = 0; int flags; int ret; - struct timespec mtime = CURRENT_TIME; + struct timespec mtime = current_fs_time(inode->i_sb); size_t count = iov_iter_count(iter); loff_t pos = iocb->ki_pos; bool write = iov_iter_rw(iter) == WRITE; @@ -949,7 +955,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, ret = ceph_osdc_start_request(req->r_osdc, req, false); if (ret < 0) { - BUG_ON(ret == -EOLDSNAPC); req->r_result = ret; ceph_aio_complete_req(req, NULL); } @@ -988,7 +993,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, int flags; int check_caps = 0; int ret; - struct timespec mtime = CURRENT_TIME; + struct timespec mtime = current_fs_time(inode->i_sb); size_t count = iov_iter_count(from); if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index e48fd8b23257..ed58b168904a 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -549,6 +549,10 @@ int ceph_fill_file_size(struct inode *inode, int issued, if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 || (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) { dout("size %lld -> %llu\n", inode->i_size, size); + if (size > 0 && S_ISDIR(inode->i_mode)) { + pr_err("fill_file_size non-zero size for directory\n"); + size = 0; + } i_size_write(inode, size); inode->i_blocks = (size + (1<<9) - 1) >> 9; ci->i_reported_size = size; @@ -1261,6 +1265,7 @@ retry_lookup: dout(" %p links to %p %llx.%llx, not %llx.%llx\n", dn, d_inode(dn), ceph_vinop(d_inode(dn)), ceph_vinop(in)); + d_invalidate(dn); have_lease = false; } @@ -1349,15 +1354,20 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn, if (!ctl->page || pgoff != page_index(ctl->page)) { ceph_readdir_cache_release(ctl); - ctl->page = grab_cache_page(&dir->i_data, pgoff); + if (idx == 0) + ctl->page = grab_cache_page(&dir->i_data, pgoff); + else + ctl->page = find_lock_page(&dir->i_data, pgoff); if (!ctl->page) { ctl->index = -1; - return -ENOMEM; + return idx == 0 ? 
-ENOMEM : 0; } /* reading/filling the cache are serialized by * i_mutex, no need to use page lock */ unlock_page(ctl->page); ctl->dentries = kmap(ctl->page); + if (idx == 0) + memset(ctl->dentries, 0, PAGE_CACHE_SIZE); } if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) && @@ -1380,7 +1390,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, struct qstr dname; struct dentry *dn; struct inode *in; - int err = 0, ret, i; + int err = 0, skipped = 0, ret, i; struct inode *snapdir = NULL; struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; struct ceph_dentry_info *di; @@ -1492,7 +1502,17 @@ retry_lookup: } if (d_really_is_negative(dn)) { - struct dentry *realdn = splice_dentry(dn, in); + struct dentry *realdn; + + if (ceph_security_xattr_deadlock(in)) { + dout(" skip splicing dn %p to inode %p" + " (security xattr deadlock)\n", dn, in); + iput(in); + skipped++; + goto next_item; + } + + realdn = splice_dentry(dn, in); if (IS_ERR(realdn)) { err = PTR_ERR(realdn); d_drop(dn); @@ -1509,7 +1529,7 @@ retry_lookup: req->r_session, req->r_request_started); - if (err == 0 && cache_ctl.index >= 0) { + if (err == 0 && skipped == 0 && cache_ctl.index >= 0) { ret = fill_readdir_cache(d_inode(parent), dn, &cache_ctl, req); if (ret < 0) @@ -1520,7 +1540,7 @@ next_item: dput(dn); } out: - if (err == 0) { + if (err == 0 && skipped == 0) { req->r_did_prepopulate = true; req->r_readdir_cache_idx = cache_ctl.index; } @@ -1950,7 +1970,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) if (dirtied) { inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, &prealloc_cf); - inode->i_ctime = CURRENT_TIME; + inode->i_ctime = current_fs_time(inode->i_sb); } release &= issued; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 911d64d865f1..44852c3ae531 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1729,7 +1729,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) init_completion(&req->r_safe_completion); INIT_LIST_HEAD(&req->r_unsafe_item); - req->r_stamp = CURRENT_TIME; + req->r_stamp = current_fs_time(mdsc->fsc->sb); req->r_op = op; req->r_direct_mode = mode; @@ -2540,6 +2540,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) /* insert trace into our cache */ mutex_lock(&req->r_fill_mutex); + current->journal_info = req; err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); if (err == 0) { if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || @@ -2547,6 +2548,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) ceph_readdir_prepopulate(req, req->r_session); ceph_unreserve_caps(mdsc, &req->r_caps_reservation); } + current->journal_info = NULL; mutex_unlock(&req->r_fill_mutex); up_read(&mdsc->snap_rwsem); @@ -3764,7 +3766,6 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) dout("handle_map epoch %u len %d\n", epoch, (int)maplen); /* do we need it? 
*/ - ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch); mutex_lock(&mdsc->mutex); if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { dout("handle_map epoch %u <= our %u\n", @@ -3791,6 +3792,8 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; __wake_requests(mdsc, &mdsc->waiting_for_map); + ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP, + mdsc->mdsmap->m_epoch); mutex_unlock(&mdsc->mutex); schedule_delayed(mdsc); diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 4aa7122a8d38..9caaa7ffc93f 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -296,8 +296,6 @@ static int cmpu64_rev(const void *a, const void *b) } -struct ceph_snap_context *ceph_empty_snapc; - /* * build the snap context for a given realm. */ @@ -987,17 +985,3 @@ out: up_write(&mdsc->snap_rwsem); return; } - -int __init ceph_snap_init(void) -{ - ceph_empty_snapc = ceph_create_snap_context(0, GFP_NOFS); - if (!ceph_empty_snapc) - return -ENOMEM; - ceph_empty_snapc->seq = 1; - return 0; -} - -void ceph_snap_exit(void) -{ - ceph_put_snap_context(ceph_empty_snapc); -} diff --git a/fs/ceph/super.c b/fs/ceph/super.c index ca4d5e8457f1..c973043deb0e 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -439,8 +439,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT) seq_puts(m, ",dirstat"); - if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0) - seq_puts(m, ",norbytes"); + if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES)) + seq_puts(m, ",rbytes"); if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR) seq_puts(m, ",noasyncreaddir"); if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0) @@ -530,7 +530,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, goto fail; } fsc->client->extra_mon_dispatch = extra_mon_dispatch; - fsc->client->monc.want_mdsmap = 1; + ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, 0, true); fsc->mount_options = fsopt; @@ -793,22 +793,20 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc, struct dentry *root; int first = 0; /* first vfsmount for this super_block */ - dout("mount start\n"); + dout("mount start %p\n", fsc); mutex_lock(&fsc->client->mount_mutex); - err = __ceph_open_session(fsc->client, started); - if (err < 0) - goto out; + if (!fsc->sb->s_root) { + err = __ceph_open_session(fsc->client, started); + if (err < 0) + goto out; - dout("mount opening root\n"); - root = open_root_dentry(fsc, "", started); - if (IS_ERR(root)) { - err = PTR_ERR(root); - goto out; - } - if (fsc->sb->s_root) { - dput(root); - } else { + dout("mount opening root\n"); + root = open_root_dentry(fsc, "", started); + if (IS_ERR(root)) { + err = PTR_ERR(root); + goto out; + } fsc->sb->s_root = root; first = 1; @@ -818,6 +816,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc, } if (path[0] == 0) { + root = fsc->sb->s_root; dget(root); } else { dout("mount opening base mountpoint\n"); @@ -833,16 +832,14 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc, mutex_unlock(&fsc->client->mount_mutex); return root; -out: - mutex_unlock(&fsc->client->mount_mutex); - return ERR_PTR(err); - fail: if (first) { dput(fsc->sb->s_root); fsc->sb->s_root = NULL; } - goto out; +out: + mutex_unlock(&fsc->client->mount_mutex); + return ERR_PTR(err); } static int ceph_set_super(struct super_block *s, void *data) @@ -1042,19 +1039,14 @@ static int __init init_ceph(void) ceph_flock_init(); ceph_xattr_init(); 
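/*
 * Aside (not part of the patch): with ceph_empty_snapc gone -- callers
 * in this diff now pass a NULL snap context instead -- init_ceph below
 * loses one initialization step, so its error labels are re-targeted.
 * The goto-unwind idiom being adjusted, sketched with hypothetical
 * demo_* names standing in for ceph_xattr_init()/register_filesystem():
 *
 *	static int demo_init(void)
 *	{
 *		int ret = demo_init_xattr();
 *		if (ret)
 *			return ret;		// nothing to unwind yet
 *		ret = demo_register_fs();
 *		if (ret)
 *			goto out_xattr;		// unwind in reverse order
 *		return 0;
 *	out_xattr:
 *		demo_exit_xattr();
 *		return ret;
 *	}
 */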
- ret = ceph_snap_init(); - if (ret) - goto out_xattr; ret = register_filesystem(&ceph_fs_type); if (ret) - goto out_snap; + goto out_xattr; pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL); return 0; -out_snap: - ceph_snap_exit(); out_xattr: ceph_xattr_exit(); destroy_caches(); @@ -1066,7 +1058,6 @@ static void __exit exit_ceph(void) { dout("exit_ceph\n"); unregister_filesystem(&ceph_fs_type); - ceph_snap_exit(); ceph_xattr_exit(); destroy_caches(); } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 9c458eb52245..e705c4d612d7 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -37,8 +37,7 @@ #define CEPH_MOUNT_OPT_FSCACHE (1<<10) /* use fscache */ #define CEPH_MOUNT_OPT_NOPOOLPERM (1<<11) /* no pool permission check */ -#define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES | \ - CEPH_MOUNT_OPT_DCACHE) +#define CEPH_MOUNT_OPT_DEFAULT CEPH_MOUNT_OPT_DCACHE #define ceph_set_mount_opt(fsc, opt) \ (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt; @@ -469,7 +468,7 @@ static inline struct inode *ceph_find_inode(struct super_block *sb, #define CEPH_I_POOL_PERM (1 << 4) /* pool rd/wr bits are valid */ #define CEPH_I_POOL_RD (1 << 5) /* can read from pool */ #define CEPH_I_POOL_WR (1 << 6) /* can write to pool */ - +#define CEPH_I_SEC_INITED (1 << 7) /* security initialized */ static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci, long long release_count, @@ -721,7 +720,6 @@ static inline int default_congestion_kb(void) /* snap.c */ -extern struct ceph_snap_context *ceph_empty_snapc; struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc, u64 ino); extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc, @@ -738,8 +736,6 @@ extern void ceph_queue_cap_snap(struct ceph_inode_info *ci); extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci, struct ceph_cap_snap *capsnap); extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc); -extern int ceph_snap_init(void); -extern void ceph_snap_exit(void); /* * a cap_snap is "pending" if it is still awaiting an in-progress @@ -808,6 +804,20 @@ extern void __init ceph_xattr_init(void); extern void ceph_xattr_exit(void); extern const struct xattr_handler *ceph_xattr_handlers[]; +#ifdef CONFIG_SECURITY +extern bool ceph_security_xattr_deadlock(struct inode *in); +extern bool ceph_security_xattr_wanted(struct inode *in); +#else +static inline bool ceph_security_xattr_deadlock(struct inode *in) +{ + return false; +} +static inline bool ceph_security_xattr_wanted(struct inode *in) +{ + return false; +} +#endif + /* acl.c */ struct ceph_acls_info { void *default_acl; @@ -947,7 +957,6 @@ extern void ceph_dentry_lru_touch(struct dentry *dn); extern void ceph_dentry_lru_del(struct dentry *dn); extern void ceph_invalidate_dentry_lease(struct dentry *dentry); extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn); -extern struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry); extern void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl); /* diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 819163d8313b..9410abdef3ce 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -714,31 +714,62 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci) } } +static inline int __get_request_mask(struct inode *in) { + struct ceph_mds_request *req = current->journal_info; + int mask = 0; + if (req && req->r_target_inode == in) { + if (req->r_op == CEPH_MDS_OP_LOOKUP || + req->r_op == CEPH_MDS_OP_LOOKUPINO || + req->r_op == CEPH_MDS_OP_LOOKUPPARENT || + req->r_op 
== CEPH_MDS_OP_GETATTR) { + mask = le32_to_cpu(req->r_args.getattr.mask); + } else if (req->r_op == CEPH_MDS_OP_OPEN || + req->r_op == CEPH_MDS_OP_CREATE) { + mask = le32_to_cpu(req->r_args.open.mask); + } + } + return mask; +} + ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value, size_t size) { struct ceph_inode_info *ci = ceph_inode(inode); - int err; struct ceph_inode_xattr *xattr; struct ceph_vxattr *vxattr = NULL; + int req_mask; + int err; if (!ceph_is_valid_xattr(name)) return -ENODATA; /* let's see if a virtual xattr was requested */ vxattr = ceph_match_vxattr(inode, name); - if (vxattr && !(vxattr->exists_cb && !vxattr->exists_cb(ci))) { - err = vxattr->getxattr_cb(ci, value, size); + if (vxattr) { + err = -ENODATA; + if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) + err = vxattr->getxattr_cb(ci, value, size); return err; } + req_mask = __get_request_mask(inode); + spin_lock(&ci->i_ceph_lock); dout("getxattr %p ver=%lld index_ver=%lld\n", inode, ci->i_xattrs.version, ci->i_xattrs.index_version); if (ci->i_xattrs.version == 0 || - !__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1)) { + !((req_mask & CEPH_CAP_XATTR_SHARED) || + __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1))) { spin_unlock(&ci->i_ceph_lock); + + /* security module gets xattr while filling trace */ + if (current->journal_info != NULL) { + pr_warn_ratelimited("sync getxattr %p " + "during filling trace\n", inode); + return -EBUSY; + } + /* get xattrs from mds (if we don't already have them) */ err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true); if (err) @@ -765,6 +796,9 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value, memcpy(value, xattr->val, xattr->val_len); + if (current->journal_info != NULL && + !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) + ci->i_ceph_flags |= CEPH_I_SEC_INITED; out: spin_unlock(&ci->i_ceph_lock); return err; @@ -999,7 +1033,7 @@ retry: dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL, &prealloc_cf); ci->i_xattrs.dirty = true; - inode->i_ctime = CURRENT_TIME; + inode->i_ctime = current_fs_time(inode->i_sb); } spin_unlock(&ci->i_ceph_lock); @@ -1015,7 +1049,15 @@ do_sync: do_sync_unlocked: if (lock_snap_rwsem) up_read(&mdsc->snap_rwsem); - err = ceph_sync_setxattr(dentry, name, value, size, flags); + + /* security module set xattr while filling trace */ + if (current->journal_info != NULL) { + pr_warn_ratelimited("sync setxattr %p " + "during filling trace\n", inode); + err = -EBUSY; + } else { + err = ceph_sync_setxattr(dentry, name, value, size, flags); + } out: ceph_free_cap_flush(prealloc_cf); kfree(newname); @@ -1136,7 +1178,7 @@ retry: dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL, &prealloc_cf); ci->i_xattrs.dirty = true; - inode->i_ctime = CURRENT_TIME; + inode->i_ctime = current_fs_time(inode->i_sb); spin_unlock(&ci->i_ceph_lock); if (lock_snap_rwsem) up_read(&mdsc->snap_rwsem); @@ -1164,3 +1206,25 @@ int ceph_removexattr(struct dentry *dentry, const char *name) return __ceph_removexattr(dentry, name); } + +#ifdef CONFIG_SECURITY +bool ceph_security_xattr_wanted(struct inode *in) +{ + return in->i_security != NULL; +} + +bool ceph_security_xattr_deadlock(struct inode *in) +{ + struct ceph_inode_info *ci; + bool ret; + if (in->i_security == NULL) + return false; + ci = ceph_inode(in); + spin_lock(&ci->i_ceph_lock); + ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) && + !(ci->i_xattrs.version > 0 && + __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0)); + 
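/*
 * Aside (not part of the patch): ceph_security_xattr_deadlock(), being
 * completed here, pairs with the current->journal_info trick added in
 * mds_client.c and xattr.c. handle_reply() parks the in-flight request
 * in current->journal_info while ceph_fill_trace() runs, so a
 * security-module getxattr/setxattr issued from inside that path is
 * recognized and refused with -EBUSY rather than issuing a second,
 * deadlocking MDS request. The shape of the guard, with a thread-local
 * standing in for current->journal_info (names hypothetical):
 *
 *	static __thread void *in_flight_req;
 *
 *	static int sync_xattr_request(void)
 *	{
 *		if (in_flight_req)
 *			return -EBUSY;	// re-entered while filling trace
 *		// ... normal synchronous MDS round trip ...
 *		return 0;
 *	}
 *
 *	static void fill_trace(void *req)
 *	{
 *		in_flight_req = req;
 *		// ... instantiate inodes; may call the security module ...
 *		in_flight_req = NULL;
 *	}
 */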
spin_unlock(&ci->i_ceph_lock); + return ret; +} +#endif diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index aed9cccca505..06cd1a22240b 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -170,7 +170,7 @@ static int do_page_crypto(struct inode *inode, fscrypt_complete, &ecr); BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index)); - memcpy(xts_tweak, &inode->i_ino, sizeof(index)); + memcpy(xts_tweak, &index, sizeof(index)); memset(&xts_tweak[sizeof(index)], 0, FS_XTS_TWEAK_SIZE - sizeof(index)); diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index d002579c6f2b..70907d638b60 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -2516,21 +2516,6 @@ static int ocfs2_update_edge_lengths(handle_t *handle, struct ocfs2_extent_block *eb; u32 range; - /* - * In normal tree rotation process, we will never touch the - * tree branch above subtree_index and ocfs2_extend_rotate_transaction - * doesn't reserve the credits for them either. - * - * But we do have a special case here which will update the rightmost - * records for all the bh in the path. - * So we have to allocate extra credits and access them. - */ - ret = ocfs2_extend_trans(handle, subtree_index); - if (ret) { - mlog_errno(ret); - goto out; - } - ret = ocfs2_journal_access_path(et->et_ci, handle, path); if (ret) { mlog_errno(ret); @@ -2956,7 +2941,7 @@ static int __ocfs2_rotate_tree_left(handle_t *handle, right_path->p_node[subtree_root].bh->b_blocknr, right_path->p_tree_depth); - ret = ocfs2_extend_rotate_transaction(handle, subtree_root, + ret = ocfs2_extend_rotate_transaction(handle, 0, orig_credits, left_path); if (ret) { mlog_errno(ret); @@ -3029,21 +3014,9 @@ static int ocfs2_remove_rightmost_path(handle_t *handle, struct ocfs2_extent_block *eb; struct ocfs2_extent_list *el; - ret = ocfs2_et_sanity_check(et); if (ret) goto out; - /* - * There's two ways we handle this depending on - * whether path is the only existing one. - */ - ret = ocfs2_extend_rotate_transaction(handle, 0, - handle->h_buffer_credits, - path); - if (ret) { - mlog_errno(ret); - goto out; - } ret = ocfs2_journal_access_path(et->et_ci, handle, path); if (ret) { @@ -3641,6 +3614,14 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path, */ if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 && le16_to_cpu(el->l_next_free_rec) == 1) { + /* extend credit for ocfs2_remove_rightmost_path */ + ret = ocfs2_extend_rotate_transaction(handle, 0, + handle->h_buffer_credits, + right_path); + if (ret) { + mlog_errno(ret); + goto out; + } ret = ocfs2_remove_rightmost_path(handle, et, right_path, @@ -3679,6 +3660,14 @@ static int ocfs2_try_to_merge_extent(handle_t *handle, BUG_ON(ctxt->c_contig_type == CONTIG_NONE); if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) { + /* extend credit for ocfs2_remove_rightmost_path */ + ret = ocfs2_extend_rotate_transaction(handle, 0, + handle->h_buffer_credits, + path); + if (ret) { + mlog_errno(ret); + goto out; + } /* * The merge code will need to create an empty * extent to take the place of the newly @@ -3727,6 +3716,15 @@ static int ocfs2_try_to_merge_extent(handle_t *handle, */ BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0])); + /* extend credit for ocfs2_remove_rightmost_path */ + ret = ocfs2_extend_rotate_transaction(handle, 0, + handle->h_buffer_credits, + path); + if (ret) { + mlog_errno(ret); + goto out; + } + /* The merge left us with an empty extent, remove it. 
*/ ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); if (ret) { @@ -3748,6 +3746,15 @@ static int ocfs2_try_to_merge_extent(handle_t *handle, goto out; } + /* extend credit for ocfs2_remove_rightmost_path */ + ret = ocfs2_extend_rotate_transaction(handle, 0, + handle->h_buffer_credits, + path); + if (ret) { + mlog_errno(ret); + goto out; + } + ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); /* * Error from this last rotate is not critical, so @@ -3783,6 +3790,16 @@ static int ocfs2_try_to_merge_extent(handle_t *handle, } if (ctxt->c_split_covers_rec) { + /* extend credit for ocfs2_remove_rightmost_path */ + ret = ocfs2_extend_rotate_transaction(handle, 0, + handle->h_buffer_credits, + path); + if (ret) { + mlog_errno(ret); + ret = 0; + goto out; + } + /* * The merge may have left an empty extent in * our leaf. Try to rotate it away. @@ -5342,6 +5359,15 @@ static int ocfs2_truncate_rec(handle_t *handle, struct ocfs2_extent_block *eb; if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) { + /* extend credit for ocfs2_remove_rightmost_path */ + ret = ocfs2_extend_rotate_transaction(handle, 0, + handle->h_buffer_credits, + path); + if (ret) { + mlog_errno(ret); + goto out; + } + ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); if (ret) { mlog_errno(ret); @@ -5928,16 +5954,6 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, ocfs2_journal_dirty(handle, tl_bh); - /* TODO: Perhaps we can calculate the bulk of the - * credits up front rather than extending like - * this. */ - status = ocfs2_extend_trans(handle, - OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC); - if (status < 0) { - mlog_errno(status); - goto bail; - } - rec = tl->tl_recs[i]; start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb, le32_to_cpu(rec.t_start)); @@ -5958,6 +5974,13 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, goto bail; } } + + status = ocfs2_extend_trans(handle, + OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC); + if (status < 0) { + mlog_errno(status); + goto bail; + } i--; } @@ -6016,7 +6039,7 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb) goto out_mutex; } - handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); + handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); @@ -6079,7 +6102,7 @@ void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb, if (cancel) cancel_delayed_work(&osb->osb_truncate_log_wq); - queue_delayed_work(ocfs2_wq, &osb->osb_truncate_log_wq, + queue_delayed_work(osb->ocfs2_wq, &osb->osb_truncate_log_wq, OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL); } } @@ -6253,7 +6276,7 @@ void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb) if (tl_inode) { cancel_delayed_work(&osb->osb_truncate_log_wq); - flush_workqueue(ocfs2_wq); + flush_workqueue(osb->ocfs2_wq); status = ocfs2_flush_truncate_log(osb); if (status < 0) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 043110e5212d..1581240a7ca0 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -499,158 +499,6 @@ bail: return status; } -/* - * TODO: Make this into a generic get_blocks function. - * - * From do_direct_io in direct-io.c: - * "So what we do is to permit the ->get_blocks function to populate - * bh.b_size with the size of IO which is permitted at this offset and - * this i_blkbits." - * - * This function is called directly from get_more_blocks in direct-io.c. 
- * - * called like this: dio->get_blocks(dio->inode, fs_startblk, - * fs_count, map_bh, dio->rw == WRITE); - */ -static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create) -{ - int ret; - u32 cpos = 0; - int alloc_locked = 0; - u64 p_blkno, inode_blocks, contig_blocks; - unsigned int ext_flags; - unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; - unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; - unsigned long len = bh_result->b_size; - unsigned int clusters_to_alloc = 0, contig_clusters = 0; - - cpos = ocfs2_blocks_to_clusters(inode->i_sb, iblock); - - /* This function won't even be called if the request isn't all - * nicely aligned and of the right size, so there's no need - * for us to check any of that. */ - - inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); - - down_read(&OCFS2_I(inode)->ip_alloc_sem); - - /* This figures out the size of the next contiguous block, and - * our logical offset */ - ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, - &contig_blocks, &ext_flags); - up_read(&OCFS2_I(inode)->ip_alloc_sem); - - if (ret) { - mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n", - (unsigned long long)iblock); - ret = -EIO; - goto bail; - } - - /* We should already CoW the refcounted extent in case of create. */ - BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED)); - - /* allocate blocks if no p_blkno is found, and create == 1 */ - if (!p_blkno && create) { - ret = ocfs2_inode_lock(inode, NULL, 1); - if (ret < 0) { - mlog_errno(ret); - goto bail; - } - - alloc_locked = 1; - - down_write(&OCFS2_I(inode)->ip_alloc_sem); - - /* fill hole, allocate blocks can't be larger than the size - * of the hole */ - clusters_to_alloc = ocfs2_clusters_for_bytes(inode->i_sb, len); - contig_clusters = ocfs2_clusters_for_blocks(inode->i_sb, - contig_blocks); - if (clusters_to_alloc > contig_clusters) - clusters_to_alloc = contig_clusters; - - /* allocate extent and insert them into the extent tree */ - ret = ocfs2_extend_allocation(inode, cpos, - clusters_to_alloc, 0); - if (ret < 0) { - up_write(&OCFS2_I(inode)->ip_alloc_sem); - mlog_errno(ret); - goto bail; - } - - ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, - &contig_blocks, &ext_flags); - if (ret < 0) { - up_write(&OCFS2_I(inode)->ip_alloc_sem); - mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n", - (unsigned long long)iblock); - ret = -EIO; - goto bail; - } - set_buffer_new(bh_result); - up_write(&OCFS2_I(inode)->ip_alloc_sem); - } - - /* - * get_more_blocks() expects us to describe a hole by clearing - * the mapped bit on bh_result(). - * - * Consider an unwritten extent as a hole. - */ - if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN)) - map_bh(bh_result, inode->i_sb, p_blkno); - else - clear_buffer_mapped(bh_result); - - /* make sure we don't map more than max_blocks blocks here as - that's all the kernel will handle at this point. */ - if (max_blocks < contig_blocks) - contig_blocks = max_blocks; - bh_result->b_size = contig_blocks << blocksize_bits; -bail: - if (alloc_locked) - ocfs2_inode_unlock(inode, 1); - return ret; -} - -/* - * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're - * particularly interested in the aio/dio case. We use the rw_lock DLM lock - * to protect io on one node from truncation on another. 
- */ -static int ocfs2_dio_end_io(struct kiocb *iocb, - loff_t offset, - ssize_t bytes, - void *private) -{ - struct inode *inode = file_inode(iocb->ki_filp); - int level; - - if (bytes <= 0) - return 0; - - /* this io's submitter should not have unlocked this before we could */ - BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); - - if (ocfs2_iocb_is_unaligned_aio(iocb)) { - ocfs2_iocb_clear_unaligned_aio(iocb); - - mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio); - } - - /* Let rw unlock to be done later to protect append direct io write */ - if (offset + bytes <= i_size_read(inode)) { - ocfs2_iocb_clear_rw_locked(iocb); - - level = ocfs2_iocb_rw_locked_level(iocb); - ocfs2_rw_unlock(inode, level); - } - - return 0; -} - static int ocfs2_releasepage(struct page *page, gfp_t wait) { if (!page_has_buffers(page)) @@ -658,363 +506,6 @@ static int ocfs2_releasepage(struct page *page, gfp_t wait) return try_to_free_buffers(page); } -static int ocfs2_is_overwrite(struct ocfs2_super *osb, - struct inode *inode, loff_t offset) -{ - int ret = 0; - u32 v_cpos = 0; - u32 p_cpos = 0; - unsigned int num_clusters = 0; - unsigned int ext_flags = 0; - - v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset); - ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, - &num_clusters, &ext_flags); - if (ret < 0) { - mlog_errno(ret); - return ret; - } - - if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) - return 1; - - return 0; -} - -static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb, - struct inode *inode, loff_t offset, - u64 zero_len, int cluster_align) -{ - u32 p_cpos = 0; - u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, i_size_read(inode)); - unsigned int num_clusters = 0; - unsigned int ext_flags = 0; - int ret = 0; - - if (offset <= i_size_read(inode) || cluster_align) - return 0; - - ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters, - &ext_flags); - if (ret < 0) { - mlog_errno(ret); - return ret; - } - - if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) { - u64 s = i_size_read(inode); - sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) + - (do_div(s, osb->s_clustersize) >> 9); - - ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector, - zero_len >> 9, GFP_NOFS, false); - if (ret < 0) - mlog_errno(ret); - } - - return ret; -} - -static int ocfs2_direct_IO_extend_no_holes(struct ocfs2_super *osb, - struct inode *inode, loff_t offset) -{ - u64 zero_start, zero_len, total_zero_len; - u32 p_cpos = 0, clusters_to_add; - u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, i_size_read(inode)); - unsigned int num_clusters = 0; - unsigned int ext_flags = 0; - u32 size_div, offset_div; - int ret = 0; - - { - u64 o = offset; - u64 s = i_size_read(inode); - - offset_div = do_div(o, osb->s_clustersize); - size_div = do_div(s, osb->s_clustersize); - } - - if (offset <= i_size_read(inode)) - return 0; - - clusters_to_add = ocfs2_bytes_to_clusters(inode->i_sb, offset) - - ocfs2_bytes_to_clusters(inode->i_sb, i_size_read(inode)); - total_zero_len = offset - i_size_read(inode); - if (clusters_to_add) - total_zero_len -= offset_div; - - /* Allocate clusters to fill out holes, and this is only needed - * when we add more than one clusters. 
Otherwise the cluster will - * be allocated during direct IO */ - if (clusters_to_add > 1) { - ret = ocfs2_extend_allocation(inode, - OCFS2_I(inode)->ip_clusters, - clusters_to_add - 1, 0); - if (ret) { - mlog_errno(ret); - goto out; - } - } - - while (total_zero_len) { - ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters, - &ext_flags); - if (ret < 0) { - mlog_errno(ret); - goto out; - } - - zero_start = ocfs2_clusters_to_bytes(osb->sb, p_cpos) + - size_div; - zero_len = ocfs2_clusters_to_bytes(osb->sb, num_clusters) - - size_div; - zero_len = min(total_zero_len, zero_len); - - if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) { - ret = blkdev_issue_zeroout(osb->sb->s_bdev, - zero_start >> 9, zero_len >> 9, - GFP_NOFS, false); - if (ret < 0) { - mlog_errno(ret); - goto out; - } - } - - total_zero_len -= zero_len; - v_cpos += ocfs2_bytes_to_clusters(osb->sb, zero_len + size_div); - - /* Only at first iteration can be cluster not aligned. - * So set size_div to 0 for the rest */ - size_div = 0; - } - -out: - return ret; -} - -static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb, - struct iov_iter *iter, - loff_t offset) -{ - ssize_t ret = 0; - ssize_t written = 0; - bool orphaned = false; - int is_overwrite = 0; - struct file *file = iocb->ki_filp; - struct inode *inode = file_inode(file)->i_mapping->host; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct buffer_head *di_bh = NULL; - size_t count = iter->count; - journal_t *journal = osb->journal->j_journal; - u64 zero_len_head, zero_len_tail; - int cluster_align_head, cluster_align_tail; - loff_t final_size = offset + count; - int append_write = offset >= i_size_read(inode) ? 1 : 0; - unsigned int num_clusters = 0; - unsigned int ext_flags = 0; - - { - u64 o = offset; - u64 s = i_size_read(inode); - - zero_len_head = do_div(o, 1 << osb->s_clustersize_bits); - cluster_align_head = !zero_len_head; - - zero_len_tail = osb->s_clustersize - - do_div(s, osb->s_clustersize); - if ((offset - i_size_read(inode)) < zero_len_tail) - zero_len_tail = offset - i_size_read(inode); - cluster_align_tail = !zero_len_tail; - } - - /* - * when final_size > inode->i_size, inode->i_size will be - * updated after direct write, so add the inode to orphan - * dir first. 
- */ - if (final_size > i_size_read(inode)) { - ret = ocfs2_add_inode_to_orphan(osb, inode); - if (ret < 0) { - mlog_errno(ret); - goto out; - } - orphaned = true; - } - - if (append_write) { - ret = ocfs2_inode_lock(inode, NULL, 1); - if (ret < 0) { - mlog_errno(ret); - goto clean_orphan; - } - - /* zeroing out the previously allocated cluster tail - * that but not zeroed */ - if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) { - down_read(&OCFS2_I(inode)->ip_alloc_sem); - ret = ocfs2_direct_IO_zero_extend(osb, inode, offset, - zero_len_tail, cluster_align_tail); - up_read(&OCFS2_I(inode)->ip_alloc_sem); - } else { - down_write(&OCFS2_I(inode)->ip_alloc_sem); - ret = ocfs2_direct_IO_extend_no_holes(osb, inode, - offset); - up_write(&OCFS2_I(inode)->ip_alloc_sem); - } - if (ret < 0) { - mlog_errno(ret); - ocfs2_inode_unlock(inode, 1); - goto clean_orphan; - } - - is_overwrite = ocfs2_is_overwrite(osb, inode, offset); - if (is_overwrite < 0) { - mlog_errno(is_overwrite); - ret = is_overwrite; - ocfs2_inode_unlock(inode, 1); - goto clean_orphan; - } - - ocfs2_inode_unlock(inode, 1); - } - - written = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, - offset, ocfs2_direct_IO_get_blocks, - ocfs2_dio_end_io, NULL, 0); - /* overwrite aio may return -EIOCBQUEUED, and it is not an error */ - if ((written < 0) && (written != -EIOCBQUEUED)) { - loff_t i_size = i_size_read(inode); - - if (offset + count > i_size) { - ret = ocfs2_inode_lock(inode, &di_bh, 1); - if (ret < 0) { - mlog_errno(ret); - goto clean_orphan; - } - - if (i_size == i_size_read(inode)) { - ret = ocfs2_truncate_file(inode, di_bh, - i_size); - if (ret < 0) { - if (ret != -ENOSPC) - mlog_errno(ret); - - ocfs2_inode_unlock(inode, 1); - brelse(di_bh); - di_bh = NULL; - goto clean_orphan; - } - } - - ocfs2_inode_unlock(inode, 1); - brelse(di_bh); - di_bh = NULL; - - ret = jbd2_journal_force_commit(journal); - if (ret < 0) - mlog_errno(ret); - } - } else if (written > 0 && append_write && !is_overwrite && - !cluster_align_head) { - /* zeroing out the allocated cluster head */ - u32 p_cpos = 0; - u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset); - - ret = ocfs2_inode_lock(inode, NULL, 0); - if (ret < 0) { - mlog_errno(ret); - goto clean_orphan; - } - - ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, - &num_clusters, &ext_flags); - if (ret < 0) { - mlog_errno(ret); - ocfs2_inode_unlock(inode, 0); - goto clean_orphan; - } - - BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN)); - - ret = blkdev_issue_zeroout(osb->sb->s_bdev, - (u64)p_cpos << (osb->s_clustersize_bits - 9), - zero_len_head >> 9, GFP_NOFS, false); - if (ret < 0) - mlog_errno(ret); - - ocfs2_inode_unlock(inode, 0); - } - -clean_orphan: - if (orphaned) { - int tmp_ret; - int update_isize = written > 0 ? 1 : 0; - loff_t end = update_isize ? 
offset + written : 0; - - tmp_ret = ocfs2_inode_lock(inode, &di_bh, 1); - if (tmp_ret < 0) { - ret = tmp_ret; - mlog_errno(ret); - goto out; - } - - tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, - update_isize, end); - if (tmp_ret < 0) { - ocfs2_inode_unlock(inode, 1); - ret = tmp_ret; - mlog_errno(ret); - brelse(di_bh); - goto out; - } - - ocfs2_inode_unlock(inode, 1); - brelse(di_bh); - - tmp_ret = jbd2_journal_force_commit(journal); - if (tmp_ret < 0) { - ret = tmp_ret; - mlog_errno(tmp_ret); - } - } - -out: - if (ret >= 0) - ret = written; - return ret; -} - -static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) -{ - struct file *file = iocb->ki_filp; - struct inode *inode = file_inode(file)->i_mapping->host; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - int full_coherency = !(osb->s_mount_opt & - OCFS2_MOUNT_COHERENCY_BUFFERED); - - /* - * Fallback to buffered I/O if we see an inode without - * extents. - */ - if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) - return 0; - - /* Fallback to buffered I/O if we are appending and - * concurrent O_DIRECT writes are allowed. - */ - if (i_size_read(inode) <= offset && !full_coherency) - return 0; - - if (iov_iter_rw(iter) == READ) - return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, - iter, offset, - ocfs2_direct_IO_get_blocks, - ocfs2_dio_end_io, NULL, 0); - else - return ocfs2_direct_IO_write(iocb, iter, offset); -} - static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, u32 cpos, unsigned int *start, @@ -1201,6 +692,13 @@ next_bh: #define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE) +struct ocfs2_unwritten_extent { + struct list_head ue_node; + struct list_head ue_ip_node; + u32 ue_cpos; + u32 ue_phys; +}; + /* * Describe the state of a single cluster to be written to. */ @@ -1212,7 +710,7 @@ struct ocfs2_write_cluster_desc { * filled. */ unsigned c_new; - unsigned c_unwritten; + unsigned c_clear_unwritten; unsigned c_needs_zero; }; @@ -1224,6 +722,9 @@ struct ocfs2_write_ctxt { /* First cluster allocated in a nonsparse extend */ u32 w_first_new_cpos; + /* Type of caller. Must be one of buffer, mmap, direct. 
*/ + ocfs2_write_type_t w_type; + struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE]; /* @@ -1272,6 +773,8 @@ struct ocfs2_write_ctxt { struct buffer_head *w_di_bh; struct ocfs2_cached_dealloc_ctxt w_dealloc; + + struct list_head w_unwritten_list; }; void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) @@ -1310,8 +813,25 @@ static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); } -static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) +static void ocfs2_free_unwritten_list(struct inode *inode, + struct list_head *head) { + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL; + + list_for_each_entry_safe(ue, tmp, head, ue_node) { + list_del(&ue->ue_node); + spin_lock(&oi->ip_lock); + list_del(&ue->ue_ip_node); + spin_unlock(&oi->ip_lock); + kfree(ue); + } +} + +static void ocfs2_free_write_ctxt(struct inode *inode, + struct ocfs2_write_ctxt *wc) +{ + ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list); ocfs2_unlock_pages(wc); brelse(wc->w_di_bh); kfree(wc); @@ -1319,7 +839,8 @@ static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, struct ocfs2_super *osb, loff_t pos, - unsigned len, struct buffer_head *di_bh) + unsigned len, ocfs2_write_type_t type, + struct buffer_head *di_bh) { u32 cend; struct ocfs2_write_ctxt *wc; @@ -1334,6 +855,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, wc->w_clen = cend - wc->w_cpos + 1; get_bh(di_bh); wc->w_di_bh = di_bh; + wc->w_type = type; if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) wc->w_large_pages = 1; @@ -1341,6 +863,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, wc->w_large_pages = 0; ocfs2_init_dealloc_ctxt(&wc->w_dealloc); + INIT_LIST_HEAD(&wc->w_unwritten_list); *wcp = wc; @@ -1401,12 +924,13 @@ static void ocfs2_write_failure(struct inode *inode, to = user_pos + user_len; struct page *tmppage; - ocfs2_zero_new_buffers(wc->w_target_page, from, to); + if (wc->w_target_page) + ocfs2_zero_new_buffers(wc->w_target_page, from, to); for(i = 0; i < wc->w_num_pages; i++) { tmppage = wc->w_pages[i]; - if (page_has_buffers(tmppage)) { + if (tmppage && page_has_buffers(tmppage)) { if (ocfs2_should_order_data(inode)) ocfs2_jbd2_file_inode(wc->w_handle, inode); @@ -1536,11 +1060,13 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, wc->w_num_pages = 1; start = target_index; } + end_index = (user_pos + user_len - 1) >> PAGE_CACHE_SHIFT; for(i = 0; i < wc->w_num_pages; i++) { index = start + i; - if (index == target_index && mmap_page) { + if (index >= target_index && index <= end_index && + wc->w_type == OCFS2_WRITE_MMAP) { /* * ocfs2_pagemkwrite() is a little different * and wants us to directly use the page @@ -1559,6 +1085,11 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, page_cache_get(mmap_page); wc->w_pages[i] = mmap_page; wc->w_target_locked = true; + } else if (index >= target_index && index <= end_index && + wc->w_type == OCFS2_WRITE_DIRECT) { + /* Direct write has no mapping page. */ + wc->w_pages[i] = NULL; + continue; } else { wc->w_pages[i] = find_or_create_page(mapping, index, GFP_NOFS); @@ -1583,19 +1114,20 @@ out: * Prepare a single cluster for write one cluster into the file. 
*/ static int ocfs2_write_cluster(struct address_space *mapping, - u32 phys, unsigned int unwritten, + u32 *phys, unsigned int new, + unsigned int clear_unwritten, unsigned int should_zero, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos, unsigned user_len) { - int ret, i, new; - u64 v_blkno, p_blkno; + int ret, i; + u64 p_blkno; struct inode *inode = mapping->host; struct ocfs2_extent_tree et; + int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); - new = phys == 0 ? 1 : 0; if (new) { u32 tmp_pos; @@ -1605,9 +1137,9 @@ static int ocfs2_write_cluster(struct address_space *mapping, */ tmp_pos = cpos; ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode, - &tmp_pos, 1, 0, wc->w_di_bh, - wc->w_handle, data_ac, - meta_ac, NULL); + &tmp_pos, 1, !clear_unwritten, + wc->w_di_bh, wc->w_handle, + data_ac, meta_ac, NULL); /* * This shouldn't happen because we must have already * calculated the correct meta data allocation required. The @@ -1624,11 +1156,11 @@ static int ocfs2_write_cluster(struct address_space *mapping, mlog_errno(ret); goto out; } - } else if (unwritten) { + } else if (clear_unwritten) { ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), wc->w_di_bh); ret = ocfs2_mark_extent_written(inode, &et, - wc->w_handle, cpos, 1, phys, + wc->w_handle, cpos, 1, *phys, meta_ac, &wc->w_dealloc); if (ret < 0) { mlog_errno(ret); @@ -1636,30 +1168,33 @@ static int ocfs2_write_cluster(struct address_space *mapping, } } - if (should_zero) - v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, cpos); - else - v_blkno = user_pos >> inode->i_sb->s_blocksize_bits; - /* * The only reason this should fail is due to an inability to * find the extent added. */ - ret = ocfs2_extent_map_get_blocks(inode, v_blkno, &p_blkno, NULL, - NULL); + ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL); if (ret < 0) { mlog(ML_ERROR, "Get physical blkno failed for inode %llu, " - "at logical block %llu", - (unsigned long long)OCFS2_I(inode)->ip_blkno, - (unsigned long long)v_blkno); + "at logical cluster %u", + (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos); goto out; } - BUG_ON(p_blkno == 0); + BUG_ON(*phys == 0); + + p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys); + if (!should_zero) + p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1); for(i = 0; i < wc->w_num_pages; i++) { int tmpret; + /* This is the direct io target page. */ + if (wc->w_pages[i] == NULL) { + p_blkno++; + continue; + } + tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc, wc->w_pages[i], cpos, user_pos, user_len, @@ -1706,8 +1241,9 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping, if ((cluster_off + local_len) > osb->s_clustersize) local_len = osb->s_clustersize - cluster_off; - ret = ocfs2_write_cluster(mapping, desc->c_phys, - desc->c_unwritten, + ret = ocfs2_write_cluster(mapping, &desc->c_phys, + desc->c_new, + desc->c_clear_unwritten, desc->c_needs_zero, data_ac, meta_ac, wc, desc->c_cpos, pos, local_len); @@ -1778,6 +1314,66 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb, } /* + * Check if this extent is marked UNWRITTEN by direct io. If so, we need not + * do the zero work, and should not clear UNWRITTEN since it will be cleared + * by the direct io procedure. + * If this is a new extent that was allocated by direct io, we should mark it + * in the ip_unwritten_list. + */
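/*
 * Aside (not part of the patch): ocfs2_unwritten_check(), added below,
 * is also a textbook "drop the lock, allocate, retry" loop:
 * kmalloc(GFP_NOFS) may sleep, so it cannot run under the ip_lock
 * spinlock, and the list must be re-scanned after the lock is
 * reacquired because another writer may have inserted the same cluster
 * in the meantime. Reduced to its skeleton (hypothetical names):
 *
 *	retry:
 *		spin_lock(&lock);
 *		if (find(list, key)) {
 *			spin_unlock(&lock);
 *			kfree(new);		// kfree(NULL) is a no-op
 *			return 0;		// someone beat us to it
 *		}
 *		if (!new) {
 *			spin_unlock(&lock);
 *			new = kmalloc(sizeof(*new), GFP_NOFS); // may sleep
 *			if (!new)
 *				return -ENOMEM;
 *			goto retry;		// re-check under the lock
 *		}
 *		list_add_tail(&new->node, list);
 *		spin_unlock(&lock);
 *		return 0;
 */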
+static int ocfs2_unwritten_check(struct inode *inode, + struct ocfs2_write_ctxt *wc, + struct ocfs2_write_cluster_desc *desc) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_unwritten_extent *ue = NULL, *new = NULL; + int ret = 0; + + if (!desc->c_needs_zero) + return 0; + +retry: + spin_lock(&oi->ip_lock); + /* No need to zero, no matter whether this is a buffered or a direct + * write: whoever is zeroing the cluster does the zero work, and will + * clear unwritten after all cluster io has finished. */ + list_for_each_entry(ue, &oi->ip_unwritten_list, ue_ip_node) { + if (desc->c_cpos == ue->ue_cpos) { + BUG_ON(desc->c_new); + desc->c_needs_zero = 0; + desc->c_clear_unwritten = 0; + goto unlock; + } + } + + if (wc->w_type != OCFS2_WRITE_DIRECT) + goto unlock; + + if (new == NULL) { + spin_unlock(&oi->ip_lock); + new = kmalloc(sizeof(struct ocfs2_unwritten_extent), + GFP_NOFS); + if (new == NULL) { + ret = -ENOMEM; + goto out; + } + goto retry; + } + /* This direct write will do the zeroing. */ + new->ue_cpos = desc->c_cpos; + new->ue_phys = desc->c_phys; + desc->c_clear_unwritten = 0; + list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list); + list_add_tail(&new->ue_node, &wc->w_unwritten_list); + new = NULL; +unlock: + spin_unlock(&oi->ip_lock); +out: + if (new) + kfree(new); + return ret; +} + +/* * Populate each single-cluster write descriptor in the write context * with information about the i/o to be done. * @@ -1852,14 +1448,21 @@ static int ocfs2_populate_write_desc(struct inode *inode, if (phys == 0) { desc->c_new = 1; desc->c_needs_zero = 1; + desc->c_clear_unwritten = 1; *clusters_to_alloc = *clusters_to_alloc + 1; } if (ext_flags & OCFS2_EXT_UNWRITTEN) { - desc->c_unwritten = 1; + desc->c_clear_unwritten = 1; desc->c_needs_zero = 1; } + ret = ocfs2_unwritten_check(inode, wc, desc); + if (ret) { + mlog_errno(ret); + goto out; + } + num_clusters--; } @@ -2022,8 +1625,10 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, if (ret) mlog_errno(ret); - wc->w_first_new_cpos = - ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)); + /* There is no wc if this is called from direct io. */ + if (wc) + wc->w_first_new_cpos = + ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)); return ret; } @@ -2077,9 +1682,8 @@ out: return ret; } -int ocfs2_write_begin_nolock(struct file *filp, - struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, +int ocfs2_write_begin_nolock(struct address_space *mapping, + loff_t pos, unsigned len, ocfs2_write_type_t type, struct page **pagep, void **fsdata, struct buffer_head *di_bh, struct page *mmap_page) { @@ -2096,7 +1700,7 @@ int ocfs2_write_begin_nolock(struct file *filp, int try_free = 1, ret1; try_again: - ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh); + ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, type, di_bh); if (ret) { mlog_errno(ret); return ret; } @@ -2115,14 +1719,17 @@ try_again: } } - if (ocfs2_sparse_alloc(osb)) - ret = ocfs2_zero_tail(inode, di_bh, pos); - else - ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len, - wc); - if (ret) { - mlog_errno(ret); - goto out; + /* Direct io changes i_size late, so do not zero the tail here.
*/ + if (type != OCFS2_WRITE_DIRECT) { + if (ocfs2_sparse_alloc(osb)) + ret = ocfs2_zero_tail(inode, di_bh, pos); + else + ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, + len, wc); + if (ret) { + mlog_errno(ret); + goto out; + } } ret = ocfs2_check_range_for_refcount(inode, pos, len); @@ -2153,7 +1760,7 @@ try_again: (unsigned long long)OCFS2_I(inode)->ip_blkno, (long long)i_size_read(inode), le32_to_cpu(di->i_clusters), - pos, len, flags, mmap_page, + pos, len, type, mmap_page, clusters_to_alloc, extents_to_split); /* @@ -2183,17 +1790,17 @@ try_again: credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list); - - } + } else if (type == OCFS2_WRITE_DIRECT) + /* a direct write need not start a transaction if no extents are allocated. */ + goto success; /* * We have to zero sparse allocated clusters, unwritten extent clusters, * and non-sparse clusters we just extended. For non-sparse writes, * we know zeros will only be needed in the first and/or last cluster. */ - if (clusters_to_alloc || extents_to_split || - (wc->w_clen && (wc->w_desc[0].c_needs_zero || - wc->w_desc[wc->w_clen - 1].c_needs_zero))) + if (wc->w_clen && (wc->w_desc[0].c_needs_zero || + wc->w_desc[wc->w_clen - 1].c_needs_zero)) cluster_of_pages = 1; else cluster_of_pages = 0; @@ -2260,7 +1867,8 @@ try_again: ocfs2_free_alloc_context(meta_ac); success: - *pagep = wc->w_target_page; + if (pagep) + *pagep = wc->w_target_page; *fsdata = wc; return 0; out_quota: @@ -2271,7 +1879,7 @@ out_commit: ocfs2_commit_trans(osb, handle); out: - ocfs2_free_write_ctxt(wc); + ocfs2_free_write_ctxt(inode, wc); if (data_ac) { ocfs2_free_alloc_context(data_ac); @@ -2323,8 +1931,8 @@ static int ocfs2_write_begin(struct file *file, struct address_space *mapping, */ down_write(&OCFS2_I(inode)->ip_alloc_sem); - ret = ocfs2_write_begin_nolock(file, mapping, pos, len, flags, pagep, - fsdata, di_bh, NULL); + ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER, + pagep, fsdata, di_bh, NULL); if (ret) { mlog_errno(ret); goto out_fail; @@ -2381,12 +1989,16 @@ int ocfs2_write_end_nolock(struct address_space *mapping, handle_t *handle = wc->w_handle; struct page *tmppage; - ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - copied = ret; - mlog_errno(ret); - goto out; + BUG_ON(!list_empty(&wc->w_unwritten_list)); + + if (handle) { + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), + wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + copied = ret; + mlog_errno(ret); + goto out; + } } if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { @@ -2394,18 +2006,23 @@ int ocfs2_write_end_nolock(struct address_space *mapping, goto out_write_size; } - if (unlikely(copied < len)) { + if (unlikely(copied < len) && wc->w_target_page) { if (!PageUptodate(wc->w_target_page)) copied = 0; ocfs2_zero_new_buffers(wc->w_target_page, start+copied, start+len); } - flush_dcache_page(wc->w_target_page); + if (wc->w_target_page) + flush_dcache_page(wc->w_target_page); for(i = 0; i < wc->w_num_pages; i++) { tmppage = wc->w_pages[i]; + /* This is the direct I/O target page.
 */ + if (tmppage == NULL) + continue; + if (tmppage == wc->w_target_page) { from = wc->w_target_from; to = wc->w_target_to; @@ -2424,25 +2041,29 @@ int ocfs2_write_end_nolock(struct address_space *mapping, } if (page_has_buffers(tmppage)) { - if (ocfs2_should_order_data(inode)) - ocfs2_jbd2_file_inode(wc->w_handle, inode); + if (handle && ocfs2_should_order_data(inode)) + ocfs2_jbd2_file_inode(handle, inode); block_commit_write(tmppage, from, to); } } out_write_size: + /* Direct I/O does not update i_size here. */ + if (wc->w_type != OCFS2_WRITE_DIRECT) { - pos += copied; - if (pos > i_size_read(inode)) { - i_size_write(inode, pos); - mark_inode_dirty(inode); - } - inode->i_blocks = ocfs2_inode_sector_count(inode); - di->i_size = cpu_to_le64((u64)i_size_read(inode)); - inode->i_mtime = inode->i_ctime = CURRENT_TIME; - di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec); - di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); - ocfs2_update_inode_fsync_trans(handle, inode, 1); - ocfs2_journal_dirty(handle, wc->w_di_bh); + pos += copied; + if (pos > i_size_read(inode)) { + i_size_write(inode, pos); + mark_inode_dirty(inode); + } + inode->i_blocks = ocfs2_inode_sector_count(inode); + di->i_size = cpu_to_le64((u64)i_size_read(inode)); + inode->i_mtime = inode->i_ctime = CURRENT_TIME; + di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec); + di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); + ocfs2_update_inode_fsync_trans(handle, inode, 1); + } + if (handle) + ocfs2_journal_dirty(handle, wc->w_di_bh); out: /* unlock pages before dealloc since it needs acquiring j_trans_barrier @@ -2452,7 +2073,8 @@ out: */ ocfs2_unlock_pages(wc); - ocfs2_commit_trans(osb, handle); + if (handle) + ocfs2_commit_trans(osb, handle); ocfs2_run_deallocs(osb, &wc->w_dealloc); @@ -2477,6 +2099,360 @@ static int ocfs2_write_end(struct file *file, struct address_space *mapping, return ret; } +struct ocfs2_dio_write_ctxt { + struct list_head dw_zero_list; + unsigned dw_zero_count; + int dw_orphaned; + pid_t dw_writer_pid; +}; + +static struct ocfs2_dio_write_ctxt * +ocfs2_dio_alloc_write_ctx(struct buffer_head *bh, int *alloc) +{ + struct ocfs2_dio_write_ctxt *dwc = NULL; + + if (bh->b_private) + return bh->b_private; + + dwc = kmalloc(sizeof(struct ocfs2_dio_write_ctxt), GFP_NOFS); + if (dwc == NULL) + return NULL; + INIT_LIST_HEAD(&dwc->dw_zero_list); + dwc->dw_zero_count = 0; + dwc->dw_orphaned = 0; + dwc->dw_writer_pid = task_pid_nr(current); + bh->b_private = dwc; + *alloc = 1; + + return dwc; +} + +static void ocfs2_dio_free_write_ctx(struct inode *inode, + struct ocfs2_dio_write_ctxt *dwc) +{ + ocfs2_free_unwritten_list(inode, &dwc->dw_zero_list); + kfree(dwc); +} + +/* + * TODO: Make this into a generic get_blocks function. + * + * From do_direct_io in direct-io.c: + * "So what we do is to permit the ->get_blocks function to populate + * bh.b_size with the size of IO which is permitted at this offset and + * this i_blkbits." + * + * This function is called directly from get_more_blocks in direct-io.c.
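To make the quoted contract concrete, here is a minimal sketch of a get_block_t callback (editor's illustration with placeholder mapping logic, not ocfs2 code): the dio core passes the desired I/O size in bh->b_size, and the callback maps what it can contiguously and shrinks b_size to match.

static int demo_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh, int create)
{
	sector_t pblk = iblock;		/* placeholder 1:1 mapping */
	u64 mappable = bh->b_size;	/* placeholder: whole request */

	map_bh(bh, inode->i_sb, pblk);	/* marks bh mapped, sets b_bdev */
	bh->b_size = min_t(u64, bh->b_size, mappable);
	return 0;
}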
+ * + * called like this: dio->get_blocks(dio->inode, fs_startblk, + * fs_count, map_bh, dio->rw == WRITE); + */ +static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_write_ctxt *wc; + struct ocfs2_write_cluster_desc *desc = NULL; + struct ocfs2_dio_write_ctxt *dwc = NULL; + struct buffer_head *di_bh = NULL; + u64 p_blkno; + loff_t pos = iblock << inode->i_sb->s_blocksize_bits; + unsigned len, total_len = bh_result->b_size; + int ret = 0, first_get_block = 0; + + len = osb->s_clustersize - (pos & (osb->s_clustersize - 1)); + len = min(total_len, len); + + mlog(0, "get block of %lu at %llu:%u req %u\n", + inode->i_ino, pos, len, total_len); + + /* + * Because we may need to change the file size in + * ocfs2_dio_end_io_write(), or add the inode to the orphan dir, we + * cannot take the fast path when the file size will change. + */ + if (pos + total_len <= i_size_read(inode)) { + down_read(&oi->ip_alloc_sem); + /* This is the fast path for re-write. */ + ret = ocfs2_get_block(inode, iblock, bh_result, create); + + up_read(&oi->ip_alloc_sem); + + if (buffer_mapped(bh_result) && + !buffer_new(bh_result) && + ret == 0) + goto out; + + /* Clear state set by ocfs2_get_block. */ + bh_result->b_state = 0; + } + + dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block); + if (unlikely(dwc == NULL)) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) > + ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) && + !dwc->dw_orphaned) { + /* + * When we are going to allocate extents beyond the file size, + * add the inode to the orphan dir so we can reclaim that space + * if the system crashes during the write. + */ + ret = ocfs2_add_inode_to_orphan(osb, inode); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + dwc->dw_orphaned = 1; + } + + ret = ocfs2_inode_lock(inode, &di_bh, 1); + if (ret) { + mlog_errno(ret); + goto out; + } + + down_write(&oi->ip_alloc_sem); + + if (first_get_block) { + if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) + ret = ocfs2_zero_tail(inode, di_bh, pos); + else + ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, + total_len, NULL); + if (ret < 0) { + mlog_errno(ret); + goto unlock; + } + } + + ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len, + OCFS2_WRITE_DIRECT, NULL, + (void **)&wc, di_bh, NULL); + if (ret) { + mlog_errno(ret); + goto unlock; + } + + desc = &wc->w_desc[0]; + + p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys); + BUG_ON(p_blkno == 0); + p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1); + + map_bh(bh_result, inode->i_sb, p_blkno); + bh_result->b_size = len; + if (desc->c_needs_zero) + set_buffer_new(bh_result); + + /* end_io may sleep, which must not happen in irq context, so defer + * completion to the dio work queue. */ + set_buffer_defer_completion(bh_result); + + if (!list_empty(&wc->w_unwritten_list)) { + struct ocfs2_unwritten_extent *ue = NULL; + + ue = list_first_entry(&wc->w_unwritten_list, + struct ocfs2_unwritten_extent, + ue_node); + BUG_ON(ue->ue_cpos != desc->c_cpos); + /* The physical address may be 0; fill it in.
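A worked example of the block arithmetic above, with hypothetical geometry: with 4 KiB blocks and 1 MiB clusters, ocfs2_clusters_to_blocks(sb, 1) is 256, so the mask is 255. If desc->c_phys translates to a first physical block of 4096 and iblock is 1000, the offset within the cluster is 1000 & 255 = 232, and the mapped block is 4096 + 232 = 4328.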
*/ + ue->ue_phys = desc->c_phys; + + list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list); + dwc->dw_zero_count++; + } + + ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, NULL, wc); + BUG_ON(ret != len); + ret = 0; +unlock: + up_write(&oi->ip_alloc_sem); + ocfs2_inode_unlock(inode, 1); + brelse(di_bh); +out: + if (ret < 0) + ret = -EIO; + return ret; +} + +static void ocfs2_dio_end_io_write(struct inode *inode, + struct ocfs2_dio_write_ctxt *dwc, + loff_t offset, + ssize_t bytes) +{ + struct ocfs2_cached_dealloc_ctxt dealloc; + struct ocfs2_extent_tree et; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_unwritten_extent *ue = NULL; + struct buffer_head *di_bh = NULL; + struct ocfs2_dinode *di; + struct ocfs2_alloc_context *data_ac = NULL; + struct ocfs2_alloc_context *meta_ac = NULL; + handle_t *handle = NULL; + loff_t end = offset + bytes; + int ret = 0, credits = 0, locked = 0; + + ocfs2_init_dealloc_ctxt(&dealloc); + + /* We clear unwritten flags, remove the orphan entry, and change i_size + * here. If none of these is needed, we can skip all this. */ + if (list_empty(&dwc->dw_zero_list) && + end <= i_size_read(inode) && + !dwc->dw_orphaned) + goto out; + + /* ocfs2_file_write_iter will take i_mutex, so we need not take it + * again if we are running in that context. */ + if (dwc->dw_writer_pid != task_pid_nr(current)) { + mutex_lock(&inode->i_mutex); + locked = 1; + } + + ret = ocfs2_inode_lock(inode, &di_bh, 1); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + down_write(&oi->ip_alloc_sem); + + /* Remove the orphan entry we may have added in ocfs2_dio_get_block(). */ + if (dwc->dw_orphaned) { + BUG_ON(dwc->dw_writer_pid != task_pid_nr(current)); + + end = end > i_size_read(inode) ? end : 0; + + ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, + !!end, end); + if (ret < 0) + mlog_errno(ret); + } + + di = (struct ocfs2_dinode *)di_bh->b_data; + + ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); + + ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count*2, + &data_ac, &meta_ac); + if (ret) { + mlog_errno(ret); + goto unlock; + } + + credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list); + + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto unlock; + } + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto commit; + } + + list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) { + ret = ocfs2_mark_extent_written(inode, &et, handle, + ue->ue_cpos, 1, + ue->ue_phys, + meta_ac, &dealloc); + if (ret < 0) { + mlog_errno(ret); + break; + } + } + + if (end > i_size_read(inode)) { + ret = ocfs2_set_inode_size(handle, inode, di_bh, end); + if (ret < 0) + mlog_errno(ret); + } +commit: + ocfs2_commit_trans(osb, handle); +unlock: + up_write(&oi->ip_alloc_sem); + ocfs2_inode_unlock(inode, 1); + brelse(di_bh); +out: + if (data_ac) + ocfs2_free_alloc_context(data_ac); + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + ocfs2_run_deallocs(osb, &dealloc); + if (locked) + mutex_unlock(&inode->i_mutex); + ocfs2_dio_free_write_ctx(inode, dwc); +} + +/* + * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're + * particularly interested in the aio/dio case. We use the rw_lock DLM lock + * to protect io on one node from truncation on another.
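For orientation (editor's note): the private pointer handed to the completion callback below is the dwc stashed in bh_result->b_private by ocfs2_dio_get_block() above; the dio core carries it from the mapped buffer_head through to the callback. The callback shape in this kernel generation is, from include/linux/fs.h:

typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
			   ssize_t bytes, void *private);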
+ */ +static int ocfs2_dio_end_io(struct kiocb *iocb, + loff_t offset, + ssize_t bytes, + void *private) +{ + struct inode *inode = file_inode(iocb->ki_filp); + int level; + + if (bytes <= 0) + return 0; + + /* this io's submitter should not have unlocked this before we could */ + BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); + + if (private) + ocfs2_dio_end_io_write(inode, private, offset, bytes); + + ocfs2_iocb_clear_rw_locked(iocb); + + level = ocfs2_iocb_rw_locked_level(iocb); + ocfs2_rw_unlock(inode, level); + return 0; +} + +static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + loff_t offset) +{ + struct file *file = iocb->ki_filp; + struct inode *inode = file_inode(file)->i_mapping->host; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + loff_t end = offset + iter->count; + get_block_t *get_block; + + /* + * Fallback to buffered I/O if we see an inode without + * extents. + */ + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) + return 0; + + /* Fallback to buffered I/O if we do not support append dio. */ + if (end > i_size_read(inode) && !ocfs2_supports_append_dio(osb)) + return 0; + + if (iov_iter_rw(iter) == READ) + get_block = ocfs2_get_block; + else + get_block = ocfs2_dio_get_block; + + return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, + iter, offset, get_block, + ocfs2_dio_end_io, NULL, 0); +} + const struct address_space_operations ocfs2_aops = { .readpage = ocfs2_readpage, .readpages = ocfs2_readpages, diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index 24e496d6bdcd..b1c9f28a57b1 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -47,9 +47,14 @@ int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); -int ocfs2_write_begin_nolock(struct file *filp, - struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, +typedef enum { + OCFS2_WRITE_BUFFER = 0, + OCFS2_WRITE_DIRECT, + OCFS2_WRITE_MMAP, +} ocfs2_write_type_t; + +int ocfs2_write_begin_nolock(struct address_space *mapping, + loff_t pos, unsigned len, ocfs2_write_type_t type, struct page **pagep, void **fsdata, struct buffer_head *di_bh, struct page *mmap_page); @@ -79,7 +84,6 @@ static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level) enum ocfs2_iocb_lock_bits { OCFS2_IOCB_RW_LOCK = 0, OCFS2_IOCB_RW_LOCK_LEVEL, - OCFS2_IOCB_UNALIGNED_IO, OCFS2_IOCB_NUM_LOCKS }; @@ -88,11 +92,4 @@ enum ocfs2_iocb_lock_bits { #define ocfs2_iocb_rw_locked_level(iocb) \ test_bit(OCFS2_IOCB_RW_LOCK_LEVEL, (unsigned long *)&iocb->private) -#define ocfs2_iocb_set_unaligned_aio(iocb) \ - set_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private) -#define ocfs2_iocb_clear_unaligned_aio(iocb) \ - clear_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private) -#define ocfs2_iocb_is_unaligned_aio(iocb) \ - test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private) - #endif /* OCFS2_FILE_H */ diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index ef6a2ec494de..bd15929b5f92 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -1444,8 +1444,8 @@ static void o2hb_region_release(struct config_item *item) debugfs_remove(reg->hr_debug_dir); kfree(reg->hr_db_livenodes); kfree(reg->hr_db_regnum); - kfree(reg->hr_debug_elapsed_time); - kfree(reg->hr_debug_pinned); + kfree(reg->hr_db_elapsed_time); + kfree(reg->hr_db_pinned); spin_lock(&o2hb_live_lock); list_del(®->hr_all_item); diff --git a/fs/ocfs2/dlm/dlmconvert.c 
b/fs/ocfs2/dlm/dlmconvert.c index e36d63ff1783..cdeafb4e7ed6 100644 --- a/fs/ocfs2/dlm/dlmconvert.c +++ b/fs/ocfs2/dlm/dlmconvert.c @@ -212,6 +212,12 @@ grant: if (lock->lksb->flags & DLM_LKSB_PUT_LVB) memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN); + /* + * Move the lock to the tail because it may be the only lock which has + * an invalid lvb. + */ + list_move_tail(&lock->list, &res->granted); + status = DLM_NORMAL; *call_ast = 1; goto unlock_exit; @@ -262,6 +268,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, struct dlm_lock *lock, int flags, int type) { enum dlm_status status; + u8 old_owner = res->owner; mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS); @@ -287,6 +294,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, status = DLM_DENIED; goto bail; } + + if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) { + mlog(0, "last convert request returned DLM_RECOVERING, but " + "owner has already queued and sent ast to me. res %.*s, " + "(cookie=%u:%llu, type=%d, conv=%d)\n", + res->lockname.len, res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), + lock->ml.type, lock->ml.convert_type); + status = DLM_NORMAL; + goto bail; + } + res->state |= DLM_LOCK_RES_IN_PROGRESS; /* move lock to local convert queue */ /* do not alter lock refcount. switching lists. */ @@ -316,11 +336,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; lock->convert_pending = 0; - /* if it failed, move it back to granted queue */ + /* if it failed, move it back to granted queue. + * if master returns DLM_NORMAL and then down before sending ast, + * it may have already been moved to granted queue, reset to + * DLM_RECOVERING and retry convert */ if (status != DLM_NORMAL) { if (status != DLM_NOTQUEUED) dlm_error(status); dlm_revert_pending_convert(res, lock); + } else if ((res->state & DLM_LOCK_RES_RECOVERING) || + (old_owner != res->owner)) { + mlog(0, "res %.*s is in recovering or has been recovered.\n", + res->lockname.len, res->lockname.name); + status = DLM_RECOVERING; } bail: spin_unlock(&res->spinlock); diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index cd38488a10fc..f6b313898763 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -2083,7 +2083,6 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, dlm_lock_get(lock); if (lock->convert_pending) { /* move converting lock back to granted */ - BUG_ON(i != DLM_CONVERTING_LIST); mlog(0, "node died with convert pending " "on %.*s. move back to granted list.\n", res->lockname.len, res->lockname.name); diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 7cb38fdca229..c18ab45f8d21 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1381,44 +1381,6 @@ out: return ret; } -/* - * Will look for holes and unwritten extents in the range starting at - * pos for count bytes (inclusive). 
- */ -static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos, - size_t count) -{ - int ret = 0; - unsigned int extent_flags; - u32 cpos, clusters, extent_len, phys_cpos; - struct super_block *sb = inode->i_sb; - - cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits; - clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos; - - while (clusters) { - ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len, - &extent_flags); - if (ret < 0) { - mlog_errno(ret); - goto out; - } - - if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) { - ret = 1; - break; - } - - if (extent_len > clusters) - extent_len = clusters; - - clusters -= extent_len; - cpos += extent_len; - } -out: - return ret; -} - static int ocfs2_write_remove_suid(struct inode *inode) { int ret; @@ -2129,18 +2091,12 @@ out: static int ocfs2_prepare_inode_for_write(struct file *file, loff_t pos, - size_t count, - int appending, - int *direct_io, - int *has_refcount) + size_t count) { int ret = 0, meta_level = 0; struct dentry *dentry = file->f_path.dentry; struct inode *inode = d_inode(dentry); loff_t end; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - int full_coherency = !(osb->s_mount_opt & - OCFS2_MOUNT_COHERENCY_BUFFERED); /* * We start with a read level meta lock and only jump to an ex @@ -2189,10 +2145,6 @@ static int ocfs2_prepare_inode_for_write(struct file *file, pos, count, &meta_level); - if (has_refcount) - *has_refcount = 1; - if (direct_io) - *direct_io = 0; } if (ret < 0) { @@ -2200,67 +2152,12 @@ static int ocfs2_prepare_inode_for_write(struct file *file, goto out_unlock; } - /* - * Skip the O_DIRECT checks if we don't need - * them. - */ - if (!direct_io || !(*direct_io)) - break; - - /* - * There's no sane way to do direct writes to an inode - * with inline data. - */ - if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { - *direct_io = 0; - break; - } - - /* - * Allowing concurrent direct writes means - * i_size changes wouldn't be synchronized, so - * one node could wind up truncating another - * nodes writes. - */ - if (end > i_size_read(inode) && !full_coherency) { - *direct_io = 0; - break; - } - - /* - * Fallback to old way if the feature bit is not set. - */ - if (end > i_size_read(inode) && - !ocfs2_supports_append_dio(osb)) { - *direct_io = 0; - break; - } - - /* - * We don't fill holes during direct io, so - * check for them here. If any are found, the - * caller will have to retake some cluster - * locks and initiate the io as buffered. - */ - ret = ocfs2_check_range_for_holes(inode, pos, count); - if (ret == 1) { - /* - * Fallback to old way if the feature bit is not set. - * Otherwise try dio first and then complete the rest - * request through buffer io. 
- */ - if (!ocfs2_supports_append_dio(osb)) - *direct_io = 0; - ret = 0; - } else if (ret < 0) - mlog_errno(ret); break; } out_unlock: trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno, - pos, appending, count, - direct_io, has_refcount); + pos, count); if (meta_level >= 0) ocfs2_inode_unlock(inode, meta_level); @@ -2272,18 +2169,16 @@ out: static ssize_t ocfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { - int direct_io, appending, rw_level; - int can_do_direct, has_refcount = 0; + int direct_io, rw_level; ssize_t written = 0; ssize_t ret; - size_t count = iov_iter_count(from), orig_count; + size_t count = iov_iter_count(from); struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int full_coherency = !(osb->s_mount_opt & OCFS2_MOUNT_COHERENCY_BUFFERED); - int unaligned_dio = 0; - int dropped_dio = 0; + void *saved_ki_complete = NULL; int append_write = ((iocb->ki_pos + count) >= i_size_read(inode) ? 1 : 0); @@ -2296,12 +2191,10 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb, if (count == 0) return 0; - appending = iocb->ki_flags & IOCB_APPEND ? 1 : 0; direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0; inode_lock(inode); -relock: /* * Concurrent O_DIRECT writes are allowed with * mount_option "coherency=buffered". @@ -2334,7 +2227,6 @@ relock: ocfs2_inode_unlock(inode, 1); } - orig_count = iov_iter_count(from); ret = generic_write_checks(iocb, from); if (ret <= 0) { if (ret) @@ -2343,41 +2235,18 @@ relock: } count = ret; - can_do_direct = direct_io; - ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, appending, - &can_do_direct, &has_refcount); + ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count); if (ret < 0) { mlog_errno(ret); goto out; } - if (direct_io && !is_sync_kiocb(iocb)) - unaligned_dio = ocfs2_is_io_unaligned(inode, count, iocb->ki_pos); - - /* - * We can't complete the direct I/O as requested, fall back to - * buffered I/O. - */ - if (direct_io && !can_do_direct) { - ocfs2_rw_unlock(inode, rw_level); - - rw_level = -1; - - direct_io = 0; - iocb->ki_flags &= ~IOCB_DIRECT; - iov_iter_reexpand(from, orig_count); - dropped_dio = 1; - goto relock; - } - - if (unaligned_dio) { + if (direct_io && !is_sync_kiocb(iocb) && + ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) { /* - * Wait on previous unaligned aio to complete before - * proceeding. + * Make it a sync io if it's an unaligned aio. 
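The "make it sync" trick below works because the direct I/O core decides sync versus async by looking at ki_complete; from include/linux/fs.h in this era:

static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
	return kiocb->ki_complete == NULL;
}

Swapping ki_complete to NULL with xchg() therefore makes __blockdev_direct_IO wait for completion in-line; the saved pointer is restored at out: so later I/O on the same iocb behaves normally again.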
*/ - mutex_lock(&OCFS2_I(inode)->ip_unaligned_aio); - /* Mark the iocb as needing an unlock in ocfs2_dio_end_io */ - ocfs2_iocb_set_unaligned_aio(iocb); + saved_ki_complete = xchg(&iocb->ki_complete, NULL); } /* communicate with ocfs2_dio_end_io */ @@ -2398,14 +2267,13 @@ relock: */ if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) { rw_level = -1; - unaligned_dio = 0; } if (unlikely(written <= 0)) - goto no_sync; + goto out; if (((file->f_flags & O_DSYNC) && !direct_io) || - IS_SYNC(inode) || dropped_dio) { + IS_SYNC(inode)) { ret = filemap_fdatawrite_range(file->f_mapping, iocb->ki_pos - written, iocb->ki_pos - 1); @@ -2424,13 +2292,10 @@ relock: iocb->ki_pos - 1); } -no_sync: - if (unaligned_dio && ocfs2_iocb_is_unaligned_aio(iocb)) { - ocfs2_iocb_clear_unaligned_aio(iocb); - mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio); - } - out: + if (saved_ki_complete) + xchg(&iocb->ki_complete, saved_ki_complete); + if (rw_level != -1) ocfs2_rw_unlock(inode, rw_level); diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index ba495beff1c2..12f4a9e9800f 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -1170,6 +1170,9 @@ static void ocfs2_clear_inode(struct inode *inode) mlog_bug_on_msg(!list_empty(&oi->ip_io_markers), "Clear inode of %llu, inode has io markers\n", (unsigned long long)oi->ip_blkno); + mlog_bug_on_msg(!list_empty(&oi->ip_unwritten_list), + "Clear inode of %llu, inode has unwritten extents\n", + (unsigned long long)oi->ip_blkno); ocfs2_extent_map_trunc(inode, 0); diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index 01635e016b3e..d8f3fc8d2551 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h @@ -43,9 +43,6 @@ struct ocfs2_inode_info /* protects extended attribute changes on this inode */ struct rw_semaphore ip_xattr_sem; - /* Number of outstanding AIO's which are not page aligned */ - struct mutex ip_unaligned_aio; - /* These fields are protected by ip_lock */ spinlock_t ip_lock; u32 ip_open_count; @@ -57,6 +54,9 @@ struct ocfs2_inode_info u32 ip_flags; /* see below */ u32 ip_attr; /* inode attributes */ + /* Record unwritten extents during direct io. */ + struct list_head ip_unwritten_list; + /* protected by recovery_lock. */ struct inode *ip_next_orphan; diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 61b833b721d8..e607419cdfa4 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -231,7 +231,7 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb) /* At this point, we know that no more recovery threads can be * launched, so wait for any recovery completion work to * complete. 
*/ - flush_workqueue(ocfs2_wq); + flush_workqueue(osb->ocfs2_wq); /* * Now that recovery is shut down, and the osb is about to be @@ -1326,7 +1326,7 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, spin_lock(&journal->j_lock); list_add_tail(&item->lri_list, &journal->j_la_cleanups); - queue_work(ocfs2_wq, &journal->j_recovery_work); + queue_work(journal->j_osb->ocfs2_wq, &journal->j_recovery_work); spin_unlock(&journal->j_lock); } @@ -1968,7 +1968,7 @@ static void ocfs2_orphan_scan_work(struct work_struct *work) mutex_lock(&os->os_lock); ocfs2_queue_orphan_scan(osb); if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) - queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work, + queue_delayed_work(osb->ocfs2_wq, &os->os_orphan_scan_work, ocfs2_orphan_scan_timeout()); mutex_unlock(&os->os_lock); } @@ -2008,7 +2008,7 @@ void ocfs2_orphan_scan_start(struct ocfs2_super *osb) atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE); else { atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE); - queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work, + queue_delayed_work(osb->ocfs2_wq, &os->os_orphan_scan_work, ocfs2_orphan_scan_timeout()); } } diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 7d62c43a2c3e..fe0d1f9571bb 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -386,7 +386,7 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) struct ocfs2_dinode *alloc = NULL; cancel_delayed_work(&osb->la_enable_wq); - flush_workqueue(ocfs2_wq); + flush_workqueue(osb->ocfs2_wq); if (osb->local_alloc_state == OCFS2_LA_UNUSED) goto out; @@ -1085,7 +1085,7 @@ static int ocfs2_recalc_la_window(struct ocfs2_super *osb, } else { osb->local_alloc_state = OCFS2_LA_DISABLED; } - queue_delayed_work(ocfs2_wq, &osb->la_enable_wq, + queue_delayed_work(osb->ocfs2_wq, &osb->la_enable_wq, OCFS2_LA_ENABLE_INTERVAL); goto out_unlock; } diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 77ebc2bc1cca..9ea081f4e6e4 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -104,8 +104,8 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, if (page->index == last_index) len = ((size - 1) & ~PAGE_CACHE_MASK) + 1; - ret = ocfs2_write_begin_nolock(file, mapping, pos, len, 0, &locked_page, - &fsdata, di_bh, page); + ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP, + &locked_page, &fsdata, di_bh, page); if (ret) { if (ret != -ENOSPC) mlog_errno(ret); diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 7a0126267847..6cf6538a0651 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -464,6 +464,14 @@ struct ocfs2_super struct ocfs2_refcount_tree *osb_ref_tree_lru; struct mutex system_file_mutex; + + /* + * OCFS2 needs to schedule several different types of work which + * require cluster locking, disk I/O, recovery waits, etc. Since these + * types of work tend to be heavy we avoid using the kernel events + * workqueue and schedule on our own. 
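This series moves that queue from a module-global to a per-superblock member. A minimal editor's sketch of the resulting lifecycle (simplified, hypothetical names; needs linux/workqueue.h):

struct demo_sb_info {
	struct workqueue_struct *wq;
};

static int demo_fill_super(struct demo_sb_info *sbi)
{
	sbi->wq = create_singlethread_workqueue("demo_wq");
	if (!sbi->wq)
		return -ENOMEM;
	return 0;
}

static void demo_put_super(struct demo_sb_info *sbi)
{
	if (sbi->wq) {
		flush_workqueue(sbi->wq);	/* drain anything queued */
		destroy_workqueue(sbi->wq);
	}
}

The payoff is isolation: queued work for one mount can no longer delay, or outlive, another mount's teardown.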
+ */ + struct workqueue_struct *ocfs2_wq; }; #define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info) diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h index 24b7e7f591dc..f8f5fc5e6c05 100644 --- a/fs/ocfs2/ocfs2_trace.h +++ b/fs/ocfs2/ocfs2_trace.h @@ -1450,28 +1450,20 @@ DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_remove_inode_range); TRACE_EVENT(ocfs2_prepare_inode_for_write, TP_PROTO(unsigned long long ino, unsigned long long saved_pos, - int appending, unsigned long count, - int *direct_io, int *has_refcount), - TP_ARGS(ino, saved_pos, appending, count, direct_io, has_refcount), + unsigned long count), + TP_ARGS(ino, saved_pos, count), TP_STRUCT__entry( __field(unsigned long long, ino) __field(unsigned long long, saved_pos) - __field(int, appending) __field(unsigned long, count) - __field(int, direct_io) - __field(int, has_refcount) ), TP_fast_assign( __entry->ino = ino; __entry->saved_pos = saved_pos; - __entry->appending = appending; __entry->count = count; - __entry->direct_io = direct_io ? *direct_io : -1; - __entry->has_refcount = has_refcount ? *has_refcount : -1; ), - TP_printk("%llu %llu %d %lu %d %d", __entry->ino, - __entry->saved_pos, __entry->appending, __entry->count, - __entry->direct_io, __entry->has_refcount) + TP_printk("%llu %llu %lu", __entry->ino, + __entry->saved_pos, __entry->count) ); DEFINE_OCFS2_INT_EVENT(generic_file_aio_read_ret); diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 91bc674203ed..3892f3c079ca 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -726,7 +726,7 @@ static int ocfs2_release_dquot(struct dquot *dquot) dqgrab(dquot); /* First entry on list -> queue work */ if (llist_add(&OCFS2_DQUOT(dquot)->list, &osb->dquot_drop_list)) - queue_work(ocfs2_wq, &osb->dquot_drop_work); + queue_work(osb->ocfs2_wq, &osb->dquot_drop_work); goto out; } status = ocfs2_lock_global_qf(oinfo, 1); diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c index 576b9a04873f..18451e0fab81 100644 --- a/fs/ocfs2/resize.c +++ b/fs/ocfs2/resize.c @@ -196,7 +196,7 @@ static int update_backups(struct inode * inode, u32 clusters, char *data) for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) { blkno = ocfs2_backup_super_blkno(inode->i_sb, i); cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno); - if (cluster > clusters) + if (cluster >= clusters) break; ret = ocfs2_read_blocks_sync(osb, blkno, 1, &backup); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index ccc9386c42c5..7db631e1c8b0 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -80,12 +80,6 @@ static struct kmem_cache *ocfs2_inode_cachep; struct kmem_cache *ocfs2_dquot_cachep; struct kmem_cache *ocfs2_qf_chunk_cachep; -/* OCFS2 needs to schedule several different types of work which - * require cluster locking, disk I/O, recovery waits, etc. Since these - * types of work tend to be heavy we avoid using the kernel events - * workqueue and schedule on our own. 
*/ -struct workqueue_struct *ocfs2_wq = NULL; - static struct dentry *ocfs2_debugfs_root; MODULE_AUTHOR("Oracle"); @@ -1613,33 +1607,25 @@ static int __init ocfs2_init(void) if (status < 0) goto out2; - ocfs2_wq = create_singlethread_workqueue("ocfs2_wq"); - if (!ocfs2_wq) { - status = -ENOMEM; - goto out3; - } - ocfs2_debugfs_root = debugfs_create_dir("ocfs2", NULL); if (!ocfs2_debugfs_root) { status = -ENOMEM; mlog(ML_ERROR, "Unable to create ocfs2 debugfs root.\n"); - goto out4; + goto out3; } ocfs2_set_locking_protocol(); status = register_quota_format(&ocfs2_quota_format); if (status < 0) - goto out4; + goto out3; status = register_filesystem(&ocfs2_fs_type); if (!status) return 0; unregister_quota_format(&ocfs2_quota_format); -out4: - destroy_workqueue(ocfs2_wq); - debugfs_remove(ocfs2_debugfs_root); out3: + debugfs_remove(ocfs2_debugfs_root); ocfs2_free_mem_caches(); out2: exit_ocfs2_uptodate_cache(); @@ -1650,11 +1636,6 @@ out1: static void __exit ocfs2_exit(void) { - if (ocfs2_wq) { - flush_workqueue(ocfs2_wq); - destroy_workqueue(ocfs2_wq); - } - unregister_quota_format(&ocfs2_quota_format); debugfs_remove(ocfs2_debugfs_root); @@ -1745,8 +1726,8 @@ static void ocfs2_inode_init_once(void *data) spin_lock_init(&oi->ip_lock); ocfs2_extent_map_init(&oi->vfs_inode); INIT_LIST_HEAD(&oi->ip_io_markers); + INIT_LIST_HEAD(&oi->ip_unwritten_list); oi->ip_dir_start_lookup = 0; - mutex_init(&oi->ip_unaligned_aio); init_rwsem(&oi->ip_alloc_sem); init_rwsem(&oi->ip_xattr_sem); mutex_init(&oi->ip_io_mutex); @@ -2349,6 +2330,12 @@ static int ocfs2_initialize_super(struct super_block *sb, } cleancache_init_shared_fs(sb); + osb->ocfs2_wq = create_singlethread_workqueue("ocfs2_wq"); + if (!osb->ocfs2_wq) { + status = -ENOMEM; + mlog_errno(status); + } + bail: return status; } @@ -2536,6 +2523,12 @@ static void ocfs2_delete_osb(struct ocfs2_super *osb) { /* This function assumes that the caller has the main osb resource */ + /* ocfs2_initialize_super() has already created this workqueue */ + if (osb->ocfs2_wq) { + flush_workqueue(osb->ocfs2_wq); + destroy_workqueue(osb->ocfs2_wq); + } + ocfs2_free_slot_info(osb); kfree(osb->osb_orphan_wipes); diff --git a/fs/ocfs2/super.h b/fs/ocfs2/super.h index b477d0b1c7b6..b023e4f3d740 100644 --- a/fs/ocfs2/super.h +++ b/fs/ocfs2/super.h @@ -26,8 +26,6 @@ #ifndef OCFS2_SUPER_H #define OCFS2_SUPER_H -extern struct workqueue_struct *ocfs2_wq; - int ocfs2_publish_get_mount_state(struct ocfs2_super *osb, int node_num); diff --git a/fs/orangefs/Kconfig b/fs/orangefs/Kconfig new file mode 100644 index 000000000000..1554c02489de --- /dev/null +++ b/fs/orangefs/Kconfig @@ -0,0 +1,6 @@ +config ORANGEFS_FS + tristate "ORANGEFS (Powered by PVFS) support" + select FS_POSIX_ACL + help + Orange is a parallel file system designed for use on high end + computing (HEC) systems. diff --git a/fs/orangefs/Makefile b/fs/orangefs/Makefile new file mode 100644 index 000000000000..a9d6a968fe6d --- --- /dev/null +++ b/fs/orangefs/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the ORANGEFS filesystem.
+# + +obj-$(CONFIG_ORANGEFS_FS) += orangefs.o + +orangefs-objs := acl.o file.o orangefs-cache.o orangefs-utils.o xattr.o \ + dcache.o inode.o orangefs-sysfs.o orangefs-mod.o super.o \ + devorangefs-req.o namei.o symlink.o dir.o orangefs-bufmap.o \ + orangefs-debugfs.o waitqueue.o diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c new file mode 100644 index 000000000000..03f89dbb2512 --- /dev/null +++ b/fs/orangefs/acl.c @@ -0,0 +1,175 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-bufmap.h" +#include <linux/posix_acl_xattr.h> +#include <linux/fs_struct.h> + +struct posix_acl *orangefs_get_acl(struct inode *inode, int type) +{ + struct posix_acl *acl; + int ret; + char *key = NULL, *value = NULL; + + switch (type) { + case ACL_TYPE_ACCESS: + key = ORANGEFS_XATTR_NAME_ACL_ACCESS; + break; + case ACL_TYPE_DEFAULT: + key = ORANGEFS_XATTR_NAME_ACL_DEFAULT; + break; + default: + gossip_err("orangefs_get_acl: bogus value of type %d\n", type); + return ERR_PTR(-EINVAL); + } + /* + * Rather than incurring a network call just to determine the exact + * length of the attribute, I just allocate a max length to save on + * the network call. Conceivably, we could pass NULL to + * orangefs_inode_getxattr() to probe the length of the value, but + * I don't do that for now. + */ + value = kmalloc(ORANGEFS_MAX_XATTR_VALUELEN, GFP_KERNEL); + if (value == NULL) + return ERR_PTR(-ENOMEM); + + gossip_debug(GOSSIP_ACL_DEBUG, + "inode %pU, key %s, type %d\n", + get_khandle_from_ino(inode), + key, + type); + ret = orangefs_inode_getxattr(inode, + "", + key, + value, + ORANGEFS_MAX_XATTR_VALUELEN); + /* if the key exists, convert it to an in-memory rep */ + if (ret > 0) { + acl = posix_acl_from_xattr(&init_user_ns, value, ret); + } else if (ret == -ENODATA || ret == -ENOSYS) { + acl = NULL; + } else { + gossip_err("inode %pU retrieving acl's failed with error %d\n", + get_khandle_from_ino(inode), + ret); + acl = ERR_PTR(ret); + } + /* kfree(NULL) is safe, so don't worry if value ever got used */ + kfree(value); + return acl; +} + +int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + int error = 0; + void *value = NULL; + size_t size = 0; + const char *name = NULL; + + switch (type) { + case ACL_TYPE_ACCESS: + name = ORANGEFS_XATTR_NAME_ACL_ACCESS; + if (acl) { + umode_t mode = inode->i_mode; + /* + * can we represent this with the traditional file + * mode permission bits? 
+ */ + error = posix_acl_equiv_mode(acl, &mode); + if (error < 0) { + gossip_err("%s: posix_acl_equiv_mode err: %d\n", + __func__, + error); + return error; + } + + if (inode->i_mode != mode) + SetModeFlag(orangefs_inode); + inode->i_mode = mode; + mark_inode_dirty_sync(inode); + if (error == 0) + acl = NULL; + } + break; + case ACL_TYPE_DEFAULT: + name = ORANGEFS_XATTR_NAME_ACL_DEFAULT; + break; + default: + gossip_err("%s: invalid type %d!\n", __func__, type); + return -EINVAL; + } + + gossip_debug(GOSSIP_ACL_DEBUG, + "%s: inode %pU, key %s type %d\n", + __func__, get_khandle_from_ino(inode), + name, + type); + + if (acl) { + size = posix_acl_xattr_size(acl->a_count); + value = kmalloc(size, GFP_KERNEL); + if (!value) + return -ENOMEM; + + error = posix_acl_to_xattr(&init_user_ns, acl, value, size); + if (error < 0) + goto out; + } + + gossip_debug(GOSSIP_ACL_DEBUG, + "%s: name %s, value %p, size %zd, acl %p\n", + __func__, name, value, size, acl); + /* + * Go ahead and set the extended attribute now. NOTE: Suppose acl + * was NULL, then value will be NULL and size will be 0 and that + * will xlate to a removexattr. However, we don't want removexattr + * to complain if the attribute does not exist. + */ + error = orangefs_inode_setxattr(inode, "", name, value, size, 0); + +out: + kfree(value); + if (!error) + set_cached_acl(inode, type, acl); + return error; +} + +int orangefs_init_acl(struct inode *inode, struct inode *dir) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct posix_acl *default_acl, *acl; + umode_t mode = inode->i_mode; + int error = 0; + + ClearModeFlag(orangefs_inode); + + error = posix_acl_create(dir, &mode, &default_acl, &acl); + if (error) + return error; + + if (default_acl) { + error = orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); + posix_acl_release(default_acl); + } + + if (acl) { + if (!error) + error = orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS); + posix_acl_release(acl); + } + + /* If the mode of the inode was changed, then do a forcible ->setattr */ + if (mode != inode->i_mode) { + SetModeFlag(orangefs_inode); + inode->i_mode = mode; + orangefs_flush_inode(inode); + } + + return error; +} diff --git a/fs/orangefs/dcache.c b/fs/orangefs/dcache.c new file mode 100644 index 000000000000..5dfc4f3cfe68 --- /dev/null +++ b/fs/orangefs/dcache.c @@ -0,0 +1,138 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +/* + * Implementation of dentry (directory cache) functions. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" + +/* Returns 1 if dentry can still be trusted, else 0.
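Before the implementation, a compact reminder of the ->d_revalidate contract it must satisfy (editor's sketch; demo_still_valid() is a hypothetical filesystem check, and the real entry point lives in struct dentry_operations):

static bool demo_still_valid(struct dentry *dentry);	/* hypothetical */

static int demo_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	if (flags & LOOKUP_RCU)
		return -ECHILD;		/* we may sleep; retry in ref-walk */
	if (!demo_still_valid(dentry))
		return 0;		/* invalidate the dentry */
	return 1;			/* dentry can be trusted */
}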
*/ +static int orangefs_revalidate_lookup(struct dentry *dentry) +{ + struct dentry *parent_dentry = dget_parent(dentry); + struct inode *parent_inode = parent_dentry->d_inode; + struct orangefs_inode_s *parent = ORANGEFS_I(parent_inode); + struct inode *inode = dentry->d_inode; + struct orangefs_kernel_op_s *new_op; + int ret = 0; + int err = 0; + + gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: attempting lookup.\n", __func__); + + new_op = op_alloc(ORANGEFS_VFS_OP_LOOKUP); + if (!new_op) + goto out_put_parent; + + new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW; + new_op->upcall.req.lookup.parent_refn = parent->refn; + strncpy(new_op->upcall.req.lookup.d_name, + dentry->d_name.name, + ORANGEFS_NAME_MAX); + + gossip_debug(GOSSIP_DCACHE_DEBUG, + "%s:%s:%d interrupt flag [%d]\n", + __FILE__, + __func__, + __LINE__, + get_interruptible_flag(parent_inode)); + + err = service_operation(new_op, "orangefs_lookup", + get_interruptible_flag(parent_inode)); + + /* Positive dentry: reject if error or not the same inode. */ + if (inode) { + if (err) { + gossip_debug(GOSSIP_DCACHE_DEBUG, + "%s:%s:%d lookup failure.\n", + __FILE__, __func__, __LINE__); + goto out_drop; + } + if (!match_handle(new_op->downcall.resp.lookup.refn.khandle, + inode)) { + gossip_debug(GOSSIP_DCACHE_DEBUG, + "%s:%s:%d no match.\n", + __FILE__, __func__, __LINE__); + goto out_drop; + } + + /* Negative dentry: reject if success or error other than ENOENT. */ + } else { + gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: negative dentry.\n", + __func__); + if (!err || err != -ENOENT) { + if (new_op->downcall.status != 0) + gossip_debug(GOSSIP_DCACHE_DEBUG, + "%s:%s:%d lookup failure.\n", + __FILE__, __func__, __LINE__); + goto out_drop; + } + } + + ret = 1; +out_release_op: + op_release(new_op); +out_put_parent: + dput(parent_dentry); + return ret; +out_drop: + gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d revalidate failed\n", + __FILE__, __func__, __LINE__); + goto out_release_op; +} + +/* + * Verify that dentry is valid. + * + * Should return 1 if dentry can still be trusted, else 0. + */ +static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags) +{ + int ret; + + if (flags & LOOKUP_RCU) + return -ECHILD; + + gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: called on dentry %p.\n", + __func__, dentry); + + /* skip root handle lookups. */ + if (dentry->d_inode && is_root_handle(dentry->d_inode)) + return 1; + + /* + * If this passes, the positive dentry still exists or the negative + * dentry still does not exist. + */ + if (!orangefs_revalidate_lookup(dentry)) + return 0; + + /* We do not need to continue with negative dentries. */ + if (!dentry->d_inode) + goto out; + + /* Now we must perform a getattr to validate the inode contents. 
*/ + + ret = orangefs_inode_check_changed(dentry->d_inode); + if (ret < 0) { + gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d getattr failure.\n", + __FILE__, __func__, __LINE__); + return 0; + } + if (ret == 0) + return 0; + +out: + gossip_debug(GOSSIP_DCACHE_DEBUG, + "%s: negative dentry or positive dentry and inode valid.\n", + __func__); + return 1; +} + +const struct dentry_operations orangefs_dentry_operations = { + .d_revalidate = orangefs_d_revalidate, +}; diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c new file mode 100644 index 000000000000..db170beba797 --- /dev/null +++ b/fs/orangefs/devorangefs-req.c @@ -0,0 +1,943 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * Changes by Acxiom Corporation to add protocol version to kernel + * communication, Copyright Acxiom Corporation, 2005. + * + * See COPYING in top-level directory. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-dev-proto.h" +#include "orangefs-bufmap.h" + +#include <linux/debugfs.h> +#include <linux/slab.h> + +/* this file implements the /dev/pvfs2-req device node */ + +static int open_access_count; + +#define DUMP_DEVICE_ERROR() \ +do { \ + gossip_err("*****************************************************\n");\ + gossip_err("ORANGEFS Device Error: You cannot open the device file "); \ + gossip_err("\n/dev/%s more than once. Please make sure that\nthere " \ + "are no ", ORANGEFS_REQDEVICE_NAME); \ + gossip_err("instances of a program using this device\ncurrently " \ + "running. (You must verify this!)\n"); \ + gossip_err("For example, you can use the lsof program as follows:\n");\ + gossip_err("'lsof | grep %s' (run this as root)\n", \ + ORANGEFS_REQDEVICE_NAME); \ + gossip_err(" open_access_count = %d\n", open_access_count); \ + gossip_err("*****************************************************\n");\ +} while (0) + +static int hash_func(__u64 tag, int table_size) +{ + return do_div(tag, (unsigned int)table_size); +} + +static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op) +{ + int index = hash_func(op->tag, hash_table_size); + + list_add_tail(&op->list, &htable_ops_in_progress[index]); +} + +/* + * find the op with this tag and remove it from the in progress + * hash table. 
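A note on hash_func() above, since do_div() regularly trips people up: it is a macro that divides its u64 first argument in place and returns the remainder, so the function hashes tag modulo the table size. Sketch (needs asm/div64.h):

static u32 demo_mod(u64 n, u32 base)
{
	u32 rem = do_div(n, base);	/* n becomes the quotient */
	return rem;			/* the hash bucket */
}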
+ */ +static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag) +{ + struct orangefs_kernel_op_s *op, *next; + int index; + + index = hash_func(tag, hash_table_size); + + spin_lock(&htable_ops_in_progress_lock); + list_for_each_entry_safe(op, + next, + &htable_ops_in_progress[index], + list) { + if (op->tag == tag && !op_state_purged(op) && + !op_state_given_up(op)) { + list_del_init(&op->list); + spin_unlock(&htable_ops_in_progress_lock); + return op; + } + } + + spin_unlock(&htable_ops_in_progress_lock); + return NULL; +} + +/* Marks every mounted filesystem as pending a remount; returns 1 if none were mounted. */ +static int mark_all_pending_mounts(void) +{ + int unmounted = 1; + struct orangefs_sb_info_s *orangefs_sb = NULL; + + spin_lock(&orangefs_superblocks_lock); + list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) { + /* All of these file systems require a remount */ + orangefs_sb->mount_pending = 1; + unmounted = 0; + } + spin_unlock(&orangefs_superblocks_lock); + return unmounted; +} + +/* + * Determine whether a given filesystem needs to be remounted. + * Returns -1 on error, + * 0 if already mounted, + * 1 if a remount is pending + */ +static int fs_mount_pending(__s32 fsid) +{ + int mount_pending = -1; + struct orangefs_sb_info_s *orangefs_sb = NULL; + + spin_lock(&orangefs_superblocks_lock); + list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) { + if (orangefs_sb->fs_id == fsid) { + mount_pending = orangefs_sb->mount_pending; + break; + } + } + spin_unlock(&orangefs_superblocks_lock); + return mount_pending; +} + +static int orangefs_devreq_open(struct inode *inode, struct file *file) +{ + int ret = -EINVAL; + + if (!(file->f_flags & O_NONBLOCK)) { + gossip_err("%s: device cannot be opened in blocking mode\n", + __func__); + goto out; + } + ret = -EACCES; + gossip_debug(GOSSIP_DEV_DEBUG, "client-core: opening device\n"); + mutex_lock(&devreq_mutex); + + if (open_access_count == 0) { + open_access_count = 1; + ret = 0; + } else { + DUMP_DEVICE_ERROR(); + } + mutex_unlock(&devreq_mutex); + +out: + + gossip_debug(GOSSIP_DEV_DEBUG, + "pvfs2-client-core: open device complete (ret = %d)\n", + ret); + return ret; +} + +/* Function for read() callers into the device */ +static ssize_t orangefs_devreq_read(struct file *file, + char __user *buf, + size_t count, loff_t *offset) +{ + struct orangefs_kernel_op_s *op, *temp; + __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION; + static __s32 magic = ORANGEFS_DEVREQ_MAGIC; + struct orangefs_kernel_op_s *cur_op = NULL; + unsigned long ret; + + /* We do not support blocking IO. */ + if (!(file->f_flags & O_NONBLOCK)) { + gossip_err("%s: blocking read from client-core.\n", + __func__); + return -EINVAL; + } + + /* + * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then + * always read with that size buffer. + */ + if (count != MAX_DEV_REQ_UPSIZE) { + gossip_err("orangefs: client-core tried to read wrong size\n"); + return -EINVAL; + } + +restart: + /* Get next op (if any) from top of list. */ + spin_lock(&orangefs_request_list_lock); + list_for_each_entry_safe(op, temp, &orangefs_request_list, list) { + __s32 fsid; + /* This lock is held past the end of the loop when we break. */ + spin_lock(&op->lock); + if (unlikely(op_state_purged(op) || op_state_given_up(op))) { + spin_unlock(&op->lock); + continue; + } + + fsid = fsid_of_op(op); + if (fsid != ORANGEFS_FS_ID_NULL) { + int ret; + /* Skip ops whose filesystem needs to be mounted.
*/ + ret = fs_mount_pending(fsid); + if (ret == 1) { + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: mount pending, skipping op tag " + "%llu %s\n", + __func__, + llu(op->tag), + get_opname_string(op)); + spin_unlock(&op->lock); + continue; + /* + * Skip ops whose filesystem we don't know about unless + * it is being mounted. + */ + /* XXX: is there a better way to detect this? */ + } else if (ret == -1 && + !(op->upcall.type == + ORANGEFS_VFS_OP_FS_MOUNT || + op->upcall.type == + ORANGEFS_VFS_OP_GETATTR)) { + gossip_debug(GOSSIP_DEV_DEBUG, + "orangefs: skipping op tag %llu %s\n", + llu(op->tag), get_opname_string(op)); + gossip_err( + "orangefs: ERROR: fs_mount_pending %d\n", + fsid); + spin_unlock(&op->lock); + continue; + } + } + /* + * Either this op does not pertain to a filesystem, is mounting + * a filesystem, or pertains to a mounted filesystem. Let it + * through. + */ + cur_op = op; + break; + } + + /* + * At this point we either have a valid op and can continue or have not + * found an op and must ask the client to try again later. + */ + if (!cur_op) { + spin_unlock(&orangefs_request_list_lock); + return -EAGAIN; + } + + gossip_debug(GOSSIP_DEV_DEBUG, "%s: reading op tag %llu %s\n", + __func__, + llu(cur_op->tag), + get_opname_string(cur_op)); + + /* + * Such an op should never be on the list in the first place. If so, we + * will abort. + */ + if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) { + gossip_err("orangefs: ERROR: Current op already queued.\n"); + list_del_init(&cur_op->list); + spin_unlock(&cur_op->lock); + spin_unlock(&orangefs_request_list_lock); + return -EAGAIN; + } + + list_del_init(&cur_op->list); + spin_unlock(&orangefs_request_list_lock); + + spin_unlock(&cur_op->lock); + + /* Push the upcall out. */ + ret = copy_to_user(buf, &proto_ver, sizeof(__s32)); + if (ret != 0) + goto error; + ret = copy_to_user(buf+sizeof(__s32), &magic, sizeof(__s32)); + if (ret != 0) + goto error; + ret = copy_to_user(buf+2 * sizeof(__s32), &cur_op->tag, sizeof(__u64)); + if (ret != 0) + goto error; + ret = copy_to_user(buf+2*sizeof(__s32)+sizeof(__u64), &cur_op->upcall, + sizeof(struct orangefs_upcall_s)); + if (ret != 0) + goto error; + + spin_lock(&htable_ops_in_progress_lock); + spin_lock(&cur_op->lock); + if (unlikely(op_state_given_up(cur_op))) { + spin_unlock(&cur_op->lock); + spin_unlock(&htable_ops_in_progress_lock); + complete(&cur_op->waitq); + goto restart; + } + + /* + * Set the operation to be in progress and move it between lists since + * it has been sent to the client. + */ + set_op_state_inprogress(cur_op); + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: 1 op:%s: op_state:%d: process:%s:\n", + __func__, + get_opname_string(cur_op), + cur_op->op_state, + current->comm); + orangefs_devreq_add_op(cur_op); + spin_unlock(&cur_op->lock); + spin_unlock(&htable_ops_in_progress_lock); + + /* The client only asks to read one size buffer. */ + return MAX_DEV_REQ_UPSIZE; +error: + /* + * We were unable to copy the op data to the client. Put the op back in + * list. If client has crashed, the op will be purged later when the + * device is released. 
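The read path above emits four back-to-back fields with copy_to_user(); the driver never defines a struct for them, but the layout userspace sees is equivalent to this illustrative (hypothetical) packed struct:

struct demo_upcall_frame {
	__s32 proto_ver;		/* ORANGEFS_KERNEL_PROTO_VERSION */
	__s32 magic;			/* ORANGEFS_DEVREQ_MAGIC */
	__u64 tag;			/* echoed back by the writev() reply */
	struct orangefs_upcall_s upcall;
} __packed;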
+ */ + gossip_err("orangefs: Failed to copy data to user space\n"); + spin_lock(&orangefs_request_list_lock); + spin_lock(&cur_op->lock); + if (likely(!op_state_given_up(cur_op))) { + set_op_state_waiting(cur_op); + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: 2 op:%s: op_state:%d: process:%s:\n", + __func__, + get_opname_string(cur_op), + cur_op->op_state, + current->comm); + list_add(&cur_op->list, &orangefs_request_list); + spin_unlock(&cur_op->lock); + } else { + spin_unlock(&cur_op->lock); + complete(&cur_op->waitq); + } + spin_unlock(&orangefs_request_list_lock); + return -EFAULT; +} + +/* + * Function for writev() callers into the device. + * + * Userspace should have written: + * - __u32 version + * - __u32 magic + * - __u64 tag + * - struct orangefs_downcall_s + * - trailer buffer (in the case of READDIR operations) + */ +static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb, + struct iov_iter *iter) +{ + ssize_t ret; + struct orangefs_kernel_op_s *op = NULL; + struct { + __u32 version; + __u32 magic; + __u64 tag; + } head; + int total = ret = iov_iter_count(iter); + int n; + int downcall_size = sizeof(struct orangefs_downcall_s); + int head_size = sizeof(head); + + gossip_debug(GOSSIP_DEV_DEBUG, "%s: total:%d: ret:%zd:\n", + __func__, + total, + ret); + + if (total < MAX_DEV_REQ_DOWNSIZE) { + gossip_err("%s: total:%d: must be at least:%u:\n", + __func__, + total, + (unsigned int) MAX_DEV_REQ_DOWNSIZE); + return -EFAULT; + } + + n = copy_from_iter(&head, head_size, iter); + if (n < head_size) { + gossip_err("%s: failed to copy head.\n", __func__); + return -EFAULT; + } + + if (head.version < ORANGEFS_MINIMUM_USERSPACE_VERSION) { + gossip_err("%s: userspace claims version" + "%d, minimum version required: %d.\n", + __func__, + head.version, + ORANGEFS_MINIMUM_USERSPACE_VERSION); + return -EPROTO; + } + + if (head.magic != ORANGEFS_DEVREQ_MAGIC) { + gossip_err("Error: Device magic number does not match.\n"); + return -EPROTO; + } + + /* remove the op from the in progress hash table */ + op = orangefs_devreq_remove_op(head.tag); + if (!op) { + gossip_err("WARNING: No one's waiting for tag %llu\n", + llu(head.tag)); + return ret; + } + + n = copy_from_iter(&op->downcall, downcall_size, iter); + if (n != downcall_size) { + gossip_err("%s: failed to copy downcall.\n", __func__); + goto Efault; + } + + if (op->downcall.status) + goto wakeup; + + /* + * We've successfully peeled off the head and the downcall. + * Something has gone awry if total doesn't equal the + * sum of head_size, downcall_size and trailer_size. + */ + if ((head_size + downcall_size + op->downcall.trailer_size) != total) { + gossip_err("%s: funky write, head_size:%d" + ": downcall_size:%d: trailer_size:%lld" + ": total size:%d:\n", + __func__, + head_size, + downcall_size, + op->downcall.trailer_size, + total); + goto Efault; + } + + /* Only READDIR operations should have trailers. */ + if ((op->downcall.type != ORANGEFS_VFS_OP_READDIR) && + (op->downcall.trailer_size != 0)) { + gossip_err("%s: %x operation with trailer.", + __func__, + op->downcall.type); + goto Efault; + } + + /* READDIR operations should always have trailers. 
*/ + if ((op->downcall.type == ORANGEFS_VFS_OP_READDIR) && + (op->downcall.trailer_size == 0)) { + gossip_err("%s: %x operation with no trailer.", + __func__, + op->downcall.type); + goto Efault; + } + + if (op->downcall.type != ORANGEFS_VFS_OP_READDIR) + goto wakeup; + + op->downcall.trailer_buf = + vmalloc(op->downcall.trailer_size); + if (op->downcall.trailer_buf == NULL) { + gossip_err("%s: failed trailer vmalloc.\n", + __func__); + goto Enomem; + } + memset(op->downcall.trailer_buf, 0, op->downcall.trailer_size); + n = copy_from_iter(op->downcall.trailer_buf, + op->downcall.trailer_size, + iter); + if (n != op->downcall.trailer_size) { + gossip_err("%s: failed to copy trailer.\n", __func__); + vfree(op->downcall.trailer_buf); + goto Efault; + } + +wakeup: + /* + * Return to vfs waitqueue, and back to service_operation + * through wait_for_matching_downcall. + */ + spin_lock(&op->lock); + if (unlikely(op_is_cancel(op))) { + spin_unlock(&op->lock); + put_cancel(op); + } else if (unlikely(op_state_given_up(op))) { + spin_unlock(&op->lock); + complete(&op->waitq); + } else { + set_op_state_serviced(op); + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: op:%s: op_state:%d: process:%s:\n", + __func__, + get_opname_string(op), + op->op_state, + current->comm); + spin_unlock(&op->lock); + } + return ret; + +Efault: + op->downcall.status = -(ORANGEFS_ERROR_BIT | 9); + ret = -EFAULT; + goto wakeup; + +Enomem: + op->downcall.status = -(ORANGEFS_ERROR_BIT | 8); + ret = -ENOMEM; + goto wakeup; +} + +/* + * NOTE: gets called when the last reference to this device is dropped. + * Using the open_access_count variable, we enforce a reference count + * on this file so that it can be opened by only one process at a time. + * the devreq_mutex is used to make sure all i/o has completed + * before we call orangefs_bufmap_finalize, and similar such tricky + * situations + */ +static int orangefs_devreq_release(struct inode *inode, struct file *file) +{ + int unmounted = 0; + + gossip_debug(GOSSIP_DEV_DEBUG, + "%s:pvfs2-client-core: exiting, closing device\n", + __func__); + + mutex_lock(&devreq_mutex); + orangefs_bufmap_finalize(); + + open_access_count = -1; + + unmounted = mark_all_pending_mounts(); + gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n", + (unmounted ? "UNMOUNTED" : "MOUNTED")); + + purge_waiting_ops(); + purge_inprogress_ops(); + + orangefs_bufmap_run_down(); + + gossip_debug(GOSSIP_DEV_DEBUG, + "pvfs2-client-core: device close complete\n"); + open_access_count = 0; + mutex_unlock(&devreq_mutex); + return 0; +} + +int is_daemon_in_service(void) +{ + int in_service; + + /* + * What this function does is checks if client-core is alive + * based on the access count we maintain on the device. + */ + mutex_lock(&devreq_mutex); + in_service = open_access_count == 1 ? 0 : -EIO; + mutex_unlock(&devreq_mutex); + return in_service; +} + +bool __is_daemon_in_service(void) +{ + return open_access_count == 1; +} + +static inline long check_ioctl_command(unsigned int command) +{ + /* Check for valid ioctl codes */ + if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) { + gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? 
[cmd %x, magic %x != %x]\n", + command, + _IOC_TYPE(command), + ORANGEFS_DEV_MAGIC); + return -EINVAL; + } + /* and valid ioctl commands */ + if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) { + gossip_err("Invalid ioctl command number [%d >= %d]\n", + _IOC_NR(command), ORANGEFS_DEV_MAXNR); + return -ENOIOCTLCMD; + } + return 0; +} + +static long dispatch_ioctl_command(unsigned int command, unsigned long arg) +{ + static __s32 magic = ORANGEFS_DEVREQ_MAGIC; + static __s32 max_up_size = MAX_DEV_REQ_UPSIZE; + static __s32 max_down_size = MAX_DEV_REQ_DOWNSIZE; + struct ORANGEFS_dev_map_desc user_desc; + int ret = 0; + struct dev_mask_info_s mask_info = { 0 }; + struct dev_mask2_info_s mask2_info = { 0, 0 }; + int upstream_kmod = 1; + struct orangefs_sb_info_s *orangefs_sb; + + /* mtmoore: add locking here */ + + switch (command) { + case ORANGEFS_DEV_GET_MAGIC: + return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ? + -EIO : + 0); + case ORANGEFS_DEV_GET_MAX_UPSIZE: + return ((put_user(max_up_size, + (__s32 __user *) arg) == -EFAULT) ? + -EIO : + 0); + case ORANGEFS_DEV_GET_MAX_DOWNSIZE: + return ((put_user(max_down_size, + (__s32 __user *) arg) == -EFAULT) ? + -EIO : + 0); + case ORANGEFS_DEV_MAP: + ret = copy_from_user(&user_desc, + (struct ORANGEFS_dev_map_desc __user *) + arg, + sizeof(struct ORANGEFS_dev_map_desc)); + /* WTF -EIO and not -EFAULT? */ + return ret ? -EIO : orangefs_bufmap_initialize(&user_desc); + case ORANGEFS_DEV_REMOUNT_ALL: + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: got ORANGEFS_DEV_REMOUNT_ALL\n", + __func__); + + /* + * remount all mounted orangefs volumes to regain the lost + * dynamic mount tables (if any) -- NOTE: this is done + * without keeping the superblock list locked due to the + * upcall/downcall waiting. also, the request mutex is + * used to ensure that no operations will be serviced until + * all of the remounts are serviced (to avoid ops between + * mounts to fail) + */ + ret = mutex_lock_interruptible(&request_mutex); + if (ret < 0) + return ret; + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: priority remount in progress\n", + __func__); + spin_lock(&orangefs_superblocks_lock); + list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) { + /* + * We have to drop the spinlock, so entries can be + * removed. They can't be freed, though, so we just + * keep the forward pointers and zero the back ones - + * that way we can get to the rest of the list. 
+ */
+ if (!orangefs_sb->list.prev)
+ continue;
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "%s: Remounting SB %p\n",
+ __func__,
+ orangefs_sb);
+
+ spin_unlock(&orangefs_superblocks_lock);
+ ret = orangefs_remount(orangefs_sb);
+ spin_lock(&orangefs_superblocks_lock);
+ if (ret) {
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "SB %p remount failed\n",
+ orangefs_sb);
+ break;
+ }
+ }
+ spin_unlock(&orangefs_superblocks_lock);
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "%s: priority remount complete\n",
+ __func__);
+ mutex_unlock(&request_mutex);
+ return ret;
+
+ case ORANGEFS_DEV_UPSTREAM:
+ ret = copy_to_user((void __user *)arg,
+ &upstream_kmod,
+ sizeof(upstream_kmod));
+
+ if (ret != 0)
+ return -EIO;
+ else
+ return ret;
+
+ case ORANGEFS_DEV_CLIENT_MASK:
+ ret = copy_from_user(&mask2_info,
+ (void __user *)arg,
+ sizeof(struct dev_mask2_info_s));
+
+ if (ret != 0)
+ return -EIO;
+
+ client_debug_mask.mask1 = mask2_info.mask1_value;
+ client_debug_mask.mask2 = mask2_info.mask2_value;
+
+ pr_info("%s: client debug mask has been received "
+ ":%llx: :%llx:\n",
+ __func__,
+ (unsigned long long)client_debug_mask.mask1,
+ (unsigned long long)client_debug_mask.mask2);
+
+ return ret;
+
+ case ORANGEFS_DEV_CLIENT_STRING:
+ ret = copy_from_user(&client_debug_array_string,
+ (void __user *)arg,
+ ORANGEFS_MAX_DEBUG_STRING_LEN);
+ /*
+ * The real client-core tries to ensure that what we
+ * get here is an actual string that fits in this
+ * buffer. We're going to use string functions on it,
+ * so terminate it defensively in case what we
+ * received is garbage.
+ */
+ client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] =
+ '\0';
+
+ if (ret != 0) {
+ pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
+ __func__);
+ return -EIO;
+ }
+
+ pr_info("%s: client debug array string has been received.\n",
+ __func__);
+
+ if (!help_string_initialized) {
+
+ /* Free the "we don't know yet" default string... */
+ kfree(debug_help_string);
+
+ /* build a proper debug help string */
+ if (orangefs_prepare_debugfs_help_string(0)) {
+ gossip_err("%s: no debug help string\n",
+ __func__);
+ return -EIO;
+ }
+
+ /* Replace the boilerplate boot-time debug-help file. */
+ debugfs_remove(help_file_dentry);
+
+ help_file_dentry =
+ debugfs_create_file(
+ ORANGEFS_KMOD_DEBUG_HELP_FILE,
+ 0444,
+ debug_dir,
+ debug_help_string,
+ &debug_help_fops);
+
+ if (!help_file_dentry) {
+ gossip_err("%s: debugfs_create_file failed for"
+ " :%s:!\n",
+ __func__,
+ ORANGEFS_KMOD_DEBUG_HELP_FILE);
+ return -EIO;
+ }
+ }
+
+ debug_mask_to_string(&client_debug_mask, 1);
+
+ debugfs_remove(client_debug_dentry);
+
+ orangefs_client_debug_init();
+
+ help_string_initialized++;
+
+ return ret;
+
+ case ORANGEFS_DEV_DEBUG:
+ ret = copy_from_user(&mask_info,
+ (void __user *)arg,
+ sizeof(mask_info));
+
+ if (ret != 0)
+ return -EIO;
+
+ if (mask_info.mask_type == KERNEL_MASK) {
+ if ((mask_info.mask_value == 0)
+ && (kernel_mask_set_mod_init)) {
+ /*
+ * the kernel debug mask was set when the
+ * kernel module was loaded; don't override
+ * it if the client-core was started without
+ * a value for ORANGEFS_KMODMASK.
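+ *
+ * (Editor's illustrative sketch, not part of this patch: the
+ * request that lands here comes from a user-space call roughly
+ * like
+ *
+ * struct dev_mask_info_s info = {
+ * .mask_type = KERNEL_MASK,
+ * .mask_value = 0,
+ * };
+ * ioctl(devfd, ORANGEFS_DEV_DEBUG, &info);
+ *
+ * where devfd is a hypothetical descriptor for the request
+ * device; a zero mask_value is exactly what this branch
+ * deliberately ignores when the mask was already set at
+ * module load.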
+ */ + return 0; + } + debug_mask_to_string(&mask_info.mask_value, + mask_info.mask_type); + gossip_debug_mask = mask_info.mask_value; + pr_info("%s: kernel debug mask has been modified to " + ":%s: :%llx:\n", + __func__, + kernel_debug_string, + (unsigned long long)gossip_debug_mask); + } else if (mask_info.mask_type == CLIENT_MASK) { + debug_mask_to_string(&mask_info.mask_value, + mask_info.mask_type); + pr_info("%s: client debug mask has been modified to" + ":%s: :%llx:\n", + __func__, + client_debug_string, + llu(mask_info.mask_value)); + } else { + gossip_lerr("Invalid mask type....\n"); + return -EINVAL; + } + + return ret; + + default: + return -ENOIOCTLCMD; + } + return -ENOIOCTLCMD; +} + +static long orangefs_devreq_ioctl(struct file *file, + unsigned int command, unsigned long arg) +{ + long ret; + + /* Check for properly constructed commands */ + ret = check_ioctl_command(command); + if (ret < 0) + return (int)ret; + + return (int)dispatch_ioctl_command(command, arg); +} + +#ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */ + +/* Compat structure for the ORANGEFS_DEV_MAP ioctl */ +struct ORANGEFS_dev_map_desc32 { + compat_uptr_t ptr; + __s32 total_size; + __s32 size; + __s32 count; +}; + +static unsigned long translate_dev_map26(unsigned long args, long *error) +{ + struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args; + /* + * Depending on the architecture, allocate some space on the + * user-call-stack based on our expected layout. + */ + struct ORANGEFS_dev_map_desc __user *p = + compat_alloc_user_space(sizeof(*p)); + compat_uptr_t addr; + + *error = 0; + /* get the ptr from the 32 bit user-space */ + if (get_user(addr, &p32->ptr)) + goto err; + /* try to put that into a 64-bit layout */ + if (put_user(compat_ptr(addr), &p->ptr)) + goto err; + /* copy the remaining fields */ + if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32))) + goto err; + if (copy_in_user(&p->size, &p32->size, sizeof(__s32))) + goto err; + if (copy_in_user(&p->count, &p32->count, sizeof(__s32))) + goto err; + return (unsigned long)p; +err: + *error = -EFAULT; + return 0; +} + +/* + * 32 bit user-space apps' ioctl handlers when kernel modules + * is compiled as a 64 bit one + */ +static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long args) +{ + long ret; + unsigned long arg = args; + + /* Check for properly constructed commands */ + ret = check_ioctl_command(cmd); + if (ret < 0) + return ret; + if (cmd == ORANGEFS_DEV_MAP) { + /* + * convert the arguments to what we expect internally + * in kernel space + */ + arg = translate_dev_map26(args, &ret); + if (ret < 0) { + gossip_err("Could not translate dev map\n"); + return ret; + } + } + /* no other ioctl requires translation */ + return dispatch_ioctl_command(cmd, arg); +} + +#endif /* CONFIG_COMPAT is in .config */ + +/* the assigned character device major number */ +static int orangefs_dev_major; + +/* + * Initialize orangefs device specific state: + * Must be called at module load time only + */ +int orangefs_dev_init(void) +{ + /* register orangefs-req device */ + orangefs_dev_major = register_chrdev(0, + ORANGEFS_REQDEVICE_NAME, + &orangefs_devreq_file_operations); + if (orangefs_dev_major < 0) { + gossip_debug(GOSSIP_DEV_DEBUG, + "Failed to register /dev/%s (error %d)\n", + ORANGEFS_REQDEVICE_NAME, orangefs_dev_major); + return orangefs_dev_major; + } + + gossip_debug(GOSSIP_DEV_DEBUG, + "*** /dev/%s character device registered ***\n", + ORANGEFS_REQDEVICE_NAME); + 
gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n", + ORANGEFS_REQDEVICE_NAME, orangefs_dev_major); + return 0; +} + +void orangefs_dev_cleanup(void) +{ + unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME); + gossip_debug(GOSSIP_DEV_DEBUG, + "*** /dev/%s character device unregistered ***\n", + ORANGEFS_REQDEVICE_NAME); +} + +static unsigned int orangefs_devreq_poll(struct file *file, + struct poll_table_struct *poll_table) +{ + int poll_revent_mask = 0; + + poll_wait(file, &orangefs_request_list_waitq, poll_table); + + if (!list_empty(&orangefs_request_list)) + poll_revent_mask |= POLL_IN; + return poll_revent_mask; +} + +const struct file_operations orangefs_devreq_file_operations = { + .owner = THIS_MODULE, + .read = orangefs_devreq_read, + .write_iter = orangefs_devreq_write_iter, + .open = orangefs_devreq_open, + .release = orangefs_devreq_release, + .unlocked_ioctl = orangefs_devreq_ioctl, + +#ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */ + .compat_ioctl = orangefs_devreq_compat_ioctl, +#endif + .poll = orangefs_devreq_poll +}; diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c new file mode 100644 index 000000000000..f30b6ecacdd1 --- /dev/null +++ b/fs/orangefs/dir.c @@ -0,0 +1,400 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-bufmap.h" + +/* + * decode routine used by kmod to deal with the blob sent from + * userspace for readdirs. The blob contains zero or more of these + * sub-blobs: + * __u32 - represents length of the character string that follows. + * string - between 1 and ORANGEFS_NAME_MAX bytes long. + * padding - (if needed) to cause the __u32 plus the string to be + * eight byte aligned. + * khandle - sizeof(khandle) bytes. + */ +static long decode_dirents(char *ptr, size_t size, + struct orangefs_readdir_response_s *readdir) +{ + int i; + struct orangefs_readdir_response_s *rd = + (struct orangefs_readdir_response_s *) ptr; + char *buf = ptr; + int khandle_size = sizeof(struct orangefs_khandle); + size_t offset = offsetof(struct orangefs_readdir_response_s, + dirent_array); + /* 8 reflects eight byte alignment */ + int smallest_blob = khandle_size + 8; + __u32 len; + int aligned_len; + int sizeof_u32 = sizeof(__u32); + long ret; + + gossip_debug(GOSSIP_DIR_DEBUG, "%s: size:%zu:\n", __func__, size); + + /* size is = offset on empty dirs, > offset on non-empty dirs... 
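+ *
+ * (Editor's illustrative sketch, not part of this patch: for a
+ * name like "foo" a single sub-blob decodes as
+ *
+ * __u32 len;                    len == 3
+ * char name[];                  "foo" plus NUL terminator
+ * char pad[];                   zero padding up to the next
+ *                               eight-byte boundary
+ * struct orangefs_khandle kh;   the entry's handle
+ *
+ * which matches aligned_len = ((sizeof(__u32) + len + 1) + 7) & ~7
+ * in the loop below.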
*/ + if (size < offset) { + gossip_err("%s: size:%zu: offset:%zu:\n", + __func__, + size, + offset); + ret = -EINVAL; + goto out; + } + + if ((size == offset) && (readdir->orangefs_dirent_outcount != 0)) { + gossip_err("%s: size:%zu: dirent_outcount:%d:\n", + __func__, + size, + readdir->orangefs_dirent_outcount); + ret = -EINVAL; + goto out; + } + + readdir->token = rd->token; + readdir->orangefs_dirent_outcount = rd->orangefs_dirent_outcount; + readdir->dirent_array = kcalloc(readdir->orangefs_dirent_outcount, + sizeof(*readdir->dirent_array), + GFP_KERNEL); + if (readdir->dirent_array == NULL) { + gossip_err("%s: kcalloc failed.\n", __func__); + ret = -ENOMEM; + goto out; + } + + buf += offset; + size -= offset; + + for (i = 0; i < readdir->orangefs_dirent_outcount; i++) { + if (size < smallest_blob) { + gossip_err("%s: size:%zu: smallest_blob:%d:\n", + __func__, + size, + smallest_blob); + ret = -EINVAL; + goto free; + } + + len = *(__u32 *)buf; + if ((len < 1) || (len > ORANGEFS_NAME_MAX)) { + gossip_err("%s: len:%d:\n", __func__, len); + ret = -EINVAL; + goto free; + } + + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: size:%zu: len:%d:\n", + __func__, + size, + len); + + readdir->dirent_array[i].d_name = buf + sizeof_u32; + readdir->dirent_array[i].d_length = len; + + /* + * Calculate "aligned" length of this string and its + * associated __u32 descriptor. + */ + aligned_len = ((sizeof_u32 + len + 1) + 7) & ~7; + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: aligned_len:%d:\n", + __func__, + aligned_len); + + /* + * The end of the blob should coincide with the end + * of the last sub-blob. + */ + if (size < aligned_len + khandle_size) { + gossip_err("%s: ran off the end of the blob.\n", + __func__); + ret = -EINVAL; + goto free; + } + size -= aligned_len + khandle_size; + + buf += aligned_len; + + readdir->dirent_array[i].khandle = + *(struct orangefs_khandle *) buf; + buf += khandle_size; + } + ret = buf - ptr; + gossip_debug(GOSSIP_DIR_DEBUG, "%s: returning:%ld:\n", __func__, ret); + goto out; + +free: + kfree(readdir->dirent_array); + readdir->dirent_array = NULL; + +out: + return ret; +} + +/* + * Read directory entries from an instance of an open directory. + */ +static int orangefs_readdir(struct file *file, struct dir_context *ctx) +{ + int ret = 0; + int buffer_index; + /* + * ptoken supports Orangefs' distributed directory logic, added + * in 2.9.2. + */ + __u64 *ptoken = file->private_data; + __u64 pos = 0; + ino_t ino = 0; + struct dentry *dentry = file->f_path.dentry; + struct orangefs_kernel_op_s *new_op = NULL; + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(dentry->d_inode); + int buffer_full = 0; + struct orangefs_readdir_response_s readdir_response; + void *dents_buf; + int i = 0; + int len = 0; + ino_t current_ino = 0; + char *current_entry = NULL; + long bytes_decoded; + + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: ctx->pos:%lld, ptoken = %llu\n", + __func__, + lld(ctx->pos), + llu(*ptoken)); + + pos = (__u64) ctx->pos; + + /* are we done? */ + if (pos == ORANGEFS_READDIR_END) { + gossip_debug(GOSSIP_DIR_DEBUG, + "Skipping to termination path\n"); + return 0; + } + + gossip_debug(GOSSIP_DIR_DEBUG, + "orangefs_readdir called on %s (pos=%llu)\n", + dentry->d_name.name, llu(pos)); + + memset(&readdir_response, 0, sizeof(readdir_response)); + + new_op = op_alloc(ORANGEFS_VFS_OP_READDIR); + if (!new_op) + return -ENOMEM; + + /* + * Only the indices are shared. No memory is actually shared, but the + * mechanism is used. 
+ */ + new_op->uses_shared_memory = 1; + new_op->upcall.req.readdir.refn = orangefs_inode->refn; + new_op->upcall.req.readdir.max_dirent_count = + ORANGEFS_MAX_DIRENT_COUNT_READDIR; + + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: upcall.req.readdir.refn.khandle: %pU\n", + __func__, + &new_op->upcall.req.readdir.refn.khandle); + + new_op->upcall.req.readdir.token = *ptoken; + +get_new_buffer_index: + buffer_index = orangefs_readdir_index_get(); + if (buffer_index < 0) { + ret = buffer_index; + gossip_lerr("orangefs_readdir: orangefs_readdir_index_get() failure (%d)\n", + ret); + goto out_free_op; + } + new_op->upcall.req.readdir.buf_index = buffer_index; + + ret = service_operation(new_op, + "orangefs_readdir", + get_interruptible_flag(dentry->d_inode)); + + gossip_debug(GOSSIP_DIR_DEBUG, + "Readdir downcall status is %d. ret:%d\n", + new_op->downcall.status, + ret); + + orangefs_readdir_index_put(buffer_index); + + if (ret == -EAGAIN && op_state_purged(new_op)) { + /* Client-core indices are invalid after it restarted. */ + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: Getting new buffer_index for retry of readdir..\n", + __func__); + goto get_new_buffer_index; + } + + if (ret == -EIO && op_state_purged(new_op)) { + gossip_err("%s: Client is down. Aborting readdir call.\n", + __func__); + goto out_slot; + } + + if (ret < 0 || new_op->downcall.status != 0) { + gossip_debug(GOSSIP_DIR_DEBUG, + "Readdir request failed. Status:%d\n", + new_op->downcall.status); + if (ret >= 0) + ret = new_op->downcall.status; + goto out_slot; + } + + dents_buf = new_op->downcall.trailer_buf; + if (dents_buf == NULL) { + gossip_err("Invalid NULL buffer in readdir response\n"); + ret = -ENOMEM; + goto out_slot; + } + + bytes_decoded = decode_dirents(dents_buf, new_op->downcall.trailer_size, + &readdir_response); + if (bytes_decoded < 0) { + ret = bytes_decoded; + gossip_err("Could not decode readdir from buffer %d\n", ret); + goto out_vfree; + } + + if (bytes_decoded != new_op->downcall.trailer_size) { + gossip_err("orangefs_readdir: # bytes decoded (%ld) " + "!= trailer size (%ld)\n", + bytes_decoded, + (long)new_op->downcall.trailer_size); + ret = -EINVAL; + goto out_destroy_handle; + } + + /* + * orangefs doesn't actually store dot and dot-dot, but + * we need to have them represented. + */ + if (pos == 0) { + ino = get_ino_from_khandle(dentry->d_inode); + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: calling dir_emit of \".\" with pos = %llu\n", + __func__, + llu(pos)); + ret = dir_emit(ctx, ".", 1, ino, DT_DIR); + pos += 1; + } + + if (pos == 1) { + ino = get_parent_ino_from_dentry(dentry); + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: calling dir_emit of \"..\" with pos = %llu\n", + __func__, + llu(pos)); + ret = dir_emit(ctx, "..", 2, ino, DT_DIR); + pos += 1; + } + + /* + * we stored ORANGEFS_ITERATE_NEXT in ctx->pos last time around + * to prevent "finding" dot and dot-dot on any iteration + * other than the first. 
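+ *
+ * (Editor's summary sketch of the ctx->pos values used here, not
+ * part of this patch:
+ *
+ * 0 or 1                  "." and ".." are emitted first
+ * n                       next entry to emit from this batch
+ * ORANGEFS_ITERATE_NEXT   begin the next batch at entry 0
+ * ORANGEFS_READDIR_END    directory exhausted, return at once
+ * )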
+ */ + if (ctx->pos == ORANGEFS_ITERATE_NEXT) + ctx->pos = 0; + + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: dirent_outcount:%d:\n", + __func__, + readdir_response.orangefs_dirent_outcount); + for (i = ctx->pos; + i < readdir_response.orangefs_dirent_outcount; + i++) { + len = readdir_response.dirent_array[i].d_length; + current_entry = readdir_response.dirent_array[i].d_name; + current_ino = orangefs_khandle_to_ino( + &readdir_response.dirent_array[i].khandle); + + gossip_debug(GOSSIP_DIR_DEBUG, + "calling dir_emit for %s with len %d" + ", ctx->pos %ld\n", + current_entry, + len, + (unsigned long)ctx->pos); + /* + * type is unknown. We don't return object type + * in the dirent_array. This leaves getdents + * clueless about type. + */ + ret = + dir_emit(ctx, current_entry, len, current_ino, DT_UNKNOWN); + if (!ret) + break; + ctx->pos++; + gossip_debug(GOSSIP_DIR_DEBUG, + "%s: ctx->pos:%lld\n", + __func__, + lld(ctx->pos)); + + } + + /* + * we ran all the way through the last batch, set up for + * getting another batch... + */ + if (ret) { + *ptoken = readdir_response.token; + ctx->pos = ORANGEFS_ITERATE_NEXT; + } + + /* + * Did we hit the end of the directory? + */ + if (readdir_response.token == ORANGEFS_READDIR_END && + !buffer_full) { + gossip_debug(GOSSIP_DIR_DEBUG, + "End of dir detected; setting ctx->pos to ORANGEFS_READDIR_END.\n"); + ctx->pos = ORANGEFS_READDIR_END; + } + +out_destroy_handle: + /* kfree(NULL) is safe */ + kfree(readdir_response.dirent_array); +out_vfree: + gossip_debug(GOSSIP_DIR_DEBUG, "vfree %p\n", dents_buf); + vfree(dents_buf); +out_slot: + orangefs_readdir_index_put(buffer_index); +out_free_op: + op_release(new_op); + gossip_debug(GOSSIP_DIR_DEBUG, "orangefs_readdir returning %d\n", ret); + return ret; +} + +static int orangefs_dir_open(struct inode *inode, struct file *file) +{ + __u64 *ptoken; + + file->private_data = kmalloc(sizeof(__u64), GFP_KERNEL); + if (!file->private_data) + return -ENOMEM; + + ptoken = file->private_data; + *ptoken = ORANGEFS_READDIR_START; + return 0; +} + +static int orangefs_dir_release(struct inode *inode, struct file *file) +{ + orangefs_flush_inode(inode); + kfree(file->private_data); + return 0; +} + +/** ORANGEFS implementation of VFS directory operations */ +const struct file_operations orangefs_dir_operations = { + .read = generic_read_dir, + .iterate = orangefs_readdir, + .open = orangefs_dir_open, + .release = orangefs_dir_release, +}; diff --git a/fs/orangefs/downcall.h b/fs/orangefs/downcall.h new file mode 100644 index 000000000000..66b99210f1f9 --- /dev/null +++ b/fs/orangefs/downcall.h @@ -0,0 +1,133 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +/* + * Definitions of downcalls used in Linux kernel module. 
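+ *
+ * (Editor's illustrative sketch, not part of this patch: the resp
+ * union member below is selected by type, so a consumer looks
+ * roughly like
+ *
+ * if (op->downcall.status == 0 &&
+ *     op->downcall.type == ORANGEFS_VFS_OP_LOOKUP)
+ *         refn = op->downcall.resp.lookup.refn;
+ *
+ * as in the lookup and create paths elsewhere in this patch.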
+ */
+
+#ifndef __DOWNCALL_H
+#define __DOWNCALL_H
+
+/*
+ * Sanitized the device-client core interaction
+ * for clean 32-64 bit usage
+ */
+struct orangefs_io_response {
+ __s64 amt_complete;
+};
+
+struct orangefs_lookup_response {
+ struct orangefs_object_kref refn;
+};
+
+struct orangefs_create_response {
+ struct orangefs_object_kref refn;
+};
+
+struct orangefs_symlink_response {
+ struct orangefs_object_kref refn;
+};
+
+struct orangefs_getattr_response {
+ struct ORANGEFS_sys_attr_s attributes;
+ char link_target[ORANGEFS_NAME_MAX];
+};
+
+struct orangefs_mkdir_response {
+ struct orangefs_object_kref refn;
+};
+
+/*
+ * duplication of some system interface structures so that I don't have
+ * to allocate extra memory
+ */
+struct orangefs_dirent {
+ char *d_name;
+ int d_length;
+ struct orangefs_khandle khandle;
+};
+
+struct orangefs_statfs_response {
+ __s64 block_size;
+ __s64 blocks_total;
+ __s64 blocks_avail;
+ __s64 files_total;
+ __s64 files_avail;
+};
+
+struct orangefs_fs_mount_response {
+ __s32 fs_id;
+ __s32 id;
+ struct orangefs_khandle root_khandle;
+};
+
+/* the getxattr response is the attribute value */
+struct orangefs_getxattr_response {
+ __s32 val_sz;
+ __s32 __pad1;
+ char val[ORANGEFS_MAX_XATTR_VALUELEN];
+};
+
+/* the listxattr response is an array of attribute names */
+struct orangefs_listxattr_response {
+ __s32 returned_count;
+ __s32 __pad1;
+ __u64 token;
+ char key[ORANGEFS_MAX_XATTR_LISTLEN * ORANGEFS_MAX_XATTR_NAMELEN];
+ __s32 keylen;
+ __s32 __pad2;
+ __s32 lengths[ORANGEFS_MAX_XATTR_LISTLEN];
+};
+
+struct orangefs_param_response {
+ __s64 value;
+};
+
+#define PERF_COUNT_BUF_SIZE 4096
+struct orangefs_perf_count_response {
+ char buffer[PERF_COUNT_BUF_SIZE];
+};
+
+#define FS_KEY_BUF_SIZE 4096
+struct orangefs_fs_key_response {
+ __s32 fs_keylen;
+ __s32 __pad1;
+ char fs_key[FS_KEY_BUF_SIZE];
+};
+
+struct orangefs_downcall_s {
+ __s32 type;
+ __s32 status;
+ /* currently trailer is used only by readdir */
+ __s64 trailer_size;
+ char *trailer_buf;
+
+ union {
+ struct orangefs_io_response io;
+ struct orangefs_lookup_response lookup;
+ struct orangefs_create_response create;
+ struct orangefs_symlink_response sym;
+ struct orangefs_getattr_response getattr;
+ struct orangefs_mkdir_response mkdir;
+ struct orangefs_statfs_response statfs;
+ struct orangefs_fs_mount_response fs_mount;
+ struct orangefs_getxattr_response getxattr;
+ struct orangefs_listxattr_response listxattr;
+ struct orangefs_param_response param;
+ struct orangefs_perf_count_response perf_count;
+ struct orangefs_fs_key_response fs_key;
+ } resp;
+};
+
+struct orangefs_readdir_response_s {
+ __u64 token;
+ __u64 directory_version;
+ __u32 __pad2;
+ __u32 orangefs_dirent_outcount;
+ struct orangefs_dirent *dirent_array;
+};
+
+#endif /* __DOWNCALL_H */
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
new file mode 100644
index 000000000000..ae92795ed965
--- /dev/null
+++ b/fs/orangefs/file.c
@@ -0,0 +1,717 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ * Linux VFS file operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+
+/*
+ * Copy to client-core's address space from the buffers specified
+ * by the iovec up to total_size bytes.
+ * NOTE: the iovec can contain addresses, which can further be
+ * kernel-space or user-space addresses, or it can contain pointers
+ * to struct pages.
+ */
+static int precopy_buffers(int buffer_index,
+ struct iov_iter *iter,
+ size_t total_size)
+{
+ int ret = 0;
+ /*
+ * copy data from application/kernel by pulling it out
+ * of the iovec.
+ */
+ if (total_size) {
+ ret = orangefs_bufmap_copy_from_iovec(iter,
+ buffer_index,
+ total_size);
+ if (ret < 0)
+ gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
+ __func__,
+ (long)ret);
+ }
+ return ret;
+}
+
+/*
+ * Copy from client-core's address space to the buffers specified
+ * by the iovec up to total_size bytes.
+ * NOTE: the iovec can contain addresses, which can further be
+ * kernel-space or user-space addresses, or it can contain pointers
+ * to struct pages.
+ */
+static int postcopy_buffers(int buffer_index,
+ struct iov_iter *iter,
+ size_t total_size)
+{
+ int ret = 0;
+ /*
+ * copy data to application/kernel by pushing it out to
+ * the iovec. NOTE; target buffers can be addresses or
+ * struct page pointers.
+ */
+ if (total_size) {
+ ret = orangefs_bufmap_copy_to_iovec(iter,
+ buffer_index,
+ total_size);
+ if (ret < 0)
+ gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n",
+ __func__,
+ (long)ret);
+ }
+ return ret;
+}
+
+/*
+ * Post and wait for the I/O upcall to finish
+ */
+static ssize_t wait_for_direct_io(enum ORANGEFS_io_type type, struct inode *inode,
+ loff_t *offset, struct iov_iter *iter,
+ size_t total_size, loff_t readahead_size)
+{
+ struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+ struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
+ struct orangefs_kernel_op_s *new_op = NULL;
+ struct iov_iter saved = *iter;
+ int buffer_index = -1;
+ ssize_t ret;
+
+ new_op = op_alloc(ORANGEFS_VFS_OP_FILE_IO);
+ if (!new_op)
+ return -ENOMEM;
+
+ /* synchronous I/O */
+ new_op->upcall.req.io.readahead_size = readahead_size;
+ new_op->upcall.req.io.io_type = type;
+ new_op->upcall.req.io.refn = orangefs_inode->refn;
+
+populate_shared_memory:
+ /* get a shared buffer index */
+ buffer_index = orangefs_bufmap_get();
+ if (buffer_index < 0) {
+ ret = buffer_index;
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s: orangefs_bufmap_get failure (%zd)\n",
+ __func__, ret);
+ goto out;
+ }
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): GET op %p -> buffer_index %d\n",
+ __func__,
+ handle,
+ new_op,
+ buffer_index);
+
+ new_op->uses_shared_memory = 1;
+ new_op->upcall.req.io.buf_index = buffer_index;
+ new_op->upcall.req.io.count = total_size;
+ new_op->upcall.req.io.offset = *offset;
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): offset: %llu total_size: %zd\n",
+ __func__,
+ handle,
+ llu(*offset),
+ total_size);
+ /*
+ * Stage 1: copy the buffers into client-core's address space
+ * precopy_buffers only pertains to writes.
+ */
+ if (type == ORANGEFS_IO_WRITE) {
+ ret = precopy_buffers(buffer_index,
+ iter,
+ total_size);
+ if (ret < 0)
+ goto out;
+ }
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): Calling post_io_request with tag (%llu)\n",
+ __func__,
+ handle,
+ llu(new_op->tag));
+
+ /* Stage 2: Service the I/O operation */
+ ret = service_operation(new_op,
+ type == ORANGEFS_IO_WRITE ?
+ "file_write" : + "file_read", + get_interruptible_flag(inode)); + + /* + * If service_operation() returns -EAGAIN #and# the operation was + * purged from orangefs_request_list or htable_ops_in_progress, then + * we know that the client was restarted, causing the shared memory + * area to be wiped clean. To restart a write operation in this + * case, we must re-copy the data from the user's iovec to a NEW + * shared memory location. To restart a read operation, we must get + * a new shared memory location. + */ + if (ret == -EAGAIN && op_state_purged(new_op)) { + orangefs_bufmap_put(buffer_index); + buffer_index = -1; + if (type == ORANGEFS_IO_WRITE) + *iter = saved; + gossip_debug(GOSSIP_FILE_DEBUG, + "%s:going to repopulate_shared_memory.\n", + __func__); + goto populate_shared_memory; + } + + if (ret < 0) { + if (ret == -EINTR) { + /* + * We can't return EINTR if any data was written, + * it's not POSIX. It is minimally acceptable + * to give a partial write, the way NFS does. + * + * It would be optimal to return all or nothing, + * but if a userspace write is bigger than + * an IO buffer, and the interrupt occurs + * between buffer writes, that would not be + * possible. + */ + switch (new_op->op_state - OP_VFS_STATE_GIVEN_UP) { + /* + * If the op was waiting when the interrupt + * occurred, then the client-core did not + * trigger the write. + */ + case OP_VFS_STATE_WAITING: + if (*offset == 0) + ret = -EINTR; + else + ret = 0; + break; + /* + * If the op was in progress when the interrupt + * occurred, then the client-core was able to + * trigger the write. + */ + case OP_VFS_STATE_INPROGR: + ret = total_size; + break; + default: + gossip_err("%s: unexpected op state :%d:.\n", + __func__, + new_op->op_state); + ret = 0; + break; + } + gossip_debug(GOSSIP_FILE_DEBUG, + "%s: got EINTR, state:%d: %p\n", + __func__, + new_op->op_state, + new_op); + } else { + gossip_err("%s: error in %s handle %pU, returning %zd\n", + __func__, + type == ORANGEFS_IO_READ ? + "read from" : "write to", + handle, ret); + } + if (orangefs_cancel_op_in_progress(new_op)) + return ret; + + goto out; + } + + /* + * Stage 3: Post copy buffers from client-core's address space + * postcopy_buffers only pertains to reads. + */ + if (type == ORANGEFS_IO_READ) { + ret = postcopy_buffers(buffer_index, + iter, + new_op->downcall.resp.io.amt_complete); + if (ret < 0) + goto out; + } + gossip_debug(GOSSIP_FILE_DEBUG, + "%s(%pU): Amount %s, returned by the sys-io call:%d\n", + __func__, + handle, + type == ORANGEFS_IO_READ ? "read" : "written", + (int)new_op->downcall.resp.io.amt_complete); + + ret = new_op->downcall.resp.io.amt_complete; + +out: + if (buffer_index >= 0) { + orangefs_bufmap_put(buffer_index); + gossip_debug(GOSSIP_FILE_DEBUG, + "%s(%pU): PUT buffer_index %d\n", + __func__, handle, buffer_index); + buffer_index = -1; + } + op_release(new_op); + return ret; +} + +/* + * Common entry point for read/write/readv/writev + * This function will dispatch it to either the direct I/O + * or buffered I/O path depending on the mount options and/or + * augmented/extended metadata attached to the file. + * Note: File extended attributes override any mount options. 
+ */ +static ssize_t do_readv_writev(enum ORANGEFS_io_type type, struct file *file, + loff_t *offset, struct iov_iter *iter) +{ + struct inode *inode = file->f_mapping->host; + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_khandle *handle = &orangefs_inode->refn.khandle; + size_t count = iov_iter_count(iter); + ssize_t total_count = 0; + ssize_t ret = -EINVAL; + + gossip_debug(GOSSIP_FILE_DEBUG, + "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n", + __func__, + handle, + (int)count); + + if (type == ORANGEFS_IO_WRITE) { + gossip_debug(GOSSIP_FILE_DEBUG, + "%s(%pU): proceeding with offset : %llu, " + "size %d\n", + __func__, + handle, + llu(*offset), + (int)count); + } + + if (count == 0) { + ret = 0; + goto out; + } + + while (iov_iter_count(iter)) { + size_t each_count = iov_iter_count(iter); + size_t amt_complete; + + /* how much to transfer in this loop iteration */ + if (each_count > orangefs_bufmap_size_query()) + each_count = orangefs_bufmap_size_query(); + + gossip_debug(GOSSIP_FILE_DEBUG, + "%s(%pU): size of each_count(%d)\n", + __func__, + handle, + (int)each_count); + gossip_debug(GOSSIP_FILE_DEBUG, + "%s(%pU): BEFORE wait_for_io: offset is %d\n", + __func__, + handle, + (int)*offset); + + ret = wait_for_direct_io(type, inode, offset, iter, + each_count, 0); + gossip_debug(GOSSIP_FILE_DEBUG, + "%s(%pU): return from wait_for_io:%d\n", + __func__, + handle, + (int)ret); + + if (ret < 0) + goto out; + + *offset += ret; + total_count += ret; + amt_complete = ret; + + gossip_debug(GOSSIP_FILE_DEBUG, + "%s(%pU): AFTER wait_for_io: offset is %d\n", + __func__, + handle, + (int)*offset); + + /* + * if we got a short I/O operations, + * fall out and return what we got so far + */ + if (amt_complete < each_count) + break; + } /*end while */ + +out: + if (total_count > 0) + ret = total_count; + if (ret > 0) { + if (type == ORANGEFS_IO_READ) { + file_accessed(file); + } else { + SetMtimeFlag(orangefs_inode); + inode->i_mtime = CURRENT_TIME; + mark_inode_dirty_sync(inode); + } + } + + gossip_debug(GOSSIP_FILE_DEBUG, + "%s(%pU): Value(%d) returned.\n", + __func__, + handle, + (int)ret); + + return ret; +} + +/* + * Read data from a specified offset in a file (referenced by inode). + * Data may be placed either in a user or kernel buffer. 
+ */ +ssize_t orangefs_inode_read(struct inode *inode, + struct iov_iter *iter, + loff_t *offset, + loff_t readahead_size) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + size_t count = iov_iter_count(iter); + size_t bufmap_size; + ssize_t ret = -EINVAL; + + g_orangefs_stats.reads++; + + bufmap_size = orangefs_bufmap_size_query(); + if (count > bufmap_size) { + gossip_debug(GOSSIP_FILE_DEBUG, + "%s: count is too large (%zd/%zd)!\n", + __func__, count, bufmap_size); + return -EINVAL; + } + + gossip_debug(GOSSIP_FILE_DEBUG, + "%s(%pU) %zd@%llu\n", + __func__, + &orangefs_inode->refn.khandle, + count, + llu(*offset)); + + ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, offset, iter, + count, readahead_size); + if (ret > 0) + *offset += ret; + + gossip_debug(GOSSIP_FILE_DEBUG, + "%s(%pU): Value(%zd) returned.\n", + __func__, + &orangefs_inode->refn.khandle, + ret); + + return ret; +} + +static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + struct file *file = iocb->ki_filp; + loff_t pos = *(&iocb->ki_pos); + ssize_t rc = 0; + + BUG_ON(iocb->private); + + gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n"); + + g_orangefs_stats.reads++; + + rc = do_readv_writev(ORANGEFS_IO_READ, file, &pos, iter); + iocb->ki_pos = pos; + + return rc; +} + +static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + struct file *file = iocb->ki_filp; + loff_t pos; + ssize_t rc; + + BUG_ON(iocb->private); + + gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n"); + + mutex_lock(&file->f_mapping->host->i_mutex); + + /* Make sure generic_write_checks sees an up to date inode size. */ + if (file->f_flags & O_APPEND) { + rc = orangefs_inode_getattr(file->f_mapping->host, 0, 1); + if (rc == -ESTALE) + rc = -EIO; + if (rc) { + gossip_err("%s: orangefs_inode_getattr failed, " + "rc:%zd:.\n", __func__, rc); + goto out; + } + } + + if (file->f_pos > i_size_read(file->f_mapping->host)) + orangefs_i_size_write(file->f_mapping->host, file->f_pos); + + rc = generic_write_checks(iocb, iter); + + if (rc <= 0) { + gossip_err("%s: generic_write_checks failed, rc:%zd:.\n", + __func__, rc); + goto out; + } + + /* + * if we are appending, generic_write_checks would have updated + * pos to the end of the file, so we will wait till now to set + * pos... + */ + pos = *(&iocb->ki_pos); + + rc = do_readv_writev(ORANGEFS_IO_WRITE, + file, + &pos, + iter); + if (rc < 0) { + gossip_err("%s: do_readv_writev failed, rc:%zd:.\n", + __func__, rc); + goto out; + } + + iocb->ki_pos = pos; + g_orangefs_stats.writes++; + +out: + + mutex_unlock(&file->f_mapping->host->i_mutex); + return rc; +} + +/* + * Perform a miscellaneous operation on a file. 
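+ *
+ * (Editor's illustrative sketch, not part of this patch: the two
+ * commands handled below correspond to user-space calls roughly
+ * like
+ *
+ * long flags = 0;
+ * ioctl(fd, FS_IOC_GETFLAGS, &flags);
+ * flags |= FS_NOATIME_FL;
+ * ioctl(fd, FS_IOC_SETFLAGS, &flags);
+ *
+ * with the flag word stored behind the scenes in the
+ * user.pvfs2.meta_hint extended attribute.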
+ */ +static long orangefs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret = -ENOTTY; + __u64 val = 0; + unsigned long uval; + + gossip_debug(GOSSIP_FILE_DEBUG, + "orangefs_ioctl: called with cmd %d\n", + cmd); + + /* + * we understand some general ioctls on files, such as the immutable + * and append flags + */ + if (cmd == FS_IOC_GETFLAGS) { + val = 0; + ret = orangefs_inode_getxattr(file_inode(file), + ORANGEFS_XATTR_NAME_DEFAULT_PREFIX, + "user.pvfs2.meta_hint", + &val, sizeof(val)); + if (ret < 0 && ret != -ENODATA) + return ret; + else if (ret == -ENODATA) + val = 0; + uval = val; + gossip_debug(GOSSIP_FILE_DEBUG, + "orangefs_ioctl: FS_IOC_GETFLAGS: %llu\n", + (unsigned long long)uval); + return put_user(uval, (int __user *)arg); + } else if (cmd == FS_IOC_SETFLAGS) { + ret = 0; + if (get_user(uval, (int __user *)arg)) + return -EFAULT; + /* + * ORANGEFS_MIRROR_FL is set internally when the mirroring mode + * is turned on for a file. The user is not allowed to turn + * on this bit, but the bit is present if the user first gets + * the flags and then updates the flags with some new + * settings. So, we ignore it in the following edit. bligon. + */ + if ((uval & ~ORANGEFS_MIRROR_FL) & + (~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL))) { + gossip_err("orangefs_ioctl: the FS_IOC_SETFLAGS only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n"); + return -EINVAL; + } + val = uval; + gossip_debug(GOSSIP_FILE_DEBUG, + "orangefs_ioctl: FS_IOC_SETFLAGS: %llu\n", + (unsigned long long)val); + ret = orangefs_inode_setxattr(file_inode(file), + ORANGEFS_XATTR_NAME_DEFAULT_PREFIX, + "user.pvfs2.meta_hint", + &val, sizeof(val), 0); + } + + return ret; +} + +/* + * Memory map a region of a file. + */ +static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma) +{ + gossip_debug(GOSSIP_FILE_DEBUG, + "orangefs_file_mmap: called on %s\n", + (file ? + (char *)file->f_path.dentry->d_name.name : + (char *)"Unknown")); + + /* set the sequential readahead hint */ + vma->vm_flags |= VM_SEQ_READ; + vma->vm_flags &= ~VM_RAND_READ; + + /* Use readonly mmap since we cannot support writable maps. */ + return generic_file_readonly_mmap(file, vma); +} + +#define mapping_nrpages(idata) ((idata)->nrpages) + +/* + * Called to notify the module that there are no more references to + * this file (i.e. no processes have it open). + * + * \note Not called when each file is closed. + */ +static int orangefs_file_release(struct inode *inode, struct file *file) +{ + gossip_debug(GOSSIP_FILE_DEBUG, + "orangefs_file_release: called on %s\n", + file->f_path.dentry->d_name.name); + + orangefs_flush_inode(inode); + + /* + * remove all associated inode pages from the page cache and mmap + * readahead cache (if any); this forces an expensive refresh of + * data for the next caller of mmap (or 'get_block' accesses) + */ + if (file->f_path.dentry->d_inode && + file->f_path.dentry->d_inode->i_mapping && + mapping_nrpages(&file->f_path.dentry->d_inode->i_data)) + truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping, + 0); + return 0; +} + +/* + * Push all data for a specific file onto permanent storage. 
+ */ +static int orangefs_fsync(struct file *file, + loff_t start, + loff_t end, + int datasync) +{ + int ret = -EINVAL; + struct orangefs_inode_s *orangefs_inode = + ORANGEFS_I(file->f_path.dentry->d_inode); + struct orangefs_kernel_op_s *new_op = NULL; + + /* required call */ + filemap_write_and_wait_range(file->f_mapping, start, end); + + new_op = op_alloc(ORANGEFS_VFS_OP_FSYNC); + if (!new_op) + return -ENOMEM; + new_op->upcall.req.fsync.refn = orangefs_inode->refn; + + ret = service_operation(new_op, + "orangefs_fsync", + get_interruptible_flag(file->f_path.dentry->d_inode)); + + gossip_debug(GOSSIP_FILE_DEBUG, + "orangefs_fsync got return value of %d\n", + ret); + + op_release(new_op); + + orangefs_flush_inode(file->f_path.dentry->d_inode); + return ret; +} + +/* + * Change the file pointer position for an instance of an open file. + * + * \note If .llseek is overriden, we must acquire lock as described in + * Documentation/filesystems/Locking. + * + * Future upgrade could support SEEK_DATA and SEEK_HOLE but would + * require much changes to the FS + */ +static loff_t orangefs_file_llseek(struct file *file, loff_t offset, int origin) +{ + int ret = -EINVAL; + struct inode *inode = file_inode(file); + + if (origin == SEEK_END) { + /* + * revalidate the inode's file size. + * NOTE: We are only interested in file size here, + * so we set mask accordingly. + */ + ret = orangefs_inode_getattr(file->f_mapping->host, 0, 1); + if (ret == -ESTALE) + ret = -EIO; + if (ret) { + gossip_debug(GOSSIP_FILE_DEBUG, + "%s:%s:%d calling make bad inode\n", + __FILE__, + __func__, + __LINE__); + return ret; + } + } + + gossip_debug(GOSSIP_FILE_DEBUG, + "orangefs_file_llseek: offset is %ld | origin is %d" + " | inode size is %lu\n", + (long)offset, + origin, + (unsigned long)i_size_read(inode)); + + return generic_file_llseek(file, offset, origin); +} + +/* + * Support local locks (locks that only this kernel knows about) + * if Orangefs was mounted -o local_lock. + */ +static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl) +{ + int rc = -EINVAL; + + if (ORANGEFS_SB(filp->f_inode->i_sb)->flags & ORANGEFS_OPT_LOCAL_LOCK) { + if (cmd == F_GETLK) { + rc = 0; + posix_test_lock(filp, fl); + } else { + rc = posix_lock_file(filp, fl, NULL); + } + } + + return rc; +} + +/** ORANGEFS implementation of VFS file operations */ +const struct file_operations orangefs_file_operations = { + .llseek = orangefs_file_llseek, + .read_iter = orangefs_file_read_iter, + .write_iter = orangefs_file_write_iter, + .lock = orangefs_lock, + .unlocked_ioctl = orangefs_ioctl, + .mmap = orangefs_file_mmap, + .open = generic_file_open, + .release = orangefs_file_release, + .fsync = orangefs_fsync, +}; diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c new file mode 100644 index 000000000000..2382e267b49e --- /dev/null +++ b/fs/orangefs/inode.c @@ -0,0 +1,475 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +/* + * Linux VFS inode operations. 
+ */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-bufmap.h" + +static int read_one_page(struct page *page) +{ + int ret; + int max_block; + ssize_t bytes_read = 0; + struct inode *inode = page->mapping->host; + const __u32 blocksize = PAGE_CACHE_SIZE; /* inode->i_blksize */ + const __u32 blockbits = PAGE_CACHE_SHIFT; /* inode->i_blkbits */ + struct iov_iter to; + struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE}; + + iov_iter_bvec(&to, ITER_BVEC | READ, &bv, 1, PAGE_SIZE); + + gossip_debug(GOSSIP_INODE_DEBUG, + "orangefs_readpage called with page %p\n", + page); + + max_block = ((inode->i_size / blocksize) + 1); + + if (page->index < max_block) { + loff_t blockptr_offset = (((loff_t) page->index) << blockbits); + + bytes_read = orangefs_inode_read(inode, + &to, + &blockptr_offset, + inode->i_size); + } + /* this will only zero remaining unread portions of the page data */ + iov_iter_zero(~0U, &to); + /* takes care of potential aliasing */ + flush_dcache_page(page); + if (bytes_read < 0) { + ret = bytes_read; + SetPageError(page); + } else { + SetPageUptodate(page); + if (PageError(page)) + ClearPageError(page); + ret = 0; + } + /* unlock the page after the ->readpage() routine completes */ + unlock_page(page); + return ret; +} + +static int orangefs_readpage(struct file *file, struct page *page) +{ + return read_one_page(page); +} + +static int orangefs_readpages(struct file *file, + struct address_space *mapping, + struct list_head *pages, + unsigned nr_pages) +{ + int page_idx; + int ret; + + gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_readpages called\n"); + + for (page_idx = 0; page_idx < nr_pages; page_idx++) { + struct page *page; + + page = list_entry(pages->prev, struct page, lru); + list_del(&page->lru); + if (!add_to_page_cache(page, + mapping, + page->index, + GFP_KERNEL)) { + ret = read_one_page(page); + gossip_debug(GOSSIP_INODE_DEBUG, + "failure adding page to cache, read_one_page returned: %d\n", + ret); + } else { + page_cache_release(page); + } + } + BUG_ON(!list_empty(pages)); + return 0; +} + +static void orangefs_invalidatepage(struct page *page, + unsigned int offset, + unsigned int length) +{ + gossip_debug(GOSSIP_INODE_DEBUG, + "orangefs_invalidatepage called on page %p " + "(offset is %u)\n", + page, + offset); + + ClearPageUptodate(page); + ClearPageMappedToDisk(page); + return; + +} + +static int orangefs_releasepage(struct page *page, gfp_t foo) +{ + gossip_debug(GOSSIP_INODE_DEBUG, + "orangefs_releasepage called on page %p\n", + page); + return 0; +} + +/* + * Having a direct_IO entry point in the address_space_operations + * struct causes the kernel to allows us to use O_DIRECT on + * open. Nothing will ever call this thing, but in the future we + * will need to be able to use O_DIRECT on open in order to support + * AIO. Modeled after NFS, they do this too. 
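+ *
+ * (Editor's note, not part of this patch: the generic open path of
+ * that era rejected O_DIRECT when a mapping lacked ->direct_IO,
+ * roughly
+ *
+ * if (!f->f_mapping->a_ops->direct_IO)
+ *         return -EINVAL;
+ *
+ * which is why keeping even a stub entry here would make O_DIRECT
+ * opens succeed.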
+ */ +/* + * static ssize_t orangefs_direct_IO(int rw, + * struct kiocb *iocb, + * struct iov_iter *iter, + * loff_t offset) + *{ + * gossip_debug(GOSSIP_INODE_DEBUG, + * "orangefs_direct_IO: %s\n", + * iocb->ki_filp->f_path.dentry->d_name.name); + * + * return -EINVAL; + *} + */ + +struct backing_dev_info orangefs_backing_dev_info = { + .name = "orangefs", + .ra_pages = 0, + .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, +}; + +/** ORANGEFS2 implementation of address space operations */ +const struct address_space_operations orangefs_address_operations = { + .readpage = orangefs_readpage, + .readpages = orangefs_readpages, + .invalidatepage = orangefs_invalidatepage, + .releasepage = orangefs_releasepage, +/* .direct_IO = orangefs_direct_IO */ +}; + +static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_kernel_op_s *new_op; + loff_t orig_size; + int ret = -EINVAL; + + gossip_debug(GOSSIP_INODE_DEBUG, + "%s: %pU: Handle is %pU | fs_id %d | size is %llu\n", + __func__, + get_khandle_from_ino(inode), + &orangefs_inode->refn.khandle, + orangefs_inode->refn.fs_id, + iattr->ia_size); + + /* Ensure that we have a up to date size, so we know if it changed. */ + ret = orangefs_inode_getattr(inode, 0, 1); + if (ret == -ESTALE) + ret = -EIO; + if (ret) { + gossip_err("%s: orangefs_inode_getattr failed, ret:%d:.\n", + __func__, ret); + return ret; + } + orig_size = i_size_read(inode); + + truncate_setsize(inode, iattr->ia_size); + + new_op = op_alloc(ORANGEFS_VFS_OP_TRUNCATE); + if (!new_op) + return -ENOMEM; + + new_op->upcall.req.truncate.refn = orangefs_inode->refn; + new_op->upcall.req.truncate.size = (__s64) iattr->ia_size; + + ret = service_operation(new_op, __func__, + get_interruptible_flag(inode)); + + /* + * the truncate has no downcall members to retrieve, but + * the status value tells us if it went through ok or not + */ + gossip_debug(GOSSIP_INODE_DEBUG, + "orangefs: orangefs_truncate got return value of %d\n", + ret); + + op_release(new_op); + + if (ret != 0) + return ret; + + /* + * Only change the c/mtime if we are changing the size or we are + * explicitly asked to change it. This handles the semantic difference + * between truncate() and ftruncate() as implemented in the VFS. + * + * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a + * special case where we need to update the times despite not having + * these flags set. For all other operations the VFS set these flags + * explicitly if it wants a timestamp update. + */ + if (orig_size != i_size_read(inode) && + !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) { + iattr->ia_ctime = iattr->ia_mtime = + current_fs_time(inode->i_sb); + iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME; + } + + return ret; +} + +/* + * Change attributes of an object referenced by dentry. 
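+ *
+ * (Editor's summary of the flow below, not part of this patch:
+ *
+ * inode_change_ok()              validate the request
+ * orangefs_setattr_size()        only when ATTR_SIZE changes the size
+ * setattr_copy() + dirty inode   update the in-core inode
+ * orangefs_inode_setattr()       push the attributes to the server
+ * posix_acl_chmod()              only when ATTR_MODE is set
+ * )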
+ */ +int orangefs_setattr(struct dentry *dentry, struct iattr *iattr) +{ + int ret = -EINVAL; + struct inode *inode = dentry->d_inode; + + gossip_debug(GOSSIP_INODE_DEBUG, + "orangefs_setattr: called on %s\n", + dentry->d_name.name); + + ret = inode_change_ok(inode, iattr); + if (ret) + goto out; + + if ((iattr->ia_valid & ATTR_SIZE) && + iattr->ia_size != i_size_read(inode)) { + ret = orangefs_setattr_size(inode, iattr); + if (ret) + goto out; + } + + setattr_copy(inode, iattr); + mark_inode_dirty(inode); + + ret = orangefs_inode_setattr(inode, iattr); + gossip_debug(GOSSIP_INODE_DEBUG, + "orangefs_setattr: inode_setattr returned %d\n", + ret); + + if (!ret && (iattr->ia_valid & ATTR_MODE)) + /* change mod on a file that has ACLs */ + ret = posix_acl_chmod(inode, inode->i_mode); + +out: + gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: returning %d\n", ret); + return ret; +} + +/* + * Obtain attributes of an object given a dentry + */ +int orangefs_getattr(struct vfsmount *mnt, + struct dentry *dentry, + struct kstat *kstat) +{ + int ret = -ENOENT; + struct inode *inode = dentry->d_inode; + struct orangefs_inode_s *orangefs_inode = NULL; + + gossip_debug(GOSSIP_INODE_DEBUG, + "orangefs_getattr: called on %s\n", + dentry->d_name.name); + + ret = orangefs_inode_getattr(inode, 0, 1); + if (ret == 0) { + generic_fillattr(inode, kstat); + + /* override block size reported to stat */ + orangefs_inode = ORANGEFS_I(inode); + kstat->blksize = orangefs_inode->blksize; + } + return ret; +} + +int orangefs_permission(struct inode *inode, int mask) +{ + int ret; + + if (mask & MAY_NOT_BLOCK) + return -ECHILD; + + gossip_debug(GOSSIP_INODE_DEBUG, "%s: refreshing\n", __func__); + + /* Make sure the permission (and other common attrs) are up to date. */ + ret = orangefs_inode_getattr(inode, 0, 0); + if (ret < 0) + return ret; + + return generic_permission(inode, mask); +} + +/* ORANGEDS2 implementation of VFS inode operations for files */ +struct inode_operations orangefs_file_inode_operations = { + .get_acl = orangefs_get_acl, + .set_acl = orangefs_set_acl, + .setattr = orangefs_setattr, + .getattr = orangefs_getattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .listxattr = orangefs_listxattr, + .removexattr = generic_removexattr, + .permission = orangefs_permission, +}; + +static int orangefs_init_iops(struct inode *inode) +{ + inode->i_mapping->a_ops = &orangefs_address_operations; + + switch (inode->i_mode & S_IFMT) { + case S_IFREG: + inode->i_op = &orangefs_file_inode_operations; + inode->i_fop = &orangefs_file_operations; + inode->i_blkbits = PAGE_CACHE_SHIFT; + break; + case S_IFLNK: + inode->i_op = &orangefs_symlink_inode_operations; + break; + case S_IFDIR: + inode->i_op = &orangefs_dir_inode_operations; + inode->i_fop = &orangefs_dir_operations; + break; + default: + gossip_debug(GOSSIP_INODE_DEBUG, + "%s: unsupported mode\n", + __func__); + return -EINVAL; + } + + return 0; +} + +/* + * Given a ORANGEFS object identifier (fsid, handle), convert it into a ino_t type + * that will be used as a hash-index from where the handle will + * be searched for in the VFS hash table of inodes. + */ +static inline ino_t orangefs_handle_hash(struct orangefs_object_kref *ref) +{ + if (!ref) + return 0; + return orangefs_khandle_to_ino(&(ref->khandle)); +} + +/* + * Called to set up an inode from iget5_locked. 
+ */ +static int orangefs_set_inode(struct inode *inode, void *data) +{ + struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data; + ORANGEFS_I(inode)->refn.fs_id = ref->fs_id; + ORANGEFS_I(inode)->refn.khandle = ref->khandle; + return 0; +} + +/* + * Called to determine if handles match. + */ +static int orangefs_test_inode(struct inode *inode, void *data) +{ + struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data; + struct orangefs_inode_s *orangefs_inode = NULL; + + orangefs_inode = ORANGEFS_I(inode); + return (!ORANGEFS_khandle_cmp(&(orangefs_inode->refn.khandle), &(ref->khandle)) + && orangefs_inode->refn.fs_id == ref->fs_id); +} + +/* + * Front-end to lookup the inode-cache maintained by the VFS using the ORANGEFS + * file handle. + * + * @sb: the file system super block instance. + * @ref: The ORANGEFS object for which we are trying to locate an inode structure. + */ +struct inode *orangefs_iget(struct super_block *sb, struct orangefs_object_kref *ref) +{ + struct inode *inode = NULL; + unsigned long hash; + int error; + + hash = orangefs_handle_hash(ref); + inode = iget5_locked(sb, hash, orangefs_test_inode, orangefs_set_inode, ref); + if (!inode || !(inode->i_state & I_NEW)) + return inode; + + error = orangefs_inode_getattr(inode, 1, 0); + if (error) { + iget_failed(inode); + return ERR_PTR(error); + } + + inode->i_ino = hash; /* needed for stat etc */ + orangefs_init_iops(inode); + unlock_new_inode(inode); + + gossip_debug(GOSSIP_INODE_DEBUG, + "iget handle %pU, fsid %d hash %ld i_ino %lu\n", + &ref->khandle, + ref->fs_id, + hash, + inode->i_ino); + + return inode; +} + +/* + * Allocate an inode for a newly created file and insert it into the inode hash. + */ +struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir, + int mode, dev_t dev, struct orangefs_object_kref *ref) +{ + unsigned long hash = orangefs_handle_hash(ref); + struct inode *inode; + int error; + + gossip_debug(GOSSIP_INODE_DEBUG, + "%s:(sb is %p | MAJOR(dev)=%u | MINOR(dev)=%u mode=%o)\n", + __func__, + sb, + MAJOR(dev), + MINOR(dev), + mode); + + inode = new_inode(sb); + if (!inode) + return NULL; + + orangefs_set_inode(inode, ref); + inode->i_ino = hash; /* needed for stat etc */ + + error = orangefs_inode_getattr(inode, 1, 0); + if (error) + goto out_iput; + + orangefs_init_iops(inode); + + inode->i_mode = mode; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + inode->i_size = PAGE_CACHE_SIZE; + inode->i_rdev = dev; + + error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref); + if (error < 0) + goto out_iput; + + gossip_debug(GOSSIP_INODE_DEBUG, + "Initializing ACL's for inode %pU\n", + get_khandle_from_ino(inode)); + orangefs_init_acl(inode, dir); + return inode; + +out_iput: + iput(inode); + return ERR_PTR(error); +} diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c new file mode 100644 index 000000000000..5a60c508af4e --- /dev/null +++ b/fs/orangefs/namei.c @@ -0,0 +1,462 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +/* + * Linux VFS namei operations. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" + +/* + * Get a newly allocated inode to go with a negative dentry. 
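+ *
+ * (Editor's summary of the pattern used by create, symlink and
+ * mkdir below, not part of this patch:
+ *
+ * new_op = op_alloc(ORANGEFS_VFS_OP_CREATE);
+ * fill_default_sys_attrs(...);           describe the new object
+ * service_operation(new_op, ...);        upcall to the client-core
+ * inode = orangefs_new_inode(...);       built from the downcall refn
+ * d_instantiate(dentry, inode);          turn the dentry positive
+ * unlock_new_inode(inode);
+ *
+ * with the parent's mtime/ctime dirtied on success.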
+ */ +static int orangefs_create(struct inode *dir, + struct dentry *dentry, + umode_t mode, + bool exclusive) +{ + struct orangefs_inode_s *parent = ORANGEFS_I(dir); + struct orangefs_kernel_op_s *new_op; + struct inode *inode; + int ret; + + gossip_debug(GOSSIP_NAME_DEBUG, "%s: %s\n", + __func__, + dentry->d_name.name); + + new_op = op_alloc(ORANGEFS_VFS_OP_CREATE); + if (!new_op) + return -ENOMEM; + + new_op->upcall.req.create.parent_refn = parent->refn; + + fill_default_sys_attrs(new_op->upcall.req.create.attributes, + ORANGEFS_TYPE_METAFILE, mode); + + strncpy(new_op->upcall.req.create.d_name, + dentry->d_name.name, ORANGEFS_NAME_MAX); + + ret = service_operation(new_op, __func__, get_interruptible_flag(dir)); + + gossip_debug(GOSSIP_NAME_DEBUG, + "%s: %s: handle:%pU: fsid:%d: new_op:%p: ret:%d:\n", + __func__, + dentry->d_name.name, + &new_op->downcall.resp.create.refn.khandle, + new_op->downcall.resp.create.refn.fs_id, + new_op, + ret); + + if (ret < 0) + goto out; + + inode = orangefs_new_inode(dir->i_sb, dir, S_IFREG | mode, 0, + &new_op->downcall.resp.create.refn); + if (IS_ERR(inode)) { + gossip_err("%s: Failed to allocate inode for file :%s:\n", + __func__, + dentry->d_name.name); + ret = PTR_ERR(inode); + goto out; + } + + gossip_debug(GOSSIP_NAME_DEBUG, + "%s: Assigned inode :%pU: for file :%s:\n", + __func__, + get_khandle_from_ino(inode), + dentry->d_name.name); + + d_instantiate(dentry, inode); + unlock_new_inode(inode); + + gossip_debug(GOSSIP_NAME_DEBUG, + "%s: dentry instantiated for %s\n", + __func__, + dentry->d_name.name); + + SetMtimeFlag(parent); + dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb); + mark_inode_dirty_sync(dir); + ret = 0; +out: + op_release(new_op); + gossip_debug(GOSSIP_NAME_DEBUG, + "%s: %s: returning %d\n", + __func__, + dentry->d_name.name, + ret); + return ret; +} + +/* + * Attempt to resolve an object name (dentry->d_name), parent handle, and + * fsid into a handle for the object. + */ +static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry, + unsigned int flags) +{ + struct orangefs_inode_s *parent = ORANGEFS_I(dir); + struct orangefs_kernel_op_s *new_op; + struct inode *inode; + struct dentry *res; + int ret = -EINVAL; + + /* + * in theory we could skip a lookup here (if the intent is to + * create) in order to avoid a potentially failed lookup, but + * leaving it in can skip a valid lookup and try to create a file + * that already exists (e.g. 
the vfs already handles checking for + * -EEXIST on O_EXCL opens, which is broken if we skip this lookup + * in the create path) + */ + gossip_debug(GOSSIP_NAME_DEBUG, "%s called on %s\n", + __func__, dentry->d_name.name); + + if (dentry->d_name.len > (ORANGEFS_NAME_MAX - 1)) + return ERR_PTR(-ENAMETOOLONG); + + new_op = op_alloc(ORANGEFS_VFS_OP_LOOKUP); + if (!new_op) + return ERR_PTR(-ENOMEM); + + new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW; + + gossip_debug(GOSSIP_NAME_DEBUG, "%s:%s:%d using parent %pU\n", + __FILE__, + __func__, + __LINE__, + &parent->refn.khandle); + new_op->upcall.req.lookup.parent_refn = parent->refn; + + strncpy(new_op->upcall.req.lookup.d_name, dentry->d_name.name, + ORANGEFS_NAME_MAX); + + gossip_debug(GOSSIP_NAME_DEBUG, + "%s: doing lookup on %s under %pU,%d\n", + __func__, + new_op->upcall.req.lookup.d_name, + &new_op->upcall.req.lookup.parent_refn.khandle, + new_op->upcall.req.lookup.parent_refn.fs_id); + + ret = service_operation(new_op, __func__, get_interruptible_flag(dir)); + + gossip_debug(GOSSIP_NAME_DEBUG, + "Lookup Got %pU, fsid %d (ret=%d)\n", + &new_op->downcall.resp.lookup.refn.khandle, + new_op->downcall.resp.lookup.refn.fs_id, + ret); + + if (ret < 0) { + if (ret == -ENOENT) { + /* + * if no inode was found, add a negative dentry to + * dcache anyway; if we don't, we don't hold expected + * lookup semantics and we most noticeably break + * during directory renames. + * + * however, if the operation failed or exited, do not + * add the dentry (e.g. in the case that a touch is + * issued on a file that already exists that was + * interrupted during this lookup -- no need to add + * another negative dentry for an existing file) + */ + + gossip_debug(GOSSIP_NAME_DEBUG, + "orangefs_lookup: Adding *negative* dentry " + "%p for %s\n", + dentry, + dentry->d_name.name); + + d_add(dentry, NULL); + res = NULL; + goto out; + } + + /* must be a non-recoverable error */ + res = ERR_PTR(ret); + goto out; + } + + inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn); + if (IS_ERR(inode)) { + gossip_debug(GOSSIP_NAME_DEBUG, + "error %ld from iget\n", PTR_ERR(inode)); + res = ERR_CAST(inode); + goto out; + } + + gossip_debug(GOSSIP_NAME_DEBUG, + "%s:%s:%d " + "Found good inode [%lu] with count [%d]\n", + __FILE__, + __func__, + __LINE__, + inode->i_ino, + (int)atomic_read(&inode->i_count)); + + /* update dentry/inode pair into dcache */ + res = d_splice_alias(inode, dentry); + + gossip_debug(GOSSIP_NAME_DEBUG, + "Lookup success (inode ct = %d)\n", + (int)atomic_read(&inode->i_count)); +out: + op_release(new_op); + return res; +} + +/* return 0 on success; non-zero otherwise */ +static int orangefs_unlink(struct inode *dir, struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + struct orangefs_inode_s *parent = ORANGEFS_I(dir); + struct orangefs_kernel_op_s *new_op; + int ret; + + gossip_debug(GOSSIP_NAME_DEBUG, + "%s: called on %s\n" + " (inode %pU): Parent is %pU | fs_id %d\n", + __func__, + dentry->d_name.name, + get_khandle_from_ino(inode), + &parent->refn.khandle, + parent->refn.fs_id); + + new_op = op_alloc(ORANGEFS_VFS_OP_REMOVE); + if (!new_op) + return -ENOMEM; + + new_op->upcall.req.remove.parent_refn = parent->refn; + strncpy(new_op->upcall.req.remove.d_name, dentry->d_name.name, + ORANGEFS_NAME_MAX); + + ret = service_operation(new_op, "orangefs_unlink", + get_interruptible_flag(inode)); + + gossip_debug(GOSSIP_NAME_DEBUG, + "%s: service_operation returned:%d:\n", + __func__, + ret); + + 
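+ /*
+ * Whether or not the remove succeeded server-side, the op is done;
+ * release it before updating the local nlink count and timestamps.
+ */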
+ op_release(new_op);
+
+ if (!ret) {
+ drop_nlink(inode);
+
+ SetMtimeFlag(parent);
+ dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+ mark_inode_dirty_sync(dir);
+ }
+ return ret;
+}
+
+static int orangefs_symlink(struct inode *dir,
+ struct dentry *dentry,
+ const char *symname)
+{
+ struct orangefs_inode_s *parent = ORANGEFS_I(dir);
+ struct orangefs_kernel_op_s *new_op;
+ struct inode *inode;
+ int mode = 0755;
+ int ret;
+
+ gossip_debug(GOSSIP_NAME_DEBUG, "%s: called\n", __func__);
+
+ if (!symname)
+ return -EINVAL;
+
+ if (strlen(symname) + 1 > ORANGEFS_NAME_MAX)
+ return -ENAMETOOLONG;
+
+ new_op = op_alloc(ORANGEFS_VFS_OP_SYMLINK);
+ if (!new_op)
+ return -ENOMEM;
+
+ new_op->upcall.req.sym.parent_refn = parent->refn;
+
+ fill_default_sys_attrs(new_op->upcall.req.sym.attributes,
+ ORANGEFS_TYPE_SYMLINK,
+ mode);
+
+ strncpy(new_op->upcall.req.sym.entry_name,
+ dentry->d_name.name,
+ ORANGEFS_NAME_MAX);
+ strncpy(new_op->upcall.req.sym.target, symname, ORANGEFS_NAME_MAX);
+
+ ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
+
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "Symlink Got ORANGEFS handle %pU on fsid %d (ret=%d)\n",
+ &new_op->downcall.resp.sym.refn.khandle,
+ new_op->downcall.resp.sym.refn.fs_id, ret);
+
+ if (ret < 0) {
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "%s: failed with error code %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ inode = orangefs_new_inode(dir->i_sb, dir, S_IFLNK | mode, 0,
+ &new_op->downcall.resp.sym.refn);
+ if (IS_ERR(inode)) {
+ gossip_err
+ ("*** Failed to allocate orangefs symlink inode\n");
+ ret = PTR_ERR(inode);
+ goto out;
+ }
+
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "Assigned symlink inode new number of %pU\n",
+ get_khandle_from_ino(inode));
+
+ d_instantiate(dentry, inode);
+ unlock_new_inode(inode);
+
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "Inode (Symlink) %pU -> %s\n",
+ get_khandle_from_ino(inode),
+ dentry->d_name.name);
+
+ SetMtimeFlag(parent);
+ dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+ mark_inode_dirty_sync(dir);
+ ret = 0;
+out:
+ op_release(new_op);
+ return ret;
+}
+
+static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ struct orangefs_inode_s *parent = ORANGEFS_I(dir);
+ struct orangefs_kernel_op_s *new_op;
+ struct inode *inode;
+ int ret;
+
+ new_op = op_alloc(ORANGEFS_VFS_OP_MKDIR);
+ if (!new_op)
+ return -ENOMEM;
+
+ new_op->upcall.req.mkdir.parent_refn = parent->refn;
+
+ fill_default_sys_attrs(new_op->upcall.req.mkdir.attributes,
+ ORANGEFS_TYPE_DIRECTORY, mode);
+
+ strncpy(new_op->upcall.req.mkdir.d_name,
+ dentry->d_name.name, ORANGEFS_NAME_MAX);
+
+ ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
+
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "Mkdir Got ORANGEFS handle %pU on fsid %d\n",
+ &new_op->downcall.resp.mkdir.refn.khandle,
+ new_op->downcall.resp.mkdir.refn.fs_id);
+
+ if (ret < 0) {
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "%s: failed with error code %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ inode = orangefs_new_inode(dir->i_sb, dir, S_IFDIR | mode, 0,
+ &new_op->downcall.resp.mkdir.refn);
+ if (IS_ERR(inode)) {
+ gossip_err("*** Failed to allocate orangefs dir inode\n");
+ ret = PTR_ERR(inode);
+ goto out;
+ }
+
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "Assigned dir inode new number of %pU\n",
+ get_khandle_from_ino(inode));
+
+ d_instantiate(dentry, inode);
+ unlock_new_inode(inode);
+
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "Inode (Directory) %pU -> %s\n",
+ get_khandle_from_ino(inode),
+ dentry->d_name.name);
+
+ /*
+ * NOTE: we have no good way to keep nlink consistent for directories
+ * across clients; keep constant at 1.
+ */
+ SetMtimeFlag(parent);
+ dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+ mark_inode_dirty_sync(dir);
+out:
+ op_release(new_op);
+ return ret;
+}
+
+static int orangefs_rename(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry)
+{
+ struct orangefs_kernel_op_s *new_op;
+ int ret;
+
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "orangefs_rename: called (%s/%s => %s/%s) ct=%d\n",
+ old_dentry->d_parent->d_name.name,
+ old_dentry->d_name.name,
+ new_dentry->d_parent->d_name.name,
+ new_dentry->d_name.name,
+ d_count(new_dentry));
+
+ new_op = op_alloc(ORANGEFS_VFS_OP_RENAME);
+ if (!new_op)
+ return -ENOMEM;
+
+ new_op->upcall.req.rename.old_parent_refn = ORANGEFS_I(old_dir)->refn;
+ new_op->upcall.req.rename.new_parent_refn = ORANGEFS_I(new_dir)->refn;
+
+ strncpy(new_op->upcall.req.rename.d_old_name,
+ old_dentry->d_name.name,
+ ORANGEFS_NAME_MAX);
+ strncpy(new_op->upcall.req.rename.d_new_name,
+ new_dentry->d_name.name,
+ ORANGEFS_NAME_MAX);
+
+ ret = service_operation(new_op,
+ "orangefs_rename",
+ get_interruptible_flag(old_dentry->d_inode));
+
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "orangefs_rename: got downcall status %d\n",
+ ret);
+
+ if (new_dentry->d_inode)
+ new_dentry->d_inode->i_ctime = CURRENT_TIME;
+
+ op_release(new_op);
+ return ret;
+}
+
+/* ORANGEFS implementation of VFS inode operations for directories */
+struct inode_operations orangefs_dir_inode_operations = {
+ .lookup = orangefs_lookup,
+ .get_acl = orangefs_get_acl,
+ .set_acl = orangefs_set_acl,
+ .create = orangefs_create,
+ .unlink = orangefs_unlink,
+ .symlink = orangefs_symlink,
+ .mkdir = orangefs_mkdir,
+ .rmdir = orangefs_unlink,
+ .rename = orangefs_rename,
+ .setattr = orangefs_setattr,
+ .getattr = orangefs_getattr,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .removexattr = generic_removexattr,
+ .listxattr = orangefs_listxattr,
+ .permission = orangefs_permission,
+};
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
new file mode 100644
index 000000000000..1f8acc9f9a88
--- /dev/null
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -0,0 +1,556 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
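+ *
+ * Slot accounting for the shared memory region used to stage data
+ * between the kernel module and the userspace client-core. A typical
+ * I/O path does:
+ *
+ *	slot = orangefs_bufmap_get();	(may sleep, may time out)
+ *	...stage data through descriptor "slot"...
+ *	orangefs_bufmap_put(slot);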
+ */ +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-bufmap.h" + +struct slot_map { + int c; + wait_queue_head_t q; + int count; + unsigned long *map; +}; + +static struct slot_map rw_map = { + .c = -1, + .q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q) +}; +static struct slot_map readdir_map = { + .c = -1, + .q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q) +}; + + +static void install(struct slot_map *m, int count, unsigned long *map) +{ + spin_lock(&m->q.lock); + m->c = m->count = count; + m->map = map; + wake_up_all_locked(&m->q); + spin_unlock(&m->q.lock); +} + +static void mark_killed(struct slot_map *m) +{ + spin_lock(&m->q.lock); + m->c -= m->count + 1; + spin_unlock(&m->q.lock); +} + +static void run_down(struct slot_map *m) +{ + DEFINE_WAIT(wait); + spin_lock(&m->q.lock); + if (m->c != -1) { + for (;;) { + if (likely(list_empty(&wait.task_list))) + __add_wait_queue_tail(&m->q, &wait); + set_current_state(TASK_UNINTERRUPTIBLE); + + if (m->c == -1) + break; + + spin_unlock(&m->q.lock); + schedule(); + spin_lock(&m->q.lock); + } + __remove_wait_queue(&m->q, &wait); + __set_current_state(TASK_RUNNING); + } + m->map = NULL; + spin_unlock(&m->q.lock); +} + +static void put(struct slot_map *m, int slot) +{ + int v; + spin_lock(&m->q.lock); + __clear_bit(slot, m->map); + v = ++m->c; + if (unlikely(v == 1)) /* no free slots -> one free slot */ + wake_up_locked(&m->q); + else if (unlikely(v == -1)) /* finished dying */ + wake_up_all_locked(&m->q); + spin_unlock(&m->q.lock); +} + +static int wait_for_free(struct slot_map *m) +{ + long left = slot_timeout_secs * HZ; + DEFINE_WAIT(wait); + + do { + long n = left, t; + if (likely(list_empty(&wait.task_list))) + __add_wait_queue_tail_exclusive(&m->q, &wait); + set_current_state(TASK_INTERRUPTIBLE); + + if (m->c > 0) + break; + + if (m->c < 0) { + /* we are waiting for map to be installed */ + /* it would better be there soon, or we go away */ + if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ) + n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ; + } + spin_unlock(&m->q.lock); + t = schedule_timeout(n); + spin_lock(&m->q.lock); + if (unlikely(!t) && n != left && m->c < 0) + left = t; + else + left = t + (left - n); + if (unlikely(signal_pending(current))) + left = -EINTR; + } while (left > 0); + + if (!list_empty(&wait.task_list)) + list_del(&wait.task_list); + else if (left <= 0 && waitqueue_active(&m->q)) + __wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL); + __set_current_state(TASK_RUNNING); + + if (likely(left > 0)) + return 0; + + return left < 0 ? 
-EINTR : -ETIMEDOUT; +} + +static int get(struct slot_map *m) +{ + int res = 0; + spin_lock(&m->q.lock); + if (unlikely(m->c <= 0)) + res = wait_for_free(m); + if (likely(!res)) { + m->c--; + res = find_first_zero_bit(m->map, m->count); + __set_bit(res, m->map); + } + spin_unlock(&m->q.lock); + return res; +} + +/* used to describe mapped buffers */ +struct orangefs_bufmap_desc { + void *uaddr; /* user space address pointer */ + struct page **page_array; /* array of mapped pages */ + int array_count; /* size of above arrays */ + struct list_head list_link; +}; + +static struct orangefs_bufmap { + int desc_size; + int desc_shift; + int desc_count; + int total_size; + int page_count; + + struct page **page_array; + struct orangefs_bufmap_desc *desc_array; + + /* array to track usage of buffer descriptors */ + unsigned long *buffer_index_array; + + /* array to track usage of buffer descriptors for readdir */ +#define N DIV_ROUND_UP(ORANGEFS_READDIR_DEFAULT_DESC_COUNT, BITS_PER_LONG) + unsigned long readdir_index_array[N]; +#undef N +} *__orangefs_bufmap; + +static DEFINE_SPINLOCK(orangefs_bufmap_lock); + +static void +orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap) +{ + int i; + + for (i = 0; i < bufmap->page_count; i++) + page_cache_release(bufmap->page_array[i]); +} + +static void +orangefs_bufmap_free(struct orangefs_bufmap *bufmap) +{ + kfree(bufmap->page_array); + kfree(bufmap->desc_array); + kfree(bufmap->buffer_index_array); + kfree(bufmap); +} + +/* + * XXX: Can the size and shift change while the caller gives up the + * XXX: lock between calling this and doing something useful? + */ + +int orangefs_bufmap_size_query(void) +{ + struct orangefs_bufmap *bufmap; + int size = 0; + spin_lock(&orangefs_bufmap_lock); + bufmap = __orangefs_bufmap; + if (bufmap) + size = bufmap->desc_size; + spin_unlock(&orangefs_bufmap_lock); + return size; +} + +int orangefs_bufmap_shift_query(void) +{ + struct orangefs_bufmap *bufmap; + int shift = 0; + spin_lock(&orangefs_bufmap_lock); + bufmap = __orangefs_bufmap; + if (bufmap) + shift = bufmap->desc_shift; + spin_unlock(&orangefs_bufmap_lock); + return shift; +} + +static DECLARE_WAIT_QUEUE_HEAD(bufmap_waitq); +static DECLARE_WAIT_QUEUE_HEAD(readdir_waitq); + +/* + * orangefs_get_bufmap_init + * + * If bufmap_init is 1, then the shared memory system, including the + * buffer_index_array, is available. Otherwise, it is not. + * + * returns the value of bufmap_init + */ +int orangefs_get_bufmap_init(void) +{ + return __orangefs_bufmap ? 
 1 : 0;
+}
+
+
+static struct orangefs_bufmap *
+orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
+{
+ struct orangefs_bufmap *bufmap;
+
+ bufmap = kzalloc(sizeof(*bufmap), GFP_KERNEL);
+ if (!bufmap)
+ goto out;
+
+ bufmap->total_size = user_desc->total_size;
+ bufmap->desc_count = user_desc->count;
+ bufmap->desc_size = user_desc->size;
+ bufmap->desc_shift = ilog2(bufmap->desc_size);
+
+ /* one bit per descriptor; the bitmap is an array of longs */
+ bufmap->buffer_index_array =
+ kcalloc(BITS_TO_LONGS(bufmap->desc_count), sizeof(long),
+ GFP_KERNEL);
+ if (!bufmap->buffer_index_array) {
+ gossip_err("orangefs: could not allocate %d buffer indices\n",
+ bufmap->desc_count);
+ goto out_free_bufmap;
+ }
+
+ bufmap->desc_array =
+ kcalloc(bufmap->desc_count, sizeof(struct orangefs_bufmap_desc),
+ GFP_KERNEL);
+ if (!bufmap->desc_array) {
+ gossip_err("orangefs: could not allocate %d descriptors\n",
+ bufmap->desc_count);
+ goto out_free_index_array;
+ }
+
+ bufmap->page_count = bufmap->total_size / PAGE_SIZE;
+
+ /* allocate storage to track our page mappings */
+ bufmap->page_array =
+ kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL);
+ if (!bufmap->page_array)
+ goto out_free_desc_array;
+
+ return bufmap;
+
+out_free_desc_array:
+ kfree(bufmap->desc_array);
+out_free_index_array:
+ kfree(bufmap->buffer_index_array);
+out_free_bufmap:
+ kfree(bufmap);
+out:
+ return NULL;
+}
+
+static int
+orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
+ struct ORANGEFS_dev_map_desc *user_desc)
+{
+ int pages_per_desc = bufmap->desc_size / PAGE_SIZE;
+ int offset = 0, ret, i;
+
+ /* map the pages */
+ ret = get_user_pages_fast((unsigned long)user_desc->ptr,
+ bufmap->page_count, 1, bufmap->page_array);
+
+ if (ret < 0)
+ return ret;
+
+ if (ret != bufmap->page_count) {
+ gossip_err("orangefs error: asked for %d pages, only got %d.\n",
+ bufmap->page_count, ret);
+
+ for (i = 0; i < ret; i++) {
+ SetPageError(bufmap->page_array[i]);
+ page_cache_release(bufmap->page_array[i]);
+ }
+ return -ENOMEM;
+ }
+
+ /*
+ * ideally we want to get kernel space pointers for each page, but
+ * we can't kmap that many pages at once if highmem is being used.
+ * so instead, we just kmap/kunmap the page address each time the
+ * kaddr is needed.
+ */
+ for (i = 0; i < bufmap->page_count; i++)
+ flush_dcache_page(bufmap->page_array[i]);
+
+ /* build a list of available descriptors */
+ for (offset = 0, i = 0; i < bufmap->desc_count; i++) {
+ bufmap->desc_array[i].page_array = &bufmap->page_array[offset];
+ bufmap->desc_array[i].array_count = pages_per_desc;
+ bufmap->desc_array[i].uaddr =
+ (user_desc->ptr + (i * pages_per_desc * PAGE_SIZE));
+ offset += pages_per_desc;
+ }
+
+ return 0;
+}
+
+/*
+ * orangefs_bufmap_initialize()
+ *
+ * initializes the mapped buffer interface
+ *
+ * returns 0 on success, -errno on failure
+ */
+int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc)
+{
+ struct orangefs_bufmap *bufmap;
+ int ret = -EINVAL;
+
+ gossip_debug(GOSSIP_BUFMAP_DEBUG,
+ "orangefs_bufmap_initialize: called (ptr ("
+ "%p) sz (%d) cnt(%d).\n",
+ user_desc->ptr,
+ user_desc->size,
+ user_desc->count);
+
+ /*
+ * sanity check alignment and size of buffer that caller wants to
+ * work with
+ */
+ if (PAGE_ALIGN((unsigned long)user_desc->ptr) !=
+ (unsigned long)user_desc->ptr) {
+ gossip_err("orangefs error: memory alignment (front).
%p\n", + user_desc->ptr); + goto out; + } + + if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size)) + != (unsigned long)(user_desc->ptr + user_desc->total_size)) { + gossip_err("orangefs error: memory alignment (back).(%p + %d)\n", + user_desc->ptr, + user_desc->total_size); + goto out; + } + + if (user_desc->total_size != (user_desc->size * user_desc->count)) { + gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n", + user_desc->total_size, + user_desc->size, + user_desc->count); + goto out; + } + + if ((user_desc->size % PAGE_SIZE) != 0) { + gossip_err("orangefs error: bufmap size not page size divisible (%d).\n", + user_desc->size); + goto out; + } + + ret = -ENOMEM; + bufmap = orangefs_bufmap_alloc(user_desc); + if (!bufmap) + goto out; + + ret = orangefs_bufmap_map(bufmap, user_desc); + if (ret) + goto out_free_bufmap; + + + spin_lock(&orangefs_bufmap_lock); + if (__orangefs_bufmap) { + spin_unlock(&orangefs_bufmap_lock); + gossip_err("orangefs: error: bufmap already initialized.\n"); + ret = -EINVAL; + goto out_unmap_bufmap; + } + __orangefs_bufmap = bufmap; + install(&rw_map, + bufmap->desc_count, + bufmap->buffer_index_array); + install(&readdir_map, + ORANGEFS_READDIR_DEFAULT_DESC_COUNT, + bufmap->readdir_index_array); + spin_unlock(&orangefs_bufmap_lock); + + gossip_debug(GOSSIP_BUFMAP_DEBUG, + "orangefs_bufmap_initialize: exiting normally\n"); + return 0; + +out_unmap_bufmap: + orangefs_bufmap_unmap(bufmap); +out_free_bufmap: + orangefs_bufmap_free(bufmap); +out: + return ret; +} + +/* + * orangefs_bufmap_finalize() + * + * shuts down the mapped buffer interface and releases any resources + * associated with it + * + * no return value + */ +void orangefs_bufmap_finalize(void) +{ + struct orangefs_bufmap *bufmap = __orangefs_bufmap; + if (!bufmap) + return; + gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n"); + mark_killed(&rw_map); + mark_killed(&readdir_map); + gossip_debug(GOSSIP_BUFMAP_DEBUG, + "orangefs_bufmap_finalize: exiting normally\n"); +} + +void orangefs_bufmap_run_down(void) +{ + struct orangefs_bufmap *bufmap = __orangefs_bufmap; + if (!bufmap) + return; + run_down(&rw_map); + run_down(&readdir_map); + spin_lock(&orangefs_bufmap_lock); + __orangefs_bufmap = NULL; + spin_unlock(&orangefs_bufmap_lock); + orangefs_bufmap_unmap(bufmap); + orangefs_bufmap_free(bufmap); +} + +/* + * orangefs_bufmap_get() + * + * gets a free mapped buffer descriptor, will sleep until one becomes + * available if necessary + * + * returns slot on success, -errno on failure + */ +int orangefs_bufmap_get(void) +{ + return get(&rw_map); +} + +/* + * orangefs_bufmap_put() + * + * returns a mapped buffer descriptor to the collection + * + * no return value + */ +void orangefs_bufmap_put(int buffer_index) +{ + put(&rw_map, buffer_index); +} + +/* + * orangefs_readdir_index_get() + * + * gets a free descriptor, will sleep until one becomes + * available if necessary. + * Although the readdir buffers are not mapped into kernel space + * we could do that at a later point of time. Regardless, these + * indices are used by the client-core. + * + * returns slot on success, -errno on failure + */ +int orangefs_readdir_index_get(void) +{ + return get(&readdir_map); +} + +void orangefs_readdir_index_put(int buffer_index) +{ + put(&readdir_map, buffer_index); +} + +/* + * we've been handed an iovec, we need to copy it to + * the shared memory descriptor at "buffer_index". 
+ */ +int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter, + int buffer_index, + size_t size) +{ + struct orangefs_bufmap_desc *to; + int i; + + gossip_debug(GOSSIP_BUFMAP_DEBUG, + "%s: buffer_index:%d: size:%zu:\n", + __func__, buffer_index, size); + + to = &__orangefs_bufmap->desc_array[buffer_index]; + for (i = 0; size; i++) { + struct page *page = to->page_array[i]; + size_t n = size; + if (n > PAGE_SIZE) + n = PAGE_SIZE; + n = copy_page_from_iter(page, 0, n, iter); + if (!n) + return -EFAULT; + size -= n; + } + return 0; + +} + +/* + * we've been handed an iovec, we need to fill it from + * the shared memory descriptor at "buffer_index". + */ +int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter, + int buffer_index, + size_t size) +{ + struct orangefs_bufmap_desc *from; + int i; + + from = &__orangefs_bufmap->desc_array[buffer_index]; + gossip_debug(GOSSIP_BUFMAP_DEBUG, + "%s: buffer_index:%d: size:%zu:\n", + __func__, buffer_index, size); + + + for (i = 0; size; i++) { + struct page *page = from->page_array[i]; + size_t n = size; + if (n > PAGE_SIZE) + n = PAGE_SIZE; + n = copy_page_to_iter(page, 0, n, iter); + if (!n) + return -EFAULT; + size -= n; + } + return 0; +} diff --git a/fs/orangefs/orangefs-bufmap.h b/fs/orangefs/orangefs-bufmap.h new file mode 100644 index 000000000000..71f64f4057b5 --- /dev/null +++ b/fs/orangefs/orangefs-bufmap.h @@ -0,0 +1,36 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +#ifndef __ORANGEFS_BUFMAP_H +#define __ORANGEFS_BUFMAP_H + +int orangefs_bufmap_size_query(void); + +int orangefs_bufmap_shift_query(void); + +int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc); + +void orangefs_bufmap_finalize(void); + +void orangefs_bufmap_run_down(void); + +int orangefs_bufmap_get(void); + +void orangefs_bufmap_put(int buffer_index); + +int orangefs_readdir_index_get(void); + +void orangefs_readdir_index_put(int buffer_index); + +int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter, + int buffer_index, + size_t size); + +int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter, + int buffer_index, + size_t size); + +#endif /* __ORANGEFS_BUFMAP_H */ diff --git a/fs/orangefs/orangefs-cache.c b/fs/orangefs/orangefs-cache.c new file mode 100644 index 000000000000..900a2e38e11b --- /dev/null +++ b/fs/orangefs/orangefs-cache.c @@ -0,0 +1,161 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. 
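+ *
+ * Slab cache for upcall/downcall operation structures, plus the
+ * monotonically increasing tag counter that matches downcalls to their
+ * upcalls. A typical caller does:
+ *
+ *	new_op = op_alloc(ORANGEFS_VFS_OP_LOOKUP);
+ *	...fill in new_op->upcall.req and service the op...
+ *	op_release(new_op);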
+ */ + +#include "protocol.h" +#include "orangefs-kernel.h" + +/* tags assigned to kernel upcall operations */ +static __u64 next_tag_value; +static DEFINE_SPINLOCK(next_tag_value_lock); + +/* the orangefs memory caches */ + +/* a cache for orangefs upcall/downcall operations */ +static struct kmem_cache *op_cache; + +int op_cache_initialize(void) +{ + op_cache = kmem_cache_create("orangefs_op_cache", + sizeof(struct orangefs_kernel_op_s), + 0, + ORANGEFS_CACHE_CREATE_FLAGS, + NULL); + + if (!op_cache) { + gossip_err("Cannot create orangefs_op_cache\n"); + return -ENOMEM; + } + + /* initialize our atomic tag counter */ + spin_lock(&next_tag_value_lock); + next_tag_value = 100; + spin_unlock(&next_tag_value_lock); + return 0; +} + +int op_cache_finalize(void) +{ + kmem_cache_destroy(op_cache); + return 0; +} + +char *get_opname_string(struct orangefs_kernel_op_s *new_op) +{ + if (new_op) { + __s32 type = new_op->upcall.type; + + if (type == ORANGEFS_VFS_OP_FILE_IO) + return "OP_FILE_IO"; + else if (type == ORANGEFS_VFS_OP_LOOKUP) + return "OP_LOOKUP"; + else if (type == ORANGEFS_VFS_OP_CREATE) + return "OP_CREATE"; + else if (type == ORANGEFS_VFS_OP_GETATTR) + return "OP_GETATTR"; + else if (type == ORANGEFS_VFS_OP_REMOVE) + return "OP_REMOVE"; + else if (type == ORANGEFS_VFS_OP_MKDIR) + return "OP_MKDIR"; + else if (type == ORANGEFS_VFS_OP_READDIR) + return "OP_READDIR"; + else if (type == ORANGEFS_VFS_OP_READDIRPLUS) + return "OP_READDIRPLUS"; + else if (type == ORANGEFS_VFS_OP_SETATTR) + return "OP_SETATTR"; + else if (type == ORANGEFS_VFS_OP_SYMLINK) + return "OP_SYMLINK"; + else if (type == ORANGEFS_VFS_OP_RENAME) + return "OP_RENAME"; + else if (type == ORANGEFS_VFS_OP_STATFS) + return "OP_STATFS"; + else if (type == ORANGEFS_VFS_OP_TRUNCATE) + return "OP_TRUNCATE"; + else if (type == ORANGEFS_VFS_OP_MMAP_RA_FLUSH) + return "OP_MMAP_RA_FLUSH"; + else if (type == ORANGEFS_VFS_OP_FS_MOUNT) + return "OP_FS_MOUNT"; + else if (type == ORANGEFS_VFS_OP_FS_UMOUNT) + return "OP_FS_UMOUNT"; + else if (type == ORANGEFS_VFS_OP_GETXATTR) + return "OP_GETXATTR"; + else if (type == ORANGEFS_VFS_OP_SETXATTR) + return "OP_SETXATTR"; + else if (type == ORANGEFS_VFS_OP_LISTXATTR) + return "OP_LISTXATTR"; + else if (type == ORANGEFS_VFS_OP_REMOVEXATTR) + return "OP_REMOVEXATTR"; + else if (type == ORANGEFS_VFS_OP_PARAM) + return "OP_PARAM"; + else if (type == ORANGEFS_VFS_OP_PERF_COUNT) + return "OP_PERF_COUNT"; + else if (type == ORANGEFS_VFS_OP_CANCEL) + return "OP_CANCEL"; + else if (type == ORANGEFS_VFS_OP_FSYNC) + return "OP_FSYNC"; + else if (type == ORANGEFS_VFS_OP_FSKEY) + return "OP_FSKEY"; + } + return "OP_UNKNOWN?"; +} + +void orangefs_new_tag(struct orangefs_kernel_op_s *op) +{ + spin_lock(&next_tag_value_lock); + op->tag = next_tag_value++; + if (next_tag_value == 0) + next_tag_value = 100; + spin_unlock(&next_tag_value_lock); +} + +struct orangefs_kernel_op_s *op_alloc(__s32 type) +{ + struct orangefs_kernel_op_s *new_op = NULL; + + new_op = kmem_cache_zalloc(op_cache, GFP_KERNEL); + if (new_op) { + INIT_LIST_HEAD(&new_op->list); + spin_lock_init(&new_op->lock); + init_completion(&new_op->waitq); + + new_op->upcall.type = ORANGEFS_VFS_OP_INVALID; + new_op->downcall.type = ORANGEFS_VFS_OP_INVALID; + new_op->downcall.status = -1; + + new_op->op_state = OP_VFS_STATE_UNKNOWN; + + /* initialize the op specific tag and upcall credentials */ + orangefs_new_tag(new_op); + new_op->upcall.type = type; + new_op->attempts = 0; + gossip_debug(GOSSIP_CACHE_DEBUG, + "Alloced OP (%p: %llu %s)\n", + new_op, 
+ llu(new_op->tag), + get_opname_string(new_op)); + + new_op->upcall.uid = from_kuid(current_user_ns(), + current_fsuid()); + + new_op->upcall.gid = from_kgid(current_user_ns(), + current_fsgid()); + } else { + gossip_err("op_alloc: kmem_cache_zalloc failed!\n"); + } + return new_op; +} + +void op_release(struct orangefs_kernel_op_s *orangefs_op) +{ + if (orangefs_op) { + gossip_debug(GOSSIP_CACHE_DEBUG, + "Releasing OP (%p: %llu)\n", + orangefs_op, + llu(orangefs_op->tag)); + kmem_cache_free(op_cache, orangefs_op); + } else { + gossip_err("NULL pointer in op_release\n"); + } +} diff --git a/fs/orangefs/orangefs-debug.h b/fs/orangefs/orangefs-debug.h new file mode 100644 index 000000000000..387db17cde2b --- /dev/null +++ b/fs/orangefs/orangefs-debug.h @@ -0,0 +1,92 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +/* This file just defines debugging masks to be used with the gossip + * logging utility. All debugging masks for ORANGEFS are kept here to make + * sure we don't have collisions. + */ + +#ifndef __ORANGEFS_DEBUG_H +#define __ORANGEFS_DEBUG_H + +#ifdef __KERNEL__ +#include <linux/types.h> +#else +#include <stdint.h> +#endif + +#define GOSSIP_NO_DEBUG (__u64)0 + +#define GOSSIP_SUPER_DEBUG ((__u64)1 << 0) +#define GOSSIP_INODE_DEBUG ((__u64)1 << 1) +#define GOSSIP_FILE_DEBUG ((__u64)1 << 2) +#define GOSSIP_DIR_DEBUG ((__u64)1 << 3) +#define GOSSIP_UTILS_DEBUG ((__u64)1 << 4) +#define GOSSIP_WAIT_DEBUG ((__u64)1 << 5) +#define GOSSIP_ACL_DEBUG ((__u64)1 << 6) +#define GOSSIP_DCACHE_DEBUG ((__u64)1 << 7) +#define GOSSIP_DEV_DEBUG ((__u64)1 << 8) +#define GOSSIP_NAME_DEBUG ((__u64)1 << 9) +#define GOSSIP_BUFMAP_DEBUG ((__u64)1 << 10) +#define GOSSIP_CACHE_DEBUG ((__u64)1 << 11) +#define GOSSIP_DEBUGFS_DEBUG ((__u64)1 << 12) +#define GOSSIP_XATTR_DEBUG ((__u64)1 << 13) +#define GOSSIP_INIT_DEBUG ((__u64)1 << 14) +#define GOSSIP_SYSFS_DEBUG ((__u64)1 << 15) + +#define GOSSIP_MAX_NR 16 +#define GOSSIP_MAX_DEBUG (((__u64)1 << GOSSIP_MAX_NR) - 1) + +/*function prototypes*/ +__u64 ORANGEFS_kmod_eventlog_to_mask(const char *event_logging); +__u64 ORANGEFS_debug_eventlog_to_mask(const char *event_logging); +char *ORANGEFS_debug_mask_to_eventlog(__u64 mask); +char *ORANGEFS_kmod_mask_to_eventlog(__u64 mask); + +/* a private internal type */ +struct __keyword_mask_s { + const char *keyword; + __u64 mask_val; +}; + +/* + * Map all kmod keywords to kmod debug masks here. Keep this + * structure "packed": + * + * "all" is always last... + * + * keyword mask_val index + * foo 1 0 + * bar 2 1 + * baz 4 2 + * qux 8 3 + * . . . 
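+ *
+ * num_kmod_keyword_mask_map below is derived from the array size, so
+ * a new keyword only needs an entry here, above "none" and "all".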
+ */
+static struct __keyword_mask_s s_kmod_keyword_mask_map[] = {
+ {"super", GOSSIP_SUPER_DEBUG},
+ {"inode", GOSSIP_INODE_DEBUG},
+ {"file", GOSSIP_FILE_DEBUG},
+ {"dir", GOSSIP_DIR_DEBUG},
+ {"utils", GOSSIP_UTILS_DEBUG},
+ {"wait", GOSSIP_WAIT_DEBUG},
+ {"acl", GOSSIP_ACL_DEBUG},
+ {"dcache", GOSSIP_DCACHE_DEBUG},
+ {"dev", GOSSIP_DEV_DEBUG},
+ {"name", GOSSIP_NAME_DEBUG},
+ {"bufmap", GOSSIP_BUFMAP_DEBUG},
+ {"cache", GOSSIP_CACHE_DEBUG},
+ {"debugfs", GOSSIP_DEBUGFS_DEBUG},
+ {"xattr", GOSSIP_XATTR_DEBUG},
+ {"init", GOSSIP_INIT_DEBUG},
+ {"sysfs", GOSSIP_SYSFS_DEBUG},
+ {"none", GOSSIP_NO_DEBUG},
+ {"all", GOSSIP_MAX_DEBUG}
+};
+
+static const int num_kmod_keyword_mask_map = (int)
+ (sizeof(s_kmod_keyword_mask_map) / sizeof(struct __keyword_mask_s));
+
+#endif /* __ORANGEFS_DEBUG_H */
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
new file mode 100644
index 000000000000..19670b8b4053
--- /dev/null
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -0,0 +1,455 @@
+/*
+ * What: /sys/kernel/debug/orangefs/debug-help
+ * Date: June 2015
+ * Contact: Mike Marshall <[email protected]>
+ * Description:
+ * List of client and kernel debug keywords.
+ *
+ *
+ * What: /sys/kernel/debug/orangefs/client-debug
+ * Date: June 2015
+ * Contact: Mike Marshall <[email protected]>
+ * Description:
+ * Debug setting for "the client", the userspace
+ * helper for the kernel module.
+ *
+ *
+ * What: /sys/kernel/debug/orangefs/kernel-debug
+ * Date: June 2015
+ * Contact: Mike Marshall <[email protected]>
+ * Description:
+ * Debug setting for the orangefs kernel module.
+ *
+ * Any of the keywords, or comma-separated lists
+ * of keywords, from debug-help can be catted to
+ * client-debug or kernel-debug.
+ *
+ * "none", "all" and "verbose" are special keywords
+ * for client-debug. Setting client-debug to "all"
+ * is kind of like trying to drink water from a
+ * fire hose; "verbose" triggers most of the same
+ * output except for the constant flow of output
+ * from the main wait loop.
+ *
+ * "none" and "all" are similar settings for kernel-debug;
+ * there is no need for a "verbose".
+ */
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#include <linux/uaccess.h>
+
+#include "orangefs-debugfs.h"
+#include "protocol.h"
+#include "orangefs-kernel.h"
+
+static int orangefs_debug_disabled = 1;
+
+static int orangefs_debug_help_open(struct inode *, struct file *);
+
+const struct file_operations debug_help_fops = {
+ .open = orangefs_debug_help_open,
+ .read = seq_read,
+ .release = seq_release,
+ .llseek = seq_lseek,
+};
+
+static void *help_start(struct seq_file *, loff_t *);
+static void *help_next(struct seq_file *, void *, loff_t *);
+static void help_stop(struct seq_file *, void *);
+static int help_show(struct seq_file *, void *);
+
+static const struct seq_operations help_debug_ops = {
+ .start = help_start,
+ .next = help_next,
+ .stop = help_stop,
+ .show = help_show,
+};
+
+/*
+ * Used to protect data in ORANGEFS_KMOD_DEBUG_FILE and
+ * ORANGEFS_CLIENT_DEBUG_FILE.
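+ * Writers rebuild the backing string wholesale, so the mutex keeps
+ * readers from seeing a half-updated buffer.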
+ */ +static DEFINE_MUTEX(orangefs_debug_lock); + +int orangefs_debug_open(struct inode *, struct file *); + +static ssize_t orangefs_debug_read(struct file *, + char __user *, + size_t, + loff_t *); + +static ssize_t orangefs_debug_write(struct file *, + const char __user *, + size_t, + loff_t *); + +static const struct file_operations kernel_debug_fops = { + .open = orangefs_debug_open, + .read = orangefs_debug_read, + .write = orangefs_debug_write, + .llseek = generic_file_llseek, +}; + +/* + * initialize kmod debug operations, create orangefs debugfs dir and + * ORANGEFS_KMOD_DEBUG_HELP_FILE. + */ +int orangefs_debugfs_init(void) +{ + + int rc = -ENOMEM; + + debug_dir = debugfs_create_dir("orangefs", NULL); + if (!debug_dir) { + pr_info("%s: debugfs_create_dir failed.\n", __func__); + goto out; + } + + help_file_dentry = debugfs_create_file(ORANGEFS_KMOD_DEBUG_HELP_FILE, + 0444, + debug_dir, + debug_help_string, + &debug_help_fops); + if (!help_file_dentry) { + pr_info("%s: debugfs_create_file failed.\n", __func__); + goto out; + } + + orangefs_debug_disabled = 0; + rc = 0; + +out: + + return rc; +} + +void orangefs_debugfs_cleanup(void) +{ + if (debug_dir) + debugfs_remove_recursive(debug_dir); +} + +/* open ORANGEFS_KMOD_DEBUG_HELP_FILE */ +static int orangefs_debug_help_open(struct inode *inode, struct file *file) +{ + int rc = -ENODEV; + int ret; + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "orangefs_debug_help_open: start\n"); + + if (orangefs_debug_disabled) + goto out; + + ret = seq_open(file, &help_debug_ops); + if (ret) + goto out; + + ((struct seq_file *)(file->private_data))->private = inode->i_private; + + rc = 0; + +out: + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "orangefs_debug_help_open: rc:%d:\n", + rc); + return rc; +} + +/* + * I think start always gets called again after stop. Start + * needs to return NULL when it is done. The whole "payload" + * in this case is a single (long) string, so by the second + * time we get to start (pos = 1), we're done. + */ +static void *help_start(struct seq_file *m, loff_t *pos) +{ + void *payload = NULL; + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_start: start\n"); + + if (*pos == 0) + payload = m->private; + + return payload; +} + +static void *help_next(struct seq_file *m, void *v, loff_t *pos) +{ + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_next: start\n"); + + return NULL; +} + +static void help_stop(struct seq_file *m, void *p) +{ + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_stop: start\n"); +} + +static int help_show(struct seq_file *m, void *v) +{ + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_show: start\n"); + + seq_puts(m, v); + + return 0; +} + +/* + * initialize the kernel-debug file. 
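+ *
+ * The file's i_private is a heap buffer seeded from
+ * kernel_debug_string; reads and writes of the file go through
+ * kernel_debug_fops above.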
+ */ +int orangefs_kernel_debug_init(void) +{ + int rc = -ENOMEM; + struct dentry *ret; + char *k_buffer = NULL; + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__); + + k_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); + if (!k_buffer) + goto out; + + if (strlen(kernel_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) { + strcpy(k_buffer, kernel_debug_string); + strcat(k_buffer, "\n"); + } else { + strcpy(k_buffer, "none\n"); + pr_info("%s: overflow 1!\n", __func__); + } + + ret = debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE, + 0444, + debug_dir, + k_buffer, + &kernel_debug_fops); + if (!ret) { + pr_info("%s: failed to create %s.\n", + __func__, + ORANGEFS_KMOD_DEBUG_FILE); + goto out; + } + + rc = 0; + +out: + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc); + return rc; +} + +/* + * initialize the client-debug file. + */ +int orangefs_client_debug_init(void) +{ + + int rc = -ENOMEM; + char *c_buffer = NULL; + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__); + + c_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); + if (!c_buffer) + goto out; + + if (strlen(client_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) { + strcpy(c_buffer, client_debug_string); + strcat(c_buffer, "\n"); + } else { + strcpy(c_buffer, "none\n"); + pr_info("%s: overflow! 2\n", __func__); + } + + client_debug_dentry = debugfs_create_file(ORANGEFS_CLIENT_DEBUG_FILE, + 0444, + debug_dir, + c_buffer, + &kernel_debug_fops); + if (!client_debug_dentry) { + pr_info("%s: failed to create updated %s.\n", + __func__, + ORANGEFS_CLIENT_DEBUG_FILE); + goto out; + } + + rc = 0; + +out: + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc); + return rc; +} + +/* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/ +int orangefs_debug_open(struct inode *inode, struct file *file) +{ + int rc = -ENODEV; + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "%s: orangefs_debug_disabled: %d\n", + __func__, + orangefs_debug_disabled); + + if (orangefs_debug_disabled) + goto out; + + rc = 0; + mutex_lock(&orangefs_debug_lock); + file->private_data = inode->i_private; + mutex_unlock(&orangefs_debug_lock); + +out: + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "orangefs_debug_open: rc: %d\n", + rc); + return rc; +} + +static ssize_t orangefs_debug_read(struct file *file, + char __user *ubuf, + size_t count, + loff_t *ppos) +{ + char *buf; + int sprintf_ret; + ssize_t read_ret = -ENOMEM; + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, "orangefs_debug_read: start\n"); + + buf = kmalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); + if (!buf) + goto out; + + mutex_lock(&orangefs_debug_lock); + sprintf_ret = sprintf(buf, "%s", (char *)file->private_data); + mutex_unlock(&orangefs_debug_lock); + + read_ret = simple_read_from_buffer(ubuf, count, ppos, buf, sprintf_ret); + + kfree(buf); + +out: + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "orangefs_debug_read: ret: %zu\n", + read_ret); + + return read_ret; +} + +static ssize_t orangefs_debug_write(struct file *file, + const char __user *ubuf, + size_t count, + loff_t *ppos) +{ + char *buf; + int rc = -EFAULT; + size_t silly = 0; + char *debug_string; + struct orangefs_kernel_op_s *new_op = NULL; + struct client_debug_mask c_mask = { NULL, 0, 0 }; + + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "orangefs_debug_write: %s\n", + file->f_path.dentry->d_name.name); + + /* + * Thwart users who try to jamb a ridiculous number + * of bytes into the debug file... 
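+ * Anything longer than the longest legal keyword string gets
+ * truncated, but the caller is still told that the whole count was
+ * consumed (see "silly" below).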
+ */ + if (count > ORANGEFS_MAX_DEBUG_STRING_LEN + 1) { + silly = count; + count = ORANGEFS_MAX_DEBUG_STRING_LEN + 1; + } + + buf = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); + if (!buf) + goto out; + + if (copy_from_user(buf, ubuf, count - 1)) { + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "%s: copy_from_user failed!\n", + __func__); + goto out; + } + + /* + * Map the keyword string from userspace into a valid debug mask. + * The mapping process involves mapping the human-inputted string + * into a valid mask, and then rebuilding the string from the + * verified valid mask. + * + * A service operation is required to set a new client-side + * debug mask. + */ + if (!strcmp(file->f_path.dentry->d_name.name, + ORANGEFS_KMOD_DEBUG_FILE)) { + debug_string_to_mask(buf, &gossip_debug_mask, 0); + debug_mask_to_string(&gossip_debug_mask, 0); + debug_string = kernel_debug_string; + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "New kernel debug string is %s\n", + kernel_debug_string); + } else { + /* Can't reset client debug mask if client is not running. */ + if (is_daemon_in_service()) { + pr_info("%s: Client not running :%d:\n", + __func__, + is_daemon_in_service()); + goto out; + } + + debug_string_to_mask(buf, &c_mask, 1); + debug_mask_to_string(&c_mask, 1); + debug_string = client_debug_string; + + new_op = op_alloc(ORANGEFS_VFS_OP_PARAM); + if (!new_op) { + pr_info("%s: op_alloc failed!\n", __func__); + goto out; + } + + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_TWO_MASK_VALUES; + new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET; + memset(new_op->upcall.req.param.s_value, + 0, + ORANGEFS_MAX_DEBUG_STRING_LEN); + sprintf(new_op->upcall.req.param.s_value, + "%llx %llx\n", + c_mask.mask1, + c_mask.mask2); + + /* service_operation returns 0 on success... */ + rc = service_operation(new_op, + "orangefs_param", + ORANGEFS_OP_INTERRUPTIBLE); + + if (rc) + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "%s: service_operation failed! rc:%d:\n", + __func__, + rc); + + op_release(new_op); + } + + mutex_lock(&orangefs_debug_lock); + memset(file->f_inode->i_private, 0, ORANGEFS_MAX_DEBUG_STRING_LEN); + sprintf((char *)file->f_inode->i_private, "%s\n", debug_string); + mutex_unlock(&orangefs_debug_lock); + + *ppos += count; + if (silly) + rc = silly; + else + rc = count; + +out: + gossip_debug(GOSSIP_DEBUGFS_DEBUG, + "orangefs_debug_write: rc: %d\n", + rc); + kfree(buf); + return rc; +} diff --git a/fs/orangefs/orangefs-debugfs.h b/fs/orangefs/orangefs-debugfs.h new file mode 100644 index 000000000000..e4828c0e3ef9 --- /dev/null +++ b/fs/orangefs/orangefs-debugfs.h @@ -0,0 +1,3 @@ +int orangefs_debugfs_init(void); +int orangefs_kernel_debug_init(void); +void orangefs_debugfs_cleanup(void); diff --git a/fs/orangefs/orangefs-dev-proto.h b/fs/orangefs/orangefs-dev-proto.h new file mode 100644 index 000000000000..9eac9d9a3f3a --- /dev/null +++ b/fs/orangefs/orangefs-dev-proto.h @@ -0,0 +1,62 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. 
+ */ + +#ifndef _ORANGEFS_DEV_PROTO_H +#define _ORANGEFS_DEV_PROTO_H + +/* + * types and constants shared between user space and kernel space for + * device interaction using a common protocol + */ + +/* + * valid orangefs kernel operation types + */ +#define ORANGEFS_VFS_OP_INVALID 0xFF000000 +#define ORANGEFS_VFS_OP_FILE_IO 0xFF000001 +#define ORANGEFS_VFS_OP_LOOKUP 0xFF000002 +#define ORANGEFS_VFS_OP_CREATE 0xFF000003 +#define ORANGEFS_VFS_OP_GETATTR 0xFF000004 +#define ORANGEFS_VFS_OP_REMOVE 0xFF000005 +#define ORANGEFS_VFS_OP_MKDIR 0xFF000006 +#define ORANGEFS_VFS_OP_READDIR 0xFF000007 +#define ORANGEFS_VFS_OP_SETATTR 0xFF000008 +#define ORANGEFS_VFS_OP_SYMLINK 0xFF000009 +#define ORANGEFS_VFS_OP_RENAME 0xFF00000A +#define ORANGEFS_VFS_OP_STATFS 0xFF00000B +#define ORANGEFS_VFS_OP_TRUNCATE 0xFF00000C +#define ORANGEFS_VFS_OP_MMAP_RA_FLUSH 0xFF00000D +#define ORANGEFS_VFS_OP_FS_MOUNT 0xFF00000E +#define ORANGEFS_VFS_OP_FS_UMOUNT 0xFF00000F +#define ORANGEFS_VFS_OP_GETXATTR 0xFF000010 +#define ORANGEFS_VFS_OP_SETXATTR 0xFF000011 +#define ORANGEFS_VFS_OP_LISTXATTR 0xFF000012 +#define ORANGEFS_VFS_OP_REMOVEXATTR 0xFF000013 +#define ORANGEFS_VFS_OP_PARAM 0xFF000014 +#define ORANGEFS_VFS_OP_PERF_COUNT 0xFF000015 +#define ORANGEFS_VFS_OP_CANCEL 0xFF00EE00 +#define ORANGEFS_VFS_OP_FSYNC 0xFF00EE01 +#define ORANGEFS_VFS_OP_FSKEY 0xFF00EE02 +#define ORANGEFS_VFS_OP_READDIRPLUS 0xFF00EE03 + +/* + * Misc constants. Please retain them as multiples of 8! + * Otherwise 32-64 bit interactions will be messed up :) + */ +#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000400 +#define ORANGEFS_MAX_DEBUG_ARRAY_LEN 0x00000800 + +/* + * The maximum number of directory entries in a single request is 96. + * XXX: Why can this not be higher. The client-side code can handle up to 512. + * XXX: What happens if we expect more than the client can return? + */ +#define ORANGEFS_MAX_DIRENT_COUNT_READDIR 96 + +#include "upcall.h" +#include "downcall.h" + +#endif diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h new file mode 100644 index 000000000000..a9925e296ceb --- /dev/null +++ b/fs/orangefs/orangefs-kernel.h @@ -0,0 +1,623 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +/* + * The ORANGEFS Linux kernel support allows ORANGEFS volumes to be mounted and + * accessed through the Linux VFS (i.e. using standard I/O system calls). + * This support is only needed on clients that wish to mount the file system. + * + */ + +/* + * Declarations and macros for the ORANGEFS Linux kernel support. 
+ */ + +#ifndef __ORANGEFSKERNEL_H +#define __ORANGEFSKERNEL_H + +#include <linux/kernel.h> +#include <linux/moduleparam.h> +#include <linux/statfs.h> +#include <linux/backing-dev.h> +#include <linux/device.h> +#include <linux/mpage.h> +#include <linux/namei.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/vmalloc.h> + +#include <linux/aio.h> +#include <linux/posix_acl.h> +#include <linux/posix_acl_xattr.h> +#include <linux/compat.h> +#include <linux/mount.h> +#include <linux/uaccess.h> +#include <linux/atomic.h> +#include <linux/uio.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/wait.h> +#include <linux/dcache.h> +#include <linux/pagemap.h> +#include <linux/poll.h> +#include <linux/rwsem.h> +#include <linux/xattr.h> +#include <linux/exportfs.h> + +#include <asm/unaligned.h> + +#include "orangefs-dev-proto.h" + +#ifdef ORANGEFS_KERNEL_DEBUG +#define ORANGEFS_DEFAULT_OP_TIMEOUT_SECS 10 +#else +#define ORANGEFS_DEFAULT_OP_TIMEOUT_SECS 20 +#endif + +#define ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS 30 + +#define ORANGEFS_DEFAULT_SLOT_TIMEOUT_SECS 900 /* 15 minutes */ + +#define ORANGEFS_REQDEVICE_NAME "pvfs2-req" + +#define ORANGEFS_DEVREQ_MAGIC 0x20030529 +#define ORANGEFS_LINK_MAX 0x000000FF +#define ORANGEFS_PURGE_RETRY_COUNT 0x00000005 +#define ORANGEFS_MAX_NUM_OPTIONS 0x00000004 +#define ORANGEFS_MAX_MOUNT_OPT_LEN 0x00000080 +#define ORANGEFS_MAX_FSKEY_LEN 64 + +#define MAX_DEV_REQ_UPSIZE (2 * sizeof(__s32) + \ +sizeof(__u64) + sizeof(struct orangefs_upcall_s)) +#define MAX_DEV_REQ_DOWNSIZE (2 * sizeof(__s32) + \ +sizeof(__u64) + sizeof(struct orangefs_downcall_s)) + +/* + * valid orangefs kernel operation states + * + * unknown - op was just initialized + * waiting - op is on request_list (upward bound) + * inprogr - op is in progress (waiting for downcall) + * serviced - op has matching downcall; ok + * purged - op has to start a timer since client-core + * exited uncleanly before servicing op + * given up - submitter has given up waiting for it + */ +enum orangefs_vfs_op_states { + OP_VFS_STATE_UNKNOWN = 0, + OP_VFS_STATE_WAITING = 1, + OP_VFS_STATE_INPROGR = 2, + OP_VFS_STATE_SERVICED = 4, + OP_VFS_STATE_PURGED = 8, + OP_VFS_STATE_GIVEN_UP = 16, +}; + +/* + * An array of client_debug_mask will be built to hold debug keyword/mask + * values fetched from userspace. + */ +struct client_debug_mask { + char *keyword; + __u64 mask1; + __u64 mask2; +}; + +/* + * orangefs kernel memory related flags + */ + +#if ((defined ORANGEFS_KERNEL_DEBUG) && (defined CONFIG_DEBUG_SLAB)) +#define ORANGEFS_CACHE_CREATE_FLAGS SLAB_RED_ZONE +#else +#define ORANGEFS_CACHE_CREATE_FLAGS 0 +#endif /* ((defined ORANGEFS_KERNEL_DEBUG) && (defined CONFIG_DEBUG_SLAB)) */ + +/* orangefs xattr and acl related defines */ +#define ORANGEFS_XATTR_INDEX_POSIX_ACL_ACCESS 1 +#define ORANGEFS_XATTR_INDEX_POSIX_ACL_DEFAULT 2 +#define ORANGEFS_XATTR_INDEX_TRUSTED 3 +#define ORANGEFS_XATTR_INDEX_DEFAULT 4 + +#define ORANGEFS_XATTR_NAME_ACL_ACCESS XATTR_NAME_POSIX_ACL_ACCESS +#define ORANGEFS_XATTR_NAME_ACL_DEFAULT XATTR_NAME_POSIX_ACL_DEFAULT +#define ORANGEFS_XATTR_NAME_TRUSTED_PREFIX "trusted." 
+#define ORANGEFS_XATTR_NAME_DEFAULT_PREFIX "" + +/* these functions are defined in orangefs-utils.c */ +int orangefs_prepare_cdm_array(char *debug_array_string); +int orangefs_prepare_debugfs_help_string(int); + +/* defined in orangefs-debugfs.c */ +int orangefs_client_debug_init(void); + +void debug_string_to_mask(char *, void *, int); +void do_c_mask(int, char *, struct client_debug_mask **); +void do_k_mask(int, char *, __u64 **); + +void debug_mask_to_string(void *, int); +void do_k_string(void *, int); +void do_c_string(void *, int); +int check_amalgam_keyword(void *, int); +int keyword_is_amalgam(char *); + +/*these variables are defined in orangefs-mod.c */ +extern char kernel_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN]; +extern char client_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN]; +extern char client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN]; +extern unsigned int kernel_mask_set_mod_init; + +extern int orangefs_init_acl(struct inode *inode, struct inode *dir); +extern const struct xattr_handler *orangefs_xattr_handlers[]; + +extern struct posix_acl *orangefs_get_acl(struct inode *inode, int type); +extern int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type); + +/* + * Redefine xtvec structure so that we could move helper functions out of + * the define + */ +struct xtvec { + __kernel_off_t xtv_off; /* must be off_t */ + __kernel_size_t xtv_len; /* must be size_t */ +}; + +/* + * orangefs data structures + */ +struct orangefs_kernel_op_s { + enum orangefs_vfs_op_states op_state; + __u64 tag; + + /* + * Set uses_shared_memory to non zero if this operation uses + * shared memory. If true, then a retry on the op must also + * get a new shared memory buffer and re-populate it. + * Cancels don't care - it only matters for service_operation() + * retry logics and cancels don't go through it anymore. It + * safely stays non-zero when we use it as slot_to_free. 
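+ *
+ * The union below aliases the two roles: while the op is live,
+ * uses_shared_memory says whether a retry needs a fresh buffer; once
+ * it is given up, the same slot number is what put_cancel() hands
+ * back to orangefs_bufmap_put().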
+ */ + union { + int uses_shared_memory; + int slot_to_free; + }; + + struct orangefs_upcall_s upcall; + struct orangefs_downcall_s downcall; + + struct completion waitq; + spinlock_t lock; + + int attempts; + + struct list_head list; +}; + +#define set_op_state_waiting(op) ((op)->op_state = OP_VFS_STATE_WAITING) +#define set_op_state_inprogress(op) ((op)->op_state = OP_VFS_STATE_INPROGR) +#define set_op_state_given_up(op) ((op)->op_state = OP_VFS_STATE_GIVEN_UP) +static inline void set_op_state_serviced(struct orangefs_kernel_op_s *op) +{ + op->op_state = OP_VFS_STATE_SERVICED; + complete(&op->waitq); +} + +#define op_state_waiting(op) ((op)->op_state & OP_VFS_STATE_WAITING) +#define op_state_in_progress(op) ((op)->op_state & OP_VFS_STATE_INPROGR) +#define op_state_serviced(op) ((op)->op_state & OP_VFS_STATE_SERVICED) +#define op_state_purged(op) ((op)->op_state & OP_VFS_STATE_PURGED) +#define op_state_given_up(op) ((op)->op_state & OP_VFS_STATE_GIVEN_UP) +#define op_is_cancel(op) ((op)->upcall.type == ORANGEFS_VFS_OP_CANCEL) + +void op_release(struct orangefs_kernel_op_s *op); + +extern void orangefs_bufmap_put(int); +static inline void put_cancel(struct orangefs_kernel_op_s *op) +{ + orangefs_bufmap_put(op->slot_to_free); + op_release(op); +} + +static inline void set_op_state_purged(struct orangefs_kernel_op_s *op) +{ + spin_lock(&op->lock); + if (unlikely(op_is_cancel(op))) { + list_del_init(&op->list); + spin_unlock(&op->lock); + put_cancel(op); + } else { + op->op_state |= OP_VFS_STATE_PURGED; + complete(&op->waitq); + spin_unlock(&op->lock); + } +} + +/* per inode private orangefs info */ +struct orangefs_inode_s { + struct orangefs_object_kref refn; + char link_target[ORANGEFS_NAME_MAX]; + __s64 blksize; + /* + * Reading/Writing Extended attributes need to acquire the appropriate + * reader/writer semaphore on the orangefs_inode_s structure. 
+ */ + struct rw_semaphore xattr_sem; + + struct inode vfs_inode; + sector_t last_failed_block_index_read; + + /* + * State of in-memory attributes not yet flushed to disk associated + * with this object + */ + unsigned long pinode_flags; +}; + +#define P_ATIME_FLAG 0 +#define P_MTIME_FLAG 1 +#define P_CTIME_FLAG 2 +#define P_MODE_FLAG 3 + +#define ClearAtimeFlag(pinode) clear_bit(P_ATIME_FLAG, &(pinode)->pinode_flags) +#define SetAtimeFlag(pinode) set_bit(P_ATIME_FLAG, &(pinode)->pinode_flags) +#define AtimeFlag(pinode) test_bit(P_ATIME_FLAG, &(pinode)->pinode_flags) + +#define ClearMtimeFlag(pinode) clear_bit(P_MTIME_FLAG, &(pinode)->pinode_flags) +#define SetMtimeFlag(pinode) set_bit(P_MTIME_FLAG, &(pinode)->pinode_flags) +#define MtimeFlag(pinode) test_bit(P_MTIME_FLAG, &(pinode)->pinode_flags) + +#define ClearCtimeFlag(pinode) clear_bit(P_CTIME_FLAG, &(pinode)->pinode_flags) +#define SetCtimeFlag(pinode) set_bit(P_CTIME_FLAG, &(pinode)->pinode_flags) +#define CtimeFlag(pinode) test_bit(P_CTIME_FLAG, &(pinode)->pinode_flags) + +#define ClearModeFlag(pinode) clear_bit(P_MODE_FLAG, &(pinode)->pinode_flags) +#define SetModeFlag(pinode) set_bit(P_MODE_FLAG, &(pinode)->pinode_flags) +#define ModeFlag(pinode) test_bit(P_MODE_FLAG, &(pinode)->pinode_flags) + +/* per superblock private orangefs info */ +struct orangefs_sb_info_s { + struct orangefs_khandle root_khandle; + __s32 fs_id; + int id; + int flags; +#define ORANGEFS_OPT_INTR 0x01 +#define ORANGEFS_OPT_LOCAL_LOCK 0x02 + char devname[ORANGEFS_MAX_SERVER_ADDR_LEN]; + struct super_block *sb; + int mount_pending; + struct list_head list; +}; + +/* + * structure that holds the state of any async I/O operation issued + * through the VFS. Needed especially to handle cancellation requests + * or even completion notification so that the VFS client-side daemon + * can free up its vfs_request slots. + */ +struct orangefs_kiocb_s { + /* the pointer to the task that initiated the AIO */ + struct task_struct *tsk; + + /* pointer to the kiocb that kicked this operation */ + struct kiocb *kiocb; + + /* buffer index that was used for the I/O */ + struct orangefs_bufmap *bufmap; + int buffer_index; + + /* orangefs kernel operation type */ + struct orangefs_kernel_op_s *op; + + /* The user space buffers from/to which I/O is being staged */ + struct iovec *iov; + + /* number of elements in the iovector */ + unsigned long nr_segs; + + /* set to indicate the type of the operation */ + int rw; + + /* file offset */ + loff_t offset; + + /* and the count in bytes */ + size_t bytes_to_be_copied; + + ssize_t bytes_copied; + int needs_cleanup; +}; + +struct orangefs_stats { + unsigned long cache_hits; + unsigned long cache_misses; + unsigned long reads; + unsigned long writes; +}; + +extern struct orangefs_stats g_orangefs_stats; + +/* + * NOTE: See Documentation/filesystems/porting for information + * on implementing FOO_I and properly accessing fs private data + */ +static inline struct orangefs_inode_s *ORANGEFS_I(struct inode *inode) +{ + return container_of(inode, struct orangefs_inode_s, vfs_inode); +} + +static inline struct orangefs_sb_info_s *ORANGEFS_SB(struct super_block *sb) +{ + return (struct orangefs_sb_info_s *) sb->s_fs_info; +} + +/* ino_t descends from "unsigned long", 8 bytes, 64 bits. 
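+ * The 16-byte khandle is folded into those 8 bytes by XORing the two
+ * quarters of each half together, so the derived inode number is
+ * stable for a given handle but not guaranteed collision-free.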
*/ +static inline ino_t orangefs_khandle_to_ino(struct orangefs_khandle *khandle) +{ + union { + unsigned char u[8]; + __u64 ino; + } ihandle; + + ihandle.u[0] = khandle->u[0] ^ khandle->u[4]; + ihandle.u[1] = khandle->u[1] ^ khandle->u[5]; + ihandle.u[2] = khandle->u[2] ^ khandle->u[6]; + ihandle.u[3] = khandle->u[3] ^ khandle->u[7]; + ihandle.u[4] = khandle->u[12] ^ khandle->u[8]; + ihandle.u[5] = khandle->u[13] ^ khandle->u[9]; + ihandle.u[6] = khandle->u[14] ^ khandle->u[10]; + ihandle.u[7] = khandle->u[15] ^ khandle->u[11]; + + return ihandle.ino; +} + +static inline struct orangefs_khandle *get_khandle_from_ino(struct inode *inode) +{ + return &(ORANGEFS_I(inode)->refn.khandle); +} + +static inline __s32 get_fsid_from_ino(struct inode *inode) +{ + return ORANGEFS_I(inode)->refn.fs_id; +} + +static inline ino_t get_ino_from_khandle(struct inode *inode) +{ + struct orangefs_khandle *khandle; + ino_t ino; + + khandle = get_khandle_from_ino(inode); + ino = orangefs_khandle_to_ino(khandle); + return ino; +} + +static inline ino_t get_parent_ino_from_dentry(struct dentry *dentry) +{ + return get_ino_from_khandle(dentry->d_parent->d_inode); +} + +static inline int is_root_handle(struct inode *inode) +{ + gossip_debug(GOSSIP_DCACHE_DEBUG, + "%s: root handle: %pU, this handle: %pU:\n", + __func__, + &ORANGEFS_SB(inode->i_sb)->root_khandle, + get_khandle_from_ino(inode)); + + if (ORANGEFS_khandle_cmp(&(ORANGEFS_SB(inode->i_sb)->root_khandle), + get_khandle_from_ino(inode))) + return 0; + else + return 1; +} + +static inline int match_handle(struct orangefs_khandle resp_handle, + struct inode *inode) +{ + gossip_debug(GOSSIP_DCACHE_DEBUG, + "%s: one handle: %pU, another handle:%pU:\n", + __func__, + &resp_handle, + get_khandle_from_ino(inode)); + + if (ORANGEFS_khandle_cmp(&resp_handle, get_khandle_from_ino(inode))) + return 0; + else + return 1; +} + +/* + * defined in orangefs-cache.c + */ +int op_cache_initialize(void); +int op_cache_finalize(void); +struct orangefs_kernel_op_s *op_alloc(__s32 type); +void orangefs_new_tag(struct orangefs_kernel_op_s *op); +char *get_opname_string(struct orangefs_kernel_op_s *new_op); + +int orangefs_inode_cache_initialize(void); +int orangefs_inode_cache_finalize(void); + +/* + * defined in orangefs-mod.c + */ +void purge_inprogress_ops(void); + +/* + * defined in waitqueue.c + */ +void purge_waiting_ops(void); + +/* + * defined in super.c + */ +struct dentry *orangefs_mount(struct file_system_type *fst, + int flags, + const char *devname, + void *data); + +void orangefs_kill_sb(struct super_block *sb); +int orangefs_remount(struct orangefs_sb_info_s *); + +int fsid_key_table_initialize(void); +void fsid_key_table_finalize(void); + +/* + * defined in inode.c + */ +__u32 convert_to_orangefs_mask(unsigned long lite_mask); +struct inode *orangefs_new_inode(struct super_block *sb, + struct inode *dir, + int mode, + dev_t dev, + struct orangefs_object_kref *ref); + +int orangefs_setattr(struct dentry *dentry, struct iattr *iattr); + +int orangefs_getattr(struct vfsmount *mnt, + struct dentry *dentry, + struct kstat *kstat); + +int orangefs_permission(struct inode *inode, int mask); + +/* + * defined in xattr.c + */ +int orangefs_setxattr(struct dentry *dentry, + const char *name, + const void *value, + size_t size, + int flags); + +ssize_t orangefs_getxattr(struct dentry *dentry, + const char *name, + void *buffer, + size_t size); + +ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size); + +/* + * defined in namei.c + */ +struct inode 
*orangefs_iget(struct super_block *sb, + struct orangefs_object_kref *ref); + +ssize_t orangefs_inode_read(struct inode *inode, + struct iov_iter *iter, + loff_t *offset, + loff_t readahead_size); + +/* + * defined in devorangefs-req.c + */ +int orangefs_dev_init(void); +void orangefs_dev_cleanup(void); +int is_daemon_in_service(void); +bool __is_daemon_in_service(void); + +/* + * defined in orangefs-utils.c + */ +__s32 fsid_of_op(struct orangefs_kernel_op_s *op); + +int orangefs_flush_inode(struct inode *inode); + +ssize_t orangefs_inode_getxattr(struct inode *inode, + const char *prefix, + const char *name, + void *buffer, + size_t size); + +int orangefs_inode_setxattr(struct inode *inode, + const char *prefix, + const char *name, + const void *value, + size_t size, + int flags); + +int orangefs_inode_getattr(struct inode *inode, int new, int size); + +int orangefs_inode_check_changed(struct inode *inode); + +int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr); + +void orangefs_make_bad_inode(struct inode *inode); + +int orangefs_unmount_sb(struct super_block *sb); + +bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op); + +int orangefs_normalize_to_errno(__s32 error_code); + +extern struct mutex devreq_mutex; +extern struct mutex request_mutex; +extern int debug; +extern int op_timeout_secs; +extern int slot_timeout_secs; +extern struct list_head orangefs_superblocks; +extern spinlock_t orangefs_superblocks_lock; +extern struct list_head orangefs_request_list; +extern spinlock_t orangefs_request_list_lock; +extern wait_queue_head_t orangefs_request_list_waitq; +extern struct list_head *htable_ops_in_progress; +extern spinlock_t htable_ops_in_progress_lock; +extern int hash_table_size; + +extern const struct address_space_operations orangefs_address_operations; +extern struct backing_dev_info orangefs_backing_dev_info; +extern struct inode_operations orangefs_file_inode_operations; +extern const struct file_operations orangefs_file_operations; +extern struct inode_operations orangefs_symlink_inode_operations; +extern struct inode_operations orangefs_dir_inode_operations; +extern const struct file_operations orangefs_dir_operations; +extern const struct dentry_operations orangefs_dentry_operations; +extern const struct file_operations orangefs_devreq_file_operations; + +extern wait_queue_head_t orangefs_bufmap_init_waitq; + +/* + * misc convenience macros + */ + +#define ORANGEFS_OP_INTERRUPTIBLE 1 /* service_operation() is interruptible */ +#define ORANGEFS_OP_PRIORITY 2 /* service_operation() is high priority */ +#define ORANGEFS_OP_CANCELLATION 4 /* this is a cancellation */ +#define ORANGEFS_OP_NO_MUTEX 8 /* don't acquire request_mutex */ +#define ORANGEFS_OP_ASYNC 16 /* Queue it, but don't wait */ + +int service_operation(struct orangefs_kernel_op_s *op, + const char *op_name, + int flags); + +#define get_interruptible_flag(inode) \ + ((ORANGEFS_SB(inode->i_sb)->flags & ORANGEFS_OPT_INTR) ? 
\ + ORANGEFS_OP_INTERRUPTIBLE : 0) + +#define fill_default_sys_attrs(sys_attr, type, mode) \ +do { \ + sys_attr.owner = from_kuid(current_user_ns(), current_fsuid()); \ + sys_attr.group = from_kgid(current_user_ns(), current_fsgid()); \ + sys_attr.perms = ORANGEFS_util_translate_mode(mode); \ + sys_attr.mtime = 0; \ + sys_attr.atime = 0; \ + sys_attr.ctime = 0; \ + sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE; \ +} while (0) + +static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size) +{ +#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) + mutex_lock(&inode->i_mutex); +#endif + i_size_write(inode, i_size); +#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) + mutex_unlock(&inode->i_mutex); +#endif +} + +#endif /* __ORANGEFSKERNEL_H */ diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c new file mode 100644 index 000000000000..6f072a8c0de1 --- /dev/null +++ b/fs/orangefs/orangefs-mod.c @@ -0,0 +1,293 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * Changes by Acxiom Corporation to add proc file handler for pvfs2 client + * parameters, Copyright Acxiom Corporation, 2005. + * + * See COPYING in top-level directory. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-debugfs.h" +#include "orangefs-sysfs.h" + +/* ORANGEFS_VERSION is a ./configure define */ +#ifndef ORANGEFS_VERSION +#define ORANGEFS_VERSION "upstream" +#endif + +/* + * global variables declared here + */ + +/* array of client debug keyword/mask values */ +struct client_debug_mask *cdm_array; +int cdm_element_count; + +char kernel_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN] = "none"; +char client_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN]; +char client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN]; + +char *debug_help_string; +int help_string_initialized; +struct dentry *help_file_dentry; +struct dentry *client_debug_dentry; +struct dentry *debug_dir; +int client_verbose_index; +int client_all_index; +struct orangefs_stats g_orangefs_stats; + +/* the size of the hash tables for ops in progress */ +int hash_table_size = 509; + +static ulong module_parm_debug_mask; +__u64 gossip_debug_mask; +struct client_debug_mask client_debug_mask = { NULL, 0, 0 }; +unsigned int kernel_mask_set_mod_init; /* implicitly false */ +int op_timeout_secs = ORANGEFS_DEFAULT_OP_TIMEOUT_SECS; +int slot_timeout_secs = ORANGEFS_DEFAULT_SLOT_TIMEOUT_SECS; + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("ORANGEFS Development Team"); +MODULE_DESCRIPTION("The Linux Kernel VFS interface to ORANGEFS"); +MODULE_PARM_DESC(module_parm_debug_mask, "debugging level (see orangefs-debug.h for values)"); +MODULE_PARM_DESC(op_timeout_secs, "Operation timeout in seconds"); +MODULE_PARM_DESC(slot_timeout_secs, "Slot timeout in seconds"); +MODULE_PARM_DESC(hash_table_size, + "size of hash table for operations in progress"); + +static struct file_system_type orangefs_fs_type = { + .name = "pvfs2", + .mount = orangefs_mount, + .kill_sb = orangefs_kill_sb, + .owner = THIS_MODULE, +}; + +module_param(hash_table_size, int, 0); +module_param(module_parm_debug_mask, ulong, 0644); +module_param(op_timeout_secs, int, 0); +module_param(slot_timeout_secs, int, 0); + +/* synchronizes the request device file */ +DEFINE_MUTEX(devreq_mutex); + +/* + * Blocks non-priority requests from being queued for servicing. 
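+ * (Callers can opt out of taking it with the ORANGEFS_OP_NO_MUTEX
+ * service_operation() flag.)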
This + * could be used for protecting the request list data structure, but + * for now it's only being used to stall the op addition to the request + * list + */ +DEFINE_MUTEX(request_mutex); + +/* hash table for storing operations waiting for matching downcall */ +struct list_head *htable_ops_in_progress; +DEFINE_SPINLOCK(htable_ops_in_progress_lock); + +/* list for queueing upcall operations */ +LIST_HEAD(orangefs_request_list); + +/* used to protect the above orangefs_request_list */ +DEFINE_SPINLOCK(orangefs_request_list_lock); + +/* used for incoming request notification */ +DECLARE_WAIT_QUEUE_HEAD(orangefs_request_list_waitq); + +static int __init orangefs_init(void) +{ + int ret = -1; + __u32 i = 0; + + /* convert input debug mask to a 64-bit unsigned integer */ + gossip_debug_mask = (unsigned long long) module_parm_debug_mask; + + /* + * set the kernel's gossip debug string; invalid mask values will + * be ignored. + */ + debug_mask_to_string(&gossip_debug_mask, 0); + + /* remove any invalid values from the mask */ + debug_string_to_mask(kernel_debug_string, &gossip_debug_mask, 0); + + /* + * if the mask has a non-zero value, then indicate that the mask + * was set when the kernel module was loaded. The orangefs dev ioctl + * command will look at this boolean to determine if the kernel's + * debug mask should be overwritten when the client-core is started. + */ + if (gossip_debug_mask != 0) + kernel_mask_set_mod_init = true; + + pr_info("%s: called with debug mask: :%s: :%llx:\n", + __func__, + kernel_debug_string, + (unsigned long long)gossip_debug_mask); + + ret = bdi_init(&orangefs_backing_dev_info); + + if (ret) + return ret; + + if (op_timeout_secs < 0) + op_timeout_secs = 0; + + if (slot_timeout_secs < 0) + slot_timeout_secs = 0; + + /* initialize global book keeping data structures */ + ret = op_cache_initialize(); + if (ret < 0) + goto err; + + ret = orangefs_inode_cache_initialize(); + if (ret < 0) + goto cleanup_op; + + htable_ops_in_progress = + kcalloc(hash_table_size, sizeof(struct list_head), GFP_KERNEL); + if (!htable_ops_in_progress) { + gossip_err("Failed to initialize op hashtable"); + ret = -ENOMEM; + goto cleanup_inode; + } + + /* initialize a doubly linked at each hash table index */ + for (i = 0; i < hash_table_size; i++) + INIT_LIST_HEAD(&htable_ops_in_progress[i]); + + ret = fsid_key_table_initialize(); + if (ret < 0) + goto cleanup_progress_table; + + /* + * Build the contents of /sys/kernel/debug/orangefs/debug-help + * from the keywords in the kernel keyword/mask array. + * + * The keywords in the client keyword/mask array are + * unknown at boot time. + * + * orangefs_prepare_debugfs_help_string will be used again + * later to rebuild the debug-help file after the client starts + * and passes along the needed info. The argument signifies + * which time orangefs_prepare_debugfs_help_string is being + * called. + */ + ret = orangefs_prepare_debugfs_help_string(1); + if (ret) + goto cleanup_key_table; + + ret = orangefs_debugfs_init(); + if (ret) + goto debugfs_init_failed; + + ret = orangefs_kernel_debug_init(); + if (ret) + goto kernel_debug_init_failed; + + ret = orangefs_sysfs_init(); + if (ret) + goto sysfs_init_failed; + + /* Initialize the orangefsdev subsystem. 
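+	 * This is the character device that the userspace client-core
+	 * daemon reads upcalls from and writes downcalls to (see
+	 * devorangefs-req.c).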
*/ + ret = orangefs_dev_init(); + if (ret < 0) { + gossip_err("%s: could not initialize device subsystem %d!\n", + __func__, + ret); + goto cleanup_device; + } + + ret = register_filesystem(&orangefs_fs_type); + if (ret == 0) { + pr_info("orangefs: module version %s loaded\n", ORANGEFS_VERSION); + ret = 0; + goto out; + } + + orangefs_sysfs_exit(); + +cleanup_device: + orangefs_dev_cleanup(); + +sysfs_init_failed: + +kernel_debug_init_failed: + +debugfs_init_failed: + orangefs_debugfs_cleanup(); + +cleanup_key_table: + fsid_key_table_finalize(); + +cleanup_progress_table: + kfree(htable_ops_in_progress); + +cleanup_inode: + orangefs_inode_cache_finalize(); + +cleanup_op: + op_cache_finalize(); + +err: + bdi_destroy(&orangefs_backing_dev_info); + +out: + return ret; +} + +static void __exit orangefs_exit(void) +{ + int i = 0; + gossip_debug(GOSSIP_INIT_DEBUG, "orangefs: orangefs_exit called\n"); + + unregister_filesystem(&orangefs_fs_type); + orangefs_debugfs_cleanup(); + orangefs_sysfs_exit(); + fsid_key_table_finalize(); + orangefs_dev_cleanup(); + BUG_ON(!list_empty(&orangefs_request_list)); + for (i = 0; i < hash_table_size; i++) + BUG_ON(!list_empty(&htable_ops_in_progress[i])); + + orangefs_inode_cache_finalize(); + op_cache_finalize(); + + kfree(htable_ops_in_progress); + + bdi_destroy(&orangefs_backing_dev_info); + + pr_info("orangefs: module version %s unloaded\n", ORANGEFS_VERSION); +} + +/* + * What we do in this function is to walk the list of operations + * that are in progress in the hash table and mark them as purged as well. + */ +void purge_inprogress_ops(void) +{ + int i; + + for (i = 0; i < hash_table_size; i++) { + struct orangefs_kernel_op_s *op; + struct orangefs_kernel_op_s *next; + + spin_lock(&htable_ops_in_progress_lock); + list_for_each_entry_safe(op, + next, + &htable_ops_in_progress[i], + list) { + set_op_state_purged(op); + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: op:%s: op_state:%d: process:%s:\n", + __func__, + get_opname_string(op), + op->op_state, + current->comm); + } + spin_unlock(&htable_ops_in_progress_lock); + } +} + +module_init(orangefs_init); +module_exit(orangefs_exit); diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c new file mode 100644 index 000000000000..5c03113e3ad2 --- /dev/null +++ b/fs/orangefs/orangefs-sysfs.c @@ -0,0 +1,1772 @@ +/* + * Documentation/ABI/stable/orangefs-sysfs: + * + * What: /sys/fs/orangefs/perf_counter_reset + * Date: June 2015 + * Contact: Mike Marshall <[email protected]> + * Description: + * echo a 0 or a 1 into perf_counter_reset to + * reset all the counters in + * /sys/fs/orangefs/perf_counters + * except ones with PINT_PERF_PRESERVE set. + * + * + * What: /sys/fs/orangefs/perf_counters/... + * Date: Jun 2015 + * Contact: Mike Marshall <[email protected]> + * Description: + * Counters and settings for various caches. + * Read only. + * + * + * What: /sys/fs/orangefs/perf_time_interval_secs + * Date: Jun 2015 + * Contact: Mike Marshall <[email protected]> + * Description: + * Length of perf counter intervals in + * seconds. + * + * + * What: /sys/fs/orangefs/perf_history_size + * Date: Jun 2015 + * Contact: Mike Marshall <[email protected]> + * Description: + * The perf_counters cache statistics have N, or + * perf_history_size, samples. The default is + * one. + * + * Every perf_time_interval_secs the (first) + * samples are reset. + * + * If N is greater than one, the "current" set + * of samples is reset, and the samples from the + * other N-1 intervals remain available. 
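 * 
 *			A hypothetical shell session, assuming a
 *			running client-core:
 * 
 *				# echo 5 > /sys/fs/orangefs/perf_history_size
 *				# cat /sys/fs/orangefs/perf_history_size
 *				5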
+ * + * + * What: /sys/fs/orangefs/op_timeout_secs + * Date: Jun 2015 + * Contact: Mike Marshall <[email protected]> + * Description: + * Service operation timeout in seconds. + * + * + * What: /sys/fs/orangefs/slot_timeout_secs + * Date: Jun 2015 + * Contact: Mike Marshall <[email protected]> + * Description: + * "Slot" timeout in seconds. A "slot" + * is an indexed buffer in the shared + * memory segment used for communication + * between the kernel module and userspace. + * Slots are requested and waited for, + * the wait times out after slot_timeout_secs. + * + * + * What: /sys/fs/orangefs/acache/... + * Date: Jun 2015 + * Contact: Mike Marshall <[email protected]> + * Description: + * Attribute cache configurable settings. + * + * + * What: /sys/fs/orangefs/ncache/... + * Date: Jun 2015 + * Contact: Mike Marshall <[email protected]> + * Description: + * Name cache configurable settings. + * + * + * What: /sys/fs/orangefs/capcache/... + * Date: Jun 2015 + * Contact: Mike Marshall <[email protected]> + * Description: + * Capability cache configurable settings. + * + * + * What: /sys/fs/orangefs/ccache/... + * Date: Jun 2015 + * Contact: Mike Marshall <[email protected]> + * Description: + * Credential cache configurable settings. + * + */ + +#include <linux/fs.h> +#include <linux/kobject.h> +#include <linux/string.h> +#include <linux/sysfs.h> +#include <linux/module.h> +#include <linux/init.h> + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-sysfs.h" + +#define ORANGEFS_KOBJ_ID "orangefs" +#define ACACHE_KOBJ_ID "acache" +#define CAPCACHE_KOBJ_ID "capcache" +#define CCACHE_KOBJ_ID "ccache" +#define NCACHE_KOBJ_ID "ncache" +#define PC_KOBJ_ID "pc" +#define STATS_KOBJ_ID "stats" + +struct orangefs_obj { + struct kobject kobj; + int op_timeout_secs; + int perf_counter_reset; + int perf_history_size; + int perf_time_interval_secs; + int slot_timeout_secs; +}; + +struct acache_orangefs_obj { + struct kobject kobj; + int hard_limit; + int reclaim_percentage; + int soft_limit; + int timeout_msecs; +}; + +struct capcache_orangefs_obj { + struct kobject kobj; + int hard_limit; + int reclaim_percentage; + int soft_limit; + int timeout_secs; +}; + +struct ccache_orangefs_obj { + struct kobject kobj; + int hard_limit; + int reclaim_percentage; + int soft_limit; + int timeout_secs; +}; + +struct ncache_orangefs_obj { + struct kobject kobj; + int hard_limit; + int reclaim_percentage; + int soft_limit; + int timeout_msecs; +}; + +struct pc_orangefs_obj { + struct kobject kobj; + char *acache; + char *capcache; + char *ncache; +}; + +struct stats_orangefs_obj { + struct kobject kobj; + int reads; + int writes; +}; + +struct orangefs_attribute { + struct attribute attr; + ssize_t (*show)(struct orangefs_obj *orangefs_obj, + struct orangefs_attribute *attr, + char *buf); + ssize_t (*store)(struct orangefs_obj *orangefs_obj, + struct orangefs_attribute *attr, + const char *buf, + size_t count); +}; + +struct acache_orangefs_attribute { + struct attribute attr; + ssize_t (*show)(struct acache_orangefs_obj *acache_orangefs_obj, + struct acache_orangefs_attribute *attr, + char *buf); + ssize_t (*store)(struct acache_orangefs_obj *acache_orangefs_obj, + struct acache_orangefs_attribute *attr, + const char *buf, + size_t count); +}; + +struct capcache_orangefs_attribute { + struct attribute attr; + ssize_t (*show)(struct capcache_orangefs_obj *capcache_orangefs_obj, + struct capcache_orangefs_attribute *attr, + char *buf); + ssize_t (*store)(struct capcache_orangefs_obj 
*capcache_orangefs_obj, + struct capcache_orangefs_attribute *attr, + const char *buf, + size_t count); +}; + +struct ccache_orangefs_attribute { + struct attribute attr; + ssize_t (*show)(struct ccache_orangefs_obj *ccache_orangefs_obj, + struct ccache_orangefs_attribute *attr, + char *buf); + ssize_t (*store)(struct ccache_orangefs_obj *ccache_orangefs_obj, + struct ccache_orangefs_attribute *attr, + const char *buf, + size_t count); +}; + +struct ncache_orangefs_attribute { + struct attribute attr; + ssize_t (*show)(struct ncache_orangefs_obj *ncache_orangefs_obj, + struct ncache_orangefs_attribute *attr, + char *buf); + ssize_t (*store)(struct ncache_orangefs_obj *ncache_orangefs_obj, + struct ncache_orangefs_attribute *attr, + const char *buf, + size_t count); +}; + +struct pc_orangefs_attribute { + struct attribute attr; + ssize_t (*show)(struct pc_orangefs_obj *pc_orangefs_obj, + struct pc_orangefs_attribute *attr, + char *buf); + ssize_t (*store)(struct pc_orangefs_obj *pc_orangefs_obj, + struct pc_orangefs_attribute *attr, + const char *buf, + size_t count); +}; + +struct stats_orangefs_attribute { + struct attribute attr; + ssize_t (*show)(struct stats_orangefs_obj *stats_orangefs_obj, + struct stats_orangefs_attribute *attr, + char *buf); + ssize_t (*store)(struct stats_orangefs_obj *stats_orangefs_obj, + struct stats_orangefs_attribute *attr, + const char *buf, + size_t count); +}; + +static ssize_t orangefs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct orangefs_attribute *attribute; + struct orangefs_obj *orangefs_obj; + int rc; + + attribute = container_of(attr, struct orangefs_attribute, attr); + orangefs_obj = container_of(kobj, struct orangefs_obj, kobj); + + if (!attribute->show) { + rc = -EIO; + goto out; + } + + rc = attribute->show(orangefs_obj, attribute, buf); + +out: + return rc; +} + +static ssize_t orangefs_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, + size_t len) +{ + struct orangefs_attribute *attribute; + struct orangefs_obj *orangefs_obj; + int rc; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "orangefs_attr_store: start\n"); + + attribute = container_of(attr, struct orangefs_attribute, attr); + orangefs_obj = container_of(kobj, struct orangefs_obj, kobj); + + if (!attribute->store) { + rc = -EIO; + goto out; + } + + rc = attribute->store(orangefs_obj, attribute, buf, len); + +out: + return rc; +} + +static const struct sysfs_ops orangefs_sysfs_ops = { + .show = orangefs_attr_show, + .store = orangefs_attr_store, +}; + +static ssize_t acache_orangefs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct acache_orangefs_attribute *attribute; + struct acache_orangefs_obj *acache_orangefs_obj; + int rc; + + attribute = container_of(attr, struct acache_orangefs_attribute, attr); + acache_orangefs_obj = + container_of(kobj, struct acache_orangefs_obj, kobj); + + if (!attribute->show) { + rc = -EIO; + goto out; + } + + rc = attribute->show(acache_orangefs_obj, attribute, buf); + +out: + return rc; +} + +static ssize_t acache_orangefs_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, + size_t len) +{ + struct acache_orangefs_attribute *attribute; + struct acache_orangefs_obj *acache_orangefs_obj; + int rc; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "acache_orangefs_attr_store: start\n"); + + attribute = container_of(attr, struct acache_orangefs_attribute, attr); + acache_orangefs_obj = + container_of(kobj, struct acache_orangefs_obj, kobj); + + if 
(!attribute->store) { + rc = -EIO; + goto out; + } + + rc = attribute->store(acache_orangefs_obj, attribute, buf, len); + +out: + return rc; +} + +static const struct sysfs_ops acache_orangefs_sysfs_ops = { + .show = acache_orangefs_attr_show, + .store = acache_orangefs_attr_store, +}; + +static ssize_t capcache_orangefs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct capcache_orangefs_attribute *attribute; + struct capcache_orangefs_obj *capcache_orangefs_obj; + int rc; + + attribute = + container_of(attr, struct capcache_orangefs_attribute, attr); + capcache_orangefs_obj = + container_of(kobj, struct capcache_orangefs_obj, kobj); + + if (!attribute->show) { + rc = -EIO; + goto out; + } + + rc = attribute->show(capcache_orangefs_obj, attribute, buf); + +out: + return rc; +} + +static ssize_t capcache_orangefs_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, + size_t len) +{ + struct capcache_orangefs_attribute *attribute; + struct capcache_orangefs_obj *capcache_orangefs_obj; + int rc; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "capcache_orangefs_attr_store: start\n"); + + attribute = + container_of(attr, struct capcache_orangefs_attribute, attr); + capcache_orangefs_obj = + container_of(kobj, struct capcache_orangefs_obj, kobj); + + if (!attribute->store) { + rc = -EIO; + goto out; + } + + rc = attribute->store(capcache_orangefs_obj, attribute, buf, len); + +out: + return rc; +} + +static const struct sysfs_ops capcache_orangefs_sysfs_ops = { + .show = capcache_orangefs_attr_show, + .store = capcache_orangefs_attr_store, +}; + +static ssize_t ccache_orangefs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct ccache_orangefs_attribute *attribute; + struct ccache_orangefs_obj *ccache_orangefs_obj; + int rc; + + attribute = + container_of(attr, struct ccache_orangefs_attribute, attr); + ccache_orangefs_obj = + container_of(kobj, struct ccache_orangefs_obj, kobj); + + if (!attribute->show) { + rc = -EIO; + goto out; + } + + rc = attribute->show(ccache_orangefs_obj, attribute, buf); + +out: + return rc; +} + +static ssize_t ccache_orangefs_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, + size_t len) +{ + struct ccache_orangefs_attribute *attribute; + struct ccache_orangefs_obj *ccache_orangefs_obj; + int rc; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "ccache_orangefs_attr_store: start\n"); + + attribute = + container_of(attr, struct ccache_orangefs_attribute, attr); + ccache_orangefs_obj = + container_of(kobj, struct ccache_orangefs_obj, kobj); + + if (!attribute->store) { + rc = -EIO; + goto out; + } + + rc = attribute->store(ccache_orangefs_obj, attribute, buf, len); + +out: + return rc; +} + +static const struct sysfs_ops ccache_orangefs_sysfs_ops = { + .show = ccache_orangefs_attr_show, + .store = ccache_orangefs_attr_store, +}; + +static ssize_t ncache_orangefs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct ncache_orangefs_attribute *attribute; + struct ncache_orangefs_obj *ncache_orangefs_obj; + int rc; + + attribute = container_of(attr, struct ncache_orangefs_attribute, attr); + ncache_orangefs_obj = + container_of(kobj, struct ncache_orangefs_obj, kobj); + + if (!attribute->show) { + rc = -EIO; + goto out; + } + + rc = attribute->show(ncache_orangefs_obj, attribute, buf); + +out: + return rc; +} + +static ssize_t ncache_orangefs_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, + size_t len) +{ + struct 
ncache_orangefs_attribute *attribute; + struct ncache_orangefs_obj *ncache_orangefs_obj; + int rc; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "ncache_orangefs_attr_store: start\n"); + + attribute = container_of(attr, struct ncache_orangefs_attribute, attr); + ncache_orangefs_obj = + container_of(kobj, struct ncache_orangefs_obj, kobj); + + if (!attribute->store) { + rc = -EIO; + goto out; + } + + rc = attribute->store(ncache_orangefs_obj, attribute, buf, len); + +out: + return rc; +} + +static const struct sysfs_ops ncache_orangefs_sysfs_ops = { + .show = ncache_orangefs_attr_show, + .store = ncache_orangefs_attr_store, +}; + +static ssize_t pc_orangefs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct pc_orangefs_attribute *attribute; + struct pc_orangefs_obj *pc_orangefs_obj; + int rc; + + attribute = container_of(attr, struct pc_orangefs_attribute, attr); + pc_orangefs_obj = + container_of(kobj, struct pc_orangefs_obj, kobj); + + if (!attribute->show) { + rc = -EIO; + goto out; + } + + rc = attribute->show(pc_orangefs_obj, attribute, buf); + +out: + return rc; +} + +static const struct sysfs_ops pc_orangefs_sysfs_ops = { + .show = pc_orangefs_attr_show, +}; + +static ssize_t stats_orangefs_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct stats_orangefs_attribute *attribute; + struct stats_orangefs_obj *stats_orangefs_obj; + int rc; + + attribute = container_of(attr, struct stats_orangefs_attribute, attr); + stats_orangefs_obj = + container_of(kobj, struct stats_orangefs_obj, kobj); + + if (!attribute->show) { + rc = -EIO; + goto out; + } + + rc = attribute->show(stats_orangefs_obj, attribute, buf); + +out: + return rc; +} + +static const struct sysfs_ops stats_orangefs_sysfs_ops = { + .show = stats_orangefs_attr_show, +}; + +static void orangefs_release(struct kobject *kobj) +{ + struct orangefs_obj *orangefs_obj; + + orangefs_obj = container_of(kobj, struct orangefs_obj, kobj); + kfree(orangefs_obj); +} + +static void acache_orangefs_release(struct kobject *kobj) +{ + struct acache_orangefs_obj *acache_orangefs_obj; + + acache_orangefs_obj = + container_of(kobj, struct acache_orangefs_obj, kobj); + kfree(acache_orangefs_obj); +} + +static void capcache_orangefs_release(struct kobject *kobj) +{ + struct capcache_orangefs_obj *capcache_orangefs_obj; + + capcache_orangefs_obj = + container_of(kobj, struct capcache_orangefs_obj, kobj); + kfree(capcache_orangefs_obj); +} + +static void ccache_orangefs_release(struct kobject *kobj) +{ + struct ccache_orangefs_obj *ccache_orangefs_obj; + + ccache_orangefs_obj = + container_of(kobj, struct ccache_orangefs_obj, kobj); + kfree(ccache_orangefs_obj); +} + +static void ncache_orangefs_release(struct kobject *kobj) +{ + struct ncache_orangefs_obj *ncache_orangefs_obj; + + ncache_orangefs_obj = + container_of(kobj, struct ncache_orangefs_obj, kobj); + kfree(ncache_orangefs_obj); +} + +static void pc_orangefs_release(struct kobject *kobj) +{ + struct pc_orangefs_obj *pc_orangefs_obj; + + pc_orangefs_obj = + container_of(kobj, struct pc_orangefs_obj, kobj); + kfree(pc_orangefs_obj); +} + +static void stats_orangefs_release(struct kobject *kobj) +{ + struct stats_orangefs_obj *stats_orangefs_obj; + + stats_orangefs_obj = + container_of(kobj, struct stats_orangefs_obj, kobj); + kfree(stats_orangefs_obj); +} + +static ssize_t sysfs_int_show(char *kobj_id, char *buf, void *attr) +{ + int rc = -EIO; + struct orangefs_attribute *orangefs_attr; + struct stats_orangefs_attribute 
*stats_orangefs_attr; + + gossip_debug(GOSSIP_SYSFS_DEBUG, "sysfs_int_show: id:%s:\n", kobj_id); + + if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) { + orangefs_attr = (struct orangefs_attribute *)attr; + + if (!strcmp(orangefs_attr->attr.name, "op_timeout_secs")) { + rc = scnprintf(buf, + PAGE_SIZE, + "%d\n", + op_timeout_secs); + goto out; + } else if (!strcmp(orangefs_attr->attr.name, + "slot_timeout_secs")) { + rc = scnprintf(buf, + PAGE_SIZE, + "%d\n", + slot_timeout_secs); + goto out; + } else { + goto out; + } + + } else if (!strcmp(kobj_id, STATS_KOBJ_ID)) { + stats_orangefs_attr = (struct stats_orangefs_attribute *)attr; + + if (!strcmp(stats_orangefs_attr->attr.name, "reads")) { + rc = scnprintf(buf, + PAGE_SIZE, + "%lu\n", + g_orangefs_stats.reads); + goto out; + } else if (!strcmp(stats_orangefs_attr->attr.name, "writes")) { + rc = scnprintf(buf, + PAGE_SIZE, + "%lu\n", + g_orangefs_stats.writes); + goto out; + } else { + goto out; + } + } + +out: + + return rc; +} + +static ssize_t int_orangefs_show(struct orangefs_obj *orangefs_obj, + struct orangefs_attribute *attr, + char *buf) +{ + int rc; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "int_orangefs_show:start attr->attr.name:%s:\n", + attr->attr.name); + + rc = sysfs_int_show(ORANGEFS_KOBJ_ID, buf, (void *) attr); + + return rc; +} + +static ssize_t int_stats_show(struct stats_orangefs_obj *stats_orangefs_obj, + struct stats_orangefs_attribute *attr, + char *buf) +{ + int rc; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "int_stats_show:start attr->attr.name:%s:\n", + attr->attr.name); + + rc = sysfs_int_show(STATS_KOBJ_ID, buf, (void *) attr); + + return rc; +} + +static ssize_t int_store(struct orangefs_obj *orangefs_obj, + struct orangefs_attribute *attr, + const char *buf, + size_t count) +{ + int rc = 0; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "int_store: start attr->attr.name:%s: buf:%s:\n", + attr->attr.name, buf); + + if (!strcmp(attr->attr.name, "op_timeout_secs")) { + rc = kstrtoint(buf, 0, &op_timeout_secs); + goto out; + } else if (!strcmp(attr->attr.name, "slot_timeout_secs")) { + rc = kstrtoint(buf, 0, &slot_timeout_secs); + goto out; + } else { + goto out; + } + +out: + if (rc) + rc = -EINVAL; + else + rc = count; + + return rc; +} + +/* + * obtain attribute values from userspace with a service operation. + */ +static int sysfs_service_op_show(char *kobj_id, char *buf, void *attr) +{ + struct orangefs_kernel_op_s *new_op = NULL; + int rc = 0; + char *ser_op_type = NULL; + struct orangefs_attribute *orangefs_attr; + struct acache_orangefs_attribute *acache_attr; + struct capcache_orangefs_attribute *capcache_attr; + struct ccache_orangefs_attribute *ccache_attr; + struct ncache_orangefs_attribute *ncache_attr; + struct pc_orangefs_attribute *pc_attr; + __u32 op_alloc_type; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "sysfs_service_op_show: id:%s:\n", + kobj_id); + + if (strcmp(kobj_id, PC_KOBJ_ID)) + op_alloc_type = ORANGEFS_VFS_OP_PARAM; + else + op_alloc_type = ORANGEFS_VFS_OP_PERF_COUNT; + + new_op = op_alloc(op_alloc_type); + if (!new_op) + return -ENOMEM; + + /* Can't do a service_operation if the client is not running... 
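+	 * is_daemon_in_service() returns zero while the userspace
+	 * client-core is up, and an error otherwise, so bail out early
+	 * rather than queueing an upcall that nothing will service.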
*/ + rc = is_daemon_in_service(); + if (rc) { + pr_info("%s: Client not running :%d:\n", + __func__, + is_daemon_in_service()); + goto out; + } + + if (strcmp(kobj_id, PC_KOBJ_ID)) + new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_GET; + + if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) { + orangefs_attr = (struct orangefs_attribute *)attr; + + if (!strcmp(orangefs_attr->attr.name, "perf_history_size")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE; + else if (!strcmp(orangefs_attr->attr.name, + "perf_time_interval_secs")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS; + else if (!strcmp(orangefs_attr->attr.name, + "perf_counter_reset")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_PERF_RESET; + + } else if (!strcmp(kobj_id, ACACHE_KOBJ_ID)) { + acache_attr = (struct acache_orangefs_attribute *)attr; + + if (!strcmp(acache_attr->attr.name, "timeout_msecs")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS; + + if (!strcmp(acache_attr->attr.name, "hard_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT; + + if (!strcmp(acache_attr->attr.name, "soft_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT; + + if (!strcmp(acache_attr->attr.name, "reclaim_percentage")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE; + + } else if (!strcmp(kobj_id, CAPCACHE_KOBJ_ID)) { + capcache_attr = (struct capcache_orangefs_attribute *)attr; + + if (!strcmp(capcache_attr->attr.name, "timeout_secs")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS; + + if (!strcmp(capcache_attr->attr.name, "hard_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT; + + if (!strcmp(capcache_attr->attr.name, "soft_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT; + + if (!strcmp(capcache_attr->attr.name, "reclaim_percentage")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE; + + } else if (!strcmp(kobj_id, CCACHE_KOBJ_ID)) { + ccache_attr = (struct ccache_orangefs_attribute *)attr; + + if (!strcmp(ccache_attr->attr.name, "timeout_secs")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS; + + if (!strcmp(ccache_attr->attr.name, "hard_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT; + + if (!strcmp(ccache_attr->attr.name, "soft_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT; + + if (!strcmp(ccache_attr->attr.name, "reclaim_percentage")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE; + + } else if (!strcmp(kobj_id, NCACHE_KOBJ_ID)) { + ncache_attr = (struct ncache_orangefs_attribute *)attr; + + if (!strcmp(ncache_attr->attr.name, "timeout_msecs")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS; + + if (!strcmp(ncache_attr->attr.name, "hard_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT; + + if (!strcmp(ncache_attr->attr.name, "soft_limit")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT; + + if (!strcmp(ncache_attr->attr.name, "reclaim_percentage")) + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE; + + } else if (!strcmp(kobj_id, PC_KOBJ_ID)) { + pc_attr = (struct 
pc_orangefs_attribute *)attr; + + if (!strcmp(pc_attr->attr.name, ACACHE_KOBJ_ID)) + new_op->upcall.req.perf_count.type = + ORANGEFS_PERF_COUNT_REQUEST_ACACHE; + + if (!strcmp(pc_attr->attr.name, CAPCACHE_KOBJ_ID)) + new_op->upcall.req.perf_count.type = + ORANGEFS_PERF_COUNT_REQUEST_CAPCACHE; + + if (!strcmp(pc_attr->attr.name, NCACHE_KOBJ_ID)) + new_op->upcall.req.perf_count.type = + ORANGEFS_PERF_COUNT_REQUEST_NCACHE; + + } else { + gossip_err("sysfs_service_op_show: unknown kobj_id:%s:\n", + kobj_id); + rc = -EINVAL; + goto out; + } + + + if (strcmp(kobj_id, PC_KOBJ_ID)) + ser_op_type = "orangefs_param"; + else + ser_op_type = "orangefs_perf_count"; + + /* + * The service_operation will return an errno return code on + * error, and zero on success. + */ + rc = service_operation(new_op, ser_op_type, ORANGEFS_OP_INTERRUPTIBLE); + +out: + if (!rc) { + if (strcmp(kobj_id, PC_KOBJ_ID)) { + rc = scnprintf(buf, + PAGE_SIZE, + "%d\n", + (int)new_op->downcall.resp.param.value); + } else { + rc = scnprintf( + buf, + PAGE_SIZE, + "%s", + new_op->downcall.resp.perf_count.buffer); + } + } + + op_release(new_op); + + return rc; + +} + +static ssize_t service_orangefs_show(struct orangefs_obj *orangefs_obj, + struct orangefs_attribute *attr, + char *buf) +{ + int rc = 0; + + rc = sysfs_service_op_show(ORANGEFS_KOBJ_ID, buf, (void *)attr); + + return rc; +} + +static ssize_t + service_acache_show(struct acache_orangefs_obj *acache_orangefs_obj, + struct acache_orangefs_attribute *attr, + char *buf) +{ + int rc = 0; + + rc = sysfs_service_op_show(ACACHE_KOBJ_ID, buf, (void *)attr); + + return rc; +} + +static ssize_t service_capcache_show(struct capcache_orangefs_obj + *capcache_orangefs_obj, + struct capcache_orangefs_attribute *attr, + char *buf) +{ + int rc = 0; + + rc = sysfs_service_op_show(CAPCACHE_KOBJ_ID, buf, (void *)attr); + + return rc; +} + +static ssize_t service_ccache_show(struct ccache_orangefs_obj + *ccache_orangefs_obj, + struct ccache_orangefs_attribute *attr, + char *buf) +{ + int rc = 0; + + rc = sysfs_service_op_show(CCACHE_KOBJ_ID, buf, (void *)attr); + + return rc; +} + +static ssize_t + service_ncache_show(struct ncache_orangefs_obj *ncache_orangefs_obj, + struct ncache_orangefs_attribute *attr, + char *buf) +{ + int rc = 0; + + rc = sysfs_service_op_show(NCACHE_KOBJ_ID, buf, (void *)attr); + + return rc; +} + +static ssize_t + service_pc_show(struct pc_orangefs_obj *pc_orangefs_obj, + struct pc_orangefs_attribute *attr, + char *buf) +{ + int rc = 0; + + rc = sysfs_service_op_show(PC_KOBJ_ID, buf, (void *)attr); + + return rc; +} + +/* + * pass attribute values back to userspace with a service operation. + * + * We have to do a memory allocation, an sscanf and a service operation. + * And we have to evaluate what the user entered, to make sure the + * value is within the range supported by the attribute. So, there's + * a lot of return code checking and mapping going on here. + * + * We want to return 1 if we think everything went OK, and + * EINVAL if not. 
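+ *
+ * (The per-cache *_store wrappers below turn that 1 into the byte
+ * count sysfs expects, and pass any -errno straight back.)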
+ */ +static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr) +{ + struct orangefs_kernel_op_s *new_op = NULL; + int val = 0; + int rc = 0; + struct orangefs_attribute *orangefs_attr; + struct acache_orangefs_attribute *acache_attr; + struct capcache_orangefs_attribute *capcache_attr; + struct ccache_orangefs_attribute *ccache_attr; + struct ncache_orangefs_attribute *ncache_attr; + + gossip_debug(GOSSIP_SYSFS_DEBUG, + "sysfs_service_op_store: id:%s:\n", + kobj_id); + + new_op = op_alloc(ORANGEFS_VFS_OP_PARAM); + if (!new_op) + return -EINVAL; /* sic */ + + /* Can't do a service_operation if the client is not running... */ + rc = is_daemon_in_service(); + if (rc) { + pr_info("%s: Client not running :%d:\n", + __func__, + is_daemon_in_service()); + goto out; + } + + /* + * The value we want to send back to userspace is in buf. + */ + rc = kstrtoint(buf, 0, &val); + if (rc) + goto out; + + if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) { + orangefs_attr = (struct orangefs_attribute *)attr; + + if (!strcmp(orangefs_attr->attr.name, "perf_history_size")) { + if (val > 0) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(orangefs_attr->attr.name, + "perf_time_interval_secs")) { + if (val > 0) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(orangefs_attr->attr.name, + "perf_counter_reset")) { + if ((val == 0) || (val == 1)) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_PERF_RESET; + } else { + rc = 0; + goto out; + } + } + + } else if (!strcmp(kobj_id, ACACHE_KOBJ_ID)) { + acache_attr = (struct acache_orangefs_attribute *)attr; + + if (!strcmp(acache_attr->attr.name, "hard_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(acache_attr->attr.name, "soft_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(acache_attr->attr.name, + "reclaim_percentage")) { + if ((val > -1) && (val < 101)) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(acache_attr->attr.name, "timeout_msecs")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS; + } else { + rc = 0; + goto out; + } + } + + } else if (!strcmp(kobj_id, CAPCACHE_KOBJ_ID)) { + capcache_attr = (struct capcache_orangefs_attribute *)attr; + + if (!strcmp(capcache_attr->attr.name, "hard_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(capcache_attr->attr.name, "soft_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(capcache_attr->attr.name, + "reclaim_percentage")) { + if ((val > -1) && (val < 101)) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(capcache_attr->attr.name, "timeout_secs")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS; + } else { + rc = 0; + goto out; + } + } + + } else 
if (!strcmp(kobj_id, CCACHE_KOBJ_ID)) { + ccache_attr = (struct ccache_orangefs_attribute *)attr; + + if (!strcmp(ccache_attr->attr.name, "hard_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(ccache_attr->attr.name, "soft_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(ccache_attr->attr.name, + "reclaim_percentage")) { + if ((val > -1) && (val < 101)) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(ccache_attr->attr.name, "timeout_secs")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS; + } else { + rc = 0; + goto out; + } + } + + } else if (!strcmp(kobj_id, NCACHE_KOBJ_ID)) { + ncache_attr = (struct ncache_orangefs_attribute *)attr; + + if (!strcmp(ncache_attr->attr.name, "hard_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(ncache_attr->attr.name, "soft_limit")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(ncache_attr->attr.name, + "reclaim_percentage")) { + if ((val > -1) && (val < 101)) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE; + } else { + rc = 0; + goto out; + } + } else if (!strcmp(ncache_attr->attr.name, "timeout_msecs")) { + if (val > -1) { + new_op->upcall.req.param.op = + ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS; + } else { + rc = 0; + goto out; + } + } + + } else { + gossip_err("sysfs_service_op_store: unknown kobj_id:%s:\n", + kobj_id); + rc = -EINVAL; + goto out; + } + + new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET; + + new_op->upcall.req.param.value = val; + + /* + * The service_operation will return a errno return code on + * error, and zero on success. + */ + rc = service_operation(new_op, "orangefs_param", ORANGEFS_OP_INTERRUPTIBLE); + + if (rc < 0) { + gossip_err("sysfs_service_op_store: service op returned:%d:\n", + rc); + rc = 0; + } else { + rc = 1; + } + +out: + op_release(new_op); + + if (rc == -ENOMEM || rc == 0) + rc = -EINVAL; + + return rc; +} + +static ssize_t + service_orangefs_store(struct orangefs_obj *orangefs_obj, + struct orangefs_attribute *attr, + const char *buf, + size_t count) +{ + int rc = 0; + + rc = sysfs_service_op_store(ORANGEFS_KOBJ_ID, buf, (void *) attr); + + /* rc should have an errno value if the service_op went bad. */ + if (rc == 1) + rc = count; + + return rc; +} + +static ssize_t + service_acache_store(struct acache_orangefs_obj *acache_orangefs_obj, + struct acache_orangefs_attribute *attr, + const char *buf, + size_t count) +{ + int rc = 0; + + rc = sysfs_service_op_store(ACACHE_KOBJ_ID, buf, (void *) attr); + + /* rc should have an errno value if the service_op went bad. */ + if (rc == 1) + rc = count; + + return rc; +} + +static ssize_t + service_capcache_store(struct capcache_orangefs_obj + *capcache_orangefs_obj, + struct capcache_orangefs_attribute *attr, + const char *buf, + size_t count) +{ + int rc = 0; + + rc = sysfs_service_op_store(CAPCACHE_KOBJ_ID, buf, (void *) attr); + + /* rc should have an errno value if the service_op went bad. 
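+	 * (sysfs_service_op_store() maps out-of-range values and failed
+	 * service ops to -EINVAL before returning.)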
*/ + if (rc == 1) + rc = count; + + return rc; +} + +static ssize_t service_ccache_store(struct ccache_orangefs_obj + *ccache_orangefs_obj, + struct ccache_orangefs_attribute *attr, + const char *buf, + size_t count) +{ + int rc = 0; + + rc = sysfs_service_op_store(CCACHE_KOBJ_ID, buf, (void *) attr); + + /* rc should have an errno value if the service_op went bad. */ + if (rc == 1) + rc = count; + + return rc; +} + +static ssize_t + service_ncache_store(struct ncache_orangefs_obj *ncache_orangefs_obj, + struct ncache_orangefs_attribute *attr, + const char *buf, + size_t count) +{ + int rc = 0; + + rc = sysfs_service_op_store(NCACHE_KOBJ_ID, buf, (void *) attr); + + /* rc should have an errno value if the service_op went bad. */ + if (rc == 1) + rc = count; + + return rc; +} + +static struct orangefs_attribute op_timeout_secs_attribute = + __ATTR(op_timeout_secs, 0664, int_orangefs_show, int_store); + +static struct orangefs_attribute slot_timeout_secs_attribute = + __ATTR(slot_timeout_secs, 0664, int_orangefs_show, int_store); + +static struct orangefs_attribute perf_counter_reset_attribute = + __ATTR(perf_counter_reset, + 0664, + service_orangefs_show, + service_orangefs_store); + +static struct orangefs_attribute perf_history_size_attribute = + __ATTR(perf_history_size, + 0664, + service_orangefs_show, + service_orangefs_store); + +static struct orangefs_attribute perf_time_interval_secs_attribute = + __ATTR(perf_time_interval_secs, + 0664, + service_orangefs_show, + service_orangefs_store); + +static struct attribute *orangefs_default_attrs[] = { + &op_timeout_secs_attribute.attr, + &slot_timeout_secs_attribute.attr, + &perf_counter_reset_attribute.attr, + &perf_history_size_attribute.attr, + &perf_time_interval_secs_attribute.attr, + NULL, +}; + +static struct kobj_type orangefs_ktype = { + .sysfs_ops = &orangefs_sysfs_ops, + .release = orangefs_release, + .default_attrs = orangefs_default_attrs, +}; + +static struct acache_orangefs_attribute acache_hard_limit_attribute = + __ATTR(hard_limit, + 0664, + service_acache_show, + service_acache_store); + +static struct acache_orangefs_attribute acache_reclaim_percent_attribute = + __ATTR(reclaim_percentage, + 0664, + service_acache_show, + service_acache_store); + +static struct acache_orangefs_attribute acache_soft_limit_attribute = + __ATTR(soft_limit, + 0664, + service_acache_show, + service_acache_store); + +static struct acache_orangefs_attribute acache_timeout_msecs_attribute = + __ATTR(timeout_msecs, + 0664, + service_acache_show, + service_acache_store); + +static struct attribute *acache_orangefs_default_attrs[] = { + &acache_hard_limit_attribute.attr, + &acache_reclaim_percent_attribute.attr, + &acache_soft_limit_attribute.attr, + &acache_timeout_msecs_attribute.attr, + NULL, +}; + +static struct kobj_type acache_orangefs_ktype = { + .sysfs_ops = &acache_orangefs_sysfs_ops, + .release = acache_orangefs_release, + .default_attrs = acache_orangefs_default_attrs, +}; + +static struct capcache_orangefs_attribute capcache_hard_limit_attribute = + __ATTR(hard_limit, + 0664, + service_capcache_show, + service_capcache_store); + +static struct capcache_orangefs_attribute capcache_reclaim_percent_attribute = + __ATTR(reclaim_percentage, + 0664, + service_capcache_show, + service_capcache_store); + +static struct capcache_orangefs_attribute capcache_soft_limit_attribute = + __ATTR(soft_limit, + 0664, + service_capcache_show, + service_capcache_store); + +static struct capcache_orangefs_attribute capcache_timeout_secs_attribute = + 
__ATTR(timeout_secs, + 0664, + service_capcache_show, + service_capcache_store); + +static struct attribute *capcache_orangefs_default_attrs[] = { + &capcache_hard_limit_attribute.attr, + &capcache_reclaim_percent_attribute.attr, + &capcache_soft_limit_attribute.attr, + &capcache_timeout_secs_attribute.attr, + NULL, +}; + +static struct kobj_type capcache_orangefs_ktype = { + .sysfs_ops = &capcache_orangefs_sysfs_ops, + .release = capcache_orangefs_release, + .default_attrs = capcache_orangefs_default_attrs, +}; + +static struct ccache_orangefs_attribute ccache_hard_limit_attribute = + __ATTR(hard_limit, + 0664, + service_ccache_show, + service_ccache_store); + +static struct ccache_orangefs_attribute ccache_reclaim_percent_attribute = + __ATTR(reclaim_percentage, + 0664, + service_ccache_show, + service_ccache_store); + +static struct ccache_orangefs_attribute ccache_soft_limit_attribute = + __ATTR(soft_limit, + 0664, + service_ccache_show, + service_ccache_store); + +static struct ccache_orangefs_attribute ccache_timeout_secs_attribute = + __ATTR(timeout_secs, + 0664, + service_ccache_show, + service_ccache_store); + +static struct attribute *ccache_orangefs_default_attrs[] = { + &ccache_hard_limit_attribute.attr, + &ccache_reclaim_percent_attribute.attr, + &ccache_soft_limit_attribute.attr, + &ccache_timeout_secs_attribute.attr, + NULL, +}; + +static struct kobj_type ccache_orangefs_ktype = { + .sysfs_ops = &ccache_orangefs_sysfs_ops, + .release = ccache_orangefs_release, + .default_attrs = ccache_orangefs_default_attrs, +}; + +static struct ncache_orangefs_attribute ncache_hard_limit_attribute = + __ATTR(hard_limit, + 0664, + service_ncache_show, + service_ncache_store); + +static struct ncache_orangefs_attribute ncache_reclaim_percent_attribute = + __ATTR(reclaim_percentage, + 0664, + service_ncache_show, + service_ncache_store); + +static struct ncache_orangefs_attribute ncache_soft_limit_attribute = + __ATTR(soft_limit, + 0664, + service_ncache_show, + service_ncache_store); + +static struct ncache_orangefs_attribute ncache_timeout_msecs_attribute = + __ATTR(timeout_msecs, + 0664, + service_ncache_show, + service_ncache_store); + +static struct attribute *ncache_orangefs_default_attrs[] = { + &ncache_hard_limit_attribute.attr, + &ncache_reclaim_percent_attribute.attr, + &ncache_soft_limit_attribute.attr, + &ncache_timeout_msecs_attribute.attr, + NULL, +}; + +static struct kobj_type ncache_orangefs_ktype = { + .sysfs_ops = &ncache_orangefs_sysfs_ops, + .release = ncache_orangefs_release, + .default_attrs = ncache_orangefs_default_attrs, +}; + +static struct pc_orangefs_attribute pc_acache_attribute = + __ATTR(acache, + 0664, + service_pc_show, + NULL); + +static struct pc_orangefs_attribute pc_capcache_attribute = + __ATTR(capcache, + 0664, + service_pc_show, + NULL); + +static struct pc_orangefs_attribute pc_ncache_attribute = + __ATTR(ncache, + 0664, + service_pc_show, + NULL); + +static struct attribute *pc_orangefs_default_attrs[] = { + &pc_acache_attribute.attr, + &pc_capcache_attribute.attr, + &pc_ncache_attribute.attr, + NULL, +}; + +static struct kobj_type pc_orangefs_ktype = { + .sysfs_ops = &pc_orangefs_sysfs_ops, + .release = pc_orangefs_release, + .default_attrs = pc_orangefs_default_attrs, +}; + +static struct stats_orangefs_attribute stats_reads_attribute = + __ATTR(reads, + 0664, + int_stats_show, + NULL); + +static struct stats_orangefs_attribute stats_writes_attribute = + __ATTR(writes, + 0664, + int_stats_show, + NULL); + +static struct attribute 
*stats_orangefs_default_attrs[] = { + &stats_reads_attribute.attr, + &stats_writes_attribute.attr, + NULL, +}; + +static struct kobj_type stats_orangefs_ktype = { + .sysfs_ops = &stats_orangefs_sysfs_ops, + .release = stats_orangefs_release, + .default_attrs = stats_orangefs_default_attrs, +}; + +static struct orangefs_obj *orangefs_obj; +static struct acache_orangefs_obj *acache_orangefs_obj; +static struct capcache_orangefs_obj *capcache_orangefs_obj; +static struct ccache_orangefs_obj *ccache_orangefs_obj; +static struct ncache_orangefs_obj *ncache_orangefs_obj; +static struct pc_orangefs_obj *pc_orangefs_obj; +static struct stats_orangefs_obj *stats_orangefs_obj; + +int orangefs_sysfs_init(void) +{ + int rc = -EINVAL; + + gossip_debug(GOSSIP_SYSFS_DEBUG, "orangefs_sysfs_init: start\n"); + + /* create /sys/fs/orangefs. */ + orangefs_obj = kzalloc(sizeof(*orangefs_obj), GFP_KERNEL); + if (!orangefs_obj) + goto out; + + rc = kobject_init_and_add(&orangefs_obj->kobj, + &orangefs_ktype, + fs_kobj, + ORANGEFS_KOBJ_ID); + + if (rc) + goto ofs_obj_bail; + + kobject_uevent(&orangefs_obj->kobj, KOBJ_ADD); + + /* create /sys/fs/orangefs/acache. */ + acache_orangefs_obj = kzalloc(sizeof(*acache_orangefs_obj), GFP_KERNEL); + if (!acache_orangefs_obj) { + rc = -EINVAL; + goto ofs_obj_bail; + } + + rc = kobject_init_and_add(&acache_orangefs_obj->kobj, + &acache_orangefs_ktype, + &orangefs_obj->kobj, + ACACHE_KOBJ_ID); + + if (rc) + goto acache_obj_bail; + + kobject_uevent(&acache_orangefs_obj->kobj, KOBJ_ADD); + + /* create /sys/fs/orangefs/capcache. */ + capcache_orangefs_obj = + kzalloc(sizeof(*capcache_orangefs_obj), GFP_KERNEL); + if (!capcache_orangefs_obj) { + rc = -EINVAL; + goto acache_obj_bail; + } + + rc = kobject_init_and_add(&capcache_orangefs_obj->kobj, + &capcache_orangefs_ktype, + &orangefs_obj->kobj, + CAPCACHE_KOBJ_ID); + if (rc) + goto capcache_obj_bail; + + kobject_uevent(&capcache_orangefs_obj->kobj, KOBJ_ADD); + + /* create /sys/fs/orangefs/ccache. */ + ccache_orangefs_obj = + kzalloc(sizeof(*ccache_orangefs_obj), GFP_KERNEL); + if (!ccache_orangefs_obj) { + rc = -EINVAL; + goto capcache_obj_bail; + } + + rc = kobject_init_and_add(&ccache_orangefs_obj->kobj, + &ccache_orangefs_ktype, + &orangefs_obj->kobj, + CCACHE_KOBJ_ID); + if (rc) + goto ccache_obj_bail; + + kobject_uevent(&ccache_orangefs_obj->kobj, KOBJ_ADD); + + /* create /sys/fs/orangefs/ncache. */ + ncache_orangefs_obj = kzalloc(sizeof(*ncache_orangefs_obj), GFP_KERNEL); + if (!ncache_orangefs_obj) { + rc = -EINVAL; + goto ccache_obj_bail; + } + + rc = kobject_init_and_add(&ncache_orangefs_obj->kobj, + &ncache_orangefs_ktype, + &orangefs_obj->kobj, + NCACHE_KOBJ_ID); + + if (rc) + goto ncache_obj_bail; + + kobject_uevent(&ncache_orangefs_obj->kobj, KOBJ_ADD); + + /* create /sys/fs/orangefs/perf_counters. */ + pc_orangefs_obj = kzalloc(sizeof(*pc_orangefs_obj), GFP_KERNEL); + if (!pc_orangefs_obj) { + rc = -EINVAL; + goto ncache_obj_bail; + } + + rc = kobject_init_and_add(&pc_orangefs_obj->kobj, + &pc_orangefs_ktype, + &orangefs_obj->kobj, + "perf_counters"); + + if (rc) + goto pc_obj_bail; + + kobject_uevent(&pc_orangefs_obj->kobj, KOBJ_ADD); + + /* create /sys/fs/orangefs/stats. 
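+	 * This exposes the "reads" and "writes" counters kept in
+	 * g_orangefs_stats; the attributes have no store method.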
*/ + stats_orangefs_obj = kzalloc(sizeof(*stats_orangefs_obj), GFP_KERNEL); + if (!stats_orangefs_obj) { + rc = -EINVAL; + goto pc_obj_bail; + } + + rc = kobject_init_and_add(&stats_orangefs_obj->kobj, + &stats_orangefs_ktype, + &orangefs_obj->kobj, + STATS_KOBJ_ID); + + if (rc) + goto stats_obj_bail; + + kobject_uevent(&stats_orangefs_obj->kobj, KOBJ_ADD); + goto out; + +stats_obj_bail: + kobject_put(&stats_orangefs_obj->kobj); + +pc_obj_bail: + kobject_put(&pc_orangefs_obj->kobj); + +ncache_obj_bail: + kobject_put(&ncache_orangefs_obj->kobj); + +ccache_obj_bail: + kobject_put(&ccache_orangefs_obj->kobj); + +capcache_obj_bail: + kobject_put(&capcache_orangefs_obj->kobj); + +acache_obj_bail: + kobject_put(&acache_orangefs_obj->kobj); + +ofs_obj_bail: + kobject_put(&orangefs_obj->kobj); +out: + return rc; +} + +void orangefs_sysfs_exit(void) +{ + gossip_debug(GOSSIP_SYSFS_DEBUG, "orangefs_sysfs_exit: start\n"); + + kobject_put(&acache_orangefs_obj->kobj); + kobject_put(&capcache_orangefs_obj->kobj); + kobject_put(&ccache_orangefs_obj->kobj); + kobject_put(&ncache_orangefs_obj->kobj); + kobject_put(&pc_orangefs_obj->kobj); + kobject_put(&stats_orangefs_obj->kobj); + + kobject_put(&orangefs_obj->kobj); +} diff --git a/fs/orangefs/orangefs-sysfs.h b/fs/orangefs/orangefs-sysfs.h new file mode 100644 index 000000000000..f0b76382db02 --- /dev/null +++ b/fs/orangefs/orangefs-sysfs.h @@ -0,0 +1,2 @@ +extern int orangefs_sysfs_init(void); +extern void orangefs_sysfs_exit(void); diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c new file mode 100644 index 000000000000..40f5163b56aa --- /dev/null +++ b/fs/orangefs/orangefs-utils.c @@ -0,0 +1,1048 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. 
+ */ +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-dev-proto.h" +#include "orangefs-bufmap.h" + +__s32 fsid_of_op(struct orangefs_kernel_op_s *op) +{ + __s32 fsid = ORANGEFS_FS_ID_NULL; + + if (op) { + switch (op->upcall.type) { + case ORANGEFS_VFS_OP_FILE_IO: + fsid = op->upcall.req.io.refn.fs_id; + break; + case ORANGEFS_VFS_OP_LOOKUP: + fsid = op->upcall.req.lookup.parent_refn.fs_id; + break; + case ORANGEFS_VFS_OP_CREATE: + fsid = op->upcall.req.create.parent_refn.fs_id; + break; + case ORANGEFS_VFS_OP_GETATTR: + fsid = op->upcall.req.getattr.refn.fs_id; + break; + case ORANGEFS_VFS_OP_REMOVE: + fsid = op->upcall.req.remove.parent_refn.fs_id; + break; + case ORANGEFS_VFS_OP_MKDIR: + fsid = op->upcall.req.mkdir.parent_refn.fs_id; + break; + case ORANGEFS_VFS_OP_READDIR: + fsid = op->upcall.req.readdir.refn.fs_id; + break; + case ORANGEFS_VFS_OP_SETATTR: + fsid = op->upcall.req.setattr.refn.fs_id; + break; + case ORANGEFS_VFS_OP_SYMLINK: + fsid = op->upcall.req.sym.parent_refn.fs_id; + break; + case ORANGEFS_VFS_OP_RENAME: + fsid = op->upcall.req.rename.old_parent_refn.fs_id; + break; + case ORANGEFS_VFS_OP_STATFS: + fsid = op->upcall.req.statfs.fs_id; + break; + case ORANGEFS_VFS_OP_TRUNCATE: + fsid = op->upcall.req.truncate.refn.fs_id; + break; + case ORANGEFS_VFS_OP_MMAP_RA_FLUSH: + fsid = op->upcall.req.ra_cache_flush.refn.fs_id; + break; + case ORANGEFS_VFS_OP_FS_UMOUNT: + fsid = op->upcall.req.fs_umount.fs_id; + break; + case ORANGEFS_VFS_OP_GETXATTR: + fsid = op->upcall.req.getxattr.refn.fs_id; + break; + case ORANGEFS_VFS_OP_SETXATTR: + fsid = op->upcall.req.setxattr.refn.fs_id; + break; + case ORANGEFS_VFS_OP_LISTXATTR: + fsid = op->upcall.req.listxattr.refn.fs_id; + break; + case ORANGEFS_VFS_OP_REMOVEXATTR: + fsid = op->upcall.req.removexattr.refn.fs_id; + break; + case ORANGEFS_VFS_OP_FSYNC: + fsid = op->upcall.req.fsync.refn.fs_id; + break; + default: + break; + } + } + return fsid; +} + +static int orangefs_inode_flags(struct ORANGEFS_sys_attr_s *attrs) +{ + int flags = 0; + if (attrs->flags & ORANGEFS_IMMUTABLE_FL) + flags |= S_IMMUTABLE; + else + flags &= ~S_IMMUTABLE; + if (attrs->flags & ORANGEFS_APPEND_FL) + flags |= S_APPEND; + else + flags &= ~S_APPEND; + if (attrs->flags & ORANGEFS_NOATIME_FL) + flags |= S_NOATIME; + else + flags &= ~S_NOATIME; + return flags; +} + +static int orangefs_inode_perms(struct ORANGEFS_sys_attr_s *attrs) +{ + int perm_mode = 0; + + if (attrs->perms & ORANGEFS_O_EXECUTE) + perm_mode |= S_IXOTH; + if (attrs->perms & ORANGEFS_O_WRITE) + perm_mode |= S_IWOTH; + if (attrs->perms & ORANGEFS_O_READ) + perm_mode |= S_IROTH; + + if (attrs->perms & ORANGEFS_G_EXECUTE) + perm_mode |= S_IXGRP; + if (attrs->perms & ORANGEFS_G_WRITE) + perm_mode |= S_IWGRP; + if (attrs->perms & ORANGEFS_G_READ) + perm_mode |= S_IRGRP; + + if (attrs->perms & ORANGEFS_U_EXECUTE) + perm_mode |= S_IXUSR; + if (attrs->perms & ORANGEFS_U_WRITE) + perm_mode |= S_IWUSR; + if (attrs->perms & ORANGEFS_U_READ) + perm_mode |= S_IRUSR; + + if (attrs->perms & ORANGEFS_G_SGID) + perm_mode |= S_ISGID; + if (attrs->perms & ORANGEFS_U_SUID) + perm_mode |= S_ISUID; + + return perm_mode; +} + +/* + * NOTE: in kernel land, we never use the sys_attr->link_target for + * anything, so don't bother copying it into the sys_attr object here. 
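+ *
+ * Only the iattr fields flagged in ia_valid are copied, and each one
+ * sets the matching bit in attrs->mask.  For example, a bare chown(2)
+ * arrives with just ATTR_UID valid, so only attrs->owner is filled in
+ * and attrs->mask ends up as ORANGEFS_ATTR_SYS_UID.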
+ */ +static inline int copy_attributes_from_inode(struct inode *inode, + struct ORANGEFS_sys_attr_s *attrs, + struct iattr *iattr) +{ + umode_t tmp_mode; + + if (!iattr || !inode || !attrs) { + gossip_err("NULL iattr (%p), inode (%p), attrs (%p) " + "in copy_attributes_from_inode!\n", + iattr, + inode, + attrs); + return -EINVAL; + } + /* + * We need to be careful to only copy the attributes out of the + * iattr object that we know are valid. + */ + attrs->mask = 0; + if (iattr->ia_valid & ATTR_UID) { + attrs->owner = from_kuid(current_user_ns(), iattr->ia_uid); + attrs->mask |= ORANGEFS_ATTR_SYS_UID; + gossip_debug(GOSSIP_UTILS_DEBUG, "(UID) %d\n", attrs->owner); + } + if (iattr->ia_valid & ATTR_GID) { + attrs->group = from_kgid(current_user_ns(), iattr->ia_gid); + attrs->mask |= ORANGEFS_ATTR_SYS_GID; + gossip_debug(GOSSIP_UTILS_DEBUG, "(GID) %d\n", attrs->group); + } + + if (iattr->ia_valid & ATTR_ATIME) { + attrs->mask |= ORANGEFS_ATTR_SYS_ATIME; + if (iattr->ia_valid & ATTR_ATIME_SET) { + attrs->atime = (time64_t)iattr->ia_atime.tv_sec; + attrs->mask |= ORANGEFS_ATTR_SYS_ATIME_SET; + } + } + if (iattr->ia_valid & ATTR_MTIME) { + attrs->mask |= ORANGEFS_ATTR_SYS_MTIME; + if (iattr->ia_valid & ATTR_MTIME_SET) { + attrs->mtime = (time64_t)iattr->ia_mtime.tv_sec; + attrs->mask |= ORANGEFS_ATTR_SYS_MTIME_SET; + } + } + if (iattr->ia_valid & ATTR_CTIME) + attrs->mask |= ORANGEFS_ATTR_SYS_CTIME; + + /* + * ORANGEFS cannot set size with a setattr operation. Probably not likely + * to be requested through the VFS, but just in case, don't worry about + * ATTR_SIZE + */ + + if (iattr->ia_valid & ATTR_MODE) { + tmp_mode = iattr->ia_mode; + if (tmp_mode & (S_ISVTX)) { + if (is_root_handle(inode)) { + /* + * allow sticky bit to be set on root (since + * it shows up that way by default anyhow), + * but don't show it to the server + */ + tmp_mode -= S_ISVTX; + } else { + gossip_debug(GOSSIP_UTILS_DEBUG, + "User attempted to set sticky bit on non-root directory; returning EINVAL.\n"); + return -EINVAL; + } + } + + if (tmp_mode & (S_ISUID)) { + gossip_debug(GOSSIP_UTILS_DEBUG, + "Attempting to set setuid bit (not supported); returning EINVAL.\n"); + return -EINVAL; + } + + attrs->perms = ORANGEFS_util_translate_mode(tmp_mode); + attrs->mask |= ORANGEFS_ATTR_SYS_PERM; + } + + return 0; +} + +static int orangefs_inode_type(enum orangefs_ds_type objtype) +{ + if (objtype == ORANGEFS_TYPE_METAFILE) + return S_IFREG; + else if (objtype == ORANGEFS_TYPE_DIRECTORY) + return S_IFDIR; + else if (objtype == ORANGEFS_TYPE_SYMLINK) + return S_IFLNK; + else + return -1; +} + +static int orangefs_inode_is_stale(struct inode *inode, int new, + struct ORANGEFS_sys_attr_s *attrs, char *link_target) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + int type = orangefs_inode_type(attrs->objtype); + if (!new) { + /* + * If the inode type or symlink target have changed then this + * inode is stale. 
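+ *
+ * For example, a cached S_IFREG inode whose khandle the server now
+ * reports as ORANGEFS_TYPE_DIRECTORY fails the (inode->i_mode & type)
+ * test below, is marked bad, and orangefs_inode_getattr() turns the
+ * result into -ESTALE.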
+ */ + if (type == -1 || !(inode->i_mode & type)) { + orangefs_make_bad_inode(inode); + return 1; + } + if (type == S_IFLNK && strncmp(orangefs_inode->link_target, + link_target, ORANGEFS_NAME_MAX)) { + orangefs_make_bad_inode(inode); + return 1; + } + } + return 0; +} + +int orangefs_inode_getattr(struct inode *inode, int new, int size) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_kernel_op_s *new_op; + loff_t inode_size, rounded_up_size; + int ret, type; + + gossip_debug(GOSSIP_UTILS_DEBUG, "%s: called on inode %pU\n", __func__, + get_khandle_from_ino(inode)); + + new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR); + if (!new_op) + return -ENOMEM; + new_op->upcall.req.getattr.refn = orangefs_inode->refn; + new_op->upcall.req.getattr.mask = size ? + ORANGEFS_ATTR_SYS_ALL_NOHINT : ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE; + + ret = service_operation(new_op, __func__, + get_interruptible_flag(inode)); + if (ret != 0) + goto out; + + type = orangefs_inode_type(new_op-> + downcall.resp.getattr.attributes.objtype); + ret = orangefs_inode_is_stale(inode, new, + &new_op->downcall.resp.getattr.attributes, + new_op->downcall.resp.getattr.link_target); + if (ret) { + ret = -ESTALE; + goto out; + } + + switch (type) { + case S_IFREG: + inode->i_flags = orangefs_inode_flags(&new_op-> + downcall.resp.getattr.attributes); + if (size) { + inode_size = (loff_t)new_op-> + downcall.resp.getattr.attributes.size; + rounded_up_size = + (inode_size + (4096 - (inode_size % 4096))); + inode->i_size = inode_size; + orangefs_inode->blksize = + new_op->downcall.resp.getattr.attributes.blksize; + spin_lock(&inode->i_lock); + inode->i_bytes = inode_size; + inode->i_blocks = + (unsigned long)(rounded_up_size / 512); + spin_unlock(&inode->i_lock); + } + break; + case S_IFDIR: + inode->i_size = PAGE_CACHE_SIZE; + orangefs_inode->blksize = (1 << inode->i_blkbits); + spin_lock(&inode->i_lock); + inode_set_bytes(inode, inode->i_size); + spin_unlock(&inode->i_lock); + set_nlink(inode, 1); + break; + case S_IFLNK: + if (new) { + inode->i_size = (loff_t)strlen(new_op-> + downcall.resp.getattr.link_target); + orangefs_inode->blksize = (1 << inode->i_blkbits); + strlcpy(orangefs_inode->link_target, + new_op->downcall.resp.getattr.link_target, + ORANGEFS_NAME_MAX); + inode->i_link = orangefs_inode->link_target; + } + break; + } + + inode->i_uid = make_kuid(&init_user_ns, new_op-> + downcall.resp.getattr.attributes.owner); + inode->i_gid = make_kgid(&init_user_ns, new_op-> + downcall.resp.getattr.attributes.group); + inode->i_atime.tv_sec = (time64_t)new_op-> + downcall.resp.getattr.attributes.atime; + inode->i_mtime.tv_sec = (time64_t)new_op-> + downcall.resp.getattr.attributes.mtime; + inode->i_ctime.tv_sec = (time64_t)new_op-> + downcall.resp.getattr.attributes.ctime; + inode->i_atime.tv_nsec = 0; + inode->i_mtime.tv_nsec = 0; + inode->i_ctime.tv_nsec = 0; + + /* special case: mark the root inode as sticky */ + inode->i_mode = type | (is_root_handle(inode) ? 
S_ISVTX : 0) | + orangefs_inode_perms(&new_op->downcall.resp.getattr.attributes); + + ret = 0; +out: + op_release(new_op); + return ret; +} + +int orangefs_inode_check_changed(struct inode *inode) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_kernel_op_s *new_op; + int ret; + + gossip_debug(GOSSIP_UTILS_DEBUG, "%s: called on inode %pU\n", __func__, + get_khandle_from_ino(inode)); + + new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR); + if (!new_op) + return -ENOMEM; + new_op->upcall.req.getattr.refn = orangefs_inode->refn; + new_op->upcall.req.getattr.mask = ORANGEFS_ATTR_SYS_TYPE | + ORANGEFS_ATTR_SYS_LNK_TARGET; + + ret = service_operation(new_op, __func__, + get_interruptible_flag(inode)); + if (ret != 0) + goto out; + + ret = orangefs_inode_is_stale(inode, 0, + &new_op->downcall.resp.getattr.attributes, + new_op->downcall.resp.getattr.link_target); +out: + op_release(new_op); + return ret; +} + +/* + * issues a orangefs setattr request to make sure the new attribute values + * take effect if successful. returns 0 on success; -errno otherwise + */ +int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_kernel_op_s *new_op; + int ret; + + new_op = op_alloc(ORANGEFS_VFS_OP_SETATTR); + if (!new_op) + return -ENOMEM; + + new_op->upcall.req.setattr.refn = orangefs_inode->refn; + ret = copy_attributes_from_inode(inode, + &new_op->upcall.req.setattr.attributes, + iattr); + if (ret >= 0) { + ret = service_operation(new_op, __func__, + get_interruptible_flag(inode)); + + gossip_debug(GOSSIP_UTILS_DEBUG, + "orangefs_inode_setattr: returning %d\n", + ret); + } + + op_release(new_op); + + /* + * successful setattr should clear the atime, mtime and + * ctime flags. + */ + if (ret == 0) { + ClearAtimeFlag(orangefs_inode); + ClearMtimeFlag(orangefs_inode); + ClearCtimeFlag(orangefs_inode); + ClearModeFlag(orangefs_inode); + } + + return ret; +} + +int orangefs_flush_inode(struct inode *inode) +{ + /* + * If it is a dirty inode, this function gets called. + * Gather all the information that needs to be setattr'ed + * Right now, this will only be used for mode, atime, mtime + * and/or ctime. + */ + struct iattr wbattr; + int ret; + int mtime_flag; + int ctime_flag; + int atime_flag; + int mode_flag; + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + + memset(&wbattr, 0, sizeof(wbattr)); + + /* + * check inode flags up front, and clear them if they are set. This + * will prevent multiple processes from all trying to flush the same + * inode if they call close() simultaneously + */ + mtime_flag = MtimeFlag(orangefs_inode); + ClearMtimeFlag(orangefs_inode); + ctime_flag = CtimeFlag(orangefs_inode); + ClearCtimeFlag(orangefs_inode); + atime_flag = AtimeFlag(orangefs_inode); + ClearAtimeFlag(orangefs_inode); + mode_flag = ModeFlag(orangefs_inode); + ClearModeFlag(orangefs_inode); + + /* -- Lazy atime,mtime and ctime update -- + * Note: all times are dictated by server in the new scheme + * and not by the clients + * + * Also mode updates are being handled now.. 
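+ *
+ * Illustrative effect: an inode dirtied only by an atime update has
+ * AtimeFlag set, so the code below builds the equivalent of
+ *
+ *	struct iattr wbattr = { .ia_valid = ATTR_ATIME };
+ *
+ * and hands it to orangefs_inode_setattr(), which sends a setattr
+ * upcall with ORANGEFS_ATTR_SYS_ATIME set in its attribute mask.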
+ */
+
+	if (mtime_flag)
+		wbattr.ia_valid |= ATTR_MTIME;
+	if (ctime_flag)
+		wbattr.ia_valid |= ATTR_CTIME;
+	if (atime_flag)
+		wbattr.ia_valid |= ATTR_ATIME;
+
+	if (mode_flag) {
+		wbattr.ia_mode = inode->i_mode;
+		wbattr.ia_valid |= ATTR_MODE;
+	}
+
+	gossip_debug(GOSSIP_UTILS_DEBUG,
+		     "*********** orangefs_flush_inode: %pU "
+		     "(ia_valid %d)\n",
+		     get_khandle_from_ino(inode),
+		     wbattr.ia_valid);
+	if (wbattr.ia_valid == 0) {
+		gossip_debug(GOSSIP_UTILS_DEBUG,
+			     "orangefs_flush_inode skipping setattr()\n");
+		return 0;
+	}
+
+	gossip_debug(GOSSIP_UTILS_DEBUG,
+		     "orangefs_flush_inode (%pU) writing mode %o\n",
+		     get_khandle_from_ino(inode),
+		     inode->i_mode);
+
+	ret = orangefs_inode_setattr(inode, &wbattr);
+
+	return ret;
+}
+
+int orangefs_unmount_sb(struct super_block *sb)
+{
+	int ret = -EINVAL;
+	struct orangefs_kernel_op_s *new_op = NULL;
+
+	gossip_debug(GOSSIP_UTILS_DEBUG,
+		     "orangefs_unmount_sb called on sb %p\n",
+		     sb);
+
+	new_op = op_alloc(ORANGEFS_VFS_OP_FS_UMOUNT);
+	if (!new_op)
+		return -ENOMEM;
+	new_op->upcall.req.fs_umount.id = ORANGEFS_SB(sb)->id;
+	new_op->upcall.req.fs_umount.fs_id = ORANGEFS_SB(sb)->fs_id;
+	strncpy(new_op->upcall.req.fs_umount.orangefs_config_server,
+		ORANGEFS_SB(sb)->devname,
+		ORANGEFS_MAX_SERVER_ADDR_LEN);
+
+	gossip_debug(GOSSIP_UTILS_DEBUG,
+		     "Attempting ORANGEFS Unmount via host %s\n",
+		     new_op->upcall.req.fs_umount.orangefs_config_server);
+
+	ret = service_operation(new_op, "orangefs_fs_umount", 0);
+
+	gossip_debug(GOSSIP_UTILS_DEBUG,
+		     "orangefs_unmount: got return value of %d\n", ret);
+	if (!ret)
+		ORANGEFS_SB(sb)->mount_pending = 1;
+
+	op_release(new_op);
+	return ret;
+}
+
+void orangefs_make_bad_inode(struct inode *inode)
+{
+	if (is_root_handle(inode)) {
+		/*
+		 * if this occurs, the pvfs2-client-core was killed but we
+		 * can't afford to lose the inode operations and such
+		 * associated with the root handle in any case.
+		 */
+		gossip_debug(GOSSIP_UTILS_DEBUG,
+			     "*** NOT making bad root inode %pU\n",
+			     get_khandle_from_ino(inode));
+	} else {
+		gossip_debug(GOSSIP_UTILS_DEBUG,
+			     "*** making bad inode %pU\n",
+			     get_khandle_from_ino(inode));
+		make_bad_inode(inode);
+	}
+}
+
+/*
+ * The following is a very dirty hack that is now a permanent part of the
+ * ORANGEFS protocol. See protocol.h for more error definitions.
+ */
+
+/* The order matches include/orangefs-types.h in the OrangeFS source. */
+static int PINT_errno_mapping[] = {
+	0, EPERM, ENOENT, EINTR, EIO, ENXIO, EBADF, EAGAIN, ENOMEM,
+	EFAULT, EBUSY, EEXIST, ENODEV, ENOTDIR, EISDIR, EINVAL, EMFILE,
+	EFBIG, ENOSPC, EROFS, EMLINK, EPIPE, EDEADLK, ENAMETOOLONG,
+	ENOLCK, ENOSYS, ENOTEMPTY, ELOOP, EWOULDBLOCK, ENOMSG, EUNATCH,
+	EBADR, EDEADLOCK, ENODATA, ETIME, ENONET, EREMOTE, ECOMM,
+	EPROTO, EBADMSG, EOVERFLOW, ERESTART, EMSGSIZE, EPROTOTYPE,
+	ENOPROTOOPT, EPROTONOSUPPORT, EOPNOTSUPP, EADDRINUSE,
+	EADDRNOTAVAIL, ENETDOWN, ENETUNREACH, ENETRESET, ENOBUFS,
+	ETIMEDOUT, ECONNREFUSED, EHOSTDOWN, EHOSTUNREACH, EALREADY,
+	EACCES, ECONNRESET, ERANGE
+};
+
+int orangefs_normalize_to_errno(__s32 error_code)
+{
+	__u32 i;
+
+	/* Success */
+	if (error_code == 0) {
+		return 0;
+	/*
+	 * This shouldn't ever happen. If it does it should be fixed on the
+	 * server.
+	 */
+	} else if (error_code > 0) {
+		gossip_err("orangefs: error status received.\n");
+		gossip_err("orangefs: assuming error code is inverted.\n");
+		error_code = -error_code;
+	}
+
+	/*
+	 * XXX: This is very bad since error codes from ORANGEFS may not be
+	 * suitable for return into userspace.
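+	 *
+	 * Worked example: the server encodes EIO as
+	 * -(ORANGEFS_ERROR_BIT | 4). Below, the sign is stripped and
+	 * ORANGEFS_ERROR_BIT plus the class bits are masked off,
+	 * leaving i == 4; PINT_errno_mapping[4] == EIO, so -EIO is
+	 * returned. A cancellation code (ORANGEFS_ECANCEL) maps to
+	 * -ETIMEDOUT instead.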
+ */ + + /* + * Convert ORANGEFS error values into errno values suitable for return + * from the kernel. + */ + if ((-error_code) & ORANGEFS_NON_ERRNO_ERROR_BIT) { + if (((-error_code) & + (ORANGEFS_ERROR_NUMBER_BITS|ORANGEFS_NON_ERRNO_ERROR_BIT| + ORANGEFS_ERROR_BIT)) == ORANGEFS_ECANCEL) { + /* + * cancellation error codes generally correspond to + * a timeout from the client's perspective + */ + error_code = -ETIMEDOUT; + } else { + /* assume a default error code */ + gossip_err("orangefs: warning: got error code without errno equivalent: %d.\n", error_code); + error_code = -EINVAL; + } + + /* Convert ORANGEFS encoded errno values into regular errno values. */ + } else if ((-error_code) & ORANGEFS_ERROR_BIT) { + i = (-error_code) & ~(ORANGEFS_ERROR_BIT|ORANGEFS_ERROR_CLASS_BITS); + if (i < sizeof(PINT_errno_mapping)/sizeof(*PINT_errno_mapping)) + error_code = -PINT_errno_mapping[i]; + else + error_code = -EINVAL; + + /* + * Only ORANGEFS protocol error codes should ever come here. Otherwise + * there is a bug somewhere. + */ + } else { + gossip_err("orangefs: orangefs_normalize_to_errno: got error code which is not from ORANGEFS.\n"); + } + return error_code; +} + +#define NUM_MODES 11 +__s32 ORANGEFS_util_translate_mode(int mode) +{ + int ret = 0; + int i = 0; + static int modes[NUM_MODES] = { + S_IXOTH, S_IWOTH, S_IROTH, + S_IXGRP, S_IWGRP, S_IRGRP, + S_IXUSR, S_IWUSR, S_IRUSR, + S_ISGID, S_ISUID + }; + static int orangefs_modes[NUM_MODES] = { + ORANGEFS_O_EXECUTE, ORANGEFS_O_WRITE, ORANGEFS_O_READ, + ORANGEFS_G_EXECUTE, ORANGEFS_G_WRITE, ORANGEFS_G_READ, + ORANGEFS_U_EXECUTE, ORANGEFS_U_WRITE, ORANGEFS_U_READ, + ORANGEFS_G_SGID, ORANGEFS_U_SUID + }; + + for (i = 0; i < NUM_MODES; i++) + if (mode & modes[i]) + ret |= orangefs_modes[i]; + + return ret; +} +#undef NUM_MODES + +/* + * After obtaining a string representation of the client's debug + * keywords and their associated masks, this function is called to build an + * array of these values. + */ +int orangefs_prepare_cdm_array(char *debug_array_string) +{ + int i; + int rc = -EINVAL; + char *cds_head = NULL; + char *cds_delimiter = NULL; + int keyword_len = 0; + + gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__); + + /* + * figure out how many elements the cdm_array needs. 
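+	 *
+	 * The client hands us newline-terminated "keyword mask1 mask2"
+	 * records, so the element count is the number of newlines. A
+	 * two-element string would look like (masks illustrative):
+	 *
+	 *	verbose 1fff 0
+	 *	all ffffffffffffffff ffffffffffffffff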
*/
+	for (i = 0; i < strlen(debug_array_string); i++)
+		if (debug_array_string[i] == '\n')
+			cdm_element_count++;
+
+	if (!cdm_element_count) {
+		pr_info("No elements in client debug array string!\n");
+		goto out;
+	}
+
+	cdm_array =
+		kzalloc(cdm_element_count * sizeof(struct client_debug_mask),
+			GFP_KERNEL);
+	if (!cdm_array) {
+		pr_info("malloc failed for cdm_array!\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	cds_head = debug_array_string;
+
+	for (i = 0; i < cdm_element_count; i++) {
+		cds_delimiter = strchr(cds_head, '\n');
+		*cds_delimiter = '\0';
+
+		keyword_len = strcspn(cds_head, " ");
+
+		cdm_array[i].keyword = kzalloc(keyword_len + 1, GFP_KERNEL);
+		if (!cdm_array[i].keyword) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		sscanf(cds_head,
+		       "%s %llx %llx",
+		       cdm_array[i].keyword,
+		       (unsigned long long *)&(cdm_array[i].mask1),
+		       (unsigned long long *)&(cdm_array[i].mask2));
+
+		if (!strcmp(cdm_array[i].keyword, ORANGEFS_VERBOSE))
+			client_verbose_index = i;
+
+		if (!strcmp(cdm_array[i].keyword, ORANGEFS_ALL))
+			client_all_index = i;
+
+		cds_head = cds_delimiter + 1;
+	}
+
+	rc = cdm_element_count;
+
+	gossip_debug(GOSSIP_UTILS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+
+out:
+
+	return rc;
+
+}
+
+/*
+ * /sys/kernel/debug/orangefs/debug-help can be catted to
+ * see all the available kernel and client debug keywords.
+ *
+ * When the kernel boots, we have no idea what keywords the
+ * client supports, nor their associated masks.
+ *
+ * We pass through this function once at boot and stamp a
+ * boilerplate "we don't know" message for the client in the
+ * debug-help file. We pass through here again when the client
+ * starts and then we can fill out the debug-help file fully.
+ *
+ * The client might be restarted any number of times between
+ * reboots, we only build the debug-help file the first time.
+ */
+int orangefs_prepare_debugfs_help_string(int at_boot)
+{
+	int rc = -EINVAL;
+	int i;
+	int byte_count = 0;
+	char *client_title = "Client Debug Keywords:\n";
+	char *kernel_title = "Kernel Debug Keywords:\n";
+
+	gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+	if (at_boot) {
+		byte_count += strlen(HELP_STRING_UNINITIALIZED);
+		client_title = HELP_STRING_UNINITIALIZED;
+	} else {
+		/*
+		 * fill the client keyword/mask array and remember
+		 * how many elements there were.
+		 */
+		cdm_element_count =
+			orangefs_prepare_cdm_array(client_debug_array_string);
+		if (cdm_element_count <= 0)
+			goto out;
+
+		/* Count the bytes destined for debug_help_string; the
+		 * + 2 covers the "\t" and "\n" wrapped around each
+		 * keyword when the string is built below.
+		 */
+		byte_count += strlen(client_title);
+
+		for (i = 0; i < cdm_element_count; i++) {
+			byte_count += strlen(cdm_array[i].keyword) + 2;
+			if (byte_count >= DEBUG_HELP_STRING_SIZE) {
+				pr_info("%s: overflow 1!\n", __func__);
+				goto out;
+			}
+		}
+
+		gossip_debug(GOSSIP_UTILS_DEBUG,
+			     "%s: cdm_element_count:%d:\n",
+			     __func__,
+			     cdm_element_count);
+	}
+
+	byte_count += strlen(kernel_title);
+	for (i = 0; i < num_kmod_keyword_mask_map; i++) {
+		byte_count +=
+			strlen(s_kmod_keyword_mask_map[i].keyword) + 2;
+		if (byte_count >= DEBUG_HELP_STRING_SIZE) {
+			pr_info("%s: overflow 2!\n", __func__);
+			goto out;
+		}
+	}
+
+	/* build debug_help_string.
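+	 *
+	 * The finished buffer, as later catted from debug-help, looks
+	 * like this (keywords illustrative; at boot the client section
+	 * is the HELP_STRING_UNINITIALIZED boilerplate instead):
+	 *
+	 *	Client Debug Keywords:
+	 *		verbose
+	 *		all
+	 *
+	 *	Kernel Debug Keywords:
+	 *		super
+	 *		inode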
*/ + debug_help_string = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL); + if (!debug_help_string) { + rc = -ENOMEM; + goto out; + } + + strcat(debug_help_string, client_title); + + if (!at_boot) { + for (i = 0; i < cdm_element_count; i++) { + strcat(debug_help_string, "\t"); + strcat(debug_help_string, cdm_array[i].keyword); + strcat(debug_help_string, "\n"); + } + } + + strcat(debug_help_string, "\n"); + strcat(debug_help_string, kernel_title); + + for (i = 0; i < num_kmod_keyword_mask_map; i++) { + strcat(debug_help_string, "\t"); + strcat(debug_help_string, s_kmod_keyword_mask_map[i].keyword); + strcat(debug_help_string, "\n"); + } + + rc = 0; + +out: + + return rc; + +} + +/* + * kernel = type 0 + * client = type 1 + */ +void debug_mask_to_string(void *mask, int type) +{ + int i; + int len = 0; + char *debug_string; + int element_count = 0; + + gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__); + + if (type) { + debug_string = client_debug_string; + element_count = cdm_element_count; + } else { + debug_string = kernel_debug_string; + element_count = num_kmod_keyword_mask_map; + } + + memset(debug_string, 0, ORANGEFS_MAX_DEBUG_STRING_LEN); + + /* + * Some keywords, like "all" or "verbose", are amalgams of + * numerous other keywords. Make a special check for those + * before grinding through the whole mask only to find out + * later... + */ + if (check_amalgam_keyword(mask, type)) + goto out; + + /* Build the debug string. */ + for (i = 0; i < element_count; i++) + if (type) + do_c_string(mask, i); + else + do_k_string(mask, i); + + len = strlen(debug_string); + + if ((len) && (type)) + client_debug_string[len - 1] = '\0'; + else if (len) + kernel_debug_string[len - 1] = '\0'; + else if (type) + strcpy(client_debug_string, "none"); + else + strcpy(kernel_debug_string, "none"); + +out: +gossip_debug(GOSSIP_UTILS_DEBUG, "%s: string:%s:\n", __func__, debug_string); + + return; + +} + +void do_k_string(void *k_mask, int index) +{ + __u64 *mask = (__u64 *) k_mask; + + if (keyword_is_amalgam((char *) s_kmod_keyword_mask_map[index].keyword)) + goto out; + + if (*mask & s_kmod_keyword_mask_map[index].mask_val) { + if ((strlen(kernel_debug_string) + + strlen(s_kmod_keyword_mask_map[index].keyword)) + < ORANGEFS_MAX_DEBUG_STRING_LEN - 1) { + strcat(kernel_debug_string, + s_kmod_keyword_mask_map[index].keyword); + strcat(kernel_debug_string, ","); + } else { + gossip_err("%s: overflow!\n", __func__); + strcpy(kernel_debug_string, ORANGEFS_ALL); + goto out; + } + } + +out: + + return; +} + +void do_c_string(void *c_mask, int index) +{ + struct client_debug_mask *mask = (struct client_debug_mask *) c_mask; + + if (keyword_is_amalgam(cdm_array[index].keyword)) + goto out; + + if ((mask->mask1 & cdm_array[index].mask1) || + (mask->mask2 & cdm_array[index].mask2)) { + if ((strlen(client_debug_string) + + strlen(cdm_array[index].keyword) + 1) + < ORANGEFS_MAX_DEBUG_STRING_LEN - 2) { + strcat(client_debug_string, + cdm_array[index].keyword); + strcat(client_debug_string, ","); + } else { + gossip_err("%s: overflow!\n", __func__); + strcpy(client_debug_string, ORANGEFS_ALL); + goto out; + } + } +out: + return; +} + +int keyword_is_amalgam(char *keyword) +{ + int rc = 0; + + if ((!strcmp(keyword, ORANGEFS_ALL)) || (!strcmp(keyword, ORANGEFS_VERBOSE))) + rc = 1; + + return rc; +} + +/* + * kernel = type 0 + * client = type 1 + * + * return 1 if we found an amalgam. 
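+ *
+ * e.g. a client mask equal to cdm_array[client_all_index] short-circuits
+ * the per-keyword walk: client_debug_string is set to "all" and 1 is
+ * returned.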
+ */ +int check_amalgam_keyword(void *mask, int type) +{ + __u64 *k_mask; + struct client_debug_mask *c_mask; + int k_all_index = num_kmod_keyword_mask_map - 1; + int rc = 0; + + if (type) { + c_mask = (struct client_debug_mask *) mask; + + if ((c_mask->mask1 == cdm_array[client_all_index].mask1) && + (c_mask->mask2 == cdm_array[client_all_index].mask2)) { + strcpy(client_debug_string, ORANGEFS_ALL); + rc = 1; + goto out; + } + + if ((c_mask->mask1 == cdm_array[client_verbose_index].mask1) && + (c_mask->mask2 == cdm_array[client_verbose_index].mask2)) { + strcpy(client_debug_string, ORANGEFS_VERBOSE); + rc = 1; + goto out; + } + + } else { + k_mask = (__u64 *) mask; + + if (*k_mask >= s_kmod_keyword_mask_map[k_all_index].mask_val) { + strcpy(kernel_debug_string, ORANGEFS_ALL); + rc = 1; + goto out; + } + } + +out: + + return rc; +} + +/* + * kernel = type 0 + * client = type 1 + */ +void debug_string_to_mask(char *debug_string, void *mask, int type) +{ + char *unchecked_keyword; + int i; + char *strsep_fodder = kstrdup(debug_string, GFP_KERNEL); + char *original_pointer; + int element_count = 0; + struct client_debug_mask *c_mask; + __u64 *k_mask; + + gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__); + + if (type) { + c_mask = (struct client_debug_mask *)mask; + element_count = cdm_element_count; + } else { + k_mask = (__u64 *)mask; + *k_mask = 0; + element_count = num_kmod_keyword_mask_map; + } + + original_pointer = strsep_fodder; + while ((unchecked_keyword = strsep(&strsep_fodder, ","))) + if (strlen(unchecked_keyword)) { + for (i = 0; i < element_count; i++) + if (type) + do_c_mask(i, + unchecked_keyword, + &c_mask); + else + do_k_mask(i, + unchecked_keyword, + &k_mask); + } + + kfree(original_pointer); +} + +void do_c_mask(int i, + char *unchecked_keyword, + struct client_debug_mask **sane_mask) +{ + + if (!strcmp(cdm_array[i].keyword, unchecked_keyword)) { + (**sane_mask).mask1 = (**sane_mask).mask1 | cdm_array[i].mask1; + (**sane_mask).mask2 = (**sane_mask).mask2 | cdm_array[i].mask2; + } +} + +void do_k_mask(int i, char *unchecked_keyword, __u64 **sane_mask) +{ + + if (!strcmp(s_kmod_keyword_mask_map[i].keyword, unchecked_keyword)) + **sane_mask = (**sane_mask) | + s_kmod_keyword_mask_map[i].mask_val; +} diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h new file mode 100644 index 000000000000..45ce4ff4cbc7 --- /dev/null +++ b/fs/orangefs/protocol.h @@ -0,0 +1,452 @@ +#include <linux/types.h> +#include <linux/spinlock_types.h> +#include <linux/slab.h> +#include <linux/ioctl.h> + +extern struct client_debug_mask *cdm_array; +extern char *debug_help_string; +extern int help_string_initialized; +extern struct dentry *debug_dir; +extern struct dentry *help_file_dentry; +extern struct dentry *client_debug_dentry; +extern const struct file_operations debug_help_fops; +extern int client_all_index; +extern int client_verbose_index; +extern int cdm_element_count; +#define DEBUG_HELP_STRING_SIZE 4096 +#define HELP_STRING_UNINITIALIZED \ + "Client Debug Keywords are unknown until the first time\n" \ + "the client is started after boot.\n" +#define ORANGEFS_KMOD_DEBUG_HELP_FILE "debug-help" +#define ORANGEFS_KMOD_DEBUG_FILE "kernel-debug" +#define ORANGEFS_CLIENT_DEBUG_FILE "client-debug" +#define ORANGEFS_VERBOSE "verbose" +#define ORANGEFS_ALL "all" + +/* pvfs2-config.h ***********************************************************/ +#define ORANGEFS_VERSION_MAJOR 2 +#define ORANGEFS_VERSION_MINOR 9 +#define ORANGEFS_VERSION_SUB 0 + +/* khandle stuff 
***********************************************************/ + +/* + * The 2.9 core will put 64 bit handles in here like this: + * 1234 0000 0000 5678 + * The 3.0 and beyond cores will put 128 bit handles in here like this: + * 1234 5678 90AB CDEF + * The kernel module will always use the first four bytes and + * the last four bytes as an inum. + */ +struct orangefs_khandle { + unsigned char u[16]; +} __aligned(8); + +/* + * kernel version of an object ref. + */ +struct orangefs_object_kref { + struct orangefs_khandle khandle; + __s32 fs_id; + __s32 __pad1; +}; + +/* + * compare 2 khandles assumes little endian thus from large address to + * small address + */ +static inline int ORANGEFS_khandle_cmp(const struct orangefs_khandle *kh1, + const struct orangefs_khandle *kh2) +{ + int i; + + for (i = 15; i >= 0; i--) { + if (kh1->u[i] > kh2->u[i]) + return 1; + if (kh1->u[i] < kh2->u[i]) + return -1; + } + + return 0; +} + +static inline void ORANGEFS_khandle_to(const struct orangefs_khandle *kh, + void *p, int size) +{ + + memset(p, 0, size); + memcpy(p, kh->u, 16); + +} + +static inline void ORANGEFS_khandle_from(struct orangefs_khandle *kh, + void *p, int size) +{ + memset(kh, 0, 16); + memcpy(kh->u, p, 16); + +} + +/* pvfs2-types.h ************************************************************/ +typedef __u32 ORANGEFS_uid; +typedef __u32 ORANGEFS_gid; +typedef __s32 ORANGEFS_fs_id; +typedef __u32 ORANGEFS_permissions; +typedef __u64 ORANGEFS_time; +typedef __s64 ORANGEFS_size; +typedef __u64 ORANGEFS_flags; +typedef __u64 ORANGEFS_ds_position; +typedef __s32 ORANGEFS_error; +typedef __s64 ORANGEFS_offset; + +#define ORANGEFS_SUPER_MAGIC 0x20030528 + +/* + * ORANGEFS error codes are a signed 32-bit integer. Error codes are negative, but + * the sign is stripped before decoding. + */ + +/* Bit 31 is not used since it is the sign. */ + +/* + * Bit 30 specifies that this is a ORANGEFS error. A ORANGEFS error is either an + * encoded errno value or a ORANGEFS protocol error. + */ +#define ORANGEFS_ERROR_BIT (1 << 30) + +/* + * Bit 29 specifies that this is a ORANGEFS protocol error and not an encoded + * errno value. + */ +#define ORANGEFS_NON_ERRNO_ERROR_BIT (1 << 29) + +/* + * Bits 9, 8, and 7 specify the error class, which encodes the section of + * server code the error originated in for logging purposes. It is not used + * in the kernel except to be masked out. + */ +#define ORANGEFS_ERROR_CLASS_BITS 0x380 + +/* Bits 6 - 0 are reserved for the actual error code. */ +#define ORANGEFS_ERROR_NUMBER_BITS 0x7f + +/* Encoded errno values decoded by PINT_errno_mapping in orangefs-utils.c. */ + +/* Our own ORANGEFS protocol error codes. 
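+ *
+ * Putting the bits together, ORANGEFS_ECANCEL below encodes as
+ * (1 << 30) | (1 << 29) | 1 == 0x60000001 before the sign is applied.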
*/ +#define ORANGEFS_ECANCEL (1|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_EDEVINIT (2|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_EDETAIL (3|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_EHOSTNTFD (4|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_EADDRNTFD (5|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_ENORECVR (6|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_ETRYAGAIN (7|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_ENOTPVFS (8|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) +#define ORANGEFS_ESECURITY (9|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT) + +/* permission bits */ +#define ORANGEFS_O_EXECUTE (1 << 0) +#define ORANGEFS_O_WRITE (1 << 1) +#define ORANGEFS_O_READ (1 << 2) +#define ORANGEFS_G_EXECUTE (1 << 3) +#define ORANGEFS_G_WRITE (1 << 4) +#define ORANGEFS_G_READ (1 << 5) +#define ORANGEFS_U_EXECUTE (1 << 6) +#define ORANGEFS_U_WRITE (1 << 7) +#define ORANGEFS_U_READ (1 << 8) +/* no ORANGEFS_U_VTX (sticky bit) */ +#define ORANGEFS_G_SGID (1 << 10) +#define ORANGEFS_U_SUID (1 << 11) + +/* definition taken from stdint.h */ +#define INT32_MAX (2147483647) +#define ORANGEFS_ITERATE_START (INT32_MAX - 1) +#define ORANGEFS_ITERATE_END (INT32_MAX - 2) +#define ORANGEFS_ITERATE_NEXT (INT32_MAX - 3) +#define ORANGEFS_READDIR_START ORANGEFS_ITERATE_START +#define ORANGEFS_READDIR_END ORANGEFS_ITERATE_END +#define ORANGEFS_IMMUTABLE_FL FS_IMMUTABLE_FL +#define ORANGEFS_APPEND_FL FS_APPEND_FL +#define ORANGEFS_NOATIME_FL FS_NOATIME_FL +#define ORANGEFS_MIRROR_FL 0x01000000ULL +#define ORANGEFS_O_EXECUTE (1 << 0) +#define ORANGEFS_FS_ID_NULL ((__s32)0) +#define ORANGEFS_ATTR_SYS_UID (1 << 0) +#define ORANGEFS_ATTR_SYS_GID (1 << 1) +#define ORANGEFS_ATTR_SYS_PERM (1 << 2) +#define ORANGEFS_ATTR_SYS_ATIME (1 << 3) +#define ORANGEFS_ATTR_SYS_CTIME (1 << 4) +#define ORANGEFS_ATTR_SYS_MTIME (1 << 5) +#define ORANGEFS_ATTR_SYS_TYPE (1 << 6) +#define ORANGEFS_ATTR_SYS_ATIME_SET (1 << 7) +#define ORANGEFS_ATTR_SYS_MTIME_SET (1 << 8) +#define ORANGEFS_ATTR_SYS_SIZE (1 << 20) +#define ORANGEFS_ATTR_SYS_LNK_TARGET (1 << 24) +#define ORANGEFS_ATTR_SYS_DFILE_COUNT (1 << 25) +#define ORANGEFS_ATTR_SYS_DIRENT_COUNT (1 << 26) +#define ORANGEFS_ATTR_SYS_BLKSIZE (1 << 28) +#define ORANGEFS_ATTR_SYS_MIRROR_COPIES_COUNT (1 << 29) +#define ORANGEFS_ATTR_SYS_COMMON_ALL \ + (ORANGEFS_ATTR_SYS_UID | \ + ORANGEFS_ATTR_SYS_GID | \ + ORANGEFS_ATTR_SYS_PERM | \ + ORANGEFS_ATTR_SYS_ATIME | \ + ORANGEFS_ATTR_SYS_CTIME | \ + ORANGEFS_ATTR_SYS_MTIME | \ + ORANGEFS_ATTR_SYS_TYPE) + +#define ORANGEFS_ATTR_SYS_ALL_SETABLE \ +(ORANGEFS_ATTR_SYS_COMMON_ALL-ORANGEFS_ATTR_SYS_TYPE) + +#define ORANGEFS_ATTR_SYS_ALL_NOHINT \ + (ORANGEFS_ATTR_SYS_COMMON_ALL | \ + ORANGEFS_ATTR_SYS_SIZE | \ + ORANGEFS_ATTR_SYS_LNK_TARGET | \ + ORANGEFS_ATTR_SYS_DFILE_COUNT | \ + ORANGEFS_ATTR_SYS_MIRROR_COPIES_COUNT | \ + ORANGEFS_ATTR_SYS_DIRENT_COUNT | \ + ORANGEFS_ATTR_SYS_BLKSIZE) + +#define ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE \ + (ORANGEFS_ATTR_SYS_COMMON_ALL | \ + ORANGEFS_ATTR_SYS_LNK_TARGET | \ + ORANGEFS_ATTR_SYS_DFILE_COUNT | \ + ORANGEFS_ATTR_SYS_MIRROR_COPIES_COUNT | \ + ORANGEFS_ATTR_SYS_DIRENT_COUNT | \ + ORANGEFS_ATTR_SYS_BLKSIZE) + +#define ORANGEFS_XATTR_REPLACE 0x2 +#define ORANGEFS_XATTR_CREATE 0x1 +#define ORANGEFS_MAX_SERVER_ADDR_LEN 256 +#define ORANGEFS_NAME_MAX 256 +/* + * max extended attribute name len as imposed by the VFS and exploited for the + * upcall request 
types. + * NOTE: Please retain them as multiples of 8 even if you wish to change them + * This is *NECESSARY* for supporting 32 bit user-space binaries on a 64-bit + * kernel. Due to implementation within DBPF, this really needs to be + * ORANGEFS_NAME_MAX, which it was the same value as, but no reason to let it + * break if that changes in the future. + */ +#define ORANGEFS_MAX_XATTR_NAMELEN ORANGEFS_NAME_MAX /* Not the same as + * XATTR_NAME_MAX defined + * by <linux/xattr.h> + */ +#define ORANGEFS_MAX_XATTR_VALUELEN 8192 /* Not the same as XATTR_SIZE_MAX + * defined by <linux/xattr.h> + */ +#define ORANGEFS_MAX_XATTR_LISTLEN 16 /* Not the same as XATTR_LIST_MAX + * defined by <linux/xattr.h> + */ +/* + * ORANGEFS I/O operation types, used in both system and server interfaces. + */ +enum ORANGEFS_io_type { + ORANGEFS_IO_READ = 1, + ORANGEFS_IO_WRITE = 2 +}; + +/* + * If this enum is modified the server parameters related to the precreate pool + * batch and low threshold sizes may need to be modified to reflect this + * change. + */ +enum orangefs_ds_type { + ORANGEFS_TYPE_NONE = 0, + ORANGEFS_TYPE_METAFILE = (1 << 0), + ORANGEFS_TYPE_DATAFILE = (1 << 1), + ORANGEFS_TYPE_DIRECTORY = (1 << 2), + ORANGEFS_TYPE_SYMLINK = (1 << 3), + ORANGEFS_TYPE_DIRDATA = (1 << 4), + ORANGEFS_TYPE_INTERNAL = (1 << 5) /* for the server's private use */ +}; + +/* + * ORANGEFS_certificate simply stores a buffer with the buffer size. + * The buffer can be converted to an OpenSSL X509 struct for use. + */ +struct ORANGEFS_certificate { + __u32 buf_size; + unsigned char *buf; +}; + +/* + * A credential identifies a user and is signed by the client/user + * private key. + */ +struct ORANGEFS_credential { + __u32 userid; /* user id */ + __u32 num_groups; /* length of group_array */ + __u32 *group_array; /* groups for which the user is a member */ + char *issuer; /* alias of the issuing server */ + __u64 timeout; /* seconds after epoch to time out */ + __u32 sig_size; /* length of the signature in bytes */ + unsigned char *signature; /* digital signature */ + struct ORANGEFS_certificate certificate; /* user certificate buffer */ +}; +#define extra_size_ORANGEFS_credential (ORANGEFS_REQ_LIMIT_GROUPS * \ + sizeof(__u32) + \ + ORANGEFS_REQ_LIMIT_ISSUER + \ + ORANGEFS_REQ_LIMIT_SIGNATURE + \ + extra_size_ORANGEFS_certificate) + +/* This structure is used by the VFS-client interaction alone */ +struct ORANGEFS_keyval_pair { + char key[ORANGEFS_MAX_XATTR_NAMELEN]; + __s32 key_sz; /* __s32 for portable, fixed-size structures */ + __s32 val_sz; + char val[ORANGEFS_MAX_XATTR_VALUELEN]; +}; + +/* pvfs2-sysint.h ***********************************************************/ +/* Describes attributes for a file, directory, or symlink. 
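+ * Fields are interpreted against the bits set in .mask; e.g. a setattr
+ * built by copy_attributes_from_inode() for a chown(2) that changes only
+ * the owner carries just .owner, with .mask == ORANGEFS_ATTR_SYS_UID
+ * (ignoring the ATTR_CTIME the VFS typically adds).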
*/ +struct ORANGEFS_sys_attr_s { + __u32 owner; + __u32 group; + __u32 perms; + __u64 atime; + __u64 mtime; + __u64 ctime; + __s64 size; + + /* NOTE: caller must free if valid */ + char *link_target; + + /* Changed to __s32 so that size of structure does not change */ + __s32 dfile_count; + + /* Changed to __s32 so that size of structure does not change */ + __s32 distr_dir_servers_initial; + + /* Changed to __s32 so that size of structure does not change */ + __s32 distr_dir_servers_max; + + /* Changed to __s32 so that size of structure does not change */ + __s32 distr_dir_split_size; + + __u32 mirror_copies_count; + + /* NOTE: caller must free if valid */ + char *dist_name; + + /* NOTE: caller must free if valid */ + char *dist_params; + + __s64 dirent_count; + enum orangefs_ds_type objtype; + __u64 flags; + __u32 mask; + __s64 blksize; +}; + +#define ORANGEFS_LOOKUP_LINK_NO_FOLLOW 0 + +/* pint-dev.h ***************************************************************/ + +/* parameter structure used in ORANGEFS_DEV_DEBUG ioctl command */ +struct dev_mask_info_s { + enum { + KERNEL_MASK, + CLIENT_MASK, + } mask_type; + __u64 mask_value; +}; + +struct dev_mask2_info_s { + __u64 mask1_value; + __u64 mask2_value; +}; + +/* pvfs2-util.h *************************************************************/ +__s32 ORANGEFS_util_translate_mode(int mode); + +/* pvfs2-debug.h ************************************************************/ +#include "orangefs-debug.h" + +/* pvfs2-internal.h *********************************************************/ +#define llu(x) (unsigned long long)(x) +#define lld(x) (long long)(x) + +/* pint-dev-shared.h ********************************************************/ +#define ORANGEFS_DEV_MAGIC 'k' + +#define ORANGEFS_READDIR_DEFAULT_DESC_COUNT 5 + +#define DEV_GET_MAGIC 0x1 +#define DEV_GET_MAX_UPSIZE 0x2 +#define DEV_GET_MAX_DOWNSIZE 0x3 +#define DEV_MAP 0x4 +#define DEV_REMOUNT_ALL 0x5 +#define DEV_DEBUG 0x6 +#define DEV_UPSTREAM 0x7 +#define DEV_CLIENT_MASK 0x8 +#define DEV_CLIENT_STRING 0x9 +#define DEV_MAX_NR 0xa + +/* supported ioctls, codes are with respect to user-space */ +enum { + ORANGEFS_DEV_GET_MAGIC = _IOW(ORANGEFS_DEV_MAGIC, DEV_GET_MAGIC, __s32), + ORANGEFS_DEV_GET_MAX_UPSIZE = + _IOW(ORANGEFS_DEV_MAGIC, DEV_GET_MAX_UPSIZE, __s32), + ORANGEFS_DEV_GET_MAX_DOWNSIZE = + _IOW(ORANGEFS_DEV_MAGIC, DEV_GET_MAX_DOWNSIZE, __s32), + ORANGEFS_DEV_MAP = _IO(ORANGEFS_DEV_MAGIC, DEV_MAP), + ORANGEFS_DEV_REMOUNT_ALL = _IO(ORANGEFS_DEV_MAGIC, DEV_REMOUNT_ALL), + ORANGEFS_DEV_DEBUG = _IOR(ORANGEFS_DEV_MAGIC, DEV_DEBUG, __s32), + ORANGEFS_DEV_UPSTREAM = _IOW(ORANGEFS_DEV_MAGIC, DEV_UPSTREAM, int), + ORANGEFS_DEV_CLIENT_MASK = _IOW(ORANGEFS_DEV_MAGIC, + DEV_CLIENT_MASK, + struct dev_mask2_info_s), + ORANGEFS_DEV_CLIENT_STRING = _IOW(ORANGEFS_DEV_MAGIC, + DEV_CLIENT_STRING, + char *), + ORANGEFS_DEV_MAXNR = DEV_MAX_NR, +}; + +/* + * version number for use in communicating between kernel space and user + * space. Zero signifies the upstream version of the kernel module. + */ +#define ORANGEFS_KERNEL_PROTO_VERSION 0 +#define ORANGEFS_MINIMUM_USERSPACE_VERSION 20904 + +/* + * describes memory regions to map in the ORANGEFS_DEV_MAP ioctl. + * NOTE: See devorangefs-req.c for 32 bit compat structure. + * Since this structure has a variable-sized layout that is different + * on 32 and 64 bit platforms, we need to normalize to a 64 bit layout + * on such systems before servicing ioctl calls from user-space binaries + * that may be 32 bit! 
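+ *
+ * Concretely (standard ABI sizes): a 32-bit binary lays this out as a
+ * 4-byte ptr followed by three __s32s (16 bytes total), while the
+ * native 64-bit layout starts with an 8-byte ptr and pads to 24 bytes,
+ * so every field after ptr sits at a different offset.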
+ */ +struct ORANGEFS_dev_map_desc { + void *ptr; + __s32 total_size; + __s32 size; + __s32 count; +}; + +/* gossip.h *****************************************************************/ + +#ifdef GOSSIP_DISABLE_DEBUG +#define gossip_debug(mask, format, f...) do {} while (0) +#else +extern __u64 gossip_debug_mask; +extern struct client_debug_mask client_debug_mask; + +/* try to avoid function call overhead by checking masks in macro */ +#define gossip_debug(mask, format, f...) \ +do { \ + if (gossip_debug_mask & mask) \ + printk(format, ##f); \ +} while (0) +#endif /* GOSSIP_DISABLE_DEBUG */ + +/* do file and line number printouts w/ the GNU preprocessor */ +#define gossip_ldebug(mask, format, f...) \ + gossip_debug(mask, "%s: " format, __func__, ##f) + +#define gossip_err printk +#define gossip_lerr(format, f...) \ + gossip_err("%s line %d: " format, \ + __FILE__, \ + __LINE__, \ + ##f) diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c new file mode 100644 index 000000000000..b9da9a0281c9 --- /dev/null +++ b/fs/orangefs/super.c @@ -0,0 +1,559 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-bufmap.h" + +#include <linux/parser.h> + +/* a cache for orangefs-inode objects (i.e. orangefs inode private data) */ +static struct kmem_cache *orangefs_inode_cache; + +/* list for storing orangefs specific superblocks in use */ +LIST_HEAD(orangefs_superblocks); + +DEFINE_SPINLOCK(orangefs_superblocks_lock); + +enum { + Opt_intr, + Opt_acl, + Opt_local_lock, + + Opt_err +}; + +static const match_table_t tokens = { + { Opt_acl, "acl" }, + { Opt_intr, "intr" }, + { Opt_local_lock, "local_lock" }, + { Opt_err, NULL } +}; + + +static int parse_mount_options(struct super_block *sb, char *options, + int silent) +{ + struct orangefs_sb_info_s *orangefs_sb = ORANGEFS_SB(sb); + substring_t args[MAX_OPT_ARGS]; + char *p; + + /* + * Force any potential flags that might be set from the mount + * to zero, ie, initialize to unset. + */ + sb->s_flags &= ~MS_POSIXACL; + orangefs_sb->flags &= ~ORANGEFS_OPT_INTR; + orangefs_sb->flags &= ~ORANGEFS_OPT_LOCAL_LOCK; + + while ((p = strsep(&options, ",")) != NULL) { + int token; + + if (!*p) + continue; + + token = match_token(p, tokens, args); + switch (token) { + case Opt_acl: + sb->s_flags |= MS_POSIXACL; + break; + case Opt_intr: + orangefs_sb->flags |= ORANGEFS_OPT_INTR; + break; + case Opt_local_lock: + orangefs_sb->flags |= ORANGEFS_OPT_LOCAL_LOCK; + break; + default: + goto fail; + } + } + + return 0; +fail: + if (!silent) + gossip_err("Error: mount option [%s] is not supported.\n", p); + return -EINVAL; +} + +static void orangefs_inode_cache_ctor(void *req) +{ + struct orangefs_inode_s *orangefs_inode = req; + + inode_init_once(&orangefs_inode->vfs_inode); + init_rwsem(&orangefs_inode->xattr_sem); + + orangefs_inode->vfs_inode.i_version = 1; +} + +static struct inode *orangefs_alloc_inode(struct super_block *sb) +{ + struct orangefs_inode_s *orangefs_inode; + + orangefs_inode = kmem_cache_alloc(orangefs_inode_cache, GFP_KERNEL); + if (orangefs_inode == NULL) { + gossip_err("Failed to allocate orangefs_inode\n"); + return NULL; + } + + /* + * We want to clear everything except for rw_semaphore and the + * vfs_inode. 
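+	 * (The slab ctor above runs only when an object is first
+	 * created, not on every allocation, so a recycled object may
+	 * still hold state from a previous inode; the per-use fields
+	 * are reset by hand below.)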
+ */ + memset(&orangefs_inode->refn.khandle, 0, 16); + orangefs_inode->refn.fs_id = ORANGEFS_FS_ID_NULL; + orangefs_inode->last_failed_block_index_read = 0; + memset(orangefs_inode->link_target, 0, sizeof(orangefs_inode->link_target)); + orangefs_inode->pinode_flags = 0; + + gossip_debug(GOSSIP_SUPER_DEBUG, + "orangefs_alloc_inode: allocated %p\n", + &orangefs_inode->vfs_inode); + return &orangefs_inode->vfs_inode; +} + +static void orangefs_destroy_inode(struct inode *inode) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + + gossip_debug(GOSSIP_SUPER_DEBUG, + "%s: deallocated %p destroying inode %pU\n", + __func__, orangefs_inode, get_khandle_from_ino(inode)); + + kmem_cache_free(orangefs_inode_cache, orangefs_inode); +} + +/* + * NOTE: information filled in here is typically reflected in the + * output of the system command 'df' +*/ +static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + int ret = -ENOMEM; + struct orangefs_kernel_op_s *new_op = NULL; + int flags = 0; + struct super_block *sb = NULL; + + sb = dentry->d_sb; + + gossip_debug(GOSSIP_SUPER_DEBUG, + "orangefs_statfs: called on sb %p (fs_id is %d)\n", + sb, + (int)(ORANGEFS_SB(sb)->fs_id)); + + new_op = op_alloc(ORANGEFS_VFS_OP_STATFS); + if (!new_op) + return ret; + new_op->upcall.req.statfs.fs_id = ORANGEFS_SB(sb)->fs_id; + + if (ORANGEFS_SB(sb)->flags & ORANGEFS_OPT_INTR) + flags = ORANGEFS_OP_INTERRUPTIBLE; + + ret = service_operation(new_op, "orangefs_statfs", flags); + + if (new_op->downcall.status < 0) + goto out_op_release; + + gossip_debug(GOSSIP_SUPER_DEBUG, + "%s: got %ld blocks available | " + "%ld blocks total | %ld block size | " + "%ld files total | %ld files avail\n", + __func__, + (long)new_op->downcall.resp.statfs.blocks_avail, + (long)new_op->downcall.resp.statfs.blocks_total, + (long)new_op->downcall.resp.statfs.block_size, + (long)new_op->downcall.resp.statfs.files_total, + (long)new_op->downcall.resp.statfs.files_avail); + + buf->f_type = sb->s_magic; + memcpy(&buf->f_fsid, &ORANGEFS_SB(sb)->fs_id, sizeof(buf->f_fsid)); + buf->f_bsize = new_op->downcall.resp.statfs.block_size; + buf->f_namelen = ORANGEFS_NAME_MAX; + + buf->f_blocks = (sector_t) new_op->downcall.resp.statfs.blocks_total; + buf->f_bfree = (sector_t) new_op->downcall.resp.statfs.blocks_avail; + buf->f_bavail = (sector_t) new_op->downcall.resp.statfs.blocks_avail; + buf->f_files = (sector_t) new_op->downcall.resp.statfs.files_total; + buf->f_ffree = (sector_t) new_op->downcall.resp.statfs.files_avail; + buf->f_frsize = sb->s_blocksize; + +out_op_release: + op_release(new_op); + gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_statfs: returning %d\n", ret); + return ret; +} + +/* + * Remount as initiated by VFS layer. We just need to reparse the mount + * options, no need to signal pvfs2-client-core about it. + */ +static int orangefs_remount_fs(struct super_block *sb, int *flags, char *data) +{ + gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_remount_fs: called\n"); + return parse_mount_options(sb, data, 1); +} + +/* + * Remount as initiated by pvfs2-client-core on restart. This is used to + * repopulate mount information left from previous pvfs2-client-core. + * + * the idea here is that given a valid superblock, we're + * re-initializing the user space client with the initial mount + * information specified when the super block was first initialized. + * this is very different than the first initialization/creation of a + * superblock. 
we use the special service_priority_operation to make + * sure that the mount gets ahead of any other pending operation that + * is waiting for servicing. this means that the pvfs2-client won't + * fail to start several times for all other pending operations before + * the client regains all of the mount information from us. + * NOTE: this function assumes that the request_mutex is already acquired! + */ +int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb) +{ + struct orangefs_kernel_op_s *new_op; + int ret = -EINVAL; + + gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_remount: called\n"); + + new_op = op_alloc(ORANGEFS_VFS_OP_FS_MOUNT); + if (!new_op) + return -ENOMEM; + strncpy(new_op->upcall.req.fs_mount.orangefs_config_server, + orangefs_sb->devname, + ORANGEFS_MAX_SERVER_ADDR_LEN); + + gossip_debug(GOSSIP_SUPER_DEBUG, + "Attempting ORANGEFS Remount via host %s\n", + new_op->upcall.req.fs_mount.orangefs_config_server); + + /* + * we assume that the calling function has already acquired the + * request_mutex to prevent other operations from bypassing + * this one + */ + ret = service_operation(new_op, "orangefs_remount", + ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX); + gossip_debug(GOSSIP_SUPER_DEBUG, + "orangefs_remount: mount got return value of %d\n", + ret); + if (ret == 0) { + /* + * store the id assigned to this sb -- it's just a + * short-lived mapping that the system interface uses + * to map this superblock to a particular mount entry + */ + orangefs_sb->id = new_op->downcall.resp.fs_mount.id; + orangefs_sb->mount_pending = 0; + } + + op_release(new_op); + return ret; +} + +int fsid_key_table_initialize(void) +{ + return 0; +} + +void fsid_key_table_finalize(void) +{ +} + +/* Called whenever the VFS dirties the inode in response to atime updates */ +static void orangefs_dirty_inode(struct inode *inode, int flags) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + + gossip_debug(GOSSIP_SUPER_DEBUG, + "orangefs_dirty_inode: %pU\n", + get_khandle_from_ino(inode)); + SetAtimeFlag(orangefs_inode); +} + +static const struct super_operations orangefs_s_ops = { + .alloc_inode = orangefs_alloc_inode, + .destroy_inode = orangefs_destroy_inode, + .dirty_inode = orangefs_dirty_inode, + .drop_inode = generic_delete_inode, + .statfs = orangefs_statfs, + .remount_fs = orangefs_remount_fs, + .show_options = generic_show_options, +}; + +static struct dentry *orangefs_fh_to_dentry(struct super_block *sb, + struct fid *fid, + int fh_len, + int fh_type) +{ + struct orangefs_object_kref refn; + + if (fh_len < 5 || fh_type > 2) + return NULL; + + ORANGEFS_khandle_from(&(refn.khandle), fid->raw, 16); + refn.fs_id = (u32) fid->raw[4]; + gossip_debug(GOSSIP_SUPER_DEBUG, + "fh_to_dentry: handle %pU, fs_id %d\n", + &refn.khandle, + refn.fs_id); + + return d_obtain_alias(orangefs_iget(sb, &refn)); +} + +static int orangefs_encode_fh(struct inode *inode, + __u32 *fh, + int *max_len, + struct inode *parent) +{ + int len = parent ? 
10 : 5; + int type = 1; + struct orangefs_object_kref refn; + + if (*max_len < len) { + gossip_lerr("fh buffer is too small for encoding\n"); + *max_len = len; + type = 255; + goto out; + } + + refn = ORANGEFS_I(inode)->refn; + ORANGEFS_khandle_to(&refn.khandle, fh, 16); + fh[4] = refn.fs_id; + + gossip_debug(GOSSIP_SUPER_DEBUG, + "Encoding fh: handle %pU, fsid %u\n", + &refn.khandle, + refn.fs_id); + + + if (parent) { + refn = ORANGEFS_I(parent)->refn; + ORANGEFS_khandle_to(&refn.khandle, (char *) fh + 20, 16); + fh[9] = refn.fs_id; + + type = 2; + gossip_debug(GOSSIP_SUPER_DEBUG, + "Encoding parent: handle %pU, fsid %u\n", + &refn.khandle, + refn.fs_id); + } + *max_len = len; + +out: + return type; +} + +static const struct export_operations orangefs_export_ops = { + .encode_fh = orangefs_encode_fh, + .fh_to_dentry = orangefs_fh_to_dentry, +}; + +static int orangefs_fill_sb(struct super_block *sb, + struct orangefs_fs_mount_response *fs_mount, + void *data, int silent) +{ + int ret = -EINVAL; + struct inode *root = NULL; + struct dentry *root_dentry = NULL; + struct orangefs_object_kref root_object; + + /* alloc and init our private orangefs sb info */ + sb->s_fs_info = kzalloc(sizeof(struct orangefs_sb_info_s), GFP_KERNEL); + if (!ORANGEFS_SB(sb)) + return -ENOMEM; + ORANGEFS_SB(sb)->sb = sb; + + ORANGEFS_SB(sb)->root_khandle = fs_mount->root_khandle; + ORANGEFS_SB(sb)->fs_id = fs_mount->fs_id; + ORANGEFS_SB(sb)->id = fs_mount->id; + + if (data) { + ret = parse_mount_options(sb, data, silent); + if (ret) + return ret; + } + + /* Hang the xattr handlers off the superblock */ + sb->s_xattr = orangefs_xattr_handlers; + sb->s_magic = ORANGEFS_SUPER_MAGIC; + sb->s_op = &orangefs_s_ops; + sb->s_d_op = &orangefs_dentry_operations; + + sb->s_blocksize = orangefs_bufmap_size_query(); + sb->s_blocksize_bits = orangefs_bufmap_shift_query(); + sb->s_maxbytes = MAX_LFS_FILESIZE; + + root_object.khandle = ORANGEFS_SB(sb)->root_khandle; + root_object.fs_id = ORANGEFS_SB(sb)->fs_id; + gossip_debug(GOSSIP_SUPER_DEBUG, + "get inode %pU, fsid %d\n", + &root_object.khandle, + root_object.fs_id); + + root = orangefs_iget(sb, &root_object); + if (IS_ERR(root)) + return PTR_ERR(root); + + gossip_debug(GOSSIP_SUPER_DEBUG, + "Allocated root inode [%p] with mode %x\n", + root, + root->i_mode); + + /* allocates and places root dentry in dcache */ + root_dentry = d_make_root(root); + if (!root_dentry) + return -ENOMEM; + + sb->s_export_op = &orangefs_export_ops; + sb->s_root = root_dentry; + return 0; +} + +struct dentry *orangefs_mount(struct file_system_type *fst, + int flags, + const char *devname, + void *data) +{ + int ret = -EINVAL; + struct super_block *sb = ERR_PTR(-EINVAL); + struct orangefs_kernel_op_s *new_op; + struct dentry *d = ERR_PTR(-EINVAL); + + gossip_debug(GOSSIP_SUPER_DEBUG, + "orangefs_mount: called with devname %s\n", + devname); + + if (!devname) { + gossip_err("ERROR: device name not specified.\n"); + return ERR_PTR(-EINVAL); + } + + new_op = op_alloc(ORANGEFS_VFS_OP_FS_MOUNT); + if (!new_op) + return ERR_PTR(-ENOMEM); + + strncpy(new_op->upcall.req.fs_mount.orangefs_config_server, + devname, + ORANGEFS_MAX_SERVER_ADDR_LEN); + + gossip_debug(GOSSIP_SUPER_DEBUG, + "Attempting ORANGEFS Mount via host %s\n", + new_op->upcall.req.fs_mount.orangefs_config_server); + + ret = service_operation(new_op, "orangefs_mount", 0); + gossip_debug(GOSSIP_SUPER_DEBUG, + "orangefs_mount: mount got return value of %d\n", ret); + if (ret) + goto free_op; + + if (new_op->downcall.resp.fs_mount.fs_id == 
ORANGEFS_FS_ID_NULL) { + gossip_err("ERROR: Retrieved null fs_id\n"); + ret = -EINVAL; + goto free_op; + } + + sb = sget(fst, NULL, set_anon_super, flags, NULL); + + if (IS_ERR(sb)) { + d = ERR_CAST(sb); + goto free_op; + } + + ret = orangefs_fill_sb(sb, + &new_op->downcall.resp.fs_mount, data, + flags & MS_SILENT ? 1 : 0); + + if (ret) { + d = ERR_PTR(ret); + goto free_op; + } + + /* + * on successful mount, store the devname and data + * used + */ + strncpy(ORANGEFS_SB(sb)->devname, + devname, + ORANGEFS_MAX_SERVER_ADDR_LEN); + + /* mount_pending must be cleared */ + ORANGEFS_SB(sb)->mount_pending = 0; + + /* + * finally, add this sb to our list of known orangefs + * sb's + */ + gossip_debug(GOSSIP_SUPER_DEBUG, + "Adding SB %p to orangefs superblocks\n", + ORANGEFS_SB(sb)); + spin_lock(&orangefs_superblocks_lock); + list_add_tail(&ORANGEFS_SB(sb)->list, &orangefs_superblocks); + spin_unlock(&orangefs_superblocks_lock); + op_release(new_op); + return dget(sb->s_root); + +free_op: + gossip_err("orangefs_mount: mount request failed with %d\n", ret); + if (ret == -EINVAL) { + gossip_err("Ensure that all orangefs-servers have the same FS configuration files\n"); + gossip_err("Look at pvfs2-client-core log file (typically /tmp/pvfs2-client.log) for more details\n"); + } + + op_release(new_op); + + return d; +} + +void orangefs_kill_sb(struct super_block *sb) +{ + gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_kill_sb: called\n"); + + /* provided sb cleanup */ + kill_anon_super(sb); + + /* + * issue the unmount to userspace to tell it to remove the + * dynamic mount info it has for this superblock + */ + orangefs_unmount_sb(sb); + + /* remove the sb from our list of orangefs specific sb's */ + + spin_lock(&orangefs_superblocks_lock); + __list_del_entry(&ORANGEFS_SB(sb)->list); /* not list_del_init */ + ORANGEFS_SB(sb)->list.prev = NULL; + spin_unlock(&orangefs_superblocks_lock); + + /* + * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us + * gets completed before we free the dang thing. + */ + mutex_lock(&request_mutex); + mutex_unlock(&request_mutex); + + /* free the orangefs superblock private data */ + kfree(ORANGEFS_SB(sb)); +} + +int orangefs_inode_cache_initialize(void) +{ + orangefs_inode_cache = kmem_cache_create("orangefs_inode_cache", + sizeof(struct orangefs_inode_s), + 0, + ORANGEFS_CACHE_CREATE_FLAGS, + orangefs_inode_cache_ctor); + + if (!orangefs_inode_cache) { + gossip_err("Cannot create orangefs_inode_cache\n"); + return -ENOMEM; + } + return 0; +} + +int orangefs_inode_cache_finalize(void) +{ + kmem_cache_destroy(orangefs_inode_cache); + return 0; +} diff --git a/fs/orangefs/symlink.c b/fs/orangefs/symlink.c new file mode 100644 index 000000000000..6418dd638680 --- /dev/null +++ b/fs/orangefs/symlink.c @@ -0,0 +1,19 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. 
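+ *
+ * The symlink target is cached in orangefs_inode->link_target by
+ * orangefs_inode_getattr() and published through inode->i_link, which
+ * is why the generic simple_get_link() used below can resolve it
+ * without another upcall.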
+ */ + +#include "protocol.h" +#include "orangefs-kernel.h" +#include "orangefs-bufmap.h" + +struct inode_operations orangefs_symlink_inode_operations = { + .readlink = generic_readlink, + .get_link = simple_get_link, + .setattr = orangefs_setattr, + .getattr = orangefs_getattr, + .listxattr = orangefs_listxattr, + .setxattr = generic_setxattr, + .permission = orangefs_permission, +}; diff --git a/fs/orangefs/upcall.h b/fs/orangefs/upcall.h new file mode 100644 index 000000000000..001b20239407 --- /dev/null +++ b/fs/orangefs/upcall.h @@ -0,0 +1,246 @@ +/* + * (C) 2001 Clemson University and The University of Chicago + * + * See COPYING in top-level directory. + */ + +#ifndef __UPCALL_H +#define __UPCALL_H + +/* + * Sanitized this header file to fix + * 32-64 bit interaction issues between + * client-core and device + */ +struct orangefs_io_request_s { + __s32 __pad1; + __s32 buf_index; + __s32 count; + __s32 __pad2; + __s64 offset; + struct orangefs_object_kref refn; + enum ORANGEFS_io_type io_type; + __s32 readahead_size; +}; + +struct orangefs_lookup_request_s { + __s32 sym_follow; + __s32 __pad1; + struct orangefs_object_kref parent_refn; + char d_name[ORANGEFS_NAME_MAX]; +}; + +struct orangefs_create_request_s { + struct orangefs_object_kref parent_refn; + struct ORANGEFS_sys_attr_s attributes; + char d_name[ORANGEFS_NAME_MAX]; +}; + +struct orangefs_symlink_request_s { + struct orangefs_object_kref parent_refn; + struct ORANGEFS_sys_attr_s attributes; + char entry_name[ORANGEFS_NAME_MAX]; + char target[ORANGEFS_NAME_MAX]; +}; + +struct orangefs_getattr_request_s { + struct orangefs_object_kref refn; + __u32 mask; + __u32 __pad1; +}; + +struct orangefs_setattr_request_s { + struct orangefs_object_kref refn; + struct ORANGEFS_sys_attr_s attributes; +}; + +struct orangefs_remove_request_s { + struct orangefs_object_kref parent_refn; + char d_name[ORANGEFS_NAME_MAX]; +}; + +struct orangefs_mkdir_request_s { + struct orangefs_object_kref parent_refn; + struct ORANGEFS_sys_attr_s attributes; + char d_name[ORANGEFS_NAME_MAX]; +}; + +struct orangefs_readdir_request_s { + struct orangefs_object_kref refn; + __u64 token; + __s32 max_dirent_count; + __s32 buf_index; +}; + +struct orangefs_readdirplus_request_s { + struct orangefs_object_kref refn; + __u64 token; + __s32 max_dirent_count; + __u32 mask; + __s32 buf_index; + __s32 __pad1; +}; + +struct orangefs_rename_request_s { + struct orangefs_object_kref old_parent_refn; + struct orangefs_object_kref new_parent_refn; + char d_old_name[ORANGEFS_NAME_MAX]; + char d_new_name[ORANGEFS_NAME_MAX]; +}; + +struct orangefs_statfs_request_s { + __s32 fs_id; + __s32 __pad1; +}; + +struct orangefs_truncate_request_s { + struct orangefs_object_kref refn; + __s64 size; +}; + +struct orangefs_mmap_ra_cache_flush_request_s { + struct orangefs_object_kref refn; +}; + +struct orangefs_fs_mount_request_s { + char orangefs_config_server[ORANGEFS_MAX_SERVER_ADDR_LEN]; +}; + +struct orangefs_fs_umount_request_s { + __s32 id; + __s32 fs_id; + char orangefs_config_server[ORANGEFS_MAX_SERVER_ADDR_LEN]; +}; + +struct orangefs_getxattr_request_s { + struct orangefs_object_kref refn; + __s32 key_sz; + __s32 __pad1; + char key[ORANGEFS_MAX_XATTR_NAMELEN]; +}; + +struct orangefs_setxattr_request_s { + struct orangefs_object_kref refn; + struct ORANGEFS_keyval_pair keyval; + __s32 flags; + __s32 __pad1; +}; + +struct orangefs_listxattr_request_s { + struct orangefs_object_kref refn; + __s32 requested_count; + __s32 __pad1; + __u64 token; +}; + +struct 
orangefs_removexattr_request_s { + struct orangefs_object_kref refn; + __s32 key_sz; + __s32 __pad1; + char key[ORANGEFS_MAX_XATTR_NAMELEN]; +}; + +struct orangefs_op_cancel_s { + __u64 op_tag; +}; + +struct orangefs_fsync_request_s { + struct orangefs_object_kref refn; +}; + +enum orangefs_param_request_type { + ORANGEFS_PARAM_REQUEST_SET = 1, + ORANGEFS_PARAM_REQUEST_GET = 2 +}; + +enum orangefs_param_request_op { + ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS = 1, + ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT = 2, + ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT = 3, + ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE = 4, + ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS = 5, + ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE = 6, + ORANGEFS_PARAM_REQUEST_OP_PERF_RESET = 7, + ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS = 8, + ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT = 9, + ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT = 10, + ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE = 11, + ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_TIMEOUT_MSECS = 12, + ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_HARD_LIMIT = 13, + ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_SOFT_LIMIT = 14, + ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_RECLAIM_PERCENTAGE = 15, + ORANGEFS_PARAM_REQUEST_OP_CLIENT_DEBUG = 16, + ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS = 17, + ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT = 18, + ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT = 19, + ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE = 20, + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS = 21, + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT = 22, + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT = 23, + ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE = 24, + ORANGEFS_PARAM_REQUEST_OP_TWO_MASK_VALUES = 25, +}; + +struct orangefs_param_request_s { + enum orangefs_param_request_type type; + enum orangefs_param_request_op op; + __s64 value; + char s_value[ORANGEFS_MAX_DEBUG_STRING_LEN]; +}; + +enum orangefs_perf_count_request_type { + ORANGEFS_PERF_COUNT_REQUEST_ACACHE = 1, + ORANGEFS_PERF_COUNT_REQUEST_NCACHE = 2, + ORANGEFS_PERF_COUNT_REQUEST_CAPCACHE = 3, +}; + +struct orangefs_perf_count_request_s { + enum orangefs_perf_count_request_type type; + __s32 __pad1; +}; + +struct orangefs_fs_key_request_s { + __s32 fsid; + __s32 __pad1; +}; + +struct orangefs_upcall_s { + __s32 type; + __u32 uid; + __u32 gid; + int pid; + int tgid; + /* Trailers unused but must be retained for protocol compatibility. 
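+	 * (Editorial note, an inference from the comment above: the fields
+	 * still occupy bytes in the fixed upcall record exchanged with the
+	 * userspace client-core, so removing them would shift the offset of
+	 * the req union below and break the established wire format.)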
*/
+	__s64 trailer_size;
+	char *trailer_buf;
+
+	union {
+		struct orangefs_io_request_s io;
+		struct orangefs_lookup_request_s lookup;
+		struct orangefs_create_request_s create;
+		struct orangefs_symlink_request_s sym;
+		struct orangefs_getattr_request_s getattr;
+		struct orangefs_setattr_request_s setattr;
+		struct orangefs_remove_request_s remove;
+		struct orangefs_mkdir_request_s mkdir;
+		struct orangefs_readdir_request_s readdir;
+		struct orangefs_readdirplus_request_s readdirplus;
+		struct orangefs_rename_request_s rename;
+		struct orangefs_statfs_request_s statfs;
+		struct orangefs_truncate_request_s truncate;
+		struct orangefs_mmap_ra_cache_flush_request_s ra_cache_flush;
+		struct orangefs_fs_mount_request_s fs_mount;
+		struct orangefs_fs_umount_request_s fs_umount;
+		struct orangefs_getxattr_request_s getxattr;
+		struct orangefs_setxattr_request_s setxattr;
+		struct orangefs_listxattr_request_s listxattr;
+		struct orangefs_removexattr_request_s removexattr;
+		struct orangefs_op_cancel_s cancel;
+		struct orangefs_fsync_request_s fsync;
+		struct orangefs_param_request_s param;
+		struct orangefs_perf_count_request_s perf_count;
+		struct orangefs_fs_key_request_s fs_key;
+	} req;
+};
+
+#endif /* __UPCALL_H */
diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
new file mode 100644
index 000000000000..31635bc303fe
--- /dev/null
+++ b/fs/orangefs/waitqueue.c
@@ -0,0 +1,357 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ * (C) 2011 Omnibond Systems
+ *
+ * Changes by Acxiom Corporation to implement generic service_operation()
+ * function, Copyright Acxiom Corporation, 2005.
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ * In-kernel waitqueue operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+
+static int wait_for_matching_downcall(struct orangefs_kernel_op_s *, long, bool);
+static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *);
+
+/*
+ * Walk the list of operations present in the request queue and mark
+ * them all as purged.
+ * NOTE: This is called from the device close after client-core has
+ * guaranteed that no new operations could appear on the list, since
+ * client-core is going to exit anyway.
+ */
+void purge_waiting_ops(void)
+{
+	struct orangefs_kernel_op_s *op;
+
+	spin_lock(&orangefs_request_list_lock);
+	list_for_each_entry(op, &orangefs_request_list, list) {
+		gossip_debug(GOSSIP_WAIT_DEBUG,
+			     "pvfs2-client-core: purging op tag %llu %s\n",
+			     llu(op->tag),
+			     get_opname_string(op));
+		set_op_state_purged(op);
+		gossip_debug(GOSSIP_DEV_DEBUG,
+			     "%s: op:%s: op_state:%d: process:%s:\n",
+			     __func__,
+			     get_opname_string(op),
+			     op->op_state,
+			     current->comm);
+	}
+	spin_unlock(&orangefs_request_list_lock);
+}
+
+/*
+ * Submits an ORANGEFS operation and waits for it to complete.
+ *
+ * Note that op->downcall.status will contain the status of the operation (in
+ * errno format), whether provided by pvfs2-client or as a result of a failure
+ * to service the operation. If the caller wishes to distinguish, then
+ * op->state can be checked to see if it was serviced or not.
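+ *
+ * A typical caller (an illustrative sketch; it follows the same pattern
+ * the xattr code later in this patch uses) allocates the op, fills in
+ * the upcall, services it, and releases it:
+ *
+ *	new_op = op_alloc(ORANGEFS_VFS_OP_GETXATTR);
+ *	if (!new_op)
+ *		return -ENOMEM;
+ *	new_op->upcall.req.getxattr.refn = orangefs_inode->refn;
+ *	ret = service_operation(new_op, "orangefs_inode_getxattr",
+ *				get_interruptible_flag(inode));
+ *	op_release(new_op);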
+ * + * Returns contents of op->downcall.status for convenience + */ +int service_operation(struct orangefs_kernel_op_s *op, + const char *op_name, + int flags) +{ + long timeout = MAX_SCHEDULE_TIMEOUT; + int ret = 0; + + DEFINE_WAIT(wait_entry); + + op->upcall.tgid = current->tgid; + op->upcall.pid = current->pid; + +retry_servicing: + op->downcall.status = 0; + gossip_debug(GOSSIP_WAIT_DEBUG, + "%s: %s op:%p: process:%s: pid:%d:\n", + __func__, + op_name, + op, + current->comm, + current->pid); + + /* + * If ORANGEFS_OP_NO_MUTEX was set in flags, we need to avoid + * acquiring the request_mutex because we're servicing a + * high priority remount operation and the request_mutex is + * already taken. + */ + if (!(flags & ORANGEFS_OP_NO_MUTEX)) { + if (flags & ORANGEFS_OP_INTERRUPTIBLE) + ret = mutex_lock_interruptible(&request_mutex); + else + ret = mutex_lock_killable(&request_mutex); + /* + * check to see if we were interrupted while waiting for + * mutex + */ + if (ret < 0) { + op->downcall.status = ret; + gossip_debug(GOSSIP_WAIT_DEBUG, + "%s: service_operation interrupted.\n", + __func__); + return ret; + } + } + + /* queue up the operation */ + spin_lock(&orangefs_request_list_lock); + spin_lock(&op->lock); + set_op_state_waiting(op); + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: op:%s: op_state:%d: process:%s:\n", + __func__, + get_opname_string(op), + op->op_state, + current->comm); + /* add high priority remount op to the front of the line. */ + if (flags & ORANGEFS_OP_PRIORITY) + list_add(&op->list, &orangefs_request_list); + else + list_add_tail(&op->list, &orangefs_request_list); + spin_unlock(&op->lock); + wake_up_interruptible(&orangefs_request_list_waitq); + if (!__is_daemon_in_service()) { + gossip_debug(GOSSIP_WAIT_DEBUG, + "%s:client core is NOT in service.\n", + __func__); + timeout = op_timeout_secs * HZ; + } + spin_unlock(&orangefs_request_list_lock); + + if (!(flags & ORANGEFS_OP_NO_MUTEX)) + mutex_unlock(&request_mutex); + + ret = wait_for_matching_downcall(op, timeout, + flags & ORANGEFS_OP_INTERRUPTIBLE); + + gossip_debug(GOSSIP_WAIT_DEBUG, + "%s: wait_for_matching_downcall returned %d for %p\n", + __func__, + ret, + op); + + /* got matching downcall; make sure status is in errno format */ + if (!ret) { + spin_unlock(&op->lock); + op->downcall.status = + orangefs_normalize_to_errno(op->downcall.status); + ret = op->downcall.status; + goto out; + } + + /* failed to get matching downcall */ + if (ret == -ETIMEDOUT) { + gossip_err("%s: %s -- wait timed out; aborting attempt.\n", + __func__, + op_name); + } + + /* + * remove a waiting op from the request list or + * remove an in-progress op from the in-progress list. + */ + orangefs_clean_up_interrupted_operation(op); + + op->downcall.status = ret; + /* retry if operation has not been serviced and if requested */ + if (ret == -EAGAIN) { + op->attempts++; + timeout = op_timeout_secs * HZ; + gossip_debug(GOSSIP_WAIT_DEBUG, + "orangefs: tag %llu (%s)" + " -- operation to be retried (%d attempt)\n", + llu(op->tag), + op_name, + op->attempts); + + /* + * io ops (ops that use the shared memory buffer) have + * to be returned to their caller for a retry. Other ops + * can just be recycled here. + */ + if (!op->uses_shared_memory) + goto retry_servicing; + } + +out: + gossip_debug(GOSSIP_WAIT_DEBUG, + "%s: %s returning: %d for %p.\n", + __func__, + op_name, + ret, + op); + return ret; +} + +/* This can get called on an I/O op if it had a bad service_operation. 
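+ * It reuses the op as an ORANGEFS_VFS_OP_CANCEL upcall for the original
+ * tag: the shared-memory slot is remembered in slot_to_free, the op is
+ * re-tagged via orangefs_new_tag() and requeued, so that client-core can
+ * abandon the outstanding I/O.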
*/ +bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op) +{ + u64 tag = op->tag; + if (!op_state_in_progress(op)) + return false; + + op->slot_to_free = op->upcall.req.io.buf_index; + memset(&op->upcall, 0, sizeof(op->upcall)); + memset(&op->downcall, 0, sizeof(op->downcall)); + op->upcall.type = ORANGEFS_VFS_OP_CANCEL; + op->upcall.req.cancel.op_tag = tag; + op->downcall.type = ORANGEFS_VFS_OP_INVALID; + op->downcall.status = -1; + orangefs_new_tag(op); + + spin_lock(&orangefs_request_list_lock); + /* orangefs_request_list_lock is enough of a barrier here */ + if (!__is_daemon_in_service()) { + spin_unlock(&orangefs_request_list_lock); + return false; + } + spin_lock(&op->lock); + set_op_state_waiting(op); + gossip_debug(GOSSIP_DEV_DEBUG, + "%s: op:%s: op_state:%d: process:%s:\n", + __func__, + get_opname_string(op), + op->op_state, + current->comm); + list_add(&op->list, &orangefs_request_list); + spin_unlock(&op->lock); + spin_unlock(&orangefs_request_list_lock); + + gossip_debug(GOSSIP_WAIT_DEBUG, + "Attempting ORANGEFS operation cancellation of tag %llu\n", + llu(tag)); + return true; +} + +/* + * Change an op to the "given up" state and remove it from its list. + */ +static void + orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op) +{ + /* + * handle interrupted cases depending on what state we were in when + * the interruption is detected. + * + * Called with op->lock held. + */ + + /* + * List manipulation code elsewhere will ignore ops that + * have been given up upon. + */ + op->op_state |= OP_VFS_STATE_GIVEN_UP; + + if (list_empty(&op->list)) { + /* caught copying to/from daemon */ + BUG_ON(op_state_serviced(op)); + spin_unlock(&op->lock); + wait_for_completion(&op->waitq); + } else if (op_state_waiting(op)) { + /* + * upcall hasn't been read; remove op from upcall request + * list. + */ + spin_unlock(&op->lock); + spin_lock(&orangefs_request_list_lock); + list_del_init(&op->list); + spin_unlock(&orangefs_request_list_lock); + gossip_debug(GOSSIP_WAIT_DEBUG, + "Interrupted: Removed op %p from request_list\n", + op); + } else if (op_state_in_progress(op)) { + /* op must be removed from the in progress htable */ + spin_unlock(&op->lock); + spin_lock(&htable_ops_in_progress_lock); + list_del_init(&op->list); + spin_unlock(&htable_ops_in_progress_lock); + gossip_debug(GOSSIP_WAIT_DEBUG, + "Interrupted: Removed op %p" + " from htable_ops_in_progress\n", + op); + } else { + spin_unlock(&op->lock); + gossip_err("interrupted operation is in a weird state 0x%x\n", + op->op_state); + } + reinit_completion(&op->waitq); +} + +/* + * Sleeps on waitqueue waiting for matching downcall. + * If client-core finishes servicing, then we are good to go. + * else if client-core exits, we get woken up here, and retry with a timeout + * + * When this call returns to the caller, the specified op will no + * longer be in either the in_progress hash table or on the request list. + * + * Returns 0 on success and -errno on failure + * Errors are: + * EAGAIN in case we want the caller to requeue and try again.. + * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this + * operation since client-core seems to be exiting too often + * or if we were interrupted. + * + * Returns with op->lock taken. 
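+ *
+ * The sole caller, service_operation(), drops op->lock itself on the
+ * serviced path and otherwise passes the op (still locked) to
+ * orangefs_clean_up_interrupted_operation(), which unlocks it.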
+ */
+static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op,
+				      long timeout,
+				      bool interruptible)
+{
+	long n;
+
+	/*
+	 * There's a "schedule_timeout" inside these wait primitives,
+	 * during which the op is out of the hands of the user process
+	 * that needs something done and is being manipulated by the
+	 * client-core process.
+	 */
+	if (interruptible)
+		n = wait_for_completion_interruptible_timeout(&op->waitq,
+							      timeout);
+	else
+		n = wait_for_completion_killable_timeout(&op->waitq, timeout);
+
+	spin_lock(&op->lock);
+
+	if (op_state_serviced(op))
+		return 0;
+
+	if (unlikely(n < 0)) {
+		gossip_debug(GOSSIP_WAIT_DEBUG,
+			     "%s: operation interrupted, tag %llu, %p\n",
+			     __func__,
+			     llu(op->tag),
+			     op);
+		return -EINTR;
+	}
+	if (op_state_purged(op)) {
+		gossip_debug(GOSSIP_WAIT_DEBUG,
+			     "%s: operation purged, tag %llu, %p, %d\n",
+			     __func__,
+			     llu(op->tag),
+			     op,
+			     op->attempts);
+		return (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
+			 -EAGAIN :
+			 -EIO;
+	}
+	/* must have timed out, then... */
+	gossip_debug(GOSSIP_WAIT_DEBUG,
+		     "%s: operation timed out, tag %llu, %p, %d\n",
+		     __func__,
+		     llu(op->tag),
+		     op,
+		     op->attempts);
+	return -ETIMEDOUT;
+}
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
new file mode 100644
index 000000000000..ef5da7538cd5
--- /dev/null
+++ b/fs/orangefs/xattr.c
@@ -0,0 +1,545 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ * Linux VFS extended attribute operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+#include <linux/posix_acl_xattr.h>
+#include <linux/xattr.h>
+
+
+#define SYSTEM_ORANGEFS_KEY "system.pvfs2."
+#define SYSTEM_ORANGEFS_KEY_LEN 13
+
+/*
+ * This function returns
+ * 0 if the key corresponding to name is not meant to be printed as part
+ * of a listxattr, and
+ * 1 if the key corresponding to name is meant to be returned as part of
+ * a listxattr.
+ * The keys that start with SYSTEM_ORANGEFS_KEY are the ones to avoid
+ * printing, so (despite its name) a return of 1 means "safe to list".
+ */
+static int is_reserved_key(const char *key, size_t size)
+{
+
+	if (size < SYSTEM_ORANGEFS_KEY_LEN)
+		return 1;
+
+	return strncmp(key, SYSTEM_ORANGEFS_KEY, SYSTEM_ORANGEFS_KEY_LEN) ? 1 : 0;
+}
+
+static inline int convert_to_internal_xattr_flags(int setxattr_flags)
+{
+	int internal_flag = 0;
+
+	if (setxattr_flags & XATTR_REPLACE) {
+		/* Attribute must exist! */
+		internal_flag = ORANGEFS_XATTR_REPLACE;
+	} else if (setxattr_flags & XATTR_CREATE) {
+		/* Attribute must not exist */
+		internal_flag = ORANGEFS_XATTR_CREATE;
+	}
+	return internal_flag;
+}
+
+
+/*
+ * Tries to read the value of a specified key on a given file into a
+ * user-specified buffer. Note that the getxattr interface allows users
+ * to probe the size of an extended attribute by passing 0 as the size.
+ * Thus our return value is always the size of the attribute unless the
+ * key does not exist for the file or there were errors in fetching the
+ * attribute value.
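+ *
+ * This mirrors the getxattr(2) calling convention, in which userspace
+ * typically probes for the size first and then fetches the value
+ * (an illustrative sketch, not part of this patch):
+ *
+ *	len = getxattr(path, "user.foo", NULL, 0);
+ *	buf = malloc(len);
+ *	len = getxattr(path, "user.foo", buf, len);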
+ */ +ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix, + const char *name, void *buffer, size_t size) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_kernel_op_s *new_op = NULL; + ssize_t ret = -ENOMEM; + ssize_t length = 0; + int fsuid; + int fsgid; + + gossip_debug(GOSSIP_XATTR_DEBUG, + "%s: prefix %s name %s, buffer_size %zd\n", + __func__, prefix, name, size); + + if (name == NULL || (size > 0 && buffer == NULL)) { + gossip_err("orangefs_inode_getxattr: bogus NULL pointers\n"); + return -EINVAL; + } + if ((strlen(name) + strlen(prefix)) >= ORANGEFS_MAX_XATTR_NAMELEN) { + gossip_err("Invalid key length (%d)\n", + (int)(strlen(name) + strlen(prefix))); + return -EINVAL; + } + + fsuid = from_kuid(current_user_ns(), current_fsuid()); + fsgid = from_kgid(current_user_ns(), current_fsgid()); + + gossip_debug(GOSSIP_XATTR_DEBUG, + "getxattr on inode %pU, name %s " + "(uid %o, gid %o)\n", + get_khandle_from_ino(inode), + name, + fsuid, + fsgid); + + down_read(&orangefs_inode->xattr_sem); + + new_op = op_alloc(ORANGEFS_VFS_OP_GETXATTR); + if (!new_op) + goto out_unlock; + + new_op->upcall.req.getxattr.refn = orangefs_inode->refn; + ret = snprintf((char *)new_op->upcall.req.getxattr.key, + ORANGEFS_MAX_XATTR_NAMELEN, "%s%s", prefix, name); + + /* + * NOTE: Although keys are meant to be NULL terminated textual + * strings, I am going to explicitly pass the length just in case + * we change this later on... + */ + new_op->upcall.req.getxattr.key_sz = ret + 1; + + ret = service_operation(new_op, "orangefs_inode_getxattr", + get_interruptible_flag(inode)); + if (ret != 0) { + if (ret == -ENOENT) { + ret = -ENODATA; + gossip_debug(GOSSIP_XATTR_DEBUG, + "orangefs_inode_getxattr: inode %pU key %s" + " does not exist!\n", + get_khandle_from_ino(inode), + (char *)new_op->upcall.req.getxattr.key); + } + goto out_release_op; + } + + /* + * Length returned includes null terminator. + */ + length = new_op->downcall.resp.getxattr.val_sz; + + /* + * Just return the length of the queried attribute. + */ + if (size == 0) { + ret = length; + goto out_release_op; + } + + /* + * Check to see if key length is > provided buffer size. + */ + if (length > size) { + ret = -ERANGE; + goto out_release_op; + } + + memset(buffer, 0, size); + memcpy(buffer, new_op->downcall.resp.getxattr.val, length); + gossip_debug(GOSSIP_XATTR_DEBUG, + "orangefs_inode_getxattr: inode %pU " + "key %s key_sz %d, val_len %d\n", + get_khandle_from_ino(inode), + (char *)new_op-> + upcall.req.getxattr.key, + (int)new_op-> + upcall.req.getxattr.key_sz, + (int)ret); + + ret = length; + +out_release_op: + op_release(new_op); +out_unlock: + up_read(&orangefs_inode->xattr_sem); + return ret; +} + +static int orangefs_inode_removexattr(struct inode *inode, + const char *prefix, + const char *name, + int flags) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_kernel_op_s *new_op = NULL; + int ret = -ENOMEM; + + down_write(&orangefs_inode->xattr_sem); + new_op = op_alloc(ORANGEFS_VFS_OP_REMOVEXATTR); + if (!new_op) + goto out_unlock; + + new_op->upcall.req.removexattr.refn = orangefs_inode->refn; + /* + * NOTE: Although keys are meant to be NULL terminated + * textual strings, I am going to explicitly pass the + * length just in case we change this later on... + */ + ret = snprintf((char *)new_op->upcall.req.removexattr.key, + ORANGEFS_MAX_XATTR_NAMELEN, + "%s%s", + (prefix ? 
prefix : ""), + name); + new_op->upcall.req.removexattr.key_sz = ret + 1; + + gossip_debug(GOSSIP_XATTR_DEBUG, + "orangefs_inode_removexattr: key %s, key_sz %d\n", + (char *)new_op->upcall.req.removexattr.key, + (int)new_op->upcall.req.removexattr.key_sz); + + ret = service_operation(new_op, + "orangefs_inode_removexattr", + get_interruptible_flag(inode)); + if (ret == -ENOENT) { + /* + * Request to replace a non-existent attribute is an error. + */ + if (flags & XATTR_REPLACE) + ret = -ENODATA; + else + ret = 0; + } + + gossip_debug(GOSSIP_XATTR_DEBUG, + "orangefs_inode_removexattr: returning %d\n", ret); + + op_release(new_op); +out_unlock: + up_write(&orangefs_inode->xattr_sem); + return ret; +} + +/* + * Tries to set an attribute for a given key on a file. + * + * Returns a -ve number on error and 0 on success. Key is text, but value + * can be binary! + */ +int orangefs_inode_setxattr(struct inode *inode, const char *prefix, + const char *name, const void *value, size_t size, int flags) +{ + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_kernel_op_s *new_op; + int internal_flag = 0; + int ret = -ENOMEM; + + gossip_debug(GOSSIP_XATTR_DEBUG, + "%s: prefix %s, name %s, buffer_size %zd\n", + __func__, prefix, name, size); + + if (size < 0 || + size >= ORANGEFS_MAX_XATTR_VALUELEN || + flags < 0) { + gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n", + (int)size, + flags); + return -EINVAL; + } + + if (name == NULL || + (size > 0 && value == NULL)) { + gossip_err("orangefs_inode_setxattr: bogus NULL pointers!\n"); + return -EINVAL; + } + + internal_flag = convert_to_internal_xattr_flags(flags); + + if (prefix) { + if (strlen(name) + strlen(prefix) >= ORANGEFS_MAX_XATTR_NAMELEN) { + gossip_err + ("orangefs_inode_setxattr: bogus key size (%d)\n", + (int)(strlen(name) + strlen(prefix))); + return -EINVAL; + } + } else { + if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) { + gossip_err + ("orangefs_inode_setxattr: bogus key size (%d)\n", + (int)(strlen(name))); + return -EINVAL; + } + } + + /* This is equivalent to a removexattr */ + if (size == 0 && value == NULL) { + gossip_debug(GOSSIP_XATTR_DEBUG, + "removing xattr (%s%s)\n", + prefix, + name); + return orangefs_inode_removexattr(inode, prefix, name, flags); + } + + gossip_debug(GOSSIP_XATTR_DEBUG, + "setxattr on inode %pU, name %s\n", + get_khandle_from_ino(inode), + name); + + down_write(&orangefs_inode->xattr_sem); + new_op = op_alloc(ORANGEFS_VFS_OP_SETXATTR); + if (!new_op) + goto out_unlock; + + + new_op->upcall.req.setxattr.refn = orangefs_inode->refn; + new_op->upcall.req.setxattr.flags = internal_flag; + /* + * NOTE: Although keys are meant to be NULL terminated textual + * strings, I am going to explicitly pass the length just in + * case we change this later on... 
+ */ + ret = snprintf((char *)new_op->upcall.req.setxattr.keyval.key, + ORANGEFS_MAX_XATTR_NAMELEN, + "%s%s", + prefix, name); + new_op->upcall.req.setxattr.keyval.key_sz = ret + 1; + memcpy(new_op->upcall.req.setxattr.keyval.val, value, size); + new_op->upcall.req.setxattr.keyval.val_sz = size; + + gossip_debug(GOSSIP_XATTR_DEBUG, + "orangefs_inode_setxattr: key %s, key_sz %d " + " value size %zd\n", + (char *)new_op->upcall.req.setxattr.keyval.key, + (int)new_op->upcall.req.setxattr.keyval.key_sz, + size); + + ret = service_operation(new_op, + "orangefs_inode_setxattr", + get_interruptible_flag(inode)); + + gossip_debug(GOSSIP_XATTR_DEBUG, + "orangefs_inode_setxattr: returning %d\n", + ret); + + /* when request is serviced properly, free req op struct */ + op_release(new_op); +out_unlock: + up_write(&orangefs_inode->xattr_sem); + return ret; +} + +/* + * Tries to get a specified object's keys into a user-specified buffer of a + * given size. Note that like the previous instances of xattr routines, this + * also allows you to pass in a NULL pointer and 0 size to probe the size for + * subsequent memory allocations. Thus our return value is always the size of + * all the keys unless there were errors in fetching the keys! + */ +ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size) +{ + struct inode *inode = dentry->d_inode; + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + struct orangefs_kernel_op_s *new_op; + __u64 token = ORANGEFS_ITERATE_START; + ssize_t ret = -ENOMEM; + ssize_t total = 0; + int count_keys = 0; + int key_size; + int i = 0; + int returned_count = 0; + + if (size > 0 && buffer == NULL) { + gossip_err("%s: bogus NULL pointers\n", __func__); + return -EINVAL; + } + if (size < 0) { + gossip_err("Invalid size (%d)\n", (int)size); + return -EINVAL; + } + + down_read(&orangefs_inode->xattr_sem); + new_op = op_alloc(ORANGEFS_VFS_OP_LISTXATTR); + if (!new_op) + goto out_unlock; + + if (buffer && size > 0) + memset(buffer, 0, size); + +try_again: + key_size = 0; + new_op->upcall.req.listxattr.refn = orangefs_inode->refn; + new_op->upcall.req.listxattr.token = token; + new_op->upcall.req.listxattr.requested_count = + (size == 0) ? 0 : ORANGEFS_MAX_XATTR_LISTLEN; + ret = service_operation(new_op, __func__, + get_interruptible_flag(inode)); + if (ret != 0) + goto done; + + if (size == 0) { + /* + * This is a bit of a big upper limit, but I did not want to + * spend too much time getting this correct, since users end + * up allocating memory rather than us... + */ + total = new_op->downcall.resp.listxattr.returned_count * + ORANGEFS_MAX_XATTR_NAMELEN; + goto done; + } + + returned_count = new_op->downcall.resp.listxattr.returned_count; + if (returned_count < 0 || + returned_count >= ORANGEFS_MAX_XATTR_LISTLEN) { + gossip_err("%s: impossible value for returned_count:%d:\n", + __func__, + returned_count); + ret = -EIO; + goto done; + } + + /* + * Check to see how much can be fit in the buffer. Fit only whole keys. 
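+	 * A key is copied only if all lengths[i] bytes of it still fit in
+	 * the caller's buffer; as soon as one key would overflow, we stop
+	 * and return what has been copied so far.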
+	 */
+	for (i = 0; i < returned_count; i++) {
+		if (new_op->downcall.resp.listxattr.lengths[i] < 0 ||
+		    new_op->downcall.resp.listxattr.lengths[i] >
+		    ORANGEFS_MAX_XATTR_NAMELEN) {
+			gossip_err("%s: impossible value for lengths[%d]: %d\n",
+				   __func__,
+				   i,
+				   new_op->downcall.resp.listxattr.lengths[i]);
+			ret = -EIO;
+			goto done;
+		}
+		if (total + new_op->downcall.resp.listxattr.lengths[i] > size)
+			goto done;
+
+		/*
+		 * Since many dumb programs try to setxattr() on our reserved
+		 * xattrs this is a feeble attempt at defeating those by not
+		 * listing them in the output of listxattr... sigh.
+		 */
+		if (is_reserved_key(new_op->downcall.resp.listxattr.key +
+				    key_size,
+				    new_op->downcall.resp.
+				    listxattr.lengths[i])) {
+			gossip_debug(GOSSIP_XATTR_DEBUG, "Copying key %d -> %s\n",
+				     i, new_op->downcall.resp.listxattr.key +
+					key_size);
+			memcpy(buffer + total,
+			       new_op->downcall.resp.listxattr.key + key_size,
+			       new_op->downcall.resp.listxattr.lengths[i]);
+			total += new_op->downcall.resp.listxattr.lengths[i];
+			count_keys++;
+		} else {
+			gossip_debug(GOSSIP_XATTR_DEBUG, "[RESERVED] key %d -> %s\n",
+				     i, new_op->downcall.resp.listxattr.key +
+					key_size);
+		}
+		key_size += new_op->downcall.resp.listxattr.lengths[i];
+	}
+
+	/*
+	 * Since the buffer was large enough, we might have to continue
+	 * fetching more keys!
+	 */
+	token = new_op->downcall.resp.listxattr.token;
+	if (token != ORANGEFS_ITERATE_END)
+		goto try_again;
+
+done:
+	gossip_debug(GOSSIP_XATTR_DEBUG, "%s: returning %d"
+		     " [size of buffer %ld] (filled in %d keys)\n",
+		     __func__,
+		     ret ? (int)ret : (int)total,
+		     (long)size,
+		     count_keys);
+	op_release(new_op);
+	if (ret == 0)
+		ret = total;
+out_unlock:
+	up_read(&orangefs_inode->xattr_sem);
+	return ret;
+}
+
+static int orangefs_xattr_set_default(const struct xattr_handler *handler,
+				      struct dentry *dentry,
+				      const char *name,
+				      const void *buffer,
+				      size_t size,
+				      int flags)
+{
+	return orangefs_inode_setxattr(dentry->d_inode,
+				       ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
+				       name,
+				       buffer,
+				       size,
+				       flags);
+}
+
+static int orangefs_xattr_get_default(const struct xattr_handler *handler,
+				      struct dentry *dentry,
+				      const char *name,
+				      void *buffer,
+				      size_t size)
+{
+	return orangefs_inode_getxattr(dentry->d_inode,
+				       ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
+				       name,
+				       buffer,
+				       size);
+
+}
+
+static int orangefs_xattr_set_trusted(const struct xattr_handler *handler,
+				      struct dentry *dentry,
+				      const char *name,
+				      const void *buffer,
+				      size_t size,
+				      int flags)
+{
+	return orangefs_inode_setxattr(dentry->d_inode,
+				       ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
+				       name,
+				       buffer,
+				       size,
+				       flags);
+}
+
+static int orangefs_xattr_get_trusted(const struct xattr_handler *handler,
+				      struct dentry *dentry,
+				      const char *name,
+				      void *buffer,
+				      size_t size)
+{
+	return orangefs_inode_getxattr(dentry->d_inode,
+				       ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
+				       name,
+				       buffer,
+				       size);
+}
+
+static struct xattr_handler orangefs_xattr_trusted_handler = {
+	.prefix = ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
+	.get = orangefs_xattr_get_trusted,
+	.set = orangefs_xattr_set_trusted,
+};
+
+static struct xattr_handler orangefs_xattr_default_handler = {
+	/*
+	 * NOTE: this is set to the empty string, so that all
+	 * un-prefixed xattr keys get caught here!
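+	 *
+	 * The VFS tries the handlers in orangefs_xattr_handlers[] in
+	 * order and uses the first one whose prefix matches the key, so
+	 * this catch-all entry must stay last, just ahead of the
+	 * terminating NULL.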
+ */ + .prefix = ORANGEFS_XATTR_NAME_DEFAULT_PREFIX, + .get = orangefs_xattr_get_default, + .set = orangefs_xattr_set_default, +}; + +const struct xattr_handler *orangefs_xattr_handlers[] = { + &posix_acl_access_xattr_handler, + &posix_acl_default_xattr_handler, + &orangefs_xattr_trusted_handler, + &orangefs_xattr_default_handler, + NULL +}; diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index f90588abbfd4..6f96247226a4 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -151,7 +151,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint, #endif #ifndef HAVE_ARCH_BUG_ON -#define BUG_ON(condition) do { if (condition) ; } while (0) +#define BUG_ON(condition) do { if (condition) BUG(); } while (0) #endif #ifndef HAVE_ARCH_WARN_ON diff --git a/include/asm-generic/io-64-nonatomic-hi-lo.h b/include/asm-generic/io-64-nonatomic-hi-lo.h deleted file mode 100644 index 32b73abce1b0..000000000000 --- a/include/asm-generic/io-64-nonatomic-hi-lo.h +++ /dev/null @@ -1,2 +0,0 @@ -/* XXX: delete asm-generic/io-64-nonatomic-hi-lo.h after converting new users */ -#include <linux/io-64-nonatomic-hi-lo.h> diff --git a/include/asm-generic/io-64-nonatomic-lo-hi.h b/include/asm-generic/io-64-nonatomic-lo-hi.h deleted file mode 100644 index 55a627c37721..000000000000 --- a/include/asm-generic/io-64-nonatomic-lo-hi.h +++ /dev/null @@ -1,2 +0,0 @@ -/* XXX: delete asm-generic/io-64-nonatomic-lo-hi.h after converting new users */ -#include <linux/io-64-nonatomic-lo-hi.h> diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h index 37d1fe28960a..67cfb7dbc284 100644 --- a/include/asm-generic/page.h +++ b/include/asm-generic/page.h @@ -24,9 +24,6 @@ #ifndef __ASSEMBLY__ -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - #define clear_page(page) memset((page), 0, PAGE_SIZE) #define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 8f5a12ab2f2b..339125bb4d2c 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -456,7 +456,7 @@ *(.entry.text) \ VMLINUX_SYMBOL(__entry_text_end) = .; -#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) #define IRQENTRY_TEXT \ ALIGN_FUNCTION(); \ VMLINUX_SYMBOL(__irqentry_text_start) = .; \ @@ -466,6 +466,16 @@ #define IRQENTRY_TEXT #endif +#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) +#define SOFTIRQENTRY_TEXT \ + ALIGN_FUNCTION(); \ + VMLINUX_SYMBOL(__softirqentry_text_start) = .; \ + *(.softirqentry.text) \ + VMLINUX_SYMBOL(__softirqentry_text_end) = .; +#else +#define SOFTIRQENTRY_TEXT +#endif + /* Section used for early init (in .S files) */ #define HEAD_TEXT *(.head.text) diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 15151f3c4120..ae2f66833762 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -105,6 +105,7 @@ static inline u64 ceph_sanitize_features(u64 features) */ #define CEPH_FEATURES_SUPPORTED_DEFAULT \ (CEPH_FEATURE_NOSRCADDR | \ + CEPH_FEATURE_SUBSCRIBE2 | \ CEPH_FEATURE_RECONNECT_SEQ | \ CEPH_FEATURE_PGID64 | \ CEPH_FEATURE_PGPOOL3 | \ @@ -127,6 +128,7 @@ static inline u64 ceph_sanitize_features(u64 features) #define CEPH_FEATURES_REQUIRED_DEFAULT \ (CEPH_FEATURE_NOSRCADDR | \ + CEPH_FEATURE_SUBSCRIBE2 | \ CEPH_FEATURE_RECONNECT_SEQ | \ CEPH_FEATURE_PGID64 | \ 
CEPH_FEATURE_PGPOOL3 | \ diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index d7d072a25c27..37f28bf55ce4 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -198,8 +198,8 @@ struct ceph_client_mount { #define CEPH_SUBSCRIBE_ONETIME 1 /* i want only 1 update after have */ struct ceph_mon_subscribe_item { - __le64 have_version; __le64 have; - __u8 onetime; + __le64 start; + __u8 flags; } __attribute__ ((packed)); struct ceph_mon_subscribe_ack { @@ -376,7 +376,8 @@ union ceph_mds_request_args { __le32 stripe_count; /* ... */ __le32 object_size; __le32 file_replication; - __le32 unused; /* used to be preferred osd */ + __le32 mask; /* CEPH_CAP_* */ + __le32 old_size; } __attribute__ ((packed)) open; struct { __le32 flags; diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 3e3799cdc6e6..e7975e4681e1 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -47,7 +47,6 @@ struct ceph_options { unsigned long mount_timeout; /* jiffies */ unsigned long osd_idle_ttl; /* jiffies */ unsigned long osd_keepalive_timeout; /* jiffies */ - unsigned long monc_ping_timeout; /* jiffies */ /* * any type that can't be simply compared or doesn't need need @@ -68,7 +67,12 @@ struct ceph_options { #define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) #define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) #define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) -#define CEPH_MONC_PING_TIMEOUT_DEFAULT msecs_to_jiffies(30 * 1000) + +#define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000) +#define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000) +#define CEPH_MONC_PING_TIMEOUT msecs_to_jiffies(30 * 1000) +#define CEPH_MONC_HUNT_BACKOFF 2 +#define CEPH_MONC_HUNT_MAX_MULT 10 #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index 81810dc21f06..e230e7ed60d3 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h @@ -68,18 +68,24 @@ struct ceph_mon_client { bool hunting; int cur_mon; /* last monitor i contacted */ - unsigned long sub_sent, sub_renew_after; + unsigned long sub_renew_after; + unsigned long sub_renew_sent; struct ceph_connection con; + bool had_a_connection; + int hunt_mult; /* [1..CEPH_MONC_HUNT_MAX_MULT] */ + /* pending generic requests */ struct rb_root generic_request_tree; int num_generic_requests; u64 last_tid; - /* mds/osd map */ - int want_mdsmap; - int want_next_osdmap; /* 1 = want, 2 = want+asked */ - u32 have_osdmap, have_mdsmap; + /* subs, indexed with CEPH_SUB_* */ + struct { + struct ceph_mon_subscribe_item item; + bool want; + u32 have; /* epoch */ + } subs[3]; #ifdef CONFIG_DEBUG_FS struct dentry *debugfs_file; @@ -93,14 +99,23 @@ extern int ceph_monmap_contains(struct ceph_monmap *m, extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl); extern void ceph_monc_stop(struct ceph_mon_client *monc); +enum { + CEPH_SUB_MDSMAP = 0, + CEPH_SUB_MONMAP, + CEPH_SUB_OSDMAP, +}; + +extern const char *ceph_sub_str[]; + /* * The model here is to indicate that we need a new map of at least - * epoch @want, and also call in when we receive a map. We will + * epoch @epoch, and also call in when we receive a map. We will * periodically rerequest the map from the monitor cluster until we * get what we want. 
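+ *
+ * For example (an illustrative sketch of the new calls, not part of
+ * this hunk), a client needing at least osdmap epoch E would do:
+ *
+ *	ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, E, false);
+ *	...
+ *	ceph_monc_got_map(monc, CEPH_SUB_OSDMAP, got_epoch);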
*/ -extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 have); -extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have); +bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch, + bool continuous); +void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch); extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc); extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch, diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 7506b485bb6d..4343df806710 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -43,7 +43,8 @@ struct ceph_osd { }; -#define CEPH_OSD_MAX_OP 3 +#define CEPH_OSD_SLAB_OPS 2 +#define CEPH_OSD_MAX_OPS 16 enum ceph_osd_data_type { CEPH_OSD_DATA_TYPE_NONE = 0, @@ -77,7 +78,10 @@ struct ceph_osd_data { struct ceph_osd_req_op { u16 op; /* CEPH_OSD_OP_* */ u32 flags; /* CEPH_OSD_OP_FLAG_* */ - u32 payload_len; + u32 indata_len; /* request */ + u32 outdata_len; /* reply */ + s32 rval; + union { struct ceph_osd_data raw_data_in; struct { @@ -136,7 +140,6 @@ struct ceph_osd_request { /* request osd ops array */ unsigned int r_num_ops; - struct ceph_osd_req_op r_ops[CEPH_OSD_MAX_OP]; /* these are updated on each send */ __le32 *r_request_osdmap_epoch; @@ -148,8 +151,6 @@ struct ceph_osd_request { struct ceph_eversion *r_request_reassert_version; int r_result; - int r_reply_op_len[CEPH_OSD_MAX_OP]; - s32 r_reply_op_result[CEPH_OSD_MAX_OP]; int r_got_reply; int r_linger; @@ -174,6 +175,8 @@ struct ceph_osd_request { unsigned long r_stamp; /* send OR check time */ struct ceph_snap_context *r_snapc; /* snap context for writes */ + + struct ceph_osd_req_op r_ops[]; }; struct ceph_request_redirect { @@ -263,6 +266,8 @@ extern void osd_req_op_extent_init(struct ceph_osd_request *osd_req, u64 truncate_size, u32 truncate_seq); extern void osd_req_op_extent_update(struct ceph_osd_request *osd_req, unsigned int which, u64 length); +extern void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req, + unsigned int which, u64 offset_inc); extern struct ceph_osd_data *osd_req_op_extent_osd_data( struct ceph_osd_request *osd_req, diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 532108ea0c1c..3fe90d494edb 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -94,7 +94,7 @@ struct dma_buf_ops { void (*release)(struct dma_buf *); int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); - void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); void *(*kmap_atomic)(struct dma_buf *, unsigned long); void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); void *(*kmap)(struct dma_buf *, unsigned long); @@ -224,8 +224,8 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir); -void dma_buf_end_cpu_access(struct dma_buf *dma_buf, - enum dma_data_direction dir); +int dma_buf_end_cpu_access(struct dma_buf *dma_buf, + enum dma_data_direction dir); void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long); void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *); void *dma_buf_kmap(struct dma_buf *, unsigned long); diff --git a/include/linux/fence.h b/include/linux/fence.h index 605bd88246a6..2b17698b60b8 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h @@ 
-294,7 +294,7 @@ static inline bool fence_is_later(struct fence *f1, struct fence *f2) if (WARN_ON(f1->context != f2->context)) return false; - return f1->seqno - f2->seqno < INT_MAX; + return (int)(f1->seqno - f2->seqno) > 0; } /** diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 6d9df3f7e334..dea12a6e413b 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -811,16 +811,6 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, */ #define __notrace_funcgraph notrace -/* - * We want to which function is an entrypoint of a hardirq. - * That will help us to put a signal on output. - */ -#define __irq_entry __attribute__((__section__(".irqentry.text"))) - -/* Limits of hardirq entrypoints */ -extern char __irqentry_text_start[]; -extern char __irqentry_text_end[]; - #define FTRACE_NOTRACE_DEPTH 65536 #define FTRACE_RETFUNC_DEPTH 50 #define FTRACE_RETSTACK_ALLOC_SIZE 32 @@ -857,7 +847,6 @@ static inline void unpause_graph_tracing(void) #else /* !CONFIG_FUNCTION_GRAPH_TRACER */ #define __notrace_funcgraph -#define __irq_entry #define INIT_FTRACE_GRAPH static inline void ftrace_graph_init_task(struct task_struct *t) { } diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 358076eda364..9fcabeb07787 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -683,4 +683,24 @@ extern int early_irq_init(void); extern int arch_probe_nr_irqs(void); extern int arch_early_irq_init(void); +#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) +/* + * We want to know which function is an entrypoint of a hardirq or a softirq. + */ +#define __irq_entry __attribute__((__section__(".irqentry.text"))) +#define __softirq_entry \ + __attribute__((__section__(".softirqentry.text"))) + +/* Limits of hardirq entrypoints */ +extern char __irqentry_text_start[]; +extern char __irqentry_text_end[]; +/* Limits of softirq entrypoints */ +extern char __softirqentry_text_start[]; +extern char __softirqentry_text_end[]; + +#else +#define __irq_entry +#define __softirq_entry +#endif + #endif diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 0fdc798e3ff7..737371b56044 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -48,19 +48,28 @@ void kasan_unpoison_task_stack(struct task_struct *task); void kasan_alloc_pages(struct page *page, unsigned int order); void kasan_free_pages(struct page *page, unsigned int order); +void kasan_cache_create(struct kmem_cache *cache, size_t *size, + unsigned long *flags); + void kasan_poison_slab(struct page *page); void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); void kasan_poison_object_data(struct kmem_cache *cache, void *object); -void kasan_kmalloc_large(const void *ptr, size_t size); +void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); void kasan_kfree_large(const void *ptr); void kasan_kfree(void *ptr); -void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size); -void kasan_krealloc(const void *object, size_t new_size); +void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, + gfp_t flags); +void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); -void kasan_slab_alloc(struct kmem_cache *s, void *object); +void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); void kasan_slab_free(struct kmem_cache *s, void *object); +struct kasan_cache { + int alloc_meta_offset; + int free_meta_offset; +}; + int kasan_module_alloc(void *addr, size_t size); void 
kasan_free_shadow(const struct vm_struct *vm); @@ -76,20 +85,26 @@ static inline void kasan_disable_current(void) {} static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} static inline void kasan_free_pages(struct page *page, unsigned int order) {} +static inline void kasan_cache_create(struct kmem_cache *cache, + size_t *size, + unsigned long *flags) {} + static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) {} static inline void kasan_poison_object_data(struct kmem_cache *cache, void *object) {} -static inline void kasan_kmalloc_large(void *ptr, size_t size) {} +static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} static inline void kasan_kfree_large(const void *ptr) {} static inline void kasan_kfree(void *ptr) {} static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, - size_t size) {} -static inline void kasan_krealloc(const void *object, size_t new_size) {} + size_t size, gfp_t flags) {} +static inline void kasan_krealloc(const void *object, size_t new_size, + gfp_t flags) {} -static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {} +static inline void kasan_slab_alloc(struct kmem_cache *s, void *object, + gfp_t flags) {} static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } diff --git a/include/linux/mm.h b/include/linux/mm.h index 450fc977ed02..ed6407d1b7b5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1132,6 +1132,8 @@ struct zap_details { struct address_space *check_mapping; /* Check page->mapping if set */ pgoff_t first_index; /* Lowest page->index to unmap */ pgoff_t last_index; /* Highest page->index to unmap */ + bool ignore_dirty; /* Ignore dirty pages */ + bool check_swap_entries; /* Check also swap entries */ }; struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, diff --git a/include/linux/ntb.h b/include/linux/ntb.h index f798e2afba88..6f47562d477b 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h @@ -284,7 +284,7 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops) /* ops->db_read_mask && */ ops->db_set_mask && ops->db_clear_mask && - ops->peer_db_addr && + /* ops->peer_db_addr && */ /* ops->peer_db_read && */ ops->peer_db_set && /* ops->peer_db_clear && */ @@ -295,7 +295,7 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops) ops->spad_count && ops->spad_read && ops->spad_write && - ops->peer_spad_addr && + /* ops->peer_spad_addr && */ /* ops->peer_spad_read && */ ops->peer_spad_write && 1; @@ -757,6 +757,9 @@ static inline int ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, resource_size_t *db_size) { + if (!ntb->ops->peer_db_addr) + return -EINVAL; + return ntb->ops->peer_db_addr(ntb, db_addr, db_size); } @@ -948,6 +951,9 @@ static inline int ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val) static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx, phys_addr_t *spad_addr) { + if (!ntb->ops->peer_spad_addr) + return -EINVAL; + return ntb->ops->peer_spad_addr(ntb, idx, spad_addr); } diff --git a/include/linux/oom.h b/include/linux/oom.h index 03e6257321f0..628a43242a34 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -76,8 +76,6 @@ extern unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, const nodemask_t *nodemask, unsigned long totalpages); -extern int 
oom_kills_count(void); -extern void note_oom_kill(void); extern void oom_kill_process(struct oom_control *oc, struct task_struct *p, unsigned int points, unsigned long totalpages, struct mem_cgroup *memcg, const char *message); @@ -91,7 +89,7 @@ extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, extern bool out_of_memory(struct oom_control *oc); -extern void exit_oom_victim(void); +extern void exit_oom_victim(struct task_struct *tsk); extern int register_oom_notifier(struct notifier_block *nb); extern int unregister_oom_notifier(struct notifier_block *nb); diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h index 25266c600021..308d6044f153 100644 --- a/include/linux/pm_clock.h +++ b/include/linux/pm_clock.h @@ -42,7 +42,9 @@ extern int pm_clk_create(struct device *dev); extern void pm_clk_destroy(struct device *dev); extern int pm_clk_add(struct device *dev, const char *con_id); extern int pm_clk_add_clk(struct device *dev, struct clk *clk); +extern int of_pm_clk_add_clks(struct device *dev); extern void pm_clk_remove(struct device *dev, const char *con_id); +extern void pm_clk_remove_clk(struct device *dev, struct clk *clk); extern int pm_clk_suspend(struct device *dev); extern int pm_clk_resume(struct device *dev); #else @@ -69,11 +71,18 @@ static inline int pm_clk_add_clk(struct device *dev, struct clk *clk) { return -EINVAL; } +static inline int of_pm_clk_add_clks(struct device *dev) +{ + return -EINVAL; +} static inline void pm_clk_remove(struct device *dev, const char *con_id) { } #define pm_clk_suspend NULL #define pm_clk_resume NULL +static inline void pm_clk_remove_clk(struct device *dev, struct clk *clk) +{ +} #endif #ifdef CONFIG_HAVE_CLK diff --git a/include/linux/sched.h b/include/linux/sched.h index 589c4780b077..60bba7e032dc 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -426,6 +426,7 @@ extern signed long schedule_timeout(signed long timeout); extern signed long schedule_timeout_interruptible(signed long timeout); extern signed long schedule_timeout_killable(signed long timeout); extern signed long schedule_timeout_uninterruptible(signed long timeout); +extern signed long schedule_timeout_idle(signed long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); @@ -1848,6 +1849,9 @@ struct task_struct { unsigned long task_state_change; #endif int pagefault_disabled; +#ifdef CONFIG_MMU + struct task_struct *oom_reaper_list; +#endif /* CPU-specific state of this task */ struct thread_struct thread; /* diff --git a/include/linux/slab.h b/include/linux/slab.h index e4b568738ca3..508bd827e6dc 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -92,6 +92,12 @@ # define SLAB_ACCOUNT 0x00000000UL #endif +#ifdef CONFIG_KASAN +#define SLAB_KASAN 0x08000000UL +#else +#define SLAB_KASAN 0x00000000UL +#endif + /* The following flags affect the page allocator grouping pages by mobility */ #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ @@ -370,7 +376,7 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s, { void *ret = kmem_cache_alloc(s, flags); - kasan_kmalloc(s, ret, size); + kasan_kmalloc(s, ret, size, flags); return ret; } @@ -381,7 +387,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, { void *ret = kmem_cache_alloc_node(s, gfpflags, node); - kasan_kmalloc(s, ret, size); + kasan_kmalloc(s, ret, size, gfpflags); return ret; } #endif /* CONFIG_TRACING */ diff --git 
a/include/linux/slab_def.h b/include/linux/slab_def.h index e878ba35ae91..9edbbf352340 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -76,8 +76,22 @@ struct kmem_cache { #ifdef CONFIG_MEMCG struct memcg_cache_params memcg_params; #endif +#ifdef CONFIG_KASAN + struct kasan_cache kasan_info; +#endif struct kmem_cache_node *node[MAX_NUMNODES]; }; +static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, + void *x) { + void *object = x - (x - page->s_mem) % cache->size; + void *last_object = page->s_mem + (cache->num - 1) * cache->size; + + if (unlikely(object > last_object)) + return last_object; + else + return object; +} + #endif /* _LINUX_SLAB_DEF_H */ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index ac5143f95ee6..665cd0cd18b8 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -130,4 +130,15 @@ static inline void *virt_to_obj(struct kmem_cache *s, void object_err(struct kmem_cache *s, struct page *page, u8 *object, char *reason); +static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, + void *x) { + void *object = x - (x - page_address(page)) % cache->size; + void *last_object = page_address(page) + + (page->objects - 1) * cache->size; + if (unlikely(object > last_object)) + return last_object; + else + return object; +} + #endif /* _LINUX_SLUB_DEF_H */ diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h new file mode 100644 index 000000000000..7978b3e2c1e1 --- /dev/null +++ b/include/linux/stackdepot.h @@ -0,0 +1,32 @@ +/* + * A generic stack depot implementation + * + * Author: Alexander Potapenko <[email protected]> + * Copyright (C) 2016 Google, Inc. + * + * Based on code by Dmitry Chernenkov. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _LINUX_STACKDEPOT_H +#define _LINUX_STACKDEPOT_H + +typedef u32 depot_stack_handle_t; + +struct stack_trace; + +depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags); + +void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace); + +#endif diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index 784bc2c0929f..bf66ea6bed2b 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -28,6 +28,7 @@ #define SCSI_TRANSPORT_FC_H #include <linux/sched.h> +#include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_netlink.h> @@ -797,22 +798,12 @@ fc_remote_port_chkready(struct fc_rport *rport) static inline u64 wwn_to_u64(u8 *wwn) { - return (u64)wwn[0] << 56 | (u64)wwn[1] << 48 | - (u64)wwn[2] << 40 | (u64)wwn[3] << 32 | - (u64)wwn[4] << 24 | (u64)wwn[5] << 16 | - (u64)wwn[6] << 8 | (u64)wwn[7]; + return get_unaligned_be64(wwn); } static inline void u64_to_wwn(u64 inm, u8 *wwn) { - wwn[0] = (inm >> 56) & 0xff; - wwn[1] = (inm >> 48) & 0xff; - wwn[2] = (inm >> 40) & 0xff; - wwn[3] = (inm >> 32) & 0xff; - wwn[4] = (inm >> 24) & 0xff; - wwn[5] = (inm >> 16) & 0xff; - wwn[6] = (inm >> 8) & 0xff; - wwn[7] = inm & 0xff; + put_unaligned_be64(inm, wwn); } /** diff --git a/kernel/exit.c b/kernel/exit.c index 953d1a1c0387..fd90195667e1 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -435,7 +435,7 @@ static void exit_mm(struct task_struct *tsk) mm_update_next_owner(mm); mmput(mm); if (test_thread_flag(TIF_MEMDIE)) - exit_oom_victim(); + exit_oom_victim(tsk); } static struct task_struct *find_alive_thread(struct task_struct *p) diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index aa0f26b58426..fca9254280ee 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -339,6 +339,7 @@ int hibernation_snapshot(int platform_mode) pm_message_t msg; int error; + pm_suspend_clear_flags(); error = platform_begin(platform_mode); if (error) goto Close; diff --git a/kernel/softirq.c b/kernel/softirq.c index 8aae49dd7da8..17caf4b63342 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -227,7 +227,7 @@ static inline bool lockdep_softirq_start(void) { return false; } static inline void lockdep_softirq_end(bool in_hardirq) { } #endif -asmlinkage __visible void __do_softirq(void) +asmlinkage __visible void __softirq_entry __do_softirq(void) { unsigned long end = jiffies + MAX_SOFTIRQ_TIME; unsigned long old_flags = current->flags; diff --git a/kernel/time/timer.c b/kernel/time/timer.c index d1798fa0c743..73164c3aa56b 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1566,6 +1566,17 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout) } EXPORT_SYMBOL(schedule_timeout_uninterruptible); +/* + * Like schedule_timeout_uninterruptible(), except this task will not contribute + * to load average. 
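+ * It does so by sleeping in TASK_IDLE, an uninterruptible state that
+ * the scheduler excludes from the load-average calculation.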
+ */
+signed long __sched schedule_timeout_idle(signed long timeout)
+{
+	__set_current_state(TASK_IDLE);
+	return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_idle);
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
 {
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 91d6a63a2ea7..3a0244ff7ea8 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -8,6 +8,7 @@
  */
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
diff --git a/lib/Kconfig b/lib/Kconfig
index 133ebc0c1773..3cca1222578e 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -536,4 +536,8 @@ config ARCH_HAS_PMEM_API
 config ARCH_HAS_MMIO_FLUSH
 	bool
 
+config STACKDEPOT
+	bool
+	select STACKTRACE
+
 endmenu
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 0fee5acd5aa0..67d8c6838ba9 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -5,8 +5,9 @@ if HAVE_ARCH_KASAN
 
 config KASAN
 	bool "KASan: runtime memory debugger"
-	depends on SLUB_DEBUG
+	depends on SLUB_DEBUG || (SLAB && !DEBUG_SLAB)
 	select CONSTRUCTORS
+	select STACKDEPOT if SLAB
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
@@ -16,6 +17,8 @@ config KASAN
 	  This feature consumes about 1/8 of available memory and brings about
 	  ~x3 performance slowdown.
 	  For better error detection enable CONFIG_STACKTRACE.
+	  Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB
+	  (the resulting kernel does not boot).
 
 choice
 	prompt "Instrumentation type"
diff --git a/lib/Makefile b/lib/Makefile
index a1de5b61ff40..7bd6fd436c97 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -181,6 +181,9 @@ obj-$(CONFIG_SG_SPLIT) += sg_split.o
 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
 obj-$(CONFIG_IRQ_POLL) += irq_poll.o
 
+obj-$(CONFIG_STACKDEPOT) += stackdepot.o
+KASAN_SANITIZE_stackdepot.o := n
+
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
 	       fdt_empty_tree.o
 $(foreach file, $(libfdt_files), \
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
new file mode 100644
index 000000000000..654c9d87e83a
--- /dev/null
+++ b/lib/stackdepot.c
@@ -0,0 +1,284 @@
+/*
+ * Generic stack depot for storing stack traces.
+ *
+ * Some debugging tools need to save stack traces of certain events which can
+ * be later presented to the user. For example, KASAN needs to save alloc and
+ * free stacks for each object, but storing two stack traces per object
+ * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
+ * that).
+ *
+ * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
+ * and free stacks repeat a lot, we save about 100x space.
+ * Stacks are never removed from the depot, so we store them one after
+ * another in a contiguous memory allocation.
+ *
+ * Author: Alexander Potapenko <[email protected]>
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * Based on code by Dmitry Chernenkov.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
+
+static int depot_index;
+static int next_slab_inited;
+static size_t depot_offset;
+static DEFINE_SPINLOCK(depot_lock);
+
+static bool init_stack_slab(void **prealloc)
+{
+	if (!*prealloc)
+		return false;
+	/*
+	 * This smp_load_acquire() pairs with smp_store_release() to
+	 * |next_slab_inited| below and in depot_alloc_stack().
+	 */
+	if (smp_load_acquire(&next_slab_inited))
+		return true;
+	if (stack_slabs[depot_index] == NULL) {
+		stack_slabs[depot_index] = *prealloc;
+	} else {
+		stack_slabs[depot_index + 1] = *prealloc;
+		/*
+		 * This smp_store_release pairs with smp_load_acquire() from
+		 * |next_slab_inited| above and in depot_save_stack().
+		 */
+		smp_store_release(&next_slab_inited, 1);
+	}
+	*prealloc = NULL;
+	return true;
+}
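[Editor's aside, not part of the patch: init_stack_slab() above and depot_alloc_stack() below use smp_store_release()/smp_load_acquire() on |next_slab_inited| as a publish/consume handshake: the payload is made visible before the flag, and any reader that sees the flag also sees the payload. The same pattern in portable C11, with illustrative names:]

	#include <stdatomic.h>
	#include <stddef.h>

	static void *slot;		/* payload published by the writer */
	static atomic_int slot_ready;	/* plays the role of next_slab_inited */

	static void publish(void *p)
	{
		slot = p;		/* 1: fill the slot */
		/* 2: release-store orders the write to |slot| before the flag */
		atomic_store_explicit(&slot_ready, 1, memory_order_release);
	}

	static void *consume(void)
	{
		/* acquire-load pairs with the release-store above */
		if (!atomic_load_explicit(&slot_ready, memory_order_acquire))
			return NULL;
		return slot;		/* guaranteed to see the filled slot */
	}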
+
+/* Allocation of a new stack in raw storage */
+static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
+		u32 hash, void **prealloc, gfp_t alloc_flags)
+{
+	int required_size = offsetof(struct stack_record, entries) +
+		sizeof(unsigned long) * size;
+	struct stack_record *stack;
+
+	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
+
+	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
+		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
+			WARN_ONCE(1, "Stack depot reached limit capacity");
+			return NULL;
+		}
+		depot_index++;
+		depot_offset = 0;
+		/*
+		 * smp_store_release() here pairs with smp_load_acquire() from
+		 * |next_slab_inited| in depot_save_stack() and
+		 * init_stack_slab().
+		 */
+		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
+			smp_store_release(&next_slab_inited, 0);
+	}
+	init_stack_slab(prealloc);
+	if (stack_slabs[depot_index] == NULL)
+		return NULL;
+
+	stack = stack_slabs[depot_index] + depot_offset;
+
+	stack->hash = hash;
+	stack->size = size;
+	stack->handle.slabindex = depot_index;
+	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
+	memcpy(stack->entries, entries, size * sizeof(unsigned long));
+	depot_offset += required_size;
+
+	return stack;
+}
+
+#define STACK_HASH_ORDER 20
+#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
+#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
+#define STACK_HASH_SEED 0x9747b28c
+
+static struct stack_record *stack_table[STACK_HASH_SIZE] = {
+	[0 ...	STACK_HASH_SIZE - 1] = NULL
+};
+
+/* Calculate hash for a stack */
+static inline u32 hash_stack(unsigned long *entries, unsigned int size)
+{
+	return jhash2((u32 *)entries,
+		      size * sizeof(unsigned long) / sizeof(u32),
+		      STACK_HASH_SEED);
+}
+
+/* Find a stack that is equal to the one stored in entries in the hash */
+static inline struct stack_record *find_stack(struct stack_record *bucket,
+					      unsigned long *entries, int size,
+					      u32 hash)
+{
+	struct stack_record *found;
+
+	for (found = bucket; found; found = found->next) {
+		if (found->hash == hash &&
+		    found->size == size &&
+		    !memcmp(entries, found->entries,
+			    size * sizeof(unsigned long))) {
+			return found;
+		}
+	}
+	return NULL;
+}
+
+void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
+{
+	union handle_parts parts = { .handle = handle };
+	void *slab = stack_slabs[parts.slabindex];
+	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
+	struct stack_record *stack = slab + offset;
+
+	trace->nr_entries = trace->max_entries = stack->size;
+	trace->entries = stack->entries;
+	trace->skip = 0;
+}
+
+/**
+ * depot_save_stack - save stack in a stack depot.
+ * @trace:	the stacktrace to save.
+ * @alloc_flags:	flags for allocating additional memory if required.
+ *
+ * Returns the handle of the stack struct stored in depot.
+ */
+depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
+				      gfp_t alloc_flags)
+{
+	u32 hash;
+	depot_stack_handle_t retval = 0;
+	struct stack_record *found = NULL, **bucket;
+	unsigned long flags;
+	struct page *page = NULL;
+	void *prealloc = NULL;
+
+	if (unlikely(trace->nr_entries == 0))
+		goto fast_exit;
+
+	hash = hash_stack(trace->entries, trace->nr_entries);
+	/* Bad luck, we won't store this stack. */
+	if (hash == 0)
+		goto exit;
+
+	bucket = &stack_table[hash & STACK_HASH_MASK];
+
+	/*
+	 * Fast path: look the stack trace up without locking.
+	 * The smp_load_acquire() here pairs with smp_store_release() to
+	 * |bucket| below.
+	 */
+	found = find_stack(smp_load_acquire(bucket), trace->entries,
+			   trace->nr_entries, hash);
+	if (found)
+		goto exit;
+
+	/*
+	 * Check if the current or the next stack slab needs to be
+	 * initialized. If so, allocate the memory - we won't be able to do
+	 * that under the lock.
+	 *
+	 * The smp_load_acquire() here pairs with smp_store_release() to
+	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
+	 */
+	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
+		/*
+		 * Zero out zone modifiers, as we don't have specific zone
+		 * requirements. Keep the flags related to allocation in atomic
+		 * contexts and I/O.
+ */ + alloc_flags &= ~GFP_ZONEMASK; + alloc_flags &= (GFP_ATOMIC | GFP_KERNEL); + page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER); + if (page) + prealloc = page_address(page); + } + + spin_lock_irqsave(&depot_lock, flags); + + found = find_stack(*bucket, trace->entries, trace->nr_entries, hash); + if (!found) { + struct stack_record *new = + depot_alloc_stack(trace->entries, trace->nr_entries, + hash, &prealloc, alloc_flags); + if (new) { + new->next = *bucket; + /* + * This smp_store_release() pairs with + * smp_load_acquire() from |bucket| above. + */ + smp_store_release(bucket, new); + found = new; + } + } else if (prealloc) { + /* + * We didn't need to store this stack trace, but let's keep + * the preallocated memory for the future. + */ + WARN_ON(!init_stack_slab(&prealloc)); + } + + spin_unlock_irqrestore(&depot_lock, flags); +exit: + if (prealloc) { + /* Nobody used this memory, ok to free it. */ + free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER); + } + if (found) + retval = found->handle.handle; +fast_exit: + return retval; +} diff --git a/lib/test_kasan.c b/lib/test_kasan.c index c32f3b0048dc..82169fbf2453 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -65,11 +65,34 @@ static noinline void __init kmalloc_node_oob_right(void) kfree(ptr); } -static noinline void __init kmalloc_large_oob_right(void) +#ifdef CONFIG_SLUB +static noinline void __init kmalloc_pagealloc_oob_right(void) { char *ptr; size_t size = KMALLOC_MAX_CACHE_SIZE + 10; + /* Allocate a chunk that does not fit into a SLUB cache to trigger + * the page allocator fallback. + */ + pr_info("kmalloc pagealloc allocation: out-of-bounds to right\n"); + ptr = kmalloc(size, GFP_KERNEL); + if (!ptr) { + pr_err("Allocation failed\n"); + return; + } + + ptr[size] = 0; + kfree(ptr); +} +#endif + +static noinline void __init kmalloc_large_oob_right(void) +{ + char *ptr; + size_t size = KMALLOC_MAX_CACHE_SIZE - 256; + /* Allocate a chunk that is large enough, but still fits into a slab + * and does not trigger the page allocator fallback in SLUB. 
+ */ pr_info("kmalloc large allocation: out-of-bounds to right\n"); ptr = kmalloc(size, GFP_KERNEL); if (!ptr) { @@ -271,6 +294,8 @@ static noinline void __init kmalloc_uaf2(void) } ptr1[40] = 'x'; + if (ptr1 == ptr2) + pr_err("Could not detect use-after-free: ptr1 == ptr2\n"); kfree(ptr2); } @@ -324,6 +349,9 @@ static int __init kmalloc_tests_init(void) kmalloc_oob_right(); kmalloc_oob_left(); kmalloc_node_oob_right(); +#ifdef CONFIG_SLUB + kmalloc_pagealloc_oob_right(); +#endif kmalloc_large_oob_right(); kmalloc_oob_krealloc_more(); kmalloc_oob_krealloc_less(); diff --git a/mm/Makefile b/mm/Makefile index f5e797cbd128..deb467edca2d 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -3,6 +3,7 @@ # KASAN_SANITIZE_slab_common.o := n +KASAN_SANITIZE_slab.o := n KASAN_SANITIZE_slub.o := n # These files are disabled because they produce non-interesting and/or diff --git a/mm/filemap.c b/mm/filemap.c index 7c00f105845e..a8c69c8c0a90 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1840,15 +1840,16 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) ssize_t retval = 0; loff_t *ppos = &iocb->ki_pos; loff_t pos = *ppos; + size_t count = iov_iter_count(iter); + + if (!count) + goto out; /* skip atime */ if (iocb->ki_flags & IOCB_DIRECT) { struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; - size_t count = iov_iter_count(iter); loff_t size; - if (!count) - goto out; /* skip atime */ size = i_size_read(inode); retval = filemap_write_and_wait_range(mapping, pos, pos + count - 1); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index fbfb1b8d6726..86f9f8b82f8e 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2578,7 +2578,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, } khugepaged_node_load[node]++; if (!PageLRU(page)) { - result = SCAN_SCAN_ABORT; + result = SCAN_PAGE_LRU; goto out_unmap; } if (PageLocked(page)) { diff --git a/mm/internal.h b/mm/internal.h index 7449392c6faa..b79abb6721cf 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -38,6 +38,11 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long floor, unsigned long ceiling); +void unmap_page_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, + unsigned long addr, unsigned long end, + struct zap_details *details); + extern int __do_page_cache_readahead(struct address_space *mapping, struct file *filp, pgoff_t offset, unsigned long nr_to_read, unsigned long lookahead_size); diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 1ad20ade8c91..acb3b6c4dd89 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -17,7 +17,9 @@ #define DISABLE_BRANCH_PROFILING #include <linux/export.h> +#include <linux/interrupt.h> #include <linux/init.h> +#include <linux/kasan.h> #include <linux/kernel.h> #include <linux/kmemleak.h> #include <linux/linkage.h> @@ -32,7 +34,6 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/vmalloc.h> -#include <linux/kasan.h> #include "kasan.h" #include "../slab.h" @@ -334,6 +335,59 @@ void kasan_free_pages(struct page *page, unsigned int order) KASAN_FREE_PAGE); } +#ifdef CONFIG_SLAB +/* + * Adaptive redzone policy taken from the userspace AddressSanitizer runtime. + * For larger allocations larger redzones are used. + */ +static size_t optimal_redzone(size_t object_size) +{ + int rz = + object_size <= 64 - 16 ? 16 : + object_size <= 128 - 32 ? 32 : + object_size <= 512 - 64 ? 64 : + object_size <= 4096 - 128 ? 128 : + object_size <= (1 << 14) - 256 ? 256 : + object_size <= (1 << 15) - 512 ? 
512 : + object_size <= (1 << 16) - 1024 ? 1024 : 2048; + return rz; +} + +void kasan_cache_create(struct kmem_cache *cache, size_t *size, + unsigned long *flags) +{ + int redzone_adjust; + /* Make sure the adjusted size is still less than + * KMALLOC_MAX_CACHE_SIZE. + * TODO: this check is only useful for SLAB, but not SLUB. We'll need + * to skip it for SLUB when it starts using kasan_cache_create(). + */ + if (*size > KMALLOC_MAX_CACHE_SIZE - + sizeof(struct kasan_alloc_meta) - + sizeof(struct kasan_free_meta)) + return; + *flags |= SLAB_KASAN; + /* Add alloc meta. */ + cache->kasan_info.alloc_meta_offset = *size; + *size += sizeof(struct kasan_alloc_meta); + + /* Add free meta. */ + if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor || + cache->object_size < sizeof(struct kasan_free_meta)) { + cache->kasan_info.free_meta_offset = *size; + *size += sizeof(struct kasan_free_meta); + } + redzone_adjust = optimal_redzone(cache->object_size) - + (*size - cache->object_size); + if (redzone_adjust > 0) + *size += redzone_adjust; + *size = min(KMALLOC_MAX_CACHE_SIZE, + max(*size, + cache->object_size + + optimal_redzone(cache->object_size))); +} +#endif + void kasan_poison_slab(struct page *page) { kasan_poison_shadow(page_address(page), @@ -351,11 +405,81 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object) kasan_poison_shadow(object, round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE), KASAN_KMALLOC_REDZONE); +#ifdef CONFIG_SLAB + if (cache->flags & SLAB_KASAN) { + struct kasan_alloc_meta *alloc_info = + get_alloc_info(cache, object); + alloc_info->state = KASAN_STATE_INIT; + } +#endif } -void kasan_slab_alloc(struct kmem_cache *cache, void *object) +#ifdef CONFIG_SLAB +static inline int in_irqentry_text(unsigned long ptr) { - kasan_kmalloc(cache, object, cache->object_size); + return (ptr >= (unsigned long)&__irqentry_text_start && + ptr < (unsigned long)&__irqentry_text_end) || + (ptr >= (unsigned long)&__softirqentry_text_start && + ptr < (unsigned long)&__softirqentry_text_end); +} + +static inline void filter_irq_stacks(struct stack_trace *trace) +{ + int i; + + if (!trace->nr_entries) + return; + for (i = 0; i < trace->nr_entries; i++) + if (in_irqentry_text(trace->entries[i])) { + /* Include the irqentry function into the stack. 
*/
+			trace->nr_entries = i + 1;
+			break;
+		}
+}
+
+static inline depot_stack_handle_t save_stack(gfp_t flags)
+{
+	unsigned long entries[KASAN_STACK_DEPTH];
+	struct stack_trace trace = {
+		.nr_entries = 0,
+		.entries = entries,
+		.max_entries = KASAN_STACK_DEPTH,
+		.skip = 0
+	};
+
+	save_stack_trace(&trace);
+	filter_irq_stacks(&trace);
+	if (trace.nr_entries != 0 &&
+	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
+		trace.nr_entries--;
+
+	return depot_save_stack(&trace, flags);
+}
+
+static inline void set_track(struct kasan_track *track, gfp_t flags)
+{
+	track->pid = current->pid;
+	track->stack = save_stack(flags);
+}
+
+struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
+					const void *object)
+{
+	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
+	return (void *)object + cache->kasan_info.alloc_meta_offset;
+}
+
+struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
+				      const void *object)
+{
+	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
+	return (void *)object + cache->kasan_info.free_meta_offset;
+}
+#endif
+
+void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
+{
+	kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
 void kasan_slab_free(struct kmem_cache *cache, void *object)
@@ -367,10 +491,22 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
 	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
 		return;
 
+#ifdef CONFIG_SLAB
+	if (cache->flags & SLAB_KASAN) {
+		struct kasan_free_meta *free_info =
+			get_free_info(cache, object);
+		struct kasan_alloc_meta *alloc_info =
+			get_alloc_info(cache, object);
+		alloc_info->state = KASAN_STATE_FREE;
+		set_track(&free_info->track, GFP_NOWAIT);
+	}
+#endif
+
 	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 }
 
-void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
+void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
+		   gfp_t flags)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
@@ -386,10 +522,20 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
 	kasan_unpoison_shadow(object, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
+#ifdef CONFIG_SLAB
+	if (cache->flags & SLAB_KASAN) {
+		struct kasan_alloc_meta *alloc_info =
+			get_alloc_info(cache, object);
+
+		alloc_info->state = KASAN_STATE_ALLOC;
+		alloc_info->alloc_size = size;
+		set_track(&alloc_info->track, flags);
+	}
+#endif
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
-void kasan_kmalloc_large(const void *ptr, size_t size)
+void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
 {
 	struct page *page;
 	unsigned long redzone_start;
@@ -408,7 +554,7 @@ void kasan_kmalloc_large(const void *ptr, size_t size)
 		KASAN_PAGE_REDZONE);
 }
 
-void kasan_krealloc(const void *object, size_t size)
+void kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
 	struct page *page;
 
@@ -418,9 +564,9 @@ void kasan_krealloc(const void *object, size_t size)
 	page = virt_to_head_page(object);
 
 	if (unlikely(!PageSlab(page)))
-		kasan_kmalloc_large(object, size);
+		kasan_kmalloc_large(object, size, flags);
 	else
-		kasan_kmalloc(page->slab_cache, object, size);
+		kasan_kmalloc(page->slab_cache, object, size, flags);
 }
 
 void kasan_kfree(void *ptr)
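[Editor's aside, not part of the patch: the calling pattern these helpers establish for the stack depot, as a self-contained sketch. record_current_stack() and print_recorded_stack() are hypothetical names; KASAN_STACK_DEPTH, depot_save_stack(), depot_fetch_stack(), save_stack_trace() and print_stack_trace() are the symbols introduced or used above.]

	static depot_stack_handle_t record_current_stack(gfp_t flags)
	{
		unsigned long entries[KASAN_STACK_DEPTH];
		struct stack_trace trace = {
			.entries	= entries,
			.max_entries	= KASAN_STACK_DEPTH,
		};

		save_stack_trace(&trace);
		/* A zero handle means the depot refused or failed to store it. */
		return depot_save_stack(&trace, flags);
	}

	static void print_recorded_stack(depot_stack_handle_t handle)
	{
		struct stack_trace trace;

		if (!handle)
			return;
		/* The depot owns the entries; nothing to copy or free. */
		depot_fetch_stack(handle, &trace);
		print_stack_trace(&trace, 0);
	}

[Since the handle is a plain u32, it embeds cheaply in per-object metadata such as kasan_alloc_meta below.]

diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 4f6c62e5c21e..30a2f0ba0e09 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -2,6 +2,7 @@
 #define __MM_KASAN_KASAN_H
 
 #include <linux/kasan.h>
+#include <linux/stackdepot.h>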
 #define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
 #define KASAN_SHADOW_MASK       (KASAN_SHADOW_SCALE_SIZE - 1)
 
@@ -54,6 +55,42 @@ struct kasan_global {
 #endif
 };
 
+/*
+ * Structures to keep alloc and free tracks.
+ */
+
+enum kasan_state {
+	KASAN_STATE_INIT,
+	KASAN_STATE_ALLOC,
+	KASAN_STATE_FREE
+};
+
+#define KASAN_STACK_DEPTH 64
+
+struct kasan_track {
+	u32 pid;
+	depot_stack_handle_t stack;
+};
+
+struct kasan_alloc_meta {
+	struct kasan_track track;
+	u32 state : 2;	/* enum kasan_state */
+	u32 alloc_size : 30;
+	u32 reserved;
+};
+
+struct kasan_free_meta {
+	/* Allocator freelist pointer, unused by KASAN. */
+	void **freelist;
+	struct kasan_track track;
+};
+
+struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
+					const void *object);
+struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
+				      const void *object);
+
 static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
 {
 	return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
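[Editor's aside, not part of the patch: a userspace re-derivation of the sizing kasan_cache_create() performs for these structures, using a 64-byte cache as a worked example. The 16-byte metadata size is an assumption consistent with the BUILD_BUG_ON(... > 32) checks; the redzone ladder is copied (truncated) from optimal_redzone() above.]

	#include <stdio.h>

	/* same thresholds as optimal_redzone() in mm/kasan/kasan.c */
	static unsigned int optimal_redzone(unsigned int object_size)
	{
		return object_size <= 64 - 16 ? 16 :
		       object_size <= 128 - 32 ? 32 :
		       object_size <= 512 - 64 ? 64 :
		       object_size <= 4096 - 128 ? 128 : 256; /* ladder truncated */
	}

	int main(void)
	{
		unsigned int object_size = 64;	/* example cache */
		unsigned int alloc_meta = 16;	/* assumed sizeof(kasan_alloc_meta) */
		/* no ctor and no RCU: the freed object itself holds the free meta */
		unsigned int size = object_size + alloc_meta;
		int adjust = (int)optimal_redzone(object_size) -
			     (int)(size - object_size);

		if (adjust > 0)
			size += adjust;
		/* 64 + 16 meta + 16 extra redzone = 96 bytes per object */
		printf("object %u -> %u bytes under KASAN\n", object_size, size);
		return 0;
	}

diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 745aa8f36028..60869a5a0124 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -18,6 +18,7 @@
 #include <linux/printk.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/stackdepot.h>
 #include <linux/stacktrace.h>
 #include <linux/string.h>
 #include <linux/types.h>
@@ -115,6 +116,53 @@ static inline bool init_task_stack_addr(const void *addr)
 			sizeof(init_thread_union.stack));
 }
 
+#ifdef CONFIG_SLAB
+static void print_track(struct kasan_track *track)
+{
+	pr_err("PID = %u\n", track->pid);
+	if (track->stack) {
+		struct stack_trace trace;
+
+		depot_fetch_stack(track->stack, &trace);
+		print_stack_trace(&trace, 0);
+	} else {
+		pr_err("(stack is not available)\n");
+	}
+}
+
+static void object_err(struct kmem_cache *cache, struct page *page,
+			void *object, char *unused_reason)
+{
+	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
+	struct kasan_free_meta *free_info;
+
+	dump_stack();
+	pr_err("Object at %p, in cache %s\n", object, cache->name);
+	if (!(cache->flags & SLAB_KASAN))
+		return;
+	switch (alloc_info->state) {
+	case KASAN_STATE_INIT:
+		pr_err("Object not allocated yet\n");
+		break;
+	case KASAN_STATE_ALLOC:
+		pr_err("Object allocated with size %u bytes.\n",
+		       alloc_info->alloc_size);
+		pr_err("Allocation:\n");
+		print_track(&alloc_info->track);
+		break;
+	case KASAN_STATE_FREE:
+		pr_err("Object freed, allocated with size %u bytes\n",
+		       alloc_info->alloc_size);
+		free_info = get_free_info(cache, object);
+		pr_err("Allocation:\n");
+		print_track(&alloc_info->track);
+		pr_err("Deallocation:\n");
+		print_track(&free_info->track);
+		break;
+	}
+}
+#endif
+
 static void print_address_description(struct kasan_access_info *info)
 {
 	const void *addr = info->access_addr;
@@ -126,17 +174,10 @@ static void print_address_description(struct kasan_access_info *info)
 	if (PageSlab(page)) {
 		void *object;
 		struct kmem_cache *cache = page->slab_cache;
-		void *last_object;
-
-		object = virt_to_obj(cache, page_address(page), addr);
-		last_object = page_address(page) +
-			page->objects * cache->size;
-
-		if (unlikely(object > last_object))
-			object = last_object; /* we hit into padding */
-
+		object = nearest_obj(cache, page,
+				     (void *)info->access_addr);
 		object_err(cache, page, object,
-			"kasan: bad access detected");
+			   "kasan: bad access detected");
 		return;
 	}
 	dump_page(page, "kasan: bad access detected");
@@ -146,7 +187,6 @@ static void print_address_description(struct kasan_access_info *info)
 	if (!init_task_stack_addr(addr))
 		pr_err("Address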
belongs to variable %pS\n", addr); } - dump_stack(); } diff --git a/mm/memory.c b/mm/memory.c index 81dca0083fcd..098f00d05461 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1102,6 +1102,12 @@ again: if (!PageAnon(page)) { if (pte_dirty(ptent)) { + /* + * oom_reaper cannot tear down dirty + * pages + */ + if (unlikely(details && details->ignore_dirty)) + continue; force_flush = 1; set_page_dirty(page); } @@ -1120,8 +1126,8 @@ again: } continue; } - /* If details->check_mapping, we leave swap entries. */ - if (unlikely(details)) + /* only check swap_entries if explicitly asked for in details */ + if (unlikely(details && !details->check_swap_entries)) continue; entry = pte_to_swp_entry(ptent); @@ -1226,7 +1232,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb, return addr; } -static void unmap_page_range(struct mmu_gather *tlb, +void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct zap_details *details) @@ -1234,9 +1240,6 @@ static void unmap_page_range(struct mmu_gather *tlb, pgd_t *pgd; unsigned long next; - if (details && !details->check_mapping) - details = NULL; - BUG_ON(addr >= end); tlb_start_vma(tlb, vma); pgd = pgd_offset(vma->vm_mm, addr); @@ -2432,7 +2435,7 @@ static inline void unmap_mapping_range_tree(struct rb_root *root, void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows) { - struct zap_details details; + struct zap_details details = { }; pgoff_t hba = holebegin >> PAGE_SHIFT; pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; diff --git a/mm/mempool.c b/mm/mempool.c index 07c383ddbbab..9b7a14a791cc 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -112,12 +112,12 @@ static void kasan_poison_element(mempool_t *pool, void *element) kasan_free_pages(element, (unsigned long)pool->pool_data); } -static void kasan_unpoison_element(mempool_t *pool, void *element) +static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags) { if (pool->alloc == mempool_alloc_slab) - kasan_slab_alloc(pool->pool_data, element); + kasan_slab_alloc(pool->pool_data, element, flags); if (pool->alloc == mempool_kmalloc) - kasan_krealloc(element, (size_t)pool->pool_data); + kasan_krealloc(element, (size_t)pool->pool_data, flags); if (pool->alloc == mempool_alloc_pages) kasan_alloc_pages(element, (unsigned long)pool->pool_data); } @@ -130,12 +130,12 @@ static void add_element(mempool_t *pool, void *element) pool->elements[pool->curr_nr++] = element; } -static void *remove_element(mempool_t *pool) +static void *remove_element(mempool_t *pool, gfp_t flags) { void *element = pool->elements[--pool->curr_nr]; BUG_ON(pool->curr_nr < 0); - kasan_unpoison_element(pool, element); + kasan_unpoison_element(pool, element, flags); check_element(pool, element); return element; } @@ -154,7 +154,7 @@ void mempool_destroy(mempool_t *pool) return; while (pool->curr_nr) { - void *element = remove_element(pool); + void *element = remove_element(pool, GFP_KERNEL); pool->free(element, pool->pool_data); } kfree(pool->elements); @@ -250,7 +250,7 @@ int mempool_resize(mempool_t *pool, int new_min_nr) spin_lock_irqsave(&pool->lock, flags); if (new_min_nr <= pool->min_nr) { while (new_min_nr < pool->curr_nr) { - element = remove_element(pool); + element = remove_element(pool, GFP_KERNEL); spin_unlock_irqrestore(&pool->lock, flags); pool->free(element, pool->pool_data); spin_lock_irqsave(&pool->lock, flags); @@ -347,7 +347,7 @@ repeat_alloc: 
 	spin_lock_irqsave(&pool->lock, flags);
 	if (likely(pool->curr_nr)) {
-		element = remove_element(pool);
+		element = remove_element(pool, gfp_temp);
 		spin_unlock_irqrestore(&pool->lock, flags);
 		/* paired with rmb in mempool_free(), read comment there */
 		smp_wmb();
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 06f7e1707847..b34d279a7ee6 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -35,6 +35,11 @@
 #include <linux/freezer.h>
 #include <linux/ftrace.h>
 #include <linux/ratelimit.h>
+#include <linux/kthread.h>
+#include <linux/init.h>
+
+#include <asm/tlb.h>
+#include "internal.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/oom.h>
@@ -405,6 +410,172 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);
 
 bool oom_killer_disabled __read_mostly;
 
+#define K(x) ((x) << (PAGE_SHIFT-10))
+
+#ifdef CONFIG_MMU
+/*
+ * OOM Reaper kernel thread which tries to reap the memory used by the OOM
+ * victim (if that is possible) to help the OOM killer to move on.
+ */
+static struct task_struct *oom_reaper_th;
+static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
+static struct task_struct *oom_reaper_list;
+static DEFINE_SPINLOCK(oom_reaper_lock);
+
+
+static bool __oom_reap_task(struct task_struct *tsk)
+{
+	struct mmu_gather tlb;
+	struct vm_area_struct *vma;
+	struct mm_struct *mm;
+	struct task_struct *p;
+	struct zap_details details = {.check_swap_entries = true,
+				      .ignore_dirty = true};
+	bool ret = true;
+
+	/*
+	 * Make sure we find the associated mm_struct even when the particular
+	 * thread has already terminated and cleared its mm.
+	 * We might race with the exit path, so consider our work done if
+	 * there is no mm.
+	 */
+	p = find_lock_task_mm(tsk);
+	if (!p)
+		return true;
+
+	mm = p->mm;
+	if (!atomic_inc_not_zero(&mm->mm_users)) {
+		task_unlock(p);
+		return true;
+	}
+
+	task_unlock(p);
+
+	if (!down_read_trylock(&mm->mmap_sem)) {
+		ret = false;
+		goto out;
+	}
+
+	tlb_gather_mmu(&tlb, mm, 0, -1);
+	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
+		if (is_vm_hugetlb_page(vma))
+			continue;
+
+		/*
+		 * mlocked VMAs require explicit munlocking before unmap.
+		 * Let's keep it simple here and skip such VMAs.
+		 */
+		if (vma->vm_flags & VM_LOCKED)
+			continue;
+
+		/*
+		 * Only anonymous pages have a good chance to be dropped
+		 * without additional steps which we cannot afford as we
+		 * are OOM already.
+		 *
+		 * We do not even care about fs backed pages because all
+		 * which are reclaimable have already been reclaimed and
+		 * we do not want to block exit_mmap by keeping the mm
+		 * refcount elevated without a good reason.
+		 */
+		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
+			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
+					 &details);
+	}
+	tlb_finish_mmu(&tlb, 0, -1);
+	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
+			task_pid_nr(tsk), tsk->comm,
+			K(get_mm_counter(mm, MM_ANONPAGES)),
+			K(get_mm_counter(mm, MM_FILEPAGES)),
+			K(get_mm_counter(mm, MM_SHMEMPAGES)));
+	up_read(&mm->mmap_sem);
+
+	/*
+	 * Clear TIF_MEMDIE because the task shouldn't be sitting on
+	 * reasonably reclaimable memory anymore. The OOM killer can continue
+	 * by selecting another victim if unmapping hasn't led to any
+	 * improvements. This also means that selecting this task doesn't
+	 * make any sense.
+	 */
+	tsk->signal->oom_score_adj = OOM_SCORE_ADJ_MIN;
+	exit_oom_victim(tsk);
+out:
+	mmput(mm);
+	return ret;
+}
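[Editor's aside, not part of the patch: the atomic_inc_not_zero() step above is what keeps the reaper from resurrecting an mm that exit_mmap() is already tearing down. A minimal userspace model of that "take a reference only while the object is still live" pattern, with illustrative names:]

	#include <stdatomic.h>
	#include <stdbool.h>

	static bool get_ref_if_live(atomic_int *refs)
	{
		int old = atomic_load(refs);

		while (old != 0) {
			/* bump the count only while it is still non-zero */
			if (atomic_compare_exchange_weak(refs, &old, old + 1))
				return true;
			/* on failure, old now holds the current value; retry */
		}
		return false;	/* object already dying; do not touch it */
	}

[If the grab fails the reaper simply reports success, since a dying mm needs no reaping; the matching mmput() above may drop the last reference.]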
+
+#define MAX_OOM_REAP_RETRIES 10
+static void oom_reap_task(struct task_struct *tsk)
+{
+	int attempts = 0;
+
+	/* Retry the down_read_trylock(mmap_sem) a few times */
+	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task(tsk))
+		schedule_timeout_idle(HZ/10);
+
+	if (attempts > MAX_OOM_REAP_RETRIES) {
+		pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
+				task_pid_nr(tsk), tsk->comm);
+		debug_show_all_locks();
+	}
+
+	/* Drop a reference taken by wake_oom_reaper */
+	put_task_struct(tsk);
+}
+
+static int oom_reaper(void *unused)
+{
+	set_freezable();
+
+	while (true) {
+		struct task_struct *tsk = NULL;
+
+		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
+		spin_lock(&oom_reaper_lock);
+		if (oom_reaper_list != NULL) {
+			tsk = oom_reaper_list;
+			oom_reaper_list = tsk->oom_reaper_list;
+		}
+		spin_unlock(&oom_reaper_lock);
+
+		if (tsk)
+			oom_reap_task(tsk);
+	}
+
+	return 0;
+}
+
+static void wake_oom_reaper(struct task_struct *tsk)
+{
+	if (!oom_reaper_th || tsk->oom_reaper_list)
+		return;
+
+	get_task_struct(tsk);
+
+	spin_lock(&oom_reaper_lock);
+	tsk->oom_reaper_list = oom_reaper_list;
+	oom_reaper_list = tsk;
+	spin_unlock(&oom_reaper_lock);
+	wake_up(&oom_reaper_wait);
+}
+
+static int __init oom_init(void)
+{
+	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
+	if (IS_ERR(oom_reaper_th)) {
+		pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
+				PTR_ERR(oom_reaper_th));
+		oom_reaper_th = NULL;
+	}
+	return 0;
+}
+subsys_initcall(oom_init)
+#else
+static void wake_oom_reaper(struct task_struct *tsk)
+{
+}
+#endif
+
 /**
  * mark_oom_victim - mark the given task as OOM victim
  * @tsk: task to mark
@@ -431,9 +602,10 @@ void mark_oom_victim(struct task_struct *tsk)
 /**
  * exit_oom_victim - note the exit of an OOM victim
  */
-void exit_oom_victim(void)
+void exit_oom_victim(struct task_struct *tsk)
 {
-	clear_thread_flag(TIF_MEMDIE);
+	if (!test_and_clear_tsk_thread_flag(tsk, TIF_MEMDIE))
+		return;
 
 	if (!atomic_dec_return(&oom_victims))
 		wake_up_all(&oom_victims_wait);
@@ -494,7 +666,6 @@ static bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
 	return false;
 }
 
-#define K(x) ((x) << (PAGE_SHIFT-10))
 /*
  * Must be called while holding a reference to p, which will be released upon
  * returning.
@@ -510,6 +681,7 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p,
 	unsigned int victim_points = 0;
 	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
 					      DEFAULT_RATELIMIT_BURST);
+	bool can_oom_reap = true;
 
 	/*
 	 * If the task is already exiting, don't alarm the sysadmin or kill
@@ -600,17 +772,23 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p,
 			continue;
 		if (same_thread_group(p, victim))
 			continue;
-		if (unlikely(p->flags & PF_KTHREAD))
-			continue;
-		if (is_global_init(p))
-			continue;
-		if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+		if (unlikely(p->flags & PF_KTHREAD) || is_global_init(p) ||
+		    p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+			/*
+			 * We cannot use oom_reaper for the mm shared by this
+			 * process because it wouldn't get killed and so the
+			 * memory might still be used.
+ */ + can_oom_reap = false; continue; - + } do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true); } rcu_read_unlock(); + if (can_oom_reap) + wake_oom_reaper(victim); + mmdrop(mm); put_task_struct(victim); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a762be57e46e..59de90d5d3a3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -692,34 +692,28 @@ static inline void __free_one_page(struct page *page, unsigned long combined_idx; unsigned long uninitialized_var(buddy_idx); struct page *buddy; - unsigned int max_order = MAX_ORDER; + unsigned int max_order; + + max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1); VM_BUG_ON(!zone_is_initialized(zone)); VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); VM_BUG_ON(migratetype == -1); - if (is_migrate_isolate(migratetype)) { - /* - * We restrict max order of merging to prevent merge - * between freepages on isolate pageblock and normal - * pageblock. Without this, pageblock isolation - * could cause incorrect freepage accounting. - */ - max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1); - } else { + if (likely(!is_migrate_isolate(migratetype))) __mod_zone_freepage_state(zone, 1 << order, migratetype); - } - page_idx = pfn & ((1 << max_order) - 1); + page_idx = pfn & ((1 << MAX_ORDER) - 1); VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); VM_BUG_ON_PAGE(bad_range(zone, page), page); +continue_merging: while (order < max_order - 1) { buddy_idx = __find_buddy_index(page_idx, order); buddy = page + (buddy_idx - page_idx); if (!page_is_buddy(page, buddy, order)) - break; + goto done_merging; /* * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, * merge with it and move up one order. @@ -736,6 +730,32 @@ static inline void __free_one_page(struct page *page, page_idx = combined_idx; order++; } + if (max_order < MAX_ORDER) { + /* If we are here, it means order is >= pageblock_order. + * We want to prevent merge between freepages on isolate + * pageblock and normal pageblock. Without this, pageblock + * isolation could cause incorrect freepage or CMA accounting. + * + * We don't want to hit this code for the more frequent + * low-order merging. + */ + if (unlikely(has_isolate_pageblock(zone))) { + int buddy_mt; + + buddy_idx = __find_buddy_index(page_idx, order); + buddy = page + (buddy_idx - page_idx); + buddy_mt = get_pageblock_migratetype(buddy); + + if (migratetype != buddy_mt + && (is_migrate_isolate(migratetype) || + is_migrate_isolate(buddy_mt))) + goto done_merging; + } + max_order++; + goto continue_merging; + } + +done_merging: set_page_order(page, order); /* diff --git a/mm/slab.c b/mm/slab.c index e719a5cb3396..17e2848979c5 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2086,6 +2086,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) } #endif + kasan_cache_create(cachep, &size, &flags); + size = ALIGN(size, cachep->align); /* * We should restrict the number of objects in a slab to implement @@ -2387,8 +2389,13 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page) * cache which they are a constructor for. Otherwise, deadlock. * They must also be threaded. 
*/ - if (cachep->ctor && !(cachep->flags & SLAB_POISON)) + if (cachep->ctor && !(cachep->flags & SLAB_POISON)) { + kasan_unpoison_object_data(cachep, + objp + obj_offset(cachep)); cachep->ctor(objp + obj_offset(cachep)); + kasan_poison_object_data( + cachep, objp + obj_offset(cachep)); + } if (cachep->flags & SLAB_RED_ZONE) { if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) @@ -2409,6 +2416,7 @@ static void cache_init_objs(struct kmem_cache *cachep, struct page *page) { int i; + void *objp; cache_init_objs_debug(cachep, page); @@ -2419,8 +2427,12 @@ static void cache_init_objs(struct kmem_cache *cachep, for (i = 0; i < cachep->num; i++) { /* constructor could break poison info */ - if (DEBUG == 0 && cachep->ctor) - cachep->ctor(index_to_obj(cachep, page, i)); + if (DEBUG == 0 && cachep->ctor) { + objp = index_to_obj(cachep, page, i); + kasan_unpoison_object_data(cachep, objp); + cachep->ctor(objp); + kasan_poison_object_data(cachep, objp); + } set_free_obj(page, i, i); } @@ -2550,6 +2562,7 @@ static int cache_grow(struct kmem_cache *cachep, slab_map_pages(cachep, page, freelist); + kasan_poison_slab(page); cache_init_objs(cachep, page); if (gfpflags_allow_blocking(local_flags)) @@ -3316,6 +3329,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp, { struct array_cache *ac = cpu_cache_get(cachep); + kasan_slab_free(cachep, objp); + check_irq_off(); kmemleak_free_recursive(objp, cachep->flags); objp = cache_free_debugcheck(cachep, objp, caller); @@ -3363,6 +3378,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) { void *ret = slab_alloc(cachep, flags, _RET_IP_); + kasan_slab_alloc(cachep, ret, flags); trace_kmem_cache_alloc(_RET_IP_, ret, cachep->object_size, cachep->size, flags); @@ -3428,6 +3444,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) ret = slab_alloc(cachep, flags, _RET_IP_); + kasan_kmalloc(cachep, ret, size, flags); trace_kmalloc(_RET_IP_, ret, size, cachep->size, flags); return ret; @@ -3451,6 +3468,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); + kasan_slab_alloc(cachep, ret, flags); trace_kmem_cache_alloc_node(_RET_IP_, ret, cachep->object_size, cachep->size, flags, nodeid); @@ -3469,6 +3487,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); + kasan_kmalloc(cachep, ret, size, flags); trace_kmalloc_node(_RET_IP_, ret, size, cachep->size, flags, nodeid); @@ -3481,11 +3500,15 @@ static __always_inline void * __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) { struct kmem_cache *cachep; + void *ret; cachep = kmalloc_slab(size, flags); if (unlikely(ZERO_OR_NULL_PTR(cachep))) return cachep; - return kmem_cache_alloc_node_trace(cachep, flags, node, size); + ret = kmem_cache_alloc_node_trace(cachep, flags, node, size); + kasan_kmalloc(cachep, ret, size, flags); + + return ret; } void *__kmalloc_node(size_t size, gfp_t flags, int node) @@ -3519,6 +3542,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, return cachep; ret = slab_alloc(cachep, flags, caller); + kasan_kmalloc(cachep, ret, size, flags); trace_kmalloc(caller, ret, size, cachep->size, flags); @@ -4290,10 +4314,18 @@ module_init(slab_proc_init); */ size_t ksize(const void *objp) { + size_t size; + BUG_ON(!objp); if (unlikely(objp == ZERO_SIZE_PTR)) return 0; - return virt_to_cache(objp)->object_size; + size = 
virt_to_cache(objp)->object_size; + /* We assume that ksize callers could use the whole allocated area, + * so we need to unpoison this area. + */ + kasan_krealloc(objp, size, GFP_NOWAIT); + + return size; } EXPORT_SYMBOL(ksize); diff --git a/mm/slab.h b/mm/slab.h index ff39a8fc3b3f..5969769fbee6 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -405,7 +405,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags); - kasan_slab_alloc(s, object); + kasan_slab_alloc(s, object, flags); } memcg_kmem_put_cache(s); } diff --git a/mm/slab_common.c b/mm/slab_common.c index b2e379639a5b..3239bfd758e6 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -35,7 +35,7 @@ struct kmem_cache *kmem_cache; */ #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ - SLAB_FAILSLAB) + SLAB_FAILSLAB | SLAB_KASAN) #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \ SLAB_NOTRACK | SLAB_ACCOUNT) @@ -1013,7 +1013,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) page = alloc_kmem_pages(flags, order); ret = page ? page_address(page) : NULL; kmemleak_alloc(ret, size, 1, flags); - kasan_kmalloc_large(ret, size); + kasan_kmalloc_large(ret, size, flags); return ret; } EXPORT_SYMBOL(kmalloc_order); @@ -1192,7 +1192,7 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size, ks = ksize(p); if (ks >= new_size) { - kasan_krealloc((void *)p, new_size); + kasan_krealloc((void *)p, new_size, flags); return (void *)p; } diff --git a/mm/slub.c b/mm/slub.c index 7277413ebc8b..4dbb109eb8cd 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1313,7 +1313,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node, static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) { kmemleak_alloc(ptr, size, 1, flags); - kasan_kmalloc_large(ptr, size); + kasan_kmalloc_large(ptr, size, flags); } static inline void kfree_hook(const void *x) @@ -2596,7 +2596,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) { void *ret = slab_alloc(s, gfpflags, _RET_IP_); trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); - kasan_kmalloc(s, ret, size); + kasan_kmalloc(s, ret, size, gfpflags); return ret; } EXPORT_SYMBOL(kmem_cache_alloc_trace); @@ -2624,7 +2624,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s, trace_kmalloc_node(_RET_IP_, ret, size, s->size, gfpflags, node); - kasan_kmalloc(s, ret, size); + kasan_kmalloc(s, ret, size, gfpflags); return ret; } EXPORT_SYMBOL(kmem_cache_alloc_node_trace); @@ -3182,7 +3182,8 @@ static void early_kmem_cache_node_alloc(int node) init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); init_tracking(kmem_cache_node, n); #endif - kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node)); + kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node), + GFP_KERNEL); init_kmem_cache_node(n); inc_slabs_node(kmem_cache_node, node, page->objects); @@ -3561,7 +3562,7 @@ void *__kmalloc(size_t size, gfp_t flags) trace_kmalloc(_RET_IP_, ret, size, s->size, flags); - kasan_kmalloc(s, ret, size); + kasan_kmalloc(s, ret, size, flags); return ret; } @@ -3606,7 +3607,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); - kasan_kmalloc(s, ret, size); + kasan_kmalloc(s, ret, size, flags); return ret; } @@ -3635,7 
+3636,7 @@ size_t ksize(const void *object) size_t size = __ksize(object); /* We assume that ksize callers could use whole allocated area, so we need unpoison this area. */ - kasan_krealloc(object, size); + kasan_krealloc(object, size, GFP_NOWAIT); return size; } EXPORT_SYMBOL(ksize); diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index bcbec33c6a14..dcc18c6f7cf9 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -361,7 +361,6 @@ ceph_parse_options(char *options, const char *dev_name, opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; - opt->monc_ping_timeout = CEPH_MONC_PING_TIMEOUT_DEFAULT; /* get mon ip(s) */ /* ip1[:port1][,ip2[:port2]...] */ @@ -686,6 +685,9 @@ int __ceph_open_session(struct ceph_client *client, unsigned long started) return client->auth_err; } + pr_info("client%llu fsid %pU\n", ceph_client_id(client), &client->fsid); + ceph_debugfs_client_init(client); + return 0; } EXPORT_SYMBOL(__ceph_open_session); diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index 593dc2eabcc8..b902fbc7863e 100644 --- a/net/ceph/debugfs.c +++ b/net/ceph/debugfs.c @@ -112,15 +112,20 @@ static int monc_show(struct seq_file *s, void *p) struct ceph_mon_generic_request *req; struct ceph_mon_client *monc = &client->monc; struct rb_node *rp; + int i; mutex_lock(&monc->mutex); - if (monc->have_mdsmap) - seq_printf(s, "have mdsmap %u\n", (unsigned int)monc->have_mdsmap); - if (monc->have_osdmap) - seq_printf(s, "have osdmap %u\n", (unsigned int)monc->have_osdmap); - if (monc->want_next_osdmap) - seq_printf(s, "want next osdmap\n"); + for (i = 0; i < ARRAY_SIZE(monc->subs); i++) { + seq_printf(s, "have %s %u", ceph_sub_str[i], + monc->subs[i].have); + if (monc->subs[i].want) + seq_printf(s, " want %llu%s", + le64_to_cpu(monc->subs[i].item.start), + (monc->subs[i].item.flags & + CEPH_SUBSCRIBE_ONETIME ? 
"" : "+")); + seq_putc(s, '\n'); + } for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) { __u16 op; diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 9382619a405b..1831f6353622 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -235,18 +235,12 @@ static struct workqueue_struct *ceph_msgr_wq; static int ceph_msgr_slab_init(void) { BUG_ON(ceph_msg_cache); - ceph_msg_cache = kmem_cache_create("ceph_msg", - sizeof (struct ceph_msg), - __alignof__(struct ceph_msg), 0, NULL); - + ceph_msg_cache = KMEM_CACHE(ceph_msg, 0); if (!ceph_msg_cache) return -ENOMEM; BUG_ON(ceph_msg_data_cache); - ceph_msg_data_cache = kmem_cache_create("ceph_msg_data", - sizeof (struct ceph_msg_data), - __alignof__(struct ceph_msg_data), - 0, NULL); + ceph_msg_data_cache = KMEM_CACHE(ceph_msg_data, 0); if (ceph_msg_data_cache) return 0; @@ -1221,25 +1215,19 @@ static void prepare_message_data(struct ceph_msg *msg, u32 data_len) static void prepare_write_message_footer(struct ceph_connection *con) { struct ceph_msg *m = con->out_msg; - int v = con->out_kvec_left; m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE; dout("prepare_write_message_footer %p\n", con); - con->out_kvec[v].iov_base = &m->footer; + con_out_kvec_add(con, sizeof_footer(con), &m->footer); if (con->peer_features & CEPH_FEATURE_MSG_AUTH) { if (con->ops->sign_message) con->ops->sign_message(m); else m->footer.sig = 0; - con->out_kvec[v].iov_len = sizeof(m->footer); - con->out_kvec_bytes += sizeof(m->footer); } else { m->old_footer.flags = m->footer.flags; - con->out_kvec[v].iov_len = sizeof(m->old_footer); - con->out_kvec_bytes += sizeof(m->old_footer); } - con->out_kvec_left++; con->out_more = m->more_to_follow; con->out_msg_done = true; } @@ -2409,11 +2397,7 @@ static int read_partial_message(struct ceph_connection *con) } /* footer */ - if (need_sign) - size = sizeof(m->footer); - else - size = sizeof(m->old_footer); - + size = sizeof_footer(con); end += size; ret = read_partial(con, end, size, &m->footer); if (ret <= 0) @@ -3089,10 +3073,7 @@ void ceph_msg_revoke(struct ceph_msg *msg) con->out_skip += con_out_kvec_skip(con); } else { BUG_ON(!msg->data_length); - if (con->peer_features & CEPH_FEATURE_MSG_AUTH) - con->out_skip += sizeof(msg->footer); - else - con->out_skip += sizeof(msg->old_footer); + con->out_skip += sizeof_footer(con); } /* data, middle, front */ if (msg->data_length) diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index de85dddc3dc0..cf638c009cfa 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -122,51 +122,91 @@ static void __close_session(struct ceph_mon_client *monc) ceph_msg_revoke(monc->m_subscribe); ceph_msg_revoke_incoming(monc->m_subscribe_ack); ceph_con_close(&monc->con); - monc->cur_mon = -1; + monc->pending_auth = 0; ceph_auth_reset(monc->auth); } /* - * Open a session with a (new) monitor. + * Pick a new monitor at random and set cur_mon. If we are repicking + * (i.e. cur_mon is already set), be sure to pick a different one. 
  */
-static int __open_session(struct ceph_mon_client *monc)
+static void pick_new_mon(struct ceph_mon_client *monc)
 {
-	char r;
-	int ret;
+	int old_mon = monc->cur_mon;
 
-	if (monc->cur_mon < 0) {
-		get_random_bytes(&r, 1);
-		monc->cur_mon = r % monc->monmap->num_mon;
-		dout("open_session num=%d r=%d -> mon%d\n",
-		     monc->monmap->num_mon, r, monc->cur_mon);
-		monc->sub_sent = 0;
-		monc->sub_renew_after = jiffies; /* i.e., expired */
-		monc->want_next_osdmap = !!monc->want_next_osdmap;
-
-		dout("open_session mon%d opening\n", monc->cur_mon);
-		ceph_con_open(&monc->con,
-			      CEPH_ENTITY_TYPE_MON, monc->cur_mon,
-			      &monc->monmap->mon_inst[monc->cur_mon].addr);
-
-		/* send an initial keepalive to ensure our timestamp is
-		 * valid by the time we are in an OPENED state */
-		ceph_con_keepalive(&monc->con);
-
-		/* initiatiate authentication handshake */
-		ret = ceph_auth_build_hello(monc->auth,
-					    monc->m_auth->front.iov_base,
-					    monc->m_auth->front_alloc_len);
-		__send_prepared_auth_request(monc, ret);
+	BUG_ON(monc->monmap->num_mon < 1);
+
+	if (monc->monmap->num_mon == 1) {
+		monc->cur_mon = 0;
 	} else {
-		dout("open_session mon%d already open\n", monc->cur_mon);
+		int max = monc->monmap->num_mon;
+		int o = -1;
+		int n;
+
+		if (monc->cur_mon >= 0) {
+			if (monc->cur_mon < monc->monmap->num_mon)
+				o = monc->cur_mon;
+			if (o >= 0)
+				max--;
+		}
+
+		n = prandom_u32() % max;
+		if (o >= 0 && n >= o)
+			n++;
+
+		monc->cur_mon = n;
 	}
-	return 0;
+
+	dout("%s mon%d -> mon%d out of %d mons\n", __func__, old_mon,
+	     monc->cur_mon, monc->monmap->num_mon);
+}
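[Editor's aside, not part of the patch: the skip-the-current-monitor trick in pick_new_mon() above, reduced to a pure function with a worked case. With num == 3 and old == 1, n is drawn from {0, 1}, and bumping it past the old index maps that onto {0, 2}, so the lost monitor is never re-picked. The helper name is illustrative and assumes 0 <= old < num.]

	/* rnd is any random value, e.g. from prandom_u32() */
	static int pick_excluding(int num, int old, unsigned int rnd)
	{
		int n = rnd % (num - 1);	/* one fewer candidate: 'old' is excluded */

		if (n >= old)
			n++;			/* shift past the excluded slot */
		return n;
	}

+
+/*
+ * Open a session with a new monitor.
+ */
+static void __open_session(struct ceph_mon_client *monc)
+{
+	int ret;
+
+	pick_new_mon(monc);
+
+	monc->hunting = true;
+	if (monc->had_a_connection) {
+		monc->hunt_mult *= CEPH_MONC_HUNT_BACKOFF;
+		if (monc->hunt_mult > CEPH_MONC_HUNT_MAX_MULT)
+			monc->hunt_mult = CEPH_MONC_HUNT_MAX_MULT;
+	}
+
+	monc->sub_renew_after = jiffies; /* i.e., expired */
+	monc->sub_renew_sent = 0;
+
+	dout("%s opening mon%d\n", __func__, monc->cur_mon);
+	ceph_con_open(&monc->con, CEPH_ENTITY_TYPE_MON, monc->cur_mon,
+		      &monc->monmap->mon_inst[monc->cur_mon].addr);
+
+	/*
+	 * send an initial keepalive to ensure our timestamp is valid
+	 * by the time we are in an OPENED state
+	 */
+	ceph_con_keepalive(&monc->con);
+
+	/* initiate authentication handshake */
+	ret = ceph_auth_build_hello(monc->auth,
+				    monc->m_auth->front.iov_base,
+				    monc->m_auth->front_alloc_len);
+	BUG_ON(ret <= 0);
+	__send_prepared_auth_request(monc, ret);
 }
 
-static bool __sub_expired(struct ceph_mon_client *monc)
+static void reopen_session(struct ceph_mon_client *monc)
 {
-	return time_after_eq(jiffies, monc->sub_renew_after);
+	if (!monc->hunting)
+		pr_info("mon%d %s session lost, hunting for new mon\n",
+			monc->cur_mon, ceph_pr_addr(&monc->con.peer_addr.in_addr));
+
+	__close_session(monc);
+	__open_session(monc);
 }
 
 /*
@@ -174,74 +214,70 @@ static bool __sub_expired(struct ceph_mon_client *monc)
  */
 static void __schedule_delayed(struct ceph_mon_client *monc)
 {
-	struct ceph_options *opt = monc->client->options;
 	unsigned long delay;
 
-	if (monc->cur_mon < 0 || __sub_expired(monc)) {
-		delay = 10 * HZ;
-	} else {
-		delay = 20 * HZ;
-		if (opt->monc_ping_timeout > 0)
-			delay = min(delay, opt->monc_ping_timeout / 3);
-	}
+	if (monc->hunting)
+		delay = CEPH_MONC_HUNT_INTERVAL * monc->hunt_mult;
+	else
+		delay = CEPH_MONC_PING_INTERVAL;
+
 	dout("__schedule_delayed after %lu\n", delay);
-	schedule_delayed_work(&monc->delayed_work,
-			      round_jiffies_relative(delay));
+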
mod_delayed_work(system_wq, &monc->delayed_work, + round_jiffies_relative(delay)); } +const char *ceph_sub_str[] = { + [CEPH_SUB_MDSMAP] = "mdsmap", + [CEPH_SUB_MONMAP] = "monmap", + [CEPH_SUB_OSDMAP] = "osdmap", +}; + /* - * Send subscribe request for mdsmap and/or osdmap. + * Send subscribe request for one or more maps, according to + * monc->subs. */ static void __send_subscribe(struct ceph_mon_client *monc) { - dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n", - (unsigned int)monc->sub_sent, __sub_expired(monc), - monc->want_next_osdmap); - if ((__sub_expired(monc) && !monc->sub_sent) || - monc->want_next_osdmap == 1) { - struct ceph_msg *msg = monc->m_subscribe; - struct ceph_mon_subscribe_item *i; - void *p, *end; - int num; - - p = msg->front.iov_base; - end = p + msg->front_alloc_len; - - num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap; - ceph_encode_32(&p, num); - - if (monc->want_next_osdmap) { - dout("__send_subscribe to 'osdmap' %u\n", - (unsigned int)monc->have_osdmap); - ceph_encode_string(&p, end, "osdmap", 6); - i = p; - i->have = cpu_to_le64(monc->have_osdmap); - i->onetime = 1; - p += sizeof(*i); - monc->want_next_osdmap = 2; /* requested */ - } - if (monc->want_mdsmap) { - dout("__send_subscribe to 'mdsmap' %u+\n", - (unsigned int)monc->have_mdsmap); - ceph_encode_string(&p, end, "mdsmap", 6); - i = p; - i->have = cpu_to_le64(monc->have_mdsmap); - i->onetime = 0; - p += sizeof(*i); - } - ceph_encode_string(&p, end, "monmap", 6); - i = p; - i->have = 0; - i->onetime = 0; - p += sizeof(*i); - - msg->front.iov_len = p - msg->front.iov_base; - msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); - ceph_msg_revoke(msg); - ceph_con_send(&monc->con, ceph_msg_get(msg)); - - monc->sub_sent = jiffies | 1; /* never 0 */ + struct ceph_msg *msg = monc->m_subscribe; + void *p = msg->front.iov_base; + void *const end = p + msg->front_alloc_len; + int num = 0; + int i; + + dout("%s sent %lu\n", __func__, monc->sub_renew_sent); + + BUG_ON(monc->cur_mon < 0); + + if (!monc->sub_renew_sent) + monc->sub_renew_sent = jiffies | 1; /* never 0 */ + + msg->hdr.version = cpu_to_le16(2); + + for (i = 0; i < ARRAY_SIZE(monc->subs); i++) { + if (monc->subs[i].want) + num++; } + BUG_ON(num < 1); /* monmap sub is always there */ + ceph_encode_32(&p, num); + for (i = 0; i < ARRAY_SIZE(monc->subs); i++) { + const char *s = ceph_sub_str[i]; + + if (!monc->subs[i].want) + continue; + + dout("%s %s start %llu flags 0x%x\n", __func__, s, + le64_to_cpu(monc->subs[i].item.start), + monc->subs[i].item.flags); + ceph_encode_string(&p, end, s, strlen(s)); + memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item)); + p += sizeof(monc->subs[i].item); + } + + BUG_ON(p != (end - 35 - (ARRAY_SIZE(monc->subs) - num) * 19)); + msg->front.iov_len = p - msg->front.iov_base; + msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); + ceph_msg_revoke(msg); + ceph_con_send(&monc->con, ceph_msg_get(msg)); } static void handle_subscribe_ack(struct ceph_mon_client *monc, @@ -255,15 +291,16 @@ static void handle_subscribe_ack(struct ceph_mon_client *monc, seconds = le32_to_cpu(h->duration); mutex_lock(&monc->mutex); - if (monc->hunting) { - pr_info("mon%d %s session established\n", - monc->cur_mon, - ceph_pr_addr(&monc->con.peer_addr.in_addr)); - monc->hunting = false; + if (monc->sub_renew_sent) { + monc->sub_renew_after = monc->sub_renew_sent + + (seconds >> 1) * HZ - 1; + dout("%s sent %lu duration %d renew after %lu\n", __func__, + monc->sub_renew_sent, seconds, monc->sub_renew_after); + 
monc->sub_renew_sent = 0; + } else { + dout("%s sent %lu renew after %lu, ignoring\n", __func__, + monc->sub_renew_sent, monc->sub_renew_after); } - dout("handle_subscribe_ack after %d seconds\n", seconds); - monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1; - monc->sub_sent = 0; mutex_unlock(&monc->mutex); return; bad: @@ -272,36 +309,82 @@ bad: } /* - * Keep track of which maps we have + * Register interest in a map + * + * @sub: one of CEPH_SUB_* + * @epoch: X for "every map since X", or 0 for "just the latest" */ -int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got) +static bool __ceph_monc_want_map(struct ceph_mon_client *monc, int sub, + u32 epoch, bool continuous) +{ + __le64 start = cpu_to_le64(epoch); + u8 flags = !continuous ? CEPH_SUBSCRIBE_ONETIME : 0; + + dout("%s %s epoch %u continuous %d\n", __func__, ceph_sub_str[sub], + epoch, continuous); + + if (monc->subs[sub].want && + monc->subs[sub].item.start == start && + monc->subs[sub].item.flags == flags) + return false; + + monc->subs[sub].item.start = start; + monc->subs[sub].item.flags = flags; + monc->subs[sub].want = true; + + return true; +} + +bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch, + bool continuous) { + bool need_request; + mutex_lock(&monc->mutex); - monc->have_mdsmap = got; + need_request = __ceph_monc_want_map(monc, sub, epoch, continuous); mutex_unlock(&monc->mutex); - return 0; + + return need_request; } -EXPORT_SYMBOL(ceph_monc_got_mdsmap); +EXPORT_SYMBOL(ceph_monc_want_map); -int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got) +/* + * Keep track of which maps we have + * + * @sub: one of CEPH_SUB_* + */ +static void __ceph_monc_got_map(struct ceph_mon_client *monc, int sub, + u32 epoch) +{ + dout("%s %s epoch %u\n", __func__, ceph_sub_str[sub], epoch); + + if (monc->subs[sub].want) { + if (monc->subs[sub].item.flags & CEPH_SUBSCRIBE_ONETIME) + monc->subs[sub].want = false; + else + monc->subs[sub].item.start = cpu_to_le64(epoch + 1); + } + + monc->subs[sub].have = epoch; +} + +void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch) { mutex_lock(&monc->mutex); - monc->have_osdmap = got; - monc->want_next_osdmap = 0; + __ceph_monc_got_map(monc, sub, epoch); mutex_unlock(&monc->mutex); - return 0; } +EXPORT_SYMBOL(ceph_monc_got_map); /* * Register interest in the next osdmap */ void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc) { - dout("request_next_osdmap have %u\n", monc->have_osdmap); + dout("%s have %u\n", __func__, monc->subs[CEPH_SUB_OSDMAP].have); mutex_lock(&monc->mutex); - if (!monc->want_next_osdmap) - monc->want_next_osdmap = 1; - if (monc->want_next_osdmap < 2) + if (__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, + monc->subs[CEPH_SUB_OSDMAP].have + 1, false)) __send_subscribe(monc); mutex_unlock(&monc->mutex); } @@ -320,15 +403,15 @@ int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch, long ret; mutex_lock(&monc->mutex); - while (monc->have_osdmap < epoch) { + while (monc->subs[CEPH_SUB_OSDMAP].have < epoch) { mutex_unlock(&monc->mutex); if (timeout && time_after_eq(jiffies, started + timeout)) return -ETIMEDOUT; ret = wait_event_interruptible_timeout(monc->client->auth_wq, - monc->have_osdmap >= epoch, - ceph_timeout_jiffies(timeout)); + monc->subs[CEPH_SUB_OSDMAP].have >= epoch, + ceph_timeout_jiffies(timeout)); if (ret < 0) return ret; @@ -341,11 +424,14 @@ int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch, EXPORT_SYMBOL(ceph_monc_wait_osdmap); /* - * + * Open a session 
@@ -341,11 +424,14 @@ int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
 EXPORT_SYMBOL(ceph_monc_wait_osdmap);
 
 /*
- *
+ * Open a session with a random monitor. Request monmap and osdmap,
+ * which are waited upon in __ceph_open_session().
  */
 int ceph_monc_open_session(struct ceph_mon_client *monc)
 {
 	mutex_lock(&monc->mutex);
+	__ceph_monc_want_map(monc, CEPH_SUB_MONMAP, 0, true);
+	__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, 0, false);
 	__open_session(monc);
 	__schedule_delayed(monc);
 	mutex_unlock(&monc->mutex);
@@ -353,29 +439,15 @@ int ceph_monc_open_session(struct ceph_mon_client *monc)
 }
 EXPORT_SYMBOL(ceph_monc_open_session);
 
-/*
- * We require the fsid and global_id in order to initialize our
- * debugfs dir.
- */
-static bool have_debugfs_info(struct ceph_mon_client *monc)
-{
-	dout("have_debugfs_info fsid %d globalid %lld\n",
-	     (int)monc->client->have_fsid, monc->auth->global_id);
-	return monc->client->have_fsid && monc->auth->global_id > 0;
-}
-
 static void ceph_monc_handle_map(struct ceph_mon_client *monc,
 				 struct ceph_msg *msg)
 {
 	struct ceph_client *client = monc->client;
 	struct ceph_monmap *monmap = NULL, *old = monc->monmap;
 	void *p, *end;
-	int had_debugfs_info, init_debugfs = 0;
 
 	mutex_lock(&monc->mutex);
-	had_debugfs_info = have_debugfs_info(monc);
-
 	dout("handle_monmap\n");
 	p = msg->front.iov_base;
 	end = p + msg->front.iov_len;
@@ -395,29 +467,11 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
 	client->monc.monmap = monmap;
 	kfree(old);
 
-	if (!client->have_fsid) {
-		client->have_fsid = true;
-		if (!had_debugfs_info && have_debugfs_info(monc)) {
-			pr_info("client%lld fsid %pU\n",
-				ceph_client_id(monc->client),
-				&monc->client->fsid);
-			init_debugfs = 1;
-		}
-		mutex_unlock(&monc->mutex);
-
-		if (init_debugfs) {
-			/*
-			 * do debugfs initialization without mutex to avoid
-			 * creating a locking dependency
-			 */
-			ceph_debugfs_client_init(monc->client);
-		}
+	__ceph_monc_got_map(monc, CEPH_SUB_MONMAP, monc->monmap->epoch);
+	client->have_fsid = true;
 
-		goto out_unlocked;
-	}
 out:
 	mutex_unlock(&monc->mutex);
-out_unlocked:
 	wake_up_all(&client->auth_wq);
 }
 
@@ -745,18 +799,15 @@ static void delayed_work(struct work_struct *work)
 	dout("monc delayed_work\n");
 	mutex_lock(&monc->mutex);
 	if (monc->hunting) {
-		__close_session(monc);
-		__open_session(monc);  /* continue hunting */
+		dout("%s continuing hunt\n", __func__);
+		reopen_session(monc);
 	} else {
-		struct ceph_options *opt = monc->client->options;
 		int is_auth = ceph_auth_is_authenticated(monc->auth);
+
 		if (ceph_con_keepalive_expired(&monc->con,
-					       opt->monc_ping_timeout)) {
+					       CEPH_MONC_PING_TIMEOUT)) {
 			dout("monc keepalive timeout\n");
 			is_auth = 0;
-			__close_session(monc);
-			monc->hunting = true;
-			__open_session(monc);
+			reopen_session(monc);
 		}
 
 		if (!monc->hunting) {
@@ -764,8 +815,14 @@ static void delayed_work(struct work_struct *work)
 			__validate_auth(monc);
 		}
 
-		if (is_auth)
-			__send_subscribe(monc);
+		if (is_auth) {
+			unsigned long now = jiffies;
+
+			dout("%s renew subs? now %lu renew after %lu\n",
+			     __func__, now, monc->sub_renew_after);
+			if (time_after_eq(now, monc->sub_renew_after))
+				__send_subscribe(monc);
+		}
 	}
 	__schedule_delayed(monc);
 	mutex_unlock(&monc->mutex);
@@ -852,18 +909,14 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
 			   &monc->client->msgr);
 
 	monc->cur_mon = -1;
-	monc->hunting = true;
-	monc->sub_renew_after = jiffies;
-	monc->sub_sent = 0;
+	monc->had_a_connection = false;
+	monc->hunt_mult = 1;
 
 	INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
 	monc->generic_request_tree = RB_ROOT;
 	monc->num_generic_requests = 0;
 	monc->last_tid = 0;
 
-	monc->have_mdsmap = 0;
-	monc->have_osdmap = 0;
-	monc->want_next_osdmap = 1;
 	return 0;
 
 out_auth_reply:
@@ -888,7 +941,7 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
 
 	mutex_lock(&monc->mutex);
 	__close_session(monc);
-
+	monc->cur_mon = -1;
 	mutex_unlock(&monc->mutex);
 
 	/*
@@ -910,26 +963,40 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
 }
 EXPORT_SYMBOL(ceph_monc_stop);
 
+static void finish_hunting(struct ceph_mon_client *monc)
+{
+	if (monc->hunting) {
+		dout("%s found mon%d\n", __func__, monc->cur_mon);
+		monc->hunting = false;
+		monc->had_a_connection = true;
+		monc->hunt_mult /= 2; /* reduce by 50% */
+		if (monc->hunt_mult < 1)
+			monc->hunt_mult = 1;
+	}
+}
+
 static void handle_auth_reply(struct ceph_mon_client *monc,
 			      struct ceph_msg *msg)
 {
 	int ret;
 	int was_auth = 0;
-	int had_debugfs_info, init_debugfs = 0;
 
 	mutex_lock(&monc->mutex);
-	had_debugfs_info = have_debugfs_info(monc);
 	was_auth = ceph_auth_is_authenticated(monc->auth);
 	monc->pending_auth = 0;
 	ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
 				     msg->front.iov_len,
 				     monc->m_auth->front.iov_base,
 				     monc->m_auth->front_alloc_len);
+	if (ret > 0) {
+		__send_prepared_auth_request(monc, ret);
+		goto out;
+	}
+
+	finish_hunting(monc);
+
 	if (ret < 0) {
 		monc->client->auth_err = ret;
-		wake_up_all(&monc->client->auth_wq);
-	} else if (ret > 0) {
-		__send_prepared_auth_request(monc, ret);
 	} else if (!was_auth && ceph_auth_is_authenticated(monc->auth)) {
 		dout("authenticated, starting session\n");
 
@@ -939,23 +1006,15 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
 
 		__send_subscribe(monc);
 		__resend_generic_request(monc);
-	}
 
-	if (!had_debugfs_info && have_debugfs_info(monc)) {
-		pr_info("client%lld fsid %pU\n",
-			ceph_client_id(monc->client),
-			&monc->client->fsid);
-		init_debugfs = 1;
+		pr_info("mon%d %s session established\n", monc->cur_mon,
+			ceph_pr_addr(&monc->con.peer_addr.in_addr));
 	}
-	mutex_unlock(&monc->mutex);
 
-	if (init_debugfs) {
-		/*
-		 * do debugfs initialization without mutex to avoid
-		 * creating a locking dependency
-		 */
-		ceph_debugfs_client_init(monc->client);
-	}
+out:
+	mutex_unlock(&monc->mutex);
+	if (monc->client->auth_err < 0)
+		wake_up_all(&monc->client->auth_wq);
 }
 
 static int __validate_auth(struct ceph_mon_client *monc)
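finish_hunting() above decays hunt_mult by half, floored at 1, each time a hunt succeeds; the matching exponential growth on reconnect attempts, gated by had_a_connection, presumably lives on the __open_session() side, which these hunks do not show. A toy model of that backoff under those assumptions, with stand-in constants for the CEPH_MONC_HUNT_* values:

    #include <assert.h>
    #include <stdbool.h>

    /* stand-in constants, not taken from this diff */
    #define HUNT_BACKOFF   2
    #define HUNT_MAX_MULT 10

    struct hunt_state {
            bool had_a_connection;
            int hunt_mult;
    };

    /* assumed: on each (re)open attempt, back off exponentially, capped */
    static void on_open(struct hunt_state *s)
    {
            if (s->had_a_connection) {
                    s->hunt_mult *= HUNT_BACKOFF;
                    if (s->hunt_mult > HUNT_MAX_MULT)
                            s->hunt_mult = HUNT_MAX_MULT;
            }
    }

    /* mirrors finish_hunting(): decay by half, floored at 1 */
    static void on_success(struct hunt_state *s)
    {
            s->had_a_connection = true;
            s->hunt_mult /= 2;
            if (s->hunt_mult < 1)
                    s->hunt_mult = 1;
    }

    int main(void)
    {
            struct hunt_state s = { .had_a_connection = false, .hunt_mult = 1 };

            on_open(&s);                 /* first ever attempt: no backoff yet */
            assert(s.hunt_mult == 1);
            on_success(&s);
            on_open(&s); on_open(&s); on_open(&s); on_open(&s);
            assert(s.hunt_mult == 10);   /* 1 -> 2 -> 4 -> 8 -> 10 (capped) */
            on_success(&s);
            assert(s.hunt_mult == 5);
            return 0;
    }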
@@ -1096,29 +1155,17 @@ static void mon_fault(struct ceph_connection *con)
 {
 	struct ceph_mon_client *monc = con->private;
 
-	if (!monc)
-		return;
-
-	dout("mon_fault\n");
 	mutex_lock(&monc->mutex);
-	if (!con->private)
-		goto out;
-
-	if (!monc->hunting)
-		pr_info("mon%d %s session lost, "
-			"hunting for new mon\n", monc->cur_mon,
-			ceph_pr_addr(&monc->con.peer_addr.in_addr));
-
-	__close_session(monc);
-	if (!monc->hunting) {
-		/* start hunting */
-		monc->hunting = true;
-		__open_session(monc);
-	} else {
-		/* already hunting, let's wait a bit */
-		__schedule_delayed(monc);
+	dout("%s mon%d\n", __func__, monc->cur_mon);
+	if (monc->cur_mon >= 0) {
+		if (!monc->hunting) {
+			dout("%s hunting for new mon\n", __func__);
+			reopen_session(monc);
+			__schedule_delayed(monc);
+		} else {
+			dout("%s already hunting\n", __func__);
+		}
 	}
-out:
 	mutex_unlock(&monc->mutex);
 }
 
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 5bc053778fed..32355d9d0103 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -338,9 +338,10 @@ static void ceph_osdc_release_request(struct kref *kref)
 	ceph_put_snap_context(req->r_snapc);
 	if (req->r_mempool)
 		mempool_free(req, req->r_osdc->req_mempool);
-	else
+	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
 		kmem_cache_free(ceph_osd_request_cache, req);
-
+	else
+		kfree(req);
 }
 
 void ceph_osdc_get_request(struct ceph_osd_request *req)
@@ -369,28 +370,22 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 	struct ceph_msg *msg;
 	size_t msg_size;
 
-	BUILD_BUG_ON(CEPH_OSD_MAX_OP > U16_MAX);
-	BUG_ON(num_ops > CEPH_OSD_MAX_OP);
-
-	msg_size = 4 + 4 + 8 + 8 + 4+8;
-	msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
-	msg_size += 1 + 8 + 4 + 4; /* pg_t */
-	msg_size += 4 + CEPH_MAX_OID_NAME_LEN; /* oid */
-	msg_size += 2 + num_ops*sizeof(struct ceph_osd_op);
-	msg_size += 8; /* snapid */
-	msg_size += 8; /* snap_seq */
-	msg_size += 8 * (snapc ? snapc->num_snaps : 0); /* snaps */
-	msg_size += 4;
-
 	if (use_mempool) {
+		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
 		req = mempool_alloc(osdc->req_mempool, gfp_flags);
-		memset(req, 0, sizeof(*req));
+	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
+		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
 	} else {
-		req = kmem_cache_zalloc(ceph_osd_request_cache, gfp_flags);
+		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
+		req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
+			      gfp_flags);
	}
-	if (req == NULL)
+	if (unlikely(!req))
 		return NULL;
 
+	/* req only, each op is zeroed in _osd_req_op_init() */
+	memset(req, 0, sizeof(*req));
+
 	req->r_osdc = osdc;
 	req->r_mempool = use_mempool;
 	req->r_num_ops = num_ops;
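ceph_osdc_alloc_request() now picks one of three allocation paths: the mempool, reserved for requests of at most CEPH_OSD_SLAB_OPS ops, the slab cache for the common small-op case, and a plain kmalloc sized for the flexible trailing r_ops array otherwise. A userspace sketch of the sizing logic, collapsing all three paths into a single malloc and using stand-in values for the two limits:

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    #define SLAB_OPS 2   /* stand-in for CEPH_OSD_SLAB_OPS */
    #define MAX_OPS 16   /* stand-in for CEPH_OSD_MAX_OPS */

    struct op { int opcode; };

    struct request {
            unsigned int num_ops;
            struct op r_ops[];  /* ops now live at the tail of the request */
    };

    static struct request *alloc_request(unsigned int num_ops)
    {
            struct request *req;

            if (num_ops > MAX_OPS)
                    return NULL;
            /*
             * The kernel picks a slab object sized for SLAB_OPS ops when
             * num_ops is small and falls back to kmalloc otherwise; both
             * cases reduce to one allocation of the right size here.
             */
            req = malloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]));
            if (!req)
                    return NULL;
            /* zero the request only; ops are initialized one by one later */
            memset(req, 0, sizeof(*req));
            req->num_ops = num_ops;
            return req;
    }

    int main(void)
    {
            struct request *small = alloc_request(2);   /* slab-sized path */
            struct request *large = alloc_request(10);  /* kmalloc path */

            assert(small && large);
            free(small);
            free(large);
            return 0;
    }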
@@ -408,18 +403,36 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 	req->r_base_oloc.pool = -1;
 	req->r_target_oloc.pool = -1;
 
+	msg_size = OSD_OPREPLY_FRONT_LEN;
+	if (num_ops > CEPH_OSD_SLAB_OPS) {
+		/* ceph_osd_op and rval */
+		msg_size += (num_ops - CEPH_OSD_SLAB_OPS) *
+			    (sizeof(struct ceph_osd_op) + 4);
+	}
+
 	/* create reply message */
 	if (use_mempool)
 		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
 	else
-		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
-				   OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
+		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size,
+				   gfp_flags, true);
 	if (!msg) {
 		ceph_osdc_put_request(req);
 		return NULL;
 	}
 	req->r_reply = msg;
 
+	msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
+	msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
+	msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
+	msg_size += 1 + 8 + 4 + 4; /* pgid */
+	msg_size += 4 + CEPH_MAX_OID_NAME_LEN; /* oid */
+	msg_size += 2 + num_ops * sizeof(struct ceph_osd_op);
+	msg_size += 8; /* snapid */
+	msg_size += 8; /* snap_seq */
+	msg_size += 4 + 8 * (snapc ? snapc->num_snaps : 0); /* snaps */
+	msg_size += 4; /* retry_attempt */
+
 	/* create request message; allow space for oid */
 	if (use_mempool)
 		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
@@ -498,7 +511,7 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
 	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
 		payload_len += length;
 
-	op->payload_len = payload_len;
+	op->indata_len = payload_len;
 }
 EXPORT_SYMBOL(osd_req_op_extent_init);
 
@@ -517,10 +530,32 @@ void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
 	BUG_ON(length > previous);
 
 	op->extent.length = length;
-	op->payload_len -= previous - length;
+	op->indata_len -= previous - length;
 }
 EXPORT_SYMBOL(osd_req_op_extent_update);
 
+void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
+				unsigned int which, u64 offset_inc)
+{
+	struct ceph_osd_req_op *op, *prev_op;
+
+	BUG_ON(which + 1 >= osd_req->r_num_ops);
+
+	prev_op = &osd_req->r_ops[which];
+	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
+	/* dup previous one */
+	op->indata_len = prev_op->indata_len;
+	op->outdata_len = prev_op->outdata_len;
+	op->extent = prev_op->extent;
+	/* adjust offset */
+	op->extent.offset += offset_inc;
+	op->extent.length -= offset_inc;
+
+	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
+		op->indata_len -= offset_inc;
+}
+EXPORT_SYMBOL(osd_req_op_extent_dup_last);
+
 void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			 u16 opcode, const char *class, const char *method)
 {
@@ -554,7 +589,7 @@ void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
 
 	op->cls.argc = 0;	/* currently unused */
 
-	op->payload_len = payload_len;
+	op->indata_len = payload_len;
 }
 EXPORT_SYMBOL(osd_req_op_cls_init);
 
@@ -587,7 +622,7 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
 	op->xattr.cmp_mode = cmp_mode;
 
 	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
-	op->payload_len = payload_len;
+	op->indata_len = payload_len;
 	return 0;
 }
 EXPORT_SYMBOL(osd_req_op_xattr_init);
@@ -707,7 +742,7 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
 			BUG_ON(osd_data->type == CEPH_OSD_DATA_TYPE_NONE);
 			dst->cls.indata_len = cpu_to_le32(data_length);
 			ceph_osdc_msg_data_add(req->r_request, osd_data);
-			src->payload_len += data_length;
+			src->indata_len += data_length;
 			request_data_len += data_length;
 		}
 		osd_data = &src->cls.response_data;
@@ -750,7 +785,7 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
 
 	dst->op = cpu_to_le16(src->op);
 	dst->flags = cpu_to_le32(src->flags);
-	dst->payload_len = cpu_to_le32(src->payload_len);
+	dst->payload_len = cpu_to_le32(src->indata_len);
 
 	return request_data_len;
 }
@@ -1810,7 +1845,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
 
 	ceph_decode_need(&p, end, 4, bad_put);
 	numops = ceph_decode_32(&p);
-	if (numops > CEPH_OSD_MAX_OP)
+	if (numops > CEPH_OSD_MAX_OPS)
 		goto bad_put;
 	if (numops != req->r_num_ops)
 		goto bad_put;
@@ -1821,7 +1856,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
 		int len;
 
 		len = le32_to_cpu(op->payload_len);
-		req->r_reply_op_len[i] = len;
+		req->r_ops[i].outdata_len = len;
 		dout(" op %d has %d bytes\n", i, len);
 		payload_len += len;
 		p += sizeof(*op);
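osd_req_op_extent_dup_last() above clones the previous extent op into the next slot and advances it by offset_inc, trimming the payload accounting for writes. The offset/length arithmetic in isolation, with a simplified op structure in place of ceph_osd_req_op:

    #include <assert.h>
    #include <stdint.h>

    struct extent_op {
            uint64_t offset;
            uint64_t length;
            uint32_t indata_len;  /* payload bytes carried by the op */
            int is_write;
    };

    /* duplicate prev into next, shifted forward by offset_inc bytes */
    static void dup_last(const struct extent_op *prev, struct extent_op *next,
                         uint64_t offset_inc)
    {
            *next = *prev;
            next->offset += offset_inc;
            next->length -= offset_inc;
            if (next->is_write)
                    next->indata_len -= offset_inc;
    }

    int main(void)
    {
            struct extent_op w = { .offset = 0, .length = 8192,
                                   .indata_len = 8192, .is_write = 1 };
            struct extent_op w2;

            dup_last(&w, &w2, 4096);
            assert(w2.offset == 4096 && w2.length == 4096);
            assert(w2.indata_len == 4096);
            return 0;
    }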
@@ -1836,7 +1871,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
 	ceph_decode_need(&p, end, 4 + numops * 4, bad_put);
 	retry_attempt = ceph_decode_32(&p);
 	for (i = 0; i < numops; i++)
-		req->r_reply_op_result[i] = ceph_decode_32(&p);
+		req->r_ops[i].rval = ceph_decode_32(&p);
 
 	if (le16_to_cpu(msg->hdr.version) >= 6) {
 		p += 8 + 4; /* skip replay_version */
@@ -2187,7 +2222,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
 		goto bad;
 done:
 	downgrade_write(&osdc->map_sem);
-	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
+	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
+			  osdc->osdmap->epoch);
 
 	/*
 	 * subscribe to subsequent osdmap updates if full to ensure
@@ -2646,8 +2682,8 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
 	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));
 
 	err = -ENOMEM;
-	osdc->req_mempool = mempool_create_kmalloc_pool(10,
-					sizeof(struct ceph_osd_request));
+	osdc->req_mempool = mempool_create_slab_pool(10,
+						     ceph_osd_request_cache);
 	if (!osdc->req_mempool)
 		goto out;
 
@@ -2782,11 +2818,12 @@ EXPORT_SYMBOL(ceph_osdc_writepages);
 
 int ceph_osdc_setup(void)
 {
+	size_t size = sizeof(struct ceph_osd_request) +
+		      CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
+
 	BUG_ON(ceph_osd_request_cache);
-	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request",
-					sizeof (struct ceph_osd_request),
-					__alignof__(struct ceph_osd_request),
-					0, NULL);
+	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
+						   0, 0, NULL);
 
 	return ceph_osd_request_cache ? 0 : -ENOMEM;
 }
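With the ops array embedded in the request, ceph_osdc_setup() sizes the slab for a request header plus CEPH_OSD_SLAB_OPS trailing ops, which is also what lets the mempool above be backed by the same cache via mempool_create_slab_pool(). A sketch of the size computation with made-up structure sizes and a stand-in for the ops limit; the real sizeof values depend on the kernel config:

    #include <assert.h>
    #include <stddef.h>

    #define CEPH_OSD_SLAB_OPS 2  /* stand-in value, not taken from this diff */

    /* stand-ins: only the sizeof arithmetic matters here */
    struct osd_req_op { char payload[160]; };
    struct osd_request {
            char header[512];
            struct osd_req_op r_ops[];  /* flexible array member */
    };

    int main(void)
    {
            /*
             * mirrors: size = sizeof(struct ceph_osd_request) +
             *          CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op)
             */
            size_t size = sizeof(struct osd_request) +
                          CEPH_OSD_SLAB_OPS * sizeof(struct osd_req_op);

            /* any request with num_ops <= CEPH_OSD_SLAB_OPS fits one object */
            assert(size == 512 + 2 * 160);
            return 0;
    }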