Diffstat (limited to 'fs/hugetlbfs/inode.c')
-rw-r--r--	fs/hugetlbfs/inode.c	125
1 file changed, 93 insertions(+), 32 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 54de77e78775..28d2753be094 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -11,7 +11,7 @@
#include <linux/thread_info.h>
#include <asm/current.h>
-#include <linux/sched.h> /* remove ASAP */
+#include <linux/sched/signal.h> /* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
@@ -46,13 +46,13 @@ static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
struct hugetlbfs_config {
- kuid_t uid;
- kgid_t gid;
- umode_t mode;
- long max_hpages;
- long nr_inodes;
- struct hstate *hstate;
- long min_hpages;
+ struct hstate *hstate;
+ long max_hpages;
+ long nr_inodes;
+ long min_hpages;
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
};
struct hugetlbfs_inode_info {
@@ -136,17 +136,26 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
vma->vm_ops = &hugetlb_vm_ops;
+ /*
+ * Offset passed to mmap (before page shift) could have been
+ * negative when represented as a (l)off_t.
+ */
+ if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
+ return -EINVAL;
+
if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
return -EINVAL;
vma_len = (loff_t)(vma->vm_end - vma->vm_start);
+ len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ /* check for overflow */
+ if (len < vma_len)
+ return -EINVAL;
inode_lock(inode);
file_accessed(file);
ret = -ENOMEM;
- len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-
if (hugetlb_reserve_pages(inode,
vma->vm_pgoff >> huge_page_order(h),
len >> huge_page_shift(h), vma,
@@ -155,7 +164,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
ret = 0;
if (vma->vm_flags & VM_WRITE && inode->i_size < len)
- inode->i_size = len;
+ i_size_write(inode, len);
out:
inode_unlock(inode);
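
The two checks added to hugetlbfs_file_mmap() above reject a page offset whose byte value would be negative when held in a loff_t, and a total length that wraps past it. A standalone userspace sketch of the same arithmetic (assuming 4 KiB base pages; the math is done in unsigned 64-bit here so it stays well defined outside the kernel, which builds with wrapping signed overflow), with invented names:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Return 0 (reject) when the byte offset, or offset plus mapping length,
 * would overflow a signed 64-bit loff_t; 1 (accept) otherwise. */
static int hugetlb_mmap_range_ok(uint64_t pgoff, uint64_t vma_len)
{
	uint64_t off = pgoff << PAGE_SHIFT;

	if (off >> PAGE_SHIFT != pgoff || off > INT64_MAX)
		return 0;	/* offset would be negative as a loff_t */
	if (off + vma_len < off || off + vma_len > INT64_MAX)
		return 0;	/* len = vma_len + offset overflows */
	return 1;
}

int main(void)
{
	printf("%d\n", hugetlb_mmap_range_ok(1ULL << 51, 2UL << 20)); /* 0: rejected */
	printf("%d\n", hugetlb_mmap_range_ok(0, 2UL << 20));          /* 1: accepted */
	return 0;
}
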
@@ -191,7 +200,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
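
The switch from vma->vm_start to vm_start_gap(vma) makes the free-area check respect the guard gap kept below a grows-down (stack) VMA, so a huge page mapping cannot be placed flush against the stack. A hypothetical userspace illustration, assuming the default gap of 256 base pages; all names here are invented for the sketch, not kernel APIs:

#include <stdio.h>

#define GUARD_GAP (256UL << 12)	/* default stack guard gap: 256 x 4 KiB pages */

struct vma_sketch {
	unsigned long vm_start;
	int grows_down;
};

/* Lowest address a new mapping may reach below this VMA. */
static unsigned long vma_start_gap(const struct vma_sketch *vma)
{
	unsigned long start = vma->vm_start;

	if (vma->grows_down && start > GUARD_GAP)
		start -= GUARD_GAP;
	return start;
}

int main(void)
{
	struct vma_sketch stack = { 0x7ffffffde000UL, 1 };
	unsigned long addr = 0x7ffffff00000UL, len = 0xde000UL;

	/* addr + len reaches vm_start exactly: fine against vm_start, but it
	 * lands inside the guard gap reserved below the stack. */
	printf("vs vm_start:     %s\n",
	       addr + len <= stack.vm_start ? "fits" : "rejected");
	printf("vs vm_start_gap: %s\n",
	       addr + len <= vma_start_gap(&stack) ? "fits" : "rejected");
	return 0;
}
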
@@ -695,14 +704,11 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
inode = new_inode(sb);
if (inode) {
- struct hugetlbfs_inode_info *info;
inode->i_ino = get_next_ino();
inode->i_mode = S_IFDIR | config->mode;
inode->i_uid = config->uid;
inode->i_gid = config->gid;
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
- info = HUGETLBFS_I(inode);
- mpol_shared_policy_init(&info->policy, NULL);
inode->i_op = &hugetlbfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
@@ -733,7 +739,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
inode = new_inode(sb);
if (inode) {
- struct hugetlbfs_inode_info *info;
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
@@ -741,15 +746,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
inode->i_mapping->a_ops = &hugetlbfs_aops;
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
inode->i_mapping->private_data = resv_map;
- info = HUGETLBFS_I(inode);
- /*
- * The policy is initialized here even if we are creating a
- * private inode because initialization simply creates an
- * an empty rb tree and calls rwlock_init(), later when we
- * call mpol_free_shared_policy() it will just return because
- * the rb tree will still be empty.
- */
- mpol_shared_policy_init(&info->policy, NULL);
switch (mode & S_IFMT) {
default:
init_special_inode(inode, mode, dev);
@@ -855,6 +851,56 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
return MIGRATEPAGE_SUCCESS;
}
+static int hugetlbfs_error_remove_page(struct address_space *mapping,
+ struct page *page)
+{
+ struct inode *inode = mapping->host;
+
+ remove_huge_page(page);
+ hugetlb_fix_reserve_counts(inode);
+ return 0;
+}
+
+/*
+ * Display the mount options in /proc/mounts.
+ */
+static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
+ struct hugepage_subpool *spool = sbinfo->spool;
+ unsigned long hpage_size = huge_page_size(sbinfo->hstate);
+ unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
+ char mod;
+
+ if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
+ seq_printf(m, ",uid=%u",
+ from_kuid_munged(&init_user_ns, sbinfo->uid));
+ if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
+ seq_printf(m, ",gid=%u",
+ from_kgid_munged(&init_user_ns, sbinfo->gid));
+ if (sbinfo->mode != 0755)
+ seq_printf(m, ",mode=%o", sbinfo->mode);
+ if (sbinfo->max_inodes != -1)
+ seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);
+
+ hpage_size /= 1024;
+ mod = 'K';
+ if (hpage_size >= 1024) {
+ hpage_size /= 1024;
+ mod = 'M';
+ }
+ seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
+ if (spool) {
+ if (spool->max_hpages != -1)
+ seq_printf(m, ",size=%llu",
+ (unsigned long long)spool->max_hpages << hpage_shift);
+ if (spool->min_hpages != -1)
+ seq_printf(m, ",min_size=%llu",
+ (unsigned long long)spool->min_hpages << hpage_shift);
+ }
+ return 0;
+}
+
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
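
With ->show_options wired up (and save_mount_options() dropped further down), the hugetlbfs part of a /proc/mounts line is rebuilt from hugetlbfs_sb_info instead of being echoed from a saved option string, and only values differing from the defaults are printed. For a hypothetical mount with uid=1000, mode=0700, a 2 MiB hstate and size=1G, the entry would look roughly like:

	hugetlbfs /mnt/huge hugetlbfs rw,relatime,uid=1000,mode=700,pagesize=2M,size=1073741824 0 0

(The rw,relatime part comes from the generic mount flags, not from the new helper.)
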
@@ -937,6 +983,18 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
hugetlbfs_inc_free_inodes(sbinfo);
return NULL;
}
+
+ /*
+ * Any time after allocation, hugetlbfs_destroy_inode can be called
+ * for the inode. mpol_free_shared_policy is unconditionally called
+ * as part of hugetlbfs_destroy_inode. So, initialize policy here
+ * in case of a quick call to destroy.
+ *
+ * Note that the policy is initialized even if we are creating a
+ * private inode. This simplifies hugetlbfs_destroy_inode.
+ */
+ mpol_shared_policy_init(&p->policy, NULL);
+
return &p->vfs_inode;
}
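
The comment above captures an init/teardown symmetry: because hugetlbfs_destroy_inode() frees the shared policy unconditionally, hugetlbfs_alloc_inode() must always initialize it, even for inodes that never use it. A minimal userspace sketch of that pattern, with invented names rather than the real mempolicy API:

#include <stdlib.h>

struct policy_sketch { void *tree; };		/* stands in for the shared policy */
struct inode_sketch  { struct policy_sketch policy; };

static void policy_init(struct policy_sketch *p) { p->tree = NULL; }
static void policy_free(struct policy_sketch *p) { free(p->tree); p->tree = NULL; }

static struct inode_sketch *inode_alloc(void)
{
	struct inode_sketch *i = malloc(sizeof(*i));

	if (!i)
		return NULL;
	policy_init(&i->policy);	/* always, so inode_destroy() is safe */
	return i;
}

static void inode_destroy(struct inode_sketch *i)
{
	policy_free(&i->policy);	/* unconditional, like destroy_inode */
	free(i);
}

int main(void)
{
	/* Destroying right after allocation must not touch uninitialized state. */
	struct inode_sketch *i = inode_alloc();

	if (i)
		inode_destroy(i);
	return 0;
}
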
@@ -958,6 +1016,7 @@ static const struct address_space_operations hugetlbfs_aops = {
.write_end = hugetlbfs_write_end,
.set_page_dirty = hugetlbfs_set_page_dirty,
.migratepage = hugetlbfs_migrate_page,
+ .error_remove_page = hugetlbfs_error_remove_page,
};
@@ -1000,19 +1059,19 @@ static const struct super_operations hugetlbfs_ops = {
.evict_inode = hugetlbfs_evict_inode,
.statfs = hugetlbfs_statfs,
.put_super = hugetlbfs_put_super,
- .show_options = generic_show_options,
+ .show_options = hugetlbfs_show_options,
};
-enum { NO_SIZE, SIZE_STD, SIZE_PERCENT };
+enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };
/*
* Convert size option passed from command line to number of huge pages
* in the pool specified by hstate. Size option could be in bytes
* (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
*/
-static long long
+static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
- int val_type)
+ enum hugetlbfs_size_type val_type)
{
if (val_type == NO_SIZE)
return -1;
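
A userspace sketch of the conversion described in the comment, assuming 2 MiB huge pages (huge_page_shift() == 21); it mirrors the logic but is not the kernel helper itself:

#include <stdio.h>

enum size_type { NO_SZ, SZ_STD, SZ_PERCENT };

static long size_to_hpages(unsigned long long size_opt, enum size_type type,
			   unsigned int hpage_shift, unsigned long pool_pages)
{
	if (type == NO_SZ)
		return -1;

	if (type == SZ_PERCENT) {
		/* percentage of the pool, rounded down to whole huge pages */
		size_opt <<= hpage_shift;
		size_opt *= pool_pages;
		size_opt /= 100;
	}
	return size_opt >> hpage_shift;
}

int main(void)
{
	/* size=1G -> 512 huge pages; 25% of a 1024-page pool -> 256 */
	printf("%ld\n", size_to_hpages(1ULL << 30, SZ_STD, 21, 0));
	printf("%ld\n", size_to_hpages(25, SZ_PERCENT, 21, 1024));
	return 0;
}
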
@@ -1034,7 +1093,7 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
substring_t args[MAX_OPT_ARGS];
int option;
unsigned long long max_size_opt = 0, min_size_opt = 0;
- int max_val_type = NO_SIZE, min_val_type = NO_SIZE;
+ enum hugetlbfs_size_type max_val_type = NO_SIZE, min_val_type = NO_SIZE;
if (!options)
return 0;
@@ -1148,8 +1207,6 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
struct hugetlbfs_config config;
struct hugetlbfs_sb_info *sbinfo;
- save_mount_options(sb, data);
-
config.max_hpages = -1; /* No limit on size by default */
config.nr_inodes = -1; /* No limit on number of inodes by default */
config.uid = current_fsuid();
@@ -1170,6 +1227,10 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
sbinfo->max_inodes = config.nr_inodes;
sbinfo->free_inodes = config.nr_inodes;
sbinfo->spool = NULL;
+ sbinfo->uid = config.uid;
+ sbinfo->gid = config.gid;
+ sbinfo->mode = config.mode;
+
/*
* Allocate and initialize subpool if maximum or minimum size is
* specified. Any needed reservations (for minimim size) are taken