Diffstat (limited to 'fs/ceph')
-rw-r--r--  fs/ceph/addr.c       | 22
-rw-r--r--  fs/ceph/caps.c       | 63
-rw-r--r--  fs/ceph/crypto.h     |  2
-rw-r--r--  fs/ceph/debugfs.c    |  2
-rw-r--r--  fs/ceph/dir.c        |  4
-rw-r--r--  fs/ceph/export.c     | 12
-rw-r--r--  fs/ceph/file.c       | 77
-rw-r--r--  fs/ceph/inode.c      |  2
-rw-r--r--  fs/ceph/mds_client.c | 36
-rw-r--r--  fs/ceph/mds_client.h |  2
-rw-r--r--  fs/ceph/super.c      | 14
-rw-r--r--  fs/ceph/super.h      |  3
-rw-r--r--  fs/ceph/xattr.c      |  2
13 files changed, 116 insertions(+), 125 deletions(-)
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index c2a9e2cc03de..85936f6d2bf7 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1054,7 +1054,9 @@ get_more_pages:
if (!nr_folios && !locked_pages)
break;
for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
- page = &fbatch.folios[i]->page;
+ struct folio *folio = fbatch.folios[i];
+
+ page = &folio->page;
doutc(cl, "? %p idx %lu\n", page, page->index);
if (locked_pages == 0)
lock_page(page); /* first page */
@@ -1081,8 +1083,6 @@ get_more_pages:
continue;
}
if (page_offset(page) >= ceph_wbc.i_size) {
- struct folio *folio = page_folio(page);
-
doutc(cl, "folio at %lu beyond eof %llu\n",
folio->index, ceph_wbc.i_size);
if ((ceph_wbc.size_stable ||
@@ -1098,16 +1098,16 @@ get_more_pages:
unlock_page(page);
break;
}
- if (PageWriteback(page) ||
- PagePrivate2(page) /* [DEPRECATED] */) {
+ if (folio_test_writeback(folio) ||
+ folio_test_private_2(folio) /* [DEPRECATED] */) {
if (wbc->sync_mode == WB_SYNC_NONE) {
- doutc(cl, "%p under writeback\n", page);
- unlock_page(page);
+ doutc(cl, "%p under writeback\n", folio);
+ folio_unlock(folio);
continue;
}
- doutc(cl, "waiting on writeback %p\n", page);
- wait_on_page_writeback(page);
- folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
+ doutc(cl, "waiting on writeback %p\n", folio);
+ folio_wait_writeback(folio);
+ folio_wait_private_2(folio); /* [DEPRECATED] */
}
if (!clear_page_dirty_for_io(page)) {
@@ -2195,7 +2195,7 @@ int ceph_pool_perm_check(struct inode *inode, int need)
if (ci->i_vino.snap != CEPH_NOSNAP) {
/*
* Pool permission check needs to write to the first object.
- * But for snapshot, head of the first object may have alread
+ * But for snapshot, head of the first object may have already
* been deleted. Skip check to avoid creating orphan object.
*/
return 0;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index bed34fc11c91..a8d8b56cf9d2 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -978,20 +978,6 @@ int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
return 0;
}
-int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
-{
- struct inode *inode = &ci->netfs.inode;
- struct ceph_client *cl = ceph_inode_to_client(inode);
- int ret;
-
- spin_lock(&ci->i_ceph_lock);
- ret = __ceph_caps_revoking_other(ci, NULL, mask);
- spin_unlock(&ci->i_ceph_lock);
- doutc(cl, "%p %llx.%llx %s = %d\n", inode, ceph_vinop(inode),
- ceph_cap_string(mask), ret);
- return ret;
-}
-
int __ceph_caps_used(struct ceph_inode_info *ci)
{
int used = 0;
@@ -2813,7 +2799,7 @@ void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
* requested from the MDS.
*
* Returns 0 if caps were not able to be acquired (yet), 1 if succeed,
- * or a negative error code. There are 3 speical error codes:
+ * or a negative error code. There are 3 special error codes:
* -EAGAIN: need to sleep but non-blocking is specified
* -EFBIG: ask caller to call check_max_size() and try again.
* -EUCLEAN: ask caller to call ceph_renew_caps() and try again.
@@ -4085,23 +4071,22 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
struct ceph_cap *cap, *tcap, *new_cap = NULL;
struct ceph_inode_info *ci = ceph_inode(inode);
u64 t_cap_id;
- unsigned mseq = le32_to_cpu(ex->migrate_seq);
- unsigned t_seq, t_mseq;
+ u32 t_issue_seq, t_mseq;
int target, issued;
int mds = session->s_mds;
if (ph) {
t_cap_id = le64_to_cpu(ph->cap_id);
- t_seq = le32_to_cpu(ph->seq);
+ t_issue_seq = le32_to_cpu(ph->issue_seq);
t_mseq = le32_to_cpu(ph->mseq);
target = le32_to_cpu(ph->mds);
} else {
- t_cap_id = t_seq = t_mseq = 0;
+ t_cap_id = t_issue_seq = t_mseq = 0;
target = -1;
}
- doutc(cl, "%p %llx.%llx ci %p mds%d mseq %d target %d\n",
- inode, ceph_vinop(inode), ci, mds, mseq, target);
+ doutc(cl, " cap %llx.%llx export to peer %d piseq %u pmseq %u\n",
+ ceph_vinop(inode), target, t_issue_seq, t_mseq);
retry:
down_read(&mdsc->snap_rwsem);
spin_lock(&ci->i_ceph_lock);
@@ -4134,12 +4119,12 @@ retry:
if (tcap) {
/* already have caps from the target */
if (tcap->cap_id == t_cap_id &&
- ceph_seq_cmp(tcap->seq, t_seq) < 0) {
+ ceph_seq_cmp(tcap->seq, t_issue_seq) < 0) {
doutc(cl, " updating import cap %p mds%d\n", tcap,
target);
tcap->cap_id = t_cap_id;
- tcap->seq = t_seq - 1;
- tcap->issue_seq = t_seq - 1;
+ tcap->seq = t_issue_seq - 1;
+ tcap->issue_seq = t_issue_seq - 1;
tcap->issued |= issued;
tcap->implemented |= issued;
if (cap == ci->i_auth_cap) {
@@ -4154,7 +4139,7 @@ retry:
int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
tcap = new_cap;
ceph_add_cap(inode, tsession, t_cap_id, issued, 0,
- t_seq - 1, t_mseq, (u64)-1, flag, &new_cap);
+ t_issue_seq - 1, t_mseq, (u64)-1, flag, &new_cap);
if (!list_empty(&ci->i_cap_flush_list) &&
ci->i_auth_cap == tcap) {
@@ -4228,18 +4213,22 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
u64 realmino = le64_to_cpu(im->realm);
u64 cap_id = le64_to_cpu(im->cap_id);
u64 p_cap_id;
+ u32 piseq = 0;
+ u32 pmseq = 0;
int peer;
if (ph) {
p_cap_id = le64_to_cpu(ph->cap_id);
peer = le32_to_cpu(ph->mds);
+ piseq = le32_to_cpu(ph->issue_seq);
+ pmseq = le32_to_cpu(ph->mseq);
} else {
p_cap_id = 0;
peer = -1;
}
- doutc(cl, "%p %llx.%llx ci %p mds%d mseq %d peer %d\n",
- inode, ceph_vinop(inode), ci, mds, mseq, peer);
+ doutc(cl, " cap %llx.%llx import from peer %d piseq %u pmseq %u\n",
+ ceph_vinop(inode), peer, piseq, pmseq);
retry:
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
@@ -4268,15 +4257,13 @@ retry:
doutc(cl, " remove export cap %p mds%d flags %d\n",
ocap, peer, ph->flags);
if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
- (ocap->seq != le32_to_cpu(ph->seq) ||
- ocap->mseq != le32_to_cpu(ph->mseq))) {
+ (ocap->seq != piseq ||
+ ocap->mseq != pmseq)) {
pr_err_ratelimited_client(cl, "mismatched seq/mseq: "
"%p %llx.%llx mds%d seq %d mseq %d"
" importer mds%d has peer seq %d mseq %d\n",
inode, ceph_vinop(inode), peer,
- ocap->seq, ocap->mseq, mds,
- le32_to_cpu(ph->seq),
- le32_to_cpu(ph->mseq));
+ ocap->seq, ocap->mseq, mds, piseq, pmseq);
}
ceph_remove_cap(mdsc, ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
}
@@ -4350,7 +4337,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
struct ceph_snap_realm *realm = NULL;
int op;
int msg_version = le16_to_cpu(msg->hdr.version);
- u32 seq, mseq;
+ u32 seq, mseq, issue_seq;
struct ceph_vino vino;
void *snaptrace;
size_t snaptrace_len;
@@ -4360,8 +4347,6 @@ void ceph_handle_caps(struct ceph_mds_session *session,
bool close_sessions = false;
bool do_cap_release = false;
- doutc(cl, "from mds%d\n", session->s_mds);
-
if (!ceph_inc_mds_stopping_blocker(mdsc, session))
return;
@@ -4375,6 +4360,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
vino.snap = CEPH_NOSNAP;
seq = le32_to_cpu(h->seq);
mseq = le32_to_cpu(h->migrate_seq);
+ issue_seq = le32_to_cpu(h->issue_seq);
snaptrace = h + 1;
snaptrace_len = le32_to_cpu(h->snap_trace_len);
@@ -4462,12 +4448,11 @@ void ceph_handle_caps(struct ceph_mds_session *session,
/* lookup ino */
inode = ceph_find_inode(mdsc->fsc->sb, vino);
- doutc(cl, " op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op),
- vino.ino, vino.snap, inode);
+ doutc(cl, " caps mds%d op %s ino %llx.%llx inode %p seq %u iseq %u mseq %u\n",
+ session->s_mds, ceph_cap_op_name(op), vino.ino, vino.snap, inode,
+ seq, issue_seq, mseq);
mutex_lock(&session->s_mutex);
- doutc(cl, " mds%d seq %lld cap seq %u\n", session->s_mds,
- session->s_seq, (unsigned)seq);
if (!inode) {
doutc(cl, " i don't have ino %llx\n", vino.ino);
diff --git a/fs/ceph/crypto.h b/fs/ceph/crypto.h
index 47e0c319fc68..d0768239a1c9 100644
--- a/fs/ceph/crypto.h
+++ b/fs/ceph/crypto.h
@@ -27,7 +27,7 @@ struct ceph_fname {
};
/*
- * Header for the crypted file when truncating the size, this
+ * Header for the encrypted file when truncating the size, this
* will be sent to MDS, and the MDS will update the encrypted
* last block and then truncate the size.
*/
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 24c08078f5aa..fdf9dc15eafa 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -357,7 +357,7 @@ static int status_show(struct seq_file *s, void *p)
seq_printf(s, "instance: %s.%lld %s/%u\n", ENTITY_NAME(inst->name),
ceph_pr_addr(client_addr), le32_to_cpu(client_addr->nonce));
- seq_printf(s, "blocklisted: %s\n", fsc->blocklisted ? "true" : "false");
+ seq_printf(s, "blocklisted: %s\n", str_true_false(fsc->blocklisted));
return 0;
}
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 952109292d69..0bf388e07a02 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -207,7 +207,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
dentry = __dcache_find_get_entry(parent, idx + step,
&cache_ctl);
if (!dentry) {
- /* use linar search */
+ /* use linear search */
idx = 0;
break;
}
@@ -659,7 +659,7 @@ static bool need_reset_readdir(struct ceph_dir_file_info *dfi, loff_t new_pos)
return true;
if (is_hash_order(new_pos)) {
/* no need to reset last_name for a forward seek when
- * dentries are sotred in hash order */
+ * dentries are sorted in hash order */
} else if (dfi->frag != fpos_frag(new_pos)) {
return true;
}
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 44451749c544..150076ced937 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -393,9 +393,9 @@ static struct dentry *ceph_get_parent(struct dentry *child)
}
dir = snapdir;
}
- /* If directory has already been deleted, futher get_parent
+ /* If directory has already been deleted, further get_parent
* will fail. Do not mark snapdir dentry as disconnected,
- * this prevent exportfs from doing futher get_parent. */
+ * this prevents exportfs from doing further get_parent. */
if (unlinked)
dn = d_obtain_root(dir);
else
@@ -452,7 +452,13 @@ static int __get_snap_name(struct dentry *parent, char *name,
goto out;
if (ceph_snap(inode) == CEPH_SNAPDIR) {
if (ceph_snap(dir) == CEPH_NOSNAP) {
- strcpy(name, fsc->mount_options->snapdir_name);
+ /*
+ * .get_name() from struct export_operations
+ * assumes that its 'name' parameter is pointing
+ * to a NAME_MAX+1 sized buffer
+ */
+ strscpy(name, fsc->mount_options->snapdir_name,
+ NAME_MAX + 1);
err = 0;
}
goto out;
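The strscpy() change above relies on the export_operations ->get_name() contract that the caller's buffer is NAME_MAX + 1 bytes, so the copy must be bounded and NUL-terminated. A small userspace model of that contract under the same assumption; snapdir_name and get_snapdir_name() are stand-ins rather than kernel code, and snprintf plays the role of strscpy here:

#include <limits.h>
#include <stdio.h>

static const char *snapdir_name = ".snap";  /* default mount option value */

/* 'name' is assumed to point to a NAME_MAX + 1 byte buffer. */
static int get_snapdir_name(char *name)
{
	/* snprintf truncates and always NUL-terminates, like strscpy. */
	if (snprintf(name, NAME_MAX + 1, "%s", snapdir_name) > NAME_MAX)
		return -1;  /* the name would have been truncated */
	return 0;
}

int main(void)
{
	char name[NAME_MAX + 1];

	if (get_snapdir_name(name) == 0)
		printf("snapdir name: %s\n", name);
	return 0;
}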
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 4b8d59ebda00..851d70200c6b 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1066,7 +1066,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
if (ceph_inode_is_shutdown(inode))
return -EIO;
- if (!len)
+ if (!len || !i_size)
return 0;
/*
* flush any page cache pages in this range. this
@@ -1086,7 +1086,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
int num_pages;
size_t page_off;
bool more;
- int idx;
+ int idx = 0;
size_t left;
struct ceph_osd_req_op *op;
u64 read_off = off;
@@ -1116,6 +1116,16 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
len = read_off + read_len - off;
more = len < iov_iter_count(to);
+ op = &req->r_ops[0];
+ if (sparse) {
+ extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
+ ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ break;
+ }
+ }
+
num_pages = calc_pages_for(read_off, read_len);
page_off = offset_in_page(off);
pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
@@ -1127,17 +1137,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
osd_req_op_extent_osd_data_pages(req, 0, pages, read_len,
offset_in_page(read_off),
- false, false);
-
- op = &req->r_ops[0];
- if (sparse) {
- extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
- ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
- if (ret) {
- ceph_osdc_put_request(req);
- break;
- }
- }
+ false, true);
ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
@@ -1160,7 +1160,14 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
else if (ret == -ENOENT)
ret = 0;
- if (ret > 0 && IS_ENCRYPTED(inode)) {
+ if (ret < 0) {
+ ceph_osdc_put_request(req);
+ if (ret == -EBLOCKLISTED)
+ fsc->blocklisted = true;
+ break;
+ }
+
+ if (IS_ENCRYPTED(inode)) {
int fret;
fret = ceph_fscrypt_decrypt_extents(inode, pages,
@@ -1186,10 +1193,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
ret = min_t(ssize_t, fret, len);
}
- ceph_osdc_put_request(req);
-
/* Short read but not EOF? Zero out the remainder. */
- if (ret >= 0 && ret < len && (off + ret < i_size)) {
+ if (ret < len && (off + ret < i_size)) {
int zlen = min(len - ret, i_size - off - ret);
int zoff = page_off + ret;
@@ -1199,13 +1204,11 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
ret += zlen;
}
- idx = 0;
- if (ret <= 0)
- left = 0;
- else if (off + ret > i_size)
- left = i_size - off;
+ if (off + ret > i_size)
+ left = (i_size > off) ? i_size - off : 0;
else
left = ret;
+
while (left > 0) {
size_t plen, copied;
@@ -1221,13 +1224,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
break;
}
}
- ceph_release_page_vector(pages, num_pages);
- if (ret < 0) {
- if (ret == -EBLOCKLISTED)
- fsc->blocklisted = true;
- break;
- }
+ ceph_osdc_put_request(req);
if (off >= i_size || !more)
break;
@@ -1553,6 +1551,16 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
break;
}
+ op = &req->r_ops[0];
+ if (!write && sparse) {
+ extent_cnt = __ceph_sparse_read_ext_count(inode, size);
+ ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ break;
+ }
+ }
+
len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
if (len < 0) {
ceph_osdc_put_request(req);
@@ -1562,6 +1570,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
if (len != size)
osd_req_op_extent_update(req, 0, len);
+ osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
+
/*
* To simplify error handling, allow AIO when IO within i_size
* or IO can be satisfied by single OSD request.
@@ -1593,17 +1603,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
req->r_mtime = mtime;
}
- osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
- op = &req->r_ops[0];
- if (sparse) {
- extent_cnt = __ceph_sparse_read_ext_count(inode, size);
- ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
- if (ret) {
- ceph_osdc_put_request(req);
- break;
- }
- }
-
if (aio_req) {
aio_req->total_len += len;
aio_req->num_reqs++;
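In the __ceph_sync_read() hunks above, the final argument to osd_req_op_extent_osd_data_pages() flips to true so the OSD request takes ownership of the page vector, which is why the explicit ceph_release_page_vector() disappears and a single ceph_osdc_put_request() now cleans up each iteration. A toy userspace model of that ownership transfer; struct toy_request and its helpers are illustrative only:

#include <stdio.h>
#include <stdlib.h>

struct toy_request {
	void *pages;  /* buffer owned by the request once attached */
};

static struct toy_request *toy_request_alloc(void)
{
	return calloc(1, sizeof(struct toy_request));
}

/* Attach the buffer and transfer ownership to the request. */
static void toy_request_attach_pages(struct toy_request *req, void *pages)
{
	req->pages = pages;
}

/* One put releases the request and everything it owns. */
static void toy_request_put(struct toy_request *req)
{
	free(req->pages);
	free(req);
}

int main(void)
{
	struct toy_request *req = toy_request_alloc();
	void *pages = malloc(4096);

	toy_request_attach_pages(req, pages);
	/* ... request would be submitted and waited on here ... */
	toy_request_put(req);  /* no separate release of 'pages' needed */
	return 0;
}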
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 315ef02f9a3f..7dd6c2275085 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -160,7 +160,7 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
}
/*
- * get/constuct snapdir inode for a given directory
+ * get/construct snapdir inode for a given directory
*/
struct inode *ceph_get_snapdir(struct inode *parent)
{
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index c4a5fd94bbbb..785fe489ef4b 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -827,7 +827,7 @@ static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
* And the worst case is that for the none async openc request it will
* successfully open the file if the CDentry hasn't been unlinked yet,
* but later the previous delayed async unlink request will remove the
- * CDenty. That means the just created file is possiblly deleted later
+ * CDentry. That means the just created file is possibly deleted later
* by accident.
*
* We need to wait for the inflight async unlink requests to finish
@@ -1747,14 +1747,6 @@ static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
}
}
-void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session)
-{
- mutex_lock(&mdsc->mutex);
- __open_export_target_sessions(mdsc, session);
- mutex_unlock(&mdsc->mutex);
-}
-
/*
* session caps
*/
@@ -2362,7 +2354,7 @@ again:
item->ino = cpu_to_le64(cap->cap_ino);
item->cap_id = cpu_to_le64(cap->cap_id);
item->migrate_seq = cpu_to_le32(cap->mseq);
- item->seq = cpu_to_le32(cap->issue_seq);
+ item->issue_seq = cpu_to_le32(cap->issue_seq);
msg->front.iov_len += sizeof(*item);
ceph_put_cap(mdsc, cap);
@@ -2808,12 +2800,11 @@ retry:
if (pos < 0) {
/*
- * A rename didn't occur, but somehow we didn't end up where
- * we thought we would. Throw a warning and try again.
+ * The path is longer than PATH_MAX and this function
+ * cannot ever succeed. Creating paths that long is
+ * possible with Ceph, but Linux cannot use them.
*/
- pr_warn_client(cl, "did not end path lookup where expected (pos = %d)\n",
- pos);
- goto retry;
+ return ERR_PTR(-ENAMETOOLONG);
}
*pbase = base;
@@ -3269,7 +3260,7 @@ static int __prepare_send_request(struct ceph_mds_session *session,
&session->s_features);
/*
- * Avoid inifinite retrying after overflow. The client will
+ * Avoid infinite retrying after overflow. The client will
* increase the retry count and if the MDS is old version,
* so we limit to retry at most 256 times.
*/
@@ -3522,7 +3513,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
/*
* For async create we will choose the auth MDS of frag in parent
- * directory to send the request and ususally this works fine, but
+ * directory to send the request and usually this works fine, but
* if the migrated the dirtory to another MDS before it could handle
* it the request will be forwarded.
*
@@ -4033,7 +4024,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
__unregister_request(mdsc, req);
} else if (fwd_seq <= req->r_num_fwd || (uint32_t)fwd_seq >= U32_MAX) {
/*
- * Avoid inifinite retrying after overflow.
+ * Avoid infinite retrying after overflow.
*
* The MDS will increase the fwd count and in client side
* if the num_fwd is less than the one saved in request
@@ -5609,9 +5600,9 @@ void send_flush_mdlog(struct ceph_mds_session *s)
static int ceph_mds_auth_match(struct ceph_mds_client *mdsc,
struct ceph_mds_cap_auth *auth,
+ const struct cred *cred,
char *tpath)
{
- const struct cred *cred = get_current_cred();
u32 caller_uid = from_kuid(&init_user_ns, cred->fsuid);
u32 caller_gid = from_kgid(&init_user_ns, cred->fsgid);
struct ceph_client *cl = mdsc->fsc->client;
@@ -5734,11 +5725,12 @@ int ceph_mds_check_access(struct ceph_mds_client *mdsc, char *tpath, int mask)
for (i = 0; i < mdsc->s_cap_auths_num; i++) {
struct ceph_mds_cap_auth *s = &mdsc->s_cap_auths[i];
- err = ceph_mds_auth_match(mdsc, s, tpath);
+ err = ceph_mds_auth_match(mdsc, s, cred, tpath);
if (err < 0) {
+ put_cred(cred);
return err;
} else if (err > 0) {
- /* always follow the last auth caps' permision */
+ /* always follow the last auth caps' permission */
root_squash_perms = true;
rw_perms_s = NULL;
if ((mask & MAY_WRITE) && s->writeable &&
@@ -5751,6 +5743,8 @@ int ceph_mds_check_access(struct ceph_mds_client *mdsc, char *tpath, int mask)
}
}
+ put_cred(cred);
+
doutc(cl, "root_squash_perms %d, rw_perms_s %p\n", root_squash_perms,
rw_perms_s);
if (root_squash_perms && rw_perms_s == NULL) {
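The ceph_mds_auth_match()/ceph_mds_check_access() hunks above move credential acquisition to the caller, which must then drop the reference on every return path (note the two new put_cred() calls). A toy sketch of that acquire-once, release-on-all-paths pattern; struct toy_cred and these helpers are illustrative, not the kernel cred API:

#include <stdio.h>
#include <stdlib.h>

struct toy_cred {
	int refcount;
	unsigned int fsuid;
};

static struct toy_cred *get_cred(struct toy_cred *c)
{
	c->refcount++;
	return c;
}

static void put_cred(struct toy_cred *c)
{
	if (--c->refcount == 0)
		free(c);
}

/* The helper only uses the cred; the caller owns the reference
 * (mirrors the new ceph_mds_auth_match() signature). */
static int auth_match(const struct toy_cred *cred, const char *tpath)
{
	if (!tpath)
		return -1;                        /* error */
	return cred->fsuid == 0 || *tpath == '/'; /* 1 = match, 0 = no match */
}

static int check_access(struct toy_cred *current_cred, const char *tpath)
{
	struct toy_cred *cred = get_cred(current_cred); /* take the reference once */
	int err = auth_match(cred, tpath);

	if (err < 0) {
		put_cred(cred);  /* early-return path drops the reference too */
		return err;
	}
	put_cred(cred);          /* normal path */
	return err;
}

int main(void)
{
	struct toy_cred *cred = calloc(1, sizeof(*cred));

	if (!cred)
		return 1;
	cred->refcount = 1;
	cred->fsuid = 1000;
	printf("access: %d\n", check_access(cred, "/mnt/cephfs"));
	put_cred(cred);
	return 0;
}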
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 3dd54587944a..38bb7e0d2d79 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -634,8 +634,6 @@ extern void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc,
extern struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target);
-extern void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session);
extern int ceph_trim_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 73f321b52895..4344e1f11806 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -285,8 +285,10 @@ static int ceph_parse_new_source(const char *dev_name, const char *dev_name_end,
size_t len;
struct ceph_fsid fsid;
struct ceph_parse_opts_ctx *pctx = fc->fs_private;
+ struct ceph_options *opts = pctx->copts;
struct ceph_mount_options *fsopt = pctx->opts;
- char *fsid_start, *fs_name_start;
+ const char *name_start = dev_name;
+ const char *fsid_start, *fs_name_start;
if (*dev_name_end != '=') {
dout("separator '=' missing in source");
@@ -296,8 +298,14 @@ static int ceph_parse_new_source(const char *dev_name, const char *dev_name_end,
fsid_start = strchr(dev_name, '@');
if (!fsid_start)
return invalfc(fc, "missing cluster fsid");
- ++fsid_start; /* start of cluster fsid */
+ len = fsid_start - name_start;
+ kfree(opts->name);
+ opts->name = kstrndup(name_start, len, GFP_KERNEL);
+ if (!opts->name)
+ return -ENOMEM;
+ dout("using %s entity name", opts->name);
+ ++fsid_start; /* start of cluster fsid */
fs_name_start = strchr(fsid_start, '.');
if (!fs_name_start)
return invalfc(fc, "missing file system name");
@@ -423,6 +431,8 @@ static int ceph_parse_mount_param(struct fs_context *fc,
switch (token) {
case Opt_snapdirname:
+ if (strlen(param->string) > NAME_MAX)
+ return invalfc(fc, "snapdirname too long");
kfree(fsopt->snapdir_name);
fsopt->snapdir_name = param->string;
param->string = NULL;
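The ceph_parse_new_source() hunk above treats everything before the '@' in a new-style source string as the entity name and duplicates it with kstrndup(). A userspace sketch of that split, assuming a source of the form "name@fsid.fsname=/path"; parse_entity_name() is a stand-in, and only the before-'@' rule comes from the patch:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *parse_entity_name(const char *dev_name)
{
	const char *fsid_start = strchr(dev_name, '@');

	if (!fsid_start)
		return NULL;  /* missing cluster fsid */
	/* Duplicate the bytes before '@', like kstrndup() in the patch. */
	return strndup(dev_name, (size_t)(fsid_start - dev_name));
}

int main(void)
{
	const char *source = "admin@0df5a76e-4c19-11ee-be56-0242ac120002.cephfs=/";
	char *name = parse_entity_name(source);

	if (name) {
		printf("using %s entity name\n", name);
		free(name);
	}
	return 0;
}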
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 037eac35a9e0..af14ec382246 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -60,7 +60,7 @@
/* max size of osd read request, limited by libceph */
#define CEPH_MAX_READ_SIZE CEPH_MSG_MAX_DATA_LEN
-/* osd has a configurable limitaion of max write size.
+/* osd has a configurable limitation of max write size.
* CEPH_MSG_MAX_DATA_LEN should be small enough. */
#define CEPH_MAX_WRITE_SIZE CEPH_MSG_MAX_DATA_LEN
#define CEPH_RASIZE_DEFAULT (8192*1024) /* max readahead */
@@ -796,7 +796,6 @@ extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
extern int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
struct ceph_cap *ocap, int mask);
-extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask);
extern int __ceph_caps_used(struct ceph_inode_info *ci);
static inline bool __ceph_is_file_opened(struct ceph_inode_info *ci)
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index e066a556eccb..1a9f12204666 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -899,7 +899,7 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
}
/*
- * If there are dirty xattrs, reencode xattrs into the prealloc_blob
+ * If there are dirty xattrs, re-encode xattrs into the prealloc_blob
* and swap into place. It returns the old i_xattrs.blob (or NULL) so
* that it can be freed by the caller as the i_ceph_lock is likely to be
* held.