Lines Matching +full:foo +full:- +full:queue
1 // SPDX-License-Identifier: GPL-2.0
28 #define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)
90 ceph_decode_64_safe(p, end, info->max_bytes, bad); in parse_reply_info_quota()
91 ceph_decode_64_safe(p, end, info->max_files, bad); in parse_reply_info_quota()
95 return -EIO; in parse_reply_info_quota()
108 if (features == (u64)-1) { in parse_reply_info_in()
123 info->in = *p; in parse_reply_info_in()
125 sizeof(*info->in->fragtree.splits) * in parse_reply_info_in()
126 le32_to_cpu(info->in->fragtree.nsplits); in parse_reply_info_in()
128 ceph_decode_32_safe(p, end, info->symlink_len, bad); in parse_reply_info_in()
129 ceph_decode_need(p, end, info->symlink_len, bad); in parse_reply_info_in()
130 info->symlink = *p; in parse_reply_info_in()
131 *p += info->symlink_len; in parse_reply_info_in()
133 ceph_decode_copy_safe(p, end, &info->dir_layout, in parse_reply_info_in()
134 sizeof(info->dir_layout), bad); in parse_reply_info_in()
135 ceph_decode_32_safe(p, end, info->xattr_len, bad); in parse_reply_info_in()
136 ceph_decode_need(p, end, info->xattr_len, bad); in parse_reply_info_in()
137 info->xattr_data = *p; in parse_reply_info_in()
138 *p += info->xattr_len; in parse_reply_info_in()
140 if (features == (u64)-1) { in parse_reply_info_in()
142 ceph_decode_64_safe(p, end, info->inline_version, bad); in parse_reply_info_in()
143 ceph_decode_32_safe(p, end, info->inline_len, bad); in parse_reply_info_in()
144 ceph_decode_need(p, end, info->inline_len, bad); in parse_reply_info_in()
145 info->inline_data = *p; in parse_reply_info_in()
146 *p += info->inline_len; in parse_reply_info_in()
152 ceph_decode_32_safe(p, end, info->pool_ns_len, bad); in parse_reply_info_in()
153 if (info->pool_ns_len > 0) { in parse_reply_info_in()
154 ceph_decode_need(p, end, info->pool_ns_len, bad); in parse_reply_info_in()
155 info->pool_ns_data = *p; in parse_reply_info_in()
156 *p += info->pool_ns_len; in parse_reply_info_in()
160 ceph_decode_need(p, end, sizeof(info->btime), bad); in parse_reply_info_in()
161 ceph_decode_copy(p, &info->btime, sizeof(info->btime)); in parse_reply_info_in()
164 ceph_decode_64_safe(p, end, info->change_attr, bad); in parse_reply_info_in()
168 ceph_decode_32_safe(p, end, info->dir_pin, bad); in parse_reply_info_in()
170 info->dir_pin = -ENODATA; in parse_reply_info_in()
175 ceph_decode_need(p, end, sizeof(info->snap_btime), bad); in parse_reply_info_in()
176 ceph_decode_copy(p, &info->snap_btime, in parse_reply_info_in()
177 sizeof(info->snap_btime)); in parse_reply_info_in()
179 memset(&info->snap_btime, 0, sizeof(info->snap_btime)); in parse_reply_info_in()
184 ceph_decode_64_safe(p, end, info->rsnaps, bad); in parse_reply_info_in()
186 info->rsnaps = 0; in parse_reply_info_in()
194 while (alen--) { in parse_reply_info_in()
206 /* fscrypt flag -- ignore */ in parse_reply_info_in()
210 info->fscrypt_auth = NULL; in parse_reply_info_in()
211 info->fscrypt_auth_len = 0; in parse_reply_info_in()
212 info->fscrypt_file = NULL; in parse_reply_info_in()
213 info->fscrypt_file_len = 0; in parse_reply_info_in()
215 ceph_decode_32_safe(p, end, info->fscrypt_auth_len, bad); in parse_reply_info_in()
216 if (info->fscrypt_auth_len) { in parse_reply_info_in()
217 info->fscrypt_auth = kmalloc(info->fscrypt_auth_len, in parse_reply_info_in()
219 if (!info->fscrypt_auth) in parse_reply_info_in()
220 return -ENOMEM; in parse_reply_info_in()
221 ceph_decode_copy_safe(p, end, info->fscrypt_auth, in parse_reply_info_in()
222 info->fscrypt_auth_len, bad); in parse_reply_info_in()
224 ceph_decode_32_safe(p, end, info->fscrypt_file_len, bad); in parse_reply_info_in()
225 if (info->fscrypt_file_len) { in parse_reply_info_in()
226 info->fscrypt_file = kmalloc(info->fscrypt_file_len, in parse_reply_info_in()
228 if (!info->fscrypt_file) in parse_reply_info_in()
229 return -ENOMEM; in parse_reply_info_in()
230 ceph_decode_copy_safe(p, end, info->fscrypt_file, in parse_reply_info_in()
231 info->fscrypt_file_len, bad); in parse_reply_info_in()
238 ceph_decode_64_safe(p, end, info->inline_version, bad); in parse_reply_info_in()
239 ceph_decode_32_safe(p, end, info->inline_len, bad); in parse_reply_info_in()
240 ceph_decode_need(p, end, info->inline_len, bad); in parse_reply_info_in()
241 info->inline_data = *p; in parse_reply_info_in()
242 *p += info->inline_len; in parse_reply_info_in()
244 info->inline_version = CEPH_INLINE_NONE; in parse_reply_info_in()
251 info->max_bytes = 0; in parse_reply_info_in()
252 info->max_files = 0; in parse_reply_info_in()
255 info->pool_ns_len = 0; in parse_reply_info_in()
256 info->pool_ns_data = NULL; in parse_reply_info_in()
258 ceph_decode_32_safe(p, end, info->pool_ns_len, bad); in parse_reply_info_in()
259 if (info->pool_ns_len > 0) { in parse_reply_info_in()
260 ceph_decode_need(p, end, info->pool_ns_len, bad); in parse_reply_info_in()
261 info->pool_ns_data = *p; in parse_reply_info_in()
262 *p += info->pool_ns_len; in parse_reply_info_in()
267 ceph_decode_need(p, end, sizeof(info->btime), bad); in parse_reply_info_in()
268 ceph_decode_copy(p, &info->btime, sizeof(info->btime)); in parse_reply_info_in()
269 ceph_decode_64_safe(p, end, info->change_attr, bad); in parse_reply_info_in()
272 info->dir_pin = -ENODATA; in parse_reply_info_in()
273 /* info->snap_btime and info->rsnaps remain zero */ in parse_reply_info_in()
277 err = -EIO; in parse_reply_info_in()
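
The parse_reply_info_in() lines above all lean on one decoding discipline: every ceph_decode_*_safe() checks that enough bytes remain between *p and end before reading, advances the cursor, and bails out to the bad label (the -EIO return) on truncation; variable-length fields such as the symlink and the xattr blob are then borrowed in place rather than copied. A minimal userspace sketch of that pattern, with invented helper names rather than the kernel decoders:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Return 0 and advance *p on success, -1 if fewer than 4 bytes remain
 * (the equivalent of the kernel's "goto bad"). */
static int decode_32_safe(const uint8_t **p, const uint8_t *end, uint32_t *out)
{
	if (end - *p < 4)
		return -1;
	memcpy(out, *p, 4);	/* assume a little-endian host for brevity */
	*p += 4;
	return 0;
}

int main(void)
{
	const uint8_t buf[] = { 0x05, 0, 0, 0, 'h', 'e', 'l', 'l', 'o' };
	const uint8_t *p = buf, *end = buf + sizeof(buf);
	uint32_t len;

	if (decode_32_safe(&p, end, &len) || (uint32_t)(end - p) < len)
		return 1;	/* truncated reply: the -EIO path */
	/* borrow the payload in place, like info->symlink = *p */
	printf("%.*s\n", (int)len, (const char *)p);
	return 0;
}
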
286 if (features == (u64)-1) { in parse_reply_info_dir()
302 *p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist); in parse_reply_info_dir()
305 if (features == (u64)-1) in parse_reply_info_dir()
309 return -EIO; in parse_reply_info_dir()
320 if (features == (u64)-1) { in parse_reply_info_lease()
343 if (features == (u64)-1) { in parse_reply_info_lease()
357 return -EIO; in parse_reply_info_lease()
370 if (info->head->is_dentry) { in parse_reply_info_trace()
371 err = parse_reply_info_in(p, end, &info->diri, features); in parse_reply_info_trace()
375 err = parse_reply_info_dir(p, end, &info->dirfrag, features); in parse_reply_info_trace()
379 ceph_decode_32_safe(p, end, info->dname_len, bad); in parse_reply_info_trace()
380 ceph_decode_need(p, end, info->dname_len, bad); in parse_reply_info_trace()
381 info->dname = *p; in parse_reply_info_trace()
382 *p += info->dname_len; in parse_reply_info_trace()
384 err = parse_reply_info_lease(p, end, &info->dlease, features, in parse_reply_info_trace()
385 &info->altname_len, &info->altname); in parse_reply_info_trace()
390 if (info->head->is_target) { in parse_reply_info_trace()
391 err = parse_reply_info_in(p, end, &info->targeti, features); in parse_reply_info_trace()
401 err = -EIO; in parse_reply_info_trace()
414 struct ceph_mds_reply_info_parsed *info = &req->r_reply_info; in parse_reply_info_readdir()
415 struct ceph_client *cl = req->r_mdsc->fsc->client; in parse_reply_info_readdir()
419 err = parse_reply_info_dir(p, end, &info->dir_dir, features); in parse_reply_info_readdir()
427 info->dir_end = !!(flags & CEPH_READDIR_FRAG_END); in parse_reply_info_readdir()
428 info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE); in parse_reply_info_readdir()
429 info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER); in parse_reply_info_readdir()
430 info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH); in parse_reply_info_readdir()
435 BUG_ON(!info->dir_entries); in parse_reply_info_readdir()
436 if ((unsigned long)(info->dir_entries + num) > in parse_reply_info_readdir()
437 (unsigned long)info->dir_entries + info->dir_buf_size) { in parse_reply_info_readdir()
443 info->dir_nr = num; in parse_reply_info_readdir()
445 struct inode *inode = d_inode(req->r_dentry); in parse_reply_info_readdir()
447 struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i; in parse_reply_info_readdir()
461 if (info->hash_order) in parse_reply_info_readdir()
462 rde->raw_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, in parse_reply_info_readdir()
466 err = parse_reply_info_lease(p, end, &rde->lease, features, in parse_reply_info_readdir()
495 * to do the base64_decode in-place. It's in parse_reply_info_readdir()
504                  * used to do the decryption in-place.                 in parse_reply_info_readdir()
510 * This will do the decryption only in-place in parse_reply_info_readdir()
516 rde->is_nokey = false; in parse_reply_info_readdir()
517 err = ceph_fname_to_usr(&fname, &tname, &oname, &rde->is_nokey); in parse_reply_info_readdir()
523 rde->name = oname.name; in parse_reply_info_readdir()
524 rde->name_len = oname.len; in parse_reply_info_readdir()
527 err = parse_reply_info_in(p, end, &rde->inode, features); in parse_reply_info_readdir()
531 rde->offset = 0; in parse_reply_info_readdir()
533 num--; in parse_reply_info_readdir()
542 err = -EIO; in parse_reply_info_readdir()
555 if (*p + sizeof(*info->filelock_reply) > end) in parse_reply_info_filelock()
558 info->filelock_reply = *p; in parse_reply_info_filelock()
564 return -EIO; in parse_reply_info_filelock()
575 struct ceph_client *cl = s->s_mdsc->fsc->client; in ceph_parse_deleg_inos()
580 while (sets--) { in ceph_parse_deleg_inos()
593 while (len--) { in ceph_parse_deleg_inos()
594 int err = xa_insert(&s->s_delegated_inos, start++, in ceph_parse_deleg_inos()
598 doutc(cl, "added delegated inode 0x%llx\n", start - 1); in ceph_parse_deleg_inos()
599 } else if (err == -EBUSY) { in ceph_parse_deleg_inos()
602 start - 1); in ceph_parse_deleg_inos()
610 return -EIO; in ceph_parse_deleg_inos()
618 xa_for_each(&s->s_delegated_inos, ino, val) { in ceph_get_deleg_ino()
619 val = xa_erase(&s->s_delegated_inos, ino); in ceph_get_deleg_ino()
628 return xa_insert(&s->s_delegated_inos, ino, DELEGATED_INO_AVAILABLE, in ceph_restore_deleg_ino()
633 * FIXME: xarrays can't handle 64-bit indexes on a 32-bit arch. For now, just
647 return -EIO; in ceph_parse_deleg_inos()
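
ceph_parse_deleg_inos() reads `sets` ranges off the wire, each a (start, len) pair, and feeds every inode number in the range to xa_insert(); an -EBUSY result means the MDS delegated the same ino more than once. A hedged userspace sketch of just the range expansion, with a callback standing in for the session xarray:

#include <stdint.h>
#include <stdio.h>

/* Expand (start, len) ranges the way ceph_parse_deleg_inos() feeds
 * xa_insert(); the callback stands in for the xarray, and a duplicate
 * ino would be the -EBUSY case in the real code. */
static void expand_ranges(const uint64_t *pairs, uint32_t sets,
			  void (*add)(uint64_t ino))
{
	while (sets--) {
		uint64_t start = *pairs++;
		uint64_t len = *pairs++;

		while (len--)
			add(start++);
	}
}

static void add_ino(uint64_t ino)
{
	printf("added delegated inode 0x%llx\n", (unsigned long long)ino);
}

int main(void)
{
	const uint64_t pairs[] = { 0x1000, 3, 0x2000, 1 };

	expand_ranges(pairs, 2, add_ino);
	return 0;
}
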
670 if (features == (u64)-1 || in parse_reply_info_create()
674 info->has_create_ino = false; in parse_reply_info_create()
675 } else if (test_bit(CEPHFS_FEATURE_DELEG_INO, &s->s_features)) { in parse_reply_info_create()
676 info->has_create_ino = true; in parse_reply_info_create()
679 ceph_decode_64_safe(p, end, info->ino, bad); in parse_reply_info_create()
685 ceph_decode_64_safe(p, end, info->ino, bad); in parse_reply_info_create()
686 info->has_create_ino = true; in parse_reply_info_create()
697 return -EIO; in parse_reply_info_create()
712 if (value_len == end - *p) { in parse_reply_info_getvxattr()
713 info->xattr_info.xattr_value = *p; in parse_reply_info_getvxattr()
714 info->xattr_info.xattr_value_len = value_len; in parse_reply_info_getvxattr()
719 return -EIO; in parse_reply_info_getvxattr()
729 struct ceph_mds_reply_info_parsed *info = &req->r_reply_info; in parse_reply_info_extra()
730 u32 op = le32_to_cpu(info->head->op); in parse_reply_info_extra()
741 return -EIO; in parse_reply_info_extra()
750 struct ceph_mds_reply_info_parsed *info = &req->r_reply_info; in parse_reply_info()
751 struct ceph_client *cl = s->s_mdsc->fsc->client; in parse_reply_info()
756 info->head = msg->front.iov_base; in parse_reply_info()
757 p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head); in parse_reply_info()
758 end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head); in parse_reply_info()
780 info->snapblob_len = len; in parse_reply_info()
781 info->snapblob = p; in parse_reply_info()
789 err = -EIO; in parse_reply_info()
800 kfree(info->diri.fscrypt_auth); in destroy_reply_info()
801 kfree(info->diri.fscrypt_file); in destroy_reply_info()
802 kfree(info->targeti.fscrypt_auth); in destroy_reply_info()
803 kfree(info->targeti.fscrypt_file); in destroy_reply_info()
804 if (!info->dir_entries) in destroy_reply_info()
807 for (i = 0; i < info->dir_nr; i++) { in destroy_reply_info()
808 struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i; in destroy_reply_info()
810 kfree(rde->inode.fscrypt_auth); in destroy_reply_info()
811 kfree(rde->inode.fscrypt_file); in destroy_reply_info()
813 free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size)); in destroy_reply_info()
824 * requests with -EEXIST if the inflight async unlink request was
838 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb); in ceph_wait_on_conflict_unlink()
839 struct ceph_client *cl = fsc->client; in ceph_wait_on_conflict_unlink()
840 struct dentry *pdentry = dentry->d_parent; in ceph_wait_on_conflict_unlink()
844 u32 hash = dentry->d_name.hash; in ceph_wait_on_conflict_unlink()
847 dname.name = dentry->d_name.name; in ceph_wait_on_conflict_unlink()
848 dname.len = dentry->d_name.len; in ceph_wait_on_conflict_unlink()
851 hash_for_each_possible_rcu(fsc->async_unlink_conflict, di, in ceph_wait_on_conflict_unlink()
853 udentry = di->dentry; in ceph_wait_on_conflict_unlink()
855 spin_lock(&udentry->d_lock); in ceph_wait_on_conflict_unlink()
856 if (udentry->d_name.hash != hash) in ceph_wait_on_conflict_unlink()
858 if (unlikely(udentry->d_parent != pdentry)) in ceph_wait_on_conflict_unlink()
860 if (!hash_hashed(&di->hnode)) in ceph_wait_on_conflict_unlink()
863 if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags)) in ceph_wait_on_conflict_unlink()
871 spin_unlock(&udentry->d_lock); in ceph_wait_on_conflict_unlink()
874 spin_unlock(&udentry->d_lock); in ceph_wait_on_conflict_unlink()
884 err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT, in ceph_wait_on_conflict_unlink()
912 if (refcount_inc_not_zero(&s->s_ref)) in ceph_get_mds_session()
922 if (refcount_dec_and_test(&s->s_ref)) { in ceph_put_mds_session()
923 if (s->s_auth.authorizer) in ceph_put_mds_session()
924 ceph_auth_destroy_authorizer(s->s_auth.authorizer); in ceph_put_mds_session()
925 WARN_ON(mutex_is_locked(&s->s_mutex)); in ceph_put_mds_session()
926 xa_destroy(&s->s_delegated_inos); in ceph_put_mds_session()
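
ceph_get_mds_session() and ceph_put_mds_session() follow the standard kernel refcount protocol: a racing lookup may only take a reference while the count is still non-zero, and whichever caller drops the last reference tears the session down. A small C11-atomics sketch of those two rules (illustrative, not the refcount_t API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct session {
	atomic_int ref;
	/* ... s_auth, delegated inos, etc. */
};

/* Like refcount_inc_not_zero(): refuse to resurrect a dying object. */
static bool session_get(struct session *s)
{
	int old = atomic_load(&s->ref);

	while (old != 0)
		if (atomic_compare_exchange_weak(&s->ref, &old, old + 1))
			return true;
	return false;
}

/* Like refcount_dec_and_test(): the last put tears the object down. */
static void session_put(struct session *s)
{
	if (atomic_fetch_sub(&s->ref, 1) == 1)
		free(s);	/* the real put also destroys the authorizer */
}

int main(void)
{
	struct session *s = malloc(sizeof(*s));

	if (!s)
		return 1;
	atomic_store(&s->ref, 1);	/* ref held by the sessions[] slot */
	if (session_get(s))		/* lookup takes its own ref */
		session_put(s);
	session_put(s);			/* drops the last ref and frees */
	return 0;
}
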
932 * called under mdsc->mutex
937 if (mds >= mdsc->max_sessions || !mdsc->sessions[mds]) in __ceph_lookup_mds_session()
939 return ceph_get_mds_session(mdsc->sessions[mds]); in __ceph_lookup_mds_session()
944 if (mds >= mdsc->max_sessions || !mdsc->sessions[mds]) in __have_session()
953 if (s->s_mds >= mdsc->max_sessions || in __verify_registered_session()
954 mdsc->sessions[s->s_mds] != s) in __verify_registered_session()
955 return -ENOENT; in __verify_registered_session()
961 * called under mdsc->mutex.
966 struct ceph_client *cl = mdsc->fsc->client; in register_session()
969 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) in register_session()
970 return ERR_PTR(-EIO); in register_session()
972 if (mds >= mdsc->mdsmap->possible_max_rank) in register_session()
973 return ERR_PTR(-EINVAL); in register_session()
977 return ERR_PTR(-ENOMEM); in register_session()
979 if (mds >= mdsc->max_sessions) { in register_session()
987 if (mdsc->sessions) { in register_session()
988 memcpy(sa, mdsc->sessions, in register_session()
989 mdsc->max_sessions * sizeof(void *)); in register_session()
990 kfree(mdsc->sessions); in register_session()
992 mdsc->sessions = sa; in register_session()
993 mdsc->max_sessions = newmax; in register_session()
997 s->s_mdsc = mdsc; in register_session()
998 s->s_mds = mds; in register_session()
999 s->s_state = CEPH_MDS_SESSION_NEW; in register_session()
1000 mutex_init(&s->s_mutex); in register_session()
1002 ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr); in register_session()
1004 atomic_set(&s->s_cap_gen, 1); in register_session()
1005 s->s_cap_ttl = jiffies - 1; in register_session()
1007 spin_lock_init(&s->s_cap_lock); in register_session()
1008 INIT_LIST_HEAD(&s->s_caps); in register_session()
1009 refcount_set(&s->s_ref, 1); in register_session()
1010 INIT_LIST_HEAD(&s->s_waiting); in register_session()
1011 INIT_LIST_HEAD(&s->s_unsafe); in register_session()
1012 xa_init(&s->s_delegated_inos); in register_session()
1013 INIT_LIST_HEAD(&s->s_cap_releases); in register_session()
1014 INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work); in register_session()
1016 INIT_LIST_HEAD(&s->s_cap_dirty); in register_session()
1017 INIT_LIST_HEAD(&s->s_cap_flushing); in register_session()
1019 mdsc->sessions[mds] = s; in register_session()
1020 atomic_inc(&mdsc->num_sessions); in register_session()
1021 refcount_inc(&s->s_ref); /* one ref to sessions[], one to caller */ in register_session()
1023 ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds, in register_session()
1024 ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); in register_session()
1030 return ERR_PTR(-ENOMEM); in register_session()
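
When a rank lands beyond max_sessions, register_session() grows the sessions[] array before the memcpy shown above: allocate a larger zeroed array, copy the old slots, free the old array, and publish the new size. A userspace sketch of that grow-copy-swap step; the power-of-two growth policy here is an assumption standing in for the elided newmax computation:

#include <stdlib.h>
#include <string.h>

struct mdsc {
	void **sessions;
	int max_sessions;
};

/* Make sure sessions[] can hold index mds, mirroring the kcalloc +
 * memcpy + kfree sequence in register_session(). */
static int ensure_session_slot(struct mdsc *mdsc, int mds)
{
	int newmax = 1;
	void **sa;

	if (mds < mdsc->max_sessions)
		return 0;

	while (newmax <= mds)
		newmax <<= 1;		/* assumed: grow geometrically */

	sa = calloc(newmax, sizeof(void *));
	if (!sa)
		return -1;		/* the ERR_PTR(-ENOMEM) path */
	if (mdsc->sessions) {
		memcpy(sa, mdsc->sessions,
		       mdsc->max_sessions * sizeof(void *));
		free(mdsc->sessions);
	}
	mdsc->sessions = sa;
	mdsc->max_sessions = newmax;
	return 0;
}

int main(void)
{
	struct mdsc m = { 0 };

	return ensure_session_slot(&m, 5);	/* grows to 8 slots */
}
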
1034 * called under mdsc->mutex
1039 doutc(mdsc->fsc->client, "mds%d %p\n", s->s_mds, s); in __unregister_session()
1040 BUG_ON(mdsc->sessions[s->s_mds] != s); in __unregister_session()
1041 mdsc->sessions[s->s_mds] = NULL; in __unregister_session()
1042 ceph_con_close(&s->s_con); in __unregister_session()
1044 atomic_dec(&mdsc->num_sessions); in __unregister_session()
1050 * should be last request ref, or hold mdsc->mutex
1054 if (req->r_session) { in put_request_session()
1055 ceph_put_mds_session(req->r_session); in put_request_session()
1056 req->r_session = NULL; in put_request_session()
1066 mutex_lock(&mdsc->mutex); in ceph_mdsc_iterate_sessions()
1067 for (mds = 0; mds < mdsc->max_sessions; ++mds) { in ceph_mdsc_iterate_sessions()
1079 mutex_unlock(&mdsc->mutex); in ceph_mdsc_iterate_sessions()
1082 mutex_lock(&mdsc->mutex); in ceph_mdsc_iterate_sessions()
1084 mutex_unlock(&mdsc->mutex); in ceph_mdsc_iterate_sessions()
1093 destroy_reply_info(&req->r_reply_info); in ceph_mdsc_release_request()
1094 if (req->r_request) in ceph_mdsc_release_request()
1095 ceph_msg_put(req->r_request); in ceph_mdsc_release_request()
1096 if (req->r_reply) in ceph_mdsc_release_request()
1097 ceph_msg_put(req->r_reply); in ceph_mdsc_release_request()
1098 if (req->r_inode) { in ceph_mdsc_release_request()
1099 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); in ceph_mdsc_release_request()
1100 iput(req->r_inode); in ceph_mdsc_release_request()
1102 if (req->r_parent) { in ceph_mdsc_release_request()
1103 ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); in ceph_mdsc_release_request()
1104 iput(req->r_parent); in ceph_mdsc_release_request()
1106 iput(req->r_target_inode); in ceph_mdsc_release_request()
1107 iput(req->r_new_inode); in ceph_mdsc_release_request()
1108 if (req->r_dentry) in ceph_mdsc_release_request()
1109 dput(req->r_dentry); in ceph_mdsc_release_request()
1110 if (req->r_old_dentry) in ceph_mdsc_release_request()
1111 dput(req->r_old_dentry); in ceph_mdsc_release_request()
1112 if (req->r_old_dentry_dir) { in ceph_mdsc_release_request()
1119 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir), in ceph_mdsc_release_request()
1121 iput(req->r_old_dentry_dir); in ceph_mdsc_release_request()
1123 kfree(req->r_path1); in ceph_mdsc_release_request()
1124 kfree(req->r_path2); in ceph_mdsc_release_request()
1125 put_cred(req->r_cred); in ceph_mdsc_release_request()
1126 if (req->r_mnt_idmap) in ceph_mdsc_release_request()
1127 mnt_idmap_put(req->r_mnt_idmap); in ceph_mdsc_release_request()
1128 if (req->r_pagelist) in ceph_mdsc_release_request()
1129 ceph_pagelist_release(req->r_pagelist); in ceph_mdsc_release_request()
1130 kfree(req->r_fscrypt_auth); in ceph_mdsc_release_request()
1131 kfree(req->r_altname); in ceph_mdsc_release_request()
1133 ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation); in ceph_mdsc_release_request()
1134 WARN_ON_ONCE(!list_empty(&req->r_wait)); in ceph_mdsc_release_request()
1143 * called under mdsc->mutex. in DEFINE_RB_FUNCS()
1150 req = lookup_request(&mdsc->request_tree, tid); in DEFINE_RB_FUNCS()
1158 * Register an in-flight request, and assign a tid. Link to directory
1161 * Called under mdsc->mutex.
1167 struct ceph_client *cl = mdsc->fsc->client; in __register_request()
1170 req->r_tid = ++mdsc->last_tid; in __register_request()
1171 if (req->r_num_caps) { in __register_request()
1172 ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation, in __register_request()
1173 req->r_num_caps); in __register_request()
1177 /* set req->r_err to fail early from __do_request */ in __register_request()
1178 req->r_err = ret; in __register_request()
1182 doutc(cl, "%p tid %lld\n", req, req->r_tid); in __register_request()
1184 insert_request(&mdsc->request_tree, req); in __register_request()
1186 req->r_cred = get_current_cred(); in __register_request()
1187 if (!req->r_mnt_idmap) in __register_request()
1188 req->r_mnt_idmap = &nop_mnt_idmap; in __register_request()
1190 if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK) in __register_request()
1191 mdsc->oldest_tid = req->r_tid; in __register_request()
1197 req->r_unsafe_dir = dir; in __register_request()
1198 spin_lock(&ci->i_unsafe_lock); in __register_request()
1199 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops); in __register_request()
1200 spin_unlock(&ci->i_unsafe_lock); in __register_request()
1207 doutc(mdsc->fsc->client, "%p tid %lld\n", req, req->r_tid); in __unregister_request()
1210 list_del_init(&req->r_unsafe_item); in __unregister_request()
1212 if (req->r_tid == mdsc->oldest_tid) { in __unregister_request()
1213 struct rb_node *p = rb_next(&req->r_node); in __unregister_request()
1214 mdsc->oldest_tid = 0; in __unregister_request()
1218 if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) { in __unregister_request()
1219 mdsc->oldest_tid = next_req->r_tid; in __unregister_request()
1226 erase_request(&mdsc->request_tree, req); in __unregister_request()
1228 if (req->r_unsafe_dir) { in __unregister_request()
1229 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); in __unregister_request()
1230 spin_lock(&ci->i_unsafe_lock); in __unregister_request()
1231 list_del_init(&req->r_unsafe_dir_item); in __unregister_request()
1232 spin_unlock(&ci->i_unsafe_lock); in __unregister_request()
1234 if (req->r_target_inode && in __unregister_request()
1235 test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { in __unregister_request()
1236 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); in __unregister_request()
1237 spin_lock(&ci->i_unsafe_lock); in __unregister_request()
1238 list_del_init(&req->r_unsafe_target_item); in __unregister_request()
1239 spin_unlock(&ci->i_unsafe_lock); in __unregister_request()
1242 if (req->r_unsafe_dir) { in __unregister_request()
1243 iput(req->r_unsafe_dir); in __unregister_request()
1244 req->r_unsafe_dir = NULL; in __unregister_request()
1247 complete_all(&req->r_safe_completion); in __unregister_request()
1254 * non-snapshot inode. We do this using the rcu_read_lock (which must be held
1267 dentry = dentry->d_parent; in get_nonsnap_parent()
1280 * Called under mdsc->mutex.
1289 int mode = req->r_direct_mode; in __choose_mds()
1290 int mds = -1; in __choose_mds()
1291 u32 hash = req->r_direct_hash; in __choose_mds()
1292 bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags); in __choose_mds()
1293 struct ceph_client *cl = mdsc->fsc->client; in __choose_mds()
1302 if (req->r_resend_mds >= 0 && in __choose_mds()
1303 (__have_session(mdsc, req->r_resend_mds) || in __choose_mds()
1304 ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { in __choose_mds()
1305 doutc(cl, "using resend_mds mds%d\n", req->r_resend_mds); in __choose_mds()
1306 return req->r_resend_mds; in __choose_mds()
1313 if (req->r_inode) { in __choose_mds()
1314 if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) { in __choose_mds()
1315 inode = req->r_inode; in __choose_mds()
1318 /* req->r_dentry is non-null for LSSNAP request */ in __choose_mds()
1320 inode = get_nonsnap_parent(req->r_dentry); in __choose_mds()
1325 } else if (req->r_dentry) { in __choose_mds()
1331 parent = READ_ONCE(req->r_dentry->d_parent); in __choose_mds()
1332 dir = req->r_parent ? : d_inode_rcu(parent); in __choose_mds()
1334 if (!dir || dir->i_sb != mdsc->fsc->sb) { in __choose_mds()
1336 inode = d_inode(req->r_dentry); in __choose_mds()
1347 inode = d_inode(req->r_dentry); in __choose_mds()
1351 hash = ceph_dentry_hash(dir, req->r_dentry); in __choose_mds()
1367 if (is_hash && S_ISDIR(inode->i_mode)) { in __choose_mds()
1383 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= in __choose_mds()
1385 !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds)) in __choose_mds()
1397 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= in __choose_mds()
1399 if (!ceph_mdsmap_is_laggy(mdsc->mdsmap, in __choose_mds()
1408 spin_lock(&ci->i_ceph_lock); in __choose_mds()
1411 cap = ci->i_auth_cap; in __choose_mds()
1412 if (!cap && !RB_EMPTY_ROOT(&ci->i_caps)) in __choose_mds()
1413 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); in __choose_mds()
1415 spin_unlock(&ci->i_ceph_lock); in __choose_mds()
1419 mds = cap->session->s_mds; in __choose_mds()
1422 cap == ci->i_auth_cap ? "auth " : "", cap); in __choose_mds()
1423 spin_unlock(&ci->i_ceph_lock); in __choose_mds()
1432 mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap); in __choose_mds()
1453 h = msg->front.iov_base; in ceph_create_session_msg()
1454 h->op = cpu_to_le32(op); in ceph_create_session_msg()
1455 h->seq = cpu_to_le64(seq); in ceph_create_session_msg()
1461 #define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
1472 return -ERANGE; in encode_supported_features()
1483 return -ERANGE; in encode_supported_features()
1492 #define METRIC_BYTES(cnt) (DIV_ROUND_UP((size_t)metric_bits[cnt - 1] + 1, 64) * 8)
1499 return -ERANGE; in encode_metric_spec()
1509 return -ERANGE; in encode_metric_spec()
1522 return -ERANGE; in encode_metric_spec()
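
FEATURE_BYTES(c) (and METRIC_BYTES likewise) computes the wire size of a bitmask whose highest set bit is feature_bits[c - 1]: take that bit index plus one, round up to whole 64-bit words, and multiply by 8 bytes per word. A standalone check with a hypothetical bit table:

#include <stddef.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Same shape as the kernel macro, with the bit table passed explicitly. */
#define FEATURE_BYTES(bits, c) \
	(DIV_ROUND_UP((size_t)(bits)[(c) - 1] + 1, 64) * 8)

int main(void)
{
	/* hypothetical table of advertised feature bit numbers, ascending */
	static const size_t feature_bits[] = { 0, 1, 5, 17, 65 };

	printf("%zu\n", FEATURE_BYTES(feature_bits, 4));	/* bit 17 ->  8 bytes */
	printf("%zu\n", FEATURE_BYTES(feature_bits, 5));	/* bit 65 -> 16 bytes */
	return 0;
}
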
1545 struct ceph_options *opt = mdsc->fsc->client->options; in create_session_full_msg()
1546 struct ceph_mount_options *fsopt = mdsc->fsc->mount_options; in create_session_full_msg()
1547 struct ceph_client *cl = mdsc->fsc->client; in create_session_full_msg()
1553 {"hostname", mdsc->nodename}, in create_session_full_msg()
1554 {"kernel_version", init_utsname()->release}, in create_session_full_msg()
1555 {"entity_id", opt->name ? : ""}, in create_session_full_msg()
1556 {"root", fsopt->server_path ? : "/"}, in create_session_full_msg()
1590 return ERR_PTR(-ENOMEM); in create_session_full_msg()
1592 p = msg->front.iov_base; in create_session_full_msg()
1593 end = p + msg->front.iov_len; in create_session_full_msg()
1596 h->op = cpu_to_le32(op); in create_session_full_msg()
1597 h->seq = cpu_to_le64(seq); in create_session_full_msg()
1605 msg->hdr.version = cpu_to_le16(7); in create_session_full_msg()
1606 msg->hdr.compat_version = cpu_to_le16(1); in create_session_full_msg()
1614 /* Two length-prefixed strings for each entry in the map */ in create_session_full_msg()
1648 ceph_encode_64(&p, mdsc->oldest_tid); in create_session_full_msg()
1650 msg->front.iov_len = p - msg->front.iov_base; in create_session_full_msg()
1651 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); in create_session_full_msg()
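
Per the comment above, the client-metadata map in the session message is encoded as an entry count followed by two length-prefixed strings per entry (key, then value). A minimal encoder for that layout, with the little-endian writes spelled out (illustrative, not the ceph_encode_* helpers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct kv { const char *key, *val; };

static uint8_t *put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
	return p + 4;
}

/* u32 entry count, then (u32 len, bytes) for each key and each value. */
static size_t encode_metadata(uint8_t *p, const struct kv *map, uint32_t n)
{
	uint8_t *start = p;
	uint32_t i;

	p = put_le32(p, n);
	for (i = 0; i < n; i++) {
		uint32_t klen = (uint32_t)strlen(map[i].key);
		uint32_t vlen = (uint32_t)strlen(map[i].val);

		p = put_le32(p, klen); memcpy(p, map[i].key, klen); p += klen;
		p = put_le32(p, vlen); memcpy(p, map[i].val, vlen); p += vlen;
	}
	return (size_t)(p - start);
}

int main(void)
{
	const struct kv map[] = {
		{ "hostname",  "client1" },
		{ "entity_id", "admin"   },
	};
	uint8_t buf[128];

	printf("encoded %zu bytes\n", encode_metadata(buf, map, 2));
	return 0;
}
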
1659 * called under mdsc->mutex
1666 int mds = session->s_mds; in __open_session()
1668 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) in __open_session()
1669 return -EIO; in __open_session()
1672 mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds); in __open_session()
1673 doutc(mdsc->fsc->client, "open_session to mds%d (%s)\n", mds, in __open_session()
1675 session->s_state = CEPH_MDS_SESSION_OPENING; in __open_session()
1676 session->s_renew_requested = jiffies; in __open_session()
1680 session->s_seq); in __open_session()
1683 ceph_con_send(&session->s_con, msg); in __open_session()
1690 * called under mdsc->mutex
1704 if (session->s_state == CEPH_MDS_SESSION_NEW || in __open_export_target_session()
1705 session->s_state == CEPH_MDS_SESSION_CLOSING) { in __open_export_target_session()
1718 struct ceph_client *cl = mdsc->fsc->client; in ceph_mdsc_open_export_target_session()
1722 mutex_lock(&mdsc->mutex); in ceph_mdsc_open_export_target_session()
1724 mutex_unlock(&mdsc->mutex); in ceph_mdsc_open_export_target_session()
1734 int i, mds = session->s_mds; in __open_export_target_sessions()
1735 struct ceph_client *cl = mdsc->fsc->client; in __open_export_target_sessions()
1737 if (mds >= mdsc->mdsmap->possible_max_rank) in __open_export_target_sessions()
1740 mi = &mdsc->mdsmap->m_info[mds]; in __open_export_target_sessions()
1741 doutc(cl, "for mds%d (%d targets)\n", session->s_mds, in __open_export_target_sessions()
1742 mi->num_export_targets); in __open_export_target_sessions()
1744 for (i = 0; i < mi->num_export_targets; i++) { in __open_export_target_sessions()
1745 ts = __open_export_target_session(mdsc, mi->export_targets[i]); in __open_export_target_sessions()
1757 struct ceph_client *cl = session->s_mdsc->fsc->client; in detach_cap_releases()
1759 lockdep_assert_held(&session->s_cap_lock); in detach_cap_releases()
1761 list_splice_init(&session->s_cap_releases, target); in detach_cap_releases()
1762 session->s_num_cap_releases = 0; in detach_cap_releases()
1763 doutc(cl, "mds%d\n", session->s_mds); in detach_cap_releases()
1771 /* zero out the in-progress message */ in dispose_cap_releases()
1773 list_del(&cap->session_caps); in dispose_cap_releases()
1781 struct ceph_client *cl = mdsc->fsc->client; in cleanup_session_requests()
1785 doutc(cl, "mds%d\n", session->s_mds); in cleanup_session_requests()
1786 mutex_lock(&mdsc->mutex); in cleanup_session_requests()
1787 while (!list_empty(&session->s_unsafe)) { in cleanup_session_requests()
1788 req = list_first_entry(&session->s_unsafe, in cleanup_session_requests()
1791 req->r_tid); in cleanup_session_requests()
1792 if (req->r_target_inode) in cleanup_session_requests()
1793 mapping_set_error(req->r_target_inode->i_mapping, -EIO); in cleanup_session_requests()
1794 if (req->r_unsafe_dir) in cleanup_session_requests()
1795 mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO); in cleanup_session_requests()
1798 /* zero r_attempts, so kick_requests() will re-send requests */ in cleanup_session_requests()
1799 p = rb_first(&mdsc->request_tree); in cleanup_session_requests()
1803 if (req->r_session && in cleanup_session_requests()
1804 req->r_session->s_mds == session->s_mds) in cleanup_session_requests()
1805 req->r_attempts = 0; in cleanup_session_requests()
1807 mutex_unlock(&mdsc->mutex); in cleanup_session_requests()
1820 struct ceph_client *cl = session->s_mdsc->fsc->client; in ceph_iterate_session_caps()
1827 doutc(cl, "%p mds%d\n", session, session->s_mds); in ceph_iterate_session_caps()
1828 spin_lock(&session->s_cap_lock); in ceph_iterate_session_caps()
1829 p = session->s_caps.next; in ceph_iterate_session_caps()
1830 while (p != &session->s_caps) { in ceph_iterate_session_caps()
1834 inode = igrab(&cap->ci->netfs.inode); in ceph_iterate_session_caps()
1836 p = p->next; in ceph_iterate_session_caps()
1839 session->s_cap_iterator = cap; in ceph_iterate_session_caps()
1840 mds = cap->mds; in ceph_iterate_session_caps()
1841 spin_unlock(&session->s_cap_lock); in ceph_iterate_session_caps()
1848 ceph_put_cap(session->s_mdsc, old_cap); in ceph_iterate_session_caps()
1855 spin_lock(&session->s_cap_lock); in ceph_iterate_session_caps()
1856 p = p->next; in ceph_iterate_session_caps()
1857 if (!cap->ci) { in ceph_iterate_session_caps()
1859 BUG_ON(cap->session != session); in ceph_iterate_session_caps()
1860 cap->session = NULL; in ceph_iterate_session_caps()
1861 list_del_init(&cap->session_caps); in ceph_iterate_session_caps()
1862 session->s_nr_caps--; in ceph_iterate_session_caps()
1863 atomic64_dec(&session->s_mdsc->metric.total_caps); in ceph_iterate_session_caps()
1864 if (cap->queue_release) in ceph_iterate_session_caps()
1874 session->s_cap_iterator = NULL; in ceph_iterate_session_caps()
1875 spin_unlock(&session->s_cap_lock); in ceph_iterate_session_caps()
1879 ceph_put_cap(session->s_mdsc, old_cap); in ceph_iterate_session_caps()
1892 spin_lock(&ci->i_ceph_lock); in remove_session_caps_cb()
1896 cap, ci, &ci->netfs.inode); in remove_session_caps_cb()
1900 spin_unlock(&ci->i_ceph_lock); in remove_session_caps_cb()
1903 wake_up_all(&ci->i_cap_wq); in remove_session_caps_cb()
1906 while (iputs--) in remove_session_caps_cb()
1916 struct ceph_fs_client *fsc = session->s_mdsc->fsc; in remove_session_caps()
1917 struct super_block *sb = fsc->sb; in remove_session_caps()
1920 doutc(fsc->client, "on %p\n", session); in remove_session_caps()
1923 wake_up_all(&fsc->mdsc->cap_flushing_wq); in remove_session_caps()
1925 spin_lock(&session->s_cap_lock); in remove_session_caps()
1926 if (session->s_nr_caps > 0) { in remove_session_caps()
1937 while (!list_empty(&session->s_caps)) { in remove_session_caps()
1938 cap = list_entry(session->s_caps.next, in remove_session_caps()
1943 vino = cap->ci->i_vino; in remove_session_caps()
1944 spin_unlock(&session->s_cap_lock); in remove_session_caps()
1949 spin_lock(&session->s_cap_lock); in remove_session_caps()
1956 BUG_ON(session->s_nr_caps > 0); in remove_session_caps()
1957 BUG_ON(!list_empty(&session->s_cap_flushing)); in remove_session_caps()
1958 spin_unlock(&session->s_cap_lock); in remove_session_caps()
1959 dispose_cap_releases(session->s_mdsc, &dispose); in remove_session_caps()
1980 spin_lock(&ci->i_ceph_lock); in wake_up_session_cb()
1981 ci->i_wanted_max_size = 0; in wake_up_session_cb()
1982 ci->i_requested_max_size = 0; in wake_up_session_cb()
1983 spin_unlock(&ci->i_ceph_lock); in wake_up_session_cb()
1987 spin_lock(&ci->i_ceph_lock); in wake_up_session_cb()
1989 /* mds did not re-issue stale cap */ in wake_up_session_cb()
1990 if (cap && cap->cap_gen < atomic_read(&cap->session->s_cap_gen)) in wake_up_session_cb()
1991 cap->issued = cap->implemented = CEPH_CAP_PIN; in wake_up_session_cb()
1992 spin_unlock(&ci->i_ceph_lock); in wake_up_session_cb()
1995 wake_up_all(&ci->i_cap_wq); in wake_up_session_cb()
2001 struct ceph_client *cl = session->s_mdsc->fsc->client; in wake_up_session_caps()
2003 doutc(cl, "session %p mds%d\n", session, session->s_mds); in wake_up_session_caps()
2017 struct ceph_client *cl = mdsc->fsc->client; in send_renew_caps()
2021 if (time_after_eq(jiffies, session->s_cap_ttl) && in send_renew_caps()
2022 time_after_eq(session->s_cap_ttl, session->s_renew_requested)) in send_renew_caps()
2023 pr_info_client(cl, "mds%d caps stale\n", session->s_mds); in send_renew_caps()
2024 session->s_renew_requested = jiffies; in send_renew_caps()
2028 state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds); in send_renew_caps()
2030 doutc(cl, "ignoring mds%d (%s)\n", session->s_mds, in send_renew_caps()
2035 doutc(cl, "to mds%d (%s)\n", session->s_mds, in send_renew_caps()
2038 ++session->s_renew_seq); in send_renew_caps()
2041 ceph_con_send(&session->s_con, msg); in send_renew_caps()
2048 struct ceph_client *cl = mdsc->fsc->client; in send_flushmsg_ack()
2051  doutc(cl, "to mds%d (%s) seq %lld\n", session->s_mds,         in send_flushmsg_ack()
2052 ceph_session_state_name(session->s_state), seq); in send_flushmsg_ack()
2055 return -ENOMEM; in send_flushmsg_ack()
2056 ceph_con_send(&session->s_con, msg); in send_flushmsg_ack()
2062 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
2064 * Called under session->s_mutex
2069 struct ceph_client *cl = mdsc->fsc->client; in renewed_caps()
2073 spin_lock(&session->s_cap_lock); in renewed_caps()
2074 was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl); in renewed_caps()
2076 session->s_cap_ttl = session->s_renew_requested + in renewed_caps()
2077 mdsc->mdsmap->m_session_timeout*HZ; in renewed_caps()
2080 if (time_before(jiffies, session->s_cap_ttl)) { in renewed_caps()
2082 session->s_mds); in renewed_caps()
2086 session->s_mds); in renewed_caps()
2089 doutc(cl, "mds%d ttl now %lu, was %s, now %s\n", session->s_mds, in renewed_caps()
2090 session->s_cap_ttl, was_stale ? "stale" : "fresh", in renewed_caps()
2091        time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");  in renewed_caps()
2092 spin_unlock(&session->s_cap_lock); in renewed_caps()
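
The stale/fresh classification in renewed_caps() compares jiffies against s_cap_ttl with time_before()/time_after_eq(), which stay correct across counter wraparound by testing the sign of the difference instead of the raw values. A sketch of that trick:

#include <stdio.h>

/* Wraparound-safe comparisons in the style of the kernel's jiffies
 * helpers: compare the signed difference, not the raw counters. Valid
 * while the two stamps are within LONG_MAX ticks of each other. */
#define time_before(a, b)	((long)((a) - (b)) < 0)
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long jiffies = (unsigned long)-5;	/* about to wrap */
	unsigned long cap_ttl = jiffies + 100;		/* wraps past zero */

	/* cap_ttl is numerically tiny, yet still reads as "later" */
	printf("fresh: %d\n", time_before(jiffies, cap_ttl));	/* 1 */
	printf("stale: %d\n", time_after_eq(jiffies, cap_ttl));	/* 0 */
	return 0;
}
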
2103 struct ceph_client *cl = session->s_mdsc->fsc->client; in request_close_session()
2106 doutc(cl, "mds%d state %s seq %lld\n", session->s_mds, in request_close_session()
2107 ceph_session_state_name(session->s_state), session->s_seq); in request_close_session()
2109 session->s_seq); in request_close_session()
2111 return -ENOMEM; in request_close_session()
2112 ceph_con_send(&session->s_con, msg); in request_close_session()
2122 if (session->s_state >= CEPH_MDS_SESSION_CLOSING) in __close_session()
2124 session->s_state = CEPH_MDS_SESSION_CLOSING; in __close_session()
2136 spin_lock(&dentry->d_lock); in drop_negative_children()
2137 hlist_for_each_entry(child, &dentry->d_children, d_sib) { in drop_negative_children()
2143 spin_unlock(&dentry->d_lock); in drop_negative_children()
2163 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); in trim_caps_cb()
2164 struct ceph_client *cl = mdsc->fsc->client; in trim_caps_cb()
2171 return -1; in trim_caps_cb()
2173 spin_lock(&ci->i_ceph_lock); in trim_caps_cb()
2176 spin_unlock(&ci->i_ceph_lock); in trim_caps_cb()
2179 mine = cap->issued | cap->implemented; in trim_caps_cb()
2188 if (cap == ci->i_auth_cap) { in trim_caps_cb()
2189 if (ci->i_dirty_caps || ci->i_flushing_caps || in trim_caps_cb()
2190 !list_empty(&ci->i_cap_snaps)) in trim_caps_cb()
2194 /* Note: it's possible that i_filelock_ref becomes non-zero in trim_caps_cb()
2196 * of lock mds request will re-add auth caps. */ in trim_caps_cb()
2197 if (atomic_read(&ci->i_filelock_ref) > 0) in trim_caps_cb()
2202 if (S_ISREG(inode->i_mode) && in trim_caps_cb()
2214 (*remaining)--; in trim_caps_cb()
2218 spin_unlock(&ci->i_ceph_lock); in trim_caps_cb()
2224 count = atomic_read(&inode->i_count); in trim_caps_cb()
2226 (*remaining)--; in trim_caps_cb()
2236 spin_unlock(&ci->i_ceph_lock); in trim_caps_cb()
2247 struct ceph_client *cl = mdsc->fsc->client; in ceph_trim_caps()
2248 int trim_caps = session->s_nr_caps - max_caps; in ceph_trim_caps()
2250 doutc(cl, "mds%d start: %d / %d, trim %d\n", session->s_mds, in ceph_trim_caps()
2251 session->s_nr_caps, max_caps, trim_caps); in ceph_trim_caps()
2257 session->s_mds, session->s_nr_caps, max_caps, in ceph_trim_caps()
2258 trim_caps - remaining); in ceph_trim_caps()
2268 struct ceph_client *cl = mdsc->fsc->client; in check_caps_flush()
2271 spin_lock(&mdsc->cap_dirty_lock); in check_caps_flush()
2272 if (!list_empty(&mdsc->cap_flush_list)) { in check_caps_flush()
2274 list_first_entry(&mdsc->cap_flush_list, in check_caps_flush()
2276 if (cf->tid <= want_flush_tid) { in check_caps_flush()
2278 cf->tid, want_flush_tid); in check_caps_flush()
2282 spin_unlock(&mdsc->cap_dirty_lock); in check_caps_flush()
2294 struct ceph_client *cl = mdsc->fsc->client; in wait_caps_flush()
2298 wait_event(mdsc->cap_flushing_wq, in wait_caps_flush()
2310 struct ceph_client *cl = mdsc->fsc->client; in ceph_send_cap_releases()
2314 struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc; in ceph_send_cap_releases()
2320 down_read(&osdc->lock); in ceph_send_cap_releases()
2321 barrier = cpu_to_le32(osdc->epoch_barrier); in ceph_send_cap_releases()
2322 up_read(&osdc->lock); in ceph_send_cap_releases()
2324 spin_lock(&session->s_cap_lock); in ceph_send_cap_releases()
2326 list_splice_init(&session->s_cap_releases, &tmp_list); in ceph_send_cap_releases()
2327 num_cap_releases = session->s_num_cap_releases; in ceph_send_cap_releases()
2328 session->s_num_cap_releases = 0; in ceph_send_cap_releases()
2329 spin_unlock(&session->s_cap_lock); in ceph_send_cap_releases()
2337 head = msg->front.iov_base; in ceph_send_cap_releases()
2338 head->num = cpu_to_le32(0); in ceph_send_cap_releases()
2339 msg->front.iov_len = sizeof(*head); in ceph_send_cap_releases()
2341 msg->hdr.version = cpu_to_le16(2); in ceph_send_cap_releases()
2342 msg->hdr.compat_version = cpu_to_le16(1); in ceph_send_cap_releases()
2347 list_del(&cap->session_caps); in ceph_send_cap_releases()
2348 num_cap_releases--; in ceph_send_cap_releases()
2350 head = msg->front.iov_base; in ceph_send_cap_releases()
2351 put_unaligned_le32(get_unaligned_le32(&head->num) + 1, in ceph_send_cap_releases()
2352 &head->num); in ceph_send_cap_releases()
2353 item = msg->front.iov_base + msg->front.iov_len; in ceph_send_cap_releases()
2354 item->ino = cpu_to_le64(cap->cap_ino); in ceph_send_cap_releases()
2355 item->cap_id = cpu_to_le64(cap->cap_id); in ceph_send_cap_releases()
2356 item->migrate_seq = cpu_to_le32(cap->mseq); in ceph_send_cap_releases()
2357 item->issue_seq = cpu_to_le32(cap->issue_seq); in ceph_send_cap_releases()
2358 msg->front.iov_len += sizeof(*item); in ceph_send_cap_releases()
2362 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { in ceph_send_cap_releases()
2364 cap_barrier = msg->front.iov_base + msg->front.iov_len; in ceph_send_cap_releases()
2366 msg->front.iov_len += sizeof(*cap_barrier); in ceph_send_cap_releases()
2368 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); in ceph_send_cap_releases()
2369 doutc(cl, "mds%d %p\n", session->s_mds, msg); in ceph_send_cap_releases()
2370 ceph_con_send(&session->s_con, msg); in ceph_send_cap_releases()
2377 spin_lock(&session->s_cap_lock); in ceph_send_cap_releases()
2378 if (!list_empty(&session->s_cap_releases)) in ceph_send_cap_releases()
2380 spin_unlock(&session->s_cap_lock); in ceph_send_cap_releases()
2384 cap_barrier = msg->front.iov_base + msg->front.iov_len; in ceph_send_cap_releases()
2386 msg->front.iov_len += sizeof(*cap_barrier); in ceph_send_cap_releases()
2388 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); in ceph_send_cap_releases()
2389 doutc(cl, "mds%d %p\n", session->s_mds, msg); in ceph_send_cap_releases()
2390 ceph_con_send(&session->s_con, msg); in ceph_send_cap_releases()
2395 session->s_mds); in ceph_send_cap_releases()
2396 spin_lock(&session->s_cap_lock); in ceph_send_cap_releases()
2397 list_splice(&tmp_list, &session->s_cap_releases); in ceph_send_cap_releases()
2398 session->s_num_cap_releases += num_cap_releases; in ceph_send_cap_releases()
2399 spin_unlock(&session->s_cap_lock); in ceph_send_cap_releases()
2407 mutex_lock(&session->s_mutex); in ceph_cap_release_work()
2408 if (session->s_state == CEPH_MDS_SESSION_OPEN || in ceph_cap_release_work()
2409 session->s_state == CEPH_MDS_SESSION_HUNG) in ceph_cap_release_work()
2410 ceph_send_cap_releases(session->s_mdsc, session); in ceph_cap_release_work()
2411 mutex_unlock(&session->s_mutex); in ceph_cap_release_work()
2418 struct ceph_client *cl = mdsc->fsc->client; in ceph_flush_session_cap_releases()
2419 if (mdsc->stopping) in ceph_flush_session_cap_releases()
2423 if (queue_work(mdsc->fsc->cap_wq, in ceph_flush_session_cap_releases()
2424 &session->s_cap_release_work)) { in ceph_flush_session_cap_releases()
2428 doutc(cl, "failed to queue cap release work\n"); in ceph_flush_session_cap_releases()
2433 * caller holds session->s_cap_lock
2438 list_add_tail(&cap->session_caps, &session->s_cap_releases); in __ceph_queue_cap_release()
2439 session->s_num_cap_releases++; in __ceph_queue_cap_release()
2441 if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE)) in __ceph_queue_cap_release()
2442 ceph_flush_session_cap_releases(session->s_mdsc, session); in __ceph_queue_cap_release()
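
__ceph_queue_cap_release() appends the cap to the session list and kicks the flush worker each time the pending count reaches a multiple of CEPH_CAPS_PER_RELEASE, so releases leave in full message-sized batches. The trigger condition in isolation:

#include <stdio.h>

#define CAPS_PER_RELEASE 4	/* stand-in for CEPH_CAPS_PER_RELEASE */

int main(void)
{
	unsigned int pending = 0;
	int i;

	for (i = 0; i < 10; i++) {
		pending++;
		/* same shape as (!(s_num_cap_releases % CEPH_CAPS_PER_RELEASE)) */
		if (!(pending % CAPS_PER_RELEASE))
			printf("flush after %u queued releases\n", pending);
	}
	return 0;
}
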
2450 if (ret == -EAGAIN) in ceph_cap_reclaim_work()
2456 struct ceph_client *cl = mdsc->fsc->client; in ceph_queue_cap_reclaim_work()
2457 if (mdsc->stopping) in ceph_queue_cap_reclaim_work()
2460 if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) { in ceph_queue_cap_reclaim_work()
2463    doutc(cl, "failed to queue caps reclaim work\n");            in ceph_queue_cap_reclaim_work()
2472 val = atomic_add_return(nr, &mdsc->cap_reclaim_pending); in ceph_reclaim_caps_nr()
2474 atomic_set(&mdsc->cap_reclaim_pending, 0); in ceph_reclaim_caps_nr()
2481 struct ceph_client *cl = mdsc->fsc->client; in ceph_queue_cap_unlink_work()
2482 if (mdsc->stopping) in ceph_queue_cap_unlink_work()
2485 if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_unlink_work)) { in ceph_queue_cap_unlink_work()
2488 doutc(cl, "failed to queue caps unlink work\n"); in ceph_queue_cap_unlink_work()
2496 struct ceph_client *cl = mdsc->fsc->client; in ceph_cap_unlink_work()
2499 spin_lock(&mdsc->cap_delay_lock); in ceph_cap_unlink_work()
2500 while (!list_empty(&mdsc->cap_unlink_delay_list)) { in ceph_cap_unlink_work()
2504 ci = list_first_entry(&mdsc->cap_unlink_delay_list, in ceph_cap_unlink_work()
2507 list_del_init(&ci->i_cap_delay_list); in ceph_cap_unlink_work()
2509 inode = igrab(&ci->netfs.inode); in ceph_cap_unlink_work()
2511 spin_unlock(&mdsc->cap_delay_lock); in ceph_cap_unlink_work()
2516 spin_lock(&mdsc->cap_delay_lock); in ceph_cap_unlink_work()
2519 spin_unlock(&mdsc->cap_delay_lock); in ceph_cap_unlink_work()
2531 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; in ceph_alloc_readdir_reply_buffer()
2532 struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options; in ceph_alloc_readdir_reply_buffer()
2537 spin_lock(&ci->i_ceph_lock); in ceph_alloc_readdir_reply_buffer()
2538 num_entries = ci->i_files + ci->i_subdirs; in ceph_alloc_readdir_reply_buffer()
2539 spin_unlock(&ci->i_ceph_lock); in ceph_alloc_readdir_reply_buffer()
2541 num_entries = min(num_entries, opt->max_readdir); in ceph_alloc_readdir_reply_buffer()
2545 rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL | in ceph_alloc_readdir_reply_buffer()
2549 if (rinfo->dir_entries) in ceph_alloc_readdir_reply_buffer()
2551 order--; in ceph_alloc_readdir_reply_buffer()
2553 if (!rinfo->dir_entries) in ceph_alloc_readdir_reply_buffer()
2554 return -ENOMEM; in ceph_alloc_readdir_reply_buffer()
2557 num_entries = min(num_entries, opt->max_readdir); in ceph_alloc_readdir_reply_buffer()
2559 rinfo->dir_buf_size = PAGE_SIZE << order; in ceph_alloc_readdir_reply_buffer()
2560 req->r_num_caps = num_entries + 1; in ceph_alloc_readdir_reply_buffer()
2561 req->r_args.readdir.max_entries = cpu_to_le32(num_entries); in ceph_alloc_readdir_reply_buffer()
2562 req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes); in ceph_alloc_readdir_reply_buffer()
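
ceph_alloc_readdir_reply_buffer() estimates the entry count, requests a power-of-two page block big enough for it, retries with smaller orders when allocation fails, and finally recomputes how many entries the block it actually got can hold (hence the second min() against max_readdir). A malloc-based sketch of the same fallback loop, sizes invented:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

/* Try the smallest power-of-two page block that fits the estimate, then
 * halve on failure, like the __get_free_pages() loop above. */
static void *alloc_reply_buf(size_t entry_size, unsigned int num_entries,
			     size_t *buf_size, unsigned int *fit_entries)
{
	int order = 0;

	while (((size_t)PAGE_SIZE << order) < entry_size * num_entries)
		order++;

	for (; order >= 0; order--) {
		void *buf = malloc((size_t)PAGE_SIZE << order);

		if (buf) {
			*buf_size = (size_t)PAGE_SIZE << order;
			*fit_entries = (unsigned int)(*buf_size / entry_size);
			return buf;
		}
	}
	return NULL;	/* the -ENOMEM path */
}

int main(void)
{
	size_t size;
	unsigned int fit;
	void *buf = alloc_reply_buf(64, 1000, &size, &fit);

	if (!buf)
		return 1;
	printf("%zu-byte buffer holds %u entries\n", size, fit);
	free(buf);
	return 0;
}
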
2576 return ERR_PTR(-ENOMEM); in ceph_mdsc_create_request()
2578 mutex_init(&req->r_fill_mutex); in ceph_mdsc_create_request()
2579 req->r_mdsc = mdsc; in ceph_mdsc_create_request()
2580 req->r_started = jiffies; in ceph_mdsc_create_request()
2581 req->r_start_latency = ktime_get(); in ceph_mdsc_create_request()
2582 req->r_resend_mds = -1; in ceph_mdsc_create_request()
2583 INIT_LIST_HEAD(&req->r_unsafe_dir_item); in ceph_mdsc_create_request()
2584 INIT_LIST_HEAD(&req->r_unsafe_target_item); in ceph_mdsc_create_request()
2585 req->r_fmode = -1; in ceph_mdsc_create_request()
2586 req->r_feature_needed = -1; in ceph_mdsc_create_request()
2587 kref_init(&req->r_kref); in ceph_mdsc_create_request()
2588 RB_CLEAR_NODE(&req->r_node); in ceph_mdsc_create_request()
2589 INIT_LIST_HEAD(&req->r_wait); in ceph_mdsc_create_request()
2590 init_completion(&req->r_completion); in ceph_mdsc_create_request()
2591 init_completion(&req->r_safe_completion); in ceph_mdsc_create_request()
2592 INIT_LIST_HEAD(&req->r_unsafe_item); in ceph_mdsc_create_request()
2594 ktime_get_coarse_real_ts64(&req->r_stamp); in ceph_mdsc_create_request()
2596 req->r_op = op; in ceph_mdsc_create_request()
2597 req->r_direct_mode = mode; in ceph_mdsc_create_request()
2604 * called under mdsc->mutex.
2608 if (RB_EMPTY_ROOT(&mdsc->request_tree)) in __get_oldest_req()
2610 return rb_entry(rb_first(&mdsc->request_tree), in __get_oldest_req()
2616 return mdsc->oldest_tid; in __get_oldest_tid()
2622 struct inode *dir = req->r_parent; in get_fscrypt_altname()
2623 struct dentry *dentry = req->r_dentry; in get_fscrypt_altname()
2624 const struct qstr *name = req->r_dname; in get_fscrypt_altname()
2633 /* No-op unless this is encrypted */ in get_fscrypt_altname()
2646 name = &dentry->d_name; in get_fscrypt_altname()
2648 if (!fscrypt_fname_encrypted_size(dir, name->len, NAME_MAX, &len)) { in get_fscrypt_altname()
2650 return ERR_PTR(-ENAMETOOLONG); in get_fscrypt_altname()
2661 return ERR_PTR(-ENOMEM); in get_fscrypt_altname()
2681 * ceph_mdsc_build_path - build a path string to a given dentry
2696 * the root, building the path until the first non-snapped inode is reached
2700 * foo/.snap/bar -> foo//bar
2705 struct ceph_client *cl = mdsc->fsc->client; in ceph_mdsc_build_path()
2714 return ERR_PTR(-EINVAL); in ceph_mdsc_build_path()
2718 return ERR_PTR(-ENOMEM); in ceph_mdsc_build_path()
2720 pos = PATH_MAX - 1; in ceph_mdsc_build_path()
2728 spin_lock(&cur->d_lock); in ceph_mdsc_build_path()
2732 spin_unlock(&cur->d_lock); in ceph_mdsc_build_path()
2736 spin_unlock(&cur->d_lock); in ceph_mdsc_build_path()
2739 } else if (!for_wire || !IS_ENCRYPTED(d_inode(cur->d_parent))) { in ceph_mdsc_build_path()
2740 pos -= cur->d_name.len; in ceph_mdsc_build_path()
2742 spin_unlock(&cur->d_lock); in ceph_mdsc_build_path()
2745 memcpy(path + pos, cur->d_name.name, cur->d_name.len); in ceph_mdsc_build_path()
2746 spin_unlock(&cur->d_lock); in ceph_mdsc_build_path()
2754 * present it as-is. in ceph_mdsc_build_path()
2756 memcpy(buf, cur->d_name.name, cur->d_name.len); in ceph_mdsc_build_path()
2757 len = cur->d_name.len; in ceph_mdsc_build_path()
2758 spin_unlock(&cur->d_lock); in ceph_mdsc_build_path()
2777 pos -= len; in ceph_mdsc_build_path()
2792 if (--pos < 0) in ceph_mdsc_build_path()
2810 return ERR_PTR(-ENAMETOOLONG); in ceph_mdsc_build_path()
2814 *plen = PATH_MAX - 1 - pos; in ceph_mdsc_build_path()
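
As the doc comment above describes, ceph_mdsc_build_path() fills its PATH_MAX buffer from the tail: pos starts at PATH_MAX - 1, each component is copied in front of the previous one, and a '/' is prepended before each, so the finished string begins at &path[pos]. A userspace sketch over a plain component array; the double-slash encoding of .snap directories is omitted here:

#include <stdio.h>
#include <string.h>

#define PATHBUF 256

/* Build "/a/b/c" back to front, the way ceph_mdsc_build_path() walks
 * d_parent. Returns the offset of the finished string, or -1 when the
 * buffer is too small (the -ENAMETOOLONG case). */
static int build_path(char *path, const char *const *comp, int n)
{
	int pos = PATHBUF - 1;

	path[pos] = '\0';
	while (n--) {			/* from the leaf toward the root */
		int len = (int)strlen(comp[n]);

		pos -= len;
		if (pos < 1)
			return -1;
		memcpy(path + pos, comp[n], len);
		if (--pos < 0)
			return -1;
		path[pos] = '/';
	}
	return pos;
}

int main(void)
{
	const char *const comp[] = { "usr", "share", "doc" };
	char path[PATHBUF];
	int pos = build_path(path, comp, 3);

	if (pos < 0)
		return 1;
	printf("%s\n", path + pos);	/* /usr/share/doc */
	return 0;
}
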
2828 dir = d_inode_rcu(dentry->d_parent); in build_dentry_path()
2833 *ppath = dentry->d_name.name; in build_dentry_path()
2834 *ppathlen = dentry->d_name.len; in build_dentry_path()
2850 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); in build_inode_path()
2879 struct ceph_client *cl = mdsc->fsc->client; in set_request_path_attr()
2906 ceph_encode_timespec64(&ts, &req->r_stamp); in encode_mclientrequest_tail()
2910 ceph_encode_32(p, req->r_cred->group_info->ngroups); in encode_mclientrequest_tail()
2911 for (i = 0; i < req->r_cred->group_info->ngroups; i++) in encode_mclientrequest_tail()
2913 req->r_cred->group_info->gid[i])); in encode_mclientrequest_tail()
2916 ceph_encode_32(p, req->r_altname_len); in encode_mclientrequest_tail()
2917 ceph_encode_copy(p, req->r_altname, req->r_altname_len); in encode_mclientrequest_tail()
2920 if (req->r_fscrypt_auth) { in encode_mclientrequest_tail()
2921 u32 authlen = ceph_fscrypt_auth_len(req->r_fscrypt_auth); in encode_mclientrequest_tail()
2924 ceph_encode_copy(p, req->r_fscrypt_auth, authlen); in encode_mclientrequest_tail()
2928 if (test_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags)) { in encode_mclientrequest_tail()
2930 ceph_encode_64(p, req->r_fscrypt_file); in encode_mclientrequest_tail()
2938 if (!test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD, &session->s_features)) in mds_supported_head_version()
2941 if (!test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features)) in mds_supported_head_version()
2956 return (struct ceph_mds_request_head_legacy *)&head->oldest_client_tid; in find_legacy_request_head()
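
find_legacy_request_head() works because the newer request head keeps the legacy layout intact starting at its oldest_client_tid member, so code targeting an old MDS can be handed a pointer into the middle of the new struct. A sketch of that aliasing trick with invented field sets (the real heads carry many more fields):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical wire heads: the newer one prepends a version field but
 * keeps the legacy layout intact from oldest_client_tid onward. */
struct legacy_head {
	uint64_t oldest_client_tid;
	uint32_t op;
};

struct new_head {
	uint32_t version;
	uint64_t oldest_client_tid;
	uint32_t op;
};

static struct legacy_head *find_legacy_head(void *msg, int peer_is_legacy)
{
	if (peer_is_legacy)
		return msg;
	/* point at the shared tail of the newer struct */
	return (struct legacy_head *)&((struct new_head *)msg)->oldest_client_tid;
}

int main(void)
{
	struct new_head h = { .version = 2, .oldest_client_tid = 42, .op = 7 };
	struct legacy_head *lh = find_legacy_head(&h, 0);

	printf("tid %llu op %u (tail at offset %zu)\n",
	       (unsigned long long)lh->oldest_client_tid, lh->op,
	       offsetof(struct new_head, oldest_client_tid));
	return 0;
}
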
2960 * called under mdsc->mutex
2966 int mds = session->s_mds; in create_request_message()
2967 struct ceph_mds_client *mdsc = session->s_mdsc; in create_request_message()
2968 struct ceph_client *cl = mdsc->fsc->client; in create_request_message()
2981 bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME); in create_request_message()
2983 kuid_t caller_fsuid = req->r_cred->fsuid; in create_request_message()
2984 kgid_t caller_fsgid = req->r_cred->fsgid; in create_request_message()
2986 ret = set_request_path_attr(mdsc, req->r_inode, req->r_dentry, in create_request_message()
2987 req->r_parent, req->r_path1, req->r_ino1.ino, in create_request_message()
2990 &req->r_req_flags)); in create_request_message()
2997 if (req->r_old_dentry && in create_request_message()
2998 !(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED)) in create_request_message()
2999 old_dentry = req->r_old_dentry; in create_request_message()
3001 req->r_old_dentry_dir, in create_request_message()
3002 req->r_path2, req->r_ino2.ino, in create_request_message()
3009 req->r_altname = get_fscrypt_altname(req, &req->r_altname_len); in create_request_message()
3010 if (IS_ERR(req->r_altname)) { in create_request_message()
3011 msg = ERR_CAST(req->r_altname); in create_request_message()
3012 req->r_altname = NULL; in create_request_message()
3038 (!!req->r_inode_drop + !!req->r_dentry_drop + in create_request_message()
3039 !!req->r_old_inode_drop + !!req->r_old_dentry_drop); in create_request_message()
3041 if (req->r_dentry_drop) in create_request_message()
3043 if (req->r_old_dentry_drop) in create_request_message()
3048 /* req->r_stamp */ in create_request_message()
3052 len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups); in create_request_message()
3055 len += sizeof(u32) + req->r_altname_len; in create_request_message()
3059 if (req->r_fscrypt_auth) in create_request_message()
3060 len += ceph_fscrypt_auth_len(req->r_fscrypt_auth); in create_request_message()
3064 if (test_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags)) in create_request_message()
3069 msg = ERR_PTR(-ENOMEM); in create_request_message()
3073 msg->hdr.tid = cpu_to_le64(req->r_tid); in create_request_message()
3075 lhead = find_legacy_request_head(msg->front.iov_base, in create_request_message()
3076 session->s_con.peer_features); in create_request_message()
3078 if ((req->r_mnt_idmap != &nop_mnt_idmap) && in create_request_message()
3079 !test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features)) { in create_request_message()
3080 WARN_ON_ONCE(!IS_CEPH_MDS_OP_NEWINODE(req->r_op)); in create_request_message()
3085 " is not supported by MDS. UID/GID-based restrictions may" in create_request_message()
3088 caller_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns, in create_request_message()
3089 VFSUIDT_INIT(req->r_cred->fsuid)); in create_request_message()
3090 caller_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns, in create_request_message()
3091 VFSGIDT_INIT(req->r_cred->fsgid)); in create_request_message()
3095 " is not supported by MDS. Fail request with -EIO.\n"); in create_request_message()
3097 ret = -EIO; in create_request_message()
3104 * one was added when we moved the message version from 3->4. in create_request_message()
3107 msg->hdr.version = cpu_to_le16(3); in create_request_message()
3108 p = msg->front.iov_base + sizeof(*lhead); in create_request_message()
3110 struct ceph_mds_request_head *nhead = msg->front.iov_base; in create_request_message()
3112 msg->hdr.version = cpu_to_le16(4); in create_request_message()
3113 nhead->version = cpu_to_le16(1); in create_request_message()
3114 p = msg->front.iov_base + offsetofend(struct ceph_mds_request_head, args); in create_request_message()
3116 struct ceph_mds_request_head *nhead = msg->front.iov_base; in create_request_message()
3118 msg->hdr.version = cpu_to_le16(6); in create_request_message()
3119 nhead->version = cpu_to_le16(2); in create_request_message()
3121 p = msg->front.iov_base + offsetofend(struct ceph_mds_request_head, ext_num_fwd); in create_request_message()
3123 struct ceph_mds_request_head *nhead = msg->front.iov_base; in create_request_message()
3127 msg->hdr.version = cpu_to_le16(6); in create_request_message()
3128 nhead->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION); in create_request_message()
3129 nhead->struct_len = cpu_to_le32(sizeof(struct ceph_mds_request_head)); in create_request_message()
3131 if (IS_CEPH_MDS_OP_NEWINODE(req->r_op)) { in create_request_message()
3132 owner_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns, in create_request_message()
3133 VFSUIDT_INIT(req->r_cred->fsuid)); in create_request_message()
3134 owner_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns, in create_request_message()
3135 VFSGIDT_INIT(req->r_cred->fsgid)); in create_request_message()
3136 nhead->owner_uid = cpu_to_le32(from_kuid(&init_user_ns, owner_fsuid)); in create_request_message()
3137 nhead->owner_gid = cpu_to_le32(from_kgid(&init_user_ns, owner_fsgid)); in create_request_message()
3139 nhead->owner_uid = cpu_to_le32(-1); in create_request_message()
3140 nhead->owner_gid = cpu_to_le32(-1); in create_request_message()
3143 p = msg->front.iov_base + sizeof(*nhead); in create_request_message()
3146 end = msg->front.iov_base + msg->front.iov_len; in create_request_message()
3148 lhead->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); in create_request_message()
3149 lhead->op = cpu_to_le32(req->r_op); in create_request_message()
3150 lhead->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, in create_request_message()
3152 lhead->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, in create_request_message()
3154 lhead->ino = cpu_to_le64(req->r_deleg_ino); in create_request_message()
3155 lhead->args = req->r_args; in create_request_message()
3161 req->r_request_release_offset = p - msg->front.iov_base; in create_request_message()
3165 if (req->r_inode_drop) in create_request_message()
3167 req->r_inode ? req->r_inode : d_inode(req->r_dentry), in create_request_message()
3168 mds, req->r_inode_drop, req->r_inode_unless, in create_request_message()
3169 req->r_op == CEPH_MDS_OP_READDIR); in create_request_message()
3170 if (req->r_dentry_drop) { in create_request_message()
3171 ret = ceph_encode_dentry_release(&p, req->r_dentry, in create_request_message()
3172 req->r_parent, mds, req->r_dentry_drop, in create_request_message()
3173 req->r_dentry_unless); in create_request_message()
3178 if (req->r_old_dentry_drop) { in create_request_message()
3179 ret = ceph_encode_dentry_release(&p, req->r_old_dentry, in create_request_message()
3180 req->r_old_dentry_dir, mds, in create_request_message()
3181 req->r_old_dentry_drop, in create_request_message()
3182 req->r_old_dentry_unless); in create_request_message()
3187 if (req->r_old_inode_drop) in create_request_message()
3189 d_inode(req->r_old_dentry), in create_request_message()
3190 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); in create_request_message()
3194 p = msg->front.iov_base + req->r_request_release_offset; in create_request_message()
3197 lhead->num_releases = cpu_to_le16(releases); in create_request_message()
3203 msg = ERR_PTR(-ERANGE); in create_request_message()
3207 msg->front.iov_len = p - msg->front.iov_base; in create_request_message()
3208 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); in create_request_message()
3210 if (req->r_pagelist) { in create_request_message()
3211 struct ceph_pagelist *pagelist = req->r_pagelist; in create_request_message()
3213 msg->hdr.data_len = cpu_to_le32(pagelist->length); in create_request_message()
3215 msg->hdr.data_len = 0; in create_request_message()
3218 msg->hdr.data_off = cpu_to_le16(0); in create_request_message()
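/*
 * Rough shape of the finished message (illustrative only):
 *
 *   front: [request head][path1][path2][...][releases]  -> hdr.front_len
 *   data:  pagelist payload, e.g. setxattr blobs        -> hdr.data_len
 *
 * data_off carries no meaning for MDS requests and is pinned to 0.
 */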
3235 * called under mdsc->mutex if error, under no mutex if success.
3241 req->r_end_latency = ktime_get(); in complete_request()
3243 if (req->r_callback) in complete_request()
3244 req->r_callback(mdsc, req); in complete_request()
3245 complete_all(&req->r_completion); in complete_request()
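/*
 * Synchronous callers sleep on r_completion (see ceph_mdsc_wait_request());
 * async ones (e.g. async create) supply r_callback instead, so this one
 * helper finishes both kinds of request.
 */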
3249 * called under mdsc->mutex
3255 int mds = session->s_mds; in __prepare_send_request()
3256 struct ceph_mds_client *mdsc = session->s_mdsc; in __prepare_send_request()
3257 struct ceph_client *cl = mdsc->fsc->client; in __prepare_send_request()
3263 &session->s_features); in __prepare_send_request()
3270 if (req->r_attempts) { in __prepare_send_request()
3274 if ((old_version && req->r_attempts >= old_max_retry) || in __prepare_send_request()
3275 ((uint32_t)req->r_attempts >= U32_MAX)) { in __prepare_send_request()
3277 req->r_tid); in __prepare_send_request()
3278 return -EMULTIHOP; in __prepare_send_request()
3282 req->r_attempts++; in __prepare_send_request()
3283 if (req->r_inode) { in __prepare_send_request()
3285 ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds); in __prepare_send_request()
3288 req->r_sent_on_mseq = cap->mseq; in __prepare_send_request()
3290 req->r_sent_on_mseq = -1; in __prepare_send_request()
3292 doutc(cl, "%p tid %lld %s (attempt %d)\n", req, req->r_tid, in __prepare_send_request()
3293 ceph_mds_op_name(req->r_op), req->r_attempts); in __prepare_send_request()
3295 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { in __prepare_send_request()
3304 msg = req->r_request; in __prepare_send_request()
3305 lhead = find_legacy_request_head(msg->front.iov_base, in __prepare_send_request()
3306 session->s_con.peer_features); in __prepare_send_request()
3308 flags = le32_to_cpu(lhead->flags); in __prepare_send_request()
3310 lhead->flags = cpu_to_le32(flags); in __prepare_send_request()
3312 if (req->r_target_inode) in __prepare_send_request()
3313 lhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); in __prepare_send_request()
3315 lhead->num_retry = req->r_attempts - 1; in __prepare_send_request()
3317 nhead = (struct ceph_mds_request_head*)msg->front.iov_base; in __prepare_send_request()
3318 nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1); in __prepare_send_request()
3322 lhead->num_releases = 0; in __prepare_send_request()
3324 p = msg->front.iov_base + req->r_request_release_offset; in __prepare_send_request()
3327 msg->front.iov_len = p - msg->front.iov_base; in __prepare_send_request()
3328 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); in __prepare_send_request()
3332 if (req->r_request) { in __prepare_send_request()
3333 ceph_msg_put(req->r_request); in __prepare_send_request()
3334 req->r_request = NULL; in __prepare_send_request()
3338 req->r_err = PTR_ERR(msg); in __prepare_send_request()
3341 req->r_request = msg; in __prepare_send_request()
3343 lhead = find_legacy_request_head(msg->front.iov_base, in __prepare_send_request()
3344 session->s_con.peer_features); in __prepare_send_request()
3345 lhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); in __prepare_send_request()
3346 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) in __prepare_send_request()
3348 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) in __prepare_send_request()
3350 if (req->r_parent) in __prepare_send_request()
3352 lhead->flags = cpu_to_le32(flags); in __prepare_send_request()
3353 lhead->num_fwd = req->r_num_fwd; in __prepare_send_request()
3354 lhead->num_retry = req->r_attempts - 1; in __prepare_send_request()
3356 nhead = (struct ceph_mds_request_head*)msg->front.iov_base; in __prepare_send_request()
3357 nhead->ext_num_fwd = cpu_to_le32(req->r_num_fwd); in __prepare_send_request()
3358 nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1); in __prepare_send_request()
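/*
 * Why both counters exist: the legacy head stores num_retry in a single
 * byte, which is why resends to an old MDS are bounded by old_max_retry
 * above, while the new head carries the full 32-bit ext_num_retry.
 */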
3361 doutc(cl, " r_parent = %p\n", req->r_parent); in __prepare_send_request()
3366 * called under mdsc->mutex
3376 ceph_msg_get(req->r_request); in __send_request()
3377 ceph_con_send(&session->s_con, req->r_request); in __send_request()
3389 struct ceph_client *cl = mdsc->fsc->client; in __do_request()
3391 int mds = -1; in __do_request()
3395 if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { in __do_request()
3396 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) in __do_request()
3401 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) { in __do_request()
3403 err = -EIO; in __do_request()
3406 if (req->r_timeout && in __do_request()
3407 time_after_eq(jiffies, req->r_started + req->r_timeout)) { in __do_request()
3409 err = -ETIMEDOUT; in __do_request()
3412 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { in __do_request()
3414 err = -EIO; in __do_request()
3417 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) { in __do_request()
3418 if (mdsc->mdsmap_err) { in __do_request()
3419 err = mdsc->mdsmap_err; in __do_request()
3423 if (mdsc->mdsmap->m_epoch == 0) { in __do_request()
3425 list_add(&req->r_wait, &mdsc->waiting_for_map); in __do_request()
3428 if (!(mdsc->fsc->mount_options->flags & in __do_request()
3430 !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) { in __do_request()
3431 err = -EHOSTUNREACH; in __do_request()
3440 ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { in __do_request()
3441 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) { in __do_request()
3442 err = -EJUKEBOX; in __do_request()
3446 list_add(&req->r_wait, &mdsc->waiting_for_map); in __do_request()
3459 req->r_session = ceph_get_mds_session(session); in __do_request()
3462 ceph_session_state_name(session->s_state)); in __do_request()
3467 if (req->r_feature_needed > 0 && in __do_request()
3468 !test_bit(req->r_feature_needed, &session->s_features)) { in __do_request()
3469 err = -EOPNOTSUPP; in __do_request()
3473 if (session->s_state != CEPH_MDS_SESSION_OPEN && in __do_request()
3474 session->s_state != CEPH_MDS_SESSION_HUNG) { in __do_request()
3476 * We cannot queue async requests since the caps and delegated in __do_request()
3477 * inodes are bound to the session. Just return -EJUKEBOX and let the caller retry them. in __do_request()
3480 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) { in __do_request()
3481 err = -EJUKEBOX; in __do_request()
3487 * If the session has been REJECTED, then return a hard error, unless it's a CLEANRECOVER mount, in which case we'll queue in __do_request()
3488 * it to the mdsc queue. in __do_request()
3490 if (session->s_state == CEPH_MDS_SESSION_REJECTED) { in __do_request()
3491 if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER)) in __do_request()
3492 list_add(&req->r_wait, &mdsc->waiting_for_map); in __do_request()
3494 err = -EACCES; in __do_request()
3498 if (session->s_state == CEPH_MDS_SESSION_NEW || in __do_request()
3499 session->s_state == CEPH_MDS_SESSION_CLOSING) { in __do_request()
3505 req->r_resend_mds = mds; in __do_request()
3507 list_add(&req->r_wait, &session->s_waiting); in __do_request()
3512 req->r_resend_mds = -1; /* forget any previous mds hint */ in __do_request()
3514 if (req->r_request_started == 0) /* note request start time */ in __do_request()
3515 req->r_request_started = jiffies; in __do_request()
3525 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) && req->r_num_fwd) { in __do_request()
3526 struct ceph_dentry_info *di = ceph_dentry(req->r_dentry); in __do_request()
3537 if (!d_inode(req->r_dentry)) { in __do_request()
3538 err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT, in __do_request()
3541 mutex_lock(&req->r_fill_mutex); in __do_request()
3542 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags); in __do_request()
3543 mutex_unlock(&req->r_fill_mutex); in __do_request()
3548 ci = ceph_inode(d_inode(req->r_dentry)); in __do_request()
3550 spin_lock(&ci->i_ceph_lock); in __do_request()
3551 cap = ci->i_auth_cap; in __do_request()
3552 if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE && mds != cap->mds) { in __do_request()
3553 doutc(cl, "session changed for auth cap %d -> %d\n", in __do_request()
3554 cap->session->s_mds, session->s_mds); in __do_request()
3557 spin_lock(&cap->session->s_cap_lock); in __do_request()
3558 cap->session->s_nr_caps--; in __do_request()
3559 list_del_init(&cap->session_caps); in __do_request()
3560 spin_unlock(&cap->session->s_cap_lock); in __do_request()
3563 cap->mds = mds; in __do_request()
3564 cap->session = session; in __do_request()
3565 spin_lock(&session->s_cap_lock); in __do_request()
3566 session->s_nr_caps++; in __do_request()
3567 list_add_tail(&cap->session_caps, &session->s_caps); in __do_request()
3568 spin_unlock(&session->s_cap_lock); in __do_request()
3572 spin_unlock(&ci->i_ceph_lock); in __do_request()
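/*
 * Net effect of the block above: the async-created inode's auth cap is
 * re-homed onto the session we are about to use, with both sessions'
 * s_cap_lock taken so s_nr_caps and the session_caps lists stay
 * consistent while the cap moves.
 */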
3582 req->r_err = err; in __do_request()
3590 * called under mdsc->mutex
3595 struct ceph_client *cl = mdsc->fsc->client; in __wake_requests()
3604 list_del_init(&req->r_wait); in __wake_requests()
3606 req->r_tid); in __wake_requests()
3617 struct ceph_client *cl = mdsc->fsc->client; in kick_requests()
3619 struct rb_node *p = rb_first(&mdsc->request_tree); in kick_requests()
3625 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) in kick_requests()
3627 if (req->r_attempts > 0) in kick_requests()
3629 if (req->r_session && in kick_requests()
3630 req->r_session->s_mds == mds) { in kick_requests()
3631 doutc(cl, " kicking tid %llu\n", req->r_tid); in kick_requests()
3632 list_del_init(&req->r_wait); in kick_requests()
3641 struct ceph_client *cl = mdsc->fsc->client; in ceph_mdsc_submit_request()
3645 if (req->r_inode) in ceph_mdsc_submit_request()
3646 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); in ceph_mdsc_submit_request()
3647 if (req->r_parent) { in ceph_mdsc_submit_request()
3648 struct ceph_inode_info *ci = ceph_inode(req->r_parent); in ceph_mdsc_submit_request()
3649 int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ? in ceph_mdsc_submit_request()
3651 spin_lock(&ci->i_ceph_lock); in ceph_mdsc_submit_request()
3654 spin_unlock(&ci->i_ceph_lock); in ceph_mdsc_submit_request()
3656 if (req->r_old_dentry_dir) in ceph_mdsc_submit_request()
3657 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), in ceph_mdsc_submit_request()
3660 if (req->r_inode) { in ceph_mdsc_submit_request()
3661 err = ceph_wait_on_async_create(req->r_inode); in ceph_mdsc_submit_request()
3668 if (!err && req->r_old_inode) { in ceph_mdsc_submit_request()
3669 err = ceph_wait_on_async_create(req->r_old_inode); in ceph_mdsc_submit_request()
3677 mutex_lock(&mdsc->mutex); in ceph_mdsc_submit_request()
3680 err = req->r_err; in ceph_mdsc_submit_request()
3681 mutex_unlock(&mdsc->mutex); in ceph_mdsc_submit_request()
3689 struct ceph_client *cl = mdsc->fsc->client; in ceph_mdsc_wait_request()
3698 &req->r_completion, in ceph_mdsc_wait_request()
3699 ceph_timeout_jiffies(req->r_timeout)); in ceph_mdsc_wait_request()
3703 err = -ETIMEDOUT; /* timed out */ in ceph_mdsc_wait_request()
3708 mutex_lock(&mdsc->mutex); in ceph_mdsc_wait_request()
3711 if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { in ceph_mdsc_wait_request()
3712 err = le32_to_cpu(req->r_reply_info.head->result); in ceph_mdsc_wait_request()
3714 doutc(cl, "aborted request %lld with %d\n", req->r_tid, err); in ceph_mdsc_wait_request()
3721 mutex_lock(&req->r_fill_mutex); in ceph_mdsc_wait_request()
3722 req->r_err = err; in ceph_mdsc_wait_request()
3723 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags); in ceph_mdsc_wait_request()
3724 mutex_unlock(&req->r_fill_mutex); in ceph_mdsc_wait_request()
3726 if (req->r_parent && in ceph_mdsc_wait_request()
3727 (req->r_op & CEPH_MDS_OP_WRITE)) in ceph_mdsc_wait_request()
3730 err = req->r_err; in ceph_mdsc_wait_request()
3733 mutex_unlock(&mdsc->mutex); in ceph_mdsc_wait_request()
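/*
 * Note that an interrupted wait does not cancel the RPC: the request is
 * merely flagged ABORTED under r_fill_mutex, so a racing reply is still
 * parsed by handle_reply() but never applied to the cache, and write ops
 * against r_parent invalidate the parent dir instead.
 */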
3745 struct ceph_client *cl = mdsc->fsc->client; in ceph_mdsc_do_request()
3764 struct inode *dir = req->r_parent; in ceph_invalidate_dir_request()
3765 struct inode *old_dir = req->r_old_dentry_dir; in ceph_invalidate_dir_request()
3766 struct ceph_client *cl = req->r_mdsc->fsc->client; in ceph_invalidate_dir_request()
3774 if (req->r_dentry) in ceph_invalidate_dir_request()
3775 ceph_invalidate_dentry_lease(req->r_dentry); in ceph_invalidate_dir_request()
3776 if (req->r_old_dentry) in ceph_invalidate_dir_request()
3777 ceph_invalidate_dentry_lease(req->r_old_dentry); in ceph_invalidate_dir_request()
3789 struct ceph_mds_client *mdsc = session->s_mdsc; in handle_reply()
3790 struct ceph_client *cl = mdsc->fsc->client; in handle_reply()
3792 struct ceph_mds_reply_head *head = msg->front.iov_base; in handle_reply()
3797 int mds = session->s_mds; in handle_reply()
3800 if (msg->front.iov_len < sizeof(*head)) { in handle_reply()
3807 tid = le64_to_cpu(msg->hdr.tid); in handle_reply()
3808 mutex_lock(&mdsc->mutex); in handle_reply()
3812 mutex_unlock(&mdsc->mutex); in handle_reply()
3818 if (req->r_session != session) { in handle_reply()
3820 tid, session->s_mds, in handle_reply()
3821 req->r_session ? req->r_session->s_mds : -1); in handle_reply()
3822 mutex_unlock(&mdsc->mutex); in handle_reply()
3827 if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) || in handle_reply()
3828 (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) { in handle_reply()
3830 head->safe ? "safe" : "unsafe", tid, mds); in handle_reply()
3831 mutex_unlock(&mdsc->mutex); in handle_reply()
3834 if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) { in handle_reply()
3837 mutex_unlock(&mdsc->mutex); in handle_reply()
3841 result = le32_to_cpu(head->result); in handle_reply()
3843 if (head->safe) { in handle_reply()
3844 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags); in handle_reply()
3848 if (mdsc->stopping && !__get_oldest_req(mdsc)) in handle_reply()
3849 complete_all(&mdsc->safe_umount_waiters); in handle_reply()
3851 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { in handle_reply()
3861 mutex_unlock(&mdsc->mutex); in handle_reply()
3865 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags); in handle_reply()
3866 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); in handle_reply()
3870 if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features)) in handle_reply()
3871 err = parse_reply_info(session, msg, req, (u64)-1); in handle_reply()
3874 session->s_con.peer_features); in handle_reply()
3875 mutex_unlock(&mdsc->mutex); in handle_reply()
3878 rinfo = &req->r_reply_info; in handle_reply()
3879 if ((err >= 0) && rinfo->head->is_target) { in handle_reply()
3880 struct inode *in = xchg(&req->r_new_inode, NULL); in handle_reply()
3882 .ino = le64_to_cpu(rinfo->targeti.in->ino), in handle_reply()
3883 .snap = le64_to_cpu(rinfo->targeti.in->snapid) in handle_reply()
3890 if (req->r_op == CEPH_MDS_OP_CREATE && in handle_reply()
3891 !req->r_reply_info.has_create_ino) { in handle_reply()
3893 WARN_ON_ONCE(req->r_deleg_ino); in handle_reply()
3898 in = ceph_get_inode(mdsc->fsc->sb, tvino, in); in handle_reply()
3901 mutex_lock(&session->s_mutex); in handle_reply()
3904 req->r_target_inode = in; in handle_reply()
3907 mutex_lock(&session->s_mutex); in handle_reply()
3917 if (rinfo->snapblob_len) { in handle_reply()
3918 down_write(&mdsc->snap_rwsem); in handle_reply()
3919 err = ceph_update_snap_trace(mdsc, rinfo->snapblob, in handle_reply()
3920 rinfo->snapblob + rinfo->snapblob_len, in handle_reply()
3921 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP, in handle_reply()
3924 up_write(&mdsc->snap_rwsem); in handle_reply()
3926 if (err == -EIO) in handle_reply()
3930 downgrade_write(&mdsc->snap_rwsem); in handle_reply()
3932 down_read(&mdsc->snap_rwsem); in handle_reply()
3936 mutex_lock(&req->r_fill_mutex); in handle_reply()
3937 current->journal_info = req; in handle_reply()
3938 err = ceph_fill_trace(mdsc->fsc->sb, req); in handle_reply()
3940 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || in handle_reply()
3941 req->r_op == CEPH_MDS_OP_LSSNAP)) in handle_reply()
3942 err = ceph_readdir_prepopulate(req, req->r_session); in handle_reply()
3944 current->journal_info = NULL; in handle_reply()
3945 mutex_unlock(&req->r_fill_mutex); in handle_reply()
3947 up_read(&mdsc->snap_rwsem); in handle_reply()
3952 if (req->r_target_inode && in handle_reply()
3953 test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { in handle_reply()
3955 ceph_inode(req->r_target_inode); in handle_reply()
3956 spin_lock(&ci->i_unsafe_lock); in handle_reply()
3957 list_add_tail(&req->r_unsafe_target_item, in handle_reply()
3958 &ci->i_unsafe_iops); in handle_reply()
3959 spin_unlock(&ci->i_unsafe_lock); in handle_reply()
3962 ceph_unreserve_caps(mdsc, &req->r_caps_reservation); in handle_reply()
3965 mutex_lock(&mdsc->mutex); in handle_reply()
3966 if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { in handle_reply()
3968 req->r_err = err; in handle_reply()
3970 req->r_reply = ceph_msg_get(msg); in handle_reply()
3971 set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags); in handle_reply()
3976 mutex_unlock(&mdsc->mutex); in handle_reply()
3978 mutex_unlock(&session->s_mutex); in handle_reply()
3983 ceph_update_metadata_metrics(&mdsc->metric, req->r_start_latency, in handle_reply()
3984 req->r_end_latency, err); in handle_reply()
4003 struct ceph_client *cl = mdsc->fsc->client; in handle_forward()
4005 u64 tid = le64_to_cpu(msg->hdr.tid); in handle_forward()
4008 int err = -EINVAL; in handle_forward()
4009 void *p = msg->front.iov_base; in handle_forward()
4010 void *end = p + msg->front.iov_len; in handle_forward()
4017 mutex_lock(&mdsc->mutex); in handle_forward()
4020 mutex_unlock(&mdsc->mutex); in handle_forward()
4021 doutc(cl, "forward tid %llu to mds%d - req dne\n", tid, next_mds); in handle_forward()
4025 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { in handle_forward()
4028 } else if (fwd_seq <= req->r_num_fwd || (uint32_t)fwd_seq >= U32_MAX) { in handle_forward()
4037 mutex_lock(&req->r_fill_mutex); in handle_forward()
4038 req->r_err = -EMULTIHOP; in handle_forward()
4039 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags); in handle_forward()
4040 mutex_unlock(&req->r_fill_mutex); in handle_forward()
4047 BUG_ON(req->r_err); in handle_forward()
4048 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)); in handle_forward()
4049 req->r_attempts = 0; in handle_forward()
4050 req->r_num_fwd = fwd_seq; in handle_forward()
4051 req->r_resend_mds = next_mds; in handle_forward()
4055 mutex_unlock(&mdsc->mutex); in handle_forward()
4075 while (n-- > 0) { in __decode_session_metadata()
4093 return -1; in __decode_session_metadata()
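/*
 * The session metadata is a map<string,string>; the loop above walks its
 * n entries. In practice the key of interest is the server's error
 * string, whose value containing "blocklisted" is what flips the flag
 * that later drives maybe_recover_session().
 */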
4102 struct ceph_mds_client *mdsc = session->s_mdsc; in handle_session()
4103 struct ceph_client *cl = mdsc->fsc->client; in handle_session()
4104 int mds = session->s_mds; in handle_session()
4105 int msg_version = le16_to_cpu(msg->hdr.version); in handle_session()
4106 void *p = msg->front.iov_base; in handle_session()
4107 void *end = p + msg->front.iov_len; in handle_session()
4122 op = le32_to_cpu(h->op); in handle_session()
4123 seq = le64_to_cpu(h->seq); in handle_session()
4139 p += len - sizeof(features); in handle_session()
4155 session->s_mds); in handle_session()
4213 while (_len && cap_auths[i].match.path[_len - 1] == '/') { in handle_session()
4214 cap_auths[i].match.path[_len - 1] = '\0'; in handle_session()
4215 _len -= 1; in handle_session()
4242 mutex_lock(&mdsc->mutex); in handle_session()
4244 if (mdsc->s_cap_auths) { in handle_session()
4245 for (i = 0; i < mdsc->s_cap_auths_num; i++) { in handle_session()
4246 kfree(mdsc->s_cap_auths[i].match.gids); in handle_session()
4247 kfree(mdsc->s_cap_auths[i].match.path); in handle_session()
4248 kfree(mdsc->s_cap_auths[i].match.fs_name); in handle_session()
4250 kfree(mdsc->s_cap_auths); in handle_session()
4252 mdsc->s_cap_auths_num = cap_auths_num; in handle_session()
4253 mdsc->s_cap_auths = cap_auths; in handle_session()
4260 session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; in handle_session()
4261 mutex_unlock(&mdsc->mutex); in handle_session()
4263 mutex_lock(&session->s_mutex); in handle_session()
4267 ceph_session_state_name(session->s_state), seq); in handle_session()
4269 if (session->s_state == CEPH_MDS_SESSION_HUNG) { in handle_session()
4270 session->s_state = CEPH_MDS_SESSION_OPEN; in handle_session()
4271 pr_info_client(cl, "mds%d came back\n", session->s_mds); in handle_session()
4276 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) in handle_session()
4278 session->s_mds); in handle_session()
4280 session->s_features = features; in handle_session()
4281 if (session->s_state == CEPH_MDS_SESSION_OPEN) { in handle_session()
4283 session->s_mds); in handle_session()
4285 session->s_state = CEPH_MDS_SESSION_OPEN; in handle_session()
4288 &session->s_features)) in handle_session()
4289 metric_schedule_delayed(&mdsc->metric); in handle_session()
4297 if (!session->s_seq && seq) in handle_session()
4298 session->s_seq = seq; in handle_session()
4301 if (mdsc->stopping) in handle_session()
4306 if (session->s_renew_seq == seq) in handle_session()
4311 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) in handle_session()
4313 session->s_mds); in handle_session()
4314 session->s_state = CEPH_MDS_SESSION_CLOSED; in handle_session()
4318 wake_up_all(&mdsc->session_close_wq); in handle_session()
4323 session->s_mds); in handle_session()
4324 atomic_inc(&session->s_cap_gen); in handle_session()
4325 session->s_cap_ttl = jiffies - 1; in handle_session()
4330 ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); in handle_session()
4335 spin_lock(&session->s_cap_lock); in handle_session()
4336 if (session->s_num_cap_releases) in handle_session()
4338 spin_unlock(&session->s_cap_lock); in handle_session()
4345 spin_lock(&session->s_cap_lock); in handle_session()
4346 session->s_readonly = true; in handle_session()
4347 spin_unlock(&session->s_cap_lock); in handle_session()
4352 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING); in handle_session()
4354 session->s_mds); in handle_session()
4355 session->s_state = CEPH_MDS_SESSION_REJECTED; in handle_session()
4359 mdsc->fsc->blocklisted = true; in handle_session()
4368 mutex_unlock(&session->s_mutex); in handle_session()
4370 mutex_lock(&mdsc->mutex); in handle_session()
4371 __wake_requests(mdsc, &session->s_waiting); in handle_session()
4374 mutex_unlock(&mdsc->mutex); in handle_session()
4382 (int)msg->front.iov_len); in handle_session()
4396 struct ceph_client *cl = req->r_mdsc->fsc->client; in ceph_mdsc_release_dir_caps()
4399 dcaps = xchg(&req->r_dir_caps, 0); in ceph_mdsc_release_dir_caps()
4402 ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps); in ceph_mdsc_release_dir_caps()
4408 struct ceph_client *cl = req->r_mdsc->fsc->client; in ceph_mdsc_release_dir_caps_async()
4411 dcaps = xchg(&req->r_dir_caps, 0); in ceph_mdsc_release_dir_caps_async()
4414 ceph_put_cap_refs_async(ceph_inode(req->r_parent), dcaps); in ceph_mdsc_release_dir_caps_async()
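/*
 * xchg() on r_dir_caps makes the drop idempotent: whichever of the sync
 * and async release paths gets here first takes the refs, and the other
 * reads zero and does nothing.
 */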
4419 * called under session->mutex.
4427 doutc(mdsc->fsc->client, "mds%d\n", session->s_mds); in replay_unsafe_requests()
4429 mutex_lock(&mdsc->mutex); in replay_unsafe_requests()
4430 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) in replay_unsafe_requests()
4434 * also re-send old requests when MDS enters reconnect stage, so that MDS can process completed requests in clientreplay stage. in replay_unsafe_requests()
4437 p = rb_first(&mdsc->request_tree); in replay_unsafe_requests()
4441 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) in replay_unsafe_requests()
4443 if (req->r_attempts == 0) in replay_unsafe_requests()
4445 if (!req->r_session) in replay_unsafe_requests()
4447 if (req->r_session->s_mds != session->s_mds) in replay_unsafe_requests()
4454 mutex_unlock(&mdsc->mutex); in replay_unsafe_requests()
4463 int err = -ENOMEM; in send_reconnect_partial()
4465 if (!recon_state->allow_multi) in send_reconnect_partial()
4466 return -ENOSPC; in send_reconnect_partial()
4469 BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms); in send_reconnect_partial()
4471 /* pre-allocate new pagelist */ in send_reconnect_partial()
4474 return -ENOMEM; in send_reconnect_partial()
4485 if (recon_state->nr_caps) { in send_reconnect_partial()
4487 err = ceph_pagelist_encode_32(recon_state->pagelist, 0); in send_reconnect_partial()
4497 err = ceph_pagelist_encode_8(recon_state->pagelist, 1); in send_reconnect_partial()
4501 page = list_first_entry(&recon_state->pagelist->head, struct page, lru); in send_reconnect_partial()
4503 if (recon_state->nr_caps) { in send_reconnect_partial()
4505 *addr = cpu_to_le32(recon_state->nr_caps); in send_reconnect_partial()
4508 *(addr + 1) = cpu_to_le32(recon_state->nr_realms); in send_reconnect_partial()
4512 reply->hdr.version = cpu_to_le16(5); in send_reconnect_partial()
4513 reply->hdr.compat_version = cpu_to_le16(4); in send_reconnect_partial()
4515 reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length); in send_reconnect_partial()
4516 ceph_msg_data_add_pagelist(reply, recon_state->pagelist); in send_reconnect_partial()
4518 ceph_con_send(&recon_state->session->s_con, reply); in send_reconnect_partial()
4519 ceph_pagelist_release(recon_state->pagelist); in send_reconnect_partial()
4521 recon_state->pagelist = _pagelist; in send_reconnect_partial()
4522 recon_state->nr_caps = 0; in send_reconnect_partial()
4523 recon_state->nr_realms = 0; in send_reconnect_partial()
4524 recon_state->msg_version = 5; in send_reconnect_partial()
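/*
 * Each partial segment is a self-contained v5 reconnect message: the
 * leading 32-bit word(s) of the pagelist are patched in place through a
 * temporary mapping of its first page so they hold the final cap/realm
 * counts, and the one-byte value 1 encoded above tells the MDS that more
 * segments follow; send_mds_reconnect() sends the last segment with that
 * byte cleared.
 */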
4537 if (hlist_empty(&inode->i_dentry)) in d_find_primary()
4540 spin_lock(&inode->i_lock); in d_find_primary()
4541 if (hlist_empty(&inode->i_dentry)) in d_find_primary()
4544 if (S_ISDIR(inode->i_mode)) { in d_find_primary()
4545 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); in d_find_primary()
4551 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { in d_find_primary()
4552 spin_lock(&alias->d_lock); in d_find_primary()
4554 (ceph_dentry(alias)->flags & CEPH_DENTRY_PRIMARY_LINK)) { in d_find_primary()
4557 spin_unlock(&alias->d_lock); in d_find_primary()
4562 spin_unlock(&inode->i_lock); in d_find_primary()
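/*
 * In other words: for a directory the lone alias is the primary by
 * definition, while for a file only an alias flagged
 * CEPH_DENTRY_PRIMARY_LINK is trusted; anything else yields NULL and the
 * reconnect path encodes the cap without a dentry path.
 */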
4571 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); in reconnect_caps_cb()
4579 struct ceph_pagelist *pagelist = recon_state->pagelist; in reconnect_caps_cb()
4591 recon_state->msg_version >= 2); in reconnect_caps_cb()
4602 spin_lock(&ci->i_ceph_lock); in reconnect_caps_cb()
4605 spin_unlock(&ci->i_ceph_lock); in reconnect_caps_cb()
4610 ceph_vinop(inode), cap, cap->cap_id, in reconnect_caps_cb()
4611 ceph_cap_string(cap->issued)); in reconnect_caps_cb()
4613 cap->seq = 0; /* reset cap seq */ in reconnect_caps_cb()
4614 cap->issue_seq = 0; /* and issue_seq */ in reconnect_caps_cb()
4615 cap->mseq = 0; /* and migrate_seq */ in reconnect_caps_cb()
4616 cap->cap_gen = atomic_read(&cap->session->s_cap_gen); in reconnect_caps_cb()
4619 if (S_ISDIR(inode->i_mode)) { in reconnect_caps_cb()
4620 if (cap->issued & CEPH_CAP_DIR_CREATE) { in reconnect_caps_cb()
4621 ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns)); in reconnect_caps_cb()
4622 memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout)); in reconnect_caps_cb()
4624 cap->issued &= ~CEPH_CAP_ANY_DIR_OPS; in reconnect_caps_cb()
4627 if (recon_state->msg_version >= 2) { in reconnect_caps_cb()
4628 rec.v2.cap_id = cpu_to_le64(cap->cap_id); in reconnect_caps_cb()
4630 rec.v2.issued = cpu_to_le32(cap->issued); in reconnect_caps_cb()
4631 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); in reconnect_caps_cb()
4634 ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1); in reconnect_caps_cb()
4638 rec.v1.cap_id = cpu_to_le64(cap->cap_id); in reconnect_caps_cb()
4640 rec.v1.issued = cpu_to_le32(cap->issued); in reconnect_caps_cb()
4646 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); in reconnect_caps_cb()
4650 if (list_empty(&ci->i_cap_snaps)) { in reconnect_caps_cb()
4651 snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0; in reconnect_caps_cb()
4654 list_first_entry(&ci->i_cap_snaps, in reconnect_caps_cb()
4656 snap_follows = capsnap->follows; in reconnect_caps_cb()
4658 spin_unlock(&ci->i_ceph_lock); in reconnect_caps_cb()
4660 if (recon_state->msg_version >= 2) { in reconnect_caps_cb()
4678 err = -ENOMEM; in reconnect_caps_cb()
4687 if (err == -ENOSPC) in reconnect_caps_cb()
4696 if (recon_state->msg_version >= 3) { in reconnect_caps_cb()
4716 if (pagelist->length + total_len > RECONNECT_MAX_SIZE) { in reconnect_caps_cb()
4720 pagelist = recon_state->pagelist; in reconnect_caps_cb()
4728 if (recon_state->msg_version >= 3) { in reconnect_caps_cb()
4756 recon_state->nr_caps++; in reconnect_caps_cb()
4764 struct ceph_pagelist *pagelist = recon_state->pagelist; in encode_snap_realms()
4765 struct ceph_client *cl = mdsc->fsc->client; in encode_snap_realms()
4768 if (recon_state->msg_version >= 4) { in encode_snap_realms()
4769 err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms); in encode_snap_realms()
4779 for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) { in encode_snap_realms()
4784 if (recon_state->msg_version >= 4) { in encode_snap_realms()
4788 if (pagelist->length + need > RECONNECT_MAX_SIZE) { in encode_snap_realms()
4792 pagelist = recon_state->pagelist; in encode_snap_realms()
4805 realm->ino, realm->seq, realm->parent_ino); in encode_snap_realms()
4806 sr_rec.ino = cpu_to_le64(realm->ino); in encode_snap_realms()
4807 sr_rec.seq = cpu_to_le64(realm->seq); in encode_snap_realms()
4808 sr_rec.parent = cpu_to_le64(realm->parent_ino); in encode_snap_realms()
4814 recon_state->nr_realms++; in encode_snap_realms()
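/*
 * Each realm record is deliberately tiny -- just (ino, seq, parent) --
 * which is all the MDS needs to stitch the client's snap-realm tree back
 * together; the v4+ encoding additionally prefixes the array with a
 * count and may split it across partial messages when a record would
 * push the pagelist past RECONNECT_MAX_SIZE (see the check above).
 */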
4834 struct ceph_client *cl = mdsc->fsc->client; in send_mds_reconnect()
4836 int mds = session->s_mds; in send_mds_reconnect()
4837 int err = -ENOMEM; in send_mds_reconnect()
4853 xa_destroy(&session->s_delegated_inos); in send_mds_reconnect()
4855 mutex_lock(&session->s_mutex); in send_mds_reconnect()
4856 session->s_state = CEPH_MDS_SESSION_RECONNECTING; in send_mds_reconnect()
4857 session->s_seq = 0; in send_mds_reconnect()
4860 ceph_session_state_name(session->s_state)); in send_mds_reconnect()
4862 atomic_inc(&session->s_cap_gen); in send_mds_reconnect()
4864 spin_lock(&session->s_cap_lock); in send_mds_reconnect()
4866 session->s_readonly = 0; in send_mds_reconnect()
4872 session->s_cap_reconnect = 1; in send_mds_reconnect()
4875 spin_unlock(&session->s_cap_lock); in send_mds_reconnect()
4879 if (mdsc->fsc->sb->s_root) in send_mds_reconnect()
4880 shrink_dcache_parent(mdsc->fsc->sb->s_root); in send_mds_reconnect()
4882 ceph_con_close(&session->s_con); in send_mds_reconnect()
4883 ceph_con_open(&session->s_con, in send_mds_reconnect()
4885 ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); in send_mds_reconnect()
4892 down_read(&mdsc->snap_rwsem); in send_mds_reconnect()
4899 if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) { in send_mds_reconnect()
4902 } else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) { in send_mds_reconnect()
4910 spin_lock(&session->s_cap_lock); in send_mds_reconnect()
4911 session->s_cap_reconnect = 0; in send_mds_reconnect()
4912 spin_unlock(&session->s_cap_lock); in send_mds_reconnect()
4918 if (mdsc->num_snap_realms) { in send_mds_reconnect()
4920 recon_state.pagelist->length + in send_mds_reconnect()
4921 mdsc->num_snap_realms * in send_mds_reconnect()
4927 total_len += mdsc->num_snap_realms * in send_mds_reconnect()
4932 err = -ENOSPC; in send_mds_reconnect()
4956 list_first_entry(&recon_state.pagelist->head, in send_mds_reconnect()
4960 WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms); in send_mds_reconnect()
4968 reply->hdr.version = cpu_to_le16(recon_state.msg_version); in send_mds_reconnect()
4970 reply->hdr.compat_version = cpu_to_le16(4); in send_mds_reconnect()
4972 reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length); in send_mds_reconnect()
4975 ceph_con_send(&session->s_con, reply); in send_mds_reconnect()
4977 mutex_unlock(&session->s_mutex); in send_mds_reconnect()
4979 mutex_lock(&mdsc->mutex); in send_mds_reconnect()
4980 __wake_requests(mdsc, &session->s_waiting); in send_mds_reconnect()
4981 mutex_unlock(&mdsc->mutex); in send_mds_reconnect()
4983 up_read(&mdsc->snap_rwsem); in send_mds_reconnect()
4989 up_read(&mdsc->snap_rwsem); in send_mds_reconnect()
4990 mutex_unlock(&session->s_mutex); in send_mds_reconnect()
5004 * called under mdsc->mutex.
5014 struct ceph_client *cl = mdsc->fsc->client; in check_new_map()
5016 doutc(cl, "new %u old %u\n", newmap->m_epoch, oldmap->m_epoch); in check_new_map()
5018 if (newmap->m_info) { in check_new_map()
5019 for (i = 0; i < newmap->possible_max_rank; i++) { in check_new_map()
5020 for (j = 0; j < newmap->m_info[i].num_export_targets; j++) in check_new_map()
5021 set_bit(newmap->m_info[i].export_targets[j], targets); in check_new_map()
5025 for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) { in check_new_map()
5026 if (!mdsc->sessions[i]) in check_new_map()
5028 s = mdsc->sessions[i]; in check_new_map()
5032 doutc(cl, "mds%d state %s%s -> %s%s (session %s)\n", in check_new_map()
5037 ceph_session_state_name(s->s_state)); in check_new_map()
5039 if (i >= newmap->possible_max_rank) { in check_new_map()
5043 __wake_requests(mdsc, &s->s_waiting); in check_new_map()
5044 mutex_unlock(&mdsc->mutex); in check_new_map()
5046 mutex_lock(&s->s_mutex); in check_new_map()
5049 mutex_unlock(&s->s_mutex); in check_new_map()
5053 mutex_lock(&mdsc->mutex); in check_new_map()
5062 mutex_unlock(&mdsc->mutex); in check_new_map()
5063 mutex_lock(&s->s_mutex); in check_new_map()
5064 mutex_lock(&mdsc->mutex); in check_new_map()
5065 ceph_con_close(&s->s_con); in check_new_map()
5066 mutex_unlock(&s->s_mutex); in check_new_map()
5067 s->s_state = CEPH_MDS_SESSION_RESTARTING; in check_new_map()
5075 if (s->s_state == CEPH_MDS_SESSION_RESTARTING && in check_new_map()
5077 mutex_unlock(&mdsc->mutex); in check_new_map()
5080 mutex_lock(&mdsc->mutex); in check_new_map()
5091 s->s_mds); in check_new_map()
5093 mutex_unlock(&mdsc->mutex); in check_new_map()
5094 mutex_lock(&s->s_mutex); in check_new_map()
5095 mutex_lock(&mdsc->mutex); in check_new_map()
5097 mutex_unlock(&s->s_mutex); in check_new_map()
5105 for (i = 0; i < newmap->possible_max_rank; i++) { in check_new_map()
5125 * the mdsc->mutex's unlock/lock gap below in rare in check_new_map()
5126 * cases. But the related MDS daemon will just queue in check_new_map()
5142 mutex_unlock(&mdsc->mutex); in check_new_map()
5145 mutex_lock(&mdsc->mutex); in check_new_map()
5148 for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) { in check_new_map()
5149 s = mdsc->sessions[i]; in check_new_map()
5154 if (s->s_state == CEPH_MDS_SESSION_OPEN || in check_new_map()
5155 s->s_state == CEPH_MDS_SESSION_HUNG || in check_new_map()
5156 s->s_state == CEPH_MDS_SESSION_CLOSING) { in check_new_map()
5170 * caller must hold session s_mutex, dentry->d_lock
5176 ceph_put_mds_session(di->lease_session); in __ceph_mdsc_drop_dentry_lease()
5177 di->lease_session = NULL; in __ceph_mdsc_drop_dentry_lease()
5184 struct ceph_client *cl = mdsc->fsc->client; in handle_lease()
5185 struct super_block *sb = mdsc->fsc->sb; in handle_lease()
5189 int mds = session->s_mds; in handle_lease()
5190 struct ceph_mds_lease *h = msg->front.iov_base; in handle_lease()
5202 if (msg->front.iov_len < sizeof(*h) + sizeof(u32)) in handle_lease()
5204 vino.ino = le64_to_cpu(h->ino); in handle_lease()
5206 seq = le32_to_cpu(h->seq); in handle_lease()
5208 if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len) in handle_lease()
5214 doutc(cl, "%s, ino %llx %p %.*s\n", ceph_lease_op_name(h->action), in handle_lease()
5217 mutex_lock(&session->s_mutex); in handle_lease()
5236 spin_lock(&dentry->d_lock); in handle_lease()
5238 switch (h->action) { in handle_lease()
5240 if (di->lease_session == session) { in handle_lease()
5241 if (ceph_seq_cmp(di->lease_seq, seq) > 0) in handle_lease()
5242 h->seq = cpu_to_le32(di->lease_seq); in handle_lease()
5249 if (di->lease_session == session && in handle_lease()
5250 di->lease_gen == atomic_read(&session->s_cap_gen) && in handle_lease()
5251 di->lease_renew_from && in handle_lease()
5252 di->lease_renew_after == 0) { in handle_lease()
5254 msecs_to_jiffies(le32_to_cpu(h->duration_ms)); in handle_lease()
5256 di->lease_seq = seq; in handle_lease()
5257 di->time = di->lease_renew_from + duration; in handle_lease()
5258 di->lease_renew_after = di->lease_renew_from + in handle_lease()
5260 di->lease_renew_from = 0; in handle_lease()
5264 spin_unlock(&dentry->d_lock); in handle_lease()
5272 h->action = CEPH_MDS_LEASE_REVOKE_ACK; in handle_lease()
5274 ceph_con_send(&session->s_con, msg); in handle_lease()
5277 mutex_unlock(&session->s_mutex); in handle_lease()
5294 struct ceph_client *cl = session->s_mdsc->fsc->client; in ceph_mdsc_lease_send_msg()
5301 session->s_mds); in ceph_mdsc_lease_send_msg()
5306 lease = msg->front.iov_base; in ceph_mdsc_lease_send_msg()
5307 lease->action = action; in ceph_mdsc_lease_send_msg()
5308 lease->seq = cpu_to_le32(seq); in ceph_mdsc_lease_send_msg()
5310 spin_lock(&dentry->d_lock); in ceph_mdsc_lease_send_msg()
5311 dir = d_inode(dentry->d_parent); in ceph_mdsc_lease_send_msg()
5312 lease->ino = cpu_to_le64(ceph_ino(dir)); in ceph_mdsc_lease_send_msg()
5313 lease->first = lease->last = cpu_to_le64(ceph_snap(dir)); in ceph_mdsc_lease_send_msg()
5315 put_unaligned_le32(dentry->d_name.len, lease + 1); in ceph_mdsc_lease_send_msg()
5317 dentry->d_name.name, dentry->d_name.len); in ceph_mdsc_lease_send_msg()
5318 spin_unlock(&dentry->d_lock); in ceph_mdsc_lease_send_msg()
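/*
 * The resulting front mirrors what handle_lease() expects on input: a
 * struct ceph_mds_lease followed by a 32-bit name length and the dentry
 * name bytes.
 */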
5320 ceph_con_send(&session->s_con, msg); in ceph_mdsc_lease_send_msg()
5328 mutex_lock(&s->s_mutex); in lock_unlock_session()
5329 mutex_unlock(&s->s_mutex); in lock_unlock_session()
5334 struct ceph_client *cl = mdsc->fsc->client; in maybe_recover_session()
5335 struct ceph_fs_client *fsc = mdsc->fsc; in maybe_recover_session()
5340 if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED) in maybe_recover_session()
5343 if (!READ_ONCE(fsc->blocklisted)) in maybe_recover_session()
5347 ceph_force_reconnect(fsc->sb); in maybe_recover_session()
5352 struct ceph_client *cl = s->s_mdsc->fsc->client; in check_session_state()
5354 switch (s->s_state) { in check_session_state()
5356 if (s->s_ttl && time_after(jiffies, s->s_ttl)) { in check_session_state()
5357 s->s_state = CEPH_MDS_SESSION_HUNG; in check_session_state()
5358 pr_info_client(cl, "mds%d hung\n", s->s_mds); in check_session_state()
5378 struct ceph_client *cl = s->s_mdsc->fsc->client; in inc_session_sequence()
5380 lockdep_assert_held(&s->s_mutex); in inc_session_sequence()
5382 s->s_seq++; in inc_session_sequence()
5384 if (s->s_state == CEPH_MDS_SESSION_CLOSING) { in inc_session_sequence()
5387 doutc(cl, "resending session close request for mds%d\n", s->s_mds); in inc_session_sequence()
5391 s->s_mds, ret); in inc_session_sequence()
5396 * delayed work -- periodically trim expired leases, renew caps with mds. If
5407 schedule_delayed_work(&mdsc->delayed_work, in schedule_delayed()
5420 doutc(mdsc->fsc->client, "mdsc delayed_work\n"); in delayed_work()
5422 if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED) in delayed_work()
5425 mutex_lock(&mdsc->mutex); in delayed_work()
5426 renew_interval = mdsc->mdsmap->m_session_timeout >> 2; in delayed_work()
5428 mdsc->last_renew_caps); in delayed_work()
5430 mdsc->last_renew_caps = jiffies; in delayed_work()
5432 for (i = 0; i < mdsc->max_sessions; i++) { in delayed_work()
5441 mutex_unlock(&mdsc->mutex); in delayed_work()
5445 mutex_lock(&s->s_mutex); in delayed_work()
5449 ceph_con_keepalive(&s->s_con); in delayed_work()
5450 if (s->s_state == CEPH_MDS_SESSION_OPEN || in delayed_work()
5451 s->s_state == CEPH_MDS_SESSION_HUNG) in delayed_work()
5453 mutex_unlock(&s->s_mutex); in delayed_work()
5456 mutex_lock(&mdsc->mutex); in delayed_work()
5458 mutex_unlock(&mdsc->mutex); in delayed_work()
5479 return -ENOMEM; in ceph_mdsc_init()
5480 mdsc->fsc = fsc; in ceph_mdsc_init()
5481 mutex_init(&mdsc->mutex); in ceph_mdsc_init()
5482 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); in ceph_mdsc_init()
5483 if (!mdsc->mdsmap) { in ceph_mdsc_init()
5484 err = -ENOMEM; in ceph_mdsc_init()
5488 init_completion(&mdsc->safe_umount_waiters); in ceph_mdsc_init()
5489 spin_lock_init(&mdsc->stopping_lock); in ceph_mdsc_init()
5490 atomic_set(&mdsc->stopping_blockers, 0); in ceph_mdsc_init()
5491 init_completion(&mdsc->stopping_waiter); in ceph_mdsc_init()
5492 init_waitqueue_head(&mdsc->session_close_wq); in ceph_mdsc_init()
5493 INIT_LIST_HEAD(&mdsc->waiting_for_map); in ceph_mdsc_init()
5494 mdsc->quotarealms_inodes = RB_ROOT; in ceph_mdsc_init()
5495 mutex_init(&mdsc->quotarealms_inodes_mutex); in ceph_mdsc_init()
5496 init_rwsem(&mdsc->snap_rwsem); in ceph_mdsc_init()
5497 mdsc->snap_realms = RB_ROOT; in ceph_mdsc_init()
5498 INIT_LIST_HEAD(&mdsc->snap_empty); in ceph_mdsc_init()
5499 spin_lock_init(&mdsc->snap_empty_lock); in ceph_mdsc_init()
5500 mdsc->request_tree = RB_ROOT; in ceph_mdsc_init()
5501 INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work); in ceph_mdsc_init()
5502 mdsc->last_renew_caps = jiffies; in ceph_mdsc_init()
5503 INIT_LIST_HEAD(&mdsc->cap_delay_list); in ceph_mdsc_init()
5505 INIT_LIST_HEAD(&mdsc->cap_wait_list); in ceph_mdsc_init()
5507 spin_lock_init(&mdsc->cap_delay_lock); in ceph_mdsc_init()
5508 INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list); in ceph_mdsc_init()
5509 INIT_LIST_HEAD(&mdsc->snap_flush_list); in ceph_mdsc_init()
5510 spin_lock_init(&mdsc->snap_flush_lock); in ceph_mdsc_init()
5511 mdsc->last_cap_flush_tid = 1; in ceph_mdsc_init()
5512 INIT_LIST_HEAD(&mdsc->cap_flush_list); in ceph_mdsc_init()
5513 INIT_LIST_HEAD(&mdsc->cap_dirty_migrating); in ceph_mdsc_init()
5514 spin_lock_init(&mdsc->cap_dirty_lock); in ceph_mdsc_init()
5515 init_waitqueue_head(&mdsc->cap_flushing_wq); in ceph_mdsc_init()
5516 INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work); in ceph_mdsc_init()
5517 INIT_WORK(&mdsc->cap_unlink_work, ceph_cap_unlink_work); in ceph_mdsc_init()
5518 err = ceph_metric_init(&mdsc->metric); in ceph_mdsc_init()
5522 spin_lock_init(&mdsc->dentry_list_lock); in ceph_mdsc_init()
5523 INIT_LIST_HEAD(&mdsc->dentry_leases); in ceph_mdsc_init()
5524 INIT_LIST_HEAD(&mdsc->dentry_dir_leases); in ceph_mdsc_init()
5527 ceph_adjust_caps_max_min(mdsc, fsc->mount_options); in ceph_mdsc_init()
5529 spin_lock_init(&mdsc->snapid_map_lock); in ceph_mdsc_init()
5530 mdsc->snapid_map_tree = RB_ROOT; in ceph_mdsc_init()
5531 INIT_LIST_HEAD(&mdsc->snapid_map_lru); in ceph_mdsc_init()
5533 init_rwsem(&mdsc->pool_perm_rwsem); in ceph_mdsc_init()
5534 mdsc->pool_perm_tree = RB_ROOT; in ceph_mdsc_init()
5536 strscpy(mdsc->nodename, utsname()->nodename, in ceph_mdsc_init()
5537 sizeof(mdsc->nodename)); in ceph_mdsc_init()
5539 fsc->mdsc = mdsc; in ceph_mdsc_init()
5543 kfree(mdsc->mdsmap); in ceph_mdsc_init()
5555 struct ceph_client *cl = mdsc->fsc->client; in wait_requests()
5556 struct ceph_options *opts = mdsc->fsc->client->options; in wait_requests()
5559 mutex_lock(&mdsc->mutex); in wait_requests()
5561 mutex_unlock(&mdsc->mutex); in wait_requests()
5564 wait_for_completion_timeout(&mdsc->safe_umount_waiters, in wait_requests()
5565 ceph_timeout_jiffies(opts->mount_timeout)); in wait_requests()
5568 mutex_lock(&mdsc->mutex); in wait_requests()
5570 doutc(cl, "timed out on tid %llu\n", req->r_tid); in wait_requests()
5571 list_del_init(&req->r_wait); in wait_requests()
5575 mutex_unlock(&mdsc->mutex); in wait_requests()
5581 struct ceph_client *cl = s->s_mdsc->fsc->client; in send_flush_mdlog()
5585 * Pre-luminous MDS crashes when it sees an unknown session request in send_flush_mdlog()
5587 if (!CEPH_HAVE_FEATURE(s->s_con.peer_features, SERVER_LUMINOUS)) in send_flush_mdlog()
5590 mutex_lock(&s->s_mutex); in send_flush_mdlog()
5592 s->s_mds, ceph_session_state_name(s->s_state), s->s_seq); in send_flush_mdlog()
5594 s->s_seq); in send_flush_mdlog()
5597 s->s_mds, ceph_session_state_name(s->s_state), s->s_seq); in send_flush_mdlog()
5599 ceph_con_send(&s->s_con, msg); in send_flush_mdlog()
5601 mutex_unlock(&s->s_mutex); in send_flush_mdlog()
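/*
 * Flushing the mdlog nudges the MDS into journaling pending updates
 * immediately, so the unsafe requests we are about to wait on (see
 * flush_mdlog_and_wait_mdsc_unsafe_requests()) get their safe replies
 * without sitting out a full journal flush interval.
 */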
5609 u32 caller_uid = from_kuid(&init_user_ns, cred->fsuid); in ceph_mds_auth_match()
5610 u32 caller_gid = from_kgid(&init_user_ns, cred->fsgid); in ceph_mds_auth_match()
5611 struct ceph_client *cl = mdsc->fsc->client; in ceph_mds_auth_match()
5612 const char *spath = mdsc->fsc->mount_options->server_path; in ceph_mds_auth_match()
5617 doutc(cl, "match.uid %lld\n", auth->match.uid); in ceph_mds_auth_match()
5618 if (auth->match.uid != MDS_AUTH_UID_ANY) { in ceph_mds_auth_match()
5619 if (auth->match.uid != caller_uid) in ceph_mds_auth_match()
5621 if (auth->match.num_gids) { in ceph_mds_auth_match()
5622 for (i = 0; i < auth->match.num_gids; i++) { in ceph_mds_auth_match()
5623 if (caller_gid == auth->match.gids[i]) in ceph_mds_auth_match()
5626 if (!gid_matched && cred->group_info->ngroups) { in ceph_mds_auth_match()
5627 for (i = 0; i < cred->group_info->ngroups; i++) { in ceph_mds_auth_match()
5629 cred->group_info->gid[i]); in ceph_mds_auth_match()
5630 for (j = 0; j < auth->match.num_gids; j++) { in ceph_mds_auth_match()
5631 if (gid == auth->match.gids[j]) { in ceph_mds_auth_match()
5646 if (auth->match.path) { in ceph_mds_auth_match()
5651 len = strlen(auth->match.path); in ceph_mds_auth_match()
5658 spath, tpath, auth->match.path); in ceph_mds_auth_match()
5664 return -ENOMEM; in ceph_mds_auth_match()
5677 while (tlen && _tpath[tlen - 1] == '/') { in ceph_mds_auth_match()
5678 _tpath[tlen - 1] = '\0'; in ceph_mds_auth_match()
5679 tlen -= 1; in ceph_mds_auth_match()
5685 * match.path=/foo --> /foo _path=/foo --> match in ceph_mds_auth_match()
5686 * match.path=/foo/ --> /foo _path=/foo --> match in ceph_mds_auth_match()
5689 * match.path=/foo/ --> /foo _path=/foo/ --> match in ceph_mds_auth_match()
5690 * match.path=/foo --> /foo _path=/foo/ --> match in ceph_mds_auth_match()
5691 * match.path=/foo/ --> /foo _path=/foo/d --> match in ceph_mds_auth_match()
5692 * match.path=/foo --> /foo _path=/food --> mismatch in ceph_mds_auth_match()
5694 * All the other cases --> mismatch in ceph_mds_auth_match()
5697 char *first = strstr(_tpath, auth->match.path); in ceph_mds_auth_match()
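/*
 * strstr() alone is only a substring test; the checks that follow (not
 * shown here) require first == _tpath and, when the target path is
 * longer, a '/' right after the matched prefix -- which is exactly what
 * makes _path=/food a mismatch but _path=/foo/d a match in the table
 * above.
 */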
5718 u32 caller_uid = from_kuid(&init_user_ns, cred->fsuid); in ceph_mds_check_access()
5719 u32 caller_gid = from_kgid(&init_user_ns, cred->fsgid); in ceph_mds_check_access()
5721 struct ceph_client *cl = mdsc->fsc->client; in ceph_mds_check_access()
5728 for (i = 0; i < mdsc->s_cap_auths_num; i++) { in ceph_mds_check_access()
5729 struct ceph_mds_cap_auth *s = &mdsc->s_cap_auths[i]; in ceph_mds_check_access()
5739 if ((mask & MAY_WRITE) && s->writeable && in ceph_mds_check_access()
5740 s->match.root_squash && (!caller_uid || !caller_gid)) in ceph_mds_check_access()
5743 if (((mask & MAY_WRITE) && !s->writeable) || in ceph_mds_check_access()
5744 ((mask & MAY_READ) && !s->readable)) in ceph_mds_check_access()
5764 rw_perms_s->readable, rw_perms_s->writeable, in ceph_mds_check_access()
5768 return -EACCES; in ceph_mds_check_access()
5777 doutc(mdsc->fsc->client, "begin\n"); in ceph_mdsc_pre_umount()
5778 mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN; in ceph_mdsc_pre_umount()
5792 doutc(mdsc->fsc->client, "done\n"); in ceph_mdsc_pre_umount()
5801 struct ceph_client *cl = mdsc->fsc->client; in flush_mdlog_and_wait_mdsc_unsafe_requests()
5806 mutex_lock(&mdsc->mutex); in flush_mdlog_and_wait_mdsc_unsafe_requests()
5810 while (req && req->r_tid <= want_tid) { in flush_mdlog_and_wait_mdsc_unsafe_requests()
5812 n = rb_next(&req->r_node); in flush_mdlog_and_wait_mdsc_unsafe_requests()
5817 if (req->r_op != CEPH_MDS_OP_SETFILELOCK && in flush_mdlog_and_wait_mdsc_unsafe_requests()
5818 (req->r_op & CEPH_MDS_OP_WRITE)) { in flush_mdlog_and_wait_mdsc_unsafe_requests()
5819 struct ceph_mds_session *s = req->r_session; in flush_mdlog_and_wait_mdsc_unsafe_requests()
5831 mutex_unlock(&mdsc->mutex); in flush_mdlog_and_wait_mdsc_unsafe_requests()
5842 req->r_tid, want_tid); in flush_mdlog_and_wait_mdsc_unsafe_requests()
5843 wait_for_completion(&req->r_safe_completion); in flush_mdlog_and_wait_mdsc_unsafe_requests()
5845 mutex_lock(&mdsc->mutex); in flush_mdlog_and_wait_mdsc_unsafe_requests()
5849 if (RB_EMPTY_NODE(&nextreq->r_node)) { in flush_mdlog_and_wait_mdsc_unsafe_requests()
5858 mutex_unlock(&mdsc->mutex); in flush_mdlog_and_wait_mdsc_unsafe_requests()
5865 struct ceph_client *cl = mdsc->fsc->client; in ceph_mdsc_sync()
5868 if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) in ceph_mdsc_sync()
5872 mutex_lock(&mdsc->mutex); in ceph_mdsc_sync()
5873 want_tid = mdsc->last_tid; in ceph_mdsc_sync()
5874 mutex_unlock(&mdsc->mutex); in ceph_mdsc_sync()
5878 spin_lock(&mdsc->cap_dirty_lock); in ceph_mdsc_sync()
5879 want_flush = mdsc->last_cap_flush_tid; in ceph_mdsc_sync()
5880 if (!list_empty(&mdsc->cap_flush_list)) { in ceph_mdsc_sync()
5882 list_last_entry(&mdsc->cap_flush_list, in ceph_mdsc_sync()
5884 cf->wake = true; in ceph_mdsc_sync()
5886 spin_unlock(&mdsc->cap_dirty_lock); in ceph_mdsc_sync()
5899 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) in done_closing_sessions()
5901 return atomic_read(&mdsc->num_sessions) <= skipped; in done_closing_sessions()
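/*
 * "skipped" counts sessions to which no CLOSE request could be sent
 * (they were not in an OPEN/HUNG state when we tried), so the umount
 * wait below completes once every closable session is actually gone.
 */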
5909 struct ceph_options *opts = mdsc->fsc->client->options; in ceph_mdsc_close_sessions()
5910 struct ceph_client *cl = mdsc->fsc->client; in ceph_mdsc_close_sessions()
5918 mutex_lock(&mdsc->mutex); in ceph_mdsc_close_sessions()
5919 for (i = 0; i < mdsc->max_sessions; i++) { in ceph_mdsc_close_sessions()
5923 mutex_unlock(&mdsc->mutex); in ceph_mdsc_close_sessions()
5924 mutex_lock(&session->s_mutex); in ceph_mdsc_close_sessions()
5927 mutex_unlock(&session->s_mutex); in ceph_mdsc_close_sessions()
5929 mutex_lock(&mdsc->mutex); in ceph_mdsc_close_sessions()
5931 mutex_unlock(&mdsc->mutex); in ceph_mdsc_close_sessions()
5934 wait_event_timeout(mdsc->session_close_wq, in ceph_mdsc_close_sessions()
5936 ceph_timeout_jiffies(opts->mount_timeout)); in ceph_mdsc_close_sessions()
5939 mutex_lock(&mdsc->mutex); in ceph_mdsc_close_sessions()
5940 for (i = 0; i < mdsc->max_sessions; i++) { in ceph_mdsc_close_sessions()
5941 if (mdsc->sessions[i]) { in ceph_mdsc_close_sessions()
5942 session = ceph_get_mds_session(mdsc->sessions[i]); in ceph_mdsc_close_sessions()
5944 mutex_unlock(&mdsc->mutex); in ceph_mdsc_close_sessions()
5945 mutex_lock(&session->s_mutex); in ceph_mdsc_close_sessions()
5947 mutex_unlock(&session->s_mutex); in ceph_mdsc_close_sessions()
5949 mutex_lock(&mdsc->mutex); in ceph_mdsc_close_sessions()
5952 WARN_ON(!list_empty(&mdsc->cap_delay_list)); in ceph_mdsc_close_sessions()
5953 mutex_unlock(&mdsc->mutex); in ceph_mdsc_close_sessions()
5958 cancel_work_sync(&mdsc->cap_reclaim_work); in ceph_mdsc_close_sessions()
5959 cancel_work_sync(&mdsc->cap_unlink_work); in ceph_mdsc_close_sessions()
5960 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ in ceph_mdsc_close_sessions()
5970 doutc(mdsc->fsc->client, "force umount\n"); in ceph_mdsc_force_umount()
5972 mutex_lock(&mdsc->mutex); in ceph_mdsc_force_umount()
5973 for (mds = 0; mds < mdsc->max_sessions; mds++) { in ceph_mdsc_force_umount()
5978 if (session->s_state == CEPH_MDS_SESSION_REJECTED) in ceph_mdsc_force_umount()
5980 __wake_requests(mdsc, &session->s_waiting); in ceph_mdsc_force_umount()
5981 mutex_unlock(&mdsc->mutex); in ceph_mdsc_force_umount()
5983 mutex_lock(&session->s_mutex); in ceph_mdsc_force_umount()
5985 if (session->s_state == CEPH_MDS_SESSION_CLOSING) { in ceph_mdsc_force_umount()
5989 mutex_unlock(&session->s_mutex); in ceph_mdsc_force_umount()
5992 mutex_lock(&mdsc->mutex); in ceph_mdsc_force_umount()
5995 __wake_requests(mdsc, &mdsc->waiting_for_map); in ceph_mdsc_force_umount()
5996 mutex_unlock(&mdsc->mutex); in ceph_mdsc_force_umount()
6001 doutc(mdsc->fsc->client, "stop\n"); in ceph_mdsc_stop()
6008 * delayed work will re-arm itself again after that. in ceph_mdsc_stop()
6010 flush_delayed_work(&mdsc->delayed_work); in ceph_mdsc_stop()
6012 if (mdsc->mdsmap) in ceph_mdsc_stop()
6013 ceph_mdsmap_destroy(mdsc->mdsmap); in ceph_mdsc_stop()
6014 kfree(mdsc->sessions); in ceph_mdsc_stop()
6017 if (mdsc->s_cap_auths) { in ceph_mdsc_stop()
6020 for (i = 0; i < mdsc->s_cap_auths_num; i++) { in ceph_mdsc_stop()
6021 kfree(mdsc->s_cap_auths[i].match.gids); in ceph_mdsc_stop()
6022 kfree(mdsc->s_cap_auths[i].match.path); in ceph_mdsc_stop()
6023 kfree(mdsc->s_cap_auths[i].match.fs_name); in ceph_mdsc_stop()
6025 kfree(mdsc->s_cap_auths); in ceph_mdsc_stop()
6033 struct ceph_mds_client *mdsc = fsc->mdsc; in ceph_mdsc_destroy()
6034 doutc(fsc->client, "%p\n", mdsc); in ceph_mdsc_destroy()
6044 ceph_metric_destroy(&mdsc->metric); in ceph_mdsc_destroy()
6046 fsc->mdsc = NULL; in ceph_mdsc_destroy()
6048 doutc(fsc->client, "%p done\n", mdsc); in ceph_mdsc_destroy()
6053 struct ceph_fs_client *fsc = mdsc->fsc; in ceph_mdsc_handle_fsmap()
6054 struct ceph_client *cl = fsc->client; in ceph_mdsc_handle_fsmap()
6055 const char *mds_namespace = fsc->mount_options->mds_namespace; in ceph_mdsc_handle_fsmap()
6056 void *p = msg->front.iov_base; in ceph_mdsc_handle_fsmap()
6057 void *end = p + msg->front.iov_len; in ceph_mdsc_handle_fsmap()
6060 u32 mount_fscid = (u32)-1; in ceph_mdsc_handle_fsmap()
6061 int err = -EINVAL; in ceph_mdsc_handle_fsmap()
6072 while (num_fs-- > 0) { in ceph_mdsc_handle_fsmap()
6098 ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch); in ceph_mdsc_handle_fsmap()
6099 if (mount_fscid != (u32)-1) { in ceph_mdsc_handle_fsmap()
6100 fsc->client->monc.fs_cluster_id = mount_fscid; in ceph_mdsc_handle_fsmap()
6101 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, in ceph_mdsc_handle_fsmap()
6103 ceph_monc_renew_subs(&fsc->client->monc); in ceph_mdsc_handle_fsmap()
6105 err = -ENOENT; in ceph_mdsc_handle_fsmap()
6113 ceph_umount_begin(mdsc->fsc->sb); in ceph_mdsc_handle_fsmap()
6116 mutex_lock(&mdsc->mutex); in ceph_mdsc_handle_fsmap()
6117 mdsc->mdsmap_err = err; in ceph_mdsc_handle_fsmap()
6118 __wake_requests(mdsc, &mdsc->waiting_for_map); in ceph_mdsc_handle_fsmap()
6119 mutex_unlock(&mdsc->mutex); in ceph_mdsc_handle_fsmap()
6127 struct ceph_client *cl = mdsc->fsc->client; in ceph_mdsc_handle_mdsmap()
6130 void *p = msg->front.iov_base; in ceph_mdsc_handle_mdsmap()
6131 void *end = p + msg->front.iov_len; in ceph_mdsc_handle_mdsmap()
6134 int err = -EINVAL; in ceph_mdsc_handle_mdsmap()
6138 if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0) in ceph_mdsc_handle_mdsmap()
6145 mutex_lock(&mdsc->mutex); in ceph_mdsc_handle_mdsmap()
6146 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { in ceph_mdsc_handle_mdsmap()
6147 doutc(cl, "epoch %u <= our %u\n", epoch, mdsc->mdsmap->m_epoch); in ceph_mdsc_handle_mdsmap()
6148 mutex_unlock(&mdsc->mutex); in ceph_mdsc_handle_mdsmap()
6152 newmap = ceph_mdsmap_decode(mdsc, &p, end, ceph_msgr2(mdsc->fsc->client)); in ceph_mdsc_handle_mdsmap()
6159 if (mdsc->mdsmap) { in ceph_mdsc_handle_mdsmap()
6160 oldmap = mdsc->mdsmap; in ceph_mdsc_handle_mdsmap()
6161 mdsc->mdsmap = newmap; in ceph_mdsc_handle_mdsmap()
6165 mdsc->mdsmap = newmap; /* first mds map */ in ceph_mdsc_handle_mdsmap()
6167 mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size, in ceph_mdsc_handle_mdsmap()
6170 __wake_requests(mdsc, &mdsc->waiting_for_map); in ceph_mdsc_handle_mdsmap()
6171 ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP, in ceph_mdsc_handle_mdsmap()
6172 mdsc->mdsmap->m_epoch); in ceph_mdsc_handle_mdsmap()
6174 mutex_unlock(&mdsc->mutex); in ceph_mdsc_handle_mdsmap()
6179 mutex_unlock(&mdsc->mutex); in ceph_mdsc_handle_mdsmap()
6183 ceph_umount_begin(mdsc->fsc->sb); in ceph_mdsc_handle_mdsmap()
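
Map delivery above follows an epoch-guarded pointer swap: under mdsc->mutex, a map whose epoch is not newer than the installed one is dropped; otherwise the new map replaces the old, waiters are woken, and the monitor is acked via ceph_monc_got_map(). A generic sketch of the swap, assuming illustrative types (struct map, struct client, install_map):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct map {
        uint32_t epoch;
        /* ... decoded map contents ... */
    };

    struct client {
        pthread_mutex_t mutex;
        struct map *map;
    };

    /* Returns 1 if newmap was installed (ownership taken), 0 if dropped. */
    static int install_map(struct client *c, struct map *newmap)
    {
        pthread_mutex_lock(&c->mutex);
        if (c->map && newmap->epoch <= c->map->epoch) {
            pthread_mutex_unlock(&c->mutex);
            free(newmap);               /* stale: epoch <= ours */
            return 0;
        }
        struct map *oldmap = c->map;    /* NULL for the first map */
        c->map = newmap;
        pthread_mutex_unlock(&c->mutex);
        free(oldmap);                   /* safe: no longer published */
        /* the kernel additionally wakes waiters and acks the monitor here */
        return 1;
    }
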
6190 struct ceph_mds_session *s = con->private; in mds_get_con()
6199 struct ceph_mds_session *s = con->private; in mds_put_con()
6210 struct ceph_mds_session *s = con->private; in mds_peer_reset()
6211 struct ceph_mds_client *mdsc = s->s_mdsc; in mds_peer_reset()
6213 pr_warn_client(mdsc->fsc->client, "mds%d closed our session\n", in mds_peer_reset()
6214 s->s_mds); in mds_peer_reset()
6215 if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO && in mds_peer_reset()
6216 ceph_mdsmap_get_state(mdsc->mdsmap, s->s_mds) >= CEPH_MDS_STATE_RECONNECT) in mds_peer_reset()
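
The excerpt elides the statement this condition guards; the visible predicate says a peer reset is only acted on when I/O is not fenced and the MDS map shows the daemon in at least the RECONNECT state. A sketch of that guard as a standalone predicate (the values 10 and 13 match Ceph's published MDS state constants, but treat them as illustrative here):

    #include <stdatomic.h>

    /* Illustrative state values; Ceph's own constants live in ceph_fs.h. */
    enum mount_state { MOUNT_OK = 0, MOUNT_FENCE_IO = 1 };
    enum mds_state  { MDS_STATE_RECONNECT = 10, MDS_STATE_ACTIVE = 13 };

    struct mount {
        _Atomic int state;              /* read with a READ_ONCE-like load */
    };

    /* Attempt a reconnect only when it can possibly succeed. */
    static int should_reconnect(const struct mount *m, int mds_state)
    {
        return atomic_load(&m->state) != MOUNT_FENCE_IO &&
               mds_state >= MDS_STATE_RECONNECT;
    }
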
6222 struct ceph_mds_session *s = con->private; in mds_dispatch()
6223 struct ceph_mds_client *mdsc = s->s_mdsc; in mds_dispatch()
6224 struct ceph_client *cl = mdsc->fsc->client; in mds_dispatch()
6225 int type = le16_to_cpu(msg->hdr.type); in mds_dispatch()
6227 mutex_lock(&mdsc->mutex); in mds_dispatch()
6229 mutex_unlock(&mdsc->mutex); in mds_dispatch()
6232 mutex_unlock(&mdsc->mutex); in mds_dispatch()
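
mds_dispatch() first revalidates the session under mdsc->mutex (the excerpt elides the check itself), then branches on the 16-bit message type taken from the header. A generic sketch of that verify-then-switch shape; all names and message IDs below are illustrative:

    #include <pthread.h>
    #include <stdint.h>

    enum { MSG_MDS_MAP = 21, MSG_CLIENT_SESSION = 22 };  /* illustrative IDs */

    struct session { int registered; };
    struct mclient { pthread_mutex_t mutex; };

    static void dispatch(struct mclient *mc, struct session *s,
                         uint16_t type, const void *payload)
    {
        pthread_mutex_lock(&mc->mutex);
        if (!s->registered) {           /* session raced with teardown */
            pthread_mutex_unlock(&mc->mutex);
            return;
        }
        pthread_mutex_unlock(&mc->mutex);

        switch (type) {
        case MSG_MDS_MAP:
            /* handle_mdsmap(...) */
            break;
        case MSG_CLIENT_SESSION:
            /* handle_session(...) */
            break;
        default:
            /* unknown type: log and drop */
            break;
        }
        (void)payload;
    }
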
6282 struct ceph_mds_session *s = con->private; in mds_get_authorizer()
6283 struct ceph_mds_client *mdsc = s->s_mdsc; in mds_get_authorizer()
6284 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; in mds_get_authorizer()
6285 struct ceph_auth_handshake *auth = &s->s_auth; in mds_get_authorizer()
6299 struct ceph_mds_session *s = con->private; in mds_add_authorizer_challenge()
6300 struct ceph_mds_client *mdsc = s->s_mdsc; in mds_add_authorizer_challenge()
6301 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; in mds_add_authorizer_challenge()
6303 return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer, in mds_add_authorizer_challenge()
6309 struct ceph_mds_session *s = con->private; in mds_verify_authorizer_reply()
6310 struct ceph_mds_client *mdsc = s->s_mdsc; in mds_verify_authorizer_reply()
6311 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; in mds_verify_authorizer_reply()
6312 struct ceph_auth_handshake *auth = &s->s_auth; in mds_verify_authorizer_reply()
6314 return ceph_auth_verify_authorizer_reply(ac, auth->authorizer, in mds_verify_authorizer_reply()
6315 auth->authorizer_reply_buf, auth->authorizer_reply_buf_len, in mds_verify_authorizer_reply()
6321 struct ceph_mds_session *s = con->private; in mds_invalidate_authorizer()
6322 struct ceph_mds_client *mdsc = s->s_mdsc; in mds_invalidate_authorizer()
6323 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; in mds_invalidate_authorizer()
6327 return ceph_monc_validate_auth(&mdsc->fsc->client->monc); in mds_invalidate_authorizer()
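
Each of the four authorizer callbacks above re-derives the auth client through the same pointer chain, s->s_mdsc->fsc->client->monc.auth. Purely as an editorial sketch (this assumes the surrounding kernel headers and is not a proposed change to mds_client.c), the chain could be factored into a helper:

    static inline struct ceph_auth_client *
    mds_auth_client(struct ceph_mds_session *s)
    {
            return s->s_mdsc->fsc->client->monc.auth;
    }
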
6334 struct ceph_mds_session *s = con->private; in mds_get_auth_request()
6335 struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth; in mds_get_auth_request()
6336 struct ceph_auth_handshake *auth = &s->s_auth; in mds_get_auth_request()
6344 *authorizer = auth->authorizer_buf; in mds_get_auth_request()
6345 *authorizer_len = auth->authorizer_buf_len; in mds_get_auth_request()
6354 struct ceph_mds_session *s = con->private; in mds_handle_auth_reply_more()
6355 struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth; in mds_handle_auth_reply_more()
6356 struct ceph_auth_handshake *auth = &s->s_auth; in mds_handle_auth_reply_more()
6364 *authorizer = auth->authorizer_buf; in mds_handle_auth_reply_more()
6365 *authorizer_len = auth->authorizer_buf_len; in mds_handle_auth_reply_more()
6374 struct ceph_mds_session *s = con->private; in mds_handle_auth_done()
6375 struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth; in mds_handle_auth_done()
6376 struct ceph_auth_handshake *auth = &s->s_auth; in mds_handle_auth_done()
6388 struct ceph_mds_session *s = con->private; in mds_handle_auth_bad_method()
6389 struct ceph_mon_client *monc = &s->s_mdsc->fsc->client->monc; in mds_handle_auth_bad_method()
6392 if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_MDS, in mds_handle_auth_bad_method()
6401 return -EACCES; in mds_handle_auth_bad_method()
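
mds_handle_auth_bad_method() gives the auth layer one chance to recover (in mainline, revalidating with the monitors, as mds_invalidate_authorizer() also does above) and otherwise surfaces -EACCES to the connection layer. A self-contained sketch of that retry-once-then-fail shape, with hypothetical helper names:

    #include <errno.h>

    /*
     * refresh() stands in for a credential-revalidation round trip;
     * both names here are hypothetical.
     */
    static int handle_bad_auth(int (*refresh)(void *), void *ctx,
                               int recoverable)
    {
        if (recoverable) {
            int ret = refresh(ctx);     /* one recovery attempt */
            if (ret)
                return ret;             /* hard failure: propagate */
        }
        return -EACCES;                 /* method still rejected */
    }
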
6408 int type = (int) le16_to_cpu(hdr->type); in mds_alloc_msg()
6409 int front_len = (int) le32_to_cpu(hdr->front_len); in mds_alloc_msg()
6411 if (con->in_msg) in mds_alloc_msg()
6412 return con->in_msg; in mds_alloc_msg()
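
mds_alloc_msg() reuses a message already attached to the connection (con->in_msg) and otherwise sizes a fresh one from the little-endian type and front_len fields of the header. A userspace sketch of that reuse-or-allocate shape; the struct names, msg_new-style allocation, and identity byte-order converters (valid only on a little-endian host) are stand-ins:

    #include <stdint.h>
    #include <stdlib.h>

    struct msg_header { uint16_t type; uint32_t front_len; };  /* wire: LE */
    struct msg  { int type; size_t front_len; unsigned char *front; };
    struct conn { struct msg *in_msg; };

    /* Assumption: little-endian host, so conversion is the identity. */
    static uint16_t le16_to_host(uint16_t v) { return v; }
    static uint32_t le32_to_host(uint32_t v) { return v; }

    static struct msg *alloc_msg(struct conn *con, const struct msg_header *hdr)
    {
        if (con->in_msg)                /* a partially received msg exists */
            return con->in_msg;

        struct msg *m = calloc(1, sizeof(*m));
        if (!m)
            return NULL;
        m->type = le16_to_host(hdr->type);
        m->front_len = le32_to_host(hdr->front_len);
        m->front = malloc(m->front_len ? m->front_len : 1);
        if (!m->front) {
            free(m);
            return NULL;
        }
        return m;
    }
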
6427 struct ceph_mds_session *s = msg->con->private; in mds_sign_message()
6428 struct ceph_auth_handshake *auth = &s->s_auth; in mds_sign_message()
6435 struct ceph_mds_session *s = msg->con->private; in mds_check_message_signature()
6436 struct ceph_auth_handshake *auth = &s->s_auth; in mds_check_message_signature()
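
Both signing hooks pull the session's auth handshake off the connection's private pointer. As a hedged completion (recalled from current mainline and not self-contained; verify against mds_client.c before relying on it), the bodies simply delegate to the generic auth layer:

    static int mds_sign_message(struct ceph_msg *msg)
    {
            struct ceph_mds_session *s = msg->con->private;
            struct ceph_auth_handshake *auth = &s->s_auth;

            return ceph_auth_sign_message(auth, msg);
    }

    static int mds_check_message_signature(struct ceph_msg *msg)
    {
            struct ceph_mds_session *s = msg->con->private;
            struct ceph_auth_handshake *auth = &s->s_auth;

            return ceph_auth_check_message_signature(auth, msg);
    }
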