From 73d9f7eef3d98c3920e144797cc1894c6b005a1e Mon Sep 17 00:00:00 2001
From: majianpeng
Date: Tue, 16 Jul 2013 15:45:48 +0800
Subject: libceph: unregister request in __map_request failed and nofail == false

For a nofail == false request, if __map_request fails, the caller does
the cleanup work, such as releasing the relevant pages. It doesn't make
any sense to retry this request.

CC: stable@vger.kernel.org
Signed-off-by: Jianpeng Ma
Reviewed-by: Sage Weil
---
 net/ceph/osd_client.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'net')

diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index dd47889adc4a..dbc0a7392d67 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2129,6 +2129,8 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 			dout("osdc_start_request failed map, "
 				" will retry %lld\n", req->r_tid);
 			rc = 0;
+		} else {
+			__unregister_request(osdc, req);
 		}
 		goto out_unlock;
 	}
--
cgit v1.2.3
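With nofail == false, a mapping failure now comes back with the request
already unregistered, so the caller's cleanup cannot race with a later
retry touching pages it has already released. A minimal sketch of that
caller pattern (hypothetical function, not part of the patch):

	/*
	 * Hypothetical caller: submit a request whose data pages we own,
	 * with nofail == false. If __map_request fails inside
	 * ceph_osdc_start_request(), the request is no longer tracked by
	 * the osd_client, so dropping our reference and releasing the
	 * pages below is safe.
	 */
	static int example_sync_op(struct ceph_osd_client *osdc,
				   struct ceph_osd_request *req,
				   struct page **pages, int num_pages)
	{
		int ret;

		ret = ceph_osdc_start_request(osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(osdc, req);

		ceph_osdc_put_request(req);
		ceph_release_page_vector(pages, num_pages);
		return ret;
	}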
From 4d1829a59de402fc95daf4576c51aa0a7439aee8 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 30 Jul 2013 08:40:27 -0400
Subject: ceph: WQ_NON_REENTRANT is meaningless and going away

dbf2576e37 ("workqueue: make all workqueues non-reentrant") made
WQ_NON_REENTRANT a no-op and the flag is going away. Remove its usages.

This patch doesn't introduce any behavior changes.

Signed-off-by: Tejun Heo
Reviewed-by: Sage Weil
Cc: ceph-devel@vger.kernel.org
---
 net/ceph/messenger.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index eb0a46a49bd4..dd9b5857ef5c 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -290,7 +290,7 @@ int ceph_msgr_init(void)
 	if (ceph_msgr_slab_init())
 		return -ENOMEM;
 
-	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
+	ceph_msgr_wq = alloc_workqueue("ceph-msgr", 0, 0);
 	if (ceph_msgr_wq)
 		return 0;
--
cgit v1.2.3
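Since dbf2576e37 every workqueue is non-reentrant, so the flag adds
nothing over the defaults. A minimal sketch of the resulting calling
convention (hypothetical module, not from the patch):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static int __init example_init(void)
	{
		/* Plain flags: non-reentrancy is now the default for
		 * every workqueue, so WQ_NON_REENTRANT is never needed. */
		example_wq = alloc_workqueue("example-wq", 0, 0);
		if (!example_wq)
			return -ENOMEM;	/* returns NULL on failure, not ERR_PTR */
		return 0;
	}

	static void __exit example_exit(void)
	{
		destroy_workqueue(example_wq);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");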
From ad7a60de882aca31afb58721db166f7e77afcd92 Mon Sep 17 00:00:00 2001
From: Li Wang
Date: Thu, 15 Aug 2013 11:51:44 +0800
Subject: ceph: punch hole support

This patch implements fallocate and punch hole support for the Ceph
kernel client.

Signed-off-by: Li Wang
Signed-off-by: Yunchuan Wen
---
 fs/ceph/file.c        | 196 ++++++++++++++++++++++++++++++++++++++++++++++++++
 net/ceph/osd_client.c |  11 ++-
 2 files changed, 205 insertions(+), 2 deletions(-)

(limited to 'net')

diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index abc0e0759bdc..68af489c2abd 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 
 #include "super.h"
 #include "mds_client.h"
@@ -874,6 +875,200 @@ out:
 	return offset;
 }
 
+static inline void ceph_zero_partial_page(
+	struct inode *inode, loff_t offset, unsigned size)
+{
+	struct page *page;
+	pgoff_t index = offset >> PAGE_CACHE_SHIFT;
+
+	page = find_lock_page(inode->i_mapping, index);
+	if (page) {
+		wait_on_page_writeback(page);
+		zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
+		unlock_page(page);
+		page_cache_release(page);
+	}
+}
+
+static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
+				      loff_t length)
+{
+	loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
+	if (offset < nearly) {
+		loff_t size = nearly - offset;
+		if (length < size)
+			size = length;
+		ceph_zero_partial_page(inode, offset, size);
+		offset += size;
+		length -= size;
+	}
+	if (length >= PAGE_CACHE_SIZE) {
+		loff_t size = round_down(length, PAGE_CACHE_SIZE);
+		truncate_pagecache_range(inode, offset, offset + size - 1);
+		offset += size;
+		length -= size;
+	}
+	if (length)
+		ceph_zero_partial_page(inode, offset, length);
+}
+
+static int ceph_zero_partial_object(struct inode *inode,
+				    loff_t offset, loff_t *length)
+{
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+	struct ceph_osd_request *req;
+	int ret = 0;
+	loff_t zero = 0;
+	int op;
+
+	if (!length) {
+		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
+		length = &zero;
+	} else {
+		op = CEPH_OSD_OP_ZERO;
+	}
+
+	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+					ceph_vino(inode),
+					offset, length,
+					1, op,
+					CEPH_OSD_FLAG_WRITE |
+					CEPH_OSD_FLAG_ONDISK,
+					NULL, 0, 0, false);
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
+		goto out;
+	}
+
+	ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
+				&inode->i_mtime);
+
+	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
+	if (!ret) {
+		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+		if (ret == -ENOENT)
+			ret = 0;
+	}
+	ceph_osdc_put_request(req);
+
+out:
+	return ret;
+}
+
+static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
+{
+	int ret = 0;
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	__s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
+	__s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
+	__s32 object_size = ceph_file_layout_object_size(ci->i_layout);
+	loff_t object_set_size = (loff_t)object_size * stripe_count;
+
+	loff_t nearly = (offset + object_set_size - 1)
+			/ object_set_size * object_set_size;
+	while (length && offset < nearly) {
+		loff_t size = length;
+		ret = ceph_zero_partial_object(inode, offset, &size);
+		if (ret < 0)
+			return ret;
+		offset += size;
+		length -= size;
+	}
+	while (length >= object_set_size) {
+		int i;
+		loff_t pos = offset;
+		for (i = 0; i < stripe_count; ++i) {
+			ret = ceph_zero_partial_object(inode, pos, NULL);
+			if (ret < 0)
+				return ret;
+			pos += stripe_unit;
+		}
+		offset += object_set_size;
+		length -= object_set_size;
+	}
+	while (length) {
+		loff_t size = length;
+		ret = ceph_zero_partial_object(inode, offset, &size);
+		if (ret < 0)
+			return ret;
+		offset += size;
+		length -= size;
+	}
+	return ret;
+}
+
+static long ceph_fallocate(struct file *file, int mode,
+				loff_t offset, loff_t length)
+{
+	struct ceph_file_info *fi = file->private_data;
+	struct inode *inode = file->f_dentry->d_inode;
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct ceph_osd_client *osdc =
+		&ceph_inode_to_client(inode)->client->osdc;
+	int want, got = 0;
+	int dirty;
+	int ret = 0;
+	loff_t endoff = 0;
+	loff_t size;
+
+	if (!S_ISREG(inode->i_mode))
+		return -EOPNOTSUPP;
+
+	if (IS_SWAPFILE(inode))
+		return -ETXTBSY;
+
+	mutex_lock(&inode->i_mutex);
+
+	if (ceph_snap(inode) != CEPH_NOSNAP) {
+		ret = -EROFS;
+		goto unlock;
+	}
+
+	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
+	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
+		ret = -ENOSPC;
+		goto unlock;
+	}
+
+	size = i_size_read(inode);
+	if (!(mode & FALLOC_FL_KEEP_SIZE))
+		endoff = offset + length;
+
+	if (fi->fmode & CEPH_FILE_MODE_LAZY)
+		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
+	else
+		want = CEPH_CAP_FILE_BUFFER;
+
+	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
+	if (ret < 0)
+		goto unlock;
+
+	if (mode & FALLOC_FL_PUNCH_HOLE) {
+		if (offset < size)
+			ceph_zero_pagecache_range(inode, offset, length);
+		ret = ceph_zero_objects(inode, offset, length);
+	} else if (endoff > size) {
+		truncate_pagecache_range(inode, size, -1);
+		if (ceph_inode_set_size(inode, endoff))
+			ceph_check_caps(ceph_inode(inode),
+				CHECK_CAPS_AUTHONLY, NULL);
+	}
+
+	if (!ret) {
+		spin_lock(&ci->i_ceph_lock);
+		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
+		spin_unlock(&ci->i_ceph_lock);
+		if (dirty)
+			__mark_inode_dirty(inode, dirty);
+	}
+
+	ceph_put_cap_refs(ci, got);
+unlock:
+	mutex_unlock(&inode->i_mutex);
+	return ret;
+}
+
 const struct file_operations ceph_file_fops = {
 	.open = ceph_open,
 	.release = ceph_release,
@@ -890,5 +1085,6 @@ const struct file_operations ceph_file_fops = {
 	.splice_write = generic_file_splice_write,
 	.unlocked_ioctl = ceph_ioctl,
 	.compat_ioctl = ceph_ioctl,
+	.fallocate = ceph_fallocate,
 };

diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index dbc0a7392d67..8ec65bc11c71 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -503,7 +503,9 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
 	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode);
 	size_t payload_len = 0;
 
-	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);
+	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
+	       opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO &&
+	       opcode != CEPH_OSD_OP_TRUNCATE);
 
 	op->extent.offset = offset;
 	op->extent.length = length;
@@ -631,6 +633,9 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
 		break;
 	case CEPH_OSD_OP_READ:
 	case CEPH_OSD_OP_WRITE:
+	case CEPH_OSD_OP_ZERO:
+	case CEPH_OSD_OP_DELETE:
+	case CEPH_OSD_OP_TRUNCATE:
 		if (src->op == CEPH_OSD_OP_WRITE)
 			request_data_len = src->extent.length;
 		dst->extent.offset = cpu_to_le64(src->extent.offset);
@@ -715,7 +720,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 	u64 object_base;
 	int r;
 
-	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);
+	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
+	       opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO &&
+	       opcode != CEPH_OSD_OP_TRUNCATE);
 
 	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
				      GFP_NOFS);
--
cgit v1.2.3
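With this patch applied, a file on a CephFS mount accepts the standard
fallocate(2) calls: punching a hole first zeroes the affected page-cache
range, then ceph_zero_objects() trims the partial head and tail with
ZERO ops and drops fully covered objects with DELETE/TRUNCATE. A small
userspace sketch of the call; the mount point and file name are
hypothetical:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/falloc.h>

	int main(void)
	{
		int fd = open("/mnt/cephfs/data.bin", O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* Punch a 1 MiB hole at offset 4 MiB. PUNCH_HOLE must be
		 * combined with KEEP_SIZE, so i_size stays unchanged. */
		if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			      4 * 1024 * 1024, 1 * 1024 * 1024) < 0)
			perror("fallocate punch hole");

		/* Extend the file to 16 MiB (default mode updates i_size). */
		if (fallocate(fd, 0, 0, 16 * 1024 * 1024) < 0)
			perror("fallocate extend");

		close(fd);
		return 0;
	}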
From 1874119664dafda3ef2ed9b51b4759a9540d4a1a Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Thu, 15 Aug 2013 08:51:58 +0300
Subject: libceph: fix error handling in handle_reply()

We've tried to fix the error paths in this function before, but there
is still a hidden goto in the ceph_decode_need() macro which goes to
the wrong place. We need to release the "req" and unlock a mutex
before returning.

Signed-off-by: Dan Carpenter
Reviewed-by: Sage Weil
---
 net/ceph/osd_client.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'net')

diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 8ec65bc11c71..3ee4a1da02cd 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1495,14 +1495,14 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
 	dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
 	     req, result);
 
-	ceph_decode_need(&p, end, 4, bad);
+	ceph_decode_need(&p, end, 4, bad_put);
 	numops = ceph_decode_32(&p);
 	if (numops > CEPH_OSD_MAX_OP)
 		goto bad_put;
 	if (numops != req->r_num_ops)
 		goto bad_put;
 	payload_len = 0;
-	ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad);
+	ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad_put);
 	for (i = 0; i < numops; i++) {
 		struct ceph_osd_op *op = p;
 		int len;
@@ -1520,7 +1520,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
 		goto bad_put;
 	}
 
-	ceph_decode_need(&p, end, 4 + numops * 4, bad);
+	ceph_decode_need(&p, end, 4 + numops * 4, bad_put);
 	retry_attempt = ceph_decode_32(&p);
 	for (i = 0; i < numops; i++)
 		req->r_reply_op_result[i] = ceph_decode_32(&p);
--
cgit v1.2.3
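The hidden goto is easy to see once the macro is spelled out;
ceph_decode_need() looks roughly like this (paraphrased from
include/linux/ceph/decode.h):

	/* Jump to a caller-supplied label when fewer than n bytes remain
	 * between p and end -- the last argument is a goto target, which
	 * is easy to miss at the call site. */
	static inline int ceph_has_room(void **p, void *end, size_t n)
	{
		return end >= *p && n <= end - *p;
	}

	#define ceph_decode_need(p, end, n, bad)		\
		do {						\
			if (!likely(ceph_has_room(p, end, n)))	\
				goto bad; /* the hidden goto */	\
		} while (0)

Passing "bad" at these three call sites jumped past the cleanup that
drops the req reference and unlocks before returning; "bad_put" routes
a short buffer through that cleanup first.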
From b72e19b9225d4297a18715b0998093d843d170fa Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Thu, 15 Aug 2013 08:52:48 +0300
Subject: libceph: potential NULL dereference in ceph_osdc_handle_map()

There are two places where we read "nr_maps"; if both of them are zero,
then we would hit a NULL dereference here.

Signed-off-by: Dan Carpenter
Reviewed-by: Sage Weil
---
 net/ceph/osd_client.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'net')

diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 3ee4a1da02cd..6a59fb8fc999 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1793,6 +1793,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
 		nr_maps--;
 	}
 
+	if (!osdc->osdmap)
+		goto bad;
 done:
 	downgrade_write(&osdc->map_sem);
 	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
--
cgit v1.2.3

From dbcae088fa660086bde6e10d63bb3c9264832d85 Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Thu, 15 Aug 2013 08:58:59 +0300
Subject: libceph: create_singlethread_workqueue() doesn't return ERR_PTRs

create_singlethread_workqueue() returns NULL on error; it doesn't
return ERR_PTRs. I tweaked the error handling a little to be
consistent with earlier in the function.

Signed-off-by: Dan Carpenter
Reviewed-by: Sage Weil
---
 net/ceph/osd_client.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'net')

diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6a59fb8fc999..1606f740d6ae 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2264,12 +2264,10 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
 	if (err < 0)
 		goto out_msgpool;
 
+	err = -ENOMEM;
 	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
-	if (IS_ERR(osdc->notify_wq)) {
-		err = PTR_ERR(osdc->notify_wq);
-		osdc->notify_wq = NULL;
+	if (!osdc->notify_wq)
 		goto out_msgpool;
-	}
 
 	return 0;
 
 out_msgpool:
--
cgit v1.2.3
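The confusion mixes up the kernel's two pointer-error conventions:
allocators such as the workqueue constructors return NULL on failure,
while APIs that must report a specific errno return ERR_PTR-encoded
values. A hypothetical fragment contrasting the two (kthread_run()
serves only as an ERR_PTR-returning example):

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/workqueue.h>

	static int example_thread(void *data)
	{
		return 0;
	}

	static int example_setup(void)
	{
		struct workqueue_struct *wq;
		struct task_struct *task;

		wq = create_singlethread_workqueue("example");
		if (!wq)			/* NULL-on-failure API */
			return -ENOMEM;

		task = kthread_run(example_thread, NULL, "example");
		if (IS_ERR(task)) {		/* ERR_PTR-on-failure API */
			destroy_workqueue(wq);
			return PTR_ERR(task);
		}
		return 0;
	}

Checking a NULL-returning allocator with IS_ERR() can never fire, which
is why the old code silently missed allocation failures.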
From 9542cf0bf9b1a3adcc2ef271edbcbdba03abf345 Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Wed, 28 Aug 2013 17:17:29 -0700
Subject: libceph: use pg_num_mask instead of pgp_num_mask for pg.seed calc

Fix a typo that used the wrong bitmask for the pg.seed calculation.
This normally goes unnoticed because in most cases pg_num == pgp_num.
It is, however, a bug that is easily corrected.

CC: stable@vger.kernel.org
Signed-off-by: Sage Weil
Reviewed-by: Alex Elder
---
 net/ceph/osdmap.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 603ddd92db19..dbd9a4792427 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1129,7 +1129,7 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
 
 	/* pg_temp? */
 	pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
-				    pool->pgp_num_mask);
+				    pool->pg_num_mask);
 	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
 	if (pg) {
 		*num = pg->len;
--
cgit v1.2.3
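To see the impact, recall ceph_stable_mod(), which is roughly the
following (paraphrased from the libceph headers), shown with an
illustrative set of pool parameters where pg_num != pgp_num:

	/* Stable mod for bucket counts that need not be powers of two:
	 * b is the bucket count and bmask the next power of two minus
	 * one. */
	static inline int ceph_stable_mod(int x, int b, int bmask)
	{
		if ((x & bmask) < b)
			return x & bmask;
		else
			return x & (bmask >> 1);
	}

	/*
	 * Example (illustrative numbers): pg_num = 12 (pg_num_mask = 15),
	 * pgp_num = 8 (pgp_num_mask = 7), seed x = 9.
	 *
	 *   correct mask: ceph_stable_mod(9, 12, 15) -> 9 & 15 = 9, 9 < 12 -> 9
	 *   wrong mask:   ceph_stable_mod(9, 12, 7)  -> 9 & 7  = 1, 1 < 12 -> 1
	 *
	 * With pgp_num_mask the pg_temp lookup searches for seed 1 instead
	 * of 9, so an existing pg_temp mapping for the real PG is missed.
	 */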