1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2011 Red Hat, Inc.
4 *
5 * This file is released under the GPL.
6 */
7 #include "dm-transaction-manager.h"
8 #include "dm-space-map.h"
9 #include "dm-space-map-disk.h"
10 #include "dm-space-map-metadata.h"
11 #include "dm-persistent-data-internal.h"
12
13 #include <linux/export.h>
14 #include <linux/mutex.h>
15 #include <linux/hash.h>
16 #include <linux/rbtree.h>
17 #include <linux/slab.h>
18 #include <linux/device-mapper.h>
19
20 #define DM_MSG_PREFIX "transaction manager"
21
22 /*----------------------------------------------------------------*/
23
24 #define PREFETCH_SIZE 128
25 #define PREFETCH_BITS 7
26 #define PREFETCH_SENTINEL ((dm_block_t) -1ULL)
27
/*
 * A fixed-size, hash-indexed set of block numbers waiting to be
 * prefetched.  Empty slots hold PREFETCH_SENTINEL.
 */
struct prefetch_set {
	struct mutex lock;	/* serialises access to blocks[] */
	dm_block_t blocks[PREFETCH_SIZE];
};
32
/* Map a block number to a slot index in prefetch_set::blocks[]. */
static unsigned int prefetch_hash(dm_block_t b)
{
	unsigned int slot = hash_64(b, PREFETCH_BITS);

	return slot;
}
37
prefetch_wipe(struct prefetch_set * p)38 static void prefetch_wipe(struct prefetch_set *p)
39 {
40 unsigned int i;
41
42 for (i = 0; i < PREFETCH_SIZE; i++)
43 p->blocks[i] = PREFETCH_SENTINEL;
44 }
45
/* One-time initialisation: set up the mutex and empty all slots. */
static void prefetch_init(struct prefetch_set *p)
{
	mutex_init(&p->lock);
	prefetch_wipe(p);
}
51
/*
 * Record block @b for a later prefetch.  If the slot it hashes to is
 * already occupied the request is silently dropped - prefetching is
 * only a performance hint, so losing one is harmless.
 */
static void prefetch_add(struct prefetch_set *p, dm_block_t b)
{
	unsigned int slot = prefetch_hash(b);

	mutex_lock(&p->lock);
	if (p->blocks[slot] == PREFETCH_SENTINEL)
		p->blocks[slot] = b;
	mutex_unlock(&p->lock);
}
62
prefetch_issue(struct prefetch_set * p,struct dm_block_manager * bm)63 static void prefetch_issue(struct prefetch_set *p, struct dm_block_manager *bm)
64 {
65 unsigned int i;
66
67 mutex_lock(&p->lock);
68
69 for (i = 0; i < PREFETCH_SIZE; i++)
70 if (p->blocks[i] != PREFETCH_SENTINEL) {
71 dm_bm_prefetch(bm, p->blocks[i]);
72 p->blocks[i] = PREFETCH_SENTINEL;
73 }
74
75 mutex_unlock(&p->lock);
76 }
77
78 /*----------------------------------------------------------------*/
79
/*
 * One entry in the per-transaction shadow table: records that block
 * 'where' has already been shadowed this transaction.
 */
struct shadow_info {
	struct rb_node node;	/* linkage in a dm_transaction_manager bucket */
	dm_block_t where;
};
84
85 /*
86 * It would be nice if we scaled with the size of transaction.
87 */
88 #define DM_HASH_SIZE 256
89 #define DM_HASH_MASK (DM_HASH_SIZE - 1)
90
struct dm_transaction_manager {
	int is_clone;				/* non-blocking clone? */
	struct dm_transaction_manager *real;	/* backing tm when is_clone */

	struct dm_block_manager *bm;
	struct dm_space_map *sm;

	spinlock_t lock;			/* protects buckets[] */
	/* Hash-bucketed rbtrees of shadow_info, keyed by block number. */
	struct rb_root buckets[DM_HASH_SIZE];

	/* Blocks a clone failed to read without blocking; see dm_tm_read_lock. */
	struct prefetch_set prefetches;
};
103
104 /*----------------------------------------------------------------*/
105
/*
 * Returns 1 iff block @b has already been shadowed in the current
 * transaction (i.e. it is safe to write it in place).
 */
static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
	int found = 0;
	unsigned int bucket = dm_hash_block(b, DM_HASH_MASK);
	struct rb_node *n;

	spin_lock(&tm->lock);
	n = tm->buckets[bucket].rb_node;
	while (n) {
		struct shadow_info *si = rb_entry(n, struct shadow_info, node);

		if (b == si->where) {
			found = 1;
			break;
		}

		n = (b < si->where) ? n->rb_left : n->rb_right;
	}
	spin_unlock(&tm->lock);

	return found;
}
130
131 /*
132 * This can silently fail if there's no memory. We're ok with this since
133 * creating redundant shadows causes no harm.
134 */
/*
 * Record that block @b has been shadowed this transaction.
 *
 * Allocation failure is silently ignored: a missing entry merely causes
 * a redundant shadow later, which is correct albeit slower.
 *
 * Fix: the tree-walk cursor used to be a second variable also called
 * 'si', shadowing the node being inserted; renamed to 'cur' so it is
 * clear rb_link_node() links the new node, not the cursor.
 */
static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
	unsigned int bucket;
	struct shadow_info *si;
	struct rb_node **node, *parent;

	si = kmalloc(sizeof(*si), GFP_NOIO);
	if (!si)
		return;

	si->where = b;
	bucket = dm_hash_block(b, DM_HASH_MASK);

	spin_lock(&tm->lock);
	node = &tm->buckets[bucket].rb_node;
	parent = NULL;
	while (*node) {
		struct shadow_info *cur =
			rb_entry(*node, struct shadow_info, node);

		parent = *node;
		if (b < cur->where)
			node = &cur->node.rb_left;
		else
			node = &cur->node.rb_right;
	}
	rb_link_node(&si->node, parent, node);
	rb_insert_color(&si->node, &tm->buckets[bucket]);
	spin_unlock(&tm->lock);
}
163
wipe_shadow_table(struct dm_transaction_manager * tm)164 static void wipe_shadow_table(struct dm_transaction_manager *tm)
165 {
166 unsigned int i;
167
168 spin_lock(&tm->lock);
169 for (i = 0; i < DM_HASH_SIZE; i++) {
170 while (!RB_EMPTY_ROOT(&tm->buckets[i])) {
171 struct shadow_info *si =
172 rb_entry(tm->buckets[i].rb_node, struct shadow_info, node);
173 rb_erase(&si->node, &tm->buckets[i]);
174 kfree(si);
175 }
176 }
177 spin_unlock(&tm->lock);
178 }
179
180 /*----------------------------------------------------------------*/
181
dm_tm_create(struct dm_block_manager * bm,struct dm_space_map * sm)182 static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
183 struct dm_space_map *sm)
184 {
185 unsigned int i;
186 struct dm_transaction_manager *tm;
187
188 tm = kmalloc(sizeof(*tm), GFP_KERNEL);
189 if (!tm)
190 return ERR_PTR(-ENOMEM);
191
192 tm->is_clone = 0;
193 tm->real = NULL;
194 tm->bm = bm;
195 tm->sm = sm;
196
197 spin_lock_init(&tm->lock);
198 for (i = 0; i < DM_HASH_SIZE; i++)
199 tm->buckets[i] = RB_ROOT;
200
201 prefetch_init(&tm->prefetches);
202
203 return tm;
204 }
205
dm_tm_create_non_blocking_clone(struct dm_transaction_manager * real)206 struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
207 {
208 struct dm_transaction_manager *tm;
209
210 tm = kmalloc(sizeof(*tm), GFP_KERNEL);
211 if (tm) {
212 tm->is_clone = 1;
213 tm->real = real;
214 }
215
216 return tm;
217 }
218 EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
219
/* Tear down a transaction manager (real or clone); NULL is a no-op. */
void dm_tm_destroy(struct dm_transaction_manager *tm)
{
	if (!tm)
		return;

	/* Clones never own shadow entries; only the real tm frees them. */
	if (!tm->is_clone)
		wipe_shadow_table(tm);

	kfree(tm);
}
EXPORT_SYMBOL_GPL(dm_tm_destroy);
231
dm_tm_pre_commit(struct dm_transaction_manager * tm)232 int dm_tm_pre_commit(struct dm_transaction_manager *tm)
233 {
234 int r;
235
236 if (tm->is_clone)
237 return -EWOULDBLOCK;
238
239 r = dm_sm_commit(tm->sm);
240 if (r < 0)
241 return r;
242
243 return dm_bm_flush(tm->bm);
244 }
245 EXPORT_SYMBOL_GPL(dm_tm_pre_commit);
246
/*
 * Second phase of commit: discard the shadow table, release the
 * superblock @root, and flush everything to stable storage.
 * Not supported on clones.
 */
int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	/* Next transaction must re-shadow everything. */
	wipe_shadow_table(tm);
	dm_bm_unlock(root);

	return dm_bm_flush(tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_commit);
258
dm_tm_new_block(struct dm_transaction_manager * tm,const struct dm_block_validator * v,struct dm_block ** result)259 int dm_tm_new_block(struct dm_transaction_manager *tm,
260 const struct dm_block_validator *v,
261 struct dm_block **result)
262 {
263 int r;
264 dm_block_t new_block;
265
266 if (tm->is_clone)
267 return -EWOULDBLOCK;
268
269 r = dm_sm_new_block(tm->sm, &new_block);
270 if (r < 0)
271 return r;
272
273 r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
274 if (r < 0) {
275 dm_sm_dec_block(tm->sm, new_block);
276 return r;
277 }
278
279 /*
280 * New blocks count as shadows in that they don't need to be
281 * shadowed again.
282 */
283 insert_shadow(tm, new_block);
284
285 return 0;
286 }
287
/*
 * Copy block @orig into a freshly allocated block, returning the
 * write-locked copy in @result.  Our reference on @orig is dropped.
 */
static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
			  const struct dm_block_validator *v,
			  struct dm_block **result)
{
	int r;
	dm_block_t new;
	struct dm_block *orig_block;

	r = dm_sm_new_block(tm->sm, &new);
	if (r < 0)
		return r;

	/* Release our reference on the block being shadowed. */
	r = dm_sm_dec_block(tm->sm, orig);
	if (r < 0)
		return r;

	/*
	 * NOTE(review): the failure paths below do not dec 'new';
	 * presumably the caller aborts the whole transaction on error,
	 * reclaiming the allocation - confirm before relying on this.
	 */
	r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
	if (r < 0)
		return r;

	/*
	 * It would be tempting to use dm_bm_unlock_move here, but some
	 * code, such as the space maps, keeps using the old data structures
	 * secure in the knowledge they won't be changed until the next
	 * transaction.  Using unlock_move would force a synchronous read
	 * since the old block would no longer be in the cache.
	 */
	r = dm_bm_write_lock_zero(tm->bm, new, v, result);
	if (r) {
		dm_bm_unlock(orig_block);
		return r;
	}

	/* Copy the old contents over the zeroed shadow. */
	memcpy(dm_block_data(*result), dm_block_data(orig_block),
	       dm_bm_block_size(tm->bm));

	dm_bm_unlock(orig_block);
	return r;
}
327
/*
 * Get a writeable version of @orig.  If the block is unshared and was
 * already shadowed this transaction it is write-locked in place;
 * otherwise a copy is made.  *inc_children tells the caller whether the
 * children of the block need their reference counts bumped (i.e. the
 * block was shared and has been copied).
 */
int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
		       const struct dm_block_validator *v, struct dm_block **result,
		       int *inc_children)
{
	int r;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_count_is_more_than_one(tm->sm, orig, inc_children);
	if (r < 0)
		return r;

	/* Already shadowed and unshared: safe to modify in place. */
	if (is_shadow(tm, orig) && !*inc_children)
		return dm_bm_write_lock(tm->bm, orig, v, result);

	r = __shadow_block(tm, orig, v, result);
	if (r < 0)
		return r;

	insert_shadow(tm, dm_block_location(*result));

	return r;
}
EXPORT_SYMBOL_GPL(dm_tm_shadow_block);
352
/*
 * Read-lock block @b.  A real tm may block; a clone only tries the
 * lock, and on -EWOULDBLOCK queues the block for prefetching so a
 * later retry is likely to hit the cache.
 */
int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
		    const struct dm_block_validator *v,
		    struct dm_block **blk)
{
	int r;

	if (!tm->is_clone)
		return dm_bm_read_lock(tm->bm, b, v, blk);

	r = dm_bm_read_try_lock(tm->real->bm, b, v, blk);
	if (r == -EWOULDBLOCK)
		prefetch_add(&tm->real->prefetches, b);

	return r;
}
EXPORT_SYMBOL_GPL(dm_tm_read_lock);
369
/* Drop the read/write lock taken on @b; @tm is unused. */
void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
	dm_bm_unlock(b);
}
EXPORT_SYMBOL_GPL(dm_tm_unlock);
375
/* Increment the reference count of block @b. */
void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_inc_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_inc);
386
/* Increment the reference counts of blocks in the range [@b, @e). */
void dm_tm_inc_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t e)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_inc_blocks(tm->sm, b, e);
}
EXPORT_SYMBOL_GPL(dm_tm_inc_range);
397
/* Decrement the reference count of block @b. */
void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_dec_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_dec);
408
/* Decrement the reference counts of blocks in the range [@b, @e). */
void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t e)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_dec_blocks(tm->sm, b, e);
}
EXPORT_SYMBOL_GPL(dm_tm_dec_range);
419
/*
 * Coalesce @count little-endian block numbers into maximal runs of
 * consecutive values and invoke @fn once per run with the half-open
 * range [begin, end).
 */
void dm_tm_with_runs(struct dm_transaction_manager *tm,
		     const __le64 *value_le, unsigned int count, dm_tm_run_fn fn)
{
	uint64_t begin = 0, end = 0;
	bool in_run = false;
	unsigned int i;

	for (i = 0; i < count; i++) {
		uint64_t b = le64_to_cpu(value_le[i]);

		/* Extend the current run if @b continues it. */
		if (in_run && b == end) {
			end++;
			continue;
		}

		/* Flush the finished run, then start a new one at @b. */
		if (in_run)
			fn(tm, begin, end);

		in_run = true;
		begin = b;
		end = b + 1;
	}

	if (in_run)
		fn(tm, begin, end);
}
EXPORT_SYMBOL_GPL(dm_tm_with_runs);
449
/* Look up the reference count of block @b; clones can't do this. */
int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
	      uint32_t *result)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	return dm_sm_get_count(tm->sm, b, result);
}
458
/* Set *result if block @b has more than one reference; clones can't do this. */
int dm_tm_block_is_shared(struct dm_transaction_manager *tm, dm_block_t b,
			  int *result)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	return dm_sm_count_is_more_than_one(tm->sm, b, result);
}
467
/* Accessor for the underlying block manager. */
struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
{
	return tm->bm;
}
472
/* Issue all prefetches queued by non-blocking reads on this tm. */
void dm_tm_issue_prefetches(struct dm_transaction_manager *tm)
{
	prefetch_issue(&tm->prefetches, tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_issue_prefetches);
478
479 /*----------------------------------------------------------------*/
480
/*
 * Common construction path for dm_tm_create_with_sm() and
 * dm_tm_open_with_sm(): build the metadata space map and transaction
 * manager, then either create a fresh space map (@create) or open an
 * existing one from @sm_root/@sm_len.  On failure everything built so
 * far is torn down and *tm/*sm must not be used.
 */
static int dm_tm_create_internal(struct dm_block_manager *bm,
				 dm_block_t sb_location,
				 struct dm_transaction_manager **tm,
				 struct dm_space_map **sm,
				 int create,
				 void *sm_root, size_t sm_len)
{
	int r;

	*sm = dm_sm_metadata_init();
	if (IS_ERR(*sm))
		return PTR_ERR(*sm);

	*tm = dm_tm_create(bm, *sm);
	if (IS_ERR(*tm)) {
		r = PTR_ERR(*tm);
		dm_sm_destroy(*sm);
		return r;
	}

	if (create) {
		r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
					  sb_location);
		if (r)
			DMERR("couldn't create metadata space map");
	} else {
		r = dm_sm_metadata_open(*sm, *tm, sm_root, sm_len);
		if (r)
			DMERR("couldn't open metadata space map");
	}

	if (r) {
		dm_tm_destroy(*tm);
		dm_sm_destroy(*sm);
		return r;
	}

	return 0;
}
523
/* Create a tm plus a brand-new metadata space map rooted at @sb_location. */
int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
			 struct dm_transaction_manager **tm,
			 struct dm_space_map **sm)
{
	return dm_tm_create_internal(bm, sb_location, tm, sm, 1, NULL, 0);
}
EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);
531
/* Create a tm and open an existing metadata space map from @sm_root. */
int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
		       void *sm_root, size_t root_len,
		       struct dm_transaction_manager **tm,
		       struct dm_space_map **sm)
{
	return dm_tm_create_internal(bm, sb_location, tm, sm, 0, sm_root, root_len);
}
EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);
540
541 /*----------------------------------------------------------------*/
542