1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (c) International Business Machines Corp., 2006
4 *
5 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
6 */
7
8 /*
9 * UBI wear-leveling sub-system.
10 *
11 * This sub-system is responsible for wear-leveling. It works in terms of
12 * physical eraseblocks and erase counters and knows nothing about logical
13 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
14 * eraseblocks are of two types - used and free. Used physical eraseblocks are
15 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
16 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
17 *
18 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
19 * counter header. The rest of the physical eraseblock contains only %0xFF bytes.
20 *
21 * When physical eraseblocks are returned to the WL sub-system by means of the
22 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
23 * done asynchronously in context of the per-UBI device background thread,
24 * which is also managed by the WL sub-system.
25 *
26 * The wear-leveling is ensured by means of moving the contents of used
27 * physical eraseblocks with low erase counter to free physical eraseblocks
28 * with high erase counter.
29 *
30 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
31 * bad.
32 *
33 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
34 * in a physical eraseblock, it has to be moved. Technically this is the same
35 * as moving it for wear-leveling reasons.
36 *
37 * As it was said, for the UBI sub-system all physical eraseblocks are either
38 * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
39 * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
40 * RB-trees, as well as (temporarily) in the @wl->pq queue.
41 *
42 * When the WL sub-system returns a physical eraseblock, the physical
43 * eraseblock is protected from being moved for some "time". For this reason,
44 * the physical eraseblock is not directly moved from the @wl->free tree to the
45 * @wl->used tree. There is a protection queue in between where this
46 * physical eraseblock is temporarily stored (@wl->pq).
47 *
48 * All this protection stuff is needed because:
49 * o we don't want to move physical eraseblocks just after we have given them
50 * to the user; instead, we first want to let users fill them up with data;
51 *
52 * o there is a chance that the user will put the physical eraseblock very
53 * soon, so it makes sense not to move it for some time, but wait.
54 *
55 * Physical eraseblocks stay protected only for a limited time. But the "time"
56 * is measured in erase cycles in this case. This is implemented with the help
57 * of the protection queue. Eraseblocks are put to the tail of this queue when
58 * they are returned by 'ubi_wl_get_peb()', and eraseblocks are removed from the
59 * head of the queue on each erase operation (for any eraseblock). So the length
60 * of the queue defines how many (global) erase cycles PEBs are protected.
61 *
62 * To put it differently, each physical eraseblock has 2 main states: free and
63 * used. The former state corresponds to the @wl->free tree. The latter state
64 * is split into several sub-states:
65 * o the WL movement is allowed (@wl->used tree);
66 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
67 * erroneous - e.g., there was a read error;
68 * o the WL movement is temporarily prohibited (@wl->pq queue);
69 * o scrubbing is needed (@wl->scrub tree).
70 *
71 * Depending on the sub-state, wear-leveling entries of the used physical
72 * eraseblocks may be kept in one of those structures.
73 *
74 * Note, in this implementation, we keep a small in-RAM object for each physical
75 * eraseblock. This is surely not a scalable solution. But it appears to be good
76 * enough for moderately large flashes and it is simple. In future, one may
77 * re-work this sub-system and make it more scalable.
78 *
79 * At the moment this sub-system does not utilize the sequence number, which
80 * was introduced relatively recently. But it would be wise to do this because
81 * the sequence number of a logical eraseblock characterizes how old it is. For
82 * example, when we move a PEB with a low erase counter, and we need to pick the
83 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
84 * pick a target PEB with an average EC if our PEB is not very "old". This is
85 * room for future re-work of the WL sub-system.
86 */
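/*
 * As a rough sketch of the life cycle described above (not the literal call
 * sequence of any single caller, and with simplified arguments), a PEB
 * typically travels like this:
 *
 *   pnum = ubi_wl_get_peb(ubi);    - taken from @wl->free, parked in @wl->pq
 *   <the user of the WL sub-system writes data to the PEB>
 *                                  - the entry later migrates to @wl->used
 *   ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
 *                                  - scheduled for (asynchronous) erasure,
 *                                    after which it returns to @wl->free
 */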
87
88 #include <linux/slab.h>
89 #include <linux/crc32.h>
90 #include <linux/freezer.h>
91 #include <linux/kthread.h>
92 #include "ubi.h"
93 #include "wl.h"
94
95 /* Number of physical eraseblocks reserved for wear-leveling purposes */
96 #define WL_RESERVED_PEBS 1
97
98 /*
99 * Maximum difference between two erase counters. If this threshold is
100 * exceeded, the WL sub-system starts moving data from used physical
101 * eraseblocks with low erase counter to free physical eraseblocks with high
102 * erase counter.
103 */
104 #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
105
106 /*
107 * When a physical eraseblock is moved, the WL sub-system has to pick the target
108 * physical eraseblock to move to. The simplest way would be just to pick the
109 * one with the highest erase counter. But in certain workloads this could lead
110 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
111 * situation where the picked physical eraseblock is constantly erased after the
112 * data is written to it. So, we have a constant which limits the highest erase
113 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
114 * does not pick eraseblocks with erase counter greater than the lowest erase
115 * counter plus %WL_FREE_MAX_DIFF.
116 */
117 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
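/*
 * Worked example (assuming the default CONFIG_MTD_UBI_WL_THRESHOLD of 4096):
 * if the lowest erase counter among the free PEBs is 100, free PEBs with an
 * erase counter above 100 + 2*4096 = 8292 are normally not picked as
 * wear-leveling targets (see find_wl_entry()).
 */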
118
119 /*
120 * Maximum number of consecutive background thread failures which is enough to
121 * switch to read-only mode.
122 */
123 #define WL_MAX_FAILURES 32
124
125 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
126 static int self_check_in_wl_tree(const struct ubi_device *ubi,
127 struct ubi_wl_entry *e, struct rb_root *root);
128 static int self_check_in_pq(const struct ubi_device *ubi,
129 struct ubi_wl_entry *e);
130
131 /**
132 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
133 * @e: the wear-leveling entry to add
134 * @root: the root of the tree
135 *
136 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
137 * the @ubi->used and @ubi->free RB-trees.
138 */
139 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
140 {
141 struct rb_node **p, *parent = NULL;
142
143 p = &root->rb_node;
144 while (*p) {
145 struct ubi_wl_entry *e1;
146
147 parent = *p;
148 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
149
150 if (e->ec < e1->ec)
151 p = &(*p)->rb_left;
152 else if (e->ec > e1->ec)
153 p = &(*p)->rb_right;
154 else {
155 ubi_assert(e->pnum != e1->pnum);
156 if (e->pnum < e1->pnum)
157 p = &(*p)->rb_left;
158 else
159 p = &(*p)->rb_right;
160 }
161 }
162
163 rb_link_node(&e->u.rb, parent, p);
164 rb_insert_color(&e->u.rb, root);
165 }
166
167 /**
168 * wl_entry_destroy - destroy a wear-leveling entry.
169 * @ubi: UBI device description object
170 * @e: the wear-leveling entry to destroy
171 *
172 * This function destroys a wear leveling entry and removes
173 * the reference from the lookup table.
174 */
175 static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
176 {
177 ubi->lookuptbl[e->pnum] = NULL;
178 kmem_cache_free(ubi_wl_entry_slab, e);
179 }
180
181 /**
182 * do_work - do one pending work.
183 * @ubi: UBI device description object
184 * @executed: whether a work was executed
185 *
186 * This function returns zero in case of success and a negative error code in
187 * case of failure. If @executed is not NULL, it is set to %1 if a work was
188 * executed, otherwise it is set to %0.
189 */
190 static int do_work(struct ubi_device *ubi, int *executed)
191 {
192 int err;
193 struct ubi_work *wrk;
194
195 cond_resched();
196
197 /*
198 * @ubi->work_sem is used to synchronize with the workers. Workers take
199 * it in read mode, so many of them may be doing works at a time. But
200 * the queue flush code has to be sure the whole queue of works is
201 * done, and it takes the mutex in write mode.
202 */
203 down_read(&ubi->work_sem);
204 spin_lock(&ubi->wl_lock);
205 if (list_empty(&ubi->works)) {
206 spin_unlock(&ubi->wl_lock);
207 up_read(&ubi->work_sem);
208 if (executed)
209 *executed = 0;
210 return 0;
211 }
212
213 if (executed)
214 *executed = 1;
215 wrk = list_entry(ubi->works.next, struct ubi_work, list);
216 list_del(&wrk->list);
217 ubi->works_count -= 1;
218 ubi_assert(ubi->works_count >= 0);
219 spin_unlock(&ubi->wl_lock);
220
221 /*
222 * Call the worker function. Do not touch the work structure
223 * after this call as it will have been freed or reused by that
224 * time by the worker function.
225 */
226 err = wrk->func(ubi, wrk, 0);
227 if (err)
228 ubi_err(ubi, "work failed with error code %d", err);
229 up_read(&ubi->work_sem);
230
231 return err;
232 }
233
234 /**
235 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
236 * @e: the wear-leveling entry to check
237 * @root: the root of the tree
238 *
239 * This function returns non-zero if @e is in the @root RB-tree and zero if it
240 * is not.
241 */
242 static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
243 {
244 struct rb_node *p;
245
246 p = root->rb_node;
247 while (p) {
248 struct ubi_wl_entry *e1;
249
250 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
251
252 if (e->pnum == e1->pnum) {
253 ubi_assert(e == e1);
254 return 1;
255 }
256
257 if (e->ec < e1->ec)
258 p = p->rb_left;
259 else if (e->ec > e1->ec)
260 p = p->rb_right;
261 else {
262 ubi_assert(e->pnum != e1->pnum);
263 if (e->pnum < e1->pnum)
264 p = p->rb_left;
265 else
266 p = p->rb_right;
267 }
268 }
269
270 return 0;
271 }
272
273 /**
274 * in_pq - check if a wear-leveling entry is present in the protection queue.
275 * @ubi: UBI device description object
276 * @e: the wear-leveling entry to check
277 *
278 * This function returns non-zero if @e is in the protection queue and zero
279 * if it is not.
280 */
281 static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
282 {
283 struct ubi_wl_entry *p;
284 int i;
285
286 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
287 list_for_each_entry(p, &ubi->pq[i], u.list)
288 if (p == e)
289 return 1;
290
291 return 0;
292 }
293
294 /**
295 * prot_queue_add - add physical eraseblock to the protection queue.
296 * @ubi: UBI device description object
297 * @e: the physical eraseblock to add
298 *
299 * This function adds @e to the tail of the protection queue @ubi->pq, where
300 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
301 * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
302 * be locked.
303 */
304 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
305 {
306 int pq_tail = ubi->pq_head - 1;
307
308 if (pq_tail < 0)
309 pq_tail = UBI_PROT_QUEUE_LEN - 1;
310 ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
311 list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
312 dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
313 }
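/*
 * Illustration (assuming UBI_PROT_QUEUE_LEN is 10, its usual value): with
 * @ubi->pq_head == 0 the entry lands in list 9, with @ubi->pq_head == 3 it
 * lands in list 2. Because serve_prot_queue() advances @pq_head by one after
 * every erase operation, the entry stays protected for roughly
 * %UBI_PROT_QUEUE_LEN erase cycles before it is moved to the used tree.
 */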
314
315 /**
316 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
317 * @ubi: UBI device description object
318 * @root: the RB-tree where to look for
319 * @diff: maximum possible difference from the smallest erase counter
320 * @pick_max: pick the PEB even if its erase counter is beyond 'min_ec + @diff'
321 *
322 * This function looks for a wear leveling entry with erase counter closest to
323 * min + @diff, where min is the smallest erase counter.
324 */
325 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
326 struct rb_root *root, int diff,
327 int pick_max)
328 {
329 struct rb_node *p;
330 struct ubi_wl_entry *e;
331 int max;
332
333 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
334 max = e->ec + diff;
335
336 p = root->rb_node;
337 while (p) {
338 struct ubi_wl_entry *e1;
339
340 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
341 if (e1->ec >= max) {
342 if (pick_max)
343 e = e1;
344 p = p->rb_left;
345 } else {
346 p = p->rb_right;
347 e = e1;
348 }
349 }
350
351 return e;
352 }
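/*
 * Worked example for the default (@pick_max == 0) case: if @root contains
 * entries with erase counters 10, 50 and 400 and @diff is 100, then
 * max = 10 + 100 = 110 and the function returns the EC-50 entry, i.e. the
 * most worn-out PEB whose erase counter is still below min_ec + @diff.
 */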
353
354 /**
355 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
356 * @ubi: UBI device description object
357 * @root: the RB-tree where to look for
358 *
359 * This function looks for a wear leveling entry with a medium erase counter,
360 * but not greater than or equal to the lowest erase counter plus
361 * %WL_FREE_MAX_DIFF/2.
362 */
363 static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
364 struct rb_root *root)
365 {
366 struct ubi_wl_entry *e, *first, *last;
367
368 first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
369 last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
370
371 if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
372 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
373
374 /*
375 * If no fastmap has been written and fm_anchor is not
376 * reserved and this WL entry can be used as anchor PEB
377 * hold it back and return the second best WL entry such
378 * that fastmap can use the anchor PEB later.
379 */
380 e = may_reserve_for_fm(ubi, e, root);
381 } else
382 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2, 0);
383
384 return e;
385 }
386
387 /**
388 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
389 * refill_wl_user_pool().
390 * @ubi: UBI device description object
391 *
392 * This function returns a wear leveling entry in case of success and
393 * NULL in case of failure.
394 */
395 static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
396 {
397 struct ubi_wl_entry *e;
398
399 e = find_mean_wl_entry(ubi, &ubi->free);
400 if (!e) {
401 ubi_err(ubi, "no free eraseblocks");
402 return NULL;
403 }
404
405 self_check_in_wl_tree(ubi, e, &ubi->free);
406
407 /*
408 * Move the physical eraseblock to the protection queue where it will
409 * be protected from being moved for some time.
410 */
411 rb_erase(&e->u.rb, &ubi->free);
412 ubi->free_count--;
413 dbg_wl("PEB %d EC %d", e->pnum, e->ec);
414
415 return e;
416 }
417
418 /**
419 * prot_queue_del - remove a physical eraseblock from the protection queue.
420 * @ubi: UBI device description object
421 * @pnum: the physical eraseblock to remove
422 *
423 * This function deletes PEB @pnum from the protection queue and returns zero
424 * in case of success and %-ENODEV if the PEB was not found.
425 */
426 static int prot_queue_del(struct ubi_device *ubi, int pnum)
427 {
428 struct ubi_wl_entry *e;
429
430 e = ubi->lookuptbl[pnum];
431 if (!e)
432 return -ENODEV;
433
434 if (self_check_in_pq(ubi, e))
435 return -ENODEV;
436
437 list_del(&e->u.list);
438 dbg_wl("deleted PEB %d from the protection queue", e->pnum);
439 return 0;
440 }
441
442 /**
443 * ubi_sync_erase - synchronously erase a physical eraseblock.
444 * @ubi: UBI device description object
445 * @e: the physical eraseblock to erase
446 * @torture: if the physical eraseblock has to be tortured
447 *
448 * This function returns zero in case of success and a negative error code in
449 * case of failure.
450 */
451 int ubi_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
452 {
453 int err;
454 struct ubi_ec_hdr *ec_hdr;
455 unsigned long long ec = e->ec;
456
457 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
458
459 err = self_check_ec(ubi, e->pnum, e->ec);
460 if (err)
461 return -EINVAL;
462
463 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
464 if (!ec_hdr)
465 return -ENOMEM;
466
467 err = ubi_io_sync_erase(ubi, e->pnum, torture);
468 if (err < 0)
469 goto out_free;
470
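/*
 * On success 'ubi_io_sync_erase()' returns the number of erase cycles it
 * actually performed (more than one if the PEB was tortured), so the result
 * is added to the erase counter instead of a plain increment.
 */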
471 ec += err;
472 if (ec > UBI_MAX_ERASECOUNTER) {
473 /*
474 * Erase counter overflow. Upgrade UBI and use 64-bit
475 * erase counters internally.
476 */
477 ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
478 e->pnum, ec);
479 err = -EINVAL;
480 goto out_free;
481 }
482
483 dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
484
485 ec_hdr->ec = cpu_to_be64(ec);
486
487 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
488 if (err)
489 goto out_free;
490
491 e->ec = ec;
492 spin_lock(&ubi->wl_lock);
493 if (e->ec > ubi->max_ec)
494 ubi->max_ec = e->ec;
495 spin_unlock(&ubi->wl_lock);
496
497 out_free:
498 kfree(ec_hdr);
499 return err;
500 }
501
502 /**
503 * serve_prot_queue - check if it is time to stop protecting PEBs.
504 * @ubi: UBI device description object
505 *
506 * This function is called after each erase operation and removes PEBs from the
507 * tail of the protection queue. These PEBs have been protected for long enough
508 * and should be moved to the used tree.
509 */
510 static void serve_prot_queue(struct ubi_device *ubi)
511 {
512 struct ubi_wl_entry *e, *tmp;
513 int count;
514
515 /*
516 * There may be several protected physical eraseblocks to remove,
517 * process them all.
518 */
519 repeat:
520 count = 0;
521 spin_lock(&ubi->wl_lock);
522 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
523 dbg_wl("PEB %d EC %d protection over, move to used tree",
524 e->pnum, e->ec);
525
526 list_del(&e->u.list);
527 wl_tree_add(e, &ubi->used);
528 if (count++ > 32) {
529 /*
530 * Let's be nice and avoid holding the spinlock for
531 * too long.
532 */
533 spin_unlock(&ubi->wl_lock);
534 cond_resched();
535 goto repeat;
536 }
537 }
538
539 ubi->pq_head += 1;
540 if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
541 ubi->pq_head = 0;
542 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
543 spin_unlock(&ubi->wl_lock);
544 }
545
546 /**
547 * __schedule_ubi_work - schedule a work.
548 * @ubi: UBI device description object
549 * @wrk: the work to schedule
550 *
551 * This function adds a work defined by @wrk to the tail of the pending works
552 * list. Can only be used if ubi->work_sem is already held in read mode!
553 */
554 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
555 {
556 spin_lock(&ubi->wl_lock);
557 list_add_tail(&wrk->list, &ubi->works);
558 ubi_assert(ubi->works_count >= 0);
559 ubi->works_count += 1;
560 if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
561 wake_up_process(ubi->bgt_thread);
562 spin_unlock(&ubi->wl_lock);
563 }
564
565 /**
566 * schedule_ubi_work - schedule a work.
567 * @ubi: UBI device description object
568 * @wrk: the work to schedule
569 *
570 * This function adds a work defined by @wrk to the tail of the pending works
571 * list.
572 */
573 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
574 {
575 down_read(&ubi->work_sem);
576 __schedule_ubi_work(ubi, wrk);
577 up_read(&ubi->work_sem);
578 }
579
580 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
581 int shutdown);
582
583 /**
584 * schedule_erase - schedule an erase work.
585 * @ubi: UBI device description object
586 * @e: the WL entry of the physical eraseblock to erase
587 * @vol_id: the volume ID that last used this PEB
588 * @lnum: the last used logical eraseblock number for the PEB
589 * @torture: if the physical eraseblock has to be tortured
590 * @nested: denotes whether the work_sem is already held
591 *
592 * This function returns zero in case of success and %-ENOMEM in case of
593 * failure.
594 */
595 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
596 int vol_id, int lnum, int torture, bool nested)
597 {
598 struct ubi_work *wl_wrk;
599
600 ubi_assert(e);
601
602 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
603 e->pnum, e->ec, torture);
604
605 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
606 if (!wl_wrk)
607 return -ENOMEM;
608
609 wl_wrk->func = &erase_worker;
610 wl_wrk->e = e;
611 wl_wrk->vol_id = vol_id;
612 wl_wrk->lnum = lnum;
613 wl_wrk->torture = torture;
614
615 if (nested)
616 __schedule_ubi_work(ubi, wl_wrk);
617 else
618 schedule_ubi_work(ubi, wl_wrk);
619 return 0;
620 }
621
622 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
623 /**
624 * do_sync_erase - run the erase worker synchronously.
625 * @ubi: UBI device description object
626 * @e: the WL entry of the physical eraseblock to erase
627 * @vol_id: the volume ID that last used this PEB
628 * @lnum: the last used logical eraseblock number for the PEB
629 * @torture: if the physical eraseblock has to be tortured
630 *
631 */
632 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
633 int vol_id, int lnum, int torture)
634 {
635 struct ubi_work wl_wrk;
636
637 dbg_wl("sync erase of PEB %i", e->pnum);
638
639 wl_wrk.e = e;
640 wl_wrk.vol_id = vol_id;
641 wl_wrk.lnum = lnum;
642 wl_wrk.torture = torture;
643
644 return __erase_worker(ubi, &wl_wrk);
645 }
646
647 static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
648 /**
649 * wear_leveling_worker - wear-leveling worker function.
650 * @ubi: UBI device description object
651 * @wrk: the work object
652 * @shutdown: non-zero if the worker has to free memory and exit
653 * because the WL-subsystem is shutting down
654 *
655 * This function moves the contents of a less worn-out used physical eraseblock
656 * to a more worn-out free one. Returns zero in case of success and a negative
657 * error code in case of failure.
658 */
659 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
660 int shutdown)
661 {
662 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
663 int erase = 0, keep = 0, vol_id = -1, lnum = -1;
664 struct ubi_wl_entry *e1, *e2;
665 struct ubi_vid_io_buf *vidb;
666 struct ubi_vid_hdr *vid_hdr;
667 int dst_leb_clean = 0;
668
669 kfree(wrk);
670 if (shutdown)
671 return 0;
672
673 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
674 if (!vidb)
675 return -ENOMEM;
676
677 vid_hdr = ubi_get_vid_hdr(vidb);
678
679 down_read(&ubi->fm_eba_sem);
680 mutex_lock(&ubi->move_mutex);
681 spin_lock(&ubi->wl_lock);
682 ubi_assert(!ubi->move_from && !ubi->move_to);
683 ubi_assert(!ubi->move_to_put);
684
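/*
 * Note: the condition below is assembled differently depending on fastmap
 * support. With CONFIG_MTD_UBI_FASTMAP the "is there a suitable free PEB"
 * check is delegated to next_peb_for_wl(), otherwise the @ubi->free tree is
 * inspected directly; the "anything to move?" part is common to both.
 */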
685 #ifdef CONFIG_MTD_UBI_FASTMAP
686 if (!next_peb_for_wl(ubi, true) ||
687 #else
688 if (!ubi->free.rb_node ||
689 #endif
690 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
691 /*
692 * No free physical eraseblocks? Well, they must be waiting in
693 * the queue to be erased. Cancel movement - it will be
694 * triggered again when a free physical eraseblock appears.
695 *
696 * No used physical eraseblocks? They must be temporarily
697 * protected from being moved. They will be moved to the
698 * @ubi->used tree later and the wear-leveling will be
699 * triggered again.
700 */
701 dbg_wl("cancel WL, a list is empty: free %d, used %d",
702 !ubi->free.rb_node, !ubi->used.rb_node);
703 goto out_cancel;
704 }
705
706 #ifdef CONFIG_MTD_UBI_FASTMAP
707 e1 = find_anchor_wl_entry(&ubi->used);
708 if (e1 && ubi->fm_anchor &&
709 (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
710 ubi->fm_do_produce_anchor = 1;
711 /*
712 * fm_anchor is no longer considered a good anchor.
713 * NULL assignment also prevents multiple wear level checks
714 * of this PEB.
715 */
716 wl_tree_add(ubi->fm_anchor, &ubi->free);
717 ubi->fm_anchor = NULL;
718 ubi->free_count++;
719 }
720
721 if (ubi->fm_do_produce_anchor) {
722 if (!e1)
723 goto out_cancel;
724 e2 = get_peb_for_wl(ubi);
725 if (!e2)
726 goto out_cancel;
727
728 self_check_in_wl_tree(ubi, e1, &ubi->used);
729 rb_erase(&e1->u.rb, &ubi->used);
730 dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
731 ubi->fm_do_produce_anchor = 0;
732 } else if (!ubi->scrub.rb_node) {
733 #else
734 if (!ubi->scrub.rb_node) {
735 #endif
736 /*
737 * Now pick the least worn-out used physical eraseblock and a
738 * highly worn-out free physical eraseblock. If the erase
739 * counters differ enough, start wear-leveling.
740 */
741 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
742 e2 = get_peb_for_wl(ubi);
743 if (!e2)
744 goto out_cancel;
745
746 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
747 dbg_wl("no WL needed: min used EC %d, max free EC %d",
748 e1->ec, e2->ec);
749
750 /* Give the unused PEB back */
751 wl_tree_add(e2, &ubi->free);
752 ubi->free_count++;
753 goto out_cancel;
754 }
755 self_check_in_wl_tree(ubi, e1, &ubi->used);
756 rb_erase(&e1->u.rb, &ubi->used);
757 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
758 e1->pnum, e1->ec, e2->pnum, e2->ec);
759 } else {
760 /* Perform scrubbing */
761 scrubbing = 1;
762 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
763 e2 = get_peb_for_wl(ubi);
764 if (!e2)
765 goto out_cancel;
766
767 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
768 rb_erase(&e1->u.rb, &ubi->scrub);
769 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
770 }
771
772 ubi->move_from = e1;
773 ubi->move_to = e2;
774 spin_unlock(&ubi->wl_lock);
775
776 /*
777 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
778 * We so far do not know which logical eraseblock our physical
779 * eraseblock (@e1) belongs to. We have to read the volume identifier
780 * header first.
781 *
782 * Note, we are protected from this PEB being unmapped and erased. The
783 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
784 * which is being moved was unmapped.
785 */
786
787 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
788 if (err && err != UBI_IO_BITFLIPS) {
789 dst_leb_clean = 1;
790 if (err == UBI_IO_FF) {
791 /*
792 * We are trying to move PEB without a VID header. UBI
793 * always writes VID headers shortly after the PEB was
794 * given, so we have a situation when it has not yet
795 * had a chance to write it, because it was preempted.
796 * So add this PEB to the protection queue for now,
797 * because presumably more data will be written there
798 * (including the missing VID header), and then we'll
799 * move it.
800 */
801 dbg_wl("PEB %d has no VID header", e1->pnum);
802 protect = 1;
803 goto out_not_moved;
804 } else if (err == UBI_IO_FF_BITFLIPS) {
805 /*
806 * The same situation as %UBI_IO_FF, but bit-flips were
807 * detected. It is better to schedule this PEB for
808 * scrubbing.
809 */
810 dbg_wl("PEB %d has no VID header but has bit-flips",
811 e1->pnum);
812 scrubbing = 1;
813 goto out_not_moved;
814 } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
815 /*
816 * While a full scan would detect interrupted erasures
817 * at attach time, we can face them here when attached from
818 * Fastmap.
819 */
820 dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
821 e1->pnum);
822 erase = 1;
823 goto out_not_moved;
824 }
825
826 ubi_err(ubi, "error %d while reading VID header from PEB %d",
827 err, e1->pnum);
828 goto out_error;
829 }
830
831 vol_id = be32_to_cpu(vid_hdr->vol_id);
832 lnum = be32_to_cpu(vid_hdr->lnum);
833
834 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
835 if (err) {
836 if (err == MOVE_CANCEL_RACE) {
837 /*
838 * The LEB has not been moved because the volume is
839 * being deleted or the PEB has been put meanwhile. We
840 * should prevent this PEB from being selected for
841 * wear-leveling movement again, so put it to the
842 * protection queue.
843 */
844 protect = 1;
845 dst_leb_clean = 1;
846 goto out_not_moved;
847 }
848 if (err == MOVE_RETRY) {
849 /*
850 * For the source PEB:
851 * 1. If it is a scrub-type PEB (@scrubbing is set), it will
852 * be put back into the @ubi->scrub tree.
853 * 2. A non-scrub-type PEB will be put back into the
854 * @ubi->used tree.
855 */
856 keep = 1;
857 dst_leb_clean = 1;
858 goto out_not_moved;
859 }
860 if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
861 err == MOVE_TARGET_RD_ERR) {
862 /*
863 * Target PEB had bit-flips or write error - torture it.
864 */
865 torture = 1;
866 keep = 1;
867 goto out_not_moved;
868 }
869
870 if (err == MOVE_SOURCE_RD_ERR) {
871 /*
872 * An error happened while reading the source PEB. Do
873 * not switch to R/O mode in this case, and give the
874 * upper layers a possibility to recover from this,
875 * e.g. by unmapping corresponding LEB. Instead, just
876 * put this PEB to the @ubi->erroneous list to prevent
877 * UBI from trying to move it over and over again.
878 */
879 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
880 ubi_err(ubi, "too many erroneous eraseblocks (%d)",
881 ubi->erroneous_peb_count);
882 goto out_error;
883 }
884 dst_leb_clean = 1;
885 erroneous = 1;
886 goto out_not_moved;
887 }
888
889 if (err < 0)
890 goto out_error;
891
892 ubi_assert(0);
893 }
894
895 /* The PEB has been successfully moved */
896 if (scrubbing)
897 ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
898 e1->pnum, vol_id, lnum, e2->pnum);
899 ubi_free_vid_buf(vidb);
900
901 spin_lock(&ubi->wl_lock);
902 if (!ubi->move_to_put) {
903 wl_tree_add(e2, &ubi->used);
904 e2 = NULL;
905 }
906 ubi->move_from = ubi->move_to = NULL;
907 ubi->move_to_put = ubi->wl_scheduled = 0;
908 spin_unlock(&ubi->wl_lock);
909
910 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
911 if (err) {
912 if (e2) {
913 spin_lock(&ubi->wl_lock);
914 wl_entry_destroy(ubi, e2);
915 spin_unlock(&ubi->wl_lock);
916 }
917 goto out_ro;
918 }
919
920 if (e2) {
921 /*
922 * Well, the target PEB was put meanwhile, schedule it for
923 * erasure.
924 */
925 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
926 e2->pnum, vol_id, lnum);
927 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
928 if (err)
929 goto out_ro;
930 }
931
932 dbg_wl("done");
933 mutex_unlock(&ubi->move_mutex);
934 up_read(&ubi->fm_eba_sem);
935 return 0;
936
937 /*
938 * For some reason the LEB was not moved; it might be an error, it might be
939 * something else. @e1 was not changed, so return it back. @e2 might
940 * have been changed, schedule it for erasure.
941 */
942 out_not_moved:
943 if (vol_id != -1)
944 dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
945 e1->pnum, vol_id, lnum, e2->pnum, err);
946 else
947 dbg_wl("cancel moving PEB %d to PEB %d (%d)",
948 e1->pnum, e2->pnum, err);
949 spin_lock(&ubi->wl_lock);
950 if (protect)
951 prot_queue_add(ubi, e1);
952 else if (erroneous) {
953 wl_tree_add(e1, &ubi->erroneous);
954 ubi->erroneous_peb_count += 1;
955 } else if (scrubbing)
956 wl_tree_add(e1, &ubi->scrub);
957 else if (keep)
958 wl_tree_add(e1, &ubi->used);
959 if (dst_leb_clean) {
960 wl_tree_add(e2, &ubi->free);
961 ubi->free_count++;
962 }
963
964 ubi_assert(!ubi->move_to_put);
965 ubi->move_from = ubi->move_to = NULL;
966 ubi->wl_scheduled = 0;
967 spin_unlock(&ubi->wl_lock);
968
969 ubi_free_vid_buf(vidb);
970 if (dst_leb_clean) {
971 ensure_wear_leveling(ubi, 1);
972 } else {
973 err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
974 if (err)
975 goto out_ro;
976 }
977
978 if (erase) {
979 err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
980 if (err)
981 goto out_ro;
982 }
983
984 mutex_unlock(&ubi->move_mutex);
985 up_read(&ubi->fm_eba_sem);
986 return 0;
987
988 out_error:
989 if (vol_id != -1)
990 ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
991 err, e1->pnum, e2->pnum);
992 else
993 ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
994 err, e1->pnum, vol_id, lnum, e2->pnum);
995 spin_lock(&ubi->wl_lock);
996 ubi->move_from = ubi->move_to = NULL;
997 ubi->move_to_put = ubi->wl_scheduled = 0;
998 wl_entry_destroy(ubi, e1);
999 wl_entry_destroy(ubi, e2);
1000 spin_unlock(&ubi->wl_lock);
1001
1002 ubi_free_vid_buf(vidb);
1003
1004 out_ro:
1005 ubi_ro_mode(ubi);
1006 mutex_unlock(&ubi->move_mutex);
1007 up_read(&ubi->fm_eba_sem);
1008 ubi_assert(err != 0);
1009 return err < 0 ? err : -EIO;
1010
1011 out_cancel:
1012 ubi->wl_scheduled = 0;
1013 spin_unlock(&ubi->wl_lock);
1014 mutex_unlock(&ubi->move_mutex);
1015 up_read(&ubi->fm_eba_sem);
1016 ubi_free_vid_buf(vidb);
1017 return 0;
1018 }
1019
1020 /**
1021 * ensure_wear_leveling - schedule wear-leveling if it is needed.
1022 * @ubi: UBI device description object
1023 * @nested: set to non-zero if this function is called from UBI worker
1024 *
1025 * This function checks if it is time to start wear-leveling and schedules it
1026 * if yes. This function returns zero in case of success and a negative error
1027 * code in case of failure.
1028 */
1029 static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
1030 {
1031 int err = 0;
1032 struct ubi_work *wrk;
1033
1034 spin_lock(&ubi->wl_lock);
1035 if (ubi->wl_scheduled)
1036 /* Wear-leveling is already in the work queue */
1037 goto out_unlock;
1038
1039 /*
1040 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
1041 * WL worker has to be scheduled anyway.
1042 */
1043 if (!ubi->scrub.rb_node) {
1044 #ifdef CONFIG_MTD_UBI_FASTMAP
1045 if (!need_wear_leveling(ubi))
1046 goto out_unlock;
1047 #else
1048 struct ubi_wl_entry *e1;
1049 struct ubi_wl_entry *e2;
1050
1051 if (!ubi->used.rb_node || !ubi->free.rb_node)
1052 /* No physical eraseblocks - no deal */
1053 goto out_unlock;
1054
1055 /*
1056 * We schedule wear-leveling only if the difference between the
1057 * lowest erase counter of used physical eraseblocks and a high
1058 * erase counter of free physical eraseblocks is greater than
1059 * %UBI_WL_THRESHOLD.
1060 */
1061 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1062 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
1063
1064 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
1065 goto out_unlock;
1066 #endif
1067 dbg_wl("schedule wear-leveling");
1068 } else
1069 dbg_wl("schedule scrubbing");
1070
1071 ubi->wl_scheduled = 1;
1072 spin_unlock(&ubi->wl_lock);
1073
1074 wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1075 if (!wrk) {
1076 err = -ENOMEM;
1077 goto out_cancel;
1078 }
1079
1080 wrk->func = &wear_leveling_worker;
1081 if (nested)
1082 __schedule_ubi_work(ubi, wrk);
1083 else
1084 schedule_ubi_work(ubi, wrk);
1085 return err;
1086
1087 out_cancel:
1088 spin_lock(&ubi->wl_lock);
1089 ubi->wl_scheduled = 0;
1090 out_unlock:
1091 spin_unlock(&ubi->wl_lock);
1092 return err;
1093 }
1094
1095 /**
1096 * __erase_worker - physical eraseblock erase worker function.
1097 * @ubi: UBI device description object
1098 * @wl_wrk: the work object
1099 *
1100 * This function erases a physical eraseblock and performs torture testing if
1101 * needed. It also takes care of marking the physical eraseblock bad if
1102 * needed. Returns zero in case of success and a negative error code in case of
1103 * failure.
1104 */
1105 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
1106 {
1107 struct ubi_wl_entry *e = wl_wrk->e;
1108 int pnum = e->pnum;
1109 int vol_id = wl_wrk->vol_id;
1110 int lnum = wl_wrk->lnum;
1111 int err, available_consumed = 0;
1112
1113 dbg_wl("erase PEB %d EC %d LEB %d:%d",
1114 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1115
1116 err = ubi_sync_erase(ubi, e, wl_wrk->torture);
1117 if (!err) {
1118 spin_lock(&ubi->wl_lock);
1119
1120 if (!ubi->fm_disabled && !ubi->fm_anchor &&
1121 e->pnum < UBI_FM_MAX_START) {
1122 /*
1123 * Abort anchor production, if needed it will be
1124 * enabled again in the wear leveling started below.
1125 */
1126 ubi->fm_anchor = e;
1127 ubi->fm_do_produce_anchor = 0;
1128 } else {
1129 wl_tree_add(e, &ubi->free);
1130 ubi->free_count++;
1131 }
1132
1133 spin_unlock(&ubi->wl_lock);
1134
1135 /*
1136 * One more erase operation has happened, take care of
1137 * protected physical eraseblocks.
1138 */
1139 serve_prot_queue(ubi);
1140
1141 /* And take care about wear-leveling */
1142 err = ensure_wear_leveling(ubi, 1);
1143 return err;
1144 }
1145
1146 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1147
1148 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1149 err == -EBUSY) {
1150 int err1;
1151
1152 /* Re-schedule the LEB for erasure */
1153 err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
1154 if (err1) {
1155 spin_lock(&ubi->wl_lock);
1156 wl_entry_destroy(ubi, e);
1157 spin_unlock(&ubi->wl_lock);
1158 err = err1;
1159 goto out_ro;
1160 }
1161 return err;
1162 }
1163
1164 spin_lock(&ubi->wl_lock);
1165 wl_entry_destroy(ubi, e);
1166 spin_unlock(&ubi->wl_lock);
1167 if (err != -EIO)
1168 /*
1169 * If this is not %-EIO, we have no idea what to do. Scheduling
1170 * this physical eraseblock for erasure again would cause
1171 * errors again and again. Well, let's switch to R/O mode.
1172 */
1173 goto out_ro;
1174
1175 /* It is %-EIO, the PEB went bad */
1176
1177 if (!ubi->bad_allowed) {
1178 ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
1179 goto out_ro;
1180 }
1181
1182 spin_lock(&ubi->volumes_lock);
1183 if (ubi->beb_rsvd_pebs == 0) {
1184 if (ubi->avail_pebs == 0) {
1185 spin_unlock(&ubi->volumes_lock);
1186 ubi_err(ubi, "no reserved/available physical eraseblocks");
1187 goto out_ro;
1188 }
1189 ubi->avail_pebs -= 1;
1190 available_consumed = 1;
1191 }
1192 spin_unlock(&ubi->volumes_lock);
1193
1194 ubi_msg(ubi, "mark PEB %d as bad", pnum);
1195 err = ubi_io_mark_bad(ubi, pnum);
1196 if (err)
1197 goto out_ro;
1198
1199 spin_lock(&ubi->volumes_lock);
1200 if (ubi->beb_rsvd_pebs > 0) {
1201 if (available_consumed) {
1202 /*
1203 * The amount of reserved PEBs increased since we last
1204 * checked.
1205 */
1206 ubi->avail_pebs += 1;
1207 available_consumed = 0;
1208 }
1209 ubi->beb_rsvd_pebs -= 1;
1210 }
1211 ubi->bad_peb_count += 1;
1212 ubi->good_peb_count -= 1;
1213 ubi_calculate_reserved(ubi);
1214 if (available_consumed)
1215 ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
1216 else if (ubi->beb_rsvd_pebs)
1217 ubi_msg(ubi, "%d PEBs left in the reserve",
1218 ubi->beb_rsvd_pebs);
1219 else
1220 ubi_warn(ubi, "last PEB from the reserve was used");
1221 spin_unlock(&ubi->volumes_lock);
1222
1223 return err;
1224
1225 out_ro:
1226 if (available_consumed) {
1227 spin_lock(&ubi->volumes_lock);
1228 ubi->avail_pebs += 1;
1229 spin_unlock(&ubi->volumes_lock);
1230 }
1231 ubi_ro_mode(ubi);
1232 return err;
1233 }
1234
1235 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1236 int shutdown)
1237 {
1238 int ret;
1239
1240 if (shutdown) {
1241 struct ubi_wl_entry *e = wl_wrk->e;
1242
1243 dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
1244 kfree(wl_wrk);
1245 wl_entry_destroy(ubi, e);
1246 return 0;
1247 }
1248
1249 ret = __erase_worker(ubi, wl_wrk);
1250 kfree(wl_wrk);
1251 return ret;
1252 }
1253
1254 /**
1255 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1256 * @ubi: UBI device description object
1257 * @vol_id: the volume ID that last used this PEB
1258 * @lnum: the last used logical eraseblock number for the PEB
1259 * @pnum: physical eraseblock to return
1260 * @torture: if this physical eraseblock has to be tortured
1261 *
1262 * This function is called to return physical eraseblock @pnum to the pool of
1263 * free physical eraseblocks. The @torture flag has to be set if an I/O error
1264 * occurred to this @pnum and it has to be tested. This function returns zero
1265 * in case of success, and a negative error code in case of failure.
1266 */
1267 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
1268 int pnum, int torture)
1269 {
1270 int err;
1271 struct ubi_wl_entry *e;
1272
1273 dbg_wl("PEB %d", pnum);
1274 ubi_assert(pnum >= 0);
1275 ubi_assert(pnum < ubi->peb_count);
1276
1277 down_read(&ubi->fm_protect);
1278
1279 retry:
1280 spin_lock(&ubi->wl_lock);
1281 e = ubi->lookuptbl[pnum];
1282 if (!e) {
1283 /*
1284 * This wl entry has been removed because of an error by another
1285 * process (e.g. the wear-leveling worker). That process
1286 * (except __erase_worker, which cannot run concurrently with
1287 * ubi_wl_put_peb) will have switched UBI to read-only mode at
1288 * the same time, so just ignore this wl entry.
1289 */
1290 spin_unlock(&ubi->wl_lock);
1291 up_read(&ubi->fm_protect);
1292 return 0;
1293 }
1294 if (e == ubi->move_from) {
1295 /*
1296 * User is putting the physical eraseblock which was selected to
1297 * be moved. It will be scheduled for erasure in the
1298 * wear-leveling worker.
1299 */
1300 dbg_wl("PEB %d is being moved, wait", pnum);
1301 spin_unlock(&ubi->wl_lock);
1302
1303 /* Wait for the WL worker by taking the @ubi->move_mutex */
1304 mutex_lock(&ubi->move_mutex);
1305 mutex_unlock(&ubi->move_mutex);
1306 goto retry;
1307 } else if (e == ubi->move_to) {
1308 /*
1309 * User is putting the physical eraseblock which was selected
1310 * as the target the data is moved to. It may happen if the EBA
1311 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
1312 * but the WL sub-system has not put the PEB to the "used" tree
1313 * yet, but it is about to do this. So we just set a flag which
1314 * will tell the WL worker that the PEB is not needed anymore
1315 * and should be scheduled for erasure.
1316 */
1317 dbg_wl("PEB %d is the target of data moving", pnum);
1318 ubi_assert(!ubi->move_to_put);
1319 ubi->move_to_put = 1;
1320 spin_unlock(&ubi->wl_lock);
1321 up_read(&ubi->fm_protect);
1322 return 0;
1323 } else {
1324 if (in_wl_tree(e, &ubi->used)) {
1325 self_check_in_wl_tree(ubi, e, &ubi->used);
1326 rb_erase(&e->u.rb, &ubi->used);
1327 } else if (in_wl_tree(e, &ubi->scrub)) {
1328 self_check_in_wl_tree(ubi, e, &ubi->scrub);
1329 rb_erase(&e->u.rb, &ubi->scrub);
1330 } else if (in_wl_tree(e, &ubi->erroneous)) {
1331 self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1332 rb_erase(&e->u.rb, &ubi->erroneous);
1333 ubi->erroneous_peb_count -= 1;
1334 ubi_assert(ubi->erroneous_peb_count >= 0);
1335 /* Erroneous PEBs should be tortured */
1336 torture = 1;
1337 } else {
1338 err = prot_queue_del(ubi, e->pnum);
1339 if (err) {
1340 ubi_err(ubi, "PEB %d not found", pnum);
1341 ubi_ro_mode(ubi);
1342 spin_unlock(&ubi->wl_lock);
1343 up_read(&ubi->fm_protect);
1344 return err;
1345 }
1346 }
1347 }
1348 spin_unlock(&ubi->wl_lock);
1349
1350 err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
1351 if (err) {
1352 spin_lock(&ubi->wl_lock);
1353 wl_tree_add(e, &ubi->used);
1354 spin_unlock(&ubi->wl_lock);
1355 }
1356
1357 up_read(&ubi->fm_protect);
1358 return err;
1359 }
1360
1361 /**
1362 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1363 * @ubi: UBI device description object
1364 * @pnum: the physical eraseblock to schedule
1365 *
1366 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1367 * needs scrubbing. This function schedules a physical eraseblock for
1368 * scrubbing which is done in background. This function returns zero in case of
1369 * success and a negative error code in case of failure.
1370 */
1371 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1372 {
1373 struct ubi_wl_entry *e;
1374
1375 ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1376
1377 retry:
1378 spin_lock(&ubi->wl_lock);
1379 e = ubi->lookuptbl[pnum];
1380 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1381 in_wl_tree(e, &ubi->erroneous)) {
1382 spin_unlock(&ubi->wl_lock);
1383 return 0;
1384 }
1385
1386 if (e == ubi->move_to) {
1387 /*
1388 * This physical eraseblock was used to move data to. The data
1389 * was moved but the PEB was not yet inserted to the proper
1390 * tree. We should just wait a little and let the WL worker
1391 * proceed.
1392 */
1393 spin_unlock(&ubi->wl_lock);
1394 dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1395 yield();
1396 goto retry;
1397 }
1398
1399 if (in_wl_tree(e, &ubi->used)) {
1400 self_check_in_wl_tree(ubi, e, &ubi->used);
1401 rb_erase(&e->u.rb, &ubi->used);
1402 } else {
1403 int err;
1404
1405 err = prot_queue_del(ubi, e->pnum);
1406 if (err) {
1407 ubi_err(ubi, "PEB %d not found", pnum);
1408 ubi_ro_mode(ubi);
1409 spin_unlock(&ubi->wl_lock);
1410 return err;
1411 }
1412 }
1413
1414 wl_tree_add(e, &ubi->scrub);
1415 spin_unlock(&ubi->wl_lock);
1416
1417 /*
1418 * Technically scrubbing is the same as wear-leveling, so it is done
1419 * by the WL worker.
1420 */
1421 return ensure_wear_leveling(ubi, 0);
1422 }
1423
1424 /**
1425 * ubi_wl_flush - flush all pending works.
1426 * @ubi: UBI device description object
1427 * @vol_id: the volume id to flush for
1428 * @lnum: the logical eraseblock number to flush for
1429 *
1430 * This function executes all pending works for a particular volume id /
1431 * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
1432 * acts as a wildcard for all of the corresponding volume numbers or logical
1433 * eraseblock numbers. It returns zero in case of success and a negative error
1434 * code in case of failure.
1435 */
1436 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1437 {
1438 int err = 0;
1439 int found = 1;
1440
1441 /*
1442 * Erase while the pending works queue is not empty, but not more than
1443 * the number of currently pending works.
1444 */
1445 dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1446 vol_id, lnum, ubi->works_count);
1447
1448 while (found) {
1449 struct ubi_work *wrk, *tmp;
1450 found = 0;
1451
1452 down_read(&ubi->work_sem);
1453 spin_lock(&ubi->wl_lock);
1454 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1455 if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1456 (lnum == UBI_ALL || wrk->lnum == lnum)) {
1457 list_del(&wrk->list);
1458 ubi->works_count -= 1;
1459 ubi_assert(ubi->works_count >= 0);
1460 spin_unlock(&ubi->wl_lock);
1461
1462 err = wrk->func(ubi, wrk, 0);
1463 if (err) {
1464 up_read(&ubi->work_sem);
1465 return err;
1466 }
1467
1468 spin_lock(&ubi->wl_lock);
1469 found = 1;
1470 break;
1471 }
1472 }
1473 spin_unlock(&ubi->wl_lock);
1474 up_read(&ubi->work_sem);
1475 }
1476
1477 /*
1478 * Make sure all the works which have been done in parallel are
1479 * finished.
1480 */
1481 down_write(&ubi->work_sem);
1482 up_write(&ubi->work_sem);
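/*
 * The down_write()/up_write() pair above acts as a barrier rather than a
 * critical section: workers hold @ubi->work_sem in read mode while a work
 * function runs (see do_work()), so the write lock can only be acquired once
 * every work that was already in flight has completed.
 */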
1483
1484 return err;
1485 }
1486
1487 static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
1488 {
1489 if (in_wl_tree(e, &ubi->scrub))
1490 return false;
1491 else if (in_wl_tree(e, &ubi->erroneous))
1492 return false;
1493 else if (ubi->move_from == e)
1494 return false;
1495 else if (ubi->move_to == e)
1496 return false;
1497
1498 return true;
1499 }
1500
1501 /**
1502 * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
1503 * @ubi: UBI device description object
1504 * @pnum: the physical eraseblock to schedule
1505 * @force: don't read the block, assume bitflips happened and take action.
1506 *
1507 * This function reads the given eraseblock and checks if bitflips occurred.
1508 * In case of bitflips, the eraseblock is scheduled for scrubbing.
1509 * If scrubbing is forced with @force, the eraseblock is not read,
1510 * but scheduled for scrubbing right away.
1511 *
1512 * Returns:
1513 * %EINVAL, PEB is out of range
1514 * %ENOENT, PEB is no longer used by UBI
1515 * %EBUSY, PEB cannot be checked now or a check is currently running on it
1516 * %EAGAIN, bit flips happened but scrubbing is currently not possible
1517 * %EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
1518 * %0, no bit flips detected
1519 */
1520 int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
1521 {
1522 int err = 0;
1523 struct ubi_wl_entry *e;
1524
1525 if (pnum < 0 || pnum >= ubi->peb_count) {
1526 err = -EINVAL;
1527 goto out;
1528 }
1529
1530 /*
1531 * Pause all parallel work, otherwise it can happen that the
1532 * erase worker frees a wl entry under us.
1533 */
1534 down_write(&ubi->work_sem);
1535
1536 /*
1537 * Make sure that the wl entry does not change state while
1538 * inspecting it.
1539 */
1540 spin_lock(&ubi->wl_lock);
1541 e = ubi->lookuptbl[pnum];
1542 if (!e) {
1543 spin_unlock(&ubi->wl_lock);
1544 err = -ENOENT;
1545 goto out_resume;
1546 }
1547
1548 /*
1549 * Does it make sense to check this PEB?
1550 */
1551 if (!scrub_possible(ubi, e)) {
1552 spin_unlock(&ubi->wl_lock);
1553 err = -EBUSY;
1554 goto out_resume;
1555 }
1556 spin_unlock(&ubi->wl_lock);
1557
1558 if (!force) {
1559 mutex_lock(&ubi->buf_mutex);
1560 err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
1561 mutex_unlock(&ubi->buf_mutex);
1562 }
1563
1564 if (force || err == UBI_IO_BITFLIPS) {
1565 /*
1566 * Okay, bit flip happened, let's figure out what we can do.
1567 */
1568 spin_lock(&ubi->wl_lock);
1569
1570 /*
1571 * Recheck. We released wl_lock, UBI might have killed the
1572 * wl entry under us.
1573 */
1574 e = ubi->lookuptbl[pnum];
1575 if (!e) {
1576 spin_unlock(&ubi->wl_lock);
1577 err = -ENOENT;
1578 goto out_resume;
1579 }
1580
1581 /*
1582 * Need to re-check state
1583 */
1584 if (!scrub_possible(ubi, e)) {
1585 spin_unlock(&ubi->wl_lock);
1586 err = -EBUSY;
1587 goto out_resume;
1588 }
1589
1590 if (in_pq(ubi, e)) {
1591 prot_queue_del(ubi, e->pnum);
1592 wl_tree_add(e, &ubi->scrub);
1593 spin_unlock(&ubi->wl_lock);
1594
1595 err = ensure_wear_leveling(ubi, 1);
1596 } else if (in_wl_tree(e, &ubi->used)) {
1597 rb_erase(&e->u.rb, &ubi->used);
1598 wl_tree_add(e, &ubi->scrub);
1599 spin_unlock(&ubi->wl_lock);
1600
1601 err = ensure_wear_leveling(ubi, 1);
1602 } else if (in_wl_tree(e, &ubi->free)) {
1603 rb_erase(&e->u.rb, &ubi->free);
1604 ubi->free_count--;
1605 spin_unlock(&ubi->wl_lock);
1606
1607 /*
1608 * This PEB is empty, we can schedule it for
1609 * erasure right away. No wear-leveling is needed.
1610 */
1611 err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
1612 force ? 0 : 1, true);
1613 } else {
1614 spin_unlock(&ubi->wl_lock);
1615 err = -EAGAIN;
1616 }
1617
1618 if (!err && !force)
1619 err = -EUCLEAN;
1620 } else {
1621 err = 0;
1622 }
1623
1624 out_resume:
1625 up_write(&ubi->work_sem);
1626 out:
1627
1628 return err;
1629 }
1630
1631 /**
1632 * tree_destroy - destroy an RB-tree.
1633 * @ubi: UBI device description object
1634 * @root: the root of the tree to destroy
1635 */
1636 static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
1637 {
1638 struct rb_node *rb;
1639 struct ubi_wl_entry *e;
1640
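/*
 * Walk the tree iteratively: always descend to a leaf, free it and detach it
 * from its parent. This avoids recursion as well as the rebalancing that
 * rb_erase() would perform for every node.
 */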
1641 rb = root->rb_node;
1642 while (rb) {
1643 if (rb->rb_left)
1644 rb = rb->rb_left;
1645 else if (rb->rb_right)
1646 rb = rb->rb_right;
1647 else {
1648 e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1649
1650 rb = rb_parent(rb);
1651 if (rb) {
1652 if (rb->rb_left == &e->u.rb)
1653 rb->rb_left = NULL;
1654 else
1655 rb->rb_right = NULL;
1656 }
1657
1658 wl_entry_destroy(ubi, e);
1659 }
1660 }
1661 }
1662
1663 /**
1664 * ubi_thread - UBI background thread.
1665 * @u: the UBI device description object pointer
1666 */
1667 int ubi_thread(void *u)
1668 {
1669 int failures = 0;
1670 struct ubi_device *ubi = u;
1671
1672 ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1673 ubi->bgt_name, task_pid_nr(current));
1674
1675 set_freezable();
1676 for (;;) {
1677 int err;
1678
1679 if (kthread_should_stop())
1680 break;
1681
1682 if (try_to_freeze())
1683 continue;
1684
1685 spin_lock(&ubi->wl_lock);
1686 if (list_empty(&ubi->works) || ubi->ro_mode ||
1687 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1688 set_current_state(TASK_INTERRUPTIBLE);
1689 spin_unlock(&ubi->wl_lock);
1690
1691 /*
1692 * Check kthread_should_stop() after we set the task
1693 * state to guarantee that we either see the stop bit
1694 * and exit or the task state is reset to runnable such
1695 * that it's not scheduled out indefinitely and detects
1696 * the stop bit at kthread_should_stop().
1697 */
1698 if (kthread_should_stop()) {
1699 set_current_state(TASK_RUNNING);
1700 break;
1701 }
1702
1703 schedule();
1704 continue;
1705 }
1706 spin_unlock(&ubi->wl_lock);
1707
1708 err = do_work(ubi, NULL);
1709 if (err) {
1710 ubi_err(ubi, "%s: work failed with error code %d",
1711 ubi->bgt_name, err);
1712 if (failures++ > WL_MAX_FAILURES) {
1713 /*
1714 * Too many failures, disable the thread and
1715 * switch to read-only mode.
1716 */
1717 ubi_msg(ubi, "%s: %d consecutive failures",
1718 ubi->bgt_name, WL_MAX_FAILURES);
1719 ubi_ro_mode(ubi);
1720 ubi->thread_enabled = 0;
1721 continue;
1722 }
1723 } else
1724 failures = 0;
1725
1726 cond_resched();
1727 }
1728
1729 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1730 ubi->thread_enabled = 0;
1731 return 0;
1732 }
1733
1734 /**
1735 * shutdown_work - shutdown all pending works.
1736 * @ubi: UBI device description object
1737 */
1738 static void shutdown_work(struct ubi_device *ubi)
1739 {
1740 while (!list_empty(&ubi->works)) {
1741 struct ubi_work *wrk;
1742
1743 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1744 list_del(&wrk->list);
1745 wrk->func(ubi, wrk, 1);
1746 ubi->works_count -= 1;
1747 ubi_assert(ubi->works_count >= 0);
1748 }
1749 }
1750
1751 /**
1752 * erase_aeb - erase a PEB given in UBI attach info PEB
1753 * @ubi: UBI device description object
1754 * @aeb: UBI attach info PEB
1755 * @sync: If true, erase synchronously. Otherwise schedule for erasure
1756 */
1757 static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
1758 {
1759 struct ubi_wl_entry *e;
1760 int err;
1761
1762 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1763 if (!e)
1764 return -ENOMEM;
1765
1766 e->pnum = aeb->pnum;
1767 e->ec = aeb->ec;
1768 ubi->lookuptbl[e->pnum] = e;
1769
1770 if (sync) {
1771 err = ubi_sync_erase(ubi, e, false);
1772 if (err)
1773 goto out_free;
1774
1775 wl_tree_add(e, &ubi->free);
1776 ubi->free_count++;
1777 } else {
1778 err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
1779 if (err)
1780 goto out_free;
1781 }
1782
1783 return 0;
1784
1785 out_free:
1786 wl_entry_destroy(ubi, e);
1787
1788 return err;
1789 }
1790
1791 /**
1792 * ubi_wl_init - initialize the WL sub-system using attaching information.
1793 * @ubi: UBI device description object
1794 * @ai: attaching information
1795 *
1796 * This function returns zero in case of success, and a negative error code in
1797 * case of failure.
1798 */
1799 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1800 {
1801 int err, i, reserved_pebs, found_pebs = 0;
1802 struct rb_node *rb1, *rb2;
1803 struct ubi_ainf_volume *av;
1804 struct ubi_ainf_peb *aeb, *tmp;
1805 struct ubi_wl_entry *e;
1806
1807 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1808 spin_lock_init(&ubi->wl_lock);
1809 mutex_init(&ubi->move_mutex);
1810 init_rwsem(&ubi->work_sem);
1811 ubi->max_ec = ai->max_ec;
1812 INIT_LIST_HEAD(&ubi->works);
1813
1814 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1815
1816 err = -ENOMEM;
1817 ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
1818 if (!ubi->lookuptbl)
1819 return err;
1820
1821 for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1822 INIT_LIST_HEAD(&ubi->pq[i]);
1823 ubi->pq_head = 0;
1824
1825 ubi->free_count = 0;
1826 list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1827 cond_resched();
1828
1829 err = erase_aeb(ubi, aeb, false);
1830 if (err)
1831 goto out_free;
1832
1833 found_pebs++;
1834 }
1835
1836 list_for_each_entry(aeb, &ai->free, u.list) {
1837 cond_resched();
1838
1839 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1840 if (!e) {
1841 err = -ENOMEM;
1842 goto out_free;
1843 }
1844
1845 e->pnum = aeb->pnum;
1846 e->ec = aeb->ec;
1847 ubi_assert(e->ec >= 0);
1848
1849 wl_tree_add(e, &ubi->free);
1850 ubi->free_count++;
1851
1852 ubi->lookuptbl[e->pnum] = e;
1853
1854 found_pebs++;
1855 }
1856
1857 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1858 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1859 cond_resched();
1860
1861 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1862 if (!e) {
1863 err = -ENOMEM;
1864 goto out_free;
1865 }
1866
1867 e->pnum = aeb->pnum;
1868 e->ec = aeb->ec;
1869 ubi->lookuptbl[e->pnum] = e;
1870
1871 if (!aeb->scrub) {
1872 dbg_wl("add PEB %d EC %d to the used tree",
1873 e->pnum, e->ec);
1874 wl_tree_add(e, &ubi->used);
1875 } else {
1876 dbg_wl("add PEB %d EC %d to the scrub tree",
1877 e->pnum, e->ec);
1878 wl_tree_add(e, &ubi->scrub);
1879 }
1880
1881 found_pebs++;
1882 }
1883 }
1884
1885 list_for_each_entry(aeb, &ai->fastmap, u.list) {
1886 cond_resched();
1887
1888 e = ubi_find_fm_block(ubi, aeb->pnum);
1889
1890 if (e) {
1891 ubi_assert(!ubi->lookuptbl[e->pnum]);
1892 ubi->lookuptbl[e->pnum] = e;
1893 } else {
1894 bool sync = false;
1895
1896 /*
1897 * Usually old Fastmap PEBs are scheduled for erasure
1898 * and we don't have to care about them, but if we face
1899 * a power cut before scheduling them we need to
1900 * take care of them here.
1901 */
1902 if (ubi->lookuptbl[aeb->pnum])
1903 continue;
1904
1905 /*
1906 * The fastmap update code might not find a free PEB for
1907 * writing the fastmap anchor to and then reuses the
1908 * current fastmap anchor PEB. When this PEB gets erased
1909 * and a power cut happens before it is written again we
1910 * must make sure that the fastmap attach code doesn't
1911 * find any outdated fastmap anchors, hence we erase the
1912 * outdated fastmap anchor PEBs synchronously here.
1913 */
1914 if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
1915 sync = true;
1916
1917 err = erase_aeb(ubi, aeb, sync);
1918 if (err)
1919 goto out_free;
1920 }
1921
1922 found_pebs++;
1923 }
1924
1925 dbg_wl("found %i PEBs", found_pebs);
1926
1927 ubi_assert(ubi->good_peb_count == found_pebs);
1928
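/*
 * Reserve PEBs for internal WL use; ubi_fastmap_init() may increase the
 * reservation further to cover fastmap's own needs.
 */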
1929 reserved_pebs = WL_RESERVED_PEBS;
1930 ubi_fastmap_init(ubi, &reserved_pebs);
1931
1932 if (ubi->avail_pebs < reserved_pebs) {
1933 ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
1934 ubi->avail_pebs, reserved_pebs);
1935 if (ubi->corr_peb_count)
1936 ubi_err(ubi, "%d PEBs are corrupted and not used",
1937 ubi->corr_peb_count);
1938 err = -ENOSPC;
1939 goto out_free;
1940 }
1941 ubi->avail_pebs -= reserved_pebs;
1942 ubi->rsvd_pebs += reserved_pebs;
1943
1944 /* Schedule wear-leveling if needed */
1945 err = ensure_wear_leveling(ubi, 0);
1946 if (err)
1947 goto out_free;
1948
1949 #ifdef CONFIG_MTD_UBI_FASTMAP
1950 if (!ubi->ro_mode && !ubi->fm_disabled)
1951 ubi_ensure_anchor_pebs(ubi);
1952 #endif
1953 return 0;
1954
1955 out_free:
1956 shutdown_work(ubi);
1957 tree_destroy(ubi, &ubi->used);
1958 tree_destroy(ubi, &ubi->free);
1959 tree_destroy(ubi, &ubi->scrub);
1960 kfree(ubi->lookuptbl);
1961 return err;
1962 }
1963
1964 /**
1965 * protection_queue_destroy - destroy the protection queue.
1966 * @ubi: UBI device description object
1967 */
1968 static void protection_queue_destroy(struct ubi_device *ubi)
1969 {
1970 int i;
1971 struct ubi_wl_entry *e, *tmp;
1972
1973 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1974 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1975 list_del(&e->u.list);
1976 wl_entry_destroy(ubi, e);
1977 }
1978 }
1979 }
1980
1981 /**
1982 * ubi_wl_close - close the wear-leveling sub-system.
1983 * @ubi: UBI device description object
1984 */
1985 void ubi_wl_close(struct ubi_device *ubi)
1986 {
1987 dbg_wl("close the WL sub-system");
1988 ubi_fastmap_close(ubi);
1989 shutdown_work(ubi);
1990 protection_queue_destroy(ubi);
1991 tree_destroy(ubi, &ubi->used);
1992 tree_destroy(ubi, &ubi->erroneous);
1993 tree_destroy(ubi, &ubi->free);
1994 tree_destroy(ubi, &ubi->scrub);
1995 kfree(ubi->lookuptbl);
1996 }
1997
1998 /**
1999 * self_check_ec - make sure that the erase counter of a PEB is correct.
2000 * @ubi: UBI device description object
2001 * @pnum: the physical eraseblock number to check
2002 * @ec: the erase counter to check
2003 *
2004 * This function returns zero if the erase counter of physical eraseblock @pnum
2005 * is equivalent to @ec, and a negative error code if not or if an error
2006 * occurred.
2007 */
2008 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
2009 {
2010 int err;
2011 long long read_ec;
2012 struct ubi_ec_hdr *ec_hdr;
2013
2014 if (!ubi_dbg_chk_gen(ubi))
2015 return 0;
2016
2017 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
2018 if (!ec_hdr)
2019 return -ENOMEM;
2020
2021 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
2022 if (err && err != UBI_IO_BITFLIPS) {
2023 /* The header does not have to exist */
2024 err = 0;
2025 goto out_free;
2026 }
2027
2028 read_ec = be64_to_cpu(ec_hdr->ec);
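/*
 * The self-check only complains when the on-flash counter is more than one
 * erase cycle ahead of the expected value.
 */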
2029 if (ec != read_ec && read_ec - ec > 1) {
2030 ubi_err(ubi, "self-check failed for PEB %d", pnum);
2031 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
2032 dump_stack();
2033 err = 1;
2034 } else
2035 err = 0;
2036
2037 out_free:
2038 kfree(ec_hdr);
2039 return err;
2040 }
2041
2042 /**
2043 * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
2044 * @ubi: UBI device description object
2045 * @e: the wear-leveling entry to check
2046 * @root: the root of the tree
2047 *
2048 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
2049 * is not.
2050 */
2051 static int self_check_in_wl_tree(const struct ubi_device *ubi,
2052 struct ubi_wl_entry *e, struct rb_root *root)
2053 {
2054 if (!ubi_dbg_chk_gen(ubi))
2055 return 0;
2056
2057 if (in_wl_tree(e, root))
2058 return 0;
2059
2060 ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p",
2061 e->pnum, e->ec, root);
2062 dump_stack();
2063 return -EINVAL;
2064 }
2065
2066 /**
2067 * self_check_in_pq - check if wear-leveling entry is in the protection
2068 * queue.
2069 * @ubi: UBI device description object
2070 * @e: the wear-leveling entry to check
2071 *
2072 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
2073 */
2074 static int self_check_in_pq(const struct ubi_device *ubi,
2075 struct ubi_wl_entry *e)
2076 {
2077 if (!ubi_dbg_chk_gen(ubi))
2078 return 0;
2079
2080 if (in_pq(ubi, e))
2081 return 0;
2082
2083 ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
2084 e->pnum, e->ec);
2085 dump_stack();
2086 return -EINVAL;
2087 }
2088 #ifndef CONFIG_MTD_UBI_FASTMAP
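/**
 * get_peb_for_wl - pick and remove a free PEB to use as a wear-leveling target.
 * @ubi: UBI device description object
 *
 * Selects an entry from the @ubi->free tree via find_wl_entry() (bounded by
 * WL_FREE_MAX_DIFF), removes it from the tree, updates @ubi->free_count and
 * returns the entry. The free tree is modified, so the caller is expected to
 * hold @ubi->wl_lock.
 */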
2089 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
2090 {
2091 struct ubi_wl_entry *e;
2092
2093 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
2094 self_check_in_wl_tree(ubi, e, &ubi->free);
2095 ubi->free_count--;
2096 ubi_assert(ubi->free_count >= 0);
2097 rb_erase(&e->u.rb, &ubi->free);
2098
2099 return e;
2100 }
2101
2102 /**
2103 * produce_free_peb - produce a free physical eraseblock.
2104 * @ubi: UBI device description object
2105 *
2106 * This function tries to make a free PEB by means of synchronous execution of
2107 * pending works. This may be needed if, for example, the background thread is
2108 * disabled. Returns zero in case of success and a negative error code in case
2109 * of failure.
2110 */
2111 static int produce_free_peb(struct ubi_device *ubi)
2112 {
2113 int err;
2114
2115 while (!ubi->free.rb_node && ubi->works_count) {
2116 spin_unlock(&ubi->wl_lock);
2117
2118 dbg_wl("do one work synchronously");
2119 err = do_work(ubi, NULL);
2120
2121 spin_lock(&ubi->wl_lock);
2122 if (err)
2123 return err;
2124 }
2125
2126 return 0;
2127 }
2128
2129 /**
2130 * ubi_wl_get_peb - get a physical eraseblock.
2131 * @ubi: UBI device description object
2132 *
2133 * This function returns a physical eraseblock number in case of success and a
2134 * negative error code in case of failure.
2135 * Returns with ubi->fm_eba_sem held in read mode, on success and on failure!
2136 */
2137 int ubi_wl_get_peb(struct ubi_device *ubi)
2138 {
2139 int err;
2140 struct ubi_wl_entry *e;
2141
2142 retry:
2143 down_read(&ubi->fm_eba_sem);
2144 spin_lock(&ubi->wl_lock);
2145 if (!ubi->free.rb_node) {
2146 if (ubi->works_count == 0) {
2147 ubi_err(ubi, "no free eraseblocks");
2148 ubi_assert(list_empty(&ubi->works));
2149 spin_unlock(&ubi->wl_lock);
2150 return -ENOSPC;
2151 }
2152
2153 err = produce_free_peb(ubi);
2154 if (err < 0) {
2155 spin_unlock(&ubi->wl_lock);
2156 return err;
2157 }
2158 spin_unlock(&ubi->wl_lock);
2159 up_read(&ubi->fm_eba_sem);
2160 goto retry;
2162 }
2163 e = wl_get_wle(ubi);
2164 prot_queue_add(ubi, e);
2165 spin_unlock(&ubi->wl_lock);
2166
2167 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
2168 ubi->peb_size - ubi->vid_hdr_aloffset);
2169 if (err) {
2170 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
2171 return err;
2172 }
2173
2174 return e->pnum;
2175 }
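/*
 * Note: ubi_wl_get_peb() keeps @ubi->fm_eba_sem read-locked even when it
 * fails, so a caller is expected to drop the semaphore on both paths. A
 * minimal, illustrative sketch (not copied from any real caller):
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum < 0) {
 *		up_read(&ubi->fm_eba_sem);
 *		return pnum;
 *	}
 *	... write to PEB pnum ...
 *	up_read(&ubi->fm_eba_sem);
 */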
2176 #else
2177 #include "fastmap-wl.c"
2178 #endif
2179