// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-gc.c - pblk's garbage collector
 */

#include "pblk.h"
#include "pblk-trace.h"
#include <linux/delay.h>

static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
        /* vfree() tolerates a NULL pointer, so no check is needed */
        vfree(gc_rq->data);
        kfree(gc_rq);
}

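/*
 * Drain the GC write list and submit the recycled data to the write
 * cache. Returns 0 if requests were submitted, 1 if the list was empty.
 */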
static int pblk_gc_write(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_gc_rq *gc_rq, *tgc_rq;
        LIST_HEAD(w_list);

        spin_lock(&gc->w_lock);
        if (list_empty(&gc->w_list)) {
                spin_unlock(&gc->w_lock);
                return 1;
        }

        list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
        gc->w_entries = 0;
        spin_unlock(&gc->w_lock);

        list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
                pblk_write_gc_to_cache(pblk, gc_rq);
                list_del(&gc_rq->list);
                kref_put(&gc_rq->line->ref, pblk_line_put);
                pblk_gc_free_gc_rq(gc_rq);
        }

        return 0;
}

static void pblk_gc_writer_kick(struct pblk_gc *gc)
{
        wake_up_process(gc->gc_writer_ts);
}

void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list;

        spin_lock(&l_mg->gc_lock);
        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        line->state = PBLK_LINESTATE_CLOSED;
        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);

        /* Reset gc_group so that pblk_line_gc_list() returns the proper
         * move_list; at this point the line is not on any of the GC
         * lists.
         */
        line->gc_group = PBLK_LINEGC_NONE;
        move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);
        list_add_tail(&line->list, move_list);
        spin_unlock(&l_mg->gc_lock);
}

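/*
 * Work item that reads the valid data of one GC request from the victim
 * line and queues it on the GC write list, throttled to PBLK_GC_RQ_QD
 * outstanding entries.
 */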
static void pblk_gc_line_ws(struct work_struct *work)
{
        struct pblk_line_ws *gc_rq_ws = container_of(work,
                                                struct pblk_line_ws, ws);
        struct pblk *pblk = gc_rq_ws->pblk;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line = gc_rq_ws->line;
        struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
        int ret;

        up(&gc->gc_sem);

        /* Read from GC victim block */
        ret = pblk_submit_read_gc(pblk, gc_rq);
        if (ret) {
                line->w_err_gc->has_gc_err = 1;
                goto out;
        }

        if (!gc_rq->secs_to_gc)
                goto out;

retry:
        spin_lock(&gc->w_lock);
        if (gc->w_entries >= PBLK_GC_RQ_QD) {
                spin_unlock(&gc->w_lock);
                pblk_gc_writer_kick(gc);
                usleep_range(128, 256);
                goto retry;
        }
        gc->w_entries++;
        list_add_tail(&gc_rq->list, &gc->w_list);
        spin_unlock(&gc->w_lock);

        pblk_gc_writer_kick(gc);

        kfree(gc_rq_ws);
        return;

out:
        pblk_gc_free_gc_rq(gc_rq);
        kref_put(&line->ref, pblk_line_put);
        kfree(gc_rq_ws);
}

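/*
 * Read the line's end metadata (emeta) and return a copy of its LBA
 * list, or NULL if the read fails or the metadata is inconsistent.
 */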
static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
                                       struct pblk_line *line)
{
        struct line_emeta *emeta_buf;
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned int lba_list_size = lm->emeta_len[2];
        __le64 *lba_list;
        int ret;

        emeta_buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
        if (!emeta_buf)
                return NULL;

        ret = pblk_line_emeta_read(pblk, line, emeta_buf);
        if (ret) {
                pblk_err(pblk, "line %d read emeta failed (%d)\n",
                                line->id, ret);
                kvfree(emeta_buf);
                return NULL;
        }

        /* If the consistency check fails, the emeta is corrupted. For
         * now, leave the line untouched.
         * TODO: Implement a recovery routine that scans and moves
         * all sectors on the line.
         */
        ret = pblk_recov_check_emeta(pblk, emeta_buf);
        if (ret) {
                pblk_err(pblk, "inconsistent emeta (line %d)\n",
                                line->id);
                kvfree(emeta_buf);
                return NULL;
        }

        lba_list = kvmalloc(lba_list_size, GFP_KERNEL);

        if (lba_list)
                memcpy(lba_list, emeta_to_lbas(pblk, emeta_buf), lba_list_size);

        kvfree(emeta_buf);

        return lba_list;
}

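/*
 * Work item that prepares a line for GC: it snapshots the line's invalid
 * bitmap, walks the still-valid sectors and packs them into GC read
 * requests of at most pblk->max_write_pgs sectors each.
 */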
static void pblk_gc_line_prepare_ws(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct pblk_line *line = line_ws->line;
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line_ws *gc_rq_ws;
        struct pblk_gc_rq *gc_rq;
        __le64 *lba_list;
        unsigned long *invalid_bitmap;
        int sec_left, nr_secs, bit;

        invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!invalid_bitmap)
                goto fail_free_ws;

        if (line->w_err_gc->has_write_err) {
                lba_list = line->w_err_gc->lba_list;
                line->w_err_gc->lba_list = NULL;
        } else {
                lba_list = get_lba_list_from_emeta(pblk, line);
                if (!lba_list) {
                        pblk_err(pblk, "could not interpret emeta (line %d)\n",
                                        line->id);
                        goto fail_free_invalid_bitmap;
                }
        }

        spin_lock(&line->lock);
        bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
        sec_left = pblk_line_vsc(line);
        spin_unlock(&line->lock);

        if (sec_left < 0) {
                pblk_err(pblk, "corrupted GC line (%d)\n", line->id);
                goto fail_free_lba_list;
        }

        bit = -1;
next_rq:
        gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
        if (!gc_rq)
                goto fail_free_lba_list;

        nr_secs = 0;
        do {
                bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
                                                                bit + 1);
                if (bit > line->emeta_ssec)
                        break;

                gc_rq->paddr_list[nr_secs] = bit;
                gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
        } while (nr_secs < pblk->max_write_pgs);

        if (unlikely(!nr_secs)) {
                kfree(gc_rq);
                goto out;
        }

        gc_rq->nr_secs = nr_secs;
        gc_rq->line = line;

        gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
        if (!gc_rq->data)
                goto fail_free_gc_rq;

        gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
        if (!gc_rq_ws)
                goto fail_free_gc_data;

        gc_rq_ws->pblk = pblk;
        gc_rq_ws->line = line;
        gc_rq_ws->priv = gc_rq;

        /* The write GC path can be much slower than the read GC path due
         * to the budget imposed by the rate-limiter. Throttle here in
         * case we get back-pressure from the write GC path.
         */
        while (down_timeout(&gc->gc_sem, msecs_to_jiffies(30000)))
                io_schedule();

        kref_get(&line->ref);

        INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
        queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);

        sec_left -= nr_secs;
        if (sec_left > 0)
                goto next_rq;

out:
        kvfree(lba_list);
        kfree(line_ws);
        kfree(invalid_bitmap);

        kref_put(&line->ref, pblk_line_put);
        atomic_dec(&gc->read_inflight_gc);

        return;

fail_free_gc_data:
        vfree(gc_rq->data);
fail_free_gc_rq:
        kfree(gc_rq);
fail_free_lba_list:
        kvfree(lba_list);
fail_free_invalid_bitmap:
        kfree(invalid_bitmap);
fail_free_ws:
        kfree(line_ws);

        /* The line goes back to the closed state, so we must not drop
         * the extra line reference here; that reference is only released
         * on the GC-to-free line state transition.
         */
        pblk_put_line_back(pblk, line);
        atomic_dec(&gc->read_inflight_gc);

        pblk_err(pblk, "failed to GC line %d\n", line->id);
}

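/*
 * Schedule the prepare work item that starts reclaiming a single line.
 */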
static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line_ws *line_ws;

        pblk_debug(pblk, "line '%d' being reclaimed for GC\n", line->id);

        line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
        if (!line_ws)
                return -ENOMEM;

        line_ws->pblk = pblk;
        line_ws->line = line;

        atomic_inc(&gc->pipeline_gc);
        INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
        queue_work(gc->gc_reader_wq, &line_ws->ws);

        return 0;
}

static void pblk_gc_reader_kick(struct pblk_gc *gc)
{
        wake_up_process(gc->gc_reader_ts);
}

static void pblk_gc_kick(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        pblk_gc_writer_kick(gc);
        pblk_gc_reader_kick(gc);

        /* If we're shutting down GC, let's not start it up again */
        if (gc->gc_enabled) {
                wake_up_process(gc->gc_ts);
                mod_timer(&gc->gc_timer,
                          jiffies + msecs_to_jiffies(GC_TIME_MSECS));
        }
}

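/*
 * Take the first line off the GC read list and start reclaiming it.
 * Returns 0 if a line was processed, 1 if the list was empty.
 */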
static int pblk_gc_read(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;

        spin_lock(&gc->r_lock);
        if (list_empty(&gc->r_list)) {
                spin_unlock(&gc->r_lock);
                return 1;
        }

        line = list_first_entry(&gc->r_list, struct pblk_line, list);
        list_del(&line->list);
        spin_unlock(&gc->r_lock);

        pblk_gc_kick(pblk);

        if (pblk_gc_line(pblk, line)) {
                pblk_err(pblk, "failed to GC line %d\n", line->id);
                /* rollback */
                spin_lock(&gc->r_lock);
                list_add_tail(&line->list, &gc->r_list);
                spin_unlock(&gc->r_lock);
        }

        return 0;
}

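/*
 * Pick the line with the fewest valid sectors from the group list,
 * considering only lines with no in-flight mapping updates. Returns
 * NULL if no line qualifies.
 */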
static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
                                                 struct list_head *group_list)
{
        struct pblk_line *line, *victim;
        unsigned int line_vsc = ~0x0L, victim_vsc = ~0x0L;

        victim = list_first_entry(group_list, struct pblk_line, list);

        list_for_each_entry(line, group_list, list) {
                if (!atomic_read(&line->sec_to_update))
                        line_vsc = le32_to_cpu(*line->vsc);
                if (line_vsc < victim_vsc) {
                        victim = line;
                        victim_vsc = le32_to_cpu(*victim->vsc);
                }
        }

        if (victim_vsc == ~0x0)
                return NULL;

        return victim;
}

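/*
 * GC should run when lines with write errors are pending, or when GC is
 * active and the free block count has dropped below the rate-limiter's
 * high threshold.
 */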
static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
{
        unsigned int nr_blocks_free, nr_blocks_need;
        unsigned int werr_lines = atomic_read(&rl->werr_lines);

        nr_blocks_need = pblk_rl_high_thrs(rl);
        nr_blocks_free = pblk_rl_nr_free_blks(rl);

        /* This is not critical, no need to take lock here */
        return ((werr_lines > 0) ||
                ((gc->gc_active) && (nr_blocks_need > nr_blocks_free)));
}

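/*
 * Lines without any valid sectors left can be freed directly; no data
 * needs to be moved.
 */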
void pblk_gc_free_full_lines(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;

        do {
                spin_lock(&l_mg->gc_lock);
                if (list_empty(&l_mg->gc_full_list)) {
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }

                line = list_first_entry(&l_mg->gc_full_list,
                                                        struct pblk_line, list);

                spin_lock(&line->lock);
                WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
                line->state = PBLK_LINESTATE_GC;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_del(&line->list);
                spin_unlock(&l_mg->gc_lock);

                atomic_inc(&gc->pipeline_gc);
                kref_put(&line->ref, pblk_line_put);
        } while (1);
}

/*
 * Lines with no valid sectors will be returned to the free list
 * immediately. If GC is activated, either because the free block count
 * is below the configured threshold or because it is being forced from
 * user space, only lines with a high count of invalid sectors will be
 * recycled.
 */
static void pblk_gc_run(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;
        struct list_head *group_list;
        bool run_gc;
        int read_inflight_gc, gc_group = 0, prev_group = 0;

        pblk_gc_free_full_lines(pblk);

        run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
        if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
                return;

next_gc_group:
        group_list = l_mg->gc_lists[gc_group++];

        do {
                spin_lock(&l_mg->gc_lock);

                line = pblk_gc_get_victim_line(pblk, group_list);
                if (!line) {
                        spin_unlock(&l_mg->gc_lock);
                        break;
                }

                spin_lock(&line->lock);
                WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
                line->state = PBLK_LINESTATE_GC;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_del(&line->list);
                spin_unlock(&l_mg->gc_lock);

                spin_lock(&gc->r_lock);
                list_add_tail(&line->list, &gc->r_list);
                spin_unlock(&gc->r_lock);

                read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
                pblk_gc_reader_kick(gc);

                prev_group = 1;

                /* No need to queue up more GC lines than we can handle */
                run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
                if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
                        break;
        } while (1);

        if (!prev_group && pblk->rl.rb_state > gc_group &&
                                                gc_group < PBLK_GC_NR_LISTS)
                goto next_gc_group;
}

static void pblk_gc_timer(struct timer_list *t)
{
        struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);

        pblk_gc_kick(pblk);
}

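/*
 * Main GC kthread: selects victim lines and feeds them to the reader
 * and writer kthreads below.
 */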
static int pblk_gc_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                pblk_gc_run(pblk);
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}

static int pblk_gc_writer_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                if (!pblk_gc_write(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}

static int pblk_gc_reader_ts(void *data)
{
        struct pblk *pblk = data;
        struct pblk_gc *gc = &pblk->gc;

        while (!kthread_should_stop()) {
                if (!pblk_gc_read(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_info(pblk, "flushing gc pipeline, %d lines left\n",
                atomic_read(&gc->pipeline_gc));
#endif

        do {
                if (!atomic_read(&gc->pipeline_gc))
                        break;

                schedule();
        } while (1);

        return 0;
}

static void pblk_gc_start(struct pblk *pblk)
{
        pblk->gc.gc_active = 1;
        pblk_debug(pblk, "gc start\n");
}

void pblk_gc_should_start(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        if (gc->gc_enabled && !gc->gc_active) {
                pblk_gc_start(pblk);
                pblk_gc_kick(pblk);
        }
}

void pblk_gc_should_stop(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        if (gc->gc_active && !gc->gc_forced)
                gc->gc_active = 0;
}

void pblk_gc_should_kick(struct pblk *pblk)
{
        pblk_rl_update_rates(&pblk->rl);
}

void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
                              int *gc_active)
{
        struct pblk_gc *gc = &pblk->gc;

        spin_lock(&gc->lock);
        *gc_enabled = gc->gc_enabled;
        *gc_active = gc->gc_active;
        spin_unlock(&gc->lock);
}

int pblk_gc_sysfs_force(struct pblk *pblk, int force)
{
        struct pblk_gc *gc = &pblk->gc;

        if (force < 0 || force > 1)
                return -EINVAL;

        spin_lock(&gc->lock);
        gc->gc_forced = force;

        if (force)
                gc->gc_enabled = 1;
        else
                gc->gc_enabled = 0;
        spin_unlock(&gc->lock);

        pblk_gc_should_start(pblk);

        return 0;
}

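/*
 * Set up the GC machinery: the main, writer and reader kthreads, the
 * periodic GC timer and the line reader/prepare workqueues.
 */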
int pblk_gc_init(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        int ret;

        gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
        if (IS_ERR(gc->gc_ts)) {
                pblk_err(pblk, "could not allocate GC main kthread\n");
                return PTR_ERR(gc->gc_ts);
        }

        gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
                                                        "pblk-gc-writer-ts");
        if (IS_ERR(gc->gc_writer_ts)) {
                pblk_err(pblk, "could not allocate GC writer kthread\n");
                ret = PTR_ERR(gc->gc_writer_ts);
                goto fail_free_main_kthread;
        }

        gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
                                                        "pblk-gc-reader-ts");
        if (IS_ERR(gc->gc_reader_ts)) {
                pblk_err(pblk, "could not allocate GC reader kthread\n");
                ret = PTR_ERR(gc->gc_reader_ts);
                goto fail_free_writer_kthread;
        }

        timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
        mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));

        gc->gc_active = 0;
        gc->gc_forced = 0;
        gc->gc_enabled = 1;
        gc->w_entries = 0;
        atomic_set(&gc->read_inflight_gc, 0);
        atomic_set(&gc->pipeline_gc, 0);

        /* Workqueue that reads valid sectors from a line and submits them
         * to the GC writer to be recycled.
         */
        gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
        if (!gc->gc_line_reader_wq) {
                pblk_err(pblk, "could not allocate GC line reader workqueue\n");
                ret = -ENOMEM;
                goto fail_free_reader_kthread;
        }

        /* Workqueue that prepares lines for GC */
        gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
                                        WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
        if (!gc->gc_reader_wq) {
                pblk_err(pblk, "could not allocate GC reader workqueue\n");
                ret = -ENOMEM;
                goto fail_free_reader_line_wq;
        }

        spin_lock_init(&gc->lock);
        spin_lock_init(&gc->w_lock);
        spin_lock_init(&gc->r_lock);

        sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);

        INIT_LIST_HEAD(&gc->w_list);
        INIT_LIST_HEAD(&gc->r_list);

        return 0;

fail_free_reader_line_wq:
        destroy_workqueue(gc->gc_line_reader_wq);
fail_free_reader_kthread:
        kthread_stop(gc->gc_reader_ts);
fail_free_writer_kthread:
        kthread_stop(gc->gc_writer_ts);
fail_free_main_kthread:
        kthread_stop(gc->gc_ts);

        return ret;
}

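/*
 * Tear down GC. The writer kthread is stopped last, after the
 * workqueues are drained, so that queued GC write requests can still be
 * consumed.
 */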
void pblk_gc_exit(struct pblk *pblk, bool graceful)
{
        struct pblk_gc *gc = &pblk->gc;

        gc->gc_enabled = 0;
        del_timer_sync(&gc->gc_timer);
        gc->gc_active = 0;

        if (gc->gc_ts)
                kthread_stop(gc->gc_ts);

        if (gc->gc_reader_ts)
                kthread_stop(gc->gc_reader_ts);

        if (graceful) {
                flush_workqueue(gc->gc_reader_wq);
                flush_workqueue(gc->gc_line_reader_wq);
        }

        destroy_workqueue(gc->gc_reader_wq);
        destroy_workqueue(gc->gc_line_reader_wq);

        if (gc->gc_writer_ts)
                kthread_stop(gc->gc_writer_ts);
}