Skip to content

Commit

Permalink
bcache: Make gc wakeup sane, remove set_task_state()
Browse files Browse the repository at this point in the history
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
  • Loading branch information
koverstreet authored and axboe committed Dec 17, 2016
1 parent 59331c2 commit be628be
Show file tree
Hide file tree
Showing 5 changed files with 26 additions and 26 deletions.
4 changes: 2 additions & 2 deletions drivers/md/bcache/bcache.h
Original file line number Diff line number Diff line change
@@ -425,7 +425,7 @@ struct cache {
 	 * until a gc finishes - otherwise we could pointlessly burn a ton of
 	 * cpu
 	 */
-	unsigned	invalidate_needs_gc:1;
+	unsigned	invalidate_needs_gc;

	bool		discard; /* Get rid of? */

@@ -593,8 +593,8 @@ struct cache_set {
 
 	/* Counts how many sectors bio_insert has added to the cache */
 	atomic_t	sectors_to_gc;
+	wait_queue_head_t	gc_wait;
 
-	wait_queue_head_t	moving_gc_wait;
 	struct keybuf	moving_gc_keys;
 	/* Number of moving GC bios in flight */
 	struct semaphore	moving_in_flight;
Expand Down
39 changes: 20 additions & 19 deletions drivers/md/bcache/btree.c
Original file line number Diff line number Diff line change
@@ -1757,44 +1757,45 @@ static void bch_btree_gc(struct cache_set *c)
 	bch_moving_gc(c);
 }
 
-static int bch_gc_thread(void *arg)
+static bool gc_should_run(struct cache_set *c)
 {
-	struct cache_set *c = arg;
 	struct cache *ca;
 	unsigned i;
 
-	while (1) {
-again:
-		bch_btree_gc(c);
+	for_each_cache(ca, c, i)
+		if (ca->invalidate_needs_gc)
+			return true;
 
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (kthread_should_stop())
-			break;
+	if (atomic_read(&c->sectors_to_gc) < 0)
+		return true;
 
-		mutex_lock(&c->bucket_lock);
+	return false;
+}
 
-		for_each_cache(ca, c, i)
-			if (ca->invalidate_needs_gc) {
-				mutex_unlock(&c->bucket_lock);
-				set_current_state(TASK_RUNNING);
-				goto again;
-			}
+static int bch_gc_thread(void *arg)
+{
+	struct cache_set *c = arg;
 
-		mutex_unlock(&c->bucket_lock);
+	while (1) {
+		wait_event_interruptible(c->gc_wait,
+			   kthread_should_stop() || gc_should_run(c));
 
-		schedule();
+		if (kthread_should_stop())
+			break;
+
+		set_gc_sectors(c);
+		bch_btree_gc(c);
 	}
 
 	return 0;
 }
 
 int bch_gc_thread_start(struct cache_set *c)
 {
-	c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
 	if (IS_ERR(c->gc_thread))
 		return PTR_ERR(c->gc_thread);
 
-	set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
 	return 0;
 }

Expand Down
3 changes: 1 addition & 2 deletions drivers/md/bcache/btree.h
Original file line number Diff line number Diff line change
@@ -260,8 +260,7 @@ void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
 
 static inline void wake_up_gc(struct cache_set *c)
 {
-	if (c->gc_thread)
-		wake_up_process(c->gc_thread);
+	wake_up(&c->gc_wait);
 }
 
 #define MAP_DONE	0
Expand Down
4 changes: 1 addition & 3 deletions drivers/md/bcache/request.c
Original file line number Diff line number Diff line change
@@ -196,10 +196,8 @@ static void bch_data_insert_start(struct closure *cl)
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 	struct bio *bio = op->bio, *n;
 
-	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
-		set_gc_sectors(op->c);
+	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
 		wake_up_gc(op->c);
-	}
 
 	if (op->bypass)
 		return bch_data_invalidate(cl);
Expand Down
2 changes: 2 additions & 0 deletions drivers/md/bcache/super.c
Original file line number Diff line number Diff line change
@@ -1489,6 +1489,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 	mutex_init(&c->bucket_lock);
 	init_waitqueue_head(&c->btree_cache_wait);
 	init_waitqueue_head(&c->bucket_wait);
+	init_waitqueue_head(&c->gc_wait);
 	sema_init(&c->uuid_write_mutex, 1);
 
 	spin_lock_init(&c->btree_gc_time.lock);
spin_lock_init(&c->btree_gc_time.lock);
@@ -1548,6 +1549,7 @@ static void run_cache_set(struct cache_set *c)
 
 	for_each_cache(ca, c, i)
 		c->nbuckets += ca->sb.nbuckets;
+	set_gc_sectors(c);
 
 	if (CACHE_SYNC(&c->sb)) {
 		LIST_HEAD(journal);
Expand Down

0 comments on commit be628be

Please sign in to comment.