Created April 18, 2014 14:11
redesign writeback structure patch draft
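The patch below drops the three parallel per-batch fields in struct wb_device (emigrates, migrate_buffer, migrate_ios) and replaces them with an array of per-segment segment_migrate objects, each of which owns the segment header pointer, the read buffer, and the migrate_io slots for one segment. A rough sketch of the new layout, restated outside the diff for readability (the comments are mine, not part of the patch):

struct segment_migrate {
	struct migrate_io *ios;     /* wb->nr_caches_inseg entries, one per metablock */
	void *buf;                  /* per-segment read buffer from wb->rambuf_cachep */
	struct segment_header *seg; /* segment currently assigned to this batch slot */
};

/* wb_device then keeps only the batch array itself: */
struct wb_device {
	/* ... */
	u32 num_emigrates;                  /* number of emigrates */
	struct segment_migrate **emigrates; /* one slot per batched segment */
	/* ... */
};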
diff --git a/drivers/md/dm-writeboost-daemon.c b/drivers/md/dm-writeboost-daemon.c
index 0136baa..f1e4574 100644
--- a/drivers/md/dm-writeboost-daemon.c
+++ b/drivers/md/dm-writeboost-daemon.c
@@ -255,24 +255,25 @@ static void add_migrate_io(struct wb_device *wb, struct migrate_io *mio)
rb_insert_color(&mio->rb_node, &wb->migrate_tree);
}
-static void prepare_migrate_ios(struct wb_device *wb, struct segment_header *seg,
- size_t k, size_t *migrate_io_count)
+static void prepare_migrate_ios(struct wb_device *wb, struct segment_migrate *segmig,
+ size_t *migrate_io_count)
{
int r = 0;
+ struct segment_header *seg = segmig->seg;
+
u8 i;
- void *p = wb->migrate_buffer + (wb->nr_caches_inseg << 12) * k;
struct dm_io_request io_req_r = {
.client = wb_io_client,
.bi_rw = READ,
.notify.fn = NULL,
- .mem.type = DM_IO_VMA,
- .mem.ptr.vma = p,
+ .mem.type = DM_IO_KMEM,
+ .mem.ptr.addr = segmig->buf,
};
struct dm_io_region region_r = {
.bdev = wb->cache_dev->bdev,
- .sector = seg->start_sector + (1 << 3),
+ .sector = seg->start_sector + (1 << 3), /* header excluded */
.count = seg->length << 3,
};
IO(dm_safe_io(&io_req_r, 1, &region_r, NULL, false));
@@ -280,11 +281,11 @@ static void prepare_migrate_ios(struct wb_device *wb, struct segment_header *seg
for (i = 0; i < seg->length; i++) {
struct metablock *mb = seg->mb_array + i;
- struct migrate_io *mio = wb->migrate_ios + (wb->nr_caches_inseg * k + i);
+ struct migrate_io *mio = segmig->ios + i;
mio->memorized_dirtiness = read_mb_dirtiness(wb, seg, mb);
inc_migrate_io_count(mio->memorized_dirtiness, migrate_io_count);
mio->sector = mb->sector;
- mio->data = p + (i << 12);
+ mio->data = segmig->buf + (i << 12);
mio->id = seg->id;
add_migrate_io(wb, mio);
@@ -305,10 +306,14 @@ static void transport_emigrates(struct wb_device *wb)
int r;
size_t k, migrate_io_count = 0;
+ struct segment_migrate *segmig;
+
wb->migrate_tree = RB_ROOT;
- for (k = 0; k < wb->num_emigrates; k++)
- prepare_migrate_ios(wb, *(wb->emigrates + k), k, &migrate_io_count);
+ for (k = 0; k < wb->num_emigrates; k++) {
+ segmig = *(wb->emigrates + k);
+ prepare_migrate_ios(wb, segmig, &migrate_io_count);
+ }
atomic_set(&wb->migrate_io_count, migrate_io_count);
atomic_set(&wb->migrate_fail_count, 0);
@@ -323,8 +328,10 @@ static void transport_emigrates(struct wb_device *wb)
* we clean up the metablocks because there is no reason
* to leave the them dirty.
*/
- for (k = 0; k < wb->num_emigrates; k++)
- cleanup_segment(wb, *(wb->emigrates + k));
+ for (k = 0; k < wb->num_emigrates; k++) {
+ segmig = *(wb->emigrates + k);
+ cleanup_segment(wb, segmig->seg);
+ }
/*
* we must write back a segments if it was written persistently.
@@ -361,7 +368,7 @@ static bool should_migrate(struct wb_device *wb)
static void do_migrate_proc(struct wb_device *wb)
{
- u32 i, nr_mig;
+ u32 k, nr_mig;
if (!should_migrate(wb)) {
schedule_timeout_interruptible(msecs_to_jiffies(1000));
@@ -377,10 +384,10 @@ static void do_migrate_proc(struct wb_device *wb)
/*
* store emigrates
*/
- for (i = 0; i < nr_mig; i++) {
- struct segment_header *seg = get_segment_header_by_id(wb,
- atomic64_read(&wb->last_migrated_segment_id) + 1 + i);
- *(wb->emigrates + i) = seg;
+ for (k = 0; k < nr_mig; k++) {
+ struct segment_migrate *segmig = *(wb->emigrates + k);
+ segmig->seg = get_segment_header_by_id(wb,
+ atomic64_read(&wb->last_migrated_segment_id) + 1 + k);
}
wb->num_emigrates = nr_mig;
transport_emigrates(wb);
diff --git a/drivers/md/dm-writeboost-metadata.c b/drivers/md/dm-writeboost-metadata.c
index 5b90a6e..e7d4eaa 100644
--- a/drivers/md/dm-writeboost-metadata.c
+++ b/drivers/md/dm-writeboost-metadata.c
@@ -1552,6 +1552,45 @@ static int recover_cache(struct wb_device *wb)
/*----------------------------------------------------------------*/
+static struct segment_migrate *alloc_segment_migrate(struct wb_device *wb)
+{
+ int r = 0;
+ u8 i;
+
+ struct segment_migrate *segmig = kmalloc(sizeof(*segmig), GFP_NOIO);
+ if (!segmig)
+ goto bad_segmig;
+
+ segmig->ios = kmalloc(wb->nr_caches_inseg * sizeof(struct migrate_io), GFP_NOIO);
+ if (!segmig->ios)
+ goto bad_ios;
+
+ segmig->buf = kmem_cache_alloc(wb->rambuf_cachep, GFP_NOIO);
+ if (!segmig->buf)
+ goto bad_buf;
+
+ for (i = 0; i < wb->nr_caches_inseg; i++) {
+ struct migrate_io *mio = segmig->ios + i;
+ mio->data = segmig->buf + (i << 12);
+ }
+
+ return segmig;
+
+bad_buf:
+ kfree(segmig->ios);
+bad_ios:
+ kfree(segmig);
+bad_segmig:
+ return NULL;
+}
+
+static void free_segment_migrate(struct wb_device *wb, struct segment_migrate *segmig)
+{
+ kmem_cache_free(wb->rambuf_cachep, segmig->buf);
+ kfree(segmig->ios);
+ kfree(segmig);
+}
+
/*
* try to allocate new migration buffer by the @nr_batch size.
* on success, it frees the old buffer.
@@ -1559,66 +1598,55 @@ static int recover_cache(struct wb_device *wb)
* bad User may set # of batches that can hardly allocate.
* This function is robust in that case.
*/
+static void free_migrate_ios(struct wb_device *wb)
+{
+ size_t i;
+ for (i = 0; i < wb->nr_cur_batched_migration; i++)
+ free_segment_migrate(wb, *(wb->emigrates + i));
+ kfree(wb->emigrates);
+}
+
+/*
+ * request to allocate data structures to migrate @nr_batch segments.
+ * previous structures are preserved in failure.
+ */
int try_alloc_migrate_ios(struct wb_device *wb, size_t nr_batch)
{
int r = 0;
- struct segment_header **emigrates;
- void *buf;
- struct migrate_io *migrate_ios;
-
- emigrates = kmalloc(nr_batch * sizeof(struct segment_header *), GFP_KERNEL);
- if (!emigrates) {
- WBERR("failed to allocate emigrates");
- r = -ENOMEM;
- return r;
- }
+ size_t i;
- buf = vmalloc(nr_batch * (wb->nr_caches_inseg << 12));
- if (!buf) {
- WBERR("failed to allocate migration buffer");
- r = -ENOMEM;
- goto bad_alloc_buffer;
- }
+ struct segment_migrate **emigrates = kzalloc(
+ nr_batch * sizeof(struct segment_migrate *), GFP_KERNEL);
+ if (!emigrates)
+ return -ENOMEM;
- migrate_ios = kmalloc(nr_batch * wb->nr_caches_inseg * sizeof(struct migrate_io), GFP_KERNEL);
- if (!migrate_ios) {
- WBERR("failed to allocate memorized dirtiness");
- r = -ENOMEM;
- goto bad_alloc_migrate_ios;
+ for (i = 0; i < nr_batch; i++) {
+ struct segment_migrate **segmig = emigrates + i;
+ *segmig = alloc_segment_migrate(wb);
+ if (!*segmig) {
+ size_t j;
+ for (j = 0; j < i; j++)
+ free_segment_migrate(wb, *(emigrates + j));
+ kfree(emigrates);
+
+ WBERR("failed to allocate emigrates");
+ return -ENOMEM;
+ }
}
/*
* free old buffers
*/
- kfree(wb->emigrates); /* kfree(NULL) is safe */
- if (wb->migrate_buffer)
- vfree(wb->migrate_buffer);
- kfree(wb->migrate_ios);
+ free_migrate_ios(wb);
/*
* swap by new values
*/
wb->emigrates = emigrates;
- wb->migrate_buffer = buf;
- wb->migrate_ios = migrate_ios;
wb->nr_cur_batched_migration = nr_batch;
return r;
-
-bad_alloc_buffer:
- kfree(wb->emigrates);
-bad_alloc_migrate_ios:
- vfree(wb->migrate_buffer);
-
- return r;
-}
-
-static void free_migration_buffer(struct wb_device *wb)
-{
- kfree(wb->emigrates);
- vfree(wb->migrate_buffer);
- kfree(wb->migrate_ios);
}
/*----------------------------------------------------------------*/
@@ -1698,7 +1726,7 @@ static int init_migrate_daemon(struct wb_device *wb)
return r;
bad_migrate_daemon:
- free_migration_buffer(wb);
+ free_migrate_ios(wb);
return r;
}
@@ -1858,7 +1886,7 @@ bad_migrate_modulator:
bad_flusher:
bad_recover:
kthread_stop(wb->migrate_daemon);
- free_migration_buffer(wb);
+ free_migrate_ios(wb);
bad_migrate_daemon:
free_metadata(wb);
bad_metadata:
@@ -1883,7 +1911,7 @@ void free_cache(struct wb_device *wb)
destroy_workqueue(wb->flusher_wq);
kthread_stop(wb->migrate_daemon);
- free_migration_buffer(wb);
+ free_migrate_ios(wb);
free_metadata(wb);
diff --git a/drivers/md/dm-writeboost.h b/drivers/md/dm-writeboost.h
index 9c89b18..ad5d45f 100644
--- a/drivers/md/dm-writeboost.h
+++ b/drivers/md/dm-writeboost.h
@@ -250,6 +250,12 @@ struct migrate_io {
};
#define migrate_io_from_node(node) rb_entry((node), struct migrate_io, rb_node)
+struct segment_migrate {
+ struct migrate_io *ios;
+ void *buf;
+ struct segment_header *seg;
+};
+
/*----------------------------------------------------------------*/
enum STATFLAG {
@@ -433,9 +439,7 @@ struct wb_device {
struct rb_root migrate_tree;
u32 num_emigrates; /* number of emigrates */
- struct segment_header **emigrates; /* segments to be migrated */
- void *migrate_buffer; /* the data blocks of the emigrates */
- struct migrate_io *migrate_ios;
+ struct segment_migrate **emigrates;
/*---------------------------------------------*/
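For reference, the allocation path in try_alloc_migrate_ios() now follows an allocate-then-swap pattern: the whole replacement batch is built first, and only if every per-segment allocation succeeds are the old structures freed and swapped out, so the active setup survives a partial failure (matching the "previous structures are preserved in failure" comment). A condensed sketch of that flow, not the patch text itself; the variable name new_batch is illustrative only:

	struct segment_migrate **new_batch =
		kzalloc(nr_batch * sizeof(*new_batch), GFP_KERNEL);
	if (!new_batch)
		return -ENOMEM;

	for (i = 0; i < nr_batch; i++) {
		new_batch[i] = alloc_segment_migrate(wb);
		if (!new_batch[i]) {
			while (i--)	/* roll back only what was just built */
				free_segment_migrate(wb, new_batch[i]);
			kfree(new_batch);
			return -ENOMEM;	/* wb->emigrates stays untouched */
		}
	}

	free_migrate_ios(wb);	/* drop the previously active batch structures */
	wb->emigrates = new_batch;
	wb->nr_cur_batched_migration = nr_batch;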