diff -Nur linux-2.6.0-test2.orig/drivers/md/dm.c linux-2.6.0-test2/drivers/md/dm.c
--- linux-2.6.0-test2.orig/drivers/md/dm.c	2003-07-31 19:34:25.272437600 +0200
+++ linux-2.6.0-test2/drivers/md/dm.c	2003-07-31 19:32:37.116879744 +0200
@@ -124,6 +124,9 @@
 	xx(dm_linear)
 	xx(dm_stripe)
 	xx(dm_interface)
+#ifdef CONFIG_BLK_DM_CRYPT
+	xx(dm_crypt)
+#endif
 #undef xx
 };
 
diff -Nur linux-2.6.0-test2.orig/drivers/md/dm-crypt.c linux-2.6.0-test2/drivers/md/dm-crypt.c
--- linux-2.6.0-test2.orig/drivers/md/dm-crypt.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.0-test2/drivers/md/dm-crypt.c	2003-07-31 19:32:37.118879440 +0200
@@ -0,0 +1,785 @@
+/*
+ * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+#include "dm-daemon.h"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+
+/*
+ * per bio private data
+ */
+struct crypt_io {
+	struct dm_target *target;
+	struct bio *bio;
+	struct bio *first_clone;
+	atomic_t pending;
+	int error;
+};
+
+/*
+ * context holding the current state of a multi-part conversion
+ */
+struct convert_context {
+	struct bio *bio_in;
+	struct bio *bio_out;
+	unsigned int offset_in;
+	unsigned int offset_out;
+	int idx_in;
+	int idx_out;
+	sector_t sector;
+	int write;
+};
+
+/*
+ * Crypt: maps a linear range of a block device
+ * and encrypts / decrypts at the same time.
+ */
+struct crypt_c {
+	struct dm_dev *dev;
+	sector_t start;
+
+	/*
+	 * pool for per bio private data and
+	 * for encryption buffer pages
+	 */
+	mempool_t *io_pool;
+	mempool_t *page_pool;
+
+	/*
+	 * crypto related data
+	 */
+	struct crypto_tfm *tfm;
+	sector_t iv_offset;
+	int iv_size;
+	int key_size;
+	u8 key[0];
+};
+
+#define MIN_IOS 256
+#define MIN_POOL_PAGES 16
+#define MIN_BIO_PAGES 8
+
+static kmem_cache_t *_io_cache;
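
A note on the trailing "u8 key[0]" member of struct crypt_c: this is the
usual variable-size structure idiom, so the context and the key live in a
single allocation. A minimal sketch of the pattern as crypt_ctr() below
uses it (key_bytes and key_size are illustrative placeholders, not names
from this patch):

	struct crypt_c *cc;

	cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc) {
		cc->key_size = key_size;
		memcpy(cc->key, key_bytes, key_size); /* fills the trailing array */
	}
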
+
+/*
+ * Mempool alloc and free functions for the page and io pool
+ */
+static void *mempool_alloc_page(int gfp_mask, void *data)
+{
+	return alloc_page(gfp_mask);
+}
+
+static void mempool_free_page(void *page, void *data)
+{
+	__free_page(page);
+}
+
+static inline struct page *crypt_alloc_page(struct crypt_c *cc, int gfp_mask)
+{
+	return mempool_alloc(cc->page_pool, gfp_mask);
+}
+
+static inline void crypt_free_page(struct crypt_c *cc, struct page *page)
+{
+	mempool_free(page, cc->page_pool);
+}
+
+static inline struct crypt_io *crypt_alloc_io(struct crypt_c *cc)
+{
+	return mempool_alloc(cc->io_pool, GFP_NOIO);
+}
+
+static inline void crypt_free_io(struct crypt_c *cc, struct crypt_io *io)
+{
+	mempool_free(io, cc->io_pool);
+}
+
+/*
+ * Encrypt / decrypt a single sector, source and destination buffers
+ * are stored in scatterlists. In CBC mode the "previous block" is
+ * initialised with the sector number (this is no real chaining:
+ * real chaining would make it impossible to seek on the device...)
+ */
+static inline int
+crypt_convert_scatterlist(struct crypt_c *cc, struct scatterlist *out,
+                          struct scatterlist *in, unsigned int length,
+                          int write, sector_t sector)
+{
+	u8 iv[cc->iv_size];
+	int r;
+
+	if (cc->iv_size) {
+		*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);
+		if (cc->iv_size > sizeof(u32) / sizeof(u8))
+			memset(iv + (sizeof(u32) / sizeof(u8)), 0,
+			       cc->iv_size - (sizeof(u32) / sizeof(u8)));
+
+		if (write)
+			r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv);
+		else
+			r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv);
+	} else {
+		if (write)
+			r = crypto_cipher_encrypt(cc->tfm, out, in, length);
+		else
+			r = crypto_cipher_decrypt(cc->tfm, out, in, length);
+	}
+
+	return r;
+}
+
+static void
+crypt_convert_init(struct crypt_c *cc, struct convert_context *ctx,
+                   struct bio *bio_out, struct bio *bio_in,
+                   sector_t sector, int write)
+{
+	ctx->bio_in = bio_in;
+	ctx->bio_out = bio_out;
+	ctx->offset_in = 0;
+	ctx->offset_out = 0;
+	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
+	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
+	ctx->sector = sector + cc->iv_offset;
+	ctx->write = write;
+}
+
+/*
+ * Encrypt / decrypt data from one bio to another one (may be the same)
+ */
+static int crypt_convert(struct crypt_c *cc,
+                         struct convert_context *ctx)
+{
+	int r = 0;
+
+	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
+	       ctx->idx_out < ctx->bio_out->bi_vcnt) {
+		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
+		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+		struct scatterlist sg_in = {
+			.page = bv_in->bv_page,
+			.offset = bv_in->bv_offset + ctx->offset_in,
+			.length = 1 << SECTOR_SHIFT
+		};
+		struct scatterlist sg_out = {
+			.page = bv_out->bv_page,
+			.offset = bv_out->bv_offset + ctx->offset_out,
+			.length = 1 << SECTOR_SHIFT
+		};
+
+		ctx->offset_in += sg_in.length;
+		if (ctx->offset_in >= bv_in->bv_len) {
+			ctx->offset_in = 0;
+			ctx->idx_in++;
+		}
+
+		ctx->offset_out += sg_out.length;
+		if (ctx->offset_out >= bv_out->bv_len) {
+			ctx->offset_out = 0;
+			ctx->idx_out++;
+		}
+
+		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
+		                              ctx->write, ctx->sector);
+		if (r < 0)
+			break;
+
+		ctx->sector++;
+	}
+
+	return r;
+}
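
To illustrate why the sector-number IV keeps the device seekable, here is
a rough userspace sketch of the same per-sector walk that crypt_convert()
performs: each 512-byte sector is ciphered independently with an IV derived
only from its sector number, so any sector can be read or rewritten in
isolation. encrypt_with_iv() is a toy stand-in, not a function from this
patch or from the kernel:

	#include <stdint.h>
	#include <string.h>

	#define SECTOR_SIZE 512

	/* toy stand-in for a real CBC primitive; NOT real encryption */
	static void encrypt_with_iv(uint8_t *dst, const uint8_t *src,
				    size_t len, const uint8_t iv[16])
	{
		for (size_t i = 0; i < len; i++)
			dst[i] = src[i] ^ iv[i % 16];
	}

	static void encrypt_range(uint8_t *dst, const uint8_t *src,
				  size_t len, uint64_t first_sector)
	{
		uint64_t sector = first_sector;

		for (size_t off = 0; off < len; off += SECTOR_SIZE, sector++) {
			uint8_t iv[16];

			/* little-endian low 32 bits of the sector number,
			 * zero-padded, matching the cpu_to_le32() above */
			memset(iv, 0, sizeof(iv));
			iv[0] = sector & 0xff;
			iv[1] = (sector >> 8) & 0xff;
			iv[2] = (sector >> 16) & 0xff;
			iv[3] = (sector >> 24) & 0xff;

			encrypt_with_iv(dst + off, src + off, SECTOR_SIZE, iv);
		}
	}
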
+
+/*
+ * Generate a new unfragmented bio with the given size.
+ * This should never violate the device limitations.
+ * May return a smaller bio when running out of pages.
+ */
+static struct bio *
+crypt_alloc_buffer(struct crypt_c *cc, unsigned int size,
+                   struct bio *base_bio, int *bio_vec_idx)
+{
+	struct bio *bio;
+	int nr_iovecs = dm_div_up(size, PAGE_SIZE);
+	int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
+	int i;
+
+	if (base_bio)
+		bio = bio_clone(base_bio, GFP_NOIO);
+	else
+		bio = bio_alloc(GFP_NOIO, nr_iovecs);
+	if (!bio)
+		return NULL;
+
+	/* if the last bio was not complete, continue where that one ended */
+	bio->bi_idx = *bio_vec_idx;
+	bio->bi_vcnt = *bio_vec_idx;
+	bio->bi_size = 0;
+
+	/* bio->bi_idx pages have already been allocated */
+	size -= bio->bi_idx * PAGE_SIZE;
+
+	for (i = bio->bi_idx; i < nr_iovecs; i++) {
+		struct bio_vec *bv = bio_iovec_idx(bio, i);
+
+		bv->bv_page = crypt_alloc_page(cc, gfp_mask);
+		if (!bv->bv_page)
+			break;
+
+		/*
+		 * if additional pages cannot be allocated without waiting,
+		 * return a partially allocated bio; the caller will then try
+		 * to allocate additional bios while submitting this partial bio
+		 */
+		if ((i - bio->bi_idx) == (MIN_BIO_PAGES - 1))
+			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
+
+		bv->bv_offset = 0;
+		if (size > PAGE_SIZE)
+			bv->bv_len = PAGE_SIZE;
+		else
+			bv->bv_len = size;
+
+		bio->bi_size += bv->bv_len;
+		bio->bi_vcnt++;
+		size -= bv->bv_len;
+	}
+
+	if (!bio->bi_size) {
+		bio_put(bio);
+		return NULL;
+	}
+
+	/*
+	 * remember the last bio_vec allocated to be able to correctly
+	 * continue after splitting caused by memory pressure
+	 */
+	*bio_vec_idx = bio->bi_vcnt;
+
+	return bio;
+}
+
+static void crypt_free_buffer_pages(struct crypt_c *cc, struct bio *bio,
+                                    unsigned int bytes)
+{
+	int i = bio->bi_idx;
+
+	while (bytes) {
+		struct bio_vec *bv = bio_iovec_idx(bio, i++);
+		crypt_free_page(cc, bv->bv_page);
+		bytes -= bv->bv_len;
+	}
+}
+
+/*
+ * One of the bios was finished. Check for completion of
+ * the whole request and correctly clean up the buffer.
+ */
+static void dec_pending(struct crypt_io *io, int error)
+{
+	struct crypt_c *cc = (struct crypt_c *) io->target->private;
+
+	if (error < 0)
+		io->error = error;
+
+	if (!atomic_dec_and_test(&io->pending))
+		return;
+
+	if (io->first_clone)
+		bio_put(io->first_clone);
+
+	if (io->bio)
+		bio_endio(io->bio, io->bio->bi_size, io->error);
+
+	crypt_free_io(cc, io);
+}
+
+/*
+ * kcryptd:
+ *
+ * Needed because we can't decrypt when called in an
+ * interrupt context, so bios returned from read requests
+ * get queued here.
+ */
+static spinlock_t _kcryptd_lock = SPIN_LOCK_UNLOCKED;
+static struct bio *_bio_head;
+static struct bio *_bio_tail;
+
+static struct dm_daemon _kcryptd;
+
+/*
+ * Fetch the list of completed bios.
+ */
+static struct bio *kcryptd_get_bios(void)
+{
+	struct bio *bio;
+
+	spin_lock_irq(&_kcryptd_lock);
+	bio = _bio_head;
+	if (bio)
+		_bio_head = _bio_tail = NULL;
+	spin_unlock_irq(&_kcryptd_lock);
+
+	return bio;
+}
+
+/*
+ * Append a bio to the work queue
+ */
+static void kcryptd_queue_bio(struct bio *bio)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&_kcryptd_lock, flags);
+	if (_bio_tail)
+		_bio_tail->bi_next = bio;
+	else
+		_bio_head = bio;
+	_bio_tail = bio;
+	spin_unlock_irqrestore(&_kcryptd_lock, flags);
+}
+
+static void kcryptd(void)
+{
+	int r;
+	struct bio *bio;
+	struct bio *next_bio;
+	struct crypt_io *io;
+	struct crypt_c *cc;
+	struct convert_context ctx;
+
+	bio = kcryptd_get_bios();
+
+	while (bio) {
+		io = (struct crypt_io *) bio->bi_private;
+		cc = (struct crypt_c *) io->target->private;
+
+		crypt_convert_init(cc, &ctx, io->bio, io->bio,
+		                   io->bio->bi_sector - io->target->begin, 0);
+		r = crypt_convert(cc, &ctx);
+
+		next_bio = bio->bi_next;
+
+		bio->bi_next = NULL;
+		bio_put(bio);
+		dec_pending(io, r);
+
+		bio = next_bio;
+	}
+}
+
+/*
+ * Decode the key from its hex representation
+ */
+static int crypt_decode_key(u8 *key, char *hex, int size)
+{
+	int i;
+	for (i = 0; i < size; i++) {
+		int digits;
+		if (*hex >= 'a' && *hex <= 'f')
+			digits = *hex - ('a' - 10);
+		else if (*hex >= 'A' && *hex <= 'F')
+			digits = *hex - ('A' - 10);
+		else if (*hex >= '0' && *hex <= '9')
+			digits = *hex - '0';
+		else
+			return -EINVAL;
+
+		digits <<= 4;
+		hex++;
+
+		if (*hex >= 'a' && *hex <= 'f')
+			digits += *hex - ('a' - 10);
+		else if (*hex >= 'A' && *hex <= 'F')
+			digits += *hex - ('A' - 10);
+		else if (*hex >= '0' && *hex <= '9')
+			digits += *hex - '0';
+		else
+			return -EINVAL;
+
+		hex++;
+		key[i] = (u8)digits;
+	}
+
+	if (*hex != '\0')
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Encode the key into its hex representation
+ */
+static void crypt_encode_key(char *hex, u8 *key, int size)
+{
+	static char hex_digits[] = "0123456789abcdef";
+	int i;
+
+	for (i = 0; i < size; i++) {
+		*hex++ = hex_digits[*key >> 4];
+		*hex++ = hex_digits[*key & 0x0f];
+		key++;
+	}
+
+	*hex = '\0';
+}
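
For illustration, round-tripping a 16-byte key through the two helpers
above; the literal key string is an arbitrary example. A 32-digit hex
string corresponds to key_size = 16, and the output buffer needs
2 * key_size + 1 bytes for the terminating NUL:

	u8 key[16];
	char hex[2 * 16 + 1];

	if (crypt_decode_key(key, "00112233445566778899aabbccddeeff", 16) == 0) {
		crypt_encode_key(hex, key, 16);
		/* hex now holds the original string (lower-cased) */
	}
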
+
+/*
+ * Construct an encryption mapping:
+ * <cipher> <key> <iv_offset> <dev_path> <start>
+ */
+static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct crypt_c *cc;
+	struct crypto_tfm *tfm;
+	char *tmp;
+	char *cipher;
+	char *mode;
+	int crypto_flags;
+	int iv_size;
+	int key_size;
+
+	if (argc != 5) {
+		ti->error = "dm-crypt: Not enough arguments";
+		return -EINVAL;
+	}
+
+	tmp = argv[0];
+	cipher = strsep(&tmp, "-");
+	mode = strsep(&tmp, "-");
+
+	if (tmp)
+		DMWARN("dm-crypt: Unexpected additional cipher options");
+
+	if (!mode || strcmp(mode, "cbc") == 0)
+		crypto_flags = CRYPTO_TFM_MODE_CBC;
+	else if (strcmp(mode, "ecb") == 0)
+		crypto_flags = CRYPTO_TFM_MODE_ECB;
+	else {
+		ti->error = "dm-crypt: Invalid chaining mode";
+		return -EINVAL;
+	}
+
+	tfm = crypto_alloc_tfm(cipher, crypto_flags);
+	if (!tfm) {
+		ti->error = "dm-crypt: Error allocating crypto tfm";
+		return -EINVAL;
+	}
+
+	key_size = strlen(argv[1]) >> 1;
+	if (tfm->crt_u.cipher.cit_decrypt_iv && tfm->crt_u.cipher.cit_encrypt_iv)
+		iv_size = max(crypto_tfm_alg_ivsize(tfm),
+		              (unsigned int)(sizeof(u32) / sizeof(u8)));
+	else
+		iv_size = 0;
+
+	cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
+	if (cc == NULL) {
+		ti->error =
+			"dm-crypt: Cannot allocate transparent encryption context";
+		crypto_free_tfm(tfm);
+		return -ENOMEM;
+	}
+
+	cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
+	                             mempool_free_slab, _io_cache);
+	if (!cc->io_pool) {
+		ti->error = "dm-crypt: Cannot allocate crypt io mempool";
+		goto bad1;
+	}
+
+	cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page,
+	                               mempool_free_page, NULL);
+	if (!cc->page_pool) {
+		ti->error = "dm-crypt: Cannot allocate page mempool";
+		goto bad2;
+	}
+
+	cc->tfm = tfm;
+	cc->iv_size = iv_size;
+	cc->key_size = key_size;
+	if ((key_size == 0 && strcmp(argv[1], "-") != 0)
+	    || crypt_decode_key(cc->key, argv[1], key_size) < 0) {
+		ti->error = "dm-crypt: Error decoding key";
+		goto bad3;
+	}
+
+	if (tfm->crt_u.cipher.cit_setkey(tfm, cc->key, key_size) < 0) {
+		ti->error = "dm-crypt: Error setting key";
+		goto bad3;
+	}
+
+	if (sscanf(argv[2], SECTOR_FORMAT, &cc->iv_offset) != 1) {
+		ti->error = "dm-crypt: Invalid iv_offset sector";
+		goto bad3;
+	}
+
+	if (sscanf(argv[4], SECTOR_FORMAT, &cc->start) != 1) {
+		ti->error = "dm-crypt: Invalid device sector";
+		goto bad3;
+	}
+
+	if (dm_get_device(ti, argv[3], cc->start, ti->len,
+	                  dm_table_get_mode(ti->table), &cc->dev)) {
+		ti->error = "dm-crypt: Device lookup failed";
+		goto bad3;
+	}
+
+	ti->private = cc;
+	return 0;
+
+bad3:
+	mempool_destroy(cc->page_pool);
+bad2:
+	mempool_destroy(cc->io_pool);
+bad1:
+	crypto_free_tfm(tfm);
+	kfree(cc);
+	return -EINVAL;
+}
+
+static void crypt_dtr(struct dm_target *ti)
+{
+	struct crypt_c *cc = (struct crypt_c *) ti->private;
+
+	mempool_destroy(cc->page_pool);
+	mempool_destroy(cc->io_pool);
+
+	crypto_free_tfm(cc->tfm);
+	dm_put_device(ti, cc->dev);
+	kfree(cc);
+}
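
The constructor above therefore expects five arguments after the target
name, i.e. a table line of roughly this form (all values are made-up
examples, not taken from the patch):

	0 409600 crypt aes-cbc 00112233445566778899aabbccddeeff 0 /dev/hda2 0

The cipher string is split on '-' with strsep(). A small userspace sketch
of just that parsing step (note strsep() is a BSD/GNU extension in
userland):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char arg[] = "aes-cbc";
		char *tmp = arg;
		char *cipher = strsep(&tmp, "-");	/* "aes" */
		char *mode = strsep(&tmp, "-");		/* "cbc", NULL if absent */

		printf("cipher=%s mode=%s\n", cipher, mode ? mode : "(default cbc)");
		return 0;
	}
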
+
+static int crypt_endio(struct bio *bio, unsigned int done, int error)
+{
+	struct crypt_io *io = (struct crypt_io *) bio->bi_private;
+	struct crypt_c *cc = (struct crypt_c *) io->target->private;
+
+	if (bio_rw(bio) == WRITE) {
+		/*
+		 * free the processed pages, even if
+		 * it's only a partially completed write
+		 */
+		crypt_free_buffer_pages(cc, bio, done);
+	}
+
+	if (bio->bi_size)
+		return 1;
+
+	/*
+	 * successful reads are decrypted by the worker thread
+	 * because we never want to decrypt in an irq context
+	 */
+	if ((bio_rw(bio) == READ || bio_rw(bio) == READA)
+	    && bio_flagged(bio, BIO_UPTODATE)) {
+		kcryptd_queue_bio(bio);
+		dm_daemon_wake(&_kcryptd);
+		return 0;
+	}
+
+	bio_put(bio);
+	dec_pending(io, error);
+
+	return error;
+}
+
+static int crypt_map(struct dm_target *ti, struct bio *bio)
+{
+	struct crypt_c *cc = (struct crypt_c *) ti->private;
+	struct crypt_io *io = crypt_alloc_io(cc);
+	struct bio *clone = NULL;
+	struct convert_context ctx;
+	unsigned int remaining = bio->bi_size;
+	sector_t sector = bio->bi_sector - ti->begin;
+	int bio_vec_idx = 0;
+	int r = 0;
+
+	io->target = ti;
+	io->bio = bio;
+	io->first_clone = NULL;
+	io->error = 0;
+	atomic_set(&io->pending, 1); /* hold a reference */
+
+	if (bio_rw(bio) == WRITE)
+		crypt_convert_init(cc, &ctx, NULL, bio, sector, 1);
+
+	/*
+	 * The allocated buffers can be smaller than the whole bio,
+	 * so repeat the whole process until all the data can be handled.
+	 */
+	while (remaining) {
+		if (bio_rw(bio) == WRITE) {
+			clone = crypt_alloc_buffer(cc, bio->bi_size,
+			                           io->first_clone,
+			                           &bio_vec_idx);
+			if (clone) {
+				ctx.bio_out = clone;
+				r = crypt_convert(cc, &ctx);
+				if (r < 0) {
+					crypt_free_buffer_pages(cc, clone,
+					                        clone->bi_size);
+					bio_put(clone);
+					goto cleanup;
+				}
+			}
+		} else
+			clone = bio_clone(bio, GFP_NOIO);
+
+		if (!clone) {
+			r = -ENOMEM;
+			goto cleanup;
+		}
+
+		if (!io->first_clone) {
+			/*
+			 * hold a reference to the first clone, because it
+			 * holds the bio_vec array and that needs to be
+			 * released only after all other clones are released
+			 */
+			bio_get(clone);
+			io->first_clone = clone;
+		}
+		atomic_inc(&io->pending);
+
+		clone->bi_private = io;
+		clone->bi_end_io = crypt_endio;
+		clone->bi_bdev = cc->dev->bdev;
+		clone->bi_sector = cc->start + sector;
+		clone->bi_rw = bio->bi_rw;
+
+		remaining -= clone->bi_size;
+		sector += bio_sectors(clone);
+
+		generic_make_request(clone);
+	}
+
+	/* drop reference, clones could have returned before we reach this */
+	dec_pending(io, 0);
+	return 0;
+
+cleanup:
+	if (io->first_clone) {
+		dec_pending(io, r);
+		return 0;
+	}
+
+	/* if no bio was dispatched yet, we can return the error directly */
+	crypt_free_io(cc, io);
+	return r;
+}
+
+static int crypt_status(struct dm_target *ti, status_type_t type,
+                        char *result, unsigned int maxlen)
+{
+	struct crypt_c *cc = (struct crypt_c *) ti->private;
+	char b[BDEVNAME_SIZE];
+	const char *cipher;
+	const char *mode = NULL;
+	int offset;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		result[0] = '\0';
+		break;
+
+	case STATUSTYPE_TABLE:
+		cipher = crypto_tfm_alg_name(cc->tfm);
+
+		switch (cc->tfm->crt_u.cipher.cit_mode) {
+		case CRYPTO_TFM_MODE_CBC:
+			mode = "cbc";
+			break;
+		case CRYPTO_TFM_MODE_ECB:
+			mode = "ecb";
+			break;
+		default:
+			BUG();
+		}
+
+		snprintf(result, maxlen, "%s-%s ", cipher, mode);
+		offset = strlen(result);
+
+		if (cc->key_size > 0) {
+			if ((maxlen - offset) < ((cc->key_size << 1) + 1))
+				return -ENOMEM;
+
+			crypt_encode_key(result + offset, cc->key, cc->key_size);
+			offset += cc->key_size << 1;
+		} else {
+			if (offset >= maxlen)
+				return -ENOMEM;
+			result[offset++] = '-';
+		}
+
+		snprintf(result + offset, maxlen - offset, " " SECTOR_FORMAT
+		         " %s " SECTOR_FORMAT, cc->iv_offset,
+		         bdevname(cc->dev->bdev, b), cc->start);
+		break;
+	}
+	return 0;
+}
+
+static struct target_type crypt_target = {
+	.name   = "crypt",
+	.module = THIS_MODULE,
+	.ctr    = crypt_ctr,
+	.dtr    = crypt_dtr,
+	.map    = crypt_map,
+	.status = crypt_status,
+};
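
The clone accounting in crypt_map()/dec_pending() is the usual "start at
one, drop the submitter's reference last" pattern. Reduced to a userspace
sketch with C11 atomics standing in for atomic_t (all names here are
illustrative, none come from the patch):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct io {
		atomic_int pending;
		bool done;
	};

	static void dec_pending_sketch(struct io *io)
	{
		/* last reference gone: complete the whole request */
		if (atomic_fetch_sub(&io->pending, 1) == 1)
			io->done = true;
	}

	static void submit_sketch(struct io *io, int nr_clones)
	{
		atomic_init(&io->pending, 1);	/* submitter's reference */

		for (int i = 0; i < nr_clones; i++) {
			atomic_fetch_add(&io->pending, 1);
			/* dispatch clone; its end_io would then call: */
			dec_pending_sketch(io);
		}

		dec_pending_sketch(io);		/* drop submitter's reference */
	}

Because the count starts at one, io->done can only become true after the
submitter has finished dispatching, even if every clone completes early.
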
+		return -ENOMEM;
+
+	r = dm_daemon_start(&_kcryptd, "kcryptd", kcryptd);
+	if (r) {
+		DMERR("couldn't create kcryptd: %d", r);
+		kmem_cache_destroy(_io_cache);
+		return r;
+	}
+
+	r = dm_register_target(&crypt_target);
+	if (r < 0) {
+		DMERR("crypt: register failed %d", r);
+		dm_daemon_stop(&_kcryptd);
+		kmem_cache_destroy(_io_cache);
+	}
+
+	return r;
+}
+
+void dm_crypt_exit(void)
+{
+	int r = dm_unregister_target(&crypt_target);
+
+	if (r < 0)
+		DMERR("crypt: unregister failed %d", r);
+
+	dm_daemon_stop(&_kcryptd);
+	kmem_cache_destroy(_io_cache);
+}
+
+/*
+ * module hooks
+ */
+module_init(dm_crypt_init)
+module_exit(dm_crypt_exit)
+
+MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
+MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
+MODULE_LICENSE("GPL");
diff -Nur linux-2.6.0-test2.orig/drivers/md/dm-daemon.c linux-2.6.0-test2/drivers/md/dm-daemon.c
--- linux-2.6.0-test2.orig/drivers/md/dm-daemon.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.0-test2/drivers/md/dm-daemon.c	2003-07-31 19:32:37.118879440 +0200
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2003 Sistina Software
+ *
+ * This file is released under the LGPL.
+ */
+
+#include "dm.h"
+#include "dm-daemon.h"
+
+#include <linux/module.h>
+#include <linux/sched.h>
+
+static int daemon(void *arg)
+{
+	struct dm_daemon *dd = (struct dm_daemon *) arg;
+	DECLARE_WAITQUEUE(wq, current);
+
+	daemonize("%s", dd->name);
+
+	atomic_set(&dd->please_die, 0);
+
+	add_wait_queue(&dd->job_queue, &wq);
+
+	down(&dd->run_lock);
+	up(&dd->start_lock);
+
+	/*
+	 * dd->fn() could do anything, very likely it will
+	 * suspend.  So we can't set the state to
+	 * TASK_INTERRUPTIBLE before calling it.  In order to
+	 * prevent a race with a waking thread we do this little
+	 * dance with the dd->woken variable.
+	 */
+	while (1) {
+		if (atomic_read(&dd->please_die))
+			goto out;
+
+		do {
+			set_current_state(TASK_RUNNING);
+
+			atomic_set(&dd->woken, 0);
+			dd->fn();
+
+			set_current_state(TASK_INTERRUPTIBLE);
+		} while (atomic_read(&dd->woken));
+
+		schedule();
+	}
+
+ out:
+	remove_wait_queue(&dd->job_queue, &wq);
+	up(&dd->run_lock);
+	return 0;
+}
+
+int dm_daemon_start(struct dm_daemon *dd, const char *name, void (*fn)(void))
+{
+	pid_t pid = 0;
+
+	/*
+	 * Initialise the dm_daemon.
+	 */
+	dd->fn = fn;
+	strncpy(dd->name, name, sizeof(dd->name) - 1);
+	sema_init(&dd->start_lock, 1);
+	sema_init(&dd->run_lock, 1);
+	init_waitqueue_head(&dd->job_queue);
+
+	/*
+	 * Start the new thread.
+	 */
+	down(&dd->start_lock);
+	pid = kernel_thread(daemon, dd, CLONE_KERNEL);
+	if (pid <= 0) {
+		DMERR("Failed to start %s thread", name);
+		return -EAGAIN;
+	}
+
+	/*
+	 * wait for the daemon to up this semaphore.
+	 */
+	down(&dd->start_lock);
+	up(&dd->start_lock);
+
+	return 0;
+}
+
+void dm_daemon_stop(struct dm_daemon *dd)
+{
+	atomic_set(&dd->please_die, 1);
+	dm_daemon_wake(dd);
+	down(&dd->run_lock);
+	up(&dd->run_lock);
+}
+
+void dm_daemon_wake(struct dm_daemon *dd)
+{
+	atomic_set(&dd->woken, 1);
+	wake_up_interruptible(&dd->job_queue);
+}
+
+EXPORT_SYMBOL(dm_daemon_start);
+EXPORT_SYMBOL(dm_daemon_stop);
+EXPORT_SYMBOL(dm_daemon_wake);
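
The start_lock handshake above can be hard to read in semaphore form: the
parent takes start_lock before spawning, the child releases it once it is
actually running, so the parent's second down() blocks until the daemon
has really started. A rough userspace analogue with POSIX semaphores and
pthreads (illustrative only, it omits the run_lock and shutdown parts):

	#include <pthread.h>
	#include <semaphore.h>

	static sem_t start_lock;

	static void *daemon_sketch(void *arg)
	{
		/* ...setup that must finish before start returns... */
		sem_post(&start_lock);		/* the "up" in the child */
		/* ...main loop... */
		return NULL;
	}

	static int daemon_start_sketch(pthread_t *t)
	{
		sem_init(&start_lock, 0, 1);

		sem_wait(&start_lock);		/* taken before the thread exists */
		if (pthread_create(t, NULL, daemon_sketch, NULL))
			return -1;

		sem_wait(&start_lock);		/* blocks until the child posts */
		sem_post(&start_lock);
		return 0;
	}
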
diff -Nur linux-2.6.0-test2.orig/drivers/md/dm-daemon.h linux-2.6.0-test2/drivers/md/dm-daemon.h
--- linux-2.6.0-test2.orig/drivers/md/dm-daemon.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.0-test2/drivers/md/dm-daemon.h	2003-07-31 19:32:37.119879288 +0200
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2003 Sistina Software
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef DM_DAEMON_H
+#define DM_DAEMON_H
+
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+
+struct dm_daemon {
+	void (*fn)(void);
+	char name[16];
+	atomic_t please_die;
+	struct semaphore start_lock;
+	struct semaphore run_lock;
+
+	atomic_t woken;
+	wait_queue_head_t job_queue;
+};
+
+int dm_daemon_start(struct dm_daemon *dd, const char *name, void (*fn)(void));
+void dm_daemon_stop(struct dm_daemon *dd);
+void dm_daemon_wake(struct dm_daemon *dd);
+int dm_daemon_running(struct dm_daemon *dd);
+
+#endif
diff -Nur linux-2.6.0-test2.orig/drivers/md/dm.h linux-2.6.0-test2/drivers/md/dm.h
--- linux-2.6.0-test2.orig/drivers/md/dm.h	2003-07-31 19:34:25.277436840 +0200
+++ linux-2.6.0-test2/drivers/md/dm.h	2003-07-31 19:32:37.119879288 +0200
@@ -159,7 +159,7 @@
 void dm_interface_exit(void);
 
 /*
- * Targets for linear and striped mappings
+ * Targets for the various mappings
  */
 int dm_linear_init(void);
 void dm_linear_exit(void);
@@ -167,4 +167,9 @@
 int dm_stripe_init(void);
 void dm_stripe_exit(void);
 
+#ifdef CONFIG_BLK_DM_CRYPT
+int dm_crypt_init(void);
+void dm_crypt_exit(void);
+#endif
+
 #endif
diff -Nur linux-2.6.0-test2.orig/drivers/md/Kconfig linux-2.6.0-test2/drivers/md/Kconfig
--- linux-2.6.0-test2.orig/drivers/md/Kconfig	2003-07-31 19:34:25.278436688 +0200
+++ linux-2.6.0-test2/drivers/md/Kconfig	2003-07-31 19:32:37.119879288 +0200
@@ -152,5 +152,15 @@
 	  Recent tools use a new version of the ioctl interface, only
 	  select this option if you intend using such tools.
 
+config BLK_DM_CRYPT
+	tristate "dm-crypt target"
+	depends on BLK_DEV_DM && EXPERIMENTAL
+	select CRYPTO
+	---help---
+	  This device-mapper target allows you to create a device that
+	  transparently encrypts the data on it.  You will need to enable
+	  the ciphers you want to use in the CryptoAPI configuration
+	  before you can use this target.
+
 endmenu
diff -Nur linux-2.6.0-test2.orig/drivers/md/Makefile linux-2.6.0-test2/drivers/md/Makefile
--- linux-2.6.0-test2.orig/drivers/md/Makefile	2003-07-31 19:34:25.278436688 +0200
+++ linux-2.6.0-test2/drivers/md/Makefile	2003-07-31 19:32:37.120879136 +0200
@@ -3,7 +3,7 @@
 #
 dm-mod-objs	:= dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
-		   dm-ioctl.o
+		   dm-ioctl.o dm-daemon.o
 
 # Note: link order is important.  All raid personalities
 # and xor.o must come before md.o, as they each initialise
@@ -17,3 +17,4 @@
 obj-$(CONFIG_MD_MULTIPATH)	+= multipath.o
 obj-$(CONFIG_BLK_DEV_MD)	+= md.o
 obj-$(CONFIG_BLK_DEV_DM)	+= dm-mod.o
+obj-$(CONFIG_BLK_DM_CRYPT)	+= dm-crypt.o