* [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel version 2.6.20.1)
@ 2007-03-19 16:22 Tasos Parisinos
  2007-03-19 22:58 ` Matt Mackall
  2007-03-20  0:40 ` Francois Romieu
  0 siblings, 2 replies; 26+ messages in thread
From: Tasos Parisinos @ 2007-03-19 16:22 UTC (permalink / raw)
  To: herbert; +Cc: linux-kernel

From: Tasos Parisinos <t.parisinos@sciensis.com>

This patch changes crypto/Kconfig and crypto/Makefile and adds
crypto/rsa.c and crypto/rsa.h to the source tree. These files add the
module rsa.o (or rsa.ko) to the kernel (built-in or as a kernel module)
and offer an API for fast modular exponentiation. The modular
exponentiation routine is an implementation of the Montgomery algorithm.
The API can also be used for some multi-precision arithmetic
(multiplication, addition, subtraction, remainder) and can serve as a
basis for future multi-precision arithmetic implementations. The module
is written entirely in C, making it portable but less efficient than an
assembly implementation; however, the code is structured so that the C
could easily be replaced with assembly in the future. The module does
not have a userland interface yet, so it can only be used by other
in-kernel code. As mentioned in the subject, this patch applies to
kernel version 2.6.20.1.
Signed-off-by: Tasos Parisinos <t.parisinos@sciensis.com>

-

---
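
For illustration only, and not part of the patch: a minimal sketch of how
in-kernel code might drive the exponentiation API. In the posted patch the
helpers are still static to crypto/rsa.c, so this is purely conceptual and
untested; the values are arbitrary.

    /* Hypothetical caller: compute res = m ^ e mod n with the patch's
     * helpers, here 0x05 ^ 0x03 mod 0x21 == 0x1a.
     */
    static int rsa_selftest(void)
    {
        mpi *m = NULL, *e = NULL, *n = NULL, *res = NULL;
        int err;

        if ((err = rsa_mpi_init(&m, "\x05", 1, 0)) < 0 ||
            (err = rsa_mpi_init(&e, "\x03", 1, 0)) < 0 ||
            (err = rsa_mpi_init(&n, "\x21", 1, 0)) < 0)
            goto out;

        err = rsa_modexp(&res, m, e, n);
        if (!err)
            rsa_mpi_print(res, true);   /* expect value = 1a */
    out:
        rsa_mpi_free(m);
        rsa_mpi_free(e);
        rsa_mpi_free(n);
        rsa_mpi_free(res);
        return err;
    }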

diff -uprN -X linux-2.6.20.1-vanilla/Documentation/dontdiff linux-2.6.20.1/crypto/Kconfig linux-2.6.20.1-vanilla/crypto/Kconfig
--- linux-2.6.20.1/crypto/Kconfig    2007-02-20 08:34:32.000000000 +0200
+++ linux-2.6.20.1-vanilla/crypto/Kconfig    2007-03-11 14:12:24.000000000 +0200
@@ -458,6 +458,46 @@ config CRYPTO_CRC32C
       See Castagnoli93.  This implementation uses lib/libcrc32c.
           Module will be crc32c.
 
+config CRYPTO_RSA
+    tristate "RSA cipher algorithm"
+    depends on CRYPTO
+    help
+      The famous RSA asymmetric cipher algorithm. This may be used in-kernel
+      (no userland interface yet) to compute modular exponentiation. It can
+      also be used to do some multi-precision arithmetic.
+
+      If it is selected it will add approximately 8K to the kernel size.
+      Select M to build this driver as a module.
+      If unsure say N.
+
+config CRYPTO_RSA_DEBUG
+    bool "Debugging capabilities"
+    depends on CRYPTO_RSA
+    help
+      This adds lots of debugging information and debugging capabilities to
+      the rsa module. It also adds approximately 1K more to the kernel.
+
+config RSA_AUXCOUNT
+    int "Initial preallocated mpi pool size"
+    default "8"
+    depends on CRYPTO_RSA
+    help
+      The rsa module needs some preallocated space to avoid computation-time
+      allocations. The 'mpi' is the struct used by the rsa module to hold a
+      multi-precision integer, so this setting is the number of mpis
+      allocated at module load time.
+
+config RSA_AUXSIZE
+    int "Initial preallocated mpi limb size"
+    default "128"
+    depends on CRYPTO_RSA && RSA_AUXCOUNT!="0"
+    help
+      The rsa module needs some preallocated space to avoid computation-time
+      allocations. The 'mpi' is the struct used by the rsa module to hold a
+      multi-precision integer. This struct maps a number onto multiple 32 bit
+      limbs (it is actually a 32 bit array). Here you select the default size
+      (in limbs) of the preallocated mpis.
+
 config CRYPTO_TEST
     tristate "Testing module"
     depends on m
diff -uprN -X linux-2.6.20.1-vanilla/Documentation/dontdiff linux-2.6.20.1/crypto/Makefile linux-2.6.20.1-vanilla/crypto/Makefile
--- linux-2.6.20.1/crypto/Makefile    2007-02-20 08:34:32.000000000 +0200
+++ linux-2.6.20.1-vanilla/crypto/Makefile    2007-03-11 14:17:22.000000000 +0200
@@ -2,6 +2,11 @@
 # Cryptographic API
 #
 
+ifeq ($(CRYPTO_RSA_DEBUG),y)
+    EXTRA_CFLAGS += -DRSA_DEBUG
+endif
+EXTRA_CFLAGS += -DRSA_HAVE_MPI_PRINT -Wno-unused-function
+
 obj-$(CONFIG_CRYPTO) += api.o scatterwalk.o cipher.o digest.o compress.o
 
 crypto_algapi-$(CONFIG_PROC_FS) += proc.o
@@ -43,5 +48,6 @@ obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
 obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
+obj-$(CONFIG_CRYPTO_RSA) += rsa.o
 
 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
diff -uprN -X linux-2.6.20.1-vanilla/Documentation/dontdiff linux-2.6.20.1/crypto/rsa.c linux-2.6.20.1-vanilla/crypto/rsa.c
--- linux-2.6.20.1/crypto/rsa.c    1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.20.1-vanilla/crypto/rsa.c    2007-03-19 16:45:21.000000000 +0200
@@ -0,0 +1,884 @@
+/*
+ *
+ * Cryptographic API
+ *
+ * RSA cipher algorithm implementation
+ *
+ * Copyright (c) Tasos Parisinos <t.parisinos@sciensis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *                                                                 
+ */  
+
+#include "rsa.h"
+
+static mpi *    aux[RSA_AUX_COUNT];
+static _u32     modinv = 0;
+
+static inline _i32 rsa_max(_i32 x, _i32 y)
+{
+    return (x > y)? x: y;
+}
+
+/*
+ * Module loading callback function
+ *
+ * Returns 0 on success or a negative value indicating error
+ */
+static _err __init rsa_load(void)
+{
+    _u32 i;
+    _err retval = RSA_NO_ERR;
+   
+    /* Pre-allocate some auxilliary mpis */
+    rsa_echo("Preallocating %lu bytes for auxilliary operands\n",
+         RSA_AUX_SIZE * RSA_AUX_COUNT * sizeof(_u32));
+   
+    memset(&aux, 0, sizeof(aux));
+    for(i = 0; i < RSA_AUX_COUNT; i++) {
+        retval = rsa_mpi_alloc(&aux[i], RSA_AUX_SIZE);
+        if(retval < 0)
+            goto rollback;
+    }
+           
+    rsa_echo("RSA cipher algorithm module initialized\n");
+    return RSA_NO_ERR;
+
+/* Free all allocated resources if any errors occur */
+rollback:
+    for(i = 0; i < RSA_AUX_COUNT; i++)
+        rsa_mpi_free(aux[i]);
+    return retval;
+}
+
+/*
+ * Module unloading callback function
+ */
+static void __exit rsa_unload(void)
+{
+    _u32 i;
+   
+    /* Free all the pre-allocated auxilliary mpis */
+    for(i = 0; i < RSA_AUX_COUNT; i++)
+        rsa_mpi_free(aux[i]);
+    rsa_echo("RSA cipher algorithm module unloaded\n");
+}
+
+/*
+ * Preallocate an mpi. The allocated mpi will be all-zeroed and not
+ * canonicalized.
+ *
+ * Returns 0 on success or a negative value indicating error
+ *
+ * @n:       pointer pointer to the allocated mpi
+ * @limbs: number of allocated limbs (32 bit digits)
+ */
+static _err rsa_mpi_alloc(mpi ** n, _i32 limbs)
+{
+    mpi * handle;
+       
+    *n = NULL;
+    if(!limbs)
+        return RSA_ERR_INVARG;
+   
+    /* Allocate space for the mpi */
+    handle = *n = kmalloc(sizeof(mpi), GFP_KERNEL);
+    if(!handle) {
+        rsa_debug("%s: kmalloc failed\n", __FUNCTION__);
+        return RSA_ERR_NOMEM;
+    }
+   
+    handle->data = kzalloc(limbs * sizeof(_u32), GFP_KERNEL);        
+    if(!handle->data) {
+        kfree(handle);
+        *n = NULL;
+        rsa_debug("%s: kzalloc failed\n", __FUNCTION__);
+        return RSA_ERR_NOMEM;   
+    }
+   
+    handle->sign = 0;
+    handle->size = handle->limbs = limbs;
+    return RSA_NO_ERR;
+}
+
+/*
+ * Free the resources held by an mpi
+ */
+static void rsa_mpi_free(mpi * n)
+{
+    if(!n)
+        return;
+    kfree(n->data);
+    kfree(n);
+}
+
+/*
+ * Initialize an mpi given its hex (absolute) value. The optional leading
+ * zeroes will be taken into account, so that the mpi created will not be
+ * canonicalized
+ *
+ * Returns 0 on success or a negative value indicating error
+ *
+ * @n:      pointer pointer to the allocated mpi
+ * @str:  hex data
+ * @size: sizeof(str)
+ * @xtra: how many extra limbs to preallocate to avoid reallocations
+ */
+static _err rsa_mpi_init(mpi **    n, _u8 * str, _u32 size, _u32 xtra)
+{
+    _i32 i, j;
+    _u32 * buf;
+    _i32 s;
+    _err retval;
+       
+    *n = NULL;
+    if(!size && !xtra) {
+        rsa_debug("%s: invalid initialization string\n", __FUNCTION__);
+        return RSA_ERR_INVARG;
+    }
+   
+    /* Allocate space for the mpi and its data */
+    s = (size / 4) + ((size % 4)? 1: 0);
+    retval = rsa_mpi_alloc(n, s + xtra);
+    if(retval < 0)
+        return retval;
+   
+    (*n)->size = s;
+    buf = (*n)->data;
+       
+    /* Copy the data */
+    for(i = size - 1, j = 0; i >= 0; i--, j++)
+        buf[j / 4] |= ((_u32)str[i] << ((j % 4) * 8));
+    return RSA_NO_ERR;
+}
+
+/*
+ * Resize an mpi, doing all the needed re-allocations
+ *
+ * Returns 0 on success or a negative value indicating error
+ *
+ * @n:     pointer pointer to the allocated mpi
+ * @size: the new size
+ * @hold: true to keep the current data
+ */
+static _err rsa_mpi_resize(mpi ** n, _i32 size, _u8 hold)
+{
+    _err retval;
+    mpi * handle = *n;
+   
+    /* If there is an mpi passed in, that has the available limbs */
+    if(handle && handle->limbs >= size) {   
+         _i32 i, s;
+         _u32 * buf;
+       
+        s = handle->size;
+        buf = handle->data;
+       
+        /* If the original data are not needed they are zeroed */
+        if(!hold) {   
+            for(i = 0; i < s; i++)
+                buf[i] = 0;
+            handle->sign = 0;
+        }
+        /* Zero the xtra limbs */
+        else if(size < handle->size)
+            for(i = size; i < s; i++)
+                buf[i] = 0;
+               
+        handle->size = size;
+    }
+    /* If there is an mpi passed in, that doesn't have the available
+     * limbs already allocated
+     */
+    else if(handle) {
+        mpi * tmp = NULL;
+       
+        retval = rsa_mpi_alloc(&tmp, size);
+        if(retval < 0)
+            return retval;
+       
+        /* Copy the original data if they are needed */
+        if(hold) {
+            memcpy(tmp->data, handle->data, handle->size * sizeof(_u32));
+            tmp->sign = handle->sign;
+        }
+       
+        rsa_mpi_free(*n);
+        *n = tmp;
+        return retval;
+    }
+    /* If there is no allocated mpi passed in, allocate one */
+    else if(!handle) {
+        retval = rsa_mpi_alloc(n, size);
+        if(retval < 0)
+            return retval;
+    }
+   
+    return RSA_NO_ERR;
+}
+
+/*
+ * Set the value of an mpi given its hex (absolute) value. The optional
+ * leading zeroes will be taken into account, so that the mpi created will
+ * not be canonicalized. The mpi passed in will be re-allocated (and relocated)
+ * if needed
+ *
+ * Returns 0 on success or a negative value indicating error
+ *
+ * @n:     pointer pointer to the allocated mpi
+ * @str:  hex data
+ * @size: sizeof(str)
+ */
+static _err rsa_mpi_set(mpi ** n, _u8 * str, _u32 size)
+{
+    _i32 s, i, j;
+    _u32 * buf;
+    _err retval;
+   
+    if(!size) {
+        rsa_debug("%s: invalid initialization string\n", __FUNCTION__);
+        return RSA_ERR_INVARG;
+    }
+   
+    /* Size of the new mpi value (in limbs) */
+    s = (size / 4) + ((size % 4)? 1: 0);
+    retval = rsa_mpi_resize(n, s, false);
+    if(retval < 0)
+        return retval;
+   
+    /* Copy the data */
+    buf = (*n)->data;
+    for(i = size - 1, j = 0; i >= 0; i--, j++)
+        buf[j / 4] |= ((_u32)str[i] << ((j % 4) * 8));
+    return RSA_NO_ERR;
+}
+
+/*
+ * Mpi to mpi copy
+ *
+ * Returns 0 on success or a negative value indicating error
+ *
+ * @dest: destination mpi
+ * @src:  source mpi
+ */
+static inline _err rsa_mpi_copy(mpi **    dest, mpi * src)
+{
+    _i32 i, s;
+    _u32 * destbuf, * srcbuf;
+    _err retval;
+   
+    retval = rsa_mpi_resize(dest, src->size, false);
+    if(retval < 0)
+        return retval;
+   
+    (*dest)->sign = src->sign;
+    destbuf = (*dest)->data;
+    srcbuf = src->data;
+    for(i = 0, s = src->size; i < s; i++)
+        destbuf[i] = srcbuf[i];
+    return RSA_NO_ERR;
+}
+
+/*
+ * Print the value of an mpi
+ *
+ * @n:     pointer to the mpi
+ * @how:  true to print canonicalized
+ */
+static void rsa_mpi_print(mpi *    n, _u8 how)
+{
+#ifdef RSA_HAVE_MPI_PRINT
+    _i32 i, j;
+    _u32 limb;
+    _u8 byte, started = false;
+
+    rsa_echo("Mpi at 0x%x, %d limbs in size, %d limbs allocated, value = ",
+         (_u32)n, n->size, n->limbs);
+   
+    /* If the mpi is merely zero */
+    if(rsa_mpi_iszero(n) && how) {
+        printk("0\n");
+        return;
+    }
+       
+    /* Print the sign */
+    printk("%s", (n->sign)? "-": " ");
+   
+    /* Print the hex value */
+    for(i = n->size - 1; i >= 0; i--) {
+        limb = n->data[i];
+       
+        /* Ignore leading zero limbs if canonicalized printing is selected */
+        if(!limb && !started && how)
+            continue;
+       
+        /* Print each limb as though each of its nibbles was a character from
+         * the set '0' to '9' and 'a' to 'f'
+         */
+        for(j = 28; j >= 0; j -= 4) {
+            byte = (_u8)((limb >> j) & 0x0F);
+           
+            /* Ignore leading zero nibbles if canonicalized printing is selected */
+            if(!byte && !started && how)
+                continue;
+           
+            started = true;   
+            byte += (byte <= 0x09)? '0': 'a' - 0x0A;
+            printk("%c", byte);
+        }
+    }
+   
+    printk("\n");
+#endif
+}
+
+/*
+ * Check if an mpi is zero
+ *
+ * Returns true if it is, false otherwise
+ *
+ * @n: the mpi
+ */
+static inline _u8 rsa_mpi_iszero(mpi * n)
+{
+    _i32 i, s;
+    _u32 * buf;
+   
+    s = n->size;
+    buf = n->data;
+    for(i = 0; i < s; i++)
+        if(buf[i])
+            return false;
+    return true;
+}
+
+/*
+ * Compare two mpis
+ *
+ * Returns -1 if a < b, 1 if b < a, 0 otherwise
+ *
+ * @a: the left operand
+ * @b: the right operand
+ */
+static _i8 rsa_mpi_compare(mpi * a, mpi * b)
+{
+    _i32 i, j;
+    _u32 * abuf, * bbuf;
+   
+    /* Compare the two mpis based on sign */
+    if(a->sign != b->sign)
+        return (a->sign)? -1: 1;
+   
+    /* Compare the two mpis based on their size */
+    if(a->size > b->size && rsa_mpi_clz(a) < (a->size - b->size) * 32)
+        return 1;
+    else if(a->size > b->size)
+        j = b->size;
+    else if(b->size > a->size && rsa_mpi_clz(b) < (b->size - a->size) * 32)
+        return -1;
+    else
+        j = a->size;
+   
+    /* Compare the two mpis based on their hex values */
+    abuf = a->data;
+    bbuf = b->data;
+    for(i = j - 1; i >= 0; i--)
+        if(abuf[i] > bbuf[i])
+            return 1;
+        else if(abuf[i] < bbuf[i])
+            return -1;
+
+    return 0;
+}
+
+/*
+ * Count leading zeroes
+ *
+ * Returns the number of leading zero bits
+ *
+ * @n: the mpi
+ */
+static _u64 rsa_mpi_clz( mpi * n)
+{
+    _u64 retval = 0;
+    _i32 i;
+    _u32 limb;
+   
+    for(i = n->size - 1; i >= 0; i--) {
+        limb = n->data[i];
+       
+        if(!limb) {
+            retval += 32;
+            continue;
+        }
+       
+        while(!(limb & 0x80000000)) {
+            retval++;
+            limb = limb << 1;
+        }
+       
+        break;
+    }
+   
+    return retval;
+}
+
+/*
+ * Complement an mpi (either 1's or 2's complement) ignoring overflow
+ *
+ * @n:       the mpi
+ * @which: true to compute 2's complement
+ */
+static inline void rsa_mpi_complement(mpi * n, _u8 which)
+{
+    _i32 i, s;
+    _u32 * buf;
+   
+    s = n->size;
+    buf = n->data;
+    for(i = 0; i < s; i++)
+        buf[i] ^= RSA_MAX_U32;
+    if(!which)
+        return;   
+   
+    /* Add 1 using the addition carry */
+    for(i = 0; i < s; i++) {
+        buf[i] += 1;
+        if(buf[i])
+            break;
+    }
+}
+
+/*
+ * Ignore any leading zero limbs of an mpi
+ *
+ * @n: the mpi
+ */
+static inline void rsa_mpi_canonicalize(mpi * n)
+{
+    _i32 i;
+    _u32 * buf = n->data;
+   
+    for(i = n->size - 1; i >= 0; i--)
+        if(!buf[i] && n->size > 1)
+            n->size--;
+        else
+            break;
+}
+
+/*
+ * Shift a number both directions
+ *
+ * Returns 0 on success or a negative value indicating error
+ *
+ * @n:       pointer pointer to the allocated mpi
+ * @bits: shift to the right if positive, otherwise shift to the left
+ */
+static _err rsa_mpi_shift(mpi ** n, _i64 bits)
+{
+    _i32 i, distance, size, lz;
+    _u32 * buf;
+    mpi * handle;
+    _err retval;
+   
+    handle = *n;
+    if(!bits || rsa_mpi_iszero(handle))
+        return RSA_NO_ERR;
+       
+    /* Shifting to the right, no resize needed */
+    if(bits > 0) {
+        /* Drop off one limb for each 32 bit shift */
+        distance = bits / 32;
+        size = handle->size;
+        buf = handle->data;
+        for(i = 0; i < size; i++)
+            buf[i] = (i + distance >= size)? 0x00: buf[i + distance];
+       
+        /* Shift the remaining 'bits' mod 32 */
+        bits = bits % 32;
+        if(bits) {
+            size -= distance;
+            distance = 32 - bits;
+            for(i = 0; i < size; i++) {
+                buf[i] = buf[i] >> bits;
+                if(i < size - 1)
+                    buf[i] |=  buf[i + 1] << distance;
+            }   
+        }
+       
+        rsa_mpi_canonicalize(handle);
+        return RSA_NO_ERR;
+    }   
+   
+    bits = -bits;
+    lz = rsa_mpi_clz(handle) + (handle->limbs - handle->size) * 32;
+       
+    /* Shifting to the left
+     * Reallocation is needed when the leading zeroes are less than the shift
+     * distance
+     */
+    if(lz < bits) {
+        /* Compute the size of the reallocation */
+        size = bits - lz;
+        size = (size / 32) + (size % 32 != 0);
+        retval = rsa_mpi_resize(n, handle->limbs + size, true);
+        if(retval < 0)
+            return retval;
+        handle = *n;
+    }
+    else {
+        size = bits - rsa_mpi_clz(handle);
+        handle->size += ((size / 32) + (size % 32 != 0));
+    }
+   
+    buf = handle->data;   
+    distance = bits / 32;
+    /* Shift data one limb to the left for each 32 bit shift */
+    if(distance) {
+        /* Shift limbs */
+        for(i = handle->size - distance - 1; i >= 0; i--)
+            buf[i + distance] = buf[i];
+        /* Zero the shifted-in limbs */
+        for(i = 0; i < distance; i++)
+            buf[i] = 0;
+    }
+   
+    /* Shift the remaining 'bits' mod 32 */
+    bits = bits % 32;
+    distance = 32 - bits;
+    if(bits)
+        for(i = handle->size - 1; i >= 0; i--) {
+            buf[i] = buf[i] << bits;
+            if(i > 0)
+                buf[i] |= (buf[i - 1] >> distance);
+        }
+   
+    return RSA_NO_ERR;
+}
+
+/*
+ * Multiply two mpis
+ *
+ * Returns 0 on success or a negative value indicating error
+ *
+ * @res: pointer pointer to the result
+ * @a:     left operand
+ * @b:   right operand
+ */
+static _err rsa_mpi_multiply(mpi ** res, mpi *    a, mpi * b)
+{
+    _i32 i, j, size, asize, bsize;
+    _u32 * buf, * abuf, * bbuf;
+    _u64 tmp;
+    mpi * handle;
+    _err retval;
+   
+    asize = a->size;
+    bsize = b->size;
+    size = asize + bsize;
+    retval = rsa_mpi_resize(res, size, false);
+    if(retval < 0)
+        return retval;
+       
+    handle = *res;
+    handle->sign = a->sign ^ b->sign;
+   
+    buf = handle->data;
+    abuf = a->data;
+    bbuf = b->data;   
+    /* Make the multiplication, using the standard algorithm */
+    for(i = 0; i < bsize; i++) {
+        tmp = 0;
+        for(j = 0; j < asize; j++)
+            buf[i + j] = tmp = buf[i + j] + (abuf[j] * (_u64)bbuf[i]) + (tmp >> 32);
+        buf[i + asize] = tmp >> 32;
+    }
+   
+    rsa_mpi_canonicalize(handle);
+    return RSA_NO_ERR;
+}
+
+/*
+ * Subtract two mpis
+ *
+ * Returns 0 on success or a negative value indicating error
+ *
+ * @res: pointer pointer to the result
+ * @a:     left operand
+ * @b:   right operand
+ */
+static _err rsa_mpi_subtract(mpi ** res, mpi * a, mpi *    b)
+{
+    _u32 * buf, * abuf, * bbuf;
+    _i32 i, size;
+    mpi * handle;
+    _err retval;
+   
+    size = rsa_max(a->size, b->size) + (a->sign != b->sign);
+    if((retval = rsa_mpi_resize(res, size, true)) < 0 ||
+       (retval = rsa_mpi_resize(&a, size, true)) < 0  ||
+       (retval = rsa_mpi_resize(&b, size, true)) < 0)
+        return retval;
+   
+    handle = *res;
+    buf = handle->data;
+    abuf = a->data;
+    bbuf = b->data;
+   
+    /* If the operands have the same sign we perform subtraction */
+    if(a->sign == b->sign) {
+         _u8 borrow = false;
+         _u32 limb;
+               
+        for(i = 0; i < size; i++) {
+            limb = borrow + bbuf[i];   
+            buf[i] = abuf[i] - limb;
+            borrow = (abuf[i] < limb);
+        }
+       
+        handle->sign = (borrow)? !b->sign: b->sign;
+        if(borrow)
+            rsa_mpi_complement(handle, true);
+    }
+    /* If the operands have different signs we perform addition */
+    else {
+         _u8 carry = false;
+         _u64 sum;
+       
+        for(i = 0; i < size; i++) {
+            buf[i] = sum = abuf[i] + bbuf[i] + carry;
+            carry = (sum > RSA_MAX_U32);   
+        }
+       
+        handle->sign = a->sign;
+    }
+   
+    rsa_mpi_canonicalize(handle);
+    rsa_mpi_canonicalize(a);
+    rsa_mpi_canonicalize(b);
+    return RSA_NO_ERR;
+}
+
+/*
+ * Compute the remainder of a/b (aka a mod b)
+ *
+ * Returns 0 on success or a negative value indicating error
+ *
+ * @res: pointer pointer to the result
+ * @a:     left operand
+ * @b:   right operand
+ */
+static _err rsa_mpi_remainder(mpi ** res, mpi * a, mpi * b)
+{
+    _i32 i, k;
+    _err retval;
+   
+    /* Because b operand will be shifted we need to have a copy of it in
+     * order not to mess with the prototype b
+     */
+    if((retval = rsa_mpi_copy(&aux[0], a)) < 0 ||
+       (retval = rsa_mpi_copy(&aux[1], b)) < 0)
+        return retval;
+   
+    k = (aux[0]->size - aux[1]->size) * 32;   
+    k += (rsa_mpi_clz(aux[1]) - rsa_mpi_clz(aux[0]));
+   
+    /* Align the divisor to the dividend */
+    retval = rsa_mpi_shift(&aux[1], -k);
+    if(retval < 0)
+        return retval;
+   
+    for(i = 0; i <= k; i++) {
+        retval = rsa_mpi_subtract(res, aux[0], aux[1]);
+        if(retval < 0)
+            return retval;
+           
+        if(!(*res)->sign) {
+            retval = rsa_mpi_copy(&aux[0], (*res));
+            if(retval < 0)
+                return retval;
+        }
+       
+        retval = rsa_mpi_shift(&aux[1], 1);
+        if(retval < 0)
+            return retval;   
+    }
+   
+    rsa_mpi_canonicalize(aux[0]);
+    return rsa_mpi_copy(res, aux[0]);
+}
+
+/*
+ * Compute the first 32 bits of the modular inverse of an mpi
+ *
+ * Returns the single-precision Montgomery constant, i.e. -n^-1 mod 2^32
+ *
+ * @n: the mpi
+ */
+static _u32 rsa_modinv(mpi * n)
+{
+    _u32 i, x, y, tmp, pow1;
+   
+    pow1 = y = 1;
+    x = n->data[0];
+   
+    for(i = 2; i <= RSA_RADIX_BITS; i++) {
+        pow1 = pow1 << 1;
+        tmp = ((_i64)x * y) & (RSA_MAX_U32 >> (32 - i));
+        if(pow1 < tmp)
+            y += pow1;
+    }
+
+    y = (y ^ RSA_MAX_U32) + 1;
+    return y;
+}
+
+/*
+ * Compute the Montgomery product (res = a * b * r^-1 mod n) using the single
+ * precision modular inverse of the modulus
+ *
+ * Returns 0 on success or a negative value indicating error
+ *
+ * @res: pointer pointer to the result
+ * @a:     left operand
+ * @b:   right operand
+ * @n:     divisor
+ */
+static _err rsa_monpro(mpi ** res, mpi * a, mpi * b, mpi * n)
+{
+    _i32 nsize, i, j, k;
+    _u32 * buf, * nbuf, * tmp, m;
+    _u64 product = 0;
+    _err retval;
+       
+    nsize = n->size;
+    k = nsize << 1;
+    retval = rsa_mpi_multiply(&aux[2], a, b);
+    if(retval < 0)
+        return retval;
+    retval = rsa_mpi_resize(&aux[2], rsa_max(aux[2]->size, k), true);
+    if(retval < 0)
+        return retval;
+       
+    tmp = buf = aux[2]->data;
+    nbuf = n->data;
+   
+    for(i = 0; i < nsize; i++, tmp++) {
+        m = buf[i] * modinv;
+        product = 0;   
+       
+        for(j = 0; j < nsize; j++)
+            tmp[j] = product = tmp[j] + (m * (_u64)nbuf[j]) + (product >> 32);
+       
+        for(j = nsize + i; j < k; j++)
+            buf[j] = product = buf[j] + (product >> 32);
+    }
+
+    retval = rsa_mpi_resize(&aux[2], aux[2]->size + 1, true);
+    if(retval < 0)
+        return retval;
+    aux[2]->data[aux[2]->size - 1] = product >> 32;
+    retval = rsa_mpi_shift(&aux[2], nsize * 32);
+    if(retval < 0)
+        return retval;
+   
+    if(rsa_mpi_compare(aux[2], n) >= 0) {
+        if((retval = rsa_mpi_subtract(&aux[3], aux[2], n)) < 0 ||
+           (retval = rsa_mpi_copy(res, aux[3])) < 0)
+            return retval;
+    }   
+    else if((retval = rsa_mpi_copy(res, aux[2])) < 0)
+        return retval;
+    return RSA_NO_ERR;
+}
+
+/*
+ * Computes the RSA of m, a.k.a res = m ^ e mod n
+ *
+ * Returns 0 on success or a negative value indicating error
+ *
+ * @res: pointer pointer to the result
+ * @m:     base
+ * @e:   exponent
+ * @n:     divisor
+ */
+static _err rsa_modexp(mpi ** res, mpi * m, mpi * e, mpi * n)
+{
+    _i32 i, j;
+    _u32 limb;
+    _u8 started = false;
+    _err retval;
+   
+    if(m->size != n->size || rsa_mpi_compare(m, n) > 0)
+        return RSA_ERR_INVARG;
+
+    modinv = rsa_modinv(n);
+   
+    /* Compute m * r mod n where r is 2 ^ k and n is the k-bit modulus
+     * The resulting m' is stored in aux[4]
+     */
+    if((retval = rsa_mpi_copy(&aux[5], m)) < 0)
+        return retval;
+    if((retval = rsa_mpi_shift(&aux[5], -(n->size * 32))) < 0)
+        return retval;
+    if((retval = rsa_mpi_remainder(&aux[4], aux[5], n)) < 0)
+        return retval;
+   
+    /* Compute r mod n where r is 2 ^ k and n is the k-bit modulus
+     * The resulting x' is stored in aux[7]
+     */
+    if((retval = rsa_mpi_set(&aux[5], "\x01", 1)) < 0)
+        return retval;
+    if((retval = rsa_mpi_shift(&aux[5], -(n->size * 32))) < 0)
+        return retval;
+    if((retval = rsa_mpi_remainder(&aux[7], aux[5], n)) < 0)
+        return retval;
+
+    /* Canonicalize the exponent and compute the modular exponentiation.
+     * For each limb of the exponent perform left to right binary exponentiation
+     */
+    rsa_mpi_canonicalize(e);
+    for(i = e->size - 1; i >= 0; i--) {
+        /* Take a limb of the exponent */
+        limb = e->data[i];
+
+        /* For each of its bits */
+        for(j = 0; j < 32; j++) {
+            /* While the exponent has non significant zeroes,
+             * shift it to the left
+             */
+            if(!(limb & 0x80000000) && !started) {
+                limb = limb << 1;
+                continue;
+            }
+
+            started = true;   
+            /* Compute x' * x' mod n */
+            retval = rsa_monpro(&aux[5], aux[7], aux[7], n);
+            if(retval < 0)
+                return retval;
+
+            if(limb & 0x80000000) {
+                /* Compute x' = m' * x' mod n */
+                retval = rsa_monpro(&aux[7], aux[4], aux[5], n);
+                if(retval < 0)
+                    return retval;
+            }
+            else if((retval = rsa_mpi_copy(&aux[7], aux[5])) < 0)
+                return retval;
+
+            limb = limb << 1;
+        }
+    }
+
+    /* Compute res = x' mod n */
+    retval = rsa_mpi_set(&aux[6], "\x01", 1);
+    if(retval < 0)
+        return retval;
+    return rsa_monpro(res, aux[7], aux[6], n);
+}
+
+module_init(rsa_load);
+module_exit(rsa_unload);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tasos Parisinos @ Sciensis Advanced Technology Systems");
+MODULE_DESCRIPTION("RSA cipher algorithm implementation");
diff -uprN -X linux-2.6.20.1-vanilla/Documentation/dontdiff linux-2.6.20.1/crypto/rsa.h linux-2.6.20.1-vanilla/crypto/rsa.h
--- linux-2.6.20.1/crypto/rsa.h    1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.20.1-vanilla/crypto/rsa.h    2007-03-19 16:29:34.000000000 +0200
@@ -0,0 +1,86 @@
+#ifndef _KM_RSA_H_
+#define _KM_RSA_H_
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <config/rsa/auxcount.h>
+#include <config/rsa/auxsize.h>
+
+#define RSA_AUX_COUNT         CONFIG_RSA_AUXCOUNT
+#define RSA_AUX_SIZE         CONFIG_RSA_AUXSIZE
+#define RSA_MAX_U32        0xFFFFFFFF
+#define RSA_RADIX_BITS        0x20
+#define RSA_LOGLEVEL        KERN_ALERT
+#define RSA_NO_ERR        0
+#define RSA_ERR_INVARG        -1
+#define RSA_ERR_NOMEM        -2
+
+#define true            0x01
+#define false            0x00
+
+#ifdef RSA_DEBUG
+    #define rsa_debug(fmt, args...)                \
+    {                                \
+        if(printk_ratelimit())                    \
+            printk(RSA_LOGLEVEL "rsa: " fmt, ## args); \
+    }
+#else
+    #define rsa_debug(fmt, args...)
+#endif
+
+#define rsa_echo(fmt, args...) printk(RSA_LOGLEVEL "rsa: " fmt, ## args)
+
+#if RSA_AUX_COUNT < 8
+    #error "Rsa module needs at least 8 auxilliary mpis"
+#endif
+
+typedef signed char         _i8;
+typedef signed short        _i16;
+typedef signed int         _i32;
+typedef signed long long    _i64;
+typedef unsigned char         _u8;
+typedef unsigned short        _u16;
+typedef unsigned int         _u32;
+typedef unsigned long long    _u64;
+typedef    signed int        _err;
+
+/* Multi-precision integer */
+typedef struct mpi {
+    _u32    * data;    /* _u32 array holding the number absolute value */
+    _u8    sign;    /* 1 for negative, 0 for positive */
+    _i32    size;    /* Significant number limbs */
+    _i32    limbs;    /* Allocated limbs (sizeof data) */
+} mpi;
+
+/* Module loading/unloading functions */
+static _err __init    rsa_load(void);
+static void __exit    rsa_unload(void);
+
+/* Mpi utility functions */
+static _err        rsa_mpi_alloc(mpi **, _i32);
+static void         rsa_mpi_free(mpi *);
+static _err         rsa_mpi_init(mpi **, _u8 *, _u32, _u32);
+static _err         rsa_mpi_resize(mpi **, _i32, _u8);
+static _err         rsa_mpi_set(mpi **, _u8 *, _u32);
+static inline _err     rsa_mpi_copy(mpi **, mpi *);
+static void         rsa_mpi_print(mpi *, _u8);
+
+/* Mpi comparing and misc functions */
+static inline _u8     rsa_mpi_iszero(mpi *);
+static _i8         rsa_mpi_compare(mpi *, mpi *);
+static _u64         rsa_mpi_clz(mpi *);
+static inline void     rsa_mpi_complement(mpi *, _u8);
+static inline void     rsa_mpi_canonicalize(mpi *);
+
+/* Mpi arithmetic functions */
+static _err        rsa_mpi_shift(mpi **, _i64);
+static _err         rsa_mpi_multiply(mpi **, mpi *, mpi *);
+static _err         rsa_mpi_subtract(mpi **, mpi *, mpi *);
+static _err         rsa_mpi_remainder(mpi **, mpi *, mpi *);
+
+/* Rsa functions */
+static _u32        rsa_modinv(mpi *);
+static _err         rsa_monpro(mpi **, mpi *, mpi *, mpi *);
+static _err         rsa_modexp(mpi **, mpi *, mpi *, mpi *);
+
+#endif /* _KM_RSA_H_ */



* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel version 2.6.20.1)
  2007-03-19 16:22 [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel version 2.6.20.1) Tasos Parisinos
@ 2007-03-19 22:58 ` Matt Mackall
  2007-03-20 14:44   ` Tasos Parisinos
  2007-03-20 15:43   ` Paulo Marques
  2007-03-20  0:40 ` Francois Romieu
  1 sibling, 2 replies; 26+ messages in thread
From: Matt Mackall @ 2007-03-19 22:58 UTC (permalink / raw)
  To: Tasos Parisinos; +Cc: herbert, linux-kernel

On Mon, Mar 19, 2007 at 06:22:15PM +0200, Tasos Parisinos wrote:
> +static inline _i32 rsa_max(_i32 x, _i32 y)
> +{
> +    return (x > y)? x: y;
> +}

We've got a max() already. Use tabs.

> +
> +/*
> + * Module loading callback function
> + *
> + * Returns 0 on success or a negative value indicating error
> + */

This comment is not very useful.

> +static _err __init rsa_load(void)
> +{
> +    _u32 i;

Can we use int and u32 instead of _err and _u32, please?

> +    _err retval = RSA_NO_ERR;

And 0.

> +    /* Pre-allocate some auxilliary mpis */
> +    rsa_echo("Preallocating %lu bytes for auxilliary operands\n",
> +         RSA_AUX_SIZE * RSA_AUX_COUNT * sizeof(_u32));

And printk.

> +    memset(&aux, 0, sizeof(aux));
> +    for(i = 0; i < RSA_AUX_COUNT; i++) {
> +        retval = rsa_mpi_alloc(&aux[i], RSA_AUX_SIZE);

kmalloc, please? RSA_AUX_SIZE appears to be in bytes.

> +        if(retval < 0)

We use "for (" and "if (" so they don't look like function calls.

> +            goto rollback;
> +    }
> +           
> +    rsa_echo("RSA cipher algorithm module initialized\n");
> +    return RSA_NO_ERR;
> +
> +/* Free all allocated resources if any errors occur */
> +rollback:
> +    for(i = 0; i < RSA_AUX_COUNT; i++)
> +        rsa_mpi_free(aux[i]);

kfree()

> +/*
> + * Preallocate an mpi. The allocated mpi will be all-zeroed and not
> + * canonicalized.
> + *
> + * Returns 0 on success or a negative value indicating error
> + *
> + * @n:       pointer pointer to the allocated mpi
> + * @limbs: number of allocated limbs (32 bit digits)
> + */
> +static _err rsa_mpi_alloc(mpi ** n, _i32 limbs)

Kerneldoc style is "function_name - short description". We write
pointers as "mpi **n". These things probably all want to be named
mpi_* rather than rsa_mpi_*, as they're not specific to the RSA algorithm.

> +{
> +    mpi * handle;

And here.

> +        rsa_debug("%s: kzalloc failed\n", __FUNCTION__);

printk.

> +static _err rsa_mpi_init(mpi **    n, _u8 * str, _u32 size, _u32 xtra)

If str is an actual string, use char *str.

> +    /* Allocate space for the mpi and its data */
> +    s = (size / 4) + ((size % 4)? 1: 0);

Uhhh.. (size + 1) / 4?

> +    retval = rsa_mpi_alloc(n, s + xtra);

Is this not in bytes?

> +    /* Copy the data */
> +    for(i = size - 1, j = 0; i >= 0; i--, j++)
> +        buf[j / 4] |= ((_u32)str[i] << ((j % 4) * 8));

Ew.

> +        /* Zero the xtra limbs */
> +        else if(size < handle->size)
> +            for(i = size; i < s; i++)
> +                buf[i] = 0;

memset?
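
Untested, but something along the lines of:

	memset(buf + size, 0, (s - size) * sizeof(*buf));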

> +        return RSA_ERR_INVARG;

-EINVAL

> +    buf = (*n)->data;
> +    for(i = size - 1, j = 0; i >= 0; i--, j++)
> +        buf[j / 4] |= ((_u32)str[i] << ((j % 4) * 8));

That mess looks familiar.

> +#define RSA_AUX_COUNT         CONFIG_RSA_AUXCOUNT
> +#define RSA_AUX_SIZE         CONFIG_RSA_AUXSIZE

Just use the config value.

> +#define RSA_MAX_U32        0xFFFFFFFF

I'm sure we've got this somewhere.

> +#define RSA_NO_ERR        0
> +#define RSA_ERR_INVARG        -1
> +#define RSA_ERR_NOMEM        -2

0, -EINVAL, -ENOMEM.

> +#define true            0x01
> +#define false            0x00

Ew.

> +/* Mpi utility functions */
> +static _err        rsa_mpi_alloc(mpi **, _i32);
> +static void         rsa_mpi_free(mpi *);
> +static _err         rsa_mpi_init(mpi **, _u8 *, _u32, _u32);
> +static _err         rsa_mpi_resize(mpi **, _i32, _u8);
> +static _err         rsa_mpi_set(mpi **, _u8 *, _u32);
> +static inline _err     rsa_mpi_copy(mpi **, mpi *);
> +static void         rsa_mpi_print(mpi *, _u8);

Why are you declaring a bunch of static functions in a header file?

-- 
Mathematics is the supreme nostalgia of our time.


* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel version 2.6.20.1)
  2007-03-19 16:22 [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel version 2.6.20.1) Tasos Parisinos
  2007-03-19 22:58 ` Matt Mackall
@ 2007-03-20  0:40 ` Francois Romieu
  2007-03-20 14:11   ` Tasos Parisinos
  1 sibling, 1 reply; 26+ messages in thread
From: Francois Romieu @ 2007-03-20  0:40 UTC (permalink / raw)
  To: Tasos Parisinos; +Cc: herbert, linux-kernel

Tasos Parisinos <t.parisinos@sciensis.com> :
[...]

RSA is slow. syscalls are fast.

Which part of the kernel is supposed to benefit from this code ?

-- 
Ueimor


* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel version 2.6.20.1)
  2007-03-20  0:40 ` Francois Romieu
@ 2007-03-20 14:11   ` Tasos Parisinos
  2007-03-20 15:09     ` James Morris
  2007-03-20 21:43     ` Indan Zupancic
  0 siblings, 2 replies; 26+ messages in thread
From: Tasos Parisinos @ 2007-03-20 14:11 UTC (permalink / raw)
  To: Francois Romieu; +Cc: herbert, linux-kernel

Francois Romieu wrote:

> RSA is slow. syscalls are fast.
> 
> Which part of the kernel is supposed to benefit from this code ?

The main purpose behind the development of this module was to create an in-kernel
system of signed modules. The scenario applies mostly to embedded systems running Linux
where the kernel is physically secure but its filesystem is not, so one may tamper with
executable code.

In such systems we need to detect the tampering in a centralized way (e.g. without using
hash databases etc.). So what you need to do is sign the executable (or the library) with
a private key and have a kernel that will use the public key part to authenticate the
executable against its digital signature.

Of course the signing and the authentication are not done against the whole executable
but against its secure hash, and this takes milliseconds to complete before loading the
code into memory. You see, in such a usage scenario most of the time the kernel spends
will be on hashing (sha1 module) and not on modular exponentiation. Of course this will
not produce any soft lockups.

Such a system cannot depend on userland to do the authentication, for obvious security
reasons; it must be in kernel.

Of course sha1 is slow as well, but there is an sha1 module for anyone who may need it.
That was my idea: we developed it for our own security needs, so why not make it
available for others that may want to use it?

Furthermore, one can use the API to do multi-precision arithmetic, if any kernel module
may need it.
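
To make the intended use concrete, here is a rough sketch (mine, not code
from the patch) of the verification step such an exec-loading hook could
perform with the patch's helpers. sig_bytes/sig_len and mod_bytes/mod_len
are just hypothetical names for the raw signature and the public modulus;
the SHA-1 hashing, PKCS#1 padding checks and key handling are all left out:

    /* Recover the signed digest: m = sig ^ e mod n, with e = 65537 */
    mpi *s = NULL, *e = NULL, *n = NULL, *m = NULL;
    int err;

    err = rsa_mpi_init(&s, sig_bytes, sig_len, 0);
    if (!err)
        err = rsa_mpi_init(&e, "\x01\x00\x01", 3, 0);
    if (!err)
        err = rsa_mpi_init(&n, mod_bytes, mod_len, 0);
    if (!err)
        err = rsa_modexp(&m, s, e, n);
    /* ...then compare the low-order bytes of m against the freshly
     * computed SHA-1 of the executable and rsa_mpi_free() everything
     */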

--
Tasos Parisinos



* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel version 2.6.20.1)
  2007-03-19 22:58 ` Matt Mackall
@ 2007-03-20 14:44   ` Tasos Parisinos
  2007-03-20 15:15     ` Matt Mackall
  2007-03-20 15:43   ` Paulo Marques
  1 sibling, 1 reply; 26+ messages in thread
From: Tasos Parisinos @ 2007-03-20 14:44 UTC (permalink / raw)
  To: Matt Mackall; +Cc: herbert, linux-kernel

Thanks for your comments

> On Mon, Mar 19, 2007 at 06:22:15PM +0200, Tasos Parisinos wrote:
>> +static inline _i32 rsa_max(_i32 x, _i32 y)
>> +{
>> +    return (x > y)? x: y;
>> +}
> 
> We've got a max() already. Use tabs.
> 

This is right, will be fixed, just hate discipline

>> +
>> +/*
>> + * Module loading callback function
>> + *
>> + * Returns 0 on success or a negative value indicating error
>> + */
> 
> This comment is not very useful.
> 

Some of them are just bookmarks for me, i can get rid of them

>> +static _err __init rsa_load(void)
>> +{
>> +    _u32 i;
> 
> Can we use int and u32 instead of _err and _u32, please?
> 
>> +    _err retval = RSA_NO_ERR;
> 
> And 0.

right

> 
>> +    /* Pre-allocate some auxilliary mpis */
>> +    rsa_echo("Preallocating %lu bytes for auxilliary operands\n",
>> +         RSA_AUX_SIZE * RSA_AUX_COUNT * sizeof(_u32));
> 
> And printk.

i made such a printk wrapper not to mess with all the printk instances when i needed to
does this hurt, to be left as is?

> 
>> +    memset(&aux, 0, sizeof(aux));
>> +    for(i = 0; i < RSA_AUX_COUNT; i++) {
>> +        retval = rsa_mpi_alloc(&aux[i], RSA_AUX_SIZE);
> 
> kmalloc, please? RSA_AUX_SIZE appears to be in bytes.
> 

I need such a wrapper because there are other things done in rsa_mpi_alloc
than kmalloc

>> +        if(retval < 0)
> 
> We use "for (" and "if (" so they don't look like function calls.
> 

right, will fix

>> +            goto rollback;
>> +    }
>> +           
>> +    rsa_echo("RSA cipher algorithm module initialized\n");
>> +    return RSA_NO_ERR;
>> +
>> +/* Free all allocated resources if any errors occur */
>> +rollback:
>> +    for(i = 0; i < RSA_AUX_COUNT; i++)
>> +        rsa_mpi_free(aux[i]);
> 
> kfree()
> 

same as above, i need this wrapper


>> +/*
>> + * Preallocate an mpi. The allocated mpi will be all-zeroed and not
>> + * canonicalized.
>> + *
>> + * Returns 0 on success or a negative value indicating error
>> + *
>> + * @n:       pointer pointer to the allocated mpi
>> + * @limbs: number of allocated limbs (32 bit digits)
>> + */
>> +static _err rsa_mpi_alloc(mpi ** n, _i32 limbs)
> 
> Kerneldoc style is "function_name - short description". We write
> pointers as "mpi **n". These things probably all want to be named
> mpi_* rather than rsa_mpi_*, as they're not specific to the RSA algorithm.
> 
>> +{
>> +    mpi * handle;
> 
> And here.
> 
>> +        rsa_debug("%s: kzalloc failed\n", __FUNCTION__);
> 
> printk.
> 
>> +static _err rsa_mpi_init(mpi **    n, _u8 * str, _u32 size, _u32 xtra)
> 
> If str is an actual string, use char *str.
> 
>> +    /* Allocate space for the mpi and its data */
>> +    s = (size / 4) + ((size % 4)? 1: 0);
> 
> Uhhh.. (size + 1) / 4?
> 

i think (size + 3)/4 


>> +    retval = rsa_mpi_alloc(n, s + xtra);
> 
> Is this not in bytes?
> 
>> +    /* Copy the data */
>> +    for(i = size - 1, j = 0; i >= 0; i--, j++)
>> +        buf[j / 4] |= ((_u32)str[i] << ((j % 4) * 8));
> 
> Ew.

not obvious eh? ok will break it apart

> 
>> +        /* Zero the xtra limbs */
>> +        else if(size < handle->size)
>> +            for(i = size; i < s; i++)
>> +                buf[i] = 0;
> 
> memset?

in the first case it broke my results, so i left it for later

> 
>> +        return RSA_ERR_INVARG;
> 
> -EINVAL
> 
>> +    buf = (*n)->data;
>> +    for(i = size - 1, j = 0; i >= 0; i--, j++)
>> +        buf[j / 4] |= ((_u32)str[i] << ((j % 4) * 8));
> 
> That mess looks familiar.
> 

not obvious as well, will break it apart

>> +#define RSA_AUX_COUNT         CONFIG_RSA_AUXCOUNT
>> +#define RSA_AUX_SIZE         CONFIG_RSA_AUXSIZE
> 
> Just use the config value.
> 
>> +#define RSA_MAX_U32        0xFFFFFFFF
> 
> I'm sure we've got this somewhere.
> 

if you could tell me i will fix it

>> +#define RSA_NO_ERR        0
>> +#define RSA_ERR_INVARG        -1
>> +#define RSA_ERR_NOMEM        -2
> 
> 0, -EINVAL, -ENOMEM.
> 
>> +#define true            0x01
>> +#define false            0x00
> 
> Ew.
> 

development leftovers, will fix


>> +/* Mpi utility functions */
>> +static _err        rsa_mpi_alloc(mpi **, _i32);
>> +static void         rsa_mpi_free(mpi *);
>> +static _err         rsa_mpi_init(mpi **, _u8 *, _u32, _u32);
>> +static _err         rsa_mpi_resize(mpi **, _i32, _u8);
>> +static _err         rsa_mpi_set(mpi **, _u8 *, _u32);
>> +static inline _err     rsa_mpi_copy(mpi **, mpi *);
>> +static void         rsa_mpi_print(mpi *, _u8);
> 
> Why are you declaring a bunch of static functions in a header file?
> 

i think the compiler will disagree if i dont, but i will give it a try

> -- 
> Mathematics is the supreme nostalgia of our time.


* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel version 2.6.20.1)
  2007-03-20 14:11   ` Tasos Parisinos
@ 2007-03-20 15:09     ` James Morris
  2007-03-20 15:40       ` Tasos Parisinos
  2007-03-20 21:43     ` Indan Zupancic
  1 sibling, 1 reply; 26+ messages in thread
From: James Morris @ 2007-03-20 15:09 UTC (permalink / raw)
  To: Tasos Parisinos; +Cc: Francois Romieu, herbert, linux-kernel

On Tue, 20 Mar 2007, Tasos Parisinos wrote:

> The main purpose behind the development of this module was to create an
> in-kernel system of signed modules.

I suggest you read this thread:
http://lkml.org/lkml/2007/2/14/164


-- 
James Morris
<jmorris@namei.org>


* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel version 2.6.20.1)
  2007-03-20 14:44   ` Tasos Parisinos
@ 2007-03-20 15:15     ` Matt Mackall
  2007-03-20 16:36       ` Jan Engelhardt
  0 siblings, 1 reply; 26+ messages in thread
From: Matt Mackall @ 2007-03-20 15:15 UTC (permalink / raw)
  To: Tasos Parisinos; +Cc: herbert, linux-kernel

On Tue, Mar 20, 2007 at 04:44:01PM +0200, Tasos Parisinos wrote:
> >>+    /* Pre-allocate some auxilliary mpis */
> >>+    rsa_echo("Preallocating %lu bytes for auxilliary operands\n",
> >>+         RSA_AUX_SIZE * RSA_AUX_COUNT * sizeof(_u32));
> >
> >And printk.
> 
> i made such a printk wrapper not to mess with all the printk instances when 
> i needed to
> does this hurt, to be left as is?

It's not horrible, but it's not pretty either.

> >>+    memset(&aux, 0, sizeof(aux));
> >>+    for(i = 0; i < RSA_AUX_COUNT; i++) {
> >>+        retval = rsa_mpi_alloc(&aux[i], RSA_AUX_SIZE);
> >
> >kmalloc, please? RSA_AUX_SIZE appears to be in bytes.
> 
> I need such a wrapper because there are other things done in rsa_mpi_alloc
> than kmalloc

Did you see my comment about removing all the rsa_ prefixes from the
MPI functions?

> >>+    /* Copy the data */
> >>+    for(i = size - 1, j = 0; i >= 0; i--, j++)
> >>+        buf[j / 4] |= ((_u32)str[i] << ((j % 4) * 8));
> >
> >Ew.
> 
> not obvious eh? ok will break it apart

You probably want to do it using ntohl.
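
An untested sketch of what I mean (be32_to_cpu() being the in-kernel
spelling of ntohl()), assuming size is a multiple of 4; a partial leading
word would still need byte-at-a-time handling:

	#include <asm/unaligned.h>

	for (i = 0; i < size / 4; i++) {
		__be32 w = get_unaligned((__be32 *)(str + size - 4 * (i + 1)));
		buf[i] = be32_to_cpu(w);
	}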

> >>+        buf[j / 4] |= ((_u32)str[i] << ((j % 4) * 8));
> >
> >That mess looks familiar.
> >
> 
> not obvious as well, will break it apart

Well, it probably wants a function call.

> 
> >>+#define RSA_AUX_COUNT         CONFIG_RSA_AUXCOUNT
> >>+#define RSA_AUX_SIZE         CONFIG_RSA_AUXSIZE
> >
> >Just use the config value.
> >
> >>+#define RSA_MAX_U32        0xFFFFFFFF
> >
> >I'm sure we've got this somewhere.
> 
> if you could tell me i will fix it

Hmmm, odd. I can't find one. There are plenty of things that roll
their own though.

> >>+/* Mpi utility functions */
> >>+static _err        rsa_mpi_alloc(mpi **, _i32);
> >>+static void         rsa_mpi_free(mpi *);
> >>+static _err         rsa_mpi_init(mpi **, _u8 *, _u32, _u32);
> >>+static _err         rsa_mpi_resize(mpi **, _i32, _u8);
> >>+static _err         rsa_mpi_set(mpi **, _u8 *, _u32);
> >>+static inline _err     rsa_mpi_copy(mpi **, mpi *);
> >>+static void         rsa_mpi_print(mpi *, _u8);
> >
> >Why are you declaring a bunch of static functions in a header file?
> >
> 
> i think the compiler will disagree if i dont, but i will give it a try

static at the global level marks something as internal to a
compilation unit. We generally put these sorts of private declarations
straight into the .c file.

And we often skip such prototypes entirely if the .c file can be
ordered such that no forward declarations are necessary.

-- 
Mathematics is the supreme nostalgia of our time.


* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel version 2.6.20.1)
  2007-03-20 15:09     ` James Morris
@ 2007-03-20 15:40       ` Tasos Parisinos
  0 siblings, 0 replies; 26+ messages in thread
From: Tasos Parisinos @ 2007-03-20 15:40 UTC (permalink / raw)
  To: James Morris; +Cc: Francois Romieu, herbert, linux-kernel

Ok, from what I read, there are many similarities in objective but I think
that I take a different approach to the solution.

This is a mod for modular exponentiation only, and it is specific and simple,
cut-down and (as much as possible) overhead free. I don't use gpg structs and/or
other code, don't depend in any way on userland, don't care about ELF formats,
or where the signature is, or key management; I leave those up to others
to design.

This is just modular exponentiation, as simple as can be.

Tasos Parisinos
-


* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel version 2.6.20.1)
  2007-03-19 22:58 ` Matt Mackall
  2007-03-20 14:44   ` Tasos Parisinos
@ 2007-03-20 15:43   ` Paulo Marques
  1 sibling, 0 replies; 26+ messages in thread
From: Paulo Marques @ 2007-03-20 15:43 UTC (permalink / raw)
  To: Matt Mackall; +Cc: Tasos Parisinos, herbert, linux-kernel

Matt Mackall wrote:
> [...]
>> +    /* Allocate space for the mpi and its data */
>> +    s = (size / 4) + ((size % 4)? 1: 0);
> 
> Uhhh.. (size + 1) / 4?

You mean "(size + 3) / 4", no?

Overall, I agree with your comments: this file looks like it needs a lot 
more CodingStyle ;)

Redefining standard kernel interfaces (error numbers, memory allocation, 
printk, etc.) is not the right way to get code merged.

-- 
Paulo Marques - www.grupopie.com

"For every problem there is one solution which is simple, neat, and wrong."
H. L. Mencken


* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel version 2.6.20.1)
  2007-03-20 15:15     ` Matt Mackall
@ 2007-03-20 16:36       ` Jan Engelhardt
  0 siblings, 0 replies; 26+ messages in thread
From: Jan Engelhardt @ 2007-03-20 16:36 UTC (permalink / raw)
  To: Matt Mackall; +Cc: Tasos Parisinos, herbert, linux-kernel


On Mar 20 2007 10:15, Matt Mackall wrote:
>On Tue, Mar 20, 2007 at 04:44:01PM +0200, Tasos Parisinos wrote:
>> >>+    /* Pre-allocate some auxilliary mpis */
>> >>+    rsa_echo("Preallocating %lu bytes for auxilliary operands\n",
>> >>+         RSA_AUX_SIZE * RSA_AUX_COUNT * sizeof(_u32));
>> >
>> >And printk.
>> 
>> i made such a printk wrapper not to mess with all the printk instances when 
>> i needed to
>> does this hurt, to be left as is?
>
>It's not horrible, but it's not pretty either.

printk(KERN_DEBUG ...) I say.

>> >>+#define RSA_AUX_COUNT         CONFIG_RSA_AUXCOUNT
>> >>+#define RSA_AUX_SIZE         CONFIG_RSA_AUXSIZE
>> >
>> >Just use the config value.
>> >
>> >>+#define RSA_MAX_U32        0xFFFFFFFF
>> >
>> >I'm sure we've got this somewhere.
>> 
>> if you could tell me i will fix it
>
>Hmmm, odd. I can't find one. There are plenty of things that roll
>their own though.

#include <linux/kernel.h>
#define UINT32_T_MAX ((uint32_t)UINT_MAX)

s/RSA_MAX_U32/UINT32_T_MAX/g perhaps?

>And we often skip such prototypes entirely if the .c file can be
>ordered such that no forward declarations are necessary.

Even if <shrug> deallocation functions come before initialisation?


Jan
-- 


* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel  version 2.6.20.1)
  2007-03-20 14:11   ` Tasos Parisinos
  2007-03-20 15:09     ` James Morris
@ 2007-03-20 21:43     ` Indan Zupancic
  2007-03-21  9:15       ` Tasos Parisinos
  1 sibling, 1 reply; 26+ messages in thread
From: Indan Zupancic @ 2007-03-20 21:43 UTC (permalink / raw)
  To: Tasos Parisinos; +Cc: Francois Romieu, herbert, linux-kernel

On Tue, March 20, 2007 15:11, Tasos Parisinos wrote:
> Francois Romieu wrote:
>
>> RSA is slow. syscalls are fast.
>>
>> Which part of the kernel is supposed to benefit from this code ?
>
> The main purpose behind the development of this module was to create an in-kernel
> system of signed modules. The scenario applies most in embedded systems that are running linux
> where the kernel is physically secure but its filesystem is not, so one may tamper executable
> code.

I'll summarize/rephrase what I've already said in the thread James Morris linked to:

You need a way to protect your kernel binary, or else this whole thing becomes a joke.
(Modify kernel, reboot...)

Assuming you have a secure kernel binary that is tamper proof, why do you need
slow and complex asymmetric encryption again? If you can write protect the kernel,
you can also read protect it (or let the boot loader pass the key to the kernel).
So what stops you from using a simple symmetric key cipher for signing?

Regards,

Indan




* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel      version 2.6.20.1)
  2007-03-20 21:43     ` Indan Zupancic
@ 2007-03-21  9:15       ` Tasos Parisinos
  2007-03-21 12:08         ` Indan Zupancic
  2007-03-21 12:36         ` Indan Zupancic
  0 siblings, 2 replies; 26+ messages in thread
From: Tasos Parisinos @ 2007-03-21  9:15 UTC (permalink / raw)
  To: Indan Zupancic; +Cc: Francois Romieu, herbert, linux-kernel

> Assuming you have a secure kernel binary that is tamper proof, why do you need
> slow and complex asymmetric encryption again? If you can write protect the kernel,
> you can also read protect it (or let the boot loader pass the key to the kernel).
> So what stops you from using a simple symmetric key cipher for signing?

In symmetric cryptography you would give away your key if one could read the kernel binary,
while in asymmetric cryptography one can only get the public key.

Protecting a TripleDES key to high security standards is not as simple as making the kernel
read protected; you need a whole lot more, and that also means hardware (cryptomemories
etc.). So you forget about all this overhead when you use asymmetric cryptography.

Also, this is the way this is done in all implementations, ranging from Linux platforms
(see DigSig@sourceforge for an example, or Debian and Fedora) to Microsoft platforms, as
far as I know.




* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel      version 2.6.20.1)
  2007-03-21  9:15       ` Tasos Parisinos
@ 2007-03-21 12:08         ` Indan Zupancic
  2007-03-21 12:34           ` Tasos Parisinos
  2007-03-21 23:31           ` David Schwartz
  2007-03-21 12:36         ` Indan Zupancic
  1 sibling, 2 replies; 26+ messages in thread
From: Indan Zupancic @ 2007-03-21 12:08 UTC (permalink / raw)
  To: Tasos Parisinos; +Cc: Francois Romieu, herbert, linux-kernel

On Wed, March 21, 2007 10:15, Tasos Parisinos wrote:
>> Assuming you have a secure kernel binary that is tamper proof, why do you need
>> slow and complex asymmetric encryption again? If you can write protect the kernel,
>> you can also read protect it (or let the boot loader pass the key to the kernel).
>> So what stops you from using a simple symmetric key cipher for signing?
>
> In symmetric cryptography you would give away your key if one could read the kernel binary
> while in assymetric one can only get the public key

If you can't read protect your kernel, you can't write protect it either. Of course the
symmetric key would be per kernel, not a single global one.

> Protecting a TripleDES key in high security standards is not as simple as making the kernel
> read protected, you need a whole lot and that also means hardware (cryptomemories e.t.c)
> So you forget about all this overhead when you use assymetric

You need to protect your kernel binary already, adding a key to that doesn't increase the
complexity or safety requirements, so all that hardware safety is already in place.
(And I'd use AES instead of TripleDES.)

> Also this is the way this is done in all implementations ranging from Linux platforms (see
> DigSig@sourceforge for an example, or in
> Debian, Fedora) and in Microsoft platforms as far as i know

Nothing stops you from signing the binaries with an asymmetric key. After checking that
signature the user can sign the binary with his private symmetric key and upload it to
the device.

Greetings,

Indan




* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel   version 2.6.20.1)
  2007-03-21 12:08         ` Indan Zupancic
@ 2007-03-21 12:34           ` Tasos Parisinos
  2007-03-21 13:00             ` Indan Zupancic
  2007-03-21 23:31           ` David Schwartz
  1 sibling, 1 reply; 26+ messages in thread
From: Tasos Parisinos @ 2007-03-21 12:34 UTC (permalink / raw)
  To: Indan Zupancic; +Cc: Francois Romieu, herbert, linux-kernel

Indan Zupancic wrote:
>> Protecting a TripleDES key in high security standards is not as simple as making the kernel
>> read protected, you need a whole lot and that also means hardware (cryptomemories e.t.c)
>> So you forget about all this overhead when you use assymetric
>>     
>
> You need to protect your kernel binary already, adding a key to that doesn't increase the
> complexity or safety requirements, so all that hardware safety is already in place.
> (And I'd use AES instead of TripleDES.)
>
>   
Well, let's assume you have a trapped casing that prevents a flash chip (which holds
the kernel) from being tampered with. Then you have write protection of the bzImage.

When this thing will run, and it will need to check an executable using AES for example
(which is a lot better than TripleDES, I agree), then the key will be, for a time window,
on buses and in memory. Then it can be probed and retrieved by someone.

Then you need cryptomemory.

While with asymmetric cryptography you don't. There are no high-risk data anywhere,
only a public key.

Of course if you have other data that need to be secured, and you already run
on a trusted platform, including all these crypto hardware modules, then you can use
a symmetric scheme.

Tasos Parisinos
-

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel      version 2.6.20.1)
  2007-03-21  9:15       ` Tasos Parisinos
  2007-03-21 12:08         ` Indan Zupancic
@ 2007-03-21 12:36         ` Indan Zupancic
  2007-03-21 13:07           ` Tasos Parisinos
  1 sibling, 1 reply; 26+ messages in thread
From: Indan Zupancic @ 2007-03-21 12:36 UTC (permalink / raw)
  To: Tasos Parisinos; +Cc: Francois Romieu, herbert, linux-kernel

On Wed, March 21, 2007 10:15, Tasos Parisinos wrote:
> Protecting a TripleDES key in high security standards is not as
> simple as making the kernel read
> protected, you need a whole lot and
> that also means hardware (cryptomemories e.t.c)
> So you forget about all this overhead when you use assymetric

Ah, you're talking about fishing the key out of RAM here, right?
My point stays the same for that: If you can't read protect the
kernel RAM, small chance you can write protect it. And then they
can just bypass all signature checking you put in it anyway.

Greetings,

Indan



^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel      version 2.6.20.1)
  2007-03-21 12:34           ` Tasos Parisinos
@ 2007-03-21 13:00             ` Indan Zupancic
  0 siblings, 0 replies; 26+ messages in thread
From: Indan Zupancic @ 2007-03-21 13:00 UTC (permalink / raw)
  To: Tasos Parisinos; +Cc: Francois Romieu, herbert, linux-kernel

On Wed, March 21, 2007 13:34, Tasos Parisinos wrote:
> Indan Zupancic wrote:
>>> Protecting a TripleDES key in high security standards is not as simple as making the kernel
>>> read protected, you need a whole lot and that also means hardware (cryptomemories e.t.c)
>>> So you forget about all this overhead when you use assymetric
>>>
>>
>> You need to protect your kernel binary already, adding a key to that doesn't increase the
>> complexity or safety requirements, so all that hardware safety is already in place.
>> (And I'd use AES instead of TripleDES.)
>>
>>
> Well, lets assume you have a trapped casing that prevents a flash chip
> (which holds
> the kernel) from being tamperred. Then you have write protection of the
> bzimage

Not sure what you mean by "trapped casing"; I thought we were talking about
security from the software side, not the physical one?

> When this thing will run, and it will need to check an executable using
> AES for example
> (which is a lot better than TripleDes, i agree) then the key will be for
> a time window
> onto buses and memory. Then it can be probed and retrieved by someone.
>
> Then you need cryptomemory
>
> While with asymmetric you don't. There are no high-risk data anywhere,
> only a public
> key

Our emails crossed each other, but if you can read kernel RAM you can in
general also modify it, in which case the signature checking can be disabled.

The moment someone with advanced hardware cracking knowledge puts his hands
on your stuff you're screwed anyway. If you don't believe me, try getting
hard security guarantees from hardware vendors. ;-)

> Of course if you have other data that need to be secured, and you
> already run
> on a trusted platform, including all these crypto hardware modules, then
> you can use
> a symmetric scheme

How do you expect to get the thing secure with asymmetric encryption when
you can't protect the kernel? That "other data" is your kernel binary, on
flash or in RAM. So if you can't protect that adequately, then who are you
kidding with signed modules? And the moment you've covered that, you might
as well use symmetric encryption...

Greetings,

Indan



^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel   version 2.6.20.1)
  2007-03-21 12:36         ` Indan Zupancic
@ 2007-03-21 13:07           ` Tasos Parisinos
  2007-03-21 13:59             ` Indan Zupancic
  0 siblings, 1 reply; 26+ messages in thread
From: Tasos Parisinos @ 2007-03-21 13:07 UTC (permalink / raw)
  To: Indan Zupancic; +Cc: Francois Romieu, herbert, linux-kernel


> On Wed, March 21, 2007 10:15, Tasos Parisinos wrote:
>   
>> Protecting a TripleDES key in high security standards is not as
>> simple as making the kernel read
>> protected, you need a whole lot and
>> that also means hardware (cryptomemories e.t.c)
>> So you forget about all this overhead when you use assymetric
>>     
>
> Ah, you're talking about fishing the key out of RAM here, right?
> My point stays the same for that: If you can't read protect the
> kernel RAM, small chance you can write protect it. And then they
> can just bypass all signature checking you put in it anyway.
>   

How can one tamper with (write to) the memory of a booted and running
kernel without using an exploitable bug?

I mean, you can't mess with the bzImage on flash, the secure bootloader
boots it without letting someone alter the (non-crypto) memory while
loading the bzImage into it, and then no one can run something that will
tamper with the system or write anywhere in kernel memory without
exploiting a bug.

I mean, am I missing something here?



^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel      version 2.6.20.1)
  2007-03-21 13:07           ` Tasos Parisinos
@ 2007-03-21 13:59             ` Indan Zupancic
  2007-03-21 14:31               ` Tasos Parisinos
  2007-03-21 14:49               ` Tasos Parisinos
  0 siblings, 2 replies; 26+ messages in thread
From: Indan Zupancic @ 2007-03-21 13:59 UTC (permalink / raw)
  To: Tasos Parisinos; +Cc: Francois Romieu, herbert, linux-kernel

On Wed, March 21, 2007 14:07, Tasos Parisinos wrote:
>
>> On Wed, March 21, 2007 10:15, Tasos Parisinos wrote:
>>
>>> Protecting a TripleDES key in high security standards is not as
>>> simple as making the kernel read
>>> protected, you need a whole lot and
>>> that also means hardware (cryptomemories e.t.c)
>>> So you forget about all this overhead when you use assymetric
>>>
>>
>> Ah, you're talking about fishing the key out of RAM here, right?
>> My point stays the same for that: If you can't read protect the
>> kernel RAM, small chance you can write protect it. And then they
>> can just bypass all signature checking you put in it anyway.
>>
>
> How can one tamper (write) the kernel memory of a booted and running kernel
> without using an exploitable bug?
>
> I mean, you can't mess with the bzImage on flash, the secure bootloader
> boots it without
> letting someone alter the (non crypto-) memory while loading the bzImage
> on it, and then
> no-one can run something that will tamper the system or write anywhere
> on kernel memory
> without exploiting a bug
>
> I mean, am i missing something here?

Depends on what you consider an exploitable bug. Does getting root access
count? If not, then you must make very sure that all possible ways to
modify kernel memory from userspace are thwarted (I don't know what those
are; hopefully loading modules is the only one, but maybe there are other
smart ways).

Assuming one can't write kernel memory, it's also safe to assume kernel memory
can be protected against reads (else the whole keys infrastructure is useless).

But instead of only reading the bus traffic, also modifying it doesn't
seem so far-fetched to me. That modification can be reduced to swapping
the one bit which tells whether the module being loaded has a valid
signature or not. Timing it might be tricky, but that can be automated.

When someone has the hardware in his hands and really want to exploit it, you
lose no matter what you do. At best you can make it harder and more expensive.

In the end my point is that you might think that you can get away with less
security when using RSA, but perhaps in reality you don't. At least when using
symmetric key encryption you're forced to secure the whole thing more.

So design it for symmetric keys. If it turns out that using asymmetric keys is
more practical for whatever reason, fine, use those. But they won't give you
added security.

Greetings,

Indan


P.S. The whole argument of secure bootloader checks the kernel can be
extrapolated to a secure kernel checking a user space program. Why not
letting the kernel check the signature of a monolithic modprobe program,
and let it do all the (RSA) checking. The expected hash of the modprobe
program can be hardcoded in the kernel.
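Something like this is what I have in mind; a rough userspace sketch
(OpenSSL SHA-1, placeholder path and digest, nothing taken from the
patch), with the kernel playing the role of main() in the real thing:

#include <openssl/sha.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Digest of the trusted modprobe build, filled in at kernel build time. */
static const unsigned char expected_sha1[SHA_DIGEST_LENGTH] = { 0 };

static int modprobe_is_trusted(const char *path)
{
    unsigned char digest[SHA_DIGEST_LENGTH];
    unsigned char *buf;
    long len;
    FILE *f = fopen(path, "rb");

    if (!f)
        return 0;
    fseek(f, 0, SEEK_END);
    len = ftell(f);
    rewind(f);
    if (len <= 0) {
        fclose(f);
        return 0;
    }
    buf = malloc(len);
    if (!buf || fread(buf, 1, len, f) != (size_t)len) {
        fclose(f);
        free(buf);
        return 0;
    }
    fclose(f);
    SHA1(buf, len, digest);          /* hash the whole binary */
    free(buf);
    return memcmp(digest, expected_sha1, SHA_DIGEST_LENGTH) == 0;
}

int main(void)
{
    printf("modprobe trusted: %d\n", modprobe_is_trusted("/sbin/modprobe"));
    return 0;
}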



^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel   version 2.6.20.1)
  2007-03-21 13:59             ` Indan Zupancic
@ 2007-03-21 14:31               ` Tasos Parisinos
  2007-03-21 15:10                 ` Indan Zupancic
  2007-03-21 14:49               ` Tasos Parisinos
  1 sibling, 1 reply; 26+ messages in thread
From: Tasos Parisinos @ 2007-03-21 14:31 UTC (permalink / raw)
  To: Indan Zupancic; +Cc: Francois Romieu, herbert, linux-kernel

Indan Zupancic wrote:
> On Wed, March 21, 2007 14:07, Tasos Parisinos wrote:
>   
>>> On Wed, March 21, 2007 10:15, Tasos Parisinos wrote:
>>>
>>>       
>>>> Protecting a TripleDES key in high security standards is not as
>>>> simple as making the kernel read
>>>> protected, you need a whole lot and
>>>> that also means hardware (cryptomemories e.t.c)
>>>> So you forget about all this overhead when you use assymetric
>>>>
>>>>         
>>> Ah, you're talking about fishing the key out of RAM here, right?
>>> My point stays the same for that: If you can't read protect the
>>> kernel RAM, small chance you can write protect it. And then they
>>> can just bypass all signature checking you put in it anyway.
>>>
>>>       
>> How can one tamper (write) the kernel memory of a booted and running kernel
>> without using an exploitable bug?
>>
>> I mean, you can't mess with the bzImage on flash, the secure bootloader
>> boots it without
>> letting someone alter the (non crypto-) memory while loading the bzImage
>> on it, and then
>> no-one can run something that will tamper the system or write anywhere
>> on kernel memory
>> without exploiting a bug
>>
>> I mean, am i missing something here?
>>     
>
> Depends on what you consider an exploitable bug. Does getting root access count?
> If not, then you must make very sure that all possible ways to modify kernel
> memory from userspace are thwarted (I don't know what those are, hopefully
> loading modules is the only one, but maybe there are smart other ways).
>
> Assuming one can't write kernel memory, it's also safe to assume kernel memory
> can be protected against reads (else the whole keys infrastructure is useless).
>
> But instead of only reading the bus traffic also modifying it doesn't seem so
> far fetched to me. That modification can be reduced to swapping one bit which
> tells whether the modules being loaded has a valid signature or not.
> Timing it might be tricky, but that can be automated.
>
> When someone has the hardware in his hands and really want to exploit it, you
> lose no matter what you do. At best you can make it harder and more expensive.
>
> In the end my point is that you might think that you can get away with less
> security when using RSA, but perhaps in reality you don't. At least when using
> symmetric key encryption you're forced to secure the whole thing more.
>
> So design it for symmetric keys. If it turns out that using asymmetric keys is
> more practical for whatever reason, fine, use those. But they won't give you
> added security.
>
> Greetings,
>
> Indan
>
>
> P.S. The whole argument of secure bootloader checks the kernel can be
> extrapolated to a secure kernel checking a user space program. Why not
> letting the kernel check the signature of a monolithic modprobe program,
> and let it do all the (RSA) checking. The expected hash of the modprobe
> program can be hardcoded in the kernel.
>
>
>
>   
I agree that you get no more security than using symmetric, but we
believe you get lower costs, simpler key management (which is a big
headache alone), something tougher to break through (not unbreakable)
and more centralization.

As for modprobe you are right, but we also need to check (apart from
kernel modules) the executables and libraries in the usage scenario.

About time:
On my PC running 2.6.20.3 (2.66 GHz P4, 1 GB RAM) the computation of a
1 Kbit modular exponentiation (with a 32-bit exponent, all bits set, and
a 1024-bit key) took almost 3 ms. That's the time needed to check the
signature of any code loaded in RAM using this module, after it has been
hashed (SHA-1) and the signature extracted from the ELF.
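
For reference, the operation timed above is plain square-and-multiply
modular exponentiation; here is a toy 64-bit sketch of the idea (the
module itself works on 1024-bit multi-precision integers with Montgomery
reduction, so this is only an illustration, not code from the patch):

#include <stdint.h>
#include <stdio.h>

/* Toy right-to-left square-and-multiply: computes base^exp mod m.
 * __uint128_t (gcc/clang) keeps the intermediate products from
 * overflowing; the real module does the same on 32-bit limb arrays. */
static uint64_t modexp(uint64_t base, uint64_t exp, uint64_t m)
{
    uint64_t result = 1 % m;

    base %= m;
    while (exp) {
        if (exp & 1)
            result = (uint64_t)(((__uint128_t)result * base) % m);
        base = (uint64_t)(((__uint128_t)base * base) % m);
        exp >>= 1;
    }
    return result;
}

int main(void)
{
    /* Verifying an RSA signature boils down to sig^e mod n; toy numbers. */
    printf("%llu\n", (unsigned long long)modexp(12345, 65537, 99991));
    return 0;
}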

Best regards
Tasos Parisinos
-

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel   version 2.6.20.1)
  2007-03-21 13:59             ` Indan Zupancic
  2007-03-21 14:31               ` Tasos Parisinos
@ 2007-03-21 14:49               ` Tasos Parisinos
  1 sibling, 0 replies; 26+ messages in thread
From: Tasos Parisinos @ 2007-03-21 14:49 UTC (permalink / raw)
  To: Indan Zupancic; +Cc: Francois Romieu, herbert, linux-kernel

In the previous message the exponent is not 32 bits but 64 bits, sorry.

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel      version 2.6.20.1)
  2007-03-21 14:31               ` Tasos Parisinos
@ 2007-03-21 15:10                 ` Indan Zupancic
  2007-03-21 15:50                   ` Tasos Parisinos
  0 siblings, 1 reply; 26+ messages in thread
From: Indan Zupancic @ 2007-03-21 15:10 UTC (permalink / raw)
  To: Tasos Parisinos; +Cc: Francois Romieu, herbert, linux-kernel

On Wed, March 21, 2007 15:31, Tasos Parisinos wrote:
> Indan Zupancic wrote:
>> On Wed, March 21, 2007 14:07, Tasos Parisinos wrote:
>>> How can one tamper (write) the kernel memory of a booted and running kernel
>>> without using an exploitable bug?
>>>
>>> I mean, you can't mess with the bzImage on flash, the secure bootloader
>>> boots it without
>>> letting someone alter the (non crypto-) memory while loading the bzImage
>>> on it, and then
>>> no-one can run something that will tamper the system or write anywhere
>>> on kernel memory
>>> without exploiting a bug
>>>
>>> I mean, am i missing something here?
>>>
>>
>> Depends on what you consider an exploitable bug. Does getting root access count?
>> If not, then you must make very sure that all possible ways to modify kernel
>> memory from userspace are thwarted (I don't know what those are, hopefully
>> loading modules is the only one, but maybe there are smart other ways).
>>
>> Assuming one can't write kernel memory, it's also safe to assume kernel memory
>> can be protected against reads (else the whole keys infrastructure is useless).
>>
>> But instead of only reading the bus traffic also modifying it doesn't seem so
>> far fetched to me. That modification can be reduced to swapping one bit which
>> tells whether the modules being loaded has a valid signature or not.
>> Timing it might be tricky, but that can be automated.
>>
>> When someone has the hardware in his hands and really want to exploit it, you
>> lose no matter what you do. At best you can make it harder and more expensive.
>>
>> In the end my point is that you might think that you can get away with less
>> security when using RSA, but perhaps in reality you don't. At least when using
>> symmetric key encryption you're forced to secure the whole thing more.
>>
>> So design it for symmetric keys. If it turns out that using asymmetric keys is
>> more practical for whatever reason, fine, use those. But they won't give you
>> added security.
>>
>> Greetings,
>>
>> Indan
>>
>>
>> P.S. The whole argument of secure bootloader checks the kernel can be
>> extrapolated to a secure kernel checking a user space program. Why not
>> letting the kernel check the signature of a monolithic modprobe program,
>> and let it do all the (RSA) checking. The expected hash of the modprobe
>> program can be hardcoded in the kernel.
>>
>>
>>
>>
> I agree that you have no more security that using symmetric
> but we believe you have lower costs, simpler key management
> (which is a big headache alone), tougher to break through
> (not unbreakable) and more centralization

Whether centralization is an advantage or not depends a bit on who you
want to give control over what can and can't be loaded. It might be a
bit easier and simpler for the vendor, but if users have control over
their hardware and thus the keys, it doesn't make any difference. Even
for the vendor it isn't very hard to keep a database with all the keys
and sign modules with the right key when needed.

I don't see where the lower cost or the increased toughness would come
from. Don't forget that you need to protect the stored public key against
modification too (and the boot loader as well).

> As for modprobe u are right but we also need to check (apart
> from kernel modules) the executables and libraries in the
> usage scenario.

What about bytecode programs, self-modifying software and mmap?
One exploitable bug in any program renders all this checking void.

But even then you can move all the checking to a userspace helper program.
(Which can be in initramfs, glued to the kernel binary.)

> About time:
> In my pc system running 2.6.20.3 (2.66 GHz P4, 1G mem) the computation of
> modular exponentiation of 1Kbit (with a 32bit exponent all bits set and
> a 1024 bit key)
> took almost 3ms. That's the time needed to check the signature of any
> code loaded
> in ram using this module, after having it hashed (sha1) and signature
> extracted from elf.

Time was never the problem; the extra code bloat and complexity is.
(Though if you're going to check all binaries, time probably is a
problem too.)

Greetings,

Indan



^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel   version 2.6.20.1)
  2007-03-21 15:10                 ` Indan Zupancic
@ 2007-03-21 15:50                   ` Tasos Parisinos
  2007-03-21 16:36                     ` Indan Zupancic
  0 siblings, 1 reply; 26+ messages in thread
From: Tasos Parisinos @ 2007-03-21 15:50 UTC (permalink / raw)
  To: Indan Zupancic; +Cc: Francois Romieu, herbert, linux-kernel


>> I agree that you have no more security that using symmetric
>> but we believe you have lower costs, simpler key management
>> (which is a big headache alone), tougher to break through
>> (not unbreakable) and more centralization
>>     
>
> It depends a bit on who you want to give control over what can and what
> can't be loaded whether centralization is an advantage or not. It might
> be a bit easier and simpler for the vendor, but if users have control
> over their hardware and thus the keys, it doesn't make any difference.
> Even for the vendor it isn't very hard to keep a database with all keys
> and signing modules with the right key when needed.
>
> I don't see where the lower cost or the increased toughness comes from.
> Don't forget that you need to protect the stored public key against
> modification as well (As well as the boot loader).
>
>   
We developed this for an embedded device running Linux in which it is
crucial to reach the highest level of security possible, keeping in mind
previous experience developing signed executable code under Windows CE.
The end user has nothing to do with the system; there are no logons.

A malicious person may want to alter code on the detachable (and unsafe)
file system. Lots of stuff, including the kernel, will be in a trapped
casing (opening it, probing it, power-analyzing it, heating it, etc.
will result in system suicide and erasure).

If someone alters one device then he can go on and play with it at home.
But if someone finds the key that authenticates the executable code, it
will be possible to attack and tamper with the non-system software on
some of the networked devices.

That is why we can't use symmetric; the risk is a lot greater there. And
of course we can't have one key per device (maybe thousands of them).
>> As for modprobe u are right but we also need to check (apart
>> from kernel modules) the executables and libraries in the
>> usage scenario.
>>     
>
> What about bytecode programs, self modifying software and mmap?
> One exploitable bug in any program renders all this checking void.
>
> But even then you can move all the checking to a userspace helper program.
> (Which can be in initramfs, glued to the kernel binary.)
>
>   
Of course we are talking about a restricted execution environment: no
user logons, the system runs what it is supposed to run, and only that.

If we were to use a userland program then we would probably have to
physically secure the whole filesystem, and that is difficult, costs
more, and is cumbersome (difficult to update).
>> About time:
>> In my pc system running 2.6.20.3 (2.66 GHz P4, 1G mem) the computation of
>> modular exponentiation of 1Kbit (with a 32bit exponent all bits set and
>> a 1024 bit key)
>> took almost 3ms. That's the time needed to check the signature of any
>> code loaded
>> in ram using this module, after having it hashed (sha1) and signature
>> extracted from elf.
>>     
>
> Time was never the problem, the extra code bloat and complexity is.
> (Though if you're going to check all binaries it probably is.)
>
> Greetings,
>
> Indan
>
>
>   
You are not going to check everything at once, only on load. I tell you
again, this is not going to run as a web server, but in a restricted
environment.

As for the code bloat and complexity... well, you know, it's up to you to
use it or leave it; don't include it where you don't need it. I mean, we
created it for our specific use; others may want to use it too (maybe
for the same reasons, who knows), so why not make it available? Isn't
that what open source is about?

And the bottom line: why not have a module and functionality that Linux
competitors provide and advertise?

Best Regards
Tasos Parisinos
-



^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel      version 2.6.20.1)
  2007-03-21 15:50                   ` Tasos Parisinos
@ 2007-03-21 16:36                     ` Indan Zupancic
  2007-03-22  7:47                       ` Tasos Parisinos
  0 siblings, 1 reply; 26+ messages in thread
From: Indan Zupancic @ 2007-03-21 16:36 UTC (permalink / raw)
  To: Tasos Parisinos; +Cc: Francois Romieu, herbert, linux-kernel

On Wed, March 21, 2007 16:50, Tasos Parisinos wrote:
> A malicious person may want to alter code on the detachable (and unsafe)
> file system.
> Lots of stuff including the kernel will be in a trapped casing (opening,
> probing it, power
> analyzing it, heating it etc will result in system suicide and erasure)

What happens if the whole thing is cooled so much that it stops functioning?

What if the part that is supposed to do the system suicide is shot away?

There are all kinds of interesting ways of bypassing such protections; I
wouldn't count on covering all of them (which doesn't mean you shouldn't try).


> If one alters one device then he can go on and play with it at home
> But if one finds the key that authenticates the executable code it will
> be possible to
> attack and tamper the non-system software on some of the networked devices
>
> That is why we can't use symmetric, the risk is a lot greater there. And
> of course
> we cant have one key per device (maybe thousands)

How many devices there are doesn't matter; an RSA key is ten times as big
as an AES key anyway. But maybe you have a more complex system where
having multiple keys indeed isn't possible (e.g. the filesystem is shared
between multiple devices).


>>> As for modprobe u are right but we also need to check (apart
>>> from kernel modules) the executables and libraries in the
>>> usage scenario.
>>>
>>
>> What about bytecode programs, self modifying software and mmap?
>> One exploitable bug in any program renders all this checking void.
>>
>> But even then you can move all the checking to a userspace helper program.
>> (Which can be in initramfs, glued to the kernel binary.)
>>
>>
> Of course we are talking about a restricted execution enviroment, no
> user logons
> the system runs what it is supposed to run, and only that
>
> If we where to use a userland program then we would propably have to
> physically
> secure the whole filesystem, and that is difficult, costs more, and it
> is cumbersome
> (difficult to update)

Not if you put it in initramfs included in the kernel binary.
Depending on how much room you have on the secured flash area,
you'd want to put as much as possible there anyway.


> You are not going to check all at once but only on load. I tell you
> again this is
> not going to be running as a web server, but in a restricted environment

Well, as the filesystem isn't restricted you should be prepared for anything.

Is the RAM in the restricted area or not? Because if it isn't and they
have access to the bus, then it can be tampered with.

> As for the code bloat and complexity... well you know its up to u to use
> it or leave it
> dont include where you don't need it.
> I mean we created for our specific use, other may want to use it to
> (maybe for
> the same reasons, who knows) why not make it available? Isn't that what
> open source
> is about?
>
> And on the bottom line, why not have a module and functionality that Linux
> competitors provide and advertise?

I've nothing against your RSA implementation; it's one of the cleaner and
smaller ones. Merging it is probably a good idea, to stop others from
rolling their own and to have a minimalistic reference implementation.

I do have problems with the security it is assumed to bring to many of
its uses, though.

Depending on the expected lifetime of your product I'd also consider using
something that can't be broken by quantum computers in the (near?) future. ;-)

Good luck,

Indan



^ permalink raw reply	[flat|nested] 26+ messages in thread

* RE: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel version 2.6.20.1)
  2007-03-21 12:08         ` Indan Zupancic
  2007-03-21 12:34           ` Tasos Parisinos
@ 2007-03-21 23:31           ` David Schwartz
  2007-03-22 13:15             ` Indan Zupancic
  1 sibling, 1 reply; 26+ messages in thread
From: David Schwartz @ 2007-03-21 23:31 UTC (permalink / raw)
  To: linux-kernel@vger.kernel.org


> If you can't read protect your kernel, you can't write protect it
> either.

This is so misleading as to basically be false.

It is true that any security scheme that can prevent people from taking
money out of my account can also prevent people from putting money in.
However, the set of people I allow to put money in my account might include
people I don't want to be able to take money out.

As a more concrete counter-example, consider a signed kernel module that I
wish to publically distribute. I can't stop anyone from reading it. I can
certainly keep people from changing the copy running on my machine.

Your point is not even valid for keys residing in RAM. There are perfectly
sensible scenarios in which the same key needs to be in several different
places, some very secure, some not so. Using a symmetric key means that
the most secure use can be compromised by a break of the least secure use.
Consider data that is signed by the kernel and verified by a user-space
program.

DS



^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel   version 2.6.20.1)
  2007-03-21 16:36                     ` Indan Zupancic
@ 2007-03-22  7:47                       ` Tasos Parisinos
  0 siblings, 0 replies; 26+ messages in thread
From: Tasos Parisinos @ 2007-03-22  7:47 UTC (permalink / raw)
  To: Indan Zupancic; +Cc: Francois Romieu, herbert, linux-kernel



> On Wed, March 21, 2007 16:50, Tasos Parisinos wrote:
>   
>> A malicious person may want to alter code on the detachable (and unsafe)
>> file system.
>> Lots of stuff including the kernel will be in a trapped casing (opening,
>> probing it, power
>> analyzing it, heating it etc will result in system suicide and erasure)
>>     
>
> What happens ift he whole thing is cooled so much that it stops functioning?
>
> What if the part that is supposed to do the system suicide is shot away?
>
> There are all kind of interesting ways of bypassing such protections, I
> wouldn't count on covering all of them (which doesn't mean you shouldn't try).
>
>
>   
I really can't comment more on these... you understand that in that case
I would give away some interesting security details ;) But I can tell you
that things have evolved; I am even surprised to see these pieces of
hardware work against lots (I mean lots) of kinds of attacks. If you
search a little you will find lots of hardware security solutions on the
market. Of course, having the money to obtain them is another issue, and
it also needs a lot of paperwork and NDAs.

>> If one alters one device then he can go on and play with it at home
>> But if one finds the key that authenticates the executable code it will
>> be possible to
>> attack and tamper the non-system software on some of the networked devices
>>
>> That is why we can't use symmetric, the risk is a lot greater there. And
>> of course
>> we cant have one key per device (maybe thousands)
>>     
>
> How many devices there are doesn't matter, a RSA key is ten times as big
> as an AES key anyway. But maybe you've a more complex system where having
> multiple keys indeed isn't possible (e.g. the filesystem is shared between
> multiple devices).
>
>
>   
It would be a mess to do decent key management.

>> You are not going to check all at once but only on load. I tell you
>> again this is
>> not going to be running as a web server, but in a restricted environment
>>     
>
> Well, as the filesystem isn't restricted you should be prepared for anything.
>
> Is the RAM in restricted area or not? Because if it isn't and they have
> access to the bus then it can be tampered with.
>
>   
RAM will be restricted; we try to do the best we can on the filesystem.
>> As for the code bloat and complexity... well you know its up to u to use
>> it or leave it
>> dont include where you don't need it.
>> I mean we created for our specific use, other may want to use it to
>> (maybe for
>> the same reasons, who knows) why not make it available? Isn't that what
>> open source
>> is about?
>>
>> And on the bottom line, why not have a module and functionality that Linux
>> competitors provide and advertise?
>>     
>
> I've nothing against your RSA implementation, it's one of the cleaner and
> smaller ones. Merging it is probably a good idea to stop others and to have
> a minimalistic reference implementation.
>
> I've problems with the assumed security it brings to many uses of it though.
>
> Depending on the expected lifetime of your product I'd also consider using
> something that can't be broken by quantum computers in the (near?) future. ;-)
>
> Good luck,
>
> Indan
>
>
>   
Well, in our design the most sensitive data will be inside chips that you
need serious equipment (and money) to tamper with. But these run our own
OS. Even if someone breaks the signed modules system he can do damage,
but not to such an extent. We understand that this is not unbreakable
(yes, no assumed super-security), but we need to make it as hard as we
can for others to intrude. This is going to work as the first security
barrier.

With quantum computers, RSA will be obsolete. Also, elliptic curve is now
becoming more and more popular. But RSA is nowadays in common use,
ranging from payment systems to the military. It will take some years
for this to change.

Thanks for your interest and useful comments

Tasos Parisinos
-





^ permalink raw reply	[flat|nested] 26+ messages in thread

* RE: [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel  version 2.6.20.1)
  2007-03-21 23:31           ` David Schwartz
@ 2007-03-22 13:15             ` Indan Zupancic
  0 siblings, 0 replies; 26+ messages in thread
From: Indan Zupancic @ 2007-03-22 13:15 UTC (permalink / raw)
  To: davids; +Cc: linux-kernel@vger.kernel.org

(Please don't trim me from the CC list if you're replying to what I've said,
thanks.)

On Thu, March 22, 2007 00:31, David Schwartz wrote:
>> If you can't read protect your kernel, you can't write protect it
>> either.
>
> This is so misleading as to basically be false.

Please elaborate. Short of ROM I don't see how you'd achieve it.
Looks more like a misunderstanding than deliberate misleading going on here.


> It is true that any security scheme that can prevent people from taking
> money out of my account can also prevent people from putting money in.
> However, the set of people I allow to put money in my account might include
> people I don't want to be able to take money out.

I wasn't talking about a single security scheme, I was talking about the
possible schemes. Would you trust someone to protect your account if he says
he's able to write protect your account, but not read protect it? And keep
in mind I'm talking about technical abilities, not cases where one access
is chosen to be allowed.

Purposely allowing the one and disallowing the other is a different case.
But there's a difference between opening your door and having a door with
a bad lock.


> As a more concrete counter-example, consider a signed kernel module that I
> wish to publically distribute. I can't stop anyone from reading it. I can
> certainly keep people from changing the copy running on my machine.

Maybe, maybe not. The signature won't stop people from modifying the loaded
kernel, something else has to. And if that 'something' can stop modifications,
it most likely can also read protect a part of kernel RAM too, with little
tweaking. If it can't, I wouldn't trust it to write protect the kernel either.


> Your point is not even valid about keys residing in RAM. There are perfectly
> sensible scenarios in which the same key needs to be in several different
> places, some very secure, some not so. Using a symettric key means that the
> securest use can be compromised by a break of the least-secure use. Consider
> data that is signed by the kernel and verified by a user-space program.

My point is valid, but you seem to confuse it with scenarios where you
want to allow the one and disallow the other, and to use that as an
argument that my point isn't valid.

I already said in another email that symmetric key encryption is only as secure
as RSA if each device has its own key, in the case of signed binaries. I'm also
not trying to argue that all asymmetric crypto uses can be replaced with
symmetric ones, in case you wonder.

What I do try to make clear is that for certain cases there are
alternatives which are as secure as RSA, that a lot of the apparent
weaknesses of that approach are also present with RSA, and that no matter
what you use, you need to have the same basic security in place for both.

Greetings,

Indan



^ permalink raw reply	[flat|nested] 26+ messages in thread

end of thread, other threads:[~2007-03-22 13:16 UTC | newest]

Thread overview: 26+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2007-03-19 16:22 [PATCH RESEND 1/1] crypto API: RSA algorithm patch (kernel version 2.6.20.1) Tasos Parisinos
2007-03-19 22:58 ` Matt Mackall
2007-03-20 14:44   ` Tasos Parisinos
2007-03-20 15:15     ` Matt Mackall
2007-03-20 16:36       ` Jan Engelhardt
2007-03-20 15:43   ` Paulo Marques
2007-03-20  0:40 ` Francois Romieu
2007-03-20 14:11   ` Tasos Parisinos
2007-03-20 15:09     ` James Morris
2007-03-20 15:40       ` Tasos Parisinos
2007-03-20 21:43     ` Indan Zupancic
2007-03-21  9:15       ` Tasos Parisinos
2007-03-21 12:08         ` Indan Zupancic
2007-03-21 12:34           ` Tasos Parisinos
2007-03-21 13:00             ` Indan Zupancic
2007-03-21 23:31           ` David Schwartz
2007-03-22 13:15             ` Indan Zupancic
2007-03-21 12:36         ` Indan Zupancic
2007-03-21 13:07           ` Tasos Parisinos
2007-03-21 13:59             ` Indan Zupancic
2007-03-21 14:31               ` Tasos Parisinos
2007-03-21 15:10                 ` Indan Zupancic
2007-03-21 15:50                   ` Tasos Parisinos
2007-03-21 16:36                     ` Indan Zupancic
2007-03-22  7:47                       ` Tasos Parisinos
2007-03-21 14:49               ` Tasos Parisinos
