kvm.vger.kernel.org archive mirror
* [KVM-AUTOTEST PATCH 0/2] Add KSM test
@ 2009-08-31  9:29 Lukáš Doktor
  2009-08-31  9:35 ` [KVM-AUTOTEST PATCH 1/2] " Lukáš Doktor
                   ` (2 more replies)
  0 siblings, 3 replies; 15+ messages in thread
From: Lukáš Doktor @ 2009-08-31  9:29 UTC (permalink / raw)
  To: KVM list, Autotest mailing list

This patch adds a KSM test. We faced many difficulties that couldn't be 
solved in the usual ways, so please take a look and comment.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [KVM-AUTOTEST PATCH 1/2] Add KSM test
  2009-08-31  9:29 [KVM-AUTOTEST PATCH 0/2] Add KSM test Lukáš Doktor
@ 2009-08-31  9:35 ` Lukáš Doktor
  2009-09-01  8:49   ` Lukáš Doktor
  2009-09-07 12:03   ` Dor Laor
  2009-08-31  9:48 ` [KVM-AUTOTEST PATCH 2/2] " Lukáš Doktor
  2009-09-07 12:37 ` [KVM-AUTOTEST PATCH 0/2] " Uri Lublin
  2 siblings, 2 replies; 15+ messages in thread
From: Lukáš Doktor @ 2009-08-31  9:35 UTC (permalink / raw)
  To: KVM list, Autotest mailing list

allocator.c is a program that allocates pages in memory and allows us to 
fill or test those pages. It is controlled over a socket; a sketch of the 
control protocol follows.
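
For reviewers, a sketch of the protocol (the daemon is started in the 
guest as "/tmp/allocator <size_MB> <port>"): requests are framed as 
"<len>:<command>:<data>;", where <len> counts ":<command>:<data>;", and 
replies come back framed as "<len>:<message>". The stand-alone client 
below is only an illustration; the host/port values and helper names are 
assumptions, not part of the patch:

    # Hypothetical Python 2 client for the allocator daemon (sketch only).
    import socket

    def read_reply(sock):
        size = ""                        # replies are framed "<len>:<message>"
        ch = sock.recv(1)
        while ch != ":":
            size += ch
            ch = sock.recv(1)
        return sock.recv(int(size))

    def run_cmd(sock, command, data=""):
        # request frame: "<len>:<command>:<data>;"
        sock.sendall("%d:%s:%s;" % (len(command) + len(data) + 3, command, data))
        out = read_reply(sock)           # the daemon streams progress lines,
        while not out.startswith("PASS") and not out.startswith("FAIL"):
            out = read_reply(sock)       # so wait for the final PASS/FAIL line
        return out

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(("127.0.0.1", 31284))   # 31284 is the test's alloc_port
    print run_cmd(sock, "fillzero")      # -> "PASS: zero mapped mode"
    print run_cmd(sock, "exit")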

Signed-off-by: Lukáš Doktor <ldoktor@redhat.com>
Signed-off-by: Jiří Župka <jzupka@redhat.com>
---
  client/tests/kvm/allocator.c |  571 ++++++++++++++++++++++++++++++++++++++++++
  1 files changed, 571 insertions(+), 0 deletions(-)
  create mode 100644 client/tests/kvm/allocator.c

diff --git a/client/tests/kvm/allocator.c b/client/tests/kvm/allocator.c
new file mode 100644
index 0000000..89e8ce4
--- /dev/null
+++ b/client/tests/kvm/allocator.c
@@ -0,0 +1,571 @@
+/*
+ * KSM test program.
+ * Copyright(C) 2009 Redhat
+ * Jason Wang (jasowang@redhat.com)
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <syscall.h>
+#include <time.h>
+#include <stdint.h>
+//socket linux
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <signal.h>
+//TODO: socket windows
+
+
+
+#define PS (4096)
+long PAGE_SIZE = PS;
+long intInPage = PS/sizeof(int);
+#define MAP_FLAGS ( MAP_ANON | MAP_SHARED )
+#define PROT_FLAGS ( PROT_WRITE )
+#define FILE_MODE ( O_RDWR | O_CREAT )
+#define LOG_FILE "/var/log/vksmd"
+#define FIFO_FILE "/tmp/vksmd"
+#define MODE 0666
+#define FILE_BASE "/tmp/ksm_file"
+#define MAX_SIZESIZE 6
+#define MAX_COMMANDSIZE 50
+#define BLOCK_COUNT 8
+
+int log_fd = -1;
+int base_fd = -1;
+int checkvalue = 0;
+
+
+//Socket
+struct sockaddr_in sockName;
+struct sockaddr_in clientInfo;
+int mainSocket,clientSocket;
+int port;
+
+socklen_t addrlen;
+
+
+
+
+const uint32_t random_mask = UINT32_MAX>>1;
+uint32_t random_x = 0;
+const uint32_t random_a = 1103515245;
+/* the 2^32 LCG modulus is implicit in uint32_t overflow ("2^32" in C would be XOR, not a power) */
+const uint32_t random_c = 12345;
+
+int statickey = 0;
+int dynamickey = 0;
+
+typedef enum _COMMANDS
+{
+  wrongcommand,
+  ninit,
+  nrandom,
+  nexit,
+  nsrandom,
+  nsrverify,
+  nfillzero,
+  nfillvalue,
+  ndfill,
+  nverify
+} COMMANDS;
+
+void sigpipe (int param)
+{
+  fprintf(stderr,"write error\n");
+  //exit(-1); //uncomment to exit when the network connection is down
+}
+
+int writefull(int socket,char * data,int size){
+  int sz = 0;
+  int ret;
+  while (sz < size){
+    ret = write(socket, data+sz, size-sz);
+    if (ret < 0)
+      return -1; /* don't loop forever on write errors */
+    sz += ret;
+  }
+  return sz;
+}
+
+
+int write_message(int s,char * message){
+  size_t len = strlen(message);
+  char buf[10];
+  sprintf(buf,"%d:",(unsigned int)len);
+  size_t size = strlen(buf);
+
+  struct timeval tv;
+  fd_set writeset;
+  FD_ZERO(&writeset);
+  FD_SET(s, &writeset);
+  int max = s+1;
+  tv.tv_sec = 10;   /* 10 s write timeout */
+  tv.tv_usec = 0;
+  int ret = select(max, NULL, &writeset, NULL, &tv);
+  if (ret <= 0)     /* select error or timeout */
+  {
+    return -1;
+  }
+  if (FD_ISSET(s, &writeset))
+  {
+    if (writefull(s, buf, size) != size){
+      return -1;
+    }
+    if (writefull(s, message, len) != len){
+      return -1;
+    }
+  }
+  return 0;
+}
+
+void log_info(char *str)
+{
+  if (write_message(clientSocket, str) != 0){
+    fprintf(stderr,"write error\n");
+  }
+}
+
+/* fill pages with zero */
+void zero_pages(void **page_array,int npages)
+{
+  int n = 0;
+  for(n=0;n<npages;n++)
+    memset(page_array[n],0,PAGE_SIZE); /* the whole page, in bytes */
+}
+
+/* fill pages with the given value */
+void value_to_pages(void **page_array,int npages,char value)
+{
+  int n = 0;
+  for(n=0;n<npages;n++)
+    memset(page_array[n],value,PAGE_SIZE/sizeof(char));
+}
+
+/* initialise page_array */
+void **map_zero_page(unsigned long npages)
+{
+  void **page_array=(void **)malloc(sizeof(void *)*npages);
+  long n = 0;
+
+  if ( page_array == NULL ) {
+    log_info("page array allocation failed\n");
+    return NULL;
+  }
+
+#if 0
+  /* Map the /dev/zero in order to be detected by KSM */
+  for( n=0 ; n < npages; n++){
+    int i;
+    void *addr=(void *)mmap(0,PAGE_SIZE,PROT_FLAGS,MAP_FLAGS,0,0);
+    if ( addr == MAP_FAILED ){
+      log_info("map failed!\n");
+      for (i=0;i<n;i++)
+	munmap( page_array[i], 0);
+      free(page_array);
+      return NULL;
+    }
+
+    page_array[n] = addr;
+  }
+#endif
+
+  void *addr = (void *)mmap(0,PAGE_SIZE*npages,PROT_FLAGS,MAP_FLAGS,0,0);
+  if (addr == MAP_FAILED){
+    log_info("FAIL: map failed!\n");
+    free(page_array);
+    return NULL;
+  }
+
+  for (n=0;n<npages;n++)
+    page_array[n] = addr+PAGE_SIZE*n;
+
+  zero_pages(page_array,npages);
+
+  return page_array;
+}
+
+/* write one random int at a random offset in each page */
+void random_fill(void **page_array, unsigned long npages)
+{
+  int n = 0;
+  int value = 0;
+  int offset = 0;
+  void *addr = NULL;
+
+  for( n = 0; n < npages; n++){
+    offset = rand() % (intInPage);
+    value = rand();
+    addr = ((int *)page_array[n]) + offset; /* offset is an int index */
+    *((int *)addr) = value;
+  }
+}
+
+
+/*set random series seed*/
+void mrseed(int seed){
+  random_x = seed;
+}
+
+/*Generate random number*/
+int mrand(){
+  random_x  = random_a*random_x+random_c;
+  return random_x & random_mask;
+}
+
+/* Generate random code array */
+int* random_code_array(int nblock)
+{
+  int * randArray = malloc(PAGE_SIZE*nblock);
+  int n = 0;
+  for (;n < nblock;n++){
+    int i = 0;
+    for (;i < intInPage;i++){
+      randArray[n*intInPage+i]=mrand();
+    }
+  }
+  return randArray;
+}
+
+/* fill page with static random series data*/
+void static_random_fill(void **page_array, unsigned long npages,int nblock)
+{
+  mrseed(dynamickey);
+  int* randomArray = random_code_array(nblock);
+  int n = 0;
+  int q = -1;
+  int blocksize = npages/nblock;
+  int offset = 0;
+  void *addr = NULL;
+
+  mrseed(randomArray[0]);
+  for (;n < npages;n++){
+    if (n%(blocksize) == 0) q++;
+    memcpy(page_array[n],&randomArray[q*intInPage],PAGE_SIZE);
+    offset = mrand() % (intInPage);
+    addr = ((int *)page_array[n]) + offset;
+    *((int *)addr) = n;
+  }
+  free(randomArray);
+  return;
+}
+
+/* verify pages against the static random series data */
+int static_random_verify(void **page_array, unsigned long npages,int nblock)
+{
+  int* p = malloc(PAGE_SIZE);
+  mrseed(dynamickey);
+  int* randomArray = random_code_array(nblock);
+  int n = 0;
+  int q = -1;
+  int blocksize = npages/nblock;
+  int offset = 0;
+  void *addr = NULL;
+  char buf[128];
+
+  int ret = 1;
+
+  mrseed(randomArray[0]);
+  for (;n < npages;n++){
+    if (n%(blocksize) == 0) q++;
+    memcpy(p,&randomArray[q*intInPage],PAGE_SIZE);
+    offset = mrand() % (intInPage);
+    p[offset] = n;
+    addr = ((int*)page_array[n]) + offset;
+    int r = memcmp(p,page_array[n],PAGE_SIZE);
+    if (r != 0){
+      for (r = 0;r < intInPage;r++){
+        addr = ((int *)page_array[n]) + r;
+        if (*((int *)addr) != p[r]){
+          sprintf(buf,"verify failed [0x%p] %d instead of %d\n",addr,*((int *)addr),n);
+          log_info(buf);
+          ret = 0;
+        }
+      }
+    }
+  }
+  free(randomArray);
+  free(p);
+  return ret;
+}
+
+
+/* verify value */
+int verify_address_space(void **page_array, unsigned long npages, int checkvalue)
+{
+  int m,n;
+  char buf[128];
+  sprintf(buf,"verify value = %d\n",checkvalue);
+  log_info(buf);
+  if ( checkvalue == -1 ){
+    return 1;
+  }
+  for( n = 0; n < npages; n++ ){
+    for ( m = 0; m < PAGE_SIZE ; m++ ){
+      char *address = (char *)(page_array[n]+m);
+      if (*address != checkvalue) {
+	sprintf(buf,"verify failed [0x%p] %d instead of %d\n", address, *address, checkvalue);
+	log_info(buf);
+	return 0;
+      }
+    }
+  }
+  return 1;
+}
+
+
+/* Parse command from message*/
+COMMANDS parse_command(const char* data,int size,const char** startOfData)
+{
+  char command[MAX_COMMANDSIZE];
+  memset(command,0,MAX_COMMANDSIZE);
+  COMMANDS retc = wrongcommand; /* default when the command is not recognised */
+  int i=0;
+  for(;i < MAX_COMMANDSIZE && data[i] != ':';i++){
+    command[i] = data[i];
+  }
+  *startOfData = &data[i+1];
+
+  if (strcmp(command,"init") == 0){
+    if ((size-i-1) == 7){
+      retc = ninit;
+    }
+  }else if(strcmp(command,"random") == 0){
+    retc = nrandom;
+  }else if(strcmp(command,"srandom") == 0){
+    retc = nsrandom;
+  }else if(strcmp(command,"srverify") == 0){
+    retc = nsrverify;
+  }else if(strcmp(command,"fillzero") == 0){
+    retc = nfillzero;
+  }else if(strcmp(command,"fillvalue") == 0){
+    retc = nfillvalue;
+  }else if(strcmp(command,"verify") == 0){
+      retc = nverify;
+  }else if(strcmp(command,"exit") == 0){
+    retc = nexit;
+  }
+  return retc;
+}
+
+void daemon_loop(void **page_array, unsigned long npages, int socket)
+{
+  COMMANDS com = wrongcommand;
+  char csize[MAX_SIZESIZE+1];  //size max
+  memset(csize,0,MAX_SIZESIZE+1);
+  int end = 0;
+  while(!end){
+
+    /*Data
+    size:xxx:xxx;
+    */
+
+    //Read data size
+    char * data = NULL; /* stays NULL when the size header is invalid */
+    const char * startOfData = NULL;
+
+    int i = 0;
+    for (;(i <= MAX_SIZESIZE) && (i == 0 || csize[i-1] != ':');i++){
+      recv(socket,&csize[i],1,0);
+    }
+    if (i <= MAX_SIZESIZE) { //data is good
+      int size = atoi(csize)-1;
+      data = malloc(size*sizeof(char)+1);
+      int sz = 0;
+      while (sz < size)
+        sz += recv(socket,data+sz,size-sz,0);
+      if (data[size-1] == ';'){//Decode data
+        com = parse_command(data,size,&startOfData);
+      }
+    }
+
+    char buf[128];
+    switch(com){
+    case nfillzero: /* Zero all pages */
+      log_info("into zero mapped mode\n");
+      zero_pages(page_array, npages);
+      checkvalue = 0;
+      log_info("PASS: zero mapped mode\n");
+      break;
+    case nfillvalue: /* Fill all pages with statickey */
+      log_info("fill value statickey\n");
+      checkvalue = statickey;
+      value_to_pages(page_array, npages, checkvalue);
+      sprintf(buf,"PASS: filled by %c\n", statickey);
+      log_info(buf);
+      break;
+    case nrandom: /* Fill all pages with random number */
+      log_info("into random fill mode\n");
+      random_fill(page_array, npages);
+      checkvalue = -1;
+      log_info("PASS: filled by random value\n");
+      break;
+    case nexit: /* Do exit */
+      log_info("PASS: exit\n");
+      end = 1;
+      break;
+    case nverify: /* verify */
+      log_info("verify value\n");
+
+      if (!verify_address_space(page_array,npages,checkvalue)){
+	sprintf(buf,"value %d verify error\n",checkvalue);
+	log_info(buf);
+	sprintf(buf,"FAIL: verification with checkvalue = %x\n", checkvalue);
+	log_info(buf);
+      }else{
+        sprintf(buf,"PASS: verification with checkvalue = %x\n", checkvalue);
+        log_info(buf);
+      }
+      break;
+    case nsrandom:/*Generate static random series*/
+      log_info("fill static random series\n");
+      clock_t starttime = clock();
+      static_random_fill(page_array, npages,BLOCK_COUNT);
+      clock_t endtime = clock();
+      sprintf(buf,"PASS: filling duration = %ld ms\n",(long)(1.0*(endtime-starttime))/(CLOCKS_PER_SEC/1000));
+      log_info(buf);
+      break;
+    case nsrverify: /* verify */
+      log_info("verify value\n");
+
+      if (!static_random_verify(page_array,npages,BLOCK_COUNT)){
+        sprintf(buf,"value %d verify error\n",checkvalue);
+        log_info(buf);
+        log_info("FAIL: random series verification\n");
+      }else{
+        log_info("PASS: random series verification\n");
+      }
+      break;
+    case ninit:/* Parameters */
+      memset(buf,0,5);
+      log_info("Init daemon\n");
+      strncpy(buf,&startOfData[0],3);
+      statickey = atoi(buf);
+      strncpy(buf,&startOfData[3],3);
+      dynamickey = atoi(buf);
+      sprintf(buf,"PASS: Static key %d; Dynamic key %d\n",statickey,dynamickey);
+      log_info(buf);
+      break;
+    default:
+      log_info("FAIL: Wrong command!\n");
+      exit(EBADMSG);
+      break;
+    }
+    free(data);
+  }
+}
+
+int main(int argc,char *argv[])
+{
+  int n = 0;
+  unsigned long npages = 0;
+  int ret;
+  void **page_array = NULL;
+
+
+  void (*prev_fn)(int);
+
+  prev_fn = signal (SIGPIPE,sigpipe);
+
+
+  if (argc != 3){
+    fprintf(stderr,"Usage %s size(MB) port\n",argv[0]);
+    return -1;
+  }
+
+  port = atoi(argv[2]);
+  // Create the socket (see the previous installment)
+  if ((mainSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) == -1)
+  {
+    fprintf(stderr,"Could not create socket!\n");
+    return -1;
+  }
+
+  sockName.sin_family = AF_INET;
+  sockName.sin_port = htons(port);
+  sockName.sin_addr.s_addr = INADDR_ANY;
+
+
+  if (bind(mainSocket, (struct sockaddr *)&sockName, sizeof(sockName)) == -1)
+  {
+    fprintf(stderr,"Could not bind socket!\n");
+    return -1;
+  }
+
+  if (listen(mainSocket, 1) == -1)
+  {
+    fprintf(stderr,"Could not listen socket!\n");
+    return -1;
+  }
+
+  unlink(FIFO_FILE);
+  unlink(LOG_FILE);
+  PAGE_SIZE = getpagesize();
+  intInPage = PAGE_SIZE/sizeof(int);
+  long page = atoi(argv[1]);
+  npages = (page * 1024 * 1024)/PAGE_SIZE;
+
+  ret = daemon(0,0);
+  if(ret == -1){
+    log_info("FAIL: failed to run in daemon mode\n");
+    return -1;
+  }
+
+  addrlen = sizeof(clientInfo);
+
+  clientSocket = accept(mainSocket, (struct sockaddr*)&clientInfo, &addrlen);
+  if (clientSocket == -1)
+  {
+    fprintf(stderr,"Could not connect client\n");
+    return -1;
+  }
+  int set = 1;
+  setsockopt(clientSocket, SOL_SOCKET, SO_KEEPALIVE, (void *)&set, sizeof(int));
+
+  log_info("Initialising zero mapped pages!\n");
+  page_array = map_zero_page(npages);
+  if (page_array == NULL){
+    log_info("FAIL: could not initialise maps\n");
+    return -1;
+  }
+  log_info("PASS: first start\n");
+
+  srand(getpid());
+  daemon_loop(page_array, npages, clientSocket);
+
+
+  log_info("Free page array\n");
+  for(n=0;n<npages;n++){
+    munmap(page_array[n],PAGE_SIZE); /* a length of 0 would make munmap fail */
+  }
+  free(page_array);
+
+  log_info("exit");
+
+  sleep(5);
+
+
+  char ch;
+  while (recv(clientSocket,&ch,1,0) > 0);
+
+  close(clientSocket);
+  close(mainSocket);
+
+  if (prev_fn==SIG_IGN) signal (SIGTERM,SIG_IGN);
+
+  return 0;
+}
+
+
-- 
1.6.2.5


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: [KVM-AUTOTEST PATCH 2/2] Add KSM test
  2009-08-31  9:29 [KVM-AUTOTEST PATCH 0/2] Add KSM test Lukáš Doktor
  2009-08-31  9:35 ` [KVM-AUTOTEST PATCH 1/2] " Lukáš Doktor
@ 2009-08-31  9:48 ` Lukáš Doktor
  2009-09-01  8:50   ` Lukáš Doktor
  2009-09-07 13:00   ` Dor Laor
  2009-09-07 12:37 ` [KVM-AUTOTEST PATCH 0/2] " Uri Lublin
  2 siblings, 2 replies; 15+ messages in thread
From: Lukáš Doktor @ 2009-08-31  9:48 UTC (permalink / raw)
  To: KVM list, Autotest mailing list

This is the actual KSM test.

It allows testing the merging and splitting of pages in serial mode, 
parallel mode, or both. You can also specify an overcommit ratio for KSM 
overcommit testing.

We were forced to destroy all previously defined VMs and create them 
inside the test (similar to stress_boot), because we don't know in 
advance how many machines will be required during VM preparation.

The second nasty thing is filling the memory from the guests. We didn't 
find a better way to test filled memory without the python (kvm-autotest) 
side falling over. This version keeps filling until only a small reserve 
remains, then destroys the previous machines and lets the current machine 
finish the work. The sizing arithmetic is illustrated below.
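
To illustrate the sizing logic in run_ksm, here is a worked example only; 
the concrete host numbers are assumptions, while host_reserve, 
guest_reserve and the defaults max_vms = 2 and overcommit = 2.0 come from 
the test itself (the 32-bit adjustments are omitted):

    # Worked example of run_ksm's serial-mode sizing (illustrative numbers).
    host_reserve = 256                        # MB kept free for the host
    guest_reserve = 256                       # MB the allocator leaves unused
    overcommit = 2.0                          # default ksm_overcommit_ratio
    max_vms = 2                               # default max_vms
    host_mem = 4096 - host_reserve            # a 4 GB host -> 3840 MB usable
    vmsc = max(int(overcommit) + 1, max_vms)  # -> 3 guests
    mem = int(overcommit * host_mem / vmsc)   # -> 2560 MB per guest
    ksm_size = mem - guest_reserve            # -> 2304 MB filled per guest
    print "%d guests x %dM each, allocator fills %dM" % (vmsc, mem, ksm_size)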

Signed-off-by: Lukáš Doktor <ldoktor@redhat.com>
Signed-off-by: Jiří Župka <jzupka@redhat.com>
---
  client/tests/kvm/kvm.py               |    2 +
  client/tests/kvm/kvm_tests.cfg.sample |   17 +
  client/tests/kvm/kvm_tests.py         |  548 +++++++++++++++++++++++++++++++++
  3 files changed, 567 insertions(+), 0 deletions(-)

diff --git a/client/tests/kvm/kvm.py b/client/tests/kvm/kvm.py
index 4930e80..b9839df 100644
--- a/client/tests/kvm/kvm.py
+++ b/client/tests/kvm/kvm.py
@@ -53,6 +53,8 @@ class kvm(test.test):
                  "yum_update":   test_routine("kvm_tests", 
"run_yum_update"),
                  "autotest":     test_routine("kvm_tests", "run_autotest"),
                  "kvm_install":  test_routine("kvm_install", 
"run_kvm_install"),
+                "ksm":
+                                test_routine("kvm_tests", "run_ksm"),
                  "linux_s3":     test_routine("kvm_tests", "run_linux_s3"),
                  "stress_boot":  test_routine("kvm_tests", 
"run_stress_boot"),
                  "timedrift":    test_routine("kvm_tests", 
"run_timedrift"),
diff --git a/client/tests/kvm/kvm_tests.cfg.sample b/client/tests/kvm/kvm_tests.cfg.sample
index a83ef9b..f4a41b9 100644
--- a/client/tests/kvm/kvm_tests.cfg.sample
+++ b/client/tests/kvm/kvm_tests.cfg.sample
@@ -100,6 +100,23 @@ variants:
                  test_name = disktest
                  test_control_file = disktest.control

+    - ksm:
+        # Don't preprocess any vms as we need to change their params
+        vms = ''
+        image_snapshot = yes
+        kill_vm_gracefully = no
+        type = ksm
+        variants:
+            - ratio_3:
+                ksm_ratio = 3
+            - ratio_10:
+                ksm_ratio = 10
+        variants:
+            - serial:
+                ksm_test_size = "serial"
+            - paralel:
+                ksm_test_size = "paralel"
+
      - linux_s3:     install setup
          type = linux_s3

diff --git a/client/tests/kvm/kvm_tests.py b/client/tests/kvm/kvm_tests.py
index b100269..ada4c6b 100644
--- a/client/tests/kvm/kvm_tests.py
+++ b/client/tests/kvm/kvm_tests.py
@@ -462,6 +462,554 @@ def run_yum_update(test, params, env):

      session.close()

+def run_ksm(test, params, env):
+    """
+    Test how KSM (Kernel Shared Memory) behaves when more than the physical
+    memory is used. The second part also tests how KVM handles the situation
+    when the host runs out of memory (the expected behaviour is to pause the
+    guest system, wait until some process returns the memory and bring the
+    guest back to life).
+
+    @param test: kvm test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    # We are going to create the main VM so we use kvm_preprocess functions
+    # FIXME: not a nice thing
+    import kvm_preprocessing
+    import random
+    import socket
+    import select
+    import math
+
+    class allocator_com:
+        """
+        This class is used for communication with the allocator
+        """
+        def __init__(self, vm, _port, _host='127.0.0.1'):
+            self.vm = vm
+            self.PORT = _port
+            self.HOST = _host
+            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            self.isConnect = False
+
+        def __str__(self):
+            return self.vm + ":" + self.HOST + ":" + str(self.PORT)
+
+        def connect(self):
+            print self
+            logging.debug("ALLOC: connect to %s", self.vm)
+            try:
+                self.socket.connect((self.HOST, self.PORT))
+            except:
+                raise error.TestFail("ALLOC: Could not establish the "\
+                                     "communication with %s" % (self.vm))
+            self.isConnect = True
+
+        def isConnected(self):
+            return self.isConnect;
+
+        def readsize(self):
+            read,write,error = select.select([self.socket.fileno()],[],[],0.5)
+            size = 0
+            if (self.socket.fileno() in read):
+                data = self.socket.recv(1);
+                size = "";
+                while data[0] != ':':
+                    size = size + data[0]
+                    data = self.socket.recv(1)
+            return int(size)
+
+        def _recv(self):
+            msg = ""
+            read, write, error = select.select([self.socket.fileno()],\
+                                               [], [], 0.5)
+            if (self.socket.fileno() in read):
+                size = self.readsize()
+                msg = self.socket.recv(size)
+                if (len(msg) < size):
+                    raise error.TestFail("ALLOC: Could not receive the message")
+
+            logging.debug("ALLOC: output '%s' from %s" % (msg, self.vm))
+            return msg
+
+        def recv(self, wait=1, loops=20):
+            out = ""
+            log = ""
+            while not out.startswith("PASS") and not out.startswith("FAIL"):
+                logging.debug("Sleep(%d)" % (wait))
+                time.sleep(wait)
+                log += out
+                out = self._recv()
+
+                if loops == 0:
+                    logging.error(repr(out))
+                    raise error.TestFail("Command wasn't finished before the deadline")
+                loops = loops - 1
+
+            if not out.startswith("PASS"):
+                logging.error("Allocator failed on guest %s\nAttaching the "\
+                              "recent log" % (self.vm))
+                raise error.TestFail(log)
+
+            return out
+
+
+        def send(self, command, data=""):
+            msg = str(len(command) + len(data) + 3)
+            msg += ":" + command + ":" + data + ";"
+            logging.debug("ALLOC: execute %s on %s" %(repr(msg), self.vm))
+            try:
+                self.socket.sendall(msg)
+            except:
+                raise error.TestFail("ALLOC: Could not send the message")
+
+        def disconnect(self):
+            logging.debug("ALLOC: disconnect")
+            self.send("exit")
+            self.recv()
+            time.sleep(5)
+            self.socket.close()
+            self.isConnect = False
+
+    def get_stat(lvms):
+        """
+        Get statistics in format:
+        Host: memfree = XXXM; Guests memsh = {XXX,XXX,...}
+
+        @params lvms: List of VMs
+        """
+        if not isinstance(lvms, list):
+            raise error.TestError("get_stat: parameter has to be a proper list")
+
+        try:
+            stat = "Host: memfree = "
+            stat += str(int(os.popen("cat /proc/meminfo | grep MemFree")\
+                                     .readline().split()[1]) / 1024) + "M; "
+            stat += "swapfree = "
+            stat += str(int(os.popen("cat /proc/meminfo | grep SwapFree")\
+                                     .readline().split()[1]) / 1024) + "M; "
+        except:
+            raise error.TestFail("Could not fetch free memory info")
+
+
+        stat += "Guests memsh = {"
+        for vm in lvms:
+            try:
+                cmd = "cat /proc/%d/statm" % vm.pid
+                shm = int(os.popen(cmd).readline().split()[2])
+                # statm stores information in pages; recalculate to MB (4 KB pages)
+                shm = shm * 4 / 1024
+                stat += "%dM; " % (shm)
+            except:
+                raise error.TestError("Could not fetch shmem info from proc")
+        stat = stat[0:-2] + "}"
+        return stat
+
+
+
+
+
+    logging.info("Starting phase 0: Initialization")
+    # host_reserve: mem reserve kept for the host system to run
+    host_reserve = 256
+    # guest_reserve: mem reserve which is not used by allocator on the guests
+    guest_reserve = 256
+    max_alloc = 10
+    max_vms = params.get("max_vms")
+    if max_vms:
+        max_vms = int(max_vms)
+    else:
+        max_vms = 2
+    overcommit = params.get("ksm_overcommit_ratio")
+    if overcommit:
+        overcommit = float(overcommit)
+    else:
+        overcommit = 2.0
+    # vmsc: count of all used VMs
+    vmsc = int(overcommit) + 1
+    vmsc = max(vmsc, max_vms)
+
+    if (params['ksm_test_size'] == "paralel") :
+        host_mem = (int(os.popen("grep MemTotal: /proc/meminfo")\
+                       .readline().split()[1]) / 1024 - host_reserve)
+        vmsc = 1
+        overcommit = 1
+        mem = host_mem
+        # 32bit system adjustment
+        if not params['image_name'].endswith("64"):
+            logging.debug("Probably i386 guest architecture, "\
+                          "max allocator mem = 2G")
+            # Guest can have more than 2G but kvm mem + 1MB (allocator itself) can't
+            if (host_mem > 2048):
+                mem = 2047
+
+
+        if os.popen("uname -i").readline().startswith("i386"):
+            logging.debug("Host is i386 architecture, max guest mem is 2G")
+            # Guest system with qemu overhead (64M) can't have more than 2G
+            if mem > 2048 - 64:
+                mem = 2048 - 64
+
+    else:
+        host_mem = (int(os.popen("grep MemTotal: /proc/meminfo")\
+                       .readline().split()[1]) / 1024 - host_reserve)
+        # mem: Memory of the guest systems. Maximum must be less than the
+        # amount of the host's physical ram
+        mem = int(overcommit * host_mem / vmsc)
+
+        # 32bit system adjustment
+        if not params['image_name'].endswith("64"):
+            logging.debug("Probably i386 guest architecture, "\
+                          "max allocator mem = 2G")
+            # Guest can have more than 2G but kvm mem + 1MB (allocator itself) can't
+            if mem-guest_reserve-1 > 2048:
+                vmsc = int(math.ceil((host_mem*overcommit)/(2048.0+guest_reserve)))
+                mem = int(math.floor(host_mem*overcommit/vmsc))
+
+        if os.popen("uname -i").readline().startswith("i386"):
+            logging.debug("Host is i386 architecture, max guest mem is 2G")
+            # Guest system with qemu overhead (64M) can't have more than 2G
+            if mem > 2048 - 64:
+                vmsc = int(math.ceil((host_mem*overcommit)/(2048 - 64.0)))
+                mem = int(math.floor(host_mem*overcommit/vmsc))
+
+
+    logging.info("overcommit = %f" % (overcommit))
+    logging.info("true overcommit = %f " % (float(vmsc*mem) / float(host_mem)))
+    logging.info("host mem = %dM" % (host_mem))
+    logging.info("mem = %dM" % (mem))
+    logging.info("swap = %dM" %\
+                 (int(os.popen("cat /proc/meminfo | grep SwapTotal")\
+                              .readline().split()[1]) / 1024))
+    logging.info("max_vms = %d" % (max_vms))
+    logging.info("vmsc = %d" % (vmsc))
+
+    # Generate unique keys for random series
+    skeys = []
+    dkeys = []
+    for i in range(0, max(vmsc, max_alloc)):
+        key = "%03s" % (random.randrange(0,999))
+        while key in skeys:
+            key = "%03s" % (random.randrange(0,999))
+        skeys.append(key)
+
+        key = "%03s" % (random.randrange(0,999))
+        while key in dkeys:
+            key = "%03s" % (random.randrange(0,999))
+        dkeys.append(key)
+
+    lvms = []
+    lsessions = []
+    lallocators = []
+    alloc_port = 31284
+
+    # As we don't know the number and memory amount of VMs in advance, we need
+    # to specify and create them here (FIXME: not a nice thing)
+    params['mem'] = mem
+    params['vms'] = params.get("main_vm")
+    # ksm_size: amount of memory used by allocator
+    ksm_size = mem - guest_reserve
+    logging.info("ksm_size = %dM" % (ksm_size))
+
+
+    params['redirs'] += ' alloc0'
+    params['guest_port_alloc0'] = str(alloc_port)
+
+    if (params['ksm_test_size'] == "paralel") :
+        for j in range(1, max_alloc):
+            params['redirs'] += ' alloc' + str(j)
+            params['guest_port_alloc' + str(j)] = str(alloc_port + j)
+
+    # Creating of the first guest
+    kvm_preprocessing.preprocess_vm(test, params, env, params['vms'])
+    lvms.append(kvm_utils.env_get_vm(env, params.get("main_vm")))
+    if not lvms[0]:
+        raise error.TestError("VM object not found in environment")
+    if not lvms[0].is_alive():
+        raise error.TestError("VM seems to be dead; Test requires a living VM")
+
+    logging.info("Booting the first guest %s" % lvms[0].name)
+
+    lsessions.append(kvm_utils.wait_for(lvms[0].ssh_login, 360, 0, 2))
+    if not lsessions[0]:
+        raise error.TestFail("Could not log into first guest")
+
+
+    lallocators.append(allocator_com(lvms[0].name, lvms[0].redirs[alloc_port]))
+    if not lallocators[0]:
+        raise error.TestFail("Could not create allocator_com class for vm1")
+
+
+
+    # Creating of other guest systems
+    for i in range(1, vmsc):
+        vm_name = "vm" + str(i + 1)
+        # Last VM is later used to run more allocators simultaneously
+        """for j in range(1, max_alloc):
+            params['redirs'] += ' alloc' + str(j)
+            params['guest_port_alloc' + str(j)] = str(alloc_port + j)"""
+
+        lvms.append(lvms[0].clone(vm_name, params))
+        kvm_utils.env_register_vm(env, vm_name, lvms[i])
+        params['vms'] += " " + vm_name
+
+        logging.info("Booting guest %s" % lvms[i].name)
+        if not lvms[i].create():
+            raise error.TestFail("Cannot create VM %s" % lvms[i].name)
+        if not lvms[i].is_alive():
+            raise error.TestError("VM %s seems to be dead; Test requires a "\
+                                  "living VM" % lvms[i].name)
+
+        lsessions.append(kvm_utils.wait_for(lvms[i].ssh_login, 360, 0, 2))
+        if not lsessions[i]:
+            raise error.TestFail("Could not log into guest %s" % lvms[i].name)
+
+        lallocators.append(allocator_com(lvms[i].name,\
+                                         lvms[i].redirs[alloc_port]))
+        if not lallocators[i]:
+            raise error.TestFail("Could not create allocator_com class for %s"\
+                                 % (lvms[i].name))
+
+
+    # Let systems take a rest :-)
+    time.sleep(vmsc * 2)
+    logging.info(get_stat(lvms))
+
+    # Copy the allocator.c into guests
+    pwd = os.path.join(os.environ['AUTODIR'],'tests/kvm')
+    vksmd_src = os.path.join(pwd, "allocator.c")
+    dst_dir = "/tmp"
+    for vm in lvms:
+        if not vm.scp_to_remote(vksmd_src, dst_dir):
+            raise error.TestFail("Remote scp failed %s" % (vm.name))
+    logging.info("Phase 0 => passed")
+
+    def phase_1():
+        """ Initialize the virtual machines """
+        logging.info("Starting phase 1: filling with 0")
+        logging.info("Preparing the guests and filling the pages with zeros")
+        for session in lsessions:
+            vm = lvms[lsessions.index(session)]
+            allocator = lallocators[lsessions.index(session)]
+            # Build the test suite
+            ret = session.get_command_status("gcc -o /tmp/allocator "\
+                                             "/tmp/allocator.c",\
+                                             timeout=300)
+            if ret == None or ret:
+                raise error.TestFail("Failed to build vksmd in the %s"\
+                                     % (vm.name))
+
+            # Start the daemon
+            ret = session.get_command_status("/tmp/allocator %d %d" % (ksm_size,\
+                                             alloc_port))
+            if ret == None:
+                raise error.TestFail("Could not run vksmd in guest %s"\
+                                     % (vm.name))
+            if ret:
+                raise error.TestFail("Could not run vksmd in %s errno: %d"\
+                                      % (vm.name, ret))
+
+            ret = session.get_command_status("iptables -F;"\
+                                             "iptables -P INPUT ACCEPT;")
+
+            allocator.connect()
+            allocator.recv((ksm_size / 200), 100)
+
+            # Let ksmd work (until shared mem reaches the expected value)
+            shm = 0
+            i = 0
+            cmd = "cat /proc/%d/statm" % vm.pid
+            while shm < ksm_size:
+                if i > 64:
+                    logging.info(get_stat(lvms))
+                    raise error.TestError("SHM didn't merge the memory "\
+                                          "before the deadline")
+                logging.debug("Sleep(%d)" % (ksm_size / 200))
+                time.sleep(ksm_size / 200)
+                try:
+                    shm = int(os.popen(cmd).readline().split()[2])
+                    shm = shm * 4 / 1024
+                    i = i + 1
+                except:
+                    raise error.TestError("Could not fetch shmem info "
+                                          "from /proc")
+
+        # Keep some reserve
+        time.sleep(ksm_size / 200)
+
+        # Set allocator keys
+        for i in range(0, vmsc):
+            lallocators[i].send("init", "%s%s" % (skeys[i], dkeys[i]))
+            lallocators[i].recv(1, 10)
+        logging.info(get_stat(lvms))
+        logging.info("Phase 1 => passed")
+
+    def phase_2():
+        """ Separate the first guest's memory by generating a special random series """
+        logging.info("Starting phase 2: Split the pages on the first guest")
+
+        lallocators[0].send("srandom")
+        out = lallocators[0].recv(ksm_size / 500, 50)
+        out = int(out.split()[4])
+        logging.info("PERFORMANCE: %dMB * 1000 / %dms = %dMB/s"\
+                     % (ksm_size, out, (ksm_size * 1000 / out)))
+        logging.info(get_stat(lvms))
+        logging.info("Phase 2 => passed")
+
+    def phase_3():
+        """ Sequential split of pages on guests up to the memory limit """
+        logging.info("Starting phase 3a: Sequential split of pages on guests "\
+                     "up to the memory limit")
+        last_vm = 0
+        for i in range(1, vmsc):
+            vm = lvms[i]
+            session = lsessions[i]
+            allocator = lallocators[i]
+
+            allocator.send("srandom")
+            out = ""
+            while not out.startswith("PASS") and not out.startswith("FAIL"):
+                free_mem = int(os.popen("grep MemFree /proc/meminfo")\
+                             .readline().split()[1])
+                logging.debug("FreeMem = %d" % (free_mem))
+                # We need to keep some memory for python to run.
+                if free_mem < 32000:
+                    logging.debug("Only %s free memory, killing 0 - %d hosts"\
+                                  % (free_mem, (i-1)))
+                    for j in range(0, i):
+                        lvms[j].destroy(gracefully = False)
+                    last_vm = i
+                    break
+                out = allocator._recv()
+            if last_vm != 0:
+                break
+
+        allocator.recv(mem / 500, 50)
+        logging.info("Memory filled by the guest %s" % (vm.name))
+        logging.info("Phase 3a => passed")
+
+        """ Check if the memory in the fully loaded guest is all right """
+        logging.info("Starting phase 3b")
+        allocator.send("srverify")
+        allocator.recv(mem / 200, 50)
+        allocator.disconnect()
+        # We are going to use the last VM later
+        if i != (vmsc):
+            session.close()
+            vm.destroy(gracefully = False)
+            for i in range(last_vm + 1, vmsc):
+                lallocators[i].send("verify")
+                lallocators[i].recv(mem / 200, 50)
+                lallocators[i].disconnect()
+                # We are going to use the last VM later
+                if i != (vmsc - 1):
+                    lsessions[i].close()
+                    lvms[i].destroy(gracefully = False)
+        logging.info(get_stat([lvms[i]]))
+        logging.info("Phase 3b => passed")
+
+    def phase_4():
+        """ Parallel page splitting """
+        logging.info("Phase 4: Parallel page splitting")
+        # We have to wait until the allocator is finished (it waits 5 seconds
+        # to clean up the socket)
+
+        session = lsessions[0]
+        vm = lvms[0]
+
+        ret = session.get_command_status("gcc -o /tmp/allocator "\
+                                             "/tmp/allocator.c",\
+                                             timeout=300)
+        if ret == None or ret:
+            raise error.TestFail("Failed to build vksmd in the %s"\
+                                  % (vm.name))
+
+        for all in lallocators:
+            if all.isConnected():
+                all.disconnect()
+
+        del lallocators[:]
+        ret = session.get_command_status("iptables -F;"\
+                                    "iptables -P INPUT ACCEPT;")
+
+        for i in range(0, max_alloc):
+            ret = session.get_command_status("/tmp/allocator %d %d"
+                                   % (ksm_size / max_alloc, alloc_port + i))
+            if ret == None:
+                raise error.TestFail("Could not run vksmd in guest %s"\
+                                     % (vm.name))
+            if ret:
+                raise error.TestFail("Could not run allocator in %s errno: %d"\
+                                     % (vm.name, ret))
+
+            lallocators.append(allocator_com(vm.name,\
+                                             vm.redirs[alloc_port + i]))
+            if not lallocators[i]:
+                raise error.TestFail("Could not create allocator_com class for"\
+                                     " %s" % (vm.name))
+
+        logging.info("Phase 4a: Simultaneous merging")
+        for i in range(0, max_alloc):
+            lallocators[i].connect()
+
+        for i in range(0, max_alloc):
+            lallocators[i].recv((ksm_size / 200), 100)
+        # Wait until ksmd merges the pages (3 x ksm_size / 3)
+        shm = 0
+        i = 0
+        cmd = "cat /proc/%d/statm" % vm.pid
+        while shm < ksm_size:
+            if i > 64:
+                logging.info(get_stat(lvms))
+                raise error.TestError("SHM didn't merge the memory before the deadline")
+            logging.debug("Sleep(%d)" % (ksm_size / 200))
+            time.sleep(ksm_size / 200)
+            try:
+                shm = int(os.popen(cmd).readline().split()[2])
+                shm = shm * 4 / 1024
+            except:
+                raise error.TestError("Could not fetch shmem info from proc")
+        logging.info(get_stat([vm]))
+
+
+        logging.info("Phase 4b: Simultaneous splitting")
+        # Set keys
+        for i in range(0, max_alloc):
+            lallocators[i].send("init", "%s%s" % (skeys[i], dkeys[i]))
+            lallocators[i].recv(1, 10)
+
+        # Actual splitting
+        for i in range(0, max_alloc):
+            lallocators[i].send("srandom")
+
+        for i in range(0, max_alloc):
+            out = lallocators[i].recv(ksm_size / 500, 50)
+            out = int(out.split()[4])
+            logging.info("PERFORMANCE: %dMB * 1000 / %dms = %dMB/s"\
+                         % (ksm_size, out, (ksm_size * 1000 / out / max_alloc)))
+        logging.info(get_stat([vm]))
+
+        logging.info("Phase 4c: Simultaneous verification")
+        for i in range(0, max_alloc):
+            lallocators[i].send("srverify")
+        for i in range(0, max_alloc):
+            lallocators[i].recv(mem / 200, 50)
+        logging.info(get_stat([vm]))
+
+        logging.info("Phase 4 => passed")
+        # Clean-up
+        for i in range(0, max_alloc):
+            lallocators[i].disconnect()
+        session.close()
+        vm.destroy(gracefully = False)
+
+    if params['ksm_test_size'] == "paralel":
+        phase_4()
+    elif params['ksm_test_size'] == "serial":
+        phase_1()
+        phase_2()
+        phase_3()
+

  def run_linux_s3(test, params, env):
      """
-- 
1.6.2.5



^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: [KVM-AUTOTEST PATCH 1/2] Add KSM test
  2009-08-31  9:35 ` [KVM-AUTOTEST PATCH 1/2] " Lukáš Doktor
@ 2009-09-01  8:49   ` Lukáš Doktor
  2009-09-07 12:03   ` Dor Laor
  1 sibling, 0 replies; 15+ messages in thread
From: Lukáš Doktor @ 2009-09-01  8:49 UTC (permalink / raw)
  To: KVM list, Autotest mailing list

[-- Attachment #1: Type: text/plain, Size: 86 bytes --]

I'm sorry, but Thunderbird apparently crippled the patch. Resending it as 
an attachment.

[-- Attachment #2: 0001-Allocator-is-a-program-which-allocates-pages-in-the.patch --]
[-- Type: text/x-patch; name="0001-Allocator-is-a-program-which-allocates-pages-in-the.patch", Size: 13544 bytes --]


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: [KVM-AUTOTEST PATCH 2/2] Add KSM test
  2009-08-31  9:48 ` [KVM-AUTOTEST PATCH 2/2] " Lukáš Doktor
@ 2009-09-01  8:50   ` Lukáš Doktor
  2009-09-07 13:00   ` Dor Laor
  1 sibling, 0 replies; 15+ messages in thread
From: Lukáš Doktor @ 2009-09-01  8:50 UTC (permalink / raw)
  To: KVM list, Autotest mailing list

[-- Attachment #1: Type: text/plain, Size: 86 bytes --]

I'm sorry, but Thunderbird apparently crippled the patch. Resending it as 
an attachment.

[-- Attachment #2: 0002--Add-ksm-test.patch --]
[-- Type: text/x-patch, Size: 23749 bytes --]

diff --git a/client/tests/kvm/kvm.py b/client/tests/kvm/kvm.py
index 4930e80..b9839df 100644
--- a/client/tests/kvm/kvm.py
+++ b/client/tests/kvm/kvm.py
@@ -53,6 +53,8 @@ class kvm(test.test):
                 "yum_update":   test_routine("kvm_tests", "run_yum_update"),
                 "autotest":     test_routine("kvm_tests", "run_autotest"),
                 "kvm_install":  test_routine("kvm_install", "run_kvm_install"),
+                "ksm":
+                                test_routine("kvm_tests", "run_ksm"),
                 "linux_s3":     test_routine("kvm_tests", "run_linux_s3"),
                 "stress_boot":  test_routine("kvm_tests", "run_stress_boot"),
                 "timedrift":    test_routine("kvm_tests", "run_timedrift"),
diff --git a/client/tests/kvm/kvm_tests.cfg.sample b/client/tests/kvm/kvm_tests.cfg.sample
index a83ef9b..f4a41b9 100644
--- a/client/tests/kvm/kvm_tests.cfg.sample
+++ b/client/tests/kvm/kvm_tests.cfg.sample
@@ -100,6 +100,23 @@ variants:
                 test_name = disktest
                 test_control_file = disktest.control
 
+    - ksm:
+        # Don't preprocess any vms as we need to change it's params
+        vms = ''
+        image_snapshot = yes
+        kill_vm_gracefully = no
+        type = ksm
+        variants:
+            - ratio_3:
+                ksm_ratio = 3
+            - ratio_10:
+                ksm_ratio = 10
+        variants:
+            - serial 
+                ksm_test_size = "serial"
+            - paralel
+                ksm_test_size = "paralel"
+
     - linux_s3:     install setup
         type = linux_s3
 
diff --git a/client/tests/kvm/kvm_tests.py b/client/tests/kvm/kvm_tests.py
index b100269..ada4c6b 100644
--- a/client/tests/kvm/kvm_tests.py
+++ b/client/tests/kvm/kvm_tests.py
@@ -462,6 +462,554 @@ def run_yum_update(test, params, env):
 
     session.close()
 
+def run_ksm(test, params, env):
+    """
+    Test how KSM (Kernel Shared Memory) acts when more than the physical
+    memory is used. The second part also tests how KVM handles the situation
+    when the host runs out of memory (the guest system is expected to pause,
+    wait until some process returns memory, and come back to life again).
+
+    @param test: kvm test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    # We are going to create the main VM so we use kvm_preprocess functions
+    # FIXME: not a nice thing
+    import kvm_preprocessing
+    import random
+    import socket
+    import select
+    import math
+
+    class allocator_com:
+        """
+        This class is used for communication with the allocator
+        """
+        def __init__(self, vm, _port, _host='127.0.0.1'):
+            self.vm = vm
+            self.PORT = _port
+            self.HOST = _host
+            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            self.isConnect = False
+
+        def __str__(self):
+            return self.vm + ":" + self.HOST + ":" + str(self.PORT)
+
+        def connect(self):
+            logging.debug("ALLOC: connect to %s (%s)", self.vm, self)
+            try:
+                self.socket.connect((self.HOST, self.PORT))
+            except:
+                raise error.TestFail("ALLOC: Could not establish the "\
+                                     "communication with %s" % (self.vm))
+            self.isConnect = True
+
+        def isConnected(self):
+            return self.isConnect
+
+        def readsize(self):
+            # Read the "<size>:" prefix one byte at a time
+            read, _, _ = select.select([self.socket.fileno()], [], [], 0.5)
+            size = 0
+            if (self.socket.fileno() in read):
+                data = self.socket.recv(1)
+                size = ""
+                while data[0] != ':':
+                    size = size + data[0]
+                    data = self.socket.recv(1)
+            return int(size)
+
+        def _recv(self):
+            msg = ""
+            # Don't unpack into 'error' -- that would shadow the error module
+            read, _, _ = select.select([self.socket.fileno()],\
+                                       [], [], 0.5)
+            if (self.socket.fileno() in read):
+                size = self.readsize()
+                msg = self.socket.recv(size)
+                if (len(msg) < size):
+                    raise error.TestFail("ALLOC: Could not receive the message")
+
+            logging.debug("ALLOC: output '%s' from %s" % (msg, self.vm))
+            return msg
+
+        def recv(self, wait=1, loops=20):
+            out = ""
+            log = ""
+            while not out.startswith("PASS") and not out.startswith("FAIL"):
+                logging.debug("Sleep(%d)" % (wait))
+                time.sleep(wait)
+                log += out
+                out = self._recv()
+
+                if loops == 0:
+                    logging.error(repr(out))
+                    raise error.TestFail("Command wasn't finished until DL")
+                loops = loops - 1
+
+            if not out.startswith("PASS"):
+                logging.error("Allocator failed on guest %s\nAttaching the "\
+                              "recent log" % (self.vm))
+                raise error.TestFail(log)
+
+            return out
+            
+
+        def send(self, command, data=""):
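+            # Wire format: "<N>:<command>:<data>;" where N is
+            # len(command) + len(data) + 3; e.g. send("init", "123456")
+            # emits "13:init:123456;" and the daemon reads N-1 bytes
+            # after the first colon.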
+            msg = str(len(command) + len(data) + 3)
+            msg += ":" + command + ":" + data + ";"
+            logging.debug("ALLOC: execute %s on %s" %(repr(msg), self.vm))
+            try:
+                self.socket.sendall(msg)
+            except:
+                raise error.TestFail("ALLOC: Could not send the message")
+
+        def disconnect(self):
+            logging.debug("ALLOC: disconnect")
+            self.send("exit")
+            self.recv()
+            time.sleep(5)
+            self.socket.close()
+            self.isConnect = False
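+
+        # Typical flow (used by the phases below): connect(); then
+        # send("init", skey + dkey) and recv(1, 10); work commands such
+        # as "srandom" / "srverify"; finally disconnect(), which itself
+        # sends "exit".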
+
+    def get_stat(lvms):
+        """
+        Get statistics in format:
+        Host: memfree = XXXM; Guests memsh = {XXX,XXX,...}
+
+        @params lvms: List of VMs
+        """
+        if not isinstance(lvms, list):
+            raise error.TestError("get_stat: parameter have to be proper list")
+
+        try:
+            stat = "Host: memfree = "
+            stat += str(int(os.popen("cat /proc/meminfo | grep MemFree")\
+                                     .readline().split()[1]) / 1024) + "M; "
+            stat += "swapfree = "
+            stat += str(int(os.popen("cat /proc/meminfo | grep SwapFree")\
+                                     .readline().split()[1]) / 1024) + "M; "
+        except:
+            raise error.TestFail("Could not fetch free memory info")
+
+
+        stat += "Guests memsh = {"
+        for vm in lvms:
+            try:
+                cmd = "cat /proc/%d/statm" % vm.pid
+                shm = int(os.popen(cmd).readline().split()[2])
+                # statm stores information in pages; recalculate to MB
+                shm = shm * 4 / 1024
+                stat += "%dM; " % (shm)
+            except:
+                raise error.TestError("Could not fetch shmem info from proc")
+        stat = stat[0:-2] + "}"
+        return stat
+
+
+    logging.info("Starting phase 0: Initialization")
+    # host_reserve: mem reserve kept for the host system to run
+    host_reserve = 256
+    # guest_reserve: mem reserve which is not used by allocator on the guests
+    guest_reserve = 256
+    max_alloc = 10
+    max_vms = params.get("max_vms")
+    if max_vms:
+        max_vms = int(max_vms)
+    else:
+        max_vms = 2
+    overcommit = params.get("ksm_overcommit_ratio")
+    if overcommit:
+        overcommit = float(overcommit)
+    else:
+        overcommit = 2.0
+    # vmsc: count of all used VMs
+    vmsc = int(overcommit) + 1
+    vmsc = max(vmsc, max_vms)
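+    # e.g. overcommit = 2.0, max_vms = 2  ->  vmsc = max(int(2.0) + 1, 2) = 3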
+    
+    if (params['ksm_test_size'] == "paralel") :
+        host_mem = (int(os.popen("grep MemTotal: /proc/meminfo")\
+                       .readline().split()[1]) / 1024 - host_reserve)
+        vmsc = 1
+        overcommit = 1
+        mem = host_mem
+        # 32bit system adjustment
+        if not params['image_name'].endswith("64"):
+            logging.debug("Probably i386 guest architecture, "\
+                          "max allocator mem = 2G")
+            # Guest can have more than 2G but kvm mem + 1MB (allocator itself) can't
+            if (host_mem > 2048):
+                mem = 2047 
+                
+                
+        if os.popen("uname -i").readline().startswith("i386"):
+            logging.debug("Host is i386 architecture, max guest mem is 2G")
+            # Guest system with qemu overhead (64M) can't have more than 2G
+            if mem > 2048 - 64:
+                mem = 2048 - 64
+        
+    else:
+        host_mem = (int(os.popen("grep MemTotal: /proc/meminfo")\
+                       .readline().split()[1]) / 1024 - host_reserve)
+        # mem: Memory of the guest systems. Maximum must be less than the
+        # amount of the host's physical ram
+        mem = int(overcommit * host_mem / vmsc)
+    
+        # 32bit system adjustment
+        if not params['image_name'].endswith("64"):
+            logging.debug("Probably i386 guest architecture, "\
+                          "max allocator mem = 2G")
+            # Guest can have more than 2G but kvm mem + 1MB (allocator itself) can't
+            if mem-guest_reserve-1 > 2048:
+                vmsc = int(math.ceil((host_mem*overcommit)/(2048.0+guest_reserve))) 
+                mem = int(math.floor(host_mem*overcommit/vmsc))
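+                # e.g. host_mem = 4096M, overcommit = 2.0, guest_reserve = 256M:
+                # vmsc = ceil(8192 / 2304.0) = 4, mem = floor(8192 / 4) = 2048M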
+                
+        if os.popen("uname -i").readline().startswith("i386"):
+            logging.debug("Host is i386 architecture, max guest mem is 2G")
+            # Guest system with qemu overhead (64M) can't have more than 2G
+            if mem > 2048 - 64:
+                vmsc = int(math.ceil((host_mem*overcommit)/(2048 - 64.0))) 
+                mem = int(math.floor(host_mem*overcommit/vmsc))
+    
+    
+    logging.info("overcommit = %f" % (overcommit))
+    logging.info("true overcommit = %f " % (float(vmsc*mem) / float(host_mem)))
+    logging.info("host mem = %dM" % (host_mem))
+    logging.info("mem = %dM" % (mem))
+    logging.info("swap = %dM" %\
+                 (int(os.popen("cat /proc/meminfo | grep SwapTotal")\
+                              .readline().split()[1]) / 1024))
+    logging.info("max_vms = %d" % (max_vms))
+    logging.info("vmsc = %d" % (vmsc))
+
+    # Generate unique keys for random series
+    skeys = []
+    dkeys = []
+    for i in range(0, max(vmsc, max_alloc)):
+        key = "%03s" % (random.randrange(0,999))
+        while key in skeys:
+            key = "%03s" % (random.randrange(0,999))
+        skeys.append(key)
+
+        key = "%03s" % (random.randrange(0,999))
+        while key in dkeys:
+            key = "%03s" % (random.randrange(0,999))
+        dkeys.append(key)
+    
+    lvms = []
+    lsessions = []
+    lallocators = []
+    alloc_port = 31284
+
+    # As we don't know the number and memory amount of VMs in advance, we need 
+    # to specify and create them here (FIXME: not a nice thing)
+    params['mem'] = mem
+    params['vms'] = params.get("main_vm")
+    # ksm_size: amount of memory used by allocator
+    ksm_size = mem - guest_reserve
+    logging.info("ksm_size = %dM" % (ksm_size))
+
+    
+    params['redirs'] += ' alloc0'
+    params['guest_port_alloc0'] = str(alloc_port)
+    
+    if (params['ksm_test_size'] == "paralel") :
+        for j in range(1, max_alloc):
+            params['redirs'] += ' alloc' + str(j)
+            params['guest_port_alloc' + str(j)] = str(alloc_port + j)
+
+    # Creating the first guest
+    kvm_preprocessing.preprocess_vm(test, params, env, params['vms'])
+    lvms.append(kvm_utils.env_get_vm(env, params.get("main_vm")))
+    if not lvms[0]:
+        raise error.TestError("VM object not found in environment")
+    if not lvms[0].is_alive():
+        raise error.TestError("VM seems to be dead; Test requires a living VM")
+
+    logging.info("Booting the first guest %s" % lvms[0].name)
+
+    lsessions.append(kvm_utils.wait_for(lvms[0].ssh_login, 360, 0, 2))
+    if not lsessions[0]:
+        raise error.TestFail("Could not log into first guest")
+    
+    
+    lallocators.append(allocator_com(lvms[0].name, lvms[0].redirs[alloc_port]))
+    if not lallocators[0]:
+        raise error.TestFail("Could not create allocator_com class for vm1")
+    
+
+    
+    # Creating the other guest systems
+    for i in range(1, vmsc):
+        vm_name = "vm" + str(i + 1)
+        # Last VM is later used to run more allocators simultaneously
+        #for j in range(1, max_alloc):
+        #    params['redirs'] += ' alloc' + str(j)
+        #    params['guest_port_alloc' + str(j)] = str(alloc_port + j)
+        
+        lvms.append(lvms[0].clone(vm_name, params))
+        kvm_utils.env_register_vm(env, vm_name, lvms[i])
+        params['vms'] += " " + vm_name
+
+        logging.info("Booting guest %s" % lvms[i].name)
+        if not lvms[i].create():
+            raise error.TestFail("Cannot create VM %s" % lvms[i].name)
+        if not lvms[i].is_alive():
+            raise error.TestError("VM %s seems to be dead; Test requires a"\
+                                  "living VM" % lvms[i].name)
+
+        lsessions.append(kvm_utils.wait_for(lvms[i].ssh_login, 360, 0, 2))
+        if not lsessions[i]:
+            raise error.TestFail("Could not log into guest %s" % lvms[i].name)
+
+        lallocators.append(allocator_com(lvms[i].name,\
+                                         lvms[i].redirs[alloc_port]))
+        if not lallocators[i]:
+            raise error.TestFail("Could not create allocator_com class for %s"\
+                                 % (lvms[i].name))
+        
+
+    # Let systems take a rest :-)
+    time.sleep(vmsc * 2)
+    logging.info(get_stat(lvms))
+
+    # Copy the allocator.c into guests
+    pwd = os.path.join(os.environ['AUTODIR'],'tests/kvm')
+    vksmd_src = os.path.join(pwd, "allocator.c")
+    dst_dir = "/tmp"
+    for vm in lvms:
+        if not vm.scp_to_remote(vksmd_src, dst_dir):
+            raise error.TestFail("Remote scp failed %s" % (vm.name))
+    logging.info("Phase 0 => passed")
+
+    def phase_1():
+        """ Inicialize virtual machine """
+        logging.info("Starting phase 1: filling with 0")
+        logging.info("Preparing the guests and fill in pages by zero")
+        for session in lsessions:
+            vm = lvms[lsessions.index(session)]
+            allocator = lallocators[lsessions.index(session)]
+            # Build the test suite
+            ret = session.get_command_status("gcc -o /tmp/allocator "\
+                                             "/tmp/allocator.c",\
+                                             timeout=300)
+            if ret == None or ret:
+                raise error.TestFail("Failed to build vksmd in the %s"\
+                                     % (vm.name))
+    
+            # Start the daemon
+            ret = session.get_command_status("/tmp/allocator %d %d" % (ksm_size,\
+                                                                      alloc_port))
+            if ret == None:
+                raise error.TestFail("Could not run vksmd in guest %s"\
+                                     % (vm.name))
+            if ret:
+                raise error.TestFail("Could not run vksmd in %s errno: %d"\
+                                      % (vm.name, ret))
+    
+            ret = session.get_command_status("iptables -F;"\
+                                             "iptables -P INPUT ACCEPT;")
+    
+            allocator.connect()
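+            # recv(wait, loops): poll every ksm_size/200 seconds, up to
+            # 100 times before giving up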
+            allocator.recv((ksm_size / 200), 100)
+            
+            # Let ksmd work (until shared mem reaches the expected value)
+            shm = 0
+            i = 0
+            cmd = "cat /proc/%d/statm" % vm.pid
+            while shm < ksm_size:
+                if i > 64:
+                    logging.info(get_stat(lvms))
+                    raise error.TestError("SHM didn't merged the memory until "\
+					  "the DL")
+                logging.debug("Sleep(%d)" % (ksm_size / 200))
+                time.sleep(ksm_size / 200)
+                try:
+                    shm = int(os.popen(cmd).readline().split()[2])
+                    shm = shm * 4 / 1024
+                    i = i + 1
+                except:
+                    raise error.TestError("Could not fetch shmem info from "
+					  "the /proc")
+    
+        # Keep some reserve
+        time.sleep(ksm_size / 200)
+    
+        # Set allocator keys
+        for i in range(0, vmsc):
+            lallocators[i].send("init", "%s%s" % (skeys[i], dkeys[i]))
+            lallocators[i].recv(1, 10)
+        logging.info(get_stat(lvms))
+        logging.info("Phase 1 => passed")
+
+    def phase_2():
+        """ Separate first guest memory by generate a special random series """
+        logging.info("Starting phase 2: Split the pages on the first guest")
+    
+        lallocators[0].send("srandom")
+        out = lallocators[0].recv(ksm_size / 500, 50)
+        out = int(out.split()[4])
+        logging.info("PERFORMANCE: %dMB * 1000 / %dms = %dMB/s"\
+                     % (ksm_size, out, (ksm_size * 1000 / out)))
+        logging.info(get_stat(lvms))
+        logging.info("Phase 2 => passed")
+
+    def phase_3():
+        """ Sequentional split of pages on guests up to memory limit """
+        logging.info("Starting phase 3a: Sequentional split of pages on guests up "\
+                     "to memory limit")
+        last_vm = 0
+        for i in range(1, vmsc):
+            vm = lvms[i]
+            session = lsessions[i]
+            allocator = lallocators[i]
+    
+            allocator.send("srandom")
+            out = ""
+            while not out.startswith("PASS") and not out.startswith("FAIL"):
+                free_mem = int(os.popen("grep MemFree /proc/meminfo")\
+                             .readline().split()[1])
+                logging.debug("FreeMem = %d" % (free_mem))
+                # We need to keep some memory for python to run.
+                if free_mem < 32000:
+                    logging.debug("Only %s free memory, killing 0 - %d hosts"\
+                                  % (free_mem, (i-1)))
+                    for j in range(0, i):
+                        lvms[j].destroy(gracefully = False)
+                    last_vm = i
+                    break
+                out = allocator._recv()
+            if last_vm != 0:
+                break
+    
+        allocator.recv(mem / 500, 50)
+        logging.info("Memory filled by the guest %s" % (vm.name))
+        logging.info("Phase 3a => passed")
+        
+        """ Check if memory in max loading guest is allright"""
+        logging.info("Starting phase 3b")
+        allocator.send("srverify")
+        allocator.recv(mem / 200, 50)
+        allocator.disconnect()
+        # We are going to use the last VM later
+        if i != (vmsc - 1):
+            session.close()
+            vm.destroy(gracefully = False)
+            for i in range(last_vm + 1, vmsc):
+                lallocators[i].send("verify")
+                lallocators[i].recv(mem / 200, 50)
+                lallocators[i].disconnect()
+                # We are going to use the last VM later
+                if i != (vmsc - 1):
+                    lsessions[i].close()
+                    lvms[i].destroy(gracefully = False)
+        logging.info(get_stat([lvms[i]]))
+        logging.info("Phase 3b => passed")
+
+    def phase_4():
+        """ Paralel page spliting """
+        logging.info("Phase 4: Paralel page spliting")
+        # We have to wait until allocator is finished (it waits 5 seconds to clean
+        # the socket
+        
+        session = lsessions[0]
+        vm = lvms[0]
+        
+        ret = session.get_command_status("gcc -o /tmp/allocator "\
+                                             "/tmp/allocator.c",\
+                                             timeout=300)
+        if ret == None or ret:
+            raise error.TestFail("Failed to build vksmd in the %s"\
+                                  % (vm.name))
+                                  
+        for alloc in lallocators:
+            if alloc.isConnected():
+                alloc.disconnect()
+        
+        del lallocators[:]
+        ret = session.get_command_status("iptables -F;"\
+                                    "iptables -P INPUT ACCEPT;")
+         
+        for i in range(0, max_alloc):
+            ret = session.get_command_status("/tmp/allocator %d %d" 
+                                   % (ksm_size / max_alloc, alloc_port + i))
+            if ret == None:
+                raise error.TestFail("Could not run vksmd in guest %s"\
+                                     % (vm.name))
+            if ret:
+                raise error.TestFail("Could not run allocator in %s errno: %d"\
+                                     % (vm.name, ret))
+        
+            lallocators.append(allocator_com(vm.name,\
+                                             vm.redirs[alloc_port + i]))
+            if not lallocators[i]:
+                raise error.TestFail("Could not create allocator_com class for"\
+				     " %s" % (vm.name))
+        
+        logging.info("Phase 4a: Simultaneous merging")
+        for i in range(0, max_alloc):
+            lallocators[i].connect()
+        
+        for i in range(0, max_alloc):
+            lallocators[i].recv((ksm_size / 200), 100)
+        # Wait until ksmd merges the pages (3 x ksm_size / 3)
+        shm = 0
+        i = 0
+        cmd = "cat /proc/%d/statm" % vm.pid
+        while shm < ksm_size:
+            if i > 64:
+                logging.info(get_stat(lvms))
+                raise error.TestError("SHM didn't merged the memory until DL")
+            logging.debug("Sleep(%d)" % (ksm_size / 200))
+            time.sleep(ksm_size / 200)
+            try:
+                shm = int(os.popen(cmd).readline().split()[2])
+                shm = shm * 4 / 1024
+            except:
+                raise error.TestError("Could not fetch shmem info from proc")
+        logging.info(get_stat([vm]))
+    
+    
+        logging.info("Phases 4b: Simultaneous spliting")
+        # Set keys
+        for i in range(0, max_alloc):
+            lallocators[i].send("init", "%s%s" % (skeys[i], dkeys[i]))
+            lallocators[i].recv(1, 10)
+    
+        # Actual splitting
+        for i in range(0, max_alloc):
+            lallocators[i].send("srandom")
+        
+        for i in range(0, max_alloc):
+            out = lallocators[i].recv(ksm_size / 500, 50)
+            out = int(out.split()[4])
+            logging.info("PERFORMANCE: %dMB * 1000 / %dms = %dMB/s"\
+                         % (ksm_size, out, (ksm_size * 1000 / out / max_alloc)))
+        logging.info(get_stat([vm]))
+    
+        logging.info("Phase 4c: Simultaneous verification")
+        for i in range(0, max_alloc):
+            lallocators[i].send("srverify")
+        for i in range(0, max_alloc):
+            lallocators[i].recv(mem / 200, 50)
+        logging.info(get_stat([vm]))
+    
+        logging.info("Phase 4 => passed")
+        # Clean-up
+        for i in range(0, max_alloc):
+            lallocators[i].disconnect()
+        session.close()
+        vm.destroy(gracefully = False)
+        
+    if params['ksm_test_size'] == "paralel":
+        phase_4()
+    elif params['ksm_test_size'] == "serial":
+        phase_1()
+        phase_2()
+        phase_3()
+        
 
 def run_linux_s3(test, params, env):
     """

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: [KVM-AUTOTEST PATCH 1/2] Add KSM test
  2009-08-31  9:35 ` [KVM-AUTOTEST PATCH 1/2] " Lukáš Doktor
  2009-09-01  8:49   ` Lukáš Doktor
@ 2009-09-07 12:03   ` Dor Laor
  1 sibling, 0 replies; 15+ messages in thread
From: Dor Laor @ 2009-09-07 12:03 UTC (permalink / raw)
  To: Lukáš Doktor; +Cc: KVM list, Autotest mailing list

On 08/31/2009 12:35 PM, Lukáš Doktor wrote:
> allocator.c is a program, which allocates pages in the memory and allow
> us to fill or test those pages. It's controlled using sockets.


After a quick review I have the following questions:
1. Why did you implement the guest tool in 'c' and not in python?
    Python is much simpler and you can share some code with the server.
    This 'test protocol' would also be easier to understand this way.
2. IMHO there is no need to use select, you can do a blocking read.
3. Also you can use plain malloc without the more complex (a bit) mmap.
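
For illustration only -- a hypothetical sketch (not part of the patch) of
what 1.-3. could look like together: a Python guest tool that speaks over
stdin/stdout with blocking reads and a plain string buffer instead of mmap:

    #!/usr/bin/env python
    # Hypothetical sketch -- not the posted allocator.
    import sys

    PAGE_SIZE = 4096

    def main():
        npages = int(sys.argv[1])
        buf = None
        while True:
            line = sys.stdin.readline()        # blocking read, no select()
            if not line:
                break
            cmd = line.split()
            if not cmd:
                continue
            if cmd[0] == "fillvalue":          # fill every page with one byte
                buf = chr(int(cmd[1])) * (npages * PAGE_SIZE)
                print "PASS: filled by %s" % cmd[1]
            elif cmd[0] == "exit":
                print "PASS: exit"
                break
            else:
                print "FAIL: wrong command"
            sys.stdout.flush()

    if __name__ == "__main__":
        main()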


^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [KVM-AUTOTEST PATCH 0/2] Add KSM test
  2009-08-31  9:29 [KVM-AUTOTEST PATCH 0/2] Add KSM test Lukáš Doktor
  2009-08-31  9:35 ` [KVM-AUTOTEST PATCH 1/2] " Lukáš Doktor
  2009-08-31  9:48 ` [KVM-AUTOTEST PATCH 2/2] " Lukáš Doktor
@ 2009-09-07 12:37 ` Uri Lublin
  2 siblings, 0 replies; 15+ messages in thread
From: Uri Lublin @ 2009-09-07 12:37 UTC (permalink / raw)
  To: Lukáš Doktor; +Cc: KVM list, Autotest mailing list

On 08/31/2009 12:29 PM, Lukáš Doktor wrote:
> This patch adds KSM test. We faced many difficulties which weren't
> solvable by regular ways so please take a look and comment.

Hello Lukas,

Can you please provide reviewers with some more information:
1. What does your test do? Please briefly outline the test.
    What are the different steps, what are parallel/serial, etc.
2. What do the configuration parameters represent?
    - Also, is there a name mismatch (ksm_ratio vs. ksm_overcommit_ratio)?
3. Why do you communicate with your guest application using sockets?
    I think it would be better to communicate with stdin/stdout (on the
    application) and session.sendline/session.read_until_last_line_matches.
4. With regards to the allocator program on the guest:
    - What is the difference between "random" and "static random"
       - is it just the seed?
       - how do you use them?
    - Why do you implement your own rand functions?
5. Why don't you define the first VM in the configuration file, and
    clone it for all other VMs (memory is calculated during the test?).
    Also please use an existing function to find a "free" port, instead
       of hard coding the port number. (you don't need it if you
       switch to using stdin/stdout)
6. Please replace os.popen('cat file') with directly opening/reading the file.
    - same for grep commands.
    - we should probably write a util-function for grepping a file, if one
      does not already exist (see the sketch after this list).
7. What were the difficulties you encountered and how did you solve them?
    - The host out-of-memory condition killing the python program is one I saw.
      I think leaving some memory to the host is a good solution.
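
    A hypothetical sketch of such a util-function (illustration only):

        def grep_file(path, substring):
            # Return the first line of 'path' containing 'substring', or None
            f = open(path)
            try:
                for line in f:
                    if substring in line:
                        return line
            finally:
                f.close()
            return None

        # e.g. free memory in MB, without spawning cat/grep:
        mem_free = int(grep_file("/proc/meminfo", "MemFree").split()[1]) / 1024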

Thanks,
     Uri.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [KVM-AUTOTEST PATCH 2/2] Add KSM test
  2009-08-31  9:48 ` [KVM-AUTOTEST PATCH 2/2] " Lukáš Doktor
  2009-09-01  8:50   ` Lukáš Doktor
@ 2009-09-07 13:00   ` Dor Laor
  1 sibling, 0 replies; 15+ messages in thread
From: Dor Laor @ 2009-09-07 13:00 UTC (permalink / raw)
  To: Lukáš Doktor; +Cc: KVM list, Autotest mailing list

On 08/31/2009 12:48 PM, Lukáš Doktor wrote:
> This is an actual KSM test.
>
> It allows testing merging resp. splitting of the pages in serial,
> parallel or both. Also you can specify an overcommit ratio for KSM
> overcommit testing.
>
> We were forced to destroy all previously defined vms and to create them
> inside the test (similar to stress_boot), because we don't know how many
> machines will be required during the vm preparation.
>
> A second nasty thing is filling the memory by the guests. We didn't find
> a better way to test filled memory without the python (kvm-autotest)
> process falling over. This version continues filling until only a small
> reserve remains, then destroys the previous machines and lets the actual
> machine finish the work.

This is quite helpful. /me has a few ideas for enhancements/simplification; 
you might have implemented some of them already, but I think they can 
improve ksm coverage testing:
[btw: for all newbies ksm is in F12 now]

1. Guest tool that creates the memory pressure
    Instead of writing your own tool in 'c' and opening a specific tcp port
    to talk with it, I suggest the following:
    a. Use ssh into the guest. Autotest already supports it.
    b. Basically, instead of your program you can use some existing tool.
       If you mount tmpfs you can create files on this directory using
       dd and it will fill the guest ram, like dd if=/dev/urandom
       of=MOUNT_POINT bs=4k count=x.
       It might be simpler than a new binary but python code might be ok
       too.
2. Measure the exact activity of ksm
    Let's cancel guest swap (swapoff -a) and write specific values to
    the guest ram. For instance, if you have 10 VMs and you allocate
    512M in each of them with the same random value in each of their
    pages, you would expect to merge 10 * 512M / 4k pages into a single
    page in the host.

3. KSM test cases.
    a. standard case - like above - merge many pages into one.
       Change the value from all random into the same one and check
       convergence.
    b. Worst case - ksm uses red-black trees for ram. The trees
       use the entire page as a key. So putting the same value in the
       first 4000 bytes and afterwards a random value in the last 96 bytes
       of each page will cause the ksm scanner to work really hard.
       Keep changing the last bytes during the test duration (see the
       sketch after this list).
    c. Change ksm scan speed and memory limits.
       We need to test that too.
    d. KVM swap support and ksm
       Over commit the host memory using scenario a by 200%
       At once, write random values to all pages of the guests.
       It will cause the host to swap the VM pages.
       The test is actually testing kvm host swap support.
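
A hypothetical sketch of the worst case in (b) above (all names made up):

    import os

    PAGE_SIZE = 4096
    PREFIX = "x" * (PAGE_SIZE - 96)    # identical 4000-byte page prefix

    def worst_case_pages(npages):
        # Every page compares equal for the first 4000 bytes, so the ksm
        # scanner has to walk deep into the red-black tree before the
        # random 96-byte tails tell the pages apart; regenerate the tails
        # during the test to keep the scanner busy.
        return [PREFIX + os.urandom(96) for i in range(npages)]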

HTH,
Dor


>
> Signed-off-by: Lukáš Doktor<ldoktor@redhat.com>
> Signed-off-by: Jiří Župka<jzupka@redhat.com>
> ---
>    client/tests/kvm/kvm.py               |    2 +
>    client/tests/kvm/kvm_tests.cfg.sample |   17 +
>    client/tests/kvm/kvm_tests.py         |  548
> +++++++++++++++++++++++++++++++++
>    3 files changed, 567 insertions(+), 0 deletions(-)
>
> diff --git a/client/tests/kvm/kvm.py b/client/tests/kvm/kvm.py
> index 4930e80..b9839df 100644
> --- a/client/tests/kvm/kvm.py
> +++ b/client/tests/kvm/kvm.py
> @@ -53,6 +53,8 @@ class kvm(test.test):
>                    "yum_update":   test_routine("kvm_tests",
> "run_yum_update"),
>                    "autotest":     test_routine("kvm_tests", "run_autotest"),
>                    "kvm_install":  test_routine("kvm_install",
> "run_kvm_install"),
> +                "ksm":
> +                                test_routine("kvm_tests", "run_ksm"),
>                    "linux_s3":     test_routine("kvm_tests", "run_linux_s3"),
>                    "stress_boot":  test_routine("kvm_tests",
> "run_stress_boot"),
>                    "timedrift":    test_routine("kvm_tests",
> "run_timedrift"),
> diff --git a/client/tests/kvm/kvm_tests.cfg.sample
> b/client/tests/kvm/kvm_tests.cfg.sample
> index a83ef9b..f4a41b9 100644
> --- a/client/tests/kvm/kvm_tests.cfg.sample
> +++ b/client/tests/kvm/kvm_tests.cfg.sample
> @@ -100,6 +100,23 @@ variants:
>                    test_name = disktest
>                    test_control_file = disktest.control
>
> +    - ksm:
> +        # Don't preprocess any vms as we need to change it's params
> +        vms = ''
> +        image_snapshot = yes
> +        kill_vm_gracefully = no
> +        type = ksm
> +        variants:
> +            - ratio_3:
> +                ksm_ratio = 3
> +            - ratio_10:
> +                ksm_ratio = 10
> +        variants:
> +            - serial
> +                ksm_test_size = "serial"
> +            - paralel
> +                ksm_test_size = "paralel"
> +
>        - linux_s3:     install setup
>            type = linux_s3
>
> diff --git a/client/tests/kvm/kvm_tests.py b/client/tests/kvm/kvm_tests.py
> index b100269..ada4c6b 100644
> --- a/client/tests/kvm/kvm_tests.py
> +++ b/client/tests/kvm/kvm_tests.py
> @@ -462,6 +462,554 @@ def run_yum_update(test, params, env):
>
>        session.close()
>
> +def run_ksm(test, params, env):
> +    """
> +    Test how KSM (Kernel Shared Memory) act with more than physical
> memory is
> +    used. In second part is also tested, how KVM can handle the situation,
> +    when the host runs out of memory (expected is to pause the guest
> system,
> +    wait until some process returns the memory and bring the guest back
> to life)
> +
> +    @param test: kvm test object.
> +    @param params: Dictionary with test parameters.
> +    @param env: Dictionary with the test wnvironment.
> +    """
> +    # We are going to create the main VM so we use kvm_preprocess functions
> +    # FIXME: not a nice thing
> +    import kvm_preprocessing
> +    import random
> +    import socket
> +    import select
> +    import math
> +
> +    class allocator_com:
> +        """
> +        This class is used for communication with the allocator
> +        """
> +        def __init__(self, vm, _port, _host='127.0.0.1'):
> +            self.vm = vm
> +            self.PORT = _port
> +            self.HOST = _host
> +            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
> +            self.isConnect = False
> +
> +        def __str__(self):
> +            return self.vm + ":" + self.HOST + ":" + str(self.PORT)
> +
> +        def connect(self):
> +            print self
> +            logging.debug("ALLOC: connect to %s", self.vm)
> +            try:
> +                self.socket.connect((self.HOST, self.PORT))
> +            except:
> +                raise error.TestFail("ALLOC: Could not establish the "\
> +                                     "communication with %s" % (self.vm))
> +            self.isConnect = True
> +
> +        def isConnected(self):
> +            return self.isConnect;
> +
> +        def readsize(self):
> +            read,write,error =
> select.select([self.socket.fileno()],[],[],0.5)
> +            size = 0
> +            if (self.socket.fileno() in read):
> +                data = self.socket.recv(1);
> +                size = "";
> +                while data[0] != ':':
> +                    size = size + data[0]
> +                    data = self.socket.recv(1)
> +            return int(size)
> +
> +        def _recv(self):
> +            msg = ""
> +            read, write, error = select.select([self.socket.fileno()],\
> +                                               [], [], 0.5)
> +            if (self.socket.fileno() in read):
> +                size = self.readsize()
> +                msg = self.socket.recv(size)
> +                if (len(msg)<  size):
> +                    raise error.TestFail("ALLOC: Could not recive the
> message")
> +
> +            logging.debug("ALLOC: output '%s' from %s" % (msg, self.vm))
> +            return msg
> +
> +        def recv(self, wait=1, loops=20):
> +            out = ""
> +            log = ""
> +            while not out.startswith("PASS") and not
> out.startswith("FAIL"):
> +                logging.debug("Sleep(%d)" % (wait))
> +                time.sleep(wait)
> +                log += out
> +                out = self._recv()
> +
> +                if loops == 0:
> +                    logging.error(repr(out))
> +                    raise error.TestFail("Command wasn't finished until
> DL")
> +                loops = loops - 1
> +
> +            if not out.startswith("PASS"):
> +                logging.error("Allocator failed on guest %s\nAttaching
> the"\
> +                              "recent log"  % (self.vm))
> +                raise error.TestFail(log)
> +
> +            return out
> +
> +
> +        def send(self, command, data=""):
> +            msg = str(len(command) + len(data) + 3)
> +            msg += ":" + command + ":" + data + ";"
> +            logging.debug("ALLOC: execute %s on %s" %(repr(msg), self.vm))
> +            try:
> +                self.socket.sendall(msg)
> +            except:
> +                raise error.TestFail("ALLOC: Could not send the message")
> +
> +        def disconnect(self):
> +            logging.debug("ALLOC: disconnect")
> +            self.send("exit")
> +            self.recv()
> +            time.sleep(5)
> +            self.socket.close()
> +            self.isConnect = False
> +
> +    def get_stat(lvms):
> +        """
> +        Get statistics in format:
> +        Host: memfree = XXXM; Guests memsh = {XXX,XXX,...}
> +
> +        @params lvms: List of VMs
> +        """
> +        if not isinstance(lvms, list):
> +            raise error.TestError("get_stat: parameter have to be
> proper list")
> +
> +        try:
> +            stat = "Host: memfree = "
> +            stat += str(int(os.popen("cat /proc/meminfo | grep MemFree")\
> +                                     .readline().split()[1]) / 1024) +
> "M;"
> +            stat += "swapfree = "
> +            stat += str(int(os.popen("cat /proc/meminfo | grep SwapFree")\
> +                                     .readline().split()[1]) / 1024) +
> "M;"
> +        except:
> +            raise error.TestFail("Could not fetch free memory info")
> +
> +
> +        stat += "Guests memsh = {"
> +        for vm in lvms:
> +            try:
> +                cmd = "cat /proc/%d/statm" % vm.pid
> +                shm = int(os.popen(cmd).readline().split()[2])
> +                # statm stores informations in pages, recalculate to MB
> +                shm = shm * 4 / 1024
> +                stat += "%dM; " % (shm)
> +            except:
> +                raise error.TestError("Could not fetch shmem info from
> proc")
> +        stat = stat[0:-2] + "}"
> +        return stat
> +
> +
> +
> +
> +
> +    logging.info("Starting phase 0: Initialization")
> +    # host_reserve: mem reserve keept for the host system to run
> +    host_reserve = 256
> +    # guest_reserve: mem reserve which is not used by allocator on the
> guests
> +    guest_reserve = 256
> +    max_alloc = 10
> +    max_vms = params.get("max_vms")
> +    if max_vms:
> +        max_vms = int(max_vms)
> +    else:
> +        max_vms = 2
> +    overcommit = params.get("ksm_overcommit_ratio")
> +    if overcommit:
> +        overcommit = float(overcommit)
> +    else:
> +        overcommit = 2.0
> +    # vmsc: count of all used VMs
> +    vmsc = int(overcommit) + 1
> +    vmsc = max(vmsc, max_vms)
> +
> +    if (params['ksm_test_size'] == "paralel") :
> +        host_mem = (int(os.popen("grep MemTotal: /proc/meminfo")\
> +                       .readline().split()[1]) / 1024 - host_reserve)
> +        vmsc = 1
> +        overcommit = 1
> +        mem = host_mem
> +        # 32bit system adjustment
> +        if not params['image_name'].endswith("64"):
> +            logging.debug("Probably i386 guest architecture, "\
> +                          "max allocator mem = 2G")
> +            # Guest can have more than 2G but kvm mem + 1MB (allocator
> itself) can't
> +            if (host_mem>  2048):
> +                mem = 2047
> +
> +
> +        if os.popen("uname -i").readline().startswith("i386"):
> +            logging.debug("Host is i386 architecture, max guest mem is 2G")
> +            # Guest system with qemu overhead (64M) can't have more than 2G
> +            if mem>  2048 - 64:
> +                mem = 2048 - 64
> +
> +    else:
> +        host_mem = (int(os.popen("grep MemTotal: /proc/meminfo")\
> +                       .readline().split()[1]) / 1024 - host_reserve)
> +        # mem: Memory of the guest systems. Maximum must be less than
> amount of the
> +        # host's physical ram
> +        mem = int(overcommit * host_mem / vmsc)
> +
> +        # 32bit system adjustment
> +        if not params['image_name'].endswith("64"):
> +            logging.debug("Probably i386 guest architecture, "\
> +                          "max allocator mem = 2G")
> +            # Guest can have more than 2G but kvm mem + 1MB (allocator
> itself) can't
> +            if mem-guest_reserve-1>  2048:
> +                vmsc =
> int(math.ceil((host_mem*overcommit)/(2048.0+guest_reserve)))
> +                mem = int(math.floor(host_mem*overcommit/vmsc))
> +
> +        if os.popen("uname -i").readline().startswith("i386"):
> +            logging.debug("Host is i386 architecture, max guest mem is 2G")
> +            # Guest system with qemu overhead (64M) can't have more than 2G
> +            if mem>  2048 - 64:
> +                vmsc = int(math.ceil((host_mem*overcommit)/(2048 - 64.0)))
> +                mem = int(math.floor(host_mem*overcommit/vmsc))
> +
> +
> +    logging.info("overcommit = %f" % (overcommit))
> +    logging.info("true overcommit = %f " % (float(vmsc*mem) /
> float(host_mem)))
> +    logging.info("host mem = %dM" % (host_mem))
> +    logging.info("mem = %dM" % (mem))
> +    logging.info("swap = %dM" %\
> +                 (int(os.popen("cat /proc/meminfo | grep SwapTotal")\
> +                              .readline().split()[1]) / 1024))
> +    logging.info("max_vms = %d" % (max_vms))
> +    logging.info("vmsc = %d" % (vmsc))
> +
> +    # Generate unique keys for random series
> +    skeys = []
> +    dkeys = []
> +    for i in range(0, max(vmsc, max_alloc)):
> +        key = "%03s" % (random.randrange(0,999))
> +        while key in skeys:
> +            key = "%03s" % (random.randrange(0,999))
> +        skeys.append(key)
> +
> +        key = "%03s" % (random.randrange(0,999))
> +        while key in dkeys:
> +            key = "%03s" % (random.randrange(0,999))
> +        dkeys.append(key)
> +
> +    lvms = []
> +    lsessions = []
> +    lallocators = []
> +    alloc_port = 31284
> +
> +    # As we don't know the number and memory amount of VMs in advance,
> we need
> +    # to specify and create them here (FIXME: not a nice thing)
> +    params['mem'] = mem
> +    params['vms'] = params.get("main_vm")
> +    # ksm_size: amount of memory used by allocator
> +    ksm_size = mem - guest_reserve
> +    logging.info("ksm_size = %dM" % (ksm_size))
> +
> +
> +    params['redirs'] += ' alloc0'
> +    params['guest_port_alloc0'] = str(alloc_port)
> +
> +    if (params['ksm_test_size'] == "paralel") :
> +        for j in range(1, max_alloc):
> +            params['redirs'] += ' alloc' + str(j)
> +            params['guest_port_alloc' + str(j)] = str(alloc_port + j)
> +
> +    # Creating of the first guest
> +    kvm_preprocessing.preprocess_vm(test, params, env, params['vms'])
> +    lvms.append(kvm_utils.env_get_vm(env, params.get("main_vm")))
> +    if not lvms[0]:
> +        raise error.TestError("VM object not found in environment")
> +    if not lvms[0].is_alive():
> +        raise error.TestError("VM seems to be dead; Test requires a
> living VM")
> +
> +    logging.info("Booting the first guest %s" % lvms[0].name)
> +
> +    lsessions.append(kvm_utils.wait_for(lvms[0].ssh_login, 360, 0, 2))
> +    if not lsessions[0]:
> +        raise error.TestFail("Could not log into first guest")
> +
> +
> +    lallocators.append(allocator_com(lvms[0].name,
> lvms[0].redirs[alloc_port]))
> +    if not lallocators[0]:
> +        raise error.TestFail("Could not create allocator_com class for
> vm1")
> +
> +
> +
> +    # Create the other guests
> +    for i in range(1, vmsc):
> +        vm_name = "vm" + str(i + 1)
> +        # Last VM is later used to run more allocators simultaneously
> +        """for j in range(1, max_alloc):
> +            params['redirs'] += ' alloc' + str(j)
> +            params['guest_port_alloc' + str(j)] = str(alloc_port + j)"""
> +
> +        lvms.append(lvms[0].clone(vm_name, params))
> +        kvm_utils.env_register_vm(env, vm_name, lvms[i])
> +        params['vms'] += " " + vm_name
> +
> +        logging.info("Booting guest %s" % lvms[i].name)
> +        if not lvms[i].create():
> +            raise error.TestFail("Cannot create VM %s" % lvms[i].name)
> +        if not lvms[i].is_alive():
> +            raise error.TestError("VM %s seems to be dead; test requires "
> +                                  "a living VM" % lvms[i].name)
> +
> +        lsessions.append(kvm_utils.wait_for(lvms[i].ssh_login, 360, 0, 2))
> +        if not lsessions[i]:
> +            raise error.TestFail("Could not log into guest %s" %
> +                                 lvms[i].name)
> +
> +        lallocators.append(allocator_com(lvms[i].name,
> +                                         lvms[i].redirs[alloc_port]))
> +        if not lallocators[i]:
> +            raise error.TestFail("Could not create allocator_com class "
> +                                 "for %s" % (lvms[i].name))
> +
> +
> +    # Let the systems take a rest :-)
> +    time.sleep(vmsc * 2)
> +    logging.info(get_stat(lvms))
> +
> +    # Copy allocator.c into the guests
> +    pwd = os.path.join(os.environ['AUTODIR'], 'tests/kvm')
> +    vksmd_src = os.path.join(pwd, "allocator.c")
> +    dst_dir = "/tmp"
> +    for vm in lvms:
> +        if not vm.scp_to_remote(vksmd_src, dst_dir):
> +            raise error.TestFail("Remote scp failed %s" % (vm.name))
> +    logging.info("Phase 0 => passed")
> +
> +    def phase_1():
> +        """ Initialize the virtual machines """
> +        logging.info("Starting phase 1: filling with 0")
> +        logging.info("Preparing the guests and filling the pages with zeros")
> +        for session in lsessions:
> +            vm = lvms[lsessions.index(session)]
> +            allocator = lallocators[lsessions.index(session)]
> +            # Build the allocator
> +            ret = session.get_command_status("gcc -o /tmp/allocator "
> +                                             "/tmp/allocator.c",
> +                                             timeout=300)
> +            if ret is None or ret:
> +                raise error.TestFail("Failed to build vksmd in %s"
> +                                     % (vm.name))
> +
> +            # Start the daemon
> +            ret = session.get_command_status("/tmp/allocator %d %d"
> +                                             % (ksm_size, alloc_port))
> +            if ret is None:
> +                raise error.TestFail("Could not run vksmd in guest %s"
> +                                     % (vm.name))
> +            if ret:
> +                raise error.TestFail("Could not run vksmd in %s, errno: %d"
> +                                     % (vm.name, ret))
> +
> +            ret = session.get_command_status("iptables -F;"
> +                                             "iptables -P INPUT ACCEPT;")
> +
> +            allocator.connect()
> +            allocator.recv((ksm_size / 200), 100)
> +
> +            # Let ksmd work (until the shared memory reaches the expected
> +            # value)
> +            shm = 0
> +            i = 0
> +            cmd = "cat /proc/%d/statm" % vm.pid
> +            while shm < ksm_size:
> +                if i > 64:
> +                    logging.info(get_stat(lvms))
> +                    raise error.TestError("SHM didn't merge the memory "
> +                                          "before the deadline")
> +                logging.debug("Sleep(%d)" % (ksm_size / 200))
> +                time.sleep(ksm_size / 200)
> +                try:
> +                    shm = int(os.popen(cmd).readline().split()[2])
> +                    shm = shm * 4 / 1024
> +                    i = i + 1
> +                except:
> +                    raise error.TestError("Could not fetch shmem info "
> +                                          "from /proc")
> +
> +        # Keep some reserve
> +        time.sleep(ksm_size / 200)
> +
> +        # Set allocator keys
> +        for i in range(0, vmsc):
> +            lallocators[i].send("init", "%s%s" % (skeys[i], dkeys[i]))
> +            lallocators[i].recv(1, 10)
> +        logging.info(get_stat(lvms))
> +        logging.info("Phase 1 => passed")
> +
> +    def phase_2():
> +        """ Split the first guest's memory by generating a special
> +        random series """
> +        logging.info("Starting phase 2: splitting the pages on the "
> +                     "first guest")
> +
> +        lallocators[0].send("srandom")
> +        out = lallocators[0].recv(ksm_size / 500, 50)
> +        out = int(out.split()[4])
> +        logging.info("PERFORMANCE: %dMB * 1000 / %dms = %dMB/s"
> +                     % (ksm_size, out, (ksm_size * 1000 / out)))
> +        logging.info(get_stat(lvms))
> +        logging.info("Phase 2 => passed")
> +
> +    def phase_3():
> +        """ Sequential split of pages on guests up to the memory limit """
> +        logging.info("Starting phase 3a: Sequential split of pages on "
> +                     "guests up to the memory limit")
> +        last_vm = 0
> +        for i in range(1, vmsc):
> +            vm = lvms[i]
> +            session = lsessions[i]
> +            allocator = lallocators[i]
> +
> +            allocator.send("srandom")
> +            out = ""
> +            while not out.startswith("PASS") and not out.startswith("FAIL"):
> +                free_mem = int(os.popen("grep MemFree /proc/meminfo")
> +                               .readline().split()[1])
> +                logging.debug("FreeMem = %d" % (free_mem))
> +                # We need to keep some memory for python to run.
> +                if free_mem < 32000:
> +                    logging.debug("Only %s free memory, killing guests "
> +                                  "0 - %d" % (free_mem, (i-1)))
> +                    for j in range(0, i):
> +                        lvms[j].destroy(gracefully=False)
> +                    last_vm = i
> +                    break
> +                out = allocator._recv()
> +            if last_vm != 0:
> +                break
> +
> +        allocator.recv(mem / 500, 50)
> +        logging.info("Memory filled by the guest %s" % (vm.name))
> +        logging.info("Phase 3a => passed")
> +
> +        # Check that the memory in the fully loaded guest is all right
> +        logging.info("Starting phase 3b")
> +        allocator.send("srverify")
> +        allocator.recv(mem / 200, 50)
> +        allocator.disconnect()
> +        # We are going to use the last VM later
> +        if i != (vmsc):
> +            session.close()
> +            vm.destroy(gracefully=False)
> +            for i in range(last_vm + 1, vmsc):
> +                lallocators[i].send("verify")
> +                lallocators[i].recv(mem / 200, 50)
> +                lallocators[i].disconnect()
> +                # We are going to use the last VM later
> +                if i != (vmsc - 1):
> +                    lsessions[i].close()
> +                    lvms[i].destroy(gracefully=False)
> +        logging.info(get_stat([lvms[i]]))
> +        logging.info("Phase 3b => passed")
> +
> +    def phase_4():
> +        """ Parallel page splitting """
> +        logging.info("Phase 4: Parallel page splitting")
> +        # We have to wait until the allocator is finished (it waits 5
> +        # seconds to clean the socket)
> +
> +        session = lsessions[0]
> +        vm = lvms[0]
> +
> +        ret = session.get_command_status("gcc -o /tmp/allocator "
> +                                         "/tmp/allocator.c",
> +                                         timeout=300)
> +        if ret is None or ret:
> +            raise error.TestFail("Failed to build vksmd in %s"
> +                                 % (vm.name))
> +
> +        for allocator in lallocators:
> +            if allocator.isConnected():
> +                allocator.disconnect()
> +
> +        del lallocators[:]
> +        ret = session.get_command_status("iptables -F;"
> +                                         "iptables -P INPUT ACCEPT;")
> +
> +        for i in range(0, max_alloc):
> +            ret = session.get_command_status("/tmp/allocator %d %d"
> +                                             % (ksm_size / max_alloc,
> +                                                alloc_port + i))
> +            if ret is None:
> +                raise error.TestFail("Could not run vksmd in guest %s"
> +                                     % (vm.name))
> +            if ret:
> +                raise error.TestFail("Could not run allocator in %s, "
> +                                     "errno: %d" % (vm.name, ret))
> +
> +            lallocators.append(allocator_com(vm.name,
> +                                             vm.redirs[alloc_port + i]))
> +            if not lallocators[i]:
> +                raise error.TestFail("Could not create allocator_com "
> +                                     "class for %s" % (vm.name))
> +
> +        logging.info("Phase 4a: Simultaneous merging")
> +        for i in range(0, max_alloc):
> +            lallocators[i].connect()
> +
> +        for i in range(0, max_alloc):
> +            lallocators[i].recv((ksm_size / 200), 100)
> +        # Wait until ksmd merges the pages (3 x ksm_size / 3)
> +        shm = 0
> +        i = 0
> +        cmd = "cat /proc/%d/statm" % vm.pid
> +        while shm < ksm_size:
> +            if i > 64:
> +                logging.info(get_stat(lvms))
> +                raise error.TestError("SHM didn't merge the memory "
> +                                      "before the deadline")
> +            logging.debug("Sleep(%d)" % (ksm_size / 200))
> +            time.sleep(ksm_size / 200)
> +            try:
> +                shm = int(os.popen(cmd).readline().split()[2])
> +                shm = shm * 4 / 1024
> +                i = i + 1
> +            except:
> +                raise error.TestError("Could not fetch shmem info "
> +                                      "from /proc")
> +        logging.info(get_stat([vm]))
> +
> +
> +        logging.info("Phase 4b: Simultaneous splitting")
> +        # Set keys
> +        for i in range(0, max_alloc):
> +            lallocators[i].send("init", "%s%s" % (skeys[i], dkeys[i]))
> +            lallocators[i].recv(1, 10)
> +
> +        # Actual splitting
> +        for i in range(0, max_alloc):
> +            lallocators[i].send("srandom")
> +
> +        for i in range(0, max_alloc):
> +            out = lallocators[i].recv(ksm_size / 500, 50)
> +            out = int(out.split()[4])
> +            logging.info("PERFORMANCE: %dMB * 1000 / %dms = %dMB/s"
> +                         % (ksm_size, out,
> +                            (ksm_size * 1000 / out / max_alloc)))
> +        logging.info(get_stat([vm]))
> +
> +        logging.info("Phase 4c: Simultaneous verification")
> +        for i in range(0, max_alloc):
> +            lallocators[i].send("srverify")
> +        for i in range(0, max_alloc):
> +            lallocators[i].recv(mem / 200, 50)
> +        logging.info(get_stat([vm]))
> +
> +        logging.info("Phase 4 => passed")
> +        # Clean-up
> +        for i in range(0, max_alloc):
> +            lallocators[i].disconnect()
> +        session.close()
> +        vm.destroy(gracefully=False)
> +
> +    if params['ksm_test_size'] == "paralel":
> +        phase_4()
> +    elif params['ksm_test_size'] == "serial":
> +        phase_1()
> +        phase_2()
> +        phase_3()
> +
>
>    def run_linux_s3(test, params, env):
>        """

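The merge-watch loops in phases 1 and 4 both come down to polling the
"shared" field of /proc/<pid>/statm and converting pages to megabytes.
A minimal standalone sketch of that check (assuming 4 kB pages; the
function name and poll limits are illustrative, not part of the patch):

import time

def wait_for_merge(pid, expected_mb, step=5, max_polls=64):
    # Field 3 of /proc/<pid>/statm is the number of shared pages;
    # with 4 kB pages, shared MB = pages * 4 / 1024.
    for _ in range(max_polls):
        with open("/proc/%d/statm" % pid) as statm:
            shared_pages = int(statm.readline().split()[2])
        shared_mb = shared_pages * 4 / 1024
        if shared_mb >= expected_mb:
            return shared_mb
        time.sleep(step)
    raise RuntimeError("shared memory did not reach %d MB" % expected_mb)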

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [KVM-AUTOTEST PATCH 1/2] Add KSM test
  2009-09-29 15:50   ` Lucas Meneghel Rodrigues
@ 2009-09-30 12:23     ` Dor Laor
  0 siblings, 0 replies; 15+ messages in thread
From: Dor Laor @ 2009-09-30 12:23 UTC (permalink / raw)
  To: Lucas Meneghel Rodrigues; +Cc: Jiri Zupka, kvm, autotest, Lukas Doktor

On 09/29/2009 05:50 PM, Lucas Meneghel Rodrigues wrote:
> On Fri, 2009-09-25 at 05:22 -0400, Jiri Zupka wrote:
>> ----- "Dor Laor"<dlaor@redhat.com>  wrote:
>>
>>> On 09/16/2009 04:09 PM, Jiri Zupka wrote:
>>>>
>>>> ----- "Dor Laor"<dlaor@redhat.com>   wrote:
>>>>
>>>>> On 09/15/2009 09:58 PM, Jiri Zupka wrote:
>>>>>>> After a quick review I have the following questions:
>>>>>>> 1. Why did you implement the guest tool in 'c' and not in python?
>>>>>>>      Python is much simpler and you can share some code with the server.
>>>>>>>      This 'test protocol' would also be easier to understand this way.
>>>>>>
>>>>>> We need speed and precise control of memory allocation in pages.
>>>>>>
>>>>>>> 2. IMHO there is no need to use select, you can do blocking read.
>>>>>>
>>>>>> We will replace the socket communication with interactive program
>>>>>> communication via ssh/telnet.
>>>>>>
>>>>>>> 3. Also you can use plain malloc without the more complex (a bit) mmap.
>>>>>>
>>>>>> We need to address the memory pages exactly. We can't allow the data
>>>>>> to shift in memory.
>>>>>
>>>>> You can use the tmpfs+dd idea instead of the specific program as I
>>>>> detailed before. Maybe some other binary can be used. My intention is
>>>>> to simplify the test/environment as much as possible.
>>>>>
>>>>
>>>> We need compatibility with other systems, like Windows, etc.
>>>> We want to add support for other systems in the next version.
>>>
>>> KSM is a host feature and should be agnostic to the guest.
>>> Also I don't think your code will compile on windows...
>>
>> Yes, I think you are right.
>
> First of all, sorry, I am doing the best I can to carefully review the
> whole patch queue, and as KSM is a more involved feature that I am not
> very familiar with, I need a bit more time to review it!
>
>> But we need to generate special data in the guests' memory pages, so
>> we need a script on the guest side of the test; communication over
>> ssh is too slow to transfer many GB of special data to the guests.
>>
>> We can use an optimized C program, which is 10x or more faster than a
>> python script on a native system. A heavy load on the virtual guest
>> can cause performance problems.
>
> About code compiling under Windows: I guess making a native Windows C
> or C++ program is an option. I generally agree with your reasoning;
> this case seems to be better covered with a C program. Will get into
> it in more detail ASAP...
>
>> We can use tmpfs, but with a python script to generate the special
>> data. We can't use dd with random data because we need to test some
>> special cases (change only the last 96B of a page, etc.).
>>
>>
>> What do you think about it?


I think it can be done with some simple scripting and it will be fast 
enough and, more importantly, easier to understand and to change in the 
future.

Here is a short example for creating lots of identical pages that 
contain '0' apart from the last two bytes. If you run it in a single 
guest you should expect to save lots of memory. Then you can change the 
last bytes to a random value and see the memory consumption grow:
[Remember to cancel the guest swap to keep it in the guest ram]

dd if=/dev/zero of=template count=1 bs=4094
echo '1' >> template
cp template large_file
for ((i=0;i<10;i++)) do dd if=large_file of=large_file conv=notrunc oflag=append > /dev/null 2>&1 ; done

It creates a 4k*2^10 file with identical pages (since it's on tmpfs with 
no swap)

Can you try it? It should be far simpler than the original option.
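
The same pattern can also be produced from the guest-side Python, if
you keep a script there; a rough, untested sketch (the file name and
page count are arbitrary):

PAGE = 4096

# One template page: zeros except for the last two bytes ('1' plus a
# newline), mirroring the dd + echo trick above.
template = b"\x00" * (PAGE - 2) + b"1\n"

# Write 2^10 identical pages to a file on tmpfs; with guest swap off,
# KSM should be able to merge them all.
with open("/tmp/large_file", "wb") as f:
    for _ in range(1024):
        f.write(template)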

Thanks,
Dor

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [KVM-AUTOTEST PATCH 1/2] Add KSM test
  2009-09-25  9:22 ` Jiri Zupka
@ 2009-09-29 15:50   ` Lucas Meneghel Rodrigues
  2009-09-30 12:23     ` Dor Laor
  0 siblings, 1 reply; 15+ messages in thread
From: Lucas Meneghel Rodrigues @ 2009-09-29 15:50 UTC (permalink / raw)
  To: Jiri Zupka; +Cc: dlaor, kvm, autotest, Lukas Doktor

On Fri, 2009-09-25 at 05:22 -0400, Jiri Zupka wrote:
> ----- "Dor Laor" <dlaor@redhat.com> wrote:
> 
> > On 09/16/2009 04:09 PM, Jiri Zupka wrote:
> > >
> > > ----- "Dor Laor"<dlaor@redhat.com>  wrote:
> > >
> > >> On 09/15/2009 09:58 PM, Jiri Zupka wrote:
> > >>>> After a quick review I have the following questions:
> > >>>> 1. Why did you implement the guest tool in 'c' and not in python?
> > >>>>     Python is much simpler and you can share some code with the server.
> > >>>>     This 'test protocol' would also be easier to understand this way.
> > >>>
> > >>> We need speed and precise control of memory allocation in pages.
> > >>>
> > >>>> 2. IMHO there is no need to use select, you can do blocking read.
> > >>>
> > >>> We will replace the socket communication with interactive program
> > >>> communication via ssh/telnet.
> > >>>
> > >>>> 3. Also you can use plain malloc without the more complex (a bit) mmap.
> > >>>
> > >>> We need to address the memory pages exactly. We can't allow the data
> > >>> to shift in memory.
> > >>
> > >> You can use the tmpfs+dd idea instead of the specific program as I
> > >> detailed before. Maybe some other binary can be used. My intention is
> > >> to simplify the test/environment as much as possible.
> > >>
> > >
> > > We need compatibility with other systems, like Windows, etc.
> > > We want to add support for other systems in the next version.
> > 
> > KSM is a host feature and should be agnostic to the guest.
> > Also I don't think your code will compile on windows...
> 
> Yes, I think you are right.

First of all, sorry, I am doing the best I can to carefully review the
whole patch queue, and as KSM is a more involved feature that I am not
very familiar with, I need a bit more time to review it!

> But we need to generate special data in the guests' memory pages, so
> we need a script on the guest side of the test; communication over
> ssh is too slow to transfer many GB of special data to the guests.
> 
> We can use an optimized C program, which is 10x or more faster than a
> python script on a native system. A heavy load on the virtual guest
> can cause performance problems.

About code compiling under Windows: I guess making a native Windows C
or C++ program is an option. I generally agree with your reasoning;
this case seems to be better covered with a C program. Will get into
it in more detail ASAP...

> We can use tmpfs, but with a python script to generate the special
> data. We can't use dd with random data because we need to test some
> special cases (change only the last 96B of a page, etc.).
> 
> 
> What do you think about it? 


^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [KVM-AUTOTEST PATCH 1/2] Add KSM test
       [not found] <830286859.775301253870289468.JavaMail.root@zmail05.collab.prod.int.phx2.redhat.com>
@ 2009-09-25  9:22 ` Jiri Zupka
  2009-09-29 15:50   ` Lucas Meneghel Rodrigues
  0 siblings, 1 reply; 15+ messages in thread
From: Jiri Zupka @ 2009-09-25  9:22 UTC (permalink / raw)
  To: dlaor; +Cc: kvm, autotest, Lukas Doktor


----- "Dor Laor" <dlaor@redhat.com> wrote:

> On 09/16/2009 04:09 PM, Jiri Zupka wrote:
> >
> > ----- "Dor Laor"<dlaor@redhat.com>  wrote:
> >
> >> On 09/15/2009 09:58 PM, Jiri Zupka wrote:
> >>>> After a quick review I have the following questions:
> >>>> 1. Why did you implement the guest tool in 'c' and not in python?
> >>>>     Python is much simpler and you can share some code with the server.
> >>>>     This 'test protocol' would also be easier to understand this way.
> >>>
> >>> We need speed and precise control of memory allocation in pages.
> >>>
> >>>> 2. IMHO there is no need to use select, you can do blocking read.
> >>>
> >>> We will replace the socket communication with interactive program
> >>> communication via ssh/telnet.
> >>>
> >>>> 3. Also you can use plain malloc without the more complex (a bit) mmap.
> >>>
> >>> We need to address the memory pages exactly. We can't allow the data
> >>> to shift in memory.
> >>
> >> You can use the tmpfs+dd idea instead of the specific program as I
> >> detailed before. Maybe some other binary can be used. My intention is
> >> to simplify the test/environment as much as possible.
> >>
> >
> > We need compatibility with other systems, like Windows, etc.
> > We want to add support for other systems in the next version.
> 
> KSM is a host feature and should be agnostic to the guest.
> Also I don't think your code will compile on windows...

Yes, I think you are right.

But we need to generate special data in the guests' memory pages, so we
need a script on the guest side of the test; communication over ssh is
too slow to transfer many GB of special data to the guests.

We can use an optimized C program, which is 10x or more faster than a
python script on a native system. A heavy load on the virtual guest can
cause performance problems.

We can use tmpfs, but with a python script to generate the special data.
We can't use dd with random data because we need to test some special
cases (change only the last 96B of a page, etc.).
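
As an illustration of such a special case, a rough Python sketch (sizes
arbitrary, untested) that keeps every page identical except for a
random 96 B tail:

import mmap
import os

PAGE = 4096
TAIL = 96     # only the last 96 B of each page differ
PAGES = 1024  # arbitrary test size

buf = mmap.mmap(-1, PAGES * PAGE)  # anonymous mapping, page-aligned
for n in range(PAGES):
    base = n * PAGE
    buf[base:base + PAGE - TAIL] = b"\x00" * (PAGE - TAIL)
    buf[base + PAGE - TAIL:base + PAGE] = os.urandom(TAIL)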


What do you think about it? 


^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [KVM-AUTOTEST PATCH 1/2] Add KSM test
  2009-09-16 13:09 ` Jiri Zupka
@ 2009-09-16 13:21   ` Dor Laor
  0 siblings, 0 replies; 15+ messages in thread
From: Dor Laor @ 2009-09-16 13:21 UTC (permalink / raw)
  To: Jiri Zupka; +Cc: kvm, autotest, Lukas Doktor

On 09/16/2009 04:09 PM, Jiri Zupka wrote:
>
> ----- "Dor Laor"<dlaor@redhat.com>  wrote:
>
>> On 09/15/2009 09:58 PM, Jiri Zupka wrote:
>>>> After a quick review I have the following questions:
>>>> 1. Why did you implement the guest tool in 'c' and not in python?
>>>>     Python is much simpler and you can share some code with the server.
>>>>     This 'test protocol' would also be easier to understand this way.
>>>
>>> We need speed and precise control of memory allocation in pages.
>>>
>>>> 2. IMHO there is no need to use select, you can do blocking read.
>>>
>>> We will replace the socket communication with interactive program
>>> communication via ssh/telnet.
>>>
>>>> 3. Also you can use plain malloc without the more complex (a bit) mmap.
>>>
>>> We need to address the memory pages exactly. We can't allow the data
>>> to shift in memory.
>>
>> You can use the tmpfs+dd idea instead of the specific program as I
>> detailed before. Maybe some other binary can be used. My intention is
>> to simplify the test/environment as much as possible.
>>
>
> We need compatibility with other systems, like Windows, etc.
> We want to add support for other systems in the next version.

KSM is a host feature and should be agnostic to the guest.
Also I don't think your code will compile on windows...



^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [KVM-AUTOTEST PATCH 1/2] Add KSM test
       [not found] <83259492.232221253106465155.JavaMail.root@zmail05.collab.prod.int.phx2.redhat.com>
@ 2009-09-16 13:09 ` Jiri Zupka
  2009-09-16 13:21   ` Dor Laor
  0 siblings, 1 reply; 15+ messages in thread
From: Jiri Zupka @ 2009-09-16 13:09 UTC (permalink / raw)
  To: dlaor; +Cc: kvm, autotest, Lukas Doktor


----- "Dor Laor" <dlaor@redhat.com> wrote:

> On 09/15/2009 09:58 PM, Jiri Zupka wrote:
> >> After a quick review I have the following questions:
> >> 1. Why did you implement the guest tool in 'c' and not in python?
> >>    Python is much simpler and you can share some code with the server.
> >>    This 'test protocol' would also be easier to understand this way.
> >
> > We need speed and precise control of memory allocation in pages.
> >
> >> 2. IMHO there is no need to use select, you can do blocking read.
> >
> > We will replace the socket communication with interactive program
> > communication via ssh/telnet.
> >
> >> 3. Also you can use plain malloc without the more complex (a bit) mmap.
> >
> > We need to address the memory pages exactly. We can't allow the data
> > to shift in memory.
> 
> You can use the tmpfs+dd idea instead of the specific program as I
> detailed before. Maybe some other binary can be used. My intention is
> to simplify the test/environment as much as possible.
> 

We need compatibility with other systems, like Windows, etc.
We want to add support for other systems in the next version.


^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [KVM-AUTOTEST PATCH 1/2] Add KSM test
  2009-09-15 18:58 ` [KVM-AUTOTEST PATCH 1/2] " Jiri Zupka
@ 2009-09-16  8:56   ` Dor Laor
  0 siblings, 0 replies; 15+ messages in thread
From: Dor Laor @ 2009-09-16  8:56 UTC (permalink / raw)
  To: Jiri Zupka; +Cc: kvm, autotest, Lukas Doktor

On 09/15/2009 09:58 PM, Jiri Zupka wrote:
>> After a quick review I have the following questions:
>> 1. Why did you implement the guest tool in 'c' and not in python?
>>    Python is much simpler and you can share some code with the server.
>>    This 'test protocol' would also be easier to understand this way.
>
> We need speed and precise control of memory allocation in pages.
>
>> 2. IMHO there is no need to use select, you can do blocking read.
>
> We will replace the socket communication with interactive program communication via ssh/telnet.
>
>> 3. Also you can use plain malloc without the more complex (a bit) mmap.
>
> We need to address the memory pages exactly. We can't allow the data to shift in memory.

You can use the tmpfs+dd idea instead of the specific program as I 
detailed before. Maybe some other binary can be used. My intention is to 
simplify the test/environment as much as possible.




^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [KVM-AUTOTEST PATCH 1/2] Add KSM test
       [not found] <1785968988.186531253041095308.JavaMail.root@zmail05.collab.prod.int.phx2.redhat.com>
@ 2009-09-15 18:58 ` Jiri Zupka
  2009-09-16  8:56   ` Dor Laor
  0 siblings, 1 reply; 15+ messages in thread
From: Jiri Zupka @ 2009-09-15 18:58 UTC (permalink / raw)
  To: kvm, autotest; +Cc: Lukas Doktor, dlaor

> After a quick review I have the following questions:
> 1. Why did you implement the guest tool in 'c' and not in python?
>   Python is much simpler and you can share some code with the server.
>   This 'test protocol' would also be easier to understand this way.

We need speed and precise control of memory allocation in pages.

> 2. IMHO there is no need to use select, you can do blocking read.

We will replace the socket communication with interactive program
communication via ssh/telnet.

> 3. Also you can use plain malloc without the more complex (a bit) mmap.

We need to address the memory pages exactly. We can't allow the data to
shift in memory.
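
In Python terms, the same guarantee can be had from an anonymous mmap,
which always starts on a page boundary, so page i of the test data is
exactly bytes [i*PAGE_SIZE, (i+1)*PAGE_SIZE) of the mapping; a plain
heap allocation gives no such guarantee. A rough sketch (the fill value
and mapping size are arbitrary):

import mmap

PAGE_SIZE = mmap.PAGESIZE            # typically 4096
mem = mmap.mmap(-1, 16 * PAGE_SIZE)  # anonymous mapping, page-aligned

# Fill each page with a single repeated byte, the way the allocator's
# fill commands do; page boundaries line up with KSM's merge unit.
for page in range(16):
    mem[page * PAGE_SIZE:(page + 1) * PAGE_SIZE] = b"\xaa" * PAGE_SIZE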

^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2009-09-30 12:23 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-08-31  9:29 [KVM-AUTOTEST PATCH 0/2] Add KSM test Lukáš Doktor
2009-08-31  9:35 ` [KVM-AUTOTEST PATCH 1/2] " Lukáš Doktor
2009-09-01  8:49   ` Lukáš Doktor
2009-09-07 12:03   ` Dor Laor
2009-08-31  9:48 ` [KVM-AUTOTEST PATCH 2/2] " Lukáš Doktor
2009-09-01  8:50   ` Lukáš Doktor
2009-09-07 13:00   ` Dor Laor
2009-09-07 12:37 ` [KVM-AUTOTEST PATCH 0/2] " Uri Lublin
     [not found] <1785968988.186531253041095308.JavaMail.root@zmail05.collab.prod.int.phx2.redhat.com>
2009-09-15 18:58 ` [KVM-AUTOTEST PATCH 1/2] " Jiri Zupka
2009-09-16  8:56   ` Dor Laor
     [not found] <83259492.232221253106465155.JavaMail.root@zmail05.collab.prod.int.phx2.redhat.com>
2009-09-16 13:09 ` Jiri Zupka
2009-09-16 13:21   ` Dor Laor
     [not found] <830286859.775301253870289468.JavaMail.root@zmail05.collab.prod.int.phx2.redhat.com>
2009-09-25  9:22 ` Jiri Zupka
2009-09-29 15:50   ` Lucas Meneghel Rodrigues
2009-09-30 12:23     ` Dor Laor

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).