All of lore.kernel.org
 help / color / mirror / Atom feed
* [Xen-devel] [PATCH for-4.13 1/2] python/xc.c: Remove trailing whitespace
@ 2019-11-26 17:17 George Dunlap
  2019-11-26 17:17 ` [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling George Dunlap
  0 siblings, 1 reply; 20+ messages in thread
From: George Dunlap @ 2019-11-26 17:17 UTC (permalink / raw)
  To: xen-devel; +Cc: Juergen Gross, George Dunlap, Marek Marczykowski-Górecki

No functional change.

Signed-off-by: George Dunlap <george.dunlap@citrix.com>
---
CC: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
CC: Juergen Gross <jgross@suse.com>
---
 tools/python/xen/lowlevel/xc/xc.c | 210 +++++++++++++++---------------
 1 file changed, 105 insertions(+), 105 deletions(-)

diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 44d3606141..6d2afd5695 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  * Xc.c
- * 
+ *
  * Copyright (c) 2003-2004, K A Fraser (University of Cambridge)
  */
 
@@ -107,7 +107,7 @@ static PyObject *pyxc_domain_dumpcore(XcObject *self, PyObject *args)
 
     if ( xc_domain_dumpcore(self->xc_handle, dom, corefile) != 0 )
         return pyxc_error_to_exception(self->xc_handle);
-    
+
     Py_INCREF(zero);
     return zero;
 }
@@ -141,7 +141,7 @@ static PyObject *pyxc_domain_create(XcObject *self,
         return NULL;
     if ( pyhandle != NULL )
     {
-        if ( !PyList_Check(pyhandle) || 
+        if ( !PyList_Check(pyhandle) ||
              (PyList_Size(pyhandle) != sizeof(xen_domain_handle_t)) )
             goto out_exception;
 
@@ -188,7 +188,7 @@ static PyObject *pyxc_domain_max_vcpus(XcObject *self, PyObject *args)
 
     if (xc_domain_max_vcpus(self->xc_handle, dom, max) != 0)
         return pyxc_error_to_exception(self->xc_handle);
-    
+
     Py_INCREF(zero);
     return zero;
 }
@@ -223,7 +223,7 @@ static PyObject *pyxc_domain_shutdown(XcObject *self, PyObject *args)
 
     if ( xc_domain_shutdown(self->xc_handle, dom, reason) != 0 )
         return pyxc_error_to_exception(self->xc_handle);
-    
+
     Py_INCREF(zero);
     return zero;
 }
@@ -255,7 +255,7 @@ static PyObject *pyxc_vcpu_setaffinity(XcObject *self,
 
     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|iO", kwd_list, 
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|iO", kwd_list,
                                       &dom, &vcpu, &cpulist) )
         return NULL;
 
@@ -269,7 +269,7 @@ static PyObject *pyxc_vcpu_setaffinity(XcObject *self,
 
     if ( (cpulist != NULL) && PyList_Check(cpulist) )
     {
-        for ( i = 0; i < PyList_Size(cpulist); i++ ) 
+        for ( i = 0; i < PyList_Size(cpulist); i++ )
         {
             long cpu = PyLongOrInt_AsLong(PyList_GetItem(cpulist, i));
             if ( cpu < 0 || cpu >= nr_cpus )
@@ -282,7 +282,7 @@ static PyObject *pyxc_vcpu_setaffinity(XcObject *self,
             cpumap[cpu / 8] |= 1 << (cpu % 8);
         }
     }
-  
+
     if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap,
                              NULL, XEN_VCPUAFFINITY_HARD) != 0 )
     {
@@ -290,7 +290,7 @@ static PyObject *pyxc_vcpu_setaffinity(XcObject *self,
         return pyxc_error_to_exception(self->xc_handle);
     }
     Py_INCREF(zero);
-    free(cpumap); 
+    free(cpumap);
     return zero;
 }
 
@@ -304,7 +304,7 @@ static PyObject *pyxc_domain_sethandle(XcObject *self, PyObject *args)
     if (!PyArg_ParseTuple(args, "iO", &dom, &pyhandle))
         return NULL;
 
-    if ( !PyList_Check(pyhandle) || 
+    if ( !PyList_Check(pyhandle) ||
          (PyList_Size(pyhandle) != sizeof(xen_domain_handle_t)) )
     {
         goto out_exception;
@@ -320,7 +320,7 @@ static PyObject *pyxc_domain_sethandle(XcObject *self, PyObject *args)
 
     if (xc_domain_sethandle(self->xc_handle, dom, handle) < 0)
         return pyxc_error_to_exception(self->xc_handle);
-    
+
     Py_INCREF(zero);
     return zero;
 
@@ -342,7 +342,7 @@ static PyObject *pyxc_domain_getinfo(XcObject *self,
     xc_dominfo_t *info;
 
     static char *kwd_list[] = { "first_dom", "max_doms", NULL };
-    
+
     if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list,
                                       &first_dom, &max_doms) )
         return NULL;
@@ -415,7 +415,7 @@ static PyObject *pyxc_vcpu_getinfo(XcObject *self,
     int nr_cpus;
 
     static char *kwd_list[] = { "domid", "vcpu", NULL };
-    
+
     if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
                                       &dom, &vcpu) )
         return NULL;
@@ -470,7 +470,7 @@ static PyObject *pyxc_hvm_param_get(XcObject *self,
     int param;
     uint64_t value;
 
-    static char *kwd_list[] = { "domid", "param", NULL }; 
+    static char *kwd_list[] = { "domid", "param", NULL };
     if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list,
                                       &dom, &param) )
         return NULL;
@@ -490,7 +490,7 @@ static PyObject *pyxc_hvm_param_set(XcObject *self,
     int param;
     uint64_t value;
 
-    static char *kwd_list[] = { "domid", "param", "value", NULL }; 
+    static char *kwd_list[] = { "domid", "param", "value", NULL };
     if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiL", kwd_list,
                                       &dom, &param, &value) )
         return NULL;
@@ -660,7 +660,7 @@ static PyObject *pyxc_get_device_group(XcObject *self,
 
     if ( rc < 0 )
     {
-        free(sdev_array); 
+        free(sdev_array);
         return pyxc_error_to_exception(self->xc_handle);
     }
 
@@ -861,7 +861,7 @@ static PyObject *pyxc_physdev_pci_access_modify(XcObject *self,
 
     static char *kwd_list[] = { "domid", "bus", "dev", "func", "enable", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiii", kwd_list, 
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiii", kwd_list,
                                       &dom, &bus, &dev, &func, &enable) )
         return NULL;
 
@@ -976,7 +976,7 @@ static PyObject *pyxc_physinfo(XcObject *self)
                             "nr_nodes",         pinfo.nr_nodes,
                             "threads_per_core", pinfo.threads_per_core,
                             "cores_per_socket", pinfo.cores_per_socket,
-                            "nr_cpus",          pinfo.nr_cpus, 
+                            "nr_cpus",          pinfo.nr_cpus,
                             "total_memory",     pages_to_kib(pinfo.total_pages),
                             "free_memory",      pages_to_kib(pinfo.free_pages),
                             "scrub_memory",     pages_to_kib(pinfo.scrub_pages),
@@ -1266,14 +1266,14 @@ static PyObject *pyxc_shadow_control(PyObject *self,
 
     static char *kwd_list[] = { "dom", "op", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list, 
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
                                       &dom, &op) )
         return NULL;
-    
-    if ( xc_shadow_control(xc->xc_handle, dom, op, NULL, 0, NULL, 0, NULL) 
+
+    if ( xc_shadow_control(xc->xc_handle, dom, op, NULL, 0, NULL, 0, NULL)
          < 0 )
         return pyxc_error_to_exception(xc->xc_handle);
-    
+
     Py_INCREF(zero);
     return zero;
 }
@@ -1290,26 +1290,26 @@ static PyObject *pyxc_shadow_mem_control(PyObject *self,
 
     static char *kwd_list[] = { "dom", "mb", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list, 
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
                                       &dom, &mbarg) )
         return NULL;
-    
-    if ( mbarg < 0 ) 
+
+    if ( mbarg < 0 )
         op = XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION;
-    else 
+    else
     {
         mb = mbarg;
         op = XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION;
     }
     if ( xc_shadow_control(xc->xc_handle, dom, op, NULL, 0, &mb, 0, NULL) < 0 )
         return pyxc_error_to_exception(xc->xc_handle);
-    
+
     mbarg = mb;
     return Py_BuildValue("i", mbarg);
 }
 
 static PyObject *pyxc_sched_id_get(XcObject *self) {
-    
+
     int sched_id;
     if (xc_sched_id(self->xc_handle, &sched_id) != 0)
         return PyErr_SetFromErrno(xc_error_obj);
@@ -1327,10 +1327,10 @@ static PyObject *pyxc_sched_credit_domain_set(XcObject *self,
     static char *kwd_list[] = { "domid", "weight", "cap", NULL };
     static char kwd_type[] = "I|HH";
     struct xen_domctl_sched_credit sdom;
-    
+
     weight = 0;
     cap = (uint16_t)~0U;
-    if( !PyArg_ParseTupleAndKeywords(args, kwds, kwd_type, kwd_list, 
+    if( !PyArg_ParseTupleAndKeywords(args, kwds, kwd_type, kwd_list,
                                      &domid, &weight, &cap) )
         return NULL;
 
@@ -1348,10 +1348,10 @@ static PyObject *pyxc_sched_credit_domain_get(XcObject *self, PyObject *args)
 {
     uint32_t domid;
     struct xen_domctl_sched_credit sdom;
-    
+
     if( !PyArg_ParseTuple(args, "I", &domid) )
         return NULL;
-    
+
     if ( xc_sched_credit_domain_get(self->xc_handle, domid, &sdom) != 0 )
         return pyxc_error_to_exception(self->xc_handle);
 
@@ -1412,7 +1412,7 @@ static PyObject *pyxc_domain_setmaxmem(XcObject *self, PyObject *args)
 
     if (xc_domain_setmaxmem(self->xc_handle, dom, maxmem_kb) != 0)
         return pyxc_error_to_exception(self->xc_handle);
-    
+
     Py_INCREF(zero);
     return zero;
 }
@@ -1425,12 +1425,12 @@ static PyObject *pyxc_domain_set_target_mem(XcObject *self, PyObject *args)
     if (!PyArg_ParseTuple(args, "ii", &dom, &mem_kb))
         return NULL;
 
-    mem_pages = mem_kb / 4; 
+    mem_pages = mem_kb / 4;
 
     if (xc_domain_set_pod_target(self->xc_handle, dom, mem_pages,
 				 NULL, NULL, NULL) != 0)
         return pyxc_error_to_exception(self->xc_handle);
-    
+
     Py_INCREF(zero);
     return zero;
 }
@@ -1445,7 +1445,7 @@ static PyObject *pyxc_domain_set_memmap_limit(XcObject *self, PyObject *args)
 
     if ( xc_domain_set_memmap_limit(self->xc_handle, dom, maplimit_kb) != 0 )
         return pyxc_error_to_exception(self->xc_handle);
-    
+
     Py_INCREF(zero);
     return zero;
 }
@@ -1459,7 +1459,7 @@ static PyObject *pyxc_domain_ioport_permission(XcObject *self,
 
     static char *kwd_list[] = { "domid", "first_port", "nr_ports", "allow_access", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiii", kwd_list, 
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiii", kwd_list,
                                       &dom, &first_port, &nr_ports, &allow_access) )
         return NULL;
 
@@ -1482,7 +1482,7 @@ static PyObject *pyxc_domain_irq_permission(PyObject *self,
 
     static char *kwd_list[] = { "domid", "pirq", "allow_access", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iii", kwd_list, 
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iii", kwd_list,
                                       &dom, &pirq, &allow_access) )
         return NULL;
 
@@ -1505,7 +1505,7 @@ static PyObject *pyxc_domain_iomem_permission(PyObject *self,
 
     static char *kwd_list[] = { "domid", "first_pfn", "nr_pfns", "allow_access", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "illi", kwd_list, 
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "illi", kwd_list,
                                       &dom, &first_pfn, &nr_pfns, &allow_access) )
         return NULL;
 
@@ -1570,7 +1570,7 @@ static PyObject *pyxc_domain_send_trigger(XcObject *self,
 
     static char *kwd_list[] = { "domid", "trigger", "vcpu", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii|i", kwd_list, 
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii|i", kwd_list,
                                       &dom, &trigger, &vcpu) )
         return NULL;
 
@@ -1624,7 +1624,7 @@ static PyObject *pyxc_dom_set_memshr(XcObject *self, PyObject *args)
 
     if (xc_memshr_control(self->xc_handle, dom, enable) != 0)
         return pyxc_error_to_exception(self->xc_handle);
-    
+
     Py_INCREF(zero);
     return zero;
 }
@@ -1848,11 +1848,11 @@ static PyObject *pyflask_sid_to_context(PyObject *self, PyObject *args,
     if (!xc_handle) {
         return PyErr_SetFromErrno(xc_error_obj);
     }
-    
+
     ret = xc_flask_sid_to_context(xc_handle, sid, ctx, ctx_len);
-    
+
     xc_interface_close(xc_handle);
-    
+
     if ( ret != 0 ) {
         errno = -ret;
         return PyErr_SetFromErrno(xc_error_obj);
@@ -1869,7 +1869,7 @@ static PyObject *pyflask_load(PyObject *self, PyObject *args, PyObject *kwds)
     int ret;
 
     static char *kwd_list[] = { "policy", NULL };
-  
+
     if( !PyArg_ParseTupleAndKeywords(args, kwds, "s#", kwd_list, &policy, &len) )
         return NULL;
 
@@ -1899,11 +1899,11 @@ static PyObject *pyflask_getenforce(PyObject *self)
     if (!xc_handle) {
         return PyErr_SetFromErrno(xc_error_obj);
     }
-    
+
     ret = xc_flask_getenforce(xc_handle);
-    
+
     xc_interface_close(xc_handle);
-    
+
     if ( ret < 0 ) {
         errno = -ret;
         return PyErr_SetFromErrno(xc_error_obj);
@@ -1929,11 +1929,11 @@ static PyObject *pyflask_setenforce(PyObject *self, PyObject *args,
     if (!xc_handle) {
         return PyErr_SetFromErrno(xc_error_obj);
     }
-    
+
     ret = xc_flask_setenforce(xc_handle, mode);
-    
+
     xc_interface_close(xc_handle);
-    
+
     if ( ret != 0 ) {
         errno = -ret;
         return PyErr_SetFromErrno(xc_error_obj);
@@ -1951,7 +1951,7 @@ static PyObject *pyflask_access(PyObject *self, PyObject *args,
     uint32_t req, allowed, decided, auditallow, auditdeny, seqno;
     int ret;
 
-    static char *kwd_list[] = { "src_context", "tar_context", 
+    static char *kwd_list[] = { "src_context", "tar_context",
                                 "tar_class", "req_permissions",
                                 "decided", "auditallow","auditdeny",
                                 "seqno", NULL };
@@ -1965,10 +1965,10 @@ static PyObject *pyflask_access(PyObject *self, PyObject *args,
     if (!xc_handle) {
         return PyErr_SetFromErrno(xc_error_obj);
     }
-    
+
     ret = xc_flask_access(xc_handle, scon, tcon, tclass, req, &allowed, &decided,
                         &auditallow, &auditdeny, &seqno);
-        
+
     xc_interface_close(xc_handle);
 
     if ( ret != 0 ) {
@@ -1980,14 +1980,14 @@ static PyObject *pyflask_access(PyObject *self, PyObject *args,
 }
 
 static PyMethodDef pyxc_methods[] = {
-    { "domain_create", 
-      (PyCFunction)pyxc_domain_create, 
+    { "domain_create",
+      (PyCFunction)pyxc_domain_create,
       METH_VARARGS | METH_KEYWORDS, "\n"
       "Create a new domain.\n"
       " dom    [int, 0]:        Domain identifier to use (allocated if zero).\n"
       "Returns: [int] new domain identifier; -1 on error.\n" },
 
-    { "domain_max_vcpus", 
+    { "domain_max_vcpus",
       (PyCFunction)pyxc_domain_max_vcpus,
       METH_VARARGS, "\n"
       "Set the maximum number of VCPUs a domain may create.\n"
@@ -1995,43 +1995,43 @@ static PyMethodDef pyxc_methods[] = {
       " max     [int, 0]:      New maximum number of VCPUs in domain.\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
-    { "domain_dumpcore", 
-      (PyCFunction)pyxc_domain_dumpcore, 
+    { "domain_dumpcore",
+      (PyCFunction)pyxc_domain_dumpcore,
       METH_VARARGS, "\n"
       "Dump core of a domain.\n"
       " dom [int]: Identifier of domain to dump core of.\n"
       " corefile [string]: Name of corefile to be created.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
-    { "domain_pause", 
-      (PyCFunction)pyxc_domain_pause, 
+    { "domain_pause",
+      (PyCFunction)pyxc_domain_pause,
       METH_VARARGS, "\n"
       "Temporarily pause execution of a domain.\n"
       " dom [int]: Identifier of domain to be paused.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
-    { "domain_unpause", 
-      (PyCFunction)pyxc_domain_unpause, 
+    { "domain_unpause",
+      (PyCFunction)pyxc_domain_unpause,
       METH_VARARGS, "\n"
       "(Re)start execution of a domain.\n"
       " dom [int]: Identifier of domain to be unpaused.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
-    { "domain_destroy", 
-      (PyCFunction)pyxc_domain_destroy, 
+    { "domain_destroy",
+      (PyCFunction)pyxc_domain_destroy,
       METH_VARARGS, "\n"
       "Destroy a domain.\n"
       " dom [int]:    Identifier of domain to be destroyed.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
-    { "domain_destroy_hook", 
-      (PyCFunction)pyxc_domain_destroy_hook, 
+    { "domain_destroy_hook",
+      (PyCFunction)pyxc_domain_destroy_hook,
       METH_VARARGS, "\n"
       "Add a hook for arch stuff before destroy a domain.\n"
       " dom [int]:    Identifier of domain to be destroyed.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
-    { "domain_resume", 
+    { "domain_resume",
       (PyCFunction)pyxc_domain_resume,
       METH_VARARGS, "\n"
       "Resume execution of a suspended domain.\n"
@@ -2039,7 +2039,7 @@ static PyMethodDef pyxc_methods[] = {
       " fast [int]: Use cooperative resume.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
-    { "domain_shutdown", 
+    { "domain_shutdown",
       (PyCFunction)pyxc_domain_shutdown,
       METH_VARARGS, "\n"
       "Shutdown a domain.\n"
@@ -2047,8 +2047,8 @@ static PyMethodDef pyxc_methods[] = {
       " reason     [int, 0]:      Reason for shutdown.\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
-    { "vcpu_setaffinity", 
-      (PyCFunction)pyxc_vcpu_setaffinity, 
+    { "vcpu_setaffinity",
+      (PyCFunction)pyxc_vcpu_setaffinity,
       METH_VARARGS | METH_KEYWORDS, "\n"
       "Pin a VCPU to a specified set CPUs.\n"
       " dom [int]:     Identifier of domain to which VCPU belongs.\n"
@@ -2056,7 +2056,7 @@ static PyMethodDef pyxc_methods[] = {
       " cpumap [list, []]: list of usable CPUs.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
-    { "domain_sethandle", 
+    { "domain_sethandle",
       (PyCFunction)pyxc_domain_sethandle,
       METH_VARARGS, "\n"
       "Set domain's opaque handle.\n"
@@ -2064,8 +2064,8 @@ static PyMethodDef pyxc_methods[] = {
       " handle [list of 16 ints]: New opaque handle.\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
-    { "domain_getinfo", 
-      (PyCFunction)pyxc_domain_getinfo, 
+    { "domain_getinfo",
+      (PyCFunction)pyxc_domain_getinfo,
       METH_VARARGS | METH_KEYWORDS, "\n"
       "Get information regarding a set of domains, in increasing id order.\n"
       " first_dom [int, 0]:    First domain to retrieve info about.\n"
@@ -2090,8 +2090,8 @@ static PyMethodDef pyxc_methods[] = {
       "reason why it shut itself down.\n"
       " cpupool  [int]   Id of cpupool domain is bound to.\n" },
 
-    { "vcpu_getinfo", 
-      (PyCFunction)pyxc_vcpu_getinfo, 
+    { "vcpu_getinfo",
+      (PyCFunction)pyxc_vcpu_getinfo,
       METH_VARARGS | METH_KEYWORDS, "\n"
       "Get information regarding a VCPU.\n"
       " dom  [int]:    Domain to retrieve info about.\n"
@@ -2115,7 +2115,7 @@ static PyMethodDef pyxc_methods[] = {
       " xenstore_domid [int]: \n"
       "Returns: None on success. Raises exception on error.\n" },
 
-    { "hvm_get_param", 
+    { "hvm_get_param",
       (PyCFunction)pyxc_hvm_param_get,
       METH_VARARGS | METH_KEYWORDS, "\n"
       "get a parameter of HVM guest OS.\n"
@@ -2123,7 +2123,7 @@ static PyMethodDef pyxc_methods[] = {
       " param   [int]:      No. of HVM param.\n"
       "Returns: [long] value of the param.\n" },
 
-    { "hvm_set_param", 
+    { "hvm_set_param",
       (PyCFunction)pyxc_hvm_param_set,
       METH_VARARGS | METH_KEYWORDS, "\n"
       "set a parameter of HVM guest OS.\n"
@@ -2166,12 +2166,12 @@ static PyMethodDef pyxc_methods[] = {
        " dom     [int]:      Domain to deassign device from.\n"
        " pci_str [str]:      PCI devices.\n"
        "Returns: [int] 0 on success, or device bdf that can't be deassigned.\n" },
-  
+
     { "sched_id_get",
       (PyCFunction)pyxc_sched_id_get,
       METH_NOARGS, "\n"
       "Get the current scheduler type in use.\n"
-      "Returns: [int] sched_id.\n" },    
+      "Returns: [int] sched_id.\n" },
 
     { "sched_credit_domain_set",
       (PyCFunction)pyxc_sched_credit_domain_set,
@@ -2209,7 +2209,7 @@ static PyMethodDef pyxc_methods[] = {
       "Returns:   [dict]\n"
       " weight    [short]: domain's scheduling weight\n"},
 
-    { "evtchn_alloc_unbound", 
+    { "evtchn_alloc_unbound",
       (PyCFunction)pyxc_evtchn_alloc_unbound,
       METH_VARARGS | METH_KEYWORDS, "\n"
       "Allocate an unbound port that will await a remote connection.\n"
@@ -2217,7 +2217,7 @@ static PyMethodDef pyxc_methods[] = {
       " remote_dom [int]: Remote domain to accept connections from.\n\n"
       "Returns: [int] Unbound event-channel port.\n" },
 
-    { "evtchn_reset", 
+    { "evtchn_reset",
       (PyCFunction)pyxc_evtchn_reset,
       METH_VARARGS | METH_KEYWORDS, "\n"
       "Reset all connections.\n"
@@ -2242,9 +2242,9 @@ static PyMethodDef pyxc_methods[] = {
       " func   [int]: PCI function\n"
       " enable [int]: Non-zero means enable access; else disable access\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
- 
-    { "readconsolering", 
-      (PyCFunction)pyxc_readconsolering, 
+
+    { "readconsolering",
+      (PyCFunction)pyxc_readconsolering,
       METH_VARARGS | METH_KEYWORDS, "\n"
       "Read Xen's console ring.\n"
       " clear [int, 0]: Bool - clear the ring after reading from it?\n\n"
@@ -2292,40 +2292,40 @@ static PyMethodDef pyxc_methods[] = {
       "Returns [str]: Xen buildid"
       "        [None]: on failure.\n" },
 
-    { "shadow_control", 
-      (PyCFunction)pyxc_shadow_control, 
+    { "shadow_control",
+      (PyCFunction)pyxc_shadow_control,
       METH_VARARGS | METH_KEYWORDS, "\n"
       "Set parameter for shadow pagetable interface\n"
       " dom [int]:   Identifier of domain.\n"
       " op [int, 0]: operation\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
-    { "shadow_mem_control", 
-      (PyCFunction)pyxc_shadow_mem_control, 
+    { "shadow_mem_control",
+      (PyCFunction)pyxc_shadow_mem_control,
       METH_VARARGS | METH_KEYWORDS, "\n"
       "Set or read shadow pagetable memory use\n"
       " dom [int]:   Identifier of domain.\n"
       " mb [int, -1]: MB of shadow memory this domain should have.\n\n"
       "Returns: [int] MB of shadow memory in use by this domain.\n" },
 
-    { "domain_setmaxmem", 
-      (PyCFunction)pyxc_domain_setmaxmem, 
+    { "domain_setmaxmem",
+      (PyCFunction)pyxc_domain_setmaxmem,
       METH_VARARGS, "\n"
       "Set a domain's memory limit\n"
       " dom [int]: Identifier of domain.\n"
       " maxmem_kb [int]: .\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
-    { "domain_set_target_mem", 
-      (PyCFunction)pyxc_domain_set_target_mem, 
+    { "domain_set_target_mem",
+      (PyCFunction)pyxc_domain_set_target_mem,
       METH_VARARGS, "\n"
       "Set a domain's memory target\n"
       " dom [int]: Identifier of domain.\n"
       " mem_kb [int]: .\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
-    { "domain_set_memmap_limit", 
-      (PyCFunction)pyxc_domain_set_memmap_limit, 
+    { "domain_set_memmap_limit",
+      (PyCFunction)pyxc_domain_set_memmap_limit,
       METH_VARARGS, "\n"
       "Set a domain's physical memory mapping limit\n"
       " dom [int]: Identifier of domain.\n"
@@ -2407,8 +2407,8 @@ static PyMethodDef pyxc_methods[] = {
       " keys    [str]: String of keys to inject.\n" },
 
 #if defined(__i386__) || defined(__x86_64__)
-    { "domain_set_cpuid", 
-      (PyCFunction)pyxc_dom_set_cpuid, 
+    { "domain_set_cpuid",
+      (PyCFunction)pyxc_dom_set_cpuid,
       METH_VARARGS, "\n"
       "Set cpuid response for an input and a domain.\n"
       " dom [int]: Identifier of domain.\n"
@@ -2418,15 +2418,15 @@ static PyMethodDef pyxc_methods[] = {
       " config [dict]: Dictionary of register\n\n"
       "Returns: [int] 0 on success; exception on error.\n" },
 
-    { "domain_set_policy_cpuid", 
-      (PyCFunction)pyxc_dom_set_policy_cpuid, 
+    { "domain_set_policy_cpuid",
+      (PyCFunction)pyxc_dom_set_policy_cpuid,
       METH_VARARGS, "\n"
       "Set the default cpuid policy for a domain.\n"
       " dom [int]: Identifier of domain.\n\n"
       "Returns: [int] 0 on success; exception on error.\n" },
 #endif
 
-    { "dom_set_memshr", 
+    { "dom_set_memshr",
       (PyCFunction)pyxc_dom_set_memshr,
       METH_VARARGS, "\n"
       "Enable/disable memory sharing for the domain.\n"
@@ -2508,20 +2508,20 @@ static PyMethodDef pyxc_methods[] = {
       METH_KEYWORDS, "\n"
       "Loads a policy into the hypervisor.\n"
       " policy [str]: policy to be load\n"
-      "Returns: [int]: 0 on success; -1 on failure.\n" }, 
-      
+      "Returns: [int]: 0 on success; -1 on failure.\n" },
+
     { "flask_getenforce",
       (PyCFunction)pyflask_getenforce,
       METH_NOARGS, "\n"
       "Returns the current mode of the Flask XSM module.\n"
-      "Returns: [int]: 0 for permissive; 1 for enforcing; -1 on failure.\n" }, 
+      "Returns: [int]: 0 for permissive; 1 for enforcing; -1 on failure.\n" },
 
     { "flask_setenforce",
       (PyCFunction)pyflask_setenforce,
       METH_KEYWORDS, "\n"
       "Modifies the current mode for the Flask XSM module.\n"
       " mode [int]: mode to change to\n"
-      "Returns: [int]: 0 on success; -1 on failure.\n" }, 
+      "Returns: [int]: 0 on success; -1 on failure.\n" },
 
     { "flask_access",
       (PyCFunction)pyflask_access,
@@ -2540,7 +2540,7 @@ static PyMethodDef pyxc_methods[] = {
       " auditdeny [int] permissions set to audit on deny\n"
       " seqno [int] not used\n"
       "Returns: [int]: 0 on all permission granted; -1 if any permissions are \
-       denied\n" }, 
+       denied\n" },
 
     { NULL, NULL, 0, NULL }
 };
-- 
2.24.0


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-26 17:17 [Xen-devel] [PATCH for-4.13 1/2] python/xc.c: Remove trailing whitespace George Dunlap
@ 2019-11-26 17:17 ` George Dunlap
  2019-11-26 17:30   ` George Dunlap
                     ` (3 more replies)
  0 siblings, 4 replies; 20+ messages in thread
From: George Dunlap @ 2019-11-26 17:17 UTC (permalink / raw)
  To: xen-devel
  Cc: Juergen Gross, Stefano Stabellini, Julien Grall, Wei Liu,
	Paul Durrant, Andrew Cooper, Konrad Rzeszutek Wilk,
	George Dunlap, Marek Marczykowski-Górecki, Jan Beulich,
	Ian Jackson

Xen used to have single, system-wide limits for the number of grant
frames and maptrack frames a guest was allowed to create.  Increasing
or decreasing this single limit on the Xen command-line would change
the limit for all guests on the system.

Later, per-domain limits for these values were created.  The
system-wide limits became strict limits: domains could not be created
with higher limits, but could be created with lower limits.

However, the change also introduced a range of different "default"
values into various places in the toolstack:

- The python libxc bindings hard-coded these values to 32 and 1024,
  respectively

- The libxl default values are 32 and 1024 respectively.

- xl will use the libxl default for maptrack, but does its own default
  calculation for grant frames: either 32 or 64, based on the max
  possible mfn.

These defaults interact poorly with the hypervisor command-line limit:

- The hypervisor command-line limit cannot be used to raise the limit
  for all guests anymore, as the default in the toolstack will
  effectively override this.

- If you use the hypervisor command-line limit to *reduce* the limit,
  then the "default" values generated by the toolstack are too high,
  and all guest creations will fail.

In other words, the toolstack defaults require any change to be
effected by having the admin explicitly specify a new value in every
guest.

In order to address this, have grant_table_init treat '0' values for
max_grant_frames and max_maptrack_frames as instructions to use the
system-wide default.  Have all the above toolstacks default to passing
0 unless a different value is explicitly given.

This restores the old behavior, that changing the hypervisor
command-line option can change the behavior for all guests, while
retaining the ability to set per-guest values.  It also removes the
bug that *reducing* the system-wide max will cause all domains without
explicit limits to fail.

(The ocaml bindings require the caller to always specify a value, and
the code to start a xenstored stubdomain hard-codes these to 4 and 128
respectively; these will not be addressed here.)

Signed-off-by: George Dunlap <george.dunlap@citrix.com>
---
Release justification: This is an observed regression (albeit one that
has spanned several releases now).

Compile-tested only.

NB this patch could be applied without the whitespace fixes (perhaps
with some fix-ups); it's just easier since my editor strips trailing
whitespace out automatically.

CC: Ian Jackson <ian.jackson@citrix.com>
CC: Wei Liu <wl@xen.org>
CC: Andrew Cooper <andrew.cooper3@citrix.com>
CC: Jan Beulich <jbeulich@suse.com>
CC: Paul Durrant <paul@xen.org>
CC: Julien Grall <julien@xen.org>
CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
CC: Stefano Stabellini <sstabellini@kernel.org>
CC: Juergen Gross <jgross@suse.com>
CC: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
---
 tools/libxl/libxl.h               |  4 ++--
 tools/python/xen/lowlevel/xc/xc.c |  2 --
 tools/xl/xl.c                     | 12 ++----------
 xen/common/grant_table.c          |  7 +++++++
 xen/include/public/domctl.h       |  6 ++++--
 5 files changed, 15 insertions(+), 16 deletions(-)

diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
index 49b56fa1a3..1648d337e7 100644
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -364,8 +364,8 @@
  */
 #define LIBXL_HAVE_BUILDINFO_GRANT_LIMITS 1
 
-#define LIBXL_MAX_GRANT_FRAMES_DEFAULT 32
-#define LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT 1024
+#define LIBXL_MAX_GRANT_FRAMES_DEFAULT 0
+#define LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT 0
 
 /*
  * LIBXL_HAVE_BUILDINFO_* indicates that libxl_domain_build_info has
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 6d2afd5695..0f861872ce 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -127,8 +127,6 @@ static PyObject *pyxc_domain_create(XcObject *self,
         },
         .max_vcpus = 1,
         .max_evtchn_port = -1, /* No limit. */
-        .max_grant_frames = 32,
-        .max_maptrack_frames = 1024,
     };
 
     static char *kwd_list[] = { "domid", "ssidref", "handle", "flags",
diff --git a/tools/xl/xl.c b/tools/xl/xl.c
index ddd29b3f1b..b6e220184d 100644
--- a/tools/xl/xl.c
+++ b/tools/xl/xl.c
@@ -51,8 +51,8 @@ libxl_bitmap global_pv_affinity_mask;
 enum output_format default_output_format = OUTPUT_FORMAT_JSON;
 int claim_mode = 1;
 bool progress_use_cr = 0;
-int max_grant_frames = -1;
-int max_maptrack_frames = -1;
+int max_grant_frames = 0;
+int max_maptrack_frames = 0;
 
 xentoollog_level minmsglevel = minmsglevel_default;
 
@@ -96,7 +96,6 @@ static void parse_global_config(const char *configfile,
     XLU_Config *config;
     int e;
     const char *buf;
-    libxl_physinfo physinfo;
 
     config = xlu_cfg_init(stderr, configfile);
     if (!config) {
@@ -199,13 +198,6 @@ static void parse_global_config(const char *configfile,
 
     if (!xlu_cfg_get_long (config, "max_grant_frames", &l, 0))
         max_grant_frames = l;
-    else {
-        libxl_physinfo_init(&physinfo);
-        max_grant_frames = (libxl_get_physinfo(ctx, &physinfo) != 0 ||
-                            !(physinfo.max_possible_mfn >> 32))
-                           ? 32 : 64;
-        libxl_physinfo_dispose(&physinfo);
-    }
     if (!xlu_cfg_get_long (config, "max_maptrack_frames", &l, 0))
         max_maptrack_frames = l;
 
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index b34d520f6d..cd24029e33 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1843,6 +1843,13 @@ int grant_table_init(struct domain *d, unsigned int max_grant_frames,
     struct grant_table *gt;
     int ret = -ENOMEM;
 
+    /* Default to maximum values if no lower ones are specified */
+    if ( !max_grant_frames )
+        max_grant_frames = opt_max_grant_frames;
+
+    if ( !max_maptrack_frames )
+        max_maptrack_frames = opt_max_maptrack_frames;
+
     if ( max_grant_frames < INITIAL_NR_GRANT_FRAMES ||
          max_grant_frames > opt_max_grant_frames ||
          max_maptrack_frames > opt_max_maptrack_frames )
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 9f2cfd602c..27d04f67aa 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -82,8 +82,10 @@ struct xen_domctl_createdomain {
     uint32_t iommu_opts;
 
     /*
-     * Various domain limits, which impact the quantity of resources (global
-     * mapping space, xenheap, etc) a guest may consume.
+     * Various domain limits, which impact the quantity of resources
+     * (global mapping space, xenheap, etc) a guest may consume.  For
+     * max_grant_frames and max_maptrack_frames, "0" means "use the
+     * default maximum value".
      */
     uint32_t max_vcpus;
     uint32_t max_evtchn_port;
-- 
2.24.0


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-26 17:17 ` [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling George Dunlap
@ 2019-11-26 17:30   ` George Dunlap
  2019-11-27  4:34     ` Jürgen Groß
  2019-11-26 17:32   ` Durrant, Paul
                     ` (2 subsequent siblings)
  3 siblings, 1 reply; 20+ messages in thread
From: George Dunlap @ 2019-11-26 17:30 UTC (permalink / raw)
  To: xen-devel
  Cc: Juergen Gross, Stefano Stabellini, Julien Grall, Wei Liu,
	Paul Durrant, Andrew Cooper, Konrad Rzeszutek Wilk,
	Marek Marczykowski-Górecki, Jan Beulich, Ian Jackson

On 11/26/19 5:17 PM, George Dunlap wrote:
> - xl will use the libxl default for maptrack, but does its own default
>   calculation for grant frames: either 32 or 64, based on the max
>   possible mfn.

[snip]

> @@ -199,13 +198,6 @@ static void parse_global_config(const char *configfile,
>  
>      if (!xlu_cfg_get_long (config, "max_grant_frames", &l, 0))
>          max_grant_frames = l;
> -    else {
> -        libxl_physinfo_init(&physinfo);
> -        max_grant_frames = (libxl_get_physinfo(ctx, &physinfo) != 0 ||
> -                            !(physinfo.max_possible_mfn >> 32))
> -                           ? 32 : 64;
> -        libxl_physinfo_dispose(&physinfo);
> -    }

Sorry, meant to add a patch to add this functionality back into the
hypervisor -- i.e., so that opt_max_grant_frames would be 32 on systems
with 32-bit mfns.

But this seems like a fairly strange calculation anyway; it's not clear
to me where it would have come from.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-26 17:17 ` [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling George Dunlap
  2019-11-26 17:30   ` George Dunlap
@ 2019-11-26 17:32   ` Durrant, Paul
  2019-11-26 17:36   ` Ian Jackson
  2019-11-27  4:32   ` Jürgen Groß
  3 siblings, 0 replies; 20+ messages in thread
From: Durrant, Paul @ 2019-11-26 17:32 UTC (permalink / raw)
  To: George Dunlap, xen-devel
  Cc: Juergen Gross, Stefano Stabellini, Julien Grall, Wei Liu,
	Paul Durrant, Andrew Cooper, Konrad Rzeszutek Wilk,
	Marek Marczykowski-Górecki, Jan Beulich, Ian Jackson

> -----Original Message-----
> From: Xen-devel <xen-devel-bounces@lists.xenproject.org> On Behalf Of
> George Dunlap
> Sent: 26 November 2019 17:18
> To: xen-devel@lists.xenproject.org
> Cc: Juergen Gross <jgross@suse.com>; Stefano Stabellini
> <sstabellini@kernel.org>; Julien Grall <julien@xen.org>; Wei Liu
> <wl@xen.org>; Paul Durrant <paul@xen.org>; Andrew Cooper
> <andrew.cooper3@citrix.com>; Konrad Rzeszutek Wilk
> <konrad.wilk@oracle.com>; George Dunlap <george.dunlap@citrix.com>; Marek
> Marczykowski-Górecki <marmarek@invisiblethingslab.com>; Jan Beulich
> <jbeulich@suse.com>; Ian Jackson <ian.jackson@citrix.com>
> Subject: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and
> max_maptrack_frames handling
> 
> Xen used to have single, system-wide limits for the number of grant
> frames and maptrack frames a guest was allowed to create.  Increasing
> or decreasing this single limit on the Xen command-line would change
> the limit for all guests on the system.
> 
> Later, per-domain limits for these values were created.  The
> system-wide limits became strict limits: domains could not be created
> with higher limits, but could be created with lower limits.
> 
> However, the change also introduced a range of different "default"
> values into various places in the toolstack:
> 
> - The python libxc bindings hard-coded these values to 32 and 1024,
>   respectively
> 
> - The libxl default values are 32 and 1024 respectively.
> 
> - xl will use the libxl default for maptrack, but does its own default
>   calculation for grant frames: either 32 or 64, based on the max
>   possible mfn.
> 
> These defaults interact poorly with the hypervisor command-line limit:
> 
> - The hypervisor command-line limit cannot be used to raise the limit
>   for all guests anymore, as the default in the toolstack will
>   effectively override this.
> 
> - If you use the hypervisor command-line limit to *reduce* the limit,
>   then the "default" values generated by the toolstack are too high,
>   and all guest creations will fail.
> 
> In other words, the toolstack defaults require any change to be
> effected by having the admin explicitly specify a new value in every
> guest.
> 
> In order to address this, have grant_table_init treat '0' values for
> max_grant_frames and max_maptrack_frames as instructions to use the
> system-wide default.  Have all the above toolstacks default to passing
> 0 unless a different value is explicitly given.
> 
> This restores the old behavior, that changing the hypervisor
> command-line option can change the behavior for all guests, while
> retaining the ability to set per-guest values.  It also removes the
> bug that *reducing* the system-wide max will cause all domains without
> explicit limits to fail.
> 
> (The ocaml bindings require the caller to always specify a value, and
> the code to start a xenstored stubdomain hard-codes these to 4 and 128
> respectively; these will not be addressed here.)
> 
> Signed-off-by: George Dunlap <george.dunlap@citrix.com>
> ---
> Release justification: This is an observed regression (albeit one that
> has spanned several releases now).
> 
> Compile-tested only.
> 
> NB this patch could be applied without the whitespace fixes (perhaps
> with some fix-ups); it's just easier since my editor strips trailing
> whitespace out automatically.
> 
> CC: Ian Jackson <ian.jackson@citrix.com>
> CC: Wei Liu <wl@xen.org>
> CC: Andrew Cooper <andrew.cooper3@citrix.com>
> CC: Jan Beulich <jbeulich@suse.com>
> CC: Paul Durrant <paul@xen.org>
> CC: Julien Grall <julien@xen.org>
> CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> CC: Stefano Stabellini <sstabellini@kernel.org>
> CC: Juergen Gross <jgross@suse.com>
> CC: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
> ---
>  tools/libxl/libxl.h               |  4 ++--
>  tools/python/xen/lowlevel/xc/xc.c |  2 --
>  tools/xl/xl.c                     | 12 ++----------
>  xen/common/grant_table.c          |  7 +++++++
>  xen/include/public/domctl.h       |  6 ++++--
>  5 files changed, 15 insertions(+), 16 deletions(-)
> 
> diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
> index 49b56fa1a3..1648d337e7 100644
> --- a/tools/libxl/libxl.h
> +++ b/tools/libxl/libxl.h
> @@ -364,8 +364,8 @@
>   */
>  #define LIBXL_HAVE_BUILDINFO_GRANT_LIMITS 1
> 
> -#define LIBXL_MAX_GRANT_FRAMES_DEFAULT 32
> -#define LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT 1024
> +#define LIBXL_MAX_GRANT_FRAMES_DEFAULT 0
> +#define LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT 0
> 
>  /*
>   * LIBXL_HAVE_BUILDINFO_* indicates that libxl_domain_build_info has
> diff --git a/tools/python/xen/lowlevel/xc/xc.c
> b/tools/python/xen/lowlevel/xc/xc.c
> index 6d2afd5695..0f861872ce 100644
> --- a/tools/python/xen/lowlevel/xc/xc.c
> +++ b/tools/python/xen/lowlevel/xc/xc.c
> @@ -127,8 +127,6 @@ static PyObject *pyxc_domain_create(XcObject *self,
>          },
>          .max_vcpus = 1,
>          .max_evtchn_port = -1, /* No limit. */
> -        .max_grant_frames = 32,
> -        .max_maptrack_frames = 1024,
>      };
> 
>      static char *kwd_list[] = { "domid", "ssidref", "handle", "flags",
> diff --git a/tools/xl/xl.c b/tools/xl/xl.c
> index ddd29b3f1b..b6e220184d 100644
> --- a/tools/xl/xl.c
> +++ b/tools/xl/xl.c
> @@ -51,8 +51,8 @@ libxl_bitmap global_pv_affinity_mask;
>  enum output_format default_output_format = OUTPUT_FORMAT_JSON;
>  int claim_mode = 1;
>  bool progress_use_cr = 0;
> -int max_grant_frames = -1;
> -int max_maptrack_frames = -1;
> +int max_grant_frames = 0;
> +int max_maptrack_frames = 0;
> 
>  xentoollog_level minmsglevel = minmsglevel_default;
> 
> @@ -96,7 +96,6 @@ static void parse_global_config(const char *configfile,
>      XLU_Config *config;
>      int e;
>      const char *buf;
> -    libxl_physinfo physinfo;
> 
>      config = xlu_cfg_init(stderr, configfile);
>      if (!config) {
> @@ -199,13 +198,6 @@ static void parse_global_config(const char
> *configfile,
> 
>      if (!xlu_cfg_get_long (config, "max_grant_frames", &l, 0))
>          max_grant_frames = l;
> -    else {
> -        libxl_physinfo_init(&physinfo);
> -        max_grant_frames = (libxl_get_physinfo(ctx, &physinfo) != 0 ||
> -                            !(physinfo.max_possible_mfn >> 32))
> -                           ? 32 : 64;
> -        libxl_physinfo_dispose(&physinfo);
> -    }
>      if (!xlu_cfg_get_long (config, "max_maptrack_frames", &l, 0))
>          max_maptrack_frames = l;
> 
> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
> index b34d520f6d..cd24029e33 100644
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -1843,6 +1843,13 @@ int grant_table_init(struct domain *d, unsigned int
> max_grant_frames,
>      struct grant_table *gt;
>      int ret = -ENOMEM;
> 
> +    /* Default to maximum values if no lower ones are specified */
> +    if ( !max_grant_frames )
> +        max_grant_frames = opt_max_grant_frames;
> +
> +    if ( !max_maptrack_frames )
> +        max_maptrack_frames = opt_max_maptrack_frames;
> +

This means you should also be able to drop the field setting in dom0_cfg in __start_xen() too :-)

  Paul

>      if ( max_grant_frames < INITIAL_NR_GRANT_FRAMES ||
>           max_grant_frames > opt_max_grant_frames ||
>           max_maptrack_frames > opt_max_maptrack_frames )
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> index 9f2cfd602c..27d04f67aa 100644
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -82,8 +82,10 @@ struct xen_domctl_createdomain {
>      uint32_t iommu_opts;
> 
>      /*
> -     * Various domain limits, which impact the quantity of resources
> (global
> -     * mapping space, xenheap, etc) a guest may consume.
> +     * Various domain limits, which impact the quantity of resources
> +     * (global mapping space, xenheap, etc) a guest may consume.  For
> +     * max_grant_frames and max_maptrack_frames, "0" means "use the
> +     * default maximum value".
>       */
>      uint32_t max_vcpus;
>      uint32_t max_evtchn_port;
> --
> 2.24.0
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xenproject.org
> https://lists.xenproject.org/mailman/listinfo/xen-devel
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-26 17:17 ` [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling George Dunlap
  2019-11-26 17:30   ` George Dunlap
  2019-11-26 17:32   ` Durrant, Paul
@ 2019-11-26 17:36   ` Ian Jackson
  2019-11-27  8:42     ` Durrant, Paul
  2019-11-27  4:32   ` Jürgen Groß
  3 siblings, 1 reply; 20+ messages in thread
From: Ian Jackson @ 2019-11-26 17:36 UTC (permalink / raw)
  To: George Dunlap
  Cc: Juergen Gross, Stefano Stabellini, Julien Grall, Wei Liu,
	Paul Durrant, Andrew Cooper, Konrad Rzeszutek Wilk,
	Marek Marczykowski-Górecki, Hans van Kranenburg,
	Jan Beulich, xen-devel

George Dunlap writes ("[PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling"):
> Xen used to have single, system-wide limits for the number of grant
> frames and maptrack frames a guest was allowed to create.  Increasing
> or decreasing this single limit on the Xen command-line would change
> the limit for all guests on the system.

If I am not mistaken, this is an important change to have.

I have seen reports of users who ran out of grant/maptrack frames
because of updates to use multiring protocols etc.  The error messages
are not very good and the recommended workaround has been to increase
the default limit on the hypervisor command line.

It is important that we don't break that workaround!

Thanks,
Ian.

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-26 17:17 ` [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling George Dunlap
                     ` (2 preceding siblings ...)
  2019-11-26 17:36   ` Ian Jackson
@ 2019-11-27  4:32   ` Jürgen Groß
  2019-11-27  9:25     ` Jan Beulich
  2019-11-27 10:40     ` George Dunlap
  3 siblings, 2 replies; 20+ messages in thread
From: Jürgen Groß @ 2019-11-27  4:32 UTC (permalink / raw)
  To: George Dunlap, xen-devel
  Cc: Stefano Stabellini, Julien Grall, Wei Liu, Paul Durrant,
	Andrew Cooper, Konrad Rzeszutek Wilk,
	Marek Marczykowski-Górecki, Jan Beulich, Ian Jackson

On 26.11.19 18:17, George Dunlap wrote:
> Xen used to have single, system-wide limits for the number of grant
> frames and maptrack frames a guest was allowed to create.  Increasing
> or decreasing this single limit on the Xen command-line would change
> the limit for all guests on the system.
> 
> Later, per-domain limits for these values were created.  The
> system-wide limits became strict limits: domains could not be created
> with higher limits, but could be created with lower limits.
> 
> However, the change also introduced a range of different "default"
> values into various places in the toolstack:
> 
> - The python libxc bindings hard-coded these values to 32 and 1024,
>    respectively
> 
> - The libxl default values are 32 and 1024 respectively.
> 
> - xl will use the libxl default for maptrack, but does its own default
>    calculation for grant frames: either 32 or 64, based on the max
>    possible mfn.
> 
> These defaults interact poorly with the hypervisor command-line limit:
> 
> - The hypervisor command-line limit cannot be used to raise the limit
>    for all guests anymore, as the default in the toolstack will
>    effectively override this.
> 
> - If you use the hypervisor command-line limit to *reduce* the limit,
>    then the "default" values generated by the toolstack are too high,
>    and all guest creations will fail.
> 
> In other words, the toolstack defaults require any change to be
> effected by having the admin explicitly specify a new value in every
> guest.
> 
> In order to address this, have grant_table_init treat '0' values for
> max_grant_frames and max_maptrack_frames as instructions to use the
> system-wide default.  Have all the above toolstacks default to passing
> 0 unless a different value is explicitly given.
> 
> This restores the old behavior, that changing the hypervisor
> command-line option can change the behavior for all guests, while
> retaining the ability to set per-guest values.  It also removes the
> bug that *reducing* the system-wide max will cause all domains without
> explicit limits to fail.
> 
> (The ocaml bindings require the caller to always specify a value, and
> the code to start a xenstored stubdomain hard-codes these to 4 and 128
> respectively; these will not be addressed here.)
> 
> Signed-off-by: George Dunlap <george.dunlap@citrix.com>
> ---
> Release justification: This is an observed regression (albeit one that
> has spanned several releases now).
> 
> Compile-tested only.
> 
> NB this patch could be applied without the whitespace fixes (perhaps
> with some fix-ups); it's just easier since my editor strips trailing
> whitespace out automatically.
> 
> CC: Ian Jackson <ian.jackson@citrix.com>
> CC: Wei Liu <wl@xen.org>
> CC: Andrew Cooper <andrew.cooper3@citrix.com>
> CC: Jan Beulich <jbeulich@suse.com>
> CC: Paul Durrant <paul@xen.org>
> CC: Julien Grall <julien@xen.org>
> CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> CC: Stefano Stabellini <sstabellini@kernel.org>
> CC: Juergen Gross <jgross@suse.com>
> CC: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
> ---
>   tools/libxl/libxl.h               |  4 ++--
>   tools/python/xen/lowlevel/xc/xc.c |  2 --
>   tools/xl/xl.c                     | 12 ++----------
>   xen/common/grant_table.c          |  7 +++++++
>   xen/include/public/domctl.h       |  6 ++++--
>   5 files changed, 15 insertions(+), 16 deletions(-)
> 
> diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
> index 49b56fa1a3..1648d337e7 100644
> --- a/tools/libxl/libxl.h
> +++ b/tools/libxl/libxl.h
> @@ -364,8 +364,8 @@
>    */
>   #define LIBXL_HAVE_BUILDINFO_GRANT_LIMITS 1
>   
> -#define LIBXL_MAX_GRANT_FRAMES_DEFAULT 32
> -#define LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT 1024
> +#define LIBXL_MAX_GRANT_FRAMES_DEFAULT 0
> +#define LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT 0

I'd rather use -1 for the "not specified" value. This allows to set e.g.
the maptrack frames to 0 for non-driver domains.


Juergen

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-26 17:30   ` George Dunlap
@ 2019-11-27  4:34     ` Jürgen Groß
  2019-11-27 12:07       ` George Dunlap
  0 siblings, 1 reply; 20+ messages in thread
From: Jürgen Groß @ 2019-11-27  4:34 UTC (permalink / raw)
  To: George Dunlap, xen-devel
  Cc: Stefano Stabellini, Julien Grall, Wei Liu, Paul Durrant,
	Andrew Cooper, Konrad Rzeszutek Wilk,
	Marek Marczykowski-Górecki, Jan Beulich, Ian Jackson

On 26.11.19 18:30, George Dunlap wrote:
> On 11/26/19 5:17 PM, George Dunlap wrote:
>> - xl will use the libxl default for maptrack, but does its own default
>>    calculation for grant frames: either 32 or 64, based on the max
>>    possible mfn.
> 
> [snip]
> 
>> @@ -199,13 +198,6 @@ static void parse_global_config(const char *configfile,
>>   
>>       if (!xlu_cfg_get_long (config, "max_grant_frames", &l, 0))
>>           max_grant_frames = l;
>> -    else {
>> -        libxl_physinfo_init(&physinfo);
>> -        max_grant_frames = (libxl_get_physinfo(ctx, &physinfo) != 0 ||
>> -                            !(physinfo.max_possible_mfn >> 32))
>> -                           ? 32 : 64;
>> -        libxl_physinfo_dispose(&physinfo);
>> -    }
> 
> Sorry, meant to add a patch to add this functionality back into the
> hypervisor -- i.e., so that opt_max_grant_frames would be 32 on systems
> with 32-bit mfns.
> 
> But this seems like a fairly strange calculation anyway; it's not clear
> to me where it would have come from.
mfns above the 32-bit limit require the use of grant v2. This in turn
doubles the grant frames needed for the same number of grants.


Juergen


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-26 17:36   ` Ian Jackson
@ 2019-11-27  8:42     ` Durrant, Paul
  2019-11-27 11:10       ` Ian Jackson
  0 siblings, 1 reply; 20+ messages in thread
From: Durrant, Paul @ 2019-11-27  8:42 UTC (permalink / raw)
  To: Ian Jackson, George Dunlap
  Cc: Juergen Gross, Stefano Stabellini, Julien Grall, Wei Liu,
	Paul Durrant, Andrew Cooper, Konrad Rzeszutek Wilk,
	Marek Marczykowski-Górecki, Hans van Kranenburg,
	Jan Beulich, xen-devel

> -----Original Message-----
> From: Xen-devel <xen-devel-bounces@lists.xenproject.org> On Behalf Of Ian
> Jackson
> Sent: 26 November 2019 17:36
> To: George Dunlap <george.dunlap@citrix.com>
> Cc: Juergen Gross <jgross@suse.com>; Stefano Stabellini
> <sstabellini@kernel.org>; Julien Grall <julien@xen.org>; Wei Liu
> <wl@xen.org>; Paul Durrant <paul@xen.org>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Konrad Rzeszutek Wilk
> <konrad.wilk@oracle.com>; Marek Marczykowski-Górecki
> <marmarek@invisiblethingslab.com>; Hans van Kranenburg <hans@knorrie.org>;
> Jan Beulich <jbeulich@suse.com>; xen-devel@lists.xenproject.org
> Subject: Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames
> and max_maptrack_frames handling
> 
> George Dunlap writes ("[PATCH for-4.13 2/2] Rationalize max_grant_frames
> and max_maptrack_frames handling"):
> > Xen used to have single, system-wide limits for the number of grant
> > frames and maptrack frames a guest was allowed to create.  Increasing
> > or decreasing this single limit on the Xen command-line would change
> > the limit for all guests on the system.
> 
> If I am not mistaken, this is an important change to have.
> 

It is, and many thanks to George for picking this up.

> I have seen reports of users who ran out of grant/maptrack frames
> because of updates to use multiring protocols etc.  The error messages
> are not very good and the recommended workaround has been to increase
> the default limit on the hypervisor command line.
> 
> It is important that we don't break that workaround!

Alas it has apparently been broken for several releases now :-(

  Paul

> 
> Thanks,
> Ian.
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xenproject.org
> https://lists.xenproject.org/mailman/listinfo/xen-devel
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-27  4:32   ` Jürgen Groß
@ 2019-11-27  9:25     ` Jan Beulich
  2019-11-27  9:38       ` Jürgen Groß
  2019-11-27 10:40     ` George Dunlap
  1 sibling, 1 reply; 20+ messages in thread
From: Jan Beulich @ 2019-11-27  9:25 UTC (permalink / raw)
  To: Jürgen Groß, George Dunlap
  Cc: Stefano Stabellini, Julien Grall, Wei Liu, Paul Durrant,
	Andrew Cooper, Konrad Rzeszutek Wilk,
	Marek Marczykowski-Górecki, Ian Jackson, xen-devel

On 27.11.2019 05:32, Jürgen Groß wrote:
> On 26.11.19 18:17, George Dunlap wrote:
>> --- a/tools/libxl/libxl.h
>> +++ b/tools/libxl/libxl.h
>> @@ -364,8 +364,8 @@
>>    */
>>   #define LIBXL_HAVE_BUILDINFO_GRANT_LIMITS 1
>>   
>> -#define LIBXL_MAX_GRANT_FRAMES_DEFAULT 32
>> -#define LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT 1024
>> +#define LIBXL_MAX_GRANT_FRAMES_DEFAULT 0
>> +#define LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT 0
> 
> I'd rather use -1 for the "not specified" value. This allows to set e.g.
> the maptrack frames to 0 for non-driver domains.

Yes. But it in turn wouldn't allow taking 0 to mean "default" in the
hypervisor.

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-27  9:25     ` Jan Beulich
@ 2019-11-27  9:38       ` Jürgen Groß
  0 siblings, 0 replies; 20+ messages in thread
From: Jürgen Groß @ 2019-11-27  9:38 UTC (permalink / raw)
  To: Jan Beulich, George Dunlap
  Cc: Stefano Stabellini, Julien Grall, Wei Liu, Konrad Rzeszutek Wilk,
	Andrew Cooper, Paul Durrant, Marek Marczykowski-Górecki,
	xen-devel, Ian Jackson

On 27.11.19 10:25, Jan Beulich wrote:
> On 27.11.2019 05:32, Jürgen Groß wrote:
>> On 26.11.19 18:17, George Dunlap wrote:
>>> --- a/tools/libxl/libxl.h
>>> +++ b/tools/libxl/libxl.h
>>> @@ -364,8 +364,8 @@
>>>     */
>>>    #define LIBXL_HAVE_BUILDINFO_GRANT_LIMITS 1
>>>    
>>> -#define LIBXL_MAX_GRANT_FRAMES_DEFAULT 32
>>> -#define LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT 1024
>>> +#define LIBXL_MAX_GRANT_FRAMES_DEFAULT 0
>>> +#define LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT 0
>>
>> I'd rather use -1 for the "not specified" value. This allows to set e.g.
>> the maptrack frames to 0 for non-driver domains.
> 
> Yes. But it in turn wouldn't allow taking 0 to mean "default" in the
> hypervisor.

That is a logical consequence, yes.


Juergen

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-27  4:32   ` Jürgen Groß
  2019-11-27  9:25     ` Jan Beulich
@ 2019-11-27 10:40     ` George Dunlap
  1 sibling, 0 replies; 20+ messages in thread
From: George Dunlap @ 2019-11-27 10:40 UTC (permalink / raw)
  To: Jürgen Groß
  Cc: Stefano Stabellini, Julien Grall, Wei Liu, Paul Durrant,
	Andrew Cooper, Konrad Rzeszutek Wilk,
	Marek Marczykowski-Górecki, George Dunlap, Jan Beulich,
	Ian Jackson, xen-devel

On Wed, Nov 27, 2019 at 4:33 AM Jürgen Groß <jgross@suse.com> wrote:
>
> On 26.11.19 18:17, George Dunlap wrote:
> > Xen used to have single, system-wide limits for the number of grant
> > frames and maptrack frames a guest was allowed to create.  Increasing
> > or decreasing this single limit on the Xen command-line would change
> > the limit for all guests on the system.
> >
> > Later, per-domain limits for these values were created.  The
> > system-wide limits became strict limits: domains could not be created
> > with higher limits, but could be created with lower limits.
> >
> > However, the change also introduced a range of different "default"
> > values into various places in the toolstack:
> >
> > - The python libxc bindings hard-coded these values to 32 and 1024,
> >    respectively
> >
> > - The libxl default values are 32 and 1024 respectively.
> >
> > - xl will use the libxl default for maptrack, but does its own default
> >    calculation for grant frames: either 32 or 64, based on the max
> >    possible mfn.
> >
> > These defaults interact poorly with the hypervisor command-line limit:
> >
> > - The hypervisor command-line limit cannot be used to raise the limit
> >    for all guests anymore, as the default in the toolstack will
> >    effectively override this.
> >
> > - If you use the hypervisor command-line limit to *reduce* the limit,
> >    then the "default" values generated by the toolstack are too high,
> >    and all guest creations will fail.
> >
> > In other words, the toolstack defaults require any change to be
> > effected by having the admin explicitly specify a new value in every
> > guest.
> >
> > In order to address this, have grant_table_init treat '0' values for
> > max_grant_frames and max_maptrack_frames as instructions to use the
> > system-wide default.  Have all the above toolstacks default to passing
> > 0 unless a different value is explicitly given.
> >
> > This restores the old behavior, that changing the hypervisor
> > command-line option can change the behavior for all guests, while
> > retaining the ability to set per-guest values.  It also removes the
> > bug that *reducing* the system-wide max will cause all domains without
> > explicit limits to fail.
> >
> > (The ocaml bindings require the caller to always specify a value, and
> > the code to start a xenstored stubdomain hard-codes these to 4 and 128
> > respectively; these will not be addressed here.)
> >
> > Signed-off-by: George Dunlap <george.dunlap@citrix.com>
> > ---
> > Release justification: This is an observed regression (albeit one that
> > has spanned several releases now).
> >
> > Compile-tested only.
> >
> > NB this patch could be applied without the whitespace fixes (perhaps
> > with some fix-ups); it's just easier since my editor strips trailing
> > whitespace out automatically.
> >
> > CC: Ian Jackson <ian.jackson@citrix.com>
> > CC: Wei Liu <wl@xen.org>
> > CC: Andrew Cooper <andrew.cooper3@citrix.com>
> > CC: Jan Beulich <jbeulich@suse.com>
> > CC: Paul Durrant <paul@xen.org>
> > CC: Julien Grall <julien@xen.org>
> > CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> > CC: Stefano Stabellini <sstabellini@kernel.org>
> > CC: Juergen Gross <jgross@suse.com>
> > CC: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
> > ---
> >   tools/libxl/libxl.h               |  4 ++--
> >   tools/python/xen/lowlevel/xc/xc.c |  2 --
> >   tools/xl/xl.c                     | 12 ++----------
> >   xen/common/grant_table.c          |  7 +++++++
> >   xen/include/public/domctl.h       |  6 ++++--
> >   5 files changed, 15 insertions(+), 16 deletions(-)
> >
> > diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
> > index 49b56fa1a3..1648d337e7 100644
> > --- a/tools/libxl/libxl.h
> > +++ b/tools/libxl/libxl.h
> > @@ -364,8 +364,8 @@
> >    */
> >   #define LIBXL_HAVE_BUILDINFO_GRANT_LIMITS 1
> >
> > -#define LIBXL_MAX_GRANT_FRAMES_DEFAULT 32
> > -#define LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT 1024
> > +#define LIBXL_MAX_GRANT_FRAMES_DEFAULT 0
> > +#define LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT 0
>
> I'd rather use -1 for the "not specified" value. This allows to set e.g.
> the maptrack frames to 0 for non-driver domains.

I did wonder whether having 0 frames was something we might want.  I
can certainly change that.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-27  8:42     ` Durrant, Paul
@ 2019-11-27 11:10       ` Ian Jackson
  2019-11-27 11:13         ` Durrant, Paul
  0 siblings, 1 reply; 20+ messages in thread
From: Ian Jackson @ 2019-11-27 11:10 UTC (permalink / raw)
  To: Durrant, Paul
  Cc: Juergen Gross, Stefano Stabellini, Julien Grall, Wei Liu,
	Paul Durrant, Andrew Cooper, Konrad Rzeszutek Wilk,
	George Dunlap, Marek Marczykowski-Górecki,
	Hans van Kranenburg, Jan Beulich, xen-devel, Ian Jackson

Durrant, Paul writes ("RE: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling"):
> > -----Original Message-----
> > From: Xen-devel <xen-devel-bounces@lists.xenproject.org> On Behalf Of Ian
> > Jackson
> > I have seen reports of users who ran out of grant/maptrack frames
> > because of updates to use multiring protocols etc.  The error messages
> > are not very good and the recommended workaround has been to increase
> > the default limit on the hypervisor command line.
> > 
> > It is important that we don't break that workaround!
> 
> Alas it has apparently been broken for several releases now :-(

I guess at least in Debian (where I have seen this) we haven't
released with any affected versions yet...

Ian.

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-27 11:10       ` Ian Jackson
@ 2019-11-27 11:13         ` Durrant, Paul
  2019-11-27 22:32           ` Hans van Kranenburg
  0 siblings, 1 reply; 20+ messages in thread
From: Durrant, Paul @ 2019-11-27 11:13 UTC (permalink / raw)
  To: Ian Jackson
  Cc: Juergen Gross, Stefano Stabellini, Julien Grall, Wei Liu,
	Paul Durrant, Andrew Cooper, Konrad Rzeszutek Wilk,
	George Dunlap, Marek Marczykowski-Górecki,
	Hans van Kranenburg, Jan Beulich, xen-devel

> -----Original Message-----
> From: Ian Jackson <ian.jackson@citrix.com>
> Sent: 27 November 2019 11:10
> To: Durrant, Paul <pdurrant@amazon.com>
> Cc: Ian Jackson <Ian.Jackson@citrix.com>; George Dunlap
> <George.Dunlap@citrix.com>; Juergen Gross <jgross@suse.com>; Stefano
> Stabellini <sstabellini@kernel.org>; Julien Grall <julien@xen.org>; Wei
> Liu <wl@xen.org>; Paul Durrant <paul@xen.org>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Konrad Rzeszutek Wilk
> <konrad.wilk@oracle.com>; Marek Marczykowski-Górecki
> <marmarek@invisiblethingslab.com>; Hans van Kranenburg <hans@knorrie.org>;
> Jan Beulich <jbeulich@suse.com>; xen-devel@lists.xenproject.org
> Subject: RE: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames
> and max_maptrack_frames handling
> 
> Durrant, Paul writes ("RE: [Xen-devel] [PATCH for-4.13 2/2] Rationalize
> max_grant_frames and max_maptrack_frames handling"):
> > > -----Original Message-----
> > > From: Xen-devel <xen-devel-bounces@lists.xenproject.org> On Behalf Of
> Ian
> > > Jackson
> > > I have seen reports of users who ran out of grant/maptrack frames
> > > because of updates to use multiring protocols etc.  The error messages
> > > are not very good and the recommended workaround has been to increase
> > > the default limit on the hypervisor command line.
> > >
> > > It is important that we don't break that workaround!
> >
> > Alas it has apparently been broken for several releases now :-(
> 
> I guess at least in Debian (where I have seen this) we haven't
> released with any affected versions yet...

I believe the problem was introduced in 4.10, so I think it would be prudent to also back-port the final fix to stable trees from then on.

  Paul

> 
> Ian.

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-27  4:34     ` Jürgen Groß
@ 2019-11-27 12:07       ` George Dunlap
  2019-11-27 12:15         ` Jürgen Groß
  0 siblings, 1 reply; 20+ messages in thread
From: George Dunlap @ 2019-11-27 12:07 UTC (permalink / raw)
  To: Jürgen Groß, xen-devel
  Cc: Stefano Stabellini, Julien Grall, Wei Liu, Paul Durrant,
	Andrew Cooper, Konrad Rzeszutek Wilk,
	Marek Marczykowski-Górecki, Jan Beulich, Ian Jackson

On 11/27/19 4:34 AM, Jürgen Groß wrote:
> On 26.11.19 18:30, George Dunlap wrote:
>> On 11/26/19 5:17 PM, George Dunlap wrote:
>>> - xl will use the libxl default for maptrack, but does its own default
>>>    calculation for grant frames: either 32 or 64, based on the max
>>>    possible mfn.
>>
>> [snip]
>>
>>> @@ -199,13 +198,6 @@ static void parse_global_config(const char
>>> *configfile,
>>>         if (!xlu_cfg_get_long (config, "max_grant_frames", &l, 0))
>>>           max_grant_frames = l;
>>> -    else {
>>> -        libxl_physinfo_init(&physinfo);
>>> -        max_grant_frames = (libxl_get_physinfo(ctx, &physinfo) != 0 ||
>>> -                            !(physinfo.max_possible_mfn >> 32))
>>> -                           ? 32 : 64;
>>> -        libxl_physinfo_dispose(&physinfo);
>>> -    }
>>
>> Sorry, meant to add a patch to add this functionality back into the
>> hypervisor -- i.e., so that opt_max_grant_frames would be 32 on systems
>> with 32-bit mfns.
>>
>> But this seems like a fairly strange calculation anyway; it's not clear
>> to me where it would have come from.
> mfns above the 32-bit limit require to use grant v2. This in turn
> doubles the grant frames needed for the same number of grants.

But is large mfns the *only* reason to use grant v2?  Aren't modern
guests going to use grant v2 regardless of the max mfn size?

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-27 12:07       ` George Dunlap
@ 2019-11-27 12:15         ` Jürgen Groß
  0 siblings, 0 replies; 20+ messages in thread
From: Jürgen Groß @ 2019-11-27 12:15 UTC (permalink / raw)
  To: George Dunlap, xen-devel
  Cc: Stefano Stabellini, Julien Grall, Wei Liu, Paul Durrant,
	Andrew Cooper, Konrad Rzeszutek Wilk,
	Marek Marczykowski-Górecki, Jan Beulich, Ian Jackson

On 27.11.19 13:07, George Dunlap wrote:
> On 11/27/19 4:34 AM, Jürgen Groß wrote:
>> On 26.11.19 18:30, George Dunlap wrote:
>>> On 11/26/19 5:17 PM, George Dunlap wrote:
>>>> - xl will use the libxl default for maptrack, but does its own default
>>>>     calculation for grant frames: either 32 or 64, based on the max
>>>>     possible mfn.
>>>
>>> [snip]
>>>
>>>> @@ -199,13 +198,6 @@ static void parse_global_config(const char
>>>> *configfile,
>>>>          if (!xlu_cfg_get_long (config, "max_grant_frames", &l, 0))
>>>>            max_grant_frames = l;
>>>> -    else {
>>>> -        libxl_physinfo_init(&physinfo);
>>>> -        max_grant_frames = (libxl_get_physinfo(ctx, &physinfo) != 0 ||
>>>> -                            !(physinfo.max_possible_mfn >> 32))
>>>> -                           ? 32 : 64;
>>>> -        libxl_physinfo_dispose(&physinfo);
>>>> -    }
>>>
>>> Sorry, meant to add a patch to add this functionality back into the
>>> hypervisor -- i.e., so that opt_max_grant_frames would be 32 on systems
>>> with 32-bit mfns.
>>>
>>> But this seems like a fairly strange calculation anyway; it's not clear
>>> to me where it would have come from.
>> mfns above the 32-bit limit require to use grant v2. This in turn
>> doubles the grant frames needed for the same number of grants.
> 
> But is large mfns the *only* reason to use grant v2?  Aren't modern
> guests going to use grant v2 regardless of the max mfn size?

Large mfns leave the guest no choice. Linux kernel V2 support was
removed, and I reintroduced it in order to support large mfns in
guests.

Current Linux kernel will use V1 if the max mfn fits in 32 bits and V2
only if there can be memory above that boundary.


Juergen

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-27 11:13         ` Durrant, Paul
@ 2019-11-27 22:32           ` Hans van Kranenburg
  2019-11-28  5:57             ` Jürgen Groß
  2019-11-28 14:54             ` George Dunlap
  0 siblings, 2 replies; 20+ messages in thread
From: Hans van Kranenburg @ 2019-11-27 22:32 UTC (permalink / raw)
  To: Durrant, Paul, Ian Jackson
  Cc: Juergen Gross, Stefano Stabellini, Julien Grall, Wei Liu,
	Paul Durrant, Andrew Cooper, Konrad Rzeszutek Wilk,
	George Dunlap, Marek Marczykowski-Górecki, Jan Beulich,
	xen-devel

Hi all,

On 11/27/19 12:13 PM, Durrant, Paul wrote:
>> -----Original Message-----
>> From: Ian Jackson <ian.jackson@citrix.com>
>> Sent: 27 November 2019 11:10
>> [...]
>> Subject: RE: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames
>> and max_maptrack_frames handling
>>
>> Durrant, Paul writes ("RE: [Xen-devel] [PATCH for-4.13 2/2] Rationalize
>> max_grant_frames and max_maptrack_frames handling"):
>>>> -----Original Message-----
>>>> From: Xen-devel <xen-devel-bounces@lists.xenproject.org> On Behalf Of
>> Ian
>>>> Jackson
>>>> I have seen reports of users who ran out of grant/maptrack frames
>>>> because of updates to use multiring protocols etc.  The error messages
>>>> are not very good and the recommended workaround has been to increase
>>>> the default limit on the hypervisor command line.
>>>>
>>>> It is important that we don't break that workaround!
>>>
>>> Alas it has apparently been broken for several releases now :-(
>>
>> I guess at least in Debian (where I have seen this) we haven't
>> released with any affected versions yet...
> 
> I believe the problem was introduce in 4.10, so I think it would be prudent to also back-port the final fix to stable trees from then on.

Yes, the max grant frame issue has historically always been a painful
experience for end users, and Xen 4.11 which we now have in the current
Debian stable has made it worse compared to previous versions indeed.

Changing the hypervisor command line worked in the past, and now that
value is overwritten again by a lower value in the toolstack, which
requires setting per-domU settings, or, what I did, just additionally
also setting max_grant_frames in /etc/xen/xl.conf to the same value as
the hypervisor command line.

This change is very welcome, even to 4.11-stable if possible, since it
will not break existing configuration of users.

If changing only the value of the hypervisor command line works again,
then old information that shows up when the users searches the web will
be useful again, which is good.

Hans

P.S. Now I'm curious to figure out what a maptrack frame is, didn't hear
about that one before.

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-27 22:32           ` Hans van Kranenburg
@ 2019-11-28  5:57             ` Jürgen Groß
  2019-11-28 14:54             ` George Dunlap
  1 sibling, 0 replies; 20+ messages in thread
From: Jürgen Groß @ 2019-11-28  5:57 UTC (permalink / raw)
  To: Hans van Kranenburg, Durrant, Paul, Ian Jackson
  Cc: Stefano Stabellini, Julien Grall, Wei Liu, Konrad Rzeszutek Wilk,
	Andrew Cooper, Paul Durrant, George Dunlap,
	Marek Marczykowski-Górecki, Jan Beulich, xen-devel

On 27.11.19 23:32, Hans van Kranenburg wrote:
> Hi all,
> 
> On 11/27/19 12:13 PM, Durrant, Paul wrote:
>>> -----Original Message-----
>>> From: Ian Jackson <ian.jackson@citrix.com>
>>> Sent: 27 November 2019 11:10
>>> [...]
>>> Subject: RE: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames
>>> and max_maptrack_frames handling
>>>
>>> Durrant, Paul writes ("RE: [Xen-devel] [PATCH for-4.13 2/2] Rationalize
>>> max_grant_frames and max_maptrack_frames handling"):
>>>>> -----Original Message-----
>>>>> From: Xen-devel <xen-devel-bounces@lists.xenproject.org> On Behalf Of
>>> Ian
>>>>> Jackson
>>>>> I have seen reports of users who ran out of grant/maptrack frames
>>>>> because of updates to use multiring protocols etc.  The error messages
>>>>> are not very good and the recommended workaround has been to increase
>>>>> the default limit on the hypervisor command line.
>>>>>
>>>>> It is important that we don't break that workaround!
>>>>
>>>> Alas it has apparently been broken for several releases now :-(
>>>
>>> I guess at least in Debian (where I have seen this) we haven't
>>> released with any affected versions yet...
>>
>> I believe the problem was introduce in 4.10, so I think it would be prudent to also back-port the final fix to stable trees from then on.
> 
> Yes, the max grant frame issue has historically always been a painful
> experience for end users, and Xen 4.11 which we now have in the current
> Debian stable has made it worse compared to previous versions indeed.
> 
> Changing the hypervisor command line worked in the past, and now that
> value is overwritten again by a lower value in the toolstack, which
> requires setting per-domU settings, or, what I did, just additionally
> also setting max_grant_frames in /etc/xen/xl.conf to the same value as
> the hypervisor command line.
> 
> This change is very welcome, even to 4.11-stable if possible, since it
> will not break existing configuration of users.
> 
> If changing only the value of the hypervisor command line works again,
> then old information that shows up when the users searches the web will
> be useful again, which is good.
> 
> Hans
> 
> P.S. Now I'm curious to figure out what a maptrack frame is, didn't hear
> about that one before.

The maptrack frames are used by the hypervisor for keeping track of
which grants are mapped by a specific domain. So they are necessary for
driver domains (including dom0), and max_maptrack_frames limits how many
mappings of other domains' pages can be active simultaneously in a
domain.


Juergen

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-27 22:32           ` Hans van Kranenburg
  2019-11-28  5:57             ` Jürgen Groß
@ 2019-11-28 14:54             ` George Dunlap
  2019-11-28 15:33               ` Hans van Kranenburg
  1 sibling, 1 reply; 20+ messages in thread
From: George Dunlap @ 2019-11-28 14:54 UTC (permalink / raw)
  To: Hans van Kranenburg, Durrant, Paul, Ian Jackson
  Cc: Juergen Gross, Stefano Stabellini, Julien Grall, Wei Liu,
	Paul Durrant, Andrew Cooper, Konrad Rzeszutek Wilk,
	Marek Marczykowski-Górecki, Jan Beulich, xen-devel

On 11/27/19 10:32 PM, Hans van Kranenburg wrote:
> Hi all,
> 
> On 11/27/19 12:13 PM, Durrant, Paul wrote:
>>> -----Original Message-----
>>> From: Ian Jackson <ian.jackson@citrix.com>
>>> Sent: 27 November 2019 11:10
>>> [...]
>>> Subject: RE: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames
>>> and max_maptrack_frames handling
>>>
>>> Durrant, Paul writes ("RE: [Xen-devel] [PATCH for-4.13 2/2] Rationalize
>>> max_grant_frames and max_maptrack_frames handling"):
>>>>> -----Original Message-----
>>>>> From: Xen-devel <xen-devel-bounces@lists.xenproject.org> On Behalf Of
>>> Ian
>>>>> Jackson
>>>>> I have seen reports of users who ran out of grant/maptrack frames
>>>>> because of updates to use multiring protocols etc.  The error messages
>>>>> are not very good and the recommended workaround has been to increase
>>>>> the default limit on the hypervisor command line.
>>>>>
>>>>> It is important that we don't break that workaround!
>>>>
>>>> Alas it has apparently been broken for several releases now :-(
>>>
>>> I guess at least in Debian (where I have seen this) we haven't
>>> released with any affected versions yet...
>>
>> I believe the problem was introduce in 4.10, so I think it would be prudent to also back-port the final fix to stable trees from then on.
> 
> Yes, the max grant frame issue has historically always been a painful
> experience for end users, and Xen 4.11 which we now have in the current
> Debian stable has made it worse compared to previous versions indeed.

This rather suggests that the default value isn't very well chosen.
Ideally some investigation would be done to improve the default sizing;
end-users shouldn't have to know anything about grant table frames.

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-28 14:54             ` George Dunlap
@ 2019-11-28 15:33               ` Hans van Kranenburg
  2019-11-28 15:43                 ` Jürgen Groß
  0 siblings, 1 reply; 20+ messages in thread
From: Hans van Kranenburg @ 2019-11-28 15:33 UTC (permalink / raw)
  To: George Dunlap, Durrant, Paul, Ian Jackson
  Cc: Juergen Gross, Stefano Stabellini, Julien Grall, Wei Liu,
	Paul Durrant, Andrew Cooper, Konrad Rzeszutek Wilk,
	Marek Marczykowski-Górecki, Jan Beulich, xen-devel

On 11/28/19 3:54 PM, George Dunlap wrote:
> On 11/27/19 10:32 PM, Hans van Kranenburg wrote:
>> Hi all,
>>
>> On 11/27/19 12:13 PM, Durrant, Paul wrote:
>>>> -----Original Message-----
>>>> From: Ian Jackson <ian.jackson@citrix.com>
>>>> Sent: 27 November 2019 11:10
>>>> [...]
>>>> Subject: RE: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames
>>>> and max_maptrack_frames handling
>>>>
>>>> Durrant, Paul writes ("RE: [Xen-devel] [PATCH for-4.13 2/2] Rationalize
>>>> max_grant_frames and max_maptrack_frames handling"):
>>>>>> -----Original Message-----
>>>>>> From: Xen-devel <xen-devel-bounces@lists.xenproject.org> On Behalf Of
>>>> Ian
>>>>>> Jackson
>>>>>> I have seen reports of users who ran out of grant/maptrack frames
>>>>>> because of updates to use multiring protocols etc.  The error messages
>>>>>> are not very good and the recommended workaround has been to increase
>>>>>> the default limit on the hypervisor command line.
>>>>>>
>>>>>> It is important that we don't break that workaround!
>>>>>
>>>>> Alas it has apparently been broken for several releases now :-(
>>>>
>>>> I guess at least in Debian (where I have seen this) we haven't
>>>> released with any affected versions yet...
>>>
>>> I believe the problem was introduce in 4.10, so I think it would be prudent to also back-port the final fix to stable trees from then on.
>>
>> Yes, the max grant frame issue has historically always been a painful
>> experience for end users, and Xen 4.11 which we now have in the current
>> Debian stable has made it worse compared to previous versions indeed.
> 
> This rather suggests that the default value isn't very well chosen.
> Ideally some investigation would be done to improve the default sizing;
> end-users shouldn't have to know anything about grant table frames.

Most of the problems started happening a few years ago when using a
newer Linux that got all kinds of multiqueue block stuff for disk and
network enabled on top of an older Xen. (e.g. in Debian using the Linux
4.9 backports kernel on top of Xen 4.4 in Jessie).

The default for the hypervisor option has already been doubled from 32
to 64, which I think is sufficient. However, having the toolstack revert
it back to 32 again is not very helpful, but that's what this thread is
about to solve. :)

A while ago I did some testing:
   https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=880554#119

I haven't been able to cause nr_frames to go over 64 in any test myself,
and also have never seen values that high in production use. The above
debian bug also does not contain any other report from anyone with a
number above 64. There are reports of users setting it to 256 and then
not caring about it any more, but they didn't report the xen_diag output
back after that, so there's no real data.

Hans

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling
  2019-11-28 15:33               ` Hans van Kranenburg
@ 2019-11-28 15:43                 ` Jürgen Groß
  0 siblings, 0 replies; 20+ messages in thread
From: Jürgen Groß @ 2019-11-28 15:43 UTC (permalink / raw)
  To: Hans van Kranenburg, George Dunlap, Durrant, Paul, Ian Jackson
  Cc: Stefano Stabellini, Julien Grall, Wei Liu, Konrad Rzeszutek Wilk,
	Andrew Cooper, Paul Durrant, Marek Marczykowski-Górecki,
	Jan Beulich, xen-devel

On 28.11.19 16:33, Hans van Kranenburg wrote:
> On 11/28/19 3:54 PM, George Dunlap wrote:
>> On 11/27/19 10:32 PM, Hans van Kranenburg wrote:
>>> Hi all,
>>>
>>> On 11/27/19 12:13 PM, Durrant, Paul wrote:
>>>>> -----Original Message-----
>>>>> From: Ian Jackson <ian.jackson@citrix.com>
>>>>> Sent: 27 November 2019 11:10
>>>>> [...]
>>>>> Subject: RE: [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames
>>>>> and max_maptrack_frames handling
>>>>>
>>>>> Durrant, Paul writes ("RE: [Xen-devel] [PATCH for-4.13 2/2] Rationalize
>>>>> max_grant_frames and max_maptrack_frames handling"):
>>>>>>> -----Original Message-----
>>>>>>> From: Xen-devel <xen-devel-bounces@lists.xenproject.org> On Behalf Of
>>>>> Ian
>>>>>>> Jackson
>>>>>>> I have seen reports of users who ran out of grant/maptrack frames
>>>>>>> because of updates to use multiring protocols etc.  The error messages
>>>>>>> are not very good and the recommended workaround has been to increase
>>>>>>> the default limit on the hypervisor command line.
>>>>>>>
>>>>>>> It is important that we don't break that workaround!
>>>>>>
>>>>>> Alas it has apparently been broken for several releases now :-(
>>>>>
>>>>> I guess at least in Debian (where I have seen this) we haven't
>>>>> released with any affected versions yet...
>>>>
>>>> I believe the problem was introduce in 4.10, so I think it would be prudent to also back-port the final fix to stable trees from then on.
>>>
>>> Yes, the max grant frame issue has historically always been a painful
>>> experience for end users, and Xen 4.11 which we now have in the current
>>> Debian stable has made it worse compared to previous versions indeed.
>>
>> This rather suggests that the default value isn't very well chosen.
>> Ideally some investigation would be done to improve the default sizing;
>> end-users shouldn't have to know anything about grant table frames.
> 
> Most of the problems started happening a few years ago when using a
> newer Linux that got all kinds of multiqueue block stuff for disk and
> network enabled on top of an older Xen. (e.g. in Debian using the Linux
> 4.9 backports kernel on top of Xen 4.4 in Jessie).
> 
> The default for the hypervisor option has already been doubled from 32
> to 64, which I think is sufficient. However, having the toolstack revert
> it back to 32 again is not very helpful, but that's what this thread is
> about to solve. :)
> 
> A while ago I did some testing:
>     https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=880554#119
> 
> I haven't been able to cause nr_frames to go over 64 in any test myself,
> and also have never seen values that high in production use. The above
> debian bug also does not contain any other report from anyone with a
> number above 64. There are reports of users setting it to 256 and then
> not caring about it any more, but they didn't report the xen_diag output
> back after that, so there's no real data.

I have seen guests needing 256.

My Linux kernel patches reducing the default max. number of queues in
netfront/netback to 8 made things much better (on a large host running
a guest with 64 vcpus using 8 network interfaces was blowing up rather
fast).


Juergen


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2019-11-28 15:43 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-11-26 17:17 [Xen-devel] [PATCH for-4.13 1/2] python/xc.c: Remove trailing whitespace George Dunlap
2019-11-26 17:17 ` [Xen-devel] [PATCH for-4.13 2/2] Rationalize max_grant_frames and max_maptrack_frames handling George Dunlap
2019-11-26 17:30   ` George Dunlap
2019-11-27  4:34     ` Jürgen Groß
2019-11-27 12:07       ` George Dunlap
2019-11-27 12:15         ` Jürgen Groß
2019-11-26 17:32   ` Durrant, Paul
2019-11-26 17:36   ` Ian Jackson
2019-11-27  8:42     ` Durrant, Paul
2019-11-27 11:10       ` Ian Jackson
2019-11-27 11:13         ` Durrant, Paul
2019-11-27 22:32           ` Hans van Kranenburg
2019-11-28  5:57             ` Jürgen Groß
2019-11-28 14:54             ` George Dunlap
2019-11-28 15:33               ` Hans van Kranenburg
2019-11-28 15:43                 ` Jürgen Groß
2019-11-27  4:32   ` Jürgen Groß
2019-11-27  9:25     ` Jan Beulich
2019-11-27  9:38       ` Jürgen Groß
2019-11-27 10:40     ` George Dunlap

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.