Subject: Can you please help with the failure to create a namespace via ndctl?
From: Xu, Chunye @ 2021-03-11 13:33 UTC
  To: Verma, Vishal L, linux-nvdimm

[-- Attachment #1: Type: text/plain, Size: 653 bytes --]

Hi Vishal Verma,

There is an urgent issue reported by Alibaba: an AEP namespace disable/destroy failure, tracked at https://hsdes.intel.com/appstore/article/#/22012576039.

Attached is a log of the ndctl commands; you will find several error messages in it.

You can see that it fails to recreate the namespace after the devdax-mode namespace has been destroyed:
root@cloud-dev-benz:~# ndctl create-namespace -r region0 --mode=devdax
libndctl: ndctl_dax_enable: dax0.1: failed to enable
  Error: namespace0.0: failed to enable
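
For reference, here is the sequence that reproduces the failure, condensed from the attached log (the full output of each step is in the attachment):

    ndctl create-namespace -r region0 --mode=devdax     # succeeds: namespace0.0 / dax0.0
    daxctl reconfigure-device dax0.0 --mode=system-ram  # onlines the capacity as node 2 RAM
    ndctl disable-namespace namespace0.0                # reports "disabled 1 namespace"
    ndctl destroy-namespace -f namespace0.0             # reports "destroyed 1 namespace"
    ndctl create-namespace -r region0 --mode=devdax     # fails: "namespace0.0: failed to enable"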

Could you please look into this with priority? It is blocking Alibaba's deployment.

Many thanks,
Chunye

[-- Attachment #2: ndctl_cmd_log.txt --]
[-- Type: text/plain, Size: 50252 bytes --]

Script started on 2021-03-10 16:18:12+00:00 [TERM="xterm" TTY="/dev/pts/0" COLUMNS="149" LINES="43"]
root@cloud-dev-benz:~# ipmctl show -memoryresources
 MemoryType   | DDR         | DCPMM       | Total       
========================================================
 Volatile     | 192.000 GiB | 0.000 GiB   | 192.000 GiB
 AppDirect    | -           | 504.000 GiB | 504.000 GiB
 Cache        | 0.000 GiB   | -           | 0.000 GiB
 Inaccessible | -           | 1.689 GiB   | 1.689 GiB
 Physical     | 192.000 GiB | 505.689 GiB | 697.689 GiB
root@cloud-dev-benz:~# ndctl create-namespace -r region0 --mode=devdax
{
  "dev":"namespace0.0",
  "mode":"devdax",
  "map":"dev",
  "size":"248.06 GiB (266.35 GB)",
  "uuid":"9ad75b6c-52f1-4c58-94e7-a59b937a0a3e",
  "daxregion":{
    "id":0,
    "size":"248.06 GiB (266.35 GB)",
    "align":2097152,
    "devices":[
      {
        "chardev":"dax0.0",
        "size":"248.06 GiB (266.35 GB)",
        "target_node":2,
        "mode":"devdax"
      }
    ]
  },
  "align":2097152
}
root@cloud-dev-benz:~# ndctl list --regions --namespaces --human --buses
{
  "provider":"ACPI.NFIT",
  "dev":"ndbus0",
  "scrub_state":"idle",
  "regions":[
    {
      "dev":"region1",
      "size":"252.00 GiB (270.58 GB)",
      "available_size":"252.00 GiB (270.58 GB)",
      "max_available_extent":"252.00 GiB (270.58 GB)",
      "type":"pmem",
      "iset_id":"0xbcd6eeb86a8f2444",
      "persistence_domain":"memory_controller"
    },
    {
      "dev":"region0",
      "size":"252.00 GiB (270.58 GB)",
      "available_size":0,
      "max_available_extent":0,
      "type":"pmem",
      "iset_id":"0x1c72eeb8e48b2444",
      "persistence_domain":"memory_controller",
      "namespaces":[
        {
          "dev":"namespace0.0",
          "mode":"devdax",
          "map":"dev",
          "size":"248.06 GiB (266.35 GB)",
          "uuid":"9ad75b6c-52f1-4c58-94e7-a59b937a0a3e",
          "chardev":"dax0.0",
          "align":2097152
        }
      ]
    }
  ]
}
root@cloud-dev-benz:~# daxctl reconfigure-device dax0.0 --mode=system-ram
dax0.0:
  WARNING: detected a race while onlining memory
  Some memory may not be in the expected zone. It is
  recommended to disable any other onlining mechanisms,
  and retry. If onlining is to be left to other agents,
  use the --no-online option to suppress this warning
dax0.0: all memory sections (248) already online
[
  {
    "chardev":"dax0.0",
    "size":266352984064,
    "target_node":2,
    "mode":"system-ram",
    "movable":false
  }
]
reconfigured 1 device
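
(Note: per the warning above, default memory onlining can race with other agents such as udev rules. One way to avoid the race, assuming a daxctl build recent enough to provide the online-memory command, would be to suppress auto-onlining and then online explicitly:

    daxctl reconfigure-device dax0.0 --mode=system-ram --no-online
    daxctl online-memory dax0.0

That was not done in this run; the memory was onlined through the default path.)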
root@cloud-dev-benz:~# mount -t tmpfs -o size=4g,mpol=bind:2 tmpfs /mnt/pmem0
root@cloud-dev-benz:~# ndctl list --regions --namespaces --human --buses
{
  "provider":"ACPI.NFIT",
  "dev":"ndbus0",
  "scrub_state":"idle",
  "regions":[
    {
      "dev":"region1",
      "size":"252.00 GiB (270.58 GB)",
      "available_size":"252.00 GiB (270.58 GB)",
      "max_available_extent":"252.00 GiB (270.58 GB)",
      "type":"pmem",
      "iset_id":"0xbcd6eeb86a8f2444",
      "persistence_domain":"memory_controller"
    },
    {
      "dev":"region0",
      "size":"252.00 GiB (270.58 GB)",
      "available_size":0,
      "max_available_extent":0,
      "type":"pmem",
      "iset_id":"0x1c72eeb8e48b2444",
      "persistence_domain":"memory_controller",
      "namespaces":[
        {
          "dev":"namespace0.0",
          "mode":"devdax",
          "map":"dev",
          "size":"248.06 GiB (266.35 GB)",
          "uuid":"9ad75b6c-52f1-4c58-94e7-a59b937a0a3e",
          "chardev":"dax0.0",
          "align":2097152
        }
      ]
    }
  ]
}
root@cloud-dev-benz:~# ndctl disable-namespace namespace0.0
disabled 1 namespace
root@cloud-dev-benz:~# ndctl destroy-namespace -f namespace0.0
destroyed 1 namespace
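
(Note: at this point dax0.0 had been reconfigured to system-ram and all 248 memory sections onlined, yet disable and destroy both report success. One thing worth checking (an assumption, not a confirmed diagnosis) is whether that memory was ever offlined again before the destroy; memory that is still in use cannot be offlined. For example:

    daxctl reconfigure-device dax0.0 --mode=devdax  # attempts to offline the memory first
    daxctl offline-memory dax0.0                    # or offline explicitly, leaving the mode alone

Neither command appears in this log before the namespace was destroyed.)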
root@cloud-dev-benz:~# ndctl list -NR
[
  {
    "dev":"region1",
    "size":270582939648,
    "available_size":270582939648,
    "max_available_extent":270582939648,
    "type":"pmem",
    "iset_id":-4839418273733860284,
    "persistence_domain":"memory_controller"
  },
  {
    "dev":"region0",
    "size":270582939648,
    "available_size":270582939648,
    "max_available_extent":270582939648,
    "type":"pmem",
    "iset_id":2049963258282714180,
    "persistence_domain":"memory_controller"
  }
]
root@cloud-dev-benz:~# ndctl list --regions --namespaces --human --buses
{
  "provider":"ACPI.NFIT",
  "dev":"ndbus0",
  "scrub_state":"idle",
  "regions":[
    {
      "dev":"region1",
      "size":"252.00 GiB (270.58 GB)",
      "available_size":"252.00 GiB (270.58 GB)",
      "max_available_extent":"252.00 GiB (270.58 GB)",
      "type":"pmem",
      "iset_id":"0xbcd6eeb86a8f2444",
      "persistence_domain":"memory_controller"
    },
    {
      "dev":"region0",
      "size":"252.00 GiB (270.58 GB)",
      "available_size":"252.00 GiB (270.58 GB)",
      "max_available_extent":"252.00 GiB (270.58 GB)",
      "type":"pmem",
      "iset_id":"0x1c72eeb8e48b2444",
      "persistence_domain":"memory_controller"
    }
  ]
}
root@cloud-dev-benz:~# ndctl create-namespace -r region0 --mode=devdax
libndctl: ndctl_dax_enable: dax0.1: failed to enable
  Error: namespace0.0: failed to enable

failed to create namespace: No such device or address
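
(Note: "No such device or address" is the strerror text for ENXIO, apparently propagated from the enable path; the kernel log usually carries the underlying reason. When reproducing, it may help to capture it as well, for example:

    dmesg | grep -i -e dax -e nvdimm -e pmem

No kernel log was captured in this session.)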
root@cloud-dev-benz:~# pwd
/root
root@cloud-dev-benz:~# ll ndctl*
-rw-r--r--  1 root root 4096 Mar 10 16:19 ndctl_cmd_log.txt

ndctl:
total 2120
drwxr-xr-x 16 root root   4096 Dec 24 16:02 ./
drwx------ 20 root root   4096 Mar 10 16:18 ../
-rw-r--r--  1 root root  52286 Dec 24 15:58 aclocal.m4
-rwxr-xr-x  1 root root    790 Dec 24 15:32 autogen.sh*
drwxr-xr-x  2 root root   4096 Dec 24 15:58 autom4te.cache/
drwxr-xr-x  2 root root   4096 Dec 24 15:51 build-aux/
drwxr-xr-x 11 root root   4096 Dec 24 15:32 ccan/
-rw-r--r--  1 root root   4952 Dec 24 16:01 config.h
-rw-r--r--  1 root root   4602 Dec 24 15:58 config.h.in
-rw-r--r--  1 root root   4549 Dec 24 15:55 config.h.in~
-rw-r--r--  1 root root  40705 Dec 24 16:01 config.log
-rwxr-xr-x  1 root root  60575 Dec 24 16:01 config.status*
-rwxr-xr-x  1 root root 498738 Dec 24 15:58 configure*
-rw-r--r--  1 root root   8196 Dec 24 15:32 configure.ac
-rwxr-xr-x  1 root root 473354 Dec 24 15:58 configure.lineno*
drwxr-xr-x  2 root root   4096 Dec 24 15:32 contrib/
-rw-r--r--  1 root root   2275 Dec 24 15:32 CONTRIBUTING.md
-rw-r--r--  1 root root  26517 Dec 24 15:32 COPYING
drwxr-xr-x  5 root root   4096 Dec 24 16:02 daxctl/
drwxr-xr-x  4 root root   4096 Dec 24 15:32 Documentation/
drwxr-xr-x  8 root root   4096 Dec 24 16:02 .git/
-rw-r--r--  1 root root    932 Dec 24 15:32 .gitignore
-rwxr-xr-x  1 root root    935 Dec 24 15:32 git-version*
-rwxr-xr-x  1 root root    290 Dec 24 15:32 git-version-gen*
-rw-r--r--  1 root root  24508 Dec 24 16:02 libccan.a
-rwxr-xr-x  1 root root 342320 Dec 24 16:01 libtool*
-rw-r--r--  1 root root 378338 Dec 24 16:02 libutil.a
drwxr-xr-x  2 root root   4096 Dec 24 15:32 licenses/
drwxr-xr-x  2 root root   4096 Dec 24 15:50 m4/
-rw-r--r--  1 root root  42953 Dec 24 16:01 Makefile
-rw-r--r--  1 root root   2374 Dec 24 15:32 Makefile.am
-rw-r--r--  1 root root    919 Dec 24 15:32 Makefile.am.in
-rw-r--r--  1 root root  43990 Dec 24 15:58 Makefile.in
-rwxr-xr-x  1 root root    717 Dec 24 15:32 make-git-snapshot.sh*
drwxr-xr-x  6 root root   4096 Dec 24 16:02 ndctl/
-rw-r--r--  1 root root   4214 Dec 24 15:32 ndctl.spec.in
-rw-r--r--  1 root root   1619 Dec 24 15:32 nfit.h
-rw-r--r--  1 root root   3764 Dec 24 15:32 README.md
drwxr-xr-x  2 root root   4096 Dec 24 15:32 rhel/
-rwxr-xr-x  1 root root    196 Dec 24 15:32 rpmbuild.sh*
drwxr-xr-x  2 root root   4096 Dec 24 15:32 sles/
-rw-r--r--  1 root root     23 Dec 24 16:01 stamp-h1
drwxr-xr-x  3 root root   4096 Dec 24 16:01 test/
-rw-r--r--  1 root root   2470 Dec 24 15:32 test.h
-rw-r--r--  1 root root    626 Dec 24 15:32 .travis.yml
drwxr-xr-x  4 root root   4096 Dec 24 16:02 util/
-rw-r--r--  1 root root     31 Dec 24 15:55 version.m4
root@cloud-dev-benz:~# history
 1014  kubectl get pods
 1015  kubectl get pods|wc -l
 1016  kubectl get pods
 1017  numactl -H
 1018  kubectl apply -f pytorch-kata.yaml 
 1019  kubectl get pods
 1020  numactl -H
 1021  kubectl get pods
 1022  numactl -H
 1023  kubectl get pods
 1024  kubectl get pods|wc -l
 1025  kubectl get pods
 1026  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 100 -q 7  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1027  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 100 -q 10  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1028  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 100 -q 5  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1029  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 100 -q 2  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1030  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 100 -q 1  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1031  kubectl get pods
 1032  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 10 -q 1  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1033  curl -v -H "Host: pytorch-cifar10-predictor-default.default.example.com" -d @./input.json http://192.160.172.178/v1/models/pytorch-cifar10:predict
 1034  time curl -v -H "Host: pytorch-cifar10-predictor-default.default.example.com" -d @./input.json http://192.160.172.178/v1/models/pytorch-cifar10:predict
 1035  kubectl get pods
 1036  numactl -H
 1037  vim pytorch-kata.yaml 
 1038  time curl -v -H "Host: pytorch-cifar10-predictor-default.default.example.com" -d @./input.json http://192.160.172.178/v1/models/pytorch-cifar10:predict
 1039  kubectl get pods
 1040  time curl -v -H "Host: pytorch-cifar10-predictor-default.default.example.com" -d @./input.json http://192.160.172.178/v1/models/pytorch-cifar10:predict
 1041  env |grep proxy
 1042  time curl -v -H "Host: pytorch-cifar10-predictor-default.default.example.com" -d @./input.json http://192.160.172.178/v1/models/pytorch-cifar10:predict
 1043  top
 1044  kubectl get pods
 1045  kubectl delete -f pytorch-kata.yaml 
 1046  kubectl get pods
 1047  ps -ef|grep cloud
 1048  kubectl get pods
 1049  ps -ef|grep cloud
 1050  kubectl get pods
 1051  kubectl get ksvc
 1052  kubectl get svc
 1053  kubectl get pods
 1054  systemctl restart kubelet
 1055  kubectl get pods
 1056  ps -ef|grep cloud
 1057  kubectl apply -f pytorch-kata.yaml 
 1058  kubectl get pods
 1059  numactl -H
 1060  kubectl get pods
 1061  numactl -H
 1062  ps -ef|grep cloud
 1063  numactl -H
 1064  kubectl get pods
 1065  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 100  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1066  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 100 -q 3  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1067  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 50 -q 3  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1068  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 50 -q 4  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1069  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 10 -q 20  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1070  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 200 -q 1  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1071  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 300 -q 1  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1072  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 400 -q 1  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1073  kubectl delete -f pytorch-kata.yaml 
 1074  vim pytorch-kata.yaml 
 1075  kubectl get pods
 1076  ps -ef|grep cloud
 1077  vim /opt/kata/bin/cloud-hypervisor
 1078  kubectl apply -f pytorch-kata.yaml 
 1079  numactl -H
 1080  kubectl get pods
 1081  kubectl get pods|wc -l
 1082  kubectl get pods
 1083  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 100   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1084  kubectl delete -f pytorch-kata.yaml 
 1085  vim /opt/kata/bin/cloud-hypervisor
 1086  numactl -H
 1087  kubectl get pods
 1088  ps -ef|grep cloud
 1089  kubectl get pods
 1090  kubectl apply -f pytorch-kata.yaml 
 1091  kubectl get pods
 1092  numactl -H
 1093  kubectl get pods
 1094  numactl -H
 1095  kubectl get pods
 1096  numactl -H
 1097  kubectl get pods
 1098  numactl -H
 1099  kubectl get pods
 1100  numactl -H
 1101  kubectl get pods
 1102  numactl -H
 1103  kubectl get pods
 1104  numactl -H
 1105  kubectl get pods
 1106  numactl -H
 1107  kubectl get pods
 1108  numactl -H
 1109  kubectl get pods
 1110  numactl -H
 1111  kubectl get pods
 1112  numactl -H
 1113  kubectl get pods
 1114  numactl -H
 1115  kubectl get pods
 1116  numactl -H
 1117  kubectl get pods
 1118  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 100   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1119  kubectl delete -f pytorch-kata.yaml 
 1120  kubectl get pods
 1121  vim /opt/kata/bin/cloud-hypervisor
 1122  kubectl get pods
 1123  kubectl apply -f pytorch-kata.yaml 
 1124  vim pytorch-kata.yaml 
 1125  kubectl delete -f pytorch-kata.yaml 
 1126  vim pytorch-kata.yaml 
 1127  kubectl get pods
 1128  kubectl get pods|wc -l
 1129  vim pytorch-kata.yaml 
 1130  kubectl apply -f pytorch-kata.yaml 
 1131  kubectl get pods
 1132  numactl -H
 1133  ps -ef|grep cloud
 1134  numactl -H
 1135  kubectl delete -f pytorch-kata.yaml 
 1136  numactl -H
 1137  kubectl get pods
 1138  numactl -H
 1139  kubectl get pods
 1140  ps -ef|grep cloud
 1141  pkill -9 -f cloud-hypervisor
 1142  ps -ef|grep cloud
 1143  numactl -H
 1144  vim pytorch-kata.yaml 
 1145  kubectl apply -f pytorch-kata.yaml 
 1146  vim pytorch-kata.yaml 
 1147  kubectl get pods
 1148  numactl -H
 1149  kubectl get pods
 1150  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 100   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1151  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 220   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1152  kubectl delete -f pytorch-kata.yaml 
 1153  vim /opt/kata/bin/cloud-hypervisor
 1154  kubectl get  pods
 1155  vim pytorch-kata.yaml 
 1156  kubectl get  pods
 1157  kubectl get  pods|wc -l
 1158  kubectl get  pods
 1159  ps -ef|grep cloud
 1160  cat /opt/kata/bin/cloud-hypervisor
 1161  kubectl apply -f pytorch-kata.yaml 
 1162  numactl -H
 1163  ps -ef|grep cloud
 1164  numactl -H
 1165  kubectl get pods
 1166  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 110   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1167  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 66   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1168  kubectl delete -f pytorch-kata.yaml 
 1169  vim /etc/containerd/config.toml
 1170  kubectl get pods
 1171  systemctl restart containerd
 1172  ls
 1173  cd ..
 1174  ls
 1175  vim inferenceservice.yaml 
 1176  kubectl apply -f inferenceservice.yaml 
 1177  cd -
 1178  ls
 1179  vim pytorch-kata.yaml 
 1180  kubectl apply -f pytorch-kata.yaml 
 1181  kubectl get pods
 1182  kubectl edit ksvc
 1183  kubectl get pods
 1184  kubectl get ksvc
 1185  kubectl get pods
 1186  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 10   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1187  kubectl get pods
 1188  kubectl get ksvc
 1189  time curl -v -H "Host: pytorch-cifar10-predictor-default.default.example.com" -d @./input.json http://192.160.172.178/v1/models/pytorch-cifar10:predict
 1190  kubectl get ksvc
 1191  vim ../inferenceservice.yaml 
 1192  kubectl create secret `container-registry`   --docker-server=https://docker.io/   --docker-email=375686234@qq.com   --docker-username=arronwang  --docker-password=arron12345
 1193  kubectl create secret docker-registry myRegistry   --docker-server=https://docker.io/   --docker-email=375686234@qq.com   --docker-username=arronwang  --docker-password=arron12345
 1194  kubectl create secret docker-registry arron.wang   --docker-server=https://docker.io/   --docker-email=375686234@qq.com   --docker-username=arronwang  --docker-password=arron12345
 1195  kubectl get secret [arron.wang] --output=yaml
 1196  kubectl get secret [REGISTRY-CRED-SECRETS] --output=yaml
 1197  kubectl get secret  --output=yaml
 1198  kubectl get secret [docker-registry] --output=yaml
 1199  kubectl create secret docker-registry    --docker-server=https://docker.io/   --docker-email=375686234@qq.com   --docker-username=arronwang  --docker-password=arron12345
 1200  kubectl delete -f pytorch-kata.yaml 
 1201  kubectl get pods
 1202  kubectl get ksvc
 1203  kubectl apply -f pytorch-kata.yaml 
 1204  kubectl describe ksvc
 1205  kubectl apply -f pytorch-kata.yaml 
 1206  kubectl describe ksvc
 1207  kubectl delete -f pytorch-kata.yaml 
 1208  kubectl apply -f pytorch-kata.yaml 
 1209  kubectl describe ksvc
 1210  curl https://index.docker.io/v2/arronwang/pytorchserver/manifests/latest
 1211  env|grep proxy
 1212  export https_proxy=http://
 1213  export https_proxy=http://child-prc.intel.com:913/
 1214  curl https://index.docker.io/v2/arronwang/pytorchserver/manifests/latest
 1215  `container-registry`
 1216  kubectl create secret docker-registry container-registry   --docker-server=https://docker.io/   --docker-email=375686234@qq.com  --docker-username=arronwang   --docker-password=arron12345
 1217  kubectl get secret [container-registry] --output=yaml
 1218  kubectl get secret container-registry --output=yaml
 1219  kubectl patch serviceaccount default -p "{\"imagePullSecrets\": [{\"name\": \"container-registry\"}]}"
 1220  kubectl get pods
 1221  kubectl get ksvc
 1222  kubectl delete -f pytorch-kata.yaml 
 1223  kubectl get ksvc
 1224  kubectl describe ksvc
 1225  kubectl apply -f pytorch-kata.yaml 
 1226  kubectl describe ksvc
 1227  kubectl delete -f pytorch-kata.yaml 
 1228  kubectl apply -f pytorch-kata.yaml 
 1229  kubectl describe ksvc
 1230  kubectl get ksvc
 1231  kubectl get pods
 1232  kubectl descibe pod
 1233  kubectl describe pod
 1234  kubectl describe deployment
 1235  vim pytorch-kata.yaml 
 1236  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 10   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1237  time curl -v -H "Host: pytorch-cifar10-predictor-default.default.example.com" -d @./input.json http://192.160.172.178/v1/models/pytorch-cifar10:predict
 1238  env|grep proxy
 1239  unset https_proxy
 1240  time curl -v -H "Host: pytorch-cifar10-predictor-default.default.example.com" -d @./input.json http://192.160.172.178/v1/models/pytorch-cifar10:predict
 1241  kubectl get ksvc
 1242  vim /etc/hosts
 1243  time curl -v -H "Host: pytorch-cifar10-predictor-default.default.example.com" -d @./input.json http://192.160.172.178/v1/models/pytorch-cifar10:predict
 1244  time curl -v -d @./input.json http://pytorch-cifar10-predictor-default.default.example.com/v1/models/pytorch-cifar10:predict
 1245  ls
 1246  vim pytorch.yaml 
 1247  vim pytorch-kata.yaml 
 1248  kubectl describe route
 1249  time curl -v -d @./input.json http://pytorch-cifar10-predictor-default.default.svc.cluster.local/v1/models/pytorch-cifar10:predict
 1250  kubectl get pods
 1251  kubectl get ksvc
 1252  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 100 -q 1  -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1253  time curl -v -d @./input.json http://pytorch-cifar10-predictor-default.default.svc.cluster.local/v1/models/pytorch-cifar10:predict
 1254  time curl -v -d @./input.json http://pytorch-cifar10-predictor-default.default.example.com/v1/models/pytorch-cifar10:predict
 1255  ls
 1256  vim pytorch.yaml 
 1257  vim pytorch-kata.yaml 
 1258  time curl -v -d @./input.json http://pytorch-cifar10-predictor-default.default.example.com/v1/models/pytorch-cifar10:predict
 1259  vim pytorch-kata.yaml 
 1260  time curl -v -d @./input.json http://pytorch-cifar10-predictor-default.default.example.com/v1/models/pytorch-cifar10:predict
 1261  kubectl get svc istio-ingressgateway --namespace istio-system
 1262  time curl -v -H "Host: pytorch-cifar10-predictor-default.default.example.com" -d @./input.json http://192.160.172.178/v1/models/pytorch-cifar10:predict
 1263  kubectl get pods
 1264  ps -ef|grep python
 1265  ps -ef|grep pytorchserver
 1266  ps -ef|grep pytorchserver|wc -l
 1267  ls
 1268  kubectl delete -f pytorch-kata.yaml 
 1269  vim pytorch-kata.yaml 
 1270  kubectl get pods
 1271  vim pytorch-kata.yaml 
 1272  kubectl get pods
 1273  vim /etc/containerd/config.toml
 1274  ps -ef|grep runc
 1275  kubectl get pods
 1276  ps -ef|grep python
 1277  kubectl get pods
 1278  kubectl describe pod
 1279  kubectl get pods
 1280  vim /etc/containerd/config.toml
 1281  reboot
 1282  modprobe br_netfilter
 1283  lsmod 
 1284  kubectl get pods
 1285  kubectl get pods --all-namespace
 1286  kubectl get pods --all-namespaces
 1287  htop
 1288  cd knative/kfserving/
 1289  vim docs/samples/v1alpha2/pytorch/README.md 
 1290  cd docs/samples/v1alpha2/pytorch/
 1291  ls
 1292  vim pytorch.yaml 
 1293  vim perf.yaml 
 1294  kubectl apply -f perf.yaml 
 1295  vim README.md 
 1296  kubectl create -f perf.yaml 
 1297  kubectl get pods
 1298  ls
 1299  vim perf.yaml 
 1300  kubectl get pods
 1301  kubectl describe pod pytorch-load-test2hq8l-ndv7l
 1302  kubectl get pods
 1303  kubectl logs pytorch-load-test2hq8l-ndv7l
 1304  vim README.md 
 1305  kubectl logs pytorch-load-test2hq8l-ndv7l
 1306  kubectl delete -f perf.yaml 
 1307  kubectl logs pytorch-load-test2hq8l-ndv7l
 1308  kubectl delete -f perf.yaml 
 1309  nslookup pytorch-cifar10.default.svc.cluster.local
 1310  kubectl describe route
 1311  kubectl logs pytorch-load-test2hq8l-ndv7l
 1312  kubectl describe route
 1313  kubectl describe ksvc
 1314  ls
 1315  vim pytorch.yaml 
 1316  vim perf.yaml 
 1317  kubectl create -f perf.yaml 
 1318  kubectl get pods
 1319  kubectl logs pytorch-load-test5b7gc-gsh9g
 1320  kubectl delete -f perf.yaml 
 1321  ls
 1322  vim pytorch.yaml 
 1323  ls
 1324  vim input.json 
 1325  vim README.md 
 1326  htop
 1327  cd ~/knative/kfserving/pytorch
 1328  kubectl get ksvc
 1329  vim pytorch-kata.yaml 
 1330  kubectl apply -f pytorch-kata.yaml 
 1331  kubectl describe ksvc
 1332  kubectl get pods
 1333  kubectl describe ksvc
 1334  kubectl get pods
 1335  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 10   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1336  kubectl get svc istio-ingressgateway --namespace istio-system
 1337  unset http_proxy
 1338  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 10   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1339  htop
 1340  ls
 1341  vim pytorch-kata.yaml 
 1342  time curl -v -H "Host: pytorch-cifar10-predictor-default.default.example.com" -d @./input.json http://192.160.172.178/v1/models/pytorch-cifar10:predict
 1343  kubectl delete -f pytorch-kata.yaml 
 1344  vim pytorch-kata.yaml 
 1345  kubectl get deployment
 1346  kubectl get pods
 1347  kubectl delete pod pytorch-load-test2hq8l-ndv7l pytorch-load-test5b7gc-gsh9g
 1348  kubectl get pods
 1349  kubectl describe pods
 1350  kubectl get pods
 1351  cp pytorch-kata.yaml pytorch-runc.yaml
 1352  vim pytorch-runc.yaml 
 1353  kubectl get pods
 1354  kubectl apply -f pytorch-runc.yaml 
 1355  kubectl get pods
 1356  time curl -v -H "Host: pytorch-cifar10-predictor-default.default.example.com" -d @./input.json http://192.160.172.178/v1/models/pytorch-cifar10:predict
 1357  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 10   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1358  kubectl get pods
 1359  kubectl delete -f pytorch-runc.yaml 
 1360  kubectl get pods
 1361  vim /etc/containerd/config.toml
 1362  kubectl get pods
 1363  kubectl delete pod pytorch-cifar10-predictor-default-6vj6s-deployment-7cd84d67srdp
 1364  kubectl get pods
 1365  ps -ef|grep python
 1366  kill 45251 
 1367  ps -ef|grep python
 1368  kill -9 45251
 1369  ps -ef|grep python
 1370  kubectl get pods
 1371  vim pytorch-kata.yaml 
 1372  vim /opt/kata/bin/cloud-hypervisor
 1373  vim pytorch-kata.yaml 
 1374  kubectl apply -f pytorch-kata.yaml 
 1375  kubectl get pods
 1376  ps -ef|grep cloud
 1377  kubectl get pods
 1378  ps -ef|grep cloud
 1379  kubectl get pods
 1380  ps -ef|grep cloud
 1381  ps -ef|grep python
 1382  ps -ef|grep cloud
 1383  kubectl delete -f pytorch-kata.yaml 
 1384  kubectl get pods
 1385  ps -ef|grep python
 1386  kubectl get pods
 1387  numactl -H
 1388  kubectl get pods
 1389  vim /etc/containerd/config.toml
 1390  systemctl restart containerd
 1391  vim pytorch-kata.yaml 
 1392  kubectl apply -f pytorch-kata.yaml 
 1393  cat /opt/kata/bin/cloud-hypervisor
 1394  kubectl get pods
 1395  kubectl get ksvc
 1396  kubectl get pods
 1397  numactl -H
 1398  daxctl reconfigure-device --mode=system-ram dax0.0
 1399  cd /sys/fs/cgroup
 1400  numactl -H
 1401  echo "0-2" > cpuset/kubepods/cpuset.mems
 1402  echo "0-2" > cpuset/kubepods/besteffort/cpuset.mems
 1403  echo "0-2" > cpuset/kubepods/burstable/cpuset.mems
 1404  kubectl get pods
 1405  ps -ef|grep cloud
 1406  kubectl get pods
 1407  numactl -H
 1408  kubectl get pods
 1409  numactl -H
 1410  kubectl get pods
 1411  cd -
 1412  kubectl delete -f pytorch-kata.yaml 
 1413  kubectl get pods
 1414  ps -ef|grep cloud
 1415  pkill -9 -f cloud-hypervisor
 1416  ps -ef|grep cloud
 1417  ps -ef|grep hyper
 1418  ps -ef|grep hyper|wc -l
 1419  kubectl get pods
 1420  kubectl get pods|wc -l
 1421  kubectl get pods
 1422  ps -ef|grep virtio
 1423  ps -ef
 1424  ps -ef|grep containerd-shim-kata-v2
 1425  ps -ef|grep containerd-shim-kata-v2|wc -l
 1426  kubectl get pods
 1427  pkill -9 -f containerd-shim-kata-v2
 1428  kubectl get pods
 1429  ps -ef|grep cloud
 1430  ps -ef|grep virtiof
 1431  kubectl get pods
 1432  kubectl apply -f pytorch-kata.yaml 
 1433  kubectl get pods
 1434  numactl -H
 1435  kubectl get pods
 1436  kubectl describe pod pytorch-cifar10-predictor-default-jvl9z-deployment-6b99bd4prlrs
 1437  ps -ef|grep cloud
 1438  kubectl describe pod pytorch-cifar10-predictor-default-jvl9z-deployment-6b99bd4prlrs
 1439  cd /sys/fs/cgroup
 1440  cat cpuset/kubepods/cpuset.mems
 1441  cat cpuset/kubepods/besteffort/cpuset.mems
 1442  cat cpuset/kubepods/burstable/cpuset.mems
 1443  kubectl describe pod pytorch-cifar10-predictor-default-jvl9z-deployment-6b99bd4prlrs
 1444  kubectl get pods
 1445  cd -
 1446  ls
 1447  kubectl delete -f pytorch-kata.yaml 
 1448  ps -ef|grep cloud
 1449  kubectl get pods
 1450  ps -ef|grep cloud
 1451  vim pytorch-kata.yaml 
 1452  ps -ef|grep cloud
 1453  vim pytorch-kata.yaml 
 1454  ps -ef|grep cloud
 1455  kubectl get pods
 1456  pkill -9 -f cloud-hypervisor
 1457  kubectl get pods
 1458  ps -ef|grep cloud
 1459  ps -ef|grep kata
 1460  pkill -9 -f containerd-shim-kata-v2
 1461  ps -ef|grep kata
 1462  kubectl get pods
 1463  ps -ef|grep kata
 1464  kubectl get pods
 1465  vim pytorch-kata.yaml 
 1466  cat /opt/kata/bin/cloud-hypervisor
 1467  numactl -H
 1468  ps -ef|grep cloud
 1469  kubectl apply -f pytorch-kata.yaml 
 1470  kubectl get pods
 1471  kubectl describe pod pytorch-cifar10-predictor-default-bt8tp-deployment-76c4f49zmc55
 1472  kubectl get pods
 1473  kubectl describe pod pytorch-cifar10-predictor-default-bt8tp-deployment-76c4f49zmc55
 1474  kubectl get pods
 1475  kubectl describe pod pytorch-cifar10-predictor-default-bt8tp-deployment-76c4f49zmc55
 1476  kubectl get pods
 1477  kubectl describe pod pytorch-cifar10-predictor-default-bt8tp-deployment-76c4f49zmc55
 1478  kubectl get pods
 1479  kubectl describe pod pytorch-cifar10-predictor-default-bt8tp-deployment-76c4f49zmc55
 1480  kubectl get pods
 1481  kubectl logs pytorch-cifar10-predictor-default-bt8tp-deployment-76c4f49zmc55
 1482  kubectl logs pytorch-cifar10-predictor-default-bt8tp-deployment-76c4f49zmc55 kfserving-container
 1483  kubectl logs pytorch-cifar10-predictor-default-bt8tp-deployment-76c4f49zmc55 queue-proxy
 1484  kubectl delete -f pytorch-kata.yaml 
 1485  cd ../
 1486  find . -name inferenceservice.yaml 
 1487  kubectl apply -f ./config/configmap/inferenceservice.yaml
 1488  cd -
 1489  vim pytorch-kata.yaml 
 1490  kubectl apply -f pytorch-kata.yaml 
 1491  kubectl get pods
 1492  kubectl describe pod pytorch-cifar10-predictor-default-25nzv-deployment-86d54956rcnn
 1493  kubectl get pods
 1494  numactl -H
 1495  kubectl get pods
 1496  numactl -H
 1497  kubectl get pods
 1498  numactl -H
 1499  kubectl get pods
 1500  kubectl delete pod pytorch-cifar10-predictor-default-bt8tp-deployment-76c4f49zmc55
 1501  kubectl get pods
 1502  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 22   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1503  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 33   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1504  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 44   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1505  kubectl get pods
 1506  kubectl delete -f pytorch-kata.yaml 
 1507  kubectl get pods
 1508  ps -ef|grep cloud
 1509  pkill -9 -f cloud-hypervisor
 1510  ps -ef|grep kata
 1511  kill -9 193326
 1512  ps -ef|grep kata
 1513  ps -ef|grep cloud
 1514  kubectl get pods
 1515  kubectl apply -f pytorch-kata.yaml 
 1516  cat pytorch-kata.yaml 
 1517  kubectl get pods
 1518  numactl -H
 1519  kubectl get pods
 1520  numactl -H
 1521  kubectl get pods
 1522  numactl -H
 1523  kubectl get pods
 1524  numactl -H
 1525  kubectl get pods
 1526  numactl -H
 1527  kubectl get pods
 1528  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 44   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1529  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 50   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1530  kubectl delete -f pytorch-kata.yaml 
 1531  vim pytorch-kata.yaml 
 1532  kubectl get pods
 1533  ps -ef|grep cloud
 1534  pkill -9 -f cloud-hypervisor
 1535  ps -ef|grep cloud
 1536  ps -ef|grep kata
 1537  kubectl apply -f pytorch-kata.yaml 
 1538  cat pytorch-
 1539  cat pytorch-kata.yaml 
 1540  kubectl get pods
 1541  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 30   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1542  numactl --cpunodebind=1 --membind=1 ./hey -cpus 10 -z 30s -c 40   -m POST -D ./input.json -host "pytorch-cifar10-predictor-default.default.example.com" "http://192.160.172.178/v1/models/pytorch-cifar10:predict"
 1543  kubectl delete -f pytorch-kata.yaml 
 1544  cd ../
 1545  ls
 1546  vim sklearn-full.yaml 
 1547  htop
 1548  kubectl get pods
 1549  htop
 1550  cd knative/kfserving/
 1551  ls
 1552  vim sklearn-full.yaml 
 1553  vim pytorch/pytorch-kata.yaml 
 1554  vim sklearn-full.yaml 
 1555  vim /opt/kata/bin/cloud-hypervisor
 1556  kubectl apply -f sklearn-full.yaml 
 1557  ps -ef|grep cloud
 1558  kubectl get pods
 1559  kubectl get ksvc
 1560  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 100 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1561  numactl -H
 1562  ps -ef|grep cloud
 1563  cat /opt/kata/bin/cloud-hypervisor
 1564  numactl -H
 1565  kubectl delete -f sklearn-full.yaml 
 1566  ps -ef|grep cloud
 1567  numactl -H
 1568  ps -ef|grep cloud
 1569  numactl -H
 1570  ps -ef|grep cloud
 1571  numactl -H
 1572  ps -ef|grep cloud
 1573  numactl -H
 1574  ps -ef|grep cloud
 1575  kubectl get pods
 1576  ps -ef|grep cloud
 1577  ps -ef|grep kata
 1578  numactl -H
 1579  vim /opt/kata/bin/cloud-hypervisor
 1580  kubectl apply -f sklearn-full.yaml 
 1581  kubectl get pods
 1582  numactl -H
 1583  kubectl get pods
 1584  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 100 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1585  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 90 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1586  kubectl delete -f sklearn-full.yaml 
 1587  vim /opt/kata/bin/cloud-hypervisor
 1588  vim sklearn-full.yaml 
 1589  kubectl get pods
 1590  ps -ef|grep kata
 1591  ps -ef|grep cloud
 1592  cat sklearn-full.yaml 
 1593  kubectl apply -f sklearn-full.yaml 
 1594  kubectl get pods
 1595  numactl -H
 1596  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 200 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1597  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 300 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1598  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 500 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1599  kubectl delete -f sklearn-full.yaml 
 1600  vim /opt/kata/bin/cloud-hypervisor
 1601  kubectl get pods
 1602  ps -ef|grep cloud
 1603  ps -ef|grep kata
 1604  numactl -H
 1605  kubectl apply -f sklearn-full.yaml 
 1606  kubectl get pods
 1607  kubectl get pods|wc -l
 1608  numactl -H
 1609  kubectl get pods|wc -l
 1610  kubectl get pods
 1611  numactl -H
 1612  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 500 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1613  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 400 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1614  kubectl get pods
 1615  numactl -H
 1616  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 250 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1617  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 200 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1618  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 180 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1619  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 160 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1620  kubectl get pods
 1621  kubectl get pods|wc -l
 1622  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 160 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1623  kubectl get pods
 1624  kubectl get pods|wc -l
 1625  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 160 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1626  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 100 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1627  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 150 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1628  numactl --cpunodebind=1 --membind=1 ./hey -z 30s -c 120 -m POST -D ./iris-input.json -host "sklearn-iris-predictor-default.default.example.com" "http://192.160.172.178/v1/models/sklearn-iris:predict"
 1629  kubectl get pods
 1630  cd knative/kfserving/
 1631  kubectl delete -f sklearn-full.yaml 
 1632  vim /etc/containerd/config.toml
 1633  kubectl get pods
 1634  vim /etc/containerd/config.toml
 1635  systemctl restart containerd
 1636  htop
 1637  ls
 1638  exit
 1639  ls
 1640  kubectl get pods
 1641  ps -ax 
 1642  ps -aux
 1643  exit
 1644  htop
 1645  ls
 1646  cd mp4/
 1647  ls
 1648  ls -alh *.mp4
 1649  history |grep ffmpeg
 1650  numactl -H
 1651  rm /tmp/destination.flv
 1652  time numactl --cpunodebind=0 --membind=0 ffmpeg -i test_1080p.mp4 -c:v libx264 -crf 30 -threads 16 /tmp/destination.flv
 1653  rm /tmp/destination.flv 
 1654  time numactl --cpunodebind=0 --membind=2 ffmpeg -i test_1080p.mp4 -c:v libx264 -crf 30 -threads 16 /tmp/destination.flv
 1655  rm /tmp/destination.flv 
 1656  time numactl --cpunodebind=0 --membind=2 ffmpeg -i test_1080p.mp4 -c:v libx264 -crf 30 -threads 8 /tmp/destination.flv
 1657  rm /tmp/destination.flv 
 1658  time numactl --cpunodebind=0 --membind=2 ffmpeg -i test_1080p.mp4 -c:v libx264 -crf 30 -threads 8 /tmp/destination.flv
 1659  rm  /tmp/destination.flv
 1660  time numactl --cpunodebind=0 --membind=2 ffmpeg -i test_1080p.mp4 -c:v libx264 -crf 30 -threads 4 /tmp/destination.flv
 1661  rm /tmp/destination.flv 
 1662  time numactl --cpunodebind=0 --membind=2 ffmpeg -i test_1080p.mp4 -c:v libx264 -crf 30 -threads 4 /tmp/destination.flv
 1663  rm /tmp/destination.flv 
 1664  time numactl --cpunodebind=0 --membind=0 ffmpeg -i test_1080p.mp4 -c:v libx264 -crf 30 -threads 22 /tmp/destination.flv
 1665  rm /tmp/destination.flv 
 1666  time numactl --cpunodebind=0 --membind=2 ffmpeg -i test_1080p.mp4 -c:v libx264 -crf 30 -threads 22 /tmp/destination.flv
 1667  ls
 1668  rm /tmp/destination.flv 
 1669  time numactl --cpunodebind=0 --membind=0 ffmpeg -i test_1080p.mp4 -c:v libx264 -crf 30 -threads 4 /tmp/destination.flv
 1670  rm /tmp/destination.flv 
 1671  time numactl --cpunodebind=0 --membind=0 ffmpeg -i test_1080p.mp4 -c:v libx264 -crf 30 -threads 8 /tmp/destination.flv
 1672  ls -alh test_1080p.mp4
 1673  rm /tmp/destination.flv 
 1674  time numactl --cpunodebind=0 --membind=0 ffmpeg -i test_1080p.mp4 -c:v libx264 -crf 30 -threads 4 /tmp/destination.flv
 1675  rm /tmp/destination.flv 
 1676  time numactl --cpunodebind=0 --membind=0 ffmpeg -i test_1080p.mp4 -c:v libx264 -crf 30 -threads 2 /tmp/destination.flv
 1677  rm /tmp/destination.flv 
 1678  time numactl --cpunodebind=0 --membind=2 ffmpeg -i test_1080p.mp4 -c:v libx264 -crf 30 -threads 2 /tmp/destination.flv
 1679  ls
 1680  cd knative/
 1681  ls
 1682  cd knative-routing-tutorial/
 1683  ls
 1684  vim nginx1.yaml 
 1685  ls
 1686  vim nginx_test.yaml 
 1687  vim helloworld1.yaml 
 1688  vim test.yaml 
 1689  vim nginx1.yaml 
 1690  proxychains
 1691  apt install proxychains4
 1692  lscpu 
 1693  uname -a
 1694  vim /etc/proxychains4.conf 
 1695  proxychains ssh -p 12222 arron@139.227.46.229
 1696  ls
 1697  free -m
 1698  w
 1699  ps -ef
 1700  ps -ef|grep ssh
 1701  kill 195572
 1702  ps -ef|grep ssh
 1703  ls
 1704  w
 1705  dmidecode -t memory
 1706  exit
 1707  top
 1708  cd /root/wrk/
 1709  ls
 1710  make
 1711  ls
 1712  cp wrk  /usr/bin/
 1713  taskset -c 0-25 wrk --latency --timeout 2s -t 100 -c 10000 -d 60s http://10.239.158/82:30001
 1714  taskset -c 0-25 wrk --latency --timeout 2s -t 100 -c 10000 -d 60s http://10.239.158.82:30001
 1715  ps -aux | grep taskset
 1716  ps -aux | grep wrk
 1717  kill -9 1597329
 1718  taskset -c 0-25 wrk --latency --timeout 2s -t 100 -c 10000 -d 60s http://10.239.85.182:30001
 1719  cat /proc/cpuinfo 
 1720  taskset -c 0-25 wrk --latency --timeout 2s -t 50 -c 10000 -d 60s http://10.239.85.182:30001
 1721  lscpu 
 1722  taskset -c 0-21 wrk --latency --timeout 2s -t 88 -c 10000 -d 60s http://10.239.85.182:30001
 1723  taskset -c 0-21 wrk --latency --timeout 2s -t 44 -c 10000 -d 60s http://10.239.85.182:30001
 1724  ping 10.239.85.182
 1725  taskset -c 0-21 wrk --latency --timeout 2s -t 40 -c 8000 -d 60s http://10.239.85.182:30001
 1726  taskset -c 0-21 wrk --latency --timeout 2s -t 10 -c 1000 -d 60s http://10.239.85.182:30001
 1727  taskset -c 0-21 wrk --latency --timeout 2s -t 20 -c 2000 -d 60s http://10.239.85.182:30001
 1728  uname -r
 1729  taskset -c 0-50 wrk --latency --timeout 2s -t 20 -c 2000 -d 60s http://10.239.85.182:30001
 1730  ulimit -a
 1731  ulimit -n 65535
 1732  taskset -c 0-21 wrk --latency --timeout 2s -t 20 -c 2000 -d 60s http://10.239.85.182:30001
 1733  taskset -c 0-25 wrk --latency --timeout 2s -t 100 -c 10000 -d 30s http://10.239.85.182:30001
 1734  ping 10.239.58.182
 1735  ping 10.239.85.182
 1736  taskset -c 0-25 wrk --latency --timeout 2s -t 100 -c 10000 -d 30s http://10.239.85.182:30001
 1737  ifconfig `
 1738  ifconfig 
 1739  exit
 1740  ls
 1741  mkdir tdx
 1742  cd tdx/
 1743  git clone 
 1744  git clone https://gitlab.devtools.intel.com/tdx/linux.git
 1745  cd tdx/linux/
 1746  ls
 1747  vim KVM-TDX.README.md 
 1748  pwd
 1749  vim .config 
 1750  make bzImage -j44
 1751  sudo apt-get install build-essential libncurses-dev bison flex libssl-dev libelf-dev
 1752  cat /etc/lsb-release 
 1753  apt-get install build-essential libncurses-dev bison flex libssl-dev libelf-dev
 1754  make bzImage -j44
 1755  cp arch/x86/boot/bzImage .
 1756  find -name "*.ko"
 1757  scp arch/x86/boot/bzImage tdx@10.239.85.188:/home/tdx/host_kernel/
 1758  ls -alh bzImage 
 1759  ps -fe|grep -i net
 1760  cat /etc/networks 
 1761  cat /etc/network/interfaces 
 1762  cat /etc/network/if-up.d/ethtool 
 1763  ls
 1764  vim Makefile 
 1765  make install
 1766  vim /boot/grub/grub.cfg 
 1767  scp /boot/initrd.img-5.11.0-rc5+ /boot/System.map-5.11.0-rc5+ /boot/vmlinuz-5.11.0-rc5+ tdx@10.239.85.188:/home/tdx/host_kernel/
 1768  make uninstall
 1769  efibootmgr 
 1770  ls /boot/efi/
 1771  vim /boot/grub/grub.cfg 
 1772  update-initramfs -c -k 5.4.0-65-generic 
 1773  update-grub
 1774  vim /etc/default/grub
 1775  ls
 1776  ls /boot/
 1777  rm /boot/vmlinuz-5.11.0-rc5+ 
 1778  rm /boot/initrd.img-5.11.0-rc5+ 
 1779  rm /boot/System.map-5.11.0-rc5+ 
 1780  update-grub
 1781  vim /boot/grub/grub.cfg 
 1782  git tag
 1783  git checkout tdx-kvm-2021.02.01
 1784  ls
 1785  scp .config  tdx@10.239.85.188:/home/tdx/
 1786  cd tdx/linux/
 1787  ls
 1788  vim .config
 1789  make bzImage -j64
 1790  ls
 1791  scp -r  root@vt-master:/root/Music .
 1792  rmdir --help
 1793  sudo apt install cpuid
 1794  lscpu 
 1795  cpuid -1 --leaf=0x15
 1796  cpuid -1 --leaf=0x16~
 1797  cpuid -1 --leaf=0x15
 1798  exit
 1799  cpuid -1 --leaf=0x15
 1800  exit
 1801  cat /boot/grub/grub.cfg 
 1802  :q
 1803  kubectl get pods
 1804  kubectl get pods --all-namespaces
 1805  lsmod 
 1806  ps -ef|grep kubelet
 1807  systemctl status kubelet
 1808  vim /etc/containerd/config.toml
 1809  kubectl get pods
 1810  exit
 1811  vim /var/lib/kubelet/config.yaml
 1812  vim "/usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf"
 1813  vim /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
 1814  systemctl status kubelet
 1815  vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 
 1816  vim /etc/systemd/system/kubelet.service.d/0-containerd.conf 
 1817  vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 
 1818  vim ~/config
 1819  vim /usr/lib/systemd/system/kubelet.service 
 1820  vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 
 1821  vim /usr/lib/systemd/system/kubelet.service 
 1822  vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 
 1823  ls
 1824  ms mp4/
 1825  ls mp4/
 1826  cd mp4/
 1827  ls
 1828  ls -alh 
 1829  pwd
 1830  ifconfig
 1831  pwd
 1832  ls
 1833  du -h
 1834  ls -alh 
 1835  daxctl 
 1836  daxctl --list-cmds
 1837  daxctl reconfigure-device
 1838  daxctl reconfigure-device -m --help
 1839  ls
 1840  numactl --hardware
 1841  uname -a
 1842  ipmctl version
 1843  ndctl -v
 1844  mount
 1845  free -m
 1846  numactl -H
 1847  ls
 1848  mount
 1849  ls
 1850  cd /mnt/
 1851  ls
 1852  w
 1853  ls
 1854  mkdir pmem0
 1855  ls
 1856  numactl -h
 1857  numactl -H
 1858  ls
 1859  mount
 1860  umount  pmem0 
 1861  ls
 1862  mount -t tmpfs -o size=4g,mpol=bind:2 tmpfs /mnt/pmem0
 1863  ls
 1864  mount
 1865  numactl --hardware
 1866  dd if=/dev/zero of=/mnt/pmem0/A bs=1M count=1024
 1867  numactl -H
 1868  free -m
 1869  rm pmem0/A
 1870  free -m
 1871  numactl -H
 1872  ndctl list
 1873  ndctl list -a
 1874  ndctl list ls
 1875  ndctl list
 1876  ndctl destroy-namespace -f namespace0.0
 1877  ndctl list
 1878  free -m
 1879  n

[-- Attachment #3: Type: text/plain, Size: 167 bytes --]

_______________________________________________
Linux-nvdimm mailing list -- linux-nvdimm@lists.01.org
To unsubscribe send an email to linux-nvdimm-leave@lists.01.org
