Skip to content

Define way to expose VM workload to users #614

@yb01

Description

@yb01

What happened:

In the case below, this is the veth interface for the VM pod:

veth-a1bfdb1a: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 9000

root@ip-172-31-39-83:~/go/src/k8s.io/arktos# kubectl get pods vmdefault -o wide
NAME        HASHKEY               READY   STATUS    RESTARTS   AGE   IP          NODE              NOMINATED NODE   READINESS GATES
vmdefault   3885105893249453356   1/1     Running   0          24s   21.0.21.5   ip-172-31-39-83   <none>           <none>
root@ip-172-31-39-83:~/go/src/k8s.io/arktos# ssh cirros@21.0.21.5
ssh: connect to host 21.0.21.5 port 22: Connection timed out
root@ip-172-31-39-83:~/go/src/k8s.io/arktos# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 172.17.0.1  netmask 255.255.0.0  broadcast 172.17.255.255
        ether 02:42:12:ec:ca:59  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 9000
        inet 172.31.39.83  netmask 255.255.240.0  broadcast 172.31.47.255
        inet6 fe80::4a5:99ff:fe3e:79d  prefixlen 64  scopeid 0x20<link>
        ether 06:a5:99:3e:07:9d  txqueuelen 1000  (Ethernet)
        RX packets 48858  bytes 68788955 (68.7 MB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 8245  bytes 769594 (769.5 KB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth-hostep: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.31.39.83  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::f055:52ff:fedd:e4ae  prefixlen 64  scopeid 0x20<link>
        ether f2:55:52:dd:e4:ae  txqueuelen 1000  (Ethernet)
        RX packets 12  bytes 936 (936.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 12  bytes 936 (936.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 177212  bytes 41143162 (41.1 MB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 177212  bytes 41143162 (41.1 MB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

veth-5a235ada: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 9000
        inet6 fe80::dc98:fff:fe3d:7558  prefixlen 64  scopeid 0x20<link>
        ether de:98:0f:3d:75:58  txqueuelen 1000  (Ethernet)
        RX packets 103  bytes 7504 (7.5 KB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 17  bytes 1146 (1.1 KB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

veth-a1bfdb1a: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 9000
        inet6 fe80::c8b9:bdff:fe49:351  prefixlen 64  scopeid 0x20<link>
        ether ca:b9:bd:49:03:51  txqueuelen 1000  (Ethernet)
        RX packets 58  bytes 3908 (3.9 KB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 33  bytes 1838 (1.8 KB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

veth-e71105cc: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 9000
        inet6 fe80::5047:caff:fe5e:54ef  prefixlen 64  scopeid 0x20<link>
        ether 52:47:ca:5e:54:ef  txqueuelen 1000  (Ethernet)
        RX packets 404  bytes 28220 (28.2 KB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 19  bytes 1230 (1.2 KB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

veth-hostep: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 9000
        inet6 fe80::5048:94ff:fee9:e7b4  prefixlen 64  scopeid 0x20<link>
        ether 52:48:94:e9:e7:b4  txqueuelen 1000  (Ethernet)
        RX packets 12  bytes 936 (936.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 12  bytes 936 (936.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

root@ip-172-31-39-83:~/go/src/k8s.io/arktos# 

What you expected to happen:

As with the bridge CNI, the pod IP should be accessible:

root@ip-172-31-39-83:~/go/src/k8s.io/arktos# kubectl get pods vmdefault -o wide
NAME        HASHKEY               READY   STATUS    RESTARTS   AGE   IP           NODE              NOMINATED NODE   READINESS GATES
vmdefault   5566664455589313951   1/1     Running   0          16s   10.88.0.11   ip-172-31-39-83   <none>           <none>
root@ip-172-31-39-83:~/go/src/k8s.io/arktos# ssh cirros@10.88.0.11
The authenticity of host '10.88.0.11 (10.88.0.11)' can't be established.
ECDSA key fingerprint is SHA256:xlFkonzYp308uzMA+oEirugxa8FGPirTgPQuIM63vq4.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '10.88.0.11' (ECDSA) to the list of known hosts.
cirros@10.88.0.11's password: 
$ 

How to reproduce it (as minimally and precisely as possible):

Anything else we need to know?:
Here is the QEMU log:

root@ip-172-31-39-83:/var/log/libvirt/qemu# cat arktosRT--71afacb0-d3f9-vm.log 
2022-02-04 05:58:38.035+0000: starting up libvirt version: 6.5.0, qemu version: 4.0.0, kernel: 5.6.0-rc2, hostname: ip-172-31-39-83
LC_ALL=C \
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin \
HOME=/var/lib/libvirt/qemu/domain-1-arktosRT--71afacb0-d \
XDG_DATA_HOME=/var/lib/libvirt/qemu/domain-1-arktosRT--71afacb0-d/.local/share \
XDG_CACHE_HOME=/var/lib/libvirt/qemu/domain-1-arktosRT--71afacb0-d/.cache \
XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain-1-arktosRT--71afacb0-d/.config \
QEMU_AUDIO_DRV=none \
VIRTLET_EMULATOR=/usr/local/bin/qemu-system-x86_64 \
VIRTLET_NET_KEY=5249bc16-6c53-4f24-81fe-a41d4796c37c \
VIRTLET_CONTAINER_ID=71afacb0-d3f9-5171-5218-c0532a1160f7 \
VIRTLET_CONTAINER_LOG_PATH=/var/log/pods/system_default_vmdefault_5249bc16-6c53-4f24-81fe-a41d4796c37c/vm/0.log \
CGROUP_PARENT=/kubepods/pod5249bc16-6c53-4f24-81fe-a41d4796c37c/71afacb0-d3f9-5171-5218-c0532a1160f7 \
/vmwrapper \
-name guest=arktosRT--71afacb0-d3f9-vm,debug-threads=on \
-S \
-object secret,id=masterKey0,format=raw,file=/var/lib/libvirt/qemu/domain-1-arktosRT--71afacb0-d/master-key.aes \
-machine pc-i440fx-4.0,accel=tcg,usb=off,dump-guest-core=off \
-cpu EPYC,acpi=on,ss=on,hypervisor=on,erms=on,mpx=on,pcommit=on,clwb=on,pku=on,la57=on,3dnowext=on,3dnow=on,npt=on,vme=off,fma=off,avx=off,f16c=off,rdrand=off,avx2=off,rdseed=off,sha-ni=off,xsavec=off,fxsr_opt=off,misalignsse=off,3dnowprefetch=off,osvw=off,topoext=off,nrip-save=off \
-m size=1048576k,slots=16,maxmem=2097152k \
-overcommit mem-lock=off \
-smp 1,maxcpus=2,sockets=2,cores=1,threads=1 \
-numa node,nodeid=0,cpus=0-1,mem=1024 \
-uuid 71afacb0-d3f9-5171-5218-c0532a1160f7 \
-no-user-config \
-nodefaults \
-chardev socket,id=charmonitor,fd=22,server,nowait \
-mon chardev=charmonitor,id=monitor,mode=control \
-rtc base=utc \
-no-shutdown \
-boot strict=on \
-device piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2 \
-device virtio-scsi-pci,id=scsi0,bus=pci.0,addr=0x3 \
-drive file=/var/lib/virtlet/volumes/virtlet_root_71afacb0-d3f9-5171-5218-c0532a1160f7,format=qcow2,if=none,id=drive-scsi0-0-0-0 \
-device scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,device_id=drive-scsi0-0-0-0,drive=drive-scsi0-0-0-0,id=scsi0-0-0-0,bootindex=1 \
-drive file=/var/lib/virtlet/config/config-71afacb0-d3f9-5171-5218-c0532a1160f7.iso,format=raw,if=none,id=drive-scsi0-0-0-1,readonly=on \
-device scsi-cd,bus=scsi0.0,channel=0,scsi-id=0,lun=1,device_id=drive-scsi0-0-0-1,drive=drive-scsi0-0-0-1,id=scsi0-0-0-1 \
-chardev socket,id=charserial0,path=/var/lib/libvirt/streamer.sock,reconnect=1 \
-device isa-serial,chardev=charserial0,id=serial0 \
-device usb-tablet,id=input0,bus=usb.0,port=1 \
-vnc 127.0.0.1:0 \
-device cirrus-vga,id=video0,bus=pci.0,addr=0x2 \
-device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x4 \
-sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny \
-msg timestamp=on
2022-02-04 05:58:38.035+0000: Domain id=1 is tainted: custom-argv
I0204 05:58:38.052155   10120 vmwrapper.go:67] Obtaining PID of the VM container process...
W0204 05:58:38.052973   10120 vmwrapper.go:93] POD cgroupParent /kubepods/pod5249bc16-6c53-4f24-81fe-a41d4796c37c/71afacb0-d3f9-5171-5218-c0532a1160f7 for controller  does not exist
W0204 05:58:38.053001   10120 vmwrapper.go:100] Failed to move pid into cgroup "" path /: open /sys/fs/cgroup/cgroup.procs: read-only file system
W0204 05:58:38.053418   10120 vmwrapper.go:93] POD cgroupParent /kubepods/pod5249bc16-6c53-4f24-81fe-a41d4796c37c/71afacb0-d3f9-5171-5218-c0532a1160f7 for controller rdma does not exist
nsfix reexec: pid 10120: entering the namespaces of target pid 27334
nsfix reexec: dropping privs
root@ip-172-31-39-83:/var/log/libvirt/qemu# 

Environment:

  • Mizar version:

  • Cloud provider or hardware configuration:

  • OS (e.g: cat /etc/os-release):
    Ubuntu 18.04 with kernel update

  • Kernel (e.g. uname -a):

  • Install tools:

  • Network plugin and version (if this is a network-related bug):

  • Others:

Metadata

Metadata

Assignees

Labels

No labels
No labels

Type

No type

Projects

No projects

Milestone

No milestone

Relationships

None yet

Development

No branches or pull requests

Issue actions