
The two main Open vSwitch daemons: ovsdb-server and ovs-vswitchd.

ovs-appctl is used to send commands to a running Open vSwitch daemon.

docker run -d -p 4379:2379 k8s.gcr.io/etcd-amd64:3.1.12 /usr/local/bin/etcd --advertise-client-urls http://10.222.16.88:4379 --listen-client-urls http://0.0.0.0:2379

docker run -it --rm k8s.gcr.io/etcd-amd64:3.1.12 etcdctl --endpoints=http://10.222.16.88:4379 ls /

etcd is now reachable at http://10.222.16.88:4379

Proxy configuration for docker.service (so images can be pulled through the proxy):

Environment="HTTP_PROXY=http://10.9.1.147:1087/"
Environment="HTTPS_PROXY=https://10.9.1.147:1087/"
Environment="NO_PROXY=localhost,127.0.0.1,docker.io"
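A minimal sketch of wiring this in as a systemd drop-in (drop-in path assumed; the proxy address comes from the lines above):

# /etc/systemd/system/docker.service.d/http-proxy.conf
[Service]
Environment="HTTP_PROXY=http://10.9.1.147:1087/"
Environment="HTTPS_PROXY=https://10.9.1.147:1087/"
Environment="NO_PROXY=localhost,127.0.0.1,docker.io"

# reload unit files and restart docker so the proxy takes effect
systemctl daemon-reload
systemctl restart docker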

[addons] Applied essential addon: kube-dns
[addons] Applied essential addon: kube-proxy

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node as root:

kubeadm join --token 67de04.1f6909a18044d07b 10.0.2.15:6443 --discovery-token-ca-cert-hash sha256:b293612565b6a79f4dc399d23b1166ab626e3a79e1f5ec98c12d77f5b5e438ea

etcdctl --endpoints=http://127.0.0.1:6666 ls /contiv.io/master

[root@x1 contiv-1.1.8]# ./install/k8s/install.sh -n 10.0.2.15 -w bridge
Installing Contiv for Kubernetes
secret "aci.key" created
Generating local certs for Contiv Proxy
Setting installation parameters
Applying contiv installation
To customize the installation press Ctrl+C and edit ./.contiv.yaml.

Extracting netctl from netplugin container
d153993bd3110e6b3872e8a7e2bbfb5a5cc0b028cdc9b334c5bbf055f448fe30
clusterrolebinding "contiv-netplugin" created
clusterrole "contiv-netplugin" created
serviceaccount "contiv-netplugin" created
clusterrolebinding "contiv-netmaster" created
clusterrole "contiv-netmaster" created
serviceaccount "contiv-netmaster" created
configmap "contiv-config" created
daemonset "contiv-netplugin" created
replicaset "contiv-netmaster" created
daemonset "contiv-etcd" created
Installation is complete
=========================================================

Contiv UI is available at https://10.0.2.15:10000
Please use the first run wizard or configure the setup as follows:
Configure forwarding mode (optional, default is routing).
  netctl global set --fwd-mode routing
Configure ACI mode (optional)
  netctl global set --fabric-mode aci --vlan-range -
Create a default network
  netctl net create -t default --subnet= default-net
For example, netctl net create -t default --subnet=20.1.1.0/24 -g 20.1.1.1 default-net

kubectl taint nodes --all node-role.kubernetes.io/master-

[root@x1 ~]# ovs-dpctl dump-flows

recirc_id(0),in_port(2),eth(src=6a:07:d4:ba:4a:ac,dst=00:00:11:11:11:11),eth_type(0x0800),ipv4(src=0.0.0.0/19.0.0.0,dst=20.1.1.7,frag=no), packets:440, bytes:32608, used:0.573s, flags:S, actions:set(eth(src=00:00:11:11:11:11,dst=02:02:14:01:01:07)),3
recirc_id(0),in_port(3),eth(src=02:02:14:01:01:07,dst=00:00:11:11:11:11),eth_type(0x0800),ipv4(src=0.0.0.0/1.0.0.0,dst=132.1.1.2,proto=0/0x1,frag=no), packets:863, bytes:63842, used:0.325s, flags:SR., actions:set(eth(src=00:00:11:11:11:11,dst=02:02:84:01:01:02)),2

recirc_id(0),in_port(3),eth(src=02:02:14:01:01:07,dst=00:00:11:11:11:11),eth_type(0x0800),ipv4(src=20.1.1.7,dst=10.96.0.1,proto=6,frag=no),tcp(dst=443), packets:461, bytes:37072, used:0.179s, flags:SFPR., actions:set(eth(src=02:02:14:01:01:07,dst=02:02:ac:13:ff:fe)),set(ipv4(src=172.19.0.18,dst=10.0.2.15)),set(tcp(dst=6443)),1
recirc_id(0),in_port(1),eth(src=02:02:ac:13:ff:fe,dst=00:00:11:11:11:11),eth_type(0x0800),ipv4(src=10.0.2.15,dst=172.19.0.18,proto=6,frag=no),tcp(src=6443), packets:448, bytes:200799, used:0.179s, flags:SP., actions:set(eth(src=00:00:11:11:11:11,dst=02:02:14:01:01:07)),set(ipv4(src=10.96.0.1,dst=20.1.1.7)),set(tcp(src=443)),3

[root@zlatan ~]# ovs-dpctl dump-flows
recirc_id(0),in_port(1),eth(src=02:02:ac:13:ff:fe,dst=00:00:11:11:11:11),eth_type(0x0800),ipv4(src=192.168.0.4,dst=172.19.0.1,proto=6,frag=no),tcp(src=6443), packets:68833, bytes:32191699, used:0.011s, flags:FP., actions:set(eth(src=00:00:11:11:11:11,dst=02:02:14:01:01:02)),set(ipv4(src=10.96.0.1,dst=20.1.1.2)),set(tcp(src=443)),3
recirc_id(0),in_port(3),eth(src=02:02:14:01:01:02,dst=00:00:11:11:11:11),eth_type(0x0800),ipv4(src=20.1.1.2,dst=10.96.0.1,proto=6,frag=no),tcp(dst=443), packets:53225, bytes:3782845, used:0.052s, flags:P., actions:set(eth(src=02:02:14:01:01:02,dst=02:02:ac:13:ff:fe)),set(ipv4(src=172.19.0.1,dst=192.168.0.4)),set(tcp(dst=6443)),1

recirc_id(0),in_port(2),eth(src=02:02:84:01:01:02,dst=00:00:11:11:11:11),eth_type(0x0800),ipv4(src=4.0.0.0/21.0.0.0,dst=20.1.1.2,proto=0/0x1,frag=no), packets:133518, bytes:12120436, used:6.867s, flags:SFP., actions:set(eth(src=00:00:11:11:11:11,dst=02:02:14:01:01:02)),3
recirc_id(0),in_port(3),eth(src=02:02:14:01:01:02,dst=00:00:11:11:11:11),eth_type(0x0800),ipv4(src=0.0.0.0/1.0.0.0,dst=132.1.1.2,proto=0/0x1,frag=no), packets:131144, bytes:43404309, used:6.867s, flags:SFP., actions:set(eth(src=00:00:11:11:11:11,dst=02:02:84:01:01:02)),2

Issue:
Mar 20 07:19:05 localhost kubelet: E0320 07:19:05.644094 3669 summary.go:92] Failed to get system container stats for "/system.slice/kubelet.service": failed to get cgroup stats for "/system.slice/kubelet.service": failed to get container info for "/system.slice/kubelet.service": unknown container "/system.slice/kubelet.service"


Issue: the Contiv CNI socket /run/contiv/contiv-cni.sock is missing on the node, so the kubelet cannot set up pod networking:


: shutting down, got signal: Terminated
Mar 17 09:51:47 localhost dockerd-current: time="2018-03-17T09:51:47.19397552-04:00" level=error msg="containerd: deleting container" error="exit status 1: \"container f379d846a47e792e348e2086b4c5b07f9c1cbedf4d4401213454b07554e0360a does not exist\none or more of the container deletions failed\n\""
Mar 17 09:51:47 localhost kubelet: E0317 09:51:47.216411 1545 cni.go:259] Error adding network: Contiv:Post http://localhost/ContivCNI.AddPod: dial unix /run/contiv/contiv-cni.sock: connect: no such file or directory
Mar 17 09:51:47 localhost kubelet: E0317 09:51:47.216483 1545 cni.go:227] Error while adding to cni network: Contiv:Post http://localhost/ContivCNI.AddPod: dial unix /run/contiv/contiv-cni.sock: connect: no such file or directory
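Two quick checks (a sketch; the namespace of the Contiv daemonsets is assumed to be kube-system):

# does the CNI socket exist on this node?
ls -l /run/contiv/contiv-cni.sock

# is the contiv-netplugin daemonset pod actually running on this node?
kubectl -n kube-system get pods -o wide | grep contiv-netplugin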


ovs ----------------

yum -y install wget openssl-devel gcc make python-devel kernel-devel graphviz kernel-debug-devel autoconf automake rpm-build redhat-rpm-config libtool python-twisted-core python-zope-interface PyQt4 desktop-file-utils libcap-ng-devel groff checkpolicy selinux-policy-devel

adduser ovs
su - ovs
ls
cp openvswitch-2.5.4.tar.gz /home/ovs/rpmbuild/SOURCES/
su - ovs
cd /home/ovs/rpmbuild/SOURCES/
ll
chown ovs openvswitch-2.5.4.tar.gz
su - ovs
yum localinstall /home/ovs/rpmbuild/RPMS/x86_64/openvswitch-2.5.4-1.el7.centos.x86_64.rpm -y
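The RPM build step itself is not in the history above; before the yum localinstall it would look roughly like the sketch below, run as the ovs user (a sketch based on the OVS RHEL packaging notes; the spec file name is an assumption, the tarball ships several under rhel/):

cd /home/ovs/rpmbuild/SOURCES/
tar xzf openvswitch-2.5.4.tar.gz
cd openvswitch-2.5.4
rpmbuild -bb rhel/openvswitch.spec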

//add br0, port0 and port1
ovs-vsctl add-br br0
ovs-vsctl add-port br0 port0 -- set Interface port0 type=internal
ovs-vsctl add-port br0 port1 -- set Interface port1 type=internal
ovs-vsctl list-ports br0

ethtool -i port0
ethtool -i port1

ifconfig

//move port0 to ns0 and set its IP
ip netns add ns0
ip link set port0 netns ns0
ip netns exec ns0 ip addr add 10.0.0.1/24 dev port0
ip netns exec ns0 ifconfig port0 up
ip netns exec ns0 ifconfig lo up

ip addr add 10.0.0.2/24 dev port1
ifconfig port1 up

//show br0 bridge and port status
ovs-vsctl show

//show the OpenFlow status of br0
ovs-ofctl show br0

//get the OpenFlow port id of a port on br0
ovs-vsctl get Interface port0 ofport

//show the datapath behind br0
ovs-dpctl show

//ping 10.0.0.2 from ns0; at this point it works
ip netns exec ns0 ping 10.0.0.2

//add a flow on br0 that rewrites the source IP of packets received on OpenFlow port 1 (i.e. port0) to 1.2.3.4
ovs-ofctl add-flow br0 "priority=1,idle_timeout=0,in_port=1,actions=mod_nw_src:1.2.3.4,normal"

//check the newly added flow; note that n_packets of the first flow is 0 because nothing has been sent yet
ovs-ofctl dump-flows br0
NXST_FLOW reply (xid=0x4):
 cookie=0x0, duration=63.685s, table=0, n_packets=0, n_bytes=0, idle_age=63, priority=1,in_port=1 actions=mod_nw_src:1.2.3.4,NORMAL
 cookie=0x0, duration=1082.342s, table=0, n_packets=24, n_bytes=1856, idle_age=515, priority=0 actions=NORMAL

//trace the flow
ovs-appctl ofproto/trace br0 in_port=1
Bridge: br0
Flow: in_port=1,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=00:00:00:00:00:00,dl_type=0x0000

Rule: table=0 cookie=0 priority=0
OpenFlow actions=NORMAL
no learned MAC for destination, flooding

Final flow: unchanged
Megaflow: recirc_id=0,in_port=1,vlan_tci=0x0000,dl_src=00:00:00:00:00:00,dl_dst=00:00:00:00:00:00,dl_type=0x0000
Datapath actions: push_vlan(vid=101,pcp=0),1,pop_vlan,3

The packet is not modified.
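The trace above describes a packet with no IPv4 header (dl_type=0x0000), so it cannot show the mod_nw_src rewrite. To see how a concrete IPv4 packet from port0 would be handled, the flow fields can be spelled out; a sketch, with addresses assumed from this setup:

ovs-appctl ofproto/trace br0 in_port=1,dl_type=0x0800,nw_proto=1,nw_src=10.0.0.1,nw_dst=10.0.0.2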

//Experiment 1: rewrite the packet source IP via OpenFlow

//ping 10.0.0.2 from ns0 again; now it fails
ip netns exec ns0 ping 10.0.0.2

//capture on port1 from ns1: packets coming out of ns0 now have their source IP rewritten to 1.2.3.4
ip netns exec ns1 tcpdump -i port1 -n
tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
listening on port1, link-type EN10MB (Ethernet), capture size 262144 bytes
17:22:00.933659 IP 1.2.3.4 > 10.0.0.2: ICMP echo request, id 15347, seq 22, length 64

//check the flow counters again; they keep increasing as the ping continues
ovs-ofctl dump-flows br0
NXST_FLOW reply (xid=0x4):
 cookie=0x0, duration=258.820s, table=0, n_packets=145, n_bytes=13986, idle_age=0, priority=1,in_port=1 actions=mod_nw_src:1.2.3.4,NORMAL
 cookie=0x0, duration=1277.477s, table=0, n_packets=28, n_bytes=2024, idle_age=6, priority=0 actions=NORMAL

//delete the flow we added; ping works again
ovs-ofctl del-flows br0 in_port=1

//Experiment 2: modify the packet VLAN ID via OpenFlow

//no VLAN tag is set by default; after setting port1's VLAN to 101, ping fails and nothing is captured on port1
ovs-vsctl set Port port1 tag=101

//for packets coming from port0 that carry no VLAN tag, set the VLAN ID to 101
ovs-ofctl add-flow br0 "priority=3,in_port=1,dl_vlan=0xffff,actions=mod_vlan_vid:101,normal"

//check the current rules
ovs-ofctl dump-flows br0
NXST_FLOW reply (xid=0x4):
 cookie=0x0, duration=2126.158s, table=0, n_packets=1396, n_bytes=58632, idle_age=1, priority=3,in_port=1,vlan_tci=0x0000 actions=mod_vlan_vid:101,NORMAL
 cookie=0x0, duration=4062.532s, table=0, n_packets=1020, n_bytes=47776, idle_age=1097, priority=0 actions=NORMAL

//now ns1 sees the ARP requests coming from ns0, but ns0 never learns the reply; its ARP entry stays incomplete
ip netns exec ns0 arp -n
Address                  HWtype  HWaddress           Flags Mask            Iface
10.0.0.2                         (incomplete)                              port0

Why? Capture a packet and look: the replies from ns1 still carry the VLAN ID when they come back. That is the problem.
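To confirm this, capture with link-level headers so the 802.1Q tag is visible; a sketch, with the interface and namespace taken from the setup above:

//-e prints the Ethernet header, including any 802.1Q VLAN tag
ip netns exec ns0 tcpdump -e -nn -i port0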

//add a rule that strips the VLAN from packets coming in from port1
ovs-ofctl add-flow br0 "priority=4,in_port=2,dl_vlan=101,actions=strip_vlan,normal"

ovs-ofctl add-flow br0 "priority=4,in_port=2,actions=strip_vlan,normal"

cookie=0x0, duration=1095.558s, table=0, n_packets=615, n_bytes=25830, idle_age=19, priority=4,in_port=2 actions=strip_vlan,NORMAL

ifconfig
ethtool -i port0
ethtool -i port1
ip addr
ifconfig port1 promisc up
ip addr
ovs-vsctl show
ovs-ofctl show ovs-switch
ovs-ofctl show br0
ovs-vsctl get Interface port0 ofport


limit

apiVersion: v1
kind: LimitRange
metadata:
  name: mem-limit-range
spec:
  limits:
  - default:
      memory: 512Mi
      cpu: 1
    defaultRequest:
      memory: 256Mi
      cpu: 0.5
    max:
      memory: 1024Mi
      cpu: 1
    min:
      memory: 128Mi
      cpu: 0.5
    type: Container

The values must satisfy: min <= defaultRequest <= default <= max
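As an illustration of the defaulting (a sketch; pod name and image are made up): a container created in this namespace with no resources block gets defaultRequest as its requests and default as its limits, i.e. requests of 256Mi / 0.5 CPU and limits of 512Mi / 1 CPU here.

apiVersion: v1
kind: Pod
metadata:
  name: defaulted-pod          # hypothetical pod
spec:
  containers:
  - name: app
    image: nginx               # assumed image
    # no resources specified: the LimitRange above fills in
    # requests: memory 256Mi, cpu 0.5; limits: memory 512Mi, cpu 1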

POST
curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/api/v1/namespaces/resource-quota/limitranges --data '{"apiVersion":"v1","kind":"LimitRange","metadata":{"name":"limit-range"},"spec":{"limits":[{"default":{"memory":"512Mi","cpu":1},"defaultRequest":{"memory":"256Mi","cpu":0.5},"max":{"memory":"1024Mi","cpu":1},"min":{"memory":"128Mi","cpu":0.5},"type":"Container"}]}}'

LIST
curl -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/api/v1/namespaces/resource-quota/limitranges

READ
curl -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/api/v1/namespaces/resource-quota/limitranges/limit-range

DELETE
curl -X DELETE -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/api/v1/namespaces/resource-quota/limitranges/mem-limit-range


quota

apiVersion: v1
kind: ResourceQuota
metadata:
  name: compute-resources
spec:
  hard:
    pods: "4"
    requests.cpu: "1"
    requests.memory: 1Gi
    limits.cpu: "2"
    limits.memory: 2Gi
    persistentvolumeclaims: 2
    requests.storage: 60Gi

Quota settings currently supported:
  • CPU per namespace: the value set in the web UI maps to requests.cpu = limits.cpu on the backend
  • memory per namespace: the web value maps to requests.memory = limits.memory
  • total number of PVCs in the namespace
  • total storage in the namespace, in Gi
  • total number of pods in the namespace
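For example, if the web form asked for 2 CPUs, 4Gi of memory, 10 PVCs, 100Gi of storage and 20 pods, the quota it describes would look roughly like this (a sketch; every number here is made up):

apiVersion: v1
kind: ResourceQuota
metadata:
  name: web-quota              # hypothetical name
spec:
  hard:
    requests.cpu: "2"          # web CPU value; requests.cpu = limits.cpu
    limits.cpu: "2"
    requests.memory: 4Gi       # web memory value; requests.memory = limits.memory
    limits.memory: 4Gi
    persistentvolumeclaims: "10"
    requests.storage: 100Gi
    pods: "20"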

POST
curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/api/v1/namespaces/myspace/resourcequotas --data '{"apiVersion":"v1","kind":"ResourceQuota","metadata":{"name":"compute-resources"},"spec":{"hard":{"pods":"4","requests.cpu":"1","requests.memory":"1Gi","limits.cpu":"2","limits.memory":"2Gi","persistentvolumeclaims":2,"requests.storage":"60Gi"}}}'

LIST
curl -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/api/v1/namespaces/myspace/resourcequotas

READ
curl -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/api/v1/namespaces/myspace/resourcequotas/compute-resources


Deploying multiple ingress controllers without specifying the annotation will result in all of them fighting to satisfy the Ingress.
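The annotation in question is kubernetes.io/ingress.class; a minimal sketch of pinning an Ingress to the nginx controller (object and service names are made up):

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: pinned-ingress                    # hypothetical name
  annotations:
    kubernetes.io/ingress.class: "nginx"  # only the nginx controller reconciles this Ingress
spec:
  rules:
  - http:
      paths:
      - backend:
          serviceName: http-svc           # assumed backend service
          servicePort: 80

The mini-ingress example further below uses the same annotation.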

curl -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/apis/k8s.io/v1alpha1/clusterregistry/my-cluster

List all ingresses in a namespace
curl -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/apis/extensions/v1beta1/namespaces/default/ingresses

List all ingresses
curl -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/apis/extensions/v1beta1/ingresses

Get a specific ingress
curl -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/apis/extensions/v1beta1/namespaces/argo/ingresses/test

Delete a specific ingress
curl -X DELETE -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/apis/extensions/v1beta1/namespaces/argo/ingresses/test

Create an ingress
curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/apis/extensions/v1beta1/namespaces/argo/ingresses --data '{"apiVersion":"extensions/v1beta1","kind":"Ingress","metadata":{"name":"test","annotations":{"ingress.kubernetes.io/rewrite-target":"/"}},"spec":{"rules":[{"host":"foo.bar.com","http":{"paths":[{"backend":{"serviceName":"http-svc","servicePort":80}}]}},{"host":"bar.foo.com","http":{"paths":[{"backend":{"serviceName":"http-svc2","servicePort":80}}]}}]}}'

curl -X POST -H "Content-Type: application/json" -H "Accept: application/yaml" -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/apis/extensions/v1beta1/namespaces/argo/ingresses --data ''

{
  "apiVersion": "extensions/v1beta1",
  "kind": "Ingress",
  "metadata": {
    "name": "test",
    "annotations": {
      "ingress.kubernetes.io/rewrite-target": "/"
    }
  },
  "spec": {
    "rules": [
      {
        "host": "foo.bar.com",
        "http": {
          "paths": [
            {
              "backend": {
                "serviceName": "http-svc",
                "servicePort": 80
              }
            }
          ]
        }
      },
      {
        "host": "bar.foo.com",
        "http": {
          "paths": [
            {
              "backend": {
                "serviceName": "http-svc2",
                "servicePort": 80
              }
            }
          ]
        }
      }
    ]
  }
}

Top-level fields of the object: kind, metadata, spec, status


apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: mini-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
spec:
  rules:
  - http:
      paths:
      - path: /mini
        backend:
          serviceName: nginx
          servicePort: 80

Create a service
curl -X POST -H "Content-Type: application/json" -H "Accept: application/json" -H "Authorization: Bearer $token" -k https://apigw.example.com:8143/clusters/system/api/v1/namespaces/argo/services --data '{"apiVersion":"v1","kind":"Service","metadata":{"name":"myapp-svc","namespace":"argo","labels":{"svc":"myapp-svc"}},"spec":{"ports":[{"port":8080,"targetPort":8080,"protocol":"TCP","name":"myapp-svc-port-8080"},{"port":8081,"targetPort":8081,"protocol":"TCP","name":"myapp-svc-port-8081"}],"selector":{"app":"myapp"}}}'

{
  "apiVersion": "v1",
  "kind": "Service",
  "metadata": {
    "name": "myapp-svc",
    "namespace": "argo",
    "labels": {
      "svc": "myapp-svc"
    }
  },
  "spec": {
    "ports": [
      {
        "port": 8080,
        "targetPort": 8080,
        "protocol": "TCP",
        "name": "myapp-svc-port-8080"
      },
      {
        "port": 8081,
        "targetPort": 8081,
        "protocol": "TCP",
        "name": "myapp-svc-port-8081"
      }
    ],
    "selector": {
      "app": "myapp"
    }
  }
}

service name: ${app name}-svc
port name: ${app name}-svc-port-${port number}
label: "svc": "${app name}-svc"
selector: "app": "${app name}"