此教程繼續為上一個SDN開發實戰(1)-透明HTTP代理[Openflow+floodlight]做相關配置和實驗結果說明
3.2 Mininet配置和代理伺服器腳本
3.2.1 代理伺服器腳本
代理主機prox中需要運作一段程式來轉發接收到的package,是以編寫proxy.c檔案如下,proxy.c經過編譯後運作在prox主機中,實作對收到的package在同樣的端口轉發出去的功能
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pcap/pcap.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/socket.h>
#define FILTER "icmp or tcp"
pcap_t *handle;
void got_packet(u_char *args, const struct pcap_pkthdr*, const u_char *pkt);
/*
 * Entry point: open the capture device named in argv[1] in promiscuous
 * mode, install the BPF filter FILTER ("icmp or tcp"), then hand every
 * captured packet to got_packet() via pcap_loop().
 *
 * Returns EXIT_SUCCESS after pcap_loop() ends, EXIT_FAILURE on any
 * setup error (bad usage, open failure, filter compile/set failure).
 */
int main(int argc, char *argv[])
{
    char *dev;                      /* The device to sniff on */
    char errbuf[PCAP_ERRBUF_SIZE];  /* Error string */
    struct bpf_program fp;          /* The compiled filter */
    bpf_u_int32 mask;               /* Our netmask */
    bpf_u_int32 net;                /* Our IP */

    if (argc != 2) {
        printf("usage: proxy <dev>\n");
        return EXIT_FAILURE;
    } else {
        dev = argv[1];
    }

    /* Find the properties for the device; fall back to 0/0 so
     * pcap_compile() still works when lookup fails. */
    if (pcap_lookupnet(dev, &net, &mask, errbuf) == -1) {
        printf("warning: %s: could not get network: %s\n", dev, errbuf);
        net = 0;
        mask = 0;
    }

    /* Open the session in promiscuous mode (promisc=1, 1000 ms read timeout) */
    handle = pcap_open_live(dev, BUFSIZ, 1, 1000, errbuf);
    if (handle == NULL) {
        printf("error: %s: could not open: %s\n", dev, errbuf);
        return EXIT_FAILURE;
    }

    /* optimize=0: no BPF optimizer needed for this simple filter */
    if (pcap_compile(handle, &fp, FILTER, 0, mask) == -1) {
        printf("error: could not compile filter '%s': %s\n", FILTER, pcap_geterr(handle));
        return EXIT_FAILURE;
    }
    if (pcap_setfilter(handle, &fp) == -1) {
        printf("error: could not set filter '%s': %s\n", FILTER, pcap_geterr(handle));
        return EXIT_FAILURE;
    }

    /* Capture packets forever (cnt = -1), dispatching each to got_packet */
    int r = pcap_loop(handle, -1, got_packet, NULL);
    printf("pcap_loop() quit with: %d\n", r);
    pcap_close(handle);
    return EXIT_SUCCESS;
}
/*
 * pcap callback: log the IP endpoints and protocol of one captured
 * packet, then re-inject it unchanged on the same interface so the
 * proxy host forwards it back out of the port it arrived on.
 */
void got_packet(
    u_char *args,
    const struct pcap_pkthdr *header,
    const u_char *packet)
{
    const struct ether_header *ethernet;
    const struct ip *ip;
    /* inet_ntoa() returns a static buffer, so each address must be
     * copied out before the second call overwrites it. */
    char src_ip_str[INET_ADDRSTRLEN];
    char dst_ip_str[INET_ADDRSTRLEN];

    (void) args;  /* unused user data from pcap_loop */

    ethernet = (struct ether_header*) packet;
    if (ethernet->ether_type != ntohs(ETHERTYPE_IP)) {
        printf("ignoring non-ip packet (0x%02X) of length %d\n",
               ntohs(ethernet->ether_type), header->len);
        fflush(stdout);
        return;
    }

    /* The IP header starts immediately after the Ethernet header */
    ip = (struct ip*) (ethernet + 1);
    strcpy(src_ip_str, inet_ntoa(ip->ip_src));
    strcpy(dst_ip_str, inet_ntoa(ip->ip_dst));

    if (ip->ip_p == IPPROTO_ICMP)
        printf("%15s --> %15s [ICMP]\n", src_ip_str, dst_ip_str);
    else if (ip->ip_p == IPPROTO_TCP)
        printf("%15s --> %15s [TCP]\n", src_ip_str, dst_ip_str);
    else
        printf("%15s --> %15s [%d]\n", src_ip_str, dst_ip_str, ip->ip_p);
    fflush(stdout);

    /* Send the received packet back out on the same interface */
    if (pcap_inject(handle, packet, header->len) == -1) {
        printf("error: unable to proxy packet: %s\n", pcap_geterr(handle));
        fflush(stdout);
    }
}
3.2.2 Mininet配置
啟動Mininet網絡和運作prox.c需要在終端中輸入大量代碼,是以我們直接把這些代碼寫入Python腳本來自動運作,下面的run.py能夠為我們做下面幾個工作:
- 嘗試用套接字連接配接Floodlight控制器 (localhost,port=6653), 等待直到連接配接成功
- 啟動Mininet并建立制定拓撲結構的虛拟網絡,包括Open vswitch 和主機
- 編譯proxy.c并在host主機中運作編譯出的proxy
#!/usr/bin/env python2.7
from __future__ import print_function
from argparse import ArgumentParser
from subprocess import Popen, STDOUT, PIPE
from socket import socket, AF_INET, SOCK_STREAM
from time import sleep
from sys import stdout
from threading import Thread;
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.node import RemoteController
from mininet.node import OVSKernelSwitch
MAGIC_MAC = "00:11:00:11:00:11"
MAGIC_IP = "10.111.111.111"
# Mininet topology: three switches in a line (s1 - s2 - s3),
# hosts h1/h2 attached to s1, h3 attached to s3, and the proxy
# host attached to the middle switch s2.
class MyTopo(Topo):
    def __init__(self):
        """Create custom topo."""
        Topo.__init__(self)
        switch1 = self.addSwitch('s1')
        switch2 = self.addSwitch('s2')
        switch3 = self.addSwitch('s3')
        h1 = self.addHost('h1')
        h2 = self.addHost('h2')
        h3 = self.addHost('h3')
        prox = self.addHost('prox')
        # Host-to-switch links
        self.addLink(h1, switch1)
        self.addLink(h2, switch1)
        self.addLink(h3, switch3)
        self.addLink(prox, switch2)
        # Inter-switch links forming the s1 - s2 - s3 chain
        # (original code reused the name `link2` for two different links;
        # the unused link variables are dropped here)
        self.addLink(switch1, switch2)
        self.addLink(switch2, switch3)
# Thread that runs the compiled proxy binary on the Mininet proxy host.
class Prox(Thread):
    def __init__(self, node, log=None):
        """node: Mininet host to run the proxy on; log: optional path
        of a file that captures the proxy's stdout/stderr."""
        Thread.__init__(self)
        self.node = node
        self.log = log
    def run(self):
        # Use a local for the open file so self.log keeps the path
        # (the original overwrote self.log with the file object) and
        # the file is closed when the proxy exits.
        logfile = None
        if self.log is not None:
            logfile = open(self.log, 'w')
        self.proc = self.node.popen(
            ["./proxy", "prox-eth0"],
            stdout=logfile, stderr=logfile
        )
        print("proxy is running")
        self.proc.wait()
        if logfile is not None:
            logfile.close()
# Block until the Floodlight controller accepts TCP connections on
# localhost:6653 (the OpenFlow port Floodlight v1.3 listens on).
def wait_on_controller():
    addr = ("localhost", 6653)
    s = socket(AF_INET, SOCK_STREAM)
    try:
        s.connect(addr)
        s.close()
        return
    except:
        pass
    print("Waiting on controller", end=""); stdout.flush()
    while True:
        sleep(1)
        # A socket that failed to connect cannot be reused; make a
        # fresh one for every retry.
        s = socket(AF_INET, SOCK_STREAM)
        try:
            s.connect(addr)
            s.close()
            print("")
            return
        except:
            print(".", end=""); stdout.flush()
            continue
# Compile the proxy source file into the ./proxy executable, linking
# against libpcap. Prints the compiler output and aborts on failure.
def build_prox(psrc):
    gcc_proc = Popen(stdout=PIPE, stderr=STDOUT,
        args=("gcc", psrc, "-o", "proxy", "-l", "pcap")
    )
    r = gcc_proc.wait()
    if r != 0:
        # Show the captured compiler diagnostics, then abort.
        out, _ = gcc_proc.communicate()
        print(out)
        exit(1)
if __name__ == "__main__":
    build_prox("proxy.c")
    wait_on_controller()
    mn = Mininet(
        topo=MyTopo(),
        autoSetMacs=True,
        autoStaticArp=True,
        # Floodlight v1.3 listens on 6653; Mininet's default is 6633,
        # so the controller port must be given explicitly.
        controller=RemoteController('c0', port=6653),
        switch=OVSKernelSwitch
    )
    mn.start()
    sleep(3)  # give the switches time to connect to the controller
    # Each host pings the magic debug address once so the controller
    # learns every host's MAC address and attachment switch.
    for src in mn.hosts:
        # setARP pre-seeds the ARP entry, bypassing ARP resolution
        src.setARP(ip=MAGIC_IP, mac=MAGIC_MAC)
        src.cmd("ping", "-c1", "-W1", MAGIC_IP)
    px = Prox(mn.getNodeByName("prox"), "proxy.log")
    px.start()
    mn.interact()
4. 運作和實驗結果
4.1. 執行代碼
1.在eclipse中運作上個教程中修改的floodlight項目,floodlight控制器會在0.0.0.0:6653監聽Openflow的switch,關于如何運作floodlight控制器, 請點選這裡,如果運作正常,控制台會列印以下内容:
-- :: INFO [n.f.c.m.FloodlightModuleLoader] Loading modules from src/main/resources/floodlightdefault.properties
-- :: WARN [n.f.r.RestApiServer] HTTPS disabled; HTTPS will not be used to connect to the REST API.
-- :: WARN [n.f.r.RestApiServer] HTTP enabled; Allowing unsecure access to REST API on port 8080.
-- :: WARN [n.f.r.RestApiServer] CORS access control allow ALL origins: true
-- :: WARN [n.f.c.i.OFSwitchManager] SSL disabled. Using unsecure connections between Floodlight and switches.
-- :: INFO [n.f.c.i.OFSwitchManager] Clear switch flow tables on initial handshake as master: TRUE
-- :: INFO [n.f.c.i.OFSwitchManager] Clear switch flow tables on each transition to master: TRUE
-- :: INFO [n.f.c.i.OFSwitchManager] Setting as the default max tables to receive table-miss flow
-- :: INFO [n.f.c.i.OFSwitchManager] OpenFlow version OF_15 will be advertised to switches. Supported fallback versions [OF_10, OF_11, OF_12, OF_13, OF_14, OF_15]
-- :: INFO [n.f.c.i.OFSwitchManager] Listening for OpenFlow switches on [0.0.0.0]:6653
...
2.執行run.py腳本 ,通過以下操作來檢查網絡是否運作正常:
- nodes
- net
- h1 ping h3
- h2 ping h3
- h1 ping h2
- h1 ping prox
4.2. 實驗結果
從下面的輸出可以得出實驗結果與我們預期是相符合的,h1和h2是直接路由模式,而他們與h3的連接配接是代理模式,可以從proxy.log看出(由proxy産生),但是網絡延遲很長>1000ms,這種延遲應該是proxy轉發不及時或者其他問題;而且,對于h1和h2來說,它們完全不知道代理主機prox的存在,因為它們與prox之間為丢包模式,是以無法ping通prox代理主機
[email protected]:~/Downloads/TransHttpProxy$ sudo ./run.py
proxy is running
mininet> nodes
available nodes are:
c0 h1 h2 h3 prox s1 s2 s3
mininet> net
h1 h1-eth0:s1-eth1
h2 h2-eth0:s1-eth2
h3 h3-eth0:s3-eth1
prox prox-eth0:s2-eth1
s1 lo: s1-eth1:h1-eth0 s1-eth2:h2-eth0 s1-eth3:s2-eth2
s2 lo: s2-eth1:prox-eth0 s2-eth2:s1-eth3 s2-eth3:s3-eth2
s3 lo: s3-eth1:h3-eth0 s3-eth2:s2-eth3
c0
mininet> h1 ping h3
PING 10.0.0.3 (10.0.0.3) 56(84) bytes of data.
bytes from : icmp_seq= ttl= time= ms
bytes from : icmp_seq= ttl= time= ms
bytes from : icmp_seq= ttl= time= ms
bytes from : icmp_seq= ttl= time= ms
^C
--- 10.0.0.3 ping statistics ---
packets transmitted, received, % packet loss, time ms
rtt min/avg/max/mdev = /// ms, pipe
mininet> h2 ping h3
PING 10.0.0.3 (10.0.0.3) 56(84) bytes of data.
bytes from : icmp_seq= ttl= time= ms
bytes from : icmp_seq= ttl= time= ms
bytes from : icmp_seq= ttl= time= ms
bytes from : icmp_seq= ttl= time= ms
bytes from : icmp_seq= ttl= time= ms
^C
--- 10.0.0.3 ping statistics ---
packets transmitted, received, % packet loss, time ms
rtt min/avg/max/mdev = /// ms, pipe
mininet> h1 ping h2
PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
bytes from : icmp_seq= ttl= time= ms
bytes from : icmp_seq= ttl= time= ms
bytes from : icmp_seq= ttl= time= ms
bytes from : icmp_seq= ttl= time= ms
^C
--- 10.0.0.2 ping statistics ---
packets transmitted, received, % packet loss, time ms
rtt min/avg/max/mdev = /// ms
mininet> h1 ping prox
PING 10.0.0.4 (10.0.0.4) 56(84) bytes of data.
^C
--- 10.0.0.4 ping statistics ---
packets transmitted, received, % packet loss, time ms
mininet> exit
在代理伺服器prox中運作的proxy代碼會産生一個proxy.log檔案能夠檢視他轉發的package,裡面記錄了轉發的目的Mac位址和源Mac位址以及package的類型:
10.0.0.1 --> 10.0.0.3 [ICMP]
10.0.0.3 --> 10.0.0.1 [ICMP]
10.0.0.1 --> 10.0.0.3 [ICMP]
10.0.0.3 --> 10.0.0.1 [ICMP]
10.0.0.1 --> 10.0.0.3 [ICMP]
10.0.0.3 --> 10.0.0.1 [ICMP]
10.0.0.1 --> 10.0.0.3 [ICMP]
10.0.0.3 --> 10.0.0.1 [ICMP]
10.0.0.1 --> 10.0.0.3 [ICMP]
10.0.0.3 --> 10.0.0.1 [ICMP]
...
最後我們可以檢視switch中的flows table,在Mininet還運作的過程中,通過
ovs-ofctl dump-flows
指令檢視 :
[email protected]-virtual-machine:~$ sudo ovs-ofctl dump-flows s1
NXST_FLOW reply (xid=):
cookie=, duration=s, table=, n_packets=, n_bytes=, idle_timeout=, idle_age=, priority=,ip,in_port=,dl_src=:::::,dl_dst=::::: actions=output:
cookie=, duration=s, table=, n_packets=, n_bytes=, idle_timeout=, idle_age=, priority=,ip,in_port=,dl_src=:::::,dl_dst=::::: actions=output:
cookie=, duration=s, table=, n_packets=, n_bytes=, idle_age=, priority= actions=CONTROLLER:
[email protected]-virtual-machine:~$ sudo ovs-ofctl dump-flows s2
NXST_FLOW reply (xid=):
cookie=, duration=s, table=, n_packets=, n_bytes=, idle_timeout=, idle_age=, priority=,ip,in_port=,dl_src=:::::,dl_dst=::::: actions=output:
cookie=, duration=s, table=, n_packets=, n_bytes=, idle_timeout=, idle_age=, priority=,ip,in_port=,dl_src=:::::,dl_dst=::::: actions=output:
cookie=, duration=s, table=, n_packets=, n_bytes=, idle_timeout=, idle_age=, priority=,ip,in_port=,dl_src=:::::,dl_dst=::::: actions=output:
cookie=, duration=s, table=, n_packets=, n_bytes=, idle_timeout=, idle_age=, priority=,ip,in_port=,dl_src=:::::,dl_dst=::::: actions=output:
cookie=, duration=s, table=, n_packets=, n_bytes=, idle_age=, priority= actions=CONTROLLER:
[email protected]-virtual-machine:~$ sudo ovs-ofctl dump-flows s3
NXST_FLOW reply (xid=):
cookie=, duration=s, table=, n_packets=, n_bytes=, idle_timeout=, idle_age=, priority=,ip,in_port=,dl_src=:::::,dl_dst=::::: actions=output:
cookie=, duration=s, table=, n_packets=, n_bytes=, idle_timeout=, idle_age=, priority=,ip,in_port=,dl_src=:::::,dl_dst=::::: actions=output:
cookie=, duration=s, table=, n_packets=, n_bytes=, idle_age=, priority= actions=CONTROLLER:
以交換機s1的第一條rule為例子解釋:
cookie=0x0, duration=…s, table=0, n_packets=…, n_bytes=…, idle_timeout=…, idle_age=…, priority=1,ip,in_port=1,dl_src=00:00:00:00:00:01,dl_dst=00:00:00:00:00:03 actions=output:3
一個資料包如果能夠從s1的1端口(s1-eth1)進入,并有着源Mac位址00:00:00:00:00:01和目的Mac位址00:00:00:00:00:03,就會從s1的3端口轉發出去。注意,此包的priority=1,若有其他的比對的rule的優先級高于這個,就會執行優先級高的rule。
5. 總結
所有的代碼位于TransHttpProxDemo,總結一下幾個要注意的部分:
- v1.3的Floodlight在建立FlowMod的時候一定要設定priority>0,因為switch向Floodlight發送未比對的package的時候priority=0,若有其他priority=0的rule存在時,這些rule即使能夠比對package,也都不會被觸發,而是作為未比對的package傳遞給控制器,設定priority>0能夠避免switch執行預設的傳遞而順利執行其他的rule
- v1.3Floodlight控制器的運作在6653端口,而mininet預設的為6633端口,是以需要指定mininet與控制器連接配接的端口為6653
- run.py檔案執行的内容其實都是終端的指令行
- 為了可以在一開始就能知道所有主機的mac位址以及連接配接的switch,用了一個小trick:讓所有的主機ping調試主機Magic的IP,然後在控制器中儲存起來。這樣能夠幫助Floodlight尋找兩個主機之間的路徑
- 實驗結果沒有考慮連接配接的時延和帶寬,代理模式和直連模式互相能夠ping通就可以,應該有其他改善的地方