diff --git a/allpairsping.r2py b/allpairsping.r2py new file mode 100644 index 0000000..4259d42 --- /dev/null +++ b/allpairsping.r2py @@ -0,0 +1,197 @@ +""" + + Urvashi Soni + + + gives the connectivity graph and latency information of all nodes in a group. + +""" + +# send a probe message to each neighbor +def probe_neighbors_forever(): + # Call me again in 10 seconds + while True: + for neighborip in mycontext["neighborlist"]: + mycontext['sendtime'][neighborip] = getruntime() + if neighborip == mycontext['myip']: + #skip if ip address in neighboriplist matches the local ip because in repy we cann't have same localip and dest ip + continue + sendmessage(neighborip, mycontext['port'], 'ping',mycontext['myip'],mycontext['port']) + sendmessage(neighborip,mycontext['port'],'share'+encode_row(mycontext['myip'],mycontext["neighborlist"],mycontext['latency'].copy()),mycontext['myip'],mycontext['port']) + sleep(0.5) + #sleep for 10 sec and start from while loop again + sleep(10) + + + + +# Handle an incoming message +def got_message(srcip,srcport,mess): + if mess == 'ping': + sendmessage(srcip,srcport,'pong',mycontext['myip'], pingport) + elif mess == 'pong': + # elapsed time is now - time when I sent the ping + mycontext['latency'][srcip] = getruntime() - mycontext['sendtime'][srcip] + elif mess.startswith('share'): + mycontext['row'][srcip] = mess[len('share'):] + + + +def encode_row(rowip, neighborlist, latencylist): + retstring = ""+rowip+"" + for neighborip in neighborlist: + if neighborip in latencylist: + retstring = retstring + ""+str(latencylist[neighborip])[:4]+"s" + else: + retstring = retstring + "Unknown" + + retstring = retstring + "" + return retstring + + + +# Generates a HTML page that represents the current status of the program +def generate_status_page(): + webpage = "Latency Information

Latency information from "+mycontext['myip']+'

' + webpage = webpage + "" + for nodeip in mycontext['neighborlist']: + if nodeip in mycontext['row']: + webpage = webpage + mycontext['row'][nodeip]+'\n' + else: + webpage = webpage + '\n' + + # now the footer... + webpage = webpage + '
"+ "".join(mycontext['neighborlist'])+"
'+nodeip+'No Data Reported
' + return webpage + + + +# Displays a web page with the latency information +def handle_http_request(srcip,srcport,connobj): + log('received request from',srcip,'\n') + + # Get the header + total_data = '' + # The HTTP header ends once we see the char combination '\n\n', which + # is an empty string. + while '\n\n' not in total_data: + # Receive in chunks to avoid reading too much data + try: + data = connobj.recv(4096) + except SocketWouldBlockError: + # retry if they haven't completed sending the header + sleep(.05) + continue + except SocketClosedRemote: + log('client from',srcip,'aborted before sending HTTP header...\n') + return + total_data += data + total_data = total_data.replace('\r\n', '\n') + + header, overflow = total_data.split('\n\n', 1) + + + # Get the request path, which is inbetween the HTTP action keyword and the + # HTTP version number. + # The http action keyword is the first word with no whitespace. + everything_after_httpaction = header.split(None, 1)[1] + # Following the path is the HTTP/[VERSION_NUMBER]. + # We can use that as a delimiter to extract the path. + request_path = everything_after_httpaction.split(" HTTP/")[0] + + # Generate the data to send back + # Don't respond with anything if they have something in the request path. + # This include favicons. We don't want to generate the webpage needlessly. 
+ if request_path != '/': + data = 'HTTP/1.1 404 Not Found\n\n' + else: + webpage = generate_status_page() + # combine everything into one unit for sending + data = 'HTTP/1.1 200 OK\nContent-Type: text/html\nContent-Length: '+str(len(webpage))+'\nServer: Seattle Testbed\n\n'+webpage + + # send the response + + sent = 0 + while sent < len(data): + try: + sent += connobj.send(data[sent:]) + except SocketWouldBlockError: + # retry if response hasn't been sent yet + sleep(.05) + continue + except SocketClosedRemote: + log('client from',srcip,'aborted before response could be sent...\n') + return + # and we're done, so let's close this connection... + connobj.close() + +def handle_message_forever(): + while True: + try: + srcip, srcport, mess = udpserversocket.getmessage() + except SocketWouldBlockError: + sleep(0.1) + continue + got_message(srcip, srcport, mess) + + + +def handle_connection_forever(): + while True: + try: + ret_ip, ret_port, ret_socket = connobj.getconnection() + except SocketWouldBlockError: + sleep(0.1) + continue + handle_http_request(ret_ip, ret_port, ret_socket) + + + +if callfunc == 'initialize': + #check if user has provided port number as call argument + if len(callargs) != 1: + raise Exception("Must specify the port to use") + + pingport = int(callargs[0]) + mycontext['port'] = pingport + + # this holds the response information (i.e. when nodes responded) + mycontext['latency'] = {} + mycontext['myip'] = getmyip() + + # this remembers when we sent a probe + mycontext['sendtime'] = {} + + # this remembers row data from the other nodes + mycontext['row'] = {} + # get the nodes to probe + mycontext['neighborlist'] = [] + + try: + fileobject = openfile('neighboriplist.txt',False) + except FileNotFoundError, e: + #raise error if file doesn't exists + raise FileNotFoundError("neighboriplist.txt file doesn't exist. 
Please provide the required file in same directory.") + + filecontent = fileobject.readat(None,0) + neighbor_array = filecontent.splitlines() + for line in neighbor_array: + if line == '': + #skip if file contains any blank line + continue + mycontext['neighborlist'].append(line.strip()) + + + + #listen for a new message and call handle_message in new thread + udpserversocket = listenformessage(mycontext['myip'], mycontext['port']) + createthread(handle_message_forever) + + createthread(probe_neighbors_forever) + + #listen for connection and call handle_http_request once a connection is got + connobj = listenforconnection(mycontext['myip'],mycontext['port']) + createthread(handle_connection_forever) + + + diff --git a/apps/allpairsping/allpairsping.r2py b/apps/allpairsping/allpairsping.r2py new file mode 100644 index 0000000..22f85ff --- /dev/null +++ b/apps/allpairsping/allpairsping.r2py @@ -0,0 +1,194 @@ +""" + + Urvashi Soni + + + gives the connectivity graph and latency information of all nodes in a group. 
+ +""" + +# send a probe message to each neighbor +def probe_neighbors_forever(): + # Call me again in 10 seconds + while True: + for neighborip in mycontext["neighborlist"]: + mycontext['sendtime'][neighborip] = getruntime() + if neighborip == mycontext['myip']: + #skip if ip address in neighboriplist matches the local ip because in repy we cann't have same localip and dest ip + continue + sendmessage(neighborip, mycontext['port'], 'ping',mycontext['myip'],mycontext['port']) + sendmessage(neighborip,mycontext['port'],'share'+encode_row(mycontext['myip'],mycontext["neighborlist"],mycontext['latency'].copy()),mycontext['myip'],mycontext['port']) + sleep(0.5) + #sleep for 10 sec and start from while loop again + sleep(10) + + + + +# Handle an incoming message +def got_message(srcip,srcport,mess): + if mess == 'ping': + sendmessage(srcip,srcport,'pong',mycontext['myip'], pingport) + elif mess == 'pong': + # elapsed time is now - time when I sent the ping + mycontext['latency'][srcip] = getruntime() - mycontext['sendtime'][srcip] + elif mess.startswith('share'): + mycontext['row'][srcip] = mess[len('share'):] + + + +def encode_row(rowip, neighborlist, latencylist): + retstring = ""+rowip+"" + for neighborip in neighborlist: + if neighborip in latencylist: + retstring = retstring + ""+str(latencylist[neighborip])[:4]+"s" + else: + retstring = retstring + "Unknown" + + retstring = retstring + "" + return retstring + + + +# Generates a HTML page that represents the current status of the program +def generate_status_page(): + webpage = "Latency Information

Latency information from "+mycontext['myip']+'

' + webpage = webpage + "" + for nodeip in mycontext['neighborlist']: + if nodeip in mycontext['row']: + webpage = webpage + mycontext['row'][nodeip]+'\n' + else: + webpage = webpage + '\n' + + # now the footer... + webpage = webpage + '
"+ "".join(mycontext['neighborlist'])+"
'+nodeip+'No Data Reported
' + return webpage + + + +# Displays a web page with the latency information +def handle_http_request(srcip,srcport,connobj): + log('received request from',srcip,'\n') + + # Get the header + total_data = '' + # The HTTP header ends once we see the char combination '\n\n', which + # is an empty string. + while '\n\n' not in total_data: + # Receive in chunks to avoid reading too much data + try: + data = connobj.recv(4096) + except SocketWouldBlockError: + # retry if they haven't completed sending the header + sleep(.05) + continue + except SocketClosedRemote: + log('client from',srcip,'aborted before sending HTTP header...\n') + return + total_data += data + total_data = total_data.replace('\r\n', '\n') + + header, overflow = total_data.split('\n\n', 1) + + + # Get the request path, which is inbetween the HTTP action keyword and the + # HTTP version number. + # The http action keyword is the first word with no whitespace. + everything_after_httpaction = header.split(None, 1)[1] + # Following the path is the HTTP/[VERSION_NUMBER]. + # We can use that as a delimiter to extract the path. + request_path = everything_after_httpaction.split(" HTTP/")[0] + + # Generate the data to send back + # Don't respond with anything if they have something in the request path. + # This include favicons. We don't want to generate the webpage needlessly. 
+ if request_path != '/': + data = 'HTTP/1.1 404 Not Found\n\n' + else: + webpage = generate_status_page() + # combine everything into one unit for sending + data = 'HTTP/1.1 200 OK\nContent-Type: text/html\nContent-Length: '+str(len(webpage))+'\nServer: Seattle Testbed\n\n'+webpage + + # send the response + + sent = 0 + while sent < len(data): + try: + sent += connobj.send(data[sent:]) + except SocketWouldBlockError: + # retry if response hasn't been sent yet + sleep(.05) + continue + except SocketClosedRemote: + log('client from',srcip,'aborted before response could be sent...\n') + return + # and we're done, so let's close this connection... + connobj.close() + +def handle_message_forever(): + while True: + try: + srcip, srcport, mess = udpserversocket.getmessage() + got_message(srcip, srcport, mess) + except SocketWouldBlockError: + sleep(0.1) + + +def handle_connection_forever(): + while True: + try: + ret_ip, ret_port, ret_socket = connobj.getconnection() + handle_http_request(ret_ip, ret_port, ret_socket) + except SocketWouldBlockError: + sleep(0.1) + + +if callfunc == 'initialize': + #check if user has provided port number as call argument + if len(callargs) != 1: + raise Exception("Must specify the port to use") + + pingport = int(callargs[0]) + mycontext['port'] = pingport + + # this holds the response information (i.e. when nodes responded) + mycontext['latency'] = {} + mycontext['myip'] = getmyip() + log(mycontext['myip']) + + # this remembers when we sent a probe + mycontext['sendtime'] = {} + + # this remembers row data from the other nodes + mycontext['row'] = {} + # get the nodes to probe + mycontext['neighborlist'] = [] + + try: + fileobject = openfile('neighboriplist.txt',False) + except FileNotFoundError, e: + #raise error if file doesn't exists + raise FileNotFoundError("neighboriplist.txt file doesn't exist. 
Please provide the required file in same directory.") + + filecontent = fileobject.readat(None,0) + neighbor_array = filecontent.splitlines() + for line in neighbor_array: + if line == '': + #skip if file contains any blank line + continue + mycontext['neighborlist'].append(line.strip()) + + + + #listen for a new message and call handle_message in new thread + udpserversocket = listenformessage(mycontext['myip'], mycontext['port']) + createthread(handle_message_forever) + + createthread(probe_neighbors_forever) + + #listen for connection and call handle_http_request once a connection is got + connobj = listenforconnection(mycontext['myip'],mycontext['port']) + createthread(handle_connection_forever) + + + diff --git a/apps/allpairspingmap/allpairspingmap.r2py b/apps/allpairspingmap/allpairspingmap.r2py new file mode 100644 index 0000000..a0d8b3e --- /dev/null +++ b/apps/allpairspingmap/allpairspingmap.r2py @@ -0,0 +1,487 @@ +''' + + allpairspingmap.r2py + + + This program gives a visual representation of the vessels that a user has + acquired. It includes a map that contains markers on where the vessels are + reported to be, according to the geoip client. + + + A file containing the ip addresses of all the vessels to contact must be + uploaded. This file should be named 'neighboriplist.txt'. Each line should + contain one vessel. This file can be prepared within seash by issuing the + following command within a group: + show ip to neighboriplist.txt + + Additionally, the following files (which are provided) need to be uploaded to + the vessels: + jquerygmaps.js + style.css + map_marker_icon.png + map_marker_icon_blue.png + + Once this is set up, you need to pass your Clearinghouse port to the program + as an argument: + username@group> run allpairspingmap.r2py [port number] + +''' + +dy_import_module_symbols('geoip_client.r2py') + + +class InvalidRequestError(Exception): + ''' The user made a request for a nonexistent file. 
''' + + +# These are the paths that the web server will serve files for. +# This is to prevent clients from seeing anything else that we might not want +# them to be able to see. +# +# '': +# Index file. We generate a webpage representing the current program status +# +# style.css: +# Stylesheet that controls how the webpage is formatted. Without this, +# the map will not render correctly. +# +# jquerygmaps.js: +# Contains code that interfaces against google's MapV2 API. +# +# map_marker_icon.png: +# This image is used as a marker for where the vessels are, on the map. + +mycontext['RECOGNIZED_FILES'] = ( + '', 'style.css', 'jquerygmaps.js', 'map_marker_icon.png', 'map_marker_icon_blue.png') + + +# When responding to HTTP requests, we need to include a MIME type, identifying +# the type of file that we are serving to the HTTP client. This dictionary +# maps a file extension to its common MIME type. +mycontext['MIME_TYPES'] = { + '.js': 'text/javascript', + '.css': 'text/css', + '.png': 'image/png' + } + + + +def probe_neighbors_forever(): + ''' + + Send a probe message to each neighbor + + + port: The clearinghouse port assigned to the current user. + + + Starts the ping loop to calculate the latency between the local node + and neighboring nodes. We also send our latency data to each node. 
+ + + None + + + None + ''' + while True: + for neighborip in mycontext["neighborlist"]: + mycontext['sendtime'][neighborip] = getruntime() + if neighborip == mycontext['myip']: + #skip if ip address in neighboriplist matches the local ip because in repy we cann't have same localip and dest ip + continue + sendmessage(neighborip, mycontext['port'], 'ping',mycontext['myip'],mycontext['port']) + sendmessage(neighborip,mycontext['port'],'share'+encode_row(mycontext['myip'],mycontext["neighborlist"],mycontext['latency'].copy()),mycontext['myip'],mycontext['port']) + sleep(0.5) + #sleep for 10 sec and start from while loop again + sleep(10) + + + + +# Handle an incoming message +def got_message(srcip,srcport,mess): + ''' + + Handle an incoming message from a neighboring node. + + + See documentation for recvmess(). + + + If we receive a ping message, we respond with a pong message. + If we receive a pong message, we take the difference between when we sent + this neighbor a ping message and record that as the latency. + If we receive a share message, we record the neighbor's neighbor row + information. + + + None + + + None + ''' + + + if mess == 'ping': + sendmessage(srcip,srcport,'pong',getmyip(), pingport) + elif mess == 'pong': + # elapsed time is now - time when I sent the ping + mycontext['latency'][srcip] = getruntime() - mycontext['sendtime'][srcip] + + elif mess.startswith('share'): + mycontext['row'][srcip] = mess[len('share'):] + + + +def encode_row(rowip, neighborlist, latencylist): + ''' + + Prepares the local node's latency information into a format that is + recognizable by the other nodes. + + + neighborlist: The IP addresses of all our neighbors. + latencylist: The list of latencies associated with the neighbors. + + + None + + + None + + + A string representing a HTML row containing the latency information + between this node and the other neighbor nodes. 
+ ''' + + + retstring = ""+rowip+"" + for neighborip in neighborlist: + if neighborip in latencylist: + retstring = retstring + ""+str(latencylist[neighborip])[:4]+"s" + else: + retstring = retstring + "Unknown" + + retstring = retstring + "" + return retstring + + +def generate_node_list(): + """ + + Generates an HTMl string of an unsorted list of nodes. + + + None. + + + None. + + + None. + + + HTML string of unsorted list of nodes. + """ + # Is there a specific reason why we sort these keys? I'm leaving these + # intact since the original version sorted them. + nodeiplist = mycontext['neighborlist'] + nodeiplist.sort() + + nodelist_str = '
    ' + for nodeip in nodeiplist: + # Print node list element + nodelist_str += ('
  • ' + + str(nodeip) + '') + + if nodeip in mycontext['locationdata']: + nodelocdict = mycontext['locationdata'][nodeip] + if nodelocdict is not None: + nodelist_str += ('' + str(nodelocdict['longitude']) + + '' + str(nodelocdict['latitude']) + + '' + geoip_location_str(nodelocdict) + '') + # We didn't perform the lookup yet. This is used to tell the client to + # refresh the page. + else: + nodelist_str += "" + nodelist_str += '
  • ' + nodelist_str += '
' + return nodelist_str + + + + + +def generate_status_page(): + ''' + + Generates a HTML page that represents the current status of the program + + + None + + + The webpage returned may cause a client's browser to make additional + requests to the current program, or other machines. + + + None + + + A string representing a HTML webpage containing the current status of the + program. + ''' + webpage = "Latency Information" + + webpage += '' + webpage += '' + # Include our script/stylesheet for the webpage + webpage += '' + webpage += '' + + # Begin displaying body content; Lets start with the map. + webpage += "
" + + # Display the latency information as a table + webpage += "

Latency information from "+getmyip()+'

' + + # Create a column in the table for each vessel + webpage += "" + + # Create a row in the table for each vessel + for nodeip in mycontext['neighborlist']: + # Show ip and location data, if present + webpage += '' + + # Add latency information + if nodeip in mycontext['row']: + webpage += mycontext['row'][nodeip] + else: + webpage += '' + webpage += '\n' + + webpage += '
"+ "".join(mycontext['neighborlist'])+"
' +nodeip+'
\n' + + # Was there a geoip lookup yet? + if nodeip in mycontext['locationdata']: + # Did the geoip lookup succeed? + if mycontext['locationdata'][nodeip] is not None: + nodelocation = mycontext['locationdata'][nodeip] + webpage += nodelocation['city'] + ', ' + nodelocation['country_code'] + # The lookup failed + else: + webpage += "No location data available" + + # We haven't done a geoip lookup yet. Let the user know. + else: + webpage += "Location data not yet retrieved" + webpage += '
No Data Reported
' + + # We need this for the map data to work + webpage += generate_node_list() + # now the footer... + webpage += '' + + return webpage + + +def handle_http_request(srcip,srcport,connobj): + # Get the header + total_data = '' + # The HTTP header ends once we see the char combination '\n\n', which + # is an empty string. + while '\n\n' not in total_data: + # Receive in chunks to avoid reading too much data + try: + data = connobj.recv(4096) + except SocketWouldBlockError: + # retry if they haven't completed sending the header + sleep(.05) + continue + except SocketClosedRemote: + log('client from',srcip,'aborted before sending HTTP header...\n') + return + total_data += data + total_data = total_data.replace('\r\n', '\n') + + header, overflow = total_data.split('\n\n', 1) + + # Get the request path, which is inbetween the HTTP action keyword and the + # HTTP version number. + # The http action keyword is the first word with no whitespace. + everything_after_httpaction = header.split(None, 1)[1] + # Following the path is the HTTP/[VERSION_NUMBER]. + # We can use that as a delimiter to extract the path. + requested_file = everything_after_httpaction.split(" HTTP/")[0] + + # Get rid of the leading '/' because its useless to us + requested_file = requested_file.lstrip('/') + + # Generate the data to send back. + try: + # We default to this content type. + content_type = 'text/html' + # Don't respond with anything if they have something in the request path. + + if requested_file not in mycontext['RECOGNIZED_FILES']: + raise InvalidRequestError("Unrecognized file:" + requested_file) + + # Generate the index page if the request field is empty. + elif not requested_file: + + contents = generate_status_page() + + # This is a file that we recognize. Send its contents to the client. + else: + # PNGs are binary files. Plaintext files still work when read in + # binary mode. 
+ contents = openfile(requested_file, True).readat(None,0) + + # Figure out the MIME type to send to the client + for extension in mycontext['MIME_TYPES']: + if requested_file.endswith(extension): + content_type = mycontext['MIME_TYPES'][extension] + break + else: + content_type = 'text/plain' + + # combine everything into one unit for sending + data = 'HTTP/1.1 200 OK\nContent-Type: '+content_type+'\nContent-Length: '+str(len(contents))+'\nServer: Seattle Testbed\n\n'+contents + + except InvalidRequestError: + data = 'HTTP/1.1 404 Not Found\n\n' + + + # send the response + sent = 0 + while sent < len(data): + try: + sent += connobj.send(data[sent:]) + except SocketWouldBlockError: + # retry if response hasn't been sent yet + sleep(.05) + continue + except SocketClosedRemote: + log('client from',srcip,'aborted before response could be sent...\n') + return + # and we're done, so let's close this connection... + connobj.close() + + + +def lookup_geoip_info(): + ''' + + Acquires the geoip information for all the vessels specified in the + neighbor ip table. + + + None + + + The geoip data recorded will be stored in mycontext['locationdata'][neighborip]. + If the lookup was successful, then the looked up data will be stored. + Otherwise, we put + + + None + + + None + ''' + + geoip_init_client() + for neighbor in mycontext['neighborlist']: + try: + locationdict = geoip_record_by_addr(neighbor) + if locationdict is not None: + # Sometimes we don't get a city name. + if 'city' not in locationdict: + locationdict['city'] = "Unknown" + mycontext['locationdata'][neighbor] = locationdict + + # The lookup failed + else: + mycontext['locationdata'][neighbor] = None + + except Exception, e: + if not "Unable to contact the geoip server" in str(e): + raise + # We use this to indicate that no location data is available. 
+ mycontext['locationdata'][neighbor] = None + +def handle_message_forever(): + while True: + try: + srcip, srcport, mess = udpserversocket.getmessage() + except SocketWouldBlockError: + sleep(0.1) + continue + got_message(srcip, srcport, mess) + + + +def handle_connection_forever(): + while True: + try: + ret_ip, ret_port, ret_socket = connobj.getconnection() + except SocketWouldBlockError: + sleep(0.1) + continue + handle_http_request(ret_ip, ret_port, ret_socket) + + + +if callfunc == 'initialize': + + #check if user has provided port number as call argument + if len(callargs) != 1: + raise Exception("Must specify the port to use") + + pingport = int(callargs[0]) + mycontext['port'] = pingport + + # this holds the response information (i.e. when nodes responded) + mycontext['latency'] = {} + mycontext['myip'] = getmyip() + + # this remembers when we sent a probe + mycontext['sendtime'] = {} + + # this remembers row data from the other nodes + mycontext['row'] = {} + # get the nodes to probe + mycontext['neighborlist'] = [] + + mycontext['locationdata'] = {} + try: + fileobject = openfile('neighboriplist.txt',False) + except FileNotFoundError, e: + #raise error if file doesn't exists + raise FileNotFoundError("neighboriplist.txt file doesn't exist. 
Please provide the required file in same directory.") + + filecontent = fileobject.readat(None,0) + neighbor_array = filecontent.splitlines() + for line in neighbor_array: + if line == '': + #skip if file contains any blank line + continue + mycontext['neighborlist'].append(line.strip()) + + + + #listen for a new message and call handle_message in new thread + udpserversocket = listenformessage(mycontext['myip'], mycontext['port']) + createthread(handle_message_forever) + + createthread(probe_neighbors_forever) + + #listen for connection and call handle_http_request once a connection is got + connobj = listenforconnection(mycontext['myip'],mycontext['port']) + createthread(handle_connection_forever) + + # Getting geoip data takes a while, in the meanwhile we allow the user to + # see a status webpage and display a notice there. + lookup_geoip_info() + diff --git a/apps/allpairspingmap/allpairspingmap_new,r2py b/apps/allpairspingmap/allpairspingmap_new,r2py new file mode 100644 index 0000000..a0d8b3e --- /dev/null +++ b/apps/allpairspingmap/allpairspingmap_new,r2py @@ -0,0 +1,487 @@ +''' + + allpairspingmap.r2py + + + This program gives a visual representation of the vessels that a user has + acquired. It includes a map that contains markers on where the vessels are + reported to be, according to the geoip client. + + + A file containing the ip addresses of all the vessels to contact must be + uploaded. This file should be named 'neighboriplist.txt'. Each line should + contain one vessel. 
This file can be prepared within seash by issuing the + following command within a group: + show ip to neighboriplist.txt + + Additionally, the following files (which are provided) need to be uploaded to + the vessels: + jquerygmaps.js + style.css + map_marker_icon.png + map_marker_icon_blue.png + + Once this is set up, you need to pass your Clearinghouse port to the program + as an argument: + username@group> run allpairspingmap.r2py [port number] + +''' + +dy_import_module_symbols('geoip_client.r2py') + + +class InvalidRequestError(Exception): + ''' The user made a request for a nonexistent file. ''' + + +# These are the paths that the web server will serve files for. +# This is to prevent clients from seeing anything else that we might not want +# them to be able to see. +# +# '': +# Index file. We generate a webpage representing the current program status +# +# style.css: +# Stylesheet that controls how the webpage is formatted. Without this, +# the map will not render correctly. +# +# jquerygmaps.js: +# Contains code that interfaces against google's MapV2 API. +# +# map_marker_icon.png: +# This image is used as a marker for where the vessels are, on the map. + +mycontext['RECOGNIZED_FILES'] = ( + '', 'style.css', 'jquerygmaps.js', 'map_marker_icon.png', 'map_marker_icon_blue.png') + + +# When responding to HTTP requests, we need to include a MIME type, identifying +# the type of file that we are serving to the HTTP client. This dictionary +# maps a file extension to its common MIME type. +mycontext['MIME_TYPES'] = { + '.js': 'text/javascript', + '.css': 'text/css', + '.png': 'image/png' + } + + + +def probe_neighbors_forever(): + ''' + + Send a probe message to each neighbor + + + port: The clearinghouse port assigned to the current user. + + + Starts the ping loop to calculate the latency between the local node + and neighboring nodes. We also send our latency data to each node. 
+ + + None + + + None + ''' + while True: + for neighborip in mycontext["neighborlist"]: + mycontext['sendtime'][neighborip] = getruntime() + if neighborip == mycontext['myip']: + #skip if ip address in neighboriplist matches the local ip because in repy we cann't have same localip and dest ip + continue + sendmessage(neighborip, mycontext['port'], 'ping',mycontext['myip'],mycontext['port']) + sendmessage(neighborip,mycontext['port'],'share'+encode_row(mycontext['myip'],mycontext["neighborlist"],mycontext['latency'].copy()),mycontext['myip'],mycontext['port']) + sleep(0.5) + #sleep for 10 sec and start from while loop again + sleep(10) + + + + +# Handle an incoming message +def got_message(srcip,srcport,mess): + ''' + + Handle an incoming message from a neighboring node. + + + See documentation for recvmess(). + + + If we receive a ping message, we respond with a pong message. + If we receive a pong message, we take the difference between when we sent + this neighbor a ping message and record that as the latency. + If we receive a share message, we record the neighbor's neighbor row + information. + + + None + + + None + ''' + + + if mess == 'ping': + sendmessage(srcip,srcport,'pong',getmyip(), pingport) + elif mess == 'pong': + # elapsed time is now - time when I sent the ping + mycontext['latency'][srcip] = getruntime() - mycontext['sendtime'][srcip] + + elif mess.startswith('share'): + mycontext['row'][srcip] = mess[len('share'):] + + + +def encode_row(rowip, neighborlist, latencylist): + ''' + + Prepares the local node's latency information into a format that is + recognizable by the other nodes. + + + neighborlist: The IP addresses of all our neighbors. + latencylist: The list of latencies associated with the neighbors. + + + None + + + None + + + A string representing a HTML row containing the latency information + between this node and the other neighbor nodes. 
+ ''' + + + retstring = ""+rowip+"" + for neighborip in neighborlist: + if neighborip in latencylist: + retstring = retstring + ""+str(latencylist[neighborip])[:4]+"s" + else: + retstring = retstring + "Unknown" + + retstring = retstring + "" + return retstring + + +def generate_node_list(): + """ + + Generates an HTMl string of an unsorted list of nodes. + + + None. + + + None. + + + None. + + + HTML string of unsorted list of nodes. + """ + # Is there a specific reason why we sort these keys? I'm leaving these + # intact since the original version sorted them. + nodeiplist = mycontext['neighborlist'] + nodeiplist.sort() + + nodelist_str = '
    ' + for nodeip in nodeiplist: + # Print node list element + nodelist_str += ('
  • ' + + str(nodeip) + '') + + if nodeip in mycontext['locationdata']: + nodelocdict = mycontext['locationdata'][nodeip] + if nodelocdict is not None: + nodelist_str += ('' + str(nodelocdict['longitude']) + + '' + str(nodelocdict['latitude']) + + '' + geoip_location_str(nodelocdict) + '') + # We didn't perform the lookup yet. This is used to tell the client to + # refresh the page. + else: + nodelist_str += "" + nodelist_str += '
  • ' + nodelist_str += '
' + return nodelist_str + + + + + +def generate_status_page(): + ''' + + Generates a HTML page that represents the current status of the program + + + None + + + The webpage returned may cause a client's browser to make additional + requests to the current program, or other machines. + + + None + + + A string representing a HTML webpage containing the current status of the + program. + ''' + webpage = "Latency Information" + + webpage += '' + webpage += '' + # Include our script/stylesheet for the webpage + webpage += '' + webpage += '' + + # Begin displaying body content; Lets start with the map. + webpage += "
" + + # Display the latency information as a table + webpage += "

Latency information from "+getmyip()+'

' + + # Create a column in the table for each vessel + webpage += "" + + # Create a row in the table for each vessel + for nodeip in mycontext['neighborlist']: + # Show ip and location data, if present + webpage += '' + + # Add latency information + if nodeip in mycontext['row']: + webpage += mycontext['row'][nodeip] + else: + webpage += '' + webpage += '\n' + + webpage += '
"+ "".join(mycontext['neighborlist'])+"
' +nodeip+'
\n' + + # Was there a geoip lookup yet? + if nodeip in mycontext['locationdata']: + # Did the geoip lookup succeed? + if mycontext['locationdata'][nodeip] is not None: + nodelocation = mycontext['locationdata'][nodeip] + webpage += nodelocation['city'] + ', ' + nodelocation['country_code'] + # The lookup failed + else: + webpage += "No location data available" + + # We haven't done a geoip lookup yet. Let the user know. + else: + webpage += "Location data not yet retrieved" + webpage += '
No Data Reported
' + + # We need this for the map data to work + webpage += generate_node_list() + # now the footer... + webpage += '' + + return webpage + + +def handle_http_request(srcip,srcport,connobj): + # Get the header + total_data = '' + # The HTTP header ends once we see the char combination '\n\n', which + # is an empty string. + while '\n\n' not in total_data: + # Receive in chunks to avoid reading too much data + try: + data = connobj.recv(4096) + except SocketWouldBlockError: + # retry if they haven't completed sending the header + sleep(.05) + continue + except SocketClosedRemote: + log('client from',srcip,'aborted before sending HTTP header...\n') + return + total_data += data + total_data = total_data.replace('\r\n', '\n') + + header, overflow = total_data.split('\n\n', 1) + + # Get the request path, which is inbetween the HTTP action keyword and the + # HTTP version number. + # The http action keyword is the first word with no whitespace. + everything_after_httpaction = header.split(None, 1)[1] + # Following the path is the HTTP/[VERSION_NUMBER]. + # We can use that as a delimiter to extract the path. + requested_file = everything_after_httpaction.split(" HTTP/")[0] + + # Get rid of the leading '/' because its useless to us + requested_file = requested_file.lstrip('/') + + # Generate the data to send back. + try: + # We default to this content type. + content_type = 'text/html' + # Don't respond with anything if they have something in the request path. + + if requested_file not in mycontext['RECOGNIZED_FILES']: + raise InvalidRequestError("Unrecognized file:" + requested_file) + + # Generate the index page if the request field is empty. + elif not requested_file: + + contents = generate_status_page() + + # This is a file that we recognize. Send its contents to the client. + else: + # PNGs are binary files. Plaintext files still work when read in + # binary mode. 
      # readat(None, 0): read the entire file starting at offset 0.
      contents = openfile(requested_file, True).readat(None,0)

      # Figure out the MIME type to send to the client
      for extension in mycontext['MIME_TYPES']:
        if requested_file.endswith(extension):
          content_type = mycontext['MIME_TYPES'][extension]
          break
      # for/else: reached only when no extension above matched.
      else:
        content_type = 'text/plain'

    # combine everything into one unit for sending
    data = 'HTTP/1.1 200 OK\nContent-Type: '+content_type+'\nContent-Length: '+str(len(contents))+'\nServer: Seattle Testbed\n\n'+contents

  except InvalidRequestError:
    data = 'HTTP/1.1 404 Not Found\n\n'


  # send the response
  sent = 0
  while sent < len(data):
    try:
      sent += connobj.send(data[sent:])
    except SocketWouldBlockError:
      # retry if response hasn't been sent yet
      sleep(.05)
      continue
    except SocketClosedRemote:
      log('client from',srcip,'aborted before response could be sent...\n')
      return
  # and we're done, so let's close this connection...
  connobj.close()



def lookup_geoip_info():
  '''
  <Purpose>
    Acquires the geoip information for all the vessels specified in the
    neighbor ip table.

  <Arguments>
    None

  <Side Effects>
    The geoip data recorded will be stored in
    mycontext['locationdata'][neighborip]. If the lookup was successful,
    then the looked up data will be stored. Otherwise, we store None to
    indicate that no location data is available.

  <Exceptions>
    None

  <Returns>
    None
  '''

  geoip_init_client()
  for neighbor in mycontext['neighborlist']:
    try:
      locationdict = geoip_record_by_addr(neighbor)
      if locationdict is not None:
        # Sometimes we don't get a city name.
        if 'city' not in locationdict:
          locationdict['city'] = "Unknown"
        mycontext['locationdata'][neighbor] = locationdict

      # The lookup failed
      else:
        mycontext['locationdata'][neighbor] = None

    except Exception, e:
      # Only swallow the known "server unreachable" failure; anything
      # else is a real bug and must propagate.
      if not "Unable to contact the geoip server" in str(e):
        raise
      # We use this to indicate that no location data is available.
      mycontext['locationdata'][neighbor] = None

def handle_message_forever():
  # Blocking receive loop: polls the module-level UDP server socket
  # (created in the initialize block) and dispatches each datagram
  # to got_message().
  while True:
    try:
      srcip, srcport, mess = udpserversocket.getmessage()
    except SocketWouldBlockError:
      sleep(0.1)
      continue
    got_message(srcip, srcport, mess)



def handle_connection_forever():
  # Accept loop for the HTTP listener (connobj is set in the initialize
  # block). Connections are handled serially, one at a time.
  while True:
    try:
      ret_ip, ret_port, ret_socket = connobj.getconnection()
    except SocketWouldBlockError:
      sleep(0.1)
      continue
    handle_http_request(ret_ip, ret_port, ret_socket)



if callfunc == 'initialize':

  #check if user has provided port number as call argument
  if len(callargs) != 1:
    raise Exception("Must specify the port to use")

  # The same port is used for the UDP probe traffic and the TCP web server.
  pingport = int(callargs[0])
  mycontext['port'] = pingport

  # this holds the response information (i.e. when nodes responded)
  mycontext['latency'] = {}
  mycontext['myip'] = getmyip()

  # this remembers when we sent a probe
  mycontext['sendtime'] = {}

  # this remembers row data from the other nodes
  mycontext['row'] = {}
  # get the nodes to probe
  mycontext['neighborlist'] = []

  # geoip results are filled in lazily by lookup_geoip_info() below.
  mycontext['locationdata'] = {}
  try:
    fileobject = openfile('neighboriplist.txt',False)
  except FileNotFoundError, e:
    #raise error if file doesn't exists
    raise FileNotFoundError("neighboriplist.txt file doesn't exist. 
Please provide the required file in same directory.") + + filecontent = fileobject.readat(None,0) + neighbor_array = filecontent.splitlines() + for line in neighbor_array: + if line == '': + #skip if file contains any blank line + continue + mycontext['neighborlist'].append(line.strip()) + + + + #listen for a new message and call handle_message in new thread + udpserversocket = listenformessage(mycontext['myip'], mycontext['port']) + createthread(handle_message_forever) + + createthread(probe_neighbors_forever) + + #listen for connection and call handle_http_request once a connection is got + connobj = listenforconnection(mycontext['myip'],mycontext['port']) + createthread(handle_connection_forever) + + # Getting geoip data takes a while, in the meanwhile we allow the user to + # see a status webpage and display a notice there. + lookup_geoip_info() + diff --git a/apps/allpairspingmap/jquerygmaps.js b/apps/allpairspingmap/jquerygmaps.js new file mode 100644 index 0000000..1f07b6f --- /dev/null +++ b/apps/allpairspingmap/jquerygmaps.js @@ -0,0 +1,91 @@ +// Function to run at page load +$(document).ready(function() { + function initialize_map() { + // Initialize map inside #map div element + var map = new GMap2(document.getElementById('map')); + map.setUIToDefault(); + + // Set up points for Seattle nodes + var markers = []; + $("ul#coords li").each(function(i) { + var latitude = $(this).children(".latitude").text(); + var longitude = $(this).children(".longitude").text(); + if(!latitude && !longitude){ + var point = new GLatLng(85,0); + marker = new GMarker(point); + map.addOverlay(marker); + marker.setImage("map_marker_icon_blue.png"); + map.setCenter(point, 2); + markers[i] = marker; + }else{ + var point = new GLatLng(latitude, longitude); + marker = new GMarker(point); + map.addOverlay(marker); + marker.setImage("map_marker_icon.png"); + map.setCenter(point, 2); + markers[i] = marker; + } + }); + + // Pan to point when clicked + $(markers).each(function(i, 
marker) { + GEvent.addListener(marker, "click", function(){ + displayPoint(marker, i); + }); + }); + return map; + } + + // Whenever a marker is clicked, pan to it and move/populate the tooltip div + function displayPoint(marker, i) { + map.panTo(marker.getPoint()); + var markerOffset = map.fromLatLngToDivPixel(marker.getPoint()); + + // Get node information from adjacency table + var nodeip = $("#node" + i).children(".nodeip").text(); + var nodelocation = $("#node" + i).children(".locationname").text(); + var nodelat = $("#node" + i).children(".latitude").text(); + var nodelong = $("#node" + i).children(".longitude").text(); + + // Populate #message div with node information + $("#message").empty().append("Node IP: " + nodeip + "
Location: " + nodelocation + "
Lat/Long: " + nodelat + "/" + nodelong + "
Select this node"); + + // If a node bas been selected to base latencies on... + if (typeof(selected_node) != "undefined") { + // Remove any existing lines + if (typeof(line) != "undefined") { + map.removeOverlay(line); + } + + // Draw new line between selected node and clicked node + line = new GPolyline([selected_marker.getLatLng(), marker.getLatLng()]); + map.addOverlay(line); + + // Populate #message div with latency info + var latency = $("td." + selected_node + "_" + i).text(); + $("#message a").before("Latency to " + selected_location + ": " + latency + ((latency != 'N/A') ? " s" : "") + "
"); + } + + // Function to select node as latency hub on click + $("#message").children("a").click(function() { + if (typeof(selected_marker) != "undefined") { + selected_marker.setImage("map_marker_icon.png"); + } + selected_marker = marker; + selected_node = i; + selected_location = nodelocation.split(",")[0]; + marker.setImage("map_marker_sel_icon.png"); + }); + + // Finally, display the #message div tooltip + $("#message").show().css({ top:markerOffset.y, left:markerOffset.x }); + } + + + var map = initialize_map(); + $("#message").appendTo(map.getPane(G_MAP_FLOAT_SHADOW_PANE)); + var selected_node; + var selected_marker; + var selected_location; + var line; +}); diff --git a/apps/allpairspingmap/map_marker_icon.png b/apps/allpairspingmap/map_marker_icon.png new file mode 100644 index 0000000..a0ff05f Binary files /dev/null and b/apps/allpairspingmap/map_marker_icon.png differ diff --git a/apps/allpairspingmap/map_marker_icon_blue.png b/apps/allpairspingmap/map_marker_icon_blue.png new file mode 100644 index 0000000..98b280d Binary files /dev/null and b/apps/allpairspingmap/map_marker_icon_blue.png differ diff --git a/apps/allpairspingmap/style.css b/apps/allpairspingmap/style.css new file mode 100644 index 0000000..e5a34c5 --- /dev/null +++ b/apps/allpairspingmap/style.css @@ -0,0 +1,58 @@ +body { + margin: 0; + font-family: lucida grande, helvetica, sans-serif; + font-size: 85%; +} + +ul#coords { + display: none; +} + +#map { + width: 1024px; + height: 600px; +} + +.gmnoprint { + font-family: sans-serif; +} + +#message { + position: absolute; + padding: 8px; + background: #555; + border: 1px solid #ccc; + color: #fff; + font-size: 12px; + font-family: lucida grande, sans-serif; + width: 270px; +} + +#message a { + color: #bee0f3; +} + +#message a:hover { + color: #d7e3ea; +} + +h2 { + margin-bottom: .5em; +} + +table { + border-collapse: collapse; + margin-bottom: 2em; +} + +th, td { + padding: 6px; +} + +th { + background-color: #eaeaea; +} + +td { + border: 
1px solid #eeeeee; +} diff --git a/httpserver.r2py b/httpserver.r2py new file mode 100644 index 0000000..baf72e2 --- /dev/null +++ b/httpserver.r2py @@ -0,0 +1,937 @@ +""" + + httpserver.r2py + + + July 29, 2009 + + + Conrad Meyer + + + This is a library that abstracts away the details of the HTTP protocol, + instead calling a user-supplied function on each request. The return + value of the user-supplied function determines the response that is sent + to the HTTP client. + +""" + + + +dy_import_module_symbols("librepy.r2py") +dy_import_module_symbols("urllib.r2py") +dy_import_module_symbols("urlparse.r2py") +dy_import_module_symbols("uniqueid.r2py") +dy_import_module_symbols("sockettimeout.r2py") +dy_import_module_symbols("httpretrieve.r2py") + + + + +class _httpserver_ClientClosedSockEarly(Exception): + # Raised internally when the client unexpectedly closes the socket. The + # correct behavior in this instance is to clean up that handler and + # continue. + pass + + + + +class _httpserver_BadRequest(Exception): + # Raised internally when the client's request is malformed. + pass + + + + +class _httpserver_ServerError(Exception): + # Raised internally when the callback function unexpectedly raises an + # exception. + pass + + + + +class _httpserver_BadTransferCoding(Exception): + # Raised internally when the request's encoding is something we can't + # handle (most everything at the time of writing). + pass + + + + +# This global dictionary is used to keep track of open HTTP callbacks. +# The 'lock' entry is used to serialize changes to the other entries. +# The 'handles' dictionary maps numeric ids we hand out in +# httpserver_registercallback() and take in httpserver_stopcallback() +# to ids returned and used by waitforconn(). The 'cbfuncs' entry +# maps httpserver numeric ids to the callback function associated +# with them. 
+_httpserver_context = { + 'handles': {}, + 'cbfuncs': {}, + 'lock': createlock()} + + + +def httpserver_registercallback(addresstuple, cbfunc): + """ + + Registers a callback function on the (host, port). + + + addresstuple: + An address 2-tuple to bind to: ('host', port). + + cbfunc: + The callback function to process requests. It takes one argument, + which is a dictionary describing the HTTP request. It looks like + this (just an example): + { + 'verb': 'HEAD', + 'path': '/', + 'querystr': 'foo=bar&baz', + 'querydict': { 'foo': 'bar', 'baz': None } + 'version': '0.9', + 'datastream': object with a file-like read() method, + 'headers': { 'Content-Type': 'application/x-xmlrpc-data'}, + 'httpdid': 17, + 'remoteipstr': '10.0.0.4', + 'remoteportnum': 54001 + } + ('datastream' is a stream of any HTTP message body data sent by the + client.) + + It is expected that this callback function returns a dictionary of: + { + 'version': '0.9' or '1.0' or '1.1', + 'statuscode': any integer from 100 to 599, + 'statusmsg' (optional): an arbitrary string without newlines, + 'headers': { 'X-Header-Foo': 'Bar' }, + 'message': arbitrary string + } + + + TypeError, ValueError, KeyError, IndexError if arguments to this + function are malformed. + + Raises any exception waitforconn() will raise if the (hostname, port) + tuple is restricted, already taken, etc. + + + Starts a listener on the given host and port. + + + A handle for the listener (an httpdid). This can be used to stop the + server. + + """ + + _httpserver_context['lock'].acquire(True) + + try: + newhttpdid = uniqueid_getid() + + # Keep track of this server's id in a closure: + def _httpserver_cbclosure(remoteip, remoteport, sock, ch, listench): + # Do the actual processing on the request. + _httpserver_socketcb(remoteip, remoteport, sock, ch, listench, \ + newhttpdid) + + # Close the socket afterwards. + try: + sock.close() + except Exception, e: + if "socket" not in str(e).lower(): + raise + pass # Best effort. 
+ + + _httpserver_context['handles'][newhttpdid] = \ + waitforconn(addresstuple[0], addresstuple[1], _httpserver_cbclosure) + _httpserver_context['cbfuncs'][newhttpdid] = cbfunc + + return newhttpdid + finally: + _httpserver_context['lock'].release() + + + + +def _httpserver_socketcb(remoteip, remoteport, sock, ch, listench, httpdid): + # This function gets invoked each time a client connects to our socket. + + # It proceeds in a loop -- reading in requests, handing them off to the + # callback function, and sending the result to the client. If errors are + # encountered, it sends an error message (we choose HTTP/1.0 for + # compatibility and because we don't always know what version the client + # wants) and closes the connection. Additionally, if the response + # generated by the callback requests protocol version 0.9 or 1.0, or is + # 1.1 but includes the Connection: close header, the connection is closed + # and the loop broken. + + _httpserver_context['lock'].acquire(True) + try: + cbfunc = _httpserver_context['cbfuncs'][httpdid] + finally: + _httpserver_context['lock'].release() + + extradata = "" + + # HTTP/1.0 and HTTP/1.1 Connection: close requests break out of this + # loop immediately; HTTP/1.1 clients can keep sending requests and + # receiving responses in this loop indefinitely. + while True: + try: + # Reads request headers, parses them, lets callback handle headers + # and possible request body, sends the response that the callback + # function tells it to send. On error, may raise one of many + # exceptions, which we deal with here: + closeconn, extradata = \ + _httpserver_process_single_request(sock, cbfunc, extradata, \ + httpdid, remoteip, remoteport) + + if closeconn: + break + + except _httpserver_BadRequest, br: + # There was some sort of flaw in the client's request. 
+ response = "HTTP/1.0 400 Bad Request\r\n" + \ + "Content-Type: text/plain\r\n\r\n" + str(br) + "\r\n" + _httpserver_sendAll(sock, response, besteffort=True) + break + + except _httpserver_ServerError, se: + # The callback function raised an exception or returned some object + # we didn't expect. + response = "HTTP/1.0 500 Internal Server Error\r\n" + \ + "Content-Type: text/plain\r\n\r\n" + str(se) + "\r\n" + _httpserver_sendAll(sock, response, besteffort=True) + break + + except _httpserver_BadTransferCoding, bte: + # The HTTP/1.1 client sent us something with a Transport-Encoding we + # can't handle. + response = "HTTP/1.1 501 Not Implemented\r\n" + \ + ("Content-Length: %d\r\n" % (len(str(bte)) + 2)) + \ + "Connection: close\r\n" + \ + "Content-Type: text/plain\r\n\r\n" + str(bte) + "\r\n" + _httpserver_sendAll(sock, response, besteffort=True) + break + + except _httpserver_ClientClosedSockEarly: + # Not much else we can do. + break + + except Exception, e: + if "Socket closed" in str(e): + break + + # We shouldn't encounter these, other than 'Socket closed' ones. They + # represent a bug in our code somewhere. However, not raising the + # exception makes HTTP server software incredibly unintuitive to + # debug. + raise + + + + +def _httpserver_readHTTPheader(sock, data): + # Reads data from the socket in 4k chunks, replacing \r\n newlines with + # \n newlines. When it encounters a (decoded) \n\n sequence, it returns + # (data_before, data_after). + + headers = [] + command = True + while True: + line, data = _httpserver_getline(sock, data) + if len(line) == 0: + raise _httpserver_ClientClosedSockEarly() + + # Be a well-behaved server, and handle normal newlines and telnet-style + # newlines in the same fashion. 
+ line = line.rstrip("\r") + if command: + splitln = line.split(" ") + if len(splitln) != 3: + raise _httpserver_BadRequest("HTTP/0.9 or malformed request.") + if not splitln[2].lower().startswith("http/1."): + raise _httpserver_BadRequest("Malformed request.") + command = False + + if len(line) == 0: + break + + headers.append(line) + + return (headers, data) + + + + +def _httpserver_parseHTTPheader(headerdatalist): + lineslist = headerdatalist + commandstr = lineslist[0] + otherheaderslist = lineslist[1:] + + infodict = {} + + verbstr, rawpathstr, versionstr = commandstr.split(" ", 2) + + infodict['verb'] = verbstr + + if len(versionstr) != len("HTTP/1.1"): + raise _httpserver_BadRequest("Bad HTTP command") + versionstr = versionstr.upper() + if versionstr == "HTTP/1.0": + infodict['version'] = '1.0' + elif versionstr == "HTTP/1.1": + infodict['version'] = '1.1' + else: + raise _httpserver_BadRequest("Unrecognized HTTP version") + + if rawpathstr.find("?") != -1: + infodict['path'], infodict['querystr'] = rawpathstr.split("?", 1) + else: + infodict['path'] = rawpathstr + infodict['querystr'] = None + + try: + infodict['headers'] = _httpretrieve_parse_responseheaders(otherheaderslist) + except HttpBrokenServerError: + raise _httpserver_BadRequest("Request headers are misformed.") + + try: + infodict['querydict'] = urllib_unquote_parameters(infodict['querystr']) + except (ValueError, AttributeError, TypeError): + infodict['querydict'] = None + + return infodict + + + + + +def _httpserver_sendAll(sock, datastr, besteffort=False): + # Sends all the data in datastr to sock. If besteffort is True, + # we don't care if it fails or not. + #try: + #while len(datastr) > 0: + #datastr = datastr[sock.send(datastr):] + #except Exception, e: + #if "socket" not in str(e).lower(): + #raise + + # If the caller didn't want this function to raise an exception for + # any reason, we don't, and instead return silently. If they are ok + # with exceptions, we re-raise. 
+ #if not besteffort: + #raise + + sent = 0 + while sent < len(datastr): + try: + sent += sock.send(data[sent:]) + except SocketWouldBlockError: + # retry if response hasn't been sent yet + sleep(.05) + continue + except SocketClosedRemote: + log('client from',srcip,'aborted before response could be sent...\n') + return + + + + + +def _httpserver_getline(sock, datastr): + # Reads a line out of datastr (if possible), or failing that, gets more from + # the socket. Returns (line, extra) + newdatastr = "" + while True: + endloc = datastr.find("\n", -len(newdatastr)) + if endloc != -1: + return (datastr[:endloc], datastr[endloc+1:]) + try: + newdatastr = sock.recv(4096) + except SocketWouldBlockError: + # retry if they haven't completed sending the header + sleep(.05) + continue + except SocketClosedRemote: + log('client from',srcip,'aborted before sending HTTP header...\n') + return + datastr += newdatastr + + + + + +def _httpserver_getblock(blocksize, sock, datastr): + # Reads a block of size blocksize out of datastr (if possible), or failing + # that, gets more from the socket. Returns (block, extra). + + try: + while len(datastr) < blocksize: + datastr += sock.recv(4096) + + return (datastr[:blocksize], datastr[blocksize:]) + except Exception, e: + if "Socket closed" in str(e) and len(datastr) != 0: + return (datastr, "") + raise + + + + +def httpserver_stopcallback(callbackid): + """ + + Removes an existing callback function. + + + callbackid: + The id returned by httpserver_registercallback(). + + + IndexError, KeyError if the id is invalid or has already been deleted. + + + Removes this listener from the registry, deletes the listening socket. + + + Nothing. 
+ + """ + _httpserver_context['lock'].acquire(True) + try: + stopcomm(_httpserver_context['handles'][callbackid]) + del _httpserver_context['handles'][callbackid] + del _httpserver_context['cbfuncs'][callbackid] + finally: + _httpserver_context['lock'].release() + + + + +class _httpserver_bodystream: + """ + _httpserver_bodystream is a helper class passed as the 'datastream' entry + in the dictionary sent to user callback functions. It has a read() method + that behaves very similarly to file.read(). Other than that, this object + is nothing like Python's file. + + """ + + # Reads the rest of the HTTP request from the socket, erroring + # appropriately. Understands transfer-coding. Returns chunks + # at a time. + + def __init__(self, sock, data, verb, headers): + # The socket object for communicating with the client: + self._sock = sock + # And any data we have already read off the socket, but has not been + # consumed (we read data in 4 kilobyte chunks and keep the extra + # around for later use). + self._rawdata = data + + # The HTTP request verb (GET, POST, etc); a string. + self._verb = verb + + # A dictionary of the client's request headers, as parsed by + # _httpretrieve_parse_responseheaders() (this is a function which + # should be moved elsewhere; for example, an http_common.r2py + # library would work). + self._headers = headers + + # The number of bytes left in the current chunk, if the client's + # request body is transfer-encoded (integer, or None), or the + # number of bytes in the entire request body if it is not transfer- + # encoded. + self._leftinchunk = None + + # Whether or not the client's request body was transfer-encoded. + self._chunked = False + + # A flag set when we know we have finished reading the current + # request body, so we can return the empty string. + self._done = False + + # A lock used to serialize reads from this file-like object. + self._lock = createlock() + + # A flag set when we are finished reading HTTP "trailers". 
Only applies + # to client requests sent with chunked transfer-encoding. + self._trailersread = False + + # For chunked transfers only: Keep a queue of unconsumed but decoded + # data (string). + self._data = "" + + + # Deal with methods that cannot send a message body: + if verb in ("GET", "HEAD", "TRACE", "DELETE"): + if "Content-Length" in headers or \ + "Transfer-Encoding" in headers: + raise _httpserver_BadRequest("Method '" + verb + \ + "' cannot send an entity-body and therefore a " + \ + "Content-Length or Transfer-Encoding header is invalid.") + else: + self._done = True + return + + # Deal with methods that send a message body. + if "Content-Length" not in headers and "Transfer-Encoding" not in headers: + raise _httpserver_BadRequest("Method '" + str(verb) + \ + "' can send an entity-body and requires a Content-Length " + \ + "or Transfer-Encoding header.") + + if "Transfer-Encoding" in headers: + # Decode Transfer-coded messages + if len(headers["Transfer-Encoding"]) != 1: + raise _httpserver_BadRequest("Multiple Transfer-Encoding headers " + \ + "is unacceptable.") + + # "chunked" must be in the codings list, and it must be last. + codings = headers["Transfer-Encoding"][0].split(",") + codings.reverse() + + realcodings = [] + + # Strip 'identity' codings. + for coding in codings: + coding = coding.strip().lower() + token = coding.split(None, 1) + if token != "identity": + realcodings.append(coding) + + if len(realcodings) > 1 or realcodings[0] != "chunked": + raise _httpserver_BadTransferCoding("Cannot handle any transfer-" + \ + "codings other than chunked.") + + self._chunked = True + + else: + # If we get here, that means we have a Content-Length and no Transfer- + # Encoding, so we can read the message body directly. 
+ msglen = headers["Content-Length"] + + if len(msglen) != 1: + raise _httpserver_BadRequest("Multiple Content-Length headers is " + \ + "unacceptable.") + + self._leftinchunk = int(msglen[0]) + + + + def read(self, size=None): + """ + + Read a sequence of bytes from an HTTP request body. + + + size (optional): + An upper limit on the number of bytes to return. + + + Any raised by socket.read(). + + + Possibly reads more from the socket that the HTTP request is passed + on. + + + A string of bytes comprising part of the HTTP message body. Does not + include any chunked encoding trailers. + + """ + + self._lock.acquire(True) # Serialize file-like object reads. + + # Keep the same API as file(), but use a more understandable variable + # name through this method. + requestsize = size + + # The following 'Try' block is used to always unlock self._lock, no + # matter how we return up the stack: + try: + # If we already know we are finished, simply return the empty string. + if self._done: + return "" + + if requestsize == 0: + return "" + + # If the client's request was not chunked, but instead they specified + # the Content-length header: + if not self._chunked: + # toyieldstr is the string we will return to the user this time, + # as if this were a python generator (which is a nice way to think + # about it). We determine the amount to return, toyieldlen: + toyieldlen = self._leftinchunk + if requestsize is not None: + toyieldlen = min(requestsize, toyieldlen) + + # And strive to return as much of it as possible: + toyieldstr, self._rawdata = _httpserver_getblock(toyieldlen, \ + self._sock, self._rawdata) + self._leftinchunk -= len(toyieldstr) + + # If there is nothing left in the request, we're done; keep a note + # for future read() calls. + if self._leftinchunk == 0: + self._done = True + + return toyieldstr + + # If the client's request was chunked: + else: + # Read until there isn't anymore, OR until we have enough to satisfy + # the user's request. 
+ while requestsize is None or len(self._data) < requestsize: + # If we have more raw bytes to read from the socket before + # reaching the end of this chunk: + if self._leftinchunk > 0: + # Determine how many bytes to (attempt to) read from the client: + nextblocksize = self._leftinchunk + if requestsize is not None: + nextblocksize = min(self._leftinchunk, requestsize) + + # Try and request the rest of the chunk, or as much as is + # needed to fulfill the caller's request. + chunkstr, self._rawdata = _httpserver_getblock(nextblocksize, self._sock, \ + self._rawdata) + self._leftinchunk -= len(chunkstr) + self._data += chunkstr + + # We stop trying if we can't get as much data as we wanted to, + # because this means that the client has closed the socket. + if len(chunkstr) < nextblocksize: + break + + # We are finished with this chunk; now we must determine if there + # is another chunk, and what its length is. + else: + # HTTP chunks have "\r\n" appended to the end of them; if we + # just finished reading a chunk, read off the trailing "\r\n" + # and discard it. + if self._leftinchunk is not None: + _, self._rawdata = _httpserver_getblock(2, self._sock, \ + self._rawdata) + + # Read the next chunk's size information: + line, self._rawdata = _httpserver_getline(self._sock, \ + self._rawdata) + + # Remove optional chunk extensions (";" -> newline) that we don't + # understand (this is advised by the HTTP 1.1 RFC), and read the + # hex chunk size: + self._leftinchunk = int(line.split(";", 1)[0].strip(), 16) + + # A chunk size of '0' indicates the end of a chunked message: + if self._leftinchunk == 0: + self._done = True + break + + # Determine how much data we should return, and how much we should + # keep around for the next read. 
+ retlen = len(self._data) + if requestsize is not None: + retlen = min(requestsize, retlen) + + retval, self._data = self._data[:retlen], self._data[retlen:] + return retval + + finally: + self._lock.release() + + + + def get_trailers(self): + """ + + Read any 'trailers' sent by the client. The caller does not need to + ensure that the entire message body has been read first, though they + should be aware that calling this method consumes any remaining + message body and immediately forgets it. + + Note: not multithread safe, not multi-call safe (calls after the + first call will simply return the empty dictionary). + + + None. + + + Any raised by socket.read(). + + + Possibly reads more from the socket that the HTTP request is passed + on. + + + A dictionary of headers in the same style as + _httpretrieve_parse_responseheaders(). + + """ + # Reads the rest of the message, and then reads and returns any trailer + # headers (only sent in chunked messages). + + # Discard extra message without filling RAM. No-op if the user function + # has already read the entire stream. + while True: + unuseddatastr = self.read(4096) + if len(unuseddatastr) == 0: + break + + if not self._chunked: + return {} + + if self._trailersread: + return {} + + trailers = {} + + # Read 'trailer' headers. + while True: + line, self._rawdata = _httpserver_getline(self._sock, self._rawdata) + + # Empty line? Then the trailers are done. 
+ if len(line.strip("\r")) == 0: + break + + # Check for a multi-line header by peeking ahead: + while True: + nextchar, self._rawdata = _httpserver_getblock(1, self._sock, \ + self._rawdata) + self._rawdata = nextchar + self._rawdata + if nextchar in (" ", "\t"): + line2, self._rawdata = _httpserver_getline(self._sock, self._rawdata) + line += " " + line2.lstrip() + else: + break + + # Insert header into existing headers + hdrname, hdrval = line.split(":", 1) + if hdrname not in trailers: + trailers[hdrname] = [] + trailers[hdrname].append(hdrval.strip()) + + self._trailersread = True + return trailers + + + + + def _get_extra(self): + # Private function of httpserver, *should not be used by callback + # functions*! NOT parallel-safe, NOT meant to be called more than + # once. + + # Read the socket up to (at least) the end of the current message; + # if we read beyond the end of our message, return any extra. + + # Discard extra message without filling RAM + while True: + if len(self.read(4096)) == 0: + break + + # Discard trailers, if any are left: + if self._chunked: + self.get_trailers() + + rawdata = self._rawdata + self._rawdata = "" + + return rawdata + + + + +class _httpserver_StringIO: + # Implements a read-only file-like object encapsulating a string. + def __init__(self, string): + self._data = string + self._closed = False + + + + def read(self, limit=None): + if limit is None: + limit = 4096 + + if self._closed: + raise ValueError("Trying to read from a closed StringIO object.") + + res, self._data = self._data[:limit], self._data[limit:] + return res + + + + def close(self): + if self._closed: + raise ValueError("Trying to close a closed StringIO object.") + self._closed = True + + + + +def _httpserver_sendfile(sock, filelikeobj): + # Attempts to forward all of the data from filelikeobj to sock. 
+ while True: + chunk = filelikeobj.read(4096) + if len(chunk) == 0: + break + _httpserver_sendAll(sock, chunk, besteffort=True) + + + + +def _httpserver_sendfile_chunked(sock, filelikeobj): + # Attempts to forward all of the data from filelikeobj to sock, using chunked + # encoding. + totallen = 0 + while True: + chunk = filelikeobj.read(4096) + if len(chunk) == 0: + break + # encode as HTTP/1.1 chunks: + totallen += len(chunk) + chunk = "%X\r\n%s\r\n" % (len(chunk), chunk) + _httpserver_sendAll(sock, chunk) + + lastchunk = "0\r\n" + lastchunk += ("Content-Length: %d\r\n" % totallen) + lastchunk += "\r\n" + _httpserver_sendAll(sock, lastchunk) + + + + +def _httpserver_process_single_request(sock, cbfunc, extradata, httpdid, \ + remoteip, remoteport): + # This function processes a single request in from sock, and either + # puts a response to the socket and returns, or raises an exception. + + # Read HTTP request off the socket + headerdata, extradata = _httpserver_readHTTPheader(sock, extradata) + + # Interpret the header. + reqinfo = _httpserver_parseHTTPheader(headerdata) + + # Wrap the (possibly) remaining data into a file-like object. + messagebodystream = _httpserver_bodystream(sock, \ + extradata, reqinfo['verb'], reqinfo['headers']) + reqinfo['datastream'] = messagebodystream + reqinfo['httpdid'] = httpdid + reqinfo['remoteipstr'] = remoteip + reqinfo['remoteportnum'] = remoteport + + # By default, we don't want to close the connection. (Though, nearly + # every case will change this to True -- only HTTP/1.1 sockets + # that don't set Connection: close will keep this False.) + closeconn = False + + # Send request information to callback. + try: + result = cbfunc(reqinfo) + + except Exception, e: + raise _httpserver_ServerError("httpserver: Callback function " + \ + "raised an exception: " + str(e)) + + # Get any extra data consumed from sock by the message body stream object, + # but that was not actually part of the message body. 
+ extradata = messagebodystream._get_extra() + + # Interpret result of callback function + try: + version = result['version'] + statuscode = result['statuscode'] + statusmsg = result['statusmsg'] + headers = result['headers'] + messagestream = result['message'] + except (KeyError, TypeError): + raise _httpserver_ServerError("httpserver: Callback function " + \ + "returned malformed dictionary") + + # Do some basic sanity checks: + if None in (version, statuscode, statusmsg, headers, messagestream): + raise _httpserver_ServerError("httpserver: Callback function " + \ + "returned dictionary with None for some values.") + + if (type(version), type(statuscode), type(statusmsg), type(headers)) != \ + (str, int, str, dict): + raise _httpserver_ServerError("httpserver: Callback function " + \ + "returned dictionary with invalid values (wrong types).") + + # If the message object is a string, wrap it in a file-like object. + if type(messagestream) is str: + messagestream = _httpserver_StringIO(messagestream) + + # Don't let the callback function serve HTTP/1.1 responses to an HTTP/1.0 + # request. + if reqinfo['version'] == "1.0" and version == "1.1": + version = "1.0" + + # Send response as instructed by callback + if version == "0.9": + # HTTP/0.9 doesn't have response headers. 
+ _httpserver_sendfile(sock, messagestream) + closeconn = True + + elif version == "1.0": + # Send the response headers: + response = "HTTP/1.0 " + str(statuscode) + " " + statusmsg + "\r\n" + for key, val in headers.items(): + response += key + ": " + val + "\r\n" + response += "\r\n" + _httpserver_sendAll(sock, response, besteffort=True) + + # Send the response body: + _httpserver_sendfile(sock, messagestream) + closeconn = True + + elif version == "1.1": + response = "HTTP/1.1 " + str(statuscode) + " " + statusmsg + "\r\n" + for key, val in headers.items(): + response += key + ": " + val + "\r\n" + response += "Transfer-Encoding: chunked\r\n" + response += "\r\n" + + # Close client socket if they or the callback function asked us + # to close. + if ("Connection" in headers and "close" == headers["Connection"]) \ + or ("Connection" in reqinfo['headers'] and "close" in \ + reqinfo['headers']["Connection"]): + closeconn = True + + try: + # Send response headers. + _httpserver_sendAll(sock, response) + + # Read chunks from the callback and efficiently send them to + # the client using HTTP/1.1 chunked encoding. + _httpserver_sendfile_chunked(sock, messagestream) + + except Exception, e: + if "socket" not in str(e).lower(): + raise + + # The exception we're trying to catch here is anything sock.send() + # raises. However, it just raises plain exceptions. + + # The reason we care about the data actually going through for HTTP/1.1 + # is that we keep connections open. If there is an error, we shouldn't + # keep going, so we indicate that the socket should be closed. + closeconn = True + + else: + # If the cbfunc's response dictionary didn't specify 0.9, 1.0, or 1.1, + # it's an error. + raise _httpserver_ServerError("httpserver: Callback function gave " + \ + "invalid HTTP version") + + # Clean up open file handles: + messagestream.close() + + return (closeconn, extradata)