Improved shutdown handling and table saving
commit af538e0489 (parent 8f4cf433ba)
@@ -495,6 +495,7 @@ class TCPServerInterface(Interface):
     def __str__(self):
         return "TCPServerInterface["+self.name+"/"+self.bind_ip+":"+str(self.bind_port)+"]"
 
+
 class TCPInterfaceHandler(socketserver.BaseRequestHandler):
     def __init__(self, callback, *args, **keys):
         self.callback = callback
@@ -1984,10 +1984,27 @@ class Transport:
 
     @staticmethod
     def detach_interfaces():
+        detachable_interfaces = []
+
         for interface in Transport.interfaces:
-            interface.detach()
+            # Currently no rules are being applied
+            # here, and all interfaces will be sent
+            # the detach call on RNS teardown.
+            if True:
+                detachable_interfaces.append(interface)
+            else:
+                pass
 
         for interface in Transport.local_client_interfaces:
+            # Currently no rules are being applied
+            # here, and all interfaces will be sent
+            # the detach call on RNS teardown.
+            if True:
+                detachable_interfaces.append(interface)
+            else:
+                pass
+
+        for interface in detachable_interfaces:
             interface.detach()
 
     @staticmethod
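Note on the two-phase detach above: the "if True:" branches are placeholders. As the in-line comments state, no selection rules are applied yet, so every known interface ends up in detachable_interfaces and receives detach() on teardown. A minimal sketch of how a rule could slot into that condition, using a hypothetical detach_on_teardown attribute that is not part of this commit:

# Illustration only: "detach_on_teardown" is a hypothetical attribute,
# not something the committed code defines or checks.
detachable_interfaces = []

for interface in Transport.interfaces:
    # A future rule could let individual interfaces opt out of the
    # teardown detach; the commit currently appends every interface.
    if getattr(interface, "detach_on_teardown", True):
        detachable_interfaces.append(interface)

for interface in Transport.local_client_interfaces:
    if getattr(interface, "detach_on_teardown", True):
        detachable_interfaces.append(interface)

# Detaching happens in a separate pass, after collection is complete.
for interface in detachable_interfaces:
    interface.detach()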
@@ -2035,8 +2052,10 @@ class Transport:
         return announce_emitted
 
     @staticmethod
-    def exit_handler():
+    def save_packet_hashlist():
         try:
+            save_start = time.time()
+
             if not RNS.Reticulum.transport_enabled():
                 Transport.packet_hashlist = []
             else:
@@ -2047,10 +2066,21 @@ class Transport:
                 file.write(umsgpack.packb(Transport.packet_hashlist))
                 file.close()
+
+            save_time = time.time() - save_start
+            if save_time < 1:
+                time_str = str(round(save_time*1000,2))+"ms"
+            else:
+                time_str = str(round(save_time,2))+"s"
+            RNS.log("Saved packet hashlist in "+time_str, RNS.LOG_VERBOSE)
+
         except Exception as e:
             RNS.log("Could not save packet hashlist to storage, the contained exception was: "+str(e), RNS.LOG_ERROR)
 
 
+    @staticmethod
+    def save_path_table():
         if not Transport.owner.is_connected_to_shared_instance:
+            save_start = time.time()
             RNS.log("Saving path table to storage...", RNS.LOG_VERBOSE)
             try:
                 serialised_destinations = []
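The timing block introduced above (report milliseconds below one second, seconds otherwise) is repeated verbatim in each save method. As an illustration only, not part of the commit, the pattern could be factored into a small helper along these lines:

import time

def _format_save_time(save_time):
    # Mirrors the inline formatting used by the save methods:
    # milliseconds below one second, seconds otherwise.
    if save_time < 1:
        return str(round(save_time*1000, 2)) + "ms"
    else:
        return str(round(save_time, 2)) + "s"

# Usage, following the committed pattern:
# save_start = time.time()
# ... write the table to storage ...
# RNS.log("Saved packet hashlist in " + _format_save_time(time.time() - save_start), RNS.LOG_VERBOSE)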
@@ -2091,10 +2121,21 @@ class Transport:
                 file = open(destination_table_path, "wb")
                 file.write(umsgpack.packb(serialised_destinations))
                 file.close()
-                RNS.log("Done saving "+str(len(serialised_destinations))+" path table entries to storage", RNS.LOG_VERBOSE)
+
+                save_time = time.time() - save_start
+                if save_time < 1:
+                    time_str = str(round(save_time*1000,2))+"ms"
+                else:
+                    time_str = str(round(save_time,2))+"s"
+                RNS.log("Saved "+str(len(serialised_destinations))+" path table entries in "+time_str, RNS.LOG_VERBOSE)
+
             except Exception as e:
                 RNS.log("Could not save path table to storage, the contained exception was: "+str(e), RNS.LOG_ERROR)
 
+    @staticmethod
+    def save_tunnel_table():
+        if not Transport.owner.is_connected_to_shared_instance:
+            save_start = time.time()
             RNS.log("Saving tunnel table to storage...", RNS.LOG_VERBOSE)
             try:
                 serialised_tunnels = []
@@ -2143,6 +2184,18 @@ class Transport:
                 file = open(tunnels_path, "wb")
                 file.write(umsgpack.packb(serialised_tunnels))
                 file.close()
-                RNS.log("Done saving "+str(len(serialised_tunnels))+" tunnel table entries to storage", RNS.LOG_VERBOSE)
+
+                save_time = time.time() - save_start
+                if save_time < 1:
+                    time_str = str(round(save_time*1000,2))+"ms"
+                else:
+                    time_str = str(round(save_time,2))+"s"
+                RNS.log("Saved "+str(len(serialised_tunnels))+" tunnel table entries in "+time_str, RNS.LOG_VERBOSE)
             except Exception as e:
                 RNS.log("Could not save tunnel table to storage, the contained exception was: "+str(e), RNS.LOG_ERROR)
+
+    @staticmethod
+    def exit_handler():
+        Transport.save_packet_hashlist()
+        Transport.save_path_table()
+        Transport.save_tunnel_table()
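With this change, exit_handler() is reduced to a thin wrapper that saves the packet hashlist, path table and tunnel table in turn. How Reticulum registers the handler is outside this diff; a minimal sketch of invoking it at process exit via Python's atexit module, assuming the usual RNS package layout where RNS.Transport is the Transport class:

import atexit
import RNS

# Sketch: run the consolidated shutdown path when the process exits,
# so all three tables are persisted. The registration point inside
# Reticulum itself is not shown in this diff.
atexit.register(RNS.Transport.exit_handler)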