Files
@ 870c5c6c334f
Branch filter:
Location: Morevna/src/client.py
870c5c6c334f
6.4 KiB
text/x-python
reacquiring old locks
import collections
import socket
import ssl
import logging as log
from datetime import datetime
import config as conf
import status
import stats
from util import Progress
from hashtree import HashTree, hash_block
from netnode import BaseConnection, NetNode, FailedConnection, LockedException, IncompatibleException
from datafile import DataFile
class DeniedConnection(Exception):
    """Raised when the server explicitly refuses the session (e.g. version mismatch)."""
class Connection(BaseConnection):
    """Client side of a TLS connection to a peer node.

    Opens a TCP socket to ``host:port``, wraps it in TLS authenticated against
    the pinned peer certificates in ``conf.peers``, and sets up the message
    reader/writer pair via ``create_networkers()``.

    Raises:
        FailedConnection: if the TCP connect or the TLS handshake fails.
    """

    def __init__(self, host, port):
        super().__init__()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ssl_context = ssl.create_default_context(cafile=conf.peers)
        # Peers are authenticated by the pinned CA bundle (conf.peers),
        # not by hostname, so hostname checking is disabled deliberately.
        ssl_context.check_hostname = False
        ssl_context.load_cert_chain(conf.certfile, conf.keyfile)
        self._socket = ssl_context.wrap_socket(sock)
        try:
            self._socket.connect((host, port))
        except ssl.SSLError as e:
            # BUGFIX: this handler must come BEFORE the OSError one.
            # ssl.SSLError is a subclass of OSError, so with the original
            # ordering the generic OSError clause swallowed SSL failures and
            # this branch was unreachable.
            log.exception(e)
            print("Error creating SSL connection to {0}:{1}".format(host, port))
            raise FailedConnection() from e
        except (ConnectionRefusedError, OSError) as e:
            log.exception(e)
            print("Couldn't connect to {0}:{1}".format(host, port))
            raise FailedConnection() from e
        self.create_networkers()
        print("Connected to {0}".format(host))
class Client(NetNode):
    """Client (initiating) side of the file-synchronization protocol.

    Drives a session against a server node: a version/handshake exchange
    (`init`), a hash-tree negotiation to find differing blocks (`negotiate`),
    and then either pushes (`send_data`) or pulls (`pull_data`) those blocks.
    Messages travel over the `self._incoming` / `self._outcoming` pair
    installed by `set_connection`.
    """

    def __init__(self, filename, tree_file: str = ""):
        print(datetime.now(), "initializing...")
        super().__init__(filename, tree_file)

    def init(self, action):
        """Handshake with the server: announce block geometry, version and action.

        Raises:
            DeniedConnection: the server replied "deny" (e.g. incompatible client).
            IncompatibleException: the server's version is older than we support.
        """
        json_data = {
            "command": "init",
            "blockSize": self._tree.BLOCK_SIZE,
            "blockCount": self._tree.leaf_count,
            "version": conf.version,
            "action": action
        }
        self._outcoming.write_msg(json_data)
        json_data, bin_data = self._incoming.read_msg()
        if json_data["command"] == "deny":
            # Give a specific hint when the denial is version-related.
            if json_data["status"] == status.incompatible.version:
                raise DeniedConnection("Incompatible client version. Consider upgrading it.")
            raise DeniedConnection()
        assert json_data["command"] == "init"
        if json_data["version"] < conf.lowest_compatible:
            raise IncompatibleException("Incompatible server version. Consider upgrading it.")

    ## Asks server for node hashes to determine which are to be transferred.
    #
    # Uses a binary HashTree, where item at k is hash of items at 2k+1, 2k+2.
    #
    # Requests nodes in order of a batch DFS. Needs a stack of size O(tree_depth*batch_size). Nodes in each tree level are accessed in order.
    def negotiate(self):
        """Return the list of leaf-block indices whose hashes differ from the server's."""
        local_tree = self._tree
        blocks_to_transfer = []
        node_stack = collections.deque([0])  # root
        # determine which blocks to send
        print(datetime.now(), "negotiating:")
        progress = Progress(local_tree.leaf_count)
        while len(node_stack) > 0:
            # Pop up to batch_size.hash node indices for one request round.
            indices = []
            for i in range(conf.batch_size.hash):
                indices.append(node_stack.pop())
                if len(node_stack) == 0: break
            self._outcoming.write_msg({"command": "req", "index": indices, "dataType": "hash"})
            json_data, bin_data = self._incoming.read_msg()
            # Server must echo the exact request it is answering.
            assert json_data["index"] == indices
            assert json_data["dataType"] == "hash"
            stats.log_exchanged_node(len(indices))
            frontier = []
            for (j, i) in enumerate(indices):
                # Byte span of the j-th hash in the concatenated binary payload.
                (j1, j2) = [HashTree.HASH_LEN*ji for ji in (j, j+1)]
                if local_tree.store[i] != bin_data[j1:j2]:
                    # ie. 0-6 nodes, 7-14 leaves. 2*6+2<15
                    if 2*i+2 < len(local_tree.store):  # inner node
                        # Hash mismatch on an inner node: descend into both children.
                        frontier.append(2*i+1)
                        frontier.append(2*i+2)
                    else:
                        blocks_to_transfer.append(i-local_tree.leaf_start)  # leaf
                        progress.p(i-local_tree.leaf_start)
            # reversed() keeps children in ascending order when popped from the right.
            node_stack.extend(reversed(frontier))
        progress.done()
        size = stats.format_bytes(len(blocks_to_transfer)*self._tree.BLOCK_SIZE)
        print(datetime.now(), "{0} to transfer".format(size))
        return blocks_to_transfer

    def send_data(self, blocks_to_transfer):
        """Push the given leaf blocks to the server in batches of conf.batch_size.data."""
        log.info(blocks_to_transfer)
        data_file = DataFile.open(self._filename)
        print(datetime.now(), "sending data:")
        progress = Progress(len(blocks_to_transfer))
        for k in range(0, len(blocks_to_transfer), conf.batch_size.data):
            indices = []
            blocks = []
            for j in range(conf.batch_size.data):
                if k+j >= len(blocks_to_transfer): break
                i = blocks_to_transfer[k+j]
                block = data_file.read_from(i)
                indices.append(i)
                blocks.append(block)
                log.info("block #{0}: {1}...{2}".format(i, block[:5], block[-5:]))
                progress.p(k+j)
            if indices: self._send_data(indices, blocks)
        progress.done()
        self._outcoming.write_msg({"command": "end", "action": "push"})
        log.info("closing session...")
        data_file.close()

    def pull_data(self, blocks_to_transfer, ignore_lock: bool = False):
        """Fetch the given leaf blocks from the server and write them into the local file.

        Acquires the file lock first (unless ignore_lock, i.e. a forced pull after
        a crashed previous run); refreshes the lock and flushes the hash tree at
        most once a minute while transferring.
        """
        if not ignore_lock:
            try:
                self._lock()
            except LockedException:
                print(
                    "The file is locked. Either (a) there's another pull going on (then wait or kill it), or (b) a previous pull ended prematurely and the file is probably corrupt (then repeat pull with -f for force).")
                return
        log.info(blocks_to_transfer)
        data_file = DataFile.open(self._filename, mode="rb+")
        print(datetime.now(), "receiving data:")
        progress = Progress(len(blocks_to_transfer))
        last_flushed = datetime.now().timestamp()
        for k in range(0, len(blocks_to_transfer), conf.batch_size.data):
            indices = blocks_to_transfer[k:k+conf.batch_size.data]
            self._outcoming.write_msg({"command": "req", "index": indices, "dataType": "data"})
            json_data, bin_data = self._incoming.read_msg()
            # Server must answer with exactly the blocks we asked for.
            assert json_data["command"]=="send" and json_data["index"]==indices and json_data["dataType"]=="data", json_data
            for (j, i) in enumerate(indices):
                # Blocks arrive concatenated; slice out the j-th one.
                block = bin_data[j*HashTree.BLOCK_SIZE:(j+1)*HashTree.BLOCK_SIZE]
                data_file.write_at(i, block)
                log.info("block #{0}: {1}...{2}".format(i, block[:5], block[-5:]))
                if self._tree_file:
                    # Record the new leaf hash; merged into the tree on flush.
                    self._new_leaves[i+self._tree.leaf_start] = hash_block(block)
                t = datetime.now().timestamp()
                if t-last_flushed >= 60:
                    # Periodic checkpoint: persist tree progress and keep the lock alive.
                    if self._tree_file:
                        self._update_tree()
                    self._refresh_lock()
                    last_flushed = t
                stats.log_transferred_block()
                progress.p(k+j)
        progress.done()
        self._outcoming.write_msg({"command": "end"})
        log.info("closing session...")
        data_file.close()
        # NOTE(review): unlock runs even when ignore_lock skipped acquisition —
        # presumably intentional for forced pulls (clears a stale lock); confirm.
        self._unlock()
        if self._tree_file:
            self._update_tree()

    def _send_data(self, indices, blocks):
        """Send one batch of blocks and wait for the server's matching ack."""
        json_data = {"command": "send", "index": indices, "dataType": "data"}
        bin_data = b"".join(blocks)
        self._outcoming.write_msg(json_data, bin_data)
        stats.log_transferred_block(len(indices))
        json_data, bin_data = self._incoming.read_msg()
        assert json_data["command"]=="ack" and json_data["index"]==indices, json_data

    def set_connection(self, connection):
        """Install the (incoming, outgoing) networker pair to use for this session."""
        (self._incoming, self._outcoming) = connection
|