Morevna/src/client.py @ 870c5c6c334f (annotation view; changeset "reacquiring old locks" by Laman)

import collections
import socket
import ssl
import logging as log
from datetime import datetime

import config as conf
import status
import stats
from util import Progress
from hashtree import HashTree, hash_block
from netnode import BaseConnection, NetNode, FailedConnection, LockedException, IncompatibleException
from datafile import DataFile


class DeniedConnection(Exception): pass


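## A TLS connection to a Morevna server.
#
# Wraps a TCP socket in TLS using the certificates from config (conf.peers as the CA file,
# conf.certfile and conf.keyfile as the client identity), connects to the given host and port,
# and sets up the message networkers. Hostname checking is disabled; peers are authenticated
# against the certificates in conf.peers.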
class Connection(BaseConnection):
	def __init__(self, host, port):
		super().__init__()
		sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

		ssl_context = ssl.create_default_context(cafile=conf.peers)
		ssl_context.check_hostname = False
		ssl_context.load_cert_chain(conf.certfile, conf.keyfile)

		self._socket = ssl_context.wrap_socket(sock)

		try:
			self._socket.connect((host, port))
		except ssl.SSLError as e:
			# ssl.SSLError is a subclass of OSError, so it must be caught first
			log.exception(e)
			print("Error creating SSL connection to {0}:{1}".format(host, port))
			raise FailedConnection()
		except OSError as e:
			log.exception(e)
			print("Couldn't connect to {0}:{1}".format(host, port))
			raise FailedConnection()

		self.create_networkers()
		print("Connected to {0}".format(host))


class Client(NetNode):
	def __init__(self, filename, tree_file=""):
		print(datetime.now(), "initializing...")
		super().__init__(filename, tree_file)

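	## Performs the initial protocol handshake.
	#
	# Sends an "init" message with the tree's block size and leaf count, the client version
	# and the requested action. A "deny" reply raises DeniedConnection (for example on an
	# incompatible version), and a server version older than conf.lowest_compatible raises
	# IncompatibleException.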
	def init(self, action):
		json_data = {
			"command": "init",
			"blockSize": self._tree.BLOCK_SIZE,
			"blockCount": self._tree.leaf_count,
			"version": conf.version,
			"action": action
		}
		self._outcoming.write_msg(json_data)
		json_data, bin_data = self._incoming.read_msg()
		if json_data["command"] == "deny":
			if json_data["status"] == status.incompatible.version:
				raise DeniedConnection("Incompatible client version. Consider upgrading it.")
			raise DeniedConnection()
		assert json_data["command"] == "init"
		if json_data["version"] < conf.lowest_compatible:
			raise IncompatibleException("Incompatible server version. Consider upgrading it.")

	## Asks the server for node hashes to determine which blocks need to be transferred.
	#
	# Uses a binary HashTree, where the item at index k is the hash of the items at indices 2k+1 and 2k+2.
	#
	# Requests nodes in batched DFS order, which needs a stack of size O(tree_depth * batch_size). Nodes in each tree level are accessed in order.
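	#
	# For example, in a file of 8 blocks the store holds 15 items: indices 0-6 are inner nodes,
	# 7-14 are leaves, and leaf i corresponds to block i - leaf_start = i - 7.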
	def negotiate(self):
		local_tree = self._tree
		blocks_to_transfer = []
		node_stack = collections.deque([0])  # root

		# determine which blocks to send
		print(datetime.now(), "negotiating:")
		progress = Progress(local_tree.leaf_count)
		while len(node_stack) > 0:
			indices = []
			for i in range(conf.batch_size.hash):
				indices.append(node_stack.pop())
				if len(node_stack) == 0: break
			self._outcoming.write_msg({"command": "req", "index": indices, "dataType": "hash"})

			json_data, bin_data = self._incoming.read_msg()
			assert json_data["index"] == indices
			assert json_data["dataType"] == "hash"
			stats.log_exchanged_node(len(indices))

			frontier = []
			for (j, i) in enumerate(indices):
				(j1, j2) = [HashTree.HASH_LEN*ji for ji in (j, j+1)]
				if local_tree.store[i] != bin_data[j1:j2]:
					# e.g. with a store of 15 items: indices 0-6 are inner nodes, 7-14 are leaves; inner nodes satisfy 2*i+2 < 15
					if 2*i+2 < len(local_tree.store):  # inner node
						frontier.append(2*i+1)
						frontier.append(2*i+2)
					else:
						blocks_to_transfer.append(i-local_tree.leaf_start)  # leaf
						progress.p(i-local_tree.leaf_start)
			node_stack.extend(reversed(frontier))
		progress.done()

		size = stats.format_bytes(len(blocks_to_transfer)*self._tree.BLOCK_SIZE)
		print(datetime.now(), "{0} to transfer".format(size))

		return blocks_to_transfer

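	## Pushes the listed blocks to the server.
	#
	# Reads each block from the local data file and sends them in batches of
	# conf.batch_size.data, then ends the session with an "end" (action "push") message.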
	def send_data(self, blocks_to_transfer):
		log.info(blocks_to_transfer)
		data_file = DataFile.open(self._filename)

		print(datetime.now(), "sending data:")
		progress = Progress(len(blocks_to_transfer))

		for k in range(0, len(blocks_to_transfer), conf.batch_size.data):
			indices = []
			blocks = []
			for j in range(conf.batch_size.data):
				if k+j >= len(blocks_to_transfer): break
				i = blocks_to_transfer[k+j]
				block = data_file.read_from(i)

				indices.append(i)
				blocks.append(block)
				log.info("block #{0}: {1}...{2}".format(i, block[:5], block[-5:]))

				progress.p(k+j)
			if indices: self._send_data(indices, blocks)
		progress.done()

		self._outcoming.write_msg({"command": "end", "action": "push"})

		log.info("closing session...")
		data_file.close()

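	## Pulls the listed blocks from the server and writes them into the local data file.
	#
	# Acquires the lock first (unless ignore_lock is set), requests the blocks in batches of
	# conf.batch_size.data, records new leaf hashes as data arrives, and periodically flushes
	# the tree and refreshes the lock. Finally unlocks and updates the hash tree.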
	def pull_data(self, blocks_to_transfer, ignore_lock=False):
		if not ignore_lock:
			try:
				self._lock()
			except LockedException:
				print(
					"The file is locked. Either (a) another pull is in progress (wait for it or kill it), or (b) a previous pull ended prematurely and the file is probably corrupt (repeat the pull with -f to force it).")
				return
		log.info(blocks_to_transfer)
		data_file = DataFile.open(self._filename, mode="rb+")

		print(datetime.now(), "receiving data:")
		progress = Progress(len(blocks_to_transfer))

		last_flushed = datetime.now().timestamp()
		for k in range(0, len(blocks_to_transfer), conf.batch_size.data):
			indices = blocks_to_transfer[k:k+conf.batch_size.data]
			self._outcoming.write_msg({"command": "req", "index": indices, "dataType": "data"})
			json_data, bin_data = self._incoming.read_msg()
			assert json_data["command"]=="send" and json_data["index"]==indices and json_data["dataType"]=="data", json_data
			for (j, i) in enumerate(indices):
				block = bin_data[j*HashTree.BLOCK_SIZE:(j+1)*HashTree.BLOCK_SIZE]
				data_file.write_at(i, block)

				log.info("block #{0}: {1}...{2}".format(i, block[:5], block[-5:]))
				if self._tree_file:
					self._new_leaves[i+self._tree.leaf_start] = hash_block(block)

				# at most once a minute, flush new leaf hashes and refresh the lock so it doesn't go stale mid-transfer
				t = datetime.now().timestamp()
				if t-last_flushed >= 60:
					if self._tree_file:
						self._update_tree()
					self._refresh_lock()
					last_flushed = t

				stats.log_transferred_block()
				progress.p(k+j)
		progress.done()

		self._outcoming.write_msg({"command": "end"})

		log.info("closing session...")
		data_file.close()
		self._unlock()

		if self._tree_file:
			self._update_tree()

	def _send_data(self, indices, blocks):
		json_data = {"command": "send", "index": indices, "dataType": "data"}
		bin_data = b"".join(blocks)
		self._outcoming.write_msg(json_data, bin_data)
		stats.log_transferred_block(len(indices))
		json_data, bin_data = self._incoming.read_msg()
		assert json_data["command"]=="ack" and json_data["index"]==indices, json_data

	def set_connection(self, connection):
		(self._incoming, self._outcoming) = connection
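

# A minimal usage sketch for a push (hypothetical: the real entry point and the exact wiring
# of a Connection into set_connection live outside this file; host, port and filenames below
# are placeholders):
#
#   connection = Connection("example.org", 9123)
#   client = Client("image.dat", tree_file="image.dat.tree")
#   client.set_connection(connection)  # assumes the connection unpacks to (incoming, outcoming)
#   client.init("push")
#   blocks = client.negotiate()
#   client.send_data(blocks)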