# HG changeset patch
# User Laman
# Date 2019-05-02 23:43:41
# Node ID 6d82b10803791b965058cbb9490cccc62fc1ec3a
# Parent  1874230ef2bdde73a392de1bc417f18c474689d4

neural network grid detection
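
prepare_data.py walks annotated image directories and crops board regions
with a random margin; train.py sketches a small dense regression network;
prep.sh collects object detection helper commands.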

diff --git a/exp/keras/__init__.py b/exp/keras/__init__.py
new file mode 100644
diff --git a/exp/keras/prepare_data.py b/exp/keras/prepare_data.py
new file mode 100644
--- /dev/null
+++ b/exp/keras/prepare_data.py
@@ -0,0 +1,72 @@
+import os
+import sys
+import re
+import random
+
+import cv2 as cv
+
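+# make the shared annotations module (exp/annotations.py) importable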
+sys.path.append("../exp")
+from annotations import DataFile,computeBoundingBox
+
+random.seed(361)
+
+
+def traverseDirs(root):
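+	"""Walk the tree under root, yielding every directory that contains an annotations.json.gz file."""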
+	stack=[root]
+	while len(stack)>0:
+		d=stack.pop()
+		contents=sorted(os.scandir(d),key=lambda f: f.name,reverse=True)
+		if any(f.name=="annotations.json.gz" for f in contents):
+			print(d)
+			yield d
+		for f in contents:
+			if f.is_dir(): stack.append(f.path)
+
+
+def harvestDir(path):
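+	"""Crop every annotated board from every image in the given directory."""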
+	annotations=DataFile(os.path.join(path,"annotations.json.gz"))
+	imgFilter=lambda f: f.is_file() and re.match(r".*\.(jpg|jpeg|png)$", f.name.lower()) # gif dropped: cv.imread cannot decode it
+	files=sorted(filter(imgFilter,os.scandir(path)),key=lambda f: f.name)
+	boards=annotations["."]
+	for f in files:
+		img=cv.imread(f.path)
+		if img is None: continue # skip files OpenCV failed to read
+		for b in boards:
+			crop(img,b)
+
+
+def crop(img,board):
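+	"""Cut the annotated board out of the image, randomly extending its
+	bounding box by up to 20% of the board size on each side."""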
+	margin=0.2
+	(hi,wi)=img.shape[:2]
+	(x1,y1,x2,y2)=computeBoundingBox(board.board)
+	(wb,hb)=(x2-x1,y2-y1)
+	dx1=min(int(wb*margin),x1)
+	dx2=min(int(wb*margin),wi-x2)
+	dy1=min(int(hb*margin),y1)
+	dy2=min(int(hb*margin),hi-y2)
+	xa=x1-random.randint(0,dx1)
+	xb=x2+random.randint(0,dx2)
+	ya=y1-random.randint(0,dy1)
+	yb=y2+random.randint(0,dy2)
+	cropped=img[ya:yb,xa:xb]
+	show(cropped) # interactive preview of each crop
+	return cropped
+
+
+def show(img,filename="x"):
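+	"""Display the image in a window and wait for a key press."""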
+	cv.imshow(filename,img)
+	cv.waitKey(0)
+	cv.destroyAllWindows()
+
+
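+# usage: python prepare_data.py <data-root>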
+if __name__=="__main__":
+	root=sys.argv[1]
+	for d in traverseDirs(root):
+		harvestDir(d)
diff --git a/exp/keras/train.py b/exp/keras/train.py
new file mode 100644
--- /dev/null
+++ b/exp/keras/train.py
@@ -0,0 +1,27 @@
+from keras.layers import Dropout,Dense,Flatten
+from keras.models import Sequential
+import numpy as np
+
+
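+# a small fully connected baseline: flattened 96x96 grayscale input,
+# 30 continuous outputs (presumably the grid point coordinates)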
+model = Sequential([
+	Flatten(input_shape=(96,96)),
+	Dense(128, activation="relu"),
+	Dropout(0.1),
+	Dense(64, activation="relu"),
+	Dense(30)
+])
+
+model.compile(
+	optimizer='adam',
+	loss='mse',
+	metrics=['mae'] # accuracy is not meaningful for continuous targets
+)
+
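+# X_train and y_train are assumed to be prepared elsewhere (e.g. from the
+# crops produced by prepare_data.py); the .npy names below are placeholders.
+X_train=np.load("X_train.npy") # expected shape (n,96,96)
+y_train=np.load("y_train.npy") # expected shape (n,30)
+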
+model.fit(X_train,y_train,epochs=500,batch_size=128,validation_split=0.2)
diff --git a/exp/prep.sh b/exp/prep.sh
new file mode 100644
--- /dev/null
+++ b/exp/prep.sh
@@ -0,0 +1,8 @@
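+# make the TensorFlow object detection API (models/research) importable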
+export PYTHONPATH=${PYTHONPATH}:/home/laman/Programy/tensorflow/models/research/:/home/laman/Programy/tensorflow/models/research/slim
+
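+# export the trained SSD MobileNet v2 checkpoint as a frozen inference graph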
+python export_inference_graph.py --input_type=image_tensor --pipeline_config_path=../models/model/ssd_mobilenet_v2_coco.config --trained_checkpoint_prefix=../models/model/model.ckpt-360 --output_directory=/tmp/nn3
+
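+# extract one frame every ten seconds from a video: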
+# ffmpeg -i video.mp4 -r 1/10 out-dir/frame-%04d.jpg