Changeset - 6d82b1080379
Branch: default
Laman - 2019-05-02 23:43:41

neural network grid detection
4 files changed with 87 insertions and 0 deletions:
exp/keras/__init__.py
new file 100644
exp/keras/prepare_data.py
new file 100644
 
import os
import sys
import re
import random

import cv2 as cv

sys.path.append("../exp")
from annotations import DataFile,computeBoundingBox

random.seed(361)


def traverseDirs(root):
	"""Walk the tree under root and yield every directory that contains an annotations.json.gz file."""
	stack=[root]
	while len(stack)>0:
		d=stack.pop()
		contents=sorted(os.scandir(d),key=lambda f: f.name,reverse=True)
		if any(f.name=="annotations.json.gz" for f in contents):
			print(d)
			yield d
		for f in contents:
			if f.is_dir(): stack.append(f.path)


def harvestDir(path):
	"""Load the annotations of one directory and crop every annotated board out of every image in it."""
	annotations=DataFile(os.path.join(path,"annotations.json.gz"))
	imgFilter=lambda f: f.is_file() and re.match(r".*\.(jpg|jpeg|png|gif)$", f.name.lower())
	files=sorted(filter(imgFilter,os.scandir(path)),key=lambda f: f.name)
	boards=annotations["."]
	for f in files:
		img=cv.imread(f.path)
		for b in boards:
			crop(img,b)


def crop(img,board):
	"""Cut out the board's bounding box, padded on each side by a random margin of up to 20% of the box size, clamped to the image border."""
	margin=0.2
	(hi,wi)=img.shape[:2]
	(x1,y1,x2,y2)=computeBoundingBox(board.board)
	(wb,hb)=(x2-x1,y2-y1)
	dx1=min(int(wb*margin),x1)
	dx2=min(int(wb*margin),wi-x2)
	dy1=min(int(hb*margin),y1)
	dy2=min(int(hb*margin),hi-y2)
	xa=x1-random.randint(0,dx1)
	xb=x2+random.randint(0,dx2)
	ya=y1-random.randint(0,dy1)
	yb=y2+random.randint(0,dy2)
	show(img[ya:yb,xa:xb])
	return img[ya:yb,xa:xb]


def show(img,filename="x"):
	"""Display an image in a window and wait for a key press."""
	cv.imshow(filename,img)
	cv.waitKey(0)
	cv.destroyAllWindows()


if __name__=="__main__":
	root=sys.argv[1]
	for d in traverseDirs(root):
		harvestDir(d)
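
crop() currently only displays and returns each cutout. A minimal sketch of how one crop might be turned into a training sample for the model in exp/keras/train.py; the toSample helper, the grayscale conversion, and the 0-1 scaling are assumptions, not part of this changeset:

	# Hypothetical glue code, not part of this changeset: resize a crop to the
	# 96x96 grayscale input expected by Flatten(input_shape=(96,96)) in train.py.
	import numpy as np

	def toSample(img,board):
		cropped=crop(img,board)                      # crop() also pops up a preview window
		gray=cv.cvtColor(cropped,cv.COLOR_BGR2GRAY)  # single-channel image
		small=cv.resize(gray,(96,96))                # network input resolution
		return small.astype(np.float32)/255.0        # scale pixels to [0,1]
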
exp/keras/train.py
new file 100644
 
from keras.layers import Conv2D,Dropout,Dense,Flatten
from keras.models import Sequential


# Fully connected regression network: a flattened 96x96 image in, 30 output values.
# Conv2D is imported but not used yet.
model = Sequential([
	Flatten(input_shape=(96,96)),
	Dense(128, activation="relu"),
	Dropout(0.1),
	Dense(64, activation="relu"),
	Dense(30)
])

# Mean squared error fits the regression output; MAE is tracked as an extra metric.
model.compile(
	optimizer="adam",
	loss="mse",
	metrics=["mae","accuracy"]
)

# X_train and y_train are not defined in this file; they are expected to come
# from the data preparation step.
model.fit(X_train,y_train,epochs=500,batch_size=128,validation_split=0.2)
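
A minimal sketch of how X_train and y_train might be supplied, assuming the prepared samples and their targets were saved as NumPy arrays; the file names and shapes are hypothetical, not part of this changeset:

	# Hypothetical loading step, not part of this changeset.
	import numpy as np

	X_train = np.load("x_train.npy")  # assumed shape (N, 96, 96), float32 in [0,1]
	y_train = np.load("y_train.npy")  # assumed shape (N, 30), regression targets
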
exp/prep.sh
new file 100644
 
# Make the TensorFlow Object Detection API and slim importable.
export PYTHONPATH=${PYTHONPATH}:/home/laman/Programy/tensorflow/models/research/:/home/laman/Programy/tensorflow/models/research/slim

# Export the trained SSD MobileNet v2 checkpoint as a frozen inference graph.
python export_inference_graph.py --input_type=image_tensor --pipeline_config_path=../models/model/ssd_mobilenet_v2_coco.config --trained_checkpoint_prefix=../models/model/model.ckpt-360 --output_directory=/tmp/nn3

# Sample frames from a video (one frame every 10 seconds):
# ffmpeg -i video.mp4 -r 1/10 out-dir/frame-%04d.jpg