diff --git a/exp/kerokero/prepare_data.py b/exp/kerokero/prepare_data.py
--- a/exp/kerokero/prepare_data.py
+++ b/exp/kerokero/prepare_data.py
@@ -20,7 +20,7 @@ class Sample:
 	SIDE=224
 
 	def __init__(self,img,grid):
-		""":param img: an image as a 3D np.uint8, channels-last
+		""":param img: a greyscale image as a 2D np.uint8 array
 		:param grid: iterable of 4 EPoints, ie. Corners"""
 		self.img=img
 		self.grid=grid
@@ -38,7 +38,7 @@ class Sample:
 			m=np.matmul(mi,m)
 		m=np.matmul(self._computeCrop(m),m)
 		img=cv.warpPerspective(self.img,m,(self.SIDE,self.SIDE))
-		img=cv.cvtColor(img,cv.COLOR_BGR2RGB)
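+		# scale pixel values from uint8 [0,255] to float32 in [-1,1) for the network input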
+		img=np.float32(img)/128-1
 		grid=Corners(c.transform(m) for c in self.grid)
 		grid=list(map(lambda p: 2*p/self.SIDE-EPoint(1,1), grid))
 		return (img,grid,list(itertools.chain.from_iterable(grid)))
@@ -59,7 +59,7 @@ class Sample:
 		return np.matmul(scale,t2)
 
 	def show(self):
-		img=np.copy(self.img)
+		img=cv.cvtColor(self.img,cv.COLOR_GRAY2BGR)
 		for c in self.grid:
 			cv.circle(img,(int(c.x),int(c.y)),3,[0,255,0],-1)
 		show(img)
@@ -80,15 +80,16 @@ def traverseDirs(root):
 def harvestDir(path):
 	annotations=DataFile(os.path.join(path,"annotations.json.gz"))
 	imgFilter=lambda f: f.is_file() and re.match(r".*\.(jpg|jpeg|png|gif)$", f.name.lower())
-	files=sorted(filter(imgFilter,os.scandir(path)),key=lambda f: f.name)[::3]
+	files=sorted(filter(imgFilter,os.scandir(path)),key=lambda f: f.name)
 	boards=annotations["."]
 	for f in files:
 		img=cv.imread(f.path)
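+		# the model works on single-channel input, so convert to greyscale right after loading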
+		img=cv.cvtColor(img,cv.COLOR_BGR2GRAY)
 		for b in boards:
 			sample=Sample(img,b.grid)
 			# sample.show()
 			(transformedImg,transformedGrid,label)=sample.transform()
-			# Sample(transformedImg,map(lambda c: (c+EPoint(1,1))*Sample.SIDE/2,transformedGrid)).show()
+			# Sample(np.uint8((transformedImg+1)*128),map(lambda c: (c+EPoint(1,1))*Sample.SIDE/2,transformedGrid)).show()
 			yield (transformedImg,label)
 
 
@@ -108,8 +109,8 @@ def loadDataset(root):
 	labels=[labels[k] for k in keys]
 	m=int(n*trainRatio)
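+	# cast the image splits to float32 arrays to match the labels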
 	return (
-		(images[:m],np.float32(labels[:m])),
-		(images[m:],np.float32(labels[m:]))
+		(np.float32(images[:m]),np.float32(labels[:m])),
+		(np.float32(images[m:]),np.float32(labels[m:]))
 	)
 
 
diff --git a/exp/kerokero/test.py b/exp/kerokero/test.py
--- a/exp/kerokero/test.py
+++ b/exp/kerokero/test.py
@@ -3,7 +3,6 @@ import logging as log
 
 import numpy as np
 from keras.models import load_model
-from keras.applications.inception_v3 import preprocess_input
 
 from prepare_data import loadDataset,Sample
 from analyzer.epoint import EPoint
@@ -25,14 +24,14 @@ with np.load(args.data) as data:
 	testLabels=data["testLabels"]
 log.info("done")
 
-log.info(model.evaluate(preprocess_input(testImages).reshape((-1,224,224,3)),testLabels))
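+# images were already normalized to [-1,1) in prepare_data, so only reshape them to single-channel NHWC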
+log.info(model.evaluate(testImages.reshape((-1,224,224,1)),testLabels))
 
 for img in testImages:
-	label=model.predict(preprocess_input(np.reshape(img,(1,224,224,3))))
+	label=model.predict(np.reshape(img,(1,224,224,1)))
 	print(label)
 	points=[]
 	for i in range(4):
 		points.append(EPoint((label[0][i*2]+1)*112,(label[0][i*2+1]+1)*112))
 	corners=Corners(points)
-	sample=Sample(img,corners)
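+	# map the normalized image back to uint8 [0,255] so Sample.show() can display it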
+	sample=Sample(np.uint8((img+1)*128),corners)
 	sample.show()
diff --git a/exp/kerokero/train.py b/exp/kerokero/train.py
--- a/exp/kerokero/train.py
+++ b/exp/kerokero/train.py
@@ -4,11 +4,9 @@ import argparse
 import logging as log
 
 import numpy as np
-from keras.layers import Conv2D,Dropout,Dense,Flatten,MaxPooling2D,GlobalAveragePooling2D,BatchNormalization
-from keras.models import Sequential,load_model,Model
-from keras.optimizers import SGD
+from keras.layers import Conv2D,Dropout,Dense,Flatten,MaxPooling2D,BatchNormalization,GlobalAveragePooling2D
+from keras.models import Sequential,load_model
 from keras.callbacks import TensorBoard
-from keras.applications.inception_v3 import InceptionV3,preprocess_input
 
 import config as cfg
 import ftp
@@ -41,84 +39,51 @@ def createFullyConnected():
 
 def createCNN():
 	model=Sequential()
-	
-	model.add(Conv2D(filters=16,kernel_size=2,padding="same",activation="relu",input_shape=(224,224,1)))
-	model.add(Dropout(0.1))
-	model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding="valid"))
-	model.add(BatchNormalization())
-	
-	model.add(Conv2D(32,(5,5),activation="relu"))
-	model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding="valid"))
-	model.add(Dropout(0.2))
-	model.add(BatchNormalization())
-	
-	model.add(Conv2D(64,(5,5),activation="relu"))
-	model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding="valid"))
-	model.add(BatchNormalization())
-	
-	model.add(Conv2D(128,(3,3),activation="relu"))
-	model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding="valid"))
-	model.add(Dropout(0.4))
-	model.add(BatchNormalization())
-	
-	model.add(Flatten())
-	
+
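+	# single-channel 224x224 input, five convolutional layers (max pooling after the first four),
+	# global average pooling and three dense layers regressing the 8 corner coordinates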
+	model.add(BatchNormalization(input_shape=(224,224,1)))
+
+	model.add(Conv2D(24,(5,5),padding="same",kernel_initializer="he_normal",activation="relu"))
+	model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding="valid"))
+
+	model.add(Conv2D(36,(5,5),activation="relu"))
+	model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding="valid"))
+
+	model.add(Conv2D(48,(5,5),activation="relu"))
+	model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding="valid"))
+
+	model.add(Conv2D(64,(3,3),activation="relu"))
+	model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding="valid"))
+
+	model.add(Conv2D(64,(3,3),activation="relu"))
+
+	model.add(GlobalAveragePooling2D())
+
 	model.add(Dense(500,activation="relu"))
-	model.add(Dropout(0.1))
-	
-	model.add(Dense(128,activation="relu"))
-	model.add(Dropout(0.1))
-	
+	model.add(Dense(90,activation="relu"))
 	model.add(Dense(8))
 
-	model.compile(optimizer='adam',loss='mse',metrics=['mae','accuracy'])
+	model.compile(optimizer="rmsprop",loss="mse",metrics=["mae","accuracy"])
 	return model
 
 
-def createPretrained():
-	base=InceptionV3(weights="imagenet",include_top=False,input_shape=(224,224,3))
-
-	x=base.output
-	x=GlobalAveragePooling2D()(x)
-	x=Dense(1024,activation="relu")(x)
-	predictions=Dense(8)(x)
-
-	model=Model(inputs=base.input,outputs=predictions)
-	for layer in base.layers:
-		layer.trainable=False
-
-	model.compile(optimizer='adam',loss='mse',metrics=['mae','accuracy'])
-	return model
-
-
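+# build a fresh model; a saved one loaded via args.load_model replaces it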
+model=createCNN()
 if args.load_model:
 	model=load_model(args.load_model)
-else:
-	model=createPretrained()
 
 log.info("loading data...")
 with np.load(args.data) as data:
-	trainImages=preprocess_input(data["trainImages"])
+	trainImages=data["trainImages"]
 	trainLabels=data["trainLabels"]
-	testImages=preprocess_input(data["testImages"])
+	testImages=data["testImages"]
 	testLabels=data["testLabels"]
 log.info("done")
 
-tensorboard = TensorBoard(log_dir=os.path.join(args.log_dir,"{}".format(time())))
-
-if not args.load_model:
-	model.fit(trainImages.reshape((-1,224,224,3)),trainLabels,epochs=10,batch_size=128,validation_split=0.2,callbacks=[tensorboard])
-for layer in model.layers[:249]:
-	layer.trainable = False
-for layer in model.layers[249:]:
-	layer.trainable = True
-model.compile(optimizer=SGD(lr=0.0001,momentum=0.9),loss='mse')
-
+tensorboard = TensorBoard(log_dir=os.path.join(cfg.thisDir,"../logs","{}".format(time())))
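+# train in blocks of BIG_STEP epochs, saving a model checkpoint after each block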
 BIG_STEP=20
 for i in range(args.initial_epoch//BIG_STEP,args.epochs//BIG_STEP):
-	model.fit(trainImages.reshape((-1,224,224,3)),trainLabels,epochs=(i+1)*BIG_STEP,initial_epoch=i*BIG_STEP,batch_size=128,validation_split=0.2,callbacks=[tensorboard])
+	model.fit(trainImages.reshape((-1,224,224,1)),trainLabels,epochs=(i+1)*BIG_STEP,initial_epoch=i*BIG_STEP,batch_size=20,validation_split=0.2,callbacks=[tensorboard])
 	path=args.save_model.format((i+1)*BIG_STEP)
 	log.info("saving model...")
 	model.save(path)
 	# ftp.push(path)
-log.info(model.evaluate(testImages.reshape((-1,224,224,3)),testLabels))
+log.info(model.evaluate(testImages.reshape((-1,224,224,1)),testLabels))