Changeset - 68719ba74601
Branch: default
Laman - 2019-03-22 16:27:34

position detection, refactored out color quantization

3 files changed with 119 insertions and 53 deletions:
exp/board_detect.py
@@ -9,12 +9,10 @@ import logging as log

import cv2 as cv
import numpy as np
import scipy.cluster
import scipy.ndimage
import scipy.signal

from geometry import Line
from ransac import DiagonalRansac
from quantization import kmeans,QuantizedImage
from annotations import DataFile,computeBoundingBox
from hough import show,prepareEdgeImg,HoughTransform
from analyzer.epoint import EPoint
 
@@ -24,28 +22,6 @@ random.seed(361)
log.basicConfig(level=log.DEBUG,format="%(message)s")


def kmeans(img):
	arr=np.reshape(img,(-1,3)).astype(np.float)
	wood=[193,165,116]
	(centers,distortion)=scipy.cluster.vq.kmeans(arr,3)
	log.debug("k-means centers: %s",centers)
	(black,empty,white)=sorted(centers,key=sum)
	if np.linalg.norm(black)>np.linalg.norm(black-wood):
		black=None
	if np.linalg.norm(white-[255,255,255])>np.linalg.norm(white-wood):
		white=None
	log.debug("black, white: %s, %s",black,white)
	return (black,white,centers)


def quantize(img,centers):
	origShape=img.shape
	data=np.reshape(img,(-1,3))
	(keys,dists)=scipy.cluster.vq.vq(data,centers)
	pixels=np.array([centers[k] for k in keys],dtype=np.uint8).reshape(origShape)
	return pixels


def filterStones(contours,bwImg,stoneDims):
	contourImg=cv.cvtColor(bwImg,cv.COLOR_GRAY2BGR)
	res=[]
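The kmeans and quantize helpers dropped from this hunk reappear in exp/quantization.py below (kmeans verbatim, quantize folded into QuantizedImage._quantize). For reference, the scipy.cluster.vq round-trip they are built on can be sketched standalone; the random placeholder image and the fixed cluster count of 3 are illustrative only:

# Sketch of the k-means quantization round-trip used by kmeans()/quantize().
import numpy as np
import scipy.cluster

img=np.random.randint(0,256,(32,32,3)).astype(np.uint8)   # placeholder BGR patch
arr=np.reshape(img,(-1,3)).astype(float)                   # one row per pixel
(centers,distortion)=scipy.cluster.vq.kmeans(arr,3)        # 3 clusters: black, board wood, white
(keys,dists)=scipy.cluster.vq.vq(arr,centers)              # nearest center index per pixel
quantized=np.array([centers[k] for k in keys],dtype=np.uint8).reshape(img.shape)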
 
@@ -85,6 +61,8 @@ class BoardDetector:
		self._rectiMatrix=None
		self._inverseMatrix=None

		self.grid=None

	def __call__(self,img,filename):
		# approximately detect the board
		(h,w)=img.shape[:2]

@@ -97,16 +75,16 @@ class BoardDetector:
		self._rect=rect

		# quantize colors
		(black,white,colors)=self._sampleColors(rect)
		quantized=quantize(rect,colors)
		quantized=QuantizedImage(rect)
		gray=cv.cvtColor(rect,cv.COLOR_BGR2GRAY)
		edges=cv.Canny(gray,70,130)
		show(edges,"edges")
		quantized=quantized & (255-cv.cvtColor(edges,cv.COLOR_GRAY2BGR))
		show(quantized,"quantized, edges separated")
		edgeMask=(255-edges)
		quantizedImg=quantized.img & cv.cvtColor(edgeMask,cv.COLOR_GRAY2BGR)
		show(quantizedImg,"quantized, edges separated")

		# detect black and white stones
		stones=self._detectStones(quantized,black,white)
		stones=self._detectStones(quantized,edgeMask)

		# detect lines from edges and stones
		edgeImg=prepareEdgeImg(rect)
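In the new flow the Canny output is inverted into edgeMask and combined with the quantized image for display; the same mask is later ANDed with the black/white masks so touching stones get separated along detected edges before the distance transform. A minimal sketch of that masking step, using placeholder rect and quantizedImg arrays, could be:

# Sketch: knock out edge pixels from a quantized image (placeholder data).
import cv2 as cv
import numpy as np

rect=np.zeros((190,190,3),dtype=np.uint8)          # placeholder board crop (BGR)
quantizedImg=rect.copy()                            # placeholder quantized colors
gray=cv.cvtColor(rect,cv.COLOR_BGR2GRAY)
edges=cv.Canny(gray,70,130)                         # edge pixels become 255
edgeMask=(255-edges)                                # invert: 0 on edges, 255 elsewhere
separated=quantizedImg & cv.cvtColor(edgeMask,cv.COLOR_GRAY2BGR)  # zero out edge pixels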
 
@@ -124,13 +102,15 @@ class BoardDetector:
			self._drawLine(linesImg,line)
		show(linesImg,"detected lines")

		# # rectify the image
		# rectify the image
		matrix=self._computeTransformationMatrix(lines[0][0],lines[0][-1],lines[1][0],lines[1][-1])
		transformed=cv.warpPerspective(rect,matrix,(self._rectW,self._rectH))
		rectiLines=[[line.transform(matrix) for line in pack] for pack in lines]
		quantized.transform(matrix)

		# determine precise board edges
		self._detectBestGrid(rectiLines,linesImg)
		self.grid=self._detectGrid(rectiLines,linesImg)

		self.detectPosition(quantized)

	def _detectRough(self,img,filename):
		corners=self._annotations[filename][0]

@@ -143,9 +123,9 @@ class BoardDetector:
		minirect=rect[h//4:3*h//4, w//4:3*w//4]
		return kmeans(minirect)

	def _detectStones(self,quantized,black,white):
		(h,w)=quantized.shape[:2]
		mask=self._maskStones(quantized,black,white)
	def _detectStones(self,quantized,edgeMask):
		(h,w)=quantized.img.shape[:2]
		mask=self._maskStones(quantized,edgeMask)
		stoneDims=(w/19,h/19)
		log.debug("stone dims: %s - %s",tuple(x/2 for x in stoneDims),stoneDims)

@@ -154,22 +134,14 @@ class BoardDetector:

		return stoneLocs

	def _maskStones(self,quantized,black,white):
		unit=np.array([1,1,1],dtype=np.uint8)
		if black is not None:
			maskB=cv.inRange(quantized,black-unit,black+unit)
	def _maskStones(self,quantized,edgeMask):
		distTransform=cv.distanceTransform(quantized.maskB&edgeMask,cv.DIST_L2,5)
		maskB=cv.inRange(distTransform,6,20)
		show(maskB,"black areas")

			distTransform=cv.distanceTransform(maskB,cv.DIST_L2,5)
			maskB=cv.inRange(distTransform,6,20)
			show(maskB,"black areas")
		else: maskB=np.zeros(quantized.shape[:2],dtype=np.uint8)

		if white is not None:
			maskW=cv.inRange(quantized,white-unit,white+unit)
			distTransform=cv.distanceTransform(maskW,cv.DIST_L2,5)
			maskW=cv.inRange(distTransform,6,20)
			show(maskW,"white areas")
		else: maskW=np.zeros(quantized.shape[:2],dtype=np.uint8)
		distTransform=cv.distanceTransform(quantized.maskW&edgeMask,cv.DIST_L2,5)
		maskW=cv.inRange(distTransform,6,20)
		show(maskW,"white areas")

		stones=cv.bitwise_or(maskB,maskW)
		show(stones,"black and white areas")
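The reworked _maskStones relies on the distance transform: within maskB & edgeMask (and likewise for white), each pixel's value is its distance to the nearest zero pixel, so cv.inRange(distTransform,6,20) keeps only the interiors of blobs at least roughly 6 px thick and discards specks and thin ridges. A standalone sketch with a synthetic mask standing in for quantized.maskB & edgeMask:

# Sketch: keep only stone-sized blob interiors via distance transform (synthetic mask).
import cv2 as cv
import numpy as np

mask=np.zeros((200,200),dtype=np.uint8)
cv.circle(mask,(60,60),12,255,-1)                 # stone-sized blob -> survives
cv.circle(mask,(140,140),3,255,-1)                # small speck -> filtered out
dist=cv.distanceTransform(mask,cv.DIST_L2,5)      # distance to nearest zero pixel
centers=cv.inRange(dist,6,20)                     # interior pixels 6..20 px from the border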
 
@@ -200,7 +172,7 @@ class BoardDetector:
		self._inverseMatrix=np.linalg.inv(matrix)
		return matrix

	def _detectBestGrid(self,lines,img):
	def _detectGrid(self,lines,img):
		intersections=[]
		for p in lines[0]:
			for q in lines[1]:

@@ -215,21 +187,26 @@ class BoardDetector:

		best=(0,None)
		transformedImg=cv.warpPerspective(img,self._rectiMatrix,(self._rectW,self._rectH))
		explored=[0,0,0]

		for e in diagonals:
			for f in diagonals:
				explored[0]+=1
				center=e.intersect(f)
				if not center: continue
				if center.x<0 or center.x>self._rectW or center.y<0 or center.y>self._rectH: continue
				for line in itertools.chain(*lines):
					for i in range(1,10): # 10th is useless, 11-19 are symmetrical to 1-9
						explored[1]+=1
						grid=self._constructGrid(e,f,line,i)
						if not grid: continue
						explored[2]+=1
						score=self._scoreGrid(grid)
						if score>best[0]:
							best=(score,grid)
							log.debug("new best grid: %s",score)
							self._showGrid(transformedImg,grid)
		log.debug("diagonal pairs: %s, explored grids: %s, scored grids: %s",*explored)
		return best[1]

	def _constructGrid(self,e,f,line,i):
 
@@ -260,8 +237,32 @@ class BoardDetector:
		return (rows,cols)

	def _scoreGrid(self,lines):
		(p,q,r,s)=(lines[0][0],lines[0][-1],lines[-1][0],lines[-1][-1])
		corners=(p.intersect(r),p.intersect(s),q.intersect(r),q.intersect(s))
		origCorners=[c.transform(self._inverseMatrix) for c in corners]
		# must fit
		if not all(0<=c.x<self._rectW and 0<=c.y<self._rectH for c in origCorners):
			return 0
		return sum(self._hough.scoreLine(p.transform(self._inverseMatrix)) for p in itertools.chain(*lines))

	def detectPosition(self,img):
		(rows,cols)=self.grid
		intersections=[[row.intersect(col) for col in cols] for row in rows]
		position=[[self._detectStoneAt(img,point) for point in row] for row in intersections]
		log.debug("detected position:\n%s","\n".join("".join(row) for row in position))
		return position

	def _detectStoneAt(self,img,intersection):
		(height,width)=img.img.shape[:2]
		(x,y)=map(int,intersection)
		scores=[0,0,0]
		for xi in range(x-2,x+3):
			if xi<0 or xi>=width: continue
			for yi in range(y-2,y+3):
				if yi<0 or yi>=height: continue
				scores[img.get(xi,yi)]+=1
		return sorted(list(zip(scores,"XO.")))[-1][1]

	def _drawLine(self,img,line,color=None):
		if not color: color=[0,255,0]
		(h,w)=img.shape[:2]
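The new detectPosition/_detectStoneAt pair, matching the commit message, samples a 5x5 pixel window of the quantized image around every grid intersection and takes a majority vote among the labels 0/1/2 (black/white/empty) returned by QuantizedImage.get. The vote itself is just tuple sorting; a tiny sketch with a fabricated neighborhood:

# Sketch of the majority vote in _detectStoneAt (labels: 0=black, 1=white, 2=empty).
neighborhood=[0,0,1,2,2, 0,0,0,2,2, 0,0,0,0,2, 2,0,0,0,2, 2,2,0,0,2]  # fabricated 5x5 sample
scores=[0,0,0]
for label in neighborhood:
	scores[label]+=1
print(sorted(zip(scores,"XO."))[-1][1])  # highest count wins -> prints "X" here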
exp/hough.py
@@ -98,7 +98,7 @@ class HoughTransform:
		(x0,y0)=self._center
		(dx,dy)=(x-x0,y0-y)
		d=dx*math.cos(alphaRad)+dy*math.sin(alphaRad)
		return round(d)
		return int(d)

	def _detectLines(self):
		bag=LineBag()
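The only functional change in this hunk is round(d) becoming int(d): int() truncates toward zero while round() returns the nearest integer, so fractional distances now map to a different Hough bin in roughly half the cases, e.g.:

print(round(7.6), int(7.6))    # 8 7   -> nearest vs. truncation
print(round(-0.6), int(-0.6))  # -1 0  -> int() truncates toward zero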
exp/quantization.py
new file 100644

import logging as log

import numpy as np
import scipy.cluster
import cv2 as cv


def kmeans(img):
	arr=np.reshape(img,(-1,3)).astype(np.float)
	wood=[193,165,116]
	(centers,distortion)=scipy.cluster.vq.kmeans(arr,3)
	log.debug("k-means centers: %s",centers)
	(black,empty,white)=sorted(centers,key=sum)
	if np.linalg.norm(black)>np.linalg.norm(black-wood):
		black=None
	if np.linalg.norm(white-[255,255,255])>np.linalg.norm(white-wood):
		white=None
	log.debug("black, white: %s, %s",black,white)
	return (black,white,centers)

class QuantizedImage:
	BLACK=0
	WHITE=1
	EMPTY=2

	def __init__(self,img):
		self.img=self._quantize(img)
		self._mask()

	def transform(self,matrix):
		(h,w)=self.img.shape[:2]
		self.img=cv.warpPerspective(self.img,matrix,(w,h))
		self.maskB=cv.warpPerspective(self.maskB,matrix,(w,h))
		self.maskW=cv.warpPerspective(self.maskW,matrix,(w,h))

	def get(self,x,y):
		if self.maskB[y,x]: return self.BLACK
		elif self.maskW[y,x]: return self.WHITE
		else: return self.EMPTY

	def _quantize(self,img):
		(self._black,self._white,colors)=self._sampleColors(img)
		origShape=img.shape
		data=np.reshape(img,(-1,3))
		(keys,dists)=scipy.cluster.vq.vq(data,colors)
		pixels=np.array([colors[k] for k in keys],dtype=np.uint8).reshape(origShape)
		return pixels

	def _sampleColors(self,rect):
		(h,w)=rect.shape[:2]
		minirect=rect[h//4:3*h//4, w//4:3*w//4]
		return kmeans(minirect)

	def _mask(self):
		unit=np.array([1,1,1],dtype=np.uint8)
		if self._black is not None:
			self.maskB=cv.inRange(self.img,self._black-unit,self._black+unit)
		else:
			self.maskB=np.zeros(self.img.shape[:2],dtype=np.uint8)

		if self._white is not None:
			self.maskW=cv.inRange(self.img,self._white-unit,self._white+unit)
		else:
			self.maskW=np.zeros(self.img.shape[:2],dtype=np.uint8)
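Taken together, the new module reduces the detector's color handling to one object: build a QuantizedImage from the board crop, warp it along with the rectified image, and query individual pixels. A rough usage sketch; the random crop, the identity matrix, the sampled coordinates and the assumption that exp/ is on the import path are all placeholders:

# Sketch of how board_detect.py consumes QuantizedImage (placeholder inputs).
import numpy as np
from quantization import QuantizedImage

rect=np.random.randint(0,256,(190,190,3)).astype(np.uint8)  # placeholder board crop (BGR)
q=QuantizedImage(rect)       # samples colors by k-means, quantizes pixels, builds maskB/maskW
matrix=np.eye(3)             # placeholder 3x3 perspective matrix (identity)
q.transform(matrix)          # warps img, maskB and maskW consistently
label=q.get(95,95)           # 0=BLACK, 1=WHITE, 2=EMPTY at pixel x=95, y=95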