Changeset - 79b929f58012
[Not reviewed]
default
Laman - 2019-03-03 00:47:31

image rectification
2 files changed with 13 insertions and 47 deletions:
0 comments (0 inline, 0 general)
exp/board_detect.py
 
import sys

sys.path.append("../src")

import os
import math
import random
import itertools
import logging as log

import cv2 as cv
import numpy as np
import scipy.cluster
import scipy.ndimage
import scipy.signal

from geometry import Line
from annotations import DataFile,computeBoundingBox
from hough import show,prepareEdgeImg,HoughTransform
from analyzer.epoint import EPoint
from analyzer.corners import Corners

random.seed(361)
log.basicConfig(level=log.DEBUG,format="%(message)s")


def kmeans(img):
	arr=np.reshape(img,(-1,3)).astype(np.float)
	wood=[193,165,116]
	(centers,distortion)=scipy.cluster.vq.kmeans(arr,3)
	log.debug("k-means centers: %s",centers)
	(black,empty,white)=sorted(centers,key=sum)

@@ -88,163 +89,126 @@ class BoardDetector:
 
		(x1,y1,x2,y2)=self._detectRough(img,filename)
		rect=img[y1:y2,x1:x2]
		self._rectW=x2-x1
		self._rectH=y2-y1
		self._rect=rect

		# quantize colors
		(black,white,colors)=self._sampleColors(rect)
		quantized=quantize(rect,colors)
		gray=cv.cvtColor(rect,cv.COLOR_BGR2GRAY)
		edges=cv.Canny(gray,70,130)
		show(edges,"edges")
		quantized=quantized & (255-cv.cvtColor(edges,cv.COLOR_GRAY2BGR))
		show(quantized,"quantized, edges separated")

		# detect black and white stones
		stones=self._detectStones(quantized,black,white)

		# detect lines from edges and stones
		edgeImg=prepareEdgeImg(rect)
		hough=HoughTransform(edgeImg)
		stonesImg=np.zeros((self._rectH,self._rectW),np.uint8)
		for (point,c) in stones:
			cv.circle(stonesImg,(int(point.x),int(point.y)),2,255,-1)
		# cv.drawContours(stonesImg,[c for (point,c) in stones],-1,255,-1)

		show(stonesImg,"detected stones")
		hough.update(stonesImg,10)
		lines=hough.extract()

		linesImg=np.copy(rect)
		for line in lines:
		for line in itertools.chain(*lines):
			self._drawLine(linesImg,line)
		show(linesImg,"detected lines")

		# # detect vanishing points of the lines
		# imgCenter=EPoint(w//2-x1, h//2-y1)
		# (a,b,c,d)=(p-EPoint(x1,y1) for p in self._annotations[filename][0])
		# (p,q,r,s)=(Line(a,b),Line(b,c),Line(c,d),Line(d,a))
		# v1=p.intersect(r)
		# v2=q.intersect(s)
		# log.debug("true vanishing points: %s ~ %s, %s ~ %s",v1,v1.toPolar(imgCenter),v2,v2.toPolar(imgCenter))
		# vanish=self._detectVanishingPoints(lines,imgCenter,(v1.toPolar(imgCenter),v2.toPolar(imgCenter)))
		#
		# # rectify the image
		# matrix=self._computeTransformationMatrix(vanish,lines)
		# transformed=cv.warpPerspective(rect,matrix,(self._rectW,self._rectH))
		#
		# # determine precise board edges
		matrix=self._computeTransformationMatrix(lines[0][0],lines[0][-1],lines[1][0],lines[1][-1])
		transformed=cv.warpPerspective(rect,matrix,(self._rectW,self._rectH))

		# determine precise board edges

	def _detectRough(self,img,filename):
		corners=self._annotations[filename][0]
		(x1,y1,x2,y2)=computeBoundingBox(corners)
		log.debug("bounding box: (%s,%s) - (%s,%s)",x1,y1,x2,y2)
		return (x1,y1,x2,y2)

	def _sampleColors(self,rect):
		(h,w)=rect.shape[:2]
		minirect=rect[h//4:3*h//4, w//4:3*w//4]
		return kmeans(minirect)

	def _detectStones(self,quantized,black,white):
		(h,w)=quantized.shape[:2]
		mask=self._maskStones(quantized,black,white)
		stoneDims=(w/19,h/19)
		log.debug("stone dims: %s - %s",tuple(x/2 for x in stoneDims),stoneDims)

		(contours,hierarchy)=cv.findContours(mask,cv.RETR_LIST,cv.CHAIN_APPROX_SIMPLE)
		stoneLocs=filterStones(contours,mask,stoneDims)

		return stoneLocs

	def _maskStones(self,quantized,black,white):
		unit=np.array([1,1,1],dtype=np.uint8)
		if black is not None:
			maskB=cv.inRange(quantized,black-unit,black+unit)

			distTransform=cv.distanceTransform(maskB,cv.DIST_L2,5)
			maskB=cv.inRange(distTransform,6,20)
			show(maskB,"black areas")
		else: maskB=np.zeros(quantized.shape[:2],dtype=np.uint8)

		if white is not None:
			maskW=cv.inRange(quantized,white-unit,white+unit)
			distTransform=cv.distanceTransform(maskW,cv.DIST_L2,5)
			maskW=cv.inRange(distTransform,6,20)
			show(maskW,"white areas")
		else: maskW=np.zeros(quantized.shape[:2],dtype=np.uint8)

		stones=cv.bitwise_or(maskB,maskW)
		show(stones,"black and white areas")
		return stones

	def _printLines(self,lines,allPoints,img):
		for (i,line) in enumerate(lines):
			img_=np.copy(img)
			points=list(line.getSortedPoints())
			(a,b)=max(((a,b) for a in points for b in points if a<b),key=lambda ab: ab[0].dist(ab[1]))
			(xa,ya)=a
			(xb,yb)=b
			points.sort(key=lambda p: a.dist(p))
			cv.line(img_,(int(xa),int(ya)),(int(xb),int(yb)),(255,255,0),1)
			cv.imwrite("/tmp/{0}.png".format(i),img_)
			pointDists=",".join(str(round(p1.dist(p2),3)) for (p1,p2) in zip(points[:-1],points[1:]))
			log.debug("\t".join(map(str,[i,line,line.score(allPoints),pointDists])))

	def _detectVanishingPoints(self,lines,imgCenter,trueVs):
		polarHough=PolarHough(math.pi/180,10)
		for (i,ab) in enumerate(lines):
			for cd in lines[i+1:]:
				point=ab.intersect(cd)
				if 0<=point.x<=self._rectW and 0<=point.y<=self._rectH: continue
				# log.debug("%s -> %s",point,point.toPolar(imgCenter))
				polarHough.put(point.toPolar(imgCenter))
		vanish=[EPoint.fromPolar(p,imgCenter) for p in polarHough.extract(2,trueVs)]
		log.debug(vanish)
		return vanish

	def _computeTransformationMatrix(self,vanish,lines):
		(v1,v2)=vanish
		(p,r)=sorted(lines,key=lambda p: point2lineDistance(p.a,p.b,v1))[:2]
		(q,s)=sorted(lines,key=lambda p: point2lineDistance(p.a,p.b,v2))[:2]
		(a,b,c,d)=Corners([p.intersect(q),q.intersect(r),r.intersect(s),s.intersect(p)]) # canonize the abcd order
	def _computeTransformationMatrix(self,p,q,r,s): # p || q, r || s
		(a,b,c,d)=Corners([p.intersect(r),p.intersect(s),q.intersect(r),q.intersect(s)]) # canonize the abcd order
		a_=EPoint(b.x,min(a.y,d.y))
		b_=EPoint(b.x,max(b.y,c.y))
		c_=EPoint(c.x,max(b.y,c.y))
		d_=EPoint(c.x,min(a.y,d.y))
		abcd=[list(point) for point in (a,b,c,d)]
		abcd_=[list(point) for point in (a_,b_,c_,d_)]
		log.debug("abcd: %s ->",(a,b,c,d))
		log.debug("-> abcd_: %s",(a_,b_,c_,d_))
		matrix=cv.getPerspectiveTransform(np.float32(abcd),np.float32(abcd_))
		log.debug("transformation matrix: %s",matrix)

		rect=np.copy(self._rect)
		for point in (a,b,c,d):
			cv.drawMarker(rect,(int(point.x),int(point.y)),(0,255,255),cv.MARKER_TILTED_CROSS)
		show(rect)
		transformed=cv.warpPerspective(rect,matrix,(self._rectW,self._rectH))
		show(transformed)
		show(transformed,"rectified image")

		return matrix

	def _drawLine(self,img,line):
		(h,w)=img.shape[:2]
		corners=[EPoint(0,0),EPoint(w,0),EPoint(0,h),EPoint(w,h)] # NW NE SW SE
		borders=[
			[Line.fromPoints(corners[0],corners[1]), Line.fromPoints(corners[2],corners[3])], # N S
			[Line.fromPoints(corners[0],corners[2]), Line.fromPoints(corners[1],corners[3])] # W E
		]

		(a,b)=(line.intersect(borders[0][0]), line.intersect(borders[0][1]))
		log.debug("%s %s",line,(a,b))
		if not a or not b:
			(a,b)=(line.intersect(borders[1][0]), line.intersect(borders[1][1]))
			log.debug("* %s %s",line,(a,b))
		if any(abs(x)>10**5 for x in [*a,*b]):
			log.debug("ignored")
			return
		cv.line(img,(int(a.x),int(a.y)),(int(b.x),int(b.y)),[0,255,0])


if __name__=="__main__":
	detector=BoardDetector(sys.argv[2])
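
The heart of the new `_computeTransformationMatrix` is a standard four-point perspective warp: two pairs of roughly parallel border lines are intersected into corners `(a,b,c,d)`, an axis-aligned target rectangle `(a_,b_,c_,d_)` is built from them, and OpenCV computes and applies the homography; the changeset feeds the outermost detected grid lines into this step instead of the now commented-out vanishing-point route. A minimal standalone sketch of that warp, with made-up corner coordinates and file names (not part of this changeset):

import cv2 as cv
import numpy as np

# four detected board corners in the source photo (illustrative values),
# listed in the same order as the target rectangle below
corners=np.float32([[110,80],[95,420],[530,435],[545,70]])
# axis-aligned rectangle the corners should map to
target=np.float32([[100,70],[100,440],[540,440],[540,70]])

img=cv.imread("board.jpg")	# any test photo
matrix=cv.getPerspectiveTransform(corners,target)	# 3x3 homography
rectified=cv.warpPerspective(img,matrix,(img.shape[1],img.shape[0]))	# dsize is (width,height)
cv.imwrite("board_rectified.png",rectified)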
exp/hough.py
 
@@ -38,53 +38,55 @@ class HoughTransform:
 

	
 
	Uses usual image coordinates on input and output, with [0,0] in the upper left corner and [height-1,width-1] in the lower right.
	However, internally it uses the usual cartesian coordinates, centered at the image center. [-w/2,-h/2] in the upper left and [w/2,h/2] in the lower right."""
	def __init__(self,img):
		self._angleBandwidth=30 # degrees

		(h,w)=img.shape[:2]
		self._diagLen=int(np.sqrt(h**2+w**2))+1
		self._center=(w//2,h//2)
		self._acc=np.zeros((180,self._diagLen),dtype=np.int32)

		self.update(img)

	def extract(self):
		img=self._createImg()
		self.show(img)
		lines=self._detectLines()
		res=[]
		i=0
		for (score,alpha,beta,peaks) in lines:
			log.debug("score: %s",score)
			log.debug("alpha, beta: %s, %s",alpha,beta)
			self._drawLine(img,alpha,beta,peaks,i)

			res.append([])
			keys=self._readLineKeys(alpha,beta)
			for k in peaks:
				(alphaDeg,d)=keys[k]
				line=Line(alphaDeg*math.pi/180,d-self._diagLen//2)
				res.append(self._transformOutput(line))
				res[-1].append(self._transformOutput(line))
			res[-1].sort(key=lambda line: line.d)
			i+=1

		self.show(img)
		return res

	def update(self,img,weight=1):
		start=datetime.now().timestamp()
		for (r,row) in enumerate(img):
			for (c,pix) in enumerate(row):
				if pix==0: continue
				for alphaDeg in range(0,180):
					d=self._computeDist(c,r,alphaDeg)+self._diagLen//2
					self._acc[(alphaDeg,d)]+=weight
		log.debug("Hough updated in %s s",round(datetime.now().timestamp()-start,3))

	def show(self,img=None):
		if img is None: img=self._createImg()
		show(img,"Hough transform accumulator")

	def _computeDist(self,x,y,alphaDeg):
		alphaRad=alphaDeg*math.pi/180
		(x0,y0)=self._center
		(dx,dy)=(x-x0,y0-y)
		d=dx*math.cos(alphaRad)+dy*math.sin(alphaRad)
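
The accumulator indexing above follows the convention from the class docstring: each pixel is converted from image coordinates (y growing downward) to coordinates centered at the image center before the signed line offset d = dx*cos(alpha) + dy*sin(alpha) is computed, and `update` then shifts d by `self._diagLen//2` so the accumulator index stays non-negative. A small self-contained sketch of that conversion (function names are illustrative, not part of the module):

import math

def image_to_centered(x,y,center):
	# mirror the (dx,dy)=(x-x0,y0-y) step of _computeDist: shift the origin
	# to the image center and flip the y axis so it grows upward
	(x0,y0)=center
	return (x-x0,y0-y)

def line_offset(x,y,alphaDeg,center):
	# signed distance from the image center to the line with normal angle
	# alphaDeg passing through the given pixel
	(dx,dy)=image_to_centered(x,y,center)
	alphaRad=alphaDeg*math.pi/180
	return dx*math.cos(alphaRad)+dy*math.sin(alphaRad)

# a pixel at image coordinates (300,40) in a 400x300 image, 45 degree normal
print(round(line_offset(300,40,45,(200,150)),3))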