Changeset 275f7307dd32 (branch: default, not reviewed)
Laman - 2019-02-20 14:25:16

detecting lines in Hough transform accumulator

2 files changed with 42 insertions and 102 deletions
0 comments (0 inline, 0 general)
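
The heart of this change is scoring whole grid-line candidates directly in the (angle, distance) accumulator: a candidate enters the accumulator at row alpha on the left edge and leaves at row beta on the right edge, and its score is the summed prominence of the strongest peaks it crosses (see LineBag and HoughTransform._detectLines in exp/hough.py below). The following is a minimal, self-contained sketch of that scoring idea under the same assumptions (a 180 x diagonal-length accumulator, roughly 19 grid lines); the names read_line and score_line are illustrative only and do not exist in the repository.

import numpy as np
import scipy.signal


def read_line(acc, alpha, beta):
	"""Sample the accumulator along a straight slice from (row=alpha, col=0) to (row=beta, col=n)."""
	n = acc.shape[1] - 1
	cols = np.arange(n + 1)
	# out-of-range rows simply wrap here; the project code also mirrors the distance axis
	rows = np.round((alpha * (n - cols) + beta * cols) / n).astype(int) % acc.shape[0]
	return acc[rows, cols]


def score_line(acc, alpha, beta, expected_peaks=19):
	"""Sum the prominences of the strongest peaks on the slice.

	A slice passing through many strong accumulator peaks corresponds to a bundle
	of nearly parallel image lines, e.g. the 19 lines of a go board grid."""
	profile = read_line(acc, alpha, beta)
	(peaks, props) = scipy.signal.find_peaks(profile, prominence=0)
	return sum(sorted(props["prominences"], reverse=True)[:expected_peaks])


if __name__ == "__main__":
	rng = np.random.default_rng(0)
	acc = rng.integers(0, 5, size=(180, 400)).astype(float)
	acc[90, ::20] += 50  # synthetic peaks lying on a horizontal slice at angle 90
	candidates = ((score_line(acc, a, b), a, b)
	              for a in range(0, 180, 2) for b in range(a - 45, a + 45, 2))
	print(max(candidates))  # both angles should come out close to 90
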
exp/board_detect.py
 
import sys
 

	
 
sys.path.append("../src")
 

	
 
import os
 
import math
 
import random
 
import logging as log
 

	
 
import cv2 as cv
 
import numpy as np
 
import scipy.cluster
 
import scipy.ndimage
 
import scipy.signal
 

	
 
from geometry import Line,point2lineDistance # point2lineDistance is used below; assumed to be provided by the geometry module
 
from polar_hough import PolarHough
 
from annotations import DataFile,computeBoundingBox
 
from hough import show,prepareEdgeImg,HoughTransform
 
from analyzer.epoint import EPoint
 
from analyzer.corners import Corners
 

	
 
random.seed(361)
 
log.basicConfig(level=log.DEBUG,format="%(message)s")
 

	
 

	
 
def kmeans(img):
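	"""Estimate three dominant colors with k-means, seeded with black, white and (presumably) the board color."""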
 
	arr=np.reshape(img,(-1,3)).astype(float)
 
	colors=np.array([[0,0,0],[255,255,255],[193,165,116]],float)
 
	log.debug(colors)
 
	(centers,distortion)=scipy.cluster.vq.kmeans(arr,colors)
 
	log.debug("k-means centers: %s",centers)
 
	return centers
 

	
 

	
 
def quantize(img,centers):
 
	origShape=img.shape
 
	data=np.reshape(img,(-1,3))
 
	(keys,dists)=scipy.cluster.vq.vq(data,centers)
 
	pixels=np.array([centers[k] for k in keys],dtype=np.uint8).reshape(origShape)
 
	return pixels
 

	
 

	
 
def filterStones(contours,bwImg,stoneDims):
 
	contourImg=cv.cvtColor(bwImg,cv.COLOR_GRAY2BGR)
 
	res=[]
 
	for (i,c) in enumerate(contours):
 
		keep=True
 
		moments=cv.moments(c)
 
		center=(moments["m10"]/(moments["m00"] or 1), moments["m01"]/(moments["m00"] or 1))
 
		area=cv.contourArea(c)
 
		(x,y,w,h)=cv.boundingRect(c)
 
		if w>stoneDims[0] or h>stoneDims[1]*1.5 or w<2 or h<2:
 
			cv.drawMarker(contourImg,tuple(map(int,center)),(0,0,255),cv.MARKER_TILTED_CROSS,12)
 
			keep=False
 
		coverage1=area/(w*h or 1)
 
		hull=cv.convexHull(c)
 
		coverage2=area/(cv.contourArea(hull) or 1)
 
		# if coverage2<0.8:
 
		# 	cv.drawMarker(contourImg,tuple(map(int,center)),(0,127,255),cv.MARKER_DIAMOND,12)
 
		# 	keep=False
 
		if keep:
 
			res.append((EPoint(*center),c))
 
			cv.drawMarker(contourImg,tuple(map(int,center)),(255,0,0),cv.MARKER_CROSS)
 
	log.debug("accepted: %s",len(res))
 
	log.debug("rejected: %s",len(contours)-len(res))
 
	show(contourImg,"accepted and rejected stones")
 
	return res
 

	
 

	
 
def groupLines(points,minCount,tolerance):
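	"""For every pair of points build a candidate line, attach every point lying within tolerance of it, and yield candidates supported by at least minCount points."""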
 
	random.shuffle(points)
 
	sample=points[:]
 
	for (i,a) in enumerate(sample):
 
		for (j,b) in enumerate(sample):
 
			if j<=i: continue
 
			ab=Line(a,b)
 
			for c in points:
 
				if c is a or c is b: continue
 
				if point2lineDistance(a,b,c)<=tolerance:
 
					ab.points.add(c)
 
			if len(ab.points)>=minCount:
 
				yield ab
 

	
 

	
 
class BoardDetector:
 
	def __init__(self,annotationsPath):
 
		self._annotations=DataFile(annotationsPath)
 

	
 
		self._rectW=0
 
		self._rectH=0
 
		self._rect=None
 

	
 
	def __call__(self,img,filename):
 
		# approximately detect the board
 
		(h,w)=img.shape[:2]
 
		log.debug("image dimensions: %s x %s",w,h)
 
		show(img,filename)
 
		(x1,y1,x2,y2)=self._detectRough(img,filename)
 
		rect=img[y1:y2,x1:x2]
 
		self._rectW=x2-x1
 
		self._rectH=y2-y1
 
		self._rect=rect
 

	
 
		# quantize colors
 
		colors=self._sampleColors(rect)
 
		quantized=quantize(rect,colors)
 
		gray=cv.cvtColor(rect,cv.COLOR_BGR2GRAY)
 
		edges=cv.Canny(gray,70,130)
 
		show(edges,"edges")
 
		quantized=quantized & (255-cv.cvtColor(edges,cv.COLOR_GRAY2BGR))
 
		show(quantized,"quantized, edges separated")
 

	
 
		# detect black and white stones
 
		stones=self._detectStones(quantized,colors)
 

	
 
		# detect lines from edges and stones
 
		edgeImg=prepareEdgeImg(rect)
 
		hough=HoughTransform(edgeImg)
 
		stonesImg=np.zeros((self._rectH,self._rectW),np.uint8)
 
		for (point,c) in stones:
 
			cv.circle(stonesImg,(int(point.x),int(point.y)),2,255,-1)
 
		# cv.drawContours(stonesImg,[c for (point,c) in stones],-1,255,-1)
 
		show(stonesImg,"detected stones")
 
		hough.update(stonesImg,5)
 
		hough.update(stonesImg,3)
 
		hough.extract()
 

	
 
		# # detect lines passing through the stones
 
		# lines=self._constructLines(stones)
 
		#
 
		# # detect vanishing points of the lines
 
		# imgCenter=EPoint(w//2-x1, h//2-y1)
 
		# (a,b,c,d)=(p-EPoint(x1,y1) for p in self._annotations[filename][0])
 
		# (p,q,r,s)=(Line(a,b),Line(b,c),Line(c,d),Line(d,a))
 
		# v1=p.intersect(r)
 
		# v2=q.intersect(s)
 
		# log.debug("true vanishing points: %s ~ %s, %s ~ %s",v1,v1.toPolar(imgCenter),v2,v2.toPolar(imgCenter))
 
		# vanish=self._detectVanishingPoints(lines,imgCenter,(v1.toPolar(imgCenter),v2.toPolar(imgCenter)))
 
		#
 
		# # rectify the image
 
		# matrix=self._computeTransformationMatrix(vanish,lines)
 
		# transformed=cv.warpPerspective(rect,matrix,(self._rectW,self._rectH))
 
		#
 
		# # determine precise board edges
 

	
 
	def _detectRough(self,img,filename):
 
		corners=self._annotations[filename][0]
 
		(x1,y1,x2,y2)=computeBoundingBox(corners)
 
		log.debug("bounding box: (%s,%s) - (%s,%s)",x1,y1,x2,y2)
 
		return (x1,y1,x2,y2)
 

	
 
	def _sampleColors(self,rect):
 
		(h,w)=rect.shape[:2]
 
		minirect=rect[h//4:3*h//4, w//4:3*w//4]
 
		return kmeans(minirect)
 

	
 
	def _detectStones(self,quantized,colors):
 
		(h,w)=quantized.shape[:2]
 
		mask=self._maskStones(quantized,colors)
 
		stoneDims=(w/19,h/19)
 
		log.debug("stone dims: %s - %s",tuple(x/2 for x in stoneDims),stoneDims)
 

	
 
		(contours,hierarchy)=cv.findContours(mask,cv.RETR_LIST,cv.CHAIN_APPROX_SIMPLE)
 
		stoneLocs=filterStones(contours,mask,stoneDims)
 

	
 
		return stoneLocs
 

	
 
	def _maskStones(self,quantized,colors):
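		"""Mask pixels quantized to the black and white cluster centers and keep only the cores of blobs thick enough to be stones (distance to the blob edge between 6 and 20 px)."""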
 
		unit=np.array([1,1,1],dtype=np.uint8)
 
		maskB=cv.inRange(quantized,colors[0]-unit,colors[0]+unit)
 

	
 
		distTransform=cv.distanceTransform(maskB,cv.DIST_L2,5)
 
		maskB=cv.inRange(distTransform,6,20)
 
		show(maskB,"black areas")
 

	
 
		maskW=cv.inRange(quantized,colors[1]-unit,colors[1]+unit)
 
		distTransform=cv.distanceTransform(maskW,cv.DIST_L2,5)
 
		maskW=cv.inRange(distTransform,6,20)
 

	
 
		show(maskW,"white areas")
 
		stones=cv.bitwise_or(maskB,maskW)
 
		show(stones,"black and white areas")
 
		return stones
 

	
 
	def _constructLines(self,stoneLocs):
 
		lineDict=dict()
 
		# minCount=min(max(math.sqrt(len(stoneLocs))-4,3),7)
 
		minCount=6
 
		log.debug("min count: %s",minCount)
 
		points=[point for (point,contour) in stoneLocs]
 
		for line in groupLines(points,minCount,2):
 
			key=line.getSortedPoints()
 
			if key in lineDict: # we already have a line with the same incident points
 
				continue
 
			lineDict[line.getSortedPoints()]=line
 
			obsolete=set()
 
			for ab in lineDict.values():
 
				if ab is line: continue
 
				if line.points<ab.points: # == impossible
 
					del lineDict[key]
 
					break
 
				if ab.points<line.points:
 
					obsolete.add(ab.getSortedPoints())
 
			for key in obsolete: del lineDict[key]
 
		log.debug("valid lines: %s",len(lineDict))
 
		lines=sorted(lineDict.values(), key=lambda ab: len(ab.points), reverse=True)
 

	
 
		# visualize
 
		linesImg=cv.cvtColor(np.zeros((self._rectH,self._rectW),np.uint8),cv.COLOR_GRAY2BGR)
 
		cv.drawContours(linesImg,[c for (point,c) in stoneLocs],-1,(255,255,255),-1)
 
		for (p,c) in stoneLocs:
 
			cv.drawMarker(linesImg,(int(p.x),int(p.y)),(255,0,0),cv.MARKER_CROSS)
 
		self._printLines(lines,points,linesImg)
 
		for line in lines:
 
			points=line.getSortedPoints()
 
			(xa,ya)=points[0]
 
			(xb,yb)=points[-1]
 
			cv.line(linesImg,(int(xa),int(ya)),(int(xb),int(yb)),(255,255,0),1)
 
		show(linesImg)
 

	
 
		return lines
 

	
 
	def _printLines(self,lines,allPoints,img):
 
		for (i,line) in enumerate(lines):
 
			img_=np.copy(img)
 
			points=list(line.getSortedPoints())
 
			(a,b)=max(((a,b) for a in points for b in points if a<b),key=lambda ab: ab[0].dist(ab[1]))
 
			(xa,ya)=a
 
			(xb,yb)=b
 
			points.sort(key=lambda p: a.dist(p))
 
			cv.line(img_,(int(xa),int(ya)),(int(xb),int(yb)),(255,255,0),1)
 
			cv.imwrite("/tmp/{0}.png".format(i),img_)
 
			pointDists=",".join(str(round(p1.dist(p2),3)) for (p1,p2) in zip(points[:-1],points[1:]))
 
			log.debug("\t".join(map(str,[i,line,line.score(allPoints),pointDists])))
 

	
 
	def _detectVanishingPoints(self,lines,imgCenter,trueVs):
 
		polarHough=PolarHough(math.pi/180,10)
 
		for (i,ab) in enumerate(lines):
 
			for cd in lines[i+1:]:
 
				point=ab.intersect(cd)
 
				if 0<=point.x<=self._rectW and 0<=point.y<=self._rectH: continue
 
				# log.debug("%s -> %s",point,point.toPolar(imgCenter))
 
				polarHough.put(point.toPolar(imgCenter))
 
		vanish=[EPoint.fromPolar(p,imgCenter) for p in polarHough.extract(2,trueVs)]
 
		log.debug(vanish)
 
		return vanish
 

	
 
	def _computeTransformationMatrix(self,vanish,lines):
 
		(v1,v2)=vanish
 
		(p,r)=sorted(lines,key=lambda p: point2lineDistance(p.a,p.b,v1))[:2]
 
		(q,s)=sorted(lines,key=lambda p: point2lineDistance(p.a,p.b,v2))[:2]
 
		(a,b,c,d)=Corners([p.intersect(q),q.intersect(r),r.intersect(s),s.intersect(p)]) # canonize the abcd order
 
		a_=EPoint(b.x,min(a.y,d.y))
 
		b_=EPoint(b.x,max(b.y,c.y))
 
		c_=EPoint(c.x,max(b.y,c.y))
 
		d_=EPoint(c.x,min(a.y,d.y))
 
		abcd=[list(point) for point in (a,b,c,d)]
 
		abcd_=[list(point) for point in (a_,b_,c_,d_)]
 
		log.debug("abcd: %s ->",(a,b,c,d))
 
		log.debug("-> abcd_: %s",(a_,b_,c_,d_))
 
		matrix=cv.getPerspectiveTransform(np.float32(abcd),np.float32(abcd_))
 
		log.debug("transformation matrix: %s",matrix)
 

	
 
		rect=np.copy(self._rect)
 
		for point in (a,b,c,d):
 
			cv.drawMarker(rect,(int(point.x),int(point.y)),(0,255,255),cv.MARKER_TILTED_CROSS)
 
		show(rect)
 
		transformed=cv.warpPerspective(rect,matrix,(self._rectW,self._rectH))
 
		show(transformed)
 

	
 
		return matrix
 

	
 

	
 
if __name__=="__main__":
 
	detector=BoardDetector(sys.argv[2])
 
	filepath=sys.argv[1]
 
	filename=os.path.basename(filepath)
 
	img=cv.imread(filepath)
 
	detector(img,filename)
exp/hough.py
 
import sys
 
sys.path.append("../src")
 

	
 
import math
 
from datetime import datetime
 
import logging as log
 

	
 
import numpy as np
 
import scipy.optimize
 
import scipy.signal
 
import cv2 as cv
 

	
 
from geometry import Line
 
from analyzer.epoint import EPoint
 

	
 
DEBUG=True
 

	
 

	
 
class BaseHough:
 
	def __init__(self,width,height):
 
		self._diagLen=int(np.sqrt(height**2+width**2))+1
 
		self._center=(width//2,height//2)
 
		self._acc=np.zeros((180,self._diagLen),dtype=np.int32)
 

	
 
	def update(self,x,y,weight=1):
 
		""":param x: number, 0 <= x < width
 
		:param y: number, 0 <= y < height"""
 
		for alphaDeg in range(0,180):
 
			d=self._computeDist(x,y,alphaDeg)+self._diagLen//2
 
			self._acc[(alphaDeg,d)]+=weight
 

	
 
	def extract(self,n):
 
		shift=self._diagLen//2
 
		peaks=sorted(list(findPeaks(self._acc)),key=lambda rc: self._acc[rc],reverse=True)
 
		peaks=self._filterClose(peaks)[:n]
 
		log.debug("detected peaks: %s",[(alpha,d-shift) for (alpha,d) in peaks])
 
		img=self._createImg()
		img=self._markPeaks(img,self._filterClose(peaks))
		self.show(img)

		res=[]
		for (alphaDeg,d) in peaks:
			alphaRad=alphaDeg*math.pi/180
			baseLine=Line(alphaRad,0)
			dd=baseLine.distanceTo(EPoint(*self._center)) # to shift d from the center to 0,0
			res.append(Line(alphaRad, dd+d-shift))
		log.debug("detected lines: %s",res)
		return res

	def _computeDist(self,x,y,alphaDeg):
		"""Compute the distance of a line with the provided alphaDeg declination and passing the (x,y) point to the image center.
		The returned distance might be negative (meaning the angle is in fact alpha+180)."""
		alphaRad=alphaDeg*math.pi/180
		(x0,y0)=self._center
		(dx,dy)=(x-x0,y-y0)
		d=dx*math.cos(alphaRad)+dy*math.sin(alphaRad)
		return round(d)

	def _filterClose(self,peaks): # a naive implementation
		"""Discard points with Euclidean distance on the original image lower than minDist.
		From such pairs keep only the one with a higher value in the accumulator.
		This can delete a series of points. If a-b and b-c are close and a>b>c, only a is kept."""
		minDist=13
		center=EPoint(*self._center)
		res=[]
		for (alphaDeg,d) in peaks:
			alphaRad=alphaDeg*math.pi/180
			point=EPoint.fromPolar((alphaRad,d),center)
			ctrl=True
			for (betaDeg,e) in peaks:
				betaRad=betaDeg*math.pi/180
				point_=EPoint.fromPolar((betaRad,e),center)
				if point.dist(point_)<minDist and self._acc[(alphaDeg,d)]<self._acc[(betaDeg,e)]:
					ctrl=False
			if ctrl: res.append((alphaDeg,d))
		return res

	def show(self,img=None):
		if not DEBUG: return
		if img is None: img=self._createImg()

		show(img,"Hough transform accumulator")

	def _createImg(self):
		maxVal=self._acc.max()
		arr=np.expand_dims(np.uint8(255*self._acc//maxVal),axis=2)
		img=np.concatenate((arr,arr,arr),axis=2)

		(h,w)=img.shape[:2]

		for x in range(0,w,4): # y axis
			img[h//2,x]=[255,255,255]
		for y in range(0,h,4):
			img[y,w//2]=[255,255,255]

		return img

	def _markPeaks(self,img,peaks):
		colors=[[255,0,0],[255,255,0],[0,255,0],[0,255,255],[0,0,255]]
		for (i,(alpha,d)) in enumerate(peaks[:38]):
			cv.drawMarker(img,(d,alpha),colors[i//9],cv.MARKER_TILTED_CROSS)
		return img


class LineBag:
	def __init__(self):
		self._lines=[]

	def put(self,score,alpha,beta):
		self._lines.append((score,alpha,beta))

	def pull(self,count):
		self._lines.sort(reverse=True)
		res=[]
		for (score,alpha,beta) in self._lines:
			if any(abs(alpha-gamma)<10 and abs(beta-delta)<10 for (_,gamma,delta) in res): continue
			if any((beta-delta)!=0 and (alpha-gamma)/(beta-delta)<0 for (_,gamma,delta) in res): continue
			res.append((score,alpha,beta))
			if len(res)>=count: break
		return res
 

	
 

	
 
class HoughTransform:
 
	def __init__(self,img):
 
		self._angleBandwidth=30 # degrees
 

	
 
		(h,w)=img.shape[:2]
 
		self._diagLen=int(np.sqrt(h**2+w**2))+1
 
		self._center=(w//2,h//2)
 
		self._acc=np.zeros((180,self._diagLen),dtype=np.int32)
 

	
 
		self.update(img)
 

	
 
	def extract(self):
 
		shift=self._diagLen//2
 
		allPeaks=sorted(list(findPeaks(self._acc)),key=lambda rc: self._acc[rc],reverse=True)
 
		peaks=allPeaks[:38]
 
		peaks=[(alpha,d-shift) for (alpha,d) in peaks]
 
		peaks=self._filterClose(peaks)
 
		peaks.sort(key=lambda rc: rc[0])
 
		log.debug("detected peaks: %s",peaks)
 
		img=self._createImg()
 
		(ab,cd)=self._detectLines()
 
		for (score,alpha,beta) in (ab,cd):
 
			log.debug("score: %s",score)
 
			log.debug("alpha, beta: %s, %s",alpha,beta)
 
			cv.line(img,(0,alpha),(self._diagLen-1,beta),(0,255,255))
 

	
 
		h2=BaseHough(self._diagLen,180+90)
 
		for (alpha,d) in peaks:
 
			h2.update(d+shift,alpha)
 
			if alpha<90:
 
				h2.update(shift-d,alpha+180)
 
		lines=h2.extract(3)
 

	
 
		img=self._createImg()
 
		img=self._markPeaks(img,self._filterClose(allPeaks[:38]))
 

	
 
		for (i,line) in enumerate(lines):
 
			self.drawLine(img,line,i)
 
		self.show(img)
 

	
 
	def update(self,img,weight=1):
 
		start=datetime.now().timestamp()
 
		for (r,row) in enumerate(img):
 
			for (c,pix) in enumerate(row):
 
				if pix==0: continue
 
				for alphaDeg in range(0,180):
 
					d=self._computeDist(c,r,alphaDeg)+self._diagLen//2
 
					self._acc[(alphaDeg,d)]+=weight
 
		log.debug("Hough updated in %s s",round(datetime.now().timestamp()-start,3))
 

	
 
	def _computeDist(self,x,y,alphaDeg):
 
		alphaRad=alphaDeg*math.pi/180
 
		(x0,y0)=self._center
 
		(dx,dy)=(x-x0,y-y0)
 
		d=dx*math.cos(alphaRad)+dy*math.sin(alphaRad)
 
		return round(d)
 

	
 
	def _filterClose(self,peaks): # a naive implementation
 
		"""Discard points with Euclidean distance on the original image lower than 10.
 
		From such pairs keep only the one with a higher value in the accumulator.
 
		This can delete a series of points. If a-b and b-c are close and a>b>c, only a is kept."""
 
		minDist=13
 
		center=EPoint(*self._center)
 
		res=[]
 
		for (alphaDeg,d) in peaks:
 
			alphaRad=alphaDeg*math.pi/180
 
			point=EPoint.fromPolar((alphaRad,d),center)
 
			ctrl=True
 
			for (betaDeg,e) in peaks:
 
				betaRad=betaDeg*math.pi/180
 
				point_=EPoint.fromPolar((betaRad,e),center)
 
				if point.dist(point_)<minDist and self._acc[(alphaDeg,d)]<self._acc[(betaDeg,e)]:
 
					ctrl=False
 
			if ctrl: res.append((alphaDeg,d))
 
		return res
 

	
 
	def _detectDominantAngles(self,peaks):
 
		angles=[alpha for (alpha,d) in peaks]
 
		n=len(angles)
 
		bandwidth=self._angleBandwidth
 
		k1=0
 
		k2=1
 
		histogram=[]
 
		while k1<n:
 
			while (k2<n and angles[k1]+bandwidth>angles[k2]) or (k2>=n and angles[k1]+bandwidth>angles[k2%n]+180):
 
				k2+=1
 
			histogram.append((angles[k1],k2-k1))
 
			k1+=1
 
		log.debug("angles histogram: %s",histogram)
 
		dominantAngles=sorted(histogram,key=lambda xy: xy[1],reverse=True)
 
		alpha=dominantAngles[0]
 
		dominantAngles=[beta for beta in dominantAngles if 180-bandwidth>abs(alpha[0]-beta[0])>bandwidth]
 
		beta=dominantAngles[0]
 
		log.debug("dominant angles: %s, %s",alpha,beta)
 
		return (alpha[0],beta[0])
 

	
 
	def _detectLines(self):
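		"""Sweep slices across the accumulator (entering at row alpha on the left edge, leaving at row beta on the right), score each by the summed prominences of its strongest peaks (at most 19), and return the two best mutually dissimilar candidates."""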
 
		bag=LineBag()
 
		for alpha in range(0,180,2):
 
			for beta in range(alpha-45,alpha+45,2):
 
				accLine=self._readLine(alpha,beta)
 
				(peaks,props)=scipy.signal.find_peaks(accLine,prominence=0)
 
				prominences=sorted(props["prominences"],reverse=True)[:19]
 
				bag.put(sum(prominences),alpha,beta)
 
		return bag.pull(2)
 

	
 
	def _readLine(self,alpha,beta):
 
		n=self._diagLen-1
 
		res=[]
 
		for i in range(n+1):
 
			k=round((alpha*(n-i)+beta*i)/n)
 
			if k<0 or k>=180:
 
				if k<-180 or k>360: print(alpha,beta,i,k)
 
				k=k%180
 
				i=n+1-i
 
			res.append(self._acc[k][i])
 
		return res
 

	
 
	def _computeGridParams(self,lines):
 
		log.debug("computing grid parameters for: %s",lines)
 
		angles=[alpha for (alpha,d) in lines]
 
		dists=[d for (alpha,d) in lines]
 
		curve=lambda x,a,b,c,d: a*x**3+b*x**2+c*x+d
 
		(params,cov)=scipy.optimize.curve_fit(curve,dists,angles)
 
		log.debug("result: %s",params)
 
		return params
 

	
 
	def show(self,img=None):
 
		if img is None: img=self._createImg()
 

	
 
		show(img,"Hough transform accumulator")
 

	
 
	def _createImg(self):
 
		maxVal=self._acc.max()
 
		arr=np.expand_dims(np.uint8(255*self._acc//maxVal),axis=2)
 
		img=np.concatenate((arr,arr,arr),axis=2)
 

	
 
		(h,w)=img.shape[:2]
 

	
 
		for x in range(0,w,4): # y axis
 
			img[h//2,x]=[255,255,255]
 
		for y in range(0,h,4):
 
			img[y,w//2]=[255,255,255]
 

	
 
		return img
 

	
 
	def _markPeaks(self,img,peaks):
 
		colors=[[255,0,0],[255,255,0],[0,255,0],[0,255,255],[0,0,255]]
 
		for (i,(alpha,d)) in enumerate(peaks[:38]):
 
			cv.drawMarker(img,(d,alpha),colors[i//9],cv.MARKER_TILTED_CROSS)
 
		return img
 

	
 
	def _drawGridCurve(self,img,params,colorKey=0):
 
		colors=[[0,255,255],[255,0,255],[255,255,0]]
 
		color=colors[colorKey]
 
		(a,b,c,d)=params
 
		(h,w)=img.shape[:2]
 
		curve=lambda x: a*x**3+b*x**2+c*x+d
 
		for x in range(0,w,3):
 
			y=round(curve(x))
 
			if y<0 or y>=2*h: continue
 
			if y<h:	img[y,x]=color
 
			else: img[y%h,-x]=color
 

	
 
	def drawLine(self,img,line,colorKey=0):
 
		colors=[[0,255,255],[255,0,255],[255,255,0]]
 
		color=colors[colorKey]
 
		(h,w)=img.shape[:2]
 
		(a,b,c)=line.toNormal()
 
		print("%",a,b,c)
 
		if b==0: return
 
		for x in range(1,w,3):
 
			y=round((-c-a*x)/b) + (0 if b>=0 else 180)
 
			if y<0 or y>=h: continue
 
			img[y,x]=color
 

	
 

	
 
def findPeaks(arr2d): # a naive implementation
 
	"""Scan 8-neighbourhood and for each peak or top plateau yield one point. For plateaus yield the """
 
	(h,w)=arr2d.shape
 
	neighbours=[(-1,-1),(-1,0),(-1,1),(0,-1),(0,1),(1,-1),(1,0),(1,1)]
 
	for r in range(h):
 
		for c in range(w):
 
			if all(r+dr<0 or r+dr>=h or c+dc<0 or c+dc>=w or arr2d[r,c]>arr2d[r+dr,c+dc] or (i<4 and arr2d[r,c]>=arr2d[r+dr,c+dc]) for (i,(dr,dc)) in enumerate(neighbours)):
 
				yield (r,c)
 

	
 

	
 
def show(img,filename="x"):
 
	cv.imshow(filename,img)
 
	cv.waitKey(0)
 
	cv.destroyAllWindows()
 

	
 

	
 
def filterVert(edges):
 
	kernel = np.array([[1,0,1],[1,0,1],[1,0,1]],np.uint8)
 
	edges = cv.erode(edges,kernel)
 
	kernel=np.array([[0,1,0],[0,1,0],[0,1,0]],np.uint8)
 
	edges=cv.dilate(edges,kernel)
 
	return edges
 

	
 
def filterHor(edges):
 
	kernel = np.array([[1,1,1],[0,0,0],[1,1,1]],np.uint8)
 
	edges = cv.erode(edges,kernel)
 
	kernel=np.array([[0,0,0],[1,1,1],[0,0,0]],np.uint8)
 
	edges=cv.dilate(edges,kernel)
 
	return edges
 

	
 
def filterDiag(edges):
 
	kernel = np.array([[0,0,1],[1,0,0],[0,1,0]],np.uint8)
 
	edges1 = cv.erode(edges,kernel)
 
	kernel=np.array([[1,0,0],[0,1,0],[0,0,1]],np.uint8)
 
	edges1=cv.dilate(edges1,kernel)
 

	
 
	kernel = np.array([[0,1,0],[1,0,0],[0,0,1]],np.uint8)
 
	edges2 = cv.erode(edges,kernel)
 
	kernel=np.array([[0,0,1],[0,1,0],[1,0,0]],np.uint8)
 
	edges2=cv.dilate(edges2,kernel)
 

	
 
	return edges1+edges2
 

	
 
def prepareEdgeImg(img):
 
	gray=cv.cvtColor(img,cv.COLOR_BGR2GRAY)
 
	show(gray,"greyscale image")
 
	edges=cv.Canny(gray,70,130)
 
	show(edges,"Canny edge detector")
 
	edges=filterHor(edges)+filterVert(edges)+filterDiag(edges)
 
	show(edges,"kernel filtered edges")
 
	return edges
 

	
 
def houghLines(bwImg):
 
	colorImg=cv.cvtColor(bwImg,cv.COLOR_GRAY2BGR)
 
	lines = cv.HoughLinesP(bwImg,1,np.pi/180,10,minLineLength=10,maxLineGap=40)
 
	if lines is None: lines=[]
 
	for line in lines:
 
		x1,y1,x2,y2 = line[0]
 
		cv.line(colorImg,(x1,y1),(x2,y2),(0,255,0),1)
 

	
 
	show(colorImg)