Changeset - 386837ecb39c
Branch: default
Laman - 2019-04-03 00:05:48

option of running non-interactively
2 files changed with 17 insertions and 0 deletions:
exp/config.py
new file 100644
 
import os

# INTERACTIVE toggles between showing images in a window and saving them to imgDir
INTERACTIVE=False
imgDir="/tmp/oneEye"
i=1 # sequence number for the saved images

if not os.path.exists(imgDir):
	os.mkdir(imgDir)
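
The flag is read by show() in exp/hough.py below. A minimal sketch of a headless run, assuming the exp modules are importable; the driver script and dummy image are illustrative only, not part of this changeset:

import numpy as np
import config as cfg
from hough import show              # show() as modified in exp/hough.py below

cfg.INTERACTIVE=False               # headless: images are written into cfg.imgDir
img=np.zeros((10,10),np.uint8)      # dummy image just for illustration
show(img,"debug output")            # saves "<timestamp> 001 debug output.png" into /tmp/oneEye
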
exp/hough.py
 
import sys
sys.path.append("../src")

import math
from datetime import datetime
import os.path
import logging as log

import numpy as np
import scipy.optimize
import scipy.signal
import cv2 as cv

import config as cfg
from geometry import EPoint,Line

DEBUG=True


class LineBag:
	def __init__(self):
		self._lines=[]

	def put(self,score,alpha,beta,peaks):
		self._lines.append((score,alpha,beta,peaks))

	def pull(self,count):
		self._lines.sort(reverse=True)
		res=[]
		for (score,alpha,beta,peaks) in self._lines:
			# skip lines too close in angle to one already picked
			if any(abs(alpha-gamma)<10 and abs(beta-delta)<10 for (_,gamma,delta,_) in res): continue
			# avoid intersecting lines
			if any((beta-delta)!=0 and (alpha-gamma)/(beta-delta)<0 for (_,gamma,delta,_) in res): continue
			res.append((score,alpha,beta,peaks))
			if len(res)>=count: break
		return res


class HoughTransform:
	"""Find line sequences with Hough transform.

	Uses the usual image coordinates on input and output, with [0,0] in the upper left corner and [height-1,width-1] in the lower right.
	Internally, however, it uses cartesian coordinates centered at the image center: [-w/2,h/2] in the upper left and [w/2,-h/2] in the lower right."""
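	# Worked example of the convention above: for a 100x80 image the center is (50,40),
	# so image pixel (0,0) in the upper left maps to internal (-50,40) and pixel (99,79)
	# in the lower right maps to (49,-39), matching (dx,dy)=(x-x0,y0-y) in _computeDist().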
 
	def __init__(self,img):
		self._angleBandwidth=30 # degrees

		(h,w)=img.shape[:2]
		self._diagLen=int(np.sqrt(h**2+w**2))+1
		self._center=(w//2,h//2)
		# accumulator indexed by (angle in degrees, point-line distance shifted by diagLen//2)
		self._acc=np.zeros((180,self._diagLen),dtype=np.int32)

		self.update(img)

	def extract(self):
		img=self._createImg()
		self.show(img)
		lines=self._detectLines()
		res=[]
		i=0
		for (score,alpha,beta,peaks) in lines:
			log.debug("score: %s",score)
			log.debug("alpha, beta: %s, %s",alpha,beta)
			self._drawLine(img,alpha,beta,peaks,i)

			res.append([])
			keys=self._readLineKeys(alpha,beta)
			for k in peaks:
				(alphaDeg,d)=keys[k]
				line=Line(alphaDeg*math.pi/180,d-self._diagLen//2)
				res[-1].append(self._transformOutput(line))
			res[-1].sort(key=lambda line: line.d)
			i+=1

		self.show(img)
		return res

	def update(self,img,weight=1):
		start=datetime.now().timestamp()
		for (r,row) in enumerate(img):
			for (c,pix) in enumerate(row):
				if pix==0: continue
				for alphaDeg in range(0,180):
					d=self._computeDist(c,r,alphaDeg)+self._diagLen//2
					self._acc[(alphaDeg,d)]+=weight
		log.debug("Hough updated in %s s",round(datetime.now().timestamp()-start,3))

	def scoreLine(self,line):
		transformed=self._transformInput(line)
		alphaDeg=round(transformed.alpha*180/math.pi)%180
		d=round(transformed.d+self._diagLen//2)
		if not 0<=d<self._diagLen: return 0
		return self._acc[(alphaDeg,d)]

	def show(self,img=None):
		if img is None: img=self._createImg()
		show(img,"Hough transform accumulator")

	def _computeDist(self,x,y,alphaDeg):
		alphaRad=alphaDeg*math.pi/180
		(x0,y0)=self._center
		(dx,dy)=(x-x0,y0-y)
		d=dx*math.cos(alphaRad)+dy*math.sin(alphaRad)
		return int(d)

	def _detectLines(self):
		bag=LineBag()
		for alpha in range(0,180+60,2):
			for beta in range(max(alpha-60,0),min(alpha+60,180+60),2):
				accLine=[self._acc[key] for key in self._readLineKeys(alpha,beta)]
				(peaks,props)=scipy.signal.find_peaks(accLine,prominence=0)
				(prominences,peaks)=zip(*sorted(zip(props["prominences"],peaks),reverse=True)[:19])
				bag.put(sum(prominences),alpha,beta,peaks)
		return bag.pull(2)

	def _readLineKeys(self,alpha,beta):
		n=self._diagLen-1
		res=[]
		for i in range(n+1):
			k=round((alpha*(n-i)+beta*i)/n)
			if k>=180:
				k=k%180
				i=n-i
			res.append((k,i))
		return res

	def _transformInput(self,line):
		reflectedLine=Line(math.pi*2-line.alpha,line.d)
		(x,y)=self._center
		basis=EPoint(x,-y)
		shiftedLine=reflectedLine.shiftBasis(basis)
		if shiftedLine.alpha>=math.pi:
			shiftedLine=Line(shiftedLine.alpha-math.pi,-shiftedLine.d)
		return shiftedLine

	def _transformOutput(self,line):
		(x,y)=self._center
		basis=EPoint(-x,y)
		shiftedLine=line.shiftBasis(basis)
		reflectedLine=Line(math.pi*2-shiftedLine.alpha,shiftedLine.d)
		log.debug("%s -> %s",line,reflectedLine)
		return reflectedLine

	def _createImg(self):
		maxVal=self._acc.max()
		arr=np.expand_dims(np.uint8(255*self._acc//maxVal),axis=2)
		img=np.concatenate((arr,arr,arr),axis=2)

		(h,w)=img.shape[:2]

		for x in range(0,w,4): # y axis
			img[h//2,x]=[255,255,255]
		for y in range(0,h,4):
			img[y,w//2]=[255,255,255]

		return img

	def _drawLine(self,img,alpha,beta,peaks,colorKey):
		colors=[[0,255,255],[255,0,255],[255,255,0]]
		color=colors[colorKey]
		(h,w)=img.shape[:2]
		keys=self._readLineKeys(alpha,beta)
		for (y,x) in keys:
			if x%3!=0: continue
			if y<0 or y>=h: continue
			img[y,x]=color
		for k in peaks:
			(y,x)=keys[k]
			cv.drawMarker(img,(x,y),color,cv.MARKER_TILTED_CROSS,8)


def show(img,filename="x"):
	if cfg.INTERACTIVE:
		cv.imshow(filename,img)
		cv.waitKey(0)
		cv.destroyAllWindows()
	else:
		d=int(datetime.now().timestamp())
		path=os.path.join(cfg.imgDir,"{0} {1:03} {2}.png".format(d,cfg.i,filename))
		cfg.i+=1
		cv.imwrite(path,img)


def filterVert(edges):
	kernel = np.array([[1,0,1],[1,0,1],[1,0,1]],np.uint8)
	edges = cv.erode(edges,kernel)
	kernel=np.array([[0,1,0],[0,1,0],[0,1,0]],np.uint8)
	edges=cv.dilate(edges,kernel)
	return edges

def filterHor(edges):
	kernel = np.array([[1,1,1],[0,0,0],[1,1,1]],np.uint8)
	edges = cv.erode(edges,kernel)
	kernel=np.array([[0,0,0],[1,1,1],[0,0,0]],np.uint8)
	edges=cv.dilate(edges,kernel)
	return edges

def filterDiag(edges):
	kernel = np.array([[0,0,1],[1,0,0],[0,1,0]],np.uint8)
	edges1 = cv.erode(edges,kernel)
	kernel=np.array([[1,0,0],[0,1,0],[0,0,1]],np.uint8)
	edges1=cv.dilate(edges1,kernel)

	kernel = np.array([[0,1,0],[1,0,0],[0,0,1]],np.uint8)
	edges2 = cv.erode(edges,kernel)
	kernel=np.array([[0,0,1],[0,1,0],[1,0,0]],np.uint8)
	edges2=cv.dilate(edges2,kernel)

	return edges1+edges2

def prepareEdgeImg(img):
	gray=cv.cvtColor(img,cv.COLOR_BGR2GRAY)
	show(gray,"greyscale image")
	edges=cv.Canny(gray,70,130)
	show(edges,"Canny edge detector")
	edges=filterHor(edges)+filterVert(edges)+filterDiag(edges)
	show(edges,"kernel filtered edges")
	return edges
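
For context, a rough sketch of how the pieces above could be chained into a detection run; the driver, input path and printout are assumptions, not part of this changeset:

import cv2 as cv
import config as cfg
from hough import HoughTransform,prepareEdgeImg

cfg.INTERACTIVE=False                    # save intermediate images instead of opening windows
img=cv.imread("board.jpg")               # hypothetical input photo
edges=prepareEdgeImg(img)                # greyscale -> Canny -> kernel-filtered edges
hough=HoughTransform(edges)              # accumulate votes from the edge pixels
lineGroups=hough.extract()               # two groups of Line objects, one per dominant direction
for lines in lineGroups:
	print([line.d for line in lines])    # distances of the detected grid lines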